/*
 * virtual page mapping and translated block handling
 *
 * Copyright (c) 2003 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
 */
#include "config.h"
#ifdef _WIN32
#include <windows.h>
#else
#include <sys/types.h>
#include <sys/mman.h>
#endif
#include <stdlib.h>
#include <stdio.h>
#include <stdarg.h>
#include <string.h>
#include <errno.h>
#include <unistd.h>
#include <inttypes.h>

#include "cpu.h"
#include "exec-all.h"
#if defined(CONFIG_USER_ONLY)
#include <qemu.h>
#endif

//#define DEBUG_TB_INVALIDATE
//#define DEBUG_FLUSH
//#define DEBUG_TLB
//#define DEBUG_UNASSIGNED

/* make various TB consistency checks */
//#define DEBUG_TB_CHECK
//#define DEBUG_TLB_CHECK

#if !defined(CONFIG_USER_ONLY)
/* TB consistency checks only implemented for usermode emulation.  */
#undef DEBUG_TB_CHECK
#endif

/* threshold to flush the translated code buffer */
#define CODE_GEN_BUFFER_MAX_SIZE (CODE_GEN_BUFFER_SIZE - CODE_GEN_MAX_SIZE)

#define SMC_BITMAP_USE_THRESHOLD 10

#define MMAP_AREA_START 0x00000000
#define MMAP_AREA_END   0xa8000000

#if defined(TARGET_SPARC64)
#define TARGET_PHYS_ADDR_SPACE_BITS 41
#elif defined(TARGET_PPC64)
#define TARGET_PHYS_ADDR_SPACE_BITS 42
#else
/* Note: for compatibility with kqemu, we use 32 bits for x86_64 */
#define TARGET_PHYS_ADDR_SPACE_BITS 32
#endif

TranslationBlock tbs[CODE_GEN_MAX_BLOCKS];
TranslationBlock *tb_phys_hash[CODE_GEN_PHYS_HASH_SIZE];
int nb_tbs;
/* any access to the tbs or the page table must use this lock */
spinlock_t tb_lock = SPIN_LOCK_UNLOCKED;

uint8_t code_gen_buffer[CODE_GEN_BUFFER_SIZE] __attribute__((aligned (32)));
uint8_t *code_gen_ptr;

int phys_ram_size;
int phys_ram_fd;
uint8_t *phys_ram_base;
uint8_t *phys_ram_dirty;
static ram_addr_t phys_ram_alloc_offset = 0;

CPUState *first_cpu;
/* current CPU in the current thread. It is only valid inside
   cpu_exec() */
CPUState *cpu_single_env;

typedef struct PageDesc {
    /* list of TBs intersecting this ram page */
    TranslationBlock *first_tb;
    /* in order to optimize self modifying code, we count the number
       of lookups we do to a given page to use a bitmap */
    unsigned int code_write_count;
    uint8_t *code_bitmap;
#if defined(CONFIG_USER_ONLY)
    unsigned long flags;
#endif
} PageDesc;

typedef struct PhysPageDesc {
    /* offset in host memory of the page + io_index in the low 12 bits */
    uint32_t phys_offset;
} PhysPageDesc;

#define L2_BITS 10
#define L1_BITS (32 - L2_BITS - TARGET_PAGE_BITS)

#define L1_SIZE (1 << L1_BITS)
#define L2_SIZE (1 << L2_BITS)
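
/* The page maps below are two-level radix trees indexed by target page
   number: the top L1_BITS of a 32-bit address select an L1 slot, the next
   L2_BITS select the entry inside an L2 chunk allocated on demand. As a
   worked example, on a target with 4 KB pages (TARGET_PAGE_BITS = 12) this
   gives a 10/10/12 split: L1_SIZE = L2_SIZE = 1024, so one L2 chunk covers
   4 MB of target address space. */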

static void io_mem_init(void);

unsigned long qemu_real_host_page_size;
unsigned long qemu_host_page_bits;
unsigned long qemu_host_page_size;
unsigned long qemu_host_page_mask;

/* XXX: for system emulation, it could just be an array */
static PageDesc *l1_map[L1_SIZE];
PhysPageDesc **l1_phys_map;

/* io memory support */
CPUWriteMemoryFunc *io_mem_write[IO_MEM_NB_ENTRIES][4];
CPUReadMemoryFunc *io_mem_read[IO_MEM_NB_ENTRIES][4];
void *io_mem_opaque[IO_MEM_NB_ENTRIES];
static int io_mem_nb;
#if defined(CONFIG_SOFTMMU)
static int io_mem_watch;
#endif

/* log support */
char *logfilename = "/tmp/qemu.log";
FILE *logfile;
int loglevel;

/* statistics */
static int tlb_flush_count;
static int tb_flush_count;
static int tb_phys_invalidate_count;

static void page_init(void)
{
    /* NOTE: we can always suppose that qemu_host_page_size >=
       TARGET_PAGE_SIZE */
#ifdef _WIN32
    {
        SYSTEM_INFO system_info;
        DWORD old_protect;

        GetSystemInfo(&system_info);
        qemu_real_host_page_size = system_info.dwPageSize;

        VirtualProtect(code_gen_buffer, sizeof(code_gen_buffer),
                       PAGE_EXECUTE_READWRITE, &old_protect);
    }
#else
    qemu_real_host_page_size = getpagesize();
    {
        unsigned long start, end;

        start = (unsigned long)code_gen_buffer;
        start &= ~(qemu_real_host_page_size - 1);

        end = (unsigned long)code_gen_buffer + sizeof(code_gen_buffer);
        end += qemu_real_host_page_size - 1;
        end &= ~(qemu_real_host_page_size - 1);

        mprotect((void *)start, end - start,
                 PROT_READ | PROT_WRITE | PROT_EXEC);
    }
#endif

    if (qemu_host_page_size == 0)
        qemu_host_page_size = qemu_real_host_page_size;
    if (qemu_host_page_size < TARGET_PAGE_SIZE)
        qemu_host_page_size = TARGET_PAGE_SIZE;
    qemu_host_page_bits = 0;
    while ((1 << qemu_host_page_bits) < qemu_host_page_size)
        qemu_host_page_bits++;
    qemu_host_page_mask = ~(qemu_host_page_size - 1);
    l1_phys_map = qemu_vmalloc(L1_SIZE * sizeof(void *));
    memset(l1_phys_map, 0, L1_SIZE * sizeof(void *));
}

static inline PageDesc *page_find_alloc(unsigned int index)
{
    PageDesc **lp, *p;

    lp = &l1_map[index >> L2_BITS];
    p = *lp;
    if (!p) {
        /* allocate if not found */
        p = qemu_malloc(sizeof(PageDesc) * L2_SIZE);
        memset(p, 0, sizeof(PageDesc) * L2_SIZE);
        *lp = p;
    }
    return p + (index & (L2_SIZE - 1));
}

static inline PageDesc *page_find(unsigned int index)
{
    PageDesc *p;

    p = l1_map[index >> L2_BITS];
    if (!p)
        return 0;
    return p + (index & (L2_SIZE - 1));
}

static PhysPageDesc *phys_page_find_alloc(target_phys_addr_t index, int alloc)
{
    void **lp, **p;
    PhysPageDesc *pd;

    p = (void **)l1_phys_map;
#if TARGET_PHYS_ADDR_SPACE_BITS > 32

#if TARGET_PHYS_ADDR_SPACE_BITS > (32 + L1_BITS)
#error unsupported TARGET_PHYS_ADDR_SPACE_BITS
#endif
    lp = p + ((index >> (L1_BITS + L2_BITS)) & (L1_SIZE - 1));
    p = *lp;
    if (!p) {
        /* allocate if not found */
        if (!alloc)
            return NULL;
        p = qemu_vmalloc(sizeof(void *) * L1_SIZE);
        memset(p, 0, sizeof(void *) * L1_SIZE);
        *lp = p;
    }
#endif
    lp = p + ((index >> L2_BITS) & (L1_SIZE - 1));
    pd = *lp;
    if (!pd) {
        int i;
        /* allocate if not found */
        if (!alloc)
            return NULL;
        pd = qemu_vmalloc(sizeof(PhysPageDesc) * L2_SIZE);
        *lp = pd;
        for (i = 0; i < L2_SIZE; i++)
            pd[i].phys_offset = IO_MEM_UNASSIGNED;
    }
    return ((PhysPageDesc *)pd) + (index & (L2_SIZE - 1));
}

static inline PhysPageDesc *phys_page_find(target_phys_addr_t index)
{
    return phys_page_find_alloc(index, 0);
}
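
/* Note that when TARGET_PHYS_ADDR_SPACE_BITS exceeds 32, an extra outermost
   level indexed by the address bits above 32 is grafted onto the same
   two-level scheme, and freshly allocated leaf entries start out as
   IO_MEM_UNASSIGNED rather than zero, so lookups on unmapped physical pages
   naturally fall through to the unassigned I/O handlers. */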

#if !defined(CONFIG_USER_ONLY)
static void tlb_protect_code(ram_addr_t ram_addr);
static void tlb_unprotect_code_phys(CPUState *env, ram_addr_t ram_addr,
                                    target_ulong vaddr);
#endif

void cpu_exec_init(CPUState *env)
{
    CPUState **penv;
    int cpu_index;

    if (!code_gen_ptr) {
        code_gen_ptr = code_gen_buffer;
        page_init();
        io_mem_init();
    }
    env->next_cpu = NULL;
    penv = &first_cpu;
    cpu_index = 0;
    while (*penv != NULL) {
        penv = (CPUState **)&(*penv)->next_cpu;
        cpu_index++;
    }
    env->cpu_index = cpu_index;
    env->nb_watchpoints = 0;
    *penv = env;
}

static inline void invalidate_page_bitmap(PageDesc *p)
{
    if (p->code_bitmap) {
        qemu_free(p->code_bitmap);
        p->code_bitmap = NULL;
    }
    p->code_write_count = 0;
}

/* set to NULL all the 'first_tb' fields in all PageDescs */
static void page_flush_tb(void)
{
    int i, j;
    PageDesc *p;

    for(i = 0; i < L1_SIZE; i++) {
        p = l1_map[i];
        if (p) {
            for(j = 0; j < L2_SIZE; j++) {
                p->first_tb = NULL;
                invalidate_page_bitmap(p);
                p++;
            }
        }
    }
}

/* flush all the translation blocks */
/* XXX: tb_flush is currently not thread safe */
void tb_flush(CPUState *env1)
{
    CPUState *env;
#if defined(DEBUG_FLUSH)
    printf("qemu: flush code_size=%d nb_tbs=%d avg_tb_size=%d\n",
           code_gen_ptr - code_gen_buffer,
           nb_tbs,
           nb_tbs > 0 ? (code_gen_ptr - code_gen_buffer) / nb_tbs : 0);
#endif
    nb_tbs = 0;

    for(env = first_cpu; env != NULL; env = env->next_cpu) {
        memset (env->tb_jmp_cache, 0, TB_JMP_CACHE_SIZE * sizeof (void *));
    }

    memset (tb_phys_hash, 0, CODE_GEN_PHYS_HASH_SIZE * sizeof (void *));
    page_flush_tb();

    code_gen_ptr = code_gen_buffer;
    /* XXX: flush processor icache at this point if cache flush is
       expensive */
    tb_flush_count++;
}

#ifdef DEBUG_TB_CHECK

static void tb_invalidate_check(unsigned long address)
{
    TranslationBlock *tb;
    int i;
    address &= TARGET_PAGE_MASK;
    for(i = 0; i < CODE_GEN_PHYS_HASH_SIZE; i++) {
        for(tb = tb_phys_hash[i]; tb != NULL; tb = tb->phys_hash_next) {
            if (!(address + TARGET_PAGE_SIZE <= tb->pc ||
                  address >= tb->pc + tb->size)) {
                printf("ERROR invalidate: address=%08lx PC=%08lx size=%04x\n",
                       address, (long)tb->pc, tb->size);
            }
        }
    }
}

/* verify that all the pages have correct rights for code */
static void tb_page_check(void)
{
    TranslationBlock *tb;
    int i, flags1, flags2;

    for(i = 0; i < CODE_GEN_PHYS_HASH_SIZE; i++) {
        for(tb = tb_phys_hash[i]; tb != NULL; tb = tb->phys_hash_next) {
            flags1 = page_get_flags(tb->pc);
            flags2 = page_get_flags(tb->pc + tb->size - 1);
            if ((flags1 & PAGE_WRITE) || (flags2 & PAGE_WRITE)) {
                printf("ERROR page flags: PC=%08lx size=%04x f1=%x f2=%x\n",
                       (long)tb->pc, tb->size, flags1, flags2);
            }
        }
    }
}

void tb_jmp_check(TranslationBlock *tb)
{
    TranslationBlock *tb1;
    unsigned int n1;

    /* suppress any remaining jumps to this TB */
    tb1 = tb->jmp_first;
    for(;;) {
        n1 = (long)tb1 & 3;
        tb1 = (TranslationBlock *)((long)tb1 & ~3);
        if (n1 == 2)
            break;
        tb1 = tb1->jmp_next[n1];
    }
    /* check end of list */
    if (tb1 != tb) {
        printf("ERROR: jmp_list from 0x%08lx\n", (long)tb);
    }
}

#endif

/* invalidate one TB */
static inline void tb_remove(TranslationBlock **ptb, TranslationBlock *tb,
                             int next_offset)
{
    TranslationBlock *tb1;
    for(;;) {
        tb1 = *ptb;
        if (tb1 == tb) {
            *ptb = *(TranslationBlock **)((char *)tb1 + next_offset);
            break;
        }
        ptb = (TranslationBlock **)((char *)tb1 + next_offset);
    }
}
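
/* The page lists and jump lists below store a small tag in the low two bits
   of each TranslationBlock pointer: for page lists, tag n (0 or 1) records
   which of the TB's two possible physical pages the link belongs to; for
   jump lists, tag n records which of the TB's two jump slots points here,
   and the special tag value 2 marks the TB itself as the end of its own
   circular list (hence the "(long)tb | 2" initializers). This is why every
   traversal starts with "n1 = (long)tb1 & 3" and then clears the low bits. */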
static inline void tb_page_remove(TranslationBlock **ptb, TranslationBlock *tb)
{
    TranslationBlock *tb1;
    unsigned int n1;

    for(;;) {
        tb1 = *ptb;
        n1 = (long)tb1 & 3;
        tb1 = (TranslationBlock *)((long)tb1 & ~3);
        if (tb1 == tb) {
            *ptb = tb1->page_next[n1];
            break;
        }
        ptb = &tb1->page_next[n1];
    }
}

static inline void tb_jmp_remove(TranslationBlock *tb, int n)
{
    TranslationBlock *tb1, **ptb;
    unsigned int n1;

    ptb = &tb->jmp_next[n];
    tb1 = *ptb;
    if (tb1) {
        /* find tb(n) in circular list */
        for(;;) {
            tb1 = *ptb;
            n1 = (long)tb1 & 3;
            tb1 = (TranslationBlock *)((long)tb1 & ~3);
            if (n1 == n && tb1 == tb)
                break;
            if (n1 == 2) {
                ptb = &tb1->jmp_first;
            } else {
                ptb = &tb1->jmp_next[n1];
            }
        }
        /* now we can suppress tb(n) from the list */
        *ptb = tb->jmp_next[n];

        tb->jmp_next[n] = NULL;
    }
}

/* reset the jump entry 'n' of a TB so that it is not chained to
   another TB */
static inline void tb_reset_jump(TranslationBlock *tb, int n)
{
    tb_set_jmp_target(tb, n, (unsigned long)(tb->tc_ptr + tb->tb_next_offset[n]));
}

static inline void tb_phys_invalidate(TranslationBlock *tb, unsigned int page_addr)
{
    CPUState *env;
    PageDesc *p;
    unsigned int h, n1;
    target_ulong phys_pc;
    TranslationBlock *tb1, *tb2;

    /* remove the TB from the hash list */
    phys_pc = tb->page_addr[0] + (tb->pc & ~TARGET_PAGE_MASK);
    h = tb_phys_hash_func(phys_pc);
    tb_remove(&tb_phys_hash[h], tb,
              offsetof(TranslationBlock, phys_hash_next));

    /* remove the TB from the page list */
    if (tb->page_addr[0] != page_addr) {
        p = page_find(tb->page_addr[0] >> TARGET_PAGE_BITS);
        tb_page_remove(&p->first_tb, tb);
        invalidate_page_bitmap(p);
    }
    if (tb->page_addr[1] != -1 && tb->page_addr[1] != page_addr) {
        p = page_find(tb->page_addr[1] >> TARGET_PAGE_BITS);
        tb_page_remove(&p->first_tb, tb);
        invalidate_page_bitmap(p);
    }

    tb_invalidated_flag = 1;

    /* remove the TB from each CPU's jump cache */
    h = tb_jmp_cache_hash_func(tb->pc);
    for(env = first_cpu; env != NULL; env = env->next_cpu) {
        if (env->tb_jmp_cache[h] == tb)
            env->tb_jmp_cache[h] = NULL;
    }

    /* suppress this TB from the two jump lists */
    tb_jmp_remove(tb, 0);
    tb_jmp_remove(tb, 1);

    /* suppress any remaining jumps to this TB */
    tb1 = tb->jmp_first;
    for(;;) {
        n1 = (long)tb1 & 3;
        if (n1 == 2)
            break;
        tb1 = (TranslationBlock *)((long)tb1 & ~3);
        tb2 = tb1->jmp_next[n1];
        tb_reset_jump(tb1, n1);
        tb1->jmp_next[n1] = NULL;
        tb1 = tb2;
    }
    tb->jmp_first = (TranslationBlock *)((long)tb | 2); /* fail safe */

    tb_phys_invalidate_count++;
}

static inline void set_bits(uint8_t *tab, int start, int len)
{
    int end, mask, end1;

    end = start + len;
    tab += start >> 3;
    mask = 0xff << (start & 7);
    if ((start & ~7) == (end & ~7)) {
        if (start < end) {
            mask &= ~(0xff << (end & 7));
            *tab |= mask;
        }
    } else {
        *tab++ |= mask;
        start = (start + 8) & ~7;
        end1 = end & ~7;
        while (start < end1) {
            *tab++ = 0xff;
            start += 8;
        }
        if (start < end) {
            mask = ~(0xff << (end & 7));
            *tab |= mask;
        }
    }
}
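
/* A quick worked example of set_bits(): set_bits(tab, 3, 7) marks bits 3..9,
   which straddles a byte boundary, so it takes the "else" branch above:
   tab[0] |= 0xf8 (bits 3-7), no full 0xff bytes, then tab[1] |= 0x03
   (bits 8-9). A call that stays within one byte, such as
   set_bits(tab, 2, 4), takes the first branch and does a single
   tab[0] |= 0x3c. */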

static void build_page_bitmap(PageDesc *p)
{
    int n, tb_start, tb_end;
    TranslationBlock *tb;

    p->code_bitmap = qemu_malloc(TARGET_PAGE_SIZE / 8);
    if (!p->code_bitmap)
        return;
    memset(p->code_bitmap, 0, TARGET_PAGE_SIZE / 8);

    tb = p->first_tb;
    while (tb != NULL) {
        n = (long)tb & 3;
        tb = (TranslationBlock *)((long)tb & ~3);
        /* NOTE: this is subtle as a TB may span two physical pages */
        if (n == 0) {
            /* NOTE: tb_end may be after the end of the page, but
               it is not a problem */
            tb_start = tb->pc & ~TARGET_PAGE_MASK;
            tb_end = tb_start + tb->size;
            if (tb_end > TARGET_PAGE_SIZE)
                tb_end = TARGET_PAGE_SIZE;
        } else {
            tb_start = 0;
            tb_end = ((tb->pc + tb->size) & ~TARGET_PAGE_MASK);
        }
        set_bits(p->code_bitmap, tb_start, tb_end - tb_start);
        tb = tb->page_next[n];
    }
}

#ifdef TARGET_HAS_PRECISE_SMC

static void tb_gen_code(CPUState *env,
                        target_ulong pc, target_ulong cs_base, int flags,
                        int cflags)
{
    TranslationBlock *tb;
    uint8_t *tc_ptr;
    target_ulong phys_pc, phys_page2, virt_page2;
    int code_gen_size;

    phys_pc = get_phys_addr_code(env, pc);
    tb = tb_alloc(pc);
    if (!tb) {
        /* flush must be done */
        tb_flush(env);
        /* cannot fail at this point */
        tb = tb_alloc(pc);
    }
    tc_ptr = code_gen_ptr;
    tb->tc_ptr = tc_ptr;
    tb->cs_base = cs_base;
    tb->flags = flags;
    tb->cflags = cflags;
    cpu_gen_code(env, tb, CODE_GEN_MAX_SIZE, &code_gen_size);
    code_gen_ptr = (void *)(((unsigned long)code_gen_ptr + code_gen_size + CODE_GEN_ALIGN - 1) & ~(CODE_GEN_ALIGN - 1));

    /* check next page if needed */
    virt_page2 = (pc + tb->size - 1) & TARGET_PAGE_MASK;
    phys_page2 = -1;
    if ((pc & TARGET_PAGE_MASK) != virt_page2) {
        phys_page2 = get_phys_addr_code(env, virt_page2);
    }
    tb_link_phys(tb, phys_pc, phys_page2);
}
#endif

/* invalidate all TBs which intersect with the target physical page
   starting in range [start;end[. NOTE: start and end must refer to
   the same physical page. 'is_cpu_write_access' should be true if called
   from a real cpu write access: the virtual CPU will exit the current
   TB if code is modified inside this TB. */
void tb_invalidate_phys_page_range(target_ulong start, target_ulong end,
                                   int is_cpu_write_access)
{
    int n, current_tb_modified, current_tb_not_found, current_flags;
    CPUState *env = cpu_single_env;
    PageDesc *p;
    TranslationBlock *tb, *tb_next, *current_tb, *saved_tb;
    target_ulong tb_start, tb_end;
    target_ulong current_pc, current_cs_base;

    p = page_find(start >> TARGET_PAGE_BITS);
    if (!p)
        return;
    if (!p->code_bitmap &&
        ++p->code_write_count >= SMC_BITMAP_USE_THRESHOLD &&
        is_cpu_write_access) {
        /* build code bitmap */
        build_page_bitmap(p);
    }

    /* we remove all the TBs in the range [start, end[ */
    /* XXX: see if in some cases it could be faster to invalidate all the code */
    current_tb_not_found = is_cpu_write_access;
    current_tb_modified = 0;
    current_tb = NULL; /* avoid warning */
    current_pc = 0; /* avoid warning */
    current_cs_base = 0; /* avoid warning */
    current_flags = 0; /* avoid warning */
    tb = p->first_tb;
    while (tb != NULL) {
        n = (long)tb & 3;
        tb = (TranslationBlock *)((long)tb & ~3);
        tb_next = tb->page_next[n];
        /* NOTE: this is subtle as a TB may span two physical pages */
        if (n == 0) {
            /* NOTE: tb_end may be after the end of the page, but
               it is not a problem */
            tb_start = tb->page_addr[0] + (tb->pc & ~TARGET_PAGE_MASK);
            tb_end = tb_start + tb->size;
        } else {
            tb_start = tb->page_addr[1];
            tb_end = tb_start + ((tb->pc + tb->size) & ~TARGET_PAGE_MASK);
        }
        if (!(tb_end <= start || tb_start >= end)) {
#ifdef TARGET_HAS_PRECISE_SMC
            if (current_tb_not_found) {
                current_tb_not_found = 0;
                current_tb = NULL;
                if (env->mem_write_pc) {
                    /* now we have a real cpu fault */
                    current_tb = tb_find_pc(env->mem_write_pc);
                }
            }
            if (current_tb == tb &&
                !(current_tb->cflags & CF_SINGLE_INSN)) {
                /* If we are modifying the current TB, we must stop
                   its execution. We could be more precise by checking
                   that the modification is after the current PC, but it
                   would require a specialized function to partially
                   restore the CPU state */

                current_tb_modified = 1;
                cpu_restore_state(current_tb, env,
                                  env->mem_write_pc, NULL);
#if defined(TARGET_I386)
                current_flags = env->hflags;
                current_flags |= (env->eflags & (IOPL_MASK | TF_MASK | VM_MASK));
                current_cs_base = (target_ulong)env->segs[R_CS].base;
                current_pc = current_cs_base + env->eip;
#else
#error unsupported CPU
#endif
            }
#endif /* TARGET_HAS_PRECISE_SMC */
            /* we need to do that to handle the case where a signal
               occurs while doing tb_phys_invalidate() */
            saved_tb = NULL;
            if (env) {
                saved_tb = env->current_tb;
                env->current_tb = NULL;
            }
            tb_phys_invalidate(tb, -1);
            if (env) {
                env->current_tb = saved_tb;
                if (env->interrupt_request && env->current_tb)
                    cpu_interrupt(env, env->interrupt_request);
            }
        }
        tb = tb_next;
    }
#if !defined(CONFIG_USER_ONLY)
    /* if no code remaining, no need to continue to use slow writes */
    if (!p->first_tb) {
        invalidate_page_bitmap(p);
        if (is_cpu_write_access) {
            tlb_unprotect_code_phys(env, start, env->mem_write_vaddr);
        }
    }
#endif
#ifdef TARGET_HAS_PRECISE_SMC
    if (current_tb_modified) {
        /* we generate a block containing just the instruction
           modifying the memory. It will ensure that it cannot modify
           itself */
        env->current_tb = NULL;
        tb_gen_code(env, current_pc, current_cs_base, current_flags,
                    CF_SINGLE_INSN);
        cpu_resume_from_signal(env, NULL);
    }
#endif
}

/* len must be <= 8 and start must be a multiple of len */
static inline void tb_invalidate_phys_page_fast(target_ulong start, int len)
{
    PageDesc *p;
    int offset, b;
#if 0
    if (1) {
        if (loglevel) {
            fprintf(logfile, "modifying code at 0x%x size=%d EIP=%x PC=%08x\n",
                    cpu_single_env->mem_write_vaddr, len,
                    cpu_single_env->eip,
                    cpu_single_env->eip + (long)cpu_single_env->segs[R_CS].base);
        }
    }
#endif
    p = page_find(start >> TARGET_PAGE_BITS);
    if (!p)
        return;
    if (p->code_bitmap) {
        offset = start & ~TARGET_PAGE_MASK;
        b = p->code_bitmap[offset >> 3] >> (offset & 7);
        if (b & ((1 << len) - 1))
            goto do_invalidate;
    } else {
    do_invalidate:
        tb_invalidate_phys_page_range(start, start + len, 1);
    }
}
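
/* The bitmap test above is the common fast path for self-modifying code
   checks: code_bitmap holds one bit per byte of the page, so for, say, a
   4-byte write at page offset 0x104 it loads bitmap byte 0x20, shifts right
   by 4 and masks with 0x0f; only if one of those four bits is set does the
   slow tb_invalidate_phys_page_range() path run. The "start must be a
   multiple of len" precondition guarantees the len bits never straddle a
   bitmap byte. */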

#if !defined(CONFIG_SOFTMMU)
static void tb_invalidate_phys_page(target_ulong addr,
                                    unsigned long pc, void *puc)
{
    int n, current_flags, current_tb_modified;
    target_ulong current_pc, current_cs_base;
    PageDesc *p;
    TranslationBlock *tb, *current_tb;
#ifdef TARGET_HAS_PRECISE_SMC
    CPUState *env = cpu_single_env;
#endif

    addr &= TARGET_PAGE_MASK;
    p = page_find(addr >> TARGET_PAGE_BITS);
    if (!p)
        return;
    tb = p->first_tb;
    current_tb_modified = 0;
    current_tb = NULL;
    current_pc = 0; /* avoid warning */
    current_cs_base = 0; /* avoid warning */
    current_flags = 0; /* avoid warning */
#ifdef TARGET_HAS_PRECISE_SMC
    if (tb && pc != 0) {
        current_tb = tb_find_pc(pc);
    }
#endif
    while (tb != NULL) {
        n = (long)tb & 3;
        tb = (TranslationBlock *)((long)tb & ~3);
#ifdef TARGET_HAS_PRECISE_SMC
        if (current_tb == tb &&
            !(current_tb->cflags & CF_SINGLE_INSN)) {
            /* If we are modifying the current TB, we must stop
               its execution. We could be more precise by checking
               that the modification is after the current PC, but it
               would require a specialized function to partially
               restore the CPU state */

            current_tb_modified = 1;
            cpu_restore_state(current_tb, env, pc, puc);
#if defined(TARGET_I386)
            current_flags = env->hflags;
            current_flags |= (env->eflags & (IOPL_MASK | TF_MASK | VM_MASK));
            current_cs_base = (target_ulong)env->segs[R_CS].base;
            current_pc = current_cs_base + env->eip;
#else
#error unsupported CPU
#endif
        }
#endif /* TARGET_HAS_PRECISE_SMC */
        tb_phys_invalidate(tb, addr);
        tb = tb->page_next[n];
    }
    p->first_tb = NULL;
#ifdef TARGET_HAS_PRECISE_SMC
    if (current_tb_modified) {
        /* we generate a block containing just the instruction
           modifying the memory. It will ensure that it cannot modify
           itself */
        env->current_tb = NULL;
        tb_gen_code(env, current_pc, current_cs_base, current_flags,
                    CF_SINGLE_INSN);
        cpu_resume_from_signal(env, puc);
    }
#endif
}
#endif

/* add the tb in the target page and protect it if necessary */
static inline void tb_alloc_page(TranslationBlock *tb,
                                 unsigned int n, target_ulong page_addr)
{
    PageDesc *p;
    TranslationBlock *last_first_tb;

    tb->page_addr[n] = page_addr;
    p = page_find_alloc(page_addr >> TARGET_PAGE_BITS);
    tb->page_next[n] = p->first_tb;
    last_first_tb = p->first_tb;
    p->first_tb = (TranslationBlock *)((long)tb | n);
    invalidate_page_bitmap(p);

#if defined(TARGET_HAS_SMC) || 1

#if defined(CONFIG_USER_ONLY)
    if (p->flags & PAGE_WRITE) {
        target_ulong addr;
        PageDesc *p2;
        int prot;

        /* force the host page as non writable (writes will have a
           page fault + mprotect overhead) */
        page_addr &= qemu_host_page_mask;
        prot = 0;
        for(addr = page_addr; addr < page_addr + qemu_host_page_size;
            addr += TARGET_PAGE_SIZE) {

            p2 = page_find (addr >> TARGET_PAGE_BITS);
            if (!p2)
                continue;
            prot |= p2->flags;
            p2->flags &= ~PAGE_WRITE;
            page_get_flags(addr);
        }
        mprotect(g2h(page_addr), qemu_host_page_size,
                 (prot & PAGE_BITS) & ~PAGE_WRITE);
#ifdef DEBUG_TB_INVALIDATE
        printf("protecting code page: 0x%08lx\n",
               page_addr);
#endif
    }
#else
    /* if some code is already present, then the pages are already
       protected. So we handle the case where only the first TB is
       allocated in a physical page */
    if (!last_first_tb) {
        tlb_protect_code(page_addr);
    }
#endif

#endif /* TARGET_HAS_SMC */
}
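
/* Two different write-protection strategies are visible above for catching
   self-modifying code: in user-mode emulation the host page backing the
   guest code is mprotect()ed read-only, so a guest write raises SIGSEGV and
   is handled through the signal path, while in softmmu mode
   tlb_protect_code() clears the page's CODE_DIRTY_FLAG so that guest stores
   are forced through the slow-path notdirty handlers instead. */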

/* Allocate a new translation block. Flush the translation buffer if
   too many translation blocks or too much generated code. */
TranslationBlock *tb_alloc(target_ulong pc)
{
    TranslationBlock *tb;

    if (nb_tbs >= CODE_GEN_MAX_BLOCKS ||
        (code_gen_ptr - code_gen_buffer) >= CODE_GEN_BUFFER_MAX_SIZE)
        return NULL;
    tb = &tbs[nb_tbs++];
    tb->pc = pc;
    tb->cflags = 0;
    return tb;
}

/* add a new TB and link it to the physical page tables. phys_page2 is
   (-1) to indicate that only one page contains the TB. */
void tb_link_phys(TranslationBlock *tb,
                  target_ulong phys_pc, target_ulong phys_page2)
{
    unsigned int h;
    TranslationBlock **ptb;

    /* add in the physical hash table */
    h = tb_phys_hash_func(phys_pc);
    ptb = &tb_phys_hash[h];
    tb->phys_hash_next = *ptb;
    *ptb = tb;

    /* add in the page list */
    tb_alloc_page(tb, 0, phys_pc & TARGET_PAGE_MASK);
    if (phys_page2 != -1)
        tb_alloc_page(tb, 1, phys_page2);
    else
        tb->page_addr[1] = -1;

    tb->jmp_first = (TranslationBlock *)((long)tb | 2);
    tb->jmp_next[0] = NULL;
    tb->jmp_next[1] = NULL;
#ifdef USE_CODE_COPY
    tb->cflags &= ~CF_FP_USED;
    if (tb->cflags & CF_TB_FP_USED)
        tb->cflags |= CF_FP_USED;
#endif

    /* init original jump addresses */
    if (tb->tb_next_offset[0] != 0xffff)
        tb_reset_jump(tb, 0);
    if (tb->tb_next_offset[1] != 0xffff)
        tb_reset_jump(tb, 1);

#ifdef DEBUG_TB_CHECK
    tb_page_check();
#endif
}

/* find the TB 'tb' such that tb[0].tc_ptr <= tc_ptr <
   tb[1].tc_ptr. Return NULL if not found */
TranslationBlock *tb_find_pc(unsigned long tc_ptr)
{
    int m_min, m_max, m;
    unsigned long v;
    TranslationBlock *tb;

    if (nb_tbs <= 0)
        return NULL;
    if (tc_ptr < (unsigned long)code_gen_buffer ||
        tc_ptr >= (unsigned long)code_gen_ptr)
        return NULL;
    /* binary search (cf Knuth) */
    m_min = 0;
    m_max = nb_tbs - 1;
    while (m_min <= m_max) {
        m = (m_min + m_max) >> 1;
        tb = &tbs[m];
        v = (unsigned long)tb->tc_ptr;
        if (v == tc_ptr)
            return tb;
        else if (tc_ptr < v) {
            m_max = m - 1;
        } else {
            m_min = m + 1;
        }
    }
    return &tbs[m_max];
}
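
/* The binary search above works because TBs are handed out sequentially
   from tbs[] and their code is carved linearly out of code_gen_buffer, so
   tbs[] is implicitly sorted by tc_ptr; when the loop exits without an
   exact hit, tbs[m_max] is the last block whose tc_ptr lies below the probe
   address, i.e. the block containing it. */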

static void tb_reset_jump_recursive(TranslationBlock *tb);

static inline void tb_reset_jump_recursive2(TranslationBlock *tb, int n)
{
    TranslationBlock *tb1, *tb_next, **ptb;
    unsigned int n1;

    tb1 = tb->jmp_next[n];
    if (tb1 != NULL) {
        /* find head of list */
        for(;;) {
            n1 = (long)tb1 & 3;
            tb1 = (TranslationBlock *)((long)tb1 & ~3);
            if (n1 == 2)
                break;
            tb1 = tb1->jmp_next[n1];
        }
        /* we are now sure that tb jumps to tb1 */
        tb_next = tb1;

        /* remove tb from the jmp_first list */
        ptb = &tb_next->jmp_first;
        for(;;) {
            tb1 = *ptb;
            n1 = (long)tb1 & 3;
            tb1 = (TranslationBlock *)((long)tb1 & ~3);
            if (n1 == n && tb1 == tb)
                break;
            ptb = &tb1->jmp_next[n1];
        }
        *ptb = tb->jmp_next[n];
        tb->jmp_next[n] = NULL;

        /* suppress the jump to next tb in generated code */
        tb_reset_jump(tb, n);

        /* suppress jumps in the tb on which we could have jumped */
        tb_reset_jump_recursive(tb_next);
    }
}

static void tb_reset_jump_recursive(TranslationBlock *tb)
{
    tb_reset_jump_recursive2(tb, 0);
    tb_reset_jump_recursive2(tb, 1);
}

#if defined(TARGET_HAS_ICE)
static void breakpoint_invalidate(CPUState *env, target_ulong pc)
{
    target_ulong addr, pd;
    ram_addr_t ram_addr;
    PhysPageDesc *p;

    addr = cpu_get_phys_page_debug(env, pc);
    p = phys_page_find(addr >> TARGET_PAGE_BITS);
    if (!p) {
        pd = IO_MEM_UNASSIGNED;
    } else {
        pd = p->phys_offset;
    }
    ram_addr = (pd & TARGET_PAGE_MASK) | (pc & ~TARGET_PAGE_MASK);
    tb_invalidate_phys_page_range(ram_addr, ram_addr + 1, 0);
}
#endif

/* Add a watchpoint. */
int cpu_watchpoint_insert(CPUState *env, target_ulong addr)
{
    int i;

    for (i = 0; i < env->nb_watchpoints; i++) {
        if (addr == env->watchpoint[i].vaddr)
            return 0;
    }
    if (env->nb_watchpoints >= MAX_WATCHPOINTS)
        return -1;

    i = env->nb_watchpoints++;
    env->watchpoint[i].vaddr = addr;
    tlb_flush_page(env, addr);
    /* FIXME: This flush is needed because of the hack to make memory ops
       terminate the TB. It can be removed once the proper IO trap and
       re-execute bits are in. */
    tb_flush(env);
    return i;
}

/* Remove a watchpoint. */
int cpu_watchpoint_remove(CPUState *env, target_ulong addr)
{
    int i;

    for (i = 0; i < env->nb_watchpoints; i++) {
        if (addr == env->watchpoint[i].vaddr) {
            env->nb_watchpoints--;
            env->watchpoint[i] = env->watchpoint[env->nb_watchpoints];
            tlb_flush_page(env, addr);
            return 0;
        }
    }
    return -1;
}

/* add a breakpoint. EXCP_DEBUG is returned by the CPU loop if a
   breakpoint is reached */
int cpu_breakpoint_insert(CPUState *env, target_ulong pc)
{
#if defined(TARGET_HAS_ICE)
    int i;

    for(i = 0; i < env->nb_breakpoints; i++) {
        if (env->breakpoints[i] == pc)
            return 0;
    }

    if (env->nb_breakpoints >= MAX_BREAKPOINTS)
        return -1;
    env->breakpoints[env->nb_breakpoints++] = pc;

    breakpoint_invalidate(env, pc);
    return 0;
#else
    return -1;
#endif
}

/* remove a breakpoint */
int cpu_breakpoint_remove(CPUState *env, target_ulong pc)
{
#if defined(TARGET_HAS_ICE)
    int i;
    for(i = 0; i < env->nb_breakpoints; i++) {
        if (env->breakpoints[i] == pc)
            goto found;
    }
    return -1;
 found:
    env->nb_breakpoints--;
    if (i < env->nb_breakpoints)
        env->breakpoints[i] = env->breakpoints[env->nb_breakpoints];

    breakpoint_invalidate(env, pc);
    return 0;
#else
    return -1;
#endif
}

/* enable or disable single step mode. EXCP_DEBUG is returned by the
   CPU loop after each instruction */
void cpu_single_step(CPUState *env, int enabled)
{
#if defined(TARGET_HAS_ICE)
    if (env->singlestep_enabled != enabled) {
        env->singlestep_enabled = enabled;
        /* must flush all the translated code to avoid inconsistencies */
        /* XXX: only flush what is necessary */
        tb_flush(env);
    }
#endif
}

/* enable or disable low level logging */
void cpu_set_log(int log_flags)
{
    loglevel = log_flags;
    if (loglevel && !logfile) {
        logfile = fopen(logfilename, "w");
        if (!logfile) {
            perror(logfilename);
            _exit(1);
        }
#if !defined(CONFIG_SOFTMMU)
        /* must avoid mmap() usage of glibc by setting a buffer "by hand" */
        {
            static uint8_t logfile_buf[4096];
            setvbuf(logfile, logfile_buf, _IOLBF, sizeof(logfile_buf));
        }
#else
        setvbuf(logfile, NULL, _IOLBF, 0);
#endif
    }
}

void cpu_set_log_filename(const char *filename)
{
    logfilename = strdup(filename);
}

/* mask must never be zero, except for A20 change call */
void cpu_interrupt(CPUState *env, int mask)
{
    TranslationBlock *tb;
    static int interrupt_lock;

    env->interrupt_request |= mask;
    /* if the cpu is currently executing code, we must unlink it and
       all the potentially executing TB */
    tb = env->current_tb;
    if (tb && !testandset(&interrupt_lock)) {
        env->current_tb = NULL;
        tb_reset_jump_recursive(tb);
        interrupt_lock = 0;
    }
}

void cpu_reset_interrupt(CPUState *env, int mask)
{
    env->interrupt_request &= ~mask;
}

CPULogItem cpu_log_items[] = {
    { CPU_LOG_TB_OUT_ASM, "out_asm",
      "show generated host assembly code for each compiled TB" },
    { CPU_LOG_TB_IN_ASM, "in_asm",
      "show target assembly code for each compiled TB" },
    { CPU_LOG_TB_OP, "op",
      "show micro ops for each compiled TB (only usable if 'in_asm' used)" },
#ifdef TARGET_I386
    { CPU_LOG_TB_OP_OPT, "op_opt",
      "show micro ops after optimization for each compiled TB" },
#endif
    { CPU_LOG_INT, "int",
      "show interrupts/exceptions in short format" },
    { CPU_LOG_EXEC, "exec",
      "show trace before each executed TB (lots of logs)" },
    { CPU_LOG_TB_CPU, "cpu",
      "show CPU state before block translation" },
#ifdef TARGET_I386
    { CPU_LOG_PCALL, "pcall",
      "show protected mode far calls/returns/exceptions" },
#endif
#ifdef DEBUG_IOPORT
    { CPU_LOG_IOPORT, "ioport",
      "show all i/o port accesses" },
#endif
    { 0, NULL, NULL },
};

static int cmp1(const char *s1, int n, const char *s2)
{
    if (strlen(s2) != n)
        return 0;
    return memcmp(s1, s2, n) == 0;
}

/* takes a comma separated list of log masks. Return 0 if error. */
int cpu_str_to_log_mask(const char *str)
{
    CPULogItem *item;
    int mask;
    const char *p, *p1;

    p = str;
    mask = 0;
    for(;;) {
        p1 = strchr(p, ',');
        if (!p1)
            p1 = p + strlen(p);
        if (cmp1(p, p1 - p, "all")) {
            for(item = cpu_log_items; item->mask != 0; item++) {
                mask |= item->mask;
            }
        } else {
            for(item = cpu_log_items; item->mask != 0; item++) {
                if (cmp1(p, p1 - p, item->name))
                    goto found;
            }
            return 0;
        }
    found:
        mask |= item->mask;
        if (*p1 != ',')
            break;
        p = p1 + 1;
    }
    return mask;
}
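
/* For example, cpu_str_to_log_mask("in_asm,op") returns
   CPU_LOG_TB_IN_ASM | CPU_LOG_TB_OP, "all" enables every item in
   cpu_log_items[], and an unknown name anywhere in the list makes the
   whole call return 0. */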
ea041c0e | 1249 | |
7501267e FB |
1250 | void cpu_abort(CPUState *env, const char *fmt, ...) |
1251 | { | |
1252 | va_list ap; | |
1253 | ||
1254 | va_start(ap, fmt); | |
1255 | fprintf(stderr, "qemu: fatal: "); | |
1256 | vfprintf(stderr, fmt, ap); | |
1257 | fprintf(stderr, "\n"); | |
1258 | #ifdef TARGET_I386 | |
7fe48483 FB |
1259 | cpu_dump_state(env, stderr, fprintf, X86_DUMP_FPU | X86_DUMP_CCOP); |
1260 | #else | |
1261 | cpu_dump_state(env, stderr, fprintf, 0); | |
7501267e FB |
1262 | #endif |
1263 | va_end(ap); | |
1264 | abort(); | |
1265 | } | |
1266 | ||
c5be9f08 TS |
1267 | CPUState *cpu_copy(CPUState *env) |
1268 | { | |
1269 | CPUState *new_env = cpu_init(); | |
1270 | /* preserve chaining and index */ | |
1271 | CPUState *next_cpu = new_env->next_cpu; | |
1272 | int cpu_index = new_env->cpu_index; | |
1273 | memcpy(new_env, env, sizeof(CPUState)); | |
1274 | new_env->next_cpu = next_cpu; | |
1275 | new_env->cpu_index = cpu_index; | |
1276 | return new_env; | |
1277 | } | |
1278 | ||

#if !defined(CONFIG_USER_ONLY)

/* NOTE: if flush_global is true, also flush global entries (not
   implemented yet) */
void tlb_flush(CPUState *env, int flush_global)
{
    int i;

#if defined(DEBUG_TLB)
    printf("tlb_flush:\n");
#endif
    /* must reset current TB so that interrupts cannot modify the
       links while we are modifying them */
    env->current_tb = NULL;

    for(i = 0; i < CPU_TLB_SIZE; i++) {
        env->tlb_table[0][i].addr_read = -1;
        env->tlb_table[0][i].addr_write = -1;
        env->tlb_table[0][i].addr_code = -1;
        env->tlb_table[1][i].addr_read = -1;
        env->tlb_table[1][i].addr_write = -1;
        env->tlb_table[1][i].addr_code = -1;
    }

    memset (env->tb_jmp_cache, 0, TB_JMP_CACHE_SIZE * sizeof (void *));

#if !defined(CONFIG_SOFTMMU)
    munmap((void *)MMAP_AREA_START, MMAP_AREA_END - MMAP_AREA_START);
#endif
#ifdef USE_KQEMU
    if (env->kqemu_enabled) {
        kqemu_flush(env, flush_global);
    }
#endif
    tlb_flush_count++;
}

static inline void tlb_flush_entry(CPUTLBEntry *tlb_entry, target_ulong addr)
{
    if (addr == (tlb_entry->addr_read &
                 (TARGET_PAGE_MASK | TLB_INVALID_MASK)) ||
        addr == (tlb_entry->addr_write &
                 (TARGET_PAGE_MASK | TLB_INVALID_MASK)) ||
        addr == (tlb_entry->addr_code &
                 (TARGET_PAGE_MASK | TLB_INVALID_MASK))) {
        tlb_entry->addr_read = -1;
        tlb_entry->addr_write = -1;
        tlb_entry->addr_code = -1;
    }
}

void tlb_flush_page(CPUState *env, target_ulong addr)
{
    int i;
    TranslationBlock *tb;

#if defined(DEBUG_TLB)
    printf("tlb_flush_page: " TARGET_FMT_lx "\n", addr);
#endif
    /* must reset current TB so that interrupts cannot modify the
       links while we are modifying them */
    env->current_tb = NULL;

    addr &= TARGET_PAGE_MASK;
    i = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
    tlb_flush_entry(&env->tlb_table[0][i], addr);
    tlb_flush_entry(&env->tlb_table[1][i], addr);

    /* Discard jump cache entries for any tb which might potentially
       overlap the flushed page. */
    i = tb_jmp_cache_hash_page(addr - TARGET_PAGE_SIZE);
    memset (&env->tb_jmp_cache[i], 0, TB_JMP_PAGE_SIZE * sizeof(tb));

    i = tb_jmp_cache_hash_page(addr);
    memset (&env->tb_jmp_cache[i], 0, TB_JMP_PAGE_SIZE * sizeof(tb));

#if !defined(CONFIG_SOFTMMU)
    if (addr < MMAP_AREA_END)
        munmap((void *)addr, TARGET_PAGE_SIZE);
#endif
#ifdef USE_KQEMU
    if (env->kqemu_enabled) {
        kqemu_flush_page(env, addr);
    }
#endif
}
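
/* The jump cache is purged for two pages above because a TB can span at
   most two pages: a block whose pc lies in the page just before the flushed
   one may still extend into it, so both hash buckets must be dropped. */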

/* update the TLBs so that writes to code in the virtual page 'addr'
   can be detected */
static void tlb_protect_code(ram_addr_t ram_addr)
{
    cpu_physical_memory_reset_dirty(ram_addr,
                                    ram_addr + TARGET_PAGE_SIZE,
                                    CODE_DIRTY_FLAG);
}

/* update the TLB so that writes in physical page 'phys_addr' are no longer
   tested for self modifying code */
static void tlb_unprotect_code_phys(CPUState *env, ram_addr_t ram_addr,
                                    target_ulong vaddr)
{
    phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS] |= CODE_DIRTY_FLAG;
}
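/* Dirty tracking sketch: the low bits of a TLB entry's addr_write select
   the store path. IO_MEM_RAM means the page is writable with a direct host
   store; rewriting them to IO_MEM_NOTDIRTY (as tlb_reset_dirty_range() does
   below) diverts stores through the notdirty slow path, which sets the
   page's bits in phys_ram_dirty[] and can re-enable the fast path. Note
   also the unsigned compare "(addr - start) < length" below, a branch-free
   way of testing start <= addr < start + length. */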
static inline void tlb_reset_dirty_range(CPUTLBEntry *tlb_entry,
                                         unsigned long start, unsigned long length)
{
    unsigned long addr;
    if ((tlb_entry->addr_write & ~TARGET_PAGE_MASK) == IO_MEM_RAM) {
        addr = (tlb_entry->addr_write & TARGET_PAGE_MASK) + tlb_entry->addend;
        if ((addr - start) < length) {
            tlb_entry->addr_write = (tlb_entry->addr_write & TARGET_PAGE_MASK) | IO_MEM_NOTDIRTY;
        }
    }
}

void cpu_physical_memory_reset_dirty(ram_addr_t start, ram_addr_t end,
                                     int dirty_flags)
{
    CPUState *env;
    unsigned long length, start1;
    int i, mask, len;
    uint8_t *p;

    start &= TARGET_PAGE_MASK;
    end = TARGET_PAGE_ALIGN(end);

    length = end - start;
    if (length == 0)
        return;
    len = length >> TARGET_PAGE_BITS;
#ifdef USE_KQEMU
    /* XXX: should not depend on cpu context */
    env = first_cpu;
    if (env->kqemu_enabled) {
        ram_addr_t addr;
        addr = start;
        for(i = 0; i < len; i++) {
            kqemu_set_notdirty(env, addr);
            addr += TARGET_PAGE_SIZE;
        }
    }
#endif
    mask = ~dirty_flags;
    p = phys_ram_dirty + (start >> TARGET_PAGE_BITS);
    for(i = 0; i < len; i++)
        p[i] &= mask;

    /* we modify the TLB cache so that the dirty bit will be set again
       when accessing the range */
    start1 = start + (unsigned long)phys_ram_base;
    for(env = first_cpu; env != NULL; env = env->next_cpu) {
        for(i = 0; i < CPU_TLB_SIZE; i++)
            tlb_reset_dirty_range(&env->tlb_table[0][i], start1, length);
        for(i = 0; i < CPU_TLB_SIZE; i++)
            tlb_reset_dirty_range(&env->tlb_table[1][i], start1, length);
    }

#if !defined(CONFIG_SOFTMMU)
    /* XXX: this is expensive */
    {
        VirtPageDesc *p;
        int j;
        target_ulong addr;

        for(i = 0; i < L1_SIZE; i++) {
            p = l1_virt_map[i];
            if (p) {
                addr = i << (TARGET_PAGE_BITS + L2_BITS);
                for(j = 0; j < L2_SIZE; j++) {
                    if (p->valid_tag == virt_valid_tag &&
                        p->phys_addr >= start && p->phys_addr < end &&
                        (p->prot & PROT_WRITE)) {
                        if (addr < MMAP_AREA_END) {
                            mprotect((void *)addr, TARGET_PAGE_SIZE,
                                     p->prot & ~PROT_WRITE);
                        }
                    }
                    addr += TARGET_PAGE_SIZE;
                    p++;
                }
            }
        }
    }
#endif
}

static inline void tlb_update_dirty(CPUTLBEntry *tlb_entry)
{
    ram_addr_t ram_addr;

    if ((tlb_entry->addr_write & ~TARGET_PAGE_MASK) == IO_MEM_RAM) {
        ram_addr = (tlb_entry->addr_write & TARGET_PAGE_MASK) +
            tlb_entry->addend - (unsigned long)phys_ram_base;
        if (!cpu_physical_memory_is_dirty(ram_addr)) {
            tlb_entry->addr_write |= IO_MEM_NOTDIRTY;
        }
    }
}

/* update the TLB according to the current state of the dirty bits */
void cpu_tlb_update_dirty(CPUState *env)
{
    int i;
    for(i = 0; i < CPU_TLB_SIZE; i++)
        tlb_update_dirty(&env->tlb_table[0][i]);
    for(i = 0; i < CPU_TLB_SIZE; i++)
        tlb_update_dirty(&env->tlb_table[1][i]);
}

static inline void tlb_set_dirty1(CPUTLBEntry *tlb_entry,
                                  unsigned long start)
{
    unsigned long addr;
    if ((tlb_entry->addr_write & ~TARGET_PAGE_MASK) == IO_MEM_NOTDIRTY) {
        addr = (tlb_entry->addr_write & TARGET_PAGE_MASK) + tlb_entry->addend;
        if (addr == start) {
            tlb_entry->addr_write = (tlb_entry->addr_write & TARGET_PAGE_MASK) | IO_MEM_RAM;
        }
    }
}

/* update the TLB corresponding to virtual page vaddr and phys addr
   addr so that it is no longer dirty */
static inline void tlb_set_dirty(CPUState *env,
                                 unsigned long addr, target_ulong vaddr)
{
    int i;

    addr &= TARGET_PAGE_MASK;
    i = (vaddr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
    tlb_set_dirty1(&env->tlb_table[0][i], addr);
    tlb_set_dirty1(&env->tlb_table[1][i], addr);
}

/* add a new TLB entry. At most one entry for a given virtual address
   is permitted. Return 0 if OK or 2 if the page could not be mapped
   (can only happen in non SOFTMMU mode for I/O pages or pages
   conflicting with the host address space). */
int tlb_set_page_exec(CPUState *env, target_ulong vaddr,
                      target_phys_addr_t paddr, int prot,
                      int is_user, int is_softmmu)
{
    PhysPageDesc *p;
    unsigned long pd;
    unsigned int index;
    target_ulong address;
    target_phys_addr_t addend;
    int ret;
    CPUTLBEntry *te;
    int i;

    p = phys_page_find(paddr >> TARGET_PAGE_BITS);
    if (!p) {
        pd = IO_MEM_UNASSIGNED;
    } else {
        pd = p->phys_offset;
    }
#if defined(DEBUG_TLB)
    printf("tlb_set_page: vaddr=" TARGET_FMT_lx " paddr=0x%08x prot=%x u=%d smmu=%d pd=0x%08lx\n",
           vaddr, (int)paddr, prot, is_user, is_softmmu, pd);
#endif

    ret = 0;
#if !defined(CONFIG_SOFTMMU)
    if (is_softmmu)
#endif
    {
        if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM && !(pd & IO_MEM_ROMD)) {
            /* IO memory case */
            address = vaddr | pd;
            addend = paddr;
        } else {
            /* standard memory */
            address = vaddr;
            addend = (unsigned long)phys_ram_base + (pd & TARGET_PAGE_MASK);
        }

        /* Make accesses to pages with watchpoints go via the
           watchpoint trap routines. */
        for (i = 0; i < env->nb_watchpoints; i++) {
            if (vaddr == (env->watchpoint[i].vaddr & TARGET_PAGE_MASK)) {
                if (address & ~TARGET_PAGE_MASK) {
                    env->watchpoint[i].is_ram = 0;
                    address = vaddr | io_mem_watch;
                } else {
                    env->watchpoint[i].is_ram = 1;
                    /* TODO: Figure out how to make read watchpoints coexist
                       with code. */
                    pd = (pd & TARGET_PAGE_MASK) | io_mem_watch | IO_MEM_ROMD;
                }
            }
        }

        index = (vaddr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
        addend -= vaddr;
        te = &env->tlb_table[is_user][index];
        te->addend = addend;
        if (prot & PAGE_READ) {
            te->addr_read = address;
        } else {
            te->addr_read = -1;
        }
        if (prot & PAGE_EXEC) {
            te->addr_code = address;
        } else {
            te->addr_code = -1;
        }
        if (prot & PAGE_WRITE) {
            if ((pd & ~TARGET_PAGE_MASK) == IO_MEM_ROM ||
                (pd & IO_MEM_ROMD)) {
                /* write access calls the I/O callback */
                te->addr_write = vaddr |
                    (pd & ~(TARGET_PAGE_MASK | IO_MEM_ROMD));
            } else if ((pd & ~TARGET_PAGE_MASK) == IO_MEM_RAM &&
                       !cpu_physical_memory_is_dirty(pd)) {
                te->addr_write = vaddr | IO_MEM_NOTDIRTY;
            } else {
                te->addr_write = address;
            }
        } else {
            te->addr_write = -1;
        }
    }
#if !defined(CONFIG_SOFTMMU)
    else {
        if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM) {
            /* IO access: no mapping is done as it will be handled by the
               soft MMU */
            if (!(env->hflags & HF_SOFTMMU_MASK))
                ret = 2;
        } else {
1611 | void *map_addr; | |
59817ccb FB |
1612 | |
1613 | if (vaddr >= MMAP_AREA_END) { | |
1614 | ret = 2; | |
1615 | } else { | |
1616 | if (prot & PROT_WRITE) { | |
1617 | if ((pd & ~TARGET_PAGE_MASK) == IO_MEM_ROM || | |
d720b93d | 1618 | #if defined(TARGET_HAS_SMC) || 1 |
59817ccb | 1619 | first_tb || |
d720b93d | 1620 | #endif |
59817ccb FB |
1621 | ((pd & ~TARGET_PAGE_MASK) == IO_MEM_RAM && |
1622 | !cpu_physical_memory_is_dirty(pd))) { | |
1623 | /* ROM: we behave as if code were inside */ |
1624 | /* if code is present, we map read-only and save the |
1625 | original mapping */ |
1626 | VirtPageDesc *vp; | |
1627 | ||
90f18422 | 1628 | vp = virt_page_find_alloc(vaddr >> TARGET_PAGE_BITS, 1); |
59817ccb FB |
1629 | vp->phys_addr = pd; |
1630 | vp->prot = prot; | |
1631 | vp->valid_tag = virt_valid_tag; | |
1632 | prot &= ~PAGE_WRITE; | |
1633 | } | |
1634 | } | |
1635 | map_addr = mmap((void *)vaddr, TARGET_PAGE_SIZE, prot, | |
1636 | MAP_SHARED | MAP_FIXED, phys_ram_fd, (pd & TARGET_PAGE_MASK)); | |
1637 | if (map_addr == MAP_FAILED) { | |
1638 | cpu_abort(env, "mmap failed when mapping physical address 0x%08x to virtual address 0x%08x\n", |
1639 | paddr, vaddr); | |
9fa3e853 | 1640 | } |
9fa3e853 FB |
1641 | } |
1642 | } | |
1643 | } | |
1644 | #endif | |
1645 | return ret; | |
1646 | } | |
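/* Editor's sketch (hedged): a simplified illustration of how a softmmu
   load fast path consults the TLB entry filled in above -- not the
   actual softmmu_template.h code; "example_ldl" is a hypothetical name. */
static inline uint32_t example_ldl(CPUState *env, target_ulong addr,
                                   int is_user)
{
    int index;
    CPUTLBEntry *te;

    index = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
    te = &env->tlb_table[is_user][index];
    if (te->addr_read == (addr & (TARGET_PAGE_MASK | 3))) {
        /* hit: addend turns the guest virtual address into a host
           pointer into phys_ram_base */
        return ldl_p((uint8_t *)(long)(addr + te->addend));
    }
    /* miss, unaligned access or I/O page: take the slow path
       (tlb_fill and the out-of-line handlers) */
    return 0; /* placeholder for the slow path */
}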
1647 | ||
1648 | /* called from signal handler: invalidate the code and unprotect the | |
1649 | page. Return TRUE if the fault was successfully handled. */ |
53a5960a | 1650 | int page_unprotect(target_ulong addr, unsigned long pc, void *puc) |
9fa3e853 FB |
1651 | { |
1652 | #if !defined(CONFIG_SOFTMMU) | |
1653 | VirtPageDesc *vp; | |
1654 | ||
1655 | #if defined(DEBUG_TLB) | |
1656 | printf("page_unprotect: addr=0x%08x\n", addr); | |
1657 | #endif | |
1658 | addr &= TARGET_PAGE_MASK; | |
59817ccb FB |
1659 | |
1660 | /* if it is not mapped, no need to worry here */ | |
1661 | if (addr >= MMAP_AREA_END) | |
1662 | return 0; | |
9fa3e853 FB |
1663 | vp = virt_page_find(addr >> TARGET_PAGE_BITS); |
1664 | if (!vp) | |
1665 | return 0; | |
1666 | /* NOTE: in this case, validate_tag is _not_ tested as it | |
1667 | validates only the code TLB */ | |
1668 | if (vp->valid_tag != virt_valid_tag) | |
1669 | return 0; | |
1670 | if (!(vp->prot & PAGE_WRITE)) | |
1671 | return 0; | |
1672 | #if defined(DEBUG_TLB) | |
1673 | printf("page_unprotect: addr=0x%08x phys_addr=0x%08x prot=%x\n", | |
1674 | addr, vp->phys_addr, vp->prot); | |
1675 | #endif | |
59817ccb FB |
1676 | if (mprotect((void *)addr, TARGET_PAGE_SIZE, vp->prot) < 0) |
1677 | cpu_abort(cpu_single_env, "error mprotect addr=0x%lx prot=%d\n", | |
1678 | (unsigned long)addr, vp->prot); | |
d720b93d | 1679 | /* set the dirty bit */ |
0a962c02 | 1680 | phys_ram_dirty[vp->phys_addr >> TARGET_PAGE_BITS] = 0xff; |
d720b93d FB |
1681 | /* flush the code inside */ |
1682 | tb_invalidate_phys_page(vp->phys_addr, pc, puc); | |
9fa3e853 FB |
1683 | return 1; |
1684 | #else | |
1685 | return 0; | |
1686 | #endif | |
33417e70 FB |
1687 | } |
1688 | ||
0124311e FB |
1689 | #else |
1690 | ||
ee8b7021 | 1691 | void tlb_flush(CPUState *env, int flush_global) |
0124311e FB |
1692 | { |
1693 | } | |
1694 | ||
2e12669a | 1695 | void tlb_flush_page(CPUState *env, target_ulong addr) |
0124311e FB |
1696 | { |
1697 | } | |
1698 | ||
84b7b8e7 FB |
1699 | int tlb_set_page_exec(CPUState *env, target_ulong vaddr, |
1700 | target_phys_addr_t paddr, int prot, | |
1701 | int is_user, int is_softmmu) | |
9fa3e853 FB |
1702 | { |
1703 | return 0; | |
1704 | } | |
0124311e | 1705 | |
9fa3e853 FB |
1706 | /* dump memory mappings */ |
1707 | void page_dump(FILE *f) | |
33417e70 | 1708 | { |
9fa3e853 FB |
1709 | unsigned long start, end; |
1710 | int i, j, prot, prot1; | |
1711 | PageDesc *p; | |
33417e70 | 1712 | |
9fa3e853 FB |
1713 | fprintf(f, "%-8s %-8s %-8s %s\n", |
1714 | "start", "end", "size", "prot"); | |
1715 | start = -1; | |
1716 | end = -1; | |
1717 | prot = 0; | |
1718 | for(i = 0; i <= L1_SIZE; i++) { | |
1719 | if (i < L1_SIZE) | |
1720 | p = l1_map[i]; | |
1721 | else | |
1722 | p = NULL; | |
1723 | for(j = 0;j < L2_SIZE; j++) { | |
1724 | if (!p) | |
1725 | prot1 = 0; | |
1726 | else | |
1727 | prot1 = p[j].flags; | |
1728 | if (prot1 != prot) { | |
1729 | end = (i << (32 - L1_BITS)) | (j << TARGET_PAGE_BITS); | |
1730 | if (start != -1) { | |
1731 | fprintf(f, "%08lx-%08lx %08lx %c%c%c\n", | |
1732 | start, end, end - start, | |
1733 | prot & PAGE_READ ? 'r' : '-', | |
1734 | prot & PAGE_WRITE ? 'w' : '-', | |
1735 | prot & PAGE_EXEC ? 'x' : '-'); | |
1736 | } | |
1737 | if (prot1 != 0) | |
1738 | start = end; | |
1739 | else | |
1740 | start = -1; | |
1741 | prot = prot1; | |
1742 | } | |
1743 | if (!p) | |
1744 | break; | |
1745 | } | |
33417e70 | 1746 | } |
33417e70 FB |
1747 | } |
1748 | ||
53a5960a | 1749 | int page_get_flags(target_ulong address) |
33417e70 | 1750 | { |
9fa3e853 FB |
1751 | PageDesc *p; |
1752 | ||
1753 | p = page_find(address >> TARGET_PAGE_BITS); | |
33417e70 | 1754 | if (!p) |
9fa3e853 FB |
1755 | return 0; |
1756 | return p->flags; | |
1757 | } | |
1758 | ||
1759 | /* modify the flags of a page and invalidate the code if | |
1760 | necessary. The flag PAGE_WRITE_ORG is set automatically |
1761 | depending on PAGE_WRITE */ | |
53a5960a | 1762 | void page_set_flags(target_ulong start, target_ulong end, int flags) |
9fa3e853 FB |
1763 | { |
1764 | PageDesc *p; | |
53a5960a | 1765 | target_ulong addr; |
9fa3e853 FB |
1766 | |
1767 | start = start & TARGET_PAGE_MASK; | |
1768 | end = TARGET_PAGE_ALIGN(end); | |
1769 | if (flags & PAGE_WRITE) | |
1770 | flags |= PAGE_WRITE_ORG; | |
1771 | spin_lock(&tb_lock); | |
1772 | for(addr = start; addr < end; addr += TARGET_PAGE_SIZE) { | |
1773 | p = page_find_alloc(addr >> TARGET_PAGE_BITS); | |
1774 | /* if the write protection is set, then we invalidate the code | |
1775 | inside */ | |
1776 | if (!(p->flags & PAGE_WRITE) && | |
1777 | (flags & PAGE_WRITE) && | |
1778 | p->first_tb) { | |
d720b93d | 1779 | tb_invalidate_phys_page(addr, 0, NULL); |
9fa3e853 FB |
1780 | } |
1781 | p->flags = flags; | |
1782 | } | |
1783 | spin_unlock(&tb_lock); | |
33417e70 FB |
1784 | } |
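/* Editor's sketch (hedged): typical user-mode caller, e.g. a target
   mmap emulation marking a freshly mapped region; "start" and "len"
   are illustrative. */
page_set_flags(start, start + len,
               PAGE_VALID | PAGE_READ | PAGE_WRITE | PAGE_EXEC);
/* later, a permission check before touching guest memory: */
if (!(page_get_flags(start) & PAGE_WRITE)) {
    /* not writable: refuse the access or raise a fault */
}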
1785 | ||
9fa3e853 FB |
1786 | /* called from signal handler: invalidate the code and unprotect the |
1787 | page. Return TRUE if the fault was successfully handled. */ |
53a5960a | 1788 | int page_unprotect(target_ulong address, unsigned long pc, void *puc) |
9fa3e853 FB |
1789 | { |
1790 | unsigned int page_index, prot, pindex; | |
1791 | PageDesc *p, *p1; | |
53a5960a | 1792 | target_ulong host_start, host_end, addr; |
9fa3e853 | 1793 | |
83fb7adf | 1794 | host_start = address & qemu_host_page_mask; |
9fa3e853 FB |
1795 | page_index = host_start >> TARGET_PAGE_BITS; |
1796 | p1 = page_find(page_index); | |
1797 | if (!p1) | |
1798 | return 0; | |
83fb7adf | 1799 | host_end = host_start + qemu_host_page_size; |
9fa3e853 FB |
1800 | p = p1; |
1801 | prot = 0; | |
1802 | for(addr = host_start;addr < host_end; addr += TARGET_PAGE_SIZE) { | |
1803 | prot |= p->flags; | |
1804 | p++; | |
1805 | } | |
1806 | /* if the page was really writable, then we change its | |
1807 | protection back to writable */ | |
1808 | if (prot & PAGE_WRITE_ORG) { | |
1809 | pindex = (address - host_start) >> TARGET_PAGE_BITS; | |
1810 | if (!(p1[pindex].flags & PAGE_WRITE)) { | |
53a5960a | 1811 | mprotect((void *)g2h(host_start), qemu_host_page_size, |
9fa3e853 FB |
1812 | (prot & PAGE_BITS) | PAGE_WRITE); |
1813 | p1[pindex].flags |= PAGE_WRITE; | |
1814 | /* and since the content will be modified, we must invalidate | |
1815 | the corresponding translated code. */ | |
d720b93d | 1816 | tb_invalidate_phys_page(address, pc, puc); |
9fa3e853 FB |
1817 | #ifdef DEBUG_TB_CHECK |
1818 | tb_invalidate_check(address); | |
1819 | #endif | |
1820 | return 1; | |
1821 | } | |
1822 | } | |
1823 | return 0; | |
1824 | } | |
1825 | ||
1826 | /* call this function when system calls directly modify a memory area */ | |
53a5960a PB |
1827 | /* ??? This should be redundant now that we have lock_user. */ |
1828 | void page_unprotect_range(target_ulong data, target_ulong data_size) | |
9fa3e853 | 1829 | { |
53a5960a | 1830 | target_ulong start, end, addr; |
9fa3e853 | 1831 | |
53a5960a | 1832 | start = data; |
9fa3e853 FB |
1833 | end = start + data_size; |
1834 | start &= TARGET_PAGE_MASK; | |
1835 | end = TARGET_PAGE_ALIGN(end); | |
1836 | for(addr = start; addr < end; addr += TARGET_PAGE_SIZE) { | |
d720b93d | 1837 | page_unprotect(addr, 0, NULL); |
9fa3e853 FB |
1838 | } |
1839 | } | |
1840 | ||
6a00d601 FB |
1841 | static inline void tlb_set_dirty(CPUState *env, |
1842 | unsigned long addr, target_ulong vaddr) | |
1ccde1cb FB |
1843 | { |
1844 | } | |
9fa3e853 FB |
1845 | #endif /* defined(CONFIG_USER_ONLY) */ |
1846 | ||
33417e70 FB |
1847 | /* register physical memory. 'size' must be a multiple of the target |
1848 | page size. If (phys_offset & ~TARGET_PAGE_MASK) != 0, then it is an | |
1849 | I/O memory page */ |
2e12669a FB |
1850 | void cpu_register_physical_memory(target_phys_addr_t start_addr, |
1851 | unsigned long size, | |
1852 | unsigned long phys_offset) | |
33417e70 | 1853 | { |
108c49b8 | 1854 | target_phys_addr_t addr, end_addr; |
92e873b9 | 1855 | PhysPageDesc *p; |
9d42037b | 1856 | CPUState *env; |
33417e70 | 1857 | |
5fd386f6 | 1858 | size = (size + TARGET_PAGE_SIZE - 1) & TARGET_PAGE_MASK; |
33417e70 | 1859 | end_addr = start_addr + size; |
5fd386f6 | 1860 | for(addr = start_addr; addr != end_addr; addr += TARGET_PAGE_SIZE) { |
108c49b8 | 1861 | p = phys_page_find_alloc(addr >> TARGET_PAGE_BITS, 1); |
9fa3e853 | 1862 | p->phys_offset = phys_offset; |
2a4188a3 FB |
1863 | if ((phys_offset & ~TARGET_PAGE_MASK) <= IO_MEM_ROM || |
1864 | (phys_offset & IO_MEM_ROMD)) | |
33417e70 FB |
1865 | phys_offset += TARGET_PAGE_SIZE; |
1866 | } | |
9d42037b FB |
1867 | |
1868 | /* since each CPU stores ram addresses in its TLB cache, we must | |
1869 | reset the modified entries */ | |
1870 | /* XXX: slow ! */ | |
1871 | for(env = first_cpu; env != NULL; env = env->next_cpu) { | |
1872 | tlb_flush(env, 1); | |
1873 | } | |
33417e70 FB |
1874 | } |
1875 | ||
ba863458 FB |
1876 | /* XXX: temporary until new memory mapping API */ |
1877 | uint32_t cpu_get_physical_page_desc(target_phys_addr_t addr) | |
1878 | { | |
1879 | PhysPageDesc *p; | |
1880 | ||
1881 | p = phys_page_find(addr >> TARGET_PAGE_BITS); | |
1882 | if (!p) | |
1883 | return IO_MEM_UNASSIGNED; | |
1884 | return p->phys_offset; | |
1885 | } | |
1886 | ||
e9a1ab19 FB |
1887 | /* XXX: better than nothing */ |
1888 | ram_addr_t qemu_ram_alloc(unsigned int size) | |
1889 | { | |
1890 | ram_addr_t addr; | |
1891 | if ((phys_ram_alloc_offset + size) >= phys_ram_size) { | |
1892 | fprintf(stderr, "Not enough memory (requested_size = %u, max memory = %d)\n", | |
1893 | size, phys_ram_size); | |
1894 | abort(); | |
1895 | } | |
1896 | addr = phys_ram_alloc_offset; | |
1897 | phys_ram_alloc_offset = TARGET_PAGE_ALIGN(phys_ram_alloc_offset + size); | |
1898 | return addr; | |
1899 | } | |
1900 | ||
1901 | void qemu_ram_free(ram_addr_t addr) | |
1902 | { | |
1903 | } | |
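/* Editor's sketch (hedged): how board initialization code typically
   combines qemu_ram_alloc() with cpu_register_physical_memory();
   the addresses and sizes below are illustrative. */
ram_addr_t ram_offset, rom_offset;

ram_offset = qemu_ram_alloc(ram_size);
cpu_register_physical_memory(0x00000000, ram_size,
                             ram_offset | IO_MEM_RAM);
rom_offset = qemu_ram_alloc(rom_size);
cpu_register_physical_memory(0xfff00000, rom_size,
                             rom_offset | IO_MEM_ROM);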
1904 | ||
a4193c8a | 1905 | static uint32_t unassigned_mem_readb(void *opaque, target_phys_addr_t addr) |
33417e70 | 1906 | { |
67d3b957 PB |
1907 | #ifdef DEBUG_UNASSIGNED |
1908 | printf("Unassigned mem read 0x%08x\n", (int)addr); | |
1909 | #endif | |
33417e70 FB |
1910 | return 0; |
1911 | } | |
1912 | ||
a4193c8a | 1913 | static void unassigned_mem_writeb(void *opaque, target_phys_addr_t addr, uint32_t val) |
33417e70 | 1914 | { |
67d3b957 PB |
1915 | #ifdef DEBUG_UNASSIGNED |
1916 | printf("Unassigned mem write 0x%08x = 0x%x\n", (int)addr, val); | |
1917 | #endif | |
33417e70 FB |
1918 | } |
1919 | ||
1920 | static CPUReadMemoryFunc *unassigned_mem_read[3] = { | |
1921 | unassigned_mem_readb, | |
1922 | unassigned_mem_readb, | |
1923 | unassigned_mem_readb, | |
1924 | }; | |
1925 | ||
1926 | static CPUWriteMemoryFunc *unassigned_mem_write[3] = { | |
1927 | unassigned_mem_writeb, | |
1928 | unassigned_mem_writeb, | |
1929 | unassigned_mem_writeb, | |
1930 | }; | |
1931 | ||
3a7d929e | 1932 | static void notdirty_mem_writeb(void *opaque, target_phys_addr_t addr, uint32_t val) |
9fa3e853 | 1933 | { |
3a7d929e FB |
1934 | unsigned long ram_addr; |
1935 | int dirty_flags; | |
1936 | ram_addr = addr - (unsigned long)phys_ram_base; | |
1937 | dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS]; | |
1938 | if (!(dirty_flags & CODE_DIRTY_FLAG)) { | |
9fa3e853 | 1939 | #if !defined(CONFIG_USER_ONLY) |
3a7d929e FB |
1940 | tb_invalidate_phys_page_fast(ram_addr, 1); |
1941 | dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS]; | |
9fa3e853 | 1942 | #endif |
3a7d929e | 1943 | } |
c27004ec | 1944 | stb_p((uint8_t *)(long)addr, val); |
f32fc648 FB |
1945 | #ifdef USE_KQEMU |
1946 | if (cpu_single_env->kqemu_enabled && | |
1947 | (dirty_flags & KQEMU_MODIFY_PAGE_MASK) != KQEMU_MODIFY_PAGE_MASK) | |
1948 | kqemu_modify_page(cpu_single_env, ram_addr); | |
1949 | #endif | |
f23db169 FB |
1950 | dirty_flags |= (0xff & ~CODE_DIRTY_FLAG); |
1951 | phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS] = dirty_flags; | |
1952 | /* we remove the notdirty callback only if the code has been | |
1953 | flushed */ | |
1954 | if (dirty_flags == 0xff) | |
6a00d601 | 1955 | tlb_set_dirty(cpu_single_env, addr, cpu_single_env->mem_write_vaddr); |
9fa3e853 FB |
1956 | } |
1957 | ||
3a7d929e | 1958 | static void notdirty_mem_writew(void *opaque, target_phys_addr_t addr, uint32_t val) |
9fa3e853 | 1959 | { |
3a7d929e FB |
1960 | unsigned long ram_addr; |
1961 | int dirty_flags; | |
1962 | ram_addr = addr - (unsigned long)phys_ram_base; | |
1963 | dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS]; | |
1964 | if (!(dirty_flags & CODE_DIRTY_FLAG)) { | |
9fa3e853 | 1965 | #if !defined(CONFIG_USER_ONLY) |
3a7d929e FB |
1966 | tb_invalidate_phys_page_fast(ram_addr, 2); |
1967 | dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS]; | |
9fa3e853 | 1968 | #endif |
3a7d929e | 1969 | } |
c27004ec | 1970 | stw_p((uint8_t *)(long)addr, val); |
f32fc648 FB |
1971 | #ifdef USE_KQEMU |
1972 | if (cpu_single_env->kqemu_enabled && | |
1973 | (dirty_flags & KQEMU_MODIFY_PAGE_MASK) != KQEMU_MODIFY_PAGE_MASK) | |
1974 | kqemu_modify_page(cpu_single_env, ram_addr); | |
1975 | #endif | |
f23db169 FB |
1976 | dirty_flags |= (0xff & ~CODE_DIRTY_FLAG); |
1977 | phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS] = dirty_flags; | |
1978 | /* we remove the notdirty callback only if the code has been | |
1979 | flushed */ | |
1980 | if (dirty_flags == 0xff) | |
6a00d601 | 1981 | tlb_set_dirty(cpu_single_env, addr, cpu_single_env->mem_write_vaddr); |
9fa3e853 FB |
1982 | } |
1983 | ||
3a7d929e | 1984 | static void notdirty_mem_writel(void *opaque, target_phys_addr_t addr, uint32_t val) |
9fa3e853 | 1985 | { |
3a7d929e FB |
1986 | unsigned long ram_addr; |
1987 | int dirty_flags; | |
1988 | ram_addr = addr - (unsigned long)phys_ram_base; | |
1989 | dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS]; | |
1990 | if (!(dirty_flags & CODE_DIRTY_FLAG)) { | |
9fa3e853 | 1991 | #if !defined(CONFIG_USER_ONLY) |
3a7d929e FB |
1992 | tb_invalidate_phys_page_fast(ram_addr, 4); |
1993 | dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS]; | |
9fa3e853 | 1994 | #endif |
3a7d929e | 1995 | } |
c27004ec | 1996 | stl_p((uint8_t *)(long)addr, val); |
f32fc648 FB |
1997 | #ifdef USE_KQEMU |
1998 | if (cpu_single_env->kqemu_enabled && | |
1999 | (dirty_flags & KQEMU_MODIFY_PAGE_MASK) != KQEMU_MODIFY_PAGE_MASK) | |
2000 | kqemu_modify_page(cpu_single_env, ram_addr); | |
2001 | #endif | |
f23db169 FB |
2002 | dirty_flags |= (0xff & ~CODE_DIRTY_FLAG); |
2003 | phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS] = dirty_flags; | |
2004 | /* we remove the notdirty callback only if the code has been | |
2005 | flushed */ | |
2006 | if (dirty_flags == 0xff) | |
6a00d601 | 2007 | tlb_set_dirty(cpu_single_env, addr, cpu_single_env->mem_write_vaddr); |
9fa3e853 FB |
2008 | } |
2009 | ||
3a7d929e | 2010 | static CPUReadMemoryFunc *error_mem_read[3] = { |
9fa3e853 FB |
2011 | NULL, /* never used */ |
2012 | NULL, /* never used */ | |
2013 | NULL, /* never used */ | |
2014 | }; | |
2015 | ||
1ccde1cb FB |
2016 | static CPUWriteMemoryFunc *notdirty_mem_write[3] = { |
2017 | notdirty_mem_writeb, | |
2018 | notdirty_mem_writew, | |
2019 | notdirty_mem_writel, | |
2020 | }; | |
2021 | ||
6658ffb8 PB |
2022 | #if defined(CONFIG_SOFTMMU) |
2023 | /* Watchpoint access routines. Watchpoints are inserted using TLB tricks, | |
2024 | so these check for a hit then pass through to the normal out-of-line | |
2025 | phys routines. */ | |
2026 | static uint32_t watch_mem_readb(void *opaque, target_phys_addr_t addr) | |
2027 | { | |
2028 | return ldub_phys(addr); | |
2029 | } | |
2030 | ||
2031 | static uint32_t watch_mem_readw(void *opaque, target_phys_addr_t addr) | |
2032 | { | |
2033 | return lduw_phys(addr); | |
2034 | } | |
2035 | ||
2036 | static uint32_t watch_mem_readl(void *opaque, target_phys_addr_t addr) | |
2037 | { | |
2038 | return ldl_phys(addr); | |
2039 | } | |
2040 | ||
2041 | /* Generate a debug exception if a watchpoint has been hit. | |
2042 | Returns the real physical address of the access. addr will be a host | |
2043 | address in the is_ram case. */ | |
2044 | static target_ulong check_watchpoint(target_phys_addr_t addr) | |
2045 | { | |
2046 | CPUState *env = cpu_single_env; | |
2047 | target_ulong watch; | |
2048 | target_ulong retaddr; | |
2049 | int i; | |
2050 | ||
2051 | retaddr = addr; | |
2052 | for (i = 0; i < env->nb_watchpoints; i++) { | |
2053 | watch = env->watchpoint[i].vaddr; | |
2054 | if (((env->mem_write_vaddr ^ watch) & TARGET_PAGE_MASK) == 0) { | |
2055 | if (env->watchpoint[i].is_ram) | |
2056 | retaddr = addr - (unsigned long)phys_ram_base; | |
2057 | if (((addr ^ watch) & ~TARGET_PAGE_MASK) == 0) { | |
2058 | cpu_single_env->watchpoint_hit = i + 1; | |
2059 | cpu_interrupt(cpu_single_env, CPU_INTERRUPT_DEBUG); | |
2060 | break; | |
2061 | } | |
2062 | } | |
2063 | } | |
2064 | return retaddr; | |
2065 | } | |
2066 | ||
2067 | static void watch_mem_writeb(void *opaque, target_phys_addr_t addr, | |
2068 | uint32_t val) | |
2069 | { | |
2070 | addr = check_watchpoint(addr); | |
2071 | stb_phys(addr, val); | |
2072 | } | |
2073 | ||
2074 | static void watch_mem_writew(void *opaque, target_phys_addr_t addr, | |
2075 | uint32_t val) | |
2076 | { | |
2077 | addr = check_watchpoint(addr); | |
2078 | stw_phys(addr, val); | |
2079 | } | |
2080 | ||
2081 | static void watch_mem_writel(void *opaque, target_phys_addr_t addr, | |
2082 | uint32_t val) | |
2083 | { | |
2084 | addr = check_watchpoint(addr); | |
2085 | stl_phys(addr, val); | |
2086 | } | |
2087 | ||
2088 | static CPUReadMemoryFunc *watch_mem_read[3] = { | |
2089 | watch_mem_readb, | |
2090 | watch_mem_readw, | |
2091 | watch_mem_readl, | |
2092 | }; | |
2093 | ||
2094 | static CPUWriteMemoryFunc *watch_mem_write[3] = { | |
2095 | watch_mem_writeb, | |
2096 | watch_mem_writew, | |
2097 | watch_mem_writel, | |
2098 | }; | |
2099 | #endif | |
2100 | ||
33417e70 FB |
2101 | static void io_mem_init(void) |
2102 | { | |
3a7d929e | 2103 | cpu_register_io_memory(IO_MEM_ROM >> IO_MEM_SHIFT, error_mem_read, unassigned_mem_write, NULL); |
a4193c8a | 2104 | cpu_register_io_memory(IO_MEM_UNASSIGNED >> IO_MEM_SHIFT, unassigned_mem_read, unassigned_mem_write, NULL); |
3a7d929e | 2105 | cpu_register_io_memory(IO_MEM_NOTDIRTY >> IO_MEM_SHIFT, error_mem_read, notdirty_mem_write, NULL); |
1ccde1cb FB |
2106 | io_mem_nb = 5; |
2107 | ||
6658ffb8 PB |
2108 | #if defined(CONFIG_SOFTMMU) |
2109 | io_mem_watch = cpu_register_io_memory(-1, watch_mem_read, | |
2110 | watch_mem_write, NULL); | |
2111 | #endif | |
1ccde1cb | 2112 | /* alloc dirty bits array */ |
0a962c02 | 2113 | phys_ram_dirty = qemu_vmalloc(phys_ram_size >> TARGET_PAGE_BITS); |
3a7d929e | 2114 | memset(phys_ram_dirty, 0xff, phys_ram_size >> TARGET_PAGE_BITS); |
33417e70 FB |
2115 | } |
2116 | ||
2117 | /* mem_read and mem_write are arrays of functions containing the | |
2118 | function to access byte (index 0), word (index 1) and dword (index | |
2119 | 2). All functions must be supplied. If io_index is non-zero, the |
2120 | corresponding I/O zone is modified. If it is zero, a new I/O zone is |
2121 | allocated. The return value can be used with |
2122 | cpu_register_physical_memory(). (-1) is returned on error. */ |
2123 | int cpu_register_io_memory(int io_index, | |
2124 | CPUReadMemoryFunc **mem_read, | |
a4193c8a FB |
2125 | CPUWriteMemoryFunc **mem_write, |
2126 | void *opaque) | |
33417e70 FB |
2127 | { |
2128 | int i; | |
2129 | ||
2130 | if (io_index <= 0) { | |
b5ff1b31 | 2131 | if (io_mem_nb >= IO_MEM_NB_ENTRIES) |
33417e70 FB |
2132 | return -1; |
2133 | io_index = io_mem_nb++; | |
2134 | } else { | |
2135 | if (io_index >= IO_MEM_NB_ENTRIES) | |
2136 | return -1; | |
2137 | } | |
b5ff1b31 | 2138 | |
33417e70 FB |
2139 | for(i = 0;i < 3; i++) { |
2140 | io_mem_read[io_index][i] = mem_read[i]; | |
2141 | io_mem_write[io_index][i] = mem_write[i]; | |
2142 | } | |
a4193c8a | 2143 | io_mem_opaque[io_index] = opaque; |
33417e70 FB |
2144 | return io_index << IO_MEM_SHIFT; |
2145 | } | |
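/* Editor's sketch (hedged): a minimal device registration. The
   "mydev_*" callbacks, "base" and the opaque state "s" are
   hypothetical; only the two registration calls are this file's API. */
static CPUReadMemoryFunc *mydev_read[3] = {
    mydev_readb, mydev_readw, mydev_readl,
};
static CPUWriteMemoryFunc *mydev_write[3] = {
    mydev_writeb, mydev_writew, mydev_writel,
};

static void mydev_map(target_phys_addr_t base, void *s)
{
    int io;
    /* io_index 0 asks for a fresh I/O zone */
    io = cpu_register_io_memory(0, mydev_read, mydev_write, s);
    /* route one page of guest physical space to the callbacks */
    cpu_register_physical_memory(base, TARGET_PAGE_SIZE, io);
}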
61382a50 | 2146 | |
8926b517 FB |
2147 | CPUWriteMemoryFunc **cpu_get_io_memory_write(int io_index) |
2148 | { | |
2149 | return io_mem_write[io_index >> IO_MEM_SHIFT]; | |
2150 | } | |
2151 | ||
2152 | CPUReadMemoryFunc **cpu_get_io_memory_read(int io_index) | |
2153 | { | |
2154 | return io_mem_read[io_index >> IO_MEM_SHIFT]; | |
2155 | } | |
2156 | ||
13eb76e0 FB |
2157 | /* physical memory access (slow version, mainly for debug) */ |
2158 | #if defined(CONFIG_USER_ONLY) | |
2e12669a | 2159 | void cpu_physical_memory_rw(target_phys_addr_t addr, uint8_t *buf, |
13eb76e0 FB |
2160 | int len, int is_write) |
2161 | { | |
2162 | int l, flags; | |
2163 | target_ulong page; | |
53a5960a | 2164 | void * p; |
13eb76e0 FB |
2165 | |
2166 | while (len > 0) { | |
2167 | page = addr & TARGET_PAGE_MASK; | |
2168 | l = (page + TARGET_PAGE_SIZE) - addr; | |
2169 | if (l > len) | |
2170 | l = len; | |
2171 | flags = page_get_flags(page); | |
2172 | if (!(flags & PAGE_VALID)) | |
2173 | return; | |
2174 | if (is_write) { | |
2175 | if (!(flags & PAGE_WRITE)) | |
2176 | return; | |
53a5960a PB |
2177 | p = lock_user(addr, len, 0); |
2178 | memcpy(p, buf, len); | |
2179 | unlock_user(p, addr, len); | |
13eb76e0 FB |
2180 | } else { |
2181 | if (!(flags & PAGE_READ)) | |
2182 | return; | |
53a5960a PB |
2183 | p = lock_user(addr, len, 1); |
2184 | memcpy(buf, p, len); | |
2185 | unlock_user(p, addr, 0); | |
13eb76e0 FB |
2186 | } |
2187 | len -= l; | |
2188 | buf += l; | |
2189 | addr += l; | |
2190 | } | |
2191 | } | |
8df1cd07 | 2192 | |
13eb76e0 | 2193 | #else |
2e12669a | 2194 | void cpu_physical_memory_rw(target_phys_addr_t addr, uint8_t *buf, |
13eb76e0 FB |
2195 | int len, int is_write) |
2196 | { | |
2197 | int l, io_index; | |
2198 | uint8_t *ptr; | |
2199 | uint32_t val; | |
2e12669a FB |
2200 | target_phys_addr_t page; |
2201 | unsigned long pd; | |
92e873b9 | 2202 | PhysPageDesc *p; |
13eb76e0 FB |
2203 | |
2204 | while (len > 0) { | |
2205 | page = addr & TARGET_PAGE_MASK; | |
2206 | l = (page + TARGET_PAGE_SIZE) - addr; | |
2207 | if (l > len) | |
2208 | l = len; | |
92e873b9 | 2209 | p = phys_page_find(page >> TARGET_PAGE_BITS); |
13eb76e0 FB |
2210 | if (!p) { |
2211 | pd = IO_MEM_UNASSIGNED; | |
2212 | } else { | |
2213 | pd = p->phys_offset; | |
2214 | } | |
2215 | ||
2216 | if (is_write) { | |
3a7d929e | 2217 | if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) { |
13eb76e0 | 2218 | io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1); |
6a00d601 FB |
2219 | /* XXX: could force cpu_single_env to NULL to avoid |
2220 | potential bugs */ | |
13eb76e0 | 2221 | if (l >= 4 && ((addr & 3) == 0)) { |
1c213d19 | 2222 | /* 32 bit write access */ |
c27004ec | 2223 | val = ldl_p(buf); |
a4193c8a | 2224 | io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val); |
13eb76e0 FB |
2225 | l = 4; |
2226 | } else if (l >= 2 && ((addr & 1) == 0)) { | |
1c213d19 | 2227 | /* 16 bit write access */ |
c27004ec | 2228 | val = lduw_p(buf); |
a4193c8a | 2229 | io_mem_write[io_index][1](io_mem_opaque[io_index], addr, val); |
13eb76e0 FB |
2230 | l = 2; |
2231 | } else { | |
1c213d19 | 2232 | /* 8 bit write access */ |
c27004ec | 2233 | val = ldub_p(buf); |
a4193c8a | 2234 | io_mem_write[io_index][0](io_mem_opaque[io_index], addr, val); |
13eb76e0 FB |
2235 | l = 1; |
2236 | } | |
2237 | } else { | |
b448f2f3 FB |
2238 | unsigned long addr1; |
2239 | addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK); | |
13eb76e0 | 2240 | /* RAM case */ |
b448f2f3 | 2241 | ptr = phys_ram_base + addr1; |
13eb76e0 | 2242 | memcpy(ptr, buf, l); |
3a7d929e FB |
2243 | if (!cpu_physical_memory_is_dirty(addr1)) { |
2244 | /* invalidate code */ | |
2245 | tb_invalidate_phys_page_range(addr1, addr1 + l, 0); | |
2246 | /* set dirty bit */ | |
f23db169 FB |
2247 | phys_ram_dirty[addr1 >> TARGET_PAGE_BITS] |= |
2248 | (0xff & ~CODE_DIRTY_FLAG); | |
3a7d929e | 2249 | } |
13eb76e0 FB |
2250 | } |
2251 | } else { | |
2a4188a3 FB |
2252 | if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM && |
2253 | !(pd & IO_MEM_ROMD)) { | |
13eb76e0 FB |
2254 | /* I/O case */ |
2255 | io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1); | |
2256 | if (l >= 4 && ((addr & 3) == 0)) { | |
2257 | /* 32 bit read access */ | |
a4193c8a | 2258 | val = io_mem_read[io_index][2](io_mem_opaque[io_index], addr); |
c27004ec | 2259 | stl_p(buf, val); |
13eb76e0 FB |
2260 | l = 4; |
2261 | } else if (l >= 2 && ((addr & 1) == 0)) { | |
2262 | /* 16 bit read access */ | |
a4193c8a | 2263 | val = io_mem_read[io_index][1](io_mem_opaque[io_index], addr); |
c27004ec | 2264 | stw_p(buf, val); |
13eb76e0 FB |
2265 | l = 2; |
2266 | } else { | |
1c213d19 | 2267 | /* 8 bit read access */ |
a4193c8a | 2268 | val = io_mem_read[io_index][0](io_mem_opaque[io_index], addr); |
c27004ec | 2269 | stb_p(buf, val); |
13eb76e0 FB |
2270 | l = 1; |
2271 | } | |
2272 | } else { | |
2273 | /* RAM case */ | |
2274 | ptr = phys_ram_base + (pd & TARGET_PAGE_MASK) + | |
2275 | (addr & ~TARGET_PAGE_MASK); | |
2276 | memcpy(buf, ptr, l); | |
2277 | } | |
2278 | } | |
2279 | len -= l; | |
2280 | buf += l; | |
2281 | addr += l; | |
2282 | } | |
2283 | } | |
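/* Editor's sketch (hedged): DMA-style access from a device model;
   "desc_paddr" and the 16-byte descriptor are illustrative. */
uint8_t desc[16];

cpu_physical_memory_rw(desc_paddr, desc, sizeof(desc), 0); /* read  */
/* ... update fields in desc ... */
cpu_physical_memory_rw(desc_paddr, desc, sizeof(desc), 1); /* write */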
8df1cd07 | 2284 | |
d0ecd2aa FB |
2285 | /* used for ROM loading : can write in RAM and ROM */ |
2286 | void cpu_physical_memory_write_rom(target_phys_addr_t addr, | |
2287 | const uint8_t *buf, int len) | |
2288 | { | |
2289 | int l; | |
2290 | uint8_t *ptr; | |
2291 | target_phys_addr_t page; | |
2292 | unsigned long pd; | |
2293 | PhysPageDesc *p; | |
2294 | ||
2295 | while (len > 0) { | |
2296 | page = addr & TARGET_PAGE_MASK; | |
2297 | l = (page + TARGET_PAGE_SIZE) - addr; | |
2298 | if (l > len) | |
2299 | l = len; | |
2300 | p = phys_page_find(page >> TARGET_PAGE_BITS); | |
2301 | if (!p) { | |
2302 | pd = IO_MEM_UNASSIGNED; | |
2303 | } else { | |
2304 | pd = p->phys_offset; | |
2305 | } | |
2306 | ||
2307 | if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM && | |
2a4188a3 FB |
2308 | (pd & ~TARGET_PAGE_MASK) != IO_MEM_ROM && |
2309 | !(pd & IO_MEM_ROMD)) { | |
d0ecd2aa FB |
2310 | /* do nothing */ |
2311 | } else { | |
2312 | unsigned long addr1; | |
2313 | addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK); | |
2314 | /* ROM/RAM case */ | |
2315 | ptr = phys_ram_base + addr1; | |
2316 | memcpy(ptr, buf, l); | |
2317 | } | |
2318 | len -= l; | |
2319 | buf += l; | |
2320 | addr += l; | |
2321 | } | |
2322 | } | |
2323 | ||
2324 | ||
8df1cd07 FB |
2325 | /* warning: addr must be aligned */ |
2326 | uint32_t ldl_phys(target_phys_addr_t addr) | |
2327 | { | |
2328 | int io_index; | |
2329 | uint8_t *ptr; | |
2330 | uint32_t val; | |
2331 | unsigned long pd; | |
2332 | PhysPageDesc *p; | |
2333 | ||
2334 | p = phys_page_find(addr >> TARGET_PAGE_BITS); | |
2335 | if (!p) { | |
2336 | pd = IO_MEM_UNASSIGNED; | |
2337 | } else { | |
2338 | pd = p->phys_offset; | |
2339 | } | |
2340 | ||
2a4188a3 FB |
2341 | if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM && |
2342 | !(pd & IO_MEM_ROMD)) { | |
8df1cd07 FB |
2343 | /* I/O case */ |
2344 | io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1); | |
2345 | val = io_mem_read[io_index][2](io_mem_opaque[io_index], addr); | |
2346 | } else { | |
2347 | /* RAM case */ | |
2348 | ptr = phys_ram_base + (pd & TARGET_PAGE_MASK) + | |
2349 | (addr & ~TARGET_PAGE_MASK); | |
2350 | val = ldl_p(ptr); | |
2351 | } | |
2352 | return val; | |
2353 | } | |
2354 | ||
84b7b8e7 FB |
2355 | /* warning: addr must be aligned */ |
2356 | uint64_t ldq_phys(target_phys_addr_t addr) | |
2357 | { | |
2358 | int io_index; | |
2359 | uint8_t *ptr; | |
2360 | uint64_t val; | |
2361 | unsigned long pd; | |
2362 | PhysPageDesc *p; | |
2363 | ||
2364 | p = phys_page_find(addr >> TARGET_PAGE_BITS); | |
2365 | if (!p) { | |
2366 | pd = IO_MEM_UNASSIGNED; | |
2367 | } else { | |
2368 | pd = p->phys_offset; | |
2369 | } | |
2370 | ||
2a4188a3 FB |
2371 | if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM && |
2372 | !(pd & IO_MEM_ROMD)) { | |
84b7b8e7 FB |
2373 | /* I/O case */ |
2374 | io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1); | |
2375 | #ifdef TARGET_WORDS_BIGENDIAN | |
2376 | val = (uint64_t)io_mem_read[io_index][2](io_mem_opaque[io_index], addr) << 32; | |
2377 | val |= io_mem_read[io_index][2](io_mem_opaque[io_index], addr + 4); | |
2378 | #else | |
2379 | val = io_mem_read[io_index][2](io_mem_opaque[io_index], addr); | |
2380 | val |= (uint64_t)io_mem_read[io_index][2](io_mem_opaque[io_index], addr + 4) << 32; | |
2381 | #endif | |
2382 | } else { | |
2383 | /* RAM case */ | |
2384 | ptr = phys_ram_base + (pd & TARGET_PAGE_MASK) + | |
2385 | (addr & ~TARGET_PAGE_MASK); | |
2386 | val = ldq_p(ptr); | |
2387 | } | |
2388 | return val; | |
2389 | } | |
2390 | ||
aab33094 FB |
2391 | /* XXX: optimize */ |
2392 | uint32_t ldub_phys(target_phys_addr_t addr) | |
2393 | { | |
2394 | uint8_t val; | |
2395 | cpu_physical_memory_read(addr, &val, 1); | |
2396 | return val; | |
2397 | } | |
2398 | ||
2399 | /* XXX: optimize */ | |
2400 | uint32_t lduw_phys(target_phys_addr_t addr) | |
2401 | { | |
2402 | uint16_t val; | |
2403 | cpu_physical_memory_read(addr, (uint8_t *)&val, 2); | |
2404 | return tswap16(val); | |
2405 | } | |
2406 | ||
8df1cd07 FB |
2407 | /* warning: addr must be aligned. The ram page is not masked as dirty |
2408 | and the code inside is not invalidated. It is useful if the dirty | |
2409 | bits are used to track modified PTEs */ | |
2410 | void stl_phys_notdirty(target_phys_addr_t addr, uint32_t val) | |
2411 | { | |
2412 | int io_index; | |
2413 | uint8_t *ptr; | |
2414 | unsigned long pd; | |
2415 | PhysPageDesc *p; | |
2416 | ||
2417 | p = phys_page_find(addr >> TARGET_PAGE_BITS); | |
2418 | if (!p) { | |
2419 | pd = IO_MEM_UNASSIGNED; | |
2420 | } else { | |
2421 | pd = p->phys_offset; | |
2422 | } | |
2423 | ||
3a7d929e | 2424 | if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) { |
8df1cd07 FB |
2425 | io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1); |
2426 | io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val); | |
2427 | } else { | |
2428 | ptr = phys_ram_base + (pd & TARGET_PAGE_MASK) + | |
2429 | (addr & ~TARGET_PAGE_MASK); | |
2430 | stl_p(ptr, val); | |
2431 | } | |
2432 | } | |
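/* Editor's sketch (hedged): the PTE use case named above -- a target
   MMU walk sets the accessed bit in a guest PTE without dirtying the
   RAM page, so the dirty bitmap keeps tracking only guest-visible
   writes. PG_ACCESSED_MASK is illustrative (an x86-style flag). */
uint32_t pte;

pte = ldl_phys(pte_addr);
if (!(pte & PG_ACCESSED_MASK)) {
    pte |= PG_ACCESSED_MASK;
    stl_phys_notdirty(pte_addr, pte);
}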
2433 | ||
2434 | /* warning: addr must be aligned */ | |
8df1cd07 FB |
2435 | void stl_phys(target_phys_addr_t addr, uint32_t val) |
2436 | { | |
2437 | int io_index; | |
2438 | uint8_t *ptr; | |
2439 | unsigned long pd; | |
2440 | PhysPageDesc *p; | |
2441 | ||
2442 | p = phys_page_find(addr >> TARGET_PAGE_BITS); | |
2443 | if (!p) { | |
2444 | pd = IO_MEM_UNASSIGNED; | |
2445 | } else { | |
2446 | pd = p->phys_offset; | |
2447 | } | |
2448 | ||
3a7d929e | 2449 | if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) { |
8df1cd07 FB |
2450 | io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1); |
2451 | io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val); | |
2452 | } else { | |
2453 | unsigned long addr1; | |
2454 | addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK); | |
2455 | /* RAM case */ | |
2456 | ptr = phys_ram_base + addr1; | |
2457 | stl_p(ptr, val); | |
3a7d929e FB |
2458 | if (!cpu_physical_memory_is_dirty(addr1)) { |
2459 | /* invalidate code */ | |
2460 | tb_invalidate_phys_page_range(addr1, addr1 + 4, 0); | |
2461 | /* set dirty bit */ | |
f23db169 FB |
2462 | phys_ram_dirty[addr1 >> TARGET_PAGE_BITS] |= |
2463 | (0xff & ~CODE_DIRTY_FLAG); | |
3a7d929e | 2464 | } |
8df1cd07 FB |
2465 | } |
2466 | } | |
2467 | ||
aab33094 FB |
2468 | /* XXX: optimize */ |
2469 | void stb_phys(target_phys_addr_t addr, uint32_t val) | |
2470 | { | |
2471 | uint8_t v = val; | |
2472 | cpu_physical_memory_write(addr, &v, 1); | |
2473 | } | |
2474 | ||
2475 | /* XXX: optimize */ | |
2476 | void stw_phys(target_phys_addr_t addr, uint32_t val) | |
2477 | { | |
2478 | uint16_t v = tswap16(val); | |
2479 | cpu_physical_memory_write(addr, (const uint8_t *)&v, 2); | |
2480 | } | |
2481 | ||
2482 | /* XXX: optimize */ | |
2483 | void stq_phys(target_phys_addr_t addr, uint64_t val) | |
2484 | { | |
2485 | val = tswap64(val); | |
2486 | cpu_physical_memory_write(addr, (const uint8_t *)&val, 8); | |
2487 | } | |
2488 | ||
13eb76e0 FB |
2489 | #endif |
2490 | ||
2491 | /* virtual memory access for debug */ | |
b448f2f3 FB |
2492 | int cpu_memory_rw_debug(CPUState *env, target_ulong addr, |
2493 | uint8_t *buf, int len, int is_write) | |
13eb76e0 FB |
2494 | { |
2495 | int l; | |
2496 | target_ulong page, phys_addr; | |
2497 | ||
2498 | while (len > 0) { | |
2499 | page = addr & TARGET_PAGE_MASK; | |
2500 | phys_addr = cpu_get_phys_page_debug(env, page); | |
2501 | /* if no physical page mapped, return an error */ | |
2502 | if (phys_addr == -1) | |
2503 | return -1; | |
2504 | l = (page + TARGET_PAGE_SIZE) - addr; | |
2505 | if (l > len) | |
2506 | l = len; | |
b448f2f3 FB |
2507 | cpu_physical_memory_rw(phys_addr + (addr & ~TARGET_PAGE_MASK), |
2508 | buf, l, is_write); | |
13eb76e0 FB |
2509 | len -= l; |
2510 | buf += l; | |
2511 | addr += l; | |
2512 | } | |
2513 | return 0; | |
2514 | } | |
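/* Editor's sketch (hedged): a debugger stub reading guest virtual
   memory through the CPU's page tables; "pc" is an illustrative
   guest address. */
uint8_t insn[16];

if (cpu_memory_rw_debug(env, pc, insn, sizeof(insn), 0) < 0) {
    /* no physical page mapped at that address */
}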
2515 | ||
e3db7226 FB |
2516 | void dump_exec_info(FILE *f, |
2517 | int (*cpu_fprintf)(FILE *f, const char *fmt, ...)) | |
2518 | { | |
2519 | int i, target_code_size, max_target_code_size; | |
2520 | int direct_jmp_count, direct_jmp2_count, cross_page; | |
2521 | TranslationBlock *tb; | |
2522 | ||
2523 | target_code_size = 0; | |
2524 | max_target_code_size = 0; | |
2525 | cross_page = 0; | |
2526 | direct_jmp_count = 0; | |
2527 | direct_jmp2_count = 0; | |
2528 | for(i = 0; i < nb_tbs; i++) { | |
2529 | tb = &tbs[i]; | |
2530 | target_code_size += tb->size; | |
2531 | if (tb->size > max_target_code_size) | |
2532 | max_target_code_size = tb->size; | |
2533 | if (tb->page_addr[1] != -1) | |
2534 | cross_page++; | |
2535 | if (tb->tb_next_offset[0] != 0xffff) { | |
2536 | direct_jmp_count++; | |
2537 | if (tb->tb_next_offset[1] != 0xffff) { | |
2538 | direct_jmp2_count++; | |
2539 | } | |
2540 | } | |
2541 | } | |
2542 | /* XXX: avoid using doubles ? */ | |
2543 | cpu_fprintf(f, "TB count %d\n", nb_tbs); | |
2544 | cpu_fprintf(f, "TB avg target size %d max=%d bytes\n", | |
2545 | nb_tbs ? target_code_size / nb_tbs : 0, | |
2546 | max_target_code_size); | |
2547 | cpu_fprintf(f, "TB avg host size %d bytes (expansion ratio: %0.1f)\n", | |
2548 | nb_tbs ? (code_gen_ptr - code_gen_buffer) / nb_tbs : 0, | |
2549 | target_code_size ? (double) (code_gen_ptr - code_gen_buffer) / target_code_size : 0); | |
2550 | cpu_fprintf(f, "cross page TB count %d (%d%%)\n", | |
2551 | cross_page, | |
2552 | nb_tbs ? (cross_page * 100) / nb_tbs : 0); | |
2553 | cpu_fprintf(f, "direct jump count %d (%d%%) (2 jumps=%d %d%%)\n", | |
2554 | direct_jmp_count, | |
2555 | nb_tbs ? (direct_jmp_count * 100) / nb_tbs : 0, | |
2556 | direct_jmp2_count, | |
2557 | nb_tbs ? (direct_jmp2_count * 100) / nb_tbs : 0); | |
2558 | cpu_fprintf(f, "TB flush count %d\n", tb_flush_count); | |
2559 | cpu_fprintf(f, "TB invalidate count %d\n", tb_phys_invalidate_count); | |
2560 | cpu_fprintf(f, "TLB flush count %d\n", tlb_flush_count); | |
2561 | } | |
2562 | ||
61382a50 FB |
2563 | #if !defined(CONFIG_USER_ONLY) |
2564 | ||
2565 | #define MMUSUFFIX _cmmu | |
2566 | #define GETPC() NULL | |
2567 | #define env cpu_single_env | |
b769d8fe | 2568 | #define SOFTMMU_CODE_ACCESS |
61382a50 FB |
2569 | |
2570 | #define SHIFT 0 | |
2571 | #include "softmmu_template.h" | |
2572 | ||
2573 | #define SHIFT 1 | |
2574 | #include "softmmu_template.h" | |
2575 | ||
2576 | #define SHIFT 2 | |
2577 | #include "softmmu_template.h" | |
2578 | ||
2579 | #define SHIFT 3 | |
2580 | #include "softmmu_template.h" | |
2581 | ||
2582 | #undef env | |
2583 | ||
2584 | #endif |