/*
 * virtual page mapping and translated block handling
 *
 * Copyright (c) 2003 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */
67b915a5 | 19 | #include "config.h" |
d5a8f07c FB |
20 | #ifdef _WIN32 |
21 | #include <windows.h> | |
22 | #else | |
a98d49b1 | 23 | #include <sys/types.h> |
d5a8f07c FB |
24 | #include <sys/mman.h> |
25 | #endif | |
54936004 FB |
26 | #include <stdlib.h> |
27 | #include <stdio.h> | |
28 | #include <stdarg.h> | |
29 | #include <string.h> | |
30 | #include <errno.h> | |
31 | #include <unistd.h> | |
32 | #include <inttypes.h> | |
33 | ||
6180a181 FB |
34 | #include "cpu.h" |
35 | #include "exec-all.h" | |
ca10f867 | 36 | #include "qemu-common.h" |
b67d9a52 | 37 | #include "tcg.h" |
b3c7724c | 38 | #include "hw/hw.h" |
74576198 | 39 | #include "osdep.h" |
7ba1e619 | 40 | #include "kvm.h" |
53a5960a PB |
41 | #if defined(CONFIG_USER_ONLY) |
42 | #include <qemu.h> | |
fd052bf6 | 43 | #include <signal.h> |
53a5960a | 44 | #endif |
54936004 | 45 | |
//#define DEBUG_TB_INVALIDATE
//#define DEBUG_FLUSH
//#define DEBUG_TLB
//#define DEBUG_UNASSIGNED

/* make various TB consistency checks */
//#define DEBUG_TB_CHECK
//#define DEBUG_TLB_CHECK

//#define DEBUG_IOPORT
//#define DEBUG_SUBPAGE

#if !defined(CONFIG_USER_ONLY)
/* TB consistency checks only implemented for usermode emulation.  */
#undef DEBUG_TB_CHECK
#endif

#define SMC_BITMAP_USE_THRESHOLD 10

#if defined(TARGET_SPARC64)
#define TARGET_PHYS_ADDR_SPACE_BITS 41
#elif defined(TARGET_SPARC)
#define TARGET_PHYS_ADDR_SPACE_BITS 36
#elif defined(TARGET_ALPHA)
#define TARGET_PHYS_ADDR_SPACE_BITS 42
#define TARGET_VIRT_ADDR_SPACE_BITS 42
#elif defined(TARGET_PPC64)
#define TARGET_PHYS_ADDR_SPACE_BITS 42
#elif defined(TARGET_X86_64)
#define TARGET_PHYS_ADDR_SPACE_BITS 42
#elif defined(TARGET_I386)
#define TARGET_PHYS_ADDR_SPACE_BITS 36
#else
#define TARGET_PHYS_ADDR_SPACE_BITS 32
#endif

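/* Note: TARGET_PHYS_ADDR_SPACE_BITS also bounds the depth of the physical
   page table below: phys_page_find_alloc() adds an extra indirection level
   whenever it exceeds 32 bits (see its TARGET_PHYS_ADDR_SPACE_BITS > 32
   case). */
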
static TranslationBlock *tbs;
int code_gen_max_blocks;
TranslationBlock *tb_phys_hash[CODE_GEN_PHYS_HASH_SIZE];
static int nb_tbs;
/* any access to the tbs or the page table must use this lock */
spinlock_t tb_lock = SPIN_LOCK_UNLOCKED;

#if defined(__arm__) || defined(__sparc_v9__)
/* The prologue must be reachable with a direct jump. ARM and Sparc64
   have limited branch ranges (possibly also PPC) so place it in a
   section close to code segment. */
#define code_gen_section                                \
    __attribute__((__section__(".gen_code")))           \
    __attribute__((aligned (32)))
#elif defined(_WIN32)
/* Maximum alignment for Win32 is 16. */
#define code_gen_section                                \
    __attribute__((aligned (16)))
#else
#define code_gen_section                                \
    __attribute__((aligned (32)))
#endif

uint8_t code_gen_prologue[1024] code_gen_section;
static uint8_t *code_gen_buffer;
static unsigned long code_gen_buffer_size;
/* threshold to flush the translated code buffer */
static unsigned long code_gen_buffer_max_size;
uint8_t *code_gen_ptr;

#if !defined(CONFIG_USER_ONLY)
int phys_ram_fd;
uint8_t *phys_ram_dirty;
static int in_migration;

typedef struct RAMBlock {
    uint8_t *host;
    ram_addr_t offset;
    ram_addr_t length;
    struct RAMBlock *next;
} RAMBlock;

static RAMBlock *ram_blocks;
/* TODO: When we implement (and use) ram deallocation (e.g. for hotplug)
   then we can no longer assume contiguous ram offsets, and external uses
   of this variable will break.  */
ram_addr_t last_ram_offset;
#endif

CPUState *first_cpu;
/* current CPU in the current thread. It is only valid inside
   cpu_exec() */
CPUState *cpu_single_env;
/* 0 = Do not count executed instructions.
   1 = Precise instruction counting.
   2 = Adaptive rate instruction counting.  */
int use_icount = 0;
/* Current instruction counter.  While executing translated code this may
   include some instructions that have not yet been executed.  */
int64_t qemu_icount;

typedef struct PageDesc {
    /* list of TBs intersecting this ram page */
    TranslationBlock *first_tb;
    /* in order to optimize self modifying code, we count the number
       of lookups we do to a given page to use a bitmap */
    unsigned int code_write_count;
    uint8_t *code_bitmap;
#if defined(CONFIG_USER_ONLY)
    unsigned long flags;
#endif
} PageDesc;

typedef struct PhysPageDesc {
    /* offset in host memory of the page + io_index in the low bits */
    ram_addr_t phys_offset;
    ram_addr_t region_offset;
} PhysPageDesc;

#define L2_BITS 10
#if defined(CONFIG_USER_ONLY) && defined(TARGET_VIRT_ADDR_SPACE_BITS)
/* XXX: this is a temporary hack for alpha target.
 *      In the future, this is to be replaced by a multi-level table
 *      to actually be able to handle the complete 64 bits address space.
 */
#define L1_BITS (TARGET_VIRT_ADDR_SPACE_BITS - L2_BITS - TARGET_PAGE_BITS)
#else
#define L1_BITS (32 - L2_BITS - TARGET_PAGE_BITS)
#endif

#define L1_SIZE (1 << L1_BITS)
#define L2_SIZE (1 << L2_BITS)

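/* Worked example (assuming the common 4 KB target page, i.e.
   TARGET_PAGE_BITS == 12): on a 32-bit target, L1_BITS is
   32 - 10 - 12 = 10, so both levels index 1024 entries.  A page
   descriptor is then found as:

       index = addr >> TARGET_PAGE_BITS;        // virtual page number
       pd    = l1_map[index >> L2_BITS];        // top L1_BITS bits
       desc  = pd[index & (L2_SIZE - 1)];       // low L2_BITS bits

   page_l1_map() and page_find() below implement exactly this split. */
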
unsigned long qemu_real_host_page_size;
unsigned long qemu_host_page_bits;
unsigned long qemu_host_page_size;
unsigned long qemu_host_page_mask;

/* XXX: for system emulation, it could just be an array */
static PageDesc *l1_map[L1_SIZE];

#if !defined(CONFIG_USER_ONLY)
static PhysPageDesc **l1_phys_map;

static void io_mem_init(void);

/* io memory support */
CPUWriteMemoryFunc *io_mem_write[IO_MEM_NB_ENTRIES][4];
CPUReadMemoryFunc *io_mem_read[IO_MEM_NB_ENTRIES][4];
void *io_mem_opaque[IO_MEM_NB_ENTRIES];
static char io_mem_used[IO_MEM_NB_ENTRIES];
static int io_mem_watch;
#endif

/* log support */
#ifdef WIN32
static const char *logfilename = "qemu.log";
#else
static const char *logfilename = "/tmp/qemu.log";
#endif
FILE *logfile;
int loglevel;
static int log_append = 0;

/* statistics */
static int tlb_flush_count;
static int tb_flush_count;
static int tb_phys_invalidate_count;

#ifdef _WIN32
static void map_exec(void *addr, long size)
{
    DWORD old_protect;
    VirtualProtect(addr, size,
                   PAGE_EXECUTE_READWRITE, &old_protect);
}
#else
static void map_exec(void *addr, long size)
{
    unsigned long start, end, page_size;

    page_size = getpagesize();
    start = (unsigned long)addr;
    start &= ~(page_size - 1);

    end = (unsigned long)addr + size;
    end += page_size - 1;
    end &= ~(page_size - 1);

    mprotect((void *)start, end - start,
             PROT_READ | PROT_WRITE | PROT_EXEC);
}
#endif

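/* Note: mprotect() only operates on whole host pages, so the Unix map_exec()
   above rounds 'start' down and 'end' up to page boundaries; the protected
   range can therefore be slightly larger than [addr, addr + size). */
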
static void page_init(void)
{
    /* NOTE: we can always suppose that qemu_host_page_size >=
       TARGET_PAGE_SIZE */
#ifdef _WIN32
    {
        SYSTEM_INFO system_info;

        GetSystemInfo(&system_info);
        qemu_real_host_page_size = system_info.dwPageSize;
    }
#else
    qemu_real_host_page_size = getpagesize();
#endif
    if (qemu_host_page_size == 0)
        qemu_host_page_size = qemu_real_host_page_size;
    if (qemu_host_page_size < TARGET_PAGE_SIZE)
        qemu_host_page_size = TARGET_PAGE_SIZE;
    qemu_host_page_bits = 0;
    while ((1 << qemu_host_page_bits) < qemu_host_page_size)
        qemu_host_page_bits++;
    qemu_host_page_mask = ~(qemu_host_page_size - 1);
#if !defined(CONFIG_USER_ONLY)
    l1_phys_map = qemu_vmalloc(L1_SIZE * sizeof(void *));
    memset(l1_phys_map, 0, L1_SIZE * sizeof(void *));
#endif

#if !defined(_WIN32) && defined(CONFIG_USER_ONLY)
    {
        long long startaddr, endaddr;
        FILE *f;
        int n;

        mmap_lock();
        last_brk = (unsigned long)sbrk(0);
        f = fopen("/proc/self/maps", "r");
        if (f) {
            do {
                n = fscanf (f, "%llx-%llx %*[^\n]\n", &startaddr, &endaddr);
                if (n == 2) {
                    startaddr = MIN(startaddr,
                                    (1ULL << TARGET_PHYS_ADDR_SPACE_BITS) - 1);
                    endaddr = MIN(endaddr,
                                  (1ULL << TARGET_PHYS_ADDR_SPACE_BITS) - 1);
                    page_set_flags(startaddr & TARGET_PAGE_MASK,
                                   TARGET_PAGE_ALIGN(endaddr),
                                   PAGE_RESERVED);
                }
            } while (!feof(f));
            fclose(f);
        }
        mmap_unlock();
    }
#endif
}

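/* In the user-only build, the /proc/self/maps scan above marks every range
   the host has already mapped as PAGE_RESERVED; the apparent intent is that
   guest mmap emulation will then avoid handing out addresses that collide
   with existing host mappings. */
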
static inline PageDesc **page_l1_map(target_ulong index)
{
#if TARGET_LONG_BITS > 32
    /* Host memory outside guest VM.  For 32-bit targets we have already
       excluded high addresses.  */
    if (index > ((target_ulong)L2_SIZE * L1_SIZE))
        return NULL;
#endif
    return &l1_map[index >> L2_BITS];
}

static inline PageDesc *page_find_alloc(target_ulong index)
{
    PageDesc **lp, *p;
    lp = page_l1_map(index);
    if (!lp)
        return NULL;

    p = *lp;
    if (!p) {
        /* allocate if not found */
#if defined(CONFIG_USER_ONLY)
        size_t len = sizeof(PageDesc) * L2_SIZE;
        /* Don't use qemu_malloc because it may recurse.  */
        p = mmap(NULL, len, PROT_READ | PROT_WRITE,
                 MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
        *lp = p;
        if (h2g_valid(p)) {
            unsigned long addr = h2g(p);
            page_set_flags(addr & TARGET_PAGE_MASK,
                           TARGET_PAGE_ALIGN(addr + len),
                           PAGE_RESERVED);
        }
#else
        p = qemu_mallocz(sizeof(PageDesc) * L2_SIZE);
        *lp = p;
#endif
    }
    return p + (index & (L2_SIZE - 1));
}

static inline PageDesc *page_find(target_ulong index)
{
    PageDesc **lp, *p;
    lp = page_l1_map(index);
    if (!lp)
        return NULL;

    p = *lp;
    if (!p) {
        return NULL;
    }
    return p + (index & (L2_SIZE - 1));
}

#if !defined(CONFIG_USER_ONLY)
static PhysPageDesc *phys_page_find_alloc(target_phys_addr_t index, int alloc)
{
    void **lp, **p;
    PhysPageDesc *pd;

    p = (void **)l1_phys_map;
#if TARGET_PHYS_ADDR_SPACE_BITS > 32

#if TARGET_PHYS_ADDR_SPACE_BITS > (32 + L1_BITS)
#error unsupported TARGET_PHYS_ADDR_SPACE_BITS
#endif
    lp = p + ((index >> (L1_BITS + L2_BITS)) & (L1_SIZE - 1));
    p = *lp;
    if (!p) {
        /* allocate if not found */
        if (!alloc)
            return NULL;
        p = qemu_vmalloc(sizeof(void *) * L1_SIZE);
        memset(p, 0, sizeof(void *) * L1_SIZE);
        *lp = p;
    }
#endif
    lp = p + ((index >> L2_BITS) & (L1_SIZE - 1));
    pd = *lp;
    if (!pd) {
        int i;
        /* allocate if not found */
        if (!alloc)
            return NULL;
        pd = qemu_vmalloc(sizeof(PhysPageDesc) * L2_SIZE);
        *lp = pd;
        for (i = 0; i < L2_SIZE; i++) {
            pd[i].phys_offset = IO_MEM_UNASSIGNED;
            pd[i].region_offset = (index + i) << TARGET_PAGE_BITS;
        }
    }
    return ((PhysPageDesc *)pd) + (index & (L2_SIZE - 1));
}

static inline PhysPageDesc *phys_page_find(target_phys_addr_t index)
{
    return phys_page_find_alloc(index, 0);
}

static void tlb_protect_code(ram_addr_t ram_addr);
static void tlb_unprotect_code_phys(CPUState *env, ram_addr_t ram_addr,
                                    target_ulong vaddr);
#define mmap_lock() do { } while(0)
#define mmap_unlock() do { } while(0)
#endif

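/* In the system emulation build, the mmap_lock()/mmap_unlock() macros above
   compile away, since the page tables are only touched from the single
   emulation thread; the user-mode build provides real locking elsewhere,
   because guest mmap()/munmap() emulation can race with translation. */
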
#define DEFAULT_CODE_GEN_BUFFER_SIZE (32 * 1024 * 1024)

#if defined(CONFIG_USER_ONLY)
/* Currently it is not recommended to allocate big chunks of data in
   user mode; this will change once a dedicated libc is used. */
#define USE_STATIC_CODE_GEN_BUFFER
#endif

#ifdef USE_STATIC_CODE_GEN_BUFFER
static uint8_t static_code_gen_buffer[DEFAULT_CODE_GEN_BUFFER_SIZE];
#endif

static void code_gen_alloc(unsigned long tb_size)
{
#ifdef USE_STATIC_CODE_GEN_BUFFER
    code_gen_buffer = static_code_gen_buffer;
    code_gen_buffer_size = DEFAULT_CODE_GEN_BUFFER_SIZE;
    map_exec(code_gen_buffer, code_gen_buffer_size);
#else
    code_gen_buffer_size = tb_size;
    if (code_gen_buffer_size == 0) {
#if defined(CONFIG_USER_ONLY)
        /* in user mode, phys_ram_size is not meaningful */
        code_gen_buffer_size = DEFAULT_CODE_GEN_BUFFER_SIZE;
#else
        /* XXX: needs adjustments */
        code_gen_buffer_size = (unsigned long)(ram_size / 4);
#endif
    }
    if (code_gen_buffer_size < MIN_CODE_GEN_BUFFER_SIZE)
        code_gen_buffer_size = MIN_CODE_GEN_BUFFER_SIZE;
    /* The code gen buffer location may have constraints depending on
       the host cpu and OS */
#if defined(__linux__)
    {
        int flags;
        void *start = NULL;

        flags = MAP_PRIVATE | MAP_ANONYMOUS;
#if defined(__x86_64__)
        flags |= MAP_32BIT;
        /* Cannot map more than that */
        if (code_gen_buffer_size > (800 * 1024 * 1024))
            code_gen_buffer_size = (800 * 1024 * 1024);
#elif defined(__sparc_v9__)
        /* Map the buffer below 2G, so we can use direct calls and branches */
        flags |= MAP_FIXED;
        start = (void *) 0x60000000UL;
        if (code_gen_buffer_size > (512 * 1024 * 1024))
            code_gen_buffer_size = (512 * 1024 * 1024);
#elif defined(__arm__)
        /* Map the buffer below 32M, so we can use direct calls and branches */
        flags |= MAP_FIXED;
        start = (void *) 0x01000000UL;
        if (code_gen_buffer_size > 16 * 1024 * 1024)
            code_gen_buffer_size = 16 * 1024 * 1024;
#endif
        code_gen_buffer = mmap(start, code_gen_buffer_size,
                               PROT_WRITE | PROT_READ | PROT_EXEC,
                               flags, -1, 0);
        if (code_gen_buffer == MAP_FAILED) {
            fprintf(stderr, "Could not allocate dynamic translator buffer\n");
            exit(1);
        }
    }
#elif defined(__FreeBSD__) || defined(__FreeBSD_kernel__) || defined(__DragonFly__)
    {
        int flags;
        void *addr = NULL;
        flags = MAP_PRIVATE | MAP_ANONYMOUS;
#if defined(__x86_64__)
        /* FreeBSD doesn't have MAP_32BIT, use MAP_FIXED and assume
         * 0x40000000 is free */
        flags |= MAP_FIXED;
        addr = (void *)0x40000000;
        /* Cannot map more than that */
        if (code_gen_buffer_size > (800 * 1024 * 1024))
            code_gen_buffer_size = (800 * 1024 * 1024);
#endif
        code_gen_buffer = mmap(addr, code_gen_buffer_size,
                               PROT_WRITE | PROT_READ | PROT_EXEC,
                               flags, -1, 0);
        if (code_gen_buffer == MAP_FAILED) {
            fprintf(stderr, "Could not allocate dynamic translator buffer\n");
            exit(1);
        }
    }
#else
    code_gen_buffer = qemu_malloc(code_gen_buffer_size);
    map_exec(code_gen_buffer, code_gen_buffer_size);
#endif
#endif /* !USE_STATIC_CODE_GEN_BUFFER */
    map_exec(code_gen_prologue, sizeof(code_gen_prologue));
    code_gen_buffer_max_size = code_gen_buffer_size -
        code_gen_max_block_size();
    code_gen_max_blocks = code_gen_buffer_size / CODE_GEN_AVG_BLOCK_SIZE;
    tbs = qemu_malloc(code_gen_max_blocks * sizeof(TranslationBlock));
}

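/* code_gen_buffer_max_size deliberately leaves code_gen_max_block_size()
   bytes of headroom at the end of the buffer, so tb_alloc() can refuse an
   allocation *before* translation starts instead of letting cpu_gen_code()
   run off the end of the buffer. */
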
/* Must be called before using the QEMU cpus. 'tb_size' is the size
   (in bytes) allocated to the translation buffer. Zero means default
   size. */
void cpu_exec_init_all(unsigned long tb_size)
{
    cpu_gen_init();
    code_gen_alloc(tb_size);
    code_gen_ptr = code_gen_buffer;
    page_init();
#if !defined(CONFIG_USER_ONLY)
    io_mem_init();
#endif
}

#if defined(CPU_SAVE_VERSION) && !defined(CONFIG_USER_ONLY)

static void cpu_common_pre_save(void *opaque)
{
    CPUState *env = opaque;

    cpu_synchronize_state(env);
}

static int cpu_common_pre_load(void *opaque)
{
    CPUState *env = opaque;

    cpu_synchronize_state(env);
    return 0;
}

static int cpu_common_post_load(void *opaque, int version_id)
{
    CPUState *env = opaque;

    /* 0x01 was CPU_INTERRUPT_EXIT. This line can be removed when the
       version_id is increased. */
    env->interrupt_request &= ~0x01;
    tlb_flush(env, 1);

    return 0;
}

static const VMStateDescription vmstate_cpu_common = {
    .name = "cpu_common",
    .version_id = 1,
    .minimum_version_id = 1,
    .minimum_version_id_old = 1,
    .pre_save = cpu_common_pre_save,
    .pre_load = cpu_common_pre_load,
    .post_load = cpu_common_post_load,
    .fields = (VMStateField []) {
        VMSTATE_UINT32(halted, CPUState),
        VMSTATE_UINT32(interrupt_request, CPUState),
        VMSTATE_END_OF_LIST()
    }
};
#endif

CPUState *qemu_get_cpu(int cpu)
{
    CPUState *env = first_cpu;

    while (env) {
        if (env->cpu_index == cpu)
            break;
        env = env->next_cpu;
    }

    return env;
}

void cpu_exec_init(CPUState *env)
{
    CPUState **penv;
    int cpu_index;

#if defined(CONFIG_USER_ONLY)
    cpu_list_lock();
#endif
    env->next_cpu = NULL;
    penv = &first_cpu;
    cpu_index = 0;
    while (*penv != NULL) {
        penv = &(*penv)->next_cpu;
        cpu_index++;
    }
    env->cpu_index = cpu_index;
    env->numa_node = 0;
    QTAILQ_INIT(&env->breakpoints);
    QTAILQ_INIT(&env->watchpoints);
    *penv = env;
#if defined(CONFIG_USER_ONLY)
    cpu_list_unlock();
#endif
#if defined(CPU_SAVE_VERSION) && !defined(CONFIG_USER_ONLY)
    vmstate_register(cpu_index, &vmstate_cpu_common, env);
    register_savevm("cpu", cpu_index, CPU_SAVE_VERSION,
                    cpu_save, cpu_load, env);
#endif
}

static inline void invalidate_page_bitmap(PageDesc *p)
{
    if (p->code_bitmap) {
        qemu_free(p->code_bitmap);
        p->code_bitmap = NULL;
    }
    p->code_write_count = 0;
}

/* set to NULL all the 'first_tb' fields in all PageDescs */
static void page_flush_tb(void)
{
    int i, j;
    PageDesc *p;

    for(i = 0; i < L1_SIZE; i++) {
        p = l1_map[i];
        if (p) {
            for(j = 0; j < L2_SIZE; j++) {
                p->first_tb = NULL;
                invalidate_page_bitmap(p);
                p++;
            }
        }
    }
}

/* flush all the translation blocks */
/* XXX: tb_flush is currently not thread safe */
void tb_flush(CPUState *env1)
{
    CPUState *env;
#if defined(DEBUG_FLUSH)
    printf("qemu: flush code_size=%ld nb_tbs=%d avg_tb_size=%ld\n",
           (unsigned long)(code_gen_ptr - code_gen_buffer),
           nb_tbs, nb_tbs > 0 ?
           ((unsigned long)(code_gen_ptr - code_gen_buffer)) / nb_tbs : 0);
#endif
    if ((unsigned long)(code_gen_ptr - code_gen_buffer) > code_gen_buffer_size)
        cpu_abort(env1, "Internal error: code buffer overflow\n");

    nb_tbs = 0;

    for(env = first_cpu; env != NULL; env = env->next_cpu) {
        memset (env->tb_jmp_cache, 0, TB_JMP_CACHE_SIZE * sizeof (void *));
    }

    memset (tb_phys_hash, 0, CODE_GEN_PHYS_HASH_SIZE * sizeof (void *));
    page_flush_tb();

    code_gen_ptr = code_gen_buffer;
    /* XXX: flush processor icache at this point if cache flush is
       expensive */
    tb_flush_count++;
}

#ifdef DEBUG_TB_CHECK

static void tb_invalidate_check(target_ulong address)
{
    TranslationBlock *tb;
    int i;
    address &= TARGET_PAGE_MASK;
    for(i = 0; i < CODE_GEN_PHYS_HASH_SIZE; i++) {
        for(tb = tb_phys_hash[i]; tb != NULL; tb = tb->phys_hash_next) {
            if (!(address + TARGET_PAGE_SIZE <= tb->pc ||
                  address >= tb->pc + tb->size)) {
                printf("ERROR invalidate: address=" TARGET_FMT_lx
                       " PC=%08lx size=%04x\n",
                       address, (long)tb->pc, tb->size);
            }
        }
    }
}

/* verify that all the pages have correct rights for code */
static void tb_page_check(void)
{
    TranslationBlock *tb;
    int i, flags1, flags2;

    for(i = 0; i < CODE_GEN_PHYS_HASH_SIZE; i++) {
        for(tb = tb_phys_hash[i]; tb != NULL; tb = tb->phys_hash_next) {
            flags1 = page_get_flags(tb->pc);
            flags2 = page_get_flags(tb->pc + tb->size - 1);
            if ((flags1 & PAGE_WRITE) || (flags2 & PAGE_WRITE)) {
                printf("ERROR page flags: PC=%08lx size=%04x f1=%x f2=%x\n",
                       (long)tb->pc, tb->size, flags1, flags2);
            }
        }
    }
}

#endif

/* invalidate one TB */
static inline void tb_remove(TranslationBlock **ptb, TranslationBlock *tb,
                             int next_offset)
{
    TranslationBlock *tb1;
    for(;;) {
        tb1 = *ptb;
        if (tb1 == tb) {
            *ptb = *(TranslationBlock **)((char *)tb1 + next_offset);
            break;
        }
        ptb = (TranslationBlock **)((char *)tb1 + next_offset);
    }
}

static inline void tb_page_remove(TranslationBlock **ptb, TranslationBlock *tb)
{
    TranslationBlock *tb1;
    unsigned int n1;

    for(;;) {
        tb1 = *ptb;
        n1 = (long)tb1 & 3;
        tb1 = (TranslationBlock *)((long)tb1 & ~3);
        if (tb1 == tb) {
            *ptb = tb1->page_next[n1];
            break;
        }
        ptb = &tb1->page_next[n1];
    }
}

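/* The page and jump lists store tagged pointers: TranslationBlock
   structures are at least 4-byte aligned, so the low two bits of each link
   are free to encode which of the TB's (up to two) pages the link belongs
   to (n == 0 or 1); the value 2 serves as an end-of-list marker, as in
   jmp_first below, which is initialized to ((long)tb | 2). */
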
static inline void tb_jmp_remove(TranslationBlock *tb, int n)
{
    TranslationBlock *tb1, **ptb;
    unsigned int n1;

    ptb = &tb->jmp_next[n];
    tb1 = *ptb;
    if (tb1) {
        /* find tb(n) in circular list */
        for(;;) {
            tb1 = *ptb;
            n1 = (long)tb1 & 3;
            tb1 = (TranslationBlock *)((long)tb1 & ~3);
            if (n1 == n && tb1 == tb)
                break;
            if (n1 == 2) {
                ptb = &tb1->jmp_first;
            } else {
                ptb = &tb1->jmp_next[n1];
            }
        }
        /* now we can suppress tb(n) from the list */
        *ptb = tb->jmp_next[n];

        tb->jmp_next[n] = NULL;
    }
}

/* reset the jump entry 'n' of a TB so that it is not chained to
   another TB */
static inline void tb_reset_jump(TranslationBlock *tb, int n)
{
    tb_set_jmp_target(tb, n, (unsigned long)(tb->tc_ptr + tb->tb_next_offset[n]));
}

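/* Resetting jump 'n' points the patched branch back into the TB's own
   generated code (tc_ptr + tb_next_offset[n]), so execution falls back to
   the TB's exit path instead of chaining directly into another TB. */
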
void tb_phys_invalidate(TranslationBlock *tb, target_ulong page_addr)
{
    CPUState *env;
    PageDesc *p;
    unsigned int h, n1;
    target_phys_addr_t phys_pc;
    TranslationBlock *tb1, *tb2;

    /* remove the TB from the hash list */
    phys_pc = tb->page_addr[0] + (tb->pc & ~TARGET_PAGE_MASK);
    h = tb_phys_hash_func(phys_pc);
    tb_remove(&tb_phys_hash[h], tb,
              offsetof(TranslationBlock, phys_hash_next));

    /* remove the TB from the page list */
    if (tb->page_addr[0] != page_addr) {
        p = page_find(tb->page_addr[0] >> TARGET_PAGE_BITS);
        tb_page_remove(&p->first_tb, tb);
        invalidate_page_bitmap(p);
    }
    if (tb->page_addr[1] != -1 && tb->page_addr[1] != page_addr) {
        p = page_find(tb->page_addr[1] >> TARGET_PAGE_BITS);
        tb_page_remove(&p->first_tb, tb);
        invalidate_page_bitmap(p);
    }

    tb_invalidated_flag = 1;

    /* remove the TB from the per-CPU jump caches */
    h = tb_jmp_cache_hash_func(tb->pc);
    for(env = first_cpu; env != NULL; env = env->next_cpu) {
        if (env->tb_jmp_cache[h] == tb)
            env->tb_jmp_cache[h] = NULL;
    }

    /* suppress this TB from the two jump lists */
    tb_jmp_remove(tb, 0);
    tb_jmp_remove(tb, 1);

    /* suppress any remaining jumps to this TB */
    tb1 = tb->jmp_first;
    for(;;) {
        n1 = (long)tb1 & 3;
        if (n1 == 2)
            break;
        tb1 = (TranslationBlock *)((long)tb1 & ~3);
        tb2 = tb1->jmp_next[n1];
        tb_reset_jump(tb1, n1);
        tb1->jmp_next[n1] = NULL;
        tb1 = tb2;
    }
    tb->jmp_first = (TranslationBlock *)((long)tb | 2); /* fail safe */

    tb_phys_invalidate_count++;
}

static inline void set_bits(uint8_t *tab, int start, int len)
{
    int end, mask, end1;

    end = start + len;
    tab += start >> 3;
    mask = 0xff << (start & 7);
    if ((start & ~7) == (end & ~7)) {
        if (start < end) {
            mask &= ~(0xff << (end & 7));
            *tab |= mask;
        }
    } else {
        *tab++ |= mask;
        start = (start + 8) & ~7;
        end1 = end & ~7;
        while (start < end1) {
            *tab++ = 0xff;
            start += 8;
        }
        if (start < end) {
            mask = ~(0xff << (end & 7));
            *tab |= mask;
        }
    }
}

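/* Example: set_bits(tab, 5, 2) takes the same-byte branch: mask starts as
   0xff << 5 == 0xe0, is trimmed by ~(0xff << 7) to 0x60, and bits 5 and 6
   of tab[0] are set.  For longer runs the middle loop fills whole 0xff
   bytes and the edges are handled by partial masks. */
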
static void build_page_bitmap(PageDesc *p)
{
    int n, tb_start, tb_end;
    TranslationBlock *tb;

    p->code_bitmap = qemu_mallocz(TARGET_PAGE_SIZE / 8);

    tb = p->first_tb;
    while (tb != NULL) {
        n = (long)tb & 3;
        tb = (TranslationBlock *)((long)tb & ~3);
        /* NOTE: this is subtle as a TB may span two physical pages */
        if (n == 0) {
            /* NOTE: tb_end may be after the end of the page, but
               it is not a problem */
            tb_start = tb->pc & ~TARGET_PAGE_MASK;
            tb_end = tb_start + tb->size;
            if (tb_end > TARGET_PAGE_SIZE)
                tb_end = TARGET_PAGE_SIZE;
        } else {
            tb_start = 0;
            tb_end = ((tb->pc + tb->size) & ~TARGET_PAGE_MASK);
        }
        set_bits(p->code_bitmap, tb_start, tb_end - tb_start);
        tb = tb->page_next[n];
    }
}

TranslationBlock *tb_gen_code(CPUState *env,
                              target_ulong pc, target_ulong cs_base,
                              int flags, int cflags)
{
    TranslationBlock *tb;
    uint8_t *tc_ptr;
    target_ulong phys_pc, phys_page2, virt_page2;
    int code_gen_size;

    phys_pc = get_phys_addr_code(env, pc);
    tb = tb_alloc(pc);
    if (!tb) {
        /* flush must be done */
        tb_flush(env);
        /* cannot fail at this point */
        tb = tb_alloc(pc);
        /* Don't forget to invalidate previous TB info.  */
        tb_invalidated_flag = 1;
    }
    tc_ptr = code_gen_ptr;
    tb->tc_ptr = tc_ptr;
    tb->cs_base = cs_base;
    tb->flags = flags;
    tb->cflags = cflags;
    cpu_gen_code(env, tb, &code_gen_size);
    code_gen_ptr = (void *)(((unsigned long)code_gen_ptr + code_gen_size + CODE_GEN_ALIGN - 1) & ~(CODE_GEN_ALIGN - 1));

    /* check next page if needed */
    virt_page2 = (pc + tb->size - 1) & TARGET_PAGE_MASK;
    phys_page2 = -1;
    if ((pc & TARGET_PAGE_MASK) != virt_page2) {
        phys_page2 = get_phys_addr_code(env, virt_page2);
    }
    tb_link_phys(tb, phys_pc, phys_page2);
    return tb;
}

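/* A block whose guest code straddles a page boundary records both physical
   pages (phys_pc and phys_page2), so that tb_link_phys() can enter it in
   each page's TB list and a write to either page invalidates it. */
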
/* invalidate all TBs which intersect with the target physical page
   starting in range [start;end[. NOTE: start and end must refer to
   the same physical page. 'is_cpu_write_access' should be true if called
   from a real cpu write access: the virtual CPU will exit the current
   TB if code is modified inside this TB. */
void tb_invalidate_phys_page_range(target_phys_addr_t start, target_phys_addr_t end,
                                   int is_cpu_write_access)
{
    TranslationBlock *tb, *tb_next, *saved_tb;
    CPUState *env = cpu_single_env;
    target_ulong tb_start, tb_end;
    PageDesc *p;
    int n;
#ifdef TARGET_HAS_PRECISE_SMC
    int current_tb_not_found = is_cpu_write_access;
    TranslationBlock *current_tb = NULL;
    int current_tb_modified = 0;
    target_ulong current_pc = 0;
    target_ulong current_cs_base = 0;
    int current_flags = 0;
#endif /* TARGET_HAS_PRECISE_SMC */

    p = page_find(start >> TARGET_PAGE_BITS);
    if (!p)
        return;
    if (!p->code_bitmap &&
        ++p->code_write_count >= SMC_BITMAP_USE_THRESHOLD &&
        is_cpu_write_access) {
        /* build code bitmap */
        build_page_bitmap(p);
    }

    /* we remove all the TBs in the range [start, end[ */
    /* XXX: see if in some cases it could be faster to invalidate all the code */
    tb = p->first_tb;
    while (tb != NULL) {
        n = (long)tb & 3;
        tb = (TranslationBlock *)((long)tb & ~3);
        tb_next = tb->page_next[n];
        /* NOTE: this is subtle as a TB may span two physical pages */
        if (n == 0) {
            /* NOTE: tb_end may be after the end of the page, but
               it is not a problem */
            tb_start = tb->page_addr[0] + (tb->pc & ~TARGET_PAGE_MASK);
            tb_end = tb_start + tb->size;
        } else {
            tb_start = tb->page_addr[1];
            tb_end = tb_start + ((tb->pc + tb->size) & ~TARGET_PAGE_MASK);
        }
        if (!(tb_end <= start || tb_start >= end)) {
#ifdef TARGET_HAS_PRECISE_SMC
            if (current_tb_not_found) {
                current_tb_not_found = 0;
                current_tb = NULL;
                if (env->mem_io_pc) {
                    /* now we have a real cpu fault */
                    current_tb = tb_find_pc(env->mem_io_pc);
                }
            }
            if (current_tb == tb &&
                (current_tb->cflags & CF_COUNT_MASK) != 1) {
                /* If we are modifying the current TB, we must stop
                   its execution. We could be more precise by checking
                   that the modification is after the current PC, but it
                   would require a specialized function to partially
                   restore the CPU state */

                current_tb_modified = 1;
                cpu_restore_state(current_tb, env,
                                  env->mem_io_pc, NULL);
                cpu_get_tb_cpu_state(env, &current_pc, &current_cs_base,
                                     &current_flags);
            }
#endif /* TARGET_HAS_PRECISE_SMC */
            /* we need to do that to handle the case where a signal
               occurs while doing tb_phys_invalidate() */
            saved_tb = NULL;
            if (env) {
                saved_tb = env->current_tb;
                env->current_tb = NULL;
            }
            tb_phys_invalidate(tb, -1);
            if (env) {
                env->current_tb = saved_tb;
                if (env->interrupt_request && env->current_tb)
                    cpu_interrupt(env, env->interrupt_request);
            }
        }
        tb = tb_next;
    }
#if !defined(CONFIG_USER_ONLY)
    /* if no code remaining, no need to continue to use slow writes */
    if (!p->first_tb) {
        invalidate_page_bitmap(p);
        if (is_cpu_write_access) {
            tlb_unprotect_code_phys(env, start, env->mem_io_vaddr);
        }
    }
#endif
#ifdef TARGET_HAS_PRECISE_SMC
    if (current_tb_modified) {
        /* we generate a block containing just the instruction
           modifying the memory. It will ensure that it cannot modify
           itself */
        env->current_tb = NULL;
        tb_gen_code(env, current_pc, current_cs_base, current_flags, 1);
        cpu_resume_from_signal(env, NULL);
    }
#endif
}

/* len must be <= 8 and start must be a multiple of len */
static inline void tb_invalidate_phys_page_fast(target_phys_addr_t start, int len)
{
    PageDesc *p;
    int offset, b;
#if 0
    if (1) {
        qemu_log("modifying code at 0x%x size=%d EIP=%x PC=%08x\n",
                 cpu_single_env->mem_io_vaddr, len,
                 cpu_single_env->eip,
                 cpu_single_env->eip + (long)cpu_single_env->segs[R_CS].base);
    }
#endif
    p = page_find(start >> TARGET_PAGE_BITS);
    if (!p)
        return;
    if (p->code_bitmap) {
        offset = start & ~TARGET_PAGE_MASK;
        b = p->code_bitmap[offset >> 3] >> (offset & 7);
        if (b & ((1 << len) - 1))
            goto do_invalidate;
    } else {
    do_invalidate:
        tb_invalidate_phys_page_range(start, start + len, 1);
    }
}

#if !defined(CONFIG_SOFTMMU)
static void tb_invalidate_phys_page(target_phys_addr_t addr,
                                    unsigned long pc, void *puc)
{
    TranslationBlock *tb;
    PageDesc *p;
    int n;
#ifdef TARGET_HAS_PRECISE_SMC
    TranslationBlock *current_tb = NULL;
    CPUState *env = cpu_single_env;
    int current_tb_modified = 0;
    target_ulong current_pc = 0;
    target_ulong current_cs_base = 0;
    int current_flags = 0;
#endif

    addr &= TARGET_PAGE_MASK;
    p = page_find(addr >> TARGET_PAGE_BITS);
    if (!p)
        return;
    tb = p->first_tb;
#ifdef TARGET_HAS_PRECISE_SMC
    if (tb && pc != 0) {
        current_tb = tb_find_pc(pc);
    }
#endif
    while (tb != NULL) {
        n = (long)tb & 3;
        tb = (TranslationBlock *)((long)tb & ~3);
#ifdef TARGET_HAS_PRECISE_SMC
        if (current_tb == tb &&
            (current_tb->cflags & CF_COUNT_MASK) != 1) {
            /* If we are modifying the current TB, we must stop
               its execution. We could be more precise by checking
               that the modification is after the current PC, but it
               would require a specialized function to partially
               restore the CPU state */

            current_tb_modified = 1;
            cpu_restore_state(current_tb, env, pc, puc);
            cpu_get_tb_cpu_state(env, &current_pc, &current_cs_base,
                                 &current_flags);
        }
#endif /* TARGET_HAS_PRECISE_SMC */
        tb_phys_invalidate(tb, addr);
        tb = tb->page_next[n];
    }
    p->first_tb = NULL;
#ifdef TARGET_HAS_PRECISE_SMC
    if (current_tb_modified) {
        /* we generate a block containing just the instruction
           modifying the memory. It will ensure that it cannot modify
           itself */
        env->current_tb = NULL;
        tb_gen_code(env, current_pc, current_cs_base, current_flags, 1);
        cpu_resume_from_signal(env, puc);
    }
#endif
}
#endif

/* add the tb in the target page and protect it if necessary */
static inline void tb_alloc_page(TranslationBlock *tb,
                                 unsigned int n, target_ulong page_addr)
{
    PageDesc *p;
    TranslationBlock *last_first_tb;

    tb->page_addr[n] = page_addr;
    p = page_find_alloc(page_addr >> TARGET_PAGE_BITS);
    tb->page_next[n] = p->first_tb;
    last_first_tb = p->first_tb;
    p->first_tb = (TranslationBlock *)((long)tb | n);
    invalidate_page_bitmap(p);

#if defined(TARGET_HAS_SMC) || 1

#if defined(CONFIG_USER_ONLY)
    if (p->flags & PAGE_WRITE) {
        target_ulong addr;
        PageDesc *p2;
        int prot;

        /* force the host page as non writable (writes will have a
           page fault + mprotect overhead) */
        page_addr &= qemu_host_page_mask;
        prot = 0;
        for(addr = page_addr; addr < page_addr + qemu_host_page_size;
            addr += TARGET_PAGE_SIZE) {

            p2 = page_find (addr >> TARGET_PAGE_BITS);
            if (!p2)
                continue;
            prot |= p2->flags;
            p2->flags &= ~PAGE_WRITE;
            page_get_flags(addr);
        }
        mprotect(g2h(page_addr), qemu_host_page_size,
                 (prot & PAGE_BITS) & ~PAGE_WRITE);
#ifdef DEBUG_TB_INVALIDATE
        printf("protecting code page: 0x" TARGET_FMT_lx "\n",
               page_addr);
#endif
    }
#else
    /* if some code is already present, then the pages are already
       protected. So we handle the case where only the first TB is
       allocated in a physical page */
    if (!last_first_tb) {
        tlb_protect_code(page_addr);
    }
#endif

#endif /* TARGET_HAS_SMC */
}

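/* Two complementary write-protection schemes appear above: the user-only
   build mprotect()s the whole host page read-only, so a guest write faults
   and can be handled as self-modifying code, while the softmmu build calls
   tlb_protect_code() so that writes are trapped in the TLB slow path. */
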
/* Allocate a new translation block. Flush the translation buffer if
   too many translation blocks or too much generated code. */
TranslationBlock *tb_alloc(target_ulong pc)
{
    TranslationBlock *tb;

    if (nb_tbs >= code_gen_max_blocks ||
        (code_gen_ptr - code_gen_buffer) >= code_gen_buffer_max_size)
        return NULL;
    tb = &tbs[nb_tbs++];
    tb->pc = pc;
    tb->cflags = 0;
    return tb;
}

void tb_free(TranslationBlock *tb)
{
    /* In practice this is mostly used for single-use temporary TBs.
       Ignore the hard cases and just back up if this TB happens to
       be the last one generated.  */
    if (nb_tbs > 0 && tb == &tbs[nb_tbs - 1]) {
        code_gen_ptr = tb->tc_ptr;
        nb_tbs--;
    }
}

/* add a new TB and link it to the physical page tables. phys_page2 is
   (-1) to indicate that only one page contains the TB. */
void tb_link_phys(TranslationBlock *tb,
                  target_ulong phys_pc, target_ulong phys_page2)
{
    unsigned int h;
    TranslationBlock **ptb;

    /* Grab the mmap lock to stop another thread invalidating this TB
       before we are done.  */
    mmap_lock();
    /* add in the physical hash table */
    h = tb_phys_hash_func(phys_pc);
    ptb = &tb_phys_hash[h];
    tb->phys_hash_next = *ptb;
    *ptb = tb;

    /* add in the page list */
    tb_alloc_page(tb, 0, phys_pc & TARGET_PAGE_MASK);
    if (phys_page2 != -1)
        tb_alloc_page(tb, 1, phys_page2);
    else
        tb->page_addr[1] = -1;

    tb->jmp_first = (TranslationBlock *)((long)tb | 2);
    tb->jmp_next[0] = NULL;
    tb->jmp_next[1] = NULL;

    /* init original jump addresses */
    if (tb->tb_next_offset[0] != 0xffff)
        tb_reset_jump(tb, 0);
    if (tb->tb_next_offset[1] != 0xffff)
        tb_reset_jump(tb, 1);

#ifdef DEBUG_TB_CHECK
    tb_page_check();
#endif
    mmap_unlock();
}

/* find the TB 'tb' such that tb[0].tc_ptr <= tc_ptr <
   tb[1].tc_ptr. Return NULL if not found */
TranslationBlock *tb_find_pc(unsigned long tc_ptr)
{
    int m_min, m_max, m;
    unsigned long v;
    TranslationBlock *tb;

    if (nb_tbs <= 0)
        return NULL;
    if (tc_ptr < (unsigned long)code_gen_buffer ||
        tc_ptr >= (unsigned long)code_gen_ptr)
        return NULL;
    /* binary search (cf Knuth) */
    m_min = 0;
    m_max = nb_tbs - 1;
    while (m_min <= m_max) {
        m = (m_min + m_max) >> 1;
        tb = &tbs[m];
        v = (unsigned long)tb->tc_ptr;
        if (v == tc_ptr)
            return tb;
        else if (tc_ptr < v) {
            m_max = m - 1;
        } else {
            m_min = m + 1;
        }
    }
    return &tbs[m_max];
}

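/* The binary search is valid because 'tbs' entries are allocated in the
   same order in which cpu_gen_code() consumes code_gen_buffer, so the
   array is sorted by tc_ptr; when there is no exact match, &tbs[m_max] is
   the last TB starting at or before tc_ptr. */
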
static void tb_reset_jump_recursive(TranslationBlock *tb);

static inline void tb_reset_jump_recursive2(TranslationBlock *tb, int n)
{
    TranslationBlock *tb1, *tb_next, **ptb;
    unsigned int n1;

    tb1 = tb->jmp_next[n];
    if (tb1 != NULL) {
        /* find head of list */
        for(;;) {
            n1 = (long)tb1 & 3;
            tb1 = (TranslationBlock *)((long)tb1 & ~3);
            if (n1 == 2)
                break;
            tb1 = tb1->jmp_next[n1];
        }
        /* we are now sure that tb jumps to tb1 */
        tb_next = tb1;

        /* remove tb from the jmp_first list */
        ptb = &tb_next->jmp_first;
        for(;;) {
            tb1 = *ptb;
            n1 = (long)tb1 & 3;
            tb1 = (TranslationBlock *)((long)tb1 & ~3);
            if (n1 == n && tb1 == tb)
                break;
            ptb = &tb1->jmp_next[n1];
        }
        *ptb = tb->jmp_next[n];
        tb->jmp_next[n] = NULL;

        /* suppress the jump to next tb in generated code */
        tb_reset_jump(tb, n);

        /* suppress jumps in the tb on which we could have jumped */
        tb_reset_jump_recursive(tb_next);
    }
}

static void tb_reset_jump_recursive(TranslationBlock *tb)
{
    tb_reset_jump_recursive2(tb, 0);
    tb_reset_jump_recursive2(tb, 1);
}

#if defined(TARGET_HAS_ICE)
#if defined(CONFIG_USER_ONLY)
static void breakpoint_invalidate(CPUState *env, target_ulong pc)
{
    tb_invalidate_phys_page_range(pc, pc + 1, 0);
}
#else
static void breakpoint_invalidate(CPUState *env, target_ulong pc)
{
    target_phys_addr_t addr;
    target_ulong pd;
    ram_addr_t ram_addr;
    PhysPageDesc *p;

    addr = cpu_get_phys_page_debug(env, pc);
    p = phys_page_find(addr >> TARGET_PAGE_BITS);
    if (!p) {
        pd = IO_MEM_UNASSIGNED;
    } else {
        pd = p->phys_offset;
    }
    ram_addr = (pd & TARGET_PAGE_MASK) | (pc & ~TARGET_PAGE_MASK);
    tb_invalidate_phys_page_range(ram_addr, ram_addr + 1, 0);
}
#endif
#endif /* TARGET_HAS_ICE */

#if defined(CONFIG_USER_ONLY)
void cpu_watchpoint_remove_all(CPUState *env, int mask)
{
}

int cpu_watchpoint_insert(CPUState *env, target_ulong addr, target_ulong len,
                          int flags, CPUWatchpoint **watchpoint)
{
    return -ENOSYS;
}
#else
/* Add a watchpoint.  */
int cpu_watchpoint_insert(CPUState *env, target_ulong addr, target_ulong len,
                          int flags, CPUWatchpoint **watchpoint)
{
    target_ulong len_mask = ~(len - 1);
    CPUWatchpoint *wp;

    /* sanity checks: allow power-of-2 lengths, deny unaligned watchpoints */
    if ((len != 1 && len != 2 && len != 4 && len != 8) || (addr & ~len_mask)) {
        fprintf(stderr, "qemu: tried to set invalid watchpoint at "
                TARGET_FMT_lx ", len=" TARGET_FMT_lu "\n", addr, len);
        return -EINVAL;
    }
    wp = qemu_malloc(sizeof(*wp));

    wp->vaddr = addr;
    wp->len_mask = len_mask;
    wp->flags = flags;

    /* keep all GDB-injected watchpoints in front */
    if (flags & BP_GDB)
        QTAILQ_INSERT_HEAD(&env->watchpoints, wp, entry);
    else
        QTAILQ_INSERT_TAIL(&env->watchpoints, wp, entry);

    tlb_flush_page(env, addr);

    if (watchpoint)
        *watchpoint = wp;
    return 0;
}

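/* The power-of-2 restriction makes len_mask a natural alignment mask: for
   len == 4, len_mask == ~3, so (addr & ~len_mask) is non-zero exactly when
   addr is not 4-byte aligned, and a hit can later be tested by comparing
   (address & len_mask) against wp->vaddr in a single operation. */
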
/* Remove a specific watchpoint.  */
int cpu_watchpoint_remove(CPUState *env, target_ulong addr, target_ulong len,
                          int flags)
{
    target_ulong len_mask = ~(len - 1);
    CPUWatchpoint *wp;

    QTAILQ_FOREACH(wp, &env->watchpoints, entry) {
        if (addr == wp->vaddr && len_mask == wp->len_mask
                && flags == (wp->flags & ~BP_WATCHPOINT_HIT)) {
            cpu_watchpoint_remove_by_ref(env, wp);
            return 0;
        }
    }
    return -ENOENT;
}

/* Remove a specific watchpoint by reference.  */
void cpu_watchpoint_remove_by_ref(CPUState *env, CPUWatchpoint *watchpoint)
{
    QTAILQ_REMOVE(&env->watchpoints, watchpoint, entry);

    tlb_flush_page(env, watchpoint->vaddr);

    qemu_free(watchpoint);
}

/* Remove all matching watchpoints.  */
void cpu_watchpoint_remove_all(CPUState *env, int mask)
{
    CPUWatchpoint *wp, *next;

    QTAILQ_FOREACH_SAFE(wp, &env->watchpoints, entry, next) {
        if (wp->flags & mask)
            cpu_watchpoint_remove_by_ref(env, wp);
    }
}
#endif

a1d1bb31 AL |
1419 | /* Add a breakpoint. */ |
1420 | int cpu_breakpoint_insert(CPUState *env, target_ulong pc, int flags, | |
1421 | CPUBreakpoint **breakpoint) | |
4c3a88a2 | 1422 | { |
1fddef4b | 1423 | #if defined(TARGET_HAS_ICE) |
c0ce998e | 1424 | CPUBreakpoint *bp; |
3b46e624 | 1425 | |
a1d1bb31 | 1426 | bp = qemu_malloc(sizeof(*bp)); |
4c3a88a2 | 1427 | |
a1d1bb31 AL |
1428 | bp->pc = pc; |
1429 | bp->flags = flags; | |
1430 | ||
2dc9f411 | 1431 | /* keep all GDB-injected breakpoints in front */ |
c0ce998e | 1432 | if (flags & BP_GDB) |
72cf2d4f | 1433 | QTAILQ_INSERT_HEAD(&env->breakpoints, bp, entry); |
c0ce998e | 1434 | else |
72cf2d4f | 1435 | QTAILQ_INSERT_TAIL(&env->breakpoints, bp, entry); |
3b46e624 | 1436 | |
d720b93d | 1437 | breakpoint_invalidate(env, pc); |
a1d1bb31 AL |
1438 | |
1439 | if (breakpoint) | |
1440 | *breakpoint = bp; | |
4c3a88a2 FB |
1441 | return 0; |
1442 | #else | |
a1d1bb31 | 1443 | return -ENOSYS; |
4c3a88a2 FB |
1444 | #endif |
1445 | } | |
1446 | ||
a1d1bb31 AL |
1447 | /* Remove a specific breakpoint. */ |
1448 | int cpu_breakpoint_remove(CPUState *env, target_ulong pc, int flags) | |
1449 | { | |
7d03f82f | 1450 | #if defined(TARGET_HAS_ICE) |
a1d1bb31 AL |
1451 | CPUBreakpoint *bp; |
1452 | ||
72cf2d4f | 1453 | QTAILQ_FOREACH(bp, &env->breakpoints, entry) { |
a1d1bb31 AL |
1454 | if (bp->pc == pc && bp->flags == flags) { |
1455 | cpu_breakpoint_remove_by_ref(env, bp); | |
1456 | return 0; | |
1457 | } | |
7d03f82f | 1458 | } |
a1d1bb31 AL |
1459 | return -ENOENT; |
1460 | #else | |
1461 | return -ENOSYS; | |
7d03f82f EI |
1462 | #endif |
1463 | } | |
1464 | ||
a1d1bb31 AL |
1465 | /* Remove a specific breakpoint by reference. */ |
1466 | void cpu_breakpoint_remove_by_ref(CPUState *env, CPUBreakpoint *breakpoint) | |
4c3a88a2 | 1467 | { |
1fddef4b | 1468 | #if defined(TARGET_HAS_ICE) |
72cf2d4f | 1469 | QTAILQ_REMOVE(&env->breakpoints, breakpoint, entry); |
d720b93d | 1470 | |
a1d1bb31 AL |
1471 | breakpoint_invalidate(env, breakpoint->pc); |
1472 | ||
1473 | qemu_free(breakpoint); | |
1474 | #endif | |
1475 | } | |
1476 | ||
1477 | /* Remove all matching breakpoints. */ | |
1478 | void cpu_breakpoint_remove_all(CPUState *env, int mask) | |
1479 | { | |
1480 | #if defined(TARGET_HAS_ICE) | |
c0ce998e | 1481 | CPUBreakpoint *bp, *next; |
a1d1bb31 | 1482 | |
72cf2d4f | 1483 | QTAILQ_FOREACH_SAFE(bp, &env->breakpoints, entry, next) { |
a1d1bb31 AL |
1484 | if (bp->flags & mask) |
1485 | cpu_breakpoint_remove_by_ref(env, bp); | |
c0ce998e | 1486 | } |
4c3a88a2 FB |
1487 | #endif |
1488 | } | |
1489 | ||
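/*
 * Usage sketch (illustrative only, not compiled): how a debugger front
 * end such as the gdbstub is expected to drive the breakpoint API above.
 * 'env' is assumed to be a valid CPUState; the example_* name is
 * hypothetical.
 */
#if 0
static int example_set_gdb_breakpoint(CPUState *env, target_ulong pc)
{
    CPUBreakpoint *bp;
    int ret;

    /* BP_GDB keeps the breakpoint at the head of the list */
    ret = cpu_breakpoint_insert(env, pc, BP_GDB, &bp);
    if (ret < 0)
        return ret;            /* -ENOSYS without TARGET_HAS_ICE */

    /* ... run until the CPU loop reports EXCP_DEBUG ... */

    /* removal by address requires exactly matching flags */
    return cpu_breakpoint_remove(env, pc, BP_GDB);
}
#endif
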
/* enable or disable single step mode. EXCP_DEBUG is returned by the
   CPU loop after each instruction */
void cpu_single_step(CPUState *env, int enabled)
{
#if defined(TARGET_HAS_ICE)
    if (env->singlestep_enabled != enabled) {
        env->singlestep_enabled = enabled;
        if (kvm_enabled())
            kvm_update_guest_debug(env, 0);
        else {
            /* must flush all the translated code to avoid inconsistencies */
            /* XXX: only flush what is necessary */
            tb_flush(env);
        }
    }
#endif
}

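/*
 * Usage sketch (illustrative, not compiled): the gdbstub is assumed to
 * drive single-stepping this way; 'env' is a valid CPUState and the
 * example_* name is hypothetical.
 */
#if 0
static void example_step_one_insn(CPUState *env)
{
    cpu_single_step(env, 1);   /* flushes TBs, or updates KVM debug state */
    /* ... resume the guest; the CPU loop returns EXCP_DEBUG ... */
    cpu_single_step(env, 0);
}
#endif
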
/* enable or disable low-level logging */
void cpu_set_log(int log_flags)
{
    loglevel = log_flags;
    if (loglevel && !logfile) {
        logfile = fopen(logfilename, log_append ? "a" : "w");
        if (!logfile) {
            perror(logfilename);
            _exit(1);
        }
#if !defined(CONFIG_SOFTMMU)
        /* must avoid mmap() usage of glibc by setting a buffer "by hand" */
        {
            static char logfile_buf[4096];
            setvbuf(logfile, logfile_buf, _IOLBF, sizeof(logfile_buf));
        }
#elif !defined(_WIN32)
        /* Win32 doesn't support line-buffering and requires size >= 2 */
        setvbuf(logfile, NULL, _IOLBF, 0);
#endif
        log_append = 1;
    }
    if (!loglevel && logfile) {
        fclose(logfile);
        logfile = NULL;
    }
}

void cpu_set_log_filename(const char *filename)
{
    logfilename = strdup(filename);
    if (logfile) {
        fclose(logfile);
        logfile = NULL;
    }
    cpu_set_log(loglevel);
}

static void cpu_unlink_tb(CPUState *env)
{
    /* FIXME: TB unchaining isn't SMP safe.  For now just ignore the
       problem and hope the cpu will stop of its own accord.  For userspace
       emulation this often isn't actually as bad as it sounds.  Often
       signals are used primarily to interrupt blocking syscalls.  */
    TranslationBlock *tb;
    static spinlock_t interrupt_lock = SPIN_LOCK_UNLOCKED;

    spin_lock(&interrupt_lock);
    tb = env->current_tb;
    /* if the cpu is currently executing code, we must unlink it and
       all the potentially executing TB */
    if (tb) {
        env->current_tb = NULL;
        tb_reset_jump_recursive(tb);
    }
    spin_unlock(&interrupt_lock);
}

/* mask must never be zero, except for A20 change call */
void cpu_interrupt(CPUState *env, int mask)
{
    int old_mask;

    old_mask = env->interrupt_request;
    env->interrupt_request |= mask;

#ifndef CONFIG_USER_ONLY
    /*
     * If called from iothread context, wake the target cpu in
     * case it's halted.
     */
    if (!qemu_cpu_self(env)) {
        qemu_cpu_kick(env);
        return;
    }
#endif

    if (use_icount) {
        env->icount_decr.u16.high = 0xffff;
#ifndef CONFIG_USER_ONLY
        if (!can_do_io(env)
            && (mask & ~old_mask) != 0) {
            cpu_abort(env, "Raised interrupt while not in I/O function");
        }
#endif
    } else {
        cpu_unlink_tb(env);
    }
}

void cpu_reset_interrupt(CPUState *env, int mask)
{
    env->interrupt_request &= ~mask;
}

void cpu_exit(CPUState *env)
{
    env->exit_request = 1;
    cpu_unlink_tb(env);
}

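/*
 * Sketch (illustrative, not compiled): how a device model is assumed to
 * raise and clear an interrupt line.  CPU_INTERRUPT_HARD is the generic
 * external-interrupt bit serviced by the target's cpu_exec loop; the
 * example_* name is hypothetical.
 */
#if 0
static void example_set_irq_line(CPUState *env, int level)
{
    if (level)
        cpu_interrupt(env, CPU_INTERRUPT_HARD);
    else
        cpu_reset_interrupt(env, CPU_INTERRUPT_HARD);
}
#endif
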
const CPULogItem cpu_log_items[] = {
    { CPU_LOG_TB_OUT_ASM, "out_asm",
      "show generated host assembly code for each compiled TB" },
    { CPU_LOG_TB_IN_ASM, "in_asm",
      "show target assembly code for each compiled TB" },
    { CPU_LOG_TB_OP, "op",
      "show micro ops for each compiled TB" },
    { CPU_LOG_TB_OP_OPT, "op_opt",
      "show micro ops "
#ifdef TARGET_I386
      "before eflags optimization and "
#endif
      "after liveness analysis" },
    { CPU_LOG_INT, "int",
      "show interrupts/exceptions in short format" },
    { CPU_LOG_EXEC, "exec",
      "show trace before each executed TB (lots of logs)" },
    { CPU_LOG_TB_CPU, "cpu",
      "show CPU state before block translation" },
#ifdef TARGET_I386
    { CPU_LOG_PCALL, "pcall",
      "show protected mode far calls/returns/exceptions" },
    { CPU_LOG_RESET, "cpu_reset",
      "show CPU state before CPU resets" },
#endif
#ifdef DEBUG_IOPORT
    { CPU_LOG_IOPORT, "ioport",
      "show all i/o port accesses" },
#endif
    { 0, NULL, NULL },
};

#ifndef CONFIG_USER_ONLY
static QLIST_HEAD(memory_client_list, CPUPhysMemoryClient) memory_client_list
    = QLIST_HEAD_INITIALIZER(memory_client_list);

static void cpu_notify_set_memory(target_phys_addr_t start_addr,
                                  ram_addr_t size,
                                  ram_addr_t phys_offset)
{
    CPUPhysMemoryClient *client;
    QLIST_FOREACH(client, &memory_client_list, list) {
        client->set_memory(client, start_addr, size, phys_offset);
    }
}

static int cpu_notify_sync_dirty_bitmap(target_phys_addr_t start,
                                        target_phys_addr_t end)
{
    CPUPhysMemoryClient *client;
    QLIST_FOREACH(client, &memory_client_list, list) {
        int r = client->sync_dirty_bitmap(client, start, end);
        if (r < 0)
            return r;
    }
    return 0;
}

static int cpu_notify_migration_log(int enable)
{
    CPUPhysMemoryClient *client;
    QLIST_FOREACH(client, &memory_client_list, list) {
        int r = client->migration_log(client, enable);
        if (r < 0)
            return r;
    }
    return 0;
}

static void phys_page_for_each_in_l1_map(PhysPageDesc **phys_map,
                                         CPUPhysMemoryClient *client)
{
    PhysPageDesc *pd;
    int l1, l2;

    for (l1 = 0; l1 < L1_SIZE; ++l1) {
        pd = phys_map[l1];
        if (!pd) {
            continue;
        }
        for (l2 = 0; l2 < L2_SIZE; ++l2) {
            if (pd[l2].phys_offset == IO_MEM_UNASSIGNED) {
                continue;
            }
            client->set_memory(client, pd[l2].region_offset,
                               TARGET_PAGE_SIZE, pd[l2].phys_offset);
        }
    }
}

static void phys_page_for_each(CPUPhysMemoryClient *client)
{
#if TARGET_PHYS_ADDR_SPACE_BITS > 32

#if TARGET_PHYS_ADDR_SPACE_BITS > (32 + L1_BITS)
#error unsupported TARGET_PHYS_ADDR_SPACE_BITS
#endif
    void **phys_map = (void **)l1_phys_map;
    int l1;
    if (!l1_phys_map) {
        return;
    }
    for (l1 = 0; l1 < L1_SIZE; ++l1) {
        if (phys_map[l1]) {
            phys_page_for_each_in_l1_map(phys_map[l1], client);
        }
    }
#else
    if (!l1_phys_map) {
        return;
    }
    phys_page_for_each_in_l1_map(l1_phys_map, client);
#endif
}

void cpu_register_phys_memory_client(CPUPhysMemoryClient *client)
{
    QLIST_INSERT_HEAD(&memory_client_list, client, list);
    phys_page_for_each(client);
}

void cpu_unregister_phys_memory_client(CPUPhysMemoryClient *client)
{
    QLIST_REMOVE(client, list);
}
#endif

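/*
 * Sketch (illustrative, not compiled) of a minimal CPUPhysMemoryClient.
 * The callback shapes follow how they are invoked above; the example_*
 * names are hypothetical.  At registration time the client is replayed
 * against every already-mapped page via phys_page_for_each().
 */
#if 0
static void example_set_memory(struct CPUPhysMemoryClient *client,
                               target_phys_addr_t start_addr,
                               ram_addr_t size, ram_addr_t phys_offset)
{
    /* react to a mapping change, e.g. update a shadow table */
}

static int example_sync_dirty_bitmap(struct CPUPhysMemoryClient *client,
                                     target_phys_addr_t start,
                                     target_phys_addr_t end)
{
    return 0;
}

static int example_migration_log(struct CPUPhysMemoryClient *client,
                                 int enable)
{
    return 0;
}

static CPUPhysMemoryClient example_client = {
    .set_memory = example_set_memory,
    .sync_dirty_bitmap = example_sync_dirty_bitmap,
    .migration_log = example_migration_log,
};

/* cpu_register_phys_memory_client(&example_client); */
#endif
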
static int cmp1(const char *s1, int n, const char *s2)
{
    if (strlen(s2) != n)
        return 0;
    return memcmp(s1, s2, n) == 0;
}

/* Takes a comma-separated list of log masks.  Returns 0 on error. */
int cpu_str_to_log_mask(const char *str)
{
    const CPULogItem *item;
    int mask;
    const char *p, *p1;

    p = str;
    mask = 0;
    for(;;) {
        p1 = strchr(p, ',');
        if (!p1)
            p1 = p + strlen(p);
        if (cmp1(p, p1 - p, "all")) {
            for(item = cpu_log_items; item->mask != 0; item++) {
                mask |= item->mask;
            }
        } else {
            for(item = cpu_log_items; item->mask != 0; item++) {
                if (cmp1(p, p1 - p, item->name))
                    goto found;
            }
            return 0;
        }
    found:
        mask |= item->mask;
        if (*p1 != ',')
            break;
        p = p1 + 1;
    }
    return mask;
}

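/*
 * Worked example (illustrative, not compiled): parsing a "-d" style
 * option string.  "in_asm,exec" yields CPU_LOG_TB_IN_ASM | CPU_LOG_EXEC;
 * any unknown name makes cpu_str_to_log_mask() return 0.  The example_*
 * name is hypothetical.
 */
#if 0
static void example_enable_logging(const char *optarg)
{
    int mask = cpu_str_to_log_mask(optarg);
    if (!mask) {
        fprintf(stderr, "bad log option '%s'\n", optarg);
        return;
    }
    cpu_set_log(mask);         /* opens logfilename on first use */
}
#endif
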
void cpu_abort(CPUState *env, const char *fmt, ...)
{
    va_list ap;
    va_list ap2;

    va_start(ap, fmt);
    va_copy(ap2, ap);
    fprintf(stderr, "qemu: fatal: ");
    vfprintf(stderr, fmt, ap);
    fprintf(stderr, "\n");
#ifdef TARGET_I386
    cpu_dump_state(env, stderr, fprintf, X86_DUMP_FPU | X86_DUMP_CCOP);
#else
    cpu_dump_state(env, stderr, fprintf, 0);
#endif
    if (qemu_log_enabled()) {
        qemu_log("qemu: fatal: ");
        qemu_log_vprintf(fmt, ap2);
        qemu_log("\n");
#ifdef TARGET_I386
        log_cpu_state(env, X86_DUMP_FPU | X86_DUMP_CCOP);
#else
        log_cpu_state(env, 0);
#endif
        qemu_log_flush();
        qemu_log_close();
    }
    va_end(ap2);
    va_end(ap);
#if defined(CONFIG_USER_ONLY)
    {
        struct sigaction act;
        sigfillset(&act.sa_mask);
        act.sa_handler = SIG_DFL;
        sigaction(SIGABRT, &act, NULL);
    }
#endif
    abort();
}

CPUState *cpu_copy(CPUState *env)
{
    CPUState *new_env = cpu_init(env->cpu_model_str);
    CPUState *next_cpu = new_env->next_cpu;
    int cpu_index = new_env->cpu_index;
#if defined(TARGET_HAS_ICE)
    CPUBreakpoint *bp;
    CPUWatchpoint *wp;
#endif

    memcpy(new_env, env, sizeof(CPUState));

    /* Preserve chaining and index. */
    new_env->next_cpu = next_cpu;
    new_env->cpu_index = cpu_index;

    /* Clone all break/watchpoints.
       The copied list heads still point into env's queues after the
       memcpy, so reset new_env's lists before repopulating them.
       Note: Once we support ptrace with hw-debug register access, make sure
       BP_CPU break/watchpoints are handled correctly on clone. */
    QTAILQ_INIT(&new_env->breakpoints);
    QTAILQ_INIT(&new_env->watchpoints);
#if defined(TARGET_HAS_ICE)
    QTAILQ_FOREACH(bp, &env->breakpoints, entry) {
        cpu_breakpoint_insert(new_env, bp->pc, bp->flags, NULL);
    }
    QTAILQ_FOREACH(wp, &env->watchpoints, entry) {
        cpu_watchpoint_insert(new_env, wp->vaddr, (~wp->len_mask) + 1,
                              wp->flags, NULL);
    }
#endif

    return new_env;
}

#if !defined(CONFIG_USER_ONLY)

static inline void tlb_flush_jmp_cache(CPUState *env, target_ulong addr)
{
    unsigned int i;

    /* Discard jump cache entries for any tb which might potentially
       overlap the flushed page. */
    i = tb_jmp_cache_hash_page(addr - TARGET_PAGE_SIZE);
    memset(&env->tb_jmp_cache[i], 0,
           TB_JMP_PAGE_SIZE * sizeof(TranslationBlock *));

    i = tb_jmp_cache_hash_page(addr);
    memset(&env->tb_jmp_cache[i], 0,
           TB_JMP_PAGE_SIZE * sizeof(TranslationBlock *));
}

static CPUTLBEntry s_cputlb_empty_entry = {
    .addr_read  = -1,
    .addr_write = -1,
    .addr_code  = -1,
    .addend     = -1,
};

/* NOTE: if flush_global is true, also flush global entries (not
   implemented yet) */
void tlb_flush(CPUState *env, int flush_global)
{
    int i;

#if defined(DEBUG_TLB)
    printf("tlb_flush:\n");
#endif
    /* must reset current TB so that interrupts cannot modify the
       links while we are modifying them */
    env->current_tb = NULL;

    for(i = 0; i < CPU_TLB_SIZE; i++) {
        int mmu_idx;
        for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++) {
            env->tlb_table[mmu_idx][i] = s_cputlb_empty_entry;
        }
    }

    memset(env->tb_jmp_cache, 0, TB_JMP_CACHE_SIZE * sizeof(void *));

    tlb_flush_count++;
}

static inline void tlb_flush_entry(CPUTLBEntry *tlb_entry, target_ulong addr)
{
    if (addr == (tlb_entry->addr_read &
                 (TARGET_PAGE_MASK | TLB_INVALID_MASK)) ||
        addr == (tlb_entry->addr_write &
                 (TARGET_PAGE_MASK | TLB_INVALID_MASK)) ||
        addr == (tlb_entry->addr_code &
                 (TARGET_PAGE_MASK | TLB_INVALID_MASK))) {
        *tlb_entry = s_cputlb_empty_entry;
    }
}

void tlb_flush_page(CPUState *env, target_ulong addr)
{
    int i;
    int mmu_idx;

#if defined(DEBUG_TLB)
    printf("tlb_flush_page: " TARGET_FMT_lx "\n", addr);
#endif
    /* must reset current TB so that interrupts cannot modify the
       links while we are modifying them */
    env->current_tb = NULL;

    addr &= TARGET_PAGE_MASK;
    i = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
    for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++)
        tlb_flush_entry(&env->tlb_table[mmu_idx][i], addr);

    tlb_flush_jmp_cache(env, addr);
}

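/*
 * Sketch (illustrative, not compiled): when target MMU emulation is
 * expected to call the flush primitives above.  The example_* names are
 * hypothetical.
 */
#if 0
static void example_write_pte(CPUState *env, target_ulong va)
{
    /* ... update the guest page table entry covering 'va' ... */
    tlb_flush_page(env, va);   /* drop the stale softmmu entry */
}

static void example_switch_address_space(CPUState *env)
{
    /* e.g. after an x86 CR3 write: invalidate everything */
    tlb_flush(env, 1);
}
#endif
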
/* update the TLBs so that writes to code in the virtual page 'addr'
   can be detected */
static void tlb_protect_code(ram_addr_t ram_addr)
{
    cpu_physical_memory_reset_dirty(ram_addr,
                                    ram_addr + TARGET_PAGE_SIZE,
                                    CODE_DIRTY_FLAG);
}

/* update the TLB so that writes in physical page 'phys_addr' are no longer
   tested for self modifying code */
static void tlb_unprotect_code_phys(CPUState *env, ram_addr_t ram_addr,
                                    target_ulong vaddr)
{
    phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS] |= CODE_DIRTY_FLAG;
}

static inline void tlb_reset_dirty_range(CPUTLBEntry *tlb_entry,
                                         unsigned long start, unsigned long length)
{
    unsigned long addr;
    if ((tlb_entry->addr_write & ~TARGET_PAGE_MASK) == IO_MEM_RAM) {
        addr = (tlb_entry->addr_write & TARGET_PAGE_MASK) + tlb_entry->addend;
        if ((addr - start) < length) {
            tlb_entry->addr_write = (tlb_entry->addr_write & TARGET_PAGE_MASK) | TLB_NOTDIRTY;
        }
    }
}

/* Note: start and end must be within the same ram block.  */
void cpu_physical_memory_reset_dirty(ram_addr_t start, ram_addr_t end,
                                     int dirty_flags)
{
    CPUState *env;
    unsigned long length, start1;
    int i, mask, len;
    uint8_t *p;

    start &= TARGET_PAGE_MASK;
    end = TARGET_PAGE_ALIGN(end);

    length = end - start;
    if (length == 0)
        return;
    len = length >> TARGET_PAGE_BITS;
    mask = ~dirty_flags;
    p = phys_ram_dirty + (start >> TARGET_PAGE_BITS);
    for(i = 0; i < len; i++)
        p[i] &= mask;

    /* we modify the TLB cache so that the dirty bit will be set again
       when accessing the range */
    start1 = (unsigned long)qemu_get_ram_ptr(start);
    /* Check that we don't span multiple blocks - this breaks the
       address comparisons below.  */
    if ((unsigned long)qemu_get_ram_ptr(end - 1) - start1
            != (end - 1) - start) {
        abort();
    }

    for(env = first_cpu; env != NULL; env = env->next_cpu) {
        int mmu_idx;
        for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++) {
            for(i = 0; i < CPU_TLB_SIZE; i++)
                tlb_reset_dirty_range(&env->tlb_table[mmu_idx][i],
                                      start1, length);
        }
    }
}

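/*
 * Worked example (illustrative): the loop above clears only the requested
 * flag bits, since each byte of phys_ram_dirty packs independent per-page
 * flags.  Assuming the flag values from cpu-all.h of this era
 * (VGA_DIRTY_FLAG 0x01, CODE_DIRTY_FLAG 0x02, MIGRATION_DIRTY_FLAG 0x08):
 *
 *   p[i] == 0xff, dirty_flags == MIGRATION_DIRTY_FLAG
 *   p[i] &= ~0x08   ->   p[i] == 0xf7
 *
 * so the page still counts as dirty for VGA refresh and self-modifying
 * code detection while migration sees it as clean.
 */
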
int cpu_physical_memory_set_dirty_tracking(int enable)
{
    int ret = 0;
    in_migration = enable;
    ret = cpu_notify_migration_log(!!enable);
    return ret;
}

int cpu_physical_memory_get_dirty_tracking(void)
{
    return in_migration;
}

int cpu_physical_sync_dirty_bitmap(target_phys_addr_t start_addr,
                                   target_phys_addr_t end_addr)
{
    int ret;

    ret = cpu_notify_sync_dirty_bitmap(start_addr, end_addr);
    return ret;
}

static inline void tlb_update_dirty(CPUTLBEntry *tlb_entry)
{
    ram_addr_t ram_addr;
    void *p;

    if ((tlb_entry->addr_write & ~TARGET_PAGE_MASK) == IO_MEM_RAM) {
        p = (void *)(unsigned long)((tlb_entry->addr_write & TARGET_PAGE_MASK)
            + tlb_entry->addend);
        ram_addr = qemu_ram_addr_from_host(p);
        if (!cpu_physical_memory_is_dirty(ram_addr)) {
            tlb_entry->addr_write |= TLB_NOTDIRTY;
        }
    }
}

/* update the TLB according to the current state of the dirty bits */
void cpu_tlb_update_dirty(CPUState *env)
{
    int i;
    int mmu_idx;
    for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++) {
        for(i = 0; i < CPU_TLB_SIZE; i++)
            tlb_update_dirty(&env->tlb_table[mmu_idx][i]);
    }
}

static inline void tlb_set_dirty1(CPUTLBEntry *tlb_entry, target_ulong vaddr)
{
    if (tlb_entry->addr_write == (vaddr | TLB_NOTDIRTY))
        tlb_entry->addr_write = vaddr;
}

/* update the TLB corresponding to virtual page vaddr
   so that it is no longer dirty */
static inline void tlb_set_dirty(CPUState *env, target_ulong vaddr)
{
    int i;
    int mmu_idx;

    vaddr &= TARGET_PAGE_MASK;
    i = (vaddr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
    for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++)
        tlb_set_dirty1(&env->tlb_table[mmu_idx][i], vaddr);
}

/* add a new TLB entry. At most one entry for a given virtual address
   is permitted. Return 0 if OK or 2 if the page could not be mapped
   (can only happen in non SOFTMMU mode for I/O pages or pages
   conflicting with the host address space). */
int tlb_set_page_exec(CPUState *env, target_ulong vaddr,
                      target_phys_addr_t paddr, int prot,
                      int mmu_idx, int is_softmmu)
{
    PhysPageDesc *p;
    unsigned long pd;
    unsigned int index;
    target_ulong address;
    target_ulong code_address;
    target_phys_addr_t addend;
    int ret;
    CPUTLBEntry *te;
    CPUWatchpoint *wp;
    target_phys_addr_t iotlb;

    p = phys_page_find(paddr >> TARGET_PAGE_BITS);
    if (!p) {
        pd = IO_MEM_UNASSIGNED;
    } else {
        pd = p->phys_offset;
    }
#if defined(DEBUG_TLB)
    printf("tlb_set_page: vaddr=" TARGET_FMT_lx " paddr=0x%08x prot=%x idx=%d smmu=%d pd=0x%08lx\n",
           vaddr, (int)paddr, prot, mmu_idx, is_softmmu, pd);
#endif

    ret = 0;
    address = vaddr;
    if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM && !(pd & IO_MEM_ROMD)) {
        /* IO memory case (romd handled later) */
        address |= TLB_MMIO;
    }
    addend = (unsigned long)qemu_get_ram_ptr(pd & TARGET_PAGE_MASK);
    if ((pd & ~TARGET_PAGE_MASK) <= IO_MEM_ROM) {
        /* Normal RAM.  */
        iotlb = pd & TARGET_PAGE_MASK;
        if ((pd & ~TARGET_PAGE_MASK) == IO_MEM_RAM)
            iotlb |= IO_MEM_NOTDIRTY;
        else
            iotlb |= IO_MEM_ROM;
    } else {
        /* IO handlers are currently passed a physical address.
           It would be nice to pass an offset from the base address
           of that region.  This would avoid having to special case RAM,
           and avoid full address decoding in every device.
           We can't use the high bits of pd for this because
           IO_MEM_ROMD uses these as a ram address.  */
        iotlb = (pd & ~TARGET_PAGE_MASK);
        if (p) {
            iotlb += p->region_offset;
        } else {
            iotlb += paddr;
        }
    }

    code_address = address;
    /* Make accesses to pages with watchpoints go via the
       watchpoint trap routines.  */
    QTAILQ_FOREACH(wp, &env->watchpoints, entry) {
        if (vaddr == (wp->vaddr & TARGET_PAGE_MASK)) {
            iotlb = io_mem_watch + paddr;
            /* TODO: The memory case can be optimized by not trapping
               reads of pages with a write breakpoint.  */
            address |= TLB_MMIO;
        }
    }

    index = (vaddr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
    env->iotlb[mmu_idx][index] = iotlb - vaddr;
    te = &env->tlb_table[mmu_idx][index];
    te->addend = addend - vaddr;
    if (prot & PAGE_READ) {
        te->addr_read = address;
    } else {
        te->addr_read = -1;
    }

    if (prot & PAGE_EXEC) {
        te->addr_code = code_address;
    } else {
        te->addr_code = -1;
    }
    if (prot & PAGE_WRITE) {
        if ((pd & ~TARGET_PAGE_MASK) == IO_MEM_ROM ||
            (pd & IO_MEM_ROMD)) {
            /* Write access calls the I/O callback.  */
            te->addr_write = address | TLB_MMIO;
        } else if ((pd & ~TARGET_PAGE_MASK) == IO_MEM_RAM &&
                   !cpu_physical_memory_is_dirty(pd)) {
            te->addr_write = address | TLB_NOTDIRTY;
        } else {
            te->addr_write = address;
        }
    } else {
        te->addr_write = -1;
    }
    return ret;
}

#else

void tlb_flush(CPUState *env, int flush_global)
{
}

void tlb_flush_page(CPUState *env, target_ulong addr)
{
}

/*
 * Walks guest process memory "regions" one by one
 * and calls callback function 'fn' for each region.
 */
int walk_memory_regions(void *priv,
    int (*fn)(void *, unsigned long, unsigned long, unsigned long))
{
    unsigned long start, end;
    PageDesc *p = NULL;
    int i, j, prot, prot1;
    int rc = 0;

    start = end = -1;
    prot = 0;

    for (i = 0; i <= L1_SIZE; i++) {
        p = (i < L1_SIZE) ? l1_map[i] : NULL;
        for (j = 0; j < L2_SIZE; j++) {
            prot1 = (p == NULL) ? 0 : p[j].flags;
            /*
             * "region" is one continuous chunk of memory
             * that has same protection flags set.
             */
            if (prot1 != prot) {
                end = (i << (32 - L1_BITS)) | (j << TARGET_PAGE_BITS);
                if (start != -1) {
                    rc = (*fn)(priv, start, end, prot);
                    /* callback can stop iteration by returning != 0 */
                    if (rc != 0)
                        return (rc);
                }
                if (prot1 != 0)
                    start = end;
                else
                    start = -1;
                prot = prot1;
            }
            if (p == NULL)
                break;
        }
    }
    return (rc);
}

static int dump_region(void *priv, unsigned long start,
    unsigned long end, unsigned long prot)
{
    FILE *f = (FILE *)priv;

    (void) fprintf(f, "%08lx-%08lx %08lx %c%c%c\n",
        start, end, end - start,
        ((prot & PAGE_READ) ? 'r' : '-'),
        ((prot & PAGE_WRITE) ? 'w' : '-'),
        ((prot & PAGE_EXEC) ? 'x' : '-'));

    return (0);
}

/* dump memory mappings */
void page_dump(FILE *f)
{
    (void) fprintf(f, "%-8s %-8s %-8s %s\n",
        "start", "end", "size", "prot");
    walk_memory_regions(f, dump_region);
}

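/*
 * Sketch (illustrative, not compiled): a custom walk_memory_regions()
 * callback, here counting executable bytes.  A non-zero return value
 * stops the walk early, as dump_region() relies on above.  The example_*
 * names are hypothetical.
 */
#if 0
static int example_count_exec(void *priv, unsigned long start,
                              unsigned long end, unsigned long prot)
{
    if (prot & PAGE_EXEC)
        *(unsigned long *)priv += end - start;
    return 0;
}

static unsigned long example_total_exec(void)
{
    unsigned long total = 0;
    walk_memory_regions(&total, example_count_exec);
    return total;
}
#endif
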
int page_get_flags(target_ulong address)
{
    PageDesc *p;

    p = page_find(address >> TARGET_PAGE_BITS);
    if (!p)
        return 0;
    return p->flags;
}

/* modify the flags of a page and invalidate the code if
   necessary. The flag PAGE_WRITE_ORG is positioned automatically
   depending on PAGE_WRITE */
void page_set_flags(target_ulong start, target_ulong end, int flags)
{
    PageDesc *p;
    target_ulong addr;

    /* mmap_lock should already be held.  */
    start = start & TARGET_PAGE_MASK;
    end = TARGET_PAGE_ALIGN(end);
    if (flags & PAGE_WRITE)
        flags |= PAGE_WRITE_ORG;
    for(addr = start; addr < end; addr += TARGET_PAGE_SIZE) {
        p = page_find_alloc(addr >> TARGET_PAGE_BITS);
        /* We may be called for host regions that are outside guest
           address space.  */
        if (!p)
            return;
        /* if the write protection is set, then we invalidate the code
           inside */
        if (!(p->flags & PAGE_WRITE) &&
            (flags & PAGE_WRITE) &&
            p->first_tb) {
            tb_invalidate_phys_page(addr, 0, NULL);
        }
        p->flags = flags;
    }
}

int page_check_range(target_ulong start, target_ulong len, int flags)
{
    PageDesc *p;
    target_ulong end;
    target_ulong addr;

    if (start + len < start)
        /* we've wrapped around */
        return -1;

    end = TARGET_PAGE_ALIGN(start + len); /* must do before we lose bits in the next step */
    start = start & TARGET_PAGE_MASK;

    for(addr = start; addr < end; addr += TARGET_PAGE_SIZE) {
        p = page_find(addr >> TARGET_PAGE_BITS);
        if (!p)
            return -1;
        if (!(p->flags & PAGE_VALID))
            return -1;

        if ((flags & PAGE_READ) && !(p->flags & PAGE_READ))
            return -1;
        if (flags & PAGE_WRITE) {
            if (!(p->flags & PAGE_WRITE_ORG))
                return -1;
            /* unprotect the page if it was put read-only because it
               contains translated code */
            if (!(p->flags & PAGE_WRITE)) {
                if (!page_unprotect(addr, 0, NULL))
                    return -1;
            }
            return 0;
        }
    }
    return 0;
}

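/*
 * Sketch (illustrative, not compiled): a syscall emulation path is
 * expected to validate guest buffers with page_check_range() before
 * touching them; TARGET_EFAULT here stands in for the real error path
 * and the example_* name is hypothetical.
 */
#if 0
static int example_validate_buffer(target_ulong guest_addr, target_ulong len)
{
    if (page_check_range(guest_addr, len, PAGE_READ | PAGE_WRITE) < 0)
        return -TARGET_EFAULT;
    return 0;
}
#endif
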
/* called from signal handler: invalidate the code and unprotect the
   page. Return TRUE if the fault was successfully handled. */
int page_unprotect(target_ulong address, unsigned long pc, void *puc)
{
    unsigned int page_index, prot, pindex;
    PageDesc *p, *p1;
    target_ulong host_start, host_end, addr;

    /* Technically this isn't safe inside a signal handler.  However we
       know this only ever happens in a synchronous SEGV handler, so in
       practice it seems to be ok.  */
    mmap_lock();

    host_start = address & qemu_host_page_mask;
    page_index = host_start >> TARGET_PAGE_BITS;
    p1 = page_find(page_index);
    if (!p1) {
        mmap_unlock();
        return 0;
    }
    host_end = host_start + qemu_host_page_size;
    p = p1;
    prot = 0;
    for(addr = host_start; addr < host_end; addr += TARGET_PAGE_SIZE) {
        prot |= p->flags;
        p++;
    }
    /* if the page was really writable, then we change its
       protection back to writable */
    if (prot & PAGE_WRITE_ORG) {
        pindex = (address - host_start) >> TARGET_PAGE_BITS;
        if (!(p1[pindex].flags & PAGE_WRITE)) {
            mprotect((void *)g2h(host_start), qemu_host_page_size,
                     (prot & PAGE_BITS) | PAGE_WRITE);
            p1[pindex].flags |= PAGE_WRITE;
            /* and since the content will be modified, we must invalidate
               the corresponding translated code. */
            tb_invalidate_phys_page(address, pc, puc);
#ifdef DEBUG_TB_CHECK
            tb_invalidate_check(address);
#endif
            mmap_unlock();
            return 1;
        }
    }
    mmap_unlock();
    return 0;
}

static inline void tlb_set_dirty(CPUState *env,
                                 unsigned long addr, target_ulong vaddr)
{
}
#endif /* defined(CONFIG_USER_ONLY) */

#if !defined(CONFIG_USER_ONLY)

#define SUBPAGE_IDX(addr) ((addr) & ~TARGET_PAGE_MASK)
typedef struct subpage_t {
    target_phys_addr_t base;
    CPUReadMemoryFunc * const *mem_read[TARGET_PAGE_SIZE][4];
    CPUWriteMemoryFunc * const *mem_write[TARGET_PAGE_SIZE][4];
    void *opaque[TARGET_PAGE_SIZE][2][4];
    ram_addr_t region_offset[TARGET_PAGE_SIZE][2][4];
} subpage_t;

static int subpage_register(subpage_t *mmio, uint32_t start, uint32_t end,
                            ram_addr_t memory, ram_addr_t region_offset);
static void *subpage_init(target_phys_addr_t base, ram_addr_t *phys,
                          ram_addr_t orig_memory, ram_addr_t region_offset);
#define CHECK_SUBPAGE(addr, start_addr, start_addr2, end_addr, end_addr2, \
                      need_subpage)                                     \
    do {                                                                \
        if (addr > start_addr)                                          \
            start_addr2 = 0;                                            \
        else {                                                          \
            start_addr2 = start_addr & ~TARGET_PAGE_MASK;               \
            if (start_addr2 > 0)                                        \
                need_subpage = 1;                                       \
        }                                                               \
                                                                        \
        if ((start_addr + orig_size) - addr >= TARGET_PAGE_SIZE)        \
            end_addr2 = TARGET_PAGE_SIZE - 1;                           \
        else {                                                          \
            end_addr2 = (start_addr + orig_size - 1) & ~TARGET_PAGE_MASK; \
            if (end_addr2 < TARGET_PAGE_SIZE - 1)                       \
                need_subpage = 1;                                       \
        }                                                               \
    } while (0)

/* register physical memory.
   For RAM, 'size' must be a multiple of the target page size.
   If (phys_offset & ~TARGET_PAGE_MASK) != 0, then it is an
   io memory page.  The address used when calling the IO function is
   the offset from the start of the region, plus region_offset.  Both
   start_addr and region_offset are rounded down to a page boundary
   before calculating this offset.  This should not be a problem unless
   the low bits of start_addr and region_offset differ.  */
void cpu_register_physical_memory_offset(target_phys_addr_t start_addr,
                                         ram_addr_t size,
                                         ram_addr_t phys_offset,
                                         ram_addr_t region_offset)
{
    target_phys_addr_t addr, end_addr;
    PhysPageDesc *p;
    CPUState *env;
    ram_addr_t orig_size = size;
    void *subpage;

    cpu_notify_set_memory(start_addr, size, phys_offset);

    if (phys_offset == IO_MEM_UNASSIGNED) {
        region_offset = start_addr;
    }
    region_offset &= TARGET_PAGE_MASK;
    size = (size + TARGET_PAGE_SIZE - 1) & TARGET_PAGE_MASK;
    end_addr = start_addr + (target_phys_addr_t)size;
    for(addr = start_addr; addr != end_addr; addr += TARGET_PAGE_SIZE) {
        p = phys_page_find(addr >> TARGET_PAGE_BITS);
        if (p && p->phys_offset != IO_MEM_UNASSIGNED) {
            ram_addr_t orig_memory = p->phys_offset;
            target_phys_addr_t start_addr2, end_addr2;
            int need_subpage = 0;

            CHECK_SUBPAGE(addr, start_addr, start_addr2, end_addr, end_addr2,
                          need_subpage);
            if (need_subpage || phys_offset & IO_MEM_SUBWIDTH) {
                if (!(orig_memory & IO_MEM_SUBPAGE)) {
                    subpage = subpage_init((addr & TARGET_PAGE_MASK),
                                           &p->phys_offset, orig_memory,
                                           p->region_offset);
                } else {
                    subpage = io_mem_opaque[(orig_memory & ~TARGET_PAGE_MASK)
                                            >> IO_MEM_SHIFT];
                }
                subpage_register(subpage, start_addr2, end_addr2, phys_offset,
                                 region_offset);
                p->region_offset = 0;
            } else {
                p->phys_offset = phys_offset;
                if ((phys_offset & ~TARGET_PAGE_MASK) <= IO_MEM_ROM ||
                    (phys_offset & IO_MEM_ROMD))
                    phys_offset += TARGET_PAGE_SIZE;
            }
        } else {
            p = phys_page_find_alloc(addr >> TARGET_PAGE_BITS, 1);
            p->phys_offset = phys_offset;
            p->region_offset = region_offset;
            if ((phys_offset & ~TARGET_PAGE_MASK) <= IO_MEM_ROM ||
                (phys_offset & IO_MEM_ROMD)) {
                phys_offset += TARGET_PAGE_SIZE;
            } else {
                target_phys_addr_t start_addr2, end_addr2;
                int need_subpage = 0;

                CHECK_SUBPAGE(addr, start_addr, start_addr2, end_addr,
                              end_addr2, need_subpage);

                if (need_subpage || phys_offset & IO_MEM_SUBWIDTH) {
                    subpage = subpage_init((addr & TARGET_PAGE_MASK),
                                           &p->phys_offset, IO_MEM_UNASSIGNED,
                                           addr & TARGET_PAGE_MASK);
                    subpage_register(subpage, start_addr2, end_addr2,
                                     phys_offset, region_offset);
                    p->region_offset = 0;
                }
            }
        }
        region_offset += TARGET_PAGE_SIZE;
    }

    /* since each CPU stores ram addresses in its TLB cache, we must
       reset the modified entries */
    /* XXX: slow ! */
    for(env = first_cpu; env != NULL; env = env->next_cpu) {
        tlb_flush(env, 1);
    }
}

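/*
 * Sketch (illustrative, not compiled): typical board-level use.  The
 * plain cpu_register_physical_memory() wrapper (cpu-common.h) calls the
 * _offset variant with region_offset == 0; my_read/my_write/opaque stand
 * for hypothetical device handler arrays, and example_board_init is a
 * hypothetical name.
 */
#if 0
static void example_board_init(void)
{
    ram_addr_t ram_offset = qemu_ram_alloc(0x100000);        /* 1 MB RAM */
    cpu_register_physical_memory(0x00000000, 0x100000,
                                 ram_offset | IO_MEM_RAM);

    int io_index = cpu_register_io_memory(my_read, my_write, opaque);
    cpu_register_physical_memory(0xfe000000, TARGET_PAGE_SIZE,
                                 io_index);                  /* one MMIO page */
}
#endif
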
/* XXX: temporary until new memory mapping API */
ram_addr_t cpu_get_physical_page_desc(target_phys_addr_t addr)
{
    PhysPageDesc *p;

    p = phys_page_find(addr >> TARGET_PAGE_BITS);
    if (!p)
        return IO_MEM_UNASSIGNED;
    return p->phys_offset;
}

void qemu_register_coalesced_mmio(target_phys_addr_t addr, ram_addr_t size)
{
    if (kvm_enabled())
        kvm_coalesce_mmio_region(addr, size);
}

void qemu_unregister_coalesced_mmio(target_phys_addr_t addr, ram_addr_t size)
{
    if (kvm_enabled())
        kvm_uncoalesce_mmio_region(addr, size);
}

void qemu_flush_coalesced_mmio_buffer(void)
{
    if (kvm_enabled())
        kvm_flush_coalesced_mmio_buffer();
}

#if defined(__linux__) && !defined(TARGET_S390X)

#include <sys/vfs.h>

#define HUGETLBFS_MAGIC       0x958458f6

static long gethugepagesize(const char *path)
{
    struct statfs fs;
    int ret;

    do {
        ret = statfs(path, &fs);
    } while (ret != 0 && errno == EINTR);

    if (ret != 0) {
        perror("statfs");
        return 0;
    }

    if (fs.f_type != HUGETLBFS_MAGIC)
        fprintf(stderr, "Warning: path not on HugeTLBFS: %s\n", path);

    return fs.f_bsize;
}

static void *file_ram_alloc(ram_addr_t memory, const char *path)
{
    char *filename;
    void *area;
    int fd;
#ifdef MAP_POPULATE
    int flags;
#endif
    unsigned long hpagesize;

    hpagesize = gethugepagesize(path);
    if (!hpagesize) {
        return NULL;
    }

    if (memory < hpagesize) {
        return NULL;
    }

    if (kvm_enabled() && !kvm_has_sync_mmu()) {
        fprintf(stderr, "host lacks kvm mmu notifiers, -mem-path unsupported\n");
        return NULL;
    }

    if (asprintf(&filename, "%s/qemu_back_mem.XXXXXX", path) == -1) {
        return NULL;
    }

    fd = mkstemp(filename);
    if (fd < 0) {
        perror("mkstemp");
        free(filename);
        return NULL;
    }
    unlink(filename);
    free(filename);

    memory = (memory + hpagesize - 1) & ~(hpagesize - 1);

    /*
     * ftruncate is not supported by hugetlbfs in older
     * hosts, so don't bother bailing out on errors.
     * If anything goes wrong with it under other filesystems,
     * mmap will fail.
     */
    if (ftruncate(fd, memory))
        perror("ftruncate");

#ifdef MAP_POPULATE
    /* NB: MAP_POPULATE won't exhaustively alloc all phys pages in the case
     * MAP_PRIVATE is requested.  For mem_prealloc we mmap as MAP_SHARED
     * to sidestep this quirk.
     */
    flags = mem_prealloc ? MAP_POPULATE | MAP_SHARED : MAP_PRIVATE;
    area = mmap(0, memory, PROT_READ | PROT_WRITE, flags, fd, 0);
#else
    area = mmap(0, memory, PROT_READ | PROT_WRITE, MAP_PRIVATE, fd, 0);
#endif
    if (area == MAP_FAILED) {
        perror("file_ram_alloc: can't mmap RAM pages");
        close(fd);
        return (NULL);
    }
    return area;
}
#endif

ram_addr_t qemu_ram_alloc(ram_addr_t size)
{
    RAMBlock *new_block;

    size = TARGET_PAGE_ALIGN(size);
    new_block = qemu_malloc(sizeof(*new_block));

    if (mem_path) {
#if defined (__linux__) && !defined(TARGET_S390X)
        new_block->host = file_ram_alloc(size, mem_path);
        if (!new_block->host)
            exit(1);
#else
        fprintf(stderr, "-mem-path option unsupported\n");
        exit(1);
#endif
    } else {
#if defined(TARGET_S390X) && defined(CONFIG_KVM)
        /* XXX S390 KVM requires the topmost vma of the RAM to be < 256GB */
        new_block->host = mmap((void*)0x1000000, size,
                               PROT_EXEC|PROT_READ|PROT_WRITE,
                               MAP_SHARED | MAP_ANONYMOUS, -1, 0);
#else
        new_block->host = qemu_vmalloc(size);
#endif
#ifdef MADV_MERGEABLE
        madvise(new_block->host, size, MADV_MERGEABLE);
#endif
    }
    new_block->offset = last_ram_offset;
    new_block->length = size;

    new_block->next = ram_blocks;
    ram_blocks = new_block;

    phys_ram_dirty = qemu_realloc(phys_ram_dirty,
                                  (last_ram_offset + size) >> TARGET_PAGE_BITS);
    memset(phys_ram_dirty + (last_ram_offset >> TARGET_PAGE_BITS),
           0xff, size >> TARGET_PAGE_BITS);

    last_ram_offset += size;

    if (kvm_enabled())
        kvm_setup_guest_memory(new_block->host, size);

    return new_block->offset;
}

void qemu_ram_free(ram_addr_t addr)
{
    /* TODO: implement this.  */
}

/* Return a host pointer to ram allocated with qemu_ram_alloc.
   With the exception of the softmmu code in this file, this should
   only be used for local memory (e.g. video ram) that the device owns,
   and knows it isn't going to access beyond the end of the block.

   It should not be used for general purpose DMA.
   Use cpu_physical_memory_map/cpu_physical_memory_rw instead.
 */
void *qemu_get_ram_ptr(ram_addr_t addr)
{
    RAMBlock *prev;
    RAMBlock **prevp;
    RAMBlock *block;

    prev = NULL;
    prevp = &ram_blocks;
    block = ram_blocks;
    while (block && (block->offset > addr
                     || block->offset + block->length <= addr)) {
        if (prev)
            prevp = &prev->next;
        prev = block;
        block = block->next;
    }
    if (!block) {
        fprintf(stderr, "Bad ram offset %" PRIx64 "\n", (uint64_t)addr);
        abort();
    }
    /* Move this entry to the start of the list.  */
    if (prev) {
        prev->next = block->next;
        block->next = *prevp;
        *prevp = block;
    }
    return block->host + (addr - block->offset);
}

/* Some of the softmmu routines need to translate from a host pointer
   (typically a TLB entry) back to a ram offset.  */
ram_addr_t qemu_ram_addr_from_host(void *ptr)
{
    RAMBlock *prev;
    RAMBlock *block;
    uint8_t *host = ptr;

    prev = NULL;
    block = ram_blocks;
    while (block && (block->host > host
                     || block->host + block->length <= host)) {
        prev = block;
        block = block->next;
    }
    if (!block) {
        fprintf(stderr, "Bad ram pointer %p\n", ptr);
        abort();
    }
    return block->offset + (host - block->host);
}

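/*
 * Sketch (illustrative, not compiled): the two helpers above compose to
 * the identity for any offset inside an allocated block.  The example_*
 * name is hypothetical.
 */
#if 0
static void example_ram_round_trip(void)
{
    ram_addr_t base = qemu_ram_alloc(65536);
    void *host = qemu_get_ram_ptr(base + 0x42);
    assert(qemu_ram_addr_from_host(host) == base + 0x42);
}
#endif
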
c227f099 | 2737 | static uint32_t unassigned_mem_readb(void *opaque, target_phys_addr_t addr) |
33417e70 | 2738 | { |
67d3b957 | 2739 | #ifdef DEBUG_UNASSIGNED |
ab3d1727 | 2740 | printf("Unassigned mem read " TARGET_FMT_plx "\n", addr); |
b4f0a316 | 2741 | #endif |
faed1c2a | 2742 | #if defined(TARGET_SPARC) || defined(TARGET_MICROBLAZE) |
e18231a3 BS |
2743 | do_unassigned_access(addr, 0, 0, 0, 1); |
2744 | #endif | |
2745 | return 0; | |
2746 | } | |
2747 | ||
c227f099 | 2748 | static uint32_t unassigned_mem_readw(void *opaque, target_phys_addr_t addr) |
e18231a3 BS |
2749 | { |
2750 | #ifdef DEBUG_UNASSIGNED | |
2751 | printf("Unassigned mem read " TARGET_FMT_plx "\n", addr); | |
2752 | #endif | |
faed1c2a | 2753 | #if defined(TARGET_SPARC) || defined(TARGET_MICROBLAZE) |
e18231a3 BS |
2754 | do_unassigned_access(addr, 0, 0, 0, 2); |
2755 | #endif | |
2756 | return 0; | |
2757 | } | |
2758 | ||
c227f099 | 2759 | static uint32_t unassigned_mem_readl(void *opaque, target_phys_addr_t addr) |
e18231a3 BS |
2760 | { |
2761 | #ifdef DEBUG_UNASSIGNED | |
2762 | printf("Unassigned mem read " TARGET_FMT_plx "\n", addr); | |
2763 | #endif | |
faed1c2a | 2764 | #if defined(TARGET_SPARC) || defined(TARGET_MICROBLAZE) |
e18231a3 | 2765 | do_unassigned_access(addr, 0, 0, 0, 4); |
67d3b957 | 2766 | #endif |
33417e70 FB |
2767 | return 0; |
2768 | } | |
2769 | ||
c227f099 | 2770 | static void unassigned_mem_writeb(void *opaque, target_phys_addr_t addr, uint32_t val) |
33417e70 | 2771 | { |
67d3b957 | 2772 | #ifdef DEBUG_UNASSIGNED |
ab3d1727 | 2773 | printf("Unassigned mem write " TARGET_FMT_plx " = 0x%x\n", addr, val); |
67d3b957 | 2774 | #endif |
faed1c2a | 2775 | #if defined(TARGET_SPARC) || defined(TARGET_MICROBLAZE) |
e18231a3 BS |
2776 | do_unassigned_access(addr, 1, 0, 0, 1); |
2777 | #endif | |
2778 | } | |
2779 | ||
c227f099 | 2780 | static void unassigned_mem_writew(void *opaque, target_phys_addr_t addr, uint32_t val) |
e18231a3 BS |
2781 | { |
2782 | #ifdef DEBUG_UNASSIGNED | |
2783 | printf("Unassigned mem write " TARGET_FMT_plx " = 0x%x\n", addr, val); | |
2784 | #endif | |
faed1c2a | 2785 | #if defined(TARGET_SPARC) || defined(TARGET_MICROBLAZE) |
e18231a3 BS |
2786 | do_unassigned_access(addr, 1, 0, 0, 2); |
2787 | #endif | |
2788 | } | |
2789 | ||
c227f099 | 2790 | static void unassigned_mem_writel(void *opaque, target_phys_addr_t addr, uint32_t val) |
e18231a3 BS |
2791 | { |
2792 | #ifdef DEBUG_UNASSIGNED | |
2793 | printf("Unassigned mem write " TARGET_FMT_plx " = 0x%x\n", addr, val); | |
2794 | #endif | |
faed1c2a | 2795 | #if defined(TARGET_SPARC) || defined(TARGET_MICROBLAZE) |
e18231a3 | 2796 | do_unassigned_access(addr, 1, 0, 0, 4); |
b4f0a316 | 2797 | #endif |
33417e70 FB |
2798 | } |
2799 | ||
d60efc6b | 2800 | static CPUReadMemoryFunc * const unassigned_mem_read[3] = { |
33417e70 | 2801 | unassigned_mem_readb, |
e18231a3 BS |
2802 | unassigned_mem_readw, |
2803 | unassigned_mem_readl, | |
33417e70 FB |
2804 | }; |
2805 | ||
d60efc6b | 2806 | static CPUWriteMemoryFunc * const unassigned_mem_write[3] = { |
33417e70 | 2807 | unassigned_mem_writeb, |
e18231a3 BS |
2808 | unassigned_mem_writew, |
2809 | unassigned_mem_writel, | |
33417e70 FB |
2810 | }; |
2811 | ||
c227f099 | 2812 | static void notdirty_mem_writeb(void *opaque, target_phys_addr_t ram_addr, |
0f459d16 | 2813 | uint32_t val) |
9fa3e853 | 2814 | { |
3a7d929e | 2815 | int dirty_flags; |
3a7d929e FB |
2816 | dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS]; |
2817 | if (!(dirty_flags & CODE_DIRTY_FLAG)) { | |
9fa3e853 | 2818 | #if !defined(CONFIG_USER_ONLY) |
3a7d929e FB |
2819 | tb_invalidate_phys_page_fast(ram_addr, 1); |
2820 | dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS]; | |
9fa3e853 | 2821 | #endif |
3a7d929e | 2822 | } |
5579c7f3 | 2823 | stb_p(qemu_get_ram_ptr(ram_addr), val); |
f23db169 FB |
2824 | dirty_flags |= (0xff & ~CODE_DIRTY_FLAG); |
2825 | phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS] = dirty_flags; | |
2826 | /* we remove the notdirty callback only if the code has been | |
2827 | flushed */ | |
2828 | if (dirty_flags == 0xff) | |
2e70f6ef | 2829 | tlb_set_dirty(cpu_single_env, cpu_single_env->mem_io_vaddr); |
9fa3e853 FB |
2830 | } |
2831 | ||
c227f099 | 2832 | static void notdirty_mem_writew(void *opaque, target_phys_addr_t ram_addr, |
0f459d16 | 2833 | uint32_t val) |
9fa3e853 | 2834 | { |
3a7d929e | 2835 | int dirty_flags; |
3a7d929e FB |
2836 | dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS]; |
2837 | if (!(dirty_flags & CODE_DIRTY_FLAG)) { | |
9fa3e853 | 2838 | #if !defined(CONFIG_USER_ONLY) |
3a7d929e FB |
2839 | tb_invalidate_phys_page_fast(ram_addr, 2); |
2840 | dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS]; | |
9fa3e853 | 2841 | #endif |
3a7d929e | 2842 | } |
5579c7f3 | 2843 | stw_p(qemu_get_ram_ptr(ram_addr), val); |
f23db169 FB |
2844 | dirty_flags |= (0xff & ~CODE_DIRTY_FLAG); |
2845 | phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS] = dirty_flags; | |
2846 | /* we remove the notdirty callback only if the code has been | |
2847 | flushed */ | |
2848 | if (dirty_flags == 0xff) | |
2e70f6ef | 2849 | tlb_set_dirty(cpu_single_env, cpu_single_env->mem_io_vaddr); |
9fa3e853 FB |
2850 | } |
2851 | ||
c227f099 | 2852 | static void notdirty_mem_writel(void *opaque, target_phys_addr_t ram_addr, |
0f459d16 | 2853 | uint32_t val) |
9fa3e853 | 2854 | { |
3a7d929e | 2855 | int dirty_flags; |
3a7d929e FB |
2856 | dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS]; |
2857 | if (!(dirty_flags & CODE_DIRTY_FLAG)) { | |
9fa3e853 | 2858 | #if !defined(CONFIG_USER_ONLY) |
3a7d929e FB |
2859 | tb_invalidate_phys_page_fast(ram_addr, 4); |
2860 | dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS]; | |
9fa3e853 | 2861 | #endif |
3a7d929e | 2862 | } |
5579c7f3 | 2863 | stl_p(qemu_get_ram_ptr(ram_addr), val); |
f23db169 FB |
2864 | dirty_flags |= (0xff & ~CODE_DIRTY_FLAG); |
2865 | phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS] = dirty_flags; | |
2866 | /* we remove the notdirty callback only if the code has been | |
2867 | flushed */ | |
2868 | if (dirty_flags == 0xff) | |
2e70f6ef | 2869 | tlb_set_dirty(cpu_single_env, cpu_single_env->mem_io_vaddr); |
9fa3e853 FB |
2870 | } |
2871 | ||
d60efc6b | 2872 | static CPUReadMemoryFunc * const error_mem_read[3] = { |
9fa3e853 FB |
2873 | NULL, /* never used */ |
2874 | NULL, /* never used */ | |
2875 | NULL, /* never used */ | |
2876 | }; | |
2877 | ||
d60efc6b | 2878 | static CPUWriteMemoryFunc * const notdirty_mem_write[3] = { |
1ccde1cb FB |
2879 | notdirty_mem_writeb, |
2880 | notdirty_mem_writew, | |
2881 | notdirty_mem_writel, | |
2882 | }; | |

/* Generate a debug exception if a watchpoint has been hit.  */
static void check_watchpoint(int offset, int len_mask, int flags)
{
    CPUState *env = cpu_single_env;
    target_ulong pc, cs_base;
    TranslationBlock *tb;
    target_ulong vaddr;
    CPUWatchpoint *wp;
    int cpu_flags;

    if (env->watchpoint_hit) {
        /* We re-entered the check after replacing the TB.  Now raise
         * the debug interrupt so that it will trigger after the
         * current instruction. */
        cpu_interrupt(env, CPU_INTERRUPT_DEBUG);
        return;
    }
    vaddr = (env->mem_io_vaddr & TARGET_PAGE_MASK) + offset;
    QTAILQ_FOREACH(wp, &env->watchpoints, entry) {
        if ((vaddr == (wp->vaddr & len_mask) ||
             (vaddr & wp->len_mask) == wp->vaddr) && (wp->flags & flags)) {
            wp->flags |= BP_WATCHPOINT_HIT;
            if (!env->watchpoint_hit) {
                env->watchpoint_hit = wp;
                tb = tb_find_pc(env->mem_io_pc);
                if (!tb) {
                    cpu_abort(env, "check_watchpoint: could not find TB for "
                              "pc=%p", (void *)env->mem_io_pc);
                }
                cpu_restore_state(tb, env, env->mem_io_pc, NULL);
                tb_phys_invalidate(tb, -1);
                if (wp->flags & BP_STOP_BEFORE_ACCESS) {
                    env->exception_index = EXCP_DEBUG;
                } else {
                    cpu_get_tb_cpu_state(env, &pc, &cs_base, &cpu_flags);
                    tb_gen_code(env, pc, cs_base, cpu_flags, 1);
                }
                cpu_resume_from_signal(env, NULL);
            }
        } else {
            wp->flags &= ~BP_WATCHPOINT_HIT;
        }
    }
}

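/*
 * Editorial sketch (not in the original source): accesses reach the slow
 * path above because cpu_watchpoint_insert(), defined earlier in this
 * file, makes the TLB route the affected guest page through io_mem_watch.
 * A debugger front end might arm a 4-byte write watchpoint like this (the
 * address is made up, error handling omitted):
 *
 *     CPUWatchpoint *wp;
 *     cpu_watchpoint_insert(env, 0x1000, 4, BP_MEM_WRITE | BP_GDB, &wp);
 *
 * A 32-bit store to that page then lands in watch_mem_writel() below,
 * which calls check_watchpoint() with len_mask ~0x3 before forwarding the
 * access to stl_phys().
 */
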
/* Watchpoint access routines.  Watchpoints are inserted using TLB tricks,
   so these check for a hit then pass through to the normal out-of-line
   phys routines.  */
static uint32_t watch_mem_readb(void *opaque, target_phys_addr_t addr)
{
    check_watchpoint(addr & ~TARGET_PAGE_MASK, ~0x0, BP_MEM_READ);
    return ldub_phys(addr);
}

static uint32_t watch_mem_readw(void *opaque, target_phys_addr_t addr)
{
    check_watchpoint(addr & ~TARGET_PAGE_MASK, ~0x1, BP_MEM_READ);
    return lduw_phys(addr);
}

static uint32_t watch_mem_readl(void *opaque, target_phys_addr_t addr)
{
    check_watchpoint(addr & ~TARGET_PAGE_MASK, ~0x3, BP_MEM_READ);
    return ldl_phys(addr);
}

static void watch_mem_writeb(void *opaque, target_phys_addr_t addr,
                             uint32_t val)
{
    check_watchpoint(addr & ~TARGET_PAGE_MASK, ~0x0, BP_MEM_WRITE);
    stb_phys(addr, val);
}

static void watch_mem_writew(void *opaque, target_phys_addr_t addr,
                             uint32_t val)
{
    check_watchpoint(addr & ~TARGET_PAGE_MASK, ~0x1, BP_MEM_WRITE);
    stw_phys(addr, val);
}

static void watch_mem_writel(void *opaque, target_phys_addr_t addr,
                             uint32_t val)
{
    check_watchpoint(addr & ~TARGET_PAGE_MASK, ~0x3, BP_MEM_WRITE);
    stl_phys(addr, val);
}

static CPUReadMemoryFunc * const watch_mem_read[3] = {
    watch_mem_readb,
    watch_mem_readw,
    watch_mem_readl,
};

static CPUWriteMemoryFunc * const watch_mem_write[3] = {
    watch_mem_writeb,
    watch_mem_writew,
    watch_mem_writel,
};

static inline uint32_t subpage_readlen (subpage_t *mmio, target_phys_addr_t addr,
                                        unsigned int len)
{
    uint32_t ret;
    unsigned int idx;

    idx = SUBPAGE_IDX(addr);
#if defined(DEBUG_SUBPAGE)
    printf("%s: subpage %p len %d addr " TARGET_FMT_plx " idx %d\n", __func__,
           mmio, len, addr, idx);
#endif
    ret = (**mmio->mem_read[idx][len])(mmio->opaque[idx][0][len],
                                       addr + mmio->region_offset[idx][0][len]);

    return ret;
}

static inline void subpage_writelen (subpage_t *mmio, target_phys_addr_t addr,
                                     uint32_t value, unsigned int len)
{
    unsigned int idx;

    idx = SUBPAGE_IDX(addr);
#if defined(DEBUG_SUBPAGE)
    printf("%s: subpage %p len %d addr " TARGET_FMT_plx " idx %d value %08x\n", __func__,
           mmio, len, addr, idx, value);
#endif
    (**mmio->mem_write[idx][len])(mmio->opaque[idx][1][len],
                                  addr + mmio->region_offset[idx][1][len],
                                  value);
}

static uint32_t subpage_readb (void *opaque, target_phys_addr_t addr)
{
#if defined(DEBUG_SUBPAGE)
    printf("%s: addr " TARGET_FMT_plx "\n", __func__, addr);
#endif

    return subpage_readlen(opaque, addr, 0);
}

static void subpage_writeb (void *opaque, target_phys_addr_t addr,
                            uint32_t value)
{
#if defined(DEBUG_SUBPAGE)
    printf("%s: addr " TARGET_FMT_plx " val %08x\n", __func__, addr, value);
#endif
    subpage_writelen(opaque, addr, value, 0);
}

static uint32_t subpage_readw (void *opaque, target_phys_addr_t addr)
{
#if defined(DEBUG_SUBPAGE)
    printf("%s: addr " TARGET_FMT_plx "\n", __func__, addr);
#endif

    return subpage_readlen(opaque, addr, 1);
}

static void subpage_writew (void *opaque, target_phys_addr_t addr,
                            uint32_t value)
{
#if defined(DEBUG_SUBPAGE)
    printf("%s: addr " TARGET_FMT_plx " val %08x\n", __func__, addr, value);
#endif
    subpage_writelen(opaque, addr, value, 1);
}

static uint32_t subpage_readl (void *opaque, target_phys_addr_t addr)
{
#if defined(DEBUG_SUBPAGE)
    printf("%s: addr " TARGET_FMT_plx "\n", __func__, addr);
#endif

    return subpage_readlen(opaque, addr, 2);
}

static void subpage_writel (void *opaque,
                            target_phys_addr_t addr, uint32_t value)
{
#if defined(DEBUG_SUBPAGE)
    printf("%s: addr " TARGET_FMT_plx " val %08x\n", __func__, addr, value);
#endif
    subpage_writelen(opaque, addr, value, 2);
}

static CPUReadMemoryFunc * const subpage_read[] = {
    &subpage_readb,
    &subpage_readw,
    &subpage_readl,
};

static CPUWriteMemoryFunc * const subpage_write[] = {
    &subpage_writeb,
    &subpage_writew,
    &subpage_writel,
};

static int subpage_register (subpage_t *mmio, uint32_t start, uint32_t end,
                             ram_addr_t memory, ram_addr_t region_offset)
{
    int idx, eidx;
    unsigned int i;

    if (start >= TARGET_PAGE_SIZE || end >= TARGET_PAGE_SIZE)
        return -1;
    idx = SUBPAGE_IDX(start);
    eidx = SUBPAGE_IDX(end);
#if defined(DEBUG_SUBPAGE)
    printf("%s: %p start %08x end %08x idx %08x eidx %08x mem %ld\n", __func__,
           mmio, start, end, idx, eidx, memory);
#endif
    memory >>= IO_MEM_SHIFT;
    for (; idx <= eidx; idx++) {
        for (i = 0; i < 4; i++) {
            if (io_mem_read[memory][i]) {
                mmio->mem_read[idx][i] = &io_mem_read[memory][i];
                mmio->opaque[idx][0][i] = io_mem_opaque[memory];
                mmio->region_offset[idx][0][i] = region_offset;
            }
            if (io_mem_write[memory][i]) {
                mmio->mem_write[idx][i] = &io_mem_write[memory][i];
                mmio->opaque[idx][1][i] = io_mem_opaque[memory];
                mmio->region_offset[idx][1][i] = region_offset;
            }
        }
    }

    return 0;
}

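/*
 * Editorial sketch (not in the original source): SUBPAGE_IDX() is derived
 * from the offset within the page, so splitting one 4 KiB target page
 * between two backends (the mem_a/mem_b handles and offsets below are
 * hypothetical) is just two calls:
 *
 *     subpage_register(mmio, 0x000, 0x7ff, mem_a, off_a);
 *     subpage_register(mmio, 0x800, 0xfff, mem_b, off_b);
 *
 * Each call fills a disjoint idx range in the per-width dispatch tables,
 * and subpage_readlen()/subpage_writelen() above forward every access to
 * the owning backend's handlers.
 */
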
static void *subpage_init (target_phys_addr_t base, ram_addr_t *phys,
                           ram_addr_t orig_memory, ram_addr_t region_offset)
{
    subpage_t *mmio;
    int subpage_memory;

    mmio = qemu_mallocz(sizeof(subpage_t));

    mmio->base = base;
    subpage_memory = cpu_register_io_memory(subpage_read, subpage_write, mmio);
#if defined(DEBUG_SUBPAGE)
    printf("%s: %p base " TARGET_FMT_plx " len %08x %d\n", __func__,
           mmio, base, TARGET_PAGE_SIZE, subpage_memory);
#endif
    *phys = subpage_memory | IO_MEM_SUBPAGE;
    subpage_register(mmio, 0, TARGET_PAGE_SIZE - 1, orig_memory,
                     region_offset);

    return mmio;
}

static int get_free_io_mem_idx(void)
{
    int i;

    for (i = 0; i < IO_MEM_NB_ENTRIES; i++)
        if (!io_mem_used[i]) {
            io_mem_used[i] = 1;
            return i;
        }
    fprintf(stderr, "ran out of io_mem_idx, max %d!\n", IO_MEM_NB_ENTRIES);
    return -1;
}

/* mem_read and mem_write are arrays of functions containing the
   function to access byte (index 0), word (index 1) and dword (index
   2).  Functions can be omitted with a NULL function pointer.
   If io_index is non-zero, the corresponding io zone is
   modified.  If it is zero, a new io zone is allocated.  The return
   value can be used with cpu_register_physical_memory(); -1 is
   returned on error. */
static int cpu_register_io_memory_fixed(int io_index,
                                        CPUReadMemoryFunc * const *mem_read,
                                        CPUWriteMemoryFunc * const *mem_write,
                                        void *opaque)
{
    int i, subwidth = 0;

    if (io_index <= 0) {
        io_index = get_free_io_mem_idx();
        if (io_index == -1)
            return io_index;
    } else {
        io_index >>= IO_MEM_SHIFT;
        if (io_index >= IO_MEM_NB_ENTRIES)
            return -1;
    }

    for (i = 0; i < 3; i++) {
        if (!mem_read[i] || !mem_write[i])
            subwidth = IO_MEM_SUBWIDTH;
        io_mem_read[io_index][i] = mem_read[i];
        io_mem_write[io_index][i] = mem_write[i];
    }
    io_mem_opaque[io_index] = opaque;
    return (io_index << IO_MEM_SHIFT) | subwidth;
}

int cpu_register_io_memory(CPUReadMemoryFunc * const *mem_read,
                           CPUWriteMemoryFunc * const *mem_write,
                           void *opaque)
{
    return cpu_register_io_memory_fixed(0, mem_read, mem_write, opaque);
}

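/*
 * Editorial sketch (not in the original source): typical device-side use
 * of cpu_register_io_memory().  The "my_dev_" names, opaque pointer s and
 * base address are hypothetical:
 *
 *     static CPUReadMemoryFunc * const my_dev_read[3] = {
 *         my_dev_readb, my_dev_readw, my_dev_readl,
 *     };
 *     static CPUWriteMemoryFunc * const my_dev_write[3] = {
 *         my_dev_writeb, my_dev_writew, my_dev_writel,
 *     };
 *
 *     int io = cpu_register_io_memory(my_dev_read, my_dev_write, s);
 *     cpu_register_physical_memory(0x10000000, 0x1000, io);
 *
 * Leaving a width NULL tags the returned handle with IO_MEM_SUBWIDTH, as
 * the loop in cpu_register_io_memory_fixed() above shows.
 */
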
void cpu_unregister_io_memory(int io_table_address)
{
    int i;
    int io_index = io_table_address >> IO_MEM_SHIFT;

    for (i = 0; i < 3; i++) {
        io_mem_read[io_index][i] = unassigned_mem_read[i];
        io_mem_write[io_index][i] = unassigned_mem_write[i];
    }
    io_mem_opaque[io_index] = NULL;
    io_mem_used[io_index] = 0;
}

static void io_mem_init(void)
{
    int i;

    cpu_register_io_memory_fixed(IO_MEM_ROM, error_mem_read, unassigned_mem_write, NULL);
    cpu_register_io_memory_fixed(IO_MEM_UNASSIGNED, unassigned_mem_read, unassigned_mem_write, NULL);
    cpu_register_io_memory_fixed(IO_MEM_NOTDIRTY, error_mem_read, notdirty_mem_write, NULL);
    for (i = 0; i < 5; i++)
        io_mem_used[i] = 1;

    io_mem_watch = cpu_register_io_memory(watch_mem_read,
                                          watch_mem_write, NULL);
}

#endif /* !defined(CONFIG_USER_ONLY) */

/* physical memory access (slow version, mainly for debug) */
#if defined(CONFIG_USER_ONLY)
int cpu_memory_rw_debug(CPUState *env, target_ulong addr,
                        uint8_t *buf, int len, int is_write)
{
    int l, flags;
    target_ulong page;
    void * p;

    while (len > 0) {
        page = addr & TARGET_PAGE_MASK;
        l = (page + TARGET_PAGE_SIZE) - addr;
        if (l > len)
            l = len;
        flags = page_get_flags(page);
        if (!(flags & PAGE_VALID))
            return -1;
        if (is_write) {
            if (!(flags & PAGE_WRITE))
                return -1;
            /* XXX: this code should not depend on lock_user */
            if (!(p = lock_user(VERIFY_WRITE, addr, l, 0)))
                return -1;
            memcpy(p, buf, l);
            unlock_user(p, addr, l);
        } else {
            if (!(flags & PAGE_READ))
                return -1;
            /* XXX: this code should not depend on lock_user */
            if (!(p = lock_user(VERIFY_READ, addr, l, 1)))
                return -1;
            memcpy(buf, p, l);
            unlock_user(p, addr, 0);
        }
        len -= l;
        buf += l;
        addr += l;
    }
    return 0;
}

#else
void cpu_physical_memory_rw(target_phys_addr_t addr, uint8_t *buf,
                            int len, int is_write)
{
    int l, io_index;
    uint8_t *ptr;
    uint32_t val;
    target_phys_addr_t page;
    unsigned long pd;
    PhysPageDesc *p;

    while (len > 0) {
        page = addr & TARGET_PAGE_MASK;
        l = (page + TARGET_PAGE_SIZE) - addr;
        if (l > len)
            l = len;
        p = phys_page_find(page >> TARGET_PAGE_BITS);
        if (!p) {
            pd = IO_MEM_UNASSIGNED;
        } else {
            pd = p->phys_offset;
        }

        if (is_write) {
            if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
                target_phys_addr_t addr1 = addr;
                io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
                if (p)
                    addr1 = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
                /* XXX: could force cpu_single_env to NULL to avoid
                   potential bugs */
                if (l >= 4 && ((addr1 & 3) == 0)) {
                    /* 32 bit write access */
                    val = ldl_p(buf);
                    io_mem_write[io_index][2](io_mem_opaque[io_index], addr1, val);
                    l = 4;
                } else if (l >= 2 && ((addr1 & 1) == 0)) {
                    /* 16 bit write access */
                    val = lduw_p(buf);
                    io_mem_write[io_index][1](io_mem_opaque[io_index], addr1, val);
                    l = 2;
                } else {
                    /* 8 bit write access */
                    val = ldub_p(buf);
                    io_mem_write[io_index][0](io_mem_opaque[io_index], addr1, val);
                    l = 1;
                }
            } else {
                unsigned long addr1;
                addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
                /* RAM case */
                ptr = qemu_get_ram_ptr(addr1);
                memcpy(ptr, buf, l);
                if (!cpu_physical_memory_is_dirty(addr1)) {
                    /* invalidate code */
                    tb_invalidate_phys_page_range(addr1, addr1 + l, 0);
                    /* set dirty bit */
                    phys_ram_dirty[addr1 >> TARGET_PAGE_BITS] |=
                        (0xff & ~CODE_DIRTY_FLAG);
                }
            }
        } else {
            if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM &&
                !(pd & IO_MEM_ROMD)) {
                target_phys_addr_t addr1 = addr;
                /* I/O case */
                io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
                if (p)
                    addr1 = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
                if (l >= 4 && ((addr1 & 3) == 0)) {
                    /* 32 bit read access */
                    val = io_mem_read[io_index][2](io_mem_opaque[io_index], addr1);
                    stl_p(buf, val);
                    l = 4;
                } else if (l >= 2 && ((addr1 & 1) == 0)) {
                    /* 16 bit read access */
                    val = io_mem_read[io_index][1](io_mem_opaque[io_index], addr1);
                    stw_p(buf, val);
                    l = 2;
                } else {
                    /* 8 bit read access */
                    val = io_mem_read[io_index][0](io_mem_opaque[io_index], addr1);
                    stb_p(buf, val);
                    l = 1;
                }
            } else {
                /* RAM case */
                ptr = qemu_get_ram_ptr(pd & TARGET_PAGE_MASK) +
                    (addr & ~TARGET_PAGE_MASK);
                memcpy(buf, ptr, l);
            }
        }
        len -= l;
        buf += l;
        addr += l;
    }
}

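/*
 * Editorial sketch (not in the original source): callers normally go
 * through the cpu_physical_memory_read()/cpu_physical_memory_write()
 * wrappers around this routine, e.g. a device model fetching a
 * hypothetical 16-byte DMA descriptor:
 *
 *     uint8_t desc[16];
 *     cpu_physical_memory_read(desc_addr, desc, sizeof(desc));
 *
 * As the loop above shows, any part of the range that hits MMIO is split
 * into the widest naturally aligned 32/16/8-bit accesses.
 */
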
/* used for ROM loading: can write in RAM and ROM */
void cpu_physical_memory_write_rom(target_phys_addr_t addr,
                                   const uint8_t *buf, int len)
{
    int l;
    uint8_t *ptr;
    target_phys_addr_t page;
    unsigned long pd;
    PhysPageDesc *p;

    while (len > 0) {
        page = addr & TARGET_PAGE_MASK;
        l = (page + TARGET_PAGE_SIZE) - addr;
        if (l > len)
            l = len;
        p = phys_page_find(page >> TARGET_PAGE_BITS);
        if (!p) {
            pd = IO_MEM_UNASSIGNED;
        } else {
            pd = p->phys_offset;
        }

        if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM &&
            (pd & ~TARGET_PAGE_MASK) != IO_MEM_ROM &&
            !(pd & IO_MEM_ROMD)) {
            /* do nothing */
        } else {
            unsigned long addr1;
            addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
            /* ROM/RAM case */
            ptr = qemu_get_ram_ptr(addr1);
            memcpy(ptr, buf, l);
        }
        len -= l;
        buf += l;
        addr += l;
    }
}

typedef struct {
    void *buffer;
    target_phys_addr_t addr;
    target_phys_addr_t len;
} BounceBuffer;

static BounceBuffer bounce;

typedef struct MapClient {
    void *opaque;
    void (*callback)(void *opaque);
    QLIST_ENTRY(MapClient) link;
} MapClient;

static QLIST_HEAD(map_client_list, MapClient) map_client_list
    = QLIST_HEAD_INITIALIZER(map_client_list);

void *cpu_register_map_client(void *opaque, void (*callback)(void *opaque))
{
    MapClient *client = qemu_malloc(sizeof(*client));

    client->opaque = opaque;
    client->callback = callback;
    QLIST_INSERT_HEAD(&map_client_list, client, link);
    return client;
}

void cpu_unregister_map_client(void *_client)
{
    MapClient *client = (MapClient *)_client;

    QLIST_REMOVE(client, link);
    qemu_free(client);
}

static void cpu_notify_map_clients(void)
{
    MapClient *client;

    while (!QLIST_EMPTY(&map_client_list)) {
        client = QLIST_FIRST(&map_client_list);
        client->callback(client->opaque);
        cpu_unregister_map_client(client);
    }
}

/* Map a physical memory region into a host virtual address.
 * May map a subset of the requested range, given by and returned in *plen.
 * May return NULL if resources needed to perform the mapping are exhausted.
 * Use only for reads OR writes - not for read-modify-write operations.
 * Use cpu_register_map_client() to know when retrying the map operation is
 * likely to succeed.
 */
void *cpu_physical_memory_map(target_phys_addr_t addr,
                              target_phys_addr_t *plen,
                              int is_write)
{
    target_phys_addr_t len = *plen;
    target_phys_addr_t done = 0;
    int l;
    uint8_t *ret = NULL;
    uint8_t *ptr;
    target_phys_addr_t page;
    unsigned long pd;
    PhysPageDesc *p;
    unsigned long addr1;

    while (len > 0) {
        page = addr & TARGET_PAGE_MASK;
        l = (page + TARGET_PAGE_SIZE) - addr;
        if (l > len)
            l = len;
        p = phys_page_find(page >> TARGET_PAGE_BITS);
        if (!p) {
            pd = IO_MEM_UNASSIGNED;
        } else {
            pd = p->phys_offset;
        }

        if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
            if (done || bounce.buffer) {
                break;
            }
            bounce.buffer = qemu_memalign(TARGET_PAGE_SIZE, TARGET_PAGE_SIZE);
            bounce.addr = addr;
            bounce.len = l;
            if (!is_write) {
                cpu_physical_memory_rw(addr, bounce.buffer, l, 0);
            }
            ptr = bounce.buffer;
        } else {
            addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
            ptr = qemu_get_ram_ptr(addr1);
        }
        if (!done) {
            ret = ptr;
        } else if (ret + done != ptr) {
            break;
        }

        len -= l;
        addr += l;
        done += l;
    }
    *plen = done;
    return ret;
}

/* Unmaps a memory region previously mapped by cpu_physical_memory_map().
 * Will also mark the memory as dirty if is_write == 1.  access_len gives
 * the amount of memory that was actually read or written by the caller.
 */
void cpu_physical_memory_unmap(void *buffer, target_phys_addr_t len,
                               int is_write, target_phys_addr_t access_len)
{
    if (buffer != bounce.buffer) {
        if (is_write) {
            ram_addr_t addr1 = qemu_ram_addr_from_host(buffer);
            while (access_len) {
                unsigned l;
                l = TARGET_PAGE_SIZE;
                if (l > access_len)
                    l = access_len;
                if (!cpu_physical_memory_is_dirty(addr1)) {
                    /* invalidate code */
                    tb_invalidate_phys_page_range(addr1, addr1 + l, 0);
                    /* set dirty bit */
                    phys_ram_dirty[addr1 >> TARGET_PAGE_BITS] |=
                        (0xff & ~CODE_DIRTY_FLAG);
                }
                addr1 += l;
                access_len -= l;
            }
        }
        return;
    }
    if (is_write) {
        cpu_physical_memory_write(bounce.addr, bounce.buffer, access_len);
    }
    qemu_vfree(bounce.buffer);
    bounce.buffer = NULL;
    cpu_notify_map_clients();
}

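/*
 * Editorial sketch (not in the original source): the zero-copy DMA
 * pattern built from the two calls above.  The names are made up and
 * error handling is omitted; the third argument of the map call is
 * is_write.
 *
 *     target_phys_addr_t maplen = size;
 *     void *p = cpu_physical_memory_map(dma_addr, &maplen, 1);
 *     if (p) {
 *         device_fill_buffer(p, maplen);              // hypothetical
 *         cpu_physical_memory_unmap(p, maplen, 1, maplen);
 *     } else {
 *         cpu_register_map_client(s, my_retry_cb);    // retry later
 *     }
 *
 * Only one static bounce buffer exists, so mapping a non-RAM region can
 * fail until cpu_physical_memory_unmap() frees it and notifies the
 * registered clients.
 */
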
/* warning: addr must be aligned */
uint32_t ldl_phys(target_phys_addr_t addr)
{
    int io_index;
    uint8_t *ptr;
    uint32_t val;
    unsigned long pd;
    PhysPageDesc *p;

    p = phys_page_find(addr >> TARGET_PAGE_BITS);
    if (!p) {
        pd = IO_MEM_UNASSIGNED;
    } else {
        pd = p->phys_offset;
    }

    if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM &&
        !(pd & IO_MEM_ROMD)) {
        /* I/O case */
        io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
        if (p)
            addr = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
        val = io_mem_read[io_index][2](io_mem_opaque[io_index], addr);
    } else {
        /* RAM case */
        ptr = qemu_get_ram_ptr(pd & TARGET_PAGE_MASK) +
            (addr & ~TARGET_PAGE_MASK);
        val = ldl_p(ptr);
    }
    return val;
}

/* warning: addr must be aligned */
uint64_t ldq_phys(target_phys_addr_t addr)
{
    int io_index;
    uint8_t *ptr;
    uint64_t val;
    unsigned long pd;
    PhysPageDesc *p;

    p = phys_page_find(addr >> TARGET_PAGE_BITS);
    if (!p) {
        pd = IO_MEM_UNASSIGNED;
    } else {
        pd = p->phys_offset;
    }

    if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM &&
        !(pd & IO_MEM_ROMD)) {
        /* I/O case */
        io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
        if (p)
            addr = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
#ifdef TARGET_WORDS_BIGENDIAN
        val = (uint64_t)io_mem_read[io_index][2](io_mem_opaque[io_index], addr) << 32;
        val |= io_mem_read[io_index][2](io_mem_opaque[io_index], addr + 4);
#else
        val = io_mem_read[io_index][2](io_mem_opaque[io_index], addr);
        val |= (uint64_t)io_mem_read[io_index][2](io_mem_opaque[io_index], addr + 4) << 32;
#endif
    } else {
        /* RAM case */
        ptr = qemu_get_ram_ptr(pd & TARGET_PAGE_MASK) +
            (addr & ~TARGET_PAGE_MASK);
        val = ldq_p(ptr);
    }
    return val;
}

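/*
 * Editorial note (not in the original source): for MMIO, ldq_phys()
 * issues two 32-bit reads and reassembles them in guest byte order.
 * Worked example for a big-endian target: the word at addr supplies bits
 * 63..32 and the word at addr + 4 supplies bits 31..0, matching the
 * shifts above; the little-endian branch swaps the two roles.
 */
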
/* XXX: optimize */
uint32_t ldub_phys(target_phys_addr_t addr)
{
    uint8_t val;
    cpu_physical_memory_read(addr, &val, 1);
    return val;
}

/* XXX: optimize */
uint32_t lduw_phys(target_phys_addr_t addr)
{
    uint16_t val;
    cpu_physical_memory_read(addr, (uint8_t *)&val, 2);
    return tswap16(val);
}

/* warning: addr must be aligned.  The ram page is not marked as dirty
   and the code inside is not invalidated.  It is useful if the dirty
   bits are used to track modified PTEs */
void stl_phys_notdirty(target_phys_addr_t addr, uint32_t val)
{
    int io_index;
    uint8_t *ptr;
    unsigned long pd;
    PhysPageDesc *p;

    p = phys_page_find(addr >> TARGET_PAGE_BITS);
    if (!p) {
        pd = IO_MEM_UNASSIGNED;
    } else {
        pd = p->phys_offset;
    }

    if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
        io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
        if (p)
            addr = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
        io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val);
    } else {
        unsigned long addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
        ptr = qemu_get_ram_ptr(addr1);
        stl_p(ptr, val);

        if (unlikely(in_migration)) {
            if (!cpu_physical_memory_is_dirty(addr1)) {
                /* invalidate code */
                tb_invalidate_phys_page_range(addr1, addr1 + 4, 0);
                /* set dirty bit */
                phys_ram_dirty[addr1 >> TARGET_PAGE_BITS] |=
                    (0xff & ~CODE_DIRTY_FLAG);
            }
        }
    }
}

void stq_phys_notdirty(target_phys_addr_t addr, uint64_t val)
{
    int io_index;
    uint8_t *ptr;
    unsigned long pd;
    PhysPageDesc *p;

    p = phys_page_find(addr >> TARGET_PAGE_BITS);
    if (!p) {
        pd = IO_MEM_UNASSIGNED;
    } else {
        pd = p->phys_offset;
    }

    if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
        io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
        if (p)
            addr = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
#ifdef TARGET_WORDS_BIGENDIAN
        io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val >> 32);
        io_mem_write[io_index][2](io_mem_opaque[io_index], addr + 4, val);
#else
        io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val);
        io_mem_write[io_index][2](io_mem_opaque[io_index], addr + 4, val >> 32);
#endif
    } else {
        ptr = qemu_get_ram_ptr(pd & TARGET_PAGE_MASK) +
            (addr & ~TARGET_PAGE_MASK);
        stq_p(ptr, val);
    }
}

/* warning: addr must be aligned */
void stl_phys(target_phys_addr_t addr, uint32_t val)
{
    int io_index;
    uint8_t *ptr;
    unsigned long pd;
    PhysPageDesc *p;

    p = phys_page_find(addr >> TARGET_PAGE_BITS);
    if (!p) {
        pd = IO_MEM_UNASSIGNED;
    } else {
        pd = p->phys_offset;
    }

    if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
        io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
        if (p)
            addr = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
        io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val);
    } else {
        unsigned long addr1;
        addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
        /* RAM case */
        ptr = qemu_get_ram_ptr(addr1);
        stl_p(ptr, val);
        if (!cpu_physical_memory_is_dirty(addr1)) {
            /* invalidate code */
            tb_invalidate_phys_page_range(addr1, addr1 + 4, 0);
            /* set dirty bit */
            phys_ram_dirty[addr1 >> TARGET_PAGE_BITS] |=
                (0xff & ~CODE_DIRTY_FLAG);
        }
    }
}

/* XXX: optimize */
void stb_phys(target_phys_addr_t addr, uint32_t val)
{
    uint8_t v = val;
    cpu_physical_memory_write(addr, &v, 1);
}

/* XXX: optimize */
void stw_phys(target_phys_addr_t addr, uint32_t val)
{
    uint16_t v = tswap16(val);
    cpu_physical_memory_write(addr, (const uint8_t *)&v, 2);
}

/* XXX: optimize */
void stq_phys(target_phys_addr_t addr, uint64_t val)
{
    val = tswap64(val);
    cpu_physical_memory_write(addr, (const uint8_t *)&val, 8);
}

/* virtual memory access for debug (includes writing to ROM) */
int cpu_memory_rw_debug(CPUState *env, target_ulong addr,
                        uint8_t *buf, int len, int is_write)
{
    int l;
    target_phys_addr_t phys_addr;
    target_ulong page;

    while (len > 0) {
        page = addr & TARGET_PAGE_MASK;
        phys_addr = cpu_get_phys_page_debug(env, page);
        /* if no physical page mapped, return an error */
        if (phys_addr == -1)
            return -1;
        l = (page + TARGET_PAGE_SIZE) - addr;
        if (l > len)
            l = len;
        phys_addr += (addr & ~TARGET_PAGE_MASK);
        if (is_write)
            cpu_physical_memory_write_rom(phys_addr, buf, l);
        else
            cpu_physical_memory_rw(phys_addr, buf, l, is_write);
        len -= l;
        buf += l;
        addr += l;
    }
    return 0;
}
#endif

/* in deterministic execution mode, instructions doing device I/Os
   must be at the end of the TB */
void cpu_io_recompile(CPUState *env, void *retaddr)
{
    TranslationBlock *tb;
    uint32_t n, cflags;
    target_ulong pc, cs_base;
    uint64_t flags;

    tb = tb_find_pc((unsigned long)retaddr);
    if (!tb) {
        cpu_abort(env, "cpu_io_recompile: could not find TB for pc=%p",
                  retaddr);
    }
    n = env->icount_decr.u16.low + tb->icount;
    cpu_restore_state(tb, env, (unsigned long)retaddr, NULL);
    /* Calculate how many instructions had been executed before the fault
       occurred.  */
    n = n - env->icount_decr.u16.low;
    /* Generate a new TB ending on the I/O insn.  */
    n++;
    /* On MIPS and SH, delay slot instructions can only be restarted if
       they were already the first instruction in the TB.  If this is not
       the first instruction in a TB then re-execute the preceding
       branch.  */
#if defined(TARGET_MIPS)
    if ((env->hflags & MIPS_HFLAG_BMASK) != 0 && n > 1) {
        env->active_tc.PC -= 4;
        env->icount_decr.u16.low++;
        env->hflags &= ~MIPS_HFLAG_BMASK;
    }
#elif defined(TARGET_SH4)
    if ((env->flags & ((DELAY_SLOT | DELAY_SLOT_CONDITIONAL))) != 0
        && n > 1) {
        env->pc -= 2;
        env->icount_decr.u16.low++;
        env->flags &= ~(DELAY_SLOT | DELAY_SLOT_CONDITIONAL);
    }
#endif
    /* This should never happen.  */
    if (n > CF_COUNT_MASK)
        cpu_abort(env, "TB too big during recompile");

    cflags = n | CF_LAST_IO;
    pc = tb->pc;
    cs_base = tb->cs_base;
    flags = tb->flags;
    tb_phys_invalidate(tb, -1);
    /* FIXME: In theory this could raise an exception.  In practice
       we have already translated the block once so it's probably ok.  */
    tb_gen_code(env, pc, cs_base, flags, cflags);
    /* TODO: If env->pc != tb->pc (i.e. the faulting instruction was not
       the first in the TB) then we end up generating a whole new TB and
       repeating the fault, which is horribly inefficient.
       Better would be to execute just this insn uncached, or generate a
       second new TB.  */
    cpu_resume_from_signal(env, NULL);
}

void dump_exec_info(FILE *f,
                    int (*cpu_fprintf)(FILE *f, const char *fmt, ...))
{
    int i, target_code_size, max_target_code_size;
    int direct_jmp_count, direct_jmp2_count, cross_page;
    TranslationBlock *tb;

    target_code_size = 0;
    max_target_code_size = 0;
    cross_page = 0;
    direct_jmp_count = 0;
    direct_jmp2_count = 0;
    for (i = 0; i < nb_tbs; i++) {
        tb = &tbs[i];
        target_code_size += tb->size;
        if (tb->size > max_target_code_size)
            max_target_code_size = tb->size;
        if (tb->page_addr[1] != -1)
            cross_page++;
        if (tb->tb_next_offset[0] != 0xffff) {
            direct_jmp_count++;
            if (tb->tb_next_offset[1] != 0xffff) {
                direct_jmp2_count++;
            }
        }
    }
    /* XXX: avoid using doubles? */
    cpu_fprintf(f, "Translation buffer state:\n");
    cpu_fprintf(f, "gen code size       %ld/%ld\n",
                code_gen_ptr - code_gen_buffer, code_gen_buffer_max_size);
    cpu_fprintf(f, "TB count            %d/%d\n",
                nb_tbs, code_gen_max_blocks);
    cpu_fprintf(f, "TB avg target size  %d max=%d bytes\n",
                nb_tbs ? target_code_size / nb_tbs : 0,
                max_target_code_size);
    cpu_fprintf(f, "TB avg host size    %d bytes (expansion ratio: %0.1f)\n",
                nb_tbs ? (code_gen_ptr - code_gen_buffer) / nb_tbs : 0,
                target_code_size ? (double) (code_gen_ptr - code_gen_buffer) / target_code_size : 0);
    cpu_fprintf(f, "cross page TB count %d (%d%%)\n",
                cross_page,
                nb_tbs ? (cross_page * 100) / nb_tbs : 0);
    cpu_fprintf(f, "direct jump count   %d (%d%%) (2 jumps=%d %d%%)\n",
                direct_jmp_count,
                nb_tbs ? (direct_jmp_count * 100) / nb_tbs : 0,
                direct_jmp2_count,
                nb_tbs ? (direct_jmp2_count * 100) / nb_tbs : 0);
    cpu_fprintf(f, "\nStatistics:\n");
    cpu_fprintf(f, "TB flush count      %d\n", tb_flush_count);
    cpu_fprintf(f, "TB invalidate count %d\n", tb_phys_invalidate_count);
    cpu_fprintf(f, "TLB flush count     %d\n", tlb_flush_count);
    tcg_dump_info(f, cpu_fprintf);
}

#if !defined(CONFIG_USER_ONLY)

#define MMUSUFFIX _cmmu
#define GETPC() NULL
#define env cpu_single_env
#define SOFTMMU_CODE_ACCESS

#define SHIFT 0
#include "softmmu_template.h"

#define SHIFT 1
#include "softmmu_template.h"

#define SHIFT 2
#include "softmmu_template.h"

#define SHIFT 3
#include "softmmu_template.h"

#undef env

#endif