/*
 *  Host code generation
 *
 *  Copyright (c) 2003 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */
#ifdef _WIN32
#include <windows.h>
#endif
#include "qemu/osdep.h"

#include "qemu-common.h"
#define NO_CPU_IO_DEFS
#include "cpu.h"
#include "trace.h"
#include "disas/disas.h"
#include "exec/exec-all.h"
#include "tcg.h"
#if defined(CONFIG_USER_ONLY)
#include "qemu.h"
#if defined(__FreeBSD__) || defined(__FreeBSD_kernel__)
#include <sys/param.h>
#if __FreeBSD_version >= 700104
#define HAVE_KINFO_GETVMMAP
#define sigqueue sigqueue_freebsd  /* avoid redefinition */
#include <sys/proc.h>
#include <machine/profile.h>
#define _KERNEL
#include <sys/user.h>
#undef _KERNEL
#undef sigqueue
#include <libutil.h>
#endif
#endif
#else
#include "exec/address-spaces.h"
#endif

#include "exec/cputlb.h"
#include "exec/tb-hash.h"
#include "translate-all.h"
#include "qemu/bitmap.h"
#include "qemu/timer.h"
#include "exec/log.h"

//#define DEBUG_TB_INVALIDATE
//#define DEBUG_FLUSH
/* make various TB consistency checks */
//#define DEBUG_TB_CHECK

#if !defined(CONFIG_USER_ONLY)
/* TB consistency checks only implemented for usermode emulation.  */
#undef DEBUG_TB_CHECK
#endif

#define SMC_BITMAP_USE_THRESHOLD 10

typedef struct PageDesc {
    /* list of TBs intersecting this ram page */
    TranslationBlock *first_tb;
#ifdef CONFIG_SOFTMMU
    /* in order to optimize self modifying code, we count the number
       of lookups we do to a given page to use a bitmap */
    unsigned int code_write_count;
    unsigned long *code_bitmap;
#else
    unsigned long flags;
#endif
} PageDesc;

/* In system mode we want L1_MAP to be based on ram offsets,
   while in user mode we want it to be based on virtual addresses.  */
#if !defined(CONFIG_USER_ONLY)
#if HOST_LONG_BITS < TARGET_PHYS_ADDR_SPACE_BITS
# define L1_MAP_ADDR_SPACE_BITS  HOST_LONG_BITS
#else
# define L1_MAP_ADDR_SPACE_BITS  TARGET_PHYS_ADDR_SPACE_BITS
#endif
#else
# define L1_MAP_ADDR_SPACE_BITS  TARGET_VIRT_ADDR_SPACE_BITS
#endif

/* Size of the L2 (and L3, etc) page tables.  */
#define V_L2_BITS 10
#define V_L2_SIZE (1 << V_L2_BITS)

uintptr_t qemu_host_page_size;
intptr_t qemu_host_page_mask;

/*
 * L1 Mapping properties
 */
static int v_l1_size;
static int v_l1_shift;
static int v_l2_levels;

/* The bottom level has pointers to PageDesc, and is indexed by
 * anything from 4 to (V_L2_BITS + 3) bits, depending on target page size.
 */
#define V_L1_MIN_BITS 4
#define V_L1_MAX_BITS (V_L2_BITS + 3)
#define V_L1_MAX_SIZE (1 << V_L1_MAX_BITS)

static void *l1_map[V_L1_MAX_SIZE];

/* code generation context */
TCGContext tcg_ctx;
bool parallel_cpus;

/* translation block context */
#ifdef CONFIG_USER_ONLY
__thread int have_tb_lock;
#endif

static void page_table_config_init(void)
{
    uint32_t v_l1_bits;

    assert(TARGET_PAGE_BITS);
    /* The bits remaining after N lower levels of page tables.  */
    v_l1_bits = (L1_MAP_ADDR_SPACE_BITS - TARGET_PAGE_BITS) % V_L2_BITS;
    if (v_l1_bits < V_L1_MIN_BITS) {
        v_l1_bits += V_L2_BITS;
    }

    v_l1_size = 1 << v_l1_bits;
    v_l1_shift = L1_MAP_ADDR_SPACE_BITS - TARGET_PAGE_BITS - v_l1_bits;
    v_l2_levels = v_l1_shift / V_L2_BITS - 1;

    assert(v_l1_bits <= V_L1_MAX_BITS);
    assert(v_l1_shift % V_L2_BITS == 0);
    assert(v_l2_levels >= 0);
}

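/* For example, with a 47-bit guest virtual address space and 4 KiB
   target pages (TARGET_PAGE_BITS == 12), 35 bits remain to translate:
   v_l1_bits == 35 % 10 == 5, giving a 32-entry L1 table, v_l1_shift
   == 30 and v_l2_levels == 2, so the remaining 30 bits are consumed
   by two interior levels and the bottom level of 10 bits each. */
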
void tb_lock(void)
{
#ifdef CONFIG_USER_ONLY
    assert(!have_tb_lock);
    qemu_mutex_lock(&tcg_ctx.tb_ctx.tb_lock);
    have_tb_lock++;
#endif
}

void tb_unlock(void)
{
#ifdef CONFIG_USER_ONLY
    assert(have_tb_lock);
    have_tb_lock--;
    qemu_mutex_unlock(&tcg_ctx.tb_ctx.tb_lock);
#endif
}

void tb_lock_reset(void)
{
#ifdef CONFIG_USER_ONLY
    if (have_tb_lock) {
        qemu_mutex_unlock(&tcg_ctx.tb_ctx.tb_lock);
        have_tb_lock = 0;
    }
#endif
}

static TranslationBlock *tb_find_pc(uintptr_t tc_ptr);

void cpu_gen_init(void)
{
    tcg_context_init(&tcg_ctx);
}

/* Encode VAL as a signed leb128 sequence at P.
   Return P incremented past the encoded value.  */
static uint8_t *encode_sleb128(uint8_t *p, target_long val)
{
    int more, byte;

    do {
        byte = val & 0x7f;
        val >>= 7;
        more = !((val == 0 && (byte & 0x40) == 0)
                 || (val == -1 && (byte & 0x40) != 0));
        if (more) {
            byte |= 0x80;
        }
        *p++ = byte;
    } while (more);

    return p;
}

/* Decode a signed leb128 sequence at *PP; increment *PP past the
   decoded value.  Return the decoded value.  */
static target_long decode_sleb128(uint8_t **pp)
{
    uint8_t *p = *pp;
    target_long val = 0;
    int byte, shift = 0;

    do {
        byte = *p++;
        val |= (target_ulong)(byte & 0x7f) << shift;
        shift += 7;
    } while (byte & 0x80);
    if (shift < TARGET_LONG_BITS && (byte & 0x40)) {
        val |= -(target_ulong)1 << shift;
    }

    *pp = p;
    return val;
}

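/* A worked example of the sleb128 coding above: -3 encodes as the
   single byte 0x7d (sign bit 0x40 set, no continuation bit), while
   128 needs two bytes, 0x80 0x01 (low seven bits with the
   continuation bit, then the remaining high bit). */
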
/* Encode the data collected about the instructions while compiling TB.
   Place the data at BLOCK, and return the number of bytes consumed.

   The logical table consists of TARGET_INSN_START_WORDS target_ulong's,
   which come from the target's insn_start data, followed by a uintptr_t
   which comes from the host pc of the end of the code implementing the insn.

   Each line of the table is encoded as sleb128 deltas from the previous
   line.  The seed for the first line is { tb->pc, 0..., tb->tc_ptr }.
   That is, the first column is seeded with the guest pc, the last column
   with the host pc, and the middle columns with zeros.  */

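/* For instance, with TARGET_INSN_START_WORDS == 1 a two-insn TB is
   encoded as four sleb128 values: insn 0's pc delta from tb->pc
   (typically zero), insn 0's host end offset, insn 1's pc delta from
   insn 0, and insn 1's host end delta from insn 0's -- usually only a
   byte or two per guest instruction. */
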
static int encode_search(TranslationBlock *tb, uint8_t *block)
{
    uint8_t *highwater = tcg_ctx.code_gen_highwater;
    uint8_t *p = block;
    int i, j, n;

    tb->tc_search = block;

    for (i = 0, n = tb->icount; i < n; ++i) {
        target_ulong prev;

        for (j = 0; j < TARGET_INSN_START_WORDS; ++j) {
            if (i == 0) {
                prev = (j == 0 ? tb->pc : 0);
            } else {
                prev = tcg_ctx.gen_insn_data[i - 1][j];
            }
            p = encode_sleb128(p, tcg_ctx.gen_insn_data[i][j] - prev);
        }
        prev = (i == 0 ? 0 : tcg_ctx.gen_insn_end_off[i - 1]);
        p = encode_sleb128(p, tcg_ctx.gen_insn_end_off[i] - prev);

        /* Test for (pending) buffer overflow.  The assumption is that any
           one row beginning below the high water mark cannot overrun
           the buffer completely.  Thus we can test for overflow after
           encoding a row without having to check during encoding.  */
        if (unlikely(p > highwater)) {
            return -1;
        }
    }

    return p - block;
}

/* The cpu state corresponding to 'searched_pc' is restored.  */
static int cpu_restore_state_from_tb(CPUState *cpu, TranslationBlock *tb,
                                     uintptr_t searched_pc)
{
    target_ulong data[TARGET_INSN_START_WORDS] = { tb->pc };
    uintptr_t host_pc = (uintptr_t)tb->tc_ptr;
    CPUArchState *env = cpu->env_ptr;
    uint8_t *p = tb->tc_search;
    int i, j, num_insns = tb->icount;
#ifdef CONFIG_PROFILER
    int64_t ti = profile_getclock();
#endif

    searched_pc -= GETPC_ADJ;

    if (searched_pc < host_pc) {
        return -1;
    }

    /* Reconstruct the stored insn data while looking for the point at
       which the end of the insn exceeds the searched_pc.  */
    for (i = 0; i < num_insns; ++i) {
        for (j = 0; j < TARGET_INSN_START_WORDS; ++j) {
            data[j] += decode_sleb128(&p);
        }
        host_pc += decode_sleb128(&p);
        if (host_pc > searched_pc) {
            goto found;
        }
    }
    return -1;

 found:
    if (tb->cflags & CF_USE_ICOUNT) {
        assert(use_icount);
        /* Reset the cycle counter to the start of the block.  */
        cpu->icount_decr.u16.low += num_insns;
        /* Clear the IO flag.  */
        cpu->can_do_io = 0;
    }
    cpu->icount_decr.u16.low -= i;
    restore_state_to_opc(env, tb, data);

#ifdef CONFIG_PROFILER
    tcg_ctx.restore_time += profile_getclock() - ti;
    tcg_ctx.restore_count++;
#endif
    return 0;
}

bool cpu_restore_state(CPUState *cpu, uintptr_t retaddr)
{
    TranslationBlock *tb;

    tb = tb_find_pc(retaddr);
    if (tb) {
        cpu_restore_state_from_tb(cpu, tb, retaddr);
        if (tb->cflags & CF_NOCACHE) {
            /* one-shot translation, invalidate it immediately */
            tb_phys_invalidate(tb, -1);
            tb_free(tb);
        }
        return true;
    }
    return false;
}

void page_size_init(void)
{
    /* NOTE: we can always suppose that qemu_host_page_size >=
       TARGET_PAGE_SIZE */
    qemu_real_host_page_size = getpagesize();
    qemu_real_host_page_mask = -(intptr_t)qemu_real_host_page_size;
    if (qemu_host_page_size == 0) {
        qemu_host_page_size = qemu_real_host_page_size;
    }
    if (qemu_host_page_size < TARGET_PAGE_SIZE) {
        qemu_host_page_size = TARGET_PAGE_SIZE;
    }
    qemu_host_page_mask = -(intptr_t)qemu_host_page_size;
}

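/* E.g. on a 64-bit host with 4 KiB pages, qemu_real_host_page_mask
   becomes 0xfffffffffffff000; if the target uses 8 KiB pages,
   qemu_host_page_size is raised to 8 KiB so that the invariant noted
   above, qemu_host_page_size >= TARGET_PAGE_SIZE, always holds. */
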
static void page_init(void)
{
    page_size_init();
    page_table_config_init();

#if defined(CONFIG_BSD) && defined(CONFIG_USER_ONLY)
    {
#ifdef HAVE_KINFO_GETVMMAP
        struct kinfo_vmentry *freep;
        int i, cnt;

        freep = kinfo_getvmmap(getpid(), &cnt);
        if (freep) {
            mmap_lock();
            for (i = 0; i < cnt; i++) {
                unsigned long startaddr, endaddr;

                startaddr = freep[i].kve_start;
                endaddr = freep[i].kve_end;
                if (h2g_valid(startaddr)) {
                    startaddr = h2g(startaddr) & TARGET_PAGE_MASK;

                    if (h2g_valid(endaddr)) {
                        endaddr = h2g(endaddr);
                        page_set_flags(startaddr, endaddr, PAGE_RESERVED);
                    } else {
#if TARGET_ABI_BITS <= L1_MAP_ADDR_SPACE_BITS
                        endaddr = ~0ul;
                        page_set_flags(startaddr, endaddr, PAGE_RESERVED);
#endif
                    }
                }
            }
            free(freep);
            mmap_unlock();
        }
#else
        FILE *f;

        last_brk = (unsigned long)sbrk(0);

        f = fopen("/compat/linux/proc/self/maps", "r");
        if (f) {
            mmap_lock();

            do {
                unsigned long startaddr, endaddr;
                int n;

                n = fscanf(f, "%lx-%lx %*[^\n]\n", &startaddr, &endaddr);

                if (n == 2 && h2g_valid(startaddr)) {
                    startaddr = h2g(startaddr) & TARGET_PAGE_MASK;

                    if (h2g_valid(endaddr)) {
                        endaddr = h2g(endaddr);
                    } else {
                        endaddr = ~0ul;
                    }
                    page_set_flags(startaddr, endaddr, PAGE_RESERVED);
                }
            } while (!feof(f));

            fclose(f);
            mmap_unlock();
        }
#endif
    }
#endif
}

/* If alloc=1:
 * Called with mmap_lock held for user-mode emulation.
 */
static PageDesc *page_find_alloc(tb_page_addr_t index, int alloc)
{
    PageDesc *pd;
    void **lp;
    int i;

    /* Level 1.  Always allocated.  */
    lp = l1_map + ((index >> v_l1_shift) & (v_l1_size - 1));

    /* Level 2..N-1.  */
    for (i = v_l2_levels; i > 0; i--) {
        void **p = atomic_rcu_read(lp);

        if (p == NULL) {
            if (!alloc) {
                return NULL;
            }
            p = g_new0(void *, V_L2_SIZE);
            atomic_rcu_set(lp, p);
        }

        lp = p + ((index >> (i * V_L2_BITS)) & (V_L2_SIZE - 1));
    }

    pd = atomic_rcu_read(lp);
    if (pd == NULL) {
        if (!alloc) {
            return NULL;
        }
        pd = g_new0(PageDesc, V_L2_SIZE);
        atomic_rcu_set(lp, pd);
    }

    return pd + (index & (V_L2_SIZE - 1));
}

static inline PageDesc *page_find(tb_page_addr_t index)
{
    return page_find_alloc(index, 0);
}

#if defined(CONFIG_USER_ONLY)
/* Currently it is not recommended to allocate big chunks of data in
   user mode.  It will change when a dedicated libc will be used.  */
/* ??? 64-bit hosts ought to have no problem mmaping data outside the
   region in which the guest needs to run.  Revisit this.  */
#define USE_STATIC_CODE_GEN_BUFFER
#endif

/* Minimum size of the code gen buffer.  This number is randomly chosen,
   but not so small that we can't have a fair number of TB's live.  */
#define MIN_CODE_GEN_BUFFER_SIZE     (1024u * 1024)

/* Maximum size of the code gen buffer we'd like to use.  Unless otherwise
   indicated, this is constrained by the range of direct branches on the
   host cpu, as used by the TCG implementation of goto_tb.  */
#if defined(__x86_64__)
# define MAX_CODE_GEN_BUFFER_SIZE  (2ul * 1024 * 1024 * 1024)
#elif defined(__sparc__)
# define MAX_CODE_GEN_BUFFER_SIZE  (2ul * 1024 * 1024 * 1024)
#elif defined(__powerpc64__)
# define MAX_CODE_GEN_BUFFER_SIZE  (2ul * 1024 * 1024 * 1024)
#elif defined(__powerpc__)
# define MAX_CODE_GEN_BUFFER_SIZE  (32u * 1024 * 1024)
#elif defined(__aarch64__)
# define MAX_CODE_GEN_BUFFER_SIZE  (128ul * 1024 * 1024)
#elif defined(__arm__)
# define MAX_CODE_GEN_BUFFER_SIZE  (16u * 1024 * 1024)
#elif defined(__s390x__)
  /* We have a +- 4GB range on the branches; leave some slop.  */
# define MAX_CODE_GEN_BUFFER_SIZE  (3ul * 1024 * 1024 * 1024)
#elif defined(__mips__)
  /* We have a 256MB branch region, but leave room to make sure the
     main executable is also within that region.  */
# define MAX_CODE_GEN_BUFFER_SIZE  (128ul * 1024 * 1024)
#else
# define MAX_CODE_GEN_BUFFER_SIZE  ((size_t)-1)
#endif

#define DEFAULT_CODE_GEN_BUFFER_SIZE_1 (32u * 1024 * 1024)

#define DEFAULT_CODE_GEN_BUFFER_SIZE \
  (DEFAULT_CODE_GEN_BUFFER_SIZE_1 < MAX_CODE_GEN_BUFFER_SIZE \
   ? DEFAULT_CODE_GEN_BUFFER_SIZE_1 : MAX_CODE_GEN_BUFFER_SIZE)

static inline size_t size_code_gen_buffer(size_t tb_size)
{
    /* Size the buffer.  */
    if (tb_size == 0) {
#ifdef USE_STATIC_CODE_GEN_BUFFER
        tb_size = DEFAULT_CODE_GEN_BUFFER_SIZE;
#else
        /* ??? Needs adjustments.  */
        /* ??? If we relax the requirement that CONFIG_USER_ONLY use the
           static buffer, we could size this on RESERVED_VA, on the text
           segment size of the executable, or continue to use the default.  */
        tb_size = (unsigned long)(ram_size / 4);
#endif
    }
    if (tb_size < MIN_CODE_GEN_BUFFER_SIZE) {
        tb_size = MIN_CODE_GEN_BUFFER_SIZE;
    }
    if (tb_size > MAX_CODE_GEN_BUFFER_SIZE) {
        tb_size = MAX_CODE_GEN_BUFFER_SIZE;
    }
    return tb_size;
}

#ifdef __mips__
/* In order to use J and JAL within the code_gen_buffer, we require
   that the buffer not cross a 256MB boundary.  */
static inline bool cross_256mb(void *addr, size_t size)
{
    return ((uintptr_t)addr ^ ((uintptr_t)addr + size)) & ~0x0ffffffful;
}

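/* Example: a buffer at 0x0ff00000 with size 0x00200000 ends at
   0x10100000; the XOR of the two ends is 0x1fe00000, which has bits
   set above bit 27, so the buffer crosses a 256MB boundary.  The same
   size at 0x10000000 XORs to 0x00200000 and does not. */
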
/* We weren't able to allocate a buffer without crossing that boundary,
   so make do with the larger portion of the buffer that doesn't cross.
   Returns the new base of the buffer, and adjusts code_gen_buffer_size.  */
static inline void *split_cross_256mb(void *buf1, size_t size1)
{
    void *buf2 = (void *)(((uintptr_t)buf1 + size1) & ~0x0ffffffful);
    size_t size2 = buf1 + size1 - buf2;

    size1 = buf2 - buf1;
    if (size1 < size2) {
        size1 = size2;
        buf1 = buf2;
    }

    tcg_ctx.code_gen_buffer_size = size1;
    return buf1;
}
#endif

#ifdef USE_STATIC_CODE_GEN_BUFFER
static uint8_t static_code_gen_buffer[DEFAULT_CODE_GEN_BUFFER_SIZE]
    __attribute__((aligned(CODE_GEN_ALIGN)));

# ifdef _WIN32
static inline void do_protect(void *addr, long size, int prot)
{
    DWORD old_protect;
    VirtualProtect(addr, size, prot, &old_protect);
}

static inline void map_exec(void *addr, long size)
{
    do_protect(addr, size, PAGE_EXECUTE_READWRITE);
}

static inline void map_none(void *addr, long size)
{
    do_protect(addr, size, PAGE_NOACCESS);
}
# else
static inline void do_protect(void *addr, long size, int prot)
{
    uintptr_t start, end;

    start = (uintptr_t)addr;
    start &= qemu_real_host_page_mask;

    end = (uintptr_t)addr + size;
    end = ROUND_UP(end, qemu_real_host_page_size);

    mprotect((void *)start, end - start, prot);
}

static inline void map_exec(void *addr, long size)
{
    do_protect(addr, size, PROT_READ | PROT_WRITE | PROT_EXEC);
}

static inline void map_none(void *addr, long size)
{
    do_protect(addr, size, PROT_NONE);
}
# endif /* WIN32 */

static inline void *alloc_code_gen_buffer(void)
{
    void *buf = static_code_gen_buffer;
    size_t full_size, size;

    /* The size of the buffer, rounded down to end on a page boundary.  */
    full_size = (((uintptr_t)buf + sizeof(static_code_gen_buffer))
                 & qemu_real_host_page_mask) - (uintptr_t)buf;

    /* Reserve a guard page.  */
    size = full_size - qemu_real_host_page_size;

    /* Honor a command-line option limiting the size of the buffer.  */
    if (size > tcg_ctx.code_gen_buffer_size) {
        size = (((uintptr_t)buf + tcg_ctx.code_gen_buffer_size)
                & qemu_real_host_page_mask) - (uintptr_t)buf;
    }
    tcg_ctx.code_gen_buffer_size = size;

#ifdef __mips__
    if (cross_256mb(buf, size)) {
        buf = split_cross_256mb(buf, size);
        size = tcg_ctx.code_gen_buffer_size;
    }
#endif

    map_exec(buf, size);
    map_none(buf + size, qemu_real_host_page_size);
    qemu_madvise(buf, size, QEMU_MADV_HUGEPAGE);

    return buf;
}
#elif defined(_WIN32)
static inline void *alloc_code_gen_buffer(void)
{
    size_t size = tcg_ctx.code_gen_buffer_size;
    void *buf1, *buf2;

    /* Perform the allocation in two steps, so that the guard page
       is reserved but uncommitted.  */
    buf1 = VirtualAlloc(NULL, size + qemu_real_host_page_size,
                        MEM_RESERVE, PAGE_NOACCESS);
    if (buf1 != NULL) {
        buf2 = VirtualAlloc(buf1, size, MEM_COMMIT, PAGE_EXECUTE_READWRITE);
        assert(buf1 == buf2);
    }

    return buf1;
}
#else
static inline void *alloc_code_gen_buffer(void)
{
    int flags = MAP_PRIVATE | MAP_ANONYMOUS;
    uintptr_t start = 0;
    size_t size = tcg_ctx.code_gen_buffer_size;
    void *buf;

    /* Constrain the position of the buffer based on the host cpu.
       Note that these addresses are chosen in concert with the
       addresses assigned in the relevant linker script file.  */
# if defined(__PIE__) || defined(__PIC__)
    /* Don't bother setting a preferred location if we're building
       a position-independent executable.  We're more likely to get
       an address near the main executable if we let the kernel
       choose the address.  */
# elif defined(__x86_64__) && defined(MAP_32BIT)
    /* Force the memory down into low memory with the executable.
       Leave the choice of exact location with the kernel.  */
    flags |= MAP_32BIT;
    /* Cannot expect to map more than 800MB in low memory.  */
    if (size > 800u * 1024 * 1024) {
        tcg_ctx.code_gen_buffer_size = size = 800u * 1024 * 1024;
    }
# elif defined(__sparc__)
    start = 0x40000000ul;
# elif defined(__s390x__)
    start = 0x90000000ul;
# elif defined(__mips__)
#  if _MIPS_SIM == _ABI64
    start = 0x128000000ul;
#  else
    start = 0x08000000ul;
#  endif
# endif

    buf = mmap((void *)start, size + qemu_real_host_page_size,
               PROT_NONE, flags, -1, 0);
    if (buf == MAP_FAILED) {
        return NULL;
    }

#ifdef __mips__
    if (cross_256mb(buf, size)) {
        /* Try again, with the original still mapped, to avoid re-acquiring
           that 256mb crossing.  This time don't specify an address.  */
        size_t size2;
        void *buf2 = mmap(NULL, size + qemu_real_host_page_size,
                          PROT_NONE, flags, -1, 0);
        switch (buf2 != MAP_FAILED) {
        case 1:
            if (!cross_256mb(buf2, size)) {
                /* Success!  Use the new buffer.  */
                munmap(buf, size + qemu_real_host_page_size);
                break;
            }
            /* Failure.  Work with what we had.  */
            munmap(buf2, size + qemu_real_host_page_size);
            /* fallthru */
        default:
            /* Split the original buffer.  Free the smaller half.  */
            buf2 = split_cross_256mb(buf, size);
            size2 = tcg_ctx.code_gen_buffer_size;
            if (buf == buf2) {
                munmap(buf + size2 + qemu_real_host_page_size, size - size2);
            } else {
                munmap(buf, size - size2);
            }
            size = size2;
            break;
        }
        buf = buf2;
    }
#endif

    /* Make the final buffer accessible.  The guard page at the end
       will remain inaccessible with PROT_NONE.  */
    mprotect(buf, size, PROT_WRITE | PROT_READ | PROT_EXEC);

    /* Request large pages for the buffer.  */
    qemu_madvise(buf, size, QEMU_MADV_HUGEPAGE);

    return buf;
}
#endif /* USE_STATIC_CODE_GEN_BUFFER, WIN32, POSIX */

static inline void code_gen_alloc(size_t tb_size)
{
    tcg_ctx.code_gen_buffer_size = size_code_gen_buffer(tb_size);
    tcg_ctx.code_gen_buffer = alloc_code_gen_buffer();
    if (tcg_ctx.code_gen_buffer == NULL) {
        fprintf(stderr, "Could not allocate dynamic translator buffer\n");
        exit(1);
    }

    /* Estimate a good size for the number of TBs we can support.  We
       still haven't deducted the prologue from the buffer size here,
       but that's minimal and won't affect the estimate much.  */
    tcg_ctx.code_gen_max_blocks
        = tcg_ctx.code_gen_buffer_size / CODE_GEN_AVG_BLOCK_SIZE;
    tcg_ctx.tb_ctx.tbs = g_new(TranslationBlock, tcg_ctx.code_gen_max_blocks);

    qemu_mutex_init(&tcg_ctx.tb_ctx.tb_lock);
}

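/* The qht table initialized below is the physical hash table used by
   tb_link_page() and tb_phys_invalidate(): TBs are keyed by
   tb_hash_func(phys_pc, pc, flags), and QHT_MODE_AUTO_RESIZE lets the
   table grow as TBs accumulate between flushes. */
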
static void tb_htable_init(void)
{
    unsigned int mode = QHT_MODE_AUTO_RESIZE;

    qht_init(&tcg_ctx.tb_ctx.htable, CODE_GEN_HTABLE_SIZE, mode);
}

/* Must be called before using the QEMU cpus.  'tb_size' is the size
   (in bytes) allocated to the translation buffer.  Zero means default
   size.  */
void tcg_exec_init(unsigned long tb_size)
{
    cpu_gen_init();
    page_init();
    tb_htable_init();
    code_gen_alloc(tb_size);
#if defined(CONFIG_SOFTMMU)
    /* There's no guest base to take into account, so go ahead and
       initialize the prologue now.  */
    tcg_prologue_init(&tcg_ctx);
#endif
}

bool tcg_enabled(void)
{
    return tcg_ctx.code_gen_buffer != NULL;
}

/* Allocate a new translation block.  Flush the translation buffer if
   too many translation blocks or too much generated code.  */
static TranslationBlock *tb_alloc(target_ulong pc)
{
    TranslationBlock *tb;

    if (tcg_ctx.tb_ctx.nb_tbs >= tcg_ctx.code_gen_max_blocks) {
        return NULL;
    }
    tb = &tcg_ctx.tb_ctx.tbs[tcg_ctx.tb_ctx.nb_tbs++];
    tb->pc = pc;
    tb->cflags = 0;
    tb->invalid = false;
    return tb;
}

void tb_free(TranslationBlock *tb)
{
    /* In practice this is mostly used for single use temporary TBs.
       Ignore the hard cases and just back up if this TB happens to
       be the last one generated.  */
    if (tcg_ctx.tb_ctx.nb_tbs > 0 &&
            tb == &tcg_ctx.tb_ctx.tbs[tcg_ctx.tb_ctx.nb_tbs - 1]) {
        tcg_ctx.code_gen_ptr = tb->tc_ptr;
        tcg_ctx.tb_ctx.nb_tbs--;
    }
}

static inline void invalidate_page_bitmap(PageDesc *p)
{
#ifdef CONFIG_SOFTMMU
    g_free(p->code_bitmap);
    p->code_bitmap = NULL;
    p->code_write_count = 0;
#endif
}

/* Set to NULL all the 'first_tb' fields in all PageDescs. */
static void page_flush_tb_1(int level, void **lp)
{
    int i;

    if (*lp == NULL) {
        return;
    }
    if (level == 0) {
        PageDesc *pd = *lp;

        for (i = 0; i < V_L2_SIZE; ++i) {
            pd[i].first_tb = NULL;
            invalidate_page_bitmap(pd + i);
        }
    } else {
        void **pp = *lp;

        for (i = 0; i < V_L2_SIZE; ++i) {
            page_flush_tb_1(level - 1, pp + i);
        }
    }
}

static void page_flush_tb(void)
{
    int i, l1_sz = v_l1_size;

    for (i = 0; i < l1_sz; i++) {
        page_flush_tb_1(v_l2_levels, l1_map + i);
    }
}

/* flush all the translation blocks */
static void do_tb_flush(CPUState *cpu, void *data)
{
    unsigned tb_flush_req = (unsigned) (uintptr_t) data;

    tb_lock();

    /* If it's already been done on request of another CPU,
     * just retry.
     */
    if (tcg_ctx.tb_ctx.tb_flush_count != tb_flush_req) {
        goto done;
    }

#if defined(DEBUG_FLUSH)
    printf("qemu: flush code_size=%ld nb_tbs=%d avg_tb_size=%ld\n",
           (unsigned long)(tcg_ctx.code_gen_ptr - tcg_ctx.code_gen_buffer),
           tcg_ctx.tb_ctx.nb_tbs, tcg_ctx.tb_ctx.nb_tbs > 0 ?
           ((unsigned long)(tcg_ctx.code_gen_ptr - tcg_ctx.code_gen_buffer)) /
           tcg_ctx.tb_ctx.nb_tbs : 0);
#endif
    if ((unsigned long)(tcg_ctx.code_gen_ptr - tcg_ctx.code_gen_buffer)
        > tcg_ctx.code_gen_buffer_size) {
        cpu_abort(cpu, "Internal error: code buffer overflow\n");
    }

    CPU_FOREACH(cpu) {
        int i;

        for (i = 0; i < TB_JMP_CACHE_SIZE; ++i) {
            atomic_set(&cpu->tb_jmp_cache[i], NULL);
        }
    }

    tcg_ctx.tb_ctx.nb_tbs = 0;
    qht_reset_size(&tcg_ctx.tb_ctx.htable, CODE_GEN_HTABLE_SIZE);
    page_flush_tb();

    tcg_ctx.code_gen_ptr = tcg_ctx.code_gen_buffer;
    /* XXX: flush processor icache at this point if cache flush is
       expensive */
    atomic_mb_set(&tcg_ctx.tb_ctx.tb_flush_count,
                  tcg_ctx.tb_ctx.tb_flush_count + 1);

done:
    tb_unlock();
}

void tb_flush(CPUState *cpu)
{
    if (tcg_enabled()) {
        uintptr_t tb_flush_req = atomic_mb_read(&tcg_ctx.tb_ctx.tb_flush_count);
        async_safe_run_on_cpu(cpu, do_tb_flush, (void *) tb_flush_req);
    }
}

#ifdef DEBUG_TB_CHECK

static void
do_tb_invalidate_check(struct qht *ht, void *p, uint32_t hash, void *userp)
{
    TranslationBlock *tb = p;
    target_ulong addr = *(target_ulong *)userp;

    if (!(addr + TARGET_PAGE_SIZE <= tb->pc || addr >= tb->pc + tb->size)) {
        printf("ERROR invalidate: address=" TARGET_FMT_lx
               " PC=%08lx size=%04x\n", addr, (long)tb->pc, tb->size);
    }
}

static void tb_invalidate_check(target_ulong address)
{
    address &= TARGET_PAGE_MASK;
    qht_iter(&tcg_ctx.tb_ctx.htable, do_tb_invalidate_check, &address);
}

static void
do_tb_page_check(struct qht *ht, void *p, uint32_t hash, void *userp)
{
    TranslationBlock *tb = p;
    int flags1, flags2;

    flags1 = page_get_flags(tb->pc);
    flags2 = page_get_flags(tb->pc + tb->size - 1);
    if ((flags1 & PAGE_WRITE) || (flags2 & PAGE_WRITE)) {
        printf("ERROR page flags: PC=%08lx size=%04x f1=%x f2=%x\n",
               (long)tb->pc, tb->size, flags1, flags2);
    }
}

/* verify that all the pages have correct rights for code */
static void tb_page_check(void)
{
    qht_iter(&tcg_ctx.tb_ctx.htable, do_tb_page_check, NULL);
}

#endif

static inline void tb_page_remove(TranslationBlock **ptb, TranslationBlock *tb)
{
    TranslationBlock *tb1;
    unsigned int n1;

    for (;;) {
        tb1 = *ptb;
        n1 = (uintptr_t)tb1 & 3;
        tb1 = (TranslationBlock *)((uintptr_t)tb1 & ~3);
        if (tb1 == tb) {
            *ptb = tb1->page_next[n1];
            break;
        }
        ptb = &tb1->page_next[n1];
    }
}

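/* The page_next[] and jmp_list_next[] chains used above and below are
   intrusive lists with tagged pointers: the low two bits select which
   slot of the pointed-to TB the chain continues through (0 or 1), and
   the tag value 2 marks the head of a jump list (tb_gen_code() sets
   tb->jmp_list_first = (uintptr_t)tb | 2).  Masking with ~3 recovers
   the TranslationBlock pointer. */
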
/* remove the TB from a list of TBs jumping to the n-th jump target of the TB */
static inline void tb_remove_from_jmp_list(TranslationBlock *tb, int n)
{
    TranslationBlock *tb1;
    uintptr_t *ptb, ntb;
    unsigned int n1;

    ptb = &tb->jmp_list_next[n];
    if (*ptb) {
        /* find tb(n) in circular list */
        for (;;) {
            ntb = *ptb;
            n1 = ntb & 3;
            tb1 = (TranslationBlock *)(ntb & ~3);
            if (n1 == n && tb1 == tb) {
                break;
            }
            if (n1 == 2) {
                ptb = &tb1->jmp_list_first;
            } else {
                ptb = &tb1->jmp_list_next[n1];
            }
        }
        /* now we can suppress tb(n) from the list */
        *ptb = tb->jmp_list_next[n];

        tb->jmp_list_next[n] = (uintptr_t)NULL;
    }
}

/* reset the jump entry 'n' of a TB so that it is not chained to
   another TB */
static inline void tb_reset_jump(TranslationBlock *tb, int n)
{
    uintptr_t addr = (uintptr_t)(tb->tc_ptr + tb->jmp_reset_offset[n]);
    tb_set_jmp_target(tb, n, addr);
}

/* remove any jumps to the TB */
static inline void tb_jmp_unlink(TranslationBlock *tb)
{
    TranslationBlock *tb1;
    uintptr_t *ptb, ntb;
    unsigned int n1;

    ptb = &tb->jmp_list_first;
    for (;;) {
        ntb = *ptb;
        n1 = ntb & 3;
        tb1 = (TranslationBlock *)(ntb & ~3);
        if (n1 == 2) {
            break;
        }
        tb_reset_jump(tb1, n1);
        *ptb = tb1->jmp_list_next[n1];
        tb1->jmp_list_next[n1] = (uintptr_t)NULL;
    }
}

/* invalidate one TB */
void tb_phys_invalidate(TranslationBlock *tb, tb_page_addr_t page_addr)
{
    CPUState *cpu;
    PageDesc *p;
    uint32_t h;
    tb_page_addr_t phys_pc;

    atomic_set(&tb->invalid, true);

    /* remove the TB from the hash list */
    phys_pc = tb->page_addr[0] + (tb->pc & ~TARGET_PAGE_MASK);
    h = tb_hash_func(phys_pc, tb->pc, tb->flags);
    qht_remove(&tcg_ctx.tb_ctx.htable, tb, h);

    /* remove the TB from the page list */
    if (tb->page_addr[0] != page_addr) {
        p = page_find(tb->page_addr[0] >> TARGET_PAGE_BITS);
        tb_page_remove(&p->first_tb, tb);
        invalidate_page_bitmap(p);
    }
    if (tb->page_addr[1] != -1 && tb->page_addr[1] != page_addr) {
        p = page_find(tb->page_addr[1] >> TARGET_PAGE_BITS);
        tb_page_remove(&p->first_tb, tb);
        invalidate_page_bitmap(p);
    }

    /* remove the TB from the per-CPU jump caches */
    h = tb_jmp_cache_hash_func(tb->pc);
    CPU_FOREACH(cpu) {
        if (atomic_read(&cpu->tb_jmp_cache[h]) == tb) {
            atomic_set(&cpu->tb_jmp_cache[h], NULL);
        }
    }

    /* suppress this TB from the two jump lists */
    tb_remove_from_jmp_list(tb, 0);
    tb_remove_from_jmp_list(tb, 1);

    /* suppress any remaining jumps to this TB */
    tb_jmp_unlink(tb);

    tcg_ctx.tb_ctx.tb_phys_invalidate_count++;
}

#ifdef CONFIG_SOFTMMU
static void build_page_bitmap(PageDesc *p)
{
    int n, tb_start, tb_end;
    TranslationBlock *tb;

    p->code_bitmap = bitmap_new(TARGET_PAGE_SIZE);

    tb = p->first_tb;
    while (tb != NULL) {
        n = (uintptr_t)tb & 3;
        tb = (TranslationBlock *)((uintptr_t)tb & ~3);
        /* NOTE: this is subtle as a TB may span two physical pages */
        if (n == 0) {
            /* NOTE: tb_end may be after the end of the page, but
               it is not a problem */
            tb_start = tb->pc & ~TARGET_PAGE_MASK;
            tb_end = tb_start + tb->size;
            if (tb_end > TARGET_PAGE_SIZE) {
                tb_end = TARGET_PAGE_SIZE;
            }
        } else {
            tb_start = 0;
            tb_end = ((tb->pc + tb->size) & ~TARGET_PAGE_MASK);
        }
        bitmap_set(p->code_bitmap, tb_start, tb_end - tb_start);
        tb = tb->page_next[n];
    }
}
#endif

/* add the tb in the target page and protect it if necessary
 *
 * Called with mmap_lock held for user-mode emulation.
 */
static inline void tb_alloc_page(TranslationBlock *tb,
                                 unsigned int n, tb_page_addr_t page_addr)
{
    PageDesc *p;
#ifndef CONFIG_USER_ONLY
    bool page_already_protected;
#endif

    tb->page_addr[n] = page_addr;
    p = page_find_alloc(page_addr >> TARGET_PAGE_BITS, 1);
    tb->page_next[n] = p->first_tb;
#ifndef CONFIG_USER_ONLY
    page_already_protected = p->first_tb != NULL;
#endif
    p->first_tb = (TranslationBlock *)((uintptr_t)tb | n);
    invalidate_page_bitmap(p);

#if defined(CONFIG_USER_ONLY)
    if (p->flags & PAGE_WRITE) {
        target_ulong addr;
        PageDesc *p2;
        int prot;

        /* force the host page as non writable (writes will have a
           page fault + mprotect overhead) */
        page_addr &= qemu_host_page_mask;
        prot = 0;
        for (addr = page_addr; addr < page_addr + qemu_host_page_size;
             addr += TARGET_PAGE_SIZE) {

            p2 = page_find(addr >> TARGET_PAGE_BITS);
            if (!p2) {
                continue;
            }
            prot |= p2->flags;
            p2->flags &= ~PAGE_WRITE;
        }
        mprotect(g2h(page_addr), qemu_host_page_size,
                 (prot & PAGE_BITS) & ~PAGE_WRITE);
#ifdef DEBUG_TB_INVALIDATE
        printf("protecting code page: 0x" TARGET_FMT_lx "\n",
               page_addr);
#endif
    }
#else
    /* if some code is already present, then the pages are already
       protected.  So we handle the case where only the first TB is
       allocated in a physical page */
    if (!page_already_protected) {
        tlb_protect_code(page_addr);
    }
#endif
}

/* add a new TB and link it to the physical page tables.  phys_page2 is
 * (-1) to indicate that only one page contains the TB.
 *
 * Called with mmap_lock held for user-mode emulation.
 */
static void tb_link_page(TranslationBlock *tb, tb_page_addr_t phys_pc,
                         tb_page_addr_t phys_page2)
{
    uint32_t h;

    /* add in the page list */
    tb_alloc_page(tb, 0, phys_pc & TARGET_PAGE_MASK);
    if (phys_page2 != -1) {
        tb_alloc_page(tb, 1, phys_page2);
    } else {
        tb->page_addr[1] = -1;
    }

    /* add in the hash table */
    h = tb_hash_func(phys_pc, tb->pc, tb->flags);
    qht_insert(&tcg_ctx.tb_ctx.htable, tb, h);

#ifdef DEBUG_TB_CHECK
    tb_page_check();
#endif
}

/* Called with mmap_lock held for user mode emulation.  */
TranslationBlock *tb_gen_code(CPUState *cpu,
                              target_ulong pc, target_ulong cs_base,
                              uint32_t flags, int cflags)
{
    CPUArchState *env = cpu->env_ptr;
    TranslationBlock *tb;
    tb_page_addr_t phys_pc, phys_page2;
    target_ulong virt_page2;
    tcg_insn_unit *gen_code_buf;
    int gen_code_size, search_size;
#ifdef CONFIG_PROFILER
    int64_t ti;
#endif

    phys_pc = get_page_addr_code(env, pc);
    if (use_icount && !(cflags & CF_IGNORE_ICOUNT)) {
        cflags |= CF_USE_ICOUNT;
    }

    tb = tb_alloc(pc);
    if (unlikely(!tb)) {
 buffer_overflow:
        /* flush must be done */
        tb_flush(cpu);
        mmap_unlock();
        cpu_loop_exit(cpu);
    }

    gen_code_buf = tcg_ctx.code_gen_ptr;
    tb->tc_ptr = gen_code_buf;
    tb->cs_base = cs_base;
    tb->flags = flags;
    tb->cflags = cflags;

#ifdef CONFIG_PROFILER
    tcg_ctx.tb_count1++; /* includes aborted translations because of
                            exceptions */
    ti = profile_getclock();
#endif

    tcg_func_start(&tcg_ctx);

    tcg_ctx.cpu = ENV_GET_CPU(env);
    gen_intermediate_code(env, tb);
    tcg_ctx.cpu = NULL;

    trace_translate_block(tb, tb->pc, tb->tc_ptr);

    /* generate machine code */
    tb->jmp_reset_offset[0] = TB_JMP_RESET_OFFSET_INVALID;
    tb->jmp_reset_offset[1] = TB_JMP_RESET_OFFSET_INVALID;
    tcg_ctx.tb_jmp_reset_offset = tb->jmp_reset_offset;
#ifdef USE_DIRECT_JUMP
    tcg_ctx.tb_jmp_insn_offset = tb->jmp_insn_offset;
    tcg_ctx.tb_jmp_target_addr = NULL;
#else
    tcg_ctx.tb_jmp_insn_offset = NULL;
    tcg_ctx.tb_jmp_target_addr = tb->jmp_target_addr;
#endif

#ifdef CONFIG_PROFILER
    tcg_ctx.tb_count++;
    tcg_ctx.interm_time += profile_getclock() - ti;
    tcg_ctx.code_time -= profile_getclock();
#endif

    /* ??? Overflow could be handled better here.  In particular, we
       don't need to re-do gen_intermediate_code, nor should we re-do
       the tcg optimization currently hidden inside tcg_gen_code.  All
       that should be required is to flush the TBs, allocate a new TB,
       re-initialize it per above, and re-do the actual code generation.  */
    gen_code_size = tcg_gen_code(&tcg_ctx, tb);
    if (unlikely(gen_code_size < 0)) {
        goto buffer_overflow;
    }
    search_size = encode_search(tb, (void *)gen_code_buf + gen_code_size);
    if (unlikely(search_size < 0)) {
        goto buffer_overflow;
    }

#ifdef CONFIG_PROFILER
    tcg_ctx.code_time += profile_getclock();
    tcg_ctx.code_in_len += tb->size;
    tcg_ctx.code_out_len += gen_code_size;
    tcg_ctx.search_out_len += search_size;
#endif

#ifdef DEBUG_DISAS
    if (qemu_loglevel_mask(CPU_LOG_TB_OUT_ASM) &&
        qemu_log_in_addr_range(tb->pc)) {
        qemu_log("OUT: [size=%d]\n", gen_code_size);
        log_disas(tb->tc_ptr, gen_code_size);
        qemu_log("\n");
        qemu_log_flush();
    }
#endif

    tcg_ctx.code_gen_ptr = (void *)
        ROUND_UP((uintptr_t)gen_code_buf + gen_code_size + search_size,
                 CODE_GEN_ALIGN);

    /* init jump list */
    assert(((uintptr_t)tb & 3) == 0);
    tb->jmp_list_first = (uintptr_t)tb | 2;
    tb->jmp_list_next[0] = (uintptr_t)NULL;
    tb->jmp_list_next[1] = (uintptr_t)NULL;

    /* init original jump addresses which have been set during tcg_gen_code() */
    if (tb->jmp_reset_offset[0] != TB_JMP_RESET_OFFSET_INVALID) {
        tb_reset_jump(tb, 0);
    }
    if (tb->jmp_reset_offset[1] != TB_JMP_RESET_OFFSET_INVALID) {
        tb_reset_jump(tb, 1);
    }

    /* check next page if needed */
    virt_page2 = (pc + tb->size - 1) & TARGET_PAGE_MASK;
    phys_page2 = -1;
    if ((pc & TARGET_PAGE_MASK) != virt_page2) {
        phys_page2 = get_page_addr_code(env, virt_page2);
    }
    /* As long as consistency of the TB stuff is provided by tb_lock in user
     * mode and is implicit in single-threaded softmmu emulation, no explicit
     * memory barrier is required before tb_link_page() makes the TB visible
     * through the physical hash table and physical page list.
     */
    tb_link_page(tb, phys_pc, phys_page2);
    return tb;
}

/*
 * Invalidate all TBs which intersect with the target physical address range
 * [start;end[.  NOTE: start and end may refer to *different* physical pages.
 * 'is_cpu_write_access' should be true if called from a real cpu write
 * access: the virtual CPU will exit the current TB if code is modified inside
 * this TB.
 *
 * Called with mmap_lock held for user-mode emulation.
 */
void tb_invalidate_phys_range(tb_page_addr_t start, tb_page_addr_t end)
{
    while (start < end) {
        tb_invalidate_phys_page_range(start, end, 0);
        start &= TARGET_PAGE_MASK;
        start += TARGET_PAGE_SIZE;
    }
}

/*
 * Invalidate all TBs which intersect with the target physical address range
 * [start;end[.  NOTE: start and end must refer to the *same* physical page.
 * 'is_cpu_write_access' should be true if called from a real cpu write
 * access: the virtual CPU will exit the current TB if code is modified inside
 * this TB.
 *
 * Called with mmap_lock held for user-mode emulation.
 */
void tb_invalidate_phys_page_range(tb_page_addr_t start, tb_page_addr_t end,
                                   int is_cpu_write_access)
{
    TranslationBlock *tb, *tb_next;
#if defined(TARGET_HAS_PRECISE_SMC)
    CPUState *cpu = current_cpu;
    CPUArchState *env = NULL;
#endif
    tb_page_addr_t tb_start, tb_end;
    PageDesc *p;
    int n;
#ifdef TARGET_HAS_PRECISE_SMC
    int current_tb_not_found = is_cpu_write_access;
    TranslationBlock *current_tb = NULL;
    int current_tb_modified = 0;
    target_ulong current_pc = 0;
    target_ulong current_cs_base = 0;
    uint32_t current_flags = 0;
#endif /* TARGET_HAS_PRECISE_SMC */

    p = page_find(start >> TARGET_PAGE_BITS);
    if (!p) {
        return;
    }
#if defined(TARGET_HAS_PRECISE_SMC)
    if (cpu != NULL) {
        env = cpu->env_ptr;
    }
#endif

    /* we remove all the TBs in the range [start, end[ */
    /* XXX: see if in some cases it could be faster to invalidate all
       the code */
    tb = p->first_tb;
    while (tb != NULL) {
        n = (uintptr_t)tb & 3;
        tb = (TranslationBlock *)((uintptr_t)tb & ~3);
        tb_next = tb->page_next[n];
        /* NOTE: this is subtle as a TB may span two physical pages */
        if (n == 0) {
            /* NOTE: tb_end may be after the end of the page, but
               it is not a problem */
            tb_start = tb->page_addr[0] + (tb->pc & ~TARGET_PAGE_MASK);
            tb_end = tb_start + tb->size;
        } else {
            tb_start = tb->page_addr[1];
            tb_end = tb_start + ((tb->pc + tb->size) & ~TARGET_PAGE_MASK);
        }
        if (!(tb_end <= start || tb_start >= end)) {
#ifdef TARGET_HAS_PRECISE_SMC
            if (current_tb_not_found) {
                current_tb_not_found = 0;
                current_tb = NULL;
                if (cpu->mem_io_pc) {
                    /* now we have a real cpu fault */
                    current_tb = tb_find_pc(cpu->mem_io_pc);
                }
            }
            if (current_tb == tb &&
                (current_tb->cflags & CF_COUNT_MASK) != 1) {
                /* If we are modifying the current TB, we must stop
                   its execution.  We could be more precise by checking
                   that the modification is after the current PC, but it
                   would require a specialized function to partially
                   restore the CPU state */

                current_tb_modified = 1;
                cpu_restore_state_from_tb(cpu, current_tb, cpu->mem_io_pc);
                cpu_get_tb_cpu_state(env, &current_pc, &current_cs_base,
                                     &current_flags);
            }
#endif /* TARGET_HAS_PRECISE_SMC */
            tb_phys_invalidate(tb, -1);
        }
        tb = tb_next;
    }
#if !defined(CONFIG_USER_ONLY)
    /* if no code remaining, no need to continue to use slow writes */
    if (!p->first_tb) {
        invalidate_page_bitmap(p);
        tlb_unprotect_code(start);
    }
#endif
#ifdef TARGET_HAS_PRECISE_SMC
    if (current_tb_modified) {
        /* we generate a block containing just the instruction
           modifying the memory.  It will ensure that it cannot modify
           itself */
        tb_gen_code(cpu, current_pc, current_cs_base, current_flags, 1);
        cpu_loop_exit_noexc(cpu);
    }
#endif
}

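/* In the softmmu fast path below, p->code_bitmap holds one bit per
   byte of the guest page, set iff that byte is covered by some TB.
   E.g. for an 8-byte write at page offset 0x10 on a 64-bit host:
   nr == 0x10, the bitmap word is shifted right by 16, and (b & 0xff)
   tests exactly the eight written bytes. */
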
#ifdef CONFIG_SOFTMMU
/* len must be <= 8 and start must be a multiple of len */
void tb_invalidate_phys_page_fast(tb_page_addr_t start, int len)
{
    PageDesc *p;

#if 0
    if (1) {
        qemu_log("modifying code at 0x%x size=%d EIP=%x PC=%08x\n",
                 cpu_single_env->mem_io_vaddr, len,
                 cpu_single_env->eip,
                 cpu_single_env->eip +
                 (intptr_t)cpu_single_env->segs[R_CS].base);
    }
#endif
    p = page_find(start >> TARGET_PAGE_BITS);
    if (!p) {
        return;
    }
    if (!p->code_bitmap &&
        ++p->code_write_count >= SMC_BITMAP_USE_THRESHOLD) {
        /* build code bitmap */
        build_page_bitmap(p);
    }
    if (p->code_bitmap) {
        unsigned int nr;
        unsigned long b;

        nr = start & ~TARGET_PAGE_MASK;
        b = p->code_bitmap[BIT_WORD(nr)] >> (nr & (BITS_PER_LONG - 1));
        if (b & ((1 << len) - 1)) {
            goto do_invalidate;
        }
    } else {
    do_invalidate:
        tb_invalidate_phys_page_range(start, start + len, 1);
    }
}
#else
/* Called with mmap_lock held.  If pc is not 0 then it indicates the
 * host PC of the faulting store instruction that caused this invalidate.
 * Returns true if the caller needs to abort execution of the current
 * TB (because it was modified by this store and the guest CPU has
 * precise-SMC semantics).
 */
static bool tb_invalidate_phys_page(tb_page_addr_t addr, uintptr_t pc)
{
    TranslationBlock *tb;
    PageDesc *p;
    int n;
#ifdef TARGET_HAS_PRECISE_SMC
    TranslationBlock *current_tb = NULL;
    CPUState *cpu = current_cpu;
    CPUArchState *env = NULL;
    int current_tb_modified = 0;
    target_ulong current_pc = 0;
    target_ulong current_cs_base = 0;
    uint32_t current_flags = 0;
#endif

    addr &= TARGET_PAGE_MASK;
    p = page_find(addr >> TARGET_PAGE_BITS);
    if (!p) {
        return false;
    }
    tb = p->first_tb;
#ifdef TARGET_HAS_PRECISE_SMC
    if (tb && pc != 0) {
        current_tb = tb_find_pc(pc);
    }
    if (cpu != NULL) {
        env = cpu->env_ptr;
    }
#endif
    while (tb != NULL) {
        n = (uintptr_t)tb & 3;
        tb = (TranslationBlock *)((uintptr_t)tb & ~3);
#ifdef TARGET_HAS_PRECISE_SMC
        if (current_tb == tb &&
            (current_tb->cflags & CF_COUNT_MASK) != 1) {
            /* If we are modifying the current TB, we must stop
               its execution.  We could be more precise by checking
               that the modification is after the current PC, but it
               would require a specialized function to partially
               restore the CPU state */

            current_tb_modified = 1;
            cpu_restore_state_from_tb(cpu, current_tb, pc);
            cpu_get_tb_cpu_state(env, &current_pc, &current_cs_base,
                                 &current_flags);
        }
#endif /* TARGET_HAS_PRECISE_SMC */
        tb_phys_invalidate(tb, addr);
        tb = tb->page_next[n];
    }
    p->first_tb = NULL;
#ifdef TARGET_HAS_PRECISE_SMC
    if (current_tb_modified) {
        /* we generate a block containing just the instruction
           modifying the memory.  It will ensure that it cannot modify
           itself */
        tb_gen_code(cpu, current_pc, current_cs_base, current_flags, 1);
        return true;
    }
#endif
    return false;
}
#endif

/* find the TB 'tb' such that tb[0].tc_ptr <= tc_ptr <
   tb[1].tc_ptr. Return NULL if not found */
static TranslationBlock *tb_find_pc(uintptr_t tc_ptr)
{
    int m_min, m_max, m;
    uintptr_t v;
    TranslationBlock *tb;

    if (tcg_ctx.tb_ctx.nb_tbs <= 0) {
        return NULL;
    }
    if (tc_ptr < (uintptr_t)tcg_ctx.code_gen_buffer ||
        tc_ptr >= (uintptr_t)tcg_ctx.code_gen_ptr) {
        return NULL;
    }
    /* binary search (cf Knuth) */
    m_min = 0;
    m_max = tcg_ctx.tb_ctx.nb_tbs - 1;
    while (m_min <= m_max) {
        m = (m_min + m_max) >> 1;
        tb = &tcg_ctx.tb_ctx.tbs[m];
        v = (uintptr_t)tb->tc_ptr;
        if (v == tc_ptr) {
            return tb;
        } else if (tc_ptr < v) {
            m_max = m - 1;
        } else {
            m_min = m + 1;
        }
    }
    return &tcg_ctx.tb_ctx.tbs[m_max];
}
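
/*
 * The binary search above relies on an invariant rather than explicit
 * sorting: TBs are appended to tcg_ctx.tb_ctx.tbs[] in translation
 * order, each tc_ptr is taken from code_gen_ptr, and code_gen_ptr only
 * advances between flushes, so tc_ptr values increase monotonically by
 * construction.  For a tc_ptr that falls inside a block, the loop exits
 * with m_max indexing the last TB whose tc_ptr is <= the search value,
 * i.e. the enclosing block.
 */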

#if !defined(CONFIG_USER_ONLY)
void tb_invalidate_phys_addr(AddressSpace *as, hwaddr addr)
{
    ram_addr_t ram_addr;
    MemoryRegion *mr;
    hwaddr l = 1;

    rcu_read_lock();
    mr = address_space_translate(as, addr, &addr, &l, false);
    if (!(memory_region_is_ram(mr)
          || memory_region_is_romd(mr))) {
        rcu_read_unlock();
        return;
    }
    ram_addr = memory_region_get_ram_addr(mr) + addr;
    tb_invalidate_phys_page_range(ram_addr, ram_addr + 1, 0);
    rcu_read_unlock();
}
#endif /* !defined(CONFIG_USER_ONLY) */

void tb_check_watchpoint(CPUState *cpu)
{
    TranslationBlock *tb;

    tb = tb_find_pc(cpu->mem_io_pc);
    if (tb) {
        /* We can use retranslation to find the PC. */
        cpu_restore_state_from_tb(cpu, tb, cpu->mem_io_pc);
        tb_phys_invalidate(tb, -1);
    } else {
        /* The exception probably happened in a helper. The CPU state should
           have been saved before calling it. Fetch the PC from there. */
        CPUArchState *env = cpu->env_ptr;
        target_ulong pc, cs_base;
        tb_page_addr_t addr;
        uint32_t flags;

        cpu_get_tb_cpu_state(env, &pc, &cs_base, &flags);
        addr = get_page_addr_code(env, pc);
        tb_invalidate_phys_range(addr, addr + 1);
    }
}

#ifndef CONFIG_USER_ONLY
/* in deterministic execution mode, instructions doing device I/Os
   must be at the end of the TB */
void cpu_io_recompile(CPUState *cpu, uintptr_t retaddr)
{
#if defined(TARGET_MIPS) || defined(TARGET_SH4)
    CPUArchState *env = cpu->env_ptr;
#endif
    TranslationBlock *tb;
    uint32_t n, cflags;
    target_ulong pc, cs_base;
    uint32_t flags;

    tb = tb_find_pc(retaddr);
    if (!tb) {
        cpu_abort(cpu, "cpu_io_recompile: could not find TB for pc=%p",
                  (void *)retaddr);
    }
    n = cpu->icount_decr.u16.low + tb->icount;
    cpu_restore_state_from_tb(cpu, tb, retaddr);
    /* Calculate how many instructions had been executed before the fault
       occurred. */
    n = n - cpu->icount_decr.u16.low;
    /* Generate a new TB ending on the I/O insn. */
    n++;
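    /*
     * A sketch of the arithmetic above: the TB prologue already
     * subtracted tb->icount from icount_decr.u16.low, and
     * cpu_restore_state_from_tb() credited back the instructions that
     * did not complete.  If the TB holds 5 insns and the I/O insn is
     * the third, n ends up as 2 (insns fully executed before it) and
     * n++ requests a replacement TB of 3 insns whose last one is the
     * I/O access.
     */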
    /* On MIPS and SH, delay slot instructions can only be restarted if
       they were already the first instruction in the TB. If this is not
       the first instruction in a TB then re-execute the preceding
       branch. */
#if defined(TARGET_MIPS)
    if ((env->hflags & MIPS_HFLAG_BMASK) != 0 && n > 1) {
        env->active_tc.PC -= (env->hflags & MIPS_HFLAG_B16 ? 2 : 4);
        cpu->icount_decr.u16.low++;
        env->hflags &= ~MIPS_HFLAG_BMASK;
    }
#elif defined(TARGET_SH4)
    if ((env->flags & ((DELAY_SLOT | DELAY_SLOT_CONDITIONAL))) != 0
        && n > 1) {
        env->pc -= 2;
        cpu->icount_decr.u16.low++;
        env->flags &= ~(DELAY_SLOT | DELAY_SLOT_CONDITIONAL);
    }
#endif
    /* This should never happen. */
    if (n > CF_COUNT_MASK) {
        cpu_abort(cpu, "TB too big during recompile");
    }

    cflags = n | CF_LAST_IO;
    pc = tb->pc;
    cs_base = tb->cs_base;
    flags = tb->flags;
    tb_phys_invalidate(tb, -1);
    if (tb->cflags & CF_NOCACHE) {
        if (tb->orig_tb) {
            /* Invalidate original TB if this TB was generated in
             * cpu_exec_nocache() */
            tb_phys_invalidate(tb->orig_tb, -1);
        }
        tb_free(tb);
    }
    /* FIXME: In theory this could raise an exception. In practice
       we have already translated the block once so it's probably ok. */
    tb_gen_code(cpu, pc, cs_base, flags, cflags);
    /* TODO: If env->pc != tb->pc (i.e. the faulting instruction was not
       the first in the TB) then we end up generating a whole new TB and
       repeating the fault, which is horribly inefficient.
       Better would be to execute just this insn uncached, or generate a
       second new TB. */
    cpu_loop_exit_noexc(cpu);
}

void tb_flush_jmp_cache(CPUState *cpu, target_ulong addr)
{
    unsigned int i;

    /* Discard jump cache entries for any tb which might potentially
       overlap the flushed page. */
    i = tb_jmp_cache_hash_page(addr - TARGET_PAGE_SIZE);
    memset(&cpu->tb_jmp_cache[i], 0,
           TB_JMP_PAGE_SIZE * sizeof(TranslationBlock *));

    i = tb_jmp_cache_hash_page(addr);
    memset(&cpu->tb_jmp_cache[i], 0,
           TB_JMP_PAGE_SIZE * sizeof(TranslationBlock *));
}
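
/*
 * Two hash-page lookups are needed above because a TB can cross a page
 * boundary: a TB that starts near the end of the preceding page may
 * extend into the flushed page, and its jump cache entries are hashed
 * from its start address on that preceding page.
 */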

static void print_qht_statistics(FILE *f, fprintf_function cpu_fprintf,
                                 struct qht_stats hst)
{
    uint32_t hgram_opts;
    size_t hgram_bins;
    char *hgram;

    if (!hst.head_buckets) {
        return;
    }
    cpu_fprintf(f, "TB hash buckets %zu/%zu (%0.2f%% head buckets used)\n",
                hst.used_head_buckets, hst.head_buckets,
                (double)hst.used_head_buckets / hst.head_buckets * 100);

    hgram_opts = QDIST_PR_BORDER | QDIST_PR_LABELS;
    hgram_opts |= QDIST_PR_100X | QDIST_PR_PERCENT;
    if (qdist_xmax(&hst.occupancy) - qdist_xmin(&hst.occupancy) == 1) {
        hgram_opts |= QDIST_PR_NODECIMAL;
    }
    hgram = qdist_pr(&hst.occupancy, 10, hgram_opts);
    cpu_fprintf(f, "TB hash occupancy %0.2f%% avg chain occ. Histogram: %s\n",
                qdist_avg(&hst.occupancy) * 100, hgram);
    g_free(hgram);

    hgram_opts = QDIST_PR_BORDER | QDIST_PR_LABELS;
    hgram_bins = qdist_xmax(&hst.chain) - qdist_xmin(&hst.chain);
    if (hgram_bins > 10) {
        hgram_bins = 10;
    } else {
        hgram_bins = 0;
        hgram_opts |= QDIST_PR_NODECIMAL | QDIST_PR_NOBINRANGE;
    }
    hgram = qdist_pr(&hst.chain, hgram_bins, hgram_opts);
    cpu_fprintf(f, "TB hash avg chain %0.3f buckets. Histogram: %s\n",
                qdist_avg(&hst.chain), hgram);
    g_free(hgram);
}

void dump_exec_info(FILE *f, fprintf_function cpu_fprintf)
{
    int i, target_code_size, max_target_code_size;
    int direct_jmp_count, direct_jmp2_count, cross_page;
    TranslationBlock *tb;
    struct qht_stats hst;

    target_code_size = 0;
    max_target_code_size = 0;
    cross_page = 0;
    direct_jmp_count = 0;
    direct_jmp2_count = 0;
    for (i = 0; i < tcg_ctx.tb_ctx.nb_tbs; i++) {
        tb = &tcg_ctx.tb_ctx.tbs[i];
        target_code_size += tb->size;
        if (tb->size > max_target_code_size) {
            max_target_code_size = tb->size;
        }
        if (tb->page_addr[1] != -1) {
            cross_page++;
        }
        if (tb->jmp_reset_offset[0] != TB_JMP_RESET_OFFSET_INVALID) {
            direct_jmp_count++;
            if (tb->jmp_reset_offset[1] != TB_JMP_RESET_OFFSET_INVALID) {
                direct_jmp2_count++;
            }
        }
    }
    /* XXX: avoid using doubles ? */
    cpu_fprintf(f, "Translation buffer state:\n");
    cpu_fprintf(f, "gen code size %td/%zd\n",
                tcg_ctx.code_gen_ptr - tcg_ctx.code_gen_buffer,
                tcg_ctx.code_gen_highwater - tcg_ctx.code_gen_buffer);
    cpu_fprintf(f, "TB count %d/%d\n",
                tcg_ctx.tb_ctx.nb_tbs, tcg_ctx.code_gen_max_blocks);
    cpu_fprintf(f, "TB avg target size %d max=%d bytes\n",
                tcg_ctx.tb_ctx.nb_tbs ? target_code_size /
                                        tcg_ctx.tb_ctx.nb_tbs : 0,
                max_target_code_size);
    cpu_fprintf(f, "TB avg host size %td bytes (expansion ratio: %0.1f)\n",
                tcg_ctx.tb_ctx.nb_tbs ? (tcg_ctx.code_gen_ptr -
                                         tcg_ctx.code_gen_buffer) /
                                        tcg_ctx.tb_ctx.nb_tbs : 0,
                target_code_size ? (double) (tcg_ctx.code_gen_ptr -
                                             tcg_ctx.code_gen_buffer) /
                                   target_code_size : 0);
    cpu_fprintf(f, "cross page TB count %d (%d%%)\n", cross_page,
                tcg_ctx.tb_ctx.nb_tbs ? (cross_page * 100) /
                                        tcg_ctx.tb_ctx.nb_tbs : 0);
    cpu_fprintf(f, "direct jump count %d (%d%%) (2 jumps=%d %d%%)\n",
                direct_jmp_count,
                tcg_ctx.tb_ctx.nb_tbs ? (direct_jmp_count * 100) /
                                        tcg_ctx.tb_ctx.nb_tbs : 0,
                direct_jmp2_count,
                tcg_ctx.tb_ctx.nb_tbs ? (direct_jmp2_count * 100) /
                                        tcg_ctx.tb_ctx.nb_tbs : 0);

    qht_statistics_init(&tcg_ctx.tb_ctx.htable, &hst);
    print_qht_statistics(f, cpu_fprintf, hst);
    qht_statistics_destroy(&hst);

    cpu_fprintf(f, "\nStatistics:\n");
    cpu_fprintf(f, "TB flush count %u\n",
                atomic_read(&tcg_ctx.tb_ctx.tb_flush_count));
    cpu_fprintf(f, "TB invalidate count %d\n",
                tcg_ctx.tb_ctx.tb_phys_invalidate_count);
    cpu_fprintf(f, "TLB flush count %d\n", tlb_flush_count);
    tcg_dump_info(f, cpu_fprintf);
}
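
/* dump_exec_info() is what backs the HMP monitor command "info jit",
   and dump_opcount_info() below backs "info opcount". */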

void dump_opcount_info(FILE *f, fprintf_function cpu_fprintf)
{
    tcg_dump_op_count(f, cpu_fprintf);
}

#else /* CONFIG_USER_ONLY */

void cpu_interrupt(CPUState *cpu, int mask)
{
    cpu->interrupt_request |= mask;
    cpu->tcg_exit_req = 1;
}

/*
 * Walks guest process memory "regions" one by one
 * and calls callback function 'fn' for each region.
 */
struct walk_memory_regions_data {
    walk_memory_regions_fn fn;
    void *priv;
    target_ulong start;
    int prot;
};

static int walk_memory_regions_end(struct walk_memory_regions_data *data,
                                   target_ulong end, int new_prot)
{
    if (data->start != -1u) {
        int rc = data->fn(data->priv, data->start, end, data->prot);
        if (rc != 0) {
            return rc;
        }
    }

    data->start = (new_prot ? end : -1u);
    data->prot = new_prot;

    return 0;
}

static int walk_memory_regions_1(struct walk_memory_regions_data *data,
                                 target_ulong base, int level, void **lp)
{
    target_ulong pa;
    int i, rc;

    if (*lp == NULL) {
        return walk_memory_regions_end(data, base, 0);
    }

    if (level == 0) {
        PageDesc *pd = *lp;

        for (i = 0; i < V_L2_SIZE; ++i) {
            int prot = pd[i].flags;

            pa = base | (i << TARGET_PAGE_BITS);
            if (prot != data->prot) {
                rc = walk_memory_regions_end(data, pa, prot);
                if (rc != 0) {
                    return rc;
                }
            }
        }
    } else {
        void **pp = *lp;

        for (i = 0; i < V_L2_SIZE; ++i) {
            pa = base | ((target_ulong)i <<
                         (TARGET_PAGE_BITS + V_L2_BITS * level));
            rc = walk_memory_regions_1(data, pa, level - 1, pp + i);
            if (rc != 0) {
                return rc;
            }
        }
    }

    return 0;
}

int walk_memory_regions(void *priv, walk_memory_regions_fn fn)
{
    struct walk_memory_regions_data data;
    uintptr_t i, l1_sz = v_l1_size;

    data.fn = fn;
    data.priv = priv;
    data.start = -1u;
    data.prot = 0;

    for (i = 0; i < l1_sz; i++) {
        target_ulong base = i << (v_l1_shift + TARGET_PAGE_BITS);
        int rc = walk_memory_regions_1(&data, base, v_l2_levels, l1_map + i);
        if (rc != 0) {
            return rc;
        }
    }

    return walk_memory_regions_end(&data, 0, 0);
}
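
/*
 * Usage sketch (hypothetical, not from the original source): any
 * callback matching walk_memory_regions_fn can be passed in, and a
 * non-zero return value stops the walk early.  For instance, counting
 * executable regions:
 *
 *     static int count_exec_regions(void *priv, target_ulong start,
 *                                   target_ulong end, unsigned long prot)
 *     {
 *         if (prot & PAGE_EXEC) {
 *             (*(int *)priv)++;
 *         }
 *         return 0;
 *     }
 *
 *     int n = 0;
 *     walk_memory_regions(&n, count_exec_regions);
 *
 * dump_region() below is the in-tree example of the same pattern.
 */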

static int dump_region(void *priv, target_ulong start,
                       target_ulong end, unsigned long prot)
{
    FILE *f = (FILE *)priv;

    (void) fprintf(f, TARGET_FMT_lx"-"TARGET_FMT_lx
                   " "TARGET_FMT_lx" %c%c%c\n",
                   start, end, end - start,
                   ((prot & PAGE_READ) ? 'r' : '-'),
                   ((prot & PAGE_WRITE) ? 'w' : '-'),
                   ((prot & PAGE_EXEC) ? 'x' : '-'));

    return 0;
}

/* dump memory mappings */
void page_dump(FILE *f)
{
    const int length = sizeof(target_ulong) * 2;
    (void) fprintf(f, "%-*s %-*s %-*s %s\n",
                   length, "start", length, "end", length, "size", "prot");
    walk_memory_regions(f, dump_region);
}

int page_get_flags(target_ulong address)
{
    PageDesc *p;

    p = page_find(address >> TARGET_PAGE_BITS);
    if (!p) {
        return 0;
    }
    return p->flags;
}

/* Modify the flags of a page and invalidate the code if necessary.
   The flag PAGE_WRITE_ORG is set automatically based on PAGE_WRITE.
   The mmap_lock should already be held. */
void page_set_flags(target_ulong start, target_ulong end, int flags)
{
    target_ulong addr, len;

    /* This function should never be called with addresses outside the
       guest address space. If this assert fires, it probably indicates
       a missing call to h2g_valid. */
#if TARGET_ABI_BITS > L1_MAP_ADDR_SPACE_BITS
    assert(end < ((target_ulong)1 << L1_MAP_ADDR_SPACE_BITS));
#endif
    assert(start < end);

    start = start & TARGET_PAGE_MASK;
    end = TARGET_PAGE_ALIGN(end);

    if (flags & PAGE_WRITE) {
        flags |= PAGE_WRITE_ORG;
    }

    for (addr = start, len = end - start;
         len != 0;
         len -= TARGET_PAGE_SIZE, addr += TARGET_PAGE_SIZE) {
        PageDesc *p = page_find_alloc(addr >> TARGET_PAGE_BITS, 1);

        /* If the page was write-protected (typically because it holds
           translated code) and is now being made writable, invalidate
           the TBs inside it. */
        if (!(p->flags & PAGE_WRITE) &&
            (flags & PAGE_WRITE) &&
            p->first_tb) {
            tb_invalidate_phys_page(addr, 0);
        }
        p->flags = flags;
    }
}

int page_check_range(target_ulong start, target_ulong len, int flags)
{
    PageDesc *p;
    target_ulong end;
    target_ulong addr;

    /* This function should never be called with addresses outside the
       guest address space. If this assert fires, it probably indicates
       a missing call to h2g_valid. */
#if TARGET_ABI_BITS > L1_MAP_ADDR_SPACE_BITS
    assert(start < ((target_ulong)1 << L1_MAP_ADDR_SPACE_BITS));
#endif

    if (len == 0) {
        return 0;
    }
    if (start + len - 1 < start) {
        /* We've wrapped around. */
        return -1;
    }

    /* must do before we lose bits in the next step */
    end = TARGET_PAGE_ALIGN(start + len);
    start = start & TARGET_PAGE_MASK;

    for (addr = start, len = end - start;
         len != 0;
         len -= TARGET_PAGE_SIZE, addr += TARGET_PAGE_SIZE) {
        p = page_find(addr >> TARGET_PAGE_BITS);
        if (!p) {
            return -1;
        }
        if (!(p->flags & PAGE_VALID)) {
            return -1;
        }

        if ((flags & PAGE_READ) && !(p->flags & PAGE_READ)) {
            return -1;
        }
        if (flags & PAGE_WRITE) {
            if (!(p->flags & PAGE_WRITE_ORG)) {
                return -1;
            }
            /* unprotect the page if it was put read-only because it
               contains translated code */
            if (!(p->flags & PAGE_WRITE)) {
                if (!page_unprotect(addr, 0)) {
                    return -1;
                }
            }
        }
    }
    return 0;
}
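
/*
 * Usage sketch (hypothetical): a syscall emulation path about to write
 * 'size' bytes of guest memory at 'buf' could validate the whole range
 * first:
 *
 *     if (page_check_range(buf, size, PAGE_WRITE) < 0) {
 *         return -TARGET_EFAULT;
 *     }
 *
 * Note that the PAGE_WRITE check accepts pages that are currently
 * read-only because they contain translated code: page_check_range()
 * unprotects them via page_unprotect() as a side effect.
 */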

/* called from signal handler: invalidate the code and unprotect the
 * page. Return 0 if the fault was not handled, 1 if it was handled,
 * and 2 if it was handled but the caller must cause the TB to be
 * immediately exited. (We can only return 2 if the 'pc' argument is
 * non-zero.)
 */
int page_unprotect(target_ulong address, uintptr_t pc)
{
    unsigned int prot;
    bool current_tb_invalidated;
    PageDesc *p;
    target_ulong host_start, host_end, addr;

    /* Technically this isn't safe inside a signal handler. However we
       know this only ever happens in a synchronous SEGV handler, so in
       practice it seems to be ok. */
    mmap_lock();

    p = page_find(address >> TARGET_PAGE_BITS);
    if (!p) {
        mmap_unlock();
        return 0;
    }

    /* if the page was really writable, then we change its
       protection back to writable */
    if ((p->flags & PAGE_WRITE_ORG) && !(p->flags & PAGE_WRITE)) {
        host_start = address & qemu_host_page_mask;
        host_end = host_start + qemu_host_page_size;

        prot = 0;
        current_tb_invalidated = false;
        for (addr = host_start ; addr < host_end ; addr += TARGET_PAGE_SIZE) {
            p = page_find(addr >> TARGET_PAGE_BITS);
            p->flags |= PAGE_WRITE;
            prot |= p->flags;

            /* and since the content will be modified, we must invalidate
               the corresponding translated code. */
            current_tb_invalidated |= tb_invalidate_phys_page(addr, pc);
#ifdef DEBUG_TB_CHECK
            tb_invalidate_check(addr);
#endif
        }
        mprotect((void *)g2h(host_start), qemu_host_page_size,
                 prot & PAGE_BITS);

        mmap_unlock();
        /* If current TB was invalidated return to main loop */
        return current_tb_invalidated ? 2 : 1;
    }
    mmap_unlock();
    return 0;
}
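
/*
 * Caller-side sketch (hypothetical; the real dispatch lives in the host
 * signal handling code, e.g. handle_cpu_signal()): a SEGV handler maps
 * the host fault address back to a guest address and consults the
 * return code:
 *
 *     switch (page_unprotect(h2g(fault_addr), (uintptr_t)host_pc)) {
 *     case 2:
 *         // handled, but the current TB was invalidated under our
 *         // feet: leave the TB before resuming (host-specific)
 *     case 1:
 *         return;  // handled: simply retry the faulting store
 *     case 0:
 *     default:
 *         break;   // genuine fault: deliver a guest SIGSEGV
 *     }
 */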
#endif /* CONFIG_USER_ONLY */