/*
 * Host code generation
 *
 * Copyright (c) 2003 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */
#ifdef _WIN32
#include <windows.h>
#else
#include <sys/mman.h>
#endif
#include "qemu/osdep.h"

#include "qemu-common.h"
#define NO_CPU_IO_DEFS
#include "cpu.h"
#include "trace.h"
#include "disas/disas.h"
#include "tcg.h"
#if defined(CONFIG_USER_ONLY)
#include "qemu.h"
#if defined(__FreeBSD__) || defined(__FreeBSD_kernel__)
#include <sys/param.h>
#if __FreeBSD_version >= 700104
#define HAVE_KINFO_GETVMMAP
#define sigqueue sigqueue_freebsd  /* avoid redefinition */
#include <sys/proc.h>
#include <machine/profile.h>
#define _KERNEL
#include <sys/user.h>
#undef _KERNEL
#undef sigqueue
#include <libutil.h>
#endif
#endif
#else
#include "exec/address-spaces.h"
#endif

#include "exec/cputlb.h"
#include "exec/tb-hash.h"
#include "translate-all.h"
#include "qemu/bitmap.h"
#include "qemu/timer.h"

//#define DEBUG_TB_INVALIDATE
//#define DEBUG_FLUSH
/* make various TB consistency checks */
//#define DEBUG_TB_CHECK

#if !defined(CONFIG_USER_ONLY)
/* TB consistency checks only implemented for usermode emulation.  */
#undef DEBUG_TB_CHECK
#endif

#define SMC_BITMAP_USE_THRESHOLD 10

typedef struct PageDesc {
    /* list of TBs intersecting this ram page */
    TranslationBlock *first_tb;
    /* in order to optimize self modifying code, we count the number
       of lookups we do to a given page to use a bitmap */
    unsigned int code_write_count;
    unsigned long *code_bitmap;
#if defined(CONFIG_USER_ONLY)
    unsigned long flags;
#endif
} PageDesc;

/* In system mode we want L1_MAP to be based on ram offsets,
   while in user mode we want it to be based on virtual addresses.  */
#if !defined(CONFIG_USER_ONLY)
#if HOST_LONG_BITS < TARGET_PHYS_ADDR_SPACE_BITS
# define L1_MAP_ADDR_SPACE_BITS  HOST_LONG_BITS
#else
# define L1_MAP_ADDR_SPACE_BITS  TARGET_PHYS_ADDR_SPACE_BITS
#endif
#else
# define L1_MAP_ADDR_SPACE_BITS  TARGET_VIRT_ADDR_SPACE_BITS
#endif

/* Size of the L2 (and L3, etc) page tables.  */
#define V_L2_BITS 10
#define V_L2_SIZE (1 << V_L2_BITS)

/* The bits remaining after N lower levels of page tables.  */
#define V_L1_BITS_REM \
    ((L1_MAP_ADDR_SPACE_BITS - TARGET_PAGE_BITS) % V_L2_BITS)

#if V_L1_BITS_REM < 4
#define V_L1_BITS  (V_L1_BITS_REM + V_L2_BITS)
#else
#define V_L1_BITS  V_L1_BITS_REM
#endif

#define V_L1_SIZE  ((target_ulong)1 << V_L1_BITS)

#define V_L1_SHIFT (L1_MAP_ADDR_SPACE_BITS - TARGET_PAGE_BITS - V_L1_BITS)

uintptr_t qemu_host_page_size;
intptr_t qemu_host_page_mask;

/* The bottom level has pointers to PageDesc */
static void *l1_map[V_L1_SIZE];

/* code generation context */
TCGContext tcg_ctx;

/* translation block context */
#ifdef CONFIG_USER_ONLY
__thread int have_tb_lock;
#endif

void tb_lock(void)
{
#ifdef CONFIG_USER_ONLY
    assert(!have_tb_lock);
    qemu_mutex_lock(&tcg_ctx.tb_ctx.tb_lock);
    have_tb_lock++;
#endif
}

void tb_unlock(void)
{
#ifdef CONFIG_USER_ONLY
    assert(have_tb_lock);
    have_tb_lock--;
    qemu_mutex_unlock(&tcg_ctx.tb_ctx.tb_lock);
#endif
}

void tb_lock_reset(void)
{
#ifdef CONFIG_USER_ONLY
    if (have_tb_lock) {
        qemu_mutex_unlock(&tcg_ctx.tb_ctx.tb_lock);
        have_tb_lock = 0;
    }
#endif
}

static void tb_link_page(TranslationBlock *tb, tb_page_addr_t phys_pc,
                         tb_page_addr_t phys_page2);
static TranslationBlock *tb_find_pc(uintptr_t tc_ptr);

void cpu_gen_init(void)
{
    tcg_context_init(&tcg_ctx);
}

/* Encode VAL as a signed leb128 sequence at P.
   Return P incremented past the encoded value.  */
static uint8_t *encode_sleb128(uint8_t *p, target_long val)
{
    int more, byte;

    do {
        byte = val & 0x7f;
        val >>= 7;
        more = !((val == 0 && (byte & 0x40) == 0)
                 || (val == -1 && (byte & 0x40) != 0));
        if (more) {
            byte |= 0x80;
        }
        *p++ = byte;
    } while (more);

    return p;
}

/* Decode a signed leb128 sequence at *PP; increment *PP past the
   decoded value.  Return the decoded value.  */
static target_long decode_sleb128(uint8_t **pp)
{
    uint8_t *p = *pp;
    target_long val = 0;
    int byte, shift = 0;

    do {
        byte = *p++;
        val |= (target_ulong)(byte & 0x7f) << shift;
        shift += 7;
    } while (byte & 0x80);
    if (shift < TARGET_LONG_BITS && (byte & 0x40)) {
        val |= -(target_ulong)1 << shift;
    }

    *pp = p;
    return val;
}

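/* Illustrative sketch (not part of the original file): a round-trip check
   for the sleb128 helpers above.  Values that fit in 6 bits plus sign take
   a single byte, which is why the per-insn deltas stored by encode_search()
   below are usually very compact.  The SLEB128_SELFTEST guard macro is
   hypothetical; define it locally if you want to compile the check in.  */
#ifdef SLEB128_SELFTEST
static void sleb128_selftest(void)
{
    static const target_long samples[] = {
        0, 1, -1, 63, 64, -64, -65, 12345, -12345
    };
    uint8_t buf[16];
    size_t i;

    for (i = 0; i < ARRAY_SIZE(samples); i++) {
        uint8_t *end = encode_sleb128(buf, samples[i]);
        uint8_t *p = buf;
        target_long out = decode_sleb128(&p);

        assert(out == samples[i]);  /* round trip is lossless */
        assert(p == end);           /* decoder consumed exactly what was written */
    }
}
#endif
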
/* Encode the data collected about the instructions while compiling TB.
   Place the data at BLOCK, and return the number of bytes consumed.

   The logical table consists of TARGET_INSN_START_WORDS target_ulong's,
   which come from the target's insn_start data, followed by a uintptr_t
   which comes from the host pc of the end of the code implementing the insn.

   Each line of the table is encoded as sleb128 deltas from the previous
   line.  The seed for the first line is { tb->pc, 0..., tb->tc_ptr }.
   That is, the first column is seeded with the guest pc, the last column
   with the host pc, and the middle columns with zeros.  */

static int encode_search(TranslationBlock *tb, uint8_t *block)
{
    uint8_t *highwater = tcg_ctx.code_gen_highwater;
    uint8_t *p = block;
    int i, j, n;

    tb->tc_search = block;

    for (i = 0, n = tb->icount; i < n; ++i) {
        target_ulong prev;

        for (j = 0; j < TARGET_INSN_START_WORDS; ++j) {
            if (i == 0) {
                prev = (j == 0 ? tb->pc : 0);
            } else {
                prev = tcg_ctx.gen_insn_data[i - 1][j];
            }
            p = encode_sleb128(p, tcg_ctx.gen_insn_data[i][j] - prev);
        }
        prev = (i == 0 ? 0 : tcg_ctx.gen_insn_end_off[i - 1]);
        p = encode_sleb128(p, tcg_ctx.gen_insn_end_off[i] - prev);

        /* Test for (pending) buffer overflow.  The assumption is that any
           one row beginning below the high water mark cannot overrun
           the buffer completely.  Thus we can test for overflow after
           encoding a row without having to check during encoding.  */
        if (unlikely(p > highwater)) {
            return -1;
        }
    }

    return p - block;
}

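/* Illustrative sketch (not part of the original file): how one row of the
   search table is delta-encoded, under the assumption of a single insn_start
   word per insn.  Suppose a TB at guest pc 0x1000 whose first insn ends 24
   host bytes into the generated code, and whose second insn starts at guest
   pc 0x1004 and ends at host offset 60.  Row 0 is delta'd against the seed
   { tb->pc, 0 } and row 1 against row 0, so every delta here fits in one
   sleb128 byte.  The ENCODE_SEARCH_EXAMPLE guard is hypothetical.  */
#ifdef ENCODE_SEARCH_EXAMPLE
static size_t encode_two_insn_rows(uint8_t *p)
{
    uint8_t *start = p;

    /* row 0: deltas against the seed { 0x1000, 0 } */
    p = encode_sleb128(p, 0x1000 - 0x1000);  /* guest pc delta: 0 */
    p = encode_sleb128(p, 24 - 0);           /* host end-offset delta: 24 */
    /* row 1: deltas against row 0 */
    p = encode_sleb128(p, 0x1004 - 0x1000);  /* guest pc delta: 4 */
    p = encode_sleb128(p, 60 - 24);          /* host end-offset delta: 36 */

    return p - start;                        /* 4 bytes total here */
}
#endif
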
/* The cpu state corresponding to 'searched_pc' is restored.  */
static int cpu_restore_state_from_tb(CPUState *cpu, TranslationBlock *tb,
                                     uintptr_t searched_pc)
{
    target_ulong data[TARGET_INSN_START_WORDS] = { tb->pc };
    uintptr_t host_pc = (uintptr_t)tb->tc_ptr;
    CPUArchState *env = cpu->env_ptr;
    uint8_t *p = tb->tc_search;
    int i, j, num_insns = tb->icount;
#ifdef CONFIG_PROFILER
    int64_t ti = profile_getclock();
#endif

    if (searched_pc < host_pc) {
        return -1;
    }

    /* Reconstruct the stored insn data while looking for the point at
       which the end of the insn exceeds the searched_pc.  */
    for (i = 0; i < num_insns; ++i) {
        for (j = 0; j < TARGET_INSN_START_WORDS; ++j) {
            data[j] += decode_sleb128(&p);
        }
        host_pc += decode_sleb128(&p);
        if (host_pc > searched_pc) {
            goto found;
        }
    }
    return -1;

 found:
    if (tb->cflags & CF_USE_ICOUNT) {
        assert(use_icount);
        /* Reset the cycle counter to the start of the block.  */
        cpu->icount_decr.u16.low += num_insns;
        /* Clear the IO flag.  */
        cpu->can_do_io = 0;
    }
    cpu->icount_decr.u16.low -= i;
    restore_state_to_opc(env, tb, data);

#ifdef CONFIG_PROFILER
    tcg_ctx.restore_time += profile_getclock() - ti;
    tcg_ctx.restore_count++;
#endif
    return 0;
}

bool cpu_restore_state(CPUState *cpu, uintptr_t retaddr)
{
    TranslationBlock *tb;

    tb = tb_find_pc(retaddr);
    if (tb) {
        cpu_restore_state_from_tb(cpu, tb, retaddr);
        if (tb->cflags & CF_NOCACHE) {
            /* one-shot translation, invalidate it immediately */
            cpu->current_tb = NULL;
            tb_phys_invalidate(tb, -1);
            tb_free(tb);
        }
        return true;
    }
    return false;
}

void page_size_init(void)
{
    /* NOTE: we can always suppose that qemu_host_page_size >=
       TARGET_PAGE_SIZE */
    qemu_real_host_page_size = getpagesize();
    qemu_real_host_page_mask = -(intptr_t)qemu_real_host_page_size;
    if (qemu_host_page_size == 0) {
        qemu_host_page_size = qemu_real_host_page_size;
    }
    if (qemu_host_page_size < TARGET_PAGE_SIZE) {
        qemu_host_page_size = TARGET_PAGE_SIZE;
    }
    qemu_host_page_mask = -(intptr_t)qemu_host_page_size;
}
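
/* Illustrative note (not part of the original file): for a power-of-two page
   size, -(intptr_t)size is the all-ones word with the low log2(size) bits
   clear, so "addr & mask" rounds an address down to a page boundary.  A tiny
   standalone check, guarded by the hypothetical PAGE_MASK_EXAMPLE macro,
   assuming a 4 KiB page:  */
#ifdef PAGE_MASK_EXAMPLE
static void page_mask_example(void)
{
    intptr_t size = 4096;
    intptr_t mask = -size;                      /* ...1111000000000000 */

    assert((0x12345678 & mask) == 0x12345000);  /* rounds down */
    assert((0x12345000 & mask) == 0x12345000);  /* aligned address stays put */
}
#endif
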
static void page_init(void)
{
    page_size_init();
#if defined(CONFIG_BSD) && defined(CONFIG_USER_ONLY)
    {
#ifdef HAVE_KINFO_GETVMMAP
        struct kinfo_vmentry *freep;
        int i, cnt;

        freep = kinfo_getvmmap(getpid(), &cnt);
        if (freep) {
            mmap_lock();
            for (i = 0; i < cnt; i++) {
                unsigned long startaddr, endaddr;

                startaddr = freep[i].kve_start;
                endaddr = freep[i].kve_end;
                if (h2g_valid(startaddr)) {
                    startaddr = h2g(startaddr) & TARGET_PAGE_MASK;

                    if (h2g_valid(endaddr)) {
                        endaddr = h2g(endaddr);
                        page_set_flags(startaddr, endaddr, PAGE_RESERVED);
                    } else {
#if TARGET_ABI_BITS <= L1_MAP_ADDR_SPACE_BITS
                        endaddr = ~0ul;
                        page_set_flags(startaddr, endaddr, PAGE_RESERVED);
#endif
                    }
                }
            }
            free(freep);
            mmap_unlock();
        }
#else
        FILE *f;

        last_brk = (unsigned long)sbrk(0);

        f = fopen("/compat/linux/proc/self/maps", "r");
        if (f) {
            mmap_lock();

            do {
                unsigned long startaddr, endaddr;
                int n;

                n = fscanf(f, "%lx-%lx %*[^\n]\n", &startaddr, &endaddr);

                if (n == 2 && h2g_valid(startaddr)) {
                    startaddr = h2g(startaddr) & TARGET_PAGE_MASK;

                    if (h2g_valid(endaddr)) {
                        endaddr = h2g(endaddr);
                    } else {
                        endaddr = ~0ul;
                    }
                    page_set_flags(startaddr, endaddr, PAGE_RESERVED);
                }
            } while (!feof(f));

            fclose(f);
            mmap_unlock();
        }
#endif
    }
#endif
}

/* If alloc=1:
 * Called with mmap_lock held for user-mode emulation.
 */
static PageDesc *page_find_alloc(tb_page_addr_t index, int alloc)
{
    PageDesc *pd;
    void **lp;
    int i;

    /* Level 1.  Always allocated.  */
    lp = l1_map + ((index >> V_L1_SHIFT) & (V_L1_SIZE - 1));

    /* Level 2..N-1.  */
    for (i = V_L1_SHIFT / V_L2_BITS - 1; i > 0; i--) {
        void **p = atomic_rcu_read(lp);

        if (p == NULL) {
            if (!alloc) {
                return NULL;
            }
            p = g_new0(void *, V_L2_SIZE);
            atomic_rcu_set(lp, p);
        }

        lp = p + ((index >> (i * V_L2_BITS)) & (V_L2_SIZE - 1));
    }

    pd = atomic_rcu_read(lp);
    if (pd == NULL) {
        if (!alloc) {
            return NULL;
        }
        pd = g_new0(PageDesc, V_L2_SIZE);
        atomic_rcu_set(lp, pd);
    }

    return pd + (index & (V_L2_SIZE - 1));
}

static inline PageDesc *page_find(tb_page_addr_t index)
{
    return page_find_alloc(index, 0);
}

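/* Illustrative sketch (not part of the original file): how a page index is
   split across the radix-tree levels above.  Assume, hypothetically,
   L1_MAP_ADDR_SPACE_BITS = 48, TARGET_PAGE_BITS = 12 and V_L2_BITS = 10:
   then V_L1_BITS_REM = 36 % 10 = 6, V_L1_BITS = 6, V_L1_SHIFT = 30, and a
   lookup consumes 6 + 10 + 10 + 10 = 36 index bits -- one L1 slot, two
   intermediate levels, one leaf slot.  Guarded by the hypothetical
   RADIX_INDEX_EXAMPLE macro.  */
#ifdef RADIX_INDEX_EXAMPLE
static void radix_index_example(tb_page_addr_t index)
{
    size_t l1_slot = (index >> V_L1_SHIFT) & (V_L1_SIZE - 1);
    int i;

    printf("L1 slot: %zu\n", l1_slot);
    for (i = V_L1_SHIFT / V_L2_BITS - 1; i > 0; i--) {
        printf("level %d slot: %zu\n", i,
               (size_t)((index >> (i * V_L2_BITS)) & (V_L2_SIZE - 1)));
    }
    printf("leaf slot: %zu\n", (size_t)(index & (V_L2_SIZE - 1)));
}
#endif
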
#if defined(CONFIG_USER_ONLY)
/* Currently it is not recommended to allocate big chunks of data in
   user mode.  It will change when a dedicated libc is used.  */
/* ??? 64-bit hosts ought to have no problem mmaping data outside the
   region in which the guest needs to run.  Revisit this.  */
#define USE_STATIC_CODE_GEN_BUFFER
#endif

/* Minimum size of the code gen buffer.  This number is randomly chosen,
   but not so small that we can't have a fair number of TB's live.  */
#define MIN_CODE_GEN_BUFFER_SIZE     (1024u * 1024)

/* Maximum size of the code gen buffer we'd like to use.  Unless otherwise
   indicated, this is constrained by the range of direct branches on the
   host cpu, as used by the TCG implementation of goto_tb.  */
#if defined(__x86_64__)
# define MAX_CODE_GEN_BUFFER_SIZE  (2ul * 1024 * 1024 * 1024)
#elif defined(__sparc__)
# define MAX_CODE_GEN_BUFFER_SIZE  (2ul * 1024 * 1024 * 1024)
#elif defined(__powerpc64__)
# define MAX_CODE_GEN_BUFFER_SIZE  (2ul * 1024 * 1024 * 1024)
#elif defined(__aarch64__)
# define MAX_CODE_GEN_BUFFER_SIZE  (128ul * 1024 * 1024)
#elif defined(__arm__)
# define MAX_CODE_GEN_BUFFER_SIZE  (16u * 1024 * 1024)
#elif defined(__s390x__)
  /* We have a +- 4GB range on the branches; leave some slop.  */
# define MAX_CODE_GEN_BUFFER_SIZE  (3ul * 1024 * 1024 * 1024)
#elif defined(__mips__)
  /* We have a 256MB branch region, but leave room to make sure the
     main executable is also within that region.  */
# define MAX_CODE_GEN_BUFFER_SIZE  (128ul * 1024 * 1024)
#else
# define MAX_CODE_GEN_BUFFER_SIZE  ((size_t)-1)
#endif

#define DEFAULT_CODE_GEN_BUFFER_SIZE_1 (32u * 1024 * 1024)

#define DEFAULT_CODE_GEN_BUFFER_SIZE \
  (DEFAULT_CODE_GEN_BUFFER_SIZE_1 < MAX_CODE_GEN_BUFFER_SIZE \
   ? DEFAULT_CODE_GEN_BUFFER_SIZE_1 : MAX_CODE_GEN_BUFFER_SIZE)

static inline size_t size_code_gen_buffer(size_t tb_size)
{
    /* Size the buffer.  */
    if (tb_size == 0) {
#ifdef USE_STATIC_CODE_GEN_BUFFER
        tb_size = DEFAULT_CODE_GEN_BUFFER_SIZE;
#else
        /* ??? Needs adjustments.  */
        /* ??? If we relax the requirement that CONFIG_USER_ONLY use the
           static buffer, we could size this on RESERVED_VA, on the text
           segment size of the executable, or continue to use the default.  */
        tb_size = (unsigned long)(ram_size / 4);
#endif
    }
    if (tb_size < MIN_CODE_GEN_BUFFER_SIZE) {
        tb_size = MIN_CODE_GEN_BUFFER_SIZE;
    }
    if (tb_size > MAX_CODE_GEN_BUFFER_SIZE) {
        tb_size = MAX_CODE_GEN_BUFFER_SIZE;
    }
    tcg_ctx.code_gen_buffer_size = tb_size;
    return tb_size;
}

#ifdef __mips__
/* In order to use J and JAL within the code_gen_buffer, we require
   that the buffer not cross a 256MB boundary.  */
static inline bool cross_256mb(void *addr, size_t size)
{
    return ((uintptr_t)addr ^ ((uintptr_t)addr + size)) & 0xf0000000;
}

/* We weren't able to allocate a buffer without crossing that boundary,
   so make do with the larger portion of the buffer that doesn't cross.
   Returns the new base of the buffer, and adjusts code_gen_buffer_size.  */
static inline void *split_cross_256mb(void *buf1, size_t size1)
{
    void *buf2 = (void *)(((uintptr_t)buf1 + size1) & 0xf0000000);
    size_t size2 = buf1 + size1 - buf2;

    size1 = buf2 - buf1;
    if (size1 < size2) {
        size1 = size2;
        buf1 = buf2;
    }

    tcg_ctx.code_gen_buffer_size = size1;
    return buf1;
}
#endif

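/* Illustrative sketch (not part of the original file): the XOR test above
   detects a 256MB-boundary crossing because two addresses lie in the same
   naturally aligned 256MB region exactly when they agree in bits 28 and up,
   i.e. when their XOR masked with 0xf0000000 is zero.  Guarded by the
   hypothetical CROSS_256MB_EXAMPLE macro.  */
#ifdef CROSS_256MB_EXAMPLE
static void cross_256mb_example(void)
{
    /* 8MB at 0x10000000 stays inside [0x10000000, 0x20000000): no cross */
    assert(!((0x10000000u ^ (0x10000000u + 0x00800000u)) & 0xf0000000));
    /* 2MB at 0x1ff00000 ends at 0x20100000: bits 28+ change, so it crosses */
    assert((0x1ff00000u ^ (0x1ff00000u + 0x00200000u)) & 0xf0000000);
}
#endif
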
#ifdef USE_STATIC_CODE_GEN_BUFFER
static uint8_t static_code_gen_buffer[DEFAULT_CODE_GEN_BUFFER_SIZE]
    __attribute__((aligned(CODE_GEN_ALIGN)));

# ifdef _WIN32
static inline void do_protect(void *addr, long size, int prot)
{
    DWORD old_protect;
    VirtualProtect(addr, size, prot, &old_protect);
}

static inline void map_exec(void *addr, long size)
{
    do_protect(addr, size, PAGE_EXECUTE_READWRITE);
}

static inline void map_none(void *addr, long size)
{
    do_protect(addr, size, PAGE_NOACCESS);
}
# else
static inline void do_protect(void *addr, long size, int prot)
{
    uintptr_t start, end;

    start = (uintptr_t)addr;
    start &= qemu_real_host_page_mask;

    end = (uintptr_t)addr + size;
    end = ROUND_UP(end, qemu_real_host_page_size);

    mprotect((void *)start, end - start, prot);
}

static inline void map_exec(void *addr, long size)
{
    do_protect(addr, size, PROT_READ | PROT_WRITE | PROT_EXEC);
}

static inline void map_none(void *addr, long size)
{
    do_protect(addr, size, PROT_NONE);
}
# endif /* WIN32 */

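/* Illustrative note (not part of the original file): mprotect() operates on
   whole pages, so do_protect() widens [addr, addr + size) outward -- the
   start rounds down and the end rounds up.  Assuming 4 KiB pages, protecting
   100 bytes at 0x40001f00 really covers [0x40001000, 0x40002000).  A
   standalone check, guarded by the hypothetical DO_PROTECT_EXAMPLE macro:  */
#ifdef DO_PROTECT_EXAMPLE
static void do_protect_rounding_example(void)
{
    uintptr_t page = 4096;
    uintptr_t addr = 0x40001f00, size = 100;
    uintptr_t start = addr & -page;             /* round down */
    uintptr_t end = ROUND_UP(addr + size, page); /* round up */

    assert(start == 0x40001000);
    assert(end == 0x40002000);
}
#endif
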
static inline void *alloc_code_gen_buffer(void)
{
    void *buf = static_code_gen_buffer;
    size_t full_size, size;

    /* The size of the buffer, rounded down to end on a page boundary.  */
    full_size = (((uintptr_t)buf + sizeof(static_code_gen_buffer))
                 & qemu_real_host_page_mask) - (uintptr_t)buf;

    /* Reserve a guard page.  */
    size = full_size - qemu_real_host_page_size;

    /* Honor a command-line option limiting the size of the buffer.  */
    if (size > tcg_ctx.code_gen_buffer_size) {
        size = (((uintptr_t)buf + tcg_ctx.code_gen_buffer_size)
                & qemu_real_host_page_mask) - (uintptr_t)buf;
    }
    tcg_ctx.code_gen_buffer_size = size;

#ifdef __mips__
    if (cross_256mb(buf, size)) {
        buf = split_cross_256mb(buf, size);
        size = tcg_ctx.code_gen_buffer_size;
    }
#endif

    map_exec(buf, size);
    map_none(buf + size, qemu_real_host_page_size);
    qemu_madvise(buf, size, QEMU_MADV_HUGEPAGE);

    return buf;
}
#elif defined(_WIN32)
static inline void *alloc_code_gen_buffer(void)
{
    size_t size = tcg_ctx.code_gen_buffer_size;
    void *buf1, *buf2;

    /* Perform the allocation in two steps, so that the guard page
       is reserved but uncommitted.  */
    buf1 = VirtualAlloc(NULL, size + qemu_real_host_page_size,
                        MEM_RESERVE, PAGE_NOACCESS);
    if (buf1 != NULL) {
        buf2 = VirtualAlloc(buf1, size, MEM_COMMIT, PAGE_EXECUTE_READWRITE);
        assert(buf1 == buf2);
    }

    return buf1;
}
#else
static inline void *alloc_code_gen_buffer(void)
{
    int flags = MAP_PRIVATE | MAP_ANONYMOUS;
    uintptr_t start = 0;
    size_t size = tcg_ctx.code_gen_buffer_size;
    void *buf;

    /* Constrain the position of the buffer based on the host cpu.
       Note that these addresses are chosen in concert with the
       addresses assigned in the relevant linker script file.  */
# if defined(__PIE__) || defined(__PIC__)
    /* Don't bother setting a preferred location if we're building
       a position-independent executable.  We're more likely to get
       an address near the main executable if we let the kernel
       choose the address.  */
# elif defined(__x86_64__) && defined(MAP_32BIT)
    /* Force the memory down into low memory with the executable.
       Leave the choice of exact location with the kernel.  */
    flags |= MAP_32BIT;
    /* Cannot expect to map more than 800MB in low memory.  */
    if (size > 800u * 1024 * 1024) {
        tcg_ctx.code_gen_buffer_size = size = 800u * 1024 * 1024;
    }
# elif defined(__sparc__)
    start = 0x40000000ul;
# elif defined(__s390x__)
    start = 0x90000000ul;
# elif defined(__mips__)
#  if _MIPS_SIM == _ABI64
    start = 0x128000000ul;
#  else
    start = 0x08000000ul;
#  endif
# endif

    buf = mmap((void *)start, size + qemu_real_host_page_size,
               PROT_NONE, flags, -1, 0);
    if (buf == MAP_FAILED) {
        return NULL;
    }

#ifdef __mips__
    if (cross_256mb(buf, size)) {
        /* Try again, with the original still mapped, to avoid re-acquiring
           that 256mb crossing.  This time don't specify an address.  */
        size_t size2;
        void *buf2 = mmap(NULL, size + qemu_real_host_page_size,
                          PROT_NONE, flags, -1, 0);
        switch (buf2 != MAP_FAILED) {
        case 1:
            if (!cross_256mb(buf2, size)) {
                /* Success!  Use the new buffer.  */
                munmap(buf, size);
                break;
            }
            /* Failure.  Work with what we had.  */
            munmap(buf2, size);
            /* fallthru */
        default:
            /* Split the original buffer.  Free the smaller half.  */
            buf2 = split_cross_256mb(buf, size);
            size2 = tcg_ctx.code_gen_buffer_size;
            if (buf == buf2) {
                munmap(buf + size2 + qemu_real_host_page_size, size - size2);
            } else {
                munmap(buf, size - size2);
            }
            size = size2;
            break;
        }
        buf = buf2;
    }
#endif

    /* Make the final buffer accessible.  The guard page at the end
       will remain inaccessible with PROT_NONE.  */
    mprotect(buf, size, PROT_WRITE | PROT_READ | PROT_EXEC);

    /* Request large pages for the buffer.  */
    qemu_madvise(buf, size, QEMU_MADV_HUGEPAGE);

    return buf;
}
#endif /* USE_STATIC_CODE_GEN_BUFFER, WIN32, POSIX */
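
/* Illustrative sketch (not part of the original file): the POSIX variant's
   reserve-then-commit pattern in isolation.  mmap() the region plus one
   trailing page as PROT_NONE, then mprotect() only the usable part to RWX,
   leaving an inaccessible guard page that turns a buffer overrun into a
   fault.  POSIX-only, guarded by the hypothetical GUARD_PAGE_EXAMPLE macro. */
#ifdef GUARD_PAGE_EXAMPLE
static void *guard_page_example(size_t size, size_t page_size)
{
    void *buf = mmap(NULL, size + page_size, PROT_NONE,
                     MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);

    if (buf == MAP_FAILED) {
        return NULL;
    }
    if (mprotect(buf, size, PROT_READ | PROT_WRITE | PROT_EXEC) != 0) {
        munmap(buf, size + page_size);
        return NULL;
    }
    return buf;  /* buf[size .. size + page_size) stays PROT_NONE */
}
#endif
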
static inline void code_gen_alloc(size_t tb_size)
{
    tcg_ctx.code_gen_buffer_size = size_code_gen_buffer(tb_size);
    tcg_ctx.code_gen_buffer = alloc_code_gen_buffer();
    if (tcg_ctx.code_gen_buffer == NULL) {
        fprintf(stderr, "Could not allocate dynamic translator buffer\n");
        exit(1);
    }

    /* Estimate a good size for the number of TBs we can support.  We
       still haven't deducted the prologue from the buffer size here,
       but that's minimal and won't affect the estimate much.  */
    tcg_ctx.code_gen_max_blocks
        = tcg_ctx.code_gen_buffer_size / CODE_GEN_AVG_BLOCK_SIZE;
    tcg_ctx.tb_ctx.tbs = g_new(TranslationBlock, tcg_ctx.code_gen_max_blocks);

    qemu_mutex_init(&tcg_ctx.tb_ctx.tb_lock);
}

/* Must be called before using the QEMU cpus.  'tb_size' is the size
   (in bytes) allocated to the translation buffer.  Zero means default
   size.  */
void tcg_exec_init(unsigned long tb_size)
{
    cpu_gen_init();
    page_init();
    code_gen_alloc(tb_size);
#if defined(CONFIG_SOFTMMU)
    /* There's no guest base to take into account, so go ahead and
       initialize the prologue now.  */
    tcg_prologue_init(&tcg_ctx);
#endif
}

bool tcg_enabled(void)
{
    return tcg_ctx.code_gen_buffer != NULL;
}

/* Allocate a new translation block.  Flush the translation buffer if
   too many translation blocks or too much generated code.  */
static TranslationBlock *tb_alloc(target_ulong pc)
{
    TranslationBlock *tb;

    if (tcg_ctx.tb_ctx.nb_tbs >= tcg_ctx.code_gen_max_blocks) {
        return NULL;
    }
    tb = &tcg_ctx.tb_ctx.tbs[tcg_ctx.tb_ctx.nb_tbs++];
    tb->pc = pc;
    tb->cflags = 0;
    return tb;
}

void tb_free(TranslationBlock *tb)
{
    /* In practice this is mostly used for single-use temporary TBs.
       Ignore the hard cases and just back up if this TB happens to
       be the last one generated.  */
    if (tcg_ctx.tb_ctx.nb_tbs > 0 &&
            tb == &tcg_ctx.tb_ctx.tbs[tcg_ctx.tb_ctx.nb_tbs - 1]) {
        tcg_ctx.code_gen_ptr = tb->tc_ptr;
        tcg_ctx.tb_ctx.nb_tbs--;
    }
}

static inline void invalidate_page_bitmap(PageDesc *p)
{
    g_free(p->code_bitmap);
    p->code_bitmap = NULL;
    p->code_write_count = 0;
}

/* Set to NULL all the 'first_tb' fields in all PageDescs. */
static void page_flush_tb_1(int level, void **lp)
{
    int i;

    if (*lp == NULL) {
        return;
    }
    if (level == 0) {
        PageDesc *pd = *lp;

        for (i = 0; i < V_L2_SIZE; ++i) {
            pd[i].first_tb = NULL;
            invalidate_page_bitmap(pd + i);
        }
    } else {
        void **pp = *lp;

        for (i = 0; i < V_L2_SIZE; ++i) {
            page_flush_tb_1(level - 1, pp + i);
        }
    }
}

static void page_flush_tb(void)
{
    int i;

    for (i = 0; i < V_L1_SIZE; i++) {
        page_flush_tb_1(V_L1_SHIFT / V_L2_BITS - 1, l1_map + i);
    }
}

/* flush all the translation blocks */
/* XXX: tb_flush is currently not thread safe */
void tb_flush(CPUState *cpu)
{
#if defined(DEBUG_FLUSH)
    printf("qemu: flush code_size=%ld nb_tbs=%d avg_tb_size=%ld\n",
           (unsigned long)(tcg_ctx.code_gen_ptr - tcg_ctx.code_gen_buffer),
           tcg_ctx.tb_ctx.nb_tbs, tcg_ctx.tb_ctx.nb_tbs > 0 ?
           ((unsigned long)(tcg_ctx.code_gen_ptr - tcg_ctx.code_gen_buffer)) /
           tcg_ctx.tb_ctx.nb_tbs : 0);
#endif
    if ((unsigned long)(tcg_ctx.code_gen_ptr - tcg_ctx.code_gen_buffer)
        > tcg_ctx.code_gen_buffer_size) {
        cpu_abort(cpu, "Internal error: code buffer overflow\n");
    }
    tcg_ctx.tb_ctx.nb_tbs = 0;

    CPU_FOREACH(cpu) {
        memset(cpu->tb_jmp_cache, 0, sizeof(cpu->tb_jmp_cache));
    }

    memset(tcg_ctx.tb_ctx.tb_phys_hash, 0, sizeof(tcg_ctx.tb_ctx.tb_phys_hash));
    page_flush_tb();

    tcg_ctx.code_gen_ptr = tcg_ctx.code_gen_buffer;
    /* XXX: flush processor icache at this point if cache flush is
       expensive */
    tcg_ctx.tb_ctx.tb_flush_count++;
}

#ifdef DEBUG_TB_CHECK

static void tb_invalidate_check(target_ulong address)
{
    TranslationBlock *tb;
    int i;

    address &= TARGET_PAGE_MASK;
    for (i = 0; i < CODE_GEN_PHYS_HASH_SIZE; i++) {
        for (tb = tcg_ctx.tb_ctx.tb_phys_hash[i]; tb != NULL;
                tb = tb->phys_hash_next) {
            if (!(address + TARGET_PAGE_SIZE <= tb->pc ||
                  address >= tb->pc + tb->size)) {
                printf("ERROR invalidate: address=" TARGET_FMT_lx
                       " PC=%08lx size=%04x\n",
                       address, (long)tb->pc, tb->size);
            }
        }
    }
}

/* verify that all the pages have correct rights for code */
static void tb_page_check(void)
{
    TranslationBlock *tb;
    int i, flags1, flags2;

    for (i = 0; i < CODE_GEN_PHYS_HASH_SIZE; i++) {
        for (tb = tcg_ctx.tb_ctx.tb_phys_hash[i]; tb != NULL;
                tb = tb->phys_hash_next) {
            flags1 = page_get_flags(tb->pc);
            flags2 = page_get_flags(tb->pc + tb->size - 1);
            if ((flags1 & PAGE_WRITE) || (flags2 & PAGE_WRITE)) {
                printf("ERROR page flags: PC=%08lx size=%04x f1=%x f2=%x\n",
                       (long)tb->pc, tb->size, flags1, flags2);
            }
        }
    }
}

#endif

static inline void tb_hash_remove(TranslationBlock **ptb, TranslationBlock *tb)
{
    TranslationBlock *tb1;

    for (;;) {
        tb1 = *ptb;
        if (tb1 == tb) {
            *ptb = tb1->phys_hash_next;
            break;
        }
        ptb = &tb1->phys_hash_next;
    }
}

static inline void tb_page_remove(TranslationBlock **ptb, TranslationBlock *tb)
{
    TranslationBlock *tb1;
    unsigned int n1;

    for (;;) {
        tb1 = *ptb;
        n1 = (uintptr_t)tb1 & 3;
        tb1 = (TranslationBlock *)((uintptr_t)tb1 & ~3);
        if (tb1 == tb) {
            *ptb = tb1->page_next[n1];
            break;
        }
        ptb = &tb1->page_next[n1];
    }
}

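/* Illustrative sketch (not part of the original file): the lists walked by
   tb_page_remove() store a small tag in the two low bits of each pointer,
   which is safe because TranslationBlock pointers are at least 4-byte
   aligned.  Packing and unpacking look like this; the TAGGED_PTR_EXAMPLE
   guard macro is hypothetical.  */
#ifdef TAGGED_PTR_EXAMPLE
static void tagged_ptr_example(TranslationBlock *tb)
{
    unsigned int n = 1;  /* tag: which of the TB's two page slots (0 or 1) */
    TranslationBlock *packed = (TranslationBlock *)((uintptr_t)tb | n);

    /* recover both fields from the packed word */
    unsigned int n1 = (uintptr_t)packed & 3;
    TranslationBlock *tb1 = (TranslationBlock *)((uintptr_t)packed & ~3);

    assert(n1 == n && tb1 == tb);
}
#endif
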
static inline void tb_jmp_remove(TranslationBlock *tb, int n)
{
    TranslationBlock *tb1, **ptb;
    unsigned int n1;

    ptb = &tb->jmp_next[n];
    tb1 = *ptb;
    if (tb1) {
        /* find tb(n) in circular list */
        for (;;) {
            tb1 = *ptb;
            n1 = (uintptr_t)tb1 & 3;
            tb1 = (TranslationBlock *)((uintptr_t)tb1 & ~3);
            if (n1 == n && tb1 == tb) {
                break;
            }
            if (n1 == 2) {
                ptb = &tb1->jmp_first;
            } else {
                ptb = &tb1->jmp_next[n1];
            }
        }
        /* now we can suppress tb(n) from the list */
        *ptb = tb->jmp_next[n];

        tb->jmp_next[n] = NULL;
    }
}

/* reset the jump entry 'n' of a TB so that it is not chained to
   another TB */
static inline void tb_reset_jump(TranslationBlock *tb, int n)
{
    tb_set_jmp_target(tb, n, (uintptr_t)(tb->tc_ptr + tb->tb_next_offset[n]));
}

/* invalidate one TB */
void tb_phys_invalidate(TranslationBlock *tb, tb_page_addr_t page_addr)
{
    CPUState *cpu;
    PageDesc *p;
    unsigned int h, n1;
    tb_page_addr_t phys_pc;
    TranslationBlock *tb1, *tb2;

    /* remove the TB from the hash list */
    phys_pc = tb->page_addr[0] + (tb->pc & ~TARGET_PAGE_MASK);
    h = tb_phys_hash_func(phys_pc);
    tb_hash_remove(&tcg_ctx.tb_ctx.tb_phys_hash[h], tb);

    /* remove the TB from the page list */
    if (tb->page_addr[0] != page_addr) {
        p = page_find(tb->page_addr[0] >> TARGET_PAGE_BITS);
        tb_page_remove(&p->first_tb, tb);
        invalidate_page_bitmap(p);
    }
    if (tb->page_addr[1] != -1 && tb->page_addr[1] != page_addr) {
        p = page_find(tb->page_addr[1] >> TARGET_PAGE_BITS);
        tb_page_remove(&p->first_tb, tb);
        invalidate_page_bitmap(p);
    }

    tcg_ctx.tb_ctx.tb_invalidated_flag = 1;

    /* remove the TB from the hash list */
    h = tb_jmp_cache_hash_func(tb->pc);
    CPU_FOREACH(cpu) {
        if (cpu->tb_jmp_cache[h] == tb) {
            cpu->tb_jmp_cache[h] = NULL;
        }
    }

    /* suppress this TB from the two jump lists */
    tb_jmp_remove(tb, 0);
    tb_jmp_remove(tb, 1);

    /* suppress any remaining jumps to this TB */
    tb1 = tb->jmp_first;
    for (;;) {
        n1 = (uintptr_t)tb1 & 3;
        if (n1 == 2) {
            break;
        }
        tb1 = (TranslationBlock *)((uintptr_t)tb1 & ~3);
        tb2 = tb1->jmp_next[n1];
        tb_reset_jump(tb1, n1);
        tb1->jmp_next[n1] = NULL;
        tb1 = tb2;
    }
    tb->jmp_first = (TranslationBlock *)((uintptr_t)tb | 2); /* fail safe */

    tcg_ctx.tb_ctx.tb_phys_invalidate_count++;
}

static void build_page_bitmap(PageDesc *p)
{
    int n, tb_start, tb_end;
    TranslationBlock *tb;

    p->code_bitmap = bitmap_new(TARGET_PAGE_SIZE);

    tb = p->first_tb;
    while (tb != NULL) {
        n = (uintptr_t)tb & 3;
        tb = (TranslationBlock *)((uintptr_t)tb & ~3);
        /* NOTE: this is subtle as a TB may span two physical pages */
        if (n == 0) {
            /* NOTE: tb_end may be after the end of the page, but
               it is not a problem */
            tb_start = tb->pc & ~TARGET_PAGE_MASK;
            tb_end = tb_start + tb->size;
            if (tb_end > TARGET_PAGE_SIZE) {
                tb_end = TARGET_PAGE_SIZE;
            }
        } else {
            tb_start = 0;
            tb_end = ((tb->pc + tb->size) & ~TARGET_PAGE_MASK);
        }
        bitmap_set(p->code_bitmap, tb_start, tb_end - tb_start);
        tb = tb->page_next[n];
    }
}

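/* Illustrative sketch (not part of the original file): how the per-page
   code_bitmap built above is queried, in the style of
   tb_invalidate_phys_page_fast() below.  There is one bit per byte of the
   guest page; a write of LEN bytes at offset NR touches translated code iff
   any of the LEN bits starting at NR is set.  Guarded by the hypothetical
   CODE_BITMAP_EXAMPLE macro.  */
#ifdef CODE_BITMAP_EXAMPLE
static bool code_bitmap_example(void)
{
    unsigned long *bm = bitmap_new(TARGET_PAGE_SIZE);
    unsigned int nr = 0x100;   /* write offset within the page */
    int len = 4;               /* write length, <= 8 */
    unsigned long b;
    bool hit;

    bitmap_set(bm, 0x102, 16); /* pretend a TB covers bytes 0x102..0x111 */
    b = bm[BIT_WORD(nr)] >> (nr & (BITS_PER_LONG - 1));
    hit = b & ((1 << len) - 1);  /* window 0x100..0x103 overlaps at 0x102 */
    g_free(bm);
    return hit;                  /* true -> the write must invalidate TBs */
}
#endif
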
/* Called with mmap_lock held for user mode emulation.  */
TranslationBlock *tb_gen_code(CPUState *cpu,
                              target_ulong pc, target_ulong cs_base,
                              int flags, int cflags)
{
    CPUArchState *env = cpu->env_ptr;
    TranslationBlock *tb;
    tb_page_addr_t phys_pc, phys_page2;
    target_ulong virt_page2;
    tcg_insn_unit *gen_code_buf;
    int gen_code_size, search_size;
#ifdef CONFIG_PROFILER
    int64_t ti;
#endif

    phys_pc = get_page_addr_code(env, pc);
    if (use_icount && !(cflags & CF_IGNORE_ICOUNT)) {
        cflags |= CF_USE_ICOUNT;
    }

    tb = tb_alloc(pc);
    if (unlikely(!tb)) {
 buffer_overflow:
        /* flush must be done */
        tb_flush(cpu);
        /* cannot fail at this point */
        tb = tb_alloc(pc);
        assert(tb != NULL);
        /* Don't forget to invalidate previous TB info.  */
        tcg_ctx.tb_ctx.tb_invalidated_flag = 1;
    }

    gen_code_buf = tcg_ctx.code_gen_ptr;
    tb->tc_ptr = gen_code_buf;
    tb->cs_base = cs_base;
    tb->flags = flags;
    tb->cflags = cflags;

#ifdef CONFIG_PROFILER
    tcg_ctx.tb_count1++; /* includes aborted translations because of
                            exceptions */
    ti = profile_getclock();
#endif

    tcg_func_start(&tcg_ctx);

    gen_intermediate_code(env, tb);

    trace_translate_block(tb, tb->pc, tb->tc_ptr);

    /* generate machine code */
    tb->tb_next_offset[0] = 0xffff;
    tb->tb_next_offset[1] = 0xffff;
    tcg_ctx.tb_next_offset = tb->tb_next_offset;
#ifdef USE_DIRECT_JUMP
    tcg_ctx.tb_jmp_offset = tb->tb_jmp_offset;
    tcg_ctx.tb_next = NULL;
#else
    tcg_ctx.tb_jmp_offset = NULL;
    tcg_ctx.tb_next = tb->tb_next;
#endif

#ifdef CONFIG_PROFILER
    tcg_ctx.tb_count++;
    tcg_ctx.interm_time += profile_getclock() - ti;
    tcg_ctx.code_time -= profile_getclock();
#endif

    /* ??? Overflow could be handled better here.  In particular, we
       don't need to re-do gen_intermediate_code, nor should we re-do
       the tcg optimization currently hidden inside tcg_gen_code.  All
       that should be required is to flush the TBs, allocate a new TB,
       re-initialize it per above, and re-do the actual code generation.  */
    gen_code_size = tcg_gen_code(&tcg_ctx, gen_code_buf);
    if (unlikely(gen_code_size < 0)) {
        goto buffer_overflow;
    }
    search_size = encode_search(tb, (void *)gen_code_buf + gen_code_size);
    if (unlikely(search_size < 0)) {
        goto buffer_overflow;
    }

#ifdef CONFIG_PROFILER
    tcg_ctx.code_time += profile_getclock();
    tcg_ctx.code_in_len += tb->size;
    tcg_ctx.code_out_len += gen_code_size;
    tcg_ctx.search_out_len += search_size;
#endif

#ifdef DEBUG_DISAS
    if (qemu_loglevel_mask(CPU_LOG_TB_OUT_ASM)) {
        qemu_log("OUT: [size=%d]\n", gen_code_size);
        log_disas(tb->tc_ptr, gen_code_size);
        qemu_log("\n");
        qemu_log_flush();
    }
#endif

    tcg_ctx.code_gen_ptr = (void *)
        ROUND_UP((uintptr_t)gen_code_buf + gen_code_size + search_size,
                 CODE_GEN_ALIGN);

    /* check next page if needed */
    virt_page2 = (pc + tb->size - 1) & TARGET_PAGE_MASK;
    phys_page2 = -1;
    if ((pc & TARGET_PAGE_MASK) != virt_page2) {
        phys_page2 = get_page_addr_code(env, virt_page2);
    }
    tb_link_page(tb, phys_pc, phys_page2);
    return tb;
}

/*
 * Invalidate all TBs which intersect with the target physical address range
 * [start;end[. NOTE: start and end may refer to *different* physical pages.
 * 'is_cpu_write_access' should be true if called from a real cpu write
 * access: the virtual CPU will exit the current TB if code is modified inside
 * this TB.
 *
 * Called with mmap_lock held for user-mode emulation
 */
void tb_invalidate_phys_range(tb_page_addr_t start, tb_page_addr_t end)
{
    while (start < end) {
        tb_invalidate_phys_page_range(start, end, 0);
        start &= TARGET_PAGE_MASK;
        start += TARGET_PAGE_SIZE;
    }
}

/*
 * Invalidate all TBs which intersect with the target physical address range
 * [start;end[. NOTE: start and end must refer to the *same* physical page.
 * 'is_cpu_write_access' should be true if called from a real cpu write
 * access: the virtual CPU will exit the current TB if code is modified inside
 * this TB.
 *
 * Called with mmap_lock held for user-mode emulation
 */
void tb_invalidate_phys_page_range(tb_page_addr_t start, tb_page_addr_t end,
                                   int is_cpu_write_access)
{
    TranslationBlock *tb, *tb_next, *saved_tb;
    CPUState *cpu = current_cpu;
#if defined(TARGET_HAS_PRECISE_SMC)
    CPUArchState *env = NULL;
#endif
    tb_page_addr_t tb_start, tb_end;
    PageDesc *p;
    int n;
#ifdef TARGET_HAS_PRECISE_SMC
    int current_tb_not_found = is_cpu_write_access;
    TranslationBlock *current_tb = NULL;
    int current_tb_modified = 0;
    target_ulong current_pc = 0;
    target_ulong current_cs_base = 0;
    int current_flags = 0;
#endif /* TARGET_HAS_PRECISE_SMC */

    p = page_find(start >> TARGET_PAGE_BITS);
    if (!p) {
        return;
    }
#if defined(TARGET_HAS_PRECISE_SMC)
    if (cpu != NULL) {
        env = cpu->env_ptr;
    }
#endif

    /* we remove all the TBs in the range [start, end[ */
    /* XXX: see if in some cases it could be faster to invalidate all
       the code */
    tb = p->first_tb;
    while (tb != NULL) {
        n = (uintptr_t)tb & 3;
        tb = (TranslationBlock *)((uintptr_t)tb & ~3);
        tb_next = tb->page_next[n];
        /* NOTE: this is subtle as a TB may span two physical pages */
        if (n == 0) {
            /* NOTE: tb_end may be after the end of the page, but
               it is not a problem */
            tb_start = tb->page_addr[0] + (tb->pc & ~TARGET_PAGE_MASK);
            tb_end = tb_start + tb->size;
        } else {
            tb_start = tb->page_addr[1];
            tb_end = tb_start + ((tb->pc + tb->size) & ~TARGET_PAGE_MASK);
        }
        if (!(tb_end <= start || tb_start >= end)) {
#ifdef TARGET_HAS_PRECISE_SMC
            if (current_tb_not_found) {
                current_tb_not_found = 0;
                current_tb = NULL;
                if (cpu->mem_io_pc) {
                    /* now we have a real cpu fault */
                    current_tb = tb_find_pc(cpu->mem_io_pc);
                }
            }
            if (current_tb == tb &&
                (current_tb->cflags & CF_COUNT_MASK) != 1) {
                /* If we are modifying the current TB, we must stop
                   its execution. We could be more precise by checking
                   that the modification is after the current PC, but it
                   would require a specialized function to partially
                   restore the CPU state */

                current_tb_modified = 1;
                cpu_restore_state_from_tb(cpu, current_tb, cpu->mem_io_pc);
                cpu_get_tb_cpu_state(env, &current_pc, &current_cs_base,
                                     &current_flags);
            }
#endif /* TARGET_HAS_PRECISE_SMC */
            /* we need to do that to handle the case where a signal
               occurs while doing tb_phys_invalidate() */
            saved_tb = NULL;
            if (cpu != NULL) {
                saved_tb = cpu->current_tb;
                cpu->current_tb = NULL;
            }
            tb_phys_invalidate(tb, -1);
            if (cpu != NULL) {
                cpu->current_tb = saved_tb;
                if (cpu->interrupt_request && cpu->current_tb) {
                    cpu_interrupt(cpu, cpu->interrupt_request);
                }
            }
        }
        tb = tb_next;
    }
#if !defined(CONFIG_USER_ONLY)
    /* if no code remaining, no need to continue to use slow writes */
    if (!p->first_tb) {
        invalidate_page_bitmap(p);
        tlb_unprotect_code(start);
    }
#endif
#ifdef TARGET_HAS_PRECISE_SMC
    if (current_tb_modified) {
        /* we generate a block containing just the instruction
           modifying the memory. It will ensure that it cannot modify
           itself */
        cpu->current_tb = NULL;
        tb_gen_code(cpu, current_pc, current_cs_base, current_flags, 1);
        cpu_resume_from_signal(cpu, NULL);
    }
#endif
}

1296 | /* len must be <= 8 and start must be a multiple of len */ | |
1297 | void tb_invalidate_phys_page_fast(tb_page_addr_t start, int len) | |
1298 | { | |
1299 | PageDesc *p; | |
5b6dd868 BS |
1300 | |
1301 | #if 0 | |
1302 | if (1) { | |
1303 | qemu_log("modifying code at 0x%x size=%d EIP=%x PC=%08x\n", | |
1304 | cpu_single_env->mem_io_vaddr, len, | |
1305 | cpu_single_env->eip, | |
1306 | cpu_single_env->eip + | |
1307 | (intptr_t)cpu_single_env->segs[R_CS].base); | |
1308 | } | |
1309 | #endif | |
1310 | p = page_find(start >> TARGET_PAGE_BITS); | |
1311 | if (!p) { | |
1312 | return; | |
1313 | } | |
fc377bcf PB |
1314 | if (!p->code_bitmap && |
1315 | ++p->code_write_count >= SMC_BITMAP_USE_THRESHOLD) { | |
1316 | /* build code bitmap */ | |
1317 | build_page_bitmap(p); | |
1318 | } | |
5b6dd868 | 1319 | if (p->code_bitmap) { |
510a647f EC |
1320 | unsigned int nr; |
1321 | unsigned long b; | |
1322 | ||
1323 | nr = start & ~TARGET_PAGE_MASK; | |
1324 | b = p->code_bitmap[BIT_WORD(nr)] >> (nr & (BITS_PER_LONG - 1)); | |
5b6dd868 BS |
1325 | if (b & ((1 << len) - 1)) { |
1326 | goto do_invalidate; | |
1327 | } | |
1328 | } else { | |
1329 | do_invalidate: | |
1330 | tb_invalidate_phys_page_range(start, start + len, 1); | |
1331 | } | |
1332 | } | |
1333 | ||
1334 | #if !defined(CONFIG_SOFTMMU) | |
75692087 | 1335 | /* Called with mmap_lock held. */ |
5b6dd868 | 1336 | static void tb_invalidate_phys_page(tb_page_addr_t addr, |
d02532f0 AG |
1337 | uintptr_t pc, void *puc, |
1338 | bool locked) | |
5b6dd868 BS |
1339 | { |
1340 | TranslationBlock *tb; | |
1341 | PageDesc *p; | |
1342 | int n; | |
1343 | #ifdef TARGET_HAS_PRECISE_SMC | |
1344 | TranslationBlock *current_tb = NULL; | |
4917cf44 AF |
1345 | CPUState *cpu = current_cpu; |
1346 | CPUArchState *env = NULL; | |
5b6dd868 BS |
1347 | int current_tb_modified = 0; |
1348 | target_ulong current_pc = 0; | |
1349 | target_ulong current_cs_base = 0; | |
1350 | int current_flags = 0; | |
1351 | #endif | |
1352 | ||
1353 | addr &= TARGET_PAGE_MASK; | |
1354 | p = page_find(addr >> TARGET_PAGE_BITS); | |
1355 | if (!p) { | |
1356 | return; | |
1357 | } | |
1358 | tb = p->first_tb; | |
1359 | #ifdef TARGET_HAS_PRECISE_SMC | |
1360 | if (tb && pc != 0) { | |
1361 | current_tb = tb_find_pc(pc); | |
1362 | } | |
4917cf44 AF |
1363 | if (cpu != NULL) { |
1364 | env = cpu->env_ptr; | |
d77953b9 | 1365 | } |
5b6dd868 BS |
1366 | #endif |
1367 | while (tb != NULL) { | |
1368 | n = (uintptr_t)tb & 3; | |
1369 | tb = (TranslationBlock *)((uintptr_t)tb & ~3); | |
1370 | #ifdef TARGET_HAS_PRECISE_SMC | |
1371 | if (current_tb == tb && | |
1372 | (current_tb->cflags & CF_COUNT_MASK) != 1) { | |
1373 | /* If we are modifying the current TB, we must stop | |
1374 | its execution. We could be more precise by checking | |
1375 | that the modification is after the current PC, but it | |
1376 | would require a specialized function to partially | |
1377 | restore the CPU state */ | |
1378 | ||
1379 | current_tb_modified = 1; | |
74f10515 | 1380 | cpu_restore_state_from_tb(cpu, current_tb, pc); |
5b6dd868 BS |
1381 | cpu_get_tb_cpu_state(env, ¤t_pc, ¤t_cs_base, |
1382 | ¤t_flags); | |
1383 | } | |
1384 | #endif /* TARGET_HAS_PRECISE_SMC */ | |
1385 | tb_phys_invalidate(tb, addr); | |
1386 | tb = tb->page_next[n]; | |
1387 | } | |
1388 | p->first_tb = NULL; | |
1389 | #ifdef TARGET_HAS_PRECISE_SMC | |
1390 | if (current_tb_modified) { | |
1391 | /* we generate a block containing just the instruction | |
1392 | modifying the memory. It will ensure that it cannot modify | |
1393 | itself */ | |
d77953b9 | 1394 | cpu->current_tb = NULL; |
648f034c | 1395 | tb_gen_code(cpu, current_pc, current_cs_base, current_flags, 1); |
d02532f0 AG |
1396 | if (locked) { |
1397 | mmap_unlock(); | |
1398 | } | |
0ea8cb88 | 1399 | cpu_resume_from_signal(cpu, puc); |
5b6dd868 BS |
1400 | } |
1401 | #endif | |
1402 | } | |
1403 | #endif | |
1404 | ||
75692087 PB |
1405 | /* add the tb in the target page and protect it if necessary |
1406 | * | |
1407 | * Called with mmap_lock held for user-mode emulation. | |
1408 | */ | |
5b6dd868 BS |
1409 | static inline void tb_alloc_page(TranslationBlock *tb, |
1410 | unsigned int n, tb_page_addr_t page_addr) | |
1411 | { | |
1412 | PageDesc *p; | |
1413 | #ifndef CONFIG_USER_ONLY | |
1414 | bool page_already_protected; | |
1415 | #endif | |
1416 | ||
1417 | tb->page_addr[n] = page_addr; | |
1418 | p = page_find_alloc(page_addr >> TARGET_PAGE_BITS, 1); | |
1419 | tb->page_next[n] = p->first_tb; | |
1420 | #ifndef CONFIG_USER_ONLY | |
1421 | page_already_protected = p->first_tb != NULL; | |
1422 | #endif | |
1423 | p->first_tb = (TranslationBlock *)((uintptr_t)tb | n); | |
1424 | invalidate_page_bitmap(p); | |
1425 | ||
5b6dd868 BS |
1426 | #if defined(CONFIG_USER_ONLY) |
1427 | if (p->flags & PAGE_WRITE) { | |
1428 | target_ulong addr; | |
1429 | PageDesc *p2; | |
1430 | int prot; | |
1431 | ||
1432 | /* force the host page as non writable (writes will have a | |
1433 | page fault + mprotect overhead) */ | |
1434 | page_addr &= qemu_host_page_mask; | |
1435 | prot = 0; | |
1436 | for (addr = page_addr; addr < page_addr + qemu_host_page_size; | |
1437 | addr += TARGET_PAGE_SIZE) { | |
1438 | ||
1439 | p2 = page_find(addr >> TARGET_PAGE_BITS); | |
1440 | if (!p2) { | |
1441 | continue; | |
1442 | } | |
1443 | prot |= p2->flags; | |
1444 | p2->flags &= ~PAGE_WRITE; | |
1445 | } | |
1446 | mprotect(g2h(page_addr), qemu_host_page_size, | |
1447 | (prot & PAGE_BITS) & ~PAGE_WRITE); | |
1448 | #ifdef DEBUG_TB_INVALIDATE | |
1449 | printf("protecting code page: 0x" TARGET_FMT_lx "\n", | |
1450 | page_addr); | |
1451 | #endif | |
1452 | } | |
1453 | #else | |
1454 | /* if some code is already present, then the pages are already | |
1455 | protected. So we handle the case where only the first TB is | |
1456 | allocated in a physical page */ | |
1457 | if (!page_already_protected) { | |
1458 | tlb_protect_code(page_addr); | |
1459 | } | |
1460 | #endif | |
5b6dd868 BS |
1461 | } |
1462 | ||
1463 | /* add a new TB and link it to the physical page tables. phys_page2 is | |
75692087 | 1464 | * (-1) to indicate that only one page contains the TB. |
9fd1a948 PB |
1465 | * |
1466 | * Called with mmap_lock held for user-mode emulation. | |
75692087 | 1467 | */ |
5b6dd868 BS |
1468 | static void tb_link_page(TranslationBlock *tb, tb_page_addr_t phys_pc, |
1469 | tb_page_addr_t phys_page2) | |
1470 | { | |
1471 | unsigned int h; | |
1472 | TranslationBlock **ptb; | |
1473 | ||
5b6dd868 BS |
1474 | /* add in the physical hash table */ |
1475 | h = tb_phys_hash_func(phys_pc); | |
5e5f07e0 | 1476 | ptb = &tcg_ctx.tb_ctx.tb_phys_hash[h]; |
5b6dd868 BS |
1477 | tb->phys_hash_next = *ptb; |
1478 | *ptb = tb; | |
1479 | ||
1480 | /* add in the page list */ | |
1481 | tb_alloc_page(tb, 0, phys_pc & TARGET_PAGE_MASK); | |
1482 | if (phys_page2 != -1) { | |
1483 | tb_alloc_page(tb, 1, phys_page2); | |
1484 | } else { | |
1485 | tb->page_addr[1] = -1; | |
1486 | } | |
1487 | ||
1488 | tb->jmp_first = (TranslationBlock *)((uintptr_t)tb | 2); | |
1489 | tb->jmp_next[0] = NULL; | |
1490 | tb->jmp_next[1] = NULL; | |
1491 | ||
1492 | /* init original jump addresses */ | |
1493 | if (tb->tb_next_offset[0] != 0xffff) { | |
1494 | tb_reset_jump(tb, 0); | |
1495 | } | |
1496 | if (tb->tb_next_offset[1] != 0xffff) { | |
1497 | tb_reset_jump(tb, 1); | |
1498 | } | |
1499 | ||
1500 | #ifdef DEBUG_TB_CHECK | |
1501 | tb_page_check(); | |
1502 | #endif | |
5b6dd868 BS |
1503 | } |

/* Find the TB whose generated code contains tc_ptr, i.e. the TB 'tb'
   such that tb->tc_ptr <= tc_ptr < (tb + 1)->tc_ptr.
   Return NULL if not found. */
static TranslationBlock *tb_find_pc(uintptr_t tc_ptr)
{
    int m_min, m_max, m;
    uintptr_t v;
    TranslationBlock *tb;

    if (tcg_ctx.tb_ctx.nb_tbs <= 0) {
        return NULL;
    }
    if (tc_ptr < (uintptr_t)tcg_ctx.code_gen_buffer ||
        tc_ptr >= (uintptr_t)tcg_ctx.code_gen_ptr) {
        return NULL;
    }
    /* binary search (cf Knuth) */
    m_min = 0;
    m_max = tcg_ctx.tb_ctx.nb_tbs - 1;
    while (m_min <= m_max) {
        m = (m_min + m_max) >> 1;
        tb = &tcg_ctx.tb_ctx.tbs[m];
        v = (uintptr_t)tb->tc_ptr;
        if (v == tc_ptr) {
            return tb;
        } else if (tc_ptr < v) {
            m_max = m - 1;
        } else {
            m_min = m + 1;
        }
    }
    return &tcg_ctx.tb_ctx.tbs[m_max];
}
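
/*
 * The binary search above relies on TBs being handed out sequentially
 * from tbs[] with their generated code packed in the same ascending
 * order inside code_gen_buffer, so tc_ptr values are monotonic in the
 * array index.  Typical use is to map a host return address taken while
 * executing generated code back to its TB, e.g.
 *
 *     TranslationBlock *tb = tb_find_pc(retaddr);
 *
 * as done by tb_check_watchpoint() and cpu_io_recompile() below.
 */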

#if !defined(CONFIG_USER_ONLY)
void tb_invalidate_phys_addr(AddressSpace *as, hwaddr addr)
{
    ram_addr_t ram_addr;
    MemoryRegion *mr;
    hwaddr l = 1;

    rcu_read_lock();
    mr = address_space_translate(as, addr, &addr, &l, false);
    if (!(memory_region_is_ram(mr)
          || memory_region_is_romd(mr))) {
        rcu_read_unlock();
        return;
    }
    ram_addr = (memory_region_get_ram_addr(mr) & TARGET_PAGE_MASK)
        + addr;
    tb_invalidate_phys_page_range(ram_addr, ram_addr + 1, 0);
    rcu_read_unlock();
}
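
/*
 * Note (added annotation): tb_invalidate_phys_addr() takes a guest
 * physical address, so it must first translate to a ram_addr_t under
 * the RCU read lock before the page range can be invalidated.  It is
 * typically reached when translated code must be discarded for an
 * address known only physically, e.g. when a breakpoint is inserted or
 * removed; callers that already hold a ram_addr_t can call
 * tb_invalidate_phys_page_range() directly.
 */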
#endif /* !defined(CONFIG_USER_ONLY) */

void tb_check_watchpoint(CPUState *cpu)
{
    TranslationBlock *tb;

    tb = tb_find_pc(cpu->mem_io_pc);
    if (tb) {
        /* We can use retranslation to find the PC. */
        cpu_restore_state_from_tb(cpu, tb, cpu->mem_io_pc);
        tb_phys_invalidate(tb, -1);
    } else {
        /* The exception probably happened in a helper. The CPU state should
           have been saved before calling it. Fetch the PC from there. */
        CPUArchState *env = cpu->env_ptr;
        target_ulong pc, cs_base;
        tb_page_addr_t addr;
        int flags;

        cpu_get_tb_cpu_state(env, &pc, &cs_base, &flags);
        addr = get_page_addr_code(env, pc);
        tb_invalidate_phys_range(addr, addr + 1);
    }
}

#ifndef CONFIG_USER_ONLY
/* In deterministic execution mode, instructions doing device I/O
   must be at the end of the TB. */
void cpu_io_recompile(CPUState *cpu, uintptr_t retaddr)
{
#if defined(TARGET_MIPS) || defined(TARGET_SH4)
    CPUArchState *env = cpu->env_ptr;
#endif
    TranslationBlock *tb;
    uint32_t n, cflags;
    target_ulong pc, cs_base;
    uint64_t flags;

    tb = tb_find_pc(retaddr);
    if (!tb) {
        cpu_abort(cpu, "cpu_io_recompile: could not find TB for pc=%p",
                  (void *)retaddr);
    }
    n = cpu->icount_decr.u16.low + tb->icount;
    cpu_restore_state_from_tb(cpu, tb, retaddr);
    /* Calculate how many instructions had been executed before the fault
       occurred. */
    n = n - cpu->icount_decr.u16.low;
    /* Generate a new TB ending on the I/O insn. */
    n++;
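    /*
     * Worked example (illustrative numbers, not from the original
     * source): suppose the TB holds 10 insns and icount_decr.u16.low
     * was 90 after the TB prologue subtracted tb->icount from a budget
     * of 100.  Then n starts as 90 + 10 = 100.  If the I/O insn is the
     * 4th one, restoring the state puts u16.low back to 97 (3 insns
     * fully executed), so n becomes 100 - 97 = 3, and the n++ above
     * makes it 4: the replacement TB covers exactly the 4 insns up to
     * and including the I/O insn.
     */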
    /* On MIPS and SH, delay slot instructions can only be restarted if
       they were already the first instruction in the TB. If this is not
       the first instruction in a TB then re-execute the preceding
       branch. */
#if defined(TARGET_MIPS)
    if ((env->hflags & MIPS_HFLAG_BMASK) != 0 && n > 1) {
        env->active_tc.PC -= (env->hflags & MIPS_HFLAG_B16 ? 2 : 4);
        cpu->icount_decr.u16.low++;
        env->hflags &= ~MIPS_HFLAG_BMASK;
    }
#elif defined(TARGET_SH4)
    if ((env->flags & ((DELAY_SLOT | DELAY_SLOT_CONDITIONAL))) != 0
        && n > 1) {
        env->pc -= 2;
        cpu->icount_decr.u16.low++;
        env->flags &= ~(DELAY_SLOT | DELAY_SLOT_CONDITIONAL);
    }
#endif
    /* This should never happen. */
    if (n > CF_COUNT_MASK) {
        cpu_abort(cpu, "TB too big during recompile");
    }

    cflags = n | CF_LAST_IO;
    pc = tb->pc;
    cs_base = tb->cs_base;
    flags = tb->flags;
    tb_phys_invalidate(tb, -1);
    if (tb->cflags & CF_NOCACHE) {
        if (tb->orig_tb) {
            /* Invalidate original TB if this TB was generated in
             * cpu_exec_nocache() */
            tb_phys_invalidate(tb->orig_tb, -1);
        }
        tb_free(tb);
    }
    /* FIXME: In theory this could raise an exception. In practice
       we have already translated the block once so it's probably ok. */
    tb_gen_code(cpu, pc, cs_base, flags, cflags);
    /* TODO: If env->pc != tb->pc (i.e. the faulting instruction was not
       the first in the TB) then we end up generating a whole new TB and
       repeating the fault, which is horribly inefficient.
       Better would be to execute just this insn uncached, or generate a
       second new TB. */
    cpu_resume_from_signal(cpu, NULL);
}

void tb_flush_jmp_cache(CPUState *cpu, target_ulong addr)
{
    unsigned int i;

    /* Discard jump cache entries for any tb which might potentially
       overlap the flushed page. */
    i = tb_jmp_cache_hash_page(addr - TARGET_PAGE_SIZE);
    memset(&cpu->tb_jmp_cache[i], 0,
           TB_JMP_PAGE_SIZE * sizeof(TranslationBlock *));

    i = tb_jmp_cache_hash_page(addr);
    memset(&cpu->tb_jmp_cache[i], 0,
           TB_JMP_PAGE_SIZE * sizeof(TranslationBlock *));
}
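
/*
 * Two hash probes are needed above because a TB can span a page
 * boundary: a TB that starts on the page preceding 'addr' may extend
 * into the flushed page, and its jump cache entries are hashed by its
 * starting address, i.e. under addr - TARGET_PAGE_SIZE.
 */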

void dump_exec_info(FILE *f, fprintf_function cpu_fprintf)
{
    int i, target_code_size, max_target_code_size;
    int direct_jmp_count, direct_jmp2_count, cross_page;
    TranslationBlock *tb;

    target_code_size = 0;
    max_target_code_size = 0;
    cross_page = 0;
    direct_jmp_count = 0;
    direct_jmp2_count = 0;
    for (i = 0; i < tcg_ctx.tb_ctx.nb_tbs; i++) {
        tb = &tcg_ctx.tb_ctx.tbs[i];
        target_code_size += tb->size;
        if (tb->size > max_target_code_size) {
            max_target_code_size = tb->size;
        }
        if (tb->page_addr[1] != -1) {
            cross_page++;
        }
        if (tb->tb_next_offset[0] != 0xffff) {
            direct_jmp_count++;
            if (tb->tb_next_offset[1] != 0xffff) {
                direct_jmp2_count++;
            }
        }
    }
    /* XXX: avoid using doubles ? */
    cpu_fprintf(f, "Translation buffer state:\n");
    cpu_fprintf(f, "gen code size       %td/%zd\n",
                tcg_ctx.code_gen_ptr - tcg_ctx.code_gen_buffer,
                tcg_ctx.code_gen_highwater - tcg_ctx.code_gen_buffer);
    cpu_fprintf(f, "TB count            %d/%d\n",
                tcg_ctx.tb_ctx.nb_tbs, tcg_ctx.code_gen_max_blocks);
    cpu_fprintf(f, "TB avg target size  %d max=%d bytes\n",
                tcg_ctx.tb_ctx.nb_tbs ? target_code_size /
                tcg_ctx.tb_ctx.nb_tbs : 0,
                max_target_code_size);
    cpu_fprintf(f, "TB avg host size    %td bytes (expansion ratio: %0.1f)\n",
                tcg_ctx.tb_ctx.nb_tbs ? (tcg_ctx.code_gen_ptr -
                                         tcg_ctx.code_gen_buffer) /
                tcg_ctx.tb_ctx.nb_tbs : 0,
                target_code_size ? (double) (tcg_ctx.code_gen_ptr -
                                             tcg_ctx.code_gen_buffer) /
                target_code_size : 0);
    cpu_fprintf(f, "cross page TB count %d (%d%%)\n", cross_page,
                tcg_ctx.tb_ctx.nb_tbs ? (cross_page * 100) /
                tcg_ctx.tb_ctx.nb_tbs : 0);
    cpu_fprintf(f, "direct jump count   %d (%d%%) (2 jumps=%d %d%%)\n",
                direct_jmp_count,
                tcg_ctx.tb_ctx.nb_tbs ? (direct_jmp_count * 100) /
                tcg_ctx.tb_ctx.nb_tbs : 0,
                direct_jmp2_count,
                tcg_ctx.tb_ctx.nb_tbs ? (direct_jmp2_count * 100) /
                tcg_ctx.tb_ctx.nb_tbs : 0);
    cpu_fprintf(f, "\nStatistics:\n");
    cpu_fprintf(f, "TB flush count      %d\n", tcg_ctx.tb_ctx.tb_flush_count);
    cpu_fprintf(f, "TB invalidate count %d\n",
                tcg_ctx.tb_ctx.tb_phys_invalidate_count);
    cpu_fprintf(f, "TLB flush count     %d\n", tlb_flush_count);
    tcg_dump_info(f, cpu_fprintf);
}

void dump_opcount_info(FILE *f, fprintf_function cpu_fprintf)
{
    tcg_dump_op_count(f, cpu_fprintf);
}

#else /* CONFIG_USER_ONLY */

void cpu_interrupt(CPUState *cpu, int mask)
{
    cpu->interrupt_request |= mask;
    cpu->tcg_exit_req = 1;
}
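
/*
 * Note (added annotation): in user-mode emulation there is no interrupt
 * controller to model, so recording the mask and raising tcg_exit_req
 * is enough: the execution loop breaks out of generated code at the
 * next check and the pending request can then be serviced.
 */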

/*
 * Walks guest process memory "regions" one by one
 * and calls callback function 'fn' for each region.
 */
struct walk_memory_regions_data {
    walk_memory_regions_fn fn;
    void *priv;
    target_ulong start;
    int prot;
};

static int walk_memory_regions_end(struct walk_memory_regions_data *data,
                                   target_ulong end, int new_prot)
{
    if (data->start != -1u) {
        int rc = data->fn(data->priv, data->start, end, data->prot);
        if (rc != 0) {
            return rc;
        }
    }

    data->start = (new_prot ? end : -1u);
    data->prot = new_prot;

    return 0;
}

static int walk_memory_regions_1(struct walk_memory_regions_data *data,
                                 target_ulong base, int level, void **lp)
{
    target_ulong pa;
    int i, rc;

    if (*lp == NULL) {
        return walk_memory_regions_end(data, base, 0);
    }

    if (level == 0) {
        PageDesc *pd = *lp;

        for (i = 0; i < V_L2_SIZE; ++i) {
            int prot = pd[i].flags;

            pa = base | (i << TARGET_PAGE_BITS);
            if (prot != data->prot) {
                rc = walk_memory_regions_end(data, pa, prot);
                if (rc != 0) {
                    return rc;
                }
            }
        }
    } else {
        void **pp = *lp;

        for (i = 0; i < V_L2_SIZE; ++i) {
            pa = base | ((target_ulong)i <<
                         (TARGET_PAGE_BITS + V_L2_BITS * level));
            rc = walk_memory_regions_1(data, pa, level - 1, pp + i);
            if (rc != 0) {
                return rc;
            }
        }
    }

    return 0;
}

int walk_memory_regions(void *priv, walk_memory_regions_fn fn)
{
    struct walk_memory_regions_data data;
    uintptr_t i;

    data.fn = fn;
    data.priv = priv;
    data.start = -1u;
    data.prot = 0;

    for (i = 0; i < V_L1_SIZE; i++) {
        int rc = walk_memory_regions_1(&data,
                                       (target_ulong)i << (V_L1_SHIFT + TARGET_PAGE_BITS),
                                       V_L1_SHIFT / V_L2_BITS - 1, l1_map + i);
        if (rc != 0) {
            return rc;
        }
    }

    return walk_memory_regions_end(&data, 0, 0);
}
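
/*
 * The walk is a depth-first traversal of the multi-level l1_map radix
 * tree: each interior level resolves V_L2_BITS of the address, leaves
 * are PageDesc arrays, and walk_memory_regions_end() flushes the
 * accumulated run whenever the protection bits change, so consecutive
 * pages with identical flags are reported as a single region.  A
 * typical caller is page_dump() below:
 *
 *     walk_memory_regions(f, dump_region);
 */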

static int dump_region(void *priv, target_ulong start,
                       target_ulong end, unsigned long prot)
{
    FILE *f = (FILE *)priv;

    (void) fprintf(f, TARGET_FMT_lx"-"TARGET_FMT_lx
                   " "TARGET_FMT_lx" %c%c%c\n",
                   start, end, end - start,
                   ((prot & PAGE_READ) ? 'r' : '-'),
                   ((prot & PAGE_WRITE) ? 'w' : '-'),
                   ((prot & PAGE_EXEC) ? 'x' : '-'));

    return 0;
}

/* dump memory mappings */
void page_dump(FILE *f)
{
    const int length = sizeof(target_ulong) * 2;

    (void) fprintf(f, "%-*s %-*s %-*s %s\n",
                   length, "start", length, "end", length, "size", "prot");
    walk_memory_regions(f, dump_region);
}

int page_get_flags(target_ulong address)
{
    PageDesc *p;

    p = page_find(address >> TARGET_PAGE_BITS);
    if (!p) {
        return 0;
    }
    return p->flags;
}

/* Modify the flags of a page and invalidate the code if necessary.
   The flag PAGE_WRITE_ORG is set automatically depending
   on PAGE_WRITE. The mmap_lock should already be held. */
void page_set_flags(target_ulong start, target_ulong end, int flags)
{
    target_ulong addr, len;

    /* This function should never be called with addresses outside the
       guest address space. If this assert fires, it probably indicates
       a missing call to h2g_valid. */
#if TARGET_ABI_BITS > L1_MAP_ADDR_SPACE_BITS
    assert(end < ((target_ulong)1 << L1_MAP_ADDR_SPACE_BITS));
#endif
    assert(start < end);

    start = start & TARGET_PAGE_MASK;
    end = TARGET_PAGE_ALIGN(end);

    if (flags & PAGE_WRITE) {
        flags |= PAGE_WRITE_ORG;
    }

    for (addr = start, len = end - start;
         len != 0;
         len -= TARGET_PAGE_SIZE, addr += TARGET_PAGE_SIZE) {
        PageDesc *p = page_find_alloc(addr >> TARGET_PAGE_BITS, 1);

        /* If the write protection bit is set, then we invalidate
           the code inside. */
        if (!(p->flags & PAGE_WRITE) &&
            (flags & PAGE_WRITE) &&
            p->first_tb) {
            tb_invalidate_phys_page(addr, 0, NULL, false);
        }
        p->flags = flags;
    }
}
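
/*
 * PAGE_WRITE_ORG records whether the page was *originally* writable, as
 * opposed to PAGE_WRITE, which may have been cleared temporarily to
 * write-protect translated code.  page_unprotect() below uses the
 * distinction to decide whether a faulting write is legitimate and the
 * page may be made writable again.
 */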

int page_check_range(target_ulong start, target_ulong len, int flags)
{
    PageDesc *p;
    target_ulong end;
    target_ulong addr;

    /* This function should never be called with addresses outside the
       guest address space. If this assert fires, it probably indicates
       a missing call to h2g_valid. */
#if TARGET_ABI_BITS > L1_MAP_ADDR_SPACE_BITS
    assert(start < ((target_ulong)1 << L1_MAP_ADDR_SPACE_BITS));
#endif

    if (len == 0) {
        return 0;
    }
    if (start + len - 1 < start) {
        /* We've wrapped around. */
        return -1;
    }

    /* must do before we lose bits in the next step */
    end = TARGET_PAGE_ALIGN(start + len);
    start = start & TARGET_PAGE_MASK;

    for (addr = start, len = end - start;
         len != 0;
         len -= TARGET_PAGE_SIZE, addr += TARGET_PAGE_SIZE) {
        p = page_find(addr >> TARGET_PAGE_BITS);
        if (!p) {
            return -1;
        }
        if (!(p->flags & PAGE_VALID)) {
            return -1;
        }

        if ((flags & PAGE_READ) && !(p->flags & PAGE_READ)) {
            return -1;
        }
        if (flags & PAGE_WRITE) {
            if (!(p->flags & PAGE_WRITE_ORG)) {
                return -1;
            }
            /* unprotect the page if it was put read-only because it
               contains translated code */
            if (!(p->flags & PAGE_WRITE)) {
                if (!page_unprotect(addr, 0, NULL)) {
                    return -1;
                }
            }
        }
    }
    return 0;
}
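
/*
 * Note (added annotation): page_check_range() is how user-mode
 * emulation validates guest buffers before touching them on the guest's
 * behalf, e.g. the access_ok() checks in the syscall emulation layer.
 * Asking for PAGE_WRITE can already trigger page_unprotect(), so merely
 * checking a writable range may invalidate translated code on it.
 */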

/* called from signal handler: invalidate the code and unprotect the
   page. Return TRUE if the fault was successfully handled. */
int page_unprotect(target_ulong address, uintptr_t pc, void *puc)
{
    unsigned int prot;
    PageDesc *p;
    target_ulong host_start, host_end, addr;

    /* Technically this isn't safe inside a signal handler. However we
       know this only ever happens in a synchronous SEGV handler, so in
       practice it seems to be ok. */
    mmap_lock();

    p = page_find(address >> TARGET_PAGE_BITS);
    if (!p) {
        mmap_unlock();
        return 0;
    }

    /* if the page was really writable, then we change its
       protection back to writable */
    if ((p->flags & PAGE_WRITE_ORG) && !(p->flags & PAGE_WRITE)) {
        host_start = address & qemu_host_page_mask;
        host_end = host_start + qemu_host_page_size;

        prot = 0;
        for (addr = host_start; addr < host_end; addr += TARGET_PAGE_SIZE) {
            p = page_find(addr >> TARGET_PAGE_BITS);
            p->flags |= PAGE_WRITE;
            prot |= p->flags;

            /* and since the content will be modified, we must invalidate
               the corresponding translated code. */
            tb_invalidate_phys_page(addr, pc, puc, true);
#ifdef DEBUG_TB_CHECK
            tb_invalidate_check(addr);
#endif
        }
        mprotect((void *)g2h(host_start), qemu_host_page_size,
                 prot & PAGE_BITS);

        mmap_unlock();
        return 1;
    }
    mmap_unlock();
    return 0;
}
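
/*
 * This is the counterpart of the write protection installed by
 * tb_alloc_page(): because mprotect() operates on host pages, every
 * target page within the host page regains PAGE_WRITE together and all
 * of their TBs are invalidated, mirroring the loop that removed the
 * flag when the first TB was linked.
 */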
#endif /* CONFIG_USER_ONLY */