]>
Commit | Line | Data |
---|---|---|
42a623c7 BS |
1 | /* |
2 | * User emulator execution | |
3 | * | |
4 | * Copyright (c) 2003-2005 Fabrice Bellard | |
5 | * | |
6 | * This library is free software; you can redistribute it and/or | |
7 | * modify it under the terms of the GNU Lesser General Public | |
8 | * License as published by the Free Software Foundation; either | |
fb0343d5 | 9 | * version 2.1 of the License, or (at your option) any later version. |
42a623c7 BS |
10 | * |
11 | * This library is distributed in the hope that it will be useful, | |
12 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | |
13 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU | |
14 | * Lesser General Public License for more details. | |
15 | * | |
16 | * You should have received a copy of the GNU Lesser General Public | |
17 | * License along with this library; if not, see <http://www.gnu.org/licenses/>. | |
18 | */ | |
d38ea87a | 19 | #include "qemu/osdep.h" |
78271684 | 20 | #include "hw/core/tcg-cpu-ops.h" |
76cad711 | 21 | #include "disas/disas.h" |
63c91552 | 22 | #include "exec/exec-all.h" |
dcb32f1d | 23 | #include "tcg/tcg.h" |
023b0ae3 | 24 | #include "qemu/bitops.h" |
177a8cb8 | 25 | #include "qemu/rcu.h" |
f08b6170 | 26 | #include "exec/cpu_ldst.h" |
3b9bd3f4 | 27 | #include "exec/translate-all.h" |
a411d296 | 28 | #include "exec/helper-proto.h" |
e6cd4bb5 | 29 | #include "qemu/atomic128.h" |
243af022 | 30 | #include "trace/trace-root.h" |
37e891e3 | 31 | #include "tcg/tcg-ldst.h" |
0583f775 | 32 | #include "internal.h" |
42a623c7 | 33 | |
/*
 * Set around host memory operations performed from helper functions;
 * consumed by adjust_signal_pc() below.  Protocol (see the switch there):
 * 0 = fault happened in generated code; 1 = fault during translation
 * fetch; any other value is the helper's host return address.
 */
__thread uintptr_t helper_retaddr;

//#define DEBUG_SIGNAL
0fdbb7d2 RH |
38 | /* |
39 | * Adjust the pc to pass to cpu_restore_state; return the memop type. | |
40 | */ | |
41 | MMUAccessType adjust_signal_pc(uintptr_t *pc, bool is_write) | |
42a623c7 | 42 | { |
52ba13f0 RH |
43 | switch (helper_retaddr) { |
44 | default: | |
45 | /* | |
46 | * Fault during host memory operation within a helper function. | |
47 | * The helper's host return address, saved here, gives us a | |
48 | * pointer into the generated code that will unwind to the | |
49 | * correct guest pc. | |
50 | */ | |
0fdbb7d2 | 51 | *pc = helper_retaddr; |
52ba13f0 RH |
52 | break; |
53 | ||
54 | case 0: | |
55 | /* | |
56 | * Fault during host memory operation within generated code. | |
57 | * (Or, a unrelated bug within qemu, but we can't tell from here). | |
58 | * | |
59 | * We take the host pc from the signal frame. However, we cannot | |
60 | * use that value directly. Within cpu_restore_state_from_tb, we | |
61 | * assume PC comes from GETPC(), as used by the helper functions, | |
62 | * so we adjust the address by -GETPC_ADJ to form an address that | |
e3a6e0da | 63 | * is within the call insn, so that the address does not accidentally |
52ba13f0 RH |
64 | * match the beginning of the next guest insn. However, when the |
65 | * pc comes from the signal frame it points to the actual faulting | |
66 | * host memory insn and not the return from a call insn. | |
67 | * | |
68 | * Therefore, adjust to compensate for what will be done later | |
69 | * by cpu_restore_state_from_tb. | |
70 | */ | |
0fdbb7d2 | 71 | *pc += GETPC_ADJ; |
52ba13f0 RH |
72 | break; |
73 | ||
74 | case 1: | |
75 | /* | |
76 | * Fault during host read for translation, or loosely, "execution". | |
77 | * | |
78 | * The guest pc is already pointing to the start of the TB for which | |
79 | * code is being generated. If the guest translator manages the | |
80 | * page crossings correctly, this is exactly the correct address | |
81 | * (and if the translator doesn't handle page boundaries correctly | |
82 | * there's little we can do about that here). Therefore, do not | |
83 | * trigger the unwinder. | |
52ba13f0 | 84 | */ |
0fdbb7d2 RH |
85 | *pc = 0; |
86 | return MMU_INST_FETCH; | |
ec603b55 RH |
87 | } |
88 | ||
0fdbb7d2 RH |
89 | return is_write ? MMU_DATA_STORE : MMU_DATA_LOAD; |
90 | } | |
91 | ||
5e38ba7d RH |
92 | /** |
93 | * handle_sigsegv_accerr_write: | |
94 | * @cpu: the cpu context | |
95 | * @old_set: the sigset_t from the signal ucontext_t | |
96 | * @host_pc: the host pc, adjusted for the signal | |
97 | * @guest_addr: the guest address of the fault | |
98 | * | |
99 | * Return true if the write fault has been handled, and should be re-tried. | |
100 | * | |
101 | * Note that it is important that we don't call page_unprotect() unless | |
9323e79f | 102 | * this is really a "write to nonwritable page" fault, because |
5e38ba7d | 103 | * page_unprotect() assumes that if it is called for an access to |
9323e79f PM |
104 | * a page that's writable this means we had two threads racing and |
105 | * another thread got there first and already made the page writable; | |
5e38ba7d RH |
106 | * so we will retry the access. If we were to call page_unprotect() |
107 | * for some other kind of fault that should really be passed to the | |
108 | * guest, we'd end up in an infinite loop of retrying the faulting access. | |
109 | */ | |
110 | bool handle_sigsegv_accerr_write(CPUState *cpu, sigset_t *old_set, | |
111 | uintptr_t host_pc, abi_ptr guest_addr) | |
112 | { | |
113 | switch (page_unprotect(guest_addr, host_pc)) { | |
114 | case 0: | |
115 | /* | |
116 | * Fault not caused by a page marked unwritable to protect | |
117 | * cached translations, must be the guest binary's problem. | |
118 | */ | |
119 | return false; | |
120 | case 1: | |
121 | /* | |
122 | * Fault caused by protection of cached translation; TBs | |
123 | * invalidated, so resume execution. | |
124 | */ | |
125 | return true; | |
126 | case 2: | |
127 | /* | |
128 | * Fault caused by protection of cached translation, and the | |
129 | * currently executing TB was modified and must be exited immediately. | |
130 | */ | |
940b3090 RH |
131 | sigprocmask(SIG_SETMASK, old_set, NULL); |
132 | cpu_loop_exit_noexc(cpu); | |
5e38ba7d RH |
133 | /* NORETURN */ |
134 | default: | |
135 | g_assert_not_reached(); | |
136 | } | |
137 | } | |
138 | ||
/*
 * Per-range page protection flags, keyed by guest address interval.
 * Kept in an interval tree so that lookups can be performed locklessly
 * (see the notes in page_get_flags); nodes are freed via RCU so that
 * concurrent lockless readers remain safe.
 */
typedef struct PageFlagsNode {
    struct rcu_head rcu;      /* deferred free for lockless readers */
    IntervalTreeNode itree;   /* [start, last] guest address range */
    int flags;                /* PAGE_* bits applying to the whole range */
} PageFlagsNode;

static IntervalTreeRoot pageflags_root;
d941c086 | 146 | |
32b12039 | 147 | static PageFlagsNode *pageflags_find(target_ulong start, target_ulong last) |
67ff2186 RH |
148 | { |
149 | IntervalTreeNode *n; | |
d941c086 | 150 | |
67ff2186 RH |
151 | n = interval_tree_iter_first(&pageflags_root, start, last); |
152 | return n ? container_of(n, PageFlagsNode, itree) : NULL; | |
d941c086 RH |
153 | } |
154 | ||
67ff2186 | 155 | static PageFlagsNode *pageflags_next(PageFlagsNode *p, target_ulong start, |
32b12039 | 156 | target_ulong last) |
d941c086 | 157 | { |
67ff2186 | 158 | IntervalTreeNode *n; |
d941c086 | 159 | |
67ff2186 RH |
160 | n = interval_tree_iter_next(&p->itree, start, last); |
161 | return n ? container_of(n, PageFlagsNode, itree) : NULL; | |
d941c086 RH |
162 | } |
163 | ||
164 | int walk_memory_regions(void *priv, walk_memory_regions_fn fn) | |
165 | { | |
67ff2186 RH |
166 | IntervalTreeNode *n; |
167 | int rc = 0; | |
d941c086 | 168 | |
67ff2186 RH |
169 | mmap_lock(); |
170 | for (n = interval_tree_iter_first(&pageflags_root, 0, -1); | |
171 | n != NULL; | |
172 | n = interval_tree_iter_next(n, 0, -1)) { | |
173 | PageFlagsNode *p = container_of(n, PageFlagsNode, itree); | |
d941c086 | 174 | |
67ff2186 | 175 | rc = fn(priv, n->start, n->last + 1, p->flags); |
d941c086 | 176 | if (rc != 0) { |
67ff2186 | 177 | break; |
d941c086 RH |
178 | } |
179 | } | |
67ff2186 | 180 | mmap_unlock(); |
d941c086 | 181 | |
67ff2186 | 182 | return rc; |
d941c086 RH |
183 | } |
184 | ||
185 | static int dump_region(void *priv, target_ulong start, | |
67ff2186 | 186 | target_ulong end, unsigned long prot) |
d941c086 RH |
187 | { |
188 | FILE *f = (FILE *)priv; | |
189 | ||
67ff2186 RH |
190 | fprintf(f, TARGET_FMT_lx"-"TARGET_FMT_lx" "TARGET_FMT_lx" %c%c%c\n", |
191 | start, end, end - start, | |
192 | ((prot & PAGE_READ) ? 'r' : '-'), | |
193 | ((prot & PAGE_WRITE) ? 'w' : '-'), | |
194 | ((prot & PAGE_EXEC) ? 'x' : '-')); | |
d941c086 RH |
195 | return 0; |
196 | } | |
197 | ||
198 | /* dump memory mappings */ | |
199 | void page_dump(FILE *f) | |
200 | { | |
201 | const int length = sizeof(target_ulong) * 2; | |
67ff2186 RH |
202 | |
203 | fprintf(f, "%-*s %-*s %-*s %s\n", | |
d941c086 RH |
204 | length, "start", length, "end", length, "size", "prot"); |
205 | walk_memory_regions(f, dump_region); | |
206 | } | |
207 | ||
208 | int page_get_flags(target_ulong address) | |
209 | { | |
67ff2186 | 210 | PageFlagsNode *p = pageflags_find(address, address); |
d941c086 | 211 | |
67ff2186 RH |
212 | /* |
213 | * See util/interval-tree.c re lockless lookups: no false positives but | |
214 | * there are false negatives. If we find nothing, retry with the mmap | |
215 | * lock acquired. | |
216 | */ | |
217 | if (p) { | |
218 | return p->flags; | |
219 | } | |
220 | if (have_mmap_lock()) { | |
d941c086 RH |
221 | return 0; |
222 | } | |
67ff2186 RH |
223 | |
224 | mmap_lock(); | |
225 | p = pageflags_find(address, address); | |
226 | mmap_unlock(); | |
227 | return p ? p->flags : 0; | |
228 | } | |
229 | ||
230 | /* A subroutine of page_set_flags: insert a new node for [start,last]. */ | |
231 | static void pageflags_create(target_ulong start, target_ulong last, int flags) | |
232 | { | |
233 | PageFlagsNode *p = g_new(PageFlagsNode, 1); | |
234 | ||
235 | p->itree.start = start; | |
236 | p->itree.last = last; | |
237 | p->flags = flags; | |
238 | interval_tree_insert(&p->itree, &pageflags_root); | |
239 | } | |
240 | ||
/*
 * A subroutine of page_set_flags: remove everything in [start,last].
 * Returns true if any removed range was executable, i.e. the caller
 * must invalidate cached translations for the range.
 */
static bool pageflags_unset(target_ulong start, target_ulong last)
{
    bool inval_tb = false;

    /* Loop: each iteration removes or trims one overlapping node. */
    while (true) {
        PageFlagsNode *p = pageflags_find(start, last);
        target_ulong p_last;

        if (!p) {
            break;
        }

        if (p->flags & PAGE_EXEC) {
            inval_tb = true;
        }

        /* Remove before mutating interval bounds; re-insert as needed. */
        interval_tree_remove(&p->itree, &pageflags_root);
        p_last = p->itree.last;

        if (p->itree.start < start) {
            /* Truncate the node from the end, or split out the middle. */
            p->itree.last = start - 1;
            interval_tree_insert(&p->itree, &pageflags_root);
            if (last < p_last) {
                /* Node extended past the range: recreate the tail. */
                pageflags_create(last + 1, p_last, p->flags);
                break;
            }
        } else if (p_last <= last) {
            /* Range completely covers node -- remove it. */
            g_free_rcu(p, rcu);
        } else {
            /* Truncate the node from the start. */
            p->itree.start = last + 1;
            interval_tree_insert(&p->itree, &pageflags_root);
            break;
        }
    }

    return inval_tb;
}
282 | ||
/*
 * A subroutine of page_set_flags: nothing overlaps [start,last],
 * but check adjacent mappings and maybe merge into a single range.
 */
static void pageflags_create_merge(target_ulong start, target_ulong last,
                                   int flags)
{
    PageFlagsNode *next = NULL, *prev = NULL;

    /* Detach an immediately-preceding neighbor with identical flags. */
    if (start > 0) {
        prev = pageflags_find(start - 1, start - 1);
        if (prev) {
            if (prev->flags == flags) {
                interval_tree_remove(&prev->itree, &pageflags_root);
            } else {
                prev = NULL;
            }
        }
    }
    /* Detach an immediately-following neighbor (guard address wrap). */
    if (last + 1 != 0) {
        next = pageflags_find(last + 1, last + 1);
        if (next) {
            if (next->flags == flags) {
                interval_tree_remove(&next->itree, &pageflags_root);
            } else {
                next = NULL;
            }
        }
    }

    /* Re-insert a single node covering prev + [start,last] + next. */
    if (prev) {
        if (next) {
            prev->itree.last = next->itree.last;
            g_free_rcu(next, rcu);
        } else {
            prev->itree.last = last;
        }
        interval_tree_insert(&prev->itree, &pageflags_root);
    } else if (next) {
        next->itree.start = start;
        interval_tree_insert(&next->itree, &pageflags_root);
    } else {
        pageflags_create(start, last, flags);
    }
}
328 | ||
/*
 * Allow the target to decide if PAGE_TARGET_[12] may be reset.
 * By default, they are not kept.
 */
#ifndef PAGE_TARGET_STICKY
#define PAGE_TARGET_STICKY  0
#endif
/* Flags preserved across mprotect; cleared only on a PAGE_RESET mapping
   (see the clear_flags computation in page_set_flags). */
#define PAGE_STICKY  (PAGE_ANON | PAGE_PASSTHROUGH | PAGE_TARGET_STICKY)
337 | ||
/*
 * A subroutine of page_set_flags: add flags to [start,last].
 *
 * For every page in the range, replace flags F with (F & ~clear_flags)
 * | set_flags, creating, splitting or merging interval nodes as needed.
 * Returns true if an executable range lost exec or gained write, i.e.
 * the caller must invalidate cached translations.
 */
static bool pageflags_set_clear(target_ulong start, target_ulong last,
                                int set_flags, int clear_flags)
{
    PageFlagsNode *p;
    target_ulong p_start, p_last;
    int p_flags, merge_flags;
    bool inval_tb = false;

 restart:
    p = pageflags_find(start, last);
    if (!p) {
        /* No overlap: a plain insert (with neighbor merging) suffices. */
        if (set_flags) {
            pageflags_create_merge(start, last, set_flags);
        }
        goto done;
    }

    p_start = p->itree.start;
    p_last = p->itree.last;
    p_flags = p->flags;
    /* Using mprotect on a page does not change sticky bits. */
    merge_flags = (p_flags & ~clear_flags) | set_flags;

    /*
     * Need to flush if an overlapping executable region
     * removes exec, or adds write.
     */
    if ((p_flags & PAGE_EXEC)
        && (!(merge_flags & PAGE_EXEC)
            || (merge_flags & ~p_flags & PAGE_WRITE))) {
        inval_tb = true;
    }

    /*
     * If there is an exact range match, update and return without
     * attempting to merge with adjacent regions.
     */
    if (start == p_start && last == p_last) {
        if (merge_flags) {
            p->flags = merge_flags;
        } else {
            interval_tree_remove(&p->itree, &pageflags_root);
            g_free_rcu(p, rcu);
        }
        goto done;
    }

    /*
     * If sticky bits affect the original mapping, then we must be more
     * careful about the existing intervals and the separate flags.
     */
    if (set_flags != merge_flags) {
        if (p_start < start) {
            /* Trim the node's head to end just before our range. */
            interval_tree_remove(&p->itree, &pageflags_root);
            p->itree.last = start - 1;
            interval_tree_insert(&p->itree, &pageflags_root);

            if (last < p_last) {
                /* Node straddles the range: middle + tail recreated. */
                if (merge_flags) {
                    pageflags_create(start, last, merge_flags);
                }
                pageflags_create(last + 1, p_last, p_flags);
            } else {
                if (merge_flags) {
                    pageflags_create(start, p_last, merge_flags);
                }
                if (p_last < last) {
                    /* Range extends past this node: process the rest. */
                    start = p_last + 1;
                    goto restart;
                }
            }
        } else {
            if (start < p_start && set_flags) {
                /* Uncovered head of the range gets the new flags only. */
                pageflags_create(start, p_start - 1, set_flags);
            }
            if (last < p_last) {
                /* Trim the node's tail to begin just after our range. */
                interval_tree_remove(&p->itree, &pageflags_root);
                p->itree.start = last + 1;
                interval_tree_insert(&p->itree, &pageflags_root);
                if (merge_flags) {
                    pageflags_create(start, last, merge_flags);
                }
            } else {
                if (merge_flags) {
                    p->flags = merge_flags;
                } else {
                    interval_tree_remove(&p->itree, &pageflags_root);
                    g_free_rcu(p, rcu);
                }
                if (p_last < last) {
                    start = p_last + 1;
                    goto restart;
                }
            }
        }
        goto done;
    }

    /* If flags are not changing for this range, incorporate it. */
    if (set_flags == p_flags) {
        if (start < p_start) {
            interval_tree_remove(&p->itree, &pageflags_root);
            p->itree.start = start;
            interval_tree_insert(&p->itree, &pageflags_root);
        }
        if (p_last < last) {
            start = p_last + 1;
            goto restart;
        }
        goto done;
    }

    /* Maybe split out head and/or tail ranges with the original flags. */
    interval_tree_remove(&p->itree, &pageflags_root);
    if (p_start < start) {
        p->itree.last = start - 1;
        interval_tree_insert(&p->itree, &pageflags_root);

        if (p_last < last) {
            goto restart;
        }
        if (last < p_last) {
            pageflags_create(last + 1, p_last, p_flags);
        }
    } else if (last < p_last) {
        p->itree.start = last + 1;
        interval_tree_insert(&p->itree, &pageflags_root);
    } else {
        /* Node entirely consumed by the range. */
        g_free_rcu(p, rcu);
        goto restart;
    }
    if (set_flags) {
        pageflags_create(start, last, set_flags);
    }

 done:
    return inval_tb;
}
477 | ||
/*
 * Modify the flags of a page and invalidate the code if necessary.
 * The flag PAGE_WRITE_ORG is positioned automatically depending
 * on PAGE_WRITE.  The mmap_lock should already be held.
 */
void page_set_flags(target_ulong start, target_ulong last, int flags)
{
    bool reset = false;
    bool inval_tb = false;

    /* This function should never be called with addresses outside the
       guest address space.  If this assert fires, it probably indicates
       a missing call to h2g_valid.  */
    assert(start <= last);
    assert(last <= GUEST_ADDR_MAX);
    /* Only set PAGE_ANON with new mappings. */
    assert(!(flags & PAGE_ANON) || (flags & PAGE_RESET));
    assert_memory_lock();

    /* Round outward to full target pages. */
    start &= TARGET_PAGE_MASK;
    last |= ~TARGET_PAGE_MASK;

    if (!(flags & PAGE_VALID)) {
        flags = 0;
    } else {
        reset = flags & PAGE_RESET;
        flags &= ~PAGE_RESET;
        /* Remember original writability so it can be restored later. */
        if (flags & PAGE_WRITE) {
            flags |= PAGE_WRITE_ORG;
        }
    }

    if (!flags || reset) {
        /* Unmap or fresh map: drop per-page target data and old flags. */
        page_reset_target_data(start, last);
        inval_tb |= pageflags_unset(start, last);
    }
    if (flags) {
        /* Sticky bits survive mprotect but not a PAGE_RESET mapping. */
        inval_tb |= pageflags_set_clear(start, last, flags,
                                        ~(reset ? 0 : PAGE_STICKY));
    }
    if (inval_tb) {
        tb_invalidate_phys_range(start, last);
    }
}
522 | ||
/*
 * Return true if every page of [start, start+len) has all of @flags set.
 * A PAGE_WRITE query additionally un-protects pages that were made
 * read-only to guard cached translations (PAGE_WRITE_ORG).
 */
bool page_check_range(target_ulong start, target_ulong len, int flags)
{
    target_ulong last;
    int locked;  /* tri-state: =0: unlocked, +1: global, -1: local */
    bool ret;

    if (len == 0) {
        return true;  /* trivial length */
    }

    last = start + len - 1;
    if (last < start) {
        return false; /* wrap around */
    }

    locked = have_mmap_lock();
    while (true) {
        PageFlagsNode *p = pageflags_find(start, last);
        int missing;

        if (!p) {
            if (!locked) {
                /*
                 * Lockless lookups have false negatives.
                 * Retry with the lock held.
                 */
                mmap_lock();
                locked = -1;
                p = pageflags_find(start, last);
            }
            if (!p) {
                ret = false; /* entire region invalid */
                break;
            }
        }
        if (start < p->itree.start) {
            ret = false; /* initial bytes invalid */
            break;
        }

        missing = flags & ~p->flags;
        if (missing & ~PAGE_WRITE) {
            ret = false; /* page doesn't match */
            break;
        }
        if (missing & PAGE_WRITE) {
            if (!(p->flags & PAGE_WRITE_ORG)) {
                ret = false; /* page not writable */
                break;
            }
            /* Asking about writable, but has been protected: undo. */
            if (!page_unprotect(start, 0)) {
                ret = false;
                break;
            }
            /* TODO: page_unprotect should take a range, not a single page. */
            if (last - start < TARGET_PAGE_SIZE) {
                ret = true; /* ok */
                break;
            }
            start += TARGET_PAGE_SIZE;
            continue;
        }

        if (last <= p->itree.last) {
            ret = true; /* ok */
            break;
        }
        /* Continue checking from the end of this node. */
        start = p->itree.last + 1;
    }

    /* Release the lock if acquired locally. */
    if (locked < 0) {
        mmap_unlock();
    }
    return ret;
}
600 | ||
/*
 * Return true if no page within [start, last] is mapped.
 * Requires the memory lock (see assert_memory_lock).
 */
bool page_check_range_empty(target_ulong start, target_ulong last)
{
    assert(last >= start);
    assert_memory_lock();
    return pageflags_find(start, last) == NULL;
}
607 | ||
f2bb7cf2 RH |
608 | target_ulong page_find_range_empty(target_ulong min, target_ulong max, |
609 | target_ulong len, target_ulong align) | |
610 | { | |
611 | target_ulong len_m1, align_m1; | |
612 | ||
613 | assert(min <= max); | |
614 | assert(max <= GUEST_ADDR_MAX); | |
615 | assert(len != 0); | |
616 | assert(is_power_of_2(align)); | |
617 | assert_memory_lock(); | |
618 | ||
619 | len_m1 = len - 1; | |
620 | align_m1 = align - 1; | |
621 | ||
622 | /* Iteratively narrow the search region. */ | |
623 | while (1) { | |
624 | PageFlagsNode *p; | |
625 | ||
626 | /* Align min and double-check there's enough space remaining. */ | |
627 | min = (min + align_m1) & ~align_m1; | |
628 | if (min > max) { | |
629 | return -1; | |
630 | } | |
631 | if (len_m1 > max - min) { | |
632 | return -1; | |
633 | } | |
634 | ||
635 | p = pageflags_find(min, min + len_m1); | |
636 | if (p == NULL) { | |
637 | /* Found! */ | |
638 | return min; | |
639 | } | |
640 | if (max <= p->itree.last) { | |
641 | /* Existing allocation fills the remainder of the search region. */ | |
642 | return -1; | |
643 | } | |
644 | /* Skip across existing allocation. */ | |
645 | min = p->itree.last + 1; | |
646 | } | |
647 | } | |
648 | ||
/*
 * Write-protect the host page containing @address so that subsequent
 * guest writes fault and invalidate cached translations (see
 * page_unprotect).  PAGE_WRITE is cleared from the flags tree;
 * PAGE_WRITE_ORG remains so writability can be restored.
 */
void page_protect(tb_page_addr_t address)
{
    PageFlagsNode *p;
    target_ulong start, last;
    int prot;

    assert_memory_lock();

    /* Protection granularity is the larger of host and target page. */
    if (qemu_host_page_size <= TARGET_PAGE_SIZE) {
        start = address & TARGET_PAGE_MASK;
        last = start + TARGET_PAGE_SIZE - 1;
    } else {
        start = address & qemu_host_page_mask;
        last = start + qemu_host_page_size - 1;
    }

    p = pageflags_find(start, last);
    if (!p) {
        return;
    }
    prot = p->flags;

    if (unlikely(p->itree.last < last)) {
        /* More than one protection region covers the one host page. */
        assert(TARGET_PAGE_SIZE < qemu_host_page_size);
        while ((p = pageflags_next(p, start, last)) != NULL) {
            prot |= p->flags;
        }
    }

    if (prot & PAGE_WRITE) {
        pageflags_set_clear(start, last, 0, PAGE_WRITE);
        mprotect(g2h_untagged(start), qemu_host_page_size,
                 prot & (PAGE_READ | PAGE_EXEC) ? PROT_READ : PROT_NONE);
    }
}
685 | ||
/*
 * Called from signal handler: invalidate the code and unprotect the
 * page. Return 0 if the fault was not handled, 1 if it was handled,
 * and 2 if it was handled but the caller must cause the TB to be
 * immediately exited. (We can only return 2 if the 'pc' argument is
 * non-zero.)
 */
int page_unprotect(target_ulong address, uintptr_t pc)
{
    PageFlagsNode *p;
    bool current_tb_invalidated;

    /*
     * Technically this isn't safe inside a signal handler.  However we
     * know this only ever happens in a synchronous SEGV handler, so in
     * practice it seems to be ok.
     */
    mmap_lock();

    p = pageflags_find(address, address);

    /* If this address was not really writable, nothing to do. */
    if (!p || !(p->flags & PAGE_WRITE_ORG)) {
        mmap_unlock();
        return 0;
    }

    current_tb_invalidated = false;
    if (p->flags & PAGE_WRITE) {
        /*
         * If the page is actually marked WRITE then assume this is because
         * this thread raced with another one which got here first and
         * set the page to PAGE_WRITE and did the TB invalidate for us.
         */
#ifdef TARGET_HAS_PRECISE_SMC
        TranslationBlock *current_tb = tcg_tb_lookup(pc);
        if (current_tb) {
            current_tb_invalidated = tb_cflags(current_tb) & CF_INVALID;
        }
#endif
    } else {
        target_ulong start, len, i;
        int prot;

        if (qemu_host_page_size <= TARGET_PAGE_SIZE) {
            /* One target page: restore PAGE_WRITE and flush its TBs. */
            start = address & TARGET_PAGE_MASK;
            len = TARGET_PAGE_SIZE;
            prot = p->flags | PAGE_WRITE;
            pageflags_set_clear(start, start + len - 1, PAGE_WRITE, 0);
            current_tb_invalidated = tb_invalidate_phys_page_unwind(start, pc);
        } else {
            /*
             * Host page is larger than the target page: process every
             * target page within it, accumulating the union of flags
             * for the final host mprotect.
             */
            start = address & qemu_host_page_mask;
            len = qemu_host_page_size;
            prot = 0;

            for (i = 0; i < len; i += TARGET_PAGE_SIZE) {
                target_ulong addr = start + i;

                p = pageflags_find(addr, addr);
                if (p) {
                    prot |= p->flags;
                    if (p->flags & PAGE_WRITE_ORG) {
                        prot |= PAGE_WRITE;
                        pageflags_set_clear(addr, addr + TARGET_PAGE_SIZE - 1,
                                            PAGE_WRITE, 0);
                    }
                }
                /*
                 * Since the content will be modified, we must invalidate
                 * the corresponding translated code.
                 */
                current_tb_invalidated |=
                    tb_invalidate_phys_page_unwind(addr, pc);
            }
        }
        if (prot & PAGE_EXEC) {
            prot = (prot & ~PAGE_EXEC) | PAGE_READ;
        }
        mprotect((void *)g2h_untagged(start), len, prot & PAGE_BITS);
    }
    mmap_unlock();

    /* If current TB was invalidated return to main loop */
    return current_tb_invalidated ? 2 : 1;
}
771 | ||
/*
 * Common body of probe_access* for user-only: check that @addr is valid
 * for @access_type.  Returns 0 on success, TLB_MMIO if plugin memory
 * callbacks must observe the access, TLB_INVALID_MASK if @nonfault and
 * the access would fault; otherwise raises SIGSEGV via
 * cpu_loop_exit_sigsegv (does not return).
 */
static int probe_access_internal(CPUArchState *env, vaddr addr,
                                 int fault_size, MMUAccessType access_type,
                                 bool nonfault, uintptr_t ra)
{
    int acc_flag;
    bool maperr;

    switch (access_type) {
    case MMU_DATA_STORE:
        /* WRITE_ORG: accept pages temporarily protected for SMC. */
        acc_flag = PAGE_WRITE_ORG;
        break;
    case MMU_DATA_LOAD:
        acc_flag = PAGE_READ;
        break;
    case MMU_INST_FETCH:
        acc_flag = PAGE_EXEC;
        break;
    default:
        g_assert_not_reached();
    }

    if (guest_addr_valid_untagged(addr)) {
        int page_flags = page_get_flags(addr);
        if (page_flags & acc_flag) {
            if ((acc_flag == PAGE_READ || acc_flag == PAGE_WRITE)
                && cpu_plugin_mem_cbs_enabled(env_cpu(env))) {
                return TLB_MMIO;
            }
            return 0; /* success */
        }
        /* Distinguish unmapped (maperr) from wrong-permission faults. */
        maperr = !(page_flags & PAGE_VALID);
    } else {
        maperr = true;
    }

    if (nonfault) {
        return TLB_INVALID_MASK;
    }

    cpu_loop_exit_sigsegv(env_cpu(env), addr, access_type, maperr, ra);
}
813 | ||
4f8f4127 | 814 | int probe_access_flags(CPUArchState *env, vaddr addr, int size, |
069cfe77 RH |
815 | MMUAccessType access_type, int mmu_idx, |
816 | bool nonfault, void **phost, uintptr_t ra) | |
817 | { | |
818 | int flags; | |
819 | ||
1770b2f2 DHB |
820 | g_assert(-(addr | TARGET_PAGE_MASK) >= size); |
821 | flags = probe_access_internal(env, addr, size, access_type, nonfault, ra); | |
6d03226b | 822 | *phost = (flags & TLB_INVALID_MASK) ? NULL : g2h(env_cpu(env), addr); |
069cfe77 RH |
823 | return flags; |
824 | } | |
825 | ||
4f8f4127 | 826 | void *probe_access(CPUArchState *env, vaddr addr, int size, |
069cfe77 RH |
827 | MMUAccessType access_type, int mmu_idx, uintptr_t ra) |
828 | { | |
829 | int flags; | |
830 | ||
831 | g_assert(-(addr | TARGET_PAGE_MASK) >= size); | |
832 | flags = probe_access_internal(env, addr, size, access_type, false, ra); | |
6d03226b | 833 | g_assert((flags & ~TLB_MMIO) == 0); |
fef39ccd | 834 | |
3e8f1628 | 835 | return size ? g2h(env_cpu(env), addr) : NULL; |
59e96ac6 DH |
836 | } |
837 | ||
4f8f4127 | 838 | tb_page_addr_t get_page_addr_code_hostp(CPUArchState *env, vaddr addr, |
cdf71308 RH |
839 | void **hostp) |
840 | { | |
841 | int flags; | |
842 | ||
843 | flags = probe_access_internal(env, addr, 1, MMU_INST_FETCH, false, 0); | |
844 | g_assert(flags == 0); | |
845 | ||
846 | if (hostp) { | |
847 | *hostp = g2h_untagged(addr); | |
848 | } | |
849 | return addr; | |
850 | } | |
851 | ||
#ifdef TARGET_PAGE_DATA_SIZE
/*
 * Allocate chunks of target data together.  For the only current user,
 * if we allocate one hunk per page, we have overhead of 40/128 or 40%.
 * Therefore, allocate memory for 64 pages at a time for overhead < 1%.
 */
#define TPD_PAGES  64
#define TBD_MASK   (TARGET_PAGE_MASK * TPD_PAGES)

typedef struct TargetPageDataNode {
    struct rcu_head rcu;      /* deferred free for lockless readers */
    IntervalTreeNode itree;   /* covers TPD_PAGES worth of guest pages */
    /* One TARGET_PAGE_DATA_SIZE record per page in the chunk. */
    char data[TPD_PAGES][TARGET_PAGE_DATA_SIZE] __attribute__((aligned));
} TargetPageDataNode;

static IntervalTreeRoot targetdata_root;
868 | ||
/*
 * Discard per-page target data for [start, last]: free chunks entirely
 * inside the range, and zero the overlapping portion of chunks that
 * straddle it.  Requires the memory lock.
 */
void page_reset_target_data(target_ulong start, target_ulong last)
{
    IntervalTreeNode *n, *next;

    assert_memory_lock();

    /* Round outward to full target pages. */
    start &= TARGET_PAGE_MASK;
    last |= ~TARGET_PAGE_MASK;

    /* Fetch @next before @n may be removed from the tree. */
    for (n = interval_tree_iter_first(&targetdata_root, start, last),
         next = n ? interval_tree_iter_next(n, start, last) : NULL;
         n != NULL;
         n = next,
         next = next ? interval_tree_iter_next(n, start, last) : NULL) {
        target_ulong n_start, n_last, p_ofs, p_len;
        TargetPageDataNode *t = container_of(n, TargetPageDataNode, itree);

        if (n->start >= start && n->last <= last) {
            /* Chunk fully covered: remove and free it. */
            interval_tree_remove(n, &targetdata_root);
            g_free_rcu(t, rcu);
            continue;
        }

        /* Partial overlap: zero only the covered pages. */
        if (n->start < start) {
            n_start = start;
            p_ofs = (start - n->start) >> TARGET_PAGE_BITS;
        } else {
            n_start = n->start;
            p_ofs = 0;
        }
        n_last = MIN(last, n->last);
        p_len = (n_last + 1 - n_start) >> TARGET_PAGE_BITS;

        memset(t->data[p_ofs], 0, p_len * TARGET_PAGE_DATA_SIZE);
    }
}
905 | ||
0fe61084 RH |
/*
 * Return the target data slot for the page containing @address,
 * allocating the covering TargetPageDataNode (zero-initialized) on
 * first use.  May be called without the mmap lock; the lock is taken
 * internally only when an allocation might be required.
 */
void *page_get_target_data(target_ulong address)
{
    IntervalTreeNode *n;
    TargetPageDataNode *t;
    target_ulong page, region;

    page = address & TARGET_PAGE_MASK;   /* page containing address */
    region = address & TBD_MASK;         /* TPD_PAGES-sized region base */

    n = interval_tree_iter_first(&targetdata_root, page, page);
    if (!n) {
        /*
         * See util/interval-tree.c re lockless lookups: no false positives
         * but there are false negatives.  If we find nothing, retry with
         * the mmap lock acquired.  We also need the lock for the
         * allocation + insert.
         */
        mmap_lock();
        n = interval_tree_iter_first(&targetdata_root, page, page);
        if (!n) {
            t = g_new0(TargetPageDataNode, 1);
            n = &t->itree;
            n->start = region;
            n->last = region | ~TBD_MASK;
            interval_tree_insert(n, &targetdata_root);
        }
        mmap_unlock();
    }

    t = container_of(n, TargetPageDataNode, itree);
    return t->data[(page - region) >> TARGET_PAGE_BITS];
}
#else
/* Without TARGET_PAGE_DATA_SIZE there is no per-page data to reset. */
void page_reset_target_data(target_ulong start, target_ulong last) { }
#endif /* TARGET_PAGE_DATA_SIZE */
0fe61084 | 941 | |
a411d296 PMD |
942 | /* The softmmu versions of these helpers are in cputlb.c. */ |
943 | ||
b0326eb9 | 944 | static void *cpu_mmu_lookup(CPUArchState *env, vaddr addr, |
de95016d | 945 | MemOp mop, uintptr_t ra, MMUAccessType type) |
b9e60257 | 946 | { |
9395cd0a | 947 | int a_bits = get_alignment_bits(mop); |
f83bcecb | 948 | void *ret; |
b9e60257 | 949 | |
9395cd0a RH |
950 | /* Enforce guest required alignment. */ |
951 | if (unlikely(addr & ((1 << a_bits) - 1))) { | |
952 | cpu_loop_exit_sigbus(env_cpu(env), addr, type, ra); | |
953 | } | |
b9e60257 | 954 | |
f83bcecb RH |
955 | ret = g2h(env_cpu(env), addr); |
956 | set_helper_retaddr(ra); | |
ed4cfbcd RH |
957 | return ret; |
958 | } | |
959 | ||
cdfac37b RH |
960 | #include "ldst_atomicity.c.inc" |
961 | ||
de95016d RH |
962 | static uint8_t do_ld1_mmu(CPUArchState *env, abi_ptr addr, |
963 | MemOp mop, uintptr_t ra) | |
ed4cfbcd | 964 | { |
f83bcecb RH |
965 | void *haddr; |
966 | uint8_t ret; | |
ed4cfbcd | 967 | |
de95016d | 968 | tcg_debug_assert((mop & MO_SIZE) == MO_8); |
f86e8f3d | 969 | cpu_req_mo(TCG_MO_LD_LD | TCG_MO_ST_LD); |
de95016d | 970 | haddr = cpu_mmu_lookup(env, addr, mop, ra, MMU_DATA_LOAD); |
f83bcecb | 971 | ret = ldub_p(haddr); |
ed4cfbcd | 972 | clear_helper_retaddr(); |
de95016d RH |
973 | return ret; |
974 | } | |
975 | ||
24e46e6c | 976 | tcg_target_ulong helper_ldub_mmu(CPUArchState *env, uint64_t addr, |
de95016d RH |
977 | MemOpIdx oi, uintptr_t ra) |
978 | { | |
979 | return do_ld1_mmu(env, addr, get_memop(oi), ra); | |
980 | } | |
981 | ||
24e46e6c | 982 | tcg_target_ulong helper_ldsb_mmu(CPUArchState *env, uint64_t addr, |
de95016d RH |
983 | MemOpIdx oi, uintptr_t ra) |
984 | { | |
985 | return (int8_t)do_ld1_mmu(env, addr, get_memop(oi), ra); | |
986 | } | |
987 | ||
988 | uint8_t cpu_ldb_mmu(CPUArchState *env, abi_ptr addr, | |
989 | MemOpIdx oi, uintptr_t ra) | |
990 | { | |
991 | uint8_t ret = do_ld1_mmu(env, addr, get_memop(oi), ra); | |
f83bcecb | 992 | qemu_plugin_vcpu_mem_cb(env_cpu(env), addr, oi, QEMU_PLUGIN_MEM_R); |
ed4cfbcd RH |
993 | return ret; |
994 | } | |
995 | ||
fbea7a40 RH |
996 | static uint16_t do_ld2_mmu(CPUArchState *env, abi_ptr addr, |
997 | MemOp mop, uintptr_t ra) | |
ed4cfbcd | 998 | { |
f83bcecb RH |
999 | void *haddr; |
1000 | uint16_t ret; | |
ed4cfbcd | 1001 | |
de95016d | 1002 | tcg_debug_assert((mop & MO_SIZE) == MO_16); |
f86e8f3d | 1003 | cpu_req_mo(TCG_MO_LD_LD | TCG_MO_ST_LD); |
de95016d RH |
1004 | haddr = cpu_mmu_lookup(env, addr, mop, ra, MMU_DATA_LOAD); |
1005 | ret = load_atom_2(env, ra, haddr, mop); | |
ed4cfbcd | 1006 | clear_helper_retaddr(); |
de95016d RH |
1007 | |
1008 | if (mop & MO_BSWAP) { | |
1009 | ret = bswap16(ret); | |
1010 | } | |
1011 | return ret; | |
1012 | } | |
1013 | ||
fbea7a40 | 1014 | tcg_target_ulong helper_lduw_mmu(CPUArchState *env, uint64_t addr, |
de95016d RH |
1015 | MemOpIdx oi, uintptr_t ra) |
1016 | { | |
fbea7a40 | 1017 | return do_ld2_mmu(env, addr, get_memop(oi), ra); |
de95016d RH |
1018 | } |
1019 | ||
fbea7a40 RH |
1020 | tcg_target_ulong helper_ldsw_mmu(CPUArchState *env, uint64_t addr, |
1021 | MemOpIdx oi, uintptr_t ra) | |
de95016d | 1022 | { |
fbea7a40 | 1023 | return (int16_t)do_ld2_mmu(env, addr, get_memop(oi), ra); |
ed4cfbcd RH |
1024 | } |
1025 | ||
fbea7a40 RH |
1026 | uint16_t cpu_ldw_mmu(CPUArchState *env, abi_ptr addr, |
1027 | MemOpIdx oi, uintptr_t ra) | |
de95016d | 1028 | { |
fbea7a40 | 1029 | uint16_t ret = do_ld2_mmu(env, addr, get_memop(oi), ra); |
de95016d | 1030 | qemu_plugin_vcpu_mem_cb(env_cpu(env), addr, oi, QEMU_PLUGIN_MEM_R); |
fbea7a40 | 1031 | return ret; |
de95016d RH |
1032 | } |
1033 | ||
fbea7a40 RH |
1034 | static uint32_t do_ld4_mmu(CPUArchState *env, abi_ptr addr, |
1035 | MemOp mop, uintptr_t ra) | |
ed4cfbcd | 1036 | { |
f83bcecb | 1037 | void *haddr; |
ed4cfbcd RH |
1038 | uint32_t ret; |
1039 | ||
de95016d | 1040 | tcg_debug_assert((mop & MO_SIZE) == MO_32); |
f86e8f3d | 1041 | cpu_req_mo(TCG_MO_LD_LD | TCG_MO_ST_LD); |
de95016d RH |
1042 | haddr = cpu_mmu_lookup(env, addr, mop, ra, MMU_DATA_LOAD); |
1043 | ret = load_atom_4(env, ra, haddr, mop); | |
ed4cfbcd | 1044 | clear_helper_retaddr(); |
de95016d RH |
1045 | |
1046 | if (mop & MO_BSWAP) { | |
1047 | ret = bswap32(ret); | |
1048 | } | |
1049 | return ret; | |
1050 | } | |
1051 | ||
fbea7a40 | 1052 | tcg_target_ulong helper_ldul_mmu(CPUArchState *env, uint64_t addr, |
de95016d RH |
1053 | MemOpIdx oi, uintptr_t ra) |
1054 | { | |
fbea7a40 | 1055 | return do_ld4_mmu(env, addr, get_memop(oi), ra); |
de95016d RH |
1056 | } |
1057 | ||
fbea7a40 RH |
1058 | tcg_target_ulong helper_ldsl_mmu(CPUArchState *env, uint64_t addr, |
1059 | MemOpIdx oi, uintptr_t ra) | |
de95016d | 1060 | { |
fbea7a40 | 1061 | return (int32_t)do_ld4_mmu(env, addr, get_memop(oi), ra); |
ed4cfbcd RH |
1062 | } |
1063 | ||
fbea7a40 RH |
1064 | uint32_t cpu_ldl_mmu(CPUArchState *env, abi_ptr addr, |
1065 | MemOpIdx oi, uintptr_t ra) | |
de95016d | 1066 | { |
fbea7a40 | 1067 | uint32_t ret = do_ld4_mmu(env, addr, get_memop(oi), ra); |
de95016d | 1068 | qemu_plugin_vcpu_mem_cb(env_cpu(env), addr, oi, QEMU_PLUGIN_MEM_R); |
fbea7a40 | 1069 | return ret; |
de95016d RH |
1070 | } |
1071 | ||
fbea7a40 RH |
1072 | static uint64_t do_ld8_mmu(CPUArchState *env, abi_ptr addr, |
1073 | MemOp mop, uintptr_t ra) | |
ed4cfbcd | 1074 | { |
f83bcecb | 1075 | void *haddr; |
ed4cfbcd RH |
1076 | uint64_t ret; |
1077 | ||
de95016d | 1078 | tcg_debug_assert((mop & MO_SIZE) == MO_64); |
f86e8f3d | 1079 | cpu_req_mo(TCG_MO_LD_LD | TCG_MO_ST_LD); |
de95016d RH |
1080 | haddr = cpu_mmu_lookup(env, addr, mop, ra, MMU_DATA_LOAD); |
1081 | ret = load_atom_8(env, ra, haddr, mop); | |
b9e60257 | 1082 | clear_helper_retaddr(); |
b9e60257 | 1083 | |
de95016d RH |
1084 | if (mop & MO_BSWAP) { |
1085 | ret = bswap64(ret); | |
1086 | } | |
1087 | return ret; | |
b9e60257 RH |
1088 | } |
1089 | ||
fbea7a40 | 1090 | uint64_t helper_ldq_mmu(CPUArchState *env, uint64_t addr, |
f83bcecb | 1091 | MemOpIdx oi, uintptr_t ra) |
b9e60257 | 1092 | { |
fbea7a40 | 1093 | return do_ld8_mmu(env, addr, get_memop(oi), ra); |
b9e60257 RH |
1094 | } |
1095 | ||
fbea7a40 RH |
1096 | uint64_t cpu_ldq_mmu(CPUArchState *env, abi_ptr addr, |
1097 | MemOpIdx oi, uintptr_t ra) | |
b9e60257 | 1098 | { |
fbea7a40 | 1099 | uint64_t ret = do_ld8_mmu(env, addr, get_memop(oi), ra); |
f83bcecb | 1100 | qemu_plugin_vcpu_mem_cb(env_cpu(env), addr, oi, QEMU_PLUGIN_MEM_R); |
fbea7a40 | 1101 | return ret; |
ed4cfbcd RH |
1102 | } |
1103 | ||
fbea7a40 RH |
1104 | static Int128 do_ld16_mmu(CPUArchState *env, abi_ptr addr, |
1105 | MemOp mop, uintptr_t ra) | |
cb48f365 RH |
1106 | { |
1107 | void *haddr; | |
1108 | Int128 ret; | |
1109 | ||
35c653c4 | 1110 | tcg_debug_assert((mop & MO_SIZE) == MO_128); |
f86e8f3d | 1111 | cpu_req_mo(TCG_MO_LD_LD | TCG_MO_ST_LD); |
35c653c4 RH |
1112 | haddr = cpu_mmu_lookup(env, addr, mop, ra, MMU_DATA_LOAD); |
1113 | ret = load_atom_16(env, ra, haddr, mop); | |
cb48f365 | 1114 | clear_helper_retaddr(); |
35c653c4 RH |
1115 | |
1116 | if (mop & MO_BSWAP) { | |
1117 | ret = bswap128(ret); | |
1118 | } | |
1119 | return ret; | |
1120 | } | |
1121 | ||
fbea7a40 RH |
1122 | Int128 helper_ld16_mmu(CPUArchState *env, uint64_t addr, |
1123 | MemOpIdx oi, uintptr_t ra) | |
35c653c4 | 1124 | { |
fbea7a40 | 1125 | return do_ld16_mmu(env, addr, get_memop(oi), ra); |
35c653c4 RH |
1126 | } |
1127 | ||
fbea7a40 | 1128 | Int128 helper_ld_i128(CPUArchState *env, uint64_t addr, MemOpIdx oi) |
35c653c4 | 1129 | { |
fbea7a40 | 1130 | return helper_ld16_mmu(env, addr, oi, GETPC()); |
cb48f365 RH |
1131 | } |
1132 | ||
fbea7a40 RH |
1133 | Int128 cpu_ld16_mmu(CPUArchState *env, abi_ptr addr, |
1134 | MemOpIdx oi, uintptr_t ra) | |
cb48f365 | 1135 | { |
fbea7a40 | 1136 | Int128 ret = do_ld16_mmu(env, addr, get_memop(oi), ra); |
cb48f365 | 1137 | qemu_plugin_vcpu_mem_cb(env_cpu(env), addr, oi, QEMU_PLUGIN_MEM_R); |
cb48f365 RH |
1138 | return ret; |
1139 | } | |
1140 | ||
de95016d RH |
1141 | static void do_st1_mmu(CPUArchState *env, abi_ptr addr, uint8_t val, |
1142 | MemOp mop, uintptr_t ra) | |
ed4cfbcd | 1143 | { |
f83bcecb | 1144 | void *haddr; |
ed4cfbcd | 1145 | |
de95016d | 1146 | tcg_debug_assert((mop & MO_SIZE) == MO_8); |
f86e8f3d | 1147 | cpu_req_mo(TCG_MO_LD_ST | TCG_MO_ST_ST); |
de95016d | 1148 | haddr = cpu_mmu_lookup(env, addr, mop, ra, MMU_DATA_STORE); |
f83bcecb RH |
1149 | stb_p(haddr, val); |
1150 | clear_helper_retaddr(); | |
ed4cfbcd RH |
1151 | } |
1152 | ||
24e46e6c | 1153 | void helper_stb_mmu(CPUArchState *env, uint64_t addr, uint32_t val, |
f83bcecb | 1154 | MemOpIdx oi, uintptr_t ra) |
ed4cfbcd | 1155 | { |
de95016d RH |
1156 | do_st1_mmu(env, addr, val, get_memop(oi), ra); |
1157 | } | |
ed4cfbcd | 1158 | |
de95016d RH |
1159 | void cpu_stb_mmu(CPUArchState *env, abi_ptr addr, uint8_t val, |
1160 | MemOpIdx oi, uintptr_t ra) | |
1161 | { | |
1162 | do_st1_mmu(env, addr, val, get_memop(oi), ra); | |
f83bcecb | 1163 | qemu_plugin_vcpu_mem_cb(env_cpu(env), addr, oi, QEMU_PLUGIN_MEM_W); |
b9e60257 RH |
1164 | } |
1165 | ||
fbea7a40 RH |
1166 | static void do_st2_mmu(CPUArchState *env, abi_ptr addr, uint16_t val, |
1167 | MemOp mop, uintptr_t ra) | |
b9e60257 | 1168 | { |
f83bcecb | 1169 | void *haddr; |
b9e60257 | 1170 | |
de95016d | 1171 | tcg_debug_assert((mop & MO_SIZE) == MO_16); |
f86e8f3d | 1172 | cpu_req_mo(TCG_MO_LD_ST | TCG_MO_ST_ST); |
de95016d | 1173 | haddr = cpu_mmu_lookup(env, addr, mop, ra, MMU_DATA_STORE); |
b9e60257 | 1174 | |
de95016d RH |
1175 | if (mop & MO_BSWAP) { |
1176 | val = bswap16(val); | |
1177 | } | |
fbea7a40 RH |
1178 | store_atom_2(env, ra, haddr, mop, val); |
1179 | clear_helper_retaddr(); | |
de95016d RH |
1180 | } |
1181 | ||
fbea7a40 | 1182 | void helper_stw_mmu(CPUArchState *env, uint64_t addr, uint32_t val, |
de95016d RH |
1183 | MemOpIdx oi, uintptr_t ra) |
1184 | { | |
fbea7a40 | 1185 | do_st2_mmu(env, addr, val, get_memop(oi), ra); |
ed4cfbcd RH |
1186 | } |
1187 | ||
fbea7a40 | 1188 | void cpu_stw_mmu(CPUArchState *env, abi_ptr addr, uint16_t val, |
f83bcecb | 1189 | MemOpIdx oi, uintptr_t ra) |
de95016d | 1190 | { |
fbea7a40 | 1191 | do_st2_mmu(env, addr, val, get_memop(oi), ra); |
de95016d RH |
1192 | qemu_plugin_vcpu_mem_cb(env_cpu(env), addr, oi, QEMU_PLUGIN_MEM_W); |
1193 | } | |
1194 | ||
fbea7a40 RH |
1195 | static void do_st4_mmu(CPUArchState *env, abi_ptr addr, uint32_t val, |
1196 | MemOp mop, uintptr_t ra) | |
ed4cfbcd | 1197 | { |
f83bcecb | 1198 | void *haddr; |
ed4cfbcd | 1199 | |
de95016d | 1200 | tcg_debug_assert((mop & MO_SIZE) == MO_32); |
f86e8f3d | 1201 | cpu_req_mo(TCG_MO_LD_ST | TCG_MO_ST_ST); |
de95016d | 1202 | haddr = cpu_mmu_lookup(env, addr, mop, ra, MMU_DATA_STORE); |
de95016d RH |
1203 | |
1204 | if (mop & MO_BSWAP) { | |
1205 | val = bswap32(val); | |
1206 | } | |
fbea7a40 RH |
1207 | store_atom_4(env, ra, haddr, mop, val); |
1208 | clear_helper_retaddr(); | |
de95016d RH |
1209 | } |
1210 | ||
fbea7a40 | 1211 | void helper_stl_mmu(CPUArchState *env, uint64_t addr, uint32_t val, |
de95016d RH |
1212 | MemOpIdx oi, uintptr_t ra) |
1213 | { | |
fbea7a40 | 1214 | do_st4_mmu(env, addr, val, get_memop(oi), ra); |
b9e60257 RH |
1215 | } |
1216 | ||
fbea7a40 RH |
1217 | void cpu_stl_mmu(CPUArchState *env, abi_ptr addr, uint32_t val, |
1218 | MemOpIdx oi, uintptr_t ra) | |
de95016d | 1219 | { |
fbea7a40 | 1220 | do_st4_mmu(env, addr, val, get_memop(oi), ra); |
de95016d RH |
1221 | qemu_plugin_vcpu_mem_cb(env_cpu(env), addr, oi, QEMU_PLUGIN_MEM_W); |
1222 | } | |
1223 | ||
fbea7a40 RH |
1224 | static void do_st8_mmu(CPUArchState *env, abi_ptr addr, uint64_t val, |
1225 | MemOp mop, uintptr_t ra) | |
b9e60257 | 1226 | { |
f83bcecb | 1227 | void *haddr; |
b9e60257 | 1228 | |
de95016d | 1229 | tcg_debug_assert((mop & MO_SIZE) == MO_64); |
f86e8f3d | 1230 | cpu_req_mo(TCG_MO_LD_ST | TCG_MO_ST_ST); |
de95016d | 1231 | haddr = cpu_mmu_lookup(env, addr, mop, ra, MMU_DATA_STORE); |
de95016d RH |
1232 | |
1233 | if (mop & MO_BSWAP) { | |
1234 | val = bswap64(val); | |
1235 | } | |
fbea7a40 RH |
1236 | store_atom_8(env, ra, haddr, mop, val); |
1237 | clear_helper_retaddr(); | |
de95016d RH |
1238 | } |
1239 | ||
fbea7a40 | 1240 | void helper_stq_mmu(CPUArchState *env, uint64_t addr, uint64_t val, |
de95016d RH |
1241 | MemOpIdx oi, uintptr_t ra) |
1242 | { | |
fbea7a40 | 1243 | do_st8_mmu(env, addr, val, get_memop(oi), ra); |
b9e60257 RH |
1244 | } |
1245 | ||
fbea7a40 | 1246 | void cpu_stq_mmu(CPUArchState *env, abi_ptr addr, uint64_t val, |
f83bcecb | 1247 | MemOpIdx oi, uintptr_t ra) |
ed4cfbcd | 1248 | { |
fbea7a40 | 1249 | do_st8_mmu(env, addr, val, get_memop(oi), ra); |
f83bcecb | 1250 | qemu_plugin_vcpu_mem_cb(env_cpu(env), addr, oi, QEMU_PLUGIN_MEM_W); |
ed4cfbcd RH |
1251 | } |
1252 | ||
fbea7a40 RH |
1253 | static void do_st16_mmu(CPUArchState *env, abi_ptr addr, Int128 val, |
1254 | MemOp mop, uintptr_t ra) | |
35c653c4 RH |
1255 | { |
1256 | void *haddr; | |
1257 | ||
1258 | tcg_debug_assert((mop & MO_SIZE) == MO_128); | |
f86e8f3d | 1259 | cpu_req_mo(TCG_MO_LD_ST | TCG_MO_ST_ST); |
35c653c4 | 1260 | haddr = cpu_mmu_lookup(env, addr, mop, ra, MMU_DATA_STORE); |
fbea7a40 RH |
1261 | |
1262 | if (mop & MO_BSWAP) { | |
1263 | val = bswap128(val); | |
1264 | } | |
35c653c4 RH |
1265 | store_atom_16(env, ra, haddr, mop, val); |
1266 | clear_helper_retaddr(); | |
1267 | } | |
1268 | ||
24e46e6c | 1269 | void helper_st16_mmu(CPUArchState *env, uint64_t addr, Int128 val, |
35c653c4 RH |
1270 | MemOpIdx oi, uintptr_t ra) |
1271 | { | |
fbea7a40 | 1272 | do_st16_mmu(env, addr, val, get_memop(oi), ra); |
35c653c4 RH |
1273 | } |
1274 | ||
e570597a | 1275 | void helper_st_i128(CPUArchState *env, uint64_t addr, Int128 val, MemOpIdx oi) |
35c653c4 RH |
1276 | { |
1277 | helper_st16_mmu(env, addr, val, oi, GETPC()); | |
1278 | } | |
1279 | ||
fbea7a40 RH |
1280 | void cpu_st16_mmu(CPUArchState *env, abi_ptr addr, |
1281 | Int128 val, MemOpIdx oi, uintptr_t ra) | |
cb48f365 | 1282 | { |
fbea7a40 | 1283 | do_st16_mmu(env, addr, val, get_memop(oi), ra); |
cb48f365 RH |
1284 | qemu_plugin_vcpu_mem_cb(env_cpu(env), addr, oi, QEMU_PLUGIN_MEM_W); |
1285 | } | |
1286 | ||
ed4cfbcd RH |
1287 | uint32_t cpu_ldub_code(CPUArchState *env, abi_ptr ptr) |
1288 | { | |
1289 | uint32_t ret; | |
1290 | ||
1291 | set_helper_retaddr(1); | |
3e8f1628 | 1292 | ret = ldub_p(g2h_untagged(ptr)); |
ed4cfbcd RH |
1293 | clear_helper_retaddr(); |
1294 | return ret; | |
1295 | } | |
1296 | ||
1297 | uint32_t cpu_lduw_code(CPUArchState *env, abi_ptr ptr) | |
1298 | { | |
1299 | uint32_t ret; | |
1300 | ||
1301 | set_helper_retaddr(1); | |
3e8f1628 | 1302 | ret = lduw_p(g2h_untagged(ptr)); |
ed4cfbcd RH |
1303 | clear_helper_retaddr(); |
1304 | return ret; | |
1305 | } | |
1306 | ||
1307 | uint32_t cpu_ldl_code(CPUArchState *env, abi_ptr ptr) | |
1308 | { | |
1309 | uint32_t ret; | |
1310 | ||
1311 | set_helper_retaddr(1); | |
3e8f1628 | 1312 | ret = ldl_p(g2h_untagged(ptr)); |
ed4cfbcd RH |
1313 | clear_helper_retaddr(); |
1314 | return ret; | |
1315 | } | |
1316 | ||
1317 | uint64_t cpu_ldq_code(CPUArchState *env, abi_ptr ptr) | |
1318 | { | |
1319 | uint64_t ret; | |
1320 | ||
1321 | set_helper_retaddr(1); | |
3e8f1628 | 1322 | ret = ldq_p(g2h_untagged(ptr)); |
ed4cfbcd RH |
1323 | clear_helper_retaddr(); |
1324 | return ret; | |
1325 | } | |
1326 | ||
28990626 RH |
1327 | uint8_t cpu_ldb_code_mmu(CPUArchState *env, abi_ptr addr, |
1328 | MemOpIdx oi, uintptr_t ra) | |
1329 | { | |
1330 | void *haddr; | |
1331 | uint8_t ret; | |
1332 | ||
1333 | haddr = cpu_mmu_lookup(env, addr, oi, ra, MMU_INST_FETCH); | |
1334 | ret = ldub_p(haddr); | |
1335 | clear_helper_retaddr(); | |
1336 | return ret; | |
1337 | } | |
1338 | ||
1339 | uint16_t cpu_ldw_code_mmu(CPUArchState *env, abi_ptr addr, | |
1340 | MemOpIdx oi, uintptr_t ra) | |
1341 | { | |
1342 | void *haddr; | |
1343 | uint16_t ret; | |
1344 | ||
1345 | haddr = cpu_mmu_lookup(env, addr, oi, ra, MMU_INST_FETCH); | |
1346 | ret = lduw_p(haddr); | |
1347 | clear_helper_retaddr(); | |
1348 | if (get_memop(oi) & MO_BSWAP) { | |
1349 | ret = bswap16(ret); | |
1350 | } | |
1351 | return ret; | |
1352 | } | |
1353 | ||
1354 | uint32_t cpu_ldl_code_mmu(CPUArchState *env, abi_ptr addr, | |
1355 | MemOpIdx oi, uintptr_t ra) | |
1356 | { | |
1357 | void *haddr; | |
1358 | uint32_t ret; | |
1359 | ||
1360 | haddr = cpu_mmu_lookup(env, addr, oi, ra, MMU_INST_FETCH); | |
1361 | ret = ldl_p(haddr); | |
1362 | clear_helper_retaddr(); | |
1363 | if (get_memop(oi) & MO_BSWAP) { | |
1364 | ret = bswap32(ret); | |
1365 | } | |
1366 | return ret; | |
1367 | } | |
1368 | ||
1369 | uint64_t cpu_ldq_code_mmu(CPUArchState *env, abi_ptr addr, | |
1370 | MemOpIdx oi, uintptr_t ra) | |
1371 | { | |
1372 | void *haddr; | |
1373 | uint64_t ret; | |
1374 | ||
28990626 RH |
1375 | haddr = cpu_mmu_lookup(env, addr, oi, ra, MMU_DATA_LOAD); |
1376 | ret = ldq_p(haddr); | |
1377 | clear_helper_retaddr(); | |
1378 | if (get_memop(oi) & MO_BSWAP) { | |
1379 | ret = bswap64(ret); | |
1380 | } | |
1381 | return ret; | |
1382 | } | |
1383 | ||
f83bcecb RH |
1384 | #include "ldst_common.c.inc" |
1385 | ||
a754f7f3 RH |
1386 | /* |
1387 | * Do not allow unaligned operations to proceed. Return the host address. | |
a754f7f3 | 1388 | */ |
b0326eb9 AJ |
1389 | static void *atomic_mmu_lookup(CPUArchState *env, vaddr addr, MemOpIdx oi, |
1390 | int size, uintptr_t retaddr) | |
a411d296 | 1391 | { |
fce3f474 RH |
1392 | MemOp mop = get_memop(oi); |
1393 | int a_bits = get_alignment_bits(mop); | |
1394 | void *ret; | |
1395 | ||
1396 | /* Enforce guest required alignment. */ | |
1397 | if (unlikely(addr & ((1 << a_bits) - 1))) { | |
7bedee32 | 1398 | cpu_loop_exit_sigbus(env_cpu(env), addr, MMU_DATA_STORE, retaddr); |
fce3f474 RH |
1399 | } |
1400 | ||
a411d296 PMD |
1401 | /* Enforce qemu required alignment. */ |
1402 | if (unlikely(addr & (size - 1))) { | |
29a0af61 | 1403 | cpu_loop_exit_atomic(env_cpu(env), retaddr); |
a411d296 | 1404 | } |
fce3f474 RH |
1405 | |
1406 | ret = g2h(env_cpu(env), addr); | |
08b97f7f RH |
1407 | set_helper_retaddr(retaddr); |
1408 | return ret; | |
a411d296 PMD |
1409 | } |
1410 | ||
be9568b4 RH |
#include "atomic_common.c.inc"

/*
 * First set of functions passes in OI and RETADDR.
 * This makes them callable from other helpers.
 */

/* Expands to the helper name: cpu_atomic_<op><SUFFIX><END>_mmu. */
#define ATOMIC_NAME(X) \
    glue(glue(glue(cpu_atomic_ ## X, SUFFIX), END), _mmu)
/* Executed by the template after each access: drop helper_retaddr. */
#define ATOMIC_MMU_CLEANUP do { clear_helper_retaddr(); } while (0)

/* Instantiate the atomic helpers for each supported operand size. */
#define DATA_SIZE 1
#include "atomic_template.h"

#define DATA_SIZE 2
#include "atomic_template.h"

#define DATA_SIZE 4
#include "atomic_template.h"

#ifdef CONFIG_ATOMIC64
#define DATA_SIZE 8
#include "atomic_template.h"
#endif

#if defined(CONFIG_ATOMIC128) || HAVE_CMPXCHG128
#define DATA_SIZE 16
#include "atomic_template.h"
#endif