/*
 * User emulator execution
 *
 * Copyright (c) 2003-2005 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */
#include "qemu/osdep.h"
#include "hw/core/tcg-cpu-ops.h"
#include "disas/disas.h"
#include "exec/exec-all.h"
#include "tcg/tcg.h"
#include "qemu/bitops.h"
#include "qemu/rcu.h"
#include "exec/cpu_ldst.h"
#include "exec/translate-all.h"
#include "exec/helper-proto.h"
#include "qemu/atomic128.h"
#include "trace/trace-root.h"
#include "tcg/tcg-ldst.h"
#include "internal.h"

__thread uintptr_t helper_retaddr;

//#define DEBUG_SIGNAL

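/*
 * helper_retaddr doubles as a state flag for adjust_signal_pc below:
 * 0 means a fault came from generated code, 1 means it came from a
 * host read done for translation, and any other value is the host
 * return address of the helper that performed the access.
 */
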
/*
 * Adjust the pc to pass to cpu_restore_state; return the memop type.
 */
MMUAccessType adjust_signal_pc(uintptr_t *pc, bool is_write)
{
    switch (helper_retaddr) {
    default:
        /*
         * Fault during host memory operation within a helper function.
         * The helper's host return address, saved here, gives us a
         * pointer into the generated code that will unwind to the
         * correct guest pc.
         */
        *pc = helper_retaddr;
        break;

    case 0:
        /*
         * Fault during host memory operation within generated code.
         * (Or, an unrelated bug within qemu, but we can't tell from here.)
         *
         * We take the host pc from the signal frame. However, we cannot
         * use that value directly. Within cpu_restore_state_from_tb, we
         * assume PC comes from GETPC(), as used by the helper functions,
         * so we adjust the address by -GETPC_ADJ to form an address that
         * is within the call insn, so that the address does not accidentally
         * match the beginning of the next guest insn. However, when the
         * pc comes from the signal frame it points to the actual faulting
         * host memory insn and not the return from a call insn.
         *
         * Therefore, adjust to compensate for what will be done later
         * by cpu_restore_state_from_tb.
         */
        *pc += GETPC_ADJ;
        break;

    case 1:
        /*
         * Fault during host read for translation, or loosely, "execution".
         *
         * The guest pc is already pointing to the start of the TB for which
         * code is being generated. If the guest translator manages the
         * page crossings correctly, this is exactly the correct address
         * (and if the translator doesn't handle page boundaries correctly
         * there's little we can do about that here). Therefore, do not
         * trigger the unwinder.
         */
        *pc = 0;
        return MMU_INST_FETCH;
    }

    return is_write ? MMU_DATA_STORE : MMU_DATA_LOAD;
}

/**
 * handle_sigsegv_accerr_write:
 * @cpu: the cpu context
 * @old_set: the sigset_t from the signal ucontext_t
 * @host_pc: the host pc, adjusted for the signal
 * @guest_addr: the guest address of the fault
 *
 * Return true if the write fault has been handled, and should be re-tried.
 *
 * Note that it is important that we don't call page_unprotect() unless
 * this is really a "write to nonwritable page" fault, because
 * page_unprotect() assumes that if it is called for an access to
 * a page that's writable this means we had two threads racing and
 * another thread got there first and already made the page writable;
 * so we will retry the access. If we were to call page_unprotect()
 * for some other kind of fault that should really be passed to the
 * guest, we'd end up in an infinite loop of retrying the faulting access.
 */
bool handle_sigsegv_accerr_write(CPUState *cpu, sigset_t *old_set,
                                 uintptr_t host_pc, abi_ptr guest_addr)
{
    switch (page_unprotect(guest_addr, host_pc)) {
    case 0:
        /*
         * Fault not caused by a page marked unwritable to protect
         * cached translations, must be the guest binary's problem.
         */
        return false;
    case 1:
        /*
         * Fault caused by protection of cached translation; TBs
         * invalidated, so resume execution.
         */
        return true;
    case 2:
        /*
         * Fault caused by protection of cached translation, and the
         * currently executing TB was modified and must be exited immediately.
         */
        sigprocmask(SIG_SETMASK, old_set, NULL);
        cpu_loop_exit_noexc(cpu);
        /* NORETURN */
    default:
        g_assert_not_reached();
    }
}

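/*
 * This is reached from the host SIGSEGV handler once adjust_signal_pc
 * has classified the fault: only write faults reported as SEGV_ACCERR
 * are routed here, to distinguish write-protection applied to guard
 * cached translations from genuine guest protection faults.
 */
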
typedef struct PageFlagsNode {
    struct rcu_head rcu;
    IntervalTreeNode itree;
    int flags;
} PageFlagsNode;

static IntervalTreeRoot pageflags_root;

static PageFlagsNode *pageflags_find(target_ulong start, target_ulong last)
{
    IntervalTreeNode *n;

    n = interval_tree_iter_first(&pageflags_root, start, last);
    return n ? container_of(n, PageFlagsNode, itree) : NULL;
}

static PageFlagsNode *pageflags_next(PageFlagsNode *p, target_ulong start,
                                     target_ulong last)
{
    IntervalTreeNode *n;

    n = interval_tree_iter_next(&p->itree, start, last);
    return n ? container_of(n, PageFlagsNode, itree) : NULL;
}

int walk_memory_regions(void *priv, walk_memory_regions_fn fn)
{
    IntervalTreeNode *n;
    int rc = 0;

    mmap_lock();
    for (n = interval_tree_iter_first(&pageflags_root, 0, -1);
         n != NULL;
         n = interval_tree_iter_next(n, 0, -1)) {
        PageFlagsNode *p = container_of(n, PageFlagsNode, itree);

        rc = fn(priv, n->start, n->last + 1, p->flags);
        if (rc != 0) {
            break;
        }
    }
    mmap_unlock();

    return rc;
}

static int dump_region(void *priv, target_ulong start,
                       target_ulong end, unsigned long prot)
{
    FILE *f = (FILE *)priv;

    fprintf(f, TARGET_FMT_lx"-"TARGET_FMT_lx" "TARGET_FMT_lx" %c%c%c\n",
            start, end, end - start,
            ((prot & PAGE_READ) ? 'r' : '-'),
            ((prot & PAGE_WRITE) ? 'w' : '-'),
            ((prot & PAGE_EXEC) ? 'x' : '-'));
    return 0;
}

/* dump memory mappings */
void page_dump(FILE *f)
{
    const int length = sizeof(target_ulong) * 2;

    fprintf(f, "%-*s %-*s %-*s %s\n",
            length, "start", length, "end", length, "size", "prot");
    walk_memory_regions(f, dump_region);
}

int page_get_flags(target_ulong address)
{
    PageFlagsNode *p = pageflags_find(address, address);

    /*
     * See util/interval-tree.c re lockless lookups: no false positives but
     * there are false negatives. If we find nothing, retry with the mmap
     * lock acquired.
     */
    if (p) {
        return p->flags;
    }
    if (have_mmap_lock()) {
        return 0;
    }

    mmap_lock();
    p = pageflags_find(address, address);
    mmap_unlock();
    return p ? p->flags : 0;
}

/* A subroutine of page_set_flags: insert a new node for [start,last]. */
static void pageflags_create(target_ulong start, target_ulong last, int flags)
{
    PageFlagsNode *p = g_new(PageFlagsNode, 1);

    p->itree.start = start;
    p->itree.last = last;
    p->flags = flags;
    interval_tree_insert(&p->itree, &pageflags_root);
}

/* A subroutine of page_set_flags: remove everything in [start,last]. */
static bool pageflags_unset(target_ulong start, target_ulong last)
{
    bool inval_tb = false;

    while (true) {
        PageFlagsNode *p = pageflags_find(start, last);
        target_ulong p_last;

        if (!p) {
            break;
        }

        if (p->flags & PAGE_EXEC) {
            inval_tb = true;
        }

        interval_tree_remove(&p->itree, &pageflags_root);
        p_last = p->itree.last;

        if (p->itree.start < start) {
            /* Truncate the node from the end, or split out the middle. */
            p->itree.last = start - 1;
            interval_tree_insert(&p->itree, &pageflags_root);
            if (last < p_last) {
                pageflags_create(last + 1, p_last, p->flags);
                break;
            }
        } else if (p_last <= last) {
            /* Range completely covers node -- remove it. */
            g_free_rcu(p, rcu);
        } else {
            /* Truncate the node from the start. */
            p->itree.start = last + 1;
            interval_tree_insert(&p->itree, &pageflags_root);
            break;
        }
    }

    return inval_tb;
}

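/*
 * For example, unsetting [0x3000, 0x4fff] from a single node covering
 * [0x2000, 0x5fff] takes the "split out the middle" path above: the
 * node is truncated to [0x2000, 0x2fff] and a new node with the same
 * flags is created for [0x5000, 0x5fff].
 */
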
/*
 * A subroutine of page_set_flags: nothing overlaps [start,last],
 * but check adjacent mappings and maybe merge into a single range.
 */
static void pageflags_create_merge(target_ulong start, target_ulong last,
                                   int flags)
{
    PageFlagsNode *next = NULL, *prev = NULL;

    if (start > 0) {
        prev = pageflags_find(start - 1, start - 1);
        if (prev) {
            if (prev->flags == flags) {
                interval_tree_remove(&prev->itree, &pageflags_root);
            } else {
                prev = NULL;
            }
        }
    }
    if (last + 1 != 0) {
        next = pageflags_find(last + 1, last + 1);
        if (next) {
            if (next->flags == flags) {
                interval_tree_remove(&next->itree, &pageflags_root);
            } else {
                next = NULL;
            }
        }
    }

    if (prev) {
        if (next) {
            prev->itree.last = next->itree.last;
            g_free_rcu(next, rcu);
        } else {
            prev->itree.last = last;
        }
        interval_tree_insert(&prev->itree, &pageflags_root);
    } else if (next) {
        next->itree.start = start;
        interval_tree_insert(&next->itree, &pageflags_root);
    } else {
        pageflags_create(start, last, flags);
    }
}

/*
 * Allow the target to decide if PAGE_TARGET_[12] may be reset.
 * By default, they are not kept.
 */
#ifndef PAGE_TARGET_STICKY
#define PAGE_TARGET_STICKY  0
#endif
#define PAGE_STICKY  (PAGE_ANON | PAGE_PASSTHROUGH | PAGE_TARGET_STICKY)

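/*
 * For example, an mprotect() over an anonymous mapping must not clear
 * PAGE_ANON: the sticky bits describe how the mapping was created, not
 * its current protection, so pageflags_set_clear below preserves them
 * except when the caller passed PAGE_RESET.
 */
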
/* A subroutine of page_set_flags: set/clear the flags in [start,last]. */
static bool pageflags_set_clear(target_ulong start, target_ulong last,
                                int set_flags, int clear_flags)
{
    PageFlagsNode *p;
    target_ulong p_start, p_last;
    int p_flags, merge_flags;
    bool inval_tb = false;

 restart:
    p = pageflags_find(start, last);
    if (!p) {
        if (set_flags) {
            pageflags_create_merge(start, last, set_flags);
        }
        goto done;
    }

    p_start = p->itree.start;
    p_last = p->itree.last;
    p_flags = p->flags;
    /* Using mprotect on a page does not change sticky bits. */
    merge_flags = (p_flags & ~clear_flags) | set_flags;

    /*
     * Need to flush if an overlapping executable region
     * removes exec, or adds write.
     */
    if ((p_flags & PAGE_EXEC)
        && (!(merge_flags & PAGE_EXEC)
            || (merge_flags & ~p_flags & PAGE_WRITE))) {
        inval_tb = true;
    }

    /*
     * If there is an exact range match, update and return without
     * attempting to merge with adjacent regions.
     */
    if (start == p_start && last == p_last) {
        if (merge_flags) {
            p->flags = merge_flags;
        } else {
            interval_tree_remove(&p->itree, &pageflags_root);
            g_free_rcu(p, rcu);
        }
        goto done;
    }

    /*
     * If sticky bits affect the original mapping, then we must be more
     * careful about the existing intervals and the separate flags.
     */
    if (set_flags != merge_flags) {
        if (p_start < start) {
            interval_tree_remove(&p->itree, &pageflags_root);
            p->itree.last = start - 1;
            interval_tree_insert(&p->itree, &pageflags_root);

            if (last < p_last) {
                if (merge_flags) {
                    pageflags_create(start, last, merge_flags);
                }
                pageflags_create(last + 1, p_last, p_flags);
            } else {
                if (merge_flags) {
                    pageflags_create(start, p_last, merge_flags);
                }
                if (p_last < last) {
                    start = p_last + 1;
                    goto restart;
                }
            }
        } else {
            if (start < p_start && set_flags) {
                pageflags_create(start, p_start - 1, set_flags);
            }
            if (last < p_last) {
                interval_tree_remove(&p->itree, &pageflags_root);
                p->itree.start = last + 1;
                interval_tree_insert(&p->itree, &pageflags_root);
                if (merge_flags) {
                    pageflags_create(start, last, merge_flags);
                }
            } else {
                if (merge_flags) {
                    p->flags = merge_flags;
                } else {
                    interval_tree_remove(&p->itree, &pageflags_root);
                    g_free_rcu(p, rcu);
                }
                if (p_last < last) {
                    start = p_last + 1;
                    goto restart;
                }
            }
        }
        goto done;
    }

    /* If flags are not changing for this range, incorporate it. */
    if (set_flags == p_flags) {
        if (start < p_start) {
            interval_tree_remove(&p->itree, &pageflags_root);
            p->itree.start = start;
            interval_tree_insert(&p->itree, &pageflags_root);
        }
        if (p_last < last) {
            start = p_last + 1;
            goto restart;
        }
        goto done;
    }

    /* Maybe split out head and/or tail ranges with the original flags. */
    interval_tree_remove(&p->itree, &pageflags_root);
    if (p_start < start) {
        p->itree.last = start - 1;
        interval_tree_insert(&p->itree, &pageflags_root);

        if (p_last < last) {
            goto restart;
        }
        if (last < p_last) {
            pageflags_create(last + 1, p_last, p_flags);
        }
    } else if (last < p_last) {
        p->itree.start = last + 1;
        interval_tree_insert(&p->itree, &pageflags_root);
    } else {
        g_free_rcu(p, rcu);
        goto restart;
    }
    if (set_flags) {
        pageflags_create(start, last, set_flags);
    }

 done:
    return inval_tb;
}

/*
 * Modify the flags of a page and invalidate the code if necessary.
 * The flag PAGE_WRITE_ORG is positioned automatically depending
 * on PAGE_WRITE. The mmap_lock should already be held.
 */
void page_set_flags(target_ulong start, target_ulong last, int flags)
{
    bool reset = false;
    bool inval_tb = false;

    /*
     * This function should never be called with addresses outside the
     * guest address space. If this assert fires, it probably indicates
     * a missing call to h2g_valid.
     */
    assert(start <= last);
    assert(last <= GUEST_ADDR_MAX);
    /* Only set PAGE_ANON with new mappings. */
    assert(!(flags & PAGE_ANON) || (flags & PAGE_RESET));
    assert_memory_lock();

    start &= TARGET_PAGE_MASK;
    last |= ~TARGET_PAGE_MASK;

    if (!(flags & PAGE_VALID)) {
        flags = 0;
    } else {
        reset = flags & PAGE_RESET;
        flags &= ~PAGE_RESET;
        if (flags & PAGE_WRITE) {
            flags |= PAGE_WRITE_ORG;
        }
    }

    if (!flags || reset) {
        page_reset_target_data(start, last);
        inval_tb |= pageflags_unset(start, last);
    }
    if (flags) {
        inval_tb |= pageflags_set_clear(start, last, flags,
                                        ~(reset ? 0 : PAGE_STICKY));
    }
    if (inval_tb) {
        tb_invalidate_phys_range(start, last);
    }
}

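/*
 * An illustrative caller: target_mmap() establishing a fresh anonymous
 * read/write mapping would do something like
 *
 *     page_set_flags(start, start + len - 1,
 *                    PAGE_VALID | PAGE_RESET | PAGE_ANON |
 *                    PAGE_READ | PAGE_WRITE);
 *
 * PAGE_RESET drops target data and sticky bits from any prior mapping,
 * and PAGE_WRITE_ORG is added automatically above.
 */
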
int page_check_range(target_ulong start, target_ulong len, int flags)
{
    target_ulong last;
    int locked;  /* tri-state: =0: unlocked, +1: global, -1: local */
    int ret;

    if (len == 0) {
        return 0;  /* trivial length */
    }

    last = start + len - 1;
    if (last < start) {
        return -1;  /* wrap around */
    }

    locked = have_mmap_lock();
    while (true) {
        PageFlagsNode *p = pageflags_find(start, last);
        int missing;

        if (!p) {
            if (!locked) {
                /*
                 * Lockless lookups have false negatives.
                 * Retry with the lock held.
                 */
                mmap_lock();
                locked = -1;
                p = pageflags_find(start, last);
            }
            if (!p) {
                ret = -1; /* entire region invalid */
                break;
            }
        }
        if (start < p->itree.start) {
            ret = -1; /* initial bytes invalid */
            break;
        }

        missing = flags & ~p->flags;
        if (missing & PAGE_READ) {
            ret = -1; /* page not readable */
            break;
        }
        if (missing & PAGE_WRITE) {
            if (!(p->flags & PAGE_WRITE_ORG)) {
                ret = -1; /* page not writable */
                break;
            }
            /* Asking about writable, but has been protected: undo. */
            if (!page_unprotect(start, 0)) {
                ret = -1;
                break;
            }
            /* TODO: page_unprotect should take a range, not a single page. */
            if (last - start < TARGET_PAGE_SIZE) {
                ret = 0; /* ok */
                break;
            }
            start += TARGET_PAGE_SIZE;
            continue;
        }

        if (last <= p->itree.last) {
            ret = 0; /* ok */
            break;
        }
        start = p->itree.last + 1;
    }

    /* Release the lock if acquired locally. */
    if (locked < 0) {
        mmap_unlock();
    }
    return ret;
}

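/*
 * An illustrative caller: a syscall validating a guest buffer before
 * reading from it would do something like
 *
 *     if (page_check_range(addr, len, PAGE_READ) < 0) {
 *         return -TARGET_EFAULT;
 *     }
 */
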
void page_protect(tb_page_addr_t address)
{
    PageFlagsNode *p;
    target_ulong start, last;
    int prot;

    assert_memory_lock();

    if (qemu_host_page_size <= TARGET_PAGE_SIZE) {
        start = address & TARGET_PAGE_MASK;
        last = start + TARGET_PAGE_SIZE - 1;
    } else {
        start = address & qemu_host_page_mask;
        last = start + qemu_host_page_size - 1;
    }

    p = pageflags_find(start, last);
    if (!p) {
        return;
    }
    prot = p->flags;

    if (unlikely(p->itree.last < last)) {
        /* More than one protection region covers the one host page. */
        assert(TARGET_PAGE_SIZE < qemu_host_page_size);
        while ((p = pageflags_next(p, start, last)) != NULL) {
            prot |= p->flags;
        }
    }

    if (prot & PAGE_WRITE) {
        pageflags_set_clear(start, last, 0, PAGE_WRITE);
        mprotect(g2h_untagged(start), qemu_host_page_size,
                 prot & (PAGE_READ | PAGE_EXEC) ? PROT_READ : PROT_NONE);
    }
}

/*
 * Called from signal handler: invalidate the code and unprotect the
 * page. Return 0 if the fault was not handled, 1 if it was handled,
 * and 2 if it was handled but the caller must cause the TB to be
 * immediately exited. (We can only return 2 if the 'pc' argument is
 * non-zero.)
 */
int page_unprotect(target_ulong address, uintptr_t pc)
{
    PageFlagsNode *p;
    bool current_tb_invalidated;

    /*
     * Technically this isn't safe inside a signal handler. However we
     * know this only ever happens in a synchronous SEGV handler, so in
     * practice it seems to be ok.
     */
    mmap_lock();

    p = pageflags_find(address, address);

    /* If this address was not really writable, nothing to do. */
    if (!p || !(p->flags & PAGE_WRITE_ORG)) {
        mmap_unlock();
        return 0;
    }

    current_tb_invalidated = false;
    if (p->flags & PAGE_WRITE) {
        /*
         * If the page is actually marked WRITE then assume this is because
         * this thread raced with another one which got here first and
         * set the page to PAGE_WRITE and did the TB invalidate for us.
         */
#ifdef TARGET_HAS_PRECISE_SMC
        TranslationBlock *current_tb = tcg_tb_lookup(pc);
        if (current_tb) {
            current_tb_invalidated = tb_cflags(current_tb) & CF_INVALID;
        }
#endif
    } else {
        target_ulong start, len, i;
        int prot;

        if (qemu_host_page_size <= TARGET_PAGE_SIZE) {
            start = address & TARGET_PAGE_MASK;
            len = TARGET_PAGE_SIZE;
            prot = p->flags | PAGE_WRITE;
            pageflags_set_clear(start, start + len - 1, PAGE_WRITE, 0);
            current_tb_invalidated = tb_invalidate_phys_page_unwind(start, pc);
        } else {
            start = address & qemu_host_page_mask;
            len = qemu_host_page_size;
            prot = 0;

            for (i = 0; i < len; i += TARGET_PAGE_SIZE) {
                target_ulong addr = start + i;

                p = pageflags_find(addr, addr);
                if (p) {
                    prot |= p->flags;
                    if (p->flags & PAGE_WRITE_ORG) {
                        prot |= PAGE_WRITE;
                        pageflags_set_clear(addr, addr + TARGET_PAGE_SIZE - 1,
                                            PAGE_WRITE, 0);
                    }
                }
                /*
                 * Since the content will be modified, we must invalidate
                 * the corresponding translated code.
                 */
                current_tb_invalidated |=
                    tb_invalidate_phys_page_unwind(addr, pc);
            }
        }
        if (prot & PAGE_EXEC) {
            prot = (prot & ~PAGE_EXEC) | PAGE_READ;
        }
        mprotect((void *)g2h_untagged(start), len, prot & PAGE_BITS);
    }
    mmap_unlock();

    /* If current TB was invalidated return to main loop */
    return current_tb_invalidated ? 2 : 1;
}

static int probe_access_internal(CPUArchState *env, vaddr addr,
                                 int fault_size, MMUAccessType access_type,
                                 bool nonfault, uintptr_t ra)
{
    int acc_flag;
    bool maperr;

    switch (access_type) {
    case MMU_DATA_STORE:
        acc_flag = PAGE_WRITE_ORG;
        break;
    case MMU_DATA_LOAD:
        acc_flag = PAGE_READ;
        break;
    case MMU_INST_FETCH:
        acc_flag = PAGE_EXEC;
        break;
    default:
        g_assert_not_reached();
    }

    if (guest_addr_valid_untagged(addr)) {
        int page_flags = page_get_flags(addr);
        if (page_flags & acc_flag) {
            return 0; /* success */
        }
        maperr = !(page_flags & PAGE_VALID);
    } else {
        maperr = true;
    }

    if (nonfault) {
        return TLB_INVALID_MASK;
    }

    cpu_loop_exit_sigsegv(env_cpu(env), addr, access_type, maperr, ra);
}

int probe_access_flags(CPUArchState *env, vaddr addr, int size,
                       MMUAccessType access_type, int mmu_idx,
                       bool nonfault, void **phost, uintptr_t ra)
{
    int flags;

    g_assert(-(addr | TARGET_PAGE_MASK) >= size);
    flags = probe_access_internal(env, addr, size, access_type, nonfault, ra);
    *phost = flags ? NULL : g2h(env_cpu(env), addr);
    return flags;
}

void *probe_access(CPUArchState *env, vaddr addr, int size,
                   MMUAccessType access_type, int mmu_idx, uintptr_t ra)
{
    int flags;

    g_assert(-(addr | TARGET_PAGE_MASK) >= size);
    flags = probe_access_internal(env, addr, size, access_type, false, ra);
    g_assert(flags == 0);

    return size ? g2h(env_cpu(env), addr) : NULL;
}

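/*
 * A nonfault probe lets a helper test an access without raising the
 * guest signal; an illustrative use:
 *
 *     void *host;
 *     int flags = probe_access_flags(env, addr, 1, MMU_DATA_LOAD,
 *                                    mmu_idx, true, &host, GETPC());
 *     if (flags & TLB_INVALID_MASK) {
 *         ... page is inaccessible and host is NULL ...
 *     }
 */
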
tb_page_addr_t get_page_addr_code_hostp(CPUArchState *env, vaddr addr,
                                        void **hostp)
{
    int flags;

    flags = probe_access_internal(env, addr, 1, MMU_INST_FETCH, false, 0);
    g_assert(flags == 0);

    if (hostp) {
        *hostp = g2h_untagged(addr);
    }
    return addr;
}

#ifdef TARGET_PAGE_DATA_SIZE
/*
 * Allocate chunks of target data together. For the only current user,
 * if we allocate one hunk per page, we have overhead of 40/128, or ~31%.
 * Therefore, allocate memory for 64 pages at a time for overhead < 1%.
 */
#define TPD_PAGES  64
#define TBD_MASK   (TARGET_PAGE_MASK * TPD_PAGES)

typedef struct TargetPageDataNode {
    struct rcu_head rcu;
    IntervalTreeNode itree;
    char data[TPD_PAGES][TARGET_PAGE_DATA_SIZE] __attribute__((aligned));
} TargetPageDataNode;

static IntervalTreeRoot targetdata_root;

void page_reset_target_data(target_ulong start, target_ulong last)
{
    IntervalTreeNode *n, *next;

    assert_memory_lock();

    start &= TARGET_PAGE_MASK;
    last |= ~TARGET_PAGE_MASK;

    for (n = interval_tree_iter_first(&targetdata_root, start, last),
         next = n ? interval_tree_iter_next(n, start, last) : NULL;
         n != NULL;
         n = next,
         next = next ? interval_tree_iter_next(n, start, last) : NULL) {
        target_ulong n_start, n_last, p_ofs, p_len;
        TargetPageDataNode *t = container_of(n, TargetPageDataNode, itree);

        if (n->start >= start && n->last <= last) {
            interval_tree_remove(n, &targetdata_root);
            g_free_rcu(t, rcu);
            continue;
        }

        if (n->start < start) {
            n_start = start;
            p_ofs = (start - n->start) >> TARGET_PAGE_BITS;
        } else {
            n_start = n->start;
            p_ofs = 0;
        }
        n_last = MIN(last, n->last);
        p_len = (n_last + 1 - n_start) >> TARGET_PAGE_BITS;

        memset(t->data[p_ofs], 0, p_len * TARGET_PAGE_DATA_SIZE);
    }
}

void *page_get_target_data(target_ulong address)
{
    IntervalTreeNode *n;
    TargetPageDataNode *t;
    target_ulong page, region;

    page = address & TARGET_PAGE_MASK;
    region = address & TBD_MASK;

    n = interval_tree_iter_first(&targetdata_root, page, page);
    if (!n) {
        /*
         * See util/interval-tree.c re lockless lookups: no false positives
         * but there are false negatives. If we find nothing, retry with
         * the mmap lock acquired. We also need the lock for the
         * allocation + insert.
         */
        mmap_lock();
        n = interval_tree_iter_first(&targetdata_root, page, page);
        if (!n) {
            t = g_new0(TargetPageDataNode, 1);
            n = &t->itree;
            n->start = region;
            n->last = region | ~TBD_MASK;
            interval_tree_insert(n, &targetdata_root);
        }
        mmap_unlock();
    }

    t = container_of(n, TargetPageDataNode, itree);
    return t->data[(page - region) >> TARGET_PAGE_BITS];
}
#else
void page_reset_target_data(target_ulong start, target_ulong last) { }
#endif /* TARGET_PAGE_DATA_SIZE */

/* The softmmu versions of these helpers are in cputlb.c. */

static void *cpu_mmu_lookup(CPUArchState *env, abi_ptr addr,
                            MemOp mop, uintptr_t ra, MMUAccessType type)
{
    int a_bits = get_alignment_bits(mop);
    void *ret;

    /* Enforce guest required alignment. */
    if (unlikely(addr & ((1 << a_bits) - 1))) {
        cpu_loop_exit_sigbus(env_cpu(env), addr, type, ra);
    }

    ret = g2h(env_cpu(env), addr);
    set_helper_retaddr(ra);
    return ret;
}

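/*
 * All of the load/store helpers below follow the same pattern around
 * cpu_mmu_lookup, e.g.
 *
 *     haddr = cpu_mmu_lookup(env, addr, mop, ra, MMU_DATA_LOAD);
 *     ret = load_atom_4(env, ra, haddr, mop);
 *     clear_helper_retaddr();
 *
 * cpu_mmu_lookup leaves helper_retaddr set, so a SIGSEGV raised by the
 * host access unwinds through adjust_signal_pc; the caller must clear
 * it again before returning to generated code.
 */
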
#include "ldst_atomicity.c.inc"

static uint8_t do_ld1_mmu(CPUArchState *env, abi_ptr addr,
                          MemOp mop, uintptr_t ra)
{
    void *haddr;
    uint8_t ret;

    tcg_debug_assert((mop & MO_SIZE) == MO_8);
    haddr = cpu_mmu_lookup(env, addr, mop, ra, MMU_DATA_LOAD);
    ret = ldub_p(haddr);
    clear_helper_retaddr();
    return ret;
}

tcg_target_ulong helper_ldub_mmu(CPUArchState *env, uint64_t addr,
                                 MemOpIdx oi, uintptr_t ra)
{
    return do_ld1_mmu(env, addr, get_memop(oi), ra);
}

tcg_target_ulong helper_ldsb_mmu(CPUArchState *env, uint64_t addr,
                                 MemOpIdx oi, uintptr_t ra)
{
    return (int8_t)do_ld1_mmu(env, addr, get_memop(oi), ra);
}

uint8_t cpu_ldb_mmu(CPUArchState *env, abi_ptr addr,
                    MemOpIdx oi, uintptr_t ra)
{
    uint8_t ret = do_ld1_mmu(env, addr, get_memop(oi), ra);
    qemu_plugin_vcpu_mem_cb(env_cpu(env), addr, oi, QEMU_PLUGIN_MEM_R);
    return ret;
}

static uint16_t do_ld2_mmu(CPUArchState *env, abi_ptr addr,
                           MemOp mop, uintptr_t ra)
{
    void *haddr;
    uint16_t ret;

    tcg_debug_assert((mop & MO_SIZE) == MO_16);
    haddr = cpu_mmu_lookup(env, addr, mop, ra, MMU_DATA_LOAD);
    ret = load_atom_2(env, ra, haddr, mop);
    clear_helper_retaddr();

    if (mop & MO_BSWAP) {
        ret = bswap16(ret);
    }
    return ret;
}

tcg_target_ulong helper_lduw_mmu(CPUArchState *env, uint64_t addr,
                                 MemOpIdx oi, uintptr_t ra)
{
    return do_ld2_mmu(env, addr, get_memop(oi), ra);
}

tcg_target_ulong helper_ldsw_mmu(CPUArchState *env, uint64_t addr,
                                 MemOpIdx oi, uintptr_t ra)
{
    return (int16_t)do_ld2_mmu(env, addr, get_memop(oi), ra);
}

uint16_t cpu_ldw_mmu(CPUArchState *env, abi_ptr addr,
                     MemOpIdx oi, uintptr_t ra)
{
    uint16_t ret = do_ld2_mmu(env, addr, get_memop(oi), ra);
    qemu_plugin_vcpu_mem_cb(env_cpu(env), addr, oi, QEMU_PLUGIN_MEM_R);
    return ret;
}

static uint32_t do_ld4_mmu(CPUArchState *env, abi_ptr addr,
                           MemOp mop, uintptr_t ra)
{
    void *haddr;
    uint32_t ret;

    tcg_debug_assert((mop & MO_SIZE) == MO_32);
    haddr = cpu_mmu_lookup(env, addr, mop, ra, MMU_DATA_LOAD);
    ret = load_atom_4(env, ra, haddr, mop);
    clear_helper_retaddr();

    if (mop & MO_BSWAP) {
        ret = bswap32(ret);
    }
    return ret;
}

tcg_target_ulong helper_ldul_mmu(CPUArchState *env, uint64_t addr,
                                 MemOpIdx oi, uintptr_t ra)
{
    return do_ld4_mmu(env, addr, get_memop(oi), ra);
}

tcg_target_ulong helper_ldsl_mmu(CPUArchState *env, uint64_t addr,
                                 MemOpIdx oi, uintptr_t ra)
{
    return (int32_t)do_ld4_mmu(env, addr, get_memop(oi), ra);
}

uint32_t cpu_ldl_mmu(CPUArchState *env, abi_ptr addr,
                     MemOpIdx oi, uintptr_t ra)
{
    uint32_t ret = do_ld4_mmu(env, addr, get_memop(oi), ra);
    qemu_plugin_vcpu_mem_cb(env_cpu(env), addr, oi, QEMU_PLUGIN_MEM_R);
    return ret;
}

static uint64_t do_ld8_mmu(CPUArchState *env, abi_ptr addr,
                           MemOp mop, uintptr_t ra)
{
    void *haddr;
    uint64_t ret;

    tcg_debug_assert((mop & MO_SIZE) == MO_64);
    haddr = cpu_mmu_lookup(env, addr, mop, ra, MMU_DATA_LOAD);
    ret = load_atom_8(env, ra, haddr, mop);
    clear_helper_retaddr();

    if (mop & MO_BSWAP) {
        ret = bswap64(ret);
    }
    return ret;
}

uint64_t helper_ldq_mmu(CPUArchState *env, uint64_t addr,
                        MemOpIdx oi, uintptr_t ra)
{
    return do_ld8_mmu(env, addr, get_memop(oi), ra);
}

uint64_t cpu_ldq_mmu(CPUArchState *env, abi_ptr addr,
                     MemOpIdx oi, uintptr_t ra)
{
    uint64_t ret = do_ld8_mmu(env, addr, get_memop(oi), ra);
    qemu_plugin_vcpu_mem_cb(env_cpu(env), addr, oi, QEMU_PLUGIN_MEM_R);
    return ret;
}

static Int128 do_ld16_mmu(CPUArchState *env, abi_ptr addr,
                          MemOp mop, uintptr_t ra)
{
    void *haddr;
    Int128 ret;

    tcg_debug_assert((mop & MO_SIZE) == MO_128);
    haddr = cpu_mmu_lookup(env, addr, mop, ra, MMU_DATA_LOAD);
    ret = load_atom_16(env, ra, haddr, mop);
    clear_helper_retaddr();

    if (mop & MO_BSWAP) {
        ret = bswap128(ret);
    }
    return ret;
}

Int128 helper_ld16_mmu(CPUArchState *env, uint64_t addr,
                       MemOpIdx oi, uintptr_t ra)
{
    return do_ld16_mmu(env, addr, get_memop(oi), ra);
}

Int128 helper_ld_i128(CPUArchState *env, uint64_t addr, MemOpIdx oi)
{
    return helper_ld16_mmu(env, addr, oi, GETPC());
}

Int128 cpu_ld16_mmu(CPUArchState *env, abi_ptr addr,
                    MemOpIdx oi, uintptr_t ra)
{
    Int128 ret = do_ld16_mmu(env, addr, get_memop(oi), ra);
    qemu_plugin_vcpu_mem_cb(env_cpu(env), addr, oi, QEMU_PLUGIN_MEM_R);
    return ret;
}

static void do_st1_mmu(CPUArchState *env, abi_ptr addr, uint8_t val,
                       MemOp mop, uintptr_t ra)
{
    void *haddr;

    tcg_debug_assert((mop & MO_SIZE) == MO_8);
    haddr = cpu_mmu_lookup(env, addr, mop, ra, MMU_DATA_STORE);
    stb_p(haddr, val);
    clear_helper_retaddr();
}

void helper_stb_mmu(CPUArchState *env, uint64_t addr, uint32_t val,
                    MemOpIdx oi, uintptr_t ra)
{
    do_st1_mmu(env, addr, val, get_memop(oi), ra);
}

void cpu_stb_mmu(CPUArchState *env, abi_ptr addr, uint8_t val,
                 MemOpIdx oi, uintptr_t ra)
{
    do_st1_mmu(env, addr, val, get_memop(oi), ra);
    qemu_plugin_vcpu_mem_cb(env_cpu(env), addr, oi, QEMU_PLUGIN_MEM_W);
}

static void do_st2_mmu(CPUArchState *env, abi_ptr addr, uint16_t val,
                       MemOp mop, uintptr_t ra)
{
    void *haddr;

    tcg_debug_assert((mop & MO_SIZE) == MO_16);
    haddr = cpu_mmu_lookup(env, addr, mop, ra, MMU_DATA_STORE);

    if (mop & MO_BSWAP) {
        val = bswap16(val);
    }
    store_atom_2(env, ra, haddr, mop, val);
    clear_helper_retaddr();
}

void helper_stw_mmu(CPUArchState *env, uint64_t addr, uint32_t val,
                    MemOpIdx oi, uintptr_t ra)
{
    do_st2_mmu(env, addr, val, get_memop(oi), ra);
}

void cpu_stw_mmu(CPUArchState *env, abi_ptr addr, uint16_t val,
                 MemOpIdx oi, uintptr_t ra)
{
    do_st2_mmu(env, addr, val, get_memop(oi), ra);
    qemu_plugin_vcpu_mem_cb(env_cpu(env), addr, oi, QEMU_PLUGIN_MEM_W);
}

static void do_st4_mmu(CPUArchState *env, abi_ptr addr, uint32_t val,
                       MemOp mop, uintptr_t ra)
{
    void *haddr;

    tcg_debug_assert((mop & MO_SIZE) == MO_32);
    haddr = cpu_mmu_lookup(env, addr, mop, ra, MMU_DATA_STORE);

    if (mop & MO_BSWAP) {
        val = bswap32(val);
    }
    store_atom_4(env, ra, haddr, mop, val);
    clear_helper_retaddr();
}

void helper_stl_mmu(CPUArchState *env, uint64_t addr, uint32_t val,
                    MemOpIdx oi, uintptr_t ra)
{
    do_st4_mmu(env, addr, val, get_memop(oi), ra);
}

void cpu_stl_mmu(CPUArchState *env, abi_ptr addr, uint32_t val,
                 MemOpIdx oi, uintptr_t ra)
{
    do_st4_mmu(env, addr, val, get_memop(oi), ra);
    qemu_plugin_vcpu_mem_cb(env_cpu(env), addr, oi, QEMU_PLUGIN_MEM_W);
}

static void do_st8_mmu(CPUArchState *env, abi_ptr addr, uint64_t val,
                       MemOp mop, uintptr_t ra)
{
    void *haddr;

    tcg_debug_assert((mop & MO_SIZE) == MO_64);
    haddr = cpu_mmu_lookup(env, addr, mop, ra, MMU_DATA_STORE);

    if (mop & MO_BSWAP) {
        val = bswap64(val);
    }
    store_atom_8(env, ra, haddr, mop, val);
    clear_helper_retaddr();
}

void helper_stq_mmu(CPUArchState *env, uint64_t addr, uint64_t val,
                    MemOpIdx oi, uintptr_t ra)
{
    do_st8_mmu(env, addr, val, get_memop(oi), ra);
}

void cpu_stq_mmu(CPUArchState *env, abi_ptr addr, uint64_t val,
                 MemOpIdx oi, uintptr_t ra)
{
    do_st8_mmu(env, addr, val, get_memop(oi), ra);
    qemu_plugin_vcpu_mem_cb(env_cpu(env), addr, oi, QEMU_PLUGIN_MEM_W);
}

static void do_st16_mmu(CPUArchState *env, abi_ptr addr, Int128 val,
                        MemOp mop, uintptr_t ra)
{
    void *haddr;

    tcg_debug_assert((mop & MO_SIZE) == MO_128);
    haddr = cpu_mmu_lookup(env, addr, mop, ra, MMU_DATA_STORE);

    if (mop & MO_BSWAP) {
        val = bswap128(val);
    }
    store_atom_16(env, ra, haddr, mop, val);
    clear_helper_retaddr();
}

void helper_st16_mmu(CPUArchState *env, uint64_t addr, Int128 val,
                     MemOpIdx oi, uintptr_t ra)
{
    do_st16_mmu(env, addr, val, get_memop(oi), ra);
}

void helper_st_i128(CPUArchState *env, uint64_t addr, Int128 val, MemOpIdx oi)
{
    helper_st16_mmu(env, addr, val, oi, GETPC());
}

void cpu_st16_mmu(CPUArchState *env, abi_ptr addr,
                  Int128 val, MemOpIdx oi, uintptr_t ra)
{
    do_st16_mmu(env, addr, val, get_memop(oi), ra);
    qemu_plugin_vcpu_mem_cb(env_cpu(env), addr, oi, QEMU_PLUGIN_MEM_W);
}

1225 | uint32_t cpu_ldub_code(CPUArchState *env, abi_ptr ptr) |
1226 | { | |
1227 | uint32_t ret; | |
1228 | ||
1229 | set_helper_retaddr(1); | |
3e8f1628 | 1230 | ret = ldub_p(g2h_untagged(ptr)); |
ed4cfbcd RH |
1231 | clear_helper_retaddr(); |
1232 | return ret; | |
1233 | } | |
1234 | ||
1235 | uint32_t cpu_lduw_code(CPUArchState *env, abi_ptr ptr) | |
1236 | { | |
1237 | uint32_t ret; | |
1238 | ||
1239 | set_helper_retaddr(1); | |
3e8f1628 | 1240 | ret = lduw_p(g2h_untagged(ptr)); |
ed4cfbcd RH |
1241 | clear_helper_retaddr(); |
1242 | return ret; | |
1243 | } | |
1244 | ||
1245 | uint32_t cpu_ldl_code(CPUArchState *env, abi_ptr ptr) | |
1246 | { | |
1247 | uint32_t ret; | |
1248 | ||
1249 | set_helper_retaddr(1); | |
3e8f1628 | 1250 | ret = ldl_p(g2h_untagged(ptr)); |
ed4cfbcd RH |
1251 | clear_helper_retaddr(); |
1252 | return ret; | |
1253 | } | |
1254 | ||
1255 | uint64_t cpu_ldq_code(CPUArchState *env, abi_ptr ptr) | |
1256 | { | |
1257 | uint64_t ret; | |
1258 | ||
1259 | set_helper_retaddr(1); | |
3e8f1628 | 1260 | ret = ldq_p(g2h_untagged(ptr)); |
ed4cfbcd RH |
1261 | clear_helper_retaddr(); |
1262 | return ret; | |
1263 | } | |
1264 | ||
uint8_t cpu_ldb_code_mmu(CPUArchState *env, abi_ptr addr,
                         MemOpIdx oi, uintptr_t ra)
{
    void *haddr;
    uint8_t ret;

    haddr = cpu_mmu_lookup(env, addr, get_memop(oi), ra, MMU_INST_FETCH);
    ret = ldub_p(haddr);
    clear_helper_retaddr();
    return ret;
}

uint16_t cpu_ldw_code_mmu(CPUArchState *env, abi_ptr addr,
                          MemOpIdx oi, uintptr_t ra)
{
    void *haddr;
    uint16_t ret;

    haddr = cpu_mmu_lookup(env, addr, get_memop(oi), ra, MMU_INST_FETCH);
    ret = lduw_p(haddr);
    clear_helper_retaddr();
    if (get_memop(oi) & MO_BSWAP) {
        ret = bswap16(ret);
    }
    return ret;
}

uint32_t cpu_ldl_code_mmu(CPUArchState *env, abi_ptr addr,
                          MemOpIdx oi, uintptr_t ra)
{
    void *haddr;
    uint32_t ret;

    haddr = cpu_mmu_lookup(env, addr, get_memop(oi), ra, MMU_INST_FETCH);
    ret = ldl_p(haddr);
    clear_helper_retaddr();
    if (get_memop(oi) & MO_BSWAP) {
        ret = bswap32(ret);
    }
    return ret;
}

uint64_t cpu_ldq_code_mmu(CPUArchState *env, abi_ptr addr,
                          MemOpIdx oi, uintptr_t ra)
{
    void *haddr;
    uint64_t ret;

    haddr = cpu_mmu_lookup(env, addr, get_memop(oi), ra, MMU_INST_FETCH);
    ret = ldq_p(haddr);
    clear_helper_retaddr();
    if (get_memop(oi) & MO_BSWAP) {
        ret = bswap64(ret);
    }
    return ret;
}

#include "ldst_common.c.inc"

/*
 * Do not allow unaligned operations to proceed. Return the host address.
 */
static void *atomic_mmu_lookup(CPUArchState *env, target_ulong addr,
                               MemOpIdx oi, int size, uintptr_t retaddr)
{
    MemOp mop = get_memop(oi);
    int a_bits = get_alignment_bits(mop);
    void *ret;

    /* Enforce guest required alignment. */
    if (unlikely(addr & ((1 << a_bits) - 1))) {
        cpu_loop_exit_sigbus(env_cpu(env), addr, MMU_DATA_STORE, retaddr);
    }

    /* Enforce qemu required alignment. */
    if (unlikely(addr & (size - 1))) {
        cpu_loop_exit_atomic(env_cpu(env), retaddr);
    }

    ret = g2h(env_cpu(env), addr);
    set_helper_retaddr(retaddr);
    return ret;
}

#include "atomic_common.c.inc"

/*
 * First set of functions passes in OI and RETADDR.
 * This makes them callable from other helpers.
 */

#define ATOMIC_NAME(X) \
    glue(glue(glue(cpu_atomic_ ## X, SUFFIX), END), _mmu)
#define ATOMIC_MMU_CLEANUP do { clear_helper_retaddr(); } while (0)

#define DATA_SIZE 1
#include "atomic_template.h"

#define DATA_SIZE 2
#include "atomic_template.h"

#define DATA_SIZE 4
#include "atomic_template.h"

#ifdef CONFIG_ATOMIC64
#define DATA_SIZE 8
#include "atomic_template.h"
#endif

#if defined(CONFIG_ATOMIC128) || defined(CONFIG_CMPXCHG128)
#define DATA_SIZE 16
#include "atomic_template.h"
#endif
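
/*
 * Each inclusion of atomic_template.h instantiates, via ATOMIC_NAME,
 * the cpu_atomic_*_mmu entry points for one operand size (e.g.
 * cpu_atomic_cmpxchgl_le_mmu for DATA_SIZE 4), each bracketed by
 * atomic_mmu_lookup and ATOMIC_MMU_CLEANUP.
 */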