mirror_qemu.git / cputlb.c
cputlb: introduce tlb_flush_* async work.
/*
 *  Common CPU TLB handling
 *
 *  Copyright (c) 2003 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */

#include "qemu/osdep.h"
#include "qemu/main-loop.h"
#include "cpu.h"
#include "exec/exec-all.h"
#include "exec/memory.h"
#include "exec/address-spaces.h"
#include "exec/cpu_ldst.h"
#include "exec/cputlb.h"
#include "exec/memory-internal.h"
#include "exec/ram_addr.h"
#include "tcg/tcg.h"
#include "qemu/error-report.h"
#include "exec/log.h"
#include "exec/helper-proto.h"
#include "qemu/atomic.h"

/* DEBUG defines, enable DEBUG_TLB_LOG to log to the CPU_LOG_MMU target */
/* #define DEBUG_TLB */
/* #define DEBUG_TLB_LOG */

#ifdef DEBUG_TLB
# define DEBUG_TLB_GATE 1
# ifdef DEBUG_TLB_LOG
#  define DEBUG_TLB_LOG_GATE 1
# else
#  define DEBUG_TLB_LOG_GATE 0
# endif
#else
# define DEBUG_TLB_GATE 0
# define DEBUG_TLB_LOG_GATE 0
#endif

#define tlb_debug(fmt, ...) do { \
    if (DEBUG_TLB_LOG_GATE) { \
        qemu_log_mask(CPU_LOG_MMU, "%s: " fmt, __func__, \
                      ## __VA_ARGS__); \
    } else if (DEBUG_TLB_GATE) { \
        fprintf(stderr, "%s: " fmt, __func__, ## __VA_ARGS__); \
    } \
} while (0)

#define assert_cpu_is_self(this_cpu) do {                         \
        if (DEBUG_TLB_GATE) {                                     \
            g_assert(!cpu->created || qemu_cpu_is_self(cpu));     \
        }                                                         \
    } while (0)

/* run_on_cpu_data.target_ptr should always be big enough for a
 * target_ulong even on 32 bit builds */
QEMU_BUILD_BUG_ON(sizeof(target_ulong) > sizeof(run_on_cpu_data));

/* statistics */
int tlb_flush_count;

/* This is OK because CPU architectures generally permit an
 * implementation to drop entries from the TLB at any time, so
 * flushing more entries than required is only an efficiency issue,
 * not a correctness issue.
 */
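/* Flush the entire TLB of @cpu.  This must run on @cpu's own thread
 * (or before that thread has been created); cross-thread callers go
 * through tlb_flush() below, which defers the work via async_run_on_cpu().
 */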
static void tlb_flush_nocheck(CPUState *cpu)
{
    CPUArchState *env = cpu->env_ptr;

    /* The QOM tests will trigger tlb_flushes without setting up TCG
     * so we bug out here in that case.
     */
    if (!tcg_enabled()) {
        return;
    }

    assert_cpu_is_self(cpu);
    tlb_debug("(count: %d)\n", tlb_flush_count++);

    tb_lock();

    memset(env->tlb_table, -1, sizeof(env->tlb_table));
    memset(env->tlb_v_table, -1, sizeof(env->tlb_v_table));
    memset(cpu->tb_jmp_cache, 0, sizeof(cpu->tb_jmp_cache));

    env->vtlb_index = 0;
    env->tlb_flush_addr = -1;
    env->tlb_flush_mask = 0;

    tb_unlock();

    atomic_mb_set(&cpu->pending_tlb_flush, false);
}

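/* run_on_cpu callback: performs the flush once we are on the target
 * vCPU's own thread; the data argument is unused.
 */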
static void tlb_flush_global_async_work(CPUState *cpu, run_on_cpu_data data)
{
    tlb_flush_nocheck(cpu);
}

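/* Public entry point.  When called from another thread, the flush is
 * queued as async work on the target vCPU, with pending_tlb_flush used
 * to avoid flooding the work queue with redundant flush requests; when
 * we are already on the vCPU thread (or the thread does not exist yet),
 * the flush is done synchronously.
 */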
void tlb_flush(CPUState *cpu)
{
    if (cpu->created && !qemu_cpu_is_self(cpu)) {
        if (atomic_cmpxchg(&cpu->pending_tlb_flush, false, true) == true) {
            async_run_on_cpu(cpu, tlb_flush_global_async_work,
                             RUN_ON_CPU_NULL);
        }
    } else {
        tlb_flush_nocheck(cpu);
    }
}

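/* Flush the TLB and victim TLB entries of every MMU index listed in the
 * (-1)-terminated va_list, then clear the TB jump cache.
 */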
static inline void v_tlb_flush_by_mmuidx(CPUState *cpu, va_list argp)
{
    CPUArchState *env = cpu->env_ptr;

    assert_cpu_is_self(cpu);
    tlb_debug("start\n");

    tb_lock();

    for (;;) {
        int mmu_idx = va_arg(argp, int);

        if (mmu_idx < 0) {
            break;
        }

        tlb_debug("%d\n", mmu_idx);

        memset(env->tlb_table[mmu_idx], -1, sizeof(env->tlb_table[0]));
        memset(env->tlb_v_table[mmu_idx], -1, sizeof(env->tlb_v_table[0]));
    }

    memset(cpu->tb_jmp_cache, 0, sizeof(cpu->tb_jmp_cache));

    tb_unlock();
}

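/* Varargs wrapper around the helper above.  The index list must be
 * terminated with a negative value, e.g. (illustrative call only):
 *
 *     tlb_flush_by_mmuidx(cs, 0, 1, -1);   -- flushes MMU indexes 0 and 1
 */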
void tlb_flush_by_mmuidx(CPUState *cpu, ...)
{
    va_list argp;
    va_start(argp, cpu);
    v_tlb_flush_by_mmuidx(cpu, argp);
    va_end(argp);
}

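/* Invalidate a single TLB entry if any of its read, write or code
 * addresses match the (page-aligned) @addr.
 */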
static inline void tlb_flush_entry(CPUTLBEntry *tlb_entry, target_ulong addr)
{
    if (addr == (tlb_entry->addr_read &
                 (TARGET_PAGE_MASK | TLB_INVALID_MASK)) ||
        addr == (tlb_entry->addr_write &
                 (TARGET_PAGE_MASK | TLB_INVALID_MASK)) ||
        addr == (tlb_entry->addr_code &
                 (TARGET_PAGE_MASK | TLB_INVALID_MASK))) {
        memset(tlb_entry, -1, sizeof(*tlb_entry));
    }
}

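/* run_on_cpu callback that flushes the single page passed in
 * data.target_ptr, on the vCPU's own thread.  If the page falls inside
 * a tracked large-page range, fall back to a full flush.
 */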
static void tlb_flush_page_async_work(CPUState *cpu, run_on_cpu_data data)
{
    CPUArchState *env = cpu->env_ptr;
    target_ulong addr = (target_ulong) data.target_ptr;
    int i;
    int mmu_idx;

    assert_cpu_is_self(cpu);

    tlb_debug("page :" TARGET_FMT_lx "\n", addr);

    /* Check if we need to flush due to large pages.  */
    if ((addr & env->tlb_flush_mask) == env->tlb_flush_addr) {
        tlb_debug("forcing full flush ("
                  TARGET_FMT_lx "/" TARGET_FMT_lx ")\n",
                  env->tlb_flush_addr, env->tlb_flush_mask);

        tlb_flush(cpu);
        return;
    }

    addr &= TARGET_PAGE_MASK;
    i = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
    for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++) {
        tlb_flush_entry(&env->tlb_table[mmu_idx][i], addr);
    }

    /* check whether there are entries that need to be flushed in the vtlb */
    for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++) {
        int k;
        for (k = 0; k < CPU_VTLB_SIZE; k++) {
            tlb_flush_entry(&env->tlb_v_table[mmu_idx][k], addr);
        }
    }

    tb_flush_jmp_cache(cpu, addr);
}

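/* Public entry point: flush one page from @cpu's TLB, deferring to async
 * work when called from a different thread.
 */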
void tlb_flush_page(CPUState *cpu, target_ulong addr)
{
    tlb_debug("page :" TARGET_FMT_lx "\n", addr);

    if (!qemu_cpu_is_self(cpu)) {
        async_run_on_cpu(cpu, tlb_flush_page_async_work,
                         RUN_ON_CPU_TARGET_PTR(addr));
    } else {
        tlb_flush_page_async_work(cpu, RUN_ON_CPU_TARGET_PTR(addr));
    }
}

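/* Flush one page from the TLBs of the MMU indexes given as a
 * (-1)-terminated argument list, e.g. (illustrative call only):
 *
 *     tlb_flush_page_by_mmuidx(cs, addr, 0, 1, -1);
 *
 * If the page is covered by a tracked large-page range, the listed MMU
 * indexes are flushed entirely instead.
 */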
void tlb_flush_page_by_mmuidx(CPUState *cpu, target_ulong addr, ...)
{
    CPUArchState *env = cpu->env_ptr;
    int i, k;
    va_list argp;

    va_start(argp, addr);

    assert_cpu_is_self(cpu);
    tlb_debug("addr "TARGET_FMT_lx"\n", addr);

    /* Check if we need to flush due to large pages.  */
    if ((addr & env->tlb_flush_mask) == env->tlb_flush_addr) {
        tlb_debug("forced full flush ("
                  TARGET_FMT_lx "/" TARGET_FMT_lx ")\n",
                  env->tlb_flush_addr, env->tlb_flush_mask);

        v_tlb_flush_by_mmuidx(cpu, argp);
        va_end(argp);
        return;
    }

    addr &= TARGET_PAGE_MASK;
    i = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);

    for (;;) {
        int mmu_idx = va_arg(argp, int);

        if (mmu_idx < 0) {
            break;
        }

        tlb_debug("idx %d\n", mmu_idx);

        tlb_flush_entry(&env->tlb_table[mmu_idx][i], addr);

        /* check whether there are vtlb entries that need to be flushed */
        for (k = 0; k < CPU_VTLB_SIZE; k++) {
            tlb_flush_entry(&env->tlb_v_table[mmu_idx][k], addr);
        }
    }
    va_end(argp);

    tb_flush_jmp_cache(cpu, addr);
}

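/* Queue a flush of the page at @addr on every vCPU in the system. */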
void tlb_flush_page_all(target_ulong addr)
{
    CPUState *cpu;

    CPU_FOREACH(cpu) {
        async_run_on_cpu(cpu, tlb_flush_page_async_work,
                         RUN_ON_CPU_TARGET_PTR(addr));
    }
}

/* update the TLBs so that writes to code in the virtual page 'addr'
   can be detected */
void tlb_protect_code(ram_addr_t ram_addr)
{
    cpu_physical_memory_test_and_clear_dirty(ram_addr, TARGET_PAGE_SIZE,
                                             DIRTY_MEMORY_CODE);
}

/* update the TLB so that writes in physical page 'phys_addr' are no longer
   tested for self modifying code */
void tlb_unprotect_code(ram_addr_t ram_addr)
{
    cpu_physical_memory_set_dirty_flag(ram_addr, DIRTY_MEMORY_CODE);
}

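/* Return true if the entry maps plain, dirty RAM, i.e. its write address
 * carries none of the TLB_INVALID, TLB_MMIO or TLB_NOTDIRTY flags.
 */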
static bool tlb_is_dirty_ram(CPUTLBEntry *tlbe)
{
    return (tlbe->addr_write & (TLB_INVALID_MASK|TLB_MMIO|TLB_NOTDIRTY)) == 0;
}

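/* If the entry's backing host address falls within [start, start + length),
 * set TLB_NOTDIRTY so that the next write to that page takes the slow path
 * and dirty tracking can observe it.
 */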
void tlb_reset_dirty_range(CPUTLBEntry *tlb_entry, uintptr_t start,
                           uintptr_t length)
{
    uintptr_t addr;

    if (tlb_is_dirty_ram(tlb_entry)) {
        addr = (tlb_entry->addr_write & TARGET_PAGE_MASK) + tlb_entry->addend;
        if ((addr - start) < length) {
            tlb_entry->addr_write |= TLB_NOTDIRTY;
        }
    }
}

void tlb_reset_dirty(CPUState *cpu, ram_addr_t start1, ram_addr_t length)
{
    CPUArchState *env;

    int mmu_idx;

    assert_cpu_is_self(cpu);

    env = cpu->env_ptr;
    for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++) {
        unsigned int i;

        for (i = 0; i < CPU_TLB_SIZE; i++) {
            tlb_reset_dirty_range(&env->tlb_table[mmu_idx][i],
                                  start1, length);
        }

        for (i = 0; i < CPU_VTLB_SIZE; i++) {
            tlb_reset_dirty_range(&env->tlb_v_table[mmu_idx][i],
                                  start1, length);
        }
    }
}

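/* Clear TLB_NOTDIRTY from the entry that maps @vaddr, so that subsequent
 * writes can again go straight to host memory.
 */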
static inline void tlb_set_dirty1(CPUTLBEntry *tlb_entry, target_ulong vaddr)
{
    if (tlb_entry->addr_write == (vaddr | TLB_NOTDIRTY)) {
        tlb_entry->addr_write = vaddr;
    }
}

/* update the TLB corresponding to virtual page vaddr
   so that it is no longer dirty */
void tlb_set_dirty(CPUState *cpu, target_ulong vaddr)
{
    CPUArchState *env = cpu->env_ptr;
    int i;
    int mmu_idx;

    assert_cpu_is_self(cpu);

    vaddr &= TARGET_PAGE_MASK;
    i = (vaddr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
    for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++) {
        tlb_set_dirty1(&env->tlb_table[mmu_idx][i], vaddr);
    }

    for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++) {
        int k;
        for (k = 0; k < CPU_VTLB_SIZE; k++) {
            tlb_set_dirty1(&env->tlb_v_table[mmu_idx][k], vaddr);
        }
    }
}

/* Our TLB does not support large pages, so remember the area covered by
   large pages and trigger a full TLB flush if these are invalidated.  */
static void tlb_add_large_page(CPUArchState *env, target_ulong vaddr,
                               target_ulong size)
{
    target_ulong mask = ~(size - 1);

    if (env->tlb_flush_addr == (target_ulong)-1) {
        env->tlb_flush_addr = vaddr & mask;
        env->tlb_flush_mask = mask;
        return;
    }
    /* Extend the existing region to include the new page.
       This is a compromise between unnecessary flushes and the cost
       of maintaining a full variable size TLB.  */
    mask &= env->tlb_flush_mask;
    while (((env->tlb_flush_addr ^ vaddr) & mask) != 0) {
        mask <<= 1;
    }
    env->tlb_flush_addr &= mask;
    env->tlb_flush_mask = mask;
}

/* Add a new TLB entry. At most one entry for a given virtual address
 * is permitted. Only a single TARGET_PAGE_SIZE region is mapped, the
 * supplied size is only used by tlb_flush_page.
 *
 * Called from TCG-generated code, which is under an RCU read-side
 * critical section.
 */
void tlb_set_page_with_attrs(CPUState *cpu, target_ulong vaddr,
                             hwaddr paddr, MemTxAttrs attrs, int prot,
                             int mmu_idx, target_ulong size)
{
    CPUArchState *env = cpu->env_ptr;
    MemoryRegionSection *section;
    unsigned int index;
    target_ulong address;
    target_ulong code_address;
    uintptr_t addend;
    CPUTLBEntry *te;
    hwaddr iotlb, xlat, sz;
    unsigned vidx = env->vtlb_index++ % CPU_VTLB_SIZE;
    int asidx = cpu_asidx_from_attrs(cpu, attrs);

    assert_cpu_is_self(cpu);
    assert(size >= TARGET_PAGE_SIZE);
    if (size != TARGET_PAGE_SIZE) {
        tlb_add_large_page(env, vaddr, size);
    }

    sz = size;
    section = address_space_translate_for_iotlb(cpu, asidx, paddr, &xlat, &sz);
    assert(sz >= TARGET_PAGE_SIZE);

    tlb_debug("vaddr=" TARGET_FMT_lx " paddr=0x" TARGET_FMT_plx
              " prot=%x idx=%d\n",
              vaddr, paddr, prot, mmu_idx);

    address = vaddr;
    if (!memory_region_is_ram(section->mr) && !memory_region_is_romd(section->mr)) {
        /* IO memory case */
        address |= TLB_MMIO;
        addend = 0;
    } else {
        /* TLB_MMIO for rom/romd handled below */
        addend = (uintptr_t)memory_region_get_ram_ptr(section->mr) + xlat;
    }

    code_address = address;
    iotlb = memory_region_section_get_iotlb(cpu, section, vaddr, paddr, xlat,
                                            prot, &address);

    index = (vaddr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
    te = &env->tlb_table[mmu_idx][index];

    /* do not discard the translation in te, evict it into a victim tlb */
    env->tlb_v_table[mmu_idx][vidx] = *te;
    env->iotlb_v[mmu_idx][vidx] = env->iotlb[mmu_idx][index];

    /* refill the tlb */
    env->iotlb[mmu_idx][index].addr = iotlb - vaddr;
    env->iotlb[mmu_idx][index].attrs = attrs;
    te->addend = addend - vaddr;
    if (prot & PAGE_READ) {
        te->addr_read = address;
    } else {
        te->addr_read = -1;
    }

    if (prot & PAGE_EXEC) {
        te->addr_code = code_address;
    } else {
        te->addr_code = -1;
    }
    if (prot & PAGE_WRITE) {
        if ((memory_region_is_ram(section->mr) && section->readonly)
            || memory_region_is_romd(section->mr)) {
            /* Write access calls the I/O callback.  */
            te->addr_write = address | TLB_MMIO;
        } else if (memory_region_is_ram(section->mr)
                   && cpu_physical_memory_is_clean(
                       memory_region_get_ram_addr(section->mr) + xlat)) {
            te->addr_write = address | TLB_NOTDIRTY;
        } else {
            te->addr_write = address;
        }
    } else {
        te->addr_write = -1;
    }
}

/* Add a new TLB entry, but without specifying the memory
 * transaction attributes to be used.
 */
void tlb_set_page(CPUState *cpu, target_ulong vaddr,
                  hwaddr paddr, int prot,
                  int mmu_idx, target_ulong size)
{
    tlb_set_page_with_attrs(cpu, vaddr, paddr, MEMTXATTRS_UNSPECIFIED,
                            prot, mmu_idx, size);
}

static void report_bad_exec(CPUState *cpu, target_ulong addr)
{
    /* Accidentally executing outside RAM or ROM is quite common for
     * several user-error situations, so report it in a way that
     * makes it clear that this isn't a QEMU bug and provide suggestions
     * about what a user could do to fix things.
     */
    error_report("Trying to execute code outside RAM or ROM at 0x"
                 TARGET_FMT_lx, addr);
    error_printf("This usually means one of the following happened:\n\n"
                 "(1) You told QEMU to execute a kernel for the wrong machine "
                 "type, and it crashed on startup (eg trying to run a "
                 "raspberry pi kernel on a versatilepb QEMU machine)\n"
                 "(2) You didn't give QEMU a kernel or BIOS filename at all, "
                 "and QEMU executed a ROM full of no-op instructions until "
                 "it fell off the end\n"
                 "(3) Your guest kernel has a bug and crashed by jumping "
                 "off into nowhere\n\n"
                 "This is almost always one of the first two, so check your "
                 "command line and that you are using the right type of kernel "
                 "for this machine.\n"
                 "If you think option (3) is likely then you can try debugging "
                 "your guest with the -d debug options; in particular "
                 "-d guest_errors will cause the log to include a dump of the "
                 "guest register state at this point.\n\n"
                 "Execution cannot continue; stopping here.\n\n");

    /* Report also to the logs, with more detail including register dump */
    qemu_log_mask(LOG_GUEST_ERROR, "qemu: fatal: Trying to execute code "
                  "outside RAM or ROM at 0x" TARGET_FMT_lx "\n", addr);
    log_cpu_state_mask(LOG_GUEST_ERROR, cpu, CPU_DUMP_FPU | CPU_DUMP_CCOP);
}

static inline ram_addr_t qemu_ram_addr_from_host_nofail(void *ptr)
{
    ram_addr_t ram_addr;

    ram_addr = qemu_ram_addr_from_host(ptr);
    if (ram_addr == RAM_ADDR_INVALID) {
        error_report("Bad ram pointer %p", ptr);
        abort();
    }
    return ram_addr;
}

/* NOTE: this function can trigger an exception */
/* NOTE2: the returned address is not exactly the physical address: it
 * is actually a ram_addr_t (in system mode; the user mode emulation
 * version of this function returns a guest virtual address).
 */
tb_page_addr_t get_page_addr_code(CPUArchState *env1, target_ulong addr)
{
    int mmu_idx, page_index, pd;
    void *p;
    MemoryRegion *mr;
    CPUState *cpu = ENV_GET_CPU(env1);
    CPUIOTLBEntry *iotlbentry;

    page_index = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
    mmu_idx = cpu_mmu_index(env1, true);
    if (unlikely(env1->tlb_table[mmu_idx][page_index].addr_code !=
                 (addr & TARGET_PAGE_MASK))) {
        cpu_ldub_code(env1, addr);
    }
    iotlbentry = &env1->iotlb[mmu_idx][page_index];
    pd = iotlbentry->addr & ~TARGET_PAGE_MASK;
    mr = iotlb_to_region(cpu, pd, iotlbentry->attrs);
    if (memory_region_is_unassigned(mr)) {
        CPUClass *cc = CPU_GET_CLASS(cpu);

        if (cc->do_unassigned_access) {
            cc->do_unassigned_access(cpu, addr, false, true, 0, 4);
        } else {
            report_bad_exec(cpu, addr);
            exit(1);
        }
    }
    p = (void *)((uintptr_t)addr + env1->tlb_table[mmu_idx][page_index].addend);
    return qemu_ram_addr_from_host_nofail(p);
}

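/* Slow-path MMIO read: resolve the memory region from the IOTLB entry and
 * dispatch the read through it, taking the iothread ("big QEMU") lock when
 * the region requires global locking.
 */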
static uint64_t io_readx(CPUArchState *env, CPUIOTLBEntry *iotlbentry,
                         target_ulong addr, uintptr_t retaddr, int size)
{
    CPUState *cpu = ENV_GET_CPU(env);
    hwaddr physaddr = iotlbentry->addr;
    MemoryRegion *mr = iotlb_to_region(cpu, physaddr, iotlbentry->attrs);
    uint64_t val;
    bool locked = false;

    physaddr = (physaddr & TARGET_PAGE_MASK) + addr;
    cpu->mem_io_pc = retaddr;
    if (mr != &io_mem_rom && mr != &io_mem_notdirty && !cpu->can_do_io) {
        cpu_io_recompile(cpu, retaddr);
    }

    cpu->mem_io_vaddr = addr;

    if (mr->global_locking) {
        qemu_mutex_lock_iothread();
        locked = true;
    }
    memory_region_dispatch_read(mr, physaddr, &val, size, iotlbentry->attrs);
    if (locked) {
        qemu_mutex_unlock_iothread();
    }

    return val;
}

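/* MMIO write counterpart of io_readx(): dispatch the write through the
 * memory region, again taking the iothread lock when the region asks for
 * global locking.
 */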
static void io_writex(CPUArchState *env, CPUIOTLBEntry *iotlbentry,
                      uint64_t val, target_ulong addr,
                      uintptr_t retaddr, int size)
{
    CPUState *cpu = ENV_GET_CPU(env);
    hwaddr physaddr = iotlbentry->addr;
    MemoryRegion *mr = iotlb_to_region(cpu, physaddr, iotlbentry->attrs);
    bool locked = false;

    physaddr = (physaddr & TARGET_PAGE_MASK) + addr;
    if (mr != &io_mem_rom && mr != &io_mem_notdirty && !cpu->can_do_io) {
        cpu_io_recompile(cpu, retaddr);
    }
    cpu->mem_io_vaddr = addr;
    cpu->mem_io_pc = retaddr;

    if (mr->global_locking) {
        qemu_mutex_lock_iothread();
        locked = true;
    }
    memory_region_dispatch_write(mr, physaddr, val, size, iotlbentry->attrs);
    if (locked) {
        qemu_mutex_unlock_iothread();
    }
}

/* Return true if ADDR is present in the victim tlb, and has been copied
   back to the main tlb.  */
static bool victim_tlb_hit(CPUArchState *env, size_t mmu_idx, size_t index,
                           size_t elt_ofs, target_ulong page)
{
    size_t vidx;
    for (vidx = 0; vidx < CPU_VTLB_SIZE; ++vidx) {
        CPUTLBEntry *vtlb = &env->tlb_v_table[mmu_idx][vidx];
        target_ulong cmp = *(target_ulong *)((uintptr_t)vtlb + elt_ofs);

        if (cmp == page) {
            /* Found entry in victim tlb, swap tlb and iotlb.  */
            CPUTLBEntry tmptlb, *tlb = &env->tlb_table[mmu_idx][index];
            CPUIOTLBEntry tmpio, *io = &env->iotlb[mmu_idx][index];
            CPUIOTLBEntry *vio = &env->iotlb_v[mmu_idx][vidx];

            tmptlb = *tlb; *tlb = *vtlb; *vtlb = tmptlb;
            tmpio = *io; *io = *vio; *vio = tmpio;
            return true;
        }
    }
    return false;
}

/* Macro to call the above, with local variables from the use context.  */
#define VICTIM_TLB_HIT(TY, ADDR) \
    victim_tlb_hit(env, mmu_idx, index, offsetof(CPUTLBEntry, TY), \
                   (ADDR) & TARGET_PAGE_MASK)

/* Probe for whether the specified guest write access is permitted.
 * If it is not permitted then an exception will be taken in the same
 * way as if this were a real write access (and we will not return).
 * Otherwise the function will return, and there will be a valid
 * entry in the TLB for this access.
 */
void probe_write(CPUArchState *env, target_ulong addr, int mmu_idx,
                 uintptr_t retaddr)
{
    int index = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
    target_ulong tlb_addr = env->tlb_table[mmu_idx][index].addr_write;

    if ((addr & TARGET_PAGE_MASK)
        != (tlb_addr & (TARGET_PAGE_MASK | TLB_INVALID_MASK))) {
        /* TLB entry is for a different page */
        if (!VICTIM_TLB_HIT(addr_write, addr)) {
            tlb_fill(ENV_GET_CPU(env), addr, MMU_DATA_STORE, mmu_idx, retaddr);
        }
    }
}

/* Probe for a read-modify-write atomic operation.  Do not allow unaligned
 * operations, or io operations to proceed.  Return the host address.  */
static void *atomic_mmu_lookup(CPUArchState *env, target_ulong addr,
                               TCGMemOpIdx oi, uintptr_t retaddr)
{
    size_t mmu_idx = get_mmuidx(oi);
    size_t index = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
    CPUTLBEntry *tlbe = &env->tlb_table[mmu_idx][index];
    target_ulong tlb_addr = tlbe->addr_write;
    TCGMemOp mop = get_memop(oi);
    int a_bits = get_alignment_bits(mop);
    int s_bits = mop & MO_SIZE;

    /* Adjust the given return address.  */
    retaddr -= GETPC_ADJ;

    /* Enforce guest required alignment.  */
    if (unlikely(a_bits > 0 && (addr & ((1 << a_bits) - 1)))) {
        /* ??? Maybe indicate atomic op to cpu_unaligned_access */
        cpu_unaligned_access(ENV_GET_CPU(env), addr, MMU_DATA_STORE,
                             mmu_idx, retaddr);
    }

    /* Enforce qemu required alignment.  */
    if (unlikely(addr & ((1 << s_bits) - 1))) {
        /* We get here if guest alignment was not requested,
           or was not enforced by cpu_unaligned_access above.
           We might widen the access and emulate, but for now
           mark an exception and exit the cpu loop.  */
        goto stop_the_world;
    }

    /* Check TLB entry and enforce page permissions.  */
    if ((addr & TARGET_PAGE_MASK)
        != (tlb_addr & (TARGET_PAGE_MASK | TLB_INVALID_MASK))) {
        if (!VICTIM_TLB_HIT(addr_write, addr)) {
            tlb_fill(ENV_GET_CPU(env), addr, MMU_DATA_STORE, mmu_idx, retaddr);
        }
        tlb_addr = tlbe->addr_write;
    }

    /* Notice an IO access, or a notdirty page.  */
    if (unlikely(tlb_addr & ~TARGET_PAGE_MASK)) {
        /* There's really nothing that can be done to
           support this apart from stop-the-world.  */
        goto stop_the_world;
    }

    /* Let the guest notice RMW on a write-only page.  */
    if (unlikely(tlbe->addr_read != tlb_addr)) {
        tlb_fill(ENV_GET_CPU(env), addr, MMU_DATA_LOAD, mmu_idx, retaddr);
        /* Since we don't support reads and writes to different addresses,
           and we do have the proper page loaded for write, this shouldn't
           ever return.  But just in case, handle via stop-the-world.  */
        goto stop_the_world;
    }

    return (void *)((uintptr_t)addr + tlbe->addend);

 stop_the_world:
    cpu_loop_exit_atomic(ENV_GET_CPU(env), retaddr);
}

#ifdef TARGET_WORDS_BIGENDIAN
# define TGT_BE(X)  (X)
# define TGT_LE(X)  BSWAP(X)
#else
# define TGT_BE(X)  BSWAP(X)
# define TGT_LE(X)  (X)
#endif

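/* The repeated inclusions below expand softmmu_template.h and
 * atomic_template.h once per access size (DATA_SIZE), generating the
 * load/store and atomic helpers used by the softmmu slow path.
 */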
#define MMUSUFFIX _mmu

#define DATA_SIZE 1
#include "softmmu_template.h"

#define DATA_SIZE 2
#include "softmmu_template.h"

#define DATA_SIZE 4
#include "softmmu_template.h"

#define DATA_SIZE 8
#include "softmmu_template.h"

/* First set of helpers allows passing in of OI and RETADDR.  This makes
   them callable from other helpers.  */

#define EXTRA_ARGS , TCGMemOpIdx oi, uintptr_t retaddr
#define ATOMIC_NAME(X) \
    HELPER(glue(glue(glue(atomic_ ## X, SUFFIX), END), _mmu))
#define ATOMIC_MMU_LOOKUP  atomic_mmu_lookup(env, addr, oi, retaddr)

#define DATA_SIZE 1
#include "atomic_template.h"

#define DATA_SIZE 2
#include "atomic_template.h"

#define DATA_SIZE 4
#include "atomic_template.h"

#ifdef CONFIG_ATOMIC64
#define DATA_SIZE 8
#include "atomic_template.h"
#endif

#ifdef CONFIG_ATOMIC128
#define DATA_SIZE 16
#include "atomic_template.h"
#endif

/* Second set of helpers are directly callable from TCG as helpers.  */

#undef EXTRA_ARGS
#undef ATOMIC_NAME
#undef ATOMIC_MMU_LOOKUP
#define EXTRA_ARGS         , TCGMemOpIdx oi
#define ATOMIC_NAME(X)     HELPER(glue(glue(atomic_ ## X, SUFFIX), END))
#define ATOMIC_MMU_LOOKUP  atomic_mmu_lookup(env, addr, oi, GETPC())

#define DATA_SIZE 1
#include "atomic_template.h"

#define DATA_SIZE 2
#include "atomic_template.h"

#define DATA_SIZE 4
#include "atomic_template.h"

#ifdef CONFIG_ATOMIC64
#define DATA_SIZE 8
#include "atomic_template.h"
#endif

/* Code access functions.  */

#undef MMUSUFFIX
#define MMUSUFFIX _cmmu
#undef GETPC
#define GETPC() ((uintptr_t)0)
#define SOFTMMU_CODE_ACCESS

#define DATA_SIZE 1
#include "softmmu_template.h"

#define DATA_SIZE 2
#include "softmmu_template.h"

#define DATA_SIZE 4
#include "softmmu_template.h"

#define DATA_SIZE 8
#include "softmmu_template.h"