/*
 * Common CPU TLB handling
 *
 * Copyright (c) 2003 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */

#include "qemu/osdep.h"
#include "qemu/main-loop.h"
#include "cpu.h"
#include "exec/exec-all.h"
#include "exec/memory.h"
#include "exec/address-spaces.h"
#include "exec/cpu_ldst.h"
#include "exec/cputlb.h"
#include "exec/memory-internal.h"
#include "exec/ram_addr.h"
#include "tcg/tcg.h"
#include "qemu/error-report.h"
#include "exec/log.h"
#include "exec/helper-proto.h"
#include "qemu/atomic.h"

/* DEBUG defines, enable DEBUG_TLB_LOG to log to the CPU_LOG_MMU target */
/* #define DEBUG_TLB */
/* #define DEBUG_TLB_LOG */

#ifdef DEBUG_TLB
# define DEBUG_TLB_GATE 1
# ifdef DEBUG_TLB_LOG
#  define DEBUG_TLB_LOG_GATE 1
# else
#  define DEBUG_TLB_LOG_GATE 0
# endif
#else
# define DEBUG_TLB_GATE 0
# define DEBUG_TLB_LOG_GATE 0
#endif

#define tlb_debug(fmt, ...) do { \
    if (DEBUG_TLB_LOG_GATE) { \
        qemu_log_mask(CPU_LOG_MMU, "%s: " fmt, __func__, \
                      ## __VA_ARGS__); \
    } else if (DEBUG_TLB_GATE) { \
        fprintf(stderr, "%s: " fmt, __func__, ## __VA_ARGS__); \
    } \
} while (0)

#define assert_cpu_is_self(this_cpu) do {                               \
        if (DEBUG_TLB_GATE) {                                           \
            g_assert(!(this_cpu)->created || qemu_cpu_is_self(this_cpu)); \
        }                                                               \
    } while (0)

/* run_on_cpu_data.target_ptr should always be big enough for a
 * target_ulong even on 32 bit builds */
QEMU_BUILD_BUG_ON(sizeof(target_ulong) > sizeof(run_on_cpu_data));

/* statistics */
int tlb_flush_count;

/* This is OK because CPU architectures generally permit an
 * implementation to drop entries from the TLB at any time, so
 * flushing more entries than required is only an efficiency issue,
 * not a correctness issue.
 */
static void tlb_flush_nocheck(CPUState *cpu)
{
    CPUArchState *env = cpu->env_ptr;

    /* The QOM tests will trigger tlb_flushes without setting up TCG
     * so we bug out here in that case.
     */
    if (!tcg_enabled()) {
        return;
    }

    assert_cpu_is_self(cpu);
    tlb_debug("(count: %d)\n", tlb_flush_count++);

    tb_lock();

    memset(env->tlb_table, -1, sizeof(env->tlb_table));
    memset(env->tlb_v_table, -1, sizeof(env->tlb_v_table));
    memset(cpu->tb_jmp_cache, 0, sizeof(cpu->tb_jmp_cache));

    env->vtlb_index = 0;
    env->tlb_flush_addr = -1;
    env->tlb_flush_mask = 0;

    tb_unlock();

    atomic_mb_set(&cpu->pending_tlb_flush, false);
}

static void tlb_flush_global_async_work(CPUState *cpu, run_on_cpu_data data)
{
    tlb_flush_nocheck(cpu);
}

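/* Flush the whole TLB of @cpu.  If called from a thread other than
 * @cpu's own vCPU thread, the flush is deferred via async_run_on_cpu()
 * and at most one global flush is kept pending at a time.
 */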
void tlb_flush(CPUState *cpu)
{
    if (cpu->created && !qemu_cpu_is_self(cpu)) {
        /* Only schedule the async flush if one is not already pending. */
        if (atomic_cmpxchg(&cpu->pending_tlb_flush, false, true) == false) {
            async_run_on_cpu(cpu, tlb_flush_global_async_work,
                             RUN_ON_CPU_NULL);
        }
    } else {
        tlb_flush_nocheck(cpu);
    }
}

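/* Flush those TLB entries of @cpu that belong to the MMU indexes whose
 * bits are set in @idxmap; the tb_jmp_cache is cleared as well.
 */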
static inline void v_tlb_flush_by_mmuidx(CPUState *cpu, uint16_t idxmap)
{
    CPUArchState *env = cpu->env_ptr;
    unsigned long mmu_idx_bitmask = idxmap;
    int mmu_idx;

    assert_cpu_is_self(cpu);
    tlb_debug("start\n");

    tb_lock();

    for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++) {

        if (test_bit(mmu_idx, &mmu_idx_bitmask)) {
            tlb_debug("%d\n", mmu_idx);

            memset(env->tlb_table[mmu_idx], -1, sizeof(env->tlb_table[0]));
            memset(env->tlb_v_table[mmu_idx], -1, sizeof(env->tlb_v_table[0]));
        }
    }

    memset(cpu->tb_jmp_cache, 0, sizeof(cpu->tb_jmp_cache));

    tb_unlock();
}

void tlb_flush_by_mmuidx(CPUState *cpu, uint16_t idxmap)
{
    v_tlb_flush_by_mmuidx(cpu, idxmap);
}

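/* Invalidate a single TLB entry if it matches @addr for any of its
 * read, write or code access addresses.
 */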
static inline void tlb_flush_entry(CPUTLBEntry *tlb_entry, target_ulong addr)
{
    if (addr == (tlb_entry->addr_read &
                 (TARGET_PAGE_MASK | TLB_INVALID_MASK)) ||
        addr == (tlb_entry->addr_write &
                 (TARGET_PAGE_MASK | TLB_INVALID_MASK)) ||
        addr == (tlb_entry->addr_code &
                 (TARGET_PAGE_MASK | TLB_INVALID_MASK))) {
        memset(tlb_entry, -1, sizeof(*tlb_entry));
    }
}

static void tlb_flush_page_async_work(CPUState *cpu, run_on_cpu_data data)
{
    CPUArchState *env = cpu->env_ptr;
    target_ulong addr = (target_ulong) data.target_ptr;
    int i;
    int mmu_idx;

    assert_cpu_is_self(cpu);

    tlb_debug("page :" TARGET_FMT_lx "\n", addr);

    /* Check if we need to flush due to large pages.  */
    if ((addr & env->tlb_flush_mask) == env->tlb_flush_addr) {
        tlb_debug("forcing full flush ("
                  TARGET_FMT_lx "/" TARGET_FMT_lx ")\n",
                  env->tlb_flush_addr, env->tlb_flush_mask);

        tlb_flush(cpu);
        return;
    }

    addr &= TARGET_PAGE_MASK;
    i = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
    for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++) {
        tlb_flush_entry(&env->tlb_table[mmu_idx][i], addr);
    }

    /* check whether there are entries that need to be flushed in the vtlb */
    for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++) {
        int k;
        for (k = 0; k < CPU_VTLB_SIZE; k++) {
            tlb_flush_entry(&env->tlb_v_table[mmu_idx][k], addr);
        }
    }

    tb_flush_jmp_cache(cpu, addr);
}

void tlb_flush_page(CPUState *cpu, target_ulong addr)
{
    tlb_debug("page :" TARGET_FMT_lx "\n", addr);

    if (!qemu_cpu_is_self(cpu)) {
        async_run_on_cpu(cpu, tlb_flush_page_async_work,
                         RUN_ON_CPU_TARGET_PTR(addr));
    } else {
        tlb_flush_page_async_work(cpu, RUN_ON_CPU_TARGET_PTR(addr));
    }
}

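/* Flush the page at @addr from the TLB of @cpu, but only for the MMU
 * indexes whose bits are set in @idxmap.  For example, a target helper
 * that keeps user and kernel translations in MMU indexes 0 and 1
 * (hypothetical numbering, for illustration only) could flush both with:
 *
 *     tlb_flush_page_by_mmuidx(cpu, addr, (1 << 0) | (1 << 1));
 */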
void tlb_flush_page_by_mmuidx(CPUState *cpu, target_ulong addr, uint16_t idxmap)
{
    CPUArchState *env = cpu->env_ptr;
    unsigned long mmu_idx_bitmap = idxmap;
    int i, page, mmu_idx;

    assert_cpu_is_self(cpu);
    tlb_debug("addr "TARGET_FMT_lx"\n", addr);

    /* Check if we need to flush due to large pages.  */
    if ((addr & env->tlb_flush_mask) == env->tlb_flush_addr) {
        tlb_debug("forced full flush ("
                  TARGET_FMT_lx "/" TARGET_FMT_lx ")\n",
                  env->tlb_flush_addr, env->tlb_flush_mask);

        v_tlb_flush_by_mmuidx(cpu, idxmap);
        return;
    }

    addr &= TARGET_PAGE_MASK;
    page = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);

    for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++) {
        if (test_bit(mmu_idx, &mmu_idx_bitmap)) {
            tlb_flush_entry(&env->tlb_table[mmu_idx][page], addr);

            /* check whether there are vtlb entries that need to be flushed */
            for (i = 0; i < CPU_VTLB_SIZE; i++) {
                tlb_flush_entry(&env->tlb_v_table[mmu_idx][i], addr);
            }
        }
    }

    tb_flush_jmp_cache(cpu, addr);
}

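/* Flush the page at @addr from the TLBs of all vCPUs; each flush is
 * queued as asynchronous work on the target vCPU.
 */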
void tlb_flush_page_all(target_ulong addr)
{
    CPUState *cpu;

    CPU_FOREACH(cpu) {
        async_run_on_cpu(cpu, tlb_flush_page_async_work,
                         RUN_ON_CPU_TARGET_PTR(addr));
    }
}

/* update the TLBs so that writes to code in the virtual page 'addr'
   can be detected */
void tlb_protect_code(ram_addr_t ram_addr)
{
    cpu_physical_memory_test_and_clear_dirty(ram_addr, TARGET_PAGE_SIZE,
                                             DIRTY_MEMORY_CODE);
}

/* update the TLB so that writes in physical page 'phys_addr' are no longer
   tested for self modifying code */
void tlb_unprotect_code(ram_addr_t ram_addr)
{
    cpu_physical_memory_set_dirty_flag(ram_addr, DIRTY_MEMORY_CODE);
}

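/* Return true if the entry maps plain, dirty RAM that can be written
 * directly, i.e. none of the invalid/MMIO/notdirty flags are set.
 */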
static bool tlb_is_dirty_ram(CPUTLBEntry *tlbe)
{
    return (tlbe->addr_write & (TLB_INVALID_MASK|TLB_MMIO|TLB_NOTDIRTY)) == 0;
}

void tlb_reset_dirty_range(CPUTLBEntry *tlb_entry, uintptr_t start,
                           uintptr_t length)
{
    uintptr_t addr;

    if (tlb_is_dirty_ram(tlb_entry)) {
        addr = (tlb_entry->addr_write & TARGET_PAGE_MASK) + tlb_entry->addend;
        if ((addr - start) < length) {
            tlb_entry->addr_write |= TLB_NOTDIRTY;
        }
    }
}

void tlb_reset_dirty(CPUState *cpu, ram_addr_t start1, ram_addr_t length)
{
    CPUArchState *env;

    int mmu_idx;

    assert_cpu_is_self(cpu);

    env = cpu->env_ptr;
    for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++) {
        unsigned int i;

        for (i = 0; i < CPU_TLB_SIZE; i++) {
            tlb_reset_dirty_range(&env->tlb_table[mmu_idx][i],
                                  start1, length);
        }

        for (i = 0; i < CPU_VTLB_SIZE; i++) {
            tlb_reset_dirty_range(&env->tlb_v_table[mmu_idx][i],
                                  start1, length);
        }
    }
}

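/* Clear the TLB_NOTDIRTY flag from an entry that maps @vaddr, so that
 * subsequent writes no longer take the slow notdirty path.
 */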
static inline void tlb_set_dirty1(CPUTLBEntry *tlb_entry, target_ulong vaddr)
{
    if (tlb_entry->addr_write == (vaddr | TLB_NOTDIRTY)) {
        tlb_entry->addr_write = vaddr;
    }
}

/* update the TLB corresponding to virtual page vaddr
   so that it is no longer dirty */
void tlb_set_dirty(CPUState *cpu, target_ulong vaddr)
{
    CPUArchState *env = cpu->env_ptr;
    int i;
    int mmu_idx;

    assert_cpu_is_self(cpu);

    vaddr &= TARGET_PAGE_MASK;
    i = (vaddr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
    for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++) {
        tlb_set_dirty1(&env->tlb_table[mmu_idx][i], vaddr);
    }

    for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++) {
        int k;
        for (k = 0; k < CPU_VTLB_SIZE; k++) {
            tlb_set_dirty1(&env->tlb_v_table[mmu_idx][k], vaddr);
        }
    }
}

/* Our TLB does not support large pages, so remember the area covered by
   large pages and trigger a full TLB flush if these are invalidated.  */
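/* Worked example (illustrative, assuming a 32-bit target): the first
 * 64 KiB large page at 0x00110000 records tlb_flush_addr = 0x00110000,
 * tlb_flush_mask = 0xffff0000.  A second 64 KiB page at 0x00130000 then
 * widens the mask to 0xfffc0000 and the address to 0x00100000, so the
 * tracked region grows to 0x00100000..0x0013ffff and covers both pages.
 */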
static void tlb_add_large_page(CPUArchState *env, target_ulong vaddr,
                               target_ulong size)
{
    target_ulong mask = ~(size - 1);

    if (env->tlb_flush_addr == (target_ulong)-1) {
        env->tlb_flush_addr = vaddr & mask;
        env->tlb_flush_mask = mask;
        return;
    }
    /* Extend the existing region to include the new page.
       This is a compromise between unnecessary flushes and the cost
       of maintaining a full variable size TLB.  */
    mask &= env->tlb_flush_mask;
    while (((env->tlb_flush_addr ^ vaddr) & mask) != 0) {
        mask <<= 1;
    }
    env->tlb_flush_addr &= mask;
    env->tlb_flush_mask = mask;
}

/* Add a new TLB entry. At most one entry for a given virtual address
 * is permitted. Only a single TARGET_PAGE_SIZE region is mapped, the
 * supplied size is only used by tlb_flush_page.
 *
 * Called from TCG-generated code, which is under an RCU read-side
 * critical section.
 */
void tlb_set_page_with_attrs(CPUState *cpu, target_ulong vaddr,
                             hwaddr paddr, MemTxAttrs attrs, int prot,
                             int mmu_idx, target_ulong size)
{
    CPUArchState *env = cpu->env_ptr;
    MemoryRegionSection *section;
    unsigned int index;
    target_ulong address;
    target_ulong code_address;
    uintptr_t addend;
    CPUTLBEntry *te;
    hwaddr iotlb, xlat, sz;
    unsigned vidx = env->vtlb_index++ % CPU_VTLB_SIZE;
    int asidx = cpu_asidx_from_attrs(cpu, attrs);

    assert_cpu_is_self(cpu);
    assert(size >= TARGET_PAGE_SIZE);
    if (size != TARGET_PAGE_SIZE) {
        tlb_add_large_page(env, vaddr, size);
    }

    sz = size;
    section = address_space_translate_for_iotlb(cpu, asidx, paddr, &xlat, &sz);
    assert(sz >= TARGET_PAGE_SIZE);

    tlb_debug("vaddr=" TARGET_FMT_lx " paddr=0x" TARGET_FMT_plx
              " prot=%x idx=%d\n",
              vaddr, paddr, prot, mmu_idx);

    address = vaddr;
    if (!memory_region_is_ram(section->mr) && !memory_region_is_romd(section->mr)) {
        /* IO memory case */
        address |= TLB_MMIO;
        addend = 0;
    } else {
        /* TLB_MMIO for rom/romd handled below */
        addend = (uintptr_t)memory_region_get_ram_ptr(section->mr) + xlat;
    }

    code_address = address;
    iotlb = memory_region_section_get_iotlb(cpu, section, vaddr, paddr, xlat,
                                            prot, &address);

    index = (vaddr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
    te = &env->tlb_table[mmu_idx][index];

    /* do not discard the translation in te, evict it into a victim tlb */
    env->tlb_v_table[mmu_idx][vidx] = *te;
    env->iotlb_v[mmu_idx][vidx] = env->iotlb[mmu_idx][index];

    /* refill the tlb */
    env->iotlb[mmu_idx][index].addr = iotlb - vaddr;
    env->iotlb[mmu_idx][index].attrs = attrs;
    te->addend = addend - vaddr;
    if (prot & PAGE_READ) {
        te->addr_read = address;
    } else {
        te->addr_read = -1;
    }

    if (prot & PAGE_EXEC) {
        te->addr_code = code_address;
    } else {
        te->addr_code = -1;
    }
    if (prot & PAGE_WRITE) {
        if ((memory_region_is_ram(section->mr) && section->readonly)
            || memory_region_is_romd(section->mr)) {
            /* Write access calls the I/O callback.  */
            te->addr_write = address | TLB_MMIO;
        } else if (memory_region_is_ram(section->mr)
                   && cpu_physical_memory_is_clean(
                       memory_region_get_ram_addr(section->mr) + xlat)) {
            te->addr_write = address | TLB_NOTDIRTY;
        } else {
            te->addr_write = address;
        }
    } else {
        te->addr_write = -1;
    }
}

/* Add a new TLB entry, but without specifying the memory
 * transaction attributes to be used.
 */
void tlb_set_page(CPUState *cpu, target_ulong vaddr,
                  hwaddr paddr, int prot,
                  int mmu_idx, target_ulong size)
{
    tlb_set_page_with_attrs(cpu, vaddr, paddr, MEMTXATTRS_UNSPECIFIED,
                            prot, mmu_idx, size);
}

static void report_bad_exec(CPUState *cpu, target_ulong addr)
{
    /* Accidentally executing outside RAM or ROM is quite common for
     * several user-error situations, so report it in a way that
     * makes it clear that this isn't a QEMU bug and provide suggestions
     * about what a user could do to fix things.
     */
    error_report("Trying to execute code outside RAM or ROM at 0x"
                 TARGET_FMT_lx, addr);
    error_printf("This usually means one of the following happened:\n\n"
                 "(1) You told QEMU to execute a kernel for the wrong machine "
                 "type, and it crashed on startup (eg trying to run a "
                 "raspberry pi kernel on a versatilepb QEMU machine)\n"
                 "(2) You didn't give QEMU a kernel or BIOS filename at all, "
                 "and QEMU executed a ROM full of no-op instructions until "
                 "it fell off the end\n"
                 "(3) Your guest kernel has a bug and crashed by jumping "
                 "off into nowhere\n\n"
                 "This is almost always one of the first two, so check your "
                 "command line and that you are using the right type of kernel "
                 "for this machine.\n"
                 "If you think option (3) is likely then you can try debugging "
                 "your guest with the -d debug options; in particular "
                 "-d guest_errors will cause the log to include a dump of the "
                 "guest register state at this point.\n\n"
                 "Execution cannot continue; stopping here.\n\n");

    /* Report also to the logs, with more detail including register dump */
    qemu_log_mask(LOG_GUEST_ERROR, "qemu: fatal: Trying to execute code "
                  "outside RAM or ROM at 0x" TARGET_FMT_lx "\n", addr);
    log_cpu_state_mask(LOG_GUEST_ERROR, cpu, CPU_DUMP_FPU | CPU_DUMP_CCOP);
}

static inline ram_addr_t qemu_ram_addr_from_host_nofail(void *ptr)
{
    ram_addr_t ram_addr;

    ram_addr = qemu_ram_addr_from_host(ptr);
    if (ram_addr == RAM_ADDR_INVALID) {
        error_report("Bad ram pointer %p", ptr);
        abort();
    }
    return ram_addr;
}

/* NOTE: this function can trigger an exception */
/* NOTE2: the returned address is not exactly the physical address: it
 * is actually a ram_addr_t (in system mode; the user mode emulation
 * version of this function returns a guest virtual address).
 */
tb_page_addr_t get_page_addr_code(CPUArchState *env1, target_ulong addr)
{
    int mmu_idx, page_index, pd;
    void *p;
    MemoryRegion *mr;
    CPUState *cpu = ENV_GET_CPU(env1);
    CPUIOTLBEntry *iotlbentry;

    page_index = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
    mmu_idx = cpu_mmu_index(env1, true);
    if (unlikely(env1->tlb_table[mmu_idx][page_index].addr_code !=
                 (addr & TARGET_PAGE_MASK))) {
        cpu_ldub_code(env1, addr);
    }
    iotlbentry = &env1->iotlb[mmu_idx][page_index];
    pd = iotlbentry->addr & ~TARGET_PAGE_MASK;
    mr = iotlb_to_region(cpu, pd, iotlbentry->attrs);
    if (memory_region_is_unassigned(mr)) {
        CPUClass *cc = CPU_GET_CLASS(cpu);

        if (cc->do_unassigned_access) {
            cc->do_unassigned_access(cpu, addr, false, true, 0, 4);
        } else {
            report_bad_exec(cpu, addr);
            exit(1);
        }
    }
    p = (void *)((uintptr_t)addr + env1->tlb_table[mmu_idx][page_index].addend);
    return qemu_ram_addr_from_host_nofail(p);
}

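/* Perform an MMIO load on behalf of a TLB entry flagged TLB_MMIO,
 * dispatching through the memory region API.  The BQL is taken around
 * the access for regions that require global locking.
 */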
static uint64_t io_readx(CPUArchState *env, CPUIOTLBEntry *iotlbentry,
                         target_ulong addr, uintptr_t retaddr, int size)
{
    CPUState *cpu = ENV_GET_CPU(env);
    hwaddr physaddr = iotlbentry->addr;
    MemoryRegion *mr = iotlb_to_region(cpu, physaddr, iotlbentry->attrs);
    uint64_t val;
    bool locked = false;

    physaddr = (physaddr & TARGET_PAGE_MASK) + addr;
    cpu->mem_io_pc = retaddr;
    if (mr != &io_mem_rom && mr != &io_mem_notdirty && !cpu->can_do_io) {
        cpu_io_recompile(cpu, retaddr);
    }

    cpu->mem_io_vaddr = addr;

    if (mr->global_locking) {
        qemu_mutex_lock_iothread();
        locked = true;
    }
    memory_region_dispatch_read(mr, physaddr, &val, size, iotlbentry->attrs);
    if (locked) {
        qemu_mutex_unlock_iothread();
    }

    return val;
}

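/* The store counterpart of io_readx(): dispatch an MMIO write through
 * the memory region API, again taking the BQL only for regions that
 * require global locking.
 */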
static void io_writex(CPUArchState *env, CPUIOTLBEntry *iotlbentry,
                      uint64_t val, target_ulong addr,
                      uintptr_t retaddr, int size)
{
    CPUState *cpu = ENV_GET_CPU(env);
    hwaddr physaddr = iotlbentry->addr;
    MemoryRegion *mr = iotlb_to_region(cpu, physaddr, iotlbentry->attrs);
    bool locked = false;

    physaddr = (physaddr & TARGET_PAGE_MASK) + addr;
    if (mr != &io_mem_rom && mr != &io_mem_notdirty && !cpu->can_do_io) {
        cpu_io_recompile(cpu, retaddr);
    }
    cpu->mem_io_vaddr = addr;
    cpu->mem_io_pc = retaddr;

    if (mr->global_locking) {
        qemu_mutex_lock_iothread();
        locked = true;
    }
    memory_region_dispatch_write(mr, physaddr, val, size, iotlbentry->attrs);
    if (locked) {
        qemu_mutex_unlock_iothread();
    }
}

/* Return true if ADDR is present in the victim tlb, and has been copied
   back to the main tlb.  */
static bool victim_tlb_hit(CPUArchState *env, size_t mmu_idx, size_t index,
                           size_t elt_ofs, target_ulong page)
{
    size_t vidx;
    for (vidx = 0; vidx < CPU_VTLB_SIZE; ++vidx) {
        CPUTLBEntry *vtlb = &env->tlb_v_table[mmu_idx][vidx];
        target_ulong cmp = *(target_ulong *)((uintptr_t)vtlb + elt_ofs);

        if (cmp == page) {
            /* Found entry in victim tlb, swap tlb and iotlb.  */
            CPUTLBEntry tmptlb, *tlb = &env->tlb_table[mmu_idx][index];
            CPUIOTLBEntry tmpio, *io = &env->iotlb[mmu_idx][index];
            CPUIOTLBEntry *vio = &env->iotlb_v[mmu_idx][vidx];

            tmptlb = *tlb; *tlb = *vtlb; *vtlb = tmptlb;
            tmpio = *io; *io = *vio; *vio = tmpio;
            return true;
        }
    }
    return false;
}

/* Macro to call the above, with local variables from the use context.  */
#define VICTIM_TLB_HIT(TY, ADDR) \
  victim_tlb_hit(env, mmu_idx, index, offsetof(CPUTLBEntry, TY), \
                 (ADDR) & TARGET_PAGE_MASK)

/* Probe for whether the specified guest write access is permitted.
 * If it is not permitted then an exception will be taken in the same
 * way as if this were a real write access (and we will not return).
 * Otherwise the function will return, and there will be a valid
 * entry in the TLB for this access.
 */
void probe_write(CPUArchState *env, target_ulong addr, int mmu_idx,
                 uintptr_t retaddr)
{
    int index = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
    target_ulong tlb_addr = env->tlb_table[mmu_idx][index].addr_write;

    if ((addr & TARGET_PAGE_MASK)
        != (tlb_addr & (TARGET_PAGE_MASK | TLB_INVALID_MASK))) {
        /* TLB entry is for a different page */
        if (!VICTIM_TLB_HIT(addr_write, addr)) {
            tlb_fill(ENV_GET_CPU(env), addr, MMU_DATA_STORE, mmu_idx, retaddr);
        }
    }
}

/* Probe for a read-modify-write atomic operation.  Do not allow unaligned
 * operations, or io operations to proceed.  Return the host address.  */
static void *atomic_mmu_lookup(CPUArchState *env, target_ulong addr,
                               TCGMemOpIdx oi, uintptr_t retaddr)
{
    size_t mmu_idx = get_mmuidx(oi);
    size_t index = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
    CPUTLBEntry *tlbe = &env->tlb_table[mmu_idx][index];
    target_ulong tlb_addr = tlbe->addr_write;
    TCGMemOp mop = get_memop(oi);
    int a_bits = get_alignment_bits(mop);
    int s_bits = mop & MO_SIZE;

    /* Adjust the given return address.  */
    retaddr -= GETPC_ADJ;

    /* Enforce guest required alignment.  */
    if (unlikely(a_bits > 0 && (addr & ((1 << a_bits) - 1)))) {
        /* ??? Maybe indicate atomic op to cpu_unaligned_access */
        cpu_unaligned_access(ENV_GET_CPU(env), addr, MMU_DATA_STORE,
                             mmu_idx, retaddr);
    }

    /* Enforce qemu required alignment.  */
    if (unlikely(addr & ((1 << s_bits) - 1))) {
        /* We get here if guest alignment was not requested,
           or was not enforced by cpu_unaligned_access above.
           We might widen the access and emulate, but for now
           mark an exception and exit the cpu loop.  */
        goto stop_the_world;
    }

    /* Check TLB entry and enforce page permissions.  */
    if ((addr & TARGET_PAGE_MASK)
        != (tlb_addr & (TARGET_PAGE_MASK | TLB_INVALID_MASK))) {
        if (!VICTIM_TLB_HIT(addr_write, addr)) {
            tlb_fill(ENV_GET_CPU(env), addr, MMU_DATA_STORE, mmu_idx, retaddr);
        }
        tlb_addr = tlbe->addr_write;
    }

    /* Notice an IO access, or a notdirty page.  */
    if (unlikely(tlb_addr & ~TARGET_PAGE_MASK)) {
        /* There's really nothing that can be done to
           support this apart from stop-the-world.  */
        goto stop_the_world;
    }

    /* Let the guest notice RMW on a write-only page.  */
    if (unlikely(tlbe->addr_read != tlb_addr)) {
        tlb_fill(ENV_GET_CPU(env), addr, MMU_DATA_LOAD, mmu_idx, retaddr);
        /* Since we don't support reads and writes to different addresses,
           and we do have the proper page loaded for write, this shouldn't
           ever return.  But just in case, handle via stop-the-world.  */
        goto stop_the_world;
    }

    return (void *)((uintptr_t)addr + tlbe->addend);

 stop_the_world:
    cpu_loop_exit_atomic(ENV_GET_CPU(env), retaddr);
}

#ifdef TARGET_WORDS_BIGENDIAN
# define TGT_BE(X)  (X)
# define TGT_LE(X)  BSWAP(X)
#else
# define TGT_BE(X)  BSWAP(X)
# define TGT_LE(X)  (X)
#endif

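/* softmmu_template.h expands into the load/store helpers for a single
 * access size; including it once per DATA_SIZE below generates the
 * full set of _mmu helpers used by TCG-generated code.
 */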
#define MMUSUFFIX _mmu

#define DATA_SIZE 1
#include "softmmu_template.h"

#define DATA_SIZE 2
#include "softmmu_template.h"

#define DATA_SIZE 4
#include "softmmu_template.h"

#define DATA_SIZE 8
#include "softmmu_template.h"

/* First set of helpers allows passing in of OI and RETADDR.  This makes
   them callable from other helpers.  */

#define EXTRA_ARGS , TCGMemOpIdx oi, uintptr_t retaddr
#define ATOMIC_NAME(X) \
    HELPER(glue(glue(glue(atomic_ ## X, SUFFIX), END), _mmu))
#define ATOMIC_MMU_LOOKUP  atomic_mmu_lookup(env, addr, oi, retaddr)

#define DATA_SIZE 1
#include "atomic_template.h"

#define DATA_SIZE 2
#include "atomic_template.h"

#define DATA_SIZE 4
#include "atomic_template.h"

#ifdef CONFIG_ATOMIC64
#define DATA_SIZE 8
#include "atomic_template.h"
#endif

#ifdef CONFIG_ATOMIC128
#define DATA_SIZE 16
#include "atomic_template.h"
#endif

/* Second set of helpers are directly callable from TCG as helpers.  */

#undef EXTRA_ARGS
#undef ATOMIC_NAME
#undef ATOMIC_MMU_LOOKUP
#define EXTRA_ARGS         , TCGMemOpIdx oi
#define ATOMIC_NAME(X)     HELPER(glue(glue(atomic_ ## X, SUFFIX), END))
#define ATOMIC_MMU_LOOKUP  atomic_mmu_lookup(env, addr, oi, GETPC())

#define DATA_SIZE 1
#include "atomic_template.h"

#define DATA_SIZE 2
#include "atomic_template.h"

#define DATA_SIZE 4
#include "atomic_template.h"

#ifdef CONFIG_ATOMIC64
#define DATA_SIZE 8
#include "atomic_template.h"
#endif

/* Code access functions.  */

#undef MMUSUFFIX
#define MMUSUFFIX _cmmu
#undef GETPC
#define GETPC() ((uintptr_t)0)
#define SOFTMMU_CODE_ACCESS

#define DATA_SIZE 1
#include "softmmu_template.h"

#define DATA_SIZE 2
#include "softmmu_template.h"

#define DATA_SIZE 4
#include "softmmu_template.h"

#define DATA_SIZE 8
#include "softmmu_template.h"