/*
 * Common CPU TLB handling
 *
 * Copyright (c) 2003 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */

#include "qemu/osdep.h"
#include "cpu.h"
#include "exec/exec-all.h"
#include "exec/memory.h"
#include "exec/address-spaces.h"
#include "exec/cpu_ldst.h"

#include "exec/cputlb.h"

#include "exec/memory-internal.h"
#include "exec/ram_addr.h"
#include "tcg/tcg.h"

/* DEBUG defines, enable DEBUG_TLB_LOG to log to the CPU_LOG_MMU target */
/* #define DEBUG_TLB */
/* #define DEBUG_TLB_LOG */

#ifdef DEBUG_TLB
# define DEBUG_TLB_GATE 1
# ifdef DEBUG_TLB_LOG
#  define DEBUG_TLB_LOG_GATE 1
# else
#  define DEBUG_TLB_LOG_GATE 0
# endif
#else
# define DEBUG_TLB_GATE 0
# define DEBUG_TLB_LOG_GATE 0
#endif

#define tlb_debug(fmt, ...) do { \
    if (DEBUG_TLB_LOG_GATE) { \
        qemu_log_mask(CPU_LOG_MMU, "%s: " fmt, __func__, \
                      ## __VA_ARGS__); \
    } else if (DEBUG_TLB_GATE) { \
        fprintf(stderr, "%s: " fmt, __func__, ## __VA_ARGS__); \
    } \
} while (0)
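
/* Routing the disabled cases through compile-time-constant gates,
 * rather than compiling the tlb_debug() calls out with the
 * preprocessor, keeps the format strings and arguments type-checked
 * in every build; the compiler eliminates the dead branches.
 */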

/* statistics */
int tlb_flush_count;

/* NOTE:
 * If flush_global is true (the usual case), flush all tlb entries.
 * If flush_global is false, flush (at least) all tlb entries not
 * marked global.
 *
 * Since QEMU doesn't currently implement a global/not-global flag
 * for tlb entries, at the moment tlb_flush() will also flush all
 * tlb entries in the flush_global == false case.  This is OK because
 * CPU architectures generally permit an implementation to drop
 * entries from the TLB at any time, so flushing more entries than
 * required is only an efficiency issue, not a correctness issue.
 */
void tlb_flush(CPUState *cpu, int flush_global)
{
    CPUArchState *env = cpu->env_ptr;

    tlb_debug("(%d)\n", flush_global);

    /* must reset current TB so that interrupts cannot modify the
       links while we are modifying them */
    cpu->current_tb = NULL;

    memset(env->tlb_table, -1, sizeof(env->tlb_table));
    memset(env->tlb_v_table, -1, sizeof(env->tlb_v_table));
    memset(cpu->tb_jmp_cache, 0, sizeof(cpu->tb_jmp_cache));

    env->vtlb_index = 0;
    env->tlb_flush_addr = -1;
    env->tlb_flush_mask = 0;
    tlb_flush_count++;
}

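/* tlb_flush_by_mmuidx() and tlb_flush_page_by_mmuidx() take a variadic
 * list of MMU indexes to operate on; the list is read until the first
 * negative value, so callers must terminate it explicitly, e.g. (with
 * illustrative index values):
 *
 *     tlb_flush_by_mmuidx(cpu, 0, 1, -1);
 */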
static inline void v_tlb_flush_by_mmuidx(CPUState *cpu, va_list argp)
{
    CPUArchState *env = cpu->env_ptr;

    tlb_debug("start\n");
    /* must reset current TB so that interrupts cannot modify the
       links while we are modifying them */
    cpu->current_tb = NULL;

    for (;;) {
        int mmu_idx = va_arg(argp, int);

        if (mmu_idx < 0) {
            break;
        }

        tlb_debug("%d\n", mmu_idx);

        memset(env->tlb_table[mmu_idx], -1, sizeof(env->tlb_table[0]));
        memset(env->tlb_v_table[mmu_idx], -1, sizeof(env->tlb_v_table[0]));
    }

    memset(cpu->tb_jmp_cache, 0, sizeof(cpu->tb_jmp_cache));
}

void tlb_flush_by_mmuidx(CPUState *cpu, ...)
{
    va_list argp;
    va_start(argp, cpu);
    v_tlb_flush_by_mmuidx(cpu, argp);
    va_end(argp);
}

static inline void tlb_flush_entry(CPUTLBEntry *tlb_entry, target_ulong addr)
{
    if (addr == (tlb_entry->addr_read &
                 (TARGET_PAGE_MASK | TLB_INVALID_MASK)) ||
        addr == (tlb_entry->addr_write &
                 (TARGET_PAGE_MASK | TLB_INVALID_MASK)) ||
        addr == (tlb_entry->addr_code &
                 (TARGET_PAGE_MASK | TLB_INVALID_MASK))) {
        memset(tlb_entry, -1, sizeof(*tlb_entry));
    }
}

void tlb_flush_page(CPUState *cpu, target_ulong addr)
{
    CPUArchState *env = cpu->env_ptr;
    int i;
    int mmu_idx;

    tlb_debug("page :" TARGET_FMT_lx "\n", addr);

    /* Check if we need to flush due to large pages.  */
    if ((addr & env->tlb_flush_mask) == env->tlb_flush_addr) {
        tlb_debug("forcing full flush ("
                  TARGET_FMT_lx "/" TARGET_FMT_lx ")\n",
                  env->tlb_flush_addr, env->tlb_flush_mask);

        tlb_flush(cpu, 1);
        return;
    }
    /* must reset current TB so that interrupts cannot modify the
       links while we are modifying them */
    cpu->current_tb = NULL;

    addr &= TARGET_PAGE_MASK;
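    /* The TLB is a direct-mapped table: a page's slot is determined by
     * the low bits of its virtual page number, so at most one entry
     * per mmu_idx (plus the victim TLB below) can map this address.
     */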
    i = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
    for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++) {
        tlb_flush_entry(&env->tlb_table[mmu_idx][i], addr);
    }

    /* check whether there are entries that need to be flushed in the vtlb */
    for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++) {
        int k;
        for (k = 0; k < CPU_VTLB_SIZE; k++) {
            tlb_flush_entry(&env->tlb_v_table[mmu_idx][k], addr);
        }
    }

    tb_flush_jmp_cache(cpu, addr);
}

void tlb_flush_page_by_mmuidx(CPUState *cpu, target_ulong addr, ...)
{
    CPUArchState *env = cpu->env_ptr;
    int i, k;
    va_list argp;

    va_start(argp, addr);

    tlb_debug("addr "TARGET_FMT_lx"\n", addr);

    /* Check if we need to flush due to large pages.  */
    if ((addr & env->tlb_flush_mask) == env->tlb_flush_addr) {
        tlb_debug("forced full flush ("
                  TARGET_FMT_lx "/" TARGET_FMT_lx ")\n",
                  env->tlb_flush_addr, env->tlb_flush_mask);

        v_tlb_flush_by_mmuidx(cpu, argp);
        va_end(argp);
        return;
    }
    /* must reset current TB so that interrupts cannot modify the
       links while we are modifying them */
    cpu->current_tb = NULL;

    addr &= TARGET_PAGE_MASK;
    i = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);

    for (;;) {
        int mmu_idx = va_arg(argp, int);

        if (mmu_idx < 0) {
            break;
        }

        tlb_debug("idx %d\n", mmu_idx);

        tlb_flush_entry(&env->tlb_table[mmu_idx][i], addr);

        /* check whether there are vtlb entries that need to be flushed */
        for (k = 0; k < CPU_VTLB_SIZE; k++) {
            tlb_flush_entry(&env->tlb_v_table[mmu_idx][k], addr);
        }
    }
    va_end(argp);

    tb_flush_jmp_cache(cpu, addr);
}

/* update the TLBs so that writes to code in the virtual page 'addr'
   can be detected */
void tlb_protect_code(ram_addr_t ram_addr)
{
    cpu_physical_memory_test_and_clear_dirty(ram_addr, TARGET_PAGE_SIZE,
                                             DIRTY_MEMORY_CODE);
}

/* update the TLB so that writes in physical page 'ram_addr' are no longer
   tested for self modifying code */
void tlb_unprotect_code(ram_addr_t ram_addr)
{
    cpu_physical_memory_set_dirty_flag(ram_addr, DIRTY_MEMORY_CODE);
}

static bool tlb_is_dirty_ram(CPUTLBEntry *tlbe)
{
    return (tlbe->addr_write & (TLB_INVALID_MASK|TLB_MMIO|TLB_NOTDIRTY)) == 0;
}

void tlb_reset_dirty_range(CPUTLBEntry *tlb_entry, uintptr_t start,
                           uintptr_t length)
{
    uintptr_t addr;

    if (tlb_is_dirty_ram(tlb_entry)) {
        addr = (tlb_entry->addr_write & TARGET_PAGE_MASK) + tlb_entry->addend;
        if ((addr - start) < length) {
            tlb_entry->addr_write |= TLB_NOTDIRTY;
        }
    }
}

static inline ram_addr_t qemu_ram_addr_from_host_nofail(void *ptr)
{
    ram_addr_t ram_addr;

    if (qemu_ram_addr_from_host(ptr, &ram_addr) == NULL) {
        fprintf(stderr, "Bad ram pointer %p\n", ptr);
        abort();
    }
    return ram_addr;
}

void tlb_reset_dirty(CPUState *cpu, ram_addr_t start1, ram_addr_t length)
{
    CPUArchState *env;

    int mmu_idx;

    env = cpu->env_ptr;
    for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++) {
        unsigned int i;

        for (i = 0; i < CPU_TLB_SIZE; i++) {
            tlb_reset_dirty_range(&env->tlb_table[mmu_idx][i],
                                  start1, length);
        }

        for (i = 0; i < CPU_VTLB_SIZE; i++) {
            tlb_reset_dirty_range(&env->tlb_v_table[mmu_idx][i],
                                  start1, length);
        }
    }
}

static inline void tlb_set_dirty1(CPUTLBEntry *tlb_entry, target_ulong vaddr)
{
    if (tlb_entry->addr_write == (vaddr | TLB_NOTDIRTY)) {
        tlb_entry->addr_write = vaddr;
    }
}

/* update the TLB corresponding to virtual page vaddr
   so that it is no longer dirty */
void tlb_set_dirty(CPUState *cpu, target_ulong vaddr)
{
    CPUArchState *env = cpu->env_ptr;
    int i;
    int mmu_idx;

    vaddr &= TARGET_PAGE_MASK;
    i = (vaddr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
    for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++) {
        tlb_set_dirty1(&env->tlb_table[mmu_idx][i], vaddr);
    }

    for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++) {
        int k;
        for (k = 0; k < CPU_VTLB_SIZE; k++) {
            tlb_set_dirty1(&env->tlb_v_table[mmu_idx][k], vaddr);
        }
    }
}

/* Our TLB does not support large pages, so remember the area covered by
   large pages and trigger a full TLB flush if these are invalidated.  */
static void tlb_add_large_page(CPUArchState *env, target_ulong vaddr,
                               target_ulong size)
{
    target_ulong mask = ~(size - 1);

    if (env->tlb_flush_addr == (target_ulong)-1) {
        env->tlb_flush_addr = vaddr & mask;
        env->tlb_flush_mask = mask;
        return;
    }
    /* Extend the existing region to include the new page.
       This is a compromise between unnecessary flushes and the cost
       of maintaining a full variable size TLB.  */
    mask &= env->tlb_flush_mask;
    while (((env->tlb_flush_addr ^ vaddr) & mask) != 0) {
        mask <<= 1;
    }
    env->tlb_flush_addr &= mask;
    env->tlb_flush_mask = mask;
}

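/* Worked example (illustrative): if the tracked region is a 2 MB page
 * at 0x00200000 and another 2 MB page at 0x00600000 is added, the
 * loop above widens the mask until both addresses agree under it,
 * leaving tlb_flush_mask == ~(target_ulong)0x7fffff and
 * tlb_flush_addr == 0x00000000: a single 8 MB region covering both.
 */
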
/* Add a new TLB entry. At most one entry for a given virtual address
 * is permitted. Only a single TARGET_PAGE_SIZE region is mapped, the
 * supplied size is only used by tlb_flush_page.
 *
 * Called from TCG-generated code, which is under an RCU read-side
 * critical section.
 */
void tlb_set_page_with_attrs(CPUState *cpu, target_ulong vaddr,
                             hwaddr paddr, MemTxAttrs attrs, int prot,
                             int mmu_idx, target_ulong size)
{
    CPUArchState *env = cpu->env_ptr;
    MemoryRegionSection *section;
    unsigned int index;
    target_ulong address;
    target_ulong code_address;
    uintptr_t addend;
    CPUTLBEntry *te;
    hwaddr iotlb, xlat, sz;
    unsigned vidx = env->vtlb_index++ % CPU_VTLB_SIZE;
    int asidx = cpu_asidx_from_attrs(cpu, attrs);

    assert(size >= TARGET_PAGE_SIZE);
    if (size != TARGET_PAGE_SIZE) {
        tlb_add_large_page(env, vaddr, size);
    }

    sz = size;
    section = address_space_translate_for_iotlb(cpu, asidx, paddr, &xlat, &sz);
    assert(sz >= TARGET_PAGE_SIZE);

    tlb_debug("vaddr=" TARGET_FMT_lx " paddr=0x" TARGET_FMT_plx
              " prot=%x idx=%d\n",
              vaddr, paddr, prot, mmu_idx);

    address = vaddr;
    if (!memory_region_is_ram(section->mr) && !memory_region_is_romd(section->mr)) {
        /* IO memory case */
        address |= TLB_MMIO;
        addend = 0;
    } else {
        /* TLB_MMIO for rom/romd handled below */
        addend = (uintptr_t)memory_region_get_ram_ptr(section->mr) + xlat;
    }

    code_address = address;
    iotlb = memory_region_section_get_iotlb(cpu, section, vaddr, paddr, xlat,
                                            prot, &address);

    index = (vaddr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
    te = &env->tlb_table[mmu_idx][index];

    /* do not discard the translation in te, evict it into a victim tlb */
    env->tlb_v_table[mmu_idx][vidx] = *te;
    env->iotlb_v[mmu_idx][vidx] = env->iotlb[mmu_idx][index];

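    /* The victim TLB is a small, fully-associative buffer of recently
     * evicted entries.  On a miss in the direct-mapped table above,
     * the softmmu load/store helpers (see softmmu_template.h) probe it
     * before calling tlb_fill(), so a translation displaced by an
     * index collision can be restored without a new page walk.
     */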
    /* refill the tlb */
    env->iotlb[mmu_idx][index].addr = iotlb - vaddr;
    env->iotlb[mmu_idx][index].attrs = attrs;
    te->addend = addend - vaddr;
    if (prot & PAGE_READ) {
        te->addr_read = address;
    } else {
        te->addr_read = -1;
    }

    if (prot & PAGE_EXEC) {
        te->addr_code = code_address;
    } else {
        te->addr_code = -1;
    }
    if (prot & PAGE_WRITE) {
        if ((memory_region_is_ram(section->mr) && section->readonly)
            || memory_region_is_romd(section->mr)) {
            /* Write access calls the I/O callback.  */
            te->addr_write = address | TLB_MMIO;
        } else if (memory_region_is_ram(section->mr)
                   && cpu_physical_memory_is_clean(
                       memory_region_get_ram_addr(section->mr) + xlat)) {
            te->addr_write = address | TLB_NOTDIRTY;
        } else {
            te->addr_write = address;
        }
    } else {
        te->addr_write = -1;
    }
}

/* Add a new TLB entry, but without specifying the memory
 * transaction attributes to be used.
 */
void tlb_set_page(CPUState *cpu, target_ulong vaddr,
                  hwaddr paddr, int prot,
                  int mmu_idx, target_ulong size)
{
    tlb_set_page_with_attrs(cpu, vaddr, paddr, MEMTXATTRS_UNSPECIFIED,
                            prot, mmu_idx, size);
}

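/* Typical usage (illustrative, not taken from this file): a target's
 * tlb_fill handler walks the guest page tables and then installs the
 * translation it found:
 *
 *     tlb_set_page(cs, vaddr & TARGET_PAGE_MASK, paddr & TARGET_PAGE_MASK,
 *                  prot, mmu_idx, TARGET_PAGE_SIZE);
 */
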
/* NOTE: this function can trigger an exception */
/* NOTE2: the returned address is not exactly the physical address: it
 * is actually a ram_addr_t (in system mode; the user mode emulation
 * version of this function returns a guest virtual address).
 */
tb_page_addr_t get_page_addr_code(CPUArchState *env1, target_ulong addr)
{
    int mmu_idx, page_index, pd;
    void *p;
    MemoryRegion *mr;
    CPUState *cpu = ENV_GET_CPU(env1);
    CPUIOTLBEntry *iotlbentry;

    page_index = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
    mmu_idx = cpu_mmu_index(env1, true);
    if (unlikely(env1->tlb_table[mmu_idx][page_index].addr_code !=
                 (addr & TARGET_PAGE_MASK))) {
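        /* The byte load is performed only for its side effect: it goes
         * through the slow path and fills the code TLB entry for addr,
         * or raises the guest exception if the page is not executable.
         * The loaded value itself is discarded.
         */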
        cpu_ldub_code(env1, addr);
    }
    iotlbentry = &env1->iotlb[mmu_idx][page_index];
    pd = iotlbentry->addr & ~TARGET_PAGE_MASK;
    mr = iotlb_to_region(cpu, pd, iotlbentry->attrs);
    if (memory_region_is_unassigned(mr)) {
        CPUClass *cc = CPU_GET_CLASS(cpu);

        if (cc->do_unassigned_access) {
            cc->do_unassigned_access(cpu, addr, false, true, 0, 4);
        } else {
            cpu_abort(cpu, "Trying to execute code outside RAM or ROM at 0x"
                      TARGET_FMT_lx "\n", addr);
        }
    }
    p = (void *)((uintptr_t)addr + env1->tlb_table[mmu_idx][page_index].addend);
    return qemu_ram_addr_from_host_nofail(p);
}

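/* Generate the softmmu load/store helpers by repeatedly including
 * softmmu_template.h.  SHIFT is log2 of the access size, so SHIFT
 * 0..3 instantiate the 1-, 2-, 4- and 8-byte variants; MMUSUFFIX
 * selects the helper family: _mmu for ordinary data accesses, and
 * _cmmu (with SOFTMMU_CODE_ACCESS) for instruction fetches, which
 * have no generated-code return address to unwind, hence GETRA() and
 * GETPC_ADJ are stubbed out to zero.
 */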
#define MMUSUFFIX _mmu

#define SHIFT 0
#include "softmmu_template.h"

#define SHIFT 1
#include "softmmu_template.h"

#define SHIFT 2
#include "softmmu_template.h"

#define SHIFT 3
#include "softmmu_template.h"
#undef MMUSUFFIX

#define MMUSUFFIX _cmmu
#undef GETPC_ADJ
#define GETPC_ADJ 0
#undef GETRA
#define GETRA() ((uintptr_t)0)
#define SOFTMMU_CODE_ACCESS

#define SHIFT 0
#include "softmmu_template.h"

#define SHIFT 1
#include "softmmu_template.h"

#define SHIFT 2
#include "softmmu_template.h"

#define SHIFT 3
#include "softmmu_template.h"