/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
 * QEMU LoongArch TLB helpers
 *
 * Copyright (c) 2021 Loongson Technology Corporation Limited
 */
9 #include "qemu/osdep.h"
10 #include "qemu/guest-random.h"
13 #include "internals.h"
14 #include "exec/helper-proto.h"
15 #include "exec/exec-all.h"
16 #include "exec/cpu_ldst.h"
31 static int loongarch_map_tlb_entry(CPULoongArchState
*env
, hwaddr
*physical
,
32 int *prot
, target_ulong address
,
33 int access_type
, int index
, int mmu_idx
)
35 LoongArchTLB
*tlb
= &env
->tlb
[index
];
36 uint64_t plv
= mmu_idx
;
37 uint64_t tlb_entry
, tlb_ppn
;
38 uint8_t tlb_ps
, n
, tlb_v
, tlb_d
, tlb_plv
, tlb_nx
, tlb_nr
, tlb_rplv
;
40 if (index
>= LOONGARCH_STLB
) {
41 tlb_ps
= FIELD_EX64(tlb
->tlb_misc
, TLB_MISC
, PS
);
43 tlb_ps
= FIELD_EX64(env
->CSR_STLBPS
, CSR_STLBPS
, PS
);
45 n
= (address
>> tlb_ps
) & 0x1;/* Odd or even */
47 tlb_entry
= n
? tlb
->tlb_entry1
: tlb
->tlb_entry0
;
48 tlb_v
= FIELD_EX64(tlb_entry
, TLBENTRY
, V
);
49 tlb_d
= FIELD_EX64(tlb_entry
, TLBENTRY
, D
);
50 tlb_plv
= FIELD_EX64(tlb_entry
, TLBENTRY
, PLV
);
52 tlb_ppn
= FIELD_EX64(tlb_entry
, TLBENTRY_64
, PPN
);
53 tlb_nx
= FIELD_EX64(tlb_entry
, TLBENTRY_64
, NX
);
54 tlb_nr
= FIELD_EX64(tlb_entry
, TLBENTRY_64
, NR
);
55 tlb_rplv
= FIELD_EX64(tlb_entry
, TLBENTRY_64
, RPLV
);
57 tlb_ppn
= FIELD_EX64(tlb_entry
, TLBENTRY_32
, PPN
);
63 /* Check access rights */
65 return TLBRET_INVALID
;
68 if (access_type
== MMU_INST_FETCH
&& tlb_nx
) {
72 if (access_type
== MMU_DATA_LOAD
&& tlb_nr
) {
76 if (((tlb_rplv
== 0) && (plv
> tlb_plv
)) ||
77 ((tlb_rplv
== 1) && (plv
!= tlb_plv
))) {
81 if ((access_type
== MMU_DATA_STORE
) && !tlb_d
) {
86 * tlb_entry contains ppn[47:12] while 16KiB ppn is [47:15]
89 *physical
= (tlb_ppn
<< R_TLBENTRY_64_PPN_SHIFT
) |
90 (address
& MAKE_64BIT_MASK(0, tlb_ps
));
102 * One tlb entry holds an adjacent odd/even pair, the vpn is the
103 * content of the virtual page number divided by 2. So the
104 * compare vpn is bit[47:15] for 16KiB page. while the vppn
105 * field in tlb entry contains bit[47:13], so need adjust.
106 * virt_vpn = vaddr[47:13]
108 static bool loongarch_tlb_search(CPULoongArchState
*env
, target_ulong vaddr
,
112 uint16_t csr_asid
, tlb_asid
, stlb_idx
;
113 uint8_t tlb_e
, tlb_ps
, tlb_g
, stlb_ps
;
114 int i
, compare_shift
;
115 uint64_t vpn
, tlb_vppn
;
117 csr_asid
= FIELD_EX64(env
->CSR_ASID
, CSR_ASID
, ASID
);
118 stlb_ps
= FIELD_EX64(env
->CSR_STLBPS
, CSR_STLBPS
, PS
);
119 vpn
= (vaddr
& TARGET_VIRT_MASK
) >> (stlb_ps
+ 1);
120 stlb_idx
= vpn
& 0xff; /* VA[25:15] <==> TLBIDX.index for 16KiB Page */
121 compare_shift
= stlb_ps
+ 1 - R_TLB_MISC_VPPN_SHIFT
;
124 for (i
= 0; i
< 8; ++i
) {
125 tlb
= &env
->tlb
[i
* 256 + stlb_idx
];
126 tlb_e
= FIELD_EX64(tlb
->tlb_misc
, TLB_MISC
, E
);
128 tlb_vppn
= FIELD_EX64(tlb
->tlb_misc
, TLB_MISC
, VPPN
);
129 tlb_asid
= FIELD_EX64(tlb
->tlb_misc
, TLB_MISC
, ASID
);
130 tlb_g
= FIELD_EX64(tlb
->tlb_entry0
, TLBENTRY
, G
);
132 if ((tlb_g
== 1 || tlb_asid
== csr_asid
) &&
133 (vpn
== (tlb_vppn
>> compare_shift
))) {
134 *index
= i
* 256 + stlb_idx
;
141 for (i
= LOONGARCH_STLB
; i
< LOONGARCH_TLB_MAX
; ++i
) {
143 tlb_e
= FIELD_EX64(tlb
->tlb_misc
, TLB_MISC
, E
);
145 tlb_vppn
= FIELD_EX64(tlb
->tlb_misc
, TLB_MISC
, VPPN
);
146 tlb_ps
= FIELD_EX64(tlb
->tlb_misc
, TLB_MISC
, PS
);
147 tlb_asid
= FIELD_EX64(tlb
->tlb_misc
, TLB_MISC
, ASID
);
148 tlb_g
= FIELD_EX64(tlb
->tlb_entry0
, TLBENTRY
, G
);
149 compare_shift
= tlb_ps
+ 1 - R_TLB_MISC_VPPN_SHIFT
;
150 vpn
= (vaddr
& TARGET_VIRT_MASK
) >> (tlb_ps
+ 1);
151 if ((tlb_g
== 1 || tlb_asid
== csr_asid
) &&
152 (vpn
== (tlb_vppn
>> compare_shift
))) {
161 static int loongarch_map_address(CPULoongArchState
*env
, hwaddr
*physical
,
162 int *prot
, target_ulong address
,
163 MMUAccessType access_type
, int mmu_idx
)
167 match
= loongarch_tlb_search(env
, address
, &index
);
169 return loongarch_map_tlb_entry(env
, physical
, prot
,
170 address
, access_type
, index
, mmu_idx
);
173 return TLBRET_NOMATCH
;
176 static hwaddr
dmw_va2pa(CPULoongArchState
*env
, target_ulong va
,
180 return va
& TARGET_VIRT_MASK
;
182 uint32_t pseg
= FIELD_EX32(dmw
, CSR_DMW_32
, PSEG
);
183 return (va
& MAKE_64BIT_MASK(0, R_CSR_DMW_32_VSEG_SHIFT
)) | \
184 (pseg
<< R_CSR_DMW_32_VSEG_SHIFT
);
188 static int get_physical_address(CPULoongArchState
*env
, hwaddr
*physical
,
189 int *prot
, target_ulong address
,
190 MMUAccessType access_type
, int mmu_idx
)
192 int user_mode
= mmu_idx
== MMU_IDX_USER
;
193 int kernel_mode
= mmu_idx
== MMU_IDX_KERNEL
;
194 uint32_t plv
, base_c
, base_v
;
196 uint8_t da
= FIELD_EX64(env
->CSR_CRMD
, CSR_CRMD
, DA
);
197 uint8_t pg
= FIELD_EX64(env
->CSR_CRMD
, CSR_CRMD
, PG
);
199 /* Check PG and DA */
201 *physical
= address
& TARGET_PHYS_MASK
;
202 *prot
= PAGE_READ
| PAGE_WRITE
| PAGE_EXEC
;
206 plv
= kernel_mode
| (user_mode
<< R_CSR_DMW_PLV3_SHIFT
);
208 base_v
= address
>> R_CSR_DMW_64_VSEG_SHIFT
;
210 base_v
= address
>> R_CSR_DMW_32_VSEG_SHIFT
;
212 /* Check direct map window */
213 for (int i
= 0; i
< 4; i
++) {
215 base_c
= FIELD_EX64(env
->CSR_DMW
[i
], CSR_DMW_64
, VSEG
);
217 base_c
= FIELD_EX64(env
->CSR_DMW
[i
], CSR_DMW_32
, VSEG
);
219 if ((plv
& env
->CSR_DMW
[i
]) && (base_c
== base_v
)) {
220 *physical
= dmw_va2pa(env
, address
, env
->CSR_DMW
[i
]);
221 *prot
= PAGE_READ
| PAGE_WRITE
| PAGE_EXEC
;
226 /* Check valid extension */
227 addr_high
= sextract64(address
, TARGET_VIRT_ADDR_SPACE_BITS
, 16);
228 if (!(addr_high
== 0 || addr_high
== -1)) {
229 return TLBRET_BADADDR
;
233 return loongarch_map_address(env
, physical
, prot
, address
,
234 access_type
, mmu_idx
);
237 hwaddr
loongarch_cpu_get_phys_page_debug(CPUState
*cs
, vaddr addr
)
239 LoongArchCPU
*cpu
= LOONGARCH_CPU(cs
);
240 CPULoongArchState
*env
= &cpu
->env
;
244 if (get_physical_address(env
, &phys_addr
, &prot
, addr
, MMU_DATA_LOAD
,
245 cpu_mmu_index(env
, false)) != 0) {
251 static void raise_mmu_exception(CPULoongArchState
*env
, target_ulong address
,
252 MMUAccessType access_type
, int tlb_error
)
254 CPUState
*cs
= env_cpu(env
);
259 cs
->exception_index
= access_type
== MMU_INST_FETCH
260 ? EXCCODE_ADEF
: EXCCODE_ADEM
;
263 /* No TLB match for a mapped address */
264 if (access_type
== MMU_DATA_LOAD
) {
265 cs
->exception_index
= EXCCODE_PIL
;
266 } else if (access_type
== MMU_DATA_STORE
) {
267 cs
->exception_index
= EXCCODE_PIS
;
268 } else if (access_type
== MMU_INST_FETCH
) {
269 cs
->exception_index
= EXCCODE_PIF
;
271 env
->CSR_TLBRERA
= FIELD_DP64(env
->CSR_TLBRERA
, CSR_TLBRERA
, ISTLBR
, 1);
274 /* TLB match with no valid bit */
275 if (access_type
== MMU_DATA_LOAD
) {
276 cs
->exception_index
= EXCCODE_PIL
;
277 } else if (access_type
== MMU_DATA_STORE
) {
278 cs
->exception_index
= EXCCODE_PIS
;
279 } else if (access_type
== MMU_INST_FETCH
) {
280 cs
->exception_index
= EXCCODE_PIF
;
284 /* TLB match but 'D' bit is cleared */
285 cs
->exception_index
= EXCCODE_PME
;
288 /* Execute-Inhibit Exception */
289 cs
->exception_index
= EXCCODE_PNX
;
292 /* Read-Inhibit Exception */
293 cs
->exception_index
= EXCCODE_PNR
;
296 /* Privileged Exception */
297 cs
->exception_index
= EXCCODE_PPI
;
301 if (tlb_error
== TLBRET_NOMATCH
) {
302 env
->CSR_TLBRBADV
= address
;
304 env
->CSR_TLBREHI
= FIELD_DP64(env
->CSR_TLBREHI
, CSR_TLBREHI_64
,
305 VPPN
, extract64(address
, 13, 35));
307 env
->CSR_TLBREHI
= FIELD_DP64(env
->CSR_TLBREHI
, CSR_TLBREHI_32
,
308 VPPN
, extract64(address
, 13, 19));
311 if (!FIELD_EX64(env
->CSR_DBG
, CSR_DBG
, DST
)) {
312 env
->CSR_BADV
= address
;
314 env
->CSR_TLBEHI
= address
& (TARGET_PAGE_MASK
<< 1);
318 static void invalidate_tlb_entry(CPULoongArchState
*env
, int index
)
320 target_ulong addr
, mask
, pagesize
;
322 LoongArchTLB
*tlb
= &env
->tlb
[index
];
324 int mmu_idx
= cpu_mmu_index(env
, false);
325 uint8_t tlb_v0
= FIELD_EX64(tlb
->tlb_entry0
, TLBENTRY
, V
);
326 uint8_t tlb_v1
= FIELD_EX64(tlb
->tlb_entry1
, TLBENTRY
, V
);
327 uint64_t tlb_vppn
= FIELD_EX64(tlb
->tlb_misc
, TLB_MISC
, VPPN
);
329 if (index
>= LOONGARCH_STLB
) {
330 tlb_ps
= FIELD_EX64(tlb
->tlb_misc
, TLB_MISC
, PS
);
332 tlb_ps
= FIELD_EX64(env
->CSR_STLBPS
, CSR_STLBPS
, PS
);
334 pagesize
= MAKE_64BIT_MASK(tlb_ps
, 1);
335 mask
= MAKE_64BIT_MASK(0, tlb_ps
+ 1);
338 addr
= (tlb_vppn
<< R_TLB_MISC_VPPN_SHIFT
) & ~mask
; /* even */
339 tlb_flush_range_by_mmuidx(env_cpu(env
), addr
, pagesize
,
340 mmu_idx
, TARGET_LONG_BITS
);
344 addr
= (tlb_vppn
<< R_TLB_MISC_VPPN_SHIFT
) & pagesize
; /* odd */
345 tlb_flush_range_by_mmuidx(env_cpu(env
), addr
, pagesize
,
346 mmu_idx
, TARGET_LONG_BITS
);
350 static void invalidate_tlb(CPULoongArchState
*env
, int index
)
353 uint16_t csr_asid
, tlb_asid
, tlb_g
;
355 csr_asid
= FIELD_EX64(env
->CSR_ASID
, CSR_ASID
, ASID
);
356 tlb
= &env
->tlb
[index
];
357 tlb_asid
= FIELD_EX64(tlb
->tlb_misc
, TLB_MISC
, ASID
);
358 tlb_g
= FIELD_EX64(tlb
->tlb_entry0
, TLBENTRY
, G
);
359 if (tlb_g
== 0 && tlb_asid
!= csr_asid
) {
362 invalidate_tlb_entry(env
, index
);
365 static void fill_tlb_entry(CPULoongArchState
*env
, int index
)
367 LoongArchTLB
*tlb
= &env
->tlb
[index
];
368 uint64_t lo0
, lo1
, csr_vppn
;
372 if (FIELD_EX64(env
->CSR_TLBRERA
, CSR_TLBRERA
, ISTLBR
)) {
373 csr_ps
= FIELD_EX64(env
->CSR_TLBREHI
, CSR_TLBREHI
, PS
);
375 csr_vppn
= FIELD_EX64(env
->CSR_TLBREHI
, CSR_TLBREHI_64
, VPPN
);
377 csr_vppn
= FIELD_EX64(env
->CSR_TLBREHI
, CSR_TLBREHI_32
, VPPN
);
379 lo0
= env
->CSR_TLBRELO0
;
380 lo1
= env
->CSR_TLBRELO1
;
382 csr_ps
= FIELD_EX64(env
->CSR_TLBIDX
, CSR_TLBIDX
, PS
);
384 csr_vppn
= FIELD_EX64(env
->CSR_TLBEHI
, CSR_TLBEHI_64
, VPPN
);
386 csr_vppn
= FIELD_EX64(env
->CSR_TLBEHI
, CSR_TLBEHI_32
, VPPN
);
388 lo0
= env
->CSR_TLBELO0
;
389 lo1
= env
->CSR_TLBELO1
;
393 qemu_log_mask(CPU_LOG_MMU
, "page size is 0\n");
396 /* Only MTLB has the ps fields */
397 if (index
>= LOONGARCH_STLB
) {
398 tlb
->tlb_misc
= FIELD_DP64(tlb
->tlb_misc
, TLB_MISC
, PS
, csr_ps
);
401 tlb
->tlb_misc
= FIELD_DP64(tlb
->tlb_misc
, TLB_MISC
, VPPN
, csr_vppn
);
402 tlb
->tlb_misc
= FIELD_DP64(tlb
->tlb_misc
, TLB_MISC
, E
, 1);
403 csr_asid
= FIELD_EX64(env
->CSR_ASID
, CSR_ASID
, ASID
);
404 tlb
->tlb_misc
= FIELD_DP64(tlb
->tlb_misc
, TLB_MISC
, ASID
, csr_asid
);
406 tlb
->tlb_entry0
= lo0
;
407 tlb
->tlb_entry1
= lo1
;
/* Return a random value in [low, high], inclusive */
static uint32_t get_random_tlb(uint32_t low, uint32_t high)
{
    uint32_t val;

    qemu_guest_getrandom_nofail(&val, sizeof(val));
    /* Modulo bias is acceptable here: only used for TLB way selection */
    return val % (high - low + 1) + low;
}
419 void helper_tlbsrch(CPULoongArchState
*env
)
423 if (FIELD_EX64(env
->CSR_TLBRERA
, CSR_TLBRERA
, ISTLBR
)) {
424 match
= loongarch_tlb_search(env
, env
->CSR_TLBREHI
, &index
);
426 match
= loongarch_tlb_search(env
, env
->CSR_TLBEHI
, &index
);
430 env
->CSR_TLBIDX
= FIELD_DP64(env
->CSR_TLBIDX
, CSR_TLBIDX
, INDEX
, index
);
431 env
->CSR_TLBIDX
= FIELD_DP64(env
->CSR_TLBIDX
, CSR_TLBIDX
, NE
, 0);
435 env
->CSR_TLBIDX
= FIELD_DP64(env
->CSR_TLBIDX
, CSR_TLBIDX
, NE
, 1);
438 void helper_tlbrd(CPULoongArchState
*env
)
442 uint8_t tlb_ps
, tlb_e
;
444 index
= FIELD_EX64(env
->CSR_TLBIDX
, CSR_TLBIDX
, INDEX
);
445 tlb
= &env
->tlb
[index
];
447 if (index
>= LOONGARCH_STLB
) {
448 tlb_ps
= FIELD_EX64(tlb
->tlb_misc
, TLB_MISC
, PS
);
450 tlb_ps
= FIELD_EX64(env
->CSR_STLBPS
, CSR_STLBPS
, PS
);
452 tlb_e
= FIELD_EX64(tlb
->tlb_misc
, TLB_MISC
, E
);
455 /* Invalid TLB entry */
456 env
->CSR_TLBIDX
= FIELD_DP64(env
->CSR_TLBIDX
, CSR_TLBIDX
, NE
, 1);
457 env
->CSR_ASID
= FIELD_DP64(env
->CSR_ASID
, CSR_ASID
, ASID
, 0);
459 env
->CSR_TLBELO0
= 0;
460 env
->CSR_TLBELO1
= 0;
461 env
->CSR_TLBIDX
= FIELD_DP64(env
->CSR_TLBIDX
, CSR_TLBIDX
, PS
, 0);
463 /* Valid TLB entry */
464 env
->CSR_TLBIDX
= FIELD_DP64(env
->CSR_TLBIDX
, CSR_TLBIDX
, NE
, 0);
465 env
->CSR_TLBIDX
= FIELD_DP64(env
->CSR_TLBIDX
, CSR_TLBIDX
,
466 PS
, (tlb_ps
& 0x3f));
467 env
->CSR_TLBEHI
= FIELD_EX64(tlb
->tlb_misc
, TLB_MISC
, VPPN
) <<
468 R_TLB_MISC_VPPN_SHIFT
;
469 env
->CSR_TLBELO0
= tlb
->tlb_entry0
;
470 env
->CSR_TLBELO1
= tlb
->tlb_entry1
;
474 void helper_tlbwr(CPULoongArchState
*env
)
476 int index
= FIELD_EX64(env
->CSR_TLBIDX
, CSR_TLBIDX
, INDEX
);
478 invalidate_tlb(env
, index
);
480 if (FIELD_EX64(env
->CSR_TLBIDX
, CSR_TLBIDX
, NE
)) {
481 env
->tlb
[index
].tlb_misc
= FIELD_DP64(env
->tlb
[index
].tlb_misc
,
486 fill_tlb_entry(env
, index
);
489 void helper_tlbfill(CPULoongArchState
*env
)
491 uint64_t address
, entryhi
;
492 int index
, set
, stlb_idx
;
493 uint16_t pagesize
, stlb_ps
;
495 if (FIELD_EX64(env
->CSR_TLBRERA
, CSR_TLBRERA
, ISTLBR
)) {
496 entryhi
= env
->CSR_TLBREHI
;
497 pagesize
= FIELD_EX64(env
->CSR_TLBREHI
, CSR_TLBREHI
, PS
);
499 entryhi
= env
->CSR_TLBEHI
;
500 pagesize
= FIELD_EX64(env
->CSR_TLBIDX
, CSR_TLBIDX
, PS
);
503 stlb_ps
= FIELD_EX64(env
->CSR_STLBPS
, CSR_STLBPS
, PS
);
505 if (pagesize
== stlb_ps
) {
506 /* Only write into STLB bits [47:13] */
507 address
= entryhi
& ~MAKE_64BIT_MASK(0, R_CSR_TLBEHI_64_VPPN_SHIFT
);
509 /* Choose one set ramdomly */
510 set
= get_random_tlb(0, 7);
512 /* Index in one set */
513 stlb_idx
= (address
>> (stlb_ps
+ 1)) & 0xff; /* [0,255] */
515 index
= set
* 256 + stlb_idx
;
517 /* Only write into MTLB */
518 index
= get_random_tlb(LOONGARCH_STLB
, LOONGARCH_TLB_MAX
- 1);
521 invalidate_tlb(env
, index
);
522 fill_tlb_entry(env
, index
);
525 void helper_tlbclr(CPULoongArchState
*env
)
529 uint16_t csr_asid
, tlb_asid
, tlb_g
;
531 csr_asid
= FIELD_EX64(env
->CSR_ASID
, CSR_ASID
, ASID
);
532 index
= FIELD_EX64(env
->CSR_TLBIDX
, CSR_TLBIDX
, INDEX
);
534 if (index
< LOONGARCH_STLB
) {
535 /* STLB. One line per operation */
536 for (i
= 0; i
< 8; i
++) {
537 tlb
= &env
->tlb
[i
* 256 + (index
% 256)];
538 tlb_asid
= FIELD_EX64(tlb
->tlb_misc
, TLB_MISC
, ASID
);
539 tlb_g
= FIELD_EX64(tlb
->tlb_entry0
, TLBENTRY
, G
);
540 if (!tlb_g
&& tlb_asid
== csr_asid
) {
541 tlb
->tlb_misc
= FIELD_DP64(tlb
->tlb_misc
, TLB_MISC
, E
, 0);
544 } else if (index
< LOONGARCH_TLB_MAX
) {
545 /* All MTLB entries */
546 for (i
= LOONGARCH_STLB
; i
< LOONGARCH_TLB_MAX
; i
++) {
548 tlb_asid
= FIELD_EX64(tlb
->tlb_misc
, TLB_MISC
, ASID
);
549 tlb_g
= FIELD_EX64(tlb
->tlb_entry0
, TLBENTRY
, G
);
550 if (!tlb_g
&& tlb_asid
== csr_asid
) {
551 tlb
->tlb_misc
= FIELD_DP64(tlb
->tlb_misc
, TLB_MISC
, E
, 0);
556 tlb_flush(env_cpu(env
));
559 void helper_tlbflush(CPULoongArchState
*env
)
563 index
= FIELD_EX64(env
->CSR_TLBIDX
, CSR_TLBIDX
, INDEX
);
565 if (index
< LOONGARCH_STLB
) {
566 /* STLB. One line per operation */
567 for (i
= 0; i
< 8; i
++) {
568 int s_idx
= i
* 256 + (index
% 256);
569 env
->tlb
[s_idx
].tlb_misc
= FIELD_DP64(env
->tlb
[s_idx
].tlb_misc
,
572 } else if (index
< LOONGARCH_TLB_MAX
) {
573 /* All MTLB entries */
574 for (i
= LOONGARCH_STLB
; i
< LOONGARCH_TLB_MAX
; i
++) {
575 env
->tlb
[i
].tlb_misc
= FIELD_DP64(env
->tlb
[i
].tlb_misc
,
580 tlb_flush(env_cpu(env
));
583 void helper_invtlb_all(CPULoongArchState
*env
)
585 for (int i
= 0; i
< LOONGARCH_TLB_MAX
; i
++) {
586 env
->tlb
[i
].tlb_misc
= FIELD_DP64(env
->tlb
[i
].tlb_misc
,
589 tlb_flush(env_cpu(env
));
592 void helper_invtlb_all_g(CPULoongArchState
*env
, uint32_t g
)
594 for (int i
= 0; i
< LOONGARCH_TLB_MAX
; i
++) {
595 LoongArchTLB
*tlb
= &env
->tlb
[i
];
596 uint8_t tlb_g
= FIELD_EX64(tlb
->tlb_entry0
, TLBENTRY
, G
);
599 tlb
->tlb_misc
= FIELD_DP64(tlb
->tlb_misc
, TLB_MISC
, E
, 0);
602 tlb_flush(env_cpu(env
));
605 void helper_invtlb_all_asid(CPULoongArchState
*env
, target_ulong info
)
607 uint16_t asid
= info
& R_CSR_ASID_ASID_MASK
;
609 for (int i
= 0; i
< LOONGARCH_TLB_MAX
; i
++) {
610 LoongArchTLB
*tlb
= &env
->tlb
[i
];
611 uint8_t tlb_g
= FIELD_EX64(tlb
->tlb_entry0
, TLBENTRY
, G
);
612 uint16_t tlb_asid
= FIELD_EX64(tlb
->tlb_misc
, TLB_MISC
, ASID
);
614 if (!tlb_g
&& (tlb_asid
== asid
)) {
615 tlb
->tlb_misc
= FIELD_DP64(tlb
->tlb_misc
, TLB_MISC
, E
, 0);
618 tlb_flush(env_cpu(env
));
621 void helper_invtlb_page_asid(CPULoongArchState
*env
, target_ulong info
,
624 uint16_t asid
= info
& 0x3ff;
626 for (int i
= 0; i
< LOONGARCH_TLB_MAX
; i
++) {
627 LoongArchTLB
*tlb
= &env
->tlb
[i
];
628 uint8_t tlb_g
= FIELD_EX64(tlb
->tlb_entry0
, TLBENTRY
, G
);
629 uint16_t tlb_asid
= FIELD_EX64(tlb
->tlb_misc
, TLB_MISC
, ASID
);
630 uint64_t vpn
, tlb_vppn
;
631 uint8_t tlb_ps
, compare_shift
;
633 if (i
>= LOONGARCH_STLB
) {
634 tlb_ps
= FIELD_EX64(tlb
->tlb_misc
, TLB_MISC
, PS
);
636 tlb_ps
= FIELD_EX64(env
->CSR_STLBPS
, CSR_STLBPS
, PS
);
638 tlb_vppn
= FIELD_EX64(tlb
->tlb_misc
, TLB_MISC
, VPPN
);
639 vpn
= (addr
& TARGET_VIRT_MASK
) >> (tlb_ps
+ 1);
640 compare_shift
= tlb_ps
+ 1 - R_TLB_MISC_VPPN_SHIFT
;
642 if (!tlb_g
&& (tlb_asid
== asid
) &&
643 (vpn
== (tlb_vppn
>> compare_shift
))) {
644 tlb
->tlb_misc
= FIELD_DP64(tlb
->tlb_misc
, TLB_MISC
, E
, 0);
647 tlb_flush(env_cpu(env
));
650 void helper_invtlb_page_asid_or_g(CPULoongArchState
*env
,
651 target_ulong info
, target_ulong addr
)
653 uint16_t asid
= info
& 0x3ff;
655 for (int i
= 0; i
< LOONGARCH_TLB_MAX
; i
++) {
656 LoongArchTLB
*tlb
= &env
->tlb
[i
];
657 uint8_t tlb_g
= FIELD_EX64(tlb
->tlb_entry0
, TLBENTRY
, G
);
658 uint16_t tlb_asid
= FIELD_EX64(tlb
->tlb_misc
, TLB_MISC
, ASID
);
659 uint64_t vpn
, tlb_vppn
;
660 uint8_t tlb_ps
, compare_shift
;
662 if (i
>= LOONGARCH_STLB
) {
663 tlb_ps
= FIELD_EX64(tlb
->tlb_misc
, TLB_MISC
, PS
);
665 tlb_ps
= FIELD_EX64(env
->CSR_STLBPS
, CSR_STLBPS
, PS
);
667 tlb_vppn
= FIELD_EX64(tlb
->tlb_misc
, TLB_MISC
, VPPN
);
668 vpn
= (addr
& TARGET_VIRT_MASK
) >> (tlb_ps
+ 1);
669 compare_shift
= tlb_ps
+ 1 - R_TLB_MISC_VPPN_SHIFT
;
671 if ((tlb_g
|| (tlb_asid
== asid
)) &&
672 (vpn
== (tlb_vppn
>> compare_shift
))) {
673 tlb
->tlb_misc
= FIELD_DP64(tlb
->tlb_misc
, TLB_MISC
, E
, 0);
676 tlb_flush(env_cpu(env
));
679 bool loongarch_cpu_tlb_fill(CPUState
*cs
, vaddr address
, int size
,
680 MMUAccessType access_type
, int mmu_idx
,
681 bool probe
, uintptr_t retaddr
)
683 LoongArchCPU
*cpu
= LOONGARCH_CPU(cs
);
684 CPULoongArchState
*env
= &cpu
->env
;
690 ret
= get_physical_address(env
, &physical
, &prot
, address
,
691 access_type
, mmu_idx
);
693 if (ret
== TLBRET_MATCH
) {
694 tlb_set_page(cs
, address
& TARGET_PAGE_MASK
,
695 physical
& TARGET_PAGE_MASK
, prot
,
696 mmu_idx
, TARGET_PAGE_SIZE
);
697 qemu_log_mask(CPU_LOG_MMU
,
698 "%s address=%" VADDR_PRIx
" physical " HWADDR_FMT_plx
699 " prot %d\n", __func__
, address
, physical
, prot
);
702 qemu_log_mask(CPU_LOG_MMU
,
703 "%s address=%" VADDR_PRIx
" ret %d\n", __func__
, address
,
709 raise_mmu_exception(env
, address
, access_type
, ret
);
710 cpu_loop_exit_restore(cs
, retaddr
);
713 target_ulong
helper_lddir(CPULoongArchState
*env
, target_ulong base
,
714 target_ulong level
, uint32_t mem_idx
)
716 CPUState
*cs
= env_cpu(env
);
717 target_ulong badvaddr
, index
, phys
, ret
;
719 uint64_t dir_base
, dir_width
;
720 bool huge
= (base
>> LOONGARCH_PAGE_HUGE_SHIFT
) & 0x1;
722 badvaddr
= env
->CSR_TLBRBADV
;
723 base
= base
& TARGET_PHYS_MASK
;
725 /* 0:64bit, 1:128bit, 2:192bit, 3:256bit */
726 shift
= FIELD_EX64(env
->CSR_PWCL
, CSR_PWCL
, PTEWIDTH
);
727 shift
= (shift
+ 1) * 3;
734 dir_base
= FIELD_EX64(env
->CSR_PWCL
, CSR_PWCL
, DIR1_BASE
);
735 dir_width
= FIELD_EX64(env
->CSR_PWCL
, CSR_PWCL
, DIR1_WIDTH
);
738 dir_base
= FIELD_EX64(env
->CSR_PWCL
, CSR_PWCL
, DIR2_BASE
);
739 dir_width
= FIELD_EX64(env
->CSR_PWCL
, CSR_PWCL
, DIR2_WIDTH
);
742 dir_base
= FIELD_EX64(env
->CSR_PWCH
, CSR_PWCH
, DIR3_BASE
);
743 dir_width
= FIELD_EX64(env
->CSR_PWCH
, CSR_PWCH
, DIR3_WIDTH
);
746 dir_base
= FIELD_EX64(env
->CSR_PWCH
, CSR_PWCH
, DIR4_BASE
);
747 dir_width
= FIELD_EX64(env
->CSR_PWCH
, CSR_PWCH
, DIR4_WIDTH
);
750 do_raise_exception(env
, EXCCODE_INE
, GETPC());
753 index
= (badvaddr
>> dir_base
) & ((1 << dir_width
) - 1);
754 phys
= base
| index
<< shift
;
755 ret
= ldq_phys(cs
->as
, phys
) & TARGET_PHYS_MASK
;
759 void helper_ldpte(CPULoongArchState
*env
, target_ulong base
, target_ulong odd
,
762 CPUState
*cs
= env_cpu(env
);
763 target_ulong phys
, tmp0
, ptindex
, ptoffset0
, ptoffset1
, ps
, badv
;
765 bool huge
= (base
>> LOONGARCH_PAGE_HUGE_SHIFT
) & 0x1;
766 uint64_t ptbase
= FIELD_EX64(env
->CSR_PWCL
, CSR_PWCL
, PTBASE
);
767 uint64_t ptwidth
= FIELD_EX64(env
->CSR_PWCL
, CSR_PWCL
, PTWIDTH
);
769 base
= base
& TARGET_PHYS_MASK
;
772 /* Huge Page. base is paddr */
773 tmp0
= base
^ (1 << LOONGARCH_PAGE_HUGE_SHIFT
);
774 /* Move Global bit */
775 tmp0
= ((tmp0
& (1 << LOONGARCH_HGLOBAL_SHIFT
)) >>
776 LOONGARCH_HGLOBAL_SHIFT
) << R_TLBENTRY_G_SHIFT
|
777 (tmp0
& (~(1 << R_TLBENTRY_G_SHIFT
)));
778 ps
= ptbase
+ ptwidth
- 1;
780 tmp0
+= MAKE_64BIT_MASK(ps
, 1);
783 /* 0:64bit, 1:128bit, 2:192bit, 3:256bit */
784 shift
= FIELD_EX64(env
->CSR_PWCL
, CSR_PWCL
, PTEWIDTH
);
785 shift
= (shift
+ 1) * 3;
786 badv
= env
->CSR_TLBRBADV
;
788 ptindex
= (badv
>> ptbase
) & ((1 << ptwidth
) - 1);
789 ptindex
= ptindex
& ~0x1; /* clear bit 0 */
790 ptoffset0
= ptindex
<< shift
;
791 ptoffset1
= (ptindex
+ 1) << shift
;
793 phys
= base
| (odd
? ptoffset1
: ptoffset0
);
794 tmp0
= ldq_phys(cs
->as
, phys
) & TARGET_PHYS_MASK
;
799 env
->CSR_TLBRELO1
= tmp0
;
801 env
->CSR_TLBRELO0
= tmp0
;
803 env
->CSR_TLBREHI
= FIELD_DP64(env
->CSR_TLBREHI
, CSR_TLBREHI
, PS
, ps
);