1 /* SPDX-License-Identifier: GPL-2.0-or-later */
3 * QEMU LoongArch TLB helpers
5 * Copyright (c) 2021 Loongson Technology Corporation Limited
9 #include "qemu/osdep.h"
10 #include "qemu/guest-random.h"
13 #include "internals.h"
14 #include "exec/helper-proto.h"
15 #include "exec/exec-all.h"
16 #include "exec/cpu_ldst.h"
31 static int loongarch_map_tlb_entry(CPULoongArchState
*env
, hwaddr
*physical
,
32 int *prot
, target_ulong address
,
33 int access_type
, int index
, int mmu_idx
)
35 LoongArchTLB
*tlb
= &env
->tlb
[index
];
36 uint64_t plv
= mmu_idx
;
37 uint64_t tlb_entry
, tlb_ppn
;
38 uint8_t tlb_ps
, n
, tlb_v
, tlb_d
, tlb_plv
, tlb_nx
, tlb_nr
, tlb_rplv
;
40 if (index
>= LOONGARCH_STLB
) {
41 tlb_ps
= FIELD_EX64(tlb
->tlb_misc
, TLB_MISC
, PS
);
43 tlb_ps
= FIELD_EX64(env
->CSR_STLBPS
, CSR_STLBPS
, PS
);
45 n
= (address
>> tlb_ps
) & 0x1;/* Odd or even */
47 tlb_entry
= n
? tlb
->tlb_entry1
: tlb
->tlb_entry0
;
48 tlb_v
= FIELD_EX64(tlb_entry
, TLBENTRY
, V
);
49 tlb_d
= FIELD_EX64(tlb_entry
, TLBENTRY
, D
);
50 tlb_plv
= FIELD_EX64(tlb_entry
, TLBENTRY
, PLV
);
52 tlb_ppn
= FIELD_EX64(tlb_entry
, TLBENTRY_64
, PPN
);
53 tlb_nx
= FIELD_EX64(tlb_entry
, TLBENTRY_64
, NX
);
54 tlb_nr
= FIELD_EX64(tlb_entry
, TLBENTRY_64
, NR
);
55 tlb_rplv
= FIELD_EX64(tlb_entry
, TLBENTRY_64
, RPLV
);
57 tlb_ppn
= FIELD_EX64(tlb_entry
, TLBENTRY_32
, PPN
);
63 /* Remove sw bit between bit12 -- bit PS*/
64 tlb_ppn
= tlb_ppn
& ~(((0x1UL
<< (tlb_ps
- 12)) -1));
66 /* Check access rights */
68 return TLBRET_INVALID
;
71 if (access_type
== MMU_INST_FETCH
&& tlb_nx
) {
75 if (access_type
== MMU_DATA_LOAD
&& tlb_nr
) {
79 if (((tlb_rplv
== 0) && (plv
> tlb_plv
)) ||
80 ((tlb_rplv
== 1) && (plv
!= tlb_plv
))) {
84 if ((access_type
== MMU_DATA_STORE
) && !tlb_d
) {
88 *physical
= (tlb_ppn
<< R_TLBENTRY_64_PPN_SHIFT
) |
89 (address
& MAKE_64BIT_MASK(0, tlb_ps
));
101 * One tlb entry holds an adjacent odd/even pair, the vpn is the
102 * content of the virtual page number divided by 2. So the
103 * compare vpn is bit[47:15] for 16KiB page. while the vppn
104 * field in tlb entry contains bit[47:13], so need adjust.
105 * virt_vpn = vaddr[47:13]
107 static bool loongarch_tlb_search(CPULoongArchState
*env
, target_ulong vaddr
,
111 uint16_t csr_asid
, tlb_asid
, stlb_idx
;
112 uint8_t tlb_e
, tlb_ps
, tlb_g
, stlb_ps
;
113 int i
, compare_shift
;
114 uint64_t vpn
, tlb_vppn
;
116 csr_asid
= FIELD_EX64(env
->CSR_ASID
, CSR_ASID
, ASID
);
117 stlb_ps
= FIELD_EX64(env
->CSR_STLBPS
, CSR_STLBPS
, PS
);
118 vpn
= (vaddr
& TARGET_VIRT_MASK
) >> (stlb_ps
+ 1);
119 stlb_idx
= vpn
& 0xff; /* VA[25:15] <==> TLBIDX.index for 16KiB Page */
120 compare_shift
= stlb_ps
+ 1 - R_TLB_MISC_VPPN_SHIFT
;
123 for (i
= 0; i
< 8; ++i
) {
124 tlb
= &env
->tlb
[i
* 256 + stlb_idx
];
125 tlb_e
= FIELD_EX64(tlb
->tlb_misc
, TLB_MISC
, E
);
127 tlb_vppn
= FIELD_EX64(tlb
->tlb_misc
, TLB_MISC
, VPPN
);
128 tlb_asid
= FIELD_EX64(tlb
->tlb_misc
, TLB_MISC
, ASID
);
129 tlb_g
= FIELD_EX64(tlb
->tlb_entry0
, TLBENTRY
, G
);
131 if ((tlb_g
== 1 || tlb_asid
== csr_asid
) &&
132 (vpn
== (tlb_vppn
>> compare_shift
))) {
133 *index
= i
* 256 + stlb_idx
;
140 for (i
= LOONGARCH_STLB
; i
< LOONGARCH_TLB_MAX
; ++i
) {
142 tlb_e
= FIELD_EX64(tlb
->tlb_misc
, TLB_MISC
, E
);
144 tlb_vppn
= FIELD_EX64(tlb
->tlb_misc
, TLB_MISC
, VPPN
);
145 tlb_ps
= FIELD_EX64(tlb
->tlb_misc
, TLB_MISC
, PS
);
146 tlb_asid
= FIELD_EX64(tlb
->tlb_misc
, TLB_MISC
, ASID
);
147 tlb_g
= FIELD_EX64(tlb
->tlb_entry0
, TLBENTRY
, G
);
148 compare_shift
= tlb_ps
+ 1 - R_TLB_MISC_VPPN_SHIFT
;
149 vpn
= (vaddr
& TARGET_VIRT_MASK
) >> (tlb_ps
+ 1);
150 if ((tlb_g
== 1 || tlb_asid
== csr_asid
) &&
151 (vpn
== (tlb_vppn
>> compare_shift
))) {
160 static int loongarch_map_address(CPULoongArchState
*env
, hwaddr
*physical
,
161 int *prot
, target_ulong address
,
162 MMUAccessType access_type
, int mmu_idx
)
166 match
= loongarch_tlb_search(env
, address
, &index
);
168 return loongarch_map_tlb_entry(env
, physical
, prot
,
169 address
, access_type
, index
, mmu_idx
);
172 return TLBRET_NOMATCH
;
175 static hwaddr
dmw_va2pa(CPULoongArchState
*env
, target_ulong va
,
179 return va
& TARGET_VIRT_MASK
;
181 uint32_t pseg
= FIELD_EX32(dmw
, CSR_DMW_32
, PSEG
);
182 return (va
& MAKE_64BIT_MASK(0, R_CSR_DMW_32_VSEG_SHIFT
)) | \
183 (pseg
<< R_CSR_DMW_32_VSEG_SHIFT
);
187 static int get_physical_address(CPULoongArchState
*env
, hwaddr
*physical
,
188 int *prot
, target_ulong address
,
189 MMUAccessType access_type
, int mmu_idx
)
191 int user_mode
= mmu_idx
== MMU_IDX_USER
;
192 int kernel_mode
= mmu_idx
== MMU_IDX_KERNEL
;
193 uint32_t plv
, base_c
, base_v
;
195 uint8_t da
= FIELD_EX64(env
->CSR_CRMD
, CSR_CRMD
, DA
);
196 uint8_t pg
= FIELD_EX64(env
->CSR_CRMD
, CSR_CRMD
, PG
);
198 /* Check PG and DA */
200 *physical
= address
& TARGET_PHYS_MASK
;
201 *prot
= PAGE_READ
| PAGE_WRITE
| PAGE_EXEC
;
205 plv
= kernel_mode
| (user_mode
<< R_CSR_DMW_PLV3_SHIFT
);
207 base_v
= address
>> R_CSR_DMW_64_VSEG_SHIFT
;
209 base_v
= address
>> R_CSR_DMW_32_VSEG_SHIFT
;
211 /* Check direct map window */
212 for (int i
= 0; i
< 4; i
++) {
214 base_c
= FIELD_EX64(env
->CSR_DMW
[i
], CSR_DMW_64
, VSEG
);
216 base_c
= FIELD_EX64(env
->CSR_DMW
[i
], CSR_DMW_32
, VSEG
);
218 if ((plv
& env
->CSR_DMW
[i
]) && (base_c
== base_v
)) {
219 *physical
= dmw_va2pa(env
, address
, env
->CSR_DMW
[i
]);
220 *prot
= PAGE_READ
| PAGE_WRITE
| PAGE_EXEC
;
225 /* Check valid extension */
226 addr_high
= sextract64(address
, TARGET_VIRT_ADDR_SPACE_BITS
, 16);
227 if (!(addr_high
== 0 || addr_high
== -1)) {
228 return TLBRET_BADADDR
;
232 return loongarch_map_address(env
, physical
, prot
, address
,
233 access_type
, mmu_idx
);
236 hwaddr
loongarch_cpu_get_phys_page_debug(CPUState
*cs
, vaddr addr
)
238 LoongArchCPU
*cpu
= LOONGARCH_CPU(cs
);
239 CPULoongArchState
*env
= &cpu
->env
;
243 if (get_physical_address(env
, &phys_addr
, &prot
, addr
, MMU_DATA_LOAD
,
244 cpu_mmu_index(env
, false)) != 0) {
250 static void raise_mmu_exception(CPULoongArchState
*env
, target_ulong address
,
251 MMUAccessType access_type
, int tlb_error
)
253 CPUState
*cs
= env_cpu(env
);
258 cs
->exception_index
= access_type
== MMU_INST_FETCH
259 ? EXCCODE_ADEF
: EXCCODE_ADEM
;
262 /* No TLB match for a mapped address */
263 if (access_type
== MMU_DATA_LOAD
) {
264 cs
->exception_index
= EXCCODE_PIL
;
265 } else if (access_type
== MMU_DATA_STORE
) {
266 cs
->exception_index
= EXCCODE_PIS
;
267 } else if (access_type
== MMU_INST_FETCH
) {
268 cs
->exception_index
= EXCCODE_PIF
;
270 env
->CSR_TLBRERA
= FIELD_DP64(env
->CSR_TLBRERA
, CSR_TLBRERA
, ISTLBR
, 1);
273 /* TLB match with no valid bit */
274 if (access_type
== MMU_DATA_LOAD
) {
275 cs
->exception_index
= EXCCODE_PIL
;
276 } else if (access_type
== MMU_DATA_STORE
) {
277 cs
->exception_index
= EXCCODE_PIS
;
278 } else if (access_type
== MMU_INST_FETCH
) {
279 cs
->exception_index
= EXCCODE_PIF
;
283 /* TLB match but 'D' bit is cleared */
284 cs
->exception_index
= EXCCODE_PME
;
287 /* Execute-Inhibit Exception */
288 cs
->exception_index
= EXCCODE_PNX
;
291 /* Read-Inhibit Exception */
292 cs
->exception_index
= EXCCODE_PNR
;
295 /* Privileged Exception */
296 cs
->exception_index
= EXCCODE_PPI
;
300 if (tlb_error
== TLBRET_NOMATCH
) {
301 env
->CSR_TLBRBADV
= address
;
303 env
->CSR_TLBREHI
= FIELD_DP64(env
->CSR_TLBREHI
, CSR_TLBREHI_64
,
304 VPPN
, extract64(address
, 13, 35));
306 env
->CSR_TLBREHI
= FIELD_DP64(env
->CSR_TLBREHI
, CSR_TLBREHI_32
,
307 VPPN
, extract64(address
, 13, 19));
310 if (!FIELD_EX64(env
->CSR_DBG
, CSR_DBG
, DST
)) {
311 env
->CSR_BADV
= address
;
313 env
->CSR_TLBEHI
= address
& (TARGET_PAGE_MASK
<< 1);
317 static void invalidate_tlb_entry(CPULoongArchState
*env
, int index
)
319 target_ulong addr
, mask
, pagesize
;
321 LoongArchTLB
*tlb
= &env
->tlb
[index
];
323 int mmu_idx
= cpu_mmu_index(env
, false);
324 uint8_t tlb_v0
= FIELD_EX64(tlb
->tlb_entry0
, TLBENTRY
, V
);
325 uint8_t tlb_v1
= FIELD_EX64(tlb
->tlb_entry1
, TLBENTRY
, V
);
326 uint64_t tlb_vppn
= FIELD_EX64(tlb
->tlb_misc
, TLB_MISC
, VPPN
);
328 if (index
>= LOONGARCH_STLB
) {
329 tlb_ps
= FIELD_EX64(tlb
->tlb_misc
, TLB_MISC
, PS
);
331 tlb_ps
= FIELD_EX64(env
->CSR_STLBPS
, CSR_STLBPS
, PS
);
333 pagesize
= MAKE_64BIT_MASK(tlb_ps
, 1);
334 mask
= MAKE_64BIT_MASK(0, tlb_ps
+ 1);
337 addr
= (tlb_vppn
<< R_TLB_MISC_VPPN_SHIFT
) & ~mask
; /* even */
338 tlb_flush_range_by_mmuidx(env_cpu(env
), addr
, pagesize
,
339 mmu_idx
, TARGET_LONG_BITS
);
343 addr
= (tlb_vppn
<< R_TLB_MISC_VPPN_SHIFT
) & pagesize
; /* odd */
344 tlb_flush_range_by_mmuidx(env_cpu(env
), addr
, pagesize
,
345 mmu_idx
, TARGET_LONG_BITS
);
349 static void invalidate_tlb(CPULoongArchState
*env
, int index
)
352 uint16_t csr_asid
, tlb_asid
, tlb_g
;
354 csr_asid
= FIELD_EX64(env
->CSR_ASID
, CSR_ASID
, ASID
);
355 tlb
= &env
->tlb
[index
];
356 tlb_asid
= FIELD_EX64(tlb
->tlb_misc
, TLB_MISC
, ASID
);
357 tlb_g
= FIELD_EX64(tlb
->tlb_entry0
, TLBENTRY
, G
);
358 if (tlb_g
== 0 && tlb_asid
!= csr_asid
) {
361 invalidate_tlb_entry(env
, index
);
364 static void fill_tlb_entry(CPULoongArchState
*env
, int index
)
366 LoongArchTLB
*tlb
= &env
->tlb
[index
];
367 uint64_t lo0
, lo1
, csr_vppn
;
371 if (FIELD_EX64(env
->CSR_TLBRERA
, CSR_TLBRERA
, ISTLBR
)) {
372 csr_ps
= FIELD_EX64(env
->CSR_TLBREHI
, CSR_TLBREHI
, PS
);
374 csr_vppn
= FIELD_EX64(env
->CSR_TLBREHI
, CSR_TLBREHI_64
, VPPN
);
376 csr_vppn
= FIELD_EX64(env
->CSR_TLBREHI
, CSR_TLBREHI_32
, VPPN
);
378 lo0
= env
->CSR_TLBRELO0
;
379 lo1
= env
->CSR_TLBRELO1
;
381 csr_ps
= FIELD_EX64(env
->CSR_TLBIDX
, CSR_TLBIDX
, PS
);
383 csr_vppn
= FIELD_EX64(env
->CSR_TLBEHI
, CSR_TLBEHI_64
, VPPN
);
385 csr_vppn
= FIELD_EX64(env
->CSR_TLBEHI
, CSR_TLBEHI_32
, VPPN
);
387 lo0
= env
->CSR_TLBELO0
;
388 lo1
= env
->CSR_TLBELO1
;
392 qemu_log_mask(CPU_LOG_MMU
, "page size is 0\n");
395 /* Only MTLB has the ps fields */
396 if (index
>= LOONGARCH_STLB
) {
397 tlb
->tlb_misc
= FIELD_DP64(tlb
->tlb_misc
, TLB_MISC
, PS
, csr_ps
);
400 tlb
->tlb_misc
= FIELD_DP64(tlb
->tlb_misc
, TLB_MISC
, VPPN
, csr_vppn
);
401 tlb
->tlb_misc
= FIELD_DP64(tlb
->tlb_misc
, TLB_MISC
, E
, 1);
402 csr_asid
= FIELD_EX64(env
->CSR_ASID
, CSR_ASID
, ASID
);
403 tlb
->tlb_misc
= FIELD_DP64(tlb
->tlb_misc
, TLB_MISC
, ASID
, csr_asid
);
405 tlb
->tlb_entry0
= lo0
;
406 tlb
->tlb_entry1
= lo1
;
/*
 * Return a random value in [low, high] (inclusive), sourced from the
 * guest-deterministic RNG so record/replay stays reproducible.
 */
static uint32_t get_random_tlb(uint32_t low, uint32_t high)
{
    uint32_t val;

    qemu_guest_getrandom_nofail(&val, sizeof(val));
    return val % (high - low + 1) + low;
}
418 void helper_tlbsrch(CPULoongArchState
*env
)
422 if (FIELD_EX64(env
->CSR_TLBRERA
, CSR_TLBRERA
, ISTLBR
)) {
423 match
= loongarch_tlb_search(env
, env
->CSR_TLBREHI
, &index
);
425 match
= loongarch_tlb_search(env
, env
->CSR_TLBEHI
, &index
);
429 env
->CSR_TLBIDX
= FIELD_DP64(env
->CSR_TLBIDX
, CSR_TLBIDX
, INDEX
, index
);
430 env
->CSR_TLBIDX
= FIELD_DP64(env
->CSR_TLBIDX
, CSR_TLBIDX
, NE
, 0);
434 env
->CSR_TLBIDX
= FIELD_DP64(env
->CSR_TLBIDX
, CSR_TLBIDX
, NE
, 1);
437 void helper_tlbrd(CPULoongArchState
*env
)
441 uint8_t tlb_ps
, tlb_e
;
443 index
= FIELD_EX64(env
->CSR_TLBIDX
, CSR_TLBIDX
, INDEX
);
444 tlb
= &env
->tlb
[index
];
446 if (index
>= LOONGARCH_STLB
) {
447 tlb_ps
= FIELD_EX64(tlb
->tlb_misc
, TLB_MISC
, PS
);
449 tlb_ps
= FIELD_EX64(env
->CSR_STLBPS
, CSR_STLBPS
, PS
);
451 tlb_e
= FIELD_EX64(tlb
->tlb_misc
, TLB_MISC
, E
);
454 /* Invalid TLB entry */
455 env
->CSR_TLBIDX
= FIELD_DP64(env
->CSR_TLBIDX
, CSR_TLBIDX
, NE
, 1);
456 env
->CSR_ASID
= FIELD_DP64(env
->CSR_ASID
, CSR_ASID
, ASID
, 0);
458 env
->CSR_TLBELO0
= 0;
459 env
->CSR_TLBELO1
= 0;
460 env
->CSR_TLBIDX
= FIELD_DP64(env
->CSR_TLBIDX
, CSR_TLBIDX
, PS
, 0);
462 /* Valid TLB entry */
463 env
->CSR_TLBIDX
= FIELD_DP64(env
->CSR_TLBIDX
, CSR_TLBIDX
, NE
, 0);
464 env
->CSR_TLBIDX
= FIELD_DP64(env
->CSR_TLBIDX
, CSR_TLBIDX
,
465 PS
, (tlb_ps
& 0x3f));
466 env
->CSR_TLBEHI
= FIELD_EX64(tlb
->tlb_misc
, TLB_MISC
, VPPN
) <<
467 R_TLB_MISC_VPPN_SHIFT
;
468 env
->CSR_TLBELO0
= tlb
->tlb_entry0
;
469 env
->CSR_TLBELO1
= tlb
->tlb_entry1
;
473 void helper_tlbwr(CPULoongArchState
*env
)
475 int index
= FIELD_EX64(env
->CSR_TLBIDX
, CSR_TLBIDX
, INDEX
);
477 invalidate_tlb(env
, index
);
479 if (FIELD_EX64(env
->CSR_TLBIDX
, CSR_TLBIDX
, NE
)) {
480 env
->tlb
[index
].tlb_misc
= FIELD_DP64(env
->tlb
[index
].tlb_misc
,
485 fill_tlb_entry(env
, index
);
488 void helper_tlbfill(CPULoongArchState
*env
)
490 uint64_t address
, entryhi
;
491 int index
, set
, stlb_idx
;
492 uint16_t pagesize
, stlb_ps
;
494 if (FIELD_EX64(env
->CSR_TLBRERA
, CSR_TLBRERA
, ISTLBR
)) {
495 entryhi
= env
->CSR_TLBREHI
;
496 pagesize
= FIELD_EX64(env
->CSR_TLBREHI
, CSR_TLBREHI
, PS
);
498 entryhi
= env
->CSR_TLBEHI
;
499 pagesize
= FIELD_EX64(env
->CSR_TLBIDX
, CSR_TLBIDX
, PS
);
502 stlb_ps
= FIELD_EX64(env
->CSR_STLBPS
, CSR_STLBPS
, PS
);
504 if (pagesize
== stlb_ps
) {
505 /* Only write into STLB bits [47:13] */
506 address
= entryhi
& ~MAKE_64BIT_MASK(0, R_CSR_TLBEHI_64_VPPN_SHIFT
);
508 /* Choose one set ramdomly */
509 set
= get_random_tlb(0, 7);
511 /* Index in one set */
512 stlb_idx
= (address
>> (stlb_ps
+ 1)) & 0xff; /* [0,255] */
514 index
= set
* 256 + stlb_idx
;
516 /* Only write into MTLB */
517 index
= get_random_tlb(LOONGARCH_STLB
, LOONGARCH_TLB_MAX
- 1);
520 invalidate_tlb(env
, index
);
521 fill_tlb_entry(env
, index
);
524 void helper_tlbclr(CPULoongArchState
*env
)
528 uint16_t csr_asid
, tlb_asid
, tlb_g
;
530 csr_asid
= FIELD_EX64(env
->CSR_ASID
, CSR_ASID
, ASID
);
531 index
= FIELD_EX64(env
->CSR_TLBIDX
, CSR_TLBIDX
, INDEX
);
533 if (index
< LOONGARCH_STLB
) {
534 /* STLB. One line per operation */
535 for (i
= 0; i
< 8; i
++) {
536 tlb
= &env
->tlb
[i
* 256 + (index
% 256)];
537 tlb_asid
= FIELD_EX64(tlb
->tlb_misc
, TLB_MISC
, ASID
);
538 tlb_g
= FIELD_EX64(tlb
->tlb_entry0
, TLBENTRY
, G
);
539 if (!tlb_g
&& tlb_asid
== csr_asid
) {
540 tlb
->tlb_misc
= FIELD_DP64(tlb
->tlb_misc
, TLB_MISC
, E
, 0);
543 } else if (index
< LOONGARCH_TLB_MAX
) {
544 /* All MTLB entries */
545 for (i
= LOONGARCH_STLB
; i
< LOONGARCH_TLB_MAX
; i
++) {
547 tlb_asid
= FIELD_EX64(tlb
->tlb_misc
, TLB_MISC
, ASID
);
548 tlb_g
= FIELD_EX64(tlb
->tlb_entry0
, TLBENTRY
, G
);
549 if (!tlb_g
&& tlb_asid
== csr_asid
) {
550 tlb
->tlb_misc
= FIELD_DP64(tlb
->tlb_misc
, TLB_MISC
, E
, 0);
555 tlb_flush(env_cpu(env
));
558 void helper_tlbflush(CPULoongArchState
*env
)
562 index
= FIELD_EX64(env
->CSR_TLBIDX
, CSR_TLBIDX
, INDEX
);
564 if (index
< LOONGARCH_STLB
) {
565 /* STLB. One line per operation */
566 for (i
= 0; i
< 8; i
++) {
567 int s_idx
= i
* 256 + (index
% 256);
568 env
->tlb
[s_idx
].tlb_misc
= FIELD_DP64(env
->tlb
[s_idx
].tlb_misc
,
571 } else if (index
< LOONGARCH_TLB_MAX
) {
572 /* All MTLB entries */
573 for (i
= LOONGARCH_STLB
; i
< LOONGARCH_TLB_MAX
; i
++) {
574 env
->tlb
[i
].tlb_misc
= FIELD_DP64(env
->tlb
[i
].tlb_misc
,
579 tlb_flush(env_cpu(env
));
582 void helper_invtlb_all(CPULoongArchState
*env
)
584 for (int i
= 0; i
< LOONGARCH_TLB_MAX
; i
++) {
585 env
->tlb
[i
].tlb_misc
= FIELD_DP64(env
->tlb
[i
].tlb_misc
,
588 tlb_flush(env_cpu(env
));
591 void helper_invtlb_all_g(CPULoongArchState
*env
, uint32_t g
)
593 for (int i
= 0; i
< LOONGARCH_TLB_MAX
; i
++) {
594 LoongArchTLB
*tlb
= &env
->tlb
[i
];
595 uint8_t tlb_g
= FIELD_EX64(tlb
->tlb_entry0
, TLBENTRY
, G
);
598 tlb
->tlb_misc
= FIELD_DP64(tlb
->tlb_misc
, TLB_MISC
, E
, 0);
601 tlb_flush(env_cpu(env
));
604 void helper_invtlb_all_asid(CPULoongArchState
*env
, target_ulong info
)
606 uint16_t asid
= info
& R_CSR_ASID_ASID_MASK
;
608 for (int i
= 0; i
< LOONGARCH_TLB_MAX
; i
++) {
609 LoongArchTLB
*tlb
= &env
->tlb
[i
];
610 uint8_t tlb_g
= FIELD_EX64(tlb
->tlb_entry0
, TLBENTRY
, G
);
611 uint16_t tlb_asid
= FIELD_EX64(tlb
->tlb_misc
, TLB_MISC
, ASID
);
613 if (!tlb_g
&& (tlb_asid
== asid
)) {
614 tlb
->tlb_misc
= FIELD_DP64(tlb
->tlb_misc
, TLB_MISC
, E
, 0);
617 tlb_flush(env_cpu(env
));
620 void helper_invtlb_page_asid(CPULoongArchState
*env
, target_ulong info
,
623 uint16_t asid
= info
& 0x3ff;
625 for (int i
= 0; i
< LOONGARCH_TLB_MAX
; i
++) {
626 LoongArchTLB
*tlb
= &env
->tlb
[i
];
627 uint8_t tlb_g
= FIELD_EX64(tlb
->tlb_entry0
, TLBENTRY
, G
);
628 uint16_t tlb_asid
= FIELD_EX64(tlb
->tlb_misc
, TLB_MISC
, ASID
);
629 uint64_t vpn
, tlb_vppn
;
630 uint8_t tlb_ps
, compare_shift
;
632 if (i
>= LOONGARCH_STLB
) {
633 tlb_ps
= FIELD_EX64(tlb
->tlb_misc
, TLB_MISC
, PS
);
635 tlb_ps
= FIELD_EX64(env
->CSR_STLBPS
, CSR_STLBPS
, PS
);
637 tlb_vppn
= FIELD_EX64(tlb
->tlb_misc
, TLB_MISC
, VPPN
);
638 vpn
= (addr
& TARGET_VIRT_MASK
) >> (tlb_ps
+ 1);
639 compare_shift
= tlb_ps
+ 1 - R_TLB_MISC_VPPN_SHIFT
;
641 if (!tlb_g
&& (tlb_asid
== asid
) &&
642 (vpn
== (tlb_vppn
>> compare_shift
))) {
643 tlb
->tlb_misc
= FIELD_DP64(tlb
->tlb_misc
, TLB_MISC
, E
, 0);
646 tlb_flush(env_cpu(env
));
649 void helper_invtlb_page_asid_or_g(CPULoongArchState
*env
,
650 target_ulong info
, target_ulong addr
)
652 uint16_t asid
= info
& 0x3ff;
654 for (int i
= 0; i
< LOONGARCH_TLB_MAX
; i
++) {
655 LoongArchTLB
*tlb
= &env
->tlb
[i
];
656 uint8_t tlb_g
= FIELD_EX64(tlb
->tlb_entry0
, TLBENTRY
, G
);
657 uint16_t tlb_asid
= FIELD_EX64(tlb
->tlb_misc
, TLB_MISC
, ASID
);
658 uint64_t vpn
, tlb_vppn
;
659 uint8_t tlb_ps
, compare_shift
;
661 if (i
>= LOONGARCH_STLB
) {
662 tlb_ps
= FIELD_EX64(tlb
->tlb_misc
, TLB_MISC
, PS
);
664 tlb_ps
= FIELD_EX64(env
->CSR_STLBPS
, CSR_STLBPS
, PS
);
666 tlb_vppn
= FIELD_EX64(tlb
->tlb_misc
, TLB_MISC
, VPPN
);
667 vpn
= (addr
& TARGET_VIRT_MASK
) >> (tlb_ps
+ 1);
668 compare_shift
= tlb_ps
+ 1 - R_TLB_MISC_VPPN_SHIFT
;
670 if ((tlb_g
|| (tlb_asid
== asid
)) &&
671 (vpn
== (tlb_vppn
>> compare_shift
))) {
672 tlb
->tlb_misc
= FIELD_DP64(tlb
->tlb_misc
, TLB_MISC
, E
, 0);
675 tlb_flush(env_cpu(env
));
678 bool loongarch_cpu_tlb_fill(CPUState
*cs
, vaddr address
, int size
,
679 MMUAccessType access_type
, int mmu_idx
,
680 bool probe
, uintptr_t retaddr
)
682 LoongArchCPU
*cpu
= LOONGARCH_CPU(cs
);
683 CPULoongArchState
*env
= &cpu
->env
;
689 ret
= get_physical_address(env
, &physical
, &prot
, address
,
690 access_type
, mmu_idx
);
692 if (ret
== TLBRET_MATCH
) {
693 tlb_set_page(cs
, address
& TARGET_PAGE_MASK
,
694 physical
& TARGET_PAGE_MASK
, prot
,
695 mmu_idx
, TARGET_PAGE_SIZE
);
696 qemu_log_mask(CPU_LOG_MMU
,
697 "%s address=%" VADDR_PRIx
" physical " HWADDR_FMT_plx
698 " prot %d\n", __func__
, address
, physical
, prot
);
701 qemu_log_mask(CPU_LOG_MMU
,
702 "%s address=%" VADDR_PRIx
" ret %d\n", __func__
, address
,
708 raise_mmu_exception(env
, address
, access_type
, ret
);
709 cpu_loop_exit_restore(cs
, retaddr
);
712 target_ulong
helper_lddir(CPULoongArchState
*env
, target_ulong base
,
713 target_ulong level
, uint32_t mem_idx
)
715 CPUState
*cs
= env_cpu(env
);
716 target_ulong badvaddr
, index
, phys
, ret
;
718 uint64_t dir_base
, dir_width
;
719 bool huge
= (base
>> LOONGARCH_PAGE_HUGE_SHIFT
) & 0x1;
721 badvaddr
= env
->CSR_TLBRBADV
;
722 base
= base
& TARGET_PHYS_MASK
;
724 /* 0:64bit, 1:128bit, 2:192bit, 3:256bit */
725 shift
= FIELD_EX64(env
->CSR_PWCL
, CSR_PWCL
, PTEWIDTH
);
726 shift
= (shift
+ 1) * 3;
733 dir_base
= FIELD_EX64(env
->CSR_PWCL
, CSR_PWCL
, DIR1_BASE
);
734 dir_width
= FIELD_EX64(env
->CSR_PWCL
, CSR_PWCL
, DIR1_WIDTH
);
737 dir_base
= FIELD_EX64(env
->CSR_PWCL
, CSR_PWCL
, DIR2_BASE
);
738 dir_width
= FIELD_EX64(env
->CSR_PWCL
, CSR_PWCL
, DIR2_WIDTH
);
741 dir_base
= FIELD_EX64(env
->CSR_PWCH
, CSR_PWCH
, DIR3_BASE
);
742 dir_width
= FIELD_EX64(env
->CSR_PWCH
, CSR_PWCH
, DIR3_WIDTH
);
745 dir_base
= FIELD_EX64(env
->CSR_PWCH
, CSR_PWCH
, DIR4_BASE
);
746 dir_width
= FIELD_EX64(env
->CSR_PWCH
, CSR_PWCH
, DIR4_WIDTH
);
749 do_raise_exception(env
, EXCCODE_INE
, GETPC());
752 index
= (badvaddr
>> dir_base
) & ((1 << dir_width
) - 1);
753 phys
= base
| index
<< shift
;
754 ret
= ldq_phys(cs
->as
, phys
) & TARGET_PHYS_MASK
;
758 void helper_ldpte(CPULoongArchState
*env
, target_ulong base
, target_ulong odd
,
761 CPUState
*cs
= env_cpu(env
);
762 target_ulong phys
, tmp0
, ptindex
, ptoffset0
, ptoffset1
, ps
, badv
;
764 bool huge
= (base
>> LOONGARCH_PAGE_HUGE_SHIFT
) & 0x1;
765 uint64_t ptbase
= FIELD_EX64(env
->CSR_PWCL
, CSR_PWCL
, PTBASE
);
766 uint64_t ptwidth
= FIELD_EX64(env
->CSR_PWCL
, CSR_PWCL
, PTWIDTH
);
768 base
= base
& TARGET_PHYS_MASK
;
771 /* Huge Page. base is paddr */
772 tmp0
= base
^ (1 << LOONGARCH_PAGE_HUGE_SHIFT
);
773 /* Move Global bit */
774 tmp0
= ((tmp0
& (1 << LOONGARCH_HGLOBAL_SHIFT
)) >>
775 LOONGARCH_HGLOBAL_SHIFT
) << R_TLBENTRY_G_SHIFT
|
776 (tmp0
& (~(1 << LOONGARCH_HGLOBAL_SHIFT
)));
777 ps
= ptbase
+ ptwidth
- 1;
779 tmp0
+= MAKE_64BIT_MASK(ps
, 1);
782 /* 0:64bit, 1:128bit, 2:192bit, 3:256bit */
783 shift
= FIELD_EX64(env
->CSR_PWCL
, CSR_PWCL
, PTEWIDTH
);
784 shift
= (shift
+ 1) * 3;
785 badv
= env
->CSR_TLBRBADV
;
787 ptindex
= (badv
>> ptbase
) & ((1 << ptwidth
) - 1);
788 ptindex
= ptindex
& ~0x1; /* clear bit 0 */
789 ptoffset0
= ptindex
<< shift
;
790 ptoffset1
= (ptindex
+ 1) << shift
;
792 phys
= base
| (odd
? ptoffset1
: ptoffset0
);
793 tmp0
= ldq_phys(cs
->as
, phys
) & TARGET_PHYS_MASK
;
798 env
->CSR_TLBRELO1
= tmp0
;
800 env
->CSR_TLBRELO0
= tmp0
;
802 env
->CSR_TLBREHI
= FIELD_DP64(env
->CSR_TLBREHI
, CSR_TLBREHI
, PS
, ps
);