/*
 * PowerPC MMU, TLB, SLB and BAT emulation helpers for QEMU.
 *
 * Copyright (c) 2003-2007 Jocelyn Mayer
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */

#include "qemu/osdep.h"
#include "qemu/units.h"
#include "cpu.h"
#include "sysemu/kvm.h"
#include "kvm_ppc.h"
#include "mmu-hash64.h"
#include "mmu-hash32.h"
#include "exec/exec-all.h"
#include "exec/log.h"
#include "helper_regs.h"
#include "qemu/error-report.h"
#include "qemu/main-loop.h"
#include "qemu/qemu-print.h"
#include "internal.h"
#include "mmu-book3s-v3.h"
#include "mmu-radix64.h"
#include "exec/helper-proto.h"
#include "exec/cpu_ldst.h"

/* #define FLUSH_ALL_TLBS */

/*****************************************************************************/
/* PowerPC MMU emulation */

/* Software driven TLB helpers */
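/*
 * The 6xx "software TLBs" model the 603-style I/D TLBs that are reloaded by
 * the software miss handlers (tlbli/tlbld).  When env->id_tlbs is set there
 * are separate instruction and data arrays held back to back in
 * env->tlb.tlb6, which is why the invalidate-all loop below doubles the
 * entry count.
 */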
static inline void ppc6xx_tlb_invalidate_all(CPUPPCState *env)
{
    ppc6xx_tlb_t *tlb;
    int nr, max;

    /* LOG_SWTLB("Invalidate all TLBs\n"); */
    /* Invalidate all defined software TLBs */
    max = env->nb_tlb;
    if (env->id_tlbs == 1) {
        max *= 2;
    }
    for (nr = 0; nr < max; nr++) {
        tlb = &env->tlb.tlb6[nr];
        pte_invalidate(&tlb->pte0);
    }
    tlb_flush(env_cpu(env));
}

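/*
 * Drop any software TLB entry for @eaddr on the side selected by @is_code:
 * every way is probed, and when @match_epn is set only an entry whose EPN
 * matches exactly is invalidated (used when a slot is about to be
 * overwritten).  QEMU's own TLB is flushed for each dropped entry.
 */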
static inline void ppc6xx_tlb_invalidate_virt2(CPUPPCState *env,
                                               target_ulong eaddr,
                                               int is_code, int match_epn)
{
#if !defined(FLUSH_ALL_TLBS)
    CPUState *cs = env_cpu(env);
    ppc6xx_tlb_t *tlb;
    int way, nr;

    /* Invalidate ITLB + DTLB, all ways */
    for (way = 0; way < env->nb_ways; way++) {
        nr = ppc6xx_tlb_getnum(env, eaddr, way, is_code);
        tlb = &env->tlb.tlb6[nr];
        if (pte_is_valid(tlb->pte0) && (match_epn == 0 || eaddr == tlb->EPN)) {
            qemu_log_mask(CPU_LOG_MMU, "TLB invalidate %d/%d "
                          TARGET_FMT_lx "\n", nr, env->nb_tlb, eaddr);
            pte_invalidate(&tlb->pte0);
            tlb_flush_page(cs, tlb->EPN);
        }
    }
#else
    /* XXX: the PowerPC specification says this is valid as well */
    ppc6xx_tlb_invalidate_all(env);
#endif
}

static inline void ppc6xx_tlb_invalidate_virt(CPUPPCState *env,
                                              target_ulong eaddr, int is_code)
{
    ppc6xx_tlb_invalidate_virt2(env, eaddr, is_code, 0);
}

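/*
 * Install a (pte0, pte1) pair into the given way of the software TLB.  Any
 * stale mapping for EPN in that slot is flushed from QEMU's TLB first, and
 * env->last_way records which way was written, feeding the LRU replacement
 * hint noted below.
 */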
static void ppc6xx_tlb_store(CPUPPCState *env, target_ulong EPN, int way,
                             int is_code, target_ulong pte0, target_ulong pte1)
{
    ppc6xx_tlb_t *tlb;
    int nr;

    nr = ppc6xx_tlb_getnum(env, EPN, way, is_code);
    tlb = &env->tlb.tlb6[nr];
    qemu_log_mask(CPU_LOG_MMU, "Set TLB %d/%d EPN " TARGET_FMT_lx " PTE0 "
                  TARGET_FMT_lx " PTE1 " TARGET_FMT_lx "\n", nr, env->nb_tlb,
                  EPN, pte0, pte1);
    /* Invalidate any pending reference in QEMU for this virtual address */
    ppc6xx_tlb_invalidate_virt2(env, EPN, is_code, 1);
    tlb->pte0 = pte0;
    tlb->pte1 = pte1;
    tlb->EPN = EPN;
    /* Store last way for LRU mechanism */
    env->last_way = way;
}

/* Generic TLB search function for PowerPC embedded implementations */
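/*
 * Returns the index of the matching entry, or -1 when no TLB entry
 * translates @address under @pid; used by the 40x and 440 tlbsx helpers.
 */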
static int ppcemb_tlb_search(CPUPPCState *env, target_ulong address,
                             uint32_t pid)
{
    ppcemb_tlb_t *tlb;
    hwaddr raddr;
    int i, ret;

    /* Default return value is no match */
    ret = -1;
    for (i = 0; i < env->nb_tlb; i++) {
        tlb = &env->tlb.tlbe[i];
        if (ppcemb_tlb_check(env, tlb, &raddr, address, pid, 0, i) == 0) {
            ret = i;
            break;
        }
    }

    return ret;
}

/* Helpers specific to PowerPC 40x implementations */
static inline void ppc4xx_tlb_invalidate_all(CPUPPCState *env)
{
    ppcemb_tlb_t *tlb;
    int i;

    for (i = 0; i < env->nb_tlb; i++) {
        tlb = &env->tlb.tlbe[i];
        tlb->prot &= ~PAGE_VALID;
    }
    tlb_flush(env_cpu(env));
}

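/*
 * Invalidate BookE 2.06 TLB arrays.  @flags is a bitmask with bit n
 * selecting TLB array n; entries marked MAS1[IPROT] (invalidation
 * protected) are preserved when @check_iprot is set.
 */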
static void booke206_flush_tlb(CPUPPCState *env, int flags,
                               const int check_iprot)
{
    int tlb_size;
    int i, j;
    ppcmas_tlb_t *tlb = env->tlb.tlbm;

    for (i = 0; i < BOOKE206_MAX_TLBN; i++) {
        if (flags & (1 << i)) {
            tlb_size = booke206_tlb_size(env, i);
            for (j = 0; j < tlb_size; j++) {
                if (!check_iprot || !(tlb[j].mas1 & MAS1_IPROT)) {
                    tlb[j].mas1 &= ~MAS1_VALID;
                }
            }
        }
        tlb += booke206_tlb_size(env, i);
    }

    tlb_flush(env_cpu(env));
}

static int get_physical_address(CPUPPCState *env, mmu_ctx_t *ctx,
                                target_ulong eaddr, MMUAccessType access_type,
                                int type)
{
    return get_physical_address_wtlb(env, ctx, eaddr, access_type, type, 0);
}

/*****************************************************************************/
/* BATs management */
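/*
 * A BAT maps a contiguous block of 128 KiB to 256 MiB.  do_invalidate_BAT()
 * flushes the QEMU TLB pages covered by the block described by the upper
 * BAT register and the (already shifted) block-length mask, falling back to
 * a full flush when the block spans more than 1024 pages.
 */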
#if !defined(FLUSH_ALL_TLBS)
static inline void do_invalidate_BAT(CPUPPCState *env, target_ulong BATu,
                                     target_ulong mask)
{
    CPUState *cs = env_cpu(env);
    target_ulong base, end, page;

    base = BATu & ~0x0001FFFF;
    end = base + mask + 0x00020000;
    if (((end - base) >> TARGET_PAGE_BITS) > 1024) {
        /* Flushing 1024 4K pages is slower than a complete flush */
        qemu_log_mask(CPU_LOG_MMU, "Flush all BATs\n");
        tlb_flush(cs);
        qemu_log_mask(CPU_LOG_MMU, "Flush done\n");
        return;
    }
    qemu_log_mask(CPU_LOG_MMU, "Flush BAT from " TARGET_FMT_lx
                  " to " TARGET_FMT_lx " (" TARGET_FMT_lx ")\n",
                  base, end, mask);
    for (page = base; page != end; page += TARGET_PAGE_SIZE) {
        tlb_flush_page(cs, page);
    }
    qemu_log_mask(CPU_LOG_MMU, "Flush done\n");
}
#endif

static inline void dump_store_bat(CPUPPCState *env, char ID, int ul, int nr,
                                  target_ulong value)
{
    qemu_log_mask(CPU_LOG_MMU, "Set %cBAT%d%c to " TARGET_FMT_lx " ("
                  TARGET_FMT_lx ")\n", ID, nr, ul == 0 ? 'u' : 'l',
                  value, env->nip);
}

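/*
 * mask = (value << 15) & 0x0FFE0000 rebuilds the block-length mask from the
 * BL field of the upper BAT value, so BEPI (and BRPN in the lower register)
 * can be masked to ignore the bits below the block size.
 */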
void helper_store_ibatu(CPUPPCState *env, uint32_t nr, target_ulong value)
{
    target_ulong mask;

    dump_store_bat(env, 'I', 0, nr, value);
    if (env->IBAT[0][nr] != value) {
        mask = (value << 15) & 0x0FFE0000UL;
#if !defined(FLUSH_ALL_TLBS)
        do_invalidate_BAT(env, env->IBAT[0][nr], mask);
#endif
        /*
         * When storing valid upper BAT, mask BEPI and BRPN and
         * invalidate all TLBs covered by this BAT
         */
        mask = (value << 15) & 0x0FFE0000UL;
        env->IBAT[0][nr] = (value & 0x00001FFFUL) |
            (value & ~0x0001FFFFUL & ~mask);
        env->IBAT[1][nr] = (env->IBAT[1][nr] & 0x0000007B) |
            (env->IBAT[1][nr] & ~0x0001FFFF & ~mask);
#if !defined(FLUSH_ALL_TLBS)
        do_invalidate_BAT(env, env->IBAT[0][nr], mask);
#else
        tlb_flush(env_cpu(env));
#endif
    }
}

void helper_store_ibatl(CPUPPCState *env, uint32_t nr, target_ulong value)
{
    dump_store_bat(env, 'I', 1, nr, value);
    env->IBAT[1][nr] = value;
}

void helper_store_dbatu(CPUPPCState *env, uint32_t nr, target_ulong value)
{
    target_ulong mask;

    dump_store_bat(env, 'D', 0, nr, value);
    if (env->DBAT[0][nr] != value) {
        /*
         * When storing valid upper BAT, mask BEPI and BRPN and
         * invalidate all TLBs covered by this BAT
         */
        mask = (value << 15) & 0x0FFE0000UL;
#if !defined(FLUSH_ALL_TLBS)
        do_invalidate_BAT(env, env->DBAT[0][nr], mask);
#endif
        mask = (value << 15) & 0x0FFE0000UL;
        env->DBAT[0][nr] = (value & 0x00001FFFUL) |
            (value & ~0x0001FFFFUL & ~mask);
        env->DBAT[1][nr] = (env->DBAT[1][nr] & 0x0000007B) |
            (env->DBAT[1][nr] & ~0x0001FFFF & ~mask);
#if !defined(FLUSH_ALL_TLBS)
        do_invalidate_BAT(env, env->DBAT[0][nr], mask);
#else
        tlb_flush(env_cpu(env));
#endif
    }
}

void helper_store_dbatl(CPUPPCState *env, uint32_t nr, target_ulong value)
{
    dump_store_bat(env, 'D', 1, nr, value);
    env->DBAT[1][nr] = value;
}

/*****************************************************************************/
/* TLB management */
void ppc_tlb_invalidate_all(CPUPPCState *env)
{
#if defined(TARGET_PPC64)
    if (mmu_is_64bit(env->mmu_model)) {
        env->tlb_need_flush = 0;
        tlb_flush(env_cpu(env));
    } else
#endif /* defined(TARGET_PPC64) */
    switch (env->mmu_model) {
    case POWERPC_MMU_SOFT_6xx:
        ppc6xx_tlb_invalidate_all(env);
        break;
    case POWERPC_MMU_SOFT_4xx:
        ppc4xx_tlb_invalidate_all(env);
        break;
    case POWERPC_MMU_REAL:
        cpu_abort(env_cpu(env), "No TLB for PowerPC 4xx in real mode\n");
        break;
    case POWERPC_MMU_MPC8xx:
        /* XXX: TODO */
        cpu_abort(env_cpu(env), "MPC8xx MMU model is not implemented\n");
        break;
    case POWERPC_MMU_BOOKE:
        tlb_flush(env_cpu(env));
        break;
    case POWERPC_MMU_BOOKE206:
        booke206_flush_tlb(env, -1, 0);
        break;
    case POWERPC_MMU_32B:
        env->tlb_need_flush = 0;
        tlb_flush(env_cpu(env));
        break;
    default:
        /* XXX: TODO */
        cpu_abort(env_cpu(env), "Unknown MMU model %x\n", env->mmu_model);
        break;
    }
}

void ppc_tlb_invalidate_one(CPUPPCState *env, target_ulong addr)
{
#if !defined(FLUSH_ALL_TLBS)
    addr &= TARGET_PAGE_MASK;
#if defined(TARGET_PPC64)
    if (mmu_is_64bit(env->mmu_model)) {
        /* tlbie invalidates TLBs for all segments */
        /*
         * XXX: given that there are too many segments to invalidate,
         * and we still don't have a tlb_flush_mask(env, n, mask) in QEMU,
         * we just invalidate all TLBs
         */
        env->tlb_need_flush |= TLB_NEED_LOCAL_FLUSH;
    } else
#endif /* defined(TARGET_PPC64) */
    switch (env->mmu_model) {
    case POWERPC_MMU_SOFT_6xx:
        ppc6xx_tlb_invalidate_virt(env, addr, 0);
        if (env->id_tlbs == 1) {
            ppc6xx_tlb_invalidate_virt(env, addr, 1);
        }
        break;
    case POWERPC_MMU_32B:
        /*
         * Actual CPUs invalidate entire congruence classes based on
         * the geometry of their TLBs and some OSes take that into
         * account; we just mark the TLB to be flushed later (context
         * synchronizing event or sync instruction on 32-bit).
         */
        env->tlb_need_flush |= TLB_NEED_LOCAL_FLUSH;
        break;
    default:
        /* Should never reach here with other MMU models */
        assert(0);
    }
#else
    ppc_tlb_invalidate_all(env);
#endif
}

/*****************************************************************************/
/* Special registers manipulation */

/* Segment registers load and store */
target_ulong helper_load_sr(CPUPPCState *env, target_ulong sr_num)
{
#if defined(TARGET_PPC64)
    if (mmu_is_64bit(env->mmu_model)) {
        /* XXX */
        return 0;
    }
#endif
    return env->sr[sr_num];
}

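/*
 * On 64-bit hash MMUs mtsr/mtsrin are only kept for backwards compatibility:
 * the segment register image is mapped onto an SLB entry covering the
 * corresponding 256 MiB ESID, as done below via ppc_store_slb().
 */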
void helper_store_sr(CPUPPCState *env, target_ulong srnum, target_ulong value)
{
    qemu_log_mask(CPU_LOG_MMU,
                  "%s: reg=%d " TARGET_FMT_lx " " TARGET_FMT_lx "\n", __func__,
                  (int)srnum, value, env->sr[srnum]);
#if defined(TARGET_PPC64)
    if (mmu_is_64bit(env->mmu_model)) {
        PowerPCCPU *cpu = env_archcpu(env);
        uint64_t esid, vsid;

        /* ESID = srnum */
        esid = ((uint64_t)(srnum & 0xf) << 28) | SLB_ESID_V;

        /* VSID = VSID */
        vsid = (value & 0xfffffff) << 12;
        /* flags = flags */
        vsid |= ((value >> 27) & 0xf) << 8;

        ppc_store_slb(cpu, srnum, esid, vsid);
    } else
#endif
    if (env->sr[srnum] != value) {
        env->sr[srnum] = value;
        /*
         * Invalidating 256MB of virtual memory in 4kB pages takes far
         * longer than flushing the whole TLB.
         */
#if !defined(FLUSH_ALL_TLBS) && 0
        {
            target_ulong page, end;
            /* Invalidate 256 MB of virtual memory */
            page = (16 << 20) * srnum;
            end = page + (16 << 20);
            for (; page != end; page += TARGET_PAGE_SIZE) {
                tlb_flush_page(env_cpu(env), page);
            }
        }
#else
        env->tlb_need_flush |= TLB_NEED_LOCAL_FLUSH;
#endif
    }
}

/* TLB management */
void helper_tlbia(CPUPPCState *env)
{
    ppc_tlb_invalidate_all(env);
}

void helper_tlbie(CPUPPCState *env, target_ulong addr)
{
    ppc_tlb_invalidate_one(env, addr);
}

#if defined(TARGET_PPC64)

/* Invalidation Selector */
#define TLBIE_IS_VA         0
#define TLBIE_IS_PID        1
#define TLBIE_IS_LPID       2
#define TLBIE_IS_ALL        3

/* Radix Invalidation Control */
#define TLBIE_RIC_TLB       0
#define TLBIE_RIC_PWC       1
#define TLBIE_RIC_ALL       2
#define TLBIE_RIC_GRP       3

/* Radix Actual Page sizes */
#define TLBIE_R_AP_4K       0
#define TLBIE_R_AP_64K      5
#define TLBIE_R_AP_2M       1
#define TLBIE_R_AP_1G       2

/* RB field masks */
#define TLBIE_RB_EPN_MASK   PPC_BITMASK(0, 51)
#define TLBIE_RB_IS_MASK    PPC_BITMASK(52, 53)
#define TLBIE_RB_AP_MASK    PPC_BITMASK(56, 58)

void helper_tlbie_isa300(CPUPPCState *env, target_ulong rb, target_ulong rs,
                         uint32_t flags)
{
    unsigned ric = (flags & TLBIE_F_RIC_MASK) >> TLBIE_F_RIC_SHIFT;
    /*
     * With the exception of the checks for invalid instruction forms,
     * PRS is currently ignored, because we don't know if a given TLB entry
     * is process or partition scoped.
     */
    bool prs = flags & TLBIE_F_PRS;
    bool r = flags & TLBIE_F_R;
    bool local = flags & TLBIE_F_LOCAL;
    bool effR;
    unsigned is = extract64(rb, PPC_BIT_NR(53), 2);
    unsigned ap;        /* actual page size */
    target_ulong addr, pgoffs_mask;

    qemu_log_mask(CPU_LOG_MMU,
        "%s: local=%d addr=" TARGET_FMT_lx " ric=%u prs=%d r=%d is=%u\n",
        __func__, local, rb & TARGET_PAGE_MASK, ric, prs, r, is);

    effR = FIELD_EX64(env->msr, MSR, HV) ? r : env->spr[SPR_LPCR] & LPCR_HR;

    /* Partial TLB invalidation is supported for Radix only for now. */
    if (!effR) {
        goto inval_all;
    }

    /* Check for invalid instruction forms (effR=1). */
    if (unlikely(ric == TLBIE_RIC_GRP ||
                 ((ric == TLBIE_RIC_PWC || ric == TLBIE_RIC_ALL) &&
                  is == TLBIE_IS_VA) ||
                 (!prs && is == TLBIE_IS_PID))) {
        qemu_log_mask(LOG_GUEST_ERROR,
            "%s: invalid instruction form: ric=%u prs=%d r=%d is=%u\n",
            __func__, ric, prs, r, is);
        goto invalid;
    }

    /* We don't cache Page Walks. */
    if (ric == TLBIE_RIC_PWC) {
        if (local) {
            unsigned set = extract64(rb, PPC_BIT_NR(51), 12);
            if (set != 0) {
                qemu_log_mask(LOG_GUEST_ERROR, "%s: invalid set: %d\n",
                              __func__, set);
                goto invalid;
            }
        }
        return;
    }

    /*
     * Invalidation by LPID or PID is not supported, so fallback
     * to full TLB flush in these cases.
     */
    if (is != TLBIE_IS_VA) {
        goto inval_all;
    }

    /*
     * The results of an attempt to invalidate a translation outside of
     * quadrant 0 for Radix Tree translation (effR=1, RIC=0, PRS=1, IS=0,
     * and EA 0:1 != 0b00) are boundedly undefined.
     */
    if (unlikely(ric == TLBIE_RIC_TLB && prs && is == TLBIE_IS_VA &&
                 (rb & R_EADDR_QUADRANT) != R_EADDR_QUADRANT0)) {
        qemu_log_mask(LOG_GUEST_ERROR,
            "%s: attempt to invalidate a translation outside of quadrant 0\n",
            __func__);
        goto inval_all;
    }

    assert(is == TLBIE_IS_VA);
    assert(ric == TLBIE_RIC_TLB || ric == TLBIE_RIC_ALL);

    ap = extract64(rb, PPC_BIT_NR(58), 3);
    switch (ap) {
    case TLBIE_R_AP_4K:
        pgoffs_mask = 0xfffull;
        break;

    case TLBIE_R_AP_64K:
        pgoffs_mask = 0xffffull;
        break;

    case TLBIE_R_AP_2M:
        pgoffs_mask = 0x1fffffull;
        break;

    case TLBIE_R_AP_1G:
        pgoffs_mask = 0x3fffffffull;
        break;

    default:
        /*
         * If the value specified in RS 0:31, RS 32:63, RB 54:55, RB 56:58,
         * RB 44:51, or RB 56:63, when it is needed to perform the specified
         * operation, is not supported by the implementation, the instruction
         * is treated as if the instruction form were invalid.
         */
        qemu_log_mask(LOG_GUEST_ERROR, "%s: invalid AP: %d\n", __func__, ap);
        goto invalid;
    }

    addr = rb & TLBIE_RB_EPN_MASK & ~pgoffs_mask;

    if (local) {
        tlb_flush_page(env_cpu(env), addr);
    } else {
        tlb_flush_page_all_cpus(env_cpu(env), addr);
    }
    return;

inval_all:
    env->tlb_need_flush |= TLB_NEED_LOCAL_FLUSH;
    if (!local) {
        env->tlb_need_flush |= TLB_NEED_GLOBAL_FLUSH;
    }
    return;

invalid:
    raise_exception_err_ra(env, POWERPC_EXCP_PROGRAM,
                           POWERPC_EXCP_INVAL |
                           POWERPC_EXCP_INVAL_INVAL, GETPC());
}

#endif

void helper_tlbiva(CPUPPCState *env, target_ulong addr)
{
    /* The tlbiva instruction only exists on BookE */
    assert(env->mmu_model == POWERPC_MMU_BOOKE);
    /* XXX: TODO */
    cpu_abort(env_cpu(env), "BookE MMU model is not implemented\n");
}

/* Software driven TLB management */
/* PowerPC 602/603 software TLB load instructions helpers */
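/*
 * On a 603-style software TLB miss the hardware fills IMISS/ICMP (or
 * DMISS/DCMP) and the miss handler loads RPA with the PTE to install;
 * the way to replace is taken from SRR1 (bit 17 as shifted here).
 * do_6xx_tlb() gathers those SPRs and writes the entry into the software
 * TLB above.
 */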
static void do_6xx_tlb(CPUPPCState *env, target_ulong new_EPN, int is_code)
{
    target_ulong RPN, CMP, EPN;
    int way;

    RPN = env->spr[SPR_RPA];
    if (is_code) {
        CMP = env->spr[SPR_ICMP];
        EPN = env->spr[SPR_IMISS];
    } else {
        CMP = env->spr[SPR_DCMP];
        EPN = env->spr[SPR_DMISS];
    }
    way = (env->spr[SPR_SRR1] >> 17) & 1;
    (void)EPN; /* avoid a compiler warning */
    qemu_log_mask(CPU_LOG_MMU, "%s: EPN " TARGET_FMT_lx " " TARGET_FMT_lx
                  " PTE0 " TARGET_FMT_lx " PTE1 " TARGET_FMT_lx " way %d\n",
                  __func__, new_EPN, EPN, CMP, RPN, way);
    /* Store this TLB */
    ppc6xx_tlb_store(env, (uint32_t)(new_EPN & TARGET_PAGE_MASK),
                     way, is_code, CMP, RPN);
}

void helper_6xx_tlbd(CPUPPCState *env, target_ulong EPN)
{
    do_6xx_tlb(env, EPN, 0);
}

void helper_6xx_tlbi(CPUPPCState *env, target_ulong EPN)
{
    do_6xx_tlb(env, EPN, 1);
}

/*****************************************************************************/
/* PowerPC 601 specific instructions (POWER bridge) */

target_ulong helper_rac(CPUPPCState *env, target_ulong addr)
{
    mmu_ctx_t ctx;
    int nb_BATs;
    target_ulong ret = 0;

    /*
     * We don't have to generate many instances of this instruction,
     * as rac is supervisor only.
     *
     * XXX: FIX THIS: Pretend we have no BAT
     */
    nb_BATs = env->nb_BATs;
    env->nb_BATs = 0;
    if (get_physical_address(env, &ctx, addr, 0, ACCESS_INT) == 0) {
        ret = ctx.raddr;
    }
    env->nb_BATs = nb_BATs;
    return ret;
}

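/*
 * BookE/4xx TLB size fields encode the page size as 1 KiB << (2 * size),
 * e.g. 0 -> 1 KiB, 1 -> 4 KiB, 4 -> 256 KiB, 9 -> 256 MiB;
 * booke_page_size_to_tlb() below is the inverse mapping.
 */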
static inline target_ulong booke_tlb_to_page_size(int size)
{
    return 1024 << (2 * size);
}

static inline int booke_page_size_to_tlb(target_ulong page_size)
{
    int size;

    switch (page_size) {
    case 0x00000400UL:
        size = 0x0;
        break;
    case 0x00001000UL:
        size = 0x1;
        break;
    case 0x00004000UL:
        size = 0x2;
        break;
    case 0x00010000UL:
        size = 0x3;
        break;
    case 0x00040000UL:
        size = 0x4;
        break;
    case 0x00100000UL:
        size = 0x5;
        break;
    case 0x00400000UL:
        size = 0x6;
        break;
    case 0x01000000UL:
        size = 0x7;
        break;
    case 0x04000000UL:
        size = 0x8;
        break;
    case 0x10000000UL:
        size = 0x9;
        break;
    case 0x40000000UL:
        size = 0xA;
        break;
#if defined(TARGET_PPC64)
    case 0x000100000000ULL:
        size = 0xB;
        break;
    case 0x000400000000ULL:
        size = 0xC;
        break;
    case 0x001000000000ULL:
        size = 0xD;
        break;
    case 0x004000000000ULL:
        size = 0xE;
        break;
    case 0x010000000000ULL:
        size = 0xF;
        break;
#endif
    default:
        size = -1;
        break;
    }

    return size;
}

/* Helpers for 4xx TLB management */
#define PPC4XX_TLB_ENTRY_MASK       0x0000003f  /* Mask for 64 TLB entries */

#define PPC4XX_TLBHI_V              0x00000040
#define PPC4XX_TLBHI_E              0x00000020
#define PPC4XX_TLBHI_SIZE_MIN       0
#define PPC4XX_TLBHI_SIZE_MAX       7
#define PPC4XX_TLBHI_SIZE_DEFAULT   1
#define PPC4XX_TLBHI_SIZE_SHIFT     7
#define PPC4XX_TLBHI_SIZE_MASK      0x00000007

#define PPC4XX_TLBLO_EX             0x00000200
#define PPC4XX_TLBLO_WR             0x00000100
#define PPC4XX_TLBLO_ATTR_MASK      0x000000FF
#define PPC4XX_TLBLO_RPN_MASK       0xFFFFFC00

void helper_store_40x_pid(CPUPPCState *env, target_ulong val)
{
    if (env->spr[SPR_40x_PID] != val) {
        env->spr[SPR_40x_PID] = val;
        env->tlb_need_flush |= TLB_NEED_LOCAL_FLUSH;
    }
}

target_ulong helper_4xx_tlbre_hi(CPUPPCState *env, target_ulong entry)
{
    ppcemb_tlb_t *tlb;
    target_ulong ret;
    int size;

    entry &= PPC4XX_TLB_ENTRY_MASK;
    tlb = &env->tlb.tlbe[entry];
    ret = tlb->EPN;
    if (tlb->prot & PAGE_VALID) {
        ret |= PPC4XX_TLBHI_V;
    }
    size = booke_page_size_to_tlb(tlb->size);
    if (size < PPC4XX_TLBHI_SIZE_MIN || size > PPC4XX_TLBHI_SIZE_MAX) {
        size = PPC4XX_TLBHI_SIZE_DEFAULT;
    }
    ret |= size << PPC4XX_TLBHI_SIZE_SHIFT;
    helper_store_40x_pid(env, tlb->PID);
    return ret;
}

target_ulong helper_4xx_tlbre_lo(CPUPPCState *env, target_ulong entry)
{
    ppcemb_tlb_t *tlb;
    target_ulong ret;

    entry &= PPC4XX_TLB_ENTRY_MASK;
    tlb = &env->tlb.tlbe[entry];
    ret = tlb->RPN;
    if (tlb->prot & PAGE_EXEC) {
        ret |= PPC4XX_TLBLO_EX;
    }
    if (tlb->prot & PAGE_WRITE) {
        ret |= PPC4XX_TLBLO_WR;
    }
    return ret;
}

void helper_4xx_tlbwe_hi(CPUPPCState *env, target_ulong entry,
                         target_ulong val)
{
    CPUState *cs = env_cpu(env);
    ppcemb_tlb_t *tlb;
    target_ulong page, end;

    qemu_log_mask(CPU_LOG_MMU, "%s entry %d val " TARGET_FMT_lx "\n",
                  __func__, (int)entry, val);
    entry &= PPC4XX_TLB_ENTRY_MASK;
    tlb = &env->tlb.tlbe[entry];
    /* Invalidate the previous TLB entry (if it's valid) */
    if (tlb->prot & PAGE_VALID) {
        end = tlb->EPN + tlb->size;
        qemu_log_mask(CPU_LOG_MMU, "%s: invalidate old TLB %d start "
                      TARGET_FMT_lx " end " TARGET_FMT_lx "\n", __func__,
                      (int)entry, tlb->EPN, end);
        for (page = tlb->EPN; page < end; page += TARGET_PAGE_SIZE) {
            tlb_flush_page(cs, page);
        }
    }
    tlb->size = booke_tlb_to_page_size((val >> PPC4XX_TLBHI_SIZE_SHIFT)
                                       & PPC4XX_TLBHI_SIZE_MASK);
    /*
     * We cannot handle TLB size < TARGET_PAGE_SIZE.
     * If this ever occurs, we should implement TARGET_PAGE_BITS_VARY
     */
    if ((val & PPC4XX_TLBHI_V) && tlb->size < TARGET_PAGE_SIZE) {
        cpu_abort(cs, "TLB size " TARGET_FMT_lu " < %u "
                  "is not supported (%d)\n"
                  "Please implement TARGET_PAGE_BITS_VARY\n",
                  tlb->size, TARGET_PAGE_SIZE, (int)((val >> 7) & 0x7));
    }
    tlb->EPN = val & ~(tlb->size - 1);
    if (val & PPC4XX_TLBHI_V) {
        tlb->prot |= PAGE_VALID;
        if (val & PPC4XX_TLBHI_E) {
            /* XXX: TO BE FIXED */
            cpu_abort(cs,
                      "Little-endian TLB entries are not supported for now\n");
        }
    } else {
        tlb->prot &= ~PAGE_VALID;
    }
    tlb->PID = env->spr[SPR_40x_PID]; /* PID */
    qemu_log_mask(CPU_LOG_MMU, "%s: set up TLB %d RPN " HWADDR_FMT_plx
                  " EPN " TARGET_FMT_lx " size " TARGET_FMT_lx
                  " prot %c%c%c%c PID %d\n", __func__,
                  (int)entry, tlb->RPN, tlb->EPN, tlb->size,
                  tlb->prot & PAGE_READ ? 'r' : '-',
                  tlb->prot & PAGE_WRITE ? 'w' : '-',
                  tlb->prot & PAGE_EXEC ? 'x' : '-',
                  tlb->prot & PAGE_VALID ? 'v' : '-', (int)tlb->PID);
    /* Invalidate the new TLB entry (if valid) */
    if (tlb->prot & PAGE_VALID) {
        end = tlb->EPN + tlb->size;
        qemu_log_mask(CPU_LOG_MMU, "%s: invalidate TLB %d start "
                      TARGET_FMT_lx " end " TARGET_FMT_lx "\n", __func__,
                      (int)entry, tlb->EPN, end);
        for (page = tlb->EPN; page < end; page += TARGET_PAGE_SIZE) {
            tlb_flush_page(cs, page);
        }
    }
}

void helper_4xx_tlbwe_lo(CPUPPCState *env, target_ulong entry,
                         target_ulong val)
{
    ppcemb_tlb_t *tlb;

    qemu_log_mask(CPU_LOG_MMU, "%s entry %i val " TARGET_FMT_lx "\n",
                  __func__, (int)entry, val);
    entry &= PPC4XX_TLB_ENTRY_MASK;
    tlb = &env->tlb.tlbe[entry];
    tlb->attr = val & PPC4XX_TLBLO_ATTR_MASK;
    tlb->RPN = val & PPC4XX_TLBLO_RPN_MASK;
    tlb->prot = PAGE_READ;
    if (val & PPC4XX_TLBLO_EX) {
        tlb->prot |= PAGE_EXEC;
    }
    if (val & PPC4XX_TLBLO_WR) {
        tlb->prot |= PAGE_WRITE;
    }
    qemu_log_mask(CPU_LOG_MMU, "%s: set up TLB %d RPN " HWADDR_FMT_plx
                  " EPN " TARGET_FMT_lx
                  " size " TARGET_FMT_lx " prot %c%c%c%c PID %d\n", __func__,
                  (int)entry, tlb->RPN, tlb->EPN, tlb->size,
                  tlb->prot & PAGE_READ ? 'r' : '-',
                  tlb->prot & PAGE_WRITE ? 'w' : '-',
                  tlb->prot & PAGE_EXEC ? 'x' : '-',
                  tlb->prot & PAGE_VALID ? 'v' : '-', (int)tlb->PID);

    env->tlb_need_flush |= TLB_NEED_LOCAL_FLUSH;
}

target_ulong helper_4xx_tlbsx(CPUPPCState *env, target_ulong address)
{
    return ppcemb_tlb_search(env, address, env->spr[SPR_40x_PID]);
}

/* PowerPC 440 TLB management */
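/*
 * The 440 tlbwe/tlbre helpers operate on one of the three TLB words:
 * word 0 holds the EPN, size and valid bit, word 1 the RPN, and word 2 the
 * storage attributes plus two sets of R/W/X permission bits.  Those
 * permissions are kept in tlb->prot both in the regular PAGE_* positions
 * and shifted left by 4, one set per privilege level.
 */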
void helper_440_tlbwe(CPUPPCState *env, uint32_t word, target_ulong entry,
                      target_ulong value)
{
    ppcemb_tlb_t *tlb;
    target_ulong EPN, RPN, size;
    int do_flush_tlbs;

    qemu_log_mask(CPU_LOG_MMU, "%s word %d entry %d value " TARGET_FMT_lx "\n",
                  __func__, word, (int)entry, value);
    do_flush_tlbs = 0;
    entry &= 0x3F;
    tlb = &env->tlb.tlbe[entry];
    switch (word) {
    default:
        /* Just here to please gcc */
    case 0:
        EPN = value & 0xFFFFFC00;
        if ((tlb->prot & PAGE_VALID) && EPN != tlb->EPN) {
            do_flush_tlbs = 1;
        }
        tlb->EPN = EPN;
        size = booke_tlb_to_page_size((value >> 4) & 0xF);
        if ((tlb->prot & PAGE_VALID) && tlb->size < size) {
            do_flush_tlbs = 1;
        }
        tlb->size = size;
        tlb->attr &= ~0x1;
        tlb->attr |= (value >> 8) & 1;
        if (value & 0x200) {
            tlb->prot |= PAGE_VALID;
        } else {
            if (tlb->prot & PAGE_VALID) {
                tlb->prot &= ~PAGE_VALID;
                do_flush_tlbs = 1;
            }
        }
        tlb->PID = env->spr[SPR_440_MMUCR] & 0x000000FF;
        if (do_flush_tlbs) {
            tlb_flush(env_cpu(env));
        }
        break;
    case 1:
        RPN = value & 0xFFFFFC0F;
        if ((tlb->prot & PAGE_VALID) && tlb->RPN != RPN) {
            tlb_flush(env_cpu(env));
        }
        tlb->RPN = RPN;
        break;
    case 2:
        tlb->attr = (tlb->attr & 0x1) | (value & 0x0000FF00);
        tlb->prot = tlb->prot & PAGE_VALID;
        if (value & 0x1) {
            tlb->prot |= PAGE_READ << 4;
        }
        if (value & 0x2) {
            tlb->prot |= PAGE_WRITE << 4;
        }
        if (value & 0x4) {
            tlb->prot |= PAGE_EXEC << 4;
        }
        if (value & 0x8) {
            tlb->prot |= PAGE_READ;
        }
        if (value & 0x10) {
            tlb->prot |= PAGE_WRITE;
        }
        if (value & 0x20) {
            tlb->prot |= PAGE_EXEC;
        }
        break;
    }
}

target_ulong helper_440_tlbre(CPUPPCState *env, uint32_t word,
                              target_ulong entry)
{
    ppcemb_tlb_t *tlb;
    target_ulong ret;
    int size;

    entry &= 0x3F;
    tlb = &env->tlb.tlbe[entry];
    switch (word) {
    default:
        /* Just here to please gcc */
    case 0:
        ret = tlb->EPN;
        size = booke_page_size_to_tlb(tlb->size);
        if (size < 0 || size > 0xF) {
            size = 1;
        }
        ret |= size << 4;
        if (tlb->attr & 0x1) {
            ret |= 0x100;
        }
        if (tlb->prot & PAGE_VALID) {
            ret |= 0x200;
        }
        env->spr[SPR_440_MMUCR] &= ~0x000000FF;
        env->spr[SPR_440_MMUCR] |= tlb->PID;
        break;
    case 1:
        ret = tlb->RPN;
        break;
    case 2:
        ret = tlb->attr & ~0x1;
        if (tlb->prot & (PAGE_READ << 4)) {
            ret |= 0x1;
        }
        if (tlb->prot & (PAGE_WRITE << 4)) {
            ret |= 0x2;
        }
        if (tlb->prot & (PAGE_EXEC << 4)) {
            ret |= 0x4;
        }
        if (tlb->prot & PAGE_READ) {
            ret |= 0x8;
        }
        if (tlb->prot & PAGE_WRITE) {
            ret |= 0x10;
        }
        if (tlb->prot & PAGE_EXEC) {
            ret |= 0x20;
        }
        break;
    }
    return ret;
}

target_ulong helper_440_tlbsx(CPUPPCState *env, target_ulong address)
{
    return ppcemb_tlb_search(env, address, env->spr[SPR_440_MMUCR] & 0xFF);
}

/* PowerPC BookE 2.06 TLB management */

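/*
 * Return the TLB entry currently addressed by the MAS registers:
 * MAS0[TLBSEL] picks the TLB array, MAS0[ESEL] the way, and MAS2[EPN]
 * supplies the effective address used to derive the set index.
 */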
static ppcmas_tlb_t *booke206_cur_tlb(CPUPPCState *env)
{
    uint32_t tlbncfg = 0;
    int esel = (env->spr[SPR_BOOKE_MAS0] & MAS0_ESEL_MASK) >> MAS0_ESEL_SHIFT;
    int ea = (env->spr[SPR_BOOKE_MAS2] & MAS2_EPN_MASK);
    int tlb;

    tlb = (env->spr[SPR_BOOKE_MAS0] & MAS0_TLBSEL_MASK) >> MAS0_TLBSEL_SHIFT;
    tlbncfg = env->spr[SPR_BOOKE_TLB0CFG + tlb];

    if ((tlbncfg & TLBnCFG_HES) && (env->spr[SPR_BOOKE_MAS0] & MAS0_HES)) {
        cpu_abort(env_cpu(env), "we don't support HES yet\n");
    }

    return booke206_get_tlbm(env, tlb, ea, esel);
}

void helper_booke_setpid(CPUPPCState *env, uint32_t pidn, target_ulong pid)
{
    env->spr[pidn] = pid;
    /* changing PIDs means we're in a different address space now */
    tlb_flush(env_cpu(env));
}

void helper_booke_set_eplc(CPUPPCState *env, target_ulong val)
{
    env->spr[SPR_BOOKE_EPLC] = val & EPID_MASK;
    tlb_flush_by_mmuidx(env_cpu(env), 1 << PPC_TLB_EPID_LOAD);
}

void helper_booke_set_epsc(CPUPPCState *env, target_ulong val)
{
    env->spr[SPR_BOOKE_EPSC] = val & EPID_MASK;
    tlb_flush_by_mmuidx(env_cpu(env), 1 << PPC_TLB_EPID_STORE);
}

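/*
 * Invalidate the QEMU mappings for a single MAS entry: flush just one page
 * when the entry is exactly TARGET_PAGE_SIZE, otherwise fall back to a full
 * TLB flush.
 */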
static inline void flush_page(CPUPPCState *env, ppcmas_tlb_t *tlb)
{
    if (booke206_tlb_to_page_size(env, tlb) == TARGET_PAGE_SIZE) {
        tlb_flush_page(env_cpu(env), tlb->mas2 & MAS2_EPN_MASK);
    } else {
        tlb_flush(env_cpu(env));
    }
}

void helper_booke206_tlbwe(CPUPPCState *env)
{
    uint32_t tlbncfg, tlbn;
    ppcmas_tlb_t *tlb;
    uint32_t size_tlb, size_ps;
    target_ulong mask;

    switch (env->spr[SPR_BOOKE_MAS0] & MAS0_WQ_MASK) {
    case MAS0_WQ_ALWAYS:
        /* good to go, write that entry */
        break;
    case MAS0_WQ_COND:
        /* XXX check if reserved */
        if (0) {
            return;
        }
        break;
    case MAS0_WQ_CLR_RSRV:
        /* XXX clear entry */
        return;
    default:
        /* no idea what to do */
        return;
    }

    if (((env->spr[SPR_BOOKE_MAS0] & MAS0_ATSEL) == MAS0_ATSEL_LRAT) &&
        !FIELD_EX64(env->msr, MSR, GS)) {
        /* XXX we don't support direct LRAT setting yet */
        fprintf(stderr, "cpu: don't support LRAT setting yet\n");
        return;
    }

    tlbn = (env->spr[SPR_BOOKE_MAS0] & MAS0_TLBSEL_MASK) >> MAS0_TLBSEL_SHIFT;
    tlbncfg = env->spr[SPR_BOOKE_TLB0CFG + tlbn];

    tlb = booke206_cur_tlb(env);

    if (!tlb) {
        raise_exception_err_ra(env, POWERPC_EXCP_PROGRAM,
                               POWERPC_EXCP_INVAL |
                               POWERPC_EXCP_INVAL_INVAL, GETPC());
    }

    /* check that we support the targeted size */
    size_tlb = (env->spr[SPR_BOOKE_MAS1] & MAS1_TSIZE_MASK) >> MAS1_TSIZE_SHIFT;
    size_ps = booke206_tlbnps(env, tlbn);
    if ((env->spr[SPR_BOOKE_MAS1] & MAS1_VALID) && (tlbncfg & TLBnCFG_AVAIL) &&
        !(size_ps & (1 << size_tlb))) {
        raise_exception_err_ra(env, POWERPC_EXCP_PROGRAM,
                               POWERPC_EXCP_INVAL |
                               POWERPC_EXCP_INVAL_INVAL, GETPC());
    }

    if (FIELD_EX64(env->msr, MSR, GS)) {
        cpu_abort(env_cpu(env), "missing HV implementation\n");
    }

    if (tlb->mas1 & MAS1_VALID) {
        /*
         * Invalidate the page in QEMU TLB if it was a valid entry.
         *
         * In "PowerPC e500 Core Family Reference Manual, Rev. 1",
         * Section "12.4.2 TLB Write Entry (tlbwe) Instruction":
         * (https://www.nxp.com/docs/en/reference-manual/E500CORERM.pdf)
         *
         * "Note that when an L2 TLB entry is written, it may be displacing an
         * already valid entry in the same L2 TLB location (a victim). If a
         * valid L1 TLB entry corresponds to the L2 MMU victim entry, that L1
         * TLB entry is automatically invalidated."
         */
        flush_page(env, tlb);
    }

    tlb->mas7_3 = ((uint64_t)env->spr[SPR_BOOKE_MAS7] << 32) |
                  env->spr[SPR_BOOKE_MAS3];
    tlb->mas1 = env->spr[SPR_BOOKE_MAS1];

    if ((env->spr[SPR_MMUCFG] & MMUCFG_MAVN) == MMUCFG_MAVN_V2) {
        /* For a TLB that has a fixed size, TSIZE is ignored with MAV2 */
        booke206_fixed_size_tlbn(env, tlbn, tlb);
    } else {
        if (!(tlbncfg & TLBnCFG_AVAIL)) {
            /* force !AVAIL TLB entries to the correct page size */
            tlb->mas1 &= ~MAS1_TSIZE_MASK;
            /* XXX can be configured in MMUCSR0 */
            tlb->mas1 |= (tlbncfg & TLBnCFG_MINSIZE) >> 12;
        }
    }

    /* Make a mask from the TLB size to discard invalid bits in the EPN field */
    mask = ~(booke206_tlb_to_page_size(env, tlb) - 1);
    /* Add a mask for page attributes */
    mask |= MAS2_ACM | MAS2_VLE | MAS2_W | MAS2_I | MAS2_M | MAS2_G | MAS2_E;

    if (!FIELD_EX64(env->msr, MSR, CM)) {
        /*
         * Executing a tlbwe instruction in 32-bit mode will set bits
         * 0:31 of the TLB EPN field to zero.
         */
        mask &= 0xffffffff;
    }

    tlb->mas2 = env->spr[SPR_BOOKE_MAS2] & mask;

    if (!(tlbncfg & TLBnCFG_IPROT)) {
        /* no IPROT supported by TLB */
        tlb->mas1 &= ~MAS1_IPROT;
    }

    flush_page(env, tlb);
}

static inline void booke206_tlb_to_mas(CPUPPCState *env, ppcmas_tlb_t *tlb)
{
    int tlbn = booke206_tlbm_to_tlbn(env, tlb);
    int way = booke206_tlbm_to_way(env, tlb);

    env->spr[SPR_BOOKE_MAS0] = tlbn << MAS0_TLBSEL_SHIFT;
    env->spr[SPR_BOOKE_MAS0] |= way << MAS0_ESEL_SHIFT;
    env->spr[SPR_BOOKE_MAS0] |= env->last_way << MAS0_NV_SHIFT;

    env->spr[SPR_BOOKE_MAS1] = tlb->mas1;
    env->spr[SPR_BOOKE_MAS2] = tlb->mas2;
    env->spr[SPR_BOOKE_MAS3] = tlb->mas7_3;
    env->spr[SPR_BOOKE_MAS7] = tlb->mas7_3 >> 32;
}

void helper_booke206_tlbre(CPUPPCState *env)
{
    ppcmas_tlb_t *tlb = NULL;

    tlb = booke206_cur_tlb(env);
    if (!tlb) {
        env->spr[SPR_BOOKE_MAS1] = 0;
    } else {
        booke206_tlb_to_mas(env, tlb);
    }
}

void helper_booke206_tlbsx(CPUPPCState *env, target_ulong address)
{
    ppcmas_tlb_t *tlb = NULL;
    int i, j;
    hwaddr raddr;
    uint32_t spid, sas;

    spid = (env->spr[SPR_BOOKE_MAS6] & MAS6_SPID_MASK) >> MAS6_SPID_SHIFT;
    sas = env->spr[SPR_BOOKE_MAS6] & MAS6_SAS;

    for (i = 0; i < BOOKE206_MAX_TLBN; i++) {
        int ways = booke206_tlb_ways(env, i);

        for (j = 0; j < ways; j++) {
            tlb = booke206_get_tlbm(env, i, address, j);

            if (!tlb) {
                continue;
            }

            if (ppcmas_tlb_check(env, tlb, &raddr, address, spid)) {
                continue;
            }

            if (sas != ((tlb->mas1 & MAS1_TS) >> MAS1_TS_SHIFT)) {
                continue;
            }

            booke206_tlb_to_mas(env, tlb);
            return;
        }
    }

    /* no entry found, fill with defaults */
    env->spr[SPR_BOOKE_MAS0] = env->spr[SPR_BOOKE_MAS4] & MAS4_TLBSELD_MASK;
    env->spr[SPR_BOOKE_MAS1] = env->spr[SPR_BOOKE_MAS4] & MAS4_TSIZED_MASK;
    env->spr[SPR_BOOKE_MAS2] = env->spr[SPR_BOOKE_MAS4] & MAS4_WIMGED_MASK;
    env->spr[SPR_BOOKE_MAS3] = 0;
    env->spr[SPR_BOOKE_MAS7] = 0;

    if (env->spr[SPR_BOOKE_MAS6] & MAS6_SAS) {
        env->spr[SPR_BOOKE_MAS1] |= MAS1_TS;
    }

    env->spr[SPR_BOOKE_MAS1] |= (env->spr[SPR_BOOKE_MAS6] >> 16)
                                << MAS1_TID_SHIFT;

    /* next victim logic */
    env->spr[SPR_BOOKE_MAS0] |= env->last_way << MAS0_ESEL_SHIFT;
    env->last_way++;
    env->last_way &= booke206_tlb_ways(env, 0) - 1;
    env->spr[SPR_BOOKE_MAS0] |= env->last_way << MAS0_NV_SHIFT;
}

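/*
 * Invalidate every way of TLB array @tlbn whose EPN covers @ea, except for
 * entries protected by MAS1[IPROT]; used below to implement tlbivax.
 */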
static inline void booke206_invalidate_ea_tlb(CPUPPCState *env, int tlbn,
                                              vaddr ea)
{
    int i;
    int ways = booke206_tlb_ways(env, tlbn);
    target_ulong mask;

    for (i = 0; i < ways; i++) {
        ppcmas_tlb_t *tlb = booke206_get_tlbm(env, tlbn, ea, i);
        if (!tlb) {
            continue;
        }
        mask = ~(booke206_tlb_to_page_size(env, tlb) - 1);
        if (((tlb->mas2 & MAS2_EPN_MASK) == (ea & mask)) &&
            !(tlb->mas1 & MAS1_IPROT)) {
            tlb->mas1 &= ~MAS1_VALID;
        }
    }
}

void helper_booke206_tlbivax(CPUPPCState *env, target_ulong address)
{
    CPUState *cs;

    if (address & 0x4) {
        /* flush all entries */
        if (address & 0x8) {
            /* flush all of TLB1 */
            booke206_flush_tlb(env, BOOKE206_FLUSH_TLB1, 1);
        } else {
            /* flush all of TLB0 */
            booke206_flush_tlb(env, BOOKE206_FLUSH_TLB0, 0);
        }
        return;
    }

    if (address & 0x8) {
        /* flush TLB1 entries */
        booke206_invalidate_ea_tlb(env, 1, address);
        CPU_FOREACH(cs) {
            tlb_flush(cs);
        }
    } else {
        /* flush TLB0 entries */
        booke206_invalidate_ea_tlb(env, 0, address);
        CPU_FOREACH(cs) {
            tlb_flush_page(cs, address & MAS2_EPN_MASK);
        }
    }
}

void helper_booke206_tlbilx0(CPUPPCState *env, target_ulong address)
{
    /* XXX missing LPID handling */
    booke206_flush_tlb(env, -1, 1);
}

void helper_booke206_tlbilx1(CPUPPCState *env, target_ulong address)
{
    int i, j;
    int tid = (env->spr[SPR_BOOKE_MAS6] & MAS6_SPID);
    ppcmas_tlb_t *tlb = env->tlb.tlbm;
    int tlb_size;

    /* XXX missing LPID handling */
    for (i = 0; i < BOOKE206_MAX_TLBN; i++) {
        tlb_size = booke206_tlb_size(env, i);
        for (j = 0; j < tlb_size; j++) {
            if (!(tlb[j].mas1 & MAS1_IPROT) &&
                ((tlb[j].mas1 & MAS1_TID_MASK) == tid)) {
                tlb[j].mas1 &= ~MAS1_VALID;
            }
        }
        tlb += booke206_tlb_size(env, i);
    }
    tlb_flush(env_cpu(env));
}

void helper_booke206_tlbilx3(CPUPPCState *env, target_ulong address)
{
    int i, j;
    ppcmas_tlb_t *tlb;
    int tid = (env->spr[SPR_BOOKE_MAS6] & MAS6_SPID);
    int pid = tid >> MAS6_SPID_SHIFT;
    int sgs = env->spr[SPR_BOOKE_MAS5] & MAS5_SGS;
    int ind = (env->spr[SPR_BOOKE_MAS6] & MAS6_SIND) ? MAS1_IND : 0;
    /* XXX check for unsupported isize and raise an invalid opcode then */
    int size = env->spr[SPR_BOOKE_MAS6] & MAS6_ISIZE_MASK;
    /* XXX implement MAV2 handling */
    bool mav2 = false;

    /* XXX missing LPID handling */
    /* flush by pid and ea */
    for (i = 0; i < BOOKE206_MAX_TLBN; i++) {
        int ways = booke206_tlb_ways(env, i);

        for (j = 0; j < ways; j++) {
            tlb = booke206_get_tlbm(env, i, address, j);
            if (!tlb) {
                continue;
            }
            if ((ppcmas_tlb_check(env, tlb, NULL, address, pid) != 0) ||
                (tlb->mas1 & MAS1_IPROT) ||
                ((tlb->mas1 & MAS1_IND) != ind) ||
                ((tlb->mas8 & MAS8_TGS) != sgs)) {
                continue;
            }
            if (mav2 && ((tlb->mas1 & MAS1_TSIZE_MASK) != size)) {
                /* XXX only check when MMUCFG[TWC] || TLBnCFG[HES] */
                continue;
            }
            /* XXX e500mc doesn't match SAS, but other cores might */
            tlb->mas1 &= ~MAS1_VALID;
        }
    }
    tlb_flush(env_cpu(env));
}

void helper_booke206_tlbflush(CPUPPCState *env, target_ulong type)
{
    int flags = 0;

    if (type & 2) {
        flags |= BOOKE206_FLUSH_TLB1;
    }

    if (type & 4) {
        flags |= BOOKE206_FLUSH_TLB0;
    }

    booke206_flush_tlb(env, flags, 1);
}

void helper_check_tlb_flush_local(CPUPPCState *env)
{
    check_tlb_flush(env, false);
}

void helper_check_tlb_flush_global(CPUPPCState *env)
{
    check_tlb_flush(env, true);
}

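/*
 * TCG tlb_fill hook: ppc_xlate() performs the translation for the given
 * mmu_idx and reports the page size as a power-of-two exponent, hence the
 * 1UL << page_size when installing the mapping.  In probe mode a failed
 * translation simply returns false; otherwise the exception prepared by
 * ppc_xlate() is raised here.
 */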
bool ppc_cpu_tlb_fill(CPUState *cs, vaddr eaddr, int size,
                      MMUAccessType access_type, int mmu_idx,
                      bool probe, uintptr_t retaddr)
{
    PowerPCCPU *cpu = POWERPC_CPU(cs);
    hwaddr raddr;
    int page_size, prot;

    if (ppc_xlate(cpu, eaddr, access_type, &raddr,
                  &page_size, &prot, mmu_idx, !probe)) {
        tlb_set_page(cs, eaddr & TARGET_PAGE_MASK, raddr & TARGET_PAGE_MASK,
                     prot, mmu_idx, 1UL << page_size);
        return true;
    }
    if (probe) {
        return false;
    }
    raise_exception_err_ra(&cpu->env, cs->exception_index,
                           cpu->env.error_code, retaddr);
}