/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License. See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Synthesize TLB refill handlers at runtime.
 *
 * Copyright (C) 2004, 2005, 2006, 2008	 Thiemo Seufer
 * Copyright (C) 2005, 2007, 2008, 2009	 Maciej W. Rozycki
 * Copyright (C) 2006  Ralf Baechle (ralf@linux-mips.org)
 * Copyright (C) 2008, 2009 Cavium Networks, Inc.
 *
 * ... and the days got worse and worse and now you see
 * I've gone completely out of my mind.
 *
 * They're coming to take me away haha
 * they're coming to take me away hoho hihi haha
 * to the funny farm where code is beautiful all the time ...
 *
 * (Condolences to Napoleon XIV)
 */
#include <linux/bug.h>
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/smp.h>
#include <linux/string.h>
#include <linux/init.h>
#include <linux/cache.h>

#include <asm/cacheflush.h>
#include <asm/pgtable.h>
#include <asm/war.h>
#include <asm/uasm.h>
/*
 * TLB load/store/modify handlers.
 *
 * Only the fastpath gets synthesized at runtime, the slowpath for
 * do_page_fault remains normal asm.
 */
extern void tlb_do_page_fault_0(void);
extern void tlb_do_page_fault_1(void);
static inline int r45k_bvahwbug(void)
{
	/* XXX: We should probe for the presence of this bug, but we don't. */
	return 0;
}

static inline int r4k_250MHZhwbug(void)
{
	/* XXX: We should probe for the presence of this bug, but we don't. */
	return 0;
}
static inline int __maybe_unused bcm1250_m3_war(void)
{
	return BCM1250_M3_WAR;
}

static inline int __maybe_unused r10000_llsc_war(void)
{
	return R10000_LLSC_WAR;
}
static int use_bbit_insns(void)
{
	switch (current_cpu_type()) {
	case CPU_CAVIUM_OCTEON:
	case CPU_CAVIUM_OCTEON_PLUS:
	case CPU_CAVIUM_OCTEON2:
		return 1;
	default:
		return 0;
	}
}

static int use_lwx_insns(void)
{
	switch (current_cpu_type()) {
	case CPU_CAVIUM_OCTEON2:
		return 1;
	default:
		return 0;
	}
}
#if defined(CONFIG_CAVIUM_OCTEON_CVMSEG_SIZE) && \
    CONFIG_CAVIUM_OCTEON_CVMSEG_SIZE > 0
static bool scratchpad_available(void)
{
	return true;
}

static int scratchpad_offset(int i)
{
	/*
	 * CVMSEG starts at address -32768 and extends for
	 * CAVIUM_OCTEON_CVMSEG_SIZE 128 byte cache lines.
	 */
	i += 1; /* Kernel use starts at the top and works down. */
	return CONFIG_CAVIUM_OCTEON_CVMSEG_SIZE * 128 - (8 * i) - 32768;
}
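/*
 * Worked example (illustrative, assuming CONFIG_CAVIUM_OCTEON_CVMSEG_SIZE
 * is 2, i.e. two 128-byte cache lines of CVMSEG):
 *
 *	scratchpad_offset(0) = 2 * 128 - (8 * 1) - 32768 = -32520
 *	scratchpad_offset(1) = 2 * 128 - (8 * 2) - 32768 = -32528
 *
 * Successive slots are thus one 8-byte doubleword apart, allocated from
 * the top of the CVMSEG region downwards; the offsets are used directly
 * as sign-extended addresses by the generated load/store instructions,
 * with register zero as the base.
 */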
#else
static bool scratchpad_available(void)
{
	return false;
}

static int scratchpad_offset(int i)
{
	BUG();
	/* Really unreachable, but evidently some versions of GCC want this. */
	return 0;
}
#endif
/*
 * Found by experiment: At least some revisions of the 4kc throw under
 * some circumstances a machine check exception, triggered by invalid
 * values in the index register.  Delaying the tlbp instruction until
 * after the next branch, plus adding an additional nop in front of
 * tlbwi/tlbwr avoids the invalid index register values.  Nobody knows
 * why; it's not an issue caused by the core RTL.
 */
static int __cpuinit m4kc_tlbp_war(void)
{
	return (current_cpu_data.processor_id & 0xffff00) ==
	       (PRID_COMP_MIPS | PRID_IMP_4KC);
}
/* Handle labels (which must be positive integers). */
enum label_id {
	label_second_part = 1,
	label_leave,
	label_vmalloc,
	label_vmalloc_done,
	label_tlbw_hazard,
	label_split,
	label_tlbl_goaround1,
	label_tlbl_goaround2,
	label_nopage_tlbl,
	label_nopage_tlbs,
	label_nopage_tlbm,
	label_smp_pgtable_change,
	label_r3000_write_probe_fail,
	label_large_segbits_fault,
#ifdef CONFIG_HUGETLB_PAGE
	label_tlb_huge_update,
#endif
};

UASM_L_LA(_second_part)
UASM_L_LA(_leave)
UASM_L_LA(_vmalloc)
UASM_L_LA(_vmalloc_done)
UASM_L_LA(_tlbw_hazard)
UASM_L_LA(_split)
UASM_L_LA(_tlbl_goaround1)
UASM_L_LA(_tlbl_goaround2)
UASM_L_LA(_nopage_tlbl)
UASM_L_LA(_nopage_tlbs)
UASM_L_LA(_nopage_tlbm)
UASM_L_LA(_smp_pgtable_change)
UASM_L_LA(_r3000_write_probe_fail)
UASM_L_LA(_large_segbits_fault)
#ifdef CONFIG_HUGETLB_PAGE
UASM_L_LA(_tlb_huge_update)
#endif
/*
 * For debug purposes.
 */
static inline void dump_handler(const u32 *handler, int count)
{
	int i;

	pr_debug("\t.set push\n");
	pr_debug("\t.set noreorder\n");

	for (i = 0; i < count; i++)
		pr_debug("\t%p\t.word 0x%08x\n", &handler[i], handler[i]);

	pr_debug("\t.set pop\n");
}
/* The only general purpose registers allowed in TLB handlers. */
#define K0		26
#define K1		27

/* Some CP0 registers */
#define C0_INDEX	0, 0
#define C0_ENTRYLO0	2, 0
#define C0_TCBIND	2, 2
#define C0_ENTRYLO1	3, 0
#define C0_CONTEXT	4, 0
#define C0_PAGEMASK	5, 0
#define C0_BADVADDR	8, 0
#define C0_ENTRYHI	10, 0
#define C0_EPC		14, 0
#define C0_XCONTEXT	20, 0

#ifdef CONFIG_64BIT
# define GET_CONTEXT(buf, reg) UASM_i_MFC0(buf, reg, C0_XCONTEXT)
#else
# define GET_CONTEXT(buf, reg) UASM_i_MFC0(buf, reg, C0_CONTEXT)
#endif
/*
 * The worst case length of the handler is around 18 instructions for
 * R3000-style TLBs and up to 63 instructions for R4000-style TLBs.
 * Maximum space available is 32 instructions for R3000 and 64
 * instructions for R4000.
 *
 * We deliberately chose a buffer size of 128, so we won't scribble
 * over anything important on overflow before we panic.
 */
static u32 tlb_handler[128] __cpuinitdata;

/* simply assume worst case size for labels and relocs */
static struct uasm_label labels[128] __cpuinitdata;
static struct uasm_reloc relocs[128] __cpuinitdata;

static int check_for_high_segbits __cpuinitdata;

static unsigned int kscratch_used_mask __cpuinitdata;
static int __cpuinit allocate_kscratch(void)
{
	int r;
	unsigned int a = cpu_data[0].kscratch_mask & ~kscratch_used_mask;

	r = ffs(a);

	if (r == 0)
		return -1;

	r--; /* make it zero based */

	kscratch_used_mask |= (1 << r);

	return r;
}
static int scratch_reg __cpuinitdata;
static int pgd_reg __cpuinitdata;
enum vmalloc64_mode {not_refill, refill_scratch, refill_noscratch};
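/*
 * How the modes are used (see the callers below): not_refill is passed
 * by the load/store/modify handler tail, where no scratch register has
 * to be restored on the vmalloc fault path; refill_scratch and
 * refill_noscratch are passed by the TLB refill handler depending on
 * whether a scratch register (KScratch or CVMSEG slot) was saved and
 * must be restored before jumping to tlb_do_page_fault_0.
 */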
#ifndef CONFIG_MIPS_PGD_C0_CONTEXT
/*
 * CONFIG_MIPS_PGD_C0_CONTEXT implies 64 bit and lack of pgd_current,
 * we cannot do r3000 under these circumstances.
 *
 * Declare pgd_current here instead of including mmu_context.h to avoid type
 * conflicts for tlbmiss_handler_setup_pgd.
 */
extern unsigned long pgd_current[];
/*
 * The R3000 TLB handler is simple.
 */
static void __cpuinit build_r3000_tlb_refill_handler(void)
{
	long pgdc = (long)pgd_current;
	u32 *p;

	memset(tlb_handler, 0, sizeof(tlb_handler));
	p = tlb_handler;

	uasm_i_mfc0(&p, K0, C0_BADVADDR);
	uasm_i_lui(&p, K1, uasm_rel_hi(pgdc)); /* cp0 delay */
	uasm_i_lw(&p, K1, uasm_rel_lo(pgdc), K1);
	uasm_i_srl(&p, K0, K0, 22); /* load delay */
	uasm_i_sll(&p, K0, K0, 2);
	uasm_i_addu(&p, K1, K1, K0);
	uasm_i_mfc0(&p, K0, C0_CONTEXT);
	uasm_i_lw(&p, K1, 0, K1); /* cp0 delay */
	uasm_i_andi(&p, K0, K0, 0xffc); /* load delay */
	uasm_i_addu(&p, K1, K1, K0);
	uasm_i_lw(&p, K0, 0, K1);
	uasm_i_nop(&p); /* load delay */
	uasm_i_mtc0(&p, K0, C0_ENTRYLO0);
	uasm_i_mfc0(&p, K1, C0_EPC); /* cp0 delay */
	uasm_i_tlbwr(&p); /* cp0 delay */
	uasm_i_jr(&p, K1);
	uasm_i_rfe(&p); /* branch delay */

	if (p > tlb_handler + 32)
		panic("TLB refill handler space exceeded");

	pr_debug("Wrote TLB refill handler (%u instructions).\n",
		 (unsigned int)(p - tlb_handler));

	memcpy((void *)ebase, tlb_handler, 0x80);

	dump_handler((u32 *)ebase, 32);
}
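/*
 * For reference, the fast path synthesized above corresponds roughly to
 * the following assembly (each uasm_i_* call emits one instruction; the
 * encoding of pgd_current's address is resolved at build time):
 *
 *	mfc0	k0, c0_badvaddr
 *	lui	k1, %hi(pgd_current)	# cp0 delay
 *	lw	k1, %lo(pgd_current)(k1)
 *	srl	k0, k0, 22		# load delay
 *	sll	k0, k0, 2
 *	addu	k1, k1, k0
 *	mfc0	k0, c0_context
 *	lw	k1, 0(k1)		# cp0 delay
 *	andi	k0, k0, 0xffc		# load delay
 *	addu	k1, k1, k0
 *	lw	k0, 0(k1)
 *	nop				# load delay
 *	mtc0	k0, c0_entrylo0
 *	mfc0	k1, c0_epc		# cp0 delay
 *	tlbwr				# cp0 delay
 *	jr	k1
 *	rfe				# branch delay
 */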
#endif /* CONFIG_MIPS_PGD_C0_CONTEXT */
/*
 * The R4000 TLB handler is much more complicated. We have two
 * consecutive handler areas with 32 instructions of space each.
 * Since they aren't used at the same time, we can overflow in the
 * other one. To keep things simple, we first assume linear space,
 * then we relocate it to the final handler layout as needed.
 */
static u32 final_handler[64] __cpuinitdata;
/*
 * From the IDT errata for the QED RM5230 (Nevada), processor revision 1.0:
 * 2. A timing hazard exists for the TLBP instruction.
 *
 *	stalling_instruction
 *	TLBP
 *
 * The JTLB is being read for the TLBP throughout the stall generated by the
 * previous instruction. This is not really correct as the stalling instruction
 * can modify the address used to access the JTLB.  The failure symptom is that
 * the TLBP instruction will use an address created for the stalling instruction
 * and not the address held in C0_ENHI and thus report the wrong results.
 *
 * The software work-around is to not allow the instruction preceding the TLBP
 * to stall - make it an NOP or some other instruction guaranteed not to stall.
 *
 * Erratum 2 will not be fixed.  This erratum also applies to the R5000.
 *
 * As if we MIPS hackers wouldn't know how to nop pipelines happy ...
 */
static void __cpuinit __maybe_unused build_tlb_probe_entry(u32 **p)
{
	switch (current_cpu_type()) {
	/* Found by experiment:  R4600 v2.0/R4700 needs this, too.  */
	case CPU_R4600:
	case CPU_R4700:
	case CPU_R5000:
	case CPU_R5000A:
		uasm_i_nop(p);
		uasm_i_tlbp(p);
		break;

	default:
		uasm_i_tlbp(p);
		break;
	}
}
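/*
 * In other words, the affected CPUs get "nop; tlbp" while everything
 * else gets a bare "tlbp"; the nop keeps the instruction in front of
 * tlbp from stalling, which is the work-around described in the
 * erratum above.
 */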
/*
 * Write random or indexed TLB entry, and care about the hazards from
 * the preceding mtc0 and for the following eret.
 */
enum tlb_write_entry { tlb_random, tlb_indexed };

static void __cpuinit build_tlb_write_entry(u32 **p, struct uasm_label **l,
					    struct uasm_reloc **r,
					    enum tlb_write_entry wmode)
{
	void (*tlbw)(u32 **) = NULL;

	switch (wmode) {
	case tlb_random: tlbw = uasm_i_tlbwr; break;
	case tlb_indexed: tlbw = uasm_i_tlbwi; break;
	}

	if (cpu_has_mips_r2) {
		if (cpu_has_mips_r2_exec_hazard)
			uasm_i_ehb(p);
		tlbw(p);
		return;
	}

	switch (current_cpu_type()) {
	case CPU_R4000PC:
	case CPU_R4000SC:
	case CPU_R4000MC:
	case CPU_R4400PC:
	case CPU_R4400SC:
	case CPU_R4400MC:
		/*
		 * This branch uses up a mtc0 hazard nop slot and saves
		 * two nops after the tlbw instruction.
		 */
		uasm_il_bgezl(p, r, 0, label_tlbw_hazard);
		tlbw(p);
		uasm_l_tlbw_hazard(l, *p);
		uasm_i_nop(p);
		break;

	case CPU_NEVADA:
		uasm_i_nop(p); /* QED specifies 2 nops hazard */
		/*
		 * This branch uses up a mtc0 hazard nop slot and saves
		 * a nop after the tlbw instruction.
		 */
		uasm_il_bgezl(p, r, 0, label_tlbw_hazard);
		tlbw(p);
		uasm_l_tlbw_hazard(l, *p);
		break;

	case CPU_RM9000:
		/*
		 * When the JTLB is updated by tlbwi or tlbwr, a subsequent
		 * use of the JTLB for instructions should not occur for 4
		 * cpu cycles and use for data translations should not occur
		 * for 3 cpu cycles.
		 */
		uasm_i_ssnop(p);
		uasm_i_ssnop(p);
		uasm_i_ssnop(p);
		uasm_i_ssnop(p);
		tlbw(p);
		uasm_i_ssnop(p);
		uasm_i_ssnop(p);
		uasm_i_ssnop(p);
		uasm_i_ssnop(p);
		break;

	default:
		panic("No TLB refill handler yet (CPU type: %d)",
		      current_cpu_data.cputype);
		break;
	}
}
static __cpuinit __maybe_unused void build_convert_pte_to_entrylo(u32 **p,
								  unsigned int reg)
{
	if (kernel_uses_smartmips_rixi) {
		UASM_i_SRL(p, reg, reg, ilog2(_PAGE_NO_EXEC));
		UASM_i_ROTR(p, reg, reg,
			    ilog2(_PAGE_GLOBAL) - ilog2(_PAGE_NO_EXEC));
	} else {
#ifdef CONFIG_64BIT_PHYS_ADDR
		uasm_i_dsrl_safe(p, reg, reg, ilog2(_PAGE_GLOBAL));
#else
		UASM_i_SRL(p, reg, reg, ilog2(_PAGE_GLOBAL));
#endif
	}
}
#ifdef CONFIG_HUGETLB_PAGE

static __cpuinit void build_restore_pagemask(u32 **p,
					     struct uasm_reloc **r,
					     unsigned int tmp,
					     enum label_id lid,
					     int restore_scratch)
{
	if (restore_scratch) {
		/* Reset default page size */
		if (PM_DEFAULT_MASK >> 16) {
			uasm_i_lui(p, tmp, PM_DEFAULT_MASK >> 16);
			uasm_i_ori(p, tmp, tmp, PM_DEFAULT_MASK & 0xffff);
			uasm_i_mtc0(p, tmp, C0_PAGEMASK);
			uasm_il_b(p, r, lid);
		} else if (PM_DEFAULT_MASK) {
			uasm_i_ori(p, tmp, 0, PM_DEFAULT_MASK);
			uasm_i_mtc0(p, tmp, C0_PAGEMASK);
			uasm_il_b(p, r, lid);
		} else {
			uasm_i_mtc0(p, 0, C0_PAGEMASK);
			uasm_il_b(p, r, lid);
		}
		if (scratch_reg > 0)
			UASM_i_MFC0(p, 1, 31, scratch_reg);
		else
			UASM_i_LW(p, 1, scratchpad_offset(0), 0);
	} else {
		/* Reset default page size */
		if (PM_DEFAULT_MASK >> 16) {
			uasm_i_lui(p, tmp, PM_DEFAULT_MASK >> 16);
			uasm_i_ori(p, tmp, tmp, PM_DEFAULT_MASK & 0xffff);
			uasm_il_b(p, r, lid);
			uasm_i_mtc0(p, tmp, C0_PAGEMASK);
		} else if (PM_DEFAULT_MASK) {
			uasm_i_ori(p, tmp, 0, PM_DEFAULT_MASK);
			uasm_il_b(p, r, lid);
			uasm_i_mtc0(p, tmp, C0_PAGEMASK);
		} else {
			uasm_il_b(p, r, lid);
			uasm_i_mtc0(p, 0, C0_PAGEMASK);
		}
	}
}
static __cpuinit void build_huge_tlb_write_entry(u32 **p,
						 struct uasm_label **l,
						 struct uasm_reloc **r,
						 unsigned int tmp,
						 enum tlb_write_entry wmode,
						 int restore_scratch)
{
	/* Set huge page tlb entry size */
	uasm_i_lui(p, tmp, PM_HUGE_MASK >> 16);
	uasm_i_ori(p, tmp, tmp, PM_HUGE_MASK & 0xffff);
	uasm_i_mtc0(p, tmp, C0_PAGEMASK);

	build_tlb_write_entry(p, l, r, wmode);

	build_restore_pagemask(p, r, tmp, label_leave, restore_scratch);
}
/*
 * Check if Huge PTE is present, if so then jump to LABEL.
 */
static void __cpuinit
build_is_huge_pte(u32 **p, struct uasm_reloc **r, unsigned int tmp,
		  unsigned int pmd, int lid)
{
	UASM_i_LW(p, tmp, 0, pmd);
	if (use_bbit_insns()) {
		uasm_il_bbit1(p, r, tmp, ilog2(_PAGE_HUGE), lid);
	} else {
		uasm_i_andi(p, tmp, tmp, _PAGE_HUGE);
		uasm_il_bnez(p, r, tmp, lid);
	}
}
static __cpuinit void build_huge_update_entries(u32 **p,
						unsigned int pte,
						unsigned int tmp)
{
	int small_sequence;

	/*
	 * A huge PTE describes an area the size of the
	 * configured huge page size. This is twice the size
	 * of the large TLB entry we intend to use.
	 * A TLB entry half the size of the configured
	 * huge page size is configured into entrylo0
	 * and entrylo1 to cover the contiguous huge PTE
	 * area.
	 */
	small_sequence = (HPAGE_SIZE >> 7) < 0x10000;

	/* We can clobber tmp.  It isn't used after this. */
	if (!small_sequence)
		uasm_i_lui(p, tmp, HPAGE_SIZE >> (7 + 16));

	build_convert_pte_to_entrylo(p, pte);
	UASM_i_MTC0(p, pte, C0_ENTRYLO0); /* load it */
	/* convert to entrylo1 */
	if (small_sequence)
		UASM_i_ADDIU(p, pte, pte, HPAGE_SIZE >> 7);
	else
		UASM_i_ADDU(p, pte, pte, tmp);

	UASM_i_MTC0(p, pte, C0_ENTRYLO1); /* load it */
}
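/*
 * Worked example (illustrative): with a 2 MB huge page size,
 * HPAGE_SIZE >> 7 == 0x4000, which fits in a 16-bit immediate, so
 * small_sequence is true and the odd entry is formed with a single
 * ADDIU.  entrylo0 and entrylo1 then each map half of the 2 MB area
 * described by the one huge PTE, with HPAGE_SIZE >> 7 being the
 * entrylo distance between the two halves.
 */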
static __cpuinit void build_huge_handler_tail(u32 **p,
					      struct uasm_reloc **r,
					      struct uasm_label **l,
					      unsigned int pte,
					      unsigned int ptr)
{
#ifdef CONFIG_SMP
	UASM_i_SC(p, pte, 0, ptr);
	uasm_il_beqz(p, r, pte, label_tlb_huge_update);
	UASM_i_LW(p, pte, 0, ptr); /* Needed because SC killed our PTE */
#else
	UASM_i_SW(p, pte, 0, ptr);
#endif
	build_huge_update_entries(p, pte, ptr);
	build_huge_tlb_write_entry(p, l, r, pte, tlb_indexed, 0);
}
#endif /* CONFIG_HUGETLB_PAGE */
#ifdef CONFIG_64BIT
/*
 * TMP and PTR are scratch.
 * TMP will be clobbered, PTR will hold the pmd entry.
 */
static void __cpuinit
build_get_pmde64(u32 **p, struct uasm_label **l, struct uasm_reloc **r,
		 unsigned int tmp, unsigned int ptr)
{
#ifndef CONFIG_MIPS_PGD_C0_CONTEXT
	long pgdc = (long)pgd_current;
#endif

	/*
	 * The vmalloc handling is not in the hotpath.
	 */
	uasm_i_dmfc0(p, tmp, C0_BADVADDR);

	if (check_for_high_segbits) {
		/*
		 * The kernel currently implicitly assumes that the
		 * MIPS SEGBITS parameter for the processor is
		 * (PGDIR_SHIFT+PGDIR_BITS) or less, and will never
		 * allocate virtual addresses outside the maximum
		 * range for SEGBITS = (PGDIR_SHIFT+PGDIR_BITS). But
		 * that doesn't prevent user code from accessing the
		 * higher xuseg addresses.  Here, we make sure that
		 * everything but the lower xuseg addresses goes down
		 * the module_alloc/vmalloc path.
		 */
		uasm_i_dsrl_safe(p, ptr, tmp,
				 PGDIR_SHIFT + PGD_ORDER + PAGE_SHIFT - 3);
		uasm_il_bnez(p, r, ptr, label_vmalloc);
	} else {
		uasm_il_bltz(p, r, tmp, label_vmalloc);
		/* No uasm_i_nop needed here, since the next insn doesn't touch TMP. */
	}

#ifdef CONFIG_MIPS_PGD_C0_CONTEXT
	if (pgd_reg != -1) {
		/* pgd is in pgd_reg */
		UASM_i_MFC0(p, ptr, 31, pgd_reg);
	} else {
		/*
		 * &pgd << 11 stored in CONTEXT [23..63].
		 */
		UASM_i_MFC0(p, ptr, C0_CONTEXT);

		/* Clear lower 23 bits of context. */
		uasm_i_dins(p, ptr, 0, 0, 23);

		/* 1 0	1 0 1  << 6  xkphys cached */
		uasm_i_ori(p, ptr, ptr, 0x540);
		uasm_i_drotr(p, ptr, ptr, 11);
	}
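	/*
	 * Illustrative walk-through of the ori/drotr trick above, using a
	 * hypothetical pgd at physical address 0x1234000: CONTEXT holds
	 * 0x1234000 << 11; dins clears the low 23 bits (the hardware
	 * BadVPN2 field); ori plants 0b10101 in bits 10..6; drotr by 11
	 * then rotates those bits up to bits 63..59 while shifting the pgd
	 * back down, giving 0xa800000001234000 - the XKPHYS cached alias
	 * of the pgd - without ever having to load a 64-bit constant.
	 */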
#elif defined(CONFIG_SMP)
# ifdef CONFIG_MIPS_MT_SMTC
	/*
	 * SMTC uses TCBind value as "CPU" index
	 */
	uasm_i_mfc0(p, ptr, C0_TCBIND);
	uasm_i_dsrl_safe(p, ptr, ptr, 19);
# else
	/*
	 * 64 bit SMP running in XKPHYS has smp_processor_id() << 3
	 * stored in CONTEXT.
	 */
	uasm_i_dmfc0(p, ptr, C0_CONTEXT);
	uasm_i_dsrl_safe(p, ptr, ptr, 23);
# endif
	UASM_i_LA_mostly(p, tmp, pgdc);
	uasm_i_daddu(p, ptr, ptr, tmp);
	uasm_i_dmfc0(p, tmp, C0_BADVADDR);
	uasm_i_ld(p, ptr, uasm_rel_lo(pgdc), ptr);
#else
	UASM_i_LA_mostly(p, ptr, pgdc);
	uasm_i_ld(p, ptr, uasm_rel_lo(pgdc), ptr);
#endif

	uasm_l_vmalloc_done(l, *p);

	/* get pgd offset in bytes */
	uasm_i_dsrl_safe(p, tmp, tmp, PGDIR_SHIFT - 3);

	uasm_i_andi(p, tmp, tmp, (PTRS_PER_PGD - 1) << 3);
	uasm_i_daddu(p, ptr, ptr, tmp); /* add in pgd offset */
#ifndef __PAGETABLE_PMD_FOLDED
	uasm_i_dmfc0(p, tmp, C0_BADVADDR); /* get faulting address */
	uasm_i_ld(p, ptr, 0, ptr); /* get pmd pointer */
	uasm_i_dsrl_safe(p, tmp, tmp, PMD_SHIFT - 3); /* get pmd offset in bytes */
	uasm_i_andi(p, tmp, tmp, (PTRS_PER_PMD - 1) << 3);
	uasm_i_daddu(p, ptr, ptr, tmp); /* add in pmd offset */
#endif
}
/*
 * BVADDR is the faulting address, PTR is scratch.
 * PTR will hold the pgd for vmalloc.
 */
static void __cpuinit
build_get_pgd_vmalloc64(u32 **p, struct uasm_label **l, struct uasm_reloc **r,
			unsigned int bvaddr, unsigned int ptr,
			enum vmalloc64_mode mode)
{
	long swpd = (long)swapper_pg_dir;
	int single_insn_swpd;
	int did_vmalloc_branch = 0;

	single_insn_swpd = uasm_in_compat_space_p(swpd) && !uasm_rel_lo(swpd);

	uasm_l_vmalloc(l, *p);

	if (mode != not_refill && check_for_high_segbits) {
		if (single_insn_swpd) {
			uasm_il_bltz(p, r, bvaddr, label_vmalloc_done);
			uasm_i_lui(p, ptr, uasm_rel_hi(swpd));
			did_vmalloc_branch = 1;
		} else {
			uasm_il_bgez(p, r, bvaddr, label_large_segbits_fault);
		}
	}
	if (!did_vmalloc_branch) {
		if (uasm_in_compat_space_p(swpd) && !uasm_rel_lo(swpd)) {
			uasm_il_b(p, r, label_vmalloc_done);
			uasm_i_lui(p, ptr, uasm_rel_hi(swpd));
		} else {
			UASM_i_LA_mostly(p, ptr, swpd);
			uasm_il_b(p, r, label_vmalloc_done);
			if (uasm_in_compat_space_p(swpd))
				uasm_i_addiu(p, ptr, ptr, uasm_rel_lo(swpd));
			else
				uasm_i_daddiu(p, ptr, ptr, uasm_rel_lo(swpd));
		}
	}
	if (mode != not_refill && check_for_high_segbits) {
		uasm_l_large_segbits_fault(l, *p);
		/*
		 * We get here if we are an xsseg address, or if we are
		 * an xuseg address above (PGDIR_SHIFT+PGDIR_BITS) boundary.
		 *
		 * Ignoring xsseg (assume disabled, so it would generate
		 * address errors?), the only remaining possibility
		 * is the upper xuseg addresses.  On processors with
		 * TLB_SEGBITS <= PGDIR_SHIFT+PGDIR_BITS, these
		 * addresses would have taken an address error. We try
		 * to mimic that here by taking a load/istream page
		 * fault.
		 */
		UASM_i_LA(p, ptr, (unsigned long)tlb_do_page_fault_0);
		uasm_i_jr(p, ptr);

		if (mode == refill_scratch) {
			if (scratch_reg > 0)
				UASM_i_MFC0(p, 1, 31, scratch_reg);
			else
				UASM_i_LW(p, 1, scratchpad_offset(0), 0);
		} else {
			uasm_i_nop(p);
		}
	}
}
#else /* !CONFIG_64BIT */

/*
 * TMP and PTR are scratch.
 * TMP will be clobbered, PTR will hold the pgd entry.
 */
static void __cpuinit __maybe_unused
build_get_pgde32(u32 **p, unsigned int tmp, unsigned int ptr)
{
	long pgdc = (long)pgd_current;

	/* 32 bit SMP has smp_processor_id() stored in CONTEXT. */
#ifdef CONFIG_SMP
#ifdef CONFIG_MIPS_MT_SMTC
	/*
	 * SMTC uses TCBind value as "CPU" index
	 */
	uasm_i_mfc0(p, ptr, C0_TCBIND);
	UASM_i_LA_mostly(p, tmp, pgdc);
	uasm_i_srl(p, ptr, ptr, 19);
#else
	/*
	 * smp_processor_id() << 3 is stored in CONTEXT.
	 */
	uasm_i_mfc0(p, ptr, C0_CONTEXT);
	UASM_i_LA_mostly(p, tmp, pgdc);
	uasm_i_srl(p, ptr, ptr, 23);
#endif
	uasm_i_addu(p, ptr, tmp, ptr);
#else
	UASM_i_LA_mostly(p, ptr, pgdc);
#endif
	uasm_i_mfc0(p, tmp, C0_BADVADDR); /* get faulting address */
	uasm_i_lw(p, ptr, uasm_rel_lo(pgdc), ptr);
	uasm_i_srl(p, tmp, tmp, PGDIR_SHIFT); /* get pgd only bits */
	uasm_i_sll(p, tmp, tmp, PGD_T_LOG2);
	uasm_i_addu(p, ptr, ptr, tmp); /* add in pgd offset */
}

#endif /* !CONFIG_64BIT */
static void __cpuinit build_adjust_context(u32 **p, unsigned int ctx)
{
	unsigned int shift = 4 - (PTE_T_LOG2 + 1) + PAGE_SHIFT - 12;
	unsigned int mask = (PTRS_PER_PTE / 2 - 1) << (PTE_T_LOG2 + 1);

	switch (current_cpu_type()) {
	case CPU_VR41XX:
	case CPU_VR4111:
	case CPU_VR4121:
	case CPU_VR4122:
	case CPU_VR4131:
	case CPU_VR4181:
	case CPU_VR4181A:
	case CPU_VR4133:
		shift += 2;
		break;

	default:
		break;
	}

	if (shift)
		UASM_i_SRL(p, ctx, ctx, shift);
	uasm_i_andi(p, ctx, ctx, mask);
}
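/*
 * Worked example (illustrative): with 4 KB pages (PAGE_SHIFT == 12) and
 * 4-byte PTEs (PTE_T_LOG2 == 2), shift = 4 - 3 + 12 - 12 = 1 and
 * mask = (1024 / 2 - 1) << 3 = 0xff8.  The Context register presents
 * BadVPN2 (the page-pair index) starting at bit 4, so shifting right by
 * one and masking converts it into the byte offset of the even PTE of
 * the pair within a page table page, ready to be added to the page
 * table pointer by the caller.
 */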
static void __cpuinit build_get_ptep(u32 **p, unsigned int tmp, unsigned int ptr)
{
	/*
	 * Bug workaround for the Nevada. It seems as if under certain
	 * circumstances the move from cp0_context might produce a
	 * bogus result when the mfc0 instruction and its consumer are
	 * in a different cacheline or a load instruction, probably any
	 * memory reference, is between them.
	 */
	switch (current_cpu_type()) {
	case CPU_NEVADA:
		UASM_i_LW(p, ptr, 0, ptr);
		GET_CONTEXT(p, tmp); /* get context reg */
		break;

	default:
		GET_CONTEXT(p, tmp); /* get context reg */
		UASM_i_LW(p, ptr, 0, ptr);
		break;
	}

	build_adjust_context(p, tmp);
	UASM_i_ADDU(p, ptr, ptr, tmp); /* add in offset */
}
static void __cpuinit build_update_entries(u32 **p, unsigned int tmp,
					   unsigned int ptep)
{
	/*
	 * 64bit address support (36bit on a 32bit CPU) in a 32bit
	 * Kernel is a special case. Only a few CPUs use it.
	 */
#ifdef CONFIG_64BIT_PHYS_ADDR
	if (cpu_has_64bits) {
		uasm_i_ld(p, tmp, 0, ptep); /* get even pte */
		uasm_i_ld(p, ptep, sizeof(pte_t), ptep); /* get odd pte */
		if (kernel_uses_smartmips_rixi) {
			UASM_i_SRL(p, tmp, tmp, ilog2(_PAGE_NO_EXEC));
			UASM_i_SRL(p, ptep, ptep, ilog2(_PAGE_NO_EXEC));
			UASM_i_ROTR(p, tmp, tmp,
				    ilog2(_PAGE_GLOBAL) - ilog2(_PAGE_NO_EXEC));
			UASM_i_MTC0(p, tmp, C0_ENTRYLO0); /* load it */
			UASM_i_ROTR(p, ptep, ptep,
				    ilog2(_PAGE_GLOBAL) - ilog2(_PAGE_NO_EXEC));
		} else {
			uasm_i_dsrl_safe(p, tmp, tmp, ilog2(_PAGE_GLOBAL)); /* convert to entrylo0 */
			UASM_i_MTC0(p, tmp, C0_ENTRYLO0); /* load it */
			uasm_i_dsrl_safe(p, ptep, ptep, ilog2(_PAGE_GLOBAL)); /* convert to entrylo1 */
		}
		UASM_i_MTC0(p, ptep, C0_ENTRYLO1); /* load it */
	} else {
		int pte_off_even = sizeof(pte_t) / 2;
		int pte_off_odd = pte_off_even + sizeof(pte_t);

		/* The pte entries are pre-shifted */
		uasm_i_lw(p, tmp, pte_off_even, ptep); /* get even pte */
		UASM_i_MTC0(p, tmp, C0_ENTRYLO0); /* load it */
		uasm_i_lw(p, ptep, pte_off_odd, ptep); /* get odd pte */
		UASM_i_MTC0(p, ptep, C0_ENTRYLO1); /* load it */
	}
#else
	UASM_i_LW(p, tmp, 0, ptep); /* get even pte */
	UASM_i_LW(p, ptep, sizeof(pte_t), ptep); /* get odd pte */
	if (r45k_bvahwbug())
		build_tlb_probe_entry(p);
	if (kernel_uses_smartmips_rixi) {
		UASM_i_SRL(p, tmp, tmp, ilog2(_PAGE_NO_EXEC));
		UASM_i_SRL(p, ptep, ptep, ilog2(_PAGE_NO_EXEC));
		UASM_i_ROTR(p, tmp, tmp,
			    ilog2(_PAGE_GLOBAL) - ilog2(_PAGE_NO_EXEC));
		if (r4k_250MHZhwbug())
			UASM_i_MTC0(p, 0, C0_ENTRYLO0);
		UASM_i_MTC0(p, tmp, C0_ENTRYLO0); /* load it */
		UASM_i_ROTR(p, ptep, ptep,
			    ilog2(_PAGE_GLOBAL) - ilog2(_PAGE_NO_EXEC));
	} else {
		UASM_i_SRL(p, tmp, tmp, ilog2(_PAGE_GLOBAL)); /* convert to entrylo0 */
		if (r4k_250MHZhwbug())
			UASM_i_MTC0(p, 0, C0_ENTRYLO0);
		UASM_i_MTC0(p, tmp, C0_ENTRYLO0); /* load it */
		UASM_i_SRL(p, ptep, ptep, ilog2(_PAGE_GLOBAL)); /* convert to entrylo1 */
		if (r45k_bvahwbug())
			uasm_i_mfc0(p, tmp, C0_INDEX);
	}
	if (r4k_250MHZhwbug())
		UASM_i_MTC0(p, 0, C0_ENTRYLO1);
	UASM_i_MTC0(p, ptep, C0_ENTRYLO1); /* load it */
#endif
}
struct mips_huge_tlb_info {
	int huge_pte;
	int restore_scratch;
};
static struct mips_huge_tlb_info __cpuinit
build_fast_tlb_refill_handler (u32 **p, struct uasm_label **l,
			       struct uasm_reloc **r, unsigned int tmp,
			       unsigned int ptr, int c0_scratch)
{
	struct mips_huge_tlb_info rv;
	unsigned int even, odd;
	int vmalloc_branch_delay_filled = 0;
	const int scratch = 1; /* Our extra working register */

	rv.huge_pte = scratch;
	rv.restore_scratch = 0;

	if (check_for_high_segbits) {
		UASM_i_MFC0(p, tmp, C0_BADVADDR);

		if (pgd_reg != -1)
			UASM_i_MFC0(p, ptr, 31, pgd_reg);
		else
			UASM_i_MFC0(p, ptr, C0_CONTEXT);

		if (c0_scratch >= 0)
			UASM_i_MTC0(p, scratch, 31, c0_scratch);
		else
			UASM_i_SW(p, scratch, scratchpad_offset(0), 0);

		uasm_i_dsrl_safe(p, scratch, tmp,
				 PGDIR_SHIFT + PGD_ORDER + PAGE_SHIFT - 3);
		uasm_il_bnez(p, r, scratch, label_vmalloc);

		if (pgd_reg == -1) {
			vmalloc_branch_delay_filled = 1;
			/* Clear lower 23 bits of context. */
			uasm_i_dins(p, ptr, 0, 0, 23);
		}
	} else {
		if (pgd_reg != -1)
			UASM_i_MFC0(p, ptr, 31, pgd_reg);
		else
			UASM_i_MFC0(p, ptr, C0_CONTEXT);

		UASM_i_MFC0(p, tmp, C0_BADVADDR);

		if (c0_scratch >= 0)
			UASM_i_MTC0(p, scratch, 31, c0_scratch);
		else
			UASM_i_SW(p, scratch, scratchpad_offset(0), 0);

		if (pgd_reg == -1)
			/* Clear lower 23 bits of context. */
			uasm_i_dins(p, ptr, 0, 0, 23);

		uasm_il_bltz(p, r, tmp, label_vmalloc);
	}

	if (pgd_reg == -1) {
		vmalloc_branch_delay_filled = 1;
		/* 1 0	1 0 1  << 6  xkphys cached */
		uasm_i_ori(p, ptr, ptr, 0x540);
		uasm_i_drotr(p, ptr, ptr, 11);
	}

#ifdef __PAGETABLE_PMD_FOLDED
#define LOC_PTEP scratch
#else
#define LOC_PTEP ptr
#endif

	if (!vmalloc_branch_delay_filled)
		/* get pgd offset in bytes */
		uasm_i_dsrl_safe(p, scratch, tmp, PGDIR_SHIFT - 3);

	uasm_l_vmalloc_done(l, *p);

	/*
	 *			   tmp		ptr
	 * fall-through case =	 badvaddr  *pgd_current
	 * vmalloc case	     =	 badvaddr  swapper_pg_dir
	 */

	if (vmalloc_branch_delay_filled)
		/* get pgd offset in bytes */
		uasm_i_dsrl_safe(p, scratch, tmp, PGDIR_SHIFT - 3);

#ifdef __PAGETABLE_PMD_FOLDED
	GET_CONTEXT(p, tmp); /* get context reg */
#endif
	uasm_i_andi(p, scratch, scratch, (PTRS_PER_PGD - 1) << 3);

	if (use_lwx_insns()) {
		UASM_i_LWX(p, LOC_PTEP, scratch, ptr);
	} else {
		uasm_i_daddu(p, ptr, ptr, scratch); /* add in pgd offset */
		uasm_i_ld(p, LOC_PTEP, 0, ptr); /* get pmd pointer */
	}

#ifndef __PAGETABLE_PMD_FOLDED
	/* get pmd offset in bytes */
	uasm_i_dsrl_safe(p, scratch, tmp, PMD_SHIFT - 3);
	uasm_i_andi(p, scratch, scratch, (PTRS_PER_PMD - 1) << 3);
	GET_CONTEXT(p, tmp); /* get context reg */

	if (use_lwx_insns()) {
		UASM_i_LWX(p, scratch, scratch, ptr);
	} else {
		uasm_i_daddu(p, ptr, ptr, scratch); /* add in pmd offset */
		UASM_i_LW(p, scratch, 0, ptr);
	}
#endif
	/* Adjust the context during the load latency. */
	build_adjust_context(p, tmp);

#ifdef CONFIG_HUGETLB_PAGE
	uasm_il_bbit1(p, r, scratch, ilog2(_PAGE_HUGE), label_tlb_huge_update);
	/*
	 * In the LWX case we don't want to do the load in the
	 * delay slot.  It cannot issue in the same cycle and may be
	 * speculative and unneeded.
	 */
	if (use_lwx_insns())
		uasm_i_nop(p);
#endif /* CONFIG_HUGETLB_PAGE */

	/* build_update_entries */
	if (use_lwx_insns()) {
		even = ptr;
		odd = tmp;
		UASM_i_LWX(p, even, scratch, tmp);
		UASM_i_ADDIU(p, tmp, tmp, sizeof(pte_t));
		UASM_i_LWX(p, odd, scratch, tmp);
	} else {
		UASM_i_ADDU(p, ptr, scratch, tmp); /* add in offset */
		even = tmp;
		odd = ptr;
		UASM_i_LW(p, even, 0, ptr); /* get even pte */
		UASM_i_LW(p, odd, sizeof(pte_t), ptr); /* get odd pte */
	}
	if (kernel_uses_smartmips_rixi) {
		uasm_i_dsrl_safe(p, even, even, ilog2(_PAGE_NO_EXEC));
		uasm_i_dsrl_safe(p, odd, odd, ilog2(_PAGE_NO_EXEC));
		uasm_i_drotr(p, even, even,
			     ilog2(_PAGE_GLOBAL) - ilog2(_PAGE_NO_EXEC));
		UASM_i_MTC0(p, even, C0_ENTRYLO0); /* load it */
		uasm_i_drotr(p, odd, odd,
			     ilog2(_PAGE_GLOBAL) - ilog2(_PAGE_NO_EXEC));
	} else {
		uasm_i_dsrl_safe(p, even, even, ilog2(_PAGE_GLOBAL));
		UASM_i_MTC0(p, even, C0_ENTRYLO0); /* load it */
		uasm_i_dsrl_safe(p, odd, odd, ilog2(_PAGE_GLOBAL));
	}
	UASM_i_MTC0(p, odd, C0_ENTRYLO1); /* load it */

	if (c0_scratch >= 0) {
		UASM_i_MFC0(p, scratch, 31, c0_scratch);
		build_tlb_write_entry(p, l, r, tlb_random);
		uasm_l_leave(l, *p);
		rv.restore_scratch = 1;
	} else if (PAGE_SHIFT == 14 || PAGE_SHIFT == 13) {
		build_tlb_write_entry(p, l, r, tlb_random);
		uasm_l_leave(l, *p);
		UASM_i_LW(p, scratch, scratchpad_offset(0), 0);
	} else {
		UASM_i_LW(p, scratch, scratchpad_offset(0), 0);
		build_tlb_write_entry(p, l, r, tlb_random);
		uasm_l_leave(l, *p);
		rv.restore_scratch = 1;
	}

	uasm_i_eret(p); /* return from trap */

	return rv;
}
/*
 * For a 64-bit kernel, we are using the 64-bit XTLB refill exception
 * because EXL == 0.  If we wrap, we can also use the 32 instruction
 * slots before the XTLB refill exception handler which belong to the
 * unused TLB refill exception.
 */
#define MIPS64_REFILL_INSNS 32
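/*
 * Layout sketch (illustrative): on a 64-bit kernel the CPU vectors TLB
 * misses to the XTLB refill handler at ebase + 0x080, while the 32-bit
 * TLB refill vector at ebase + 0x000 goes unused.  Each vector is 32
 * instructions (0x80 bytes) long, so by branching from the end of the
 * XTLB area back into the unused vector in front of it, up to
 * 2 * MIPS64_REFILL_INSNS instructions fit, which is why final_handler
 * is copied to ebase with a length of 0x100 below.
 */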
static void __cpuinit build_r4000_tlb_refill_handler(void)
{
	u32 *p = tlb_handler;
	struct uasm_label *l = labels;
	struct uasm_reloc *r = relocs;
	u32 *f;
	unsigned int final_len;
	struct mips_huge_tlb_info htlb_info;
	enum vmalloc64_mode vmalloc_mode;

	memset(tlb_handler, 0, sizeof(tlb_handler));
	memset(labels, 0, sizeof(labels));
	memset(relocs, 0, sizeof(relocs));
	memset(final_handler, 0, sizeof(final_handler));

	if (scratch_reg == 0)
		scratch_reg = allocate_kscratch();

	if ((scratch_reg > 0 || scratchpad_available()) && use_bbit_insns()) {
		htlb_info = build_fast_tlb_refill_handler(&p, &l, &r, K0, K1,
							  scratch_reg);
		vmalloc_mode = refill_scratch;
	} else {
		htlb_info.huge_pte = K0;
		htlb_info.restore_scratch = 0;
		vmalloc_mode = refill_noscratch;
		/*
		 * create the plain linear handler
		 */
		if (bcm1250_m3_war()) {
			unsigned int segbits = 44;

			uasm_i_dmfc0(&p, K0, C0_BADVADDR);
			uasm_i_dmfc0(&p, K1, C0_ENTRYHI);
			uasm_i_xor(&p, K0, K0, K1);
			uasm_i_dsrl_safe(&p, K1, K0, 62);
			uasm_i_dsrl_safe(&p, K0, K0, 12 + 1);
			uasm_i_dsll_safe(&p, K0, K0, 64 + 12 + 1 - segbits);
			uasm_i_or(&p, K0, K0, K1);
			uasm_il_bnez(&p, &r, K0, label_leave);
			/* No need for uasm_i_nop */
		}

#ifdef CONFIG_64BIT
		build_get_pmde64(&p, &l, &r, K0, K1); /* get pmd in K1 */
#else
		build_get_pgde32(&p, K0, K1); /* get pgd in K1 */
#endif

#ifdef CONFIG_HUGETLB_PAGE
		build_is_huge_pte(&p, &r, K0, K1, label_tlb_huge_update);
#endif

		build_get_ptep(&p, K0, K1);
		build_update_entries(&p, K0, K1);
		build_tlb_write_entry(&p, &l, &r, tlb_random);
		uasm_l_leave(&l, p);
		uasm_i_eret(&p); /* return from trap */
	}
#ifdef CONFIG_HUGETLB_PAGE
	uasm_l_tlb_huge_update(&l, p);
	build_huge_update_entries(&p, htlb_info.huge_pte, K1);
	build_huge_tlb_write_entry(&p, &l, &r, K0, tlb_random,
				   htlb_info.restore_scratch);
#endif

#ifdef CONFIG_64BIT
	build_get_pgd_vmalloc64(&p, &l, &r, K0, K1, vmalloc_mode);
#endif

	/*
	 * Overflow check: For the 64bit handler, we need at least one
	 * free instruction slot for the wrap-around branch. In worst
	 * case, if the intended insertion point is a delay slot, we
	 * need three, with the second nop'ed and the third being
	 * unused.
	 */
	/* Loongson2 ebase is different from r4k, we have more space */
#if defined(CONFIG_32BIT) || defined(CONFIG_CPU_LOONGSON2)
	if ((p - tlb_handler) > 64)
		panic("TLB refill handler space exceeded");
#else
	if (((p - tlb_handler) > (MIPS64_REFILL_INSNS * 2) - 1)
	    || (((p - tlb_handler) > (MIPS64_REFILL_INSNS * 2) - 3)
		&& uasm_insn_has_bdelay(relocs,
					tlb_handler + MIPS64_REFILL_INSNS - 3)))
		panic("TLB refill handler space exceeded");
#endif

	/*
	 * Now fold the handler in the TLB refill handler space.
	 */
#if defined(CONFIG_32BIT) || defined(CONFIG_CPU_LOONGSON2)
	f = final_handler;
	/* Simplest case, just copy the handler. */
	uasm_copy_handler(relocs, labels, tlb_handler, p, f);
	final_len = p - tlb_handler;
#else /* CONFIG_64BIT */
	f = final_handler + MIPS64_REFILL_INSNS;
	if ((p - tlb_handler) <= MIPS64_REFILL_INSNS) {
		/* Just copy the handler. */
		uasm_copy_handler(relocs, labels, tlb_handler, p, f);
		final_len = p - tlb_handler;
	} else {
#if defined(CONFIG_HUGETLB_PAGE)
		const enum label_id ls = label_tlb_huge_update;
#else
		const enum label_id ls = label_vmalloc;
#endif
		u32 *split;
		int ov = 0;
		int i;

		for (i = 0; i < ARRAY_SIZE(labels) && labels[i].lab != ls; i++)
			;
		BUG_ON(i == ARRAY_SIZE(labels));
		split = labels[i].addr;

		/*
		 * See if we have overflown one way or the other.
		 */
		if (split > tlb_handler + MIPS64_REFILL_INSNS ||
		    split < p - MIPS64_REFILL_INSNS)
			ov = 1;

		if (ov) {
			/*
			 * Split two instructions before the end.  One
			 * for the branch and one for the instruction
			 * in the delay slot.
			 */
			split = tlb_handler + MIPS64_REFILL_INSNS - 2;

			/*
			 * If the branch would fall in a delay slot,
			 * we must back up an additional instruction
			 * so that it is no longer in a delay slot.
			 */
			if (uasm_insn_has_bdelay(relocs, split - 1))
				split--;
		}
		/* Copy first part of the handler. */
		uasm_copy_handler(relocs, labels, tlb_handler, split, f);
		f += split - tlb_handler;

		if (ov) {
			/* Insert branch. */
			uasm_l_split(&l, final_handler);
			uasm_il_b(&f, &r, label_split);
			if (uasm_insn_has_bdelay(relocs, split))
				uasm_i_nop(&f);
			else {
				uasm_copy_handler(relocs, labels,
						  split, split + 1, f);
				uasm_move_labels(labels, f, f + 1, -1);
				f++;
				split++;
			}
		}

		/* Copy the rest of the handler. */
		uasm_copy_handler(relocs, labels, split, p, final_handler);
		final_len = (f - (final_handler + MIPS64_REFILL_INSNS)) +
			    (p - split);
	}
#endif /* CONFIG_64BIT */

	uasm_resolve_relocs(relocs, labels);
	pr_debug("Wrote TLB refill handler (%u instructions).\n",
		 final_len);

	memcpy((void *)ebase, final_handler, 0x100);

	dump_handler((u32 *)ebase, 64);
}
/*
 * 128 instructions for the fastpath handler is generous and should
 * never be exceeded.
 */
#define FASTPATH_SIZE 128

u32 handle_tlbl[FASTPATH_SIZE] __cacheline_aligned;
u32 handle_tlbs[FASTPATH_SIZE] __cacheline_aligned;
u32 handle_tlbm[FASTPATH_SIZE] __cacheline_aligned;
#ifdef CONFIG_MIPS_PGD_C0_CONTEXT
u32 tlbmiss_handler_setup_pgd[16] __cacheline_aligned;
static void __cpuinit build_r4000_setup_pgd(void)
{
	const int a0 = 4;
	const int a1 = 5;
	u32 *p = tlbmiss_handler_setup_pgd;
	struct uasm_label *l = labels;
	struct uasm_reloc *r = relocs;

	memset(tlbmiss_handler_setup_pgd, 0, sizeof(tlbmiss_handler_setup_pgd));
	memset(labels, 0, sizeof(labels));
	memset(relocs, 0, sizeof(relocs));

	pgd_reg = allocate_kscratch();

	if (pgd_reg == -1) {
		/* PGD << 11 in c0_Context */
		/*
		 * If it is a ckseg0 address, convert to a physical
		 * address.  Shifting right by 29 and adding 4 will
		 * result in zero for these addresses.
		 */
		UASM_i_SRA(&p, a1, a0, 29);
		UASM_i_ADDIU(&p, a1, a1, 4);
		uasm_il_bnez(&p, &r, a1, label_tlbl_goaround1);
		uasm_i_nop(&p);
		uasm_i_dinsm(&p, a0, 0, 29, 64 - 29);
		uasm_l_tlbl_goaround1(&l, p);
		UASM_i_SLL(&p, a0, a0, 11);
		uasm_i_jr(&p, 31);
		UASM_i_MTC0(&p, a0, C0_CONTEXT);
	} else {
		/* PGD in c0_KScratch */
		uasm_i_jr(&p, 31);
		UASM_i_MTC0(&p, a0, 31, pgd_reg);
	}
	if (p - tlbmiss_handler_setup_pgd > ARRAY_SIZE(tlbmiss_handler_setup_pgd))
		panic("tlbmiss_handler_setup_pgd space exceeded");
	uasm_resolve_relocs(relocs, labels);
	pr_debug("Wrote tlbmiss_handler_setup_pgd (%u instructions).\n",
		 (unsigned int)(p - tlbmiss_handler_setup_pgd));

	dump_handler(tlbmiss_handler_setup_pgd,
		     ARRAY_SIZE(tlbmiss_handler_setup_pgd));
}
#endif
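/*
 * Worked example for the ckseg0 check above (illustrative): a CKSEG0
 * pointer such as 0xffffffff80123000 arithmetically shifted right by 29
 * gives -4, and adding 4 yields 0, so the bnez falls through and dinsm
 * clears bits 63..29, leaving the physical address 0x00123000.  Any
 * other kernel pointer (e.g. an XKPHYS address) gives a non-zero result
 * and skips the conversion.  The result is then shifted left by 11 to
 * form the PTEBase value that build_get_pmde64 later undoes with its
 * ori/drotr sequence.
 */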
static void __cpuinit
iPTE_LW(u32 **p, unsigned int pte, unsigned int ptr)
{
#ifdef CONFIG_SMP
# ifdef CONFIG_64BIT_PHYS_ADDR
	if (cpu_has_64bits)
		uasm_i_lld(p, pte, 0, ptr);
	else
# endif
		UASM_i_LL(p, pte, 0, ptr);
#else
# ifdef CONFIG_64BIT_PHYS_ADDR
	if (cpu_has_64bits)
		uasm_i_ld(p, pte, 0, ptr);
	else
# endif
		UASM_i_LW(p, pte, 0, ptr);
#endif
}
static void __cpuinit
iPTE_SW(u32 **p, struct uasm_reloc **r, unsigned int pte, unsigned int ptr,
	unsigned int mode)
{
#ifdef CONFIG_64BIT_PHYS_ADDR
	unsigned int hwmode = mode & (_PAGE_VALID | _PAGE_DIRTY);
#endif

	uasm_i_ori(p, pte, pte, mode);
#ifdef CONFIG_SMP
# ifdef CONFIG_64BIT_PHYS_ADDR
	if (cpu_has_64bits)
		uasm_i_scd(p, pte, 0, ptr);
	else
# endif
		UASM_i_SC(p, pte, 0, ptr);

	if (r10000_llsc_war())
		uasm_il_beqzl(p, r, pte, label_smp_pgtable_change);
	else
		uasm_il_beqz(p, r, pte, label_smp_pgtable_change);

# ifdef CONFIG_64BIT_PHYS_ADDR
	if (!cpu_has_64bits) {
		/* no uasm_i_nop needed */
		uasm_i_ll(p, pte, sizeof(pte_t) / 2, ptr);
		uasm_i_ori(p, pte, pte, hwmode);
		uasm_i_sc(p, pte, sizeof(pte_t) / 2, ptr);
		uasm_il_beqz(p, r, pte, label_smp_pgtable_change);
		/* no uasm_i_nop needed */
		uasm_i_lw(p, pte, 0, ptr);
	} else
		uasm_i_nop(p);
# else
	uasm_i_nop(p);
# endif
#else
# ifdef CONFIG_64BIT_PHYS_ADDR
	if (cpu_has_64bits)
		uasm_i_sd(p, pte, 0, ptr);
	else
# endif
		UASM_i_SW(p, pte, 0, ptr);

# ifdef CONFIG_64BIT_PHYS_ADDR
	if (!cpu_has_64bits) {
		uasm_i_lw(p, pte, sizeof(pte_t) / 2, ptr);
		uasm_i_ori(p, pte, pte, hwmode);
		uasm_i_sw(p, pte, sizeof(pte_t) / 2, ptr);
		uasm_i_lw(p, pte, 0, ptr);
	}
# endif
#endif
}
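/*
 * On SMP, iPTE_LW/iPTE_SW above pair up into the classic ll/sc update
 * loop.  For example, the generated store-handler fast path is roughly:
 *
 *	ll	k0, 0(k1)		# iPTE_LW
 *	...				# permission checks
 *	ori	k0, k0, mode		# set accessed/dirty/valid bits
 *	sc	k0, 0(k1)
 *	beqz	k0, smp_pgtable_change	# lost the race: reload and retry
 *
 * so a concurrent update of the same PTE simply causes the sequence to
 * be replayed rather than silently losing bits.
 */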
/*
 * Check if PTE is present, if not then jump to LABEL. PTR points to
 * the page table where this PTE is located, PTE will be re-loaded
 * with its original value.
 */
static void __cpuinit
build_pte_present(u32 **p, struct uasm_reloc **r,
		  unsigned int pte, unsigned int ptr, enum label_id lid)
{
	if (kernel_uses_smartmips_rixi) {
		if (use_bbit_insns()) {
			uasm_il_bbit0(p, r, pte, ilog2(_PAGE_PRESENT), lid);
			uasm_i_nop(p);
		} else {
			uasm_i_andi(p, pte, pte, _PAGE_PRESENT);
			uasm_il_beqz(p, r, pte, lid);
			iPTE_LW(p, pte, ptr);
		}
	} else {
		uasm_i_andi(p, pte, pte, _PAGE_PRESENT | _PAGE_READ);
		uasm_i_xori(p, pte, pte, _PAGE_PRESENT | _PAGE_READ);
		uasm_il_bnez(p, r, pte, lid);
		iPTE_LW(p, pte, ptr);
	}
}
/* Make PTE valid, store result in PTR. */
static void __cpuinit
build_make_valid(u32 **p, struct uasm_reloc **r, unsigned int pte,
		 unsigned int ptr)
{
	unsigned int mode = _PAGE_VALID | _PAGE_ACCESSED;

	iPTE_SW(p, r, pte, ptr, mode);
}
/*
 * Check if PTE can be written to, if not branch to LABEL.  Regardless,
 * restore PTE with value from PTR when done.
 */
static void __cpuinit
build_pte_writable(u32 **p, struct uasm_reloc **r,
		   unsigned int pte, unsigned int ptr, enum label_id lid)
{
	if (use_bbit_insns()) {
		uasm_il_bbit0(p, r, pte, ilog2(_PAGE_PRESENT), lid);
		uasm_i_nop(p);
		uasm_il_bbit0(p, r, pte, ilog2(_PAGE_WRITE), lid);
		uasm_i_nop(p);
	} else {
		uasm_i_andi(p, pte, pte, _PAGE_PRESENT | _PAGE_WRITE);
		uasm_i_xori(p, pte, pte, _PAGE_PRESENT | _PAGE_WRITE);
		uasm_il_bnez(p, r, pte, lid);
		iPTE_LW(p, pte, ptr);
	}
}
/*
 * Make PTE writable, update software status bits as well, then store
 * at PTR.
 */
static void __cpuinit
build_make_write(u32 **p, struct uasm_reloc **r, unsigned int pte,
		 unsigned int ptr)
{
	unsigned int mode = (_PAGE_ACCESSED | _PAGE_MODIFIED | _PAGE_VALID
			     | _PAGE_DIRTY);

	iPTE_SW(p, r, pte, ptr, mode);
}
/*
 * Check if PTE can be modified, if not branch to LABEL.  Regardless,
 * restore PTE with value from PTR when done.
 */
static void __cpuinit
build_pte_modifiable(u32 **p, struct uasm_reloc **r,
		     unsigned int pte, unsigned int ptr, enum label_id lid)
{
	if (use_bbit_insns()) {
		uasm_il_bbit0(p, r, pte, ilog2(_PAGE_WRITE), lid);
		uasm_i_nop(p);
	} else {
		uasm_i_andi(p, pte, pte, _PAGE_WRITE);
		uasm_il_beqz(p, r, pte, lid);
		iPTE_LW(p, pte, ptr);
	}
}
#ifndef CONFIG_MIPS_PGD_C0_CONTEXT
/*
 * R3000 style TLB load/store/modify handlers.
 */

/*
 * This places the pte into ENTRYLO0 and writes it with tlbwi.
 * Then it returns.
 */
static void __cpuinit
build_r3000_pte_reload_tlbwi(u32 **p, unsigned int pte, unsigned int tmp)
{
	uasm_i_mtc0(p, pte, C0_ENTRYLO0); /* cp0 delay */
	uasm_i_mfc0(p, tmp, C0_EPC); /* cp0 delay */
	uasm_i_tlbwi(p);
	uasm_i_jr(p, tmp);
	uasm_i_rfe(p); /* branch delay */
}
/*
 * This places the pte into ENTRYLO0 and writes it with tlbwi
 * or tlbwr as appropriate.  This is because the index register
 * may have the probe fail bit set as a result of a trap on a
 * kseg2 access, i.e. without refill.  Then it returns.
 */
static void __cpuinit
build_r3000_tlb_reload_write(u32 **p, struct uasm_label **l,
			     struct uasm_reloc **r, unsigned int pte,
			     unsigned int tmp)
{
	uasm_i_mfc0(p, tmp, C0_INDEX);
	uasm_i_mtc0(p, pte, C0_ENTRYLO0); /* cp0 delay */
	uasm_il_bltz(p, r, tmp, label_r3000_write_probe_fail); /* cp0 delay */
	uasm_i_mfc0(p, tmp, C0_EPC); /* branch delay */
	uasm_i_tlbwi(p); /* cp0 delay */
	uasm_i_jr(p, tmp);
	uasm_i_rfe(p); /* branch delay */
	uasm_l_r3000_write_probe_fail(l, *p);
	uasm_i_tlbwr(p); /* cp0 delay */
	uasm_i_jr(p, tmp);
	uasm_i_rfe(p); /* branch delay */
}
static void __cpuinit
build_r3000_tlbchange_handler_head(u32 **p, unsigned int pte,
				   unsigned int ptr)
{
	long pgdc = (long)pgd_current;

	uasm_i_mfc0(p, pte, C0_BADVADDR);
	uasm_i_lui(p, ptr, uasm_rel_hi(pgdc)); /* cp0 delay */
	uasm_i_lw(p, ptr, uasm_rel_lo(pgdc), ptr);
	uasm_i_srl(p, pte, pte, 22); /* load delay */
	uasm_i_sll(p, pte, pte, 2);
	uasm_i_addu(p, ptr, ptr, pte);
	uasm_i_mfc0(p, pte, C0_CONTEXT);
	uasm_i_lw(p, ptr, 0, ptr); /* cp0 delay */
	uasm_i_andi(p, pte, pte, 0xffc); /* load delay */
	uasm_i_addu(p, ptr, ptr, pte);
	uasm_i_lw(p, pte, 0, ptr);
	uasm_i_tlbp(p); /* load delay */
}
static void __cpuinit build_r3000_tlb_load_handler(void)
{
	u32 *p = handle_tlbl;
	struct uasm_label *l = labels;
	struct uasm_reloc *r = relocs;

	memset(handle_tlbl, 0, sizeof(handle_tlbl));
	memset(labels, 0, sizeof(labels));
	memset(relocs, 0, sizeof(relocs));

	build_r3000_tlbchange_handler_head(&p, K0, K1);
	build_pte_present(&p, &r, K0, K1, label_nopage_tlbl);
	uasm_i_nop(&p); /* load delay */
	build_make_valid(&p, &r, K0, K1);
	build_r3000_tlb_reload_write(&p, &l, &r, K0, K1);

	uasm_l_nopage_tlbl(&l, p);
	uasm_i_j(&p, (unsigned long)tlb_do_page_fault_0 & 0x0fffffff);
	uasm_i_nop(&p);

	if ((p - handle_tlbl) > FASTPATH_SIZE)
		panic("TLB load handler fastpath space exceeded");

	uasm_resolve_relocs(relocs, labels);
	pr_debug("Wrote TLB load handler fastpath (%u instructions).\n",
		 (unsigned int)(p - handle_tlbl));

	dump_handler(handle_tlbl, ARRAY_SIZE(handle_tlbl));
}
static void __cpuinit build_r3000_tlb_store_handler(void)
{
	u32 *p = handle_tlbs;
	struct uasm_label *l = labels;
	struct uasm_reloc *r = relocs;

	memset(handle_tlbs, 0, sizeof(handle_tlbs));
	memset(labels, 0, sizeof(labels));
	memset(relocs, 0, sizeof(relocs));

	build_r3000_tlbchange_handler_head(&p, K0, K1);
	build_pte_writable(&p, &r, K0, K1, label_nopage_tlbs);
	uasm_i_nop(&p); /* load delay */
	build_make_write(&p, &r, K0, K1);
	build_r3000_tlb_reload_write(&p, &l, &r, K0, K1);

	uasm_l_nopage_tlbs(&l, p);
	uasm_i_j(&p, (unsigned long)tlb_do_page_fault_1 & 0x0fffffff);
	uasm_i_nop(&p);

	if ((p - handle_tlbs) > FASTPATH_SIZE)
		panic("TLB store handler fastpath space exceeded");

	uasm_resolve_relocs(relocs, labels);
	pr_debug("Wrote TLB store handler fastpath (%u instructions).\n",
		 (unsigned int)(p - handle_tlbs));

	dump_handler(handle_tlbs, ARRAY_SIZE(handle_tlbs));
}
static void __cpuinit build_r3000_tlb_modify_handler(void)
{
	u32 *p = handle_tlbm;
	struct uasm_label *l = labels;
	struct uasm_reloc *r = relocs;

	memset(handle_tlbm, 0, sizeof(handle_tlbm));
	memset(labels, 0, sizeof(labels));
	memset(relocs, 0, sizeof(relocs));

	build_r3000_tlbchange_handler_head(&p, K0, K1);
	build_pte_modifiable(&p, &r, K0, K1, label_nopage_tlbm);
	uasm_i_nop(&p); /* load delay */
	build_make_write(&p, &r, K0, K1);
	build_r3000_pte_reload_tlbwi(&p, K0, K1);

	uasm_l_nopage_tlbm(&l, p);
	uasm_i_j(&p, (unsigned long)tlb_do_page_fault_1 & 0x0fffffff);
	uasm_i_nop(&p);

	if ((p - handle_tlbm) > FASTPATH_SIZE)
		panic("TLB modify handler fastpath space exceeded");

	uasm_resolve_relocs(relocs, labels);
	pr_debug("Wrote TLB modify handler fastpath (%u instructions).\n",
		 (unsigned int)(p - handle_tlbm));

	dump_handler(handle_tlbm, ARRAY_SIZE(handle_tlbm));
}
#endif /* CONFIG_MIPS_PGD_C0_CONTEXT */
/*
 * R4000 style TLB load/store/modify handlers.
 */
static void __cpuinit
build_r4000_tlbchange_handler_head(u32 **p, struct uasm_label **l,
				   struct uasm_reloc **r, unsigned int pte,
				   unsigned int ptr)
{
#ifdef CONFIG_64BIT
	build_get_pmde64(p, l, r, pte, ptr); /* get pmd in ptr */
#else
	build_get_pgde32(p, pte, ptr); /* get pgd in ptr */
#endif

#ifdef CONFIG_HUGETLB_PAGE
	/*
	 * For huge tlb entries, pmd doesn't contain an address but
	 * instead contains the tlb pte. Check the PAGE_HUGE bit and
	 * see if we need to jump to huge tlb processing.
	 */
	build_is_huge_pte(p, r, pte, ptr, label_tlb_huge_update);
#endif

	UASM_i_MFC0(p, pte, C0_BADVADDR);
	UASM_i_LW(p, ptr, 0, ptr);
	UASM_i_SRL(p, pte, pte, PAGE_SHIFT + PTE_ORDER - PTE_T_LOG2);
	uasm_i_andi(p, pte, pte, (PTRS_PER_PTE - 1) << PTE_T_LOG2);
	UASM_i_ADDU(p, ptr, ptr, pte);

#ifdef CONFIG_SMP
	uasm_l_smp_pgtable_change(l, *p);
#endif
	iPTE_LW(p, pte, ptr); /* get even pte */
	if (!m4kc_tlbp_war())
		build_tlb_probe_entry(p);
}
static void __cpuinit
build_r4000_tlbchange_handler_tail(u32 **p, struct uasm_label **l,
				   struct uasm_reloc **r, unsigned int tmp,
				   unsigned int ptr)
{
	uasm_i_ori(p, ptr, ptr, sizeof(pte_t));
	uasm_i_xori(p, ptr, ptr, sizeof(pte_t));
	build_update_entries(p, tmp, ptr);
	build_tlb_write_entry(p, l, r, tlb_indexed);
	uasm_l_leave(l, *p);
	uasm_i_eret(p); /* return from trap */

#ifdef CONFIG_64BIT
	build_get_pgd_vmalloc64(p, l, r, tmp, ptr, not_refill);
#endif
}
static void __cpuinit build_r4000_tlb_load_handler(void)
{
	u32 *p = handle_tlbl;
	struct uasm_label *l = labels;
	struct uasm_reloc *r = relocs;

	memset(handle_tlbl, 0, sizeof(handle_tlbl));
	memset(labels, 0, sizeof(labels));
	memset(relocs, 0, sizeof(relocs));

	if (bcm1250_m3_war()) {
		unsigned int segbits = 44;

		uasm_i_dmfc0(&p, K0, C0_BADVADDR);
		uasm_i_dmfc0(&p, K1, C0_ENTRYHI);
		uasm_i_xor(&p, K0, K0, K1);
		uasm_i_dsrl_safe(&p, K1, K0, 62);
		uasm_i_dsrl_safe(&p, K0, K0, 12 + 1);
		uasm_i_dsll_safe(&p, K0, K0, 64 + 12 + 1 - segbits);
		uasm_i_or(&p, K0, K0, K1);
		uasm_il_bnez(&p, &r, K0, label_leave);
		/* No need for uasm_i_nop */
	}

	build_r4000_tlbchange_handler_head(&p, &l, &r, K0, K1);
	build_pte_present(&p, &r, K0, K1, label_nopage_tlbl);
	if (m4kc_tlbp_war())
		build_tlb_probe_entry(&p);

	if (kernel_uses_smartmips_rixi) {
		/*
		 * If the page is not _PAGE_VALID, RI or XI could not
		 * have triggered it.  Skip the expensive test..
		 */
		if (use_bbit_insns()) {
			uasm_il_bbit0(&p, &r, K0, ilog2(_PAGE_VALID),
				      label_tlbl_goaround1);
		} else {
			uasm_i_andi(&p, K0, K0, _PAGE_VALID);
			uasm_il_beqz(&p, &r, K0, label_tlbl_goaround1);
		}
		uasm_i_nop(&p);

		uasm_i_tlbr(&p);
		/* Examine  entrylo 0 or 1 based on ptr. */
		if (use_bbit_insns()) {
			uasm_i_bbit0(&p, K1, ilog2(sizeof(pte_t)), 8);
		} else {
			uasm_i_andi(&p, K0, K1, sizeof(pte_t));
			uasm_i_beqz(&p, K0, 8);
		}

		UASM_i_MFC0(&p, K0, C0_ENTRYLO0); /* load it in the delay slot*/
		UASM_i_MFC0(&p, K0, C0_ENTRYLO1); /* load it if ptr is odd */
		/*
		 * If the entryLo (now in K0) is valid (bit 1), RI or
		 * XI must have triggered it.
		 */
		if (use_bbit_insns()) {
			uasm_il_bbit1(&p, &r, K0, 1, label_nopage_tlbl);
			/* Reload the PTE value */
			iPTE_LW(&p, K0, K1);
			uasm_l_tlbl_goaround1(&l, p);
		} else {
			uasm_i_andi(&p, K0, K0, 2);
			uasm_il_bnez(&p, &r, K0, label_nopage_tlbl);
			uasm_l_tlbl_goaround1(&l, p);
			/* Reload the PTE value */
			iPTE_LW(&p, K0, K1);
		}
	}
	build_make_valid(&p, &r, K0, K1);
	build_r4000_tlbchange_handler_tail(&p, &l, &r, K0, K1);

#ifdef CONFIG_HUGETLB_PAGE
	/*
	 * This is the entry point when build_r4000_tlbchange_handler_head
	 * spots a huge page.
	 */
	uasm_l_tlb_huge_update(&l, p);
	iPTE_LW(&p, K0, K1);
	build_pte_present(&p, &r, K0, K1, label_nopage_tlbl);
	build_tlb_probe_entry(&p);

	if (kernel_uses_smartmips_rixi) {
		/*
		 * If the page is not _PAGE_VALID, RI or XI could not
		 * have triggered it.  Skip the expensive test..
		 */
		if (use_bbit_insns()) {
			uasm_il_bbit0(&p, &r, K0, ilog2(_PAGE_VALID),
				      label_tlbl_goaround2);
		} else {
			uasm_i_andi(&p, K0, K0, _PAGE_VALID);
			uasm_il_beqz(&p, &r, K0, label_tlbl_goaround2);
		}
		uasm_i_nop(&p);

		uasm_i_tlbr(&p);
		/* Examine  entrylo 0 or 1 based on ptr. */
		if (use_bbit_insns()) {
			uasm_i_bbit0(&p, K1, ilog2(sizeof(pte_t)), 8);
		} else {
			uasm_i_andi(&p, K0, K1, sizeof(pte_t));
			uasm_i_beqz(&p, K0, 8);
		}
		UASM_i_MFC0(&p, K0, C0_ENTRYLO0); /* load it in the delay slot*/
		UASM_i_MFC0(&p, K0, C0_ENTRYLO1); /* load it if ptr is odd */
		/*
		 * If the entryLo (now in K0) is valid (bit 1), RI or
		 * XI must have triggered it.
		 */
		if (use_bbit_insns()) {
			uasm_il_bbit0(&p, &r, K0, 1, label_tlbl_goaround2);
		} else {
			uasm_i_andi(&p, K0, K0, 2);
			uasm_il_beqz(&p, &r, K0, label_tlbl_goaround2);
		}
		/* Reload the PTE value */
		iPTE_LW(&p, K0, K1);

		/*
		 * We clobbered C0_PAGEMASK, restore it.  On the other branch
		 * it is restored in build_huge_tlb_write_entry.
		 */
		build_restore_pagemask(&p, &r, K0, label_nopage_tlbl, 0);

		uasm_l_tlbl_goaround2(&l, p);
	}
	uasm_i_ori(&p, K0, K0, (_PAGE_ACCESSED | _PAGE_VALID));
	build_huge_handler_tail(&p, &r, &l, K0, K1);
#endif

	uasm_l_nopage_tlbl(&l, p);
	uasm_i_j(&p, (unsigned long)tlb_do_page_fault_0 & 0x0fffffff);
	uasm_i_nop(&p);

	if ((p - handle_tlbl) > FASTPATH_SIZE)
		panic("TLB load handler fastpath space exceeded");

	uasm_resolve_relocs(relocs, labels);
	pr_debug("Wrote TLB load handler fastpath (%u instructions).\n",
		 (unsigned int)(p - handle_tlbl));

	dump_handler(handle_tlbl, ARRAY_SIZE(handle_tlbl));
}
static void __cpuinit build_r4000_tlb_store_handler(void)
{
	u32 *p = handle_tlbs;
	struct uasm_label *l = labels;
	struct uasm_reloc *r = relocs;

	memset(handle_tlbs, 0, sizeof(handle_tlbs));
	memset(labels, 0, sizeof(labels));
	memset(relocs, 0, sizeof(relocs));

	build_r4000_tlbchange_handler_head(&p, &l, &r, K0, K1);
	build_pte_writable(&p, &r, K0, K1, label_nopage_tlbs);
	if (m4kc_tlbp_war())
		build_tlb_probe_entry(&p);
	build_make_write(&p, &r, K0, K1);
	build_r4000_tlbchange_handler_tail(&p, &l, &r, K0, K1);

#ifdef CONFIG_HUGETLB_PAGE
	/*
	 * This is the entry point when
	 * build_r4000_tlbchange_handler_head spots a huge page.
	 */
	uasm_l_tlb_huge_update(&l, p);
	iPTE_LW(&p, K0, K1);
	build_pte_writable(&p, &r, K0, K1, label_nopage_tlbs);
	build_tlb_probe_entry(&p);
	uasm_i_ori(&p, K0, K0,
		   _PAGE_ACCESSED | _PAGE_MODIFIED | _PAGE_VALID | _PAGE_DIRTY);
	build_huge_handler_tail(&p, &r, &l, K0, K1);
#endif

	uasm_l_nopage_tlbs(&l, p);
	uasm_i_j(&p, (unsigned long)tlb_do_page_fault_1 & 0x0fffffff);
	uasm_i_nop(&p);

	if ((p - handle_tlbs) > FASTPATH_SIZE)
		panic("TLB store handler fastpath space exceeded");

	uasm_resolve_relocs(relocs, labels);
	pr_debug("Wrote TLB store handler fastpath (%u instructions).\n",
		 (unsigned int)(p - handle_tlbs));

	dump_handler(handle_tlbs, ARRAY_SIZE(handle_tlbs));
}
static void __cpuinit build_r4000_tlb_modify_handler(void)
{
	u32 *p = handle_tlbm;
	struct uasm_label *l = labels;
	struct uasm_reloc *r = relocs;

	memset(handle_tlbm, 0, sizeof(handle_tlbm));
	memset(labels, 0, sizeof(labels));
	memset(relocs, 0, sizeof(relocs));

	build_r4000_tlbchange_handler_head(&p, &l, &r, K0, K1);
	build_pte_modifiable(&p, &r, K0, K1, label_nopage_tlbm);
	if (m4kc_tlbp_war())
		build_tlb_probe_entry(&p);
	/* Present and writable bits set, set accessed and dirty bits. */
	build_make_write(&p, &r, K0, K1);
	build_r4000_tlbchange_handler_tail(&p, &l, &r, K0, K1);

#ifdef CONFIG_HUGETLB_PAGE
	/*
	 * This is the entry point when
	 * build_r4000_tlbchange_handler_head spots a huge page.
	 */
	uasm_l_tlb_huge_update(&l, p);
	iPTE_LW(&p, K0, K1);
	build_pte_modifiable(&p, &r, K0, K1, label_nopage_tlbm);
	build_tlb_probe_entry(&p);
	uasm_i_ori(&p, K0, K0,
		   _PAGE_ACCESSED | _PAGE_MODIFIED | _PAGE_VALID | _PAGE_DIRTY);
	build_huge_handler_tail(&p, &r, &l, K0, K1);
#endif

	uasm_l_nopage_tlbm(&l, p);
	uasm_i_j(&p, (unsigned long)tlb_do_page_fault_1 & 0x0fffffff);
	uasm_i_nop(&p);

	if ((p - handle_tlbm) > FASTPATH_SIZE)
		panic("TLB modify handler fastpath space exceeded");

	uasm_resolve_relocs(relocs, labels);
	pr_debug("Wrote TLB modify handler fastpath (%u instructions).\n",
		 (unsigned int)(p - handle_tlbm));

	dump_handler(handle_tlbm, ARRAY_SIZE(handle_tlbm));
}
void __cpuinit build_tlb_refill_handler(void)
{
	/*
	 * The refill handler is generated per-CPU, multi-node systems
	 * may have local storage for it. The other handlers are only
	 * needed once.
	 */
	static int run_once = 0;

#ifdef CONFIG_64BIT
	check_for_high_segbits = current_cpu_data.vmbits >
		(PGDIR_SHIFT + PGD_ORDER + PAGE_SHIFT - 3);
#endif

	switch (current_cpu_type()) {
	case CPU_R2000:
	case CPU_R3000:
	case CPU_R3000A:
	case CPU_R3081E:
	case CPU_TX3912:
	case CPU_TX3922:
	case CPU_TX3927:
#ifndef CONFIG_MIPS_PGD_C0_CONTEXT
		build_r3000_tlb_refill_handler();
		if (!run_once) {
			build_r3000_tlb_load_handler();
			build_r3000_tlb_store_handler();
			build_r3000_tlb_modify_handler();
			run_once++;
		}
#else
		panic("No R3000 TLB refill handler");
#endif
		break;

	case CPU_R6000:
	case CPU_R6000A:
		panic("No R6000 TLB refill handler yet");
		break;

	case CPU_R8000:
		panic("No R8000 TLB refill handler yet");
		break;

	default:
		if (!run_once) {
#ifdef CONFIG_MIPS_PGD_C0_CONTEXT
			build_r4000_setup_pgd();
#endif
			build_r4000_tlb_load_handler();
			build_r4000_tlb_store_handler();
			build_r4000_tlb_modify_handler();
			run_once++;
		}
		build_r4000_tlb_refill_handler();
	}
}
void __cpuinit flush_tlb_handlers(void)
{
	local_flush_icache_range((unsigned long)handle_tlbl,
			   (unsigned long)handle_tlbl + sizeof(handle_tlbl));
	local_flush_icache_range((unsigned long)handle_tlbs,
			   (unsigned long)handle_tlbs + sizeof(handle_tlbs));
	local_flush_icache_range((unsigned long)handle_tlbm,
			   (unsigned long)handle_tlbm + sizeof(handle_tlbm));
#ifdef CONFIG_MIPS_PGD_C0_CONTEXT
	local_flush_icache_range((unsigned long)tlbmiss_handler_setup_pgd,
			   (unsigned long)tlbmiss_handler_setup_pgd +
			   sizeof(tlbmiss_handler_setup_pgd));
#endif
}