--- /dev/null
+#ifndef __ASM_TLBEX_H
+#define __ASM_TLBEX_H
+
+#include <asm/uasm.h>
+
+/*
+ * Write a random or indexed TLB entry, taking care of the hazards that
+ * follow the preceding mtc0 and precede the following eret.
+ */
+enum tlb_write_entry {
+	tlb_random,
+	tlb_indexed
+};
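+
+/*
+ * tlb_random is emitted as a tlbwr (the hardware picks the slot);
+ * tlb_indexed as a tlbwi (the slot is taken from c0_index).
+ */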
+
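+/*
+ * C0 kscratch register holding the current pgd, or -1 when the pgd is
+ * not cached in a scratch register and must be loaded from memory.
+ */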
+extern int pgd_reg;
+
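+/*
+ * Helpers that emit uasm instructions at *p for the usual TLB refill
+ * steps.  TMP is always clobbered; PTR is left holding the pmd entry
+ * (build_get_pmde64, 64-bit), the pgd entry (build_get_pgde32, 32-bit)
+ * or the pte pointer (build_get_ptep).  build_update_entries loads the
+ * even/odd pte pair at PTEP into EntryLo0/EntryLo1, and
+ * build_tlb_write_entry writes the entry out as selected by wmode.
+ */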
+void build_get_pmde64(u32 **p, struct uasm_label **l, struct uasm_reloc **r,
+                      unsigned int tmp, unsigned int ptr);
+void build_get_pgde32(u32 **p, unsigned int tmp, unsigned int ptr);
+void build_get_ptep(u32 **p, unsigned int tmp, unsigned int ptr);
+void build_update_entries(u32 **p, unsigned int tmp, unsigned int ptep);
+void build_tlb_write_entry(u32 **p, struct uasm_label **l,
+                           struct uasm_reloc **r,
+                           enum tlb_write_entry wmode);
+
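+/*
+ * Sketch of how a module might chain these to generate a 64-bit TLB
+ * refill handler, in the style of arch/mips/kvm's entry code.  The
+ * buffer "handler", the "labels"/"relocs" arrays and the scratch
+ * register numbers K0/K1 are all the caller's own, not part of this
+ * interface:
+ *
+ *	u32 *p = handler;
+ *	struct uasm_label *l = labels;
+ *	struct uasm_reloc *r = relocs;
+ *
+ *	build_get_pmde64(&p, &l, &r, K0, K1);	- pmd entry in K1
+ *	build_get_ptep(&p, K0, K1);		- pte pointer in K1
+ *	build_update_entries(&p, K0, K1);	- fill EntryLo0/EntryLo1
+ *	build_tlb_write_entry(&p, &l, &r, tlb_random);
+ *	uasm_i_eret(&p);			- return from the exception
+ *	uasm_resolve_relocs(relocs, labels);
+ */
+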
+#endif /* __ASM_TLBEX_H */
#include <asm/war.h>
#include <asm/uasm.h>
#include <asm/setup.h>
+#include <asm/tlbex.h>
static int mips_xpa_disabled;
}
static int scratch_reg;
-static int pgd_reg;
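+/* shared with modules that generate their own TLB handlers, via asm/tlbex.h */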
+int pgd_reg;
+EXPORT_SYMBOL_GPL(pgd_reg);
enum vmalloc64_mode {not_refill, refill_scratch, refill_noscratch};
static struct work_registers build_get_work_registers(u32 **p)
}
}
-/*
- * Write random or indexed TLB entry, and care about the hazards from
- * the preceding mtc0 and for the following eret.
- */
-enum tlb_write_entry { tlb_random, tlb_indexed };
-
-static void build_tlb_write_entry(u32 **p, struct uasm_label **l,
- struct uasm_reloc **r,
- enum tlb_write_entry wmode)
+void build_tlb_write_entry(u32 **p, struct uasm_label **l,
+                           struct uasm_reloc **r,
+                           enum tlb_write_entry wmode)
{
void(*tlbw)(u32 **) = NULL;
break;
}
}
+EXPORT_SYMBOL_GPL(build_tlb_write_entry);
static __maybe_unused void build_convert_pte_to_entrylo(u32 **p,
unsigned int reg)
* TMP and PTR are scratch.
* TMP will be clobbered, PTR will hold the pmd entry.
*/
-static void
-build_get_pmde64(u32 **p, struct uasm_label **l, struct uasm_reloc **r,
- unsigned int tmp, unsigned int ptr)
+void build_get_pmde64(u32 **p, struct uasm_label **l, struct uasm_reloc **r,
+                      unsigned int tmp, unsigned int ptr)
{
#ifndef CONFIG_MIPS_PGD_C0_CONTEXT
long pgdc = (long)pgd_current;
uasm_i_daddu(p, ptr, ptr, tmp); /* add in pmd offset */
#endif
}
+EXPORT_SYMBOL_GPL(build_get_pmde64);
/*
* TMP and PTR are scratch.
* TMP will be clobbered, PTR will hold the pgd entry.
*/
-static void __maybe_unused
-build_get_pgde32(u32 **p, unsigned int tmp, unsigned int ptr)
+void build_get_pgde32(u32 **p, unsigned int tmp, unsigned int ptr)
{
if (pgd_reg != -1) {
/* pgd is in pgd_reg */
uasm_i_sll(p, tmp, tmp, PGD_T_LOG2);
uasm_i_addu(p, ptr, ptr, tmp); /* add in pgd offset */
}
+EXPORT_SYMBOL_GPL(build_get_pgde32);
#endif /* !CONFIG_64BIT */
uasm_i_andi(p, ctx, ctx, mask);
}
-static void build_get_ptep(u32 **p, unsigned int tmp, unsigned int ptr)
+void build_get_ptep(u32 **p, unsigned int tmp, unsigned int ptr)
{
/*
* Bug workaround for the Nevada. It seems as if under certain
build_adjust_context(p, tmp);
UASM_i_ADDU(p, ptr, ptr, tmp); /* add in offset */
}
+EXPORT_SYMBOL_GPL(build_get_ptep);
-static void build_update_entries(u32 **p, unsigned int tmp, unsigned int ptep)
+void build_update_entries(u32 **p, unsigned int tmp, unsigned int ptep)
{
int pte_off_even = 0;
int pte_off_odd = sizeof(pte_t);
UASM_i_MTC0(p, 0, C0_ENTRYLO1);
UASM_i_MTC0(p, ptep, C0_ENTRYLO1); /* load it */
}
+EXPORT_SYMBOL_GPL(build_update_entries);
struct mips_huge_tlb_info {
int huge_pte;