MIPS: mm: Simplify build_update_entries
author		Paul Burton <paul.burton@imgtec.com>
		Tue, 19 Apr 2016 08:25:09 +0000 (09:25 +0100)
committer	Ralf Baechle <ralf@linux-mips.org>
		Fri, 13 May 2016 13:30:25 +0000 (15:30 +0200)
We can simplify build_update_entries by unifying the case of 36-bit
physical addressing on MIPS32 with the general case: use the pte_off_
variables in all cases and handle the trivial _PAGE_GLOBAL_SHIFT == 0
case in build_convert_pte_to_entrylo. This leaves XPA as the only
special case.
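
For reference, the unified offset computation (a condensed sketch of the
change in the diff below, not additional code) looks like this;
offsetof(pte_t, pte_high) only comes into play on a MIPS32 kernel with
64-bit physical addresses, since only then does pte_t carry a pte_high
field:

	int pte_off_even = 0;
	int pte_off_odd  = sizeof(pte_t);

	#if defined(CONFIG_CPU_MIPS32) && defined(CONFIG_PHYS_ADDR_T_64BIT)
		/* The low 32 bits of EntryLo are stored in pte_high */
		pte_off_even += offsetof(pte_t, pte_high);
		pte_off_odd  += offsetof(pte_t, pte_high);
	#endif

Both the XPA path and the general path then load the even/odd PTEs via
these offsets, and build_convert_pte_to_entrylo returns early when
_PAGE_GLOBAL_SHIFT == 0 because the pte_t is already in EntryLo format.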

Signed-off-by: Paul Burton <paul.burton@imgtec.com>
Reviewed-by: James Hogan <james.hogan@imgtec.com>
Cc: Paul Gortmaker <paul.gortmaker@windriver.com>
Cc: linux-mips@linux-mips.org
Cc: linux-kernel@vger.kernel.org
Patchwork: https://patchwork.linux-mips.org/patch/13123/
Signed-off-by: Ralf Baechle <ralf@linux-mips.org>
arch/mips/mm/tlbex.c

index 6f20b42be97989020a8ff816d9fcc4f862b10477..588bde3bdbe7d7e889112ca94ba1a93b2d23cb43 100644
@@ -631,6 +631,11 @@ static void build_tlb_write_entry(u32 **p, struct uasm_label **l,
 static __maybe_unused void build_convert_pte_to_entrylo(u32 **p,
                                                        unsigned int reg)
 {
+       if (_PAGE_GLOBAL_SHIFT == 0) {
+               /* pte_t is already in EntryLo format */
+               return;
+       }
+
        if (cpu_has_rixi && _PAGE_NO_EXEC) {
                if (fill_includes_sw_bits) {
                        UASM_i_ROTR(p, reg, reg, ilog2(_PAGE_GLOBAL));
@@ -1011,10 +1016,16 @@ static void build_get_ptep(u32 **p, unsigned int tmp, unsigned int ptr)
 
 static void build_update_entries(u32 **p, unsigned int tmp, unsigned int ptep)
 {
-       if (config_enabled(CONFIG_XPA)) {
-               int pte_off_even = sizeof(pte_t) / 2;
-               int pte_off_odd = pte_off_even + sizeof(pte_t);
+       int pte_off_even = 0;
+       int pte_off_odd = sizeof(pte_t);
 
+#if defined(CONFIG_CPU_MIPS32) && defined(CONFIG_PHYS_ADDR_T_64BIT)
+       /* The low 32 bits of EntryLo is stored in pte_high */
+       pte_off_even += offsetof(pte_t, pte_high);
+       pte_off_odd += offsetof(pte_t, pte_high);
+#endif
+
+       if (config_enabled(CONFIG_XPA)) {
                uasm_i_lw(p, tmp, pte_off_even, ptep); /* even pte */
                UASM_i_ROTR(p, tmp, tmp, ilog2(_PAGE_GLOBAL));
                UASM_i_MTC0(p, tmp, C0_ENTRYLO0);
@@ -1033,24 +1044,8 @@ static void build_update_entries(u32 **p, unsigned int tmp, unsigned int ptep)
                return;
        }
 
-       /*
-        * 64bit address support (36bit on a 32bit CPU) in a 32bit
-        * Kernel is a special case. Only a few CPUs use it.
-        */
-       if (config_enabled(CONFIG_PHYS_ADDR_T_64BIT) && !cpu_has_64bits) {
-               int pte_off_even = sizeof(pte_t) / 2;
-               int pte_off_odd = pte_off_even + sizeof(pte_t);
-
-               uasm_i_lw(p, tmp, pte_off_even, ptep); /* even pte */
-               UASM_i_MTC0(p, tmp, C0_ENTRYLO0);
-
-               uasm_i_lw(p, ptep, pte_off_odd, ptep); /* odd pte */
-               UASM_i_MTC0(p, ptep, C0_ENTRYLO1);
-               return;
-       }
-
-       UASM_i_LW(p, tmp, 0, ptep); /* get even pte */
-       UASM_i_LW(p, ptep, sizeof(pte_t), ptep); /* get odd pte */
+       UASM_i_LW(p, tmp, pte_off_even, ptep); /* get even pte */
+       UASM_i_LW(p, ptep, pte_off_odd, ptep); /* get odd pte */
        if (r45k_bvahwbug())
                build_tlb_probe_entry(p);
        build_convert_pte_to_entrylo(p, tmp);