]> git.proxmox.com Git - mirror_ubuntu-hirsute-kernel.git/commitdiff
csky: Fixup cpu speculative execution to IO area
authorGuo Ren <guoren@linux.alibaba.com>
Sat, 28 Mar 2020 11:14:37 +0000 (19:14 +0800)
committerGuo Ren <guoren@linux.alibaba.com>
Fri, 3 Apr 2020 04:40:07 +0000 (12:40 +0800)
For the memory size ( > 512MB, < 1GB), the MSA setting is:

 - SSEG0: PHY_START        , PHY_START + 512MB
 - SSEG1: PHY_START + 512MB, PHY_START + 1GB

But when the real memory is less than 1GB, there is a gap between the
end of memory and the 1GB border. The CPU could speculatively
execute into that gap, and if the bus cannot respond to the CPU
request for that gap, then a crash will happen.

Now make the setting with:

 - SSEG0: PHY_START        , PHY_START + 512MB (no change)
 - SSEG1: Disabled (we use highmem for the 512MB~1GB memory range)

We also deprecated the zhole_size[] settings; they are only used by
ARM-style CPUs. All memory gaps should be described with Reserved
settings in the dts on csky systems.

Signed-off-by: Guo Ren <guoren@linux.alibaba.com>
arch/csky/abiv1/inc/abi/entry.h
arch/csky/abiv2/inc/abi/entry.h
arch/csky/kernel/head.S
arch/csky/kernel/setup.c
arch/csky/kernel/smp.c

index f35a9f3315ee6f62b126eb54b46f72a90d22da41..5056ebb902d18336ac8208da2cbdeee256b920da 100644 (file)
        addi    r6, 0xe
        cpwcr   r6, cpcr30
 
-       lsri    r6, 28
-       addi    r6, 2
-       lsli    r6, 28
-       addi    r6, 0xe
+       movi    r6, 0
        cpwcr   r6, cpcr31
 .endm
 
index 6a404393d0f404358890c8636c954b8db0e39d91..a99aff555a0ac1e979ed4aa8378ab80cc1257b87 100644 (file)
        addi    r6, 0x1ce
        mtcr    r6, cr<30, 15> /* Set MSA0 */
 
-       lsri    r6, 28
-       addi    r6, 2
-       lsli    r6, 28
-       addi    r6, 0x1ce
-       mtcr    r6, cr<31, 15> /* Set MSA1 */
+       movi    r6, 0
+       mtcr    r6, cr<31, 15> /* Clr MSA1 */
 
        /* enable MMU */
        mfcr    r6, cr18
index 61989f9241c021b8e4d12b554aed9247fd14641d..17ed9d2504807dfa385f5da0ef380b635a388c99 100644 (file)
@@ -21,6 +21,11 @@ END(_start)
 ENTRY(_start_smp_secondary)
        SETUP_MMU
 
+       /* copy msa1 from CPU0 */
+       lrw     r6, secondary_msa1
+       ld.w    r6, (r6, 0)
+       mtcr    r6, cr<31, 15>
+
        /* set stack point */
        lrw     r6, secondary_stack
        ld.w    r6, (r6, 0)
index 3821e55742f46f0070688f1e81161aefb8760f5c..819a9a7bf786dc2dcaedd968ff56538a5f26c0ad 100644 (file)
@@ -24,26 +24,9 @@ struct screen_info screen_info = {
 };
 #endif
 
-phys_addr_t __init_memblock memblock_end_of_REG0(void)
-{
-       return (memblock.memory.regions[0].base +
-               memblock.memory.regions[0].size);
-}
-
-phys_addr_t __init_memblock memblock_start_of_REG1(void)
-{
-       return memblock.memory.regions[1].base;
-}
-
-size_t __init_memblock memblock_size_of_REG1(void)
-{
-       return memblock.memory.regions[1].size;
-}
-
 static void __init csky_memblock_init(void)
 {
        unsigned long zone_size[MAX_NR_ZONES];
-       unsigned long zhole_size[MAX_NR_ZONES];
        signed long size;
 
        memblock_reserve(__pa(_stext), _end - _stext);
@@ -54,54 +37,36 @@ static void __init csky_memblock_init(void)
        memblock_dump_all();
 
        memset(zone_size, 0, sizeof(zone_size));
-       memset(zhole_size, 0, sizeof(zhole_size));
 
        min_low_pfn = PFN_UP(memblock_start_of_DRAM());
-       max_pfn     = PFN_DOWN(memblock_end_of_DRAM());
-
-       max_low_pfn = PFN_UP(memblock_end_of_REG0());
-       if (max_low_pfn == 0)
-               max_low_pfn = max_pfn;
+       max_low_pfn = max_pfn = PFN_DOWN(memblock_end_of_DRAM());
 
        size = max_pfn - min_low_pfn;
 
-       if (memblock.memory.cnt > 1) {
-               zone_size[ZONE_NORMAL]  =
-                       PFN_DOWN(memblock_start_of_REG1()) - min_low_pfn;
-               zhole_size[ZONE_NORMAL] =
-                       PFN_DOWN(memblock_start_of_REG1()) - max_low_pfn;
+       if (size <= PFN_DOWN(SSEG_SIZE - PHYS_OFFSET_OFFSET))
+               zone_size[ZONE_NORMAL] = size;
+       else if (size < PFN_DOWN(LOWMEM_LIMIT - PHYS_OFFSET_OFFSET)) {
+               zone_size[ZONE_NORMAL] =
+                               PFN_DOWN(SSEG_SIZE - PHYS_OFFSET_OFFSET);
+               max_low_pfn = min_low_pfn + zone_size[ZONE_NORMAL];
        } else {
-               if (size <= PFN_DOWN(LOWMEM_LIMIT - PHYS_OFFSET_OFFSET))
-                       zone_size[ZONE_NORMAL] = max_pfn - min_low_pfn;
-               else {
-                       zone_size[ZONE_NORMAL] =
+               zone_size[ZONE_NORMAL] =
                                PFN_DOWN(LOWMEM_LIMIT - PHYS_OFFSET_OFFSET);
-                       max_low_pfn = min_low_pfn + zone_size[ZONE_NORMAL];
-               }
+               max_low_pfn = min_low_pfn + zone_size[ZONE_NORMAL];
+               write_mmu_msa1(read_mmu_msa0() + SSEG_SIZE);
        }
 
 #ifdef CONFIG_HIGHMEM
-       size = 0;
-       if (memblock.memory.cnt > 1) {
-               size = PFN_DOWN(memblock_size_of_REG1());
-               highstart_pfn = PFN_DOWN(memblock_start_of_REG1());
-       } else {
-               size = max_pfn - min_low_pfn -
-                       PFN_DOWN(LOWMEM_LIMIT - PHYS_OFFSET_OFFSET);
-               highstart_pfn =  min_low_pfn +
-                       PFN_DOWN(LOWMEM_LIMIT - PHYS_OFFSET_OFFSET);
-       }
-
-       if (size > 0)
-               zone_size[ZONE_HIGHMEM] = size;
+       zone_size[ZONE_HIGHMEM] = max_pfn - max_low_pfn;
 
-       highend_pfn = max_pfn;
+       highstart_pfn = max_low_pfn;
+       highend_pfn   = max_pfn;
 #endif
        memblock_set_current_limit(PFN_PHYS(max_low_pfn));
 
        dma_contiguous_reserve(0);
 
-       free_area_init_node(0, zone_size, min_low_pfn, zhole_size);
+       free_area_init_node(0, zone_size, min_low_pfn, NULL);
 }
 
 void __init setup_arch(char **cmdline_p)
index df2e2174dbd046aa02fc0d89669363f627e79307..b5c5bc3afeb5c8c1bf97bd3d24fd6acdbfc6a8e6 100644 (file)
@@ -159,6 +159,8 @@ volatile unsigned int secondary_hint;
 volatile unsigned int secondary_ccr;
 volatile unsigned int secondary_stack;
 
+unsigned long secondary_msa1;
+
 int __cpu_up(unsigned int cpu, struct task_struct *tidle)
 {
        unsigned long mask = 1 << cpu;
@@ -167,6 +169,7 @@ int __cpu_up(unsigned int cpu, struct task_struct *tidle)
                (unsigned int) task_stack_page(tidle) + THREAD_SIZE - 8;
        secondary_hint = mfcr("cr31");
        secondary_ccr  = mfcr("cr18");
+       secondary_msa1 = read_mmu_msa1();
 
        /*
         * Because other CPUs are in reset status, we must flush data