git.proxmox.com Git - mirror_ubuntu-bionic-kernel.git/commitdiff
powerpc/85xx: add HOTPLUG_CPU support
author Zhao Chenhui <chenhui.zhao@freescale.com>
Fri, 20 Jul 2012 12:42:36 +0000 (20:42 +0800)
committer Kumar Gala <galak@kernel.crashing.org>
Wed, 12 Sep 2012 19:57:08 +0000 (14:57 -0500)
Add support to disable and re-enable individual cores at runtime on
MPC85xx/QorIQ SMP machines. Currently supports e500v1/e500v2 cores.

MPC85xx machines use the ePAPR spin-table in the boot page for CPU kick-off.
This patch uses the boot page from the bootloader to boot a core at runtime.
It supports 32-bit and 36-bit physical addresses.

Signed-off-by: Li Yang <leoli@freescale.com>
Signed-off-by: Jin Qing <b24347@freescale.com>
Signed-off-by: Zhao Chenhui <chenhui.zhao@freescale.com>
Signed-off-by: Kumar Gala <galak@kernel.crashing.org>
arch/powerpc/Kconfig
arch/powerpc/include/asm/cacheflush.h
arch/powerpc/include/asm/smp.h
arch/powerpc/kernel/head_fsl_booke.S
arch/powerpc/platforms/85xx/smp.c

index 98e513b6270968b4f69736ce42f2e63522899ce1..b8bab10bd0f1dea3c1bafd69c3a74bf91662db2f 100644 (file)
@@ -215,7 +215,8 @@ config ARCH_HIBERNATION_POSSIBLE
 config ARCH_SUSPEND_POSSIBLE
        def_bool y
        depends on ADB_PMU || PPC_EFIKA || PPC_LITE5200 || PPC_83xx || \
-                  (PPC_85xx && !SMP) || PPC_86xx || PPC_PSERIES || 44x || 40x
+                  (PPC_85xx && !PPC_E500MC) || PPC_86xx || PPC_PSERIES \
+                  || 44x || 40x
 
 config PPC_DCR_NATIVE
        bool
@@ -328,7 +329,8 @@ config SWIOTLB
 
 config HOTPLUG_CPU
        bool "Support for enabling/disabling CPUs"
-       depends on SMP && HOTPLUG && EXPERIMENTAL && (PPC_PSERIES || PPC_PMAC || PPC_POWERNV)
+       depends on SMP && HOTPLUG && EXPERIMENTAL && (PPC_PSERIES || \
+       PPC_PMAC || PPC_POWERNV || (PPC_85xx && !PPC_E500MC))
        ---help---
          Say Y here to be able to disable and re-enable individual
          CPUs at runtime on SMP machines.
index ab9e402518e84bc23ef512b8d431c6fa9775f583..b843e35122e8934d7ea902e71ef0cae36f670611 100644 (file)
@@ -30,6 +30,8 @@ extern void flush_dcache_page(struct page *page);
 #define flush_dcache_mmap_lock(mapping)                do { } while (0)
 #define flush_dcache_mmap_unlock(mapping)      do { } while (0)
 
+extern void __flush_disable_L1(void);
+
 extern void __flush_icache_range(unsigned long, unsigned long);
 static inline void flush_icache_range(unsigned long start, unsigned long stop)
 {
index ce8e2bdf84ed03b4706996e2d6f3a997d9a00150..e807e9d8e3f7ea60b184da83176d54b3bbb40dcd 100644 (file)
@@ -191,6 +191,7 @@ extern unsigned long __secondary_hold_spinloop;
 extern unsigned long __secondary_hold_acknowledge;
 extern char __secondary_hold;
 
+extern void __early_start(void);
 #endif /* __ASSEMBLY__ */
 
 #endif /* __KERNEL__ */
index 0f59863c3adeb3f220ef63093af71b1ce2bb23e4..b221541d9861f0e21bdc0bd7c55d0f0097b68040 100644 (file)
@@ -1043,6 +1043,34 @@ _GLOBAL(flush_dcache_L1)
 
        blr
 
+/* Flush L1 d-cache, invalidate and disable d-cache and i-cache */
+_GLOBAL(__flush_disable_L1)
+       mflr    r10
+       bl      flush_dcache_L1 /* Flush L1 d-cache */
+       mtlr    r10
+
+       mfspr   r4, SPRN_L1CSR0 /* Invalidate and disable d-cache */
+       li      r5, 2
+       rlwimi  r4, r5, 0, 3
+
+       msync
+       isync
+       mtspr   SPRN_L1CSR0, r4
+       isync
+
+1:     mfspr   r4, SPRN_L1CSR0 /* Wait for the invalidate to finish */
+       andi.   r4, r4, 2
+       bne     1b
+
+       mfspr   r4, SPRN_L1CSR1 /* Invalidate and disable i-cache */
+       li      r5, 2
+       rlwimi  r4, r5, 0, 3
+
+       mtspr   SPRN_L1CSR1, r4
+       isync
+
+       blr
+
 #ifdef CONFIG_SMP
 /* When we get here, r24 needs to hold the CPU # */
        .globl __secondary_start
index 7ed52a604a13c0e7babcbd38d05e06ca0d43dcf3..6fcfa12e5c56dd42a2b1335fe1d6a0a35d0bfeda 100644 (file)
@@ -31,8 +31,6 @@
 #include <sysdev/mpic.h>
 #include "smp.h"
 
-extern void __early_start(void);
-
 struct epapr_spin_table {
        u32     addr_h;
        u32     addr_l;
@@ -100,15 +98,45 @@ static void mpc85xx_take_timebase(void)
        local_irq_restore(flags);
 }
 
-static int __init
-smp_85xx_kick_cpu(int nr)
+#ifdef CONFIG_HOTPLUG_CPU
+static void __cpuinit smp_85xx_mach_cpu_die(void)
+{
+       unsigned int cpu = smp_processor_id();
+       u32 tmp;
+
+       local_irq_disable();
+       idle_task_exit();
+       generic_set_cpu_dead(cpu);
+       mb();
+
+       mtspr(SPRN_TCR, 0);
+
+       __flush_disable_L1();
+       tmp = (mfspr(SPRN_HID0) & ~(HID0_DOZE|HID0_SLEEP)) | HID0_NAP;
+       mtspr(SPRN_HID0, tmp);
+       isync();
+
+       /* Enter NAP mode. */
+       tmp = mfmsr();
+       tmp |= MSR_WE;
+       mb();
+       mtmsr(tmp);
+       isync();
+
+       while (1)
+               ;
+}
+#endif
+
+static int __cpuinit smp_85xx_kick_cpu(int nr)
 {
        unsigned long flags;
        const u64 *cpu_rel_addr;
        __iomem struct epapr_spin_table *spin_table;
        struct device_node *np;
-       int n = 0, hw_cpu = get_hard_smp_processor_id(nr);
+       int hw_cpu = get_hard_smp_processor_id(nr);
        int ioremappable;
+       int ret = 0;
 
        WARN_ON(nr < 0 || nr >= NR_CPUS);
        WARN_ON(hw_cpu < 0 || hw_cpu >= NR_CPUS);
@@ -139,9 +167,34 @@ smp_85xx_kick_cpu(int nr)
                spin_table = phys_to_virt(*cpu_rel_addr);
 
        local_irq_save(flags);
+#ifdef CONFIG_PPC32
+#ifdef CONFIG_HOTPLUG_CPU
+       /* Corresponding to generic_set_cpu_dead() */
+       generic_set_cpu_up(nr);
+
+       if (system_state == SYSTEM_RUNNING) {
+               out_be32(&spin_table->addr_l, 0);
 
+               /*
+                * We don't set the BPTR register here since it already points
+                * to the boot page properly.
+                */
+               mpic_reset_core(hw_cpu);
+
+               /* wait until core is ready... */
+               if (!spin_event_timeout(in_be32(&spin_table->addr_l) == 1,
+                                               10000, 100)) {
+                       pr_err("%s: timeout waiting for core %d to reset\n",
+                                                       __func__, hw_cpu);
+                       ret = -ENOENT;
+                       goto out;
+               }
+
+               /*  clear the acknowledge status */
+               __secondary_hold_acknowledge = -1;
+       }
+#endif
        out_be32(&spin_table->pir, hw_cpu);
-#ifdef CONFIG_PPC32
        out_be32(&spin_table->addr_l, __pa(__early_start));
 
        if (!ioremappable)
@@ -149,11 +202,18 @@ smp_85xx_kick_cpu(int nr)
                        (ulong)spin_table + sizeof(struct epapr_spin_table));
 
        /* Wait a bit for the CPU to ack. */
-       while ((__secondary_hold_acknowledge != hw_cpu) && (++n < 1000))
-               mdelay(1);
+       if (!spin_event_timeout(__secondary_hold_acknowledge == hw_cpu,
+                                       10000, 100)) {
+               pr_err("%s: timeout waiting for core %d to ack\n",
+                                               __func__, hw_cpu);
+               ret = -ENOENT;
+               goto out;
+       }
+out:
 #else
        smp_generic_kick_cpu(nr);
 
+       out_be32(&spin_table->pir, hw_cpu);
        out_be64((u64 *)(&spin_table->addr_h),
          __pa((u64)*((unsigned long long *)generic_secondary_smp_init)));
 
@@ -167,13 +227,15 @@ smp_85xx_kick_cpu(int nr)
        if (ioremappable)
                iounmap(spin_table);
 
-       pr_debug("waited %d msecs for CPU #%d.\n", n, nr);
-
-       return 0;
+       return ret;
 }
 
 struct smp_ops_t smp_85xx_ops = {
        .kick_cpu = smp_85xx_kick_cpu,
+#ifdef CONFIG_HOTPLUG_CPU
+       .cpu_disable    = generic_cpu_disable,
+       .cpu_die        = generic_cpu_die,
+#endif
 #ifdef CONFIG_KEXEC
        .give_timebase  = smp_generic_give_timebase,
        .take_timebase  = smp_generic_take_timebase,
@@ -277,8 +339,7 @@ static void mpc85xx_smp_machine_kexec(struct kimage *image)
 }
 #endif /* CONFIG_KEXEC */
 
-static void __init
-smp_85xx_setup_cpu(int cpu_nr)
+static void __cpuinit smp_85xx_setup_cpu(int cpu_nr)
 {
        if (smp_85xx_ops.probe == smp_mpic_probe)
                mpic_setup_this_cpu();
@@ -329,6 +390,9 @@ void __init mpc85xx_smp_init(void)
                }
                smp_85xx_ops.give_timebase = mpc85xx_give_timebase;
                smp_85xx_ops.take_timebase = mpc85xx_take_timebase;
+#ifdef CONFIG_HOTPLUG_CPU
+               ppc_md.cpu_die = smp_85xx_mach_cpu_die;
+#endif
        }
 
        smp_ops = &smp_85xx_ops;