/*
 * Copyright (C) 2013 Imagination Technologies
 * Author: Paul Burton <paul.burton@imgtec.com>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the
 * Free Software Foundation; either version 2 of the License, or (at your
 * option) any later version.
 */
#include <linux/delay.h>
#include <linux/io.h>
#include <linux/irqchip/mips-gic.h>
#include <linux/sched/task_stack.h>
#include <linux/sched/hotplug.h>
#include <linux/slab.h>
#include <linux/smp.h>
#include <linux/types.h>

#include <asm/bcache.h>
#include <asm/mips-cm.h>
#include <asm/mips-cpc.h>
#include <asm/mips_mt.h>
#include <asm/mipsregs.h>
#include <asm/pm-cps.h>
#include <asm/r4kcache.h>
#include <asm/smp-cps.h>
#include <asm/time.h>
#include <asm/uasm.h>
static bool threads_disabled;
static DECLARE_BITMAP(core_power, NR_CPUS);
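
/*
 * Per-core boot configuration, allocated in cps_prepare_cpus(); each entry
 * holds the per-VPE boot configs and a mask of the VPEs expected to be
 * running within the core.
 */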
struct core_boot_config *mips_cps_core_bootcfg;
static int __init setup_nothreads(char *s)
{
	threads_disabled = true;
	return 0;
}
early_param("nothreads", setup_nothreads);
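
/*
 * Return the number of VPEs (VPs on MIPSr6 systems) implemented by a core,
 * read from the PVPE field of that core's GCR_Cx_CONFIG register. Falls
 * back to 1 when threads are disabled or multi-threading is unsupported.
 */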
static unsigned core_vpe_count(unsigned core)
{
	unsigned cfg;

	if (threads_disabled)
		return 1;

	if ((!IS_ENABLED(CONFIG_MIPS_MT_SMP) || !cpu_has_mipsmt)
		&& (!IS_ENABLED(CONFIG_CPU_MIPSR6) || !cpu_has_vp))
		return 1;

	mips_cm_lock_other(core, 0);
	cfg = read_gcr_co_config() & CM_GCR_Cx_CONFIG_PVPE_MSK;
	mips_cm_unlock_other();
	return (cfg >> CM_GCR_Cx_CONFIG_PVPE_SHF) + 1;
}
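
/*
 * Detect the core/VPE topology, mark each VPE as a possible & present CPU
 * and prepare core 0 (which we are running on) for coherent execution.
 */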
static void __init cps_smp_setup(void)
{
	unsigned int ncores, nvpes, core_vpes;
	unsigned long core_entry;
	int c, v;
	/* Detect & record VPE topology */
	ncores = mips_cm_numcores();
	pr_info("%s topology ", cpu_has_mips_r6 ? "VP" : "VPE");
	for (c = nvpes = 0; c < ncores; c++) {
		core_vpes = core_vpe_count(c);
		pr_cont("%c%u", c ? ',' : '{', core_vpes);

		/* Use the number of VPEs in core 0 for smp_num_siblings */
		if (!c)
			smp_num_siblings = core_vpes;

		for (v = 0; v < min_t(int, core_vpes, NR_CPUS - nvpes); v++) {
			cpu_data[nvpes + v].core = c;
#if defined(CONFIG_MIPS_MT_SMP) || defined(CONFIG_CPU_MIPSR6)
			cpu_data[nvpes + v].vpe_id = v;
#endif
		}

		nvpes += core_vpes;
	}
	pr_cont("} total %u\n", nvpes);
	/* Indicate present CPUs (CPU being synonymous with VPE) */
	for (v = 0; v < min_t(unsigned, nvpes, NR_CPUS); v++) {
		set_cpu_possible(v, true);
		set_cpu_present(v, true);
		__cpu_number_map[v] = v;
		__cpu_logical_map[v] = v;
	}
	/* Set a coherent default CCA (CWB) */
	change_c0_config(CONF_CM_CMASK, 0x5);

	/* Core 0 is powered up (we're running on it) */
	bitmap_set(core_power, 0, 1);

	/* Initialise core 0 */
	mips_cps_core_init();

	/* Make core 0 coherent with everything */
	write_gcr_cl_coherence(0xff);

	if (mips_cm_revision() >= CM_REV_CM3) {
		core_entry = CKSEG1ADDR((unsigned long)mips_cps_core_entry);
		write_gcr_bev_base(core_entry);
	}

#ifdef CONFIG_MIPS_MT_FPAFF
	/* If we have an FPU, enroll ourselves in the FPU-full mask */
	if (cpu_has_fpu)
		cpumask_set_cpu(0, &mt_fpu_cpumask);
#endif /* CONFIG_MIPS_MT_FPAFF */
}
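
/*
 * Check that the cache coherency attribute in use is suitable for
 * multi-core SMP, patch the secondary entry code with that CCA and
 * allocate the boot configuration structures later consumed by
 * cps_boot_secondary().
 */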
static void __init cps_prepare_cpus(unsigned int max_cpus)
{
	unsigned ncores, core_vpes, c, cca;
	bool cca_unsuitable;
	u32 *entry_code;
	mips_mt_set_cpuoptions();

	/* Detect whether the CCA is unsuited to multi-core SMP */
	cca = read_c0_config() & CONF_CM_CMASK;
	switch (cca) {
	case 0x4: /* CWBE */
	case 0x5: /* CWB */
		/* The CCA is coherent, multi-core is fine */
		cca_unsuitable = false;
		break;

	default:
		/* CCA is not coherent, multi-core is not usable */
		cca_unsuitable = true;
	}
	/* Warn the user if the CCA prevents multi-core */
	ncores = mips_cm_numcores();
	if (cca_unsuitable && ncores > 1) {
		pr_warn("Using only one core due to unsuitable CCA 0x%x\n",
			cca);

		for_each_present_cpu(c) {
			if (cpu_data[c].core)
				set_cpu_present(c, false);
		}
	}
	/*
	 * Patch the start of mips_cps_core_entry to provide:
	 *
	 * s0 = kseg0 CCA
	 */
	entry_code = (u32 *)&mips_cps_core_entry;
	uasm_i_addiu(&entry_code, 16, 0, cca);
	blast_dcache_range((unsigned long)&mips_cps_core_entry,
			   (unsigned long)entry_code);
	bc_wback_inv((unsigned long)&mips_cps_core_entry,
		     (void *)entry_code - (void *)&mips_cps_core_entry);
	__sync();
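
	/*
	 * The cache maintenance above matters because a newly started core
	 * fetches the patched entry code uncached, before it has joined the
	 * coherent domain: the instructions written by uasm must therefore
	 * reach memory rather than sit dirty in this core's caches.
	 */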
	/* Allocate core boot configuration structs */
	mips_cps_core_bootcfg = kcalloc(ncores, sizeof(*mips_cps_core_bootcfg),
					GFP_KERNEL);
	if (!mips_cps_core_bootcfg) {
		pr_err("Failed to allocate boot config for %u cores\n", ncores);
		goto err_out;
	}
	/* Allocate VPE boot configuration structs */
	for (c = 0; c < ncores; c++) {
		core_vpes = core_vpe_count(c);
		mips_cps_core_bootcfg[c].vpe_config = kcalloc(core_vpes,
				sizeof(*mips_cps_core_bootcfg[c].vpe_config),
				GFP_KERNEL);
		if (!mips_cps_core_bootcfg[c].vpe_config) {
			pr_err("Failed to allocate %u VPE boot configs\n",
			       core_vpes);
			goto err_out;
		}
	}

	/* Mark this CPU as booted */
	atomic_set(&mips_cps_core_bootcfg[current_cpu_data.core].vpe_mask,
		   1 << cpu_vpe_id(&current_cpu_data));

	return;
err_out:
	/* Clean up allocations */
	if (mips_cps_core_bootcfg) {
		for (c = 0; c < ncores; c++)
			kfree(mips_cps_core_bootcfg[c].vpe_config);
		kfree(mips_cps_core_bootcfg);
		mips_cps_core_bootcfg = NULL;
	}

	/* Effectively disable SMP by declaring CPUs not present */
	for_each_possible_cpu(c) {
		if (c == 0)
			continue;
		set_cpu_present(c, false);
	}
}
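
/*
 * Power up & reset the given core so that it starts fetching from
 * mips_cps_core_entry. The reset is sequenced via the CPC where one is
 * present; otherwise the core is simply released from reset via the GCRs.
 */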
static void boot_core(unsigned int core, unsigned int vpe_id)
{
	u32 access, stat, seq_state;
	unsigned timeout;
	/* Select the appropriate core */
	mips_cm_lock_other(core, 0);

	/* Set its reset vector */
	write_gcr_co_reset_base(CKSEG1ADDR((unsigned long)mips_cps_core_entry));

	/* Ensure its coherency is disabled */
	write_gcr_co_coherence(0);

	/* Start it with the legacy memory map and exception base */
	write_gcr_co_reset_ext_base(CM_GCR_RESET_EXT_BASE_UEB);

	/* Ensure the core can access the GCRs */
	access = read_gcr_access();
	access |= 1 << (CM_GCR_ACCESS_ACCESSEN_SHF + core);
	write_gcr_access(access);
	if (mips_cpc_present()) {
		/* Reset the core */
		mips_cpc_lock_other(core);

		if (mips_cm_revision() >= CM_REV_CM3) {
			/* Run only the requested VP following the reset */
			write_cpc_co_vp_stop(0xf);
			write_cpc_co_vp_run(1 << vpe_id);

			/*
			 * Ensure that the VP_RUN register is written before
			 * the core leaves reset.
			 */
			wmb();
		}

		write_cpc_co_cmd(CPC_Cx_CMD_RESET);
		timeout = 100;
		while (true) {
			stat = read_cpc_co_stat_conf();
			seq_state = stat & CPC_Cx_STAT_CONF_SEQSTATE_MSK;

			/* U6 == coherent execution, ie. the core is up */
			if (seq_state == CPC_Cx_STAT_CONF_SEQSTATE_U6)
				break;

			/* Delay a little while before we start warning */
			if (timeout) {
				mdelay(10);
				timeout--;
				continue;
			}

			pr_warn("Waiting for core %u to start... STAT_CONF=0x%x\n",
				core, stat);
			mdelay(1000);
		}

		mips_cpc_unlock_other();
	} else {
		/* Take the core out of reset */
		write_gcr_co_reset_release(0);
	}

	mips_cm_unlock_other();

	/* The core is now powered up */
	bitmap_set(core_power, core, 1);
}
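
/*
 * Executed on a CPU within the target core, via smp_call_function_single()
 * from cps_boot_secondary(): boot the requested VPEs of the calling core.
 */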
static void remote_vpe_boot(void *dummy)
{
	unsigned core = current_cpu_data.core;
	struct core_boot_config *core_cfg = &mips_cps_core_bootcfg[core];

	mips_cps_boot_vpes(core_cfg, cpu_vpe_id(&current_cpu_data));
}
static void cps_boot_secondary(int cpu, struct task_struct *idle)
{
	unsigned core = cpu_data[cpu].core;
	unsigned vpe_id = cpu_vpe_id(&cpu_data[cpu]);
	struct core_boot_config *core_cfg = &mips_cps_core_bootcfg[core];
	struct vpe_boot_config *vpe_cfg = &core_cfg->vpe_config[vpe_id];
	unsigned long core_entry;
	unsigned int remote;
	int err;
	vpe_cfg->pc = (unsigned long)&smp_bootstrap;
	vpe_cfg->sp = __KSTK_TOS(idle);
	vpe_cfg->gp = (unsigned long)task_thread_info(idle);

	atomic_or(1 << cpu_vpe_id(&cpu_data[cpu]), &core_cfg->vpe_mask);

	preempt_disable();
	if (!test_bit(core, core_power)) {
		/* Boot a VPE on a powered down core */
		boot_core(core, vpe_id);
		goto out;
	}

	if (cpu_has_vp) {
		mips_cm_lock_other(core, vpe_id);
		core_entry = CKSEG1ADDR((unsigned long)mips_cps_core_entry);
		write_gcr_co_reset_base(core_entry);
		mips_cm_unlock_other();
	}
	if (core != current_cpu_data.core) {
		/* Boot a VPE on another powered up core */
		for (remote = 0; remote < NR_CPUS; remote++) {
			if (cpu_data[remote].core != core)
				continue;
			if (cpu_online(remote))
				break;
		}
		if (remote >= NR_CPUS) {
			pr_crit("No online CPU in core %u to start CPU%d\n",
				core, cpu);
			goto out;
		}

		err = smp_call_function_single(remote, remote_vpe_boot,
					       NULL, 1);
		if (err)
			panic("Failed to call remote CPU\n");
		goto out;
	}
	BUG_ON(!cpu_has_mipsmt && !cpu_has_vp);

	/* Boot a VPE on this core */
	mips_cps_boot_vpes(core_cfg, vpe_id);
out:
	preempt_enable();
}
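
/*
 * Early per-CPU initialisation for a freshly started secondary: run one TC
 * per VPE, sanity-check the GIC's idea of the VP ID and unmask the hardware
 * interrupt lines.
 */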
static void cps_init_secondary(void)
{
	/* Disable MT - we only want to run 1 TC per VPE */
	if (cpu_has_mipsmt)
		dmt();

	if (mips_cm_revision() >= CM_REV_CM3) {
		unsigned ident = gic_read_local_vp_id();

		/*
		 * Ensure that our calculation of the VP ID matches up with
		 * what the GIC reports, otherwise we'll have configured
		 * interrupts incorrectly.
		 */
		BUG_ON(ident != mips_cm_vp_id(smp_processor_id()));
	}

	if (cpu_has_veic)
		clear_c0_status(ST0_IM);
	else
		change_c0_status(ST0_IM, STATUSF_IP2 | STATUSF_IP3 |
					 STATUSF_IP4 | STATUSF_IP5 |
					 STATUSF_IP6 | STATUSF_IP7);
}
static void cps_smp_finish(void)
{
	write_c0_compare(read_c0_count() + (8 * mips_hpt_frequency / HZ));

#ifdef CONFIG_MIPS_MT_FPAFF
	/* If we have an FPU, enroll ourselves in the FPU-full mask */
	if (cpu_has_fpu)
		cpumask_set_cpu(smp_processor_id(), &mt_fpu_cpumask);
#endif /* CONFIG_MIPS_MT_FPAFF */

	local_irq_enable();
}
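
/*
 * CPU hotplug support. A CPU going offline either halts its own TC/VP (if a
 * sibling within the same core remains online) or powers down the whole
 * core via the CPS power management code.
 */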
#ifdef CONFIG_HOTPLUG_CPU

static int cps_cpu_disable(void)
{
	unsigned cpu = smp_processor_id();
	struct core_boot_config *core_cfg;

	if (!cpu)
		return -EBUSY;

	if (!cps_pm_support_state(CPS_PM_POWER_GATED))
		return -EINVAL;

	core_cfg = &mips_cps_core_bootcfg[current_cpu_data.core];
	atomic_sub(1 << cpu_vpe_id(&current_cpu_data), &core_cfg->vpe_mask);
	smp_mb__after_atomic();
	set_cpu_online(cpu, false);
	calculate_cpu_foreign_map();

	return 0;
}
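
/*
 * State shared between a dying CPU and the CPU tearing it down: the dying
 * CPU signals cpu_death_chosen once cpu_death records whether it will halt
 * or power down, and cps_cpu_die() then waits for that state to be reached.
 */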
static DECLARE_COMPLETION(cpu_death_chosen);
static unsigned cpu_death_sibling;
static enum {
	CPU_DEATH_HALT,
	CPU_DEATH_POWER,
} cpu_death;
void play_dead(void)
{
	unsigned int cpu, core, vpe_id;

	local_irq_disable();
	idle_task_exit();
	cpu = smp_processor_id();
	cpu_death = CPU_DEATH_POWER;

	pr_debug("CPU%d going offline\n", cpu);
	if (cpu_has_mipsmt || cpu_has_vp) {
		core = cpu_data[cpu].core;

		/* Look for another online VPE within the core */
		for_each_online_cpu(cpu_death_sibling) {
			if (cpu_data[cpu_death_sibling].core != core)
				continue;

			/*
			 * There is an online VPE within the core. Just halt
			 * this TC and leave the core alone.
			 */
			cpu_death = CPU_DEATH_HALT;
			break;
		}
	}

	/* This CPU has chosen its way out */
	complete(&cpu_death_chosen);
	if (cpu_death == CPU_DEATH_HALT) {
		vpe_id = cpu_vpe_id(&cpu_data[cpu]);

		pr_debug("Halting core %d VP%d\n", core, vpe_id);
		if (cpu_has_mipsmt) {
			/* Halt this TC */
			write_c0_tchalt(TCHALT_H);
			instruction_hazard();
		} else if (cpu_has_vp) {
			write_cpc_cl_vp_stop(1 << vpe_id);

			/* Ensure that the VP_STOP register is written */
			wmb();
		}
	} else {
		pr_debug("Gating power to core %d\n", core);
		/* Power down the core */
		cps_pm_enter_state(CPS_PM_POWER_GATED);
	}

	/* This should never be reached */
	panic("Failed to offline CPU %u", cpu);
}
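
/*
 * Runs on an online sibling of a dying CPU, via smp_call_function_single():
 * spin until the dying CPU's TC reads as halted in its TCHalt register.
 */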
static void wait_for_sibling_halt(void *ptr_cpu)
{
	unsigned cpu = (unsigned long)ptr_cpu;
	unsigned vpe_id = cpu_vpe_id(&cpu_data[cpu]);
	unsigned halted;
	unsigned long flags;

	do {
		local_irq_save(flags);
		settc(vpe_id);
		halted = read_tc_c0_tchalt();
		local_irq_restore(flags);
	} while (!(halted & TCHALT_H));
}
static void cps_cpu_die(unsigned int cpu)
{
	unsigned core = cpu_data[cpu].core;
	unsigned int vpe_id = cpu_vpe_id(&cpu_data[cpu]);
	unsigned stat;
	int err;

	/* Wait for the cpu to choose its way out */
	if (!wait_for_completion_timeout(&cpu_death_chosen,
					 msecs_to_jiffies(5000))) {
		pr_err("CPU%u: didn't offline\n", cpu);
		return;
	}
	/*
	 * Now wait for the CPU to actually offline. Without this, the
	 * offlining may race with one or more of:
	 *
	 *   - Onlining the CPU again.
	 *   - Powering down the core if another VPE within it is offlined.
	 *   - A sibling VPE entering a non-coherent state.
	 *
	 * In the non-MT halt case (ie. infinite loop) the CPU is doing nothing
	 * with which we could race, so do nothing.
	 */
	if (cpu_death == CPU_DEATH_POWER) {
		/*
		 * Wait for the core to enter a powered down or clock gated
		 * state, the latter happening when a JTAG probe is connected
		 * in which case the CPC will refuse to power down the core.
		 */
		do {
			mips_cm_lock_other(core, 0);
			mips_cpc_lock_other(core);
			stat = read_cpc_co_stat_conf();
			stat &= CPC_Cx_STAT_CONF_SEQSTATE_MSK;
			mips_cpc_unlock_other();
			mips_cm_unlock_other();
		} while (stat != CPC_Cx_STAT_CONF_SEQSTATE_D0 &&
			 stat != CPC_Cx_STAT_CONF_SEQSTATE_D2 &&
			 stat != CPC_Cx_STAT_CONF_SEQSTATE_U2);

		/* Indicate the core is powered off */
		bitmap_clear(core_power, core, 1);
	} else if (cpu_has_mipsmt) {
		/*
		 * Have a CPU with access to the offlined CPU's registers wait
		 * for its TC to halt.
		 */
		err = smp_call_function_single(cpu_death_sibling,
					       wait_for_sibling_halt,
					       (void *)(unsigned long)cpu, 1);
		if (err)
			panic("Failed to call remote sibling CPU\n");
	} else if (cpu_has_vp) {
		do {
			mips_cm_lock_other(core, vpe_id);
			stat = read_cpc_co_vp_running();
			mips_cm_unlock_other();
		} while (stat & (1 << vpe_id));
	}
}
#endif /* CONFIG_HOTPLUG_CPU */
static struct plat_smp_ops cps_smp_ops = {
	.smp_setup		= cps_smp_setup,
	.prepare_cpus		= cps_prepare_cpus,
	.boot_secondary		= cps_boot_secondary,
	.init_secondary		= cps_init_secondary,
	.smp_finish		= cps_smp_finish,
	.send_ipi_single	= mips_smp_send_ipi_single,
	.send_ipi_mask		= mips_smp_send_ipi_mask,
#ifdef CONFIG_HOTPLUG_CPU
	.cpu_disable		= cps_cpu_disable,
	.cpu_die		= cps_cpu_die,
#endif
};
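
/*
 * Report whether the CPS SMP ops are those in use, for callers with
 * CPS-specific requirements.
 */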
bool mips_cps_smp_in_use(void)
{
	extern struct plat_smp_ops *mp_ops;

	return mp_ops == &cps_smp_ops;
}
int register_cps_smp_ops(void)
{
	if (!mips_cm_present()) {
		pr_warn("MIPS CPS SMP unable to proceed without a CM\n");
		return -ENODEV;
	}

	/* check we have a GIC - we need one for IPIs */
	if (!(read_gcr_gic_status() & CM_GCR_GIC_STATUS_EX_MSK)) {
		pr_warn("MIPS CPS SMP unable to proceed without a GIC\n");
		return -ENODEV;
	}

	register_smp_ops(&cps_smp_ops);

	return 0;
}