/*
 * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * RajeshwarR: Dec 11, 2007
 *   -- Added support for Inter Processor Interrupts
 *
 * Vineetg: Nov 1st, 2007
 *   -- Initial Write (Borrowed heavily from ARM)
 */
#include <linux/spinlock.h>
#include <linux/sched.h>
#include <linux/interrupt.h>
#include <linux/profile.h>
#include <linux/mm.h>
#include <linux/cpu.h>
#include <linux/irq.h>
#include <linux/atomic.h>
#include <linux/cpumask.h>
#include <linux/reboot.h>
#include <asm/processor.h>
#include <asm/setup.h>
#include <asm/mach_desc.h>
#ifndef CONFIG_ARC_HAS_LLSC
arch_spinlock_t smp_atomic_ops_lock = __ARCH_SPIN_LOCK_UNLOCKED;
arch_spinlock_t smp_bitops_lock = __ARCH_SPIN_LOCK_UNLOCKED;
#endif

struct plat_smp_ops __weak plat_smp_ops;
/* XXX: per cpu ? Only needed once in early secondary boot */
struct task_struct *secondary_idle_tsk;
/* Called from start_kernel */
void __init smp_prepare_boot_cpu(void)
{
}
/*
 * Called from setup_arch() before calling setup_processor()
 *
 * - Initialise the CPU possible map early - this describes the CPUs
 *   which may be present or become present in the system.
 * - Call early smp init hook. This can initialize a specific multi-core
 *   IP which is say common to several platforms (hence not part of
 *   platform specific init_early() hook)
 */
void __init smp_init_cpus(void)
{
	unsigned int i;

	for (i = 0; i < NR_CPUS; i++)
		set_cpu_possible(i, true);

	if (plat_smp_ops.init_early_smp)
		plat_smp_ops.init_early_smp();
}
/* Called from init() => process 1 */
void __init smp_prepare_cpus(unsigned int max_cpus)
{
	int i;

	/*
	 * Initialise the present map, which describes the set of CPUs
	 * actually populated at the present time.
	 */
	for (i = 0; i < max_cpus; i++)
		set_cpu_present(i, true);
}
void __init smp_cpus_done(unsigned int max_cpus)
{
}
/*
 * Default smp boot helper for Run-on-reset case where all cores start off
 * together. Non-masters need to wait for Master to start running.
 * This is implemented using a flag in memory, which Non-masters spin-wait on.
 * Master sets it to cpu-id of core to "ungate" it.
 */
static volatile int wake_flag;
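
/*
 * A minimal trace of the handshake, assuming the master is cpu 0 waking
 * cpu 1: master runs arc_default_smp_cpu_kick(1, ...) => wake_flag = 1;
 * cpu 1, spinning in arc_platform_smp_wait_to_boot(1), sees the flag match
 * its own id, clears it for the next secondary and jumps to the secondary
 * entry point. @wake_flag is volatile so the spin loop below re-reads
 * memory on every iteration rather than caching the value in a register.
 */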
static void arc_default_smp_cpu_kick(int cpu, unsigned long pc)
{
	BUG_ON(cpu == 0);
	wake_flag = cpu;
}
void arc_platform_smp_wait_to_boot(int cpu)
{
	while (wake_flag != cpu)
		;

	wake_flag = 0;
	__asm__ __volatile__("j @first_lines_of_secondary	\n");
}
const char *arc_platform_smp_cpuinfo(void)
{
	return plat_smp_ops.info ? : "";
}
/*
 * The very first "C" code executed by secondary
 * Called from asm stub in head.S
 * "current"/R25 already setup by low level boot code
 */
void start_kernel_secondary(void)
{
	struct mm_struct *mm = &init_mm;
	unsigned int cpu = smp_processor_id();

	/* MMU, Caches, Vector Table, Interrupts etc */
	setup_processor();

	atomic_inc(&mm->mm_users);
	atomic_inc(&mm->mm_count);
	current->active_mm = mm;
	cpumask_set_cpu(cpu, mm_cpumask(mm));

	/* Some SMP H/w setup - for each cpu */
	if (plat_smp_ops.init_per_cpu)
		plat_smp_ops.init_per_cpu(cpu);

	if (machine_desc->init_per_cpu)
		machine_desc->init_per_cpu(cpu);

	notify_cpu_starting(cpu);
	set_cpu_online(cpu, true);

	pr_info("## CPU%u LIVE ##: Executing Code...\n", cpu);

	local_irq_enable();
	preempt_disable();
	cpu_startup_entry(CPUHP_AP_ONLINE_IDLE);
}
/*
 * Called from kernel_init() -> smp_init() - for each CPU
 *
 * At this point, Secondary Processor is "HALT"ed:
 * - It booted, but was halted in head.S
 * - It was configured to halt-on-reset
 * So need to wake it up.
 *
 * Essential requirements being where to run from (PC) and stack (SP)
 */
int __cpu_up(unsigned int cpu, struct task_struct *idle)
{
	unsigned long wait_till;

	secondary_idle_tsk = idle;

	pr_info("Idle Task [%d] %p", cpu, idle);
	pr_info("Trying to bring up CPU%u ...\n", cpu);

	if (plat_smp_ops.cpu_kick)
		plat_smp_ops.cpu_kick(cpu,
				      (unsigned long)first_lines_of_secondary);
	else
		arc_default_smp_cpu_kick(cpu, (unsigned long)NULL);

	/* wait for 1 sec after kicking the secondary */
	wait_till = jiffies + HZ;
	while (time_before(jiffies, wait_till)) {
		if (cpu_online(cpu))
			break;
	}

	if (!cpu_online(cpu)) {
		pr_info("Timeout: CPU%u FAILED to come up !!!\n", cpu);
		return -1;
	}

	secondary_idle_tsk = NULL;

	return 0;
}
/*
 * not supported here
 */
int setup_profiling_timer(unsigned int multiplier)
{
	return -EINVAL;
}
/*****************************************************************************/
/*              Inter Processor Interrupt Handling                           */
/*****************************************************************************/
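
/*
 * Msg types conveyed via the per-cpu @ipi_data word (one bit per msg) and
 * demultiplexed in __do_IPI() below. IPI_EMPTY pins the zero value, so an
 * all-clear @ipi_data word never aliases a real msg.
 */
enum ipi_msg_type {
	IPI_EMPTY = 0,
	IPI_RESCHEDULE = 1,
	IPI_CALL_FUNC,
	IPI_CPU_STOP,
};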
/*
 * In arches with IRQ for each msg type (above), receiver can use IRQ-id to
 * figure out what msg was sent. For those which don't (ARC has dedicated IPI
 * IRQ), the msg-type needs to be conveyed via per-cpu data
 */

static DEFINE_PER_CPU(unsigned long, ipi_data);
static void ipi_send_msg_one(int cpu, enum ipi_msg_type msg)
{
	unsigned long __percpu *ipi_data_ptr = per_cpu_ptr(&ipi_data, cpu);
	unsigned long old, new;
	unsigned long flags;

	pr_debug("%d Sending msg [%d] to %d\n", smp_processor_id(), msg, cpu);

	local_irq_save(flags);

	/*
	 * Atomically write new msg bit (in case others are writing too),
	 * and read back old value
	 */
	do {
		new = old = ACCESS_ONCE(*ipi_data_ptr);
		new |= 1U << msg;
	} while (cmpxchg(ipi_data_ptr, old, new) != old);

	/*
	 * Call the platform specific IPI kick function, but avoid if possible:
	 * Only do so if there's no pending msg from other concurrent sender(s).
	 * Otherwise, receiver will see this msg as well when it takes the
	 * IPI corresponding to that msg. This is true, even if it is already in
	 * IPI handler, because !@old means it has not yet dequeued the msg(s)
	 * so @new msg can be a free-loader
	 */
	if (plat_smp_ops.ipi_send && !old)
		plat_smp_ops.ipi_send(cpu);

	local_irq_restore(flags);
}
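
/*
 * Worked example of the elision above: CPU 2 sends IPI_RESCHEDULE to CPU 3
 * (@old == 0, so the hardware IPI is raised); before CPU 3 dequeues it,
 * CPU 1 sends IPI_CALL_FUNC to the same CPU 3. The second sender sees
 * @old != 0 and skips the kick: both msg bits are drained by the single
 * do_IPI() invocation on CPU 3.
 */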
static void ipi_send_msg(const struct cpumask *callmap, enum ipi_msg_type msg)
{
	unsigned int cpu;

	for_each_cpu(cpu, callmap)
		ipi_send_msg_one(cpu, msg);
}
void smp_send_reschedule(int cpu)
{
	ipi_send_msg_one(cpu, IPI_RESCHEDULE);
}
void smp_send_stop(void)
{
	struct cpumask targets;

	cpumask_copy(&targets, cpu_online_mask);
	cpumask_clear_cpu(smp_processor_id(), &targets);
	ipi_send_msg(&targets, IPI_CPU_STOP);
}
void arch_send_call_function_single_ipi(int cpu)
{
	ipi_send_msg_one(cpu, IPI_CALL_FUNC);
}

void arch_send_call_function_ipi_mask(const struct cpumask *mask)
{
	ipi_send_msg(mask, IPI_CALL_FUNC);
}
/*
 * ipi_cpu_stop - handle IPI from smp_send_stop()
 */
static void ipi_cpu_stop(void)
{
	machine_halt();
}

static inline int __do_IPI(unsigned long msg)
{
	int rc = 0;

	switch (msg) {
	case IPI_RESCHEDULE:
		scheduler_ipi();
		break;

	case IPI_CALL_FUNC:
		generic_smp_call_function_interrupt();
		break;

	case IPI_CPU_STOP:
		ipi_cpu_stop();
		break;

	default:
		rc = 1;
	}

	return rc;
}
/*
 * arch-common ISR to handle for inter-processor interrupts
 * Has hooks for platform specific IPI
 */
irqreturn_t do_IPI(int irq, void *dev_id)
{
	unsigned long pending;
	unsigned long __maybe_unused copy;

	pr_debug("IPI [%ld] received on cpu %d\n",
		 *this_cpu_ptr(&ipi_data), smp_processor_id());

	if (plat_smp_ops.ipi_clear)
		plat_smp_ops.ipi_clear(irq);

	/*
	 * "dequeue" the msg corresponding to this IPI (and possibly other
	 * piggybacked msg from elided IPIs: see ipi_send_msg_one() above)
	 */
	copy = pending = xchg(this_cpu_ptr(&ipi_data), 0);

	do {
		unsigned long msg = __ffs(pending);
		int rc;

		rc = __do_IPI(msg);
		if (rc)
			pr_info("IPI with bogus msg %ld in %ld\n", msg, copy);

		pending &= ~(1U << msg);
	} while (pending);

	return IRQ_HANDLED;
}
/*
 * API called by platform code to hookup arch-common ISR to their IPI IRQ
 *
 * Note: If IPI is provided by platform (vs. say ARC MCIP), their intc setup/map
 * function needs to call irq_set_percpu_devid() for IPI IRQ, otherwise
 * request_percpu_irq() below will fail
 */
static DEFINE_PER_CPU(int, ipi_dev);
int smp_ipi_irq_setup(int cpu, int irq)
{
	int *dev = per_cpu_ptr(&ipi_dev, cpu);

	/* Boot cpu calls request, all call enable */
	if (!cpu) {
		int rc;

		rc = request_percpu_irq(irq, do_IPI, "IPI Interrupt", dev);
		if (rc)
			panic("Percpu IRQ request failed for %d\n", irq);
	}

	enable_percpu_irq(irq, 0);

	return 0;
}
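
/*
 * Typical call sequence from a platform port (hook and irq names below are
 * illustrative, not fixed by this file):
 *
 *	irq_set_percpu_devid(ipi_irq);		(once, when the intc maps the IRQ)
 *	smp_ipi_irq_setup(cpu, ipi_irq);	(on every cpu as it comes up)
 */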