struct cpu cpu;
unsigned int imemctl;
unsigned int dmemctl;
+#ifdef CONFIG_SMP
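+	/* this core's idle task, cached so CPU hotplug can reuse it */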
+ struct task_struct *idle;
+#endif
};
DECLARE_PER_CPU(struct blackfin_cpudata, cpu_data);
#endif
void smp_icache_flush_range_others(unsigned long start,
- unsigned long end);
+ unsigned long end);
#ifdef CONFIG_HOTPLUG_CPU
void coreb_die(void);
void cpu_die(void);
int __cpu_die(unsigned int cpu);
#endif
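+/* kick each core in "mask" with a timer-tick IPI */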
+void smp_timer_broadcast(const struct cpumask *mask);
+
#endif /* !__ASM_BLACKFIN_SMP_H */
#if defined(CONFIG_TICKSOURCE_CORETMR)
/* per-cpu local core timer */
-static DEFINE_PER_CPU(struct clock_event_device, coretmr_events);
+DEFINE_PER_CPU(struct clock_event_device, coretmr_events);
static int bfin_coretmr_set_next_event(unsigned long cycles,
struct clock_event_device *evt)
+/*
+ * Dummy per-core tick device: a core serviced by the timer-broadcast
+ * IPI registers this placeholder so the clockevents core has a local
+ * device to drive; no hardware is programmed for it.
+ */
+static void broadcast_timer_set_mode(enum clock_event_mode mode,
+	struct clock_event_device *evt)
+{
+	/* nothing to program for the dummy device */
+}
+
+static void __cpuinit broadcast_timer_setup(struct clock_event_device *evt)
+{
+	evt->name = "dummy_timer";
+	evt->features = CLOCK_EVT_FEAT_ONESHOT |
+		CLOCK_EVT_FEAT_PERIODIC |
+		CLOCK_EVT_FEAT_DUMMY;
+	evt->rating = 400;
+	evt->mult = 1;
+	evt->set_mode = broadcast_timer_set_mode;
+
+	clockevents_register_device(evt);
+}
+
#ifdef CONFIG_CORE_TIMER_IRQ_L1
__attribute__((l1_text))
#endif
+
irqreturn_t bfin_coretmr_interrupt(int irq, void *dev_id)
{
	int cpu = smp_processor_id();
	struct clock_event_device *evt = &per_cpu(coretmr_events, cpu);

	smp_mb();
	evt->event_handler(evt);
	return IRQ_HANDLED;
}

void __cpuinit bfin_coretmr_clockevent_init(void)
{
	unsigned int cpu = smp_processor_id();
struct clock_event_device *evt = &per_cpu(coretmr_events, cpu);
+#ifdef CONFIG_SMP
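+	/* hook used by the tick-broadcast core to relay events over IPI */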
+ evt->broadcast = smp_timer_broadcast;
+#endif
+
evt->name = "bfin_core_timer";
evt->rating = 350;
evt->irq = -1;
#include <mach/irq.h>
#define SUPPLE_0_WAKEUP ((IRQ_SUPPLE_0 - (IRQ_CORETMR + 1)) % 32)
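+/* bit position of a supplemental IRQ inside its 32-bit wakeup (IWR) register */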
+#define SUPPLE_1_WAKEUP ((IRQ_SUPPLE_1 - (IRQ_CORETMR + 1)) % 32)
static inline void
bfin_iwr_restore(unsigned long iwr0, unsigned long iwr1, unsigned long iwr2)
static inline void
bfin_iwr_set_sup0(unsigned long *iwr0, unsigned long *iwr1, unsigned long *iwr2)
{
- bfin_iwr_save(0, IWR_ENABLE(SUPPLE_0_WAKEUP), 0, iwr0, iwr1, iwr2);
+ bfin_iwr_save(0, IWR_ENABLE(SUPPLE_0_WAKEUP) |
+ IWR_ENABLE(SUPPLE_1_WAKEUP), 0, iwr0, iwr1, iwr2);
}
#endif
if ((bfin_read_SYSCR() & COREB_SRAM_INIT) == 0) {
/* CoreB already running, send an IPI to wake it */
- platform_send_ipi_cpu(cpu, IRQ_SUPPLE_0);
+ smp_send_reschedule(cpu);
} else {
/* Kick CoreB, which should start execution from CORE_SRAM_BASE. */
bfin_write_SYSCR(bfin_read_SYSCR() & ~COREB_SRAM_INIT);
#include <linux/sched.h>
#include <linux/interrupt.h>
#include <linux/cache.h>
+#include <linux/clockchips.h>
#include <linux/profile.h>
#include <linux/errno.h>
#include <linux/mm.h>
struct blackfin_initial_pda __cpuinitdata initial_pda_coreb;
-#define BFIN_IPI_RESCHEDULE 0
-#define BFIN_IPI_CALL_FUNC 1
-#define BFIN_IPI_CPU_STOP 2
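+/* IPI message types, dispatched by the switch in ipi_handler_int1() */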
+#define BFIN_IPI_TIMER 0
+#define BFIN_IPI_RESCHEDULE 1
+#define BFIN_IPI_CALL_FUNC 2
+#define BFIN_IPI_CPU_STOP 3
struct blackfin_flush_data {
unsigned long start;
return IRQ_HANDLED;
}
+DECLARE_PER_CPU(struct clock_event_device, coretmr_events);
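+
+/* BFIN_IPI_TIMER delivery: run this core's local tick event handler */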
+void ipi_timer(void)
+{
+	int cpu = smp_processor_id();
+	struct clock_event_device *evt = &per_cpu(coretmr_events, cpu);
+
+	evt->event_handler(evt);
+}
+
static irqreturn_t ipi_handler_int1(int irq, void *dev_instance)
{
struct ipi_message *msg;
while (msg_queue->count) {
msg = &msg_queue->ipi_message[msg_queue->head];
switch (msg->type) {
+ case BFIN_IPI_TIMER:
+ ipi_timer();
+ break;
case BFIN_IPI_RESCHEDULE:
scheduler_ipi();
break;
{
cpumask_t callmap;
/* simply trigger an ipi */
- if (cpu_is_offline(cpu))
- return;
cpumask_clear(&callmap);
cpumask_set_cpu(cpu, &callmap);
return;
}
+void smp_send_msg(const struct cpumask *mask, unsigned long type)
+{
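+	/* smp_send_message() takes its cpumask argument by value */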
+ smp_send_message(*mask, type, NULL, NULL, 0);
+}
+
+void smp_timer_broadcast(const struct cpumask *mask)
+{
+ smp_send_msg(mask, BFIN_IPI_TIMER);
+}
+
void smp_send_stop(void)
{
cpumask_t callmap;
int __cpuinit __cpu_up(unsigned int cpu)
{
int ret;
- static struct task_struct *idle;
+ struct blackfin_cpudata *ci = &per_cpu(cpu_data, cpu);
+ struct task_struct *idle = ci->idle;
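+
+	/* reuse the idle thread left behind by a previous hotplug cycle */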
-	if (idle)
-		free_task(idle);
-
-	idle = fork_idle(cpu);
-	if (IS_ERR(idle)) {
-		printk(KERN_ERR "CPU%u: fork() failed\n", cpu);
-		return PTR_ERR(idle);
+	if (!idle) {
+		idle = fork_idle(cpu);
+		if (IS_ERR(idle)) {
+			printk(KERN_ERR "CPU%u: fork() failed\n", cpu);
+			return PTR_ERR(idle);
+		}
+		ci->idle = idle;
+	} else {
+		init_idle(idle, cpu);
	}
secondary_stack = task_stack_page(idle) + THREAD_SIZE;
ret = platform_boot_secondary(cpu, idle);
bfin_setup_caches(cpu);
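+	/* let CPU_STARTING notifiers know this core is coming online */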
+ notify_cpu_starting(cpu);
/*
* Calibrate loops per jiffy value.
* IRQs need to be enabled here - D-cache can be invalidated