/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License. See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 2013 Cavium, Inc.
 */
8
9 #include <linux/interrupt.h>
10 #include <linux/cpumask.h>
11 #include <linux/kernel.h>
12 #include <linux/sched.h>
13 #include <linux/sched/task_stack.h>
14
15 #include <asm/mipsregs.h>
16 #include <asm/setup.h>
17 #include <asm/time.h>
18 #include <asm/smp.h>
19
/*
 * Writing the sp releases the CPU, so writes must be ordered, gp
 * first, then sp.
 */
unsigned long paravirt_smp_sp[NR_CPUS];
unsigned long paravirt_smp_gp[NR_CPUS];

static int numcpus = 1;

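/*
 * "numcpus=" early parameter: how many CPUs to bring up.  Values
 * outside the range 1..NR_CPUS-1 are rejected.
 */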
static int __init set_numcpus(char *str)
{
        int newval;

        if (get_option(&str, &newval)) {
                if (newval < 1 || newval >= NR_CPUS)
                        goto bad;
                numcpus = newval;
                return 0;
        }
bad:
        return -EINVAL;
}
early_param("numcpus", set_numcpus);

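/*
 * smp_setup hook: record which physical CPU booted the kernel and mark
 * the first "numcpus" CPUs as possible and present.
 */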
static void paravirt_smp_setup(void)
{
        int id;
        unsigned int cpunum = get_ebase_cpunum();

        if (WARN_ON(cpunum >= NR_CPUS))
                return;

        /* The present CPUs are initially just the boot cpu (CPU 0). */
        for (id = 0; id < NR_CPUS; id++) {
                set_cpu_possible(id, id == 0);
                set_cpu_present(id, id == 0);
        }
        __cpu_number_map[cpunum] = 0;
        __cpu_logical_map[0] = cpunum;

        for (id = 0; id < numcpus; id++) {
                set_cpu_possible(id, true);
                set_cpu_present(id, true);
                __cpu_number_map[id] = id;
                __cpu_logical_map[id] = id;
        }
}

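/* IPIs are delivered through the mailbox interrupt mechanism. */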
void irq_mbox_ipi(int cpu, unsigned int actions);
static void paravirt_send_ipi_single(int cpu, unsigned int action)
{
        irq_mbox_ipi(cpu, action);
}

static void paravirt_send_ipi_mask(const struct cpumask *mask, unsigned int action)
{
        unsigned int cpu;

        for_each_cpu(cpu, mask)
                paravirt_send_ipi_single(cpu, action);
}

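/*
 * Per-CPU initialisation run on a secondary CPU: point EBASE at the
 * kernel's exception base and unmask the IP2 (interrupt controller)
 * line in the Status register.
 */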
static void paravirt_init_secondary(void)
{
        unsigned int sr;

        sr = set_c0_status(ST0_BEV);
        write_c0_ebase((u32)ebase);

        sr |= STATUSF_IP2;      /* Interrupt controller on IP2 */
        write_c0_status(sr);

        irq_cpu_online();
}

static void paravirt_smp_finish(void)
{
        /* to generate the first CPU timer interrupt */
        write_c0_compare(read_c0_count() + mips_hpt_frequency / HZ);
        local_irq_enable();
}

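/*
 * Release a secondary CPU: publish its gp (thread_info) before its
 * initial stack pointer, as required by the ordering comment above
 * paravirt_smp_sp[].
 */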
static void paravirt_boot_secondary(int cpu, struct task_struct *idle)
{
        paravirt_smp_gp[cpu] = (unsigned long)task_thread_info(idle);
        smp_wmb();
        paravirt_smp_sp[cpu] = __KSTK_TOS(idle);
}

static irqreturn_t paravirt_reched_interrupt(int irq, void *dev_id)
{
        scheduler_ipi();
        return IRQ_HANDLED;
}

static irqreturn_t paravirt_function_interrupt(int irq, void *dev_id)
{
        generic_smp_call_function_interrupt();
        return IRQ_HANDLED;
}

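/*
 * Wire up the two mailbox IRQs used for IPIs: MBOX0 for rescheduling,
 * MBOX1 for SMP function calls.  The handler address doubles as the
 * dev_id cookie passed to request_irq().
 */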
static void paravirt_prepare_cpus(unsigned int max_cpus)
{
        if (request_irq(MIPS_IRQ_MBOX0, paravirt_reched_interrupt,
                        IRQF_PERCPU | IRQF_NO_THREAD, "Scheduler",
                        paravirt_reched_interrupt)) {
                panic("Cannot request_irq for SchedulerIPI");
        }
        if (request_irq(MIPS_IRQ_MBOX1, paravirt_function_interrupt,
                        IRQF_PERCPU | IRQF_NO_THREAD, "SMP-Call",
                        paravirt_function_interrupt)) {
                panic("Cannot request_irq for SMP-Call");
        }
}

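/*
 * The SMP operations implemented above, exported for registration by
 * the platform setup code.
 */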
struct plat_smp_ops paravirt_smp_ops = {
        .send_ipi_single        = paravirt_send_ipi_single,
        .send_ipi_mask          = paravirt_send_ipi_mask,
        .init_secondary         = paravirt_init_secondary,
        .smp_finish             = paravirt_smp_finish,
        .boot_secondary         = paravirt_boot_secondary,
        .smp_setup              = paravirt_smp_setup,
        .prepare_cpus           = paravirt_prepare_cpus,
};