1 | // SPDX-License-Identifier: GPL-2.0 |
2 | ||
3 | #include <linux/module.h> | |
4 | #include <linux/init.h> | |
5 | #include <linux/kernel.h> | |
6 | #include <linux/mm.h> | |
7 | #include <linux/sched.h> | |
8 | #include <linux/kernel_stat.h> | |
9 | #include <linux/notifier.h> | |
10 | #include <linux/cpu.h> | |
11 | #include <linux/percpu.h> | |
12 | #include <linux/delay.h> | |
13 | #include <linux/err.h> | |
14 | #include <linux/irq.h> | |
15 | #include <linux/irqdomain.h> | |
16 | #include <linux/of.h> | |
17 | #include <linux/sched/task_stack.h> | |
18 | #include <linux/sched/mm.h> | |
859e5f45 | 19 | #include <linux/sched/hotplug.h> |
99106986 GR |
20 | #include <asm/irq.h> |
21 | #include <asm/traps.h> | |
22 | #include <asm/sections.h> | |
23 | #include <asm/mmu_context.h> | |
24 | #include <asm/pgalloc.h> | |
12879bda GR |
25 | #ifdef CONFIG_CPU_HAS_FPU |
26 | #include <abi/fpu.h> | |
27 | #endif | |
99106986 GR |
28 | |
/*
 * Per-cpu pending-IPI state: a bitmask of enum ipi_message_type
 * operations posted by other CPUs.  Cacheline-aligned so remote
 * set_bit()/xchg() traffic on one CPU's word does not false-share
 * with a neighbour's.
 */
struct ipi_data_struct {
	unsigned long bits ____cacheline_aligned;
};
static DEFINE_PER_CPU(struct ipi_data_struct, ipi_data);
33 | ||
/* Bit numbers used in ipi_data_struct::bits. */
enum ipi_message_type {
	IPI_EMPTY,		/* no handler action; delivery alone wakes the CPU */
	IPI_RESCHEDULE,		/* run scheduler_ipi() on the target */
	IPI_CALL_FUNC,		/* run queued smp_call_function callbacks */
	IPI_MAX			/* sentinel: first invalid bit number */
};
40 | ||
41 | static irqreturn_t handle_ipi(int irq, void *dev) | |
42 | { | |
43 | while (true) { | |
44 | unsigned long ops; | |
45 | ||
46 | ops = xchg(&this_cpu_ptr(&ipi_data)->bits, 0); | |
47 | if (ops == 0) | |
48 | return IRQ_HANDLED; | |
49 | ||
50 | if (ops & (1 << IPI_RESCHEDULE)) | |
51 | scheduler_ipi(); | |
52 | ||
53 | if (ops & (1 << IPI_CALL_FUNC)) | |
54 | generic_smp_call_function_interrupt(); | |
55 | ||
56 | BUG_ON((ops >> IPI_MAX) != 0); | |
57 | } | |
58 | ||
59 | return IRQ_HANDLED; | |
60 | } | |
61 | ||
/* Platform hook that actually raises the IPI on a set of CPUs. */
static void (*send_arch_ipi)(const struct cpumask *mask);

/* Percpu irq number used for IPIs; 0 means "not configured". */
static int ipi_irq;

/*
 * Called by the interrupt-controller driver to register its IPI
 * trigger function and the percpu irq it delivers on.  First caller
 * wins; later registrations are silently ignored.
 */
void __init set_send_ipi(void (*func)(const struct cpumask *mask), int irq)
{
	if (send_arch_ipi)
		return;

	send_arch_ipi = func;
	ipi_irq = irq;
}
73 | ||
/*
 * Post @operation into each target CPU's pending-IPI bitmask, then ask
 * the platform to raise the IPI interrupt on those CPUs.
 *
 * The smp_mb() orders the set_bit() stores before the interrupt is
 * triggered, so the target's handle_ipi() xchg() is guaranteed to
 * observe the freshly posted bits.
 */
static void
send_ipi_message(const struct cpumask *to_whom, enum ipi_message_type operation)
{
	int i;

	for_each_cpu(i, to_whom)
		set_bit(operation, &per_cpu_ptr(&ipi_data, i)->bits);

	smp_mb();
	send_arch_ipi(to_whom);
}
85 | ||
/* Arch hook: kick the smp_call_function machinery on every CPU in @mask. */
void arch_send_call_function_ipi_mask(struct cpumask *mask)
{
	send_ipi_message(mask, IPI_CALL_FUNC);
}
90 | ||
/* Arch hook: kick the smp_call_function machinery on a single @cpu. */
void arch_send_call_function_single_ipi(int cpu)
{
	send_ipi_message(cpumask_of(cpu), IPI_CALL_FUNC);
}
95 | ||
/* on_each_cpu() callback for smp_send_stop(): park this CPU forever. */
static void ipi_stop(void *unused)
{
	for (;;)
		;
}
100 | ||
/* Halt the machine: run ipi_stop() on every CPU. */
void smp_send_stop(void)
{
	on_each_cpu(ipi_stop, NULL, 1);
}
105 | ||
/* Scheduler hook: ask @cpu to run scheduler_ipi() via an IPI. */
void smp_send_reschedule(int cpu)
{
	send_ipi_message(cpumask_of(cpu), IPI_RESCHEDULE);
}
110 | ||
/* Required arch hook; no boot-CPU specific setup needed here. */
void __init smp_prepare_boot_cpu(void)
{
}
114 | ||
/* Required arch hook; nothing to prepare before secondaries start. */
void __init smp_prepare_cpus(unsigned int max_cpus)
{
}
118 | ||
/* Dummy per-cpu dev_id cookie for request_percpu_irq(). */
static int ipi_dummy_dev;

/*
 * Claim the percpu IPI interrupt registered via set_send_ipi() and
 * enable it on the boot CPU.  Secondary CPUs enable their copy in
 * csky_start_secondary().
 */
void __init setup_smp_ipi(void)
{
	int rc;

	/* No driver registered an IPI irq: run without IPI support. */
	if (ipi_irq == 0)
		return;

	rc = request_percpu_irq(ipi_irq, handle_ipi, "IPI Interrupt",
				&ipi_dummy_dev);
	if (rc)
		panic("%s IRQ request failed\n", __func__);

	enable_percpu_irq(ipi_irq, 0);
}
135 | ||
136 | void __init setup_smp(void) | |
137 | { | |
138 | struct device_node *node = NULL; | |
139 | int cpu; | |
140 | ||
398539dd | 141 | for_each_of_cpu_node(node) { |
99106986 GR |
142 | if (!of_device_is_available(node)) |
143 | continue; | |
144 | ||
145 | if (of_property_read_u32(node, "reg", &cpu)) | |
146 | continue; | |
147 | ||
148 | if (cpu >= NR_CPUS) | |
149 | continue; | |
150 | ||
151 | set_cpu_possible(cpu, true); | |
152 | set_cpu_present(cpu, true); | |
153 | } | |
154 | } | |
155 | ||
/* Secondary-CPU entry trampoline, defined in assembly. */
extern void _start_smp_secondary(void);

/*
 * Boot-handoff values for a starting secondary CPU: written by
 * __cpu_up(), consumed by the secondary before it has a full
 * environment.  Volatile because they are shared across CPUs with no
 * locking.
 */
volatile unsigned int secondary_hint;
volatile unsigned int secondary_ccr;
volatile unsigned int secondary_stack;
161 | ||
/*
 * Bring secondary @cpu online using @tidle as its idle task.
 *
 * Publishes the secondary's initial stack pointer plus copies of the
 * boot CPU's cr31/cr18, flushes those values out of the cache, then
 * releases the cpu: if it is already enabled in the SMP reset-control
 * register it just gets an IPI, otherwise its reset bit is set.
 * Spins until the secondary marks itself online.
 */
int __cpu_up(unsigned int cpu, struct task_struct *tidle)
{
	unsigned long mask = 1 << cpu;

	/* Top of the idle task's stack, minus an 8-byte scratch slot. */
	secondary_stack =
		(unsigned int) task_stack_page(tidle) + THREAD_SIZE - 8;
	secondary_hint = mfcr("cr31");
	secondary_ccr = mfcr("cr18");

	/*
	 * Because other CPUs are in reset status, we must flush data
	 * from cache to out and secondary CPUs use them in
	 * csky_start_secondary(void)
	 */
	mtcr("cr17", 0x22);

	/* cr<29, 0> is the SMP reset-control register, one bit per cpu. */
	if (mask & mfcr("cr<29, 0>")) {
		/* Already out of reset: just wake it with an IPI. */
		send_arch_ipi(cpumask_of(cpu));
	} else {
		/* Enable cpu in SMP reset ctrl reg */
		mask |= mfcr("cr<29, 0>");
		mtcr("cr<29, 0>", mask);
	}

	/* Wait for the cpu online */
	while (!cpu_online(cpu));

	/* Clear the handoff stack; arch_cpu_idle_dead() waits on this. */
	secondary_stack = 0;

	return 0;
}
193 | ||
/* Required arch hook; nothing to do after all CPUs are up. */
void __init smp_cpus_done(unsigned int max_cpus)
{
}
197 | ||
/* Arch hook for changing the profiling timer rate: not supported. */
int setup_profiling_timer(unsigned int multiplier)
{
	return -EINVAL;
}
202 | ||
/*
 * C entry point for a freshly started secondary CPU (reached from the
 * assembly trampoline).  Restores the control-register values staged
 * by __cpu_up(), sets up exception vectors and MMU state, enables the
 * percpu IPI irq, adopts init_mm, announces itself online and enters
 * the idle loop.  Never returns.
 */
void csky_start_secondary(void)
{
	struct mm_struct *mm = &init_mm;
	unsigned int cpu = smp_processor_id();

	/* Values published by __cpu_up() on the boot CPU. */
	mtcr("cr31", secondary_hint);
	mtcr("cr18", secondary_ccr);

	/* Point the vector base register at the shared exception vectors. */
	mtcr("vbr", vec_base);

	flush_tlb_all();
	write_mmu_pagemask(0);
	TLBMISS_HANDLER_SETUP_PGD(swapper_pg_dir);
	TLBMISS_HANDLER_SETUP_PGD_KERNEL(swapper_pg_dir);

#ifdef CONFIG_CPU_HAS_FPU
	init_fpu();
#endif

	/* Accept IPIs on this CPU from now on. */
	enable_percpu_irq(ipi_irq, 0);

	/* Take references on init_mm and make it our active mm. */
	mmget(mm);
	mmgrab(mm);
	current->active_mm = mm;
	cpumask_set_cpu(cpu, mm_cpumask(mm));

	notify_cpu_starting(cpu);
	set_cpu_online(cpu, true);

	pr_info("CPU%u Online: %s...\n", cpu, __func__);

	local_irq_enable();
	preempt_disable();
	cpu_startup_entry(CPUHP_AP_ONLINE_IDLE);
}
859e5f45 GR |
238 | |
239 | #ifdef CONFIG_HOTPLUG_CPU | |
/*
 * Hotplug: take the calling CPU offline.  Marks it !online, migrates
 * its irqs to surviving CPUs and drops it from every task's mm
 * cpumask.  Always succeeds on this arch.
 */
int __cpu_disable(void)
{
	unsigned int cpu = smp_processor_id();

	set_cpu_online(cpu, false);

	irq_migrate_all_off_this_cpu();

	clear_tasks_mm_cpumask(cpu);

	return 0;
}
252 | ||
/*
 * Hotplug: called on a surviving CPU.  Waits up to 5 seconds for the
 * dying @cpu to report its death (see arch_cpu_idle_dead()).
 */
void __cpu_die(unsigned int cpu)
{
	if (!cpu_wait_death(cpu, 5)) {
		pr_crit("CPU%u: shutdown failed\n", cpu);
		return;
	}
	pr_notice("CPU%u: shutdown\n", cpu);
}
261 | ||
/*
 * Final code run by a dying CPU.  Reports death to __cpu_die(), then
 * idles until __cpu_up() publishes a new secondary_stack, at which
 * point it switches to that stack and restarts through
 * csky_start_secondary().  (r8 is loaded with the same value as sp —
 * NOTE(review): presumably the C-SKY frame pointer; confirm ABI.)
 */
void arch_cpu_idle_dead(void)
{
	idle_task_exit();

	cpu_report_death();

	/* Sleep until this CPU is brought back up. */
	while (!secondary_stack)
		arch_cpu_idle();

	local_irq_disable();

	asm volatile(
		"mov sp, %0\n"
		"mov r8, %0\n"
		"jmpi csky_start_secondary"
		:
		: "r" (secondary_stack));
}
280 | #endif |