arch/tile/kernel/smpboot.c
/*
 * Copyright 2010 Tilera Corporation. All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation, version 2.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
 * NON INFRINGEMENT. See the GNU General Public License for
 * more details.
 */

#include <linux/module.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/sched/mm.h>
#include <linux/sched/task.h>
#include <linux/kernel_stat.h>
#include <linux/bootmem.h>
#include <linux/notifier.h>
#include <linux/cpu.h>
#include <linux/percpu.h>
#include <linux/delay.h>
#include <linux/err.h>
#include <linux/irq.h>
#include <asm/mmu_context.h>
#include <asm/tlbflush.h>
#include <asm/sections.h>

/* State of each CPU. */
static DEFINE_PER_CPU(int, cpu_state) = { 0 };

/* The messaging code jumps to this pointer during boot-up */
unsigned long start_cpu_function_addr;

/* Called very early during startup to mark boot cpu as online */
void __init smp_prepare_boot_cpu(void)
{
	int cpu = smp_processor_id();
	set_cpu_online(cpu, 1);
	set_cpu_present(cpu, 1);
	__this_cpu_write(cpu_state, CPU_ONLINE);

	init_messaging();
}

static void start_secondary(void);

/*
 * Called at the top of init() to launch all the other CPUs.
 * They run free to complete their initialization and then wait
 * until they get an IPI from the boot cpu to come online.
 */
void __init smp_prepare_cpus(unsigned int max_cpus)
{
	long rc;
	int cpu, cpu_count;
	int boot_cpu = smp_processor_id();

	current_thread_info()->cpu = boot_cpu;

	/*
	 * Pin this task to the boot CPU while we bring up the others,
	 * just to make sure we don't uselessly migrate as they come up.
	 */
	rc = sched_setaffinity(current->pid, cpumask_of(boot_cpu));
	if (rc != 0)
		pr_err("Couldn't set init affinity to boot cpu (%ld)\n", rc);

	/* Print information about disabled and dataplane cpus. */
	print_disabled_cpus();

	/*
	 * Tell the messaging subsystem how to respond to the
	 * startup message. We use a level of indirection to avoid
	 * confusing the linker with the fact that the messaging
	 * subsystem is calling __init code.
	 */
	start_cpu_function_addr = (unsigned long) &online_secondary;

	/* Set up thread context for all new processors. */
	cpu_count = 1;
	for (cpu = 0; cpu < NR_CPUS; ++cpu) {
		struct task_struct *idle;

		if (cpu == boot_cpu)
			continue;

		if (!cpu_possible(cpu)) {
			/*
			 * Make this processor do nothing on boot.
			 * Note that we don't give the boot_pc function
			 * a stack, so it has to be assembly code.
			 */
			per_cpu(boot_sp, cpu) = 0;
			per_cpu(boot_pc, cpu) = (unsigned long) smp_nap;
			continue;
		}

		/* Create a new idle thread to run start_secondary() */
		idle = fork_idle(cpu);
		if (IS_ERR(idle))
			panic("failed fork for CPU %d", cpu);
		idle->thread.pc = (unsigned long) start_secondary;

		/* Make this thread the boot thread for this processor */
		per_cpu(boot_sp, cpu) = task_ksp0(idle);
		per_cpu(boot_pc, cpu) = idle->thread.pc;

		++cpu_count;
	}
	BUG_ON(cpu_count > (max_cpus ? max_cpus : 1));

	/* Fire up the other tiles, if any */
	init_cpu_present(cpu_possible_mask);
	if (cpumask_weight(cpu_present_mask) > 1) {
		mb();	/* make sure all data is visible to new processors */
		hv_start_all_tiles();
	}
}
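
/*
 * At this point each possible secondary cpu has boot_pc pointing at
 * start_secondary() and boot_sp pointing into its idle task's stack,
 * while cpus that can never come up get boot_pc = smp_nap and no stack.
 * hv_start_all_tiles() lets the other tiles run; the low-level startup
 * code (outside this file) is what actually consumes boot_pc/boot_sp to
 * get each tile into start_secondary().
 */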

static __initdata struct cpumask init_affinity;

static __init int reset_init_affinity(void)
{
	long rc = sched_setaffinity(current->pid, &init_affinity);
	if (rc != 0)
		pr_warn("couldn't reset init affinity (%ld)\n", rc);
	return 0;
}
late_initcall(reset_init_affinity);
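
/*
 * The late_initcall above undoes the pinning that smp_cpus_done() sets
 * up below: init_affinity is a copy of cpu_online_mask taken in
 * smp_cpus_done(), so once boot has finished the init task may again run
 * on any cpu that was online at that point.
 */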

static struct cpumask cpu_started;

/*
 * Activate a secondary processor. Very minimal; don't add anything
 * to this path without knowing what you're doing, since SMP booting
 * is pretty fragile.
 */
static void start_secondary(void)
{
	int cpuid;

	preempt_disable();

	cpuid = smp_processor_id();

	/* Set our thread pointer appropriately. */
	set_my_cpu_offset(__per_cpu_offset[cpuid]);

	/*
	 * In large machines even this will slow us down, since we
	 * will be contending for the printk spinlock.
	 */
	/* printk(KERN_DEBUG "Initializing CPU#%d\n", cpuid); */

	/* Initialize the current asid for our first page table. */
	__this_cpu_write(current_asid, min_asid);

	/* Set up this thread as another owner of the init_mm */
	mmgrab(&init_mm);
	current->active_mm = &init_mm;
	if (current->mm)
		BUG();
	enter_lazy_tlb(&init_mm, current);

	/* Allow hypervisor messages to be received */
	init_messaging();
	local_irq_enable();

	/* Indicate that we're ready to come up. */
	/* Must not do this before we're ready to receive messages */
	if (cpumask_test_and_set_cpu(cpuid, &cpu_started)) {
		pr_warn("CPU#%d already started!\n", cpuid);
		for (;;)
			local_irq_enable();
	}

	smp_nap();
}
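
/*
 * start_secondary() leaves the cpu napping in smp_nap() after marking
 * itself in cpu_started. It stays that way until __cpu_up() below sends
 * the MSG_TAG_START_CPU IPI; the messaging code (outside this file) then
 * jumps through start_cpu_function_addr, which smp_prepare_cpus() pointed
 * at online_secondary().
 */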

/*
 * Bring a secondary processor online.
 */
void online_secondary(void)
{
	/*
	 * low-memory mappings have been cleared, flush them from
	 * the local TLBs too.
	 */
	local_flush_tlb();

	BUG_ON(in_interrupt());

	/* This must be done before setting cpu_online_mask */
	wmb();

	notify_cpu_starting(smp_processor_id());

	set_cpu_online(smp_processor_id(), 1);
	__this_cpu_write(cpu_state, CPU_ONLINE);

	/* Set up tile-specific state for this cpu. */
	setup_cpu(0);

	/* Set up tile-timer clock-event device on this cpu */
	setup_tile_timer();

	cpu_startup_entry(CPUHP_AP_ONLINE_IDLE);
}
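
/*
 * cpu_startup_entry() does not return: the cpu enters the generic idle
 * loop here. The boot cpu, which has been spinning in __cpu_up() below,
 * returns as soon as it sees this cpu's bit in cpu_online_mask.
 */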

int __cpu_up(unsigned int cpu, struct task_struct *tidle)
{
	/*
	 * Wait up to 5s total for all CPUs to come online: 50,000 polls
	 * at 100 us each. "timeout" is static, so the 5s budget is shared
	 * across all the secondary cpus.
	 */
	static int timeout;
	for (; !cpumask_test_cpu(cpu, &cpu_started); timeout++) {
		if (timeout >= 50000) {
			pr_info("skipping unresponsive cpu%d\n", cpu);
			local_irq_enable();
			return -EIO;
		}
		udelay(100);
	}

	local_irq_enable();
	per_cpu(cpu_state, cpu) = CPU_UP_PREPARE;

	/* Unleash the CPU! */
	send_IPI_single(cpu, MSG_TAG_START_CPU);
	while (!cpumask_test_cpu(cpu, cpu_online_mask))
		cpu_relax();
	return 0;
}

static void panic_start_cpu(void)
{
	panic("Received a MSG_START_CPU IPI after boot finished.");
}

void __init smp_cpus_done(unsigned int max_cpus)
{
	int cpu, next, rc;

	/* Reset the response to a (now illegal) MSG_START_CPU IPI. */
	start_cpu_function_addr = (unsigned long) &panic_start_cpu;

	cpumask_copy(&init_affinity, cpu_online_mask);

	/*
	 * Pin ourselves to a single cpu in the initial affinity set
	 * so that kernel mappings for the rootfs are not in the dataplane,
	 * if set, and to avoid unnecessary migrating during bringup.
	 * Use the last cpu just in case the whole chip has been
	 * isolated from the scheduler, to keep init away from likely
	 * more useful user code. This also ensures that work scheduled
	 * via schedule_delayed_work() in the init routines will land
	 * on this cpu.
	 */
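	/*
	 * The empty loop below just walks to the last cpu set in
	 * init_affinity; cpumask_last(&init_affinity) should be an
	 * equivalent way to express this with the generic cpumask API.
	 */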
	for (cpu = cpumask_first(&init_affinity);
	     (next = cpumask_next(cpu, &init_affinity)) < nr_cpu_ids;
	     cpu = next)
		;
	rc = sched_setaffinity(current->pid, cpumask_of(cpu));
	if (rc != 0)
		pr_err("Couldn't set init affinity to cpu %d (%d)\n", cpu, rc);
}