/* CPU control.
 * (C) 2001, 2002, 2003, 2004 Rusty Russell
 *
 * This code is licenced under the GPL.
 */
#include <linux/proc_fs.h>
#include <linux/smp.h>
#include <linux/init.h>
#include <linux/notifier.h>
#include <linux/sched.h>
#include <linux/unistd.h>
#include <linux/cpu.h>
#include <linux/oom.h>
#include <linux/rcupdate.h>
#include <linux/export.h>
#include <linux/bug.h>
#include <linux/kthread.h>
#include <linux/stop_machine.h>
#include <linux/mutex.h>
#include <linux/gfp.h>
#include <linux/suspend.h>
#include <linux/lockdep.h>
#include <linux/tick.h>
#include <linux/irq.h>

#include <trace/events/power.h>
#define CREATE_TRACE_POINTS
#include <trace/events/cpuhp.h>

#include "smpboot.h"

/**
 * cpuhp_cpu_state - Per cpu hotplug state storage
 * @state:	The current cpu state
 * @target:	The target state
 */
struct cpuhp_cpu_state {
	enum cpuhp_state	state;
	enum cpuhp_state	target;
};

static DEFINE_PER_CPU(struct cpuhp_cpu_state, cpuhp_state);

/**
 * cpuhp_step - Hotplug state machine step
 * @name:	Name of the step
 * @startup:	Startup function of the step
 * @teardown:	Teardown function of the step
 * @skip_onerr:	Do not invoke the functions on error rollback
 *		Will go away once the notifiers are gone
 */
struct cpuhp_step {
	const char	*name;
	int		(*startup)(unsigned int cpu);
	int		(*teardown)(unsigned int cpu);
	bool		skip_onerr;
};

static struct cpuhp_step cpuhp_bp_states[];
static struct cpuhp_step cpuhp_ap_states[];

/**
 * cpuhp_invoke_callback - Invoke the callbacks for a given state
 * @cpu:	The cpu for which the callback should be invoked
 * @step:	The step in the state machine
 * @cb:		The callback function to invoke
 *
 * Called from cpu hotplug and from the state register machinery
 */
static int cpuhp_invoke_callback(unsigned int cpu, enum cpuhp_state step,
				 int (*cb)(unsigned int))
{
	struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, cpu);
	int ret = 0;

	if (cb) {
		trace_cpuhp_enter(cpu, st->target, step, cb);
		ret = cb(cpu);
		trace_cpuhp_exit(cpu, st->state, step, ret);
	}
	return ret;
}

#ifdef CONFIG_SMP
/* Serializes the updates to cpu_online_mask, cpu_present_mask */
static DEFINE_MUTEX(cpu_add_remove_lock);
bool cpuhp_tasks_frozen;
EXPORT_SYMBOL_GPL(cpuhp_tasks_frozen);

/*
 * The following two APIs (cpu_maps_update_begin/done) must be used when
 * attempting to serialize the updates to cpu_online_mask & cpu_present_mask.
 * The APIs cpu_notifier_register_begin/done() must be used to protect CPU
 * hotplug callback (un)registration performed using __register_cpu_notifier()
 * or __unregister_cpu_notifier().
 */
void cpu_maps_update_begin(void)
{
	mutex_lock(&cpu_add_remove_lock);
}
EXPORT_SYMBOL(cpu_notifier_register_begin);

void cpu_maps_update_done(void)
{
	mutex_unlock(&cpu_add_remove_lock);
}
EXPORT_SYMBOL(cpu_notifier_register_done);
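
/*
 * Illustrative sketch (not part of this file's code): the comment above
 * describes the locking discipline callers are expected to follow when they
 * register a notifier and also need to initialize already-online CPUs
 * without racing against hotplug. The callback and helper names below are
 * hypothetical placeholders.
 *
 *	static struct notifier_block my_cpu_nb = {
 *		.notifier_call = my_cpu_callback,	// hypothetical callback
 *	};
 *	int cpu;
 *
 *	cpu_notifier_register_begin();
 *	for_each_online_cpu(cpu)
 *		my_setup_cpu(cpu);			// hypothetical per-CPU init
 *	__register_cpu_notifier(&my_cpu_nb);
 *	cpu_notifier_register_done();
 */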

static RAW_NOTIFIER_HEAD(cpu_chain);

/* If set, cpu_up and cpu_down will return -EBUSY and do nothing.
 * Should always be manipulated under cpu_add_remove_lock
 */
static int cpu_hotplug_disabled;

#ifdef CONFIG_HOTPLUG_CPU

static struct {
	struct task_struct *active_writer;
	/* wait queue to wake up the active_writer */
	wait_queue_head_t wq;
	/* verifies that no writer will get active while readers are active */
	struct mutex lock;
	/*
	 * Also blocks the new readers during
	 * an ongoing cpu hotplug operation.
	 */
	atomic_t refcount;

#ifdef CONFIG_DEBUG_LOCK_ALLOC
	struct lockdep_map dep_map;
#endif
} cpu_hotplug = {
	.active_writer = NULL,
	.wq = __WAIT_QUEUE_HEAD_INITIALIZER(cpu_hotplug.wq),
	.lock = __MUTEX_INITIALIZER(cpu_hotplug.lock),
#ifdef CONFIG_DEBUG_LOCK_ALLOC
	.dep_map = {.name = "cpu_hotplug.lock" },
#endif
};

/* Lockdep annotations for get/put_online_cpus() and cpu_hotplug_begin/end() */
#define cpuhp_lock_acquire_read() lock_map_acquire_read(&cpu_hotplug.dep_map)
#define cpuhp_lock_acquire_tryread() \
				  lock_map_acquire_tryread(&cpu_hotplug.dep_map)
#define cpuhp_lock_acquire()      lock_map_acquire(&cpu_hotplug.dep_map)
#define cpuhp_lock_release()      lock_map_release(&cpu_hotplug.dep_map)


void get_online_cpus(void)
{
	might_sleep();
	if (cpu_hotplug.active_writer == current)
		return;
	cpuhp_lock_acquire_read();
	mutex_lock(&cpu_hotplug.lock);
	atomic_inc(&cpu_hotplug.refcount);
	mutex_unlock(&cpu_hotplug.lock);
}
EXPORT_SYMBOL_GPL(get_online_cpus);

void put_online_cpus(void)
{
	int refcount;

	if (cpu_hotplug.active_writer == current)
		return;

	refcount = atomic_dec_return(&cpu_hotplug.refcount);
	if (WARN_ON(refcount < 0)) /* try to fix things up */
		atomic_inc(&cpu_hotplug.refcount);

	if (refcount <= 0 && waitqueue_active(&cpu_hotplug.wq))
		wake_up(&cpu_hotplug.wq);

	cpuhp_lock_release();

}
EXPORT_SYMBOL_GPL(put_online_cpus);
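
/*
 * Illustrative sketch (not part of this file's code): a typical read-side
 * user brackets a traversal of the online CPUs with the two helpers above so
 * that no CPU can go away underneath it. The per-CPU work is a hypothetical
 * placeholder.
 *
 *	int cpu;
 *
 *	get_online_cpus();
 *	for_each_online_cpu(cpu)
 *		do_something(cpu);	// hypothetical per-CPU work
 *	put_online_cpus();
 */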

/*
 * This ensures that the hotplug operation can begin only when the
 * refcount goes to zero.
 *
 * Note that during a cpu-hotplug operation, the new readers, if any,
 * will be blocked by the cpu_hotplug.lock
 *
 * Since cpu_hotplug_begin() is always called after invoking
 * cpu_maps_update_begin(), we can be sure that only one writer is active.
 *
 * Note that theoretically, there is a possibility of a livelock:
 * - Refcount goes to zero, last reader wakes up the sleeping
 *   writer.
 * - Last reader unlocks the cpu_hotplug.lock.
 * - A new reader arrives at this moment, bumps up the refcount.
 * - The writer acquires the cpu_hotplug.lock, finds the refcount
 *   non zero and goes to sleep again.
 *
 * However, this is very difficult to achieve in practice since
 * get_online_cpus() is not an API which is called all that often.
 *
 */
void cpu_hotplug_begin(void)
{
	DEFINE_WAIT(wait);

	cpu_hotplug.active_writer = current;
	cpuhp_lock_acquire();

	for (;;) {
		mutex_lock(&cpu_hotplug.lock);
		prepare_to_wait(&cpu_hotplug.wq, &wait, TASK_UNINTERRUPTIBLE);
		if (likely(!atomic_read(&cpu_hotplug.refcount)))
			break;
		mutex_unlock(&cpu_hotplug.lock);
		schedule();
	}
	finish_wait(&cpu_hotplug.wq, &wait);
}

void cpu_hotplug_done(void)
{
	cpu_hotplug.active_writer = NULL;
	mutex_unlock(&cpu_hotplug.lock);
	cpuhp_lock_release();
}

/*
 * Wait for currently running CPU hotplug operations to complete (if any) and
 * disable future CPU hotplug (from sysfs). The 'cpu_add_remove_lock' protects
 * the 'cpu_hotplug_disabled' flag. The same lock is also acquired by the
 * hotplug path before performing hotplug operations. So acquiring that lock
 * guarantees mutual exclusion from any currently running hotplug operations.
 */
void cpu_hotplug_disable(void)
{
	cpu_maps_update_begin();
	cpu_hotplug_disabled++;
	cpu_maps_update_done();
}
EXPORT_SYMBOL_GPL(cpu_hotplug_disable);

void cpu_hotplug_enable(void)
{
	cpu_maps_update_begin();
	WARN_ON(--cpu_hotplug_disabled < 0);
	cpu_maps_update_done();
}
EXPORT_SYMBOL_GPL(cpu_hotplug_enable);
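
/*
 * Illustrative sketch (not part of this file's code): a caller that must keep
 * CPUs from coming or going across a longer sequence than a single
 * get_online_cpus() section can pair the two helpers above; because
 * cpu_hotplug_disabled is a counter, nested use is safe. The work in the
 * middle is a hypothetical placeholder.
 *
 *	cpu_hotplug_disable();
 *	reconfigure_something();	// hypothetical, must not race hotplug
 *	cpu_hotplug_enable();
 */
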
#endif	/* CONFIG_HOTPLUG_CPU */

/* Need to know about CPUs going up/down? */
int register_cpu_notifier(struct notifier_block *nb)
{
	int ret;
	cpu_maps_update_begin();
	ret = raw_notifier_chain_register(&cpu_chain, nb);
	cpu_maps_update_done();
	return ret;
}

int __register_cpu_notifier(struct notifier_block *nb)
{
	return raw_notifier_chain_register(&cpu_chain, nb);
}

static int __cpu_notify(unsigned long val, unsigned int cpu, int nr_to_call,
			int *nr_calls)
{
	unsigned long mod = cpuhp_tasks_frozen ? CPU_TASKS_FROZEN : 0;
	void *hcpu = (void *)(long)cpu;

	int ret;

	ret = __raw_notifier_call_chain(&cpu_chain, val | mod, hcpu, nr_to_call,
					nr_calls);

	return notifier_to_errno(ret);
}

static int cpu_notify(unsigned long val, unsigned int cpu)
{
	return __cpu_notify(val, cpu, -1, NULL);
}

/* Notifier wrappers for transitioning to state machine */
static int notify_prepare(unsigned int cpu)
{
	int nr_calls = 0;
	int ret;

	ret = __cpu_notify(CPU_UP_PREPARE, cpu, -1, &nr_calls);
	if (ret) {
		nr_calls--;
		printk(KERN_WARNING "%s: attempt to bring up CPU %u failed\n",
				__func__, cpu);
		__cpu_notify(CPU_UP_CANCELED, cpu, nr_calls, NULL);
	}
	return ret;
}

static int notify_online(unsigned int cpu)
{
	cpu_notify(CPU_ONLINE, cpu);
	return 0;
}

static int notify_starting(unsigned int cpu)
{
	cpu_notify(CPU_STARTING, cpu);
	return 0;
}

static int bringup_cpu(unsigned int cpu)
{
	struct task_struct *idle = idle_thread_get(cpu);
	int ret;

	/* Arch-specific enabling code. */
	ret = __cpu_up(cpu, idle);
	if (ret) {
		cpu_notify(CPU_UP_CANCELED, cpu);
		return ret;
	}
	BUG_ON(!cpu_online(cpu));
	return 0;
}

#ifdef CONFIG_HOTPLUG_CPU
EXPORT_SYMBOL(register_cpu_notifier);
EXPORT_SYMBOL(__register_cpu_notifier);

void unregister_cpu_notifier(struct notifier_block *nb)
{
	cpu_maps_update_begin();
	raw_notifier_chain_unregister(&cpu_chain, nb);
	cpu_maps_update_done();
}
EXPORT_SYMBOL(unregister_cpu_notifier);

void __unregister_cpu_notifier(struct notifier_block *nb)
{
	raw_notifier_chain_unregister(&cpu_chain, nb);
}
EXPORT_SYMBOL(__unregister_cpu_notifier);

/**
 * clear_tasks_mm_cpumask - Safely clear tasks' mm_cpumask for a CPU
 * @cpu: a CPU id
 *
 * This function walks all processes, finds a valid mm struct for each one and
 * then clears a corresponding bit in mm's cpumask. While this all sounds
 * trivial, there are various non-obvious corner cases, which this function
 * tries to solve in a safe manner.
 *
 * Also note that the function uses a somewhat relaxed locking scheme, so it may
 * be called only for an already offlined CPU.
 */
void clear_tasks_mm_cpumask(int cpu)
{
	struct task_struct *p;

	/*
	 * This function is called after the cpu is taken down and marked
	 * offline, so it's not like new tasks will ever get this cpu set in
	 * their mm mask. -- Peter Zijlstra
	 * Thus, we may use rcu_read_lock() here, instead of grabbing
	 * full-fledged tasklist_lock.
	 */
	WARN_ON(cpu_online(cpu));
	rcu_read_lock();
	for_each_process(p) {
		struct task_struct *t;

		/*
		 * Main thread might exit, but other threads may still have
		 * a valid mm. Find one.
		 */
		t = find_lock_task_mm(p);
		if (!t)
			continue;
		cpumask_clear_cpu(cpu, mm_cpumask(t->mm));
		task_unlock(t);
	}
	rcu_read_unlock();
}

static inline void check_for_tasks(int dead_cpu)
{
	struct task_struct *g, *p;

	read_lock(&tasklist_lock);
	for_each_process_thread(g, p) {
		if (!p->on_rq)
			continue;
		/*
		 * We do the check with unlocked task_rq(p)->lock.
		 * Order the reading so that we do not warn about a task
		 * which was running on this cpu in the past but has just
		 * been woken on another cpu.
		 */
		rmb();
		if (task_cpu(p) != dead_cpu)
			continue;

		pr_warn("Task %s (pid=%d) is on cpu %d (state=%ld, flags=%x)\n",
			p->comm, task_pid_nr(p), dead_cpu, p->state, p->flags);
	}
	read_unlock(&tasklist_lock);
}

static void cpu_notify_nofail(unsigned long val, unsigned int cpu)
{
	BUG_ON(cpu_notify(val, cpu));
}

static int notify_down_prepare(unsigned int cpu)
{
	int err, nr_calls = 0;

	err = __cpu_notify(CPU_DOWN_PREPARE, cpu, -1, &nr_calls);
	if (err) {
		nr_calls--;
		__cpu_notify(CPU_DOWN_FAILED, cpu, nr_calls, NULL);
		pr_warn("%s: attempt to take down CPU %u failed\n",
			__func__, cpu);
	}
	return err;
}

static int notify_dying(unsigned int cpu)
{
	cpu_notify(CPU_DYING, cpu);
	return 0;
}

/* Take this CPU down. */
static int take_cpu_down(void *_param)
{
	struct cpuhp_cpu_state *st = this_cpu_ptr(&cpuhp_state);
	enum cpuhp_state target = max((int)st->target, CPUHP_AP_OFFLINE);
	int err, cpu = smp_processor_id();

	/* Ensure this CPU doesn't handle any more interrupts. */
	err = __cpu_disable();
	if (err < 0)
		return err;

	/* Invoke the former CPU_DYING callbacks */
	for (; st->state > target; st->state--) {
		struct cpuhp_step *step = cpuhp_ap_states + st->state;

		cpuhp_invoke_callback(cpu, st->state, step->teardown);
	}
	/* Give up timekeeping duties */
	tick_handover_do_timer();
	/* Park the stopper thread */
	stop_machine_park(cpu);
	return 0;
}

static int takedown_cpu(unsigned int cpu)
{
	int err;

	/*
	 * By now we've cleared cpu_active_mask, wait for all preempt-disabled
	 * and RCU users of this state to go away such that all new such users
	 * will observe it.
	 *
	 * For CONFIG_PREEMPT we have preemptible RCU and its sync_rcu() might
	 * not imply sync_sched(), so wait for both.
	 *
	 * Do the sync before parking the smpboot threads to take care of the
	 * RCU boost case.
	 */
	if (IS_ENABLED(CONFIG_PREEMPT))
		synchronize_rcu_mult(call_rcu, call_rcu_sched);
	else
		synchronize_rcu();

	smpboot_park_threads(cpu);

	/*
	 * Prevent irq alloc/free while the dying cpu reorganizes the
	 * interrupt affinities.
	 */
	irq_lock_sparse();

	/*
	 * So now all preempt/rcu users must observe !cpu_active().
	 */
	err = stop_machine(take_cpu_down, NULL, cpumask_of(cpu));
	if (err) {
		/* CPU didn't die: tell everyone. Can't complain. */
		cpu_notify_nofail(CPU_DOWN_FAILED, cpu);
		irq_unlock_sparse();
		return err;
	}
	BUG_ON(cpu_online(cpu));

	/*
	 * The migration_call() CPU_DYING callback will have removed all
	 * runnable tasks from the cpu, there's only the idle task left now
	 * that the migration thread is done doing the stop_machine thing.
	 *
	 * Wait for the stop thread to go away.
	 */
	while (!per_cpu(cpu_dead_idle, cpu))
		cpu_relax();
	smp_mb(); /* Read from cpu_dead_idle before __cpu_die(). */
	per_cpu(cpu_dead_idle, cpu) = false;

	/* Interrupts are moved away from the dying cpu, reenable alloc/free */
	irq_unlock_sparse();

	hotplug_cpu__broadcast_tick_pull(cpu);
	/* This actually kills the CPU. */
	__cpu_die(cpu);

	tick_cleanup_dead_cpu(cpu);
	return 0;
}

static int notify_dead(unsigned int cpu)
{
	cpu_notify_nofail(CPU_DEAD, cpu);
	check_for_tasks(cpu);
	return 0;
}

#else
#define notify_down_prepare	NULL
#define takedown_cpu		NULL
#define notify_dead		NULL
#define notify_dying		NULL
#endif

#ifdef CONFIG_HOTPLUG_CPU
static void undo_cpu_down(unsigned int cpu, struct cpuhp_cpu_state *st)
{
	for (st->state++; st->state < st->target; st->state++) {
		struct cpuhp_step *step = cpuhp_bp_states + st->state;

		if (!step->skip_onerr)
			cpuhp_invoke_callback(cpu, st->state, step->startup);
	}
}

/* Requires cpu_add_remove_lock to be held */
static int __ref _cpu_down(unsigned int cpu, int tasks_frozen)
{
	struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, cpu);
	int prev_state, ret = 0;
	bool hasdied = false;

	if (num_online_cpus() == 1)
		return -EBUSY;

	if (!cpu_online(cpu))
		return -EINVAL;

	cpu_hotplug_begin();

	cpuhp_tasks_frozen = tasks_frozen;

	prev_state = st->state;
	st->target = CPUHP_OFFLINE;
	for (; st->state > st->target; st->state--) {
		struct cpuhp_step *step = cpuhp_bp_states + st->state;

		ret = cpuhp_invoke_callback(cpu, st->state, step->teardown);
		if (ret) {
			st->target = prev_state;
			undo_cpu_down(cpu, st);
			break;
		}
	}
	hasdied = prev_state != st->state && st->state == CPUHP_OFFLINE;

	cpu_hotplug_done();
	/* This post dead nonsense must die */
	if (!ret && hasdied)
		cpu_notify_nofail(CPU_POST_DEAD, cpu);
	return ret;
}

int cpu_down(unsigned int cpu)
{
	int err;

	cpu_maps_update_begin();

	if (cpu_hotplug_disabled) {
		err = -EBUSY;
		goto out;
	}

	err = _cpu_down(cpu, 0);

out:
	cpu_maps_update_done();
	return err;
}
EXPORT_SYMBOL(cpu_down);
#endif /*CONFIG_HOTPLUG_CPU*/

/*
 * Unpark per-CPU smpboot kthreads at CPU-online time.
 */
static int smpboot_thread_call(struct notifier_block *nfb,
			       unsigned long action, void *hcpu)
{
	int cpu = (long)hcpu;

	switch (action & ~CPU_TASKS_FROZEN) {

	case CPU_DOWN_FAILED:
	case CPU_ONLINE:
		smpboot_unpark_threads(cpu);
		break;

	default:
		break;
	}

	return NOTIFY_OK;
}

static struct notifier_block smpboot_thread_notifier = {
	.notifier_call = smpboot_thread_call,
	.priority = CPU_PRI_SMPBOOT,
};

void smpboot_thread_init(void)
{
	register_cpu_notifier(&smpboot_thread_notifier);
}

/**
 * notify_cpu_starting(cpu) - call the CPU_STARTING notifiers
 * @cpu: cpu that just started
 *
 * This function calls the cpu_chain notifiers with CPU_STARTING.
 * It must be called by the arch code on the new cpu, before the new cpu
 * enables interrupts and before the "boot" cpu returns from __cpu_up().
 */
void notify_cpu_starting(unsigned int cpu)
{
	struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, cpu);
	enum cpuhp_state target = min((int)st->target, CPUHP_AP_ONLINE);

	while (st->state < target) {
		struct cpuhp_step *step;

		st->state++;
		step = cpuhp_ap_states + st->state;
		cpuhp_invoke_callback(cpu, st->state, step->startup);
	}
}
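
/*
 * Illustrative sketch (not part of this file's code): per the kernel-doc
 * above, an architecture's secondary-CPU entry path is expected to look
 * roughly like the outline below before it lets the new CPU loose. The
 * entry function name and the surrounding setup steps are hypothetical
 * placeholders, not any particular architecture's real code.
 *
 *	void secondary_start_kernel(void)	// hypothetical arch entry point
 *	{
 *		... low-level per-CPU setup: MMU, timers, per-cpu areas ...
 *		notify_cpu_starting(smp_processor_id());
 *		set_cpu_online(smp_processor_id(), true);
 *		local_irq_enable();
 *		cpu_startup_entry(CPUHP_ONLINE);
 *	}
 */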

static void undo_cpu_up(unsigned int cpu, struct cpuhp_cpu_state *st)
{
	for (st->state--; st->state > st->target; st->state--) {
		struct cpuhp_step *step = cpuhp_bp_states + st->state;

		if (!step->skip_onerr)
			cpuhp_invoke_callback(cpu, st->state, step->teardown);
	}
}

/* Requires cpu_add_remove_lock to be held */
static int _cpu_up(unsigned int cpu, int tasks_frozen)
{
	struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, cpu);
	struct task_struct *idle;
	int prev_state, ret = 0;

	cpu_hotplug_begin();

	if (cpu_online(cpu) || !cpu_present(cpu)) {
		ret = -EINVAL;
		goto out;
	}

	/* Let it fail before we try to bring the cpu up */
	idle = idle_thread_get(cpu);
	if (IS_ERR(idle)) {
		ret = PTR_ERR(idle);
		goto out;
	}

	cpuhp_tasks_frozen = tasks_frozen;

	prev_state = st->state;
	st->target = CPUHP_ONLINE;
	while (st->state < st->target) {
		struct cpuhp_step *step;

		st->state++;
		step = cpuhp_bp_states + st->state;
		ret = cpuhp_invoke_callback(cpu, st->state, step->startup);
		if (ret) {
			st->target = prev_state;
			undo_cpu_up(cpu, st);
			break;
		}
	}
out:
	cpu_hotplug_done();
	return ret;
}

int cpu_up(unsigned int cpu)
{
	int err = 0;

	if (!cpu_possible(cpu)) {
		pr_err("can't online cpu %d because it is not configured as may-hotadd at boot time\n",
		       cpu);
#if defined(CONFIG_IA64)
		pr_err("please check additional_cpus= boot parameter\n");
#endif
		return -EINVAL;
	}

	err = try_online_node(cpu_to_node(cpu));
	if (err)
		return err;

	cpu_maps_update_begin();

	if (cpu_hotplug_disabled) {
		err = -EBUSY;
		goto out;
	}

	err = _cpu_up(cpu, 0);

out:
	cpu_maps_update_done();
	return err;
}
EXPORT_SYMBOL_GPL(cpu_up);

#ifdef CONFIG_PM_SLEEP_SMP
static cpumask_var_t frozen_cpus;

int disable_nonboot_cpus(void)
{
	int cpu, first_cpu, error = 0;

	cpu_maps_update_begin();
	first_cpu = cpumask_first(cpu_online_mask);
	/*
	 * We take down all of the non-boot CPUs in one shot to avoid races
	 * with the userspace trying to use the CPU hotplug at the same time
	 */
	cpumask_clear(frozen_cpus);

	pr_info("Disabling non-boot CPUs ...\n");
	for_each_online_cpu(cpu) {
		if (cpu == first_cpu)
			continue;
		trace_suspend_resume(TPS("CPU_OFF"), cpu, true);
		error = _cpu_down(cpu, 1);
		trace_suspend_resume(TPS("CPU_OFF"), cpu, false);
		if (!error)
			cpumask_set_cpu(cpu, frozen_cpus);
		else {
			pr_err("Error taking CPU%d down: %d\n", cpu, error);
			break;
		}
	}

	if (!error)
		BUG_ON(num_online_cpus() > 1);
	else
		pr_err("Non-boot CPUs are not disabled\n");

	/*
	 * Make sure the CPUs won't be enabled by someone else. We need to do
	 * this even in case of failure as all disable_nonboot_cpus() users are
	 * supposed to do enable_nonboot_cpus() on the failure path.
	 */
	cpu_hotplug_disabled++;

	cpu_maps_update_done();
	return error;
}

void __weak arch_enable_nonboot_cpus_begin(void)
{
}

void __weak arch_enable_nonboot_cpus_end(void)
{
}

void enable_nonboot_cpus(void)
{
	int cpu, error;

	/* Allow everyone to use the CPU hotplug again */
	cpu_maps_update_begin();
	WARN_ON(--cpu_hotplug_disabled < 0);
	if (cpumask_empty(frozen_cpus))
		goto out;

	pr_info("Enabling non-boot CPUs ...\n");

	arch_enable_nonboot_cpus_begin();

	for_each_cpu(cpu, frozen_cpus) {
		trace_suspend_resume(TPS("CPU_ON"), cpu, true);
		error = _cpu_up(cpu, 1);
		trace_suspend_resume(TPS("CPU_ON"), cpu, false);
		if (!error) {
			pr_info("CPU%d is up\n", cpu);
			continue;
		}
		pr_warn("Error taking CPU%d up: %d\n", cpu, error);
	}

	arch_enable_nonboot_cpus_end();

	cpumask_clear(frozen_cpus);
out:
	cpu_maps_update_done();
}

static int __init alloc_frozen_cpus(void)
{
	if (!alloc_cpumask_var(&frozen_cpus, GFP_KERNEL|__GFP_ZERO))
		return -ENOMEM;
	return 0;
}
core_initcall(alloc_frozen_cpus);

/*
 * When callbacks for CPU hotplug notifications are being executed, we must
 * ensure that the state of the system with respect to the tasks being frozen
 * or not, as reported by the notification, remains unchanged *throughout the
 * duration* of the execution of the callbacks.
 * Hence we need to prevent the freezer from racing with regular CPU hotplug.
 *
 * This synchronization is implemented by mutually excluding regular CPU
 * hotplug and Suspend/Hibernate call paths by hooking onto the Suspend/
 * Hibernate notifications.
 */
static int
cpu_hotplug_pm_callback(struct notifier_block *nb,
			unsigned long action, void *ptr)
{
	switch (action) {

	case PM_SUSPEND_PREPARE:
	case PM_HIBERNATION_PREPARE:
		cpu_hotplug_disable();
		break;

	case PM_POST_SUSPEND:
	case PM_POST_HIBERNATION:
		cpu_hotplug_enable();
		break;

	default:
		return NOTIFY_DONE;
	}

	return NOTIFY_OK;
}


static int __init cpu_hotplug_pm_sync_init(void)
{
	/*
	 * cpu_hotplug_pm_callback has higher priority than x86
	 * bsp_pm_callback which depends on cpu_hotplug_pm_callback
	 * to disable cpu hotplug to avoid cpu hotplug race.
	 */
	pm_notifier(cpu_hotplug_pm_callback, 0);
	return 0;
}
core_initcall(cpu_hotplug_pm_sync_init);

#endif /* CONFIG_PM_SLEEP_SMP */

#endif /* CONFIG_SMP */

/* Boot processor state steps */
static struct cpuhp_step cpuhp_bp_states[] = {
	[CPUHP_OFFLINE] = {
		.name		= "offline",
		.startup	= NULL,
		.teardown	= NULL,
	},
#ifdef CONFIG_SMP
	[CPUHP_CREATE_THREADS] = {
		.name		= "threads:create",
		.startup	= smpboot_create_threads,
		.teardown	= NULL,
	},
	[CPUHP_NOTIFY_PREPARE] = {
		.name		= "notify:prepare",
		.startup	= notify_prepare,
		.teardown	= notify_dead,
		.skip_onerr	= true,
	},
	[CPUHP_BRINGUP_CPU] = {
		.name		= "cpu:bringup",
		.startup	= bringup_cpu,
		.teardown	= NULL,
	},
	[CPUHP_TEARDOWN_CPU] = {
		.name		= "cpu:teardown",
		.startup	= NULL,
		.teardown	= takedown_cpu,
	},
	[CPUHP_NOTIFY_ONLINE] = {
		.name		= "notify:online",
		.startup	= notify_online,
		.teardown	= notify_down_prepare,
	},
#endif
	[CPUHP_ONLINE] = {
		.name		= "online",
		.startup	= NULL,
		.teardown	= NULL,
	},
};

/* Application processor state steps */
static struct cpuhp_step cpuhp_ap_states[] = {
#ifdef CONFIG_SMP
	[CPUHP_AP_NOTIFY_STARTING] = {
		.name		= "notify:starting",
		.startup	= notify_starting,
		.teardown	= notify_dying,
		.skip_onerr	= true,
	},
#endif
	[CPUHP_ONLINE] = {
		.name		= "online",
		.startup	= NULL,
		.teardown	= NULL,
	},
};

/*
 * cpu_bit_bitmap[] is a special, "compressed" data structure that
 * represents all NR_CPUS-bit binary values of 1<<nr.
 *
 * It is used by cpumask_of() to get a constant address to a CPU
 * mask value that has a single bit set only.
 */

/* cpu_bit_bitmap[0] is empty - so we can back into it */
#define MASK_DECLARE_1(x)	[x+1][0] = (1UL << (x))
#define MASK_DECLARE_2(x)	MASK_DECLARE_1(x), MASK_DECLARE_1(x+1)
#define MASK_DECLARE_4(x)	MASK_DECLARE_2(x), MASK_DECLARE_2(x+2)
#define MASK_DECLARE_8(x)	MASK_DECLARE_4(x), MASK_DECLARE_4(x+4)

const unsigned long cpu_bit_bitmap[BITS_PER_LONG+1][BITS_TO_LONGS(NR_CPUS)] = {

	MASK_DECLARE_8(0),	MASK_DECLARE_8(8),
	MASK_DECLARE_8(16),	MASK_DECLARE_8(24),
#if BITS_PER_LONG > 32
	MASK_DECLARE_8(32),	MASK_DECLARE_8(40),
	MASK_DECLARE_8(48),	MASK_DECLARE_8(56),
#endif
};
EXPORT_SYMBOL_GPL(cpu_bit_bitmap);
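
/*
 * Illustrative sketch (not part of this file's code): cpumask_of(cpu) maps to
 * get_cpu_mask(cpu) in <linux/cpumask.h>, which indexes the table above
 * roughly as shown below, yielding a read-only mask with exactly one bit set
 * and no allocation. For example, on a 64-bit kernel cpumask_of(66) picks the
 * row whose word 0 is 1UL << 2 and backs the pointer up by one long, so bit
 * 66 of the resulting mask is the only bit set.
 *
 *	const unsigned long *p = cpu_bit_bitmap[1 + cpu % BITS_PER_LONG];
 *	p -= cpu / BITS_PER_LONG;
 *	return to_cpumask(p);
 */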

const DECLARE_BITMAP(cpu_all_bits, NR_CPUS) = CPU_BITS_ALL;
EXPORT_SYMBOL(cpu_all_bits);

#ifdef CONFIG_INIT_ALL_POSSIBLE
struct cpumask __cpu_possible_mask __read_mostly
	= {CPU_BITS_ALL};
#else
struct cpumask __cpu_possible_mask __read_mostly;
#endif
EXPORT_SYMBOL(__cpu_possible_mask);

struct cpumask __cpu_online_mask __read_mostly;
EXPORT_SYMBOL(__cpu_online_mask);

struct cpumask __cpu_present_mask __read_mostly;
EXPORT_SYMBOL(__cpu_present_mask);

struct cpumask __cpu_active_mask __read_mostly;
EXPORT_SYMBOL(__cpu_active_mask);

void init_cpu_present(const struct cpumask *src)
{
	cpumask_copy(&__cpu_present_mask, src);
}

void init_cpu_possible(const struct cpumask *src)
{
	cpumask_copy(&__cpu_possible_mask, src);
}

void init_cpu_online(const struct cpumask *src)
{
	cpumask_copy(&__cpu_online_mask, src);
}

/*
 * Activate the first processor.
 */
void __init boot_cpu_init(void)
{
	int cpu = smp_processor_id();

	/* Mark the boot cpu "present", "online" etc for SMP and UP case */
	set_cpu_online(cpu, true);
	set_cpu_active(cpu, true);
	set_cpu_present(cpu, true);
	set_cpu_possible(cpu, true);
}

/*
 * Must be called _AFTER_ setting up the per_cpu areas
 */
void __init boot_cpu_state_init(void)
{
	per_cpu_ptr(&cpuhp_state, smp_processor_id())->state = CPUHP_ONLINE;
}