git.proxmox.com Git - mirror_ubuntu-artful-kernel.git - arch/s390/kernel/smp.c
Commit: s390/sysinfo,topology: fix cpu topology maximum nesting detection
/*
 * SMP related functions
 *
 * Copyright IBM Corp. 1999, 2012
 * Author(s): Denis Joseph Barrow,
 *            Martin Schwidefsky <schwidefsky@de.ibm.com>,
 *            Heiko Carstens <heiko.carstens@de.ibm.com>,
 *
 * based on other smp stuff by
 * (c) 1995 Alan Cox, CymruNET Ltd <alan@cymru.net>
 * (c) 1998 Ingo Molnar
 *
 * The code outside of smp.c uses logical cpu numbers, only smp.c does
 * the translation of logical to physical cpu ids. All new code that
 * operates on physical cpu numbers needs to go into smp.c.
 */

#define KMSG_COMPONENT "cpu"
#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt

#include <linux/workqueue.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/mm.h>
#include <linux/err.h>
#include <linux/spinlock.h>
#include <linux/kernel_stat.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
#include <linux/irqflags.h>
#include <linux/cpu.h>
#include <linux/slab.h>
#include <linux/crash_dump.h>
#include <asm/asm-offsets.h>
#include <asm/switch_to.h>
#include <asm/facility.h>
#include <asm/ipl.h>
#include <asm/setup.h>
#include <asm/irq.h>
#include <asm/tlbflush.h>
#include <asm/vtimer.h>
#include <asm/lowcore.h>
#include <asm/sclp.h>
#include <asm/vdso.h>
#include <asm/debug.h>
#include <asm/os_info.h>
#include <asm/sigp.h>
#include "entry.h"

enum {
        ec_schedule = 0,
        ec_call_function,
        ec_call_function_single,
        ec_stop_cpu,
};

enum {
        CPU_STATE_STANDBY,
        CPU_STATE_CONFIGURED,
};

struct pcpu {
        struct cpu cpu;
        struct _lowcore *lowcore;       /* lowcore page(s) for the cpu */
        unsigned long async_stack;      /* async stack for the cpu */
        unsigned long panic_stack;      /* panic stack for the cpu */
        unsigned long ec_mask;          /* bit mask for ec_xxx functions */
        int state;                      /* physical cpu state */
        u16 address;                    /* physical cpu address */
};

static u8 boot_cpu_type;
static u16 boot_cpu_address;
static struct pcpu pcpu_devices[NR_CPUS];

DEFINE_MUTEX(smp_cpu_state_mutex);

/*
 * Signal processor helper functions.
 */
static inline int __pcpu_sigp(u16 addr, u8 order, u32 parm, u32 *status)
{
        register unsigned int reg1 asm ("1") = parm;
        int cc;

        asm volatile(
                "       sigp    %1,%2,0(%3)\n"
                "       ipm     %0\n"
                "       srl     %0,28\n"
                : "=d" (cc), "+d" (reg1) : "d" (addr), "a" (order) : "cc");
        if (status && cc == 1)
                *status = reg1;
        return cc;
}
static inline int __pcpu_sigp_relax(u16 addr, u8 order, u32 parm, u32 *status)
{
        int cc;

        while (1) {
                cc = __pcpu_sigp(addr, order, parm, NULL);
                if (cc != SIGP_CC_BUSY)
                        return cc;
                cpu_relax();
        }
}

static int pcpu_sigp_retry(struct pcpu *pcpu, u8 order, u32 parm)
{
        int cc, retry;

        for (retry = 0; ; retry++) {
                cc = __pcpu_sigp(pcpu->address, order, parm, NULL);
                if (cc != SIGP_CC_BUSY)
                        break;
                if (retry >= 3)
                        udelay(10);
        }
        return cc;
}

static inline int pcpu_stopped(struct pcpu *pcpu)
{
        u32 status;

        if (__pcpu_sigp(pcpu->address, SIGP_SENSE,
                        0, &status) != SIGP_CC_STATUS_STORED)
                return 0;
        return !!(status & (SIGP_STATUS_CHECK_STOP|SIGP_STATUS_STOPPED));
}

static inline int pcpu_running(struct pcpu *pcpu)
{
        if (__pcpu_sigp(pcpu->address, SIGP_SENSE_RUNNING,
                        0, NULL) != SIGP_CC_STATUS_STORED)
                return 1;
        /* Status stored condition code is equivalent to cpu not running. */
        return 0;
}

/*
 * Find struct pcpu by cpu address.
 */
static struct pcpu *pcpu_find_address(const struct cpumask *mask, int address)
{
        int cpu;

        for_each_cpu(cpu, mask)
                if (pcpu_devices[cpu].address == address)
                        return pcpu_devices + cpu;
        return NULL;
}

static void pcpu_ec_call(struct pcpu *pcpu, int ec_bit)
{
        int order;

        set_bit(ec_bit, &pcpu->ec_mask);
        order = pcpu_running(pcpu) ?
                SIGP_EXTERNAL_CALL : SIGP_EMERGENCY_SIGNAL;
        pcpu_sigp_retry(pcpu, order, 0);
}

static int __cpuinit pcpu_alloc_lowcore(struct pcpu *pcpu, int cpu)
{
        struct _lowcore *lc;

        if (pcpu != &pcpu_devices[0]) {
                pcpu->lowcore = (struct _lowcore *)
                        __get_free_pages(GFP_KERNEL | GFP_DMA, LC_ORDER);
                pcpu->async_stack = __get_free_pages(GFP_KERNEL, ASYNC_ORDER);
                pcpu->panic_stack = __get_free_page(GFP_KERNEL);
                if (!pcpu->lowcore || !pcpu->panic_stack || !pcpu->async_stack)
                        goto out;
        }
        lc = pcpu->lowcore;
        memcpy(lc, &S390_lowcore, 512);
        memset((char *) lc + 512, 0, sizeof(*lc) - 512);
        lc->async_stack = pcpu->async_stack + ASYNC_SIZE;
        lc->panic_stack = pcpu->panic_stack + PAGE_SIZE;
        lc->cpu_nr = cpu;
#ifndef CONFIG_64BIT
        if (MACHINE_HAS_IEEE) {
                lc->extended_save_area_addr = get_zeroed_page(GFP_KERNEL);
                if (!lc->extended_save_area_addr)
                        goto out;
        }
#else
        if (vdso_alloc_per_cpu(lc))
                goto out;
#endif
        lowcore_ptr[cpu] = lc;
        pcpu_sigp_retry(pcpu, SIGP_SET_PREFIX, (u32)(unsigned long) lc);
        return 0;
out:
        if (pcpu != &pcpu_devices[0]) {
                free_page(pcpu->panic_stack);
                free_pages(pcpu->async_stack, ASYNC_ORDER);
                free_pages((unsigned long) pcpu->lowcore, LC_ORDER);
        }
        return -ENOMEM;
}
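/*
 * Note on the allocation above: the first 512 bytes of the boot cpu's
 * lowcore are copied so the new lowcore starts out with consistent
 * architected fields, and SIGP_SET_PREFIX then installs the freshly
 * allocated page(s) as the target cpu's prefix area, giving that cpu
 * its own private lowcore.
 */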
#ifdef CONFIG_HOTPLUG_CPU

static void pcpu_free_lowcore(struct pcpu *pcpu)
{
        pcpu_sigp_retry(pcpu, SIGP_SET_PREFIX, 0);
        lowcore_ptr[pcpu - pcpu_devices] = NULL;
#ifndef CONFIG_64BIT
        if (MACHINE_HAS_IEEE) {
                struct _lowcore *lc = pcpu->lowcore;

                free_page((unsigned long) lc->extended_save_area_addr);
                lc->extended_save_area_addr = 0;
        }
#else
        vdso_free_per_cpu(pcpu->lowcore);
#endif
        if (pcpu != &pcpu_devices[0]) {
                free_page(pcpu->panic_stack);
                free_pages(pcpu->async_stack, ASYNC_ORDER);
                free_pages((unsigned long) pcpu->lowcore, LC_ORDER);
        }
}

#endif /* CONFIG_HOTPLUG_CPU */

static void pcpu_prepare_secondary(struct pcpu *pcpu, int cpu)
{
        struct _lowcore *lc = pcpu->lowcore;

        atomic_inc(&init_mm.context.attach_count);
        lc->cpu_nr = cpu;
        lc->percpu_offset = __per_cpu_offset[cpu];
        lc->kernel_asce = S390_lowcore.kernel_asce;
        lc->machine_flags = S390_lowcore.machine_flags;
        lc->ftrace_func = S390_lowcore.ftrace_func;
        lc->user_timer = lc->system_timer = lc->steal_timer = 0;
        __ctl_store(lc->cregs_save_area, 0, 15);
        save_access_regs((unsigned int *) lc->access_regs_save_area);
        memcpy(lc->stfle_fac_list, S390_lowcore.stfle_fac_list,
               MAX_FACILITY_BIT/8);
}

static void pcpu_attach_task(struct pcpu *pcpu, struct task_struct *tsk)
{
        struct _lowcore *lc = pcpu->lowcore;
        struct thread_info *ti = task_thread_info(tsk);

        lc->kernel_stack = (unsigned long) task_stack_page(tsk) + THREAD_SIZE;
        lc->thread_info = (unsigned long) task_thread_info(tsk);
        lc->current_task = (unsigned long) tsk;
        lc->user_timer = ti->user_timer;
        lc->system_timer = ti->system_timer;
        lc->steal_timer = 0;
}

static void pcpu_start_fn(struct pcpu *pcpu, void (*func)(void *), void *data)
{
        struct _lowcore *lc = pcpu->lowcore;

        lc->restart_stack = lc->kernel_stack;
        lc->restart_fn = (unsigned long) func;
        lc->restart_data = (unsigned long) data;
        lc->restart_source = -1UL;
        pcpu_sigp_retry(pcpu, SIGP_RESTART, 0);
}

/*
 * Call function via PSW restart on pcpu and stop the current cpu.
 */
static void pcpu_delegate(struct pcpu *pcpu, void (*func)(void *),
                          void *data, unsigned long stack)
{
        struct _lowcore *lc = lowcore_ptr[pcpu - pcpu_devices];
        unsigned long source_cpu = stap();

        __load_psw_mask(psw_kernel_bits);
        if (pcpu->address == source_cpu)
                func(data);     /* should not return */
        /* Stop target cpu (if func returns this stops the current cpu). */
        pcpu_sigp_retry(pcpu, SIGP_STOP, 0);
        /* Restart func on the target cpu and stop the current cpu. */
        mem_assign_absolute(lc->restart_stack, stack);
        mem_assign_absolute(lc->restart_fn, (unsigned long) func);
        mem_assign_absolute(lc->restart_data, (unsigned long) data);
        mem_assign_absolute(lc->restart_source, source_cpu);
        asm volatile(
                "0:     sigp    0,%0,%2 # sigp restart to target cpu\n"
                "       brc     2,0b    # busy, try again\n"
                "1:     sigp    0,%1,%3 # sigp stop to current cpu\n"
                "       brc     2,1b    # busy, try again\n"
                : : "d" (pcpu->address), "d" (source_cpu),
                    "K" (SIGP_RESTART), "K" (SIGP_STOP)
                : "0", "1", "cc");
        for (;;) ;
}
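/*
 * The delegation dance above: the target cpu's restart parameters are
 * written through absolute addressing (i.e. into its prefix area), then
 * a SIGP restart kicks the target into func while a SIGP stop halts the
 * requesting cpu. The "brc 2,..." instructions retry as long as the
 * sigp order returns condition code 2 (busy).
 */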
/*
 * Call function on an online CPU.
 */
void smp_call_online_cpu(void (*func)(void *), void *data)
{
        struct pcpu *pcpu;

        /* Use the current cpu if it is online. */
        pcpu = pcpu_find_address(cpu_online_mask, stap());
        if (!pcpu)
                /* Use the first online cpu. */
                pcpu = pcpu_devices + cpumask_first(cpu_online_mask);
        pcpu_delegate(pcpu, func, data, (unsigned long) restart_stack);
}

/*
 * Call function on the ipl CPU.
 */
void smp_call_ipl_cpu(void (*func)(void *), void *data)
{
        pcpu_delegate(&pcpu_devices[0], func, data,
                      pcpu_devices->panic_stack + PAGE_SIZE);
}

int smp_find_processor_id(u16 address)
{
        int cpu;

        for_each_present_cpu(cpu)
                if (pcpu_devices[cpu].address == address)
                        return cpu;
        return -1;
}

int smp_vcpu_scheduled(int cpu)
{
        return pcpu_running(pcpu_devices + cpu);
}

void smp_yield(void)
{
        if (MACHINE_HAS_DIAG44)
                asm volatile("diag 0,0,0x44");
}

void smp_yield_cpu(int cpu)
{
        if (MACHINE_HAS_DIAG9C)
                asm volatile("diag %0,0,0x9c"
                             : : "d" (pcpu_devices[cpu].address));
        else if (MACHINE_HAS_DIAG44)
                asm volatile("diag 0,0,0x44");
}

/*
 * Send cpus emergency shutdown signal. This gives the cpus the
 * opportunity to complete outstanding interrupts.
 */
void smp_emergency_stop(cpumask_t *cpumask)
{
        u64 end;
        int cpu;

        end = get_clock() + (1000000UL << 12);
        for_each_cpu(cpu, cpumask) {
                struct pcpu *pcpu = pcpu_devices + cpu;
                set_bit(ec_stop_cpu, &pcpu->ec_mask);
                while (__pcpu_sigp(pcpu->address, SIGP_EMERGENCY_SIGNAL,
                                   0, NULL) == SIGP_CC_BUSY &&
                       get_clock() < end)
                        cpu_relax();
        }
        while (get_clock() < end) {
                for_each_cpu(cpu, cpumask)
                        if (pcpu_stopped(pcpu_devices + cpu))
                                cpumask_clear_cpu(cpu, cpumask);
                if (cpumask_empty(cpumask))
                        break;
                cpu_relax();
        }
}
/*
 * Stop all cpus but the current one.
 */
void smp_send_stop(void)
{
        cpumask_t cpumask;
        int cpu;

        /* Disable all interrupts/machine checks */
        __load_psw_mask(psw_kernel_bits | PSW_MASK_DAT);
        trace_hardirqs_off();

        debug_set_critical();
        cpumask_copy(&cpumask, cpu_online_mask);
        cpumask_clear_cpu(smp_processor_id(), &cpumask);

        if (oops_in_progress)
                smp_emergency_stop(&cpumask);

        /* stop all processors */
        for_each_cpu(cpu, &cpumask) {
                struct pcpu *pcpu = pcpu_devices + cpu;
                pcpu_sigp_retry(pcpu, SIGP_STOP, 0);
                while (!pcpu_stopped(pcpu))
                        cpu_relax();
        }
}

/*
 * Stop the current cpu.
 */
void smp_stop_cpu(void)
{
        pcpu_sigp_retry(pcpu_devices + smp_processor_id(), SIGP_STOP, 0);
        for (;;) ;
}

/*
 * This is the main routine where commands issued by other
 * cpus are handled.
 */
static void do_ext_call_interrupt(struct ext_code ext_code,
                                  unsigned int param32, unsigned long param64)
{
        unsigned long bits;
        int cpu;

        cpu = smp_processor_id();
        if (ext_code.code == 0x1202)
                kstat_cpu(cpu).irqs[EXTINT_EXC]++;
        else
                kstat_cpu(cpu).irqs[EXTINT_EMS]++;
        /*
         * handle bit signal external calls
         */
        bits = xchg(&pcpu_devices[cpu].ec_mask, 0);

        if (test_bit(ec_stop_cpu, &bits))
                smp_stop_cpu();

        if (test_bit(ec_schedule, &bits))
                scheduler_ipi();

        if (test_bit(ec_call_function, &bits))
                generic_smp_call_function_interrupt();

        if (test_bit(ec_call_function_single, &bits))
                generic_smp_call_function_single_interrupt();
}
void arch_send_call_function_ipi_mask(const struct cpumask *mask)
{
        int cpu;

        for_each_cpu(cpu, mask)
                pcpu_ec_call(pcpu_devices + cpu, ec_call_function);
}

void arch_send_call_function_single_ipi(int cpu)
{
        pcpu_ec_call(pcpu_devices + cpu, ec_call_function_single);
}

#ifndef CONFIG_64BIT
/*
 * this function sends a 'purge tlb' signal to another CPU.
 */
static void smp_ptlb_callback(void *info)
{
        __tlb_flush_local();
}

void smp_ptlb_all(void)
{
        on_each_cpu(smp_ptlb_callback, NULL, 1);
}
EXPORT_SYMBOL(smp_ptlb_all);
#endif /* ! CONFIG_64BIT */

/*
 * this function sends a 'reschedule' IPI to another CPU.
 * it goes straight through and wastes no time serializing
 * anything. Worst case is that we lose a reschedule ...
 */
void smp_send_reschedule(int cpu)
{
        pcpu_ec_call(pcpu_devices + cpu, ec_schedule);
}

/*
 * parameter area for the set/clear control bit callbacks
 */
struct ec_creg_mask_parms {
        unsigned long orval;
        unsigned long andval;
        int cr;
};

/*
 * callback for setting/clearing control bits
 */
static void smp_ctl_bit_callback(void *info)
{
        struct ec_creg_mask_parms *pp = info;
        unsigned long cregs[16];

        __ctl_store(cregs, 0, 15);
        cregs[pp->cr] = (cregs[pp->cr] & pp->andval) | pp->orval;
        __ctl_load(cregs, 0, 15);
}

/*
 * Set a bit in a control register of all cpus
 */
void smp_ctl_set_bit(int cr, int bit)
{
        struct ec_creg_mask_parms parms = { 1UL << bit, -1UL, cr };

        on_each_cpu(smp_ctl_bit_callback, &parms, 1);
}
EXPORT_SYMBOL(smp_ctl_set_bit);

/*
 * Clear a bit in a control register of all cpus
 */
void smp_ctl_clear_bit(int cr, int bit)
{
        struct ec_creg_mask_parms parms = { 0, ~(1UL << bit), cr };

        on_each_cpu(smp_ctl_bit_callback, &parms, 1);
}
EXPORT_SYMBOL(smp_ctl_clear_bit);
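/*
 * Hypothetical usage sketch (the cr/bit values here are made up for
 * illustration): a caller that needs, say, bit 54 of control register 0
 * set on every cpu would call
 *
 *      smp_ctl_set_bit(0, 54);
 *
 * and undo it later with smp_ctl_clear_bit(0, 54). The read-modify-write
 * runs on each cpu via on_each_cpu(), so the modified register ends up
 * identical machine-wide.
 */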
#if defined(CONFIG_ZFCPDUMP) || defined(CONFIG_CRASH_DUMP)

struct save_area *zfcpdump_save_areas[NR_CPUS + 1];
EXPORT_SYMBOL_GPL(zfcpdump_save_areas);

static void __init smp_get_save_area(int cpu, u16 address)
{
        void *lc = pcpu_devices[0].lowcore;
        struct save_area *save_area;

        if (is_kdump_kernel())
                return;
        if (!OLDMEM_BASE && (address == boot_cpu_address ||
                             ipl_info.type != IPL_TYPE_FCP_DUMP))
                return;
        if (cpu >= NR_CPUS) {
                pr_warning("CPU %i exceeds the maximum %i and is excluded "
                           "from the dump\n", cpu, NR_CPUS - 1);
                return;
        }
        save_area = kmalloc(sizeof(struct save_area), GFP_KERNEL);
        if (!save_area)
                panic("could not allocate memory for save area\n");
        zfcpdump_save_areas[cpu] = save_area;
#ifdef CONFIG_CRASH_DUMP
        if (address == boot_cpu_address) {
                /* Copy the registers of the boot cpu. */
                copy_oldmem_page(1, (void *) save_area, sizeof(*save_area),
                                 SAVE_AREA_BASE - PAGE_SIZE, 0);
                return;
        }
#endif
        /* Get the registers of a non-boot cpu. */
        __pcpu_sigp_relax(address, SIGP_STOP_AND_STORE_STATUS, 0, NULL);
        memcpy_real(save_area, lc + SAVE_AREA_BASE, sizeof(*save_area));
}

int smp_store_status(int cpu)
{
        struct pcpu *pcpu;

        pcpu = pcpu_devices + cpu;
        if (__pcpu_sigp_relax(pcpu->address, SIGP_STOP_AND_STORE_STATUS,
                              0, NULL) != SIGP_CC_ORDER_CODE_ACCEPTED)
                return -EIO;
        return 0;
}

#else /* CONFIG_ZFCPDUMP || CONFIG_CRASH_DUMP */

static inline void smp_get_save_area(int cpu, u16 address) { }

#endif /* CONFIG_ZFCPDUMP || CONFIG_CRASH_DUMP */

static struct sclp_cpu_info *smp_get_cpu_info(void)
{
        static int use_sigp_detection;
        struct sclp_cpu_info *info;
        int address;

        info = kzalloc(sizeof(*info), GFP_KERNEL);
        if (info && (use_sigp_detection || sclp_get_cpu_info(info))) {
                use_sigp_detection = 1;
                for (address = 0; address <= MAX_CPU_ADDRESS; address++) {
                        if (__pcpu_sigp_relax(address, SIGP_SENSE, 0, NULL) ==
                            SIGP_CC_NOT_OPERATIONAL)
                                continue;
                        info->cpu[info->configured].address = address;
                        info->configured++;
                }
                info->combined = info->configured;
        }
        return info;
}
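/*
 * CPU detection normally comes from the SCLP firmware interface; if
 * sclp_get_cpu_info() fails, smp_get_cpu_info() falls back to brute
 * force and probes every possible cpu address with SIGP sense,
 * remembering the choice in use_sigp_detection for later rescans.
 */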
static int __devinit smp_add_present_cpu(int cpu);

static int __devinit __smp_rescan_cpus(struct sclp_cpu_info *info,
                                       int sysfs_add)
{
        struct pcpu *pcpu;
        cpumask_t avail;
        int cpu, nr, i;

        nr = 0;
        cpumask_xor(&avail, cpu_possible_mask, cpu_present_mask);
        cpu = cpumask_first(&avail);
        for (i = 0; (i < info->combined) && (cpu < nr_cpu_ids); i++) {
                if (info->has_cpu_type && info->cpu[i].type != boot_cpu_type)
                        continue;
                if (pcpu_find_address(cpu_present_mask, info->cpu[i].address))
                        continue;
                pcpu = pcpu_devices + cpu;
                pcpu->address = info->cpu[i].address;
                pcpu->state = (cpu >= info->configured) ?
                        CPU_STATE_STANDBY : CPU_STATE_CONFIGURED;
                cpu_set_polarization(cpu, POLARIZATION_UNKNOWN);
                set_cpu_present(cpu, true);
                if (sysfs_add && smp_add_present_cpu(cpu) != 0)
                        set_cpu_present(cpu, false);
                else
                        nr++;
                cpu = cpumask_next(cpu, &avail);
        }
        return nr;
}

static void __init smp_detect_cpus(void)
{
        unsigned int cpu, c_cpus, s_cpus;
        struct sclp_cpu_info *info;

        info = smp_get_cpu_info();
        if (!info)
                panic("smp_detect_cpus failed to allocate memory\n");
        if (info->has_cpu_type) {
                for (cpu = 0; cpu < info->combined; cpu++) {
                        if (info->cpu[cpu].address != boot_cpu_address)
                                continue;
                        /* The boot cpu dictates the cpu type. */
                        boot_cpu_type = info->cpu[cpu].type;
                        break;
                }
        }
        c_cpus = s_cpus = 0;
        for (cpu = 0; cpu < info->combined; cpu++) {
                if (info->has_cpu_type && info->cpu[cpu].type != boot_cpu_type)
                        continue;
                if (cpu < info->configured) {
                        smp_get_save_area(c_cpus, info->cpu[cpu].address);
                        c_cpus++;
                } else
                        s_cpus++;
        }
        pr_info("%d configured CPUs, %d standby CPUs\n", c_cpus, s_cpus);
        get_online_cpus();
        __smp_rescan_cpus(info, 0);
        put_online_cpus();
        kfree(info);
}

/*
 * Activate a secondary processor.
 */
static void __cpuinit smp_start_secondary(void *cpuvoid)
{
        S390_lowcore.last_update_clock = get_clock();
        S390_lowcore.restart_stack = (unsigned long) restart_stack;
        S390_lowcore.restart_fn = (unsigned long) do_restart;
        S390_lowcore.restart_data = 0;
        S390_lowcore.restart_source = -1UL;
        restore_access_regs(S390_lowcore.access_regs_save_area);
        __ctl_load(S390_lowcore.cregs_save_area, 0, 15);
        __load_psw_mask(psw_kernel_bits | PSW_MASK_DAT);
        cpu_init();
        preempt_disable();
        init_cpu_timer();
        init_cpu_vtimer();
        pfault_init();
        notify_cpu_starting(smp_processor_id());
        set_cpu_online(smp_processor_id(), true);
        local_irq_enable();
        /* cpu_idle will call schedule for us */
        cpu_idle();
}

/* Upping and downing of CPUs */
int __cpuinit __cpu_up(unsigned int cpu, struct task_struct *tidle)
{
        struct pcpu *pcpu;
        int rc;

        pcpu = pcpu_devices + cpu;
        if (pcpu->state != CPU_STATE_CONFIGURED)
                return -EIO;
        if (pcpu_sigp_retry(pcpu, SIGP_INITIAL_CPU_RESET, 0) !=
            SIGP_CC_ORDER_CODE_ACCEPTED)
                return -EIO;

        rc = pcpu_alloc_lowcore(pcpu, cpu);
        if (rc)
                return rc;
        pcpu_prepare_secondary(pcpu, cpu);
        pcpu_attach_task(pcpu, tidle);
        pcpu_start_fn(pcpu, smp_start_secondary, NULL);
        while (!cpu_online(cpu))
                cpu_relax();
        return 0;
}

static int __init setup_possible_cpus(char *s)
{
        int max, cpu;

        if (kstrtoint(s, 0, &max) < 0)
                return 0;
        init_cpu_possible(cpumask_of(0));
        for (cpu = 1; cpu < max && cpu < nr_cpu_ids; cpu++)
                set_cpu_possible(cpu, true);
        return 0;
}
early_param("possible_cpus", setup_possible_cpus);
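/*
 * Example: booting with "possible_cpus=4" on the kernel command line
 * limits the possible mask to cpus 0-3, which in turn caps how many
 * standby cpus can later be configured and brought online at runtime.
 */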
#ifdef CONFIG_HOTPLUG_CPU

int __cpu_disable(void)
{
        unsigned long cregs[16];

        set_cpu_online(smp_processor_id(), false);
        /* Disable pseudo page faults on this cpu. */
        pfault_fini();
        /* Disable interrupt sources via control register. */
        __ctl_store(cregs, 0, 15);
        cregs[0] &= ~0x0000ee70UL;      /* disable all external interrupts */
        cregs[6] &= ~0xff000000UL;      /* disable all I/O interrupts */
        cregs[14] &= ~0x1f000000UL;     /* disable most machine checks */
        __ctl_load(cregs, 0, 15);
        return 0;
}

void __cpu_die(unsigned int cpu)
{
        struct pcpu *pcpu;

        /* Wait until target cpu is down */
        pcpu = pcpu_devices + cpu;
        while (!pcpu_stopped(pcpu))
                cpu_relax();
        pcpu_free_lowcore(pcpu);
        atomic_dec(&init_mm.context.attach_count);
}

void __noreturn cpu_die(void)
{
        idle_task_exit();
        pcpu_sigp_retry(pcpu_devices + smp_processor_id(), SIGP_STOP, 0);
        for (;;) ;
}

#endif /* CONFIG_HOTPLUG_CPU */

void __init smp_prepare_cpus(unsigned int max_cpus)
{
        /* request the 0x1201 emergency signal external interrupt */
        if (register_external_interrupt(0x1201, do_ext_call_interrupt) != 0)
                panic("Couldn't request external interrupt 0x1201");
        /* request the 0x1202 external call external interrupt */
        if (register_external_interrupt(0x1202, do_ext_call_interrupt) != 0)
                panic("Couldn't request external interrupt 0x1202");
        smp_detect_cpus();
}

void __init smp_prepare_boot_cpu(void)
{
        struct pcpu *pcpu = pcpu_devices;

        boot_cpu_address = stap();
        pcpu->state = CPU_STATE_CONFIGURED;
        pcpu->address = boot_cpu_address;
        pcpu->lowcore = (struct _lowcore *)(unsigned long) store_prefix();
        pcpu->async_stack = S390_lowcore.async_stack - ASYNC_SIZE;
        pcpu->panic_stack = S390_lowcore.panic_stack - PAGE_SIZE;
        S390_lowcore.percpu_offset = __per_cpu_offset[0];
        cpu_set_polarization(0, POLARIZATION_UNKNOWN);
        set_cpu_present(0, true);
        set_cpu_online(0, true);
}

void __init smp_cpus_done(unsigned int max_cpus)
{
}

void __init smp_setup_processor_id(void)
{
        S390_lowcore.cpu_nr = 0;
}

/*
 * the frequency of the profiling timer can be changed
 * by writing a multiplier value into /proc/profile.
 *
 * usually you want to run this on all CPUs ;)
 */
int setup_profiling_timer(unsigned int multiplier)
{
        return 0;
}

#ifdef CONFIG_HOTPLUG_CPU
static ssize_t cpu_configure_show(struct device *dev,
                                  struct device_attribute *attr, char *buf)
{
        ssize_t count;

        mutex_lock(&smp_cpu_state_mutex);
        count = sprintf(buf, "%d\n", pcpu_devices[dev->id].state);
        mutex_unlock(&smp_cpu_state_mutex);
        return count;
}

static ssize_t cpu_configure_store(struct device *dev,
                                   struct device_attribute *attr,
                                   const char *buf, size_t count)
{
        struct pcpu *pcpu;
        int cpu, val, rc;
        char delim;

        if (sscanf(buf, "%d %c", &val, &delim) != 1)
                return -EINVAL;
        if (val != 0 && val != 1)
                return -EINVAL;
        get_online_cpus();
        mutex_lock(&smp_cpu_state_mutex);
        rc = -EBUSY;
        /* disallow configuration changes of online cpus and cpu 0 */
        cpu = dev->id;
        if (cpu_online(cpu) || cpu == 0)
                goto out;
        pcpu = pcpu_devices + cpu;
        rc = 0;
        switch (val) {
        case 0:
                if (pcpu->state != CPU_STATE_CONFIGURED)
                        break;
                rc = sclp_cpu_deconfigure(pcpu->address);
                if (rc)
                        break;
                pcpu->state = CPU_STATE_STANDBY;
                cpu_set_polarization(cpu, POLARIZATION_UNKNOWN);
                topology_expect_change();
                break;
        case 1:
                if (pcpu->state != CPU_STATE_STANDBY)
                        break;
                rc = sclp_cpu_configure(pcpu->address);
                if (rc)
                        break;
                pcpu->state = CPU_STATE_CONFIGURED;
                cpu_set_polarization(cpu, POLARIZATION_UNKNOWN);
                topology_expect_change();
                break;
        default:
                break;
        }
out:
        mutex_unlock(&smp_cpu_state_mutex);
        put_online_cpus();
        return rc ? rc : count;
}
static DEVICE_ATTR(configure, 0644, cpu_configure_show, cpu_configure_store);
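/*
 * From user space this looks like (assuming cpu 2 is standby and offline):
 *
 *      echo 1 > /sys/devices/system/cpu/cpu2/configure   # configure
 *      echo 0 > /sys/devices/system/cpu/cpu2/configure   # deconfigure
 *
 * Only 0 and 1 are accepted, and online cpus as well as cpu 0 are
 * rejected with -EBUSY.
 */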
#endif /* CONFIG_HOTPLUG_CPU */

static ssize_t show_cpu_address(struct device *dev,
                                struct device_attribute *attr, char *buf)
{
        return sprintf(buf, "%d\n", pcpu_devices[dev->id].address);
}
static DEVICE_ATTR(address, 0444, show_cpu_address, NULL);

static struct attribute *cpu_common_attrs[] = {
#ifdef CONFIG_HOTPLUG_CPU
        &dev_attr_configure.attr,
#endif
        &dev_attr_address.attr,
        NULL,
};

static struct attribute_group cpu_common_attr_group = {
        .attrs = cpu_common_attrs,
};

static ssize_t show_idle_count(struct device *dev,
                               struct device_attribute *attr, char *buf)
{
        struct s390_idle_data *idle = &per_cpu(s390_idle, dev->id);
        unsigned long long idle_count;
        unsigned int sequence;

        do {
                sequence = ACCESS_ONCE(idle->sequence);
                idle_count = ACCESS_ONCE(idle->idle_count);
                if (ACCESS_ONCE(idle->clock_idle_enter))
                        idle_count++;
        } while ((sequence & 1) || (idle->sequence != sequence));
        return sprintf(buf, "%llu\n", idle_count);
}
static DEVICE_ATTR(idle_count, 0444, show_idle_count, NULL);

static ssize_t show_idle_time(struct device *dev,
                              struct device_attribute *attr, char *buf)
{
        struct s390_idle_data *idle = &per_cpu(s390_idle, dev->id);
        unsigned long long now, idle_time, idle_enter, idle_exit;
        unsigned int sequence;

        do {
                now = get_clock();
                sequence = ACCESS_ONCE(idle->sequence);
                idle_time = ACCESS_ONCE(idle->idle_time);
                idle_enter = ACCESS_ONCE(idle->clock_idle_enter);
                idle_exit = ACCESS_ONCE(idle->clock_idle_exit);
        } while ((sequence & 1) || (idle->sequence != sequence));
        idle_time += idle_enter ? ((idle_exit ? : now) - idle_enter) : 0;
        return sprintf(buf, "%llu\n", idle_time >> 12);
}
static DEVICE_ATTR(idle_time_us, 0444, show_idle_time, NULL);
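/*
 * Both idle attributes use a hand-rolled seqcount: the updater bumps
 * idle->sequence to an odd value while it rewrites the fields, so the
 * readers above loop until they see the same even sequence before and
 * after the reads. The >> 12 converts the TOD-clock delta to
 * microseconds.
 */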
static struct attribute *cpu_online_attrs[] = {
        &dev_attr_idle_count.attr,
        &dev_attr_idle_time_us.attr,
        NULL,
};

static struct attribute_group cpu_online_attr_group = {
        .attrs = cpu_online_attrs,
};

static int __cpuinit smp_cpu_notify(struct notifier_block *self,
                                    unsigned long action, void *hcpu)
{
        unsigned int cpu = (unsigned int)(long)hcpu;
        struct cpu *c = &pcpu_devices[cpu].cpu;
        struct device *s = &c->dev;
        int err = 0;

        switch (action & ~CPU_TASKS_FROZEN) {
        case CPU_ONLINE:
                err = sysfs_create_group(&s->kobj, &cpu_online_attr_group);
                break;
        case CPU_DEAD:
                sysfs_remove_group(&s->kobj, &cpu_online_attr_group);
                break;
        }
        return notifier_from_errno(err);
}

static int __devinit smp_add_present_cpu(int cpu)
{
        struct cpu *c = &pcpu_devices[cpu].cpu;
        struct device *s = &c->dev;
        int rc;

        c->hotpluggable = 1;
        rc = register_cpu(c, cpu);
        if (rc)
                goto out;
        rc = sysfs_create_group(&s->kobj, &cpu_common_attr_group);
        if (rc)
                goto out_cpu;
        if (cpu_online(cpu)) {
                rc = sysfs_create_group(&s->kobj, &cpu_online_attr_group);
                if (rc)
                        goto out_online;
        }
        rc = topology_cpu_init(c);
        if (rc)
                goto out_topology;
        return 0;

out_topology:
        if (cpu_online(cpu))
                sysfs_remove_group(&s->kobj, &cpu_online_attr_group);
out_online:
        sysfs_remove_group(&s->kobj, &cpu_common_attr_group);
out_cpu:
#ifdef CONFIG_HOTPLUG_CPU
        unregister_cpu(c);
#endif
out:
        return rc;
}

#ifdef CONFIG_HOTPLUG_CPU

int __ref smp_rescan_cpus(void)
{
        struct sclp_cpu_info *info;
        int nr;

        info = smp_get_cpu_info();
        if (!info)
                return -ENOMEM;
        get_online_cpus();
        mutex_lock(&smp_cpu_state_mutex);
        nr = __smp_rescan_cpus(info, 1);
        mutex_unlock(&smp_cpu_state_mutex);
        put_online_cpus();
        kfree(info);
        if (nr)
                topology_schedule_update();
        return 0;
}

static ssize_t __ref rescan_store(struct device *dev,
                                  struct device_attribute *attr,
                                  const char *buf,
                                  size_t count)
{
        int rc;

        rc = smp_rescan_cpus();
        return rc ? rc : count;
}
static DEVICE_ATTR(rescan, 0200, NULL, rescan_store);
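/*
 * The rescan attribute ignores what is written; any write, e.g.
 *
 *      echo 1 > /sys/devices/system/cpu/rescan
 *
 * re-queries SCLP for new cpus, registers any that appeared as standby
 * cpus, and kicks a topology update.
 */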
#endif /* CONFIG_HOTPLUG_CPU */

static int __init s390_smp_init(void)
{
        int cpu, rc;

        hotcpu_notifier(smp_cpu_notify, 0);
#ifdef CONFIG_HOTPLUG_CPU
        rc = device_create_file(cpu_subsys.dev_root, &dev_attr_rescan);
        if (rc)
                return rc;
#endif
        for_each_present_cpu(cpu) {
                rc = smp_add_present_cpu(cpu);
                if (rc)
                        return rc;
        }
        return 0;
}
subsys_initcall(s390_smp_init);