/*
 * SMP related functions
 *
 * Copyright IBM Corp. 1999, 2012
 * Author(s): Denis Joseph Barrow,
 *	      Martin Schwidefsky <schwidefsky@de.ibm.com>,
 *	      Heiko Carstens <heiko.carstens@de.ibm.com>,
 *
 * based on other smp stuff by
 * (c) 1995 Alan Cox, CymruNET Ltd <alan@cymru.net>
 * (c) 1998 Ingo Molnar
 *
 * The code outside of smp.c uses logical cpu numbers, only smp.c does
 * the translation of logical to physical cpu ids. All new code that
 * operates on physical cpu numbers needs to go into smp.c.
 */

#define KMSG_COMPONENT "cpu"
#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
#include <linux/workqueue.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/err.h>
#include <linux/spinlock.h>
#include <linux/kernel_stat.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
#include <linux/irqflags.h>
#include <linux/cpu.h>
#include <linux/slab.h>
#include <linux/crash_dump.h>
#include <asm/asm-offsets.h>
#include <asm/switch_to.h>
#include <asm/facility.h>
#include <asm/setup.h>
#include <asm/tlbflush.h>
#include <asm/timer.h>
#include <asm/lowcore.h>
#include <asm/debug.h>
#include <asm/os_info.h>
enum {
	sigp_sense = 1,
	sigp_external_call = 2,
	sigp_emergency_signal = 3,
	sigp_stop = 5,
	sigp_restart = 6,
	sigp_stop_and_store_status = 9,
	sigp_initial_cpu_reset = 11,
	sigp_set_prefix = 13,
	sigp_store_status_at_address = 14,
	sigp_store_extended_status_at_address = 15,
	sigp_set_architecture = 18,
	sigp_conditional_emergency_signal = 19,
	sigp_sense_running = 21,
};

enum {
	sigp_order_code_accepted = 0,
	sigp_status_stored = 1,
	sigp_busy = 2,
	sigp_not_operational = 3,
};

enum {
	ec_schedule = 0,
	ec_call_function,
	ec_call_function_single,
	ec_stop_cpu,
};

enum {
	CPU_STATE_STANDBY,
	CPU_STATE_CONFIGURED,
};
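/*
 * A cpu is signalled with an emergency signal (0x1201) or an external
 * call (0x1202) external interrupt; the actual work to do is encoded
 * in the ec_ bits of the target's ec_mask and picked up by
 * do_ext_call_interrupt() with xchg().
 */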
struct pcpu {
	struct cpu cpu;
	struct _lowcore *lowcore;	/* lowcore page(s) for the cpu */
	unsigned long async_stack;	/* async stack for the cpu */
	unsigned long panic_stack;	/* panic stack for the cpu */
	unsigned long ec_mask;		/* bit mask for ec_xxx functions */
	int state;			/* physical cpu state */
	u32 status;			/* last status received via sigp */
	u16 address;			/* physical cpu address */
};

static u8 boot_cpu_type;
static u16 boot_cpu_address;
static struct pcpu pcpu_devices[NR_CPUS];

DEFINE_MUTEX(smp_cpu_state_mutex);
/*
 * Signal processor helper functions.
 */
static inline int __pcpu_sigp(u16 addr, u8 order, u32 parm, u32 *status)
{
	register unsigned int reg1 asm ("1") = parm;
	int cc;

	asm volatile(
		"	sigp	%1,%2,0(%3)\n"
		"	ipm	%0\n"
		"	srl	%0,28\n"
		: "=d" (cc), "+d" (reg1) : "d" (addr), "a" (order) : "cc");
	if (status && cc == 1)
		*status = reg1;
	return cc;
}
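/*
 * The sigp condition code is extracted with ipm/srl: cc 0 maps to
 * sigp_order_code_accepted, cc 1 to sigp_status_stored (reg1 then
 * holds the status word), cc 2 to sigp_busy and cc 3 to
 * sigp_not_operational. The two wrappers below simply retry for as
 * long as the target cpu reports busy.
 */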
static inline int __pcpu_sigp_relax(u16 addr, u8 order, u32 parm, u32 *status)
{
	int cc;

	while (1) {
		cc = __pcpu_sigp(addr, order, parm, status);
		if (cc != sigp_busy)
			return cc;
		cpu_relax();
	}
}
static int pcpu_sigp_retry(struct pcpu *pcpu, u8 order, u32 parm)
{
	int cc, retry;

	for (retry = 0; ; retry++) {
		cc = __pcpu_sigp(pcpu->address, order, parm, &pcpu->status);
		if (cc != sigp_busy)
			break;
		if (retry >= 3)
			udelay(10);
	}
	return cc;
}
static inline int pcpu_stopped(struct pcpu *pcpu)
{
	if (__pcpu_sigp(pcpu->address, sigp_sense,
			0, &pcpu->status) != sigp_status_stored)
		return 0;
	/* Check for stopped and check stop state */
	return !!(pcpu->status & 0x50);
}
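/*
 * The status word returned for sigp sense has the stopped state in
 * bit 0x40 and the check-stop state in bit 0x10, hence the 0x50 mask
 * above; for sigp sense running, bit 0x400 set means the cpu is
 * currently not running.
 */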
static inline int pcpu_running(struct pcpu *pcpu)
{
	if (__pcpu_sigp(pcpu->address, sigp_sense_running,
			0, &pcpu->status) != sigp_status_stored)
		return 1;
	/* Check for running status */
	return !(pcpu->status & 0x400);
}
/*
 * Find struct pcpu by cpu address.
 */
static struct pcpu *pcpu_find_address(const struct cpumask *mask, int address)
{
	int cpu;

	for_each_cpu(cpu, mask)
		if (pcpu_devices[cpu].address == address)
			return pcpu_devices + cpu;
	return NULL;
}
static void pcpu_ec_call(struct pcpu *pcpu, int ec_bit)
{
	int order;

	set_bit(ec_bit, &pcpu->ec_mask);
	order = pcpu_running(pcpu) ?
		sigp_external_call : sigp_emergency_signal;
	pcpu_sigp_retry(pcpu, order, 0);
}
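/*
 * The requested work is published in ec_mask first; the signal order
 * then depends on the target's state: a running cpu is poked with an
 * external call, a waiting or stopped cpu with an emergency signal.
 * Both end up in do_ext_call_interrupt() below.
 */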
static int __cpuinit pcpu_alloc_lowcore(struct pcpu *pcpu, int cpu)
{
	struct _lowcore *lc;

	if (pcpu != &pcpu_devices[0]) {
		pcpu->lowcore =	(struct _lowcore *)
			__get_free_pages(GFP_KERNEL | GFP_DMA, LC_ORDER);
		pcpu->async_stack = __get_free_pages(GFP_KERNEL, ASYNC_ORDER);
		pcpu->panic_stack = __get_free_page(GFP_KERNEL);
		if (!pcpu->lowcore || !pcpu->panic_stack || !pcpu->async_stack)
			goto out;
	}
	lc = pcpu->lowcore;
	memcpy(lc, &S390_lowcore, 512);
	memset((char *) lc + 512, 0, sizeof(*lc) - 512);
	lc->async_stack = pcpu->async_stack + ASYNC_SIZE;
	lc->panic_stack = pcpu->panic_stack + PAGE_SIZE;
	lc->cpu_nr = cpu;
#ifndef CONFIG_64BIT
	if (MACHINE_HAS_IEEE) {
		lc->extended_save_area_addr = get_zeroed_page(GFP_KERNEL);
		if (!lc->extended_save_area_addr)
			goto out;
	}
#else
	if (vdso_alloc_per_cpu(lc))
		goto out;
#endif
	lowcore_ptr[cpu] = lc;
	pcpu_sigp_retry(pcpu, sigp_set_prefix, (u32)(unsigned long) lc);
	return 0;
out:
	if (pcpu != &pcpu_devices[0]) {
		free_page(pcpu->panic_stack);
		free_pages(pcpu->async_stack, ASYNC_ORDER);
		free_pages((unsigned long) pcpu->lowcore, LC_ORDER);
	}
	return -ENOMEM;
}
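/*
 * Only the first 512 bytes of the lowcore are inherited from the
 * boot cpu; the rest of the new lowcore starts out zeroed. The sigp
 * set-prefix order then installs the new lowcore as the target cpu's
 * prefix page.
 */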
#ifdef CONFIG_HOTPLUG_CPU

static void pcpu_free_lowcore(struct pcpu *pcpu)
{
	pcpu_sigp_retry(pcpu, sigp_set_prefix, 0);
	lowcore_ptr[pcpu - pcpu_devices] = NULL;
#ifndef CONFIG_64BIT
	if (MACHINE_HAS_IEEE) {
		struct _lowcore *lc = pcpu->lowcore;

		free_page((unsigned long) lc->extended_save_area_addr);
		lc->extended_save_area_addr = 0;
	}
#else
	vdso_free_per_cpu(pcpu->lowcore);
#endif
	if (pcpu != &pcpu_devices[0]) {
		free_page(pcpu->panic_stack);
		free_pages(pcpu->async_stack, ASYNC_ORDER);
		free_pages((unsigned long) pcpu->lowcore, LC_ORDER);
	}
}

#endif /* CONFIG_HOTPLUG_CPU */
static void pcpu_prepare_secondary(struct pcpu *pcpu, int cpu)
{
	struct _lowcore *lc = pcpu->lowcore;

	atomic_inc(&init_mm.context.attach_count);
	lc->cpu_nr = cpu;
	lc->percpu_offset = __per_cpu_offset[cpu];
	lc->kernel_asce = S390_lowcore.kernel_asce;
	lc->machine_flags = S390_lowcore.machine_flags;
	lc->ftrace_func = S390_lowcore.ftrace_func;
	lc->user_timer = lc->system_timer = lc->steal_timer = 0;
	__ctl_store(lc->cregs_save_area, 0, 15);
	save_access_regs((unsigned int *) lc->access_regs_save_area);
	memcpy(lc->stfle_fac_list, S390_lowcore.stfle_fac_list,
	       MAX_FACILITY_BIT/8);
}
static void pcpu_attach_task(struct pcpu *pcpu, struct task_struct *tsk)
{
	struct _lowcore *lc = pcpu->lowcore;
	struct thread_info *ti = task_thread_info(tsk);

	lc->kernel_stack = (unsigned long) task_stack_page(tsk) + THREAD_SIZE;
	lc->thread_info = (unsigned long) task_thread_info(tsk);
	lc->current_task = (unsigned long) tsk;
	lc->user_timer = ti->user_timer;
	lc->system_timer = ti->system_timer;
	lc->steal_timer = 0;
}
static void pcpu_start_fn(struct pcpu *pcpu, void (*func)(void *), void *data)
{
	struct _lowcore *lc = pcpu->lowcore;

	lc->restart_stack = lc->kernel_stack;
	lc->restart_fn = (unsigned long) func;
	lc->restart_data = (unsigned long) data;
	lc->restart_source = -1UL;
	pcpu_sigp_retry(pcpu, sigp_restart, 0);
}
/*
 * Call function via PSW restart on pcpu and stop the current cpu.
 */
static void pcpu_delegate(struct pcpu *pcpu, void (*func)(void *),
			  void *data, unsigned long stack)
{
	struct _lowcore *lc = lowcore_ptr[pcpu - pcpu_devices];
	struct {
		unsigned long stack;
		void *func;
		void *data;
		unsigned long source;
	} restart = { stack, func, data, stap() };

	__load_psw_mask(psw_kernel_bits);
	if (pcpu->address == restart.source)
		func(data);	/* should not return */
	/* Stop target cpu (if func returns this stops the current cpu). */
	pcpu_sigp_retry(pcpu, sigp_stop, 0);
	/* Restart func on the target cpu and stop the current cpu. */
	memcpy_absolute(&lc->restart_stack, &restart, sizeof(restart));
	asm volatile(
		"0:	sigp	0,%0,6	# sigp restart to target cpu\n"
		"	brc	2,0b	# busy, try again\n"
		"1:	sigp	0,%1,5	# sigp stop to current cpu\n"
		"	brc	2,1b	# busy, try again\n"
		: : "d" (pcpu->address), "d" (restart.source) : "0", "1", "cc");
	for (;;) ;
}
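/*
 * memcpy_absolute() stores the restart parameter block at its
 * absolute address, so the target cpu sees it no matter which prefix
 * page is installed there. The inline sigp restart/stop pair then
 * kicks off func on the target and halts the current cpu; brc 2
 * retries while the condition code signals busy.
 */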
/*
 * Call function on an online CPU.
 */
void smp_call_online_cpu(void (*func)(void *), void *data)
{
	struct pcpu *pcpu;

	/* Use the current cpu if it is online. */
	pcpu = pcpu_find_address(cpu_online_mask, stap());
	if (!pcpu)
		/* Use the first online cpu. */
		pcpu = pcpu_devices + cpumask_first(cpu_online_mask);
	pcpu_delegate(pcpu, func, data, (unsigned long) restart_stack);
}
/*
 * Call function on the ipl CPU.
 */
void smp_call_ipl_cpu(void (*func)(void *), void *data)
{
	pcpu_delegate(&pcpu_devices[0], func, data,
		      pcpu_devices->panic_stack + PAGE_SIZE);
}
int smp_find_processor_id(u16 address)
{
	int cpu;

	for_each_present_cpu(cpu)
		if (pcpu_devices[cpu].address == address)
			return cpu;
	return -1;
}
int smp_vcpu_scheduled(int cpu)
{
	return pcpu_running(pcpu_devices + cpu);
}

void smp_yield(void)
{
	if (MACHINE_HAS_DIAG44)
		asm volatile("diag 0,0,0x44");
}
void smp_yield_cpu(int cpu)
{
	if (MACHINE_HAS_DIAG9C)
		asm volatile("diag %0,0,0x9c"
			     : : "d" (pcpu_devices[cpu].address));
	else if (MACHINE_HAS_DIAG44)
		asm volatile("diag 0,0,0x44");
}
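/*
 * Under a hypervisor, diagnose 0x9c is a directed yield of the
 * remaining time slice to the cpu with the given physical address,
 * while diagnose 0x44 just gives up the time slice without naming
 * a target.
 */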
/*
 * Send cpus emergency shutdown signal. This gives the cpus the
 * opportunity to complete outstanding interrupts.
 */
void smp_emergency_stop(cpumask_t *cpumask)
{
	u64 end;
	int cpu;

	end = get_clock() + (1000000UL << 12);
	for_each_cpu(cpu, cpumask) {
		struct pcpu *pcpu = pcpu_devices + cpu;
		set_bit(ec_stop_cpu, &pcpu->ec_mask);
		while (__pcpu_sigp(pcpu->address, sigp_emergency_signal,
				   0, NULL) == sigp_busy &&
		       get_clock() < end)
			cpu_relax();
	}
	while (get_clock() < end) {
		for_each_cpu(cpu, cpumask)
			if (pcpu_stopped(pcpu_devices + cpu))
				cpumask_clear_cpu(cpu, cpumask);
		if (cpumask_empty(cpumask))
			break;
		cpu_relax();
	}
}
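/*
 * Bit 51 of the TOD clock ticks once per microsecond, so a value
 * shifted left by 12 is a microsecond count in TOD format and
 * 1000000UL << 12 above is a one second deadline for the stop
 * handshake.
 */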
/*
 * Stop all cpus but the current one.
 */
void smp_send_stop(void)
{
	cpumask_t cpumask;
	int cpu;

	/* Disable all interrupts/machine checks */
	__load_psw_mask(psw_kernel_bits | PSW_MASK_DAT);
	trace_hardirqs_off();

	debug_set_critical();
	cpumask_copy(&cpumask, cpu_online_mask);
	cpumask_clear_cpu(smp_processor_id(), &cpumask);

	if (oops_in_progress)
		smp_emergency_stop(&cpumask);

	/* stop all processors */
	for_each_cpu(cpu, &cpumask) {
		struct pcpu *pcpu = pcpu_devices + cpu;
		pcpu_sigp_retry(pcpu, sigp_stop, 0);
		while (!pcpu_stopped(pcpu))
			cpu_relax();
	}
}
/*
 * Stop the current cpu.
 */
void smp_stop_cpu(void)
{
	pcpu_sigp_retry(pcpu_devices + smp_processor_id(), sigp_stop, 0);
	for (;;) ;
}
/*
 * This is the main routine where commands issued by other
 * cpus are handled.
 */
static void do_ext_call_interrupt(struct ext_code ext_code,
				  unsigned int param32, unsigned long param64)
{
	unsigned long bits;
	int cpu;

	cpu = smp_processor_id();
	if (ext_code.code == 0x1202)
		kstat_cpu(cpu).irqs[EXTINT_EXC]++;
	else
		kstat_cpu(cpu).irqs[EXTINT_EMS]++;
	/*
	 * handle bit signal external calls
	 */
	bits = xchg(&pcpu_devices[cpu].ec_mask, 0);

	if (test_bit(ec_stop_cpu, &bits))
		smp_stop_cpu();

	if (test_bit(ec_schedule, &bits))
		scheduler_ipi();

	if (test_bit(ec_call_function, &bits))
		generic_smp_call_function_interrupt();

	if (test_bit(ec_call_function_single, &bits))
		generic_smp_call_function_single_interrupt();
}
void arch_send_call_function_ipi_mask(const struct cpumask *mask)
{
	int cpu;

	for_each_cpu(cpu, mask)
		pcpu_ec_call(pcpu_devices + cpu, ec_call_function);
}
void arch_send_call_function_single_ipi(int cpu)
{
	pcpu_ec_call(pcpu_devices + cpu, ec_call_function_single);
}
#ifndef CONFIG_64BIT
/*
 * this function sends a 'purge tlb' signal to another CPU.
 */
static void smp_ptlb_callback(void *info)
{
	__tlb_flush_local();
}

void smp_ptlb_all(void)
{
	on_each_cpu(smp_ptlb_callback, NULL, 1);
}
EXPORT_SYMBOL(smp_ptlb_all);
#endif /* ! CONFIG_64BIT */
/*
 * this function sends a 'reschedule' IPI to another CPU.
 * it goes straight through and wastes no time serializing
 * anything. Worst case is that we lose a reschedule ...
 */
void smp_send_reschedule(int cpu)
{
	pcpu_ec_call(pcpu_devices + cpu, ec_schedule);
}
/*
 * parameter area for the set/clear control bit callbacks
 */
struct ec_creg_mask_parms {
	unsigned long orval;
	unsigned long andval;
	int cr;
};
/*
 * callback for setting/clearing control bits
 */
static void smp_ctl_bit_callback(void *info)
{
	struct ec_creg_mask_parms *pp = info;
	unsigned long cregs[16];

	__ctl_store(cregs, 0, 15);
	cregs[pp->cr] = (cregs[pp->cr] & pp->andval) | pp->orval;
	__ctl_load(cregs, 0, 15);
}
/*
 * Set a bit in a control register of all cpus
 */
void smp_ctl_set_bit(int cr, int bit)
{
	struct ec_creg_mask_parms parms = { 1UL << bit, -1UL, cr };

	on_each_cpu(smp_ctl_bit_callback, &parms, 1);
}
EXPORT_SYMBOL(smp_ctl_set_bit);
/*
 * Clear a bit in a control register of all cpus
 */
void smp_ctl_clear_bit(int cr, int bit)
{
	struct ec_creg_mask_parms parms = { 0, ~(1UL << bit), cr };

	on_each_cpu(smp_ctl_bit_callback, &parms, 1);
}
EXPORT_SYMBOL(smp_ctl_clear_bit);
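/*
 * Illustrative use: smp_ctl_set_bit(0, 13) would run
 * smp_ctl_bit_callback() on each online cpu and OR the 1UL << 13
 * mask into control register 0 everywhere, while smp_ctl_clear_bit()
 * ANDs the inverted mask instead.
 */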
#if defined(CONFIG_ZFCPDUMP) || defined(CONFIG_CRASH_DUMP)

struct save_area *zfcpdump_save_areas[NR_CPUS + 1];
EXPORT_SYMBOL_GPL(zfcpdump_save_areas);

static void __init smp_get_save_area(int cpu, u16 address)
{
	void *lc = pcpu_devices[0].lowcore;
	struct save_area *save_area;

	if (is_kdump_kernel())
		return;
	if (!OLDMEM_BASE && (address == boot_cpu_address ||
			     ipl_info.type != IPL_TYPE_FCP_DUMP))
		return;
	if (cpu >= NR_CPUS) {
		pr_warning("CPU %i exceeds the maximum %i and is excluded "
			   "from the dump\n", cpu, NR_CPUS - 1);
		return;
	}
	save_area = kmalloc(sizeof(struct save_area), GFP_KERNEL);
	if (!save_area)
		panic("could not allocate memory for save area\n");
	zfcpdump_save_areas[cpu] = save_area;
#ifdef CONFIG_CRASH_DUMP
	if (address == boot_cpu_address) {
		/* Copy the registers of the boot cpu. */
		copy_oldmem_page(1, (void *) save_area, sizeof(*save_area),
				 SAVE_AREA_BASE - PAGE_SIZE, 0);
		return;
	}
#endif
	/* Get the registers of a non-boot cpu. */
	__pcpu_sigp_relax(address, sigp_stop_and_store_status, 0, NULL);
	memcpy_real(save_area, lc + SAVE_AREA_BASE, sizeof(*save_area));
}
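/*
 * A non-boot cpu is stopped with sigp stop-and-store-status, which
 * makes it dump its registers into the save area at SAVE_AREA_BASE;
 * memcpy_real() then copies that area into the zfcpdump_save_areas[]
 * slot allocated above.
 */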
int smp_store_status(int cpu)
{
	struct pcpu *pcpu;

	pcpu = pcpu_devices + cpu;
	if (__pcpu_sigp_relax(pcpu->address, sigp_stop_and_store_status,
			      0, NULL) != sigp_order_code_accepted)
		return -EIO;
	return 0;
}

#else /* CONFIG_ZFCPDUMP || CONFIG_CRASH_DUMP */

static inline void smp_get_save_area(int cpu, u16 address) { }

#endif /* CONFIG_ZFCPDUMP || CONFIG_CRASH_DUMP */
static struct sclp_cpu_info *smp_get_cpu_info(void)
{
	static int use_sigp_detection;
	struct sclp_cpu_info *info;
	int address;

	info = kzalloc(sizeof(*info), GFP_KERNEL);
	if (info && (use_sigp_detection || sclp_get_cpu_info(info))) {
		use_sigp_detection = 1;
		for (address = 0; address <= MAX_CPU_ADDRESS; address++) {
			if (__pcpu_sigp_relax(address, sigp_sense, 0, NULL) ==
			    sigp_not_operational)
				continue;
			info->cpu[info->configured].address = address;
			info->configured++;
		}
		info->combined = info->configured;
	}
	return info;
}
static int __devinit smp_add_present_cpu(int cpu);

static int __devinit __smp_rescan_cpus(struct sclp_cpu_info *info,
				       int sysfs_add)
{
	struct pcpu *pcpu;
	cpumask_t avail;
	int cpu, nr, i;

	nr = 0;
	cpumask_xor(&avail, cpu_possible_mask, cpu_present_mask);
	cpu = cpumask_first(&avail);
	for (i = 0; (i < info->combined) && (cpu < nr_cpu_ids); i++) {
		if (info->has_cpu_type && info->cpu[i].type != boot_cpu_type)
			continue;
		if (pcpu_find_address(cpu_present_mask, info->cpu[i].address))
			continue;
		pcpu = pcpu_devices + cpu;
		pcpu->address = info->cpu[i].address;
		pcpu->state = (cpu >= info->configured) ?
			CPU_STATE_STANDBY : CPU_STATE_CONFIGURED;
		cpu_set_polarization(cpu, POLARIZATION_UNKNOWN);
		set_cpu_present(cpu, true);
		if (sysfs_add && smp_add_present_cpu(cpu) != 0)
			set_cpu_present(cpu, false);
		else
			nr++;
		cpu = cpumask_next(cpu, &avail);
	}
	return nr;
}
static void __init smp_detect_cpus(void)
{
	unsigned int cpu, c_cpus, s_cpus;
	struct sclp_cpu_info *info;

	info = smp_get_cpu_info();
	if (!info)
		panic("smp_detect_cpus failed to allocate memory\n");
	if (info->has_cpu_type) {
		for (cpu = 0; cpu < info->combined; cpu++) {
			if (info->cpu[cpu].address != boot_cpu_address)
				continue;
			/* The boot cpu dictates the cpu type. */
			boot_cpu_type = info->cpu[cpu].type;
			break;
		}
	}
	c_cpus = s_cpus = 0;
	for (cpu = 0; cpu < info->combined; cpu++) {
		if (info->has_cpu_type && info->cpu[cpu].type != boot_cpu_type)
			continue;
		if (cpu < info->configured) {
			smp_get_save_area(c_cpus, info->cpu[cpu].address);
			c_cpus++;
		} else
			s_cpus++;
	}
	pr_info("%d configured CPUs, %d standby CPUs\n", c_cpus, s_cpus);
	get_online_cpus();
	__smp_rescan_cpus(info, 0);
	put_online_cpus();
	kfree(info);
}
/*
 * Activate a secondary processor.
 */
static void __cpuinit smp_start_secondary(void *cpuvoid)
{
	S390_lowcore.last_update_clock = get_clock();
	S390_lowcore.restart_stack = (unsigned long) restart_stack;
	S390_lowcore.restart_fn = (unsigned long) do_restart;
	S390_lowcore.restart_data = 0;
	S390_lowcore.restart_source = -1UL;
	restore_access_regs(S390_lowcore.access_regs_save_area);
	__ctl_load(S390_lowcore.cregs_save_area, 0, 15);
	__load_psw_mask(psw_kernel_bits | PSW_MASK_DAT);
	cpu_init();
	preempt_disable();
	init_cpu_timer();
	init_cpu_vtimer();
	pfault_init();
	notify_cpu_starting(smp_processor_id());
	ipi_call_lock();
	set_cpu_online(smp_processor_id(), true);
	ipi_call_unlock();
	local_irq_enable();
	/* cpu_idle will call schedule for us */
	cpu_idle();
}
/* Upping and downing of CPUs */
int __cpuinit __cpu_up(unsigned int cpu, struct task_struct *tidle)
{
	struct pcpu *pcpu;
	int rc;

	pcpu = pcpu_devices + cpu;
	if (pcpu->state != CPU_STATE_CONFIGURED)
		return -EIO;
	if (pcpu_sigp_retry(pcpu, sigp_initial_cpu_reset, 0) !=
	    sigp_order_code_accepted)
		return -EIO;

	rc = pcpu_alloc_lowcore(pcpu, cpu);
	if (rc)
		return rc;
	pcpu_prepare_secondary(pcpu, cpu);
	pcpu_attach_task(pcpu, tidle);
	pcpu_start_fn(pcpu, smp_start_secondary, NULL);
	while (!cpu_online(cpu))
		cpu_relax();
	return 0;
}
static int __init setup_possible_cpus(char *s)
{
	int max, cpu;

	if (kstrtoint(s, 0, &max) < 0)
		return 0;
	init_cpu_possible(cpumask_of(0));
	for (cpu = 1; cpu < max && cpu < nr_cpu_ids; cpu++)
		set_cpu_possible(cpu, true);
	return 0;
}
early_param("possible_cpus", setup_possible_cpus);
#ifdef CONFIG_HOTPLUG_CPU

int __cpu_disable(void)
{
	unsigned long cregs[16];

	set_cpu_online(smp_processor_id(), false);
	/* Disable pseudo page faults on this cpu. */
	pfault_fini();
	/* Disable interrupt sources via control register. */
	__ctl_store(cregs, 0, 15);
	cregs[0] &= ~0x0000ee70UL;	/* disable all external interrupts */
	cregs[6] &= ~0xff000000UL;	/* disable all I/O interrupts */
	cregs[14] &= ~0x1f000000UL;	/* disable most machine checks */
	__ctl_load(cregs, 0, 15);
	return 0;
}
void __cpu_die(unsigned int cpu)
{
	struct pcpu *pcpu;

	/* Wait until target cpu is down */
	pcpu = pcpu_devices + cpu;
	while (!pcpu_stopped(pcpu))
		cpu_relax();
	pcpu_free_lowcore(pcpu);
	atomic_dec(&init_mm.context.attach_count);
}
void __noreturn cpu_die(void)
{
	idle_task_exit();
	pcpu_sigp_retry(pcpu_devices + smp_processor_id(), sigp_stop, 0);
	for (;;) ;
}

#endif /* CONFIG_HOTPLUG_CPU */
void __init smp_prepare_cpus(unsigned int max_cpus)
{
	/* request the 0x1201 emergency signal external interrupt */
	if (register_external_interrupt(0x1201, do_ext_call_interrupt) != 0)
		panic("Couldn't request external interrupt 0x1201");
	/* request the 0x1202 external call external interrupt */
	if (register_external_interrupt(0x1202, do_ext_call_interrupt) != 0)
		panic("Couldn't request external interrupt 0x1202");
	smp_detect_cpus();
}
void __init smp_prepare_boot_cpu(void)
{
	struct pcpu *pcpu = pcpu_devices;

	boot_cpu_address = stap();
	pcpu->state = CPU_STATE_CONFIGURED;
	pcpu->address = boot_cpu_address;
	pcpu->lowcore = (struct _lowcore *)(unsigned long) store_prefix();
	pcpu->async_stack = S390_lowcore.async_stack - ASYNC_SIZE;
	pcpu->panic_stack = S390_lowcore.panic_stack - PAGE_SIZE;
	S390_lowcore.percpu_offset = __per_cpu_offset[0];
	cpu_set_polarization(0, POLARIZATION_UNKNOWN);
	set_cpu_present(0, true);
	set_cpu_online(0, true);
}
void __init smp_cpus_done(unsigned int max_cpus)
{
}

void __init smp_setup_processor_id(void)
{
	S390_lowcore.cpu_nr = 0;
}
/*
 * the frequency of the profiling timer can be changed
 * by writing a multiplier value into /proc/profile.
 *
 * usually you want to run this on all CPUs ;)
 */
int setup_profiling_timer(unsigned int multiplier)
{
	return 0;
}
#ifdef CONFIG_HOTPLUG_CPU
static ssize_t cpu_configure_show(struct device *dev,
				  struct device_attribute *attr, char *buf)
{
	ssize_t count;

	mutex_lock(&smp_cpu_state_mutex);
	count = sprintf(buf, "%d\n", pcpu_devices[dev->id].state);
	mutex_unlock(&smp_cpu_state_mutex);
	return count;
}
static ssize_t cpu_configure_store(struct device *dev,
				   struct device_attribute *attr,
				   const char *buf, size_t count)
{
	struct pcpu *pcpu;
	int cpu, val, rc;
	char delim;

	if (sscanf(buf, "%d %c", &val, &delim) != 1)
		return -EINVAL;
	if (val != 0 && val != 1)
		return -EINVAL;
	get_online_cpus();
	mutex_lock(&smp_cpu_state_mutex);
	rc = -EBUSY;
	/* disallow configuration changes of online cpus and cpu 0 */
	cpu = dev->id;
	if (cpu_online(cpu) || cpu == 0)
		goto out;
	pcpu = pcpu_devices + cpu;
	rc = 0;
	switch (val) {
	case 0:
		if (pcpu->state != CPU_STATE_CONFIGURED)
			break;
		rc = sclp_cpu_deconfigure(pcpu->address);
		if (rc)
			break;
		pcpu->state = CPU_STATE_STANDBY;
		cpu_set_polarization(cpu, POLARIZATION_UNKNOWN);
		topology_expect_change();
		break;
	case 1:
		if (pcpu->state != CPU_STATE_STANDBY)
			break;
		rc = sclp_cpu_configure(pcpu->address);
		if (rc)
			break;
		pcpu->state = CPU_STATE_CONFIGURED;
		cpu_set_polarization(cpu, POLARIZATION_UNKNOWN);
		topology_expect_change();
		break;
	default:
		break;
	}
out:
	mutex_unlock(&smp_cpu_state_mutex);
	put_online_cpus();
	return rc ? rc : count;
}
static DEVICE_ATTR(configure, 0644, cpu_configure_show, cpu_configure_store);
#endif /* CONFIG_HOTPLUG_CPU */
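/*
 * The configure attribute is typically driven from user space, e.g.
 *	echo 1 > /sys/devices/system/cpu/cpu2/configure
 * would ask the SCLP to move standby cpu 2 to the configured state
 * so that it can be brought online afterwards.
 */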
static ssize_t show_cpu_address(struct device *dev,
				struct device_attribute *attr, char *buf)
{
	return sprintf(buf, "%d\n", pcpu_devices[dev->id].address);
}
static DEVICE_ATTR(address, 0444, show_cpu_address, NULL);
static struct attribute *cpu_common_attrs[] = {
#ifdef CONFIG_HOTPLUG_CPU
	&dev_attr_configure.attr,
#endif
	&dev_attr_address.attr,
	NULL,
};

static struct attribute_group cpu_common_attr_group = {
	.attrs = cpu_common_attrs,
};
static ssize_t show_capability(struct device *dev,
			       struct device_attribute *attr, char *buf)
{
	unsigned int capability;
	int rc;

	rc = get_cpu_capability(&capability);
	if (rc)
		return rc;
	return sprintf(buf, "%u\n", capability);
}
static DEVICE_ATTR(capability, 0444, show_capability, NULL);
static ssize_t show_idle_count(struct device *dev,
			       struct device_attribute *attr, char *buf)
{
	struct s390_idle_data *idle = &per_cpu(s390_idle, dev->id);
	unsigned long long idle_count;
	unsigned int sequence;

	do {
		sequence = ACCESS_ONCE(idle->sequence);
		idle_count = ACCESS_ONCE(idle->idle_count);
		if (ACCESS_ONCE(idle->idle_enter))
			idle_count++;
	} while ((sequence & 1) || (idle->sequence != sequence));
	return sprintf(buf, "%llu\n", idle_count);
}
static DEVICE_ATTR(idle_count, 0444, show_idle_count, NULL);
static ssize_t show_idle_time(struct device *dev,
			      struct device_attribute *attr, char *buf)
{
	struct s390_idle_data *idle = &per_cpu(s390_idle, dev->id);
	unsigned long long now, idle_time, idle_enter, idle_exit;
	unsigned int sequence;

	do {
		now = get_clock();
		sequence = ACCESS_ONCE(idle->sequence);
		idle_time = ACCESS_ONCE(idle->idle_time);
		idle_enter = ACCESS_ONCE(idle->idle_enter);
		idle_exit = ACCESS_ONCE(idle->idle_exit);
	} while ((sequence & 1) || (idle->sequence != sequence));
	idle_time += idle_enter ? ((idle_exit ? : now) - idle_enter) : 0;
	return sprintf(buf, "%llu\n", idle_time >> 12);
}
static DEVICE_ATTR(idle_time_us, 0444, show_idle_time, NULL);
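/*
 * The sequence counter guards against concurrent updates by the
 * idle code: an odd value means an update is in progress, so both
 * attribute readers above retry until they see a stable snapshot.
 * idle_time is kept in TOD format; shifting right by 12 yields
 * microseconds, hence the idle_time_us name.
 */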
static struct attribute *cpu_online_attrs[] = {
	&dev_attr_capability.attr,
	&dev_attr_idle_count.attr,
	&dev_attr_idle_time_us.attr,
	NULL,
};

static struct attribute_group cpu_online_attr_group = {
	.attrs = cpu_online_attrs,
};
static int __cpuinit smp_cpu_notify(struct notifier_block *self,
				    unsigned long action, void *hcpu)
{
	unsigned int cpu = (unsigned int)(long)hcpu;
	struct cpu *c = &pcpu_devices[cpu].cpu;
	struct device *s = &c->dev;
	struct s390_idle_data *idle;
	int err = 0;

	switch (action) {
	case CPU_ONLINE:
	case CPU_ONLINE_FROZEN:
		idle = &per_cpu(s390_idle, cpu);
		memset(idle, 0, sizeof(struct s390_idle_data));
		err = sysfs_create_group(&s->kobj, &cpu_online_attr_group);
		break;
	case CPU_DEAD:
	case CPU_DEAD_FROZEN:
		sysfs_remove_group(&s->kobj, &cpu_online_attr_group);
		break;
	}
	return notifier_from_errno(err);
}
static struct notifier_block __cpuinitdata smp_cpu_nb = {
	.notifier_call = smp_cpu_notify,
};
static int __devinit smp_add_present_cpu(int cpu)
{
	struct cpu *c = &pcpu_devices[cpu].cpu;
	struct device *s = &c->dev;
	int rc;

	c->hotpluggable = 1;
	rc = register_cpu(c, cpu);
	if (rc)
		goto out;
	rc = sysfs_create_group(&s->kobj, &cpu_common_attr_group);
	if (rc)
		goto out_cpu;
	if (cpu_online(cpu)) {
		rc = sysfs_create_group(&s->kobj, &cpu_online_attr_group);
		if (rc)
			goto out_online;
	}
	rc = topology_cpu_init(c);
	if (rc)
		goto out_topology;
	return 0;

out_topology:
	if (cpu_online(cpu))
		sysfs_remove_group(&s->kobj, &cpu_online_attr_group);
out_online:
	sysfs_remove_group(&s->kobj, &cpu_common_attr_group);
out_cpu:
#ifdef CONFIG_HOTPLUG_CPU
	unregister_cpu(c);
#endif
out:
	return rc;
}
1061 int __ref
smp_rescan_cpus(void)
1063 struct sclp_cpu_info
*info
;
1066 info
= smp_get_cpu_info();
1070 mutex_lock(&smp_cpu_state_mutex
);
1071 nr
= __smp_rescan_cpus(info
, 1);
1072 mutex_unlock(&smp_cpu_state_mutex
);
1076 topology_schedule_update();
static ssize_t __ref rescan_store(struct device *dev,
				  struct device_attribute *attr,
				  const char *buf,
				  size_t count)
{
	int rc;

	rc = smp_rescan_cpus();
	return rc ? rc : count;
}
static DEVICE_ATTR(rescan, 0200, NULL, rescan_store);
#endif /* CONFIG_HOTPLUG_CPU */
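/*
 * The rescan attribute is created directly below the cpu subsystem,
 * so e.g.
 *	echo 1 > /sys/devices/system/cpu/rescan
 * triggers a fresh SCLP query for new standby cpus.
 */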
static int __init s390_smp_init(void)
{
	int cpu, rc;

	register_cpu_notifier(&smp_cpu_nb);
#ifdef CONFIG_HOTPLUG_CPU
	rc = device_create_file(cpu_subsys.dev_root, &dev_attr_rescan);
	if (rc)
		return rc;
#endif
	for_each_present_cpu(cpu) {
		rc = smp_add_present_cpu(cpu);
		if (rc)
			return rc;
	}
	return 0;
}
subsys_initcall(s390_smp_init);