/*
 * SMP related functions
 *
 * Copyright IBM Corp. 1999, 2012
 * Author(s): Denis Joseph Barrow,
 *	      Martin Schwidefsky <schwidefsky@de.ibm.com>,
 *	      Heiko Carstens <heiko.carstens@de.ibm.com>,
 *
 * based on other smp stuff by
 * (c) 1995 Alan Cox, CymruNET Ltd  <alan@cymru.net>
 * (c) 1998 Ingo Molnar
 *
 * The code outside of smp.c uses logical cpu numbers, only smp.c does
 * the translation of logical to physical cpu ids. All new code that
 * operates on physical cpu numbers needs to go into smp.c.
 */
#define KMSG_COMPONENT "cpu"
#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt

#include <linux/workqueue.h>
#include <linux/bootmem.h>
#include <linux/export.h>
#include <linux/init.h>
#include <linux/err.h>
#include <linux/spinlock.h>
#include <linux/kernel_stat.h>
#include <linux/kmemleak.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
#include <linux/irqflags.h>
#include <linux/cpu.h>
#include <linux/slab.h>
#include <linux/sched/hotplug.h>
#include <linux/sched/task_stack.h>
#include <linux/crash_dump.h>
#include <linux/memblock.h>
#include <asm/asm-offsets.h>
#include <asm/switch_to.h>
#include <asm/facility.h>
#include <asm/setup.h>
#include <asm/tlbflush.h>
#include <asm/vtimer.h>
#include <asm/lowcore.h>
#include <asm/debug.h>
#include <asm/os_info.h>
enum {
	ec_schedule = 0,
	ec_call_function_single,
	ec_stop_cpu,
};

enum {
	CPU_STATE_STANDBY,
	CPU_STATE_CONFIGURED,
};

static DEFINE_PER_CPU(struct cpu *, cpu_device);
struct pcpu {
	struct lowcore *lowcore;	/* lowcore page(s) for the cpu */
	unsigned long ec_mask;		/* bit mask for ec_xxx functions */
	unsigned long ec_clk;		/* sigp timestamp for ec_xxx */
	signed char state;		/* physical cpu state */
	signed char polarization;	/* physical polarization */
	u16 address;			/* physical cpu address */
};
static u8 boot_core_type;
static struct pcpu pcpu_devices[NR_CPUS];

static struct kmem_cache *pcpu_mcesa_cache;

unsigned int smp_cpu_mt_shift;
EXPORT_SYMBOL(smp_cpu_mt_shift);

unsigned int smp_cpu_mtid;
EXPORT_SYMBOL(smp_cpu_mtid);
#ifdef CONFIG_CRASH_DUMP
__vector128 __initdata boot_cpu_vector_save_area[__NUM_VXRS];
#endif

static unsigned int smp_max_threads __initdata = -1U;
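
/*
 * "nosmt" limits each core to a single thread, "smt=<n>" caps the number
 * of hardware threads per core at <n>; both take effect when the
 * multi-threading state is set in smp_detect_cpus().
 */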
static int __init early_nosmt(char *s)
{
	smp_max_threads = 1;
	return 0;
}
early_param("nosmt", early_nosmt);

static int __init early_smt(char *s)
{
	get_option(&s, &smp_max_threads);
	return 0;
}
early_param("smt", early_smt);
/*
 * The smp_cpu_state_mutex must be held when changing the state or polarization
 * member of a pcpu data structure within the pcpu_devices array.
 */
DEFINE_MUTEX(smp_cpu_state_mutex);
/*
 * Signal processor helper functions.
 */
static inline int __pcpu_sigp_relax(u16 addr, u8 order, unsigned long parm)
{
	int cc;

	while (1) {
		cc = __pcpu_sigp(addr, order, parm, NULL);
		if (cc != SIGP_CC_BUSY)
			return cc;
		cpu_relax();
	}
}
static int pcpu_sigp_retry(struct pcpu *pcpu, u8 order, u32 parm)
{
	int cc, retry;

	for (retry = 0; ; retry++) {
		cc = __pcpu_sigp(pcpu->address, order, parm, NULL);
		if (cc != SIGP_CC_BUSY)
			break;
		if (retry >= 3)
			udelay(10);
	}
	return cc;
}
static inline int pcpu_stopped(struct pcpu *pcpu)
{
	u32 uninitialized_var(status);

	if (__pcpu_sigp(pcpu->address, SIGP_SENSE,
			0, &status) != SIGP_CC_STATUS_STORED)
		return 0;
	return !!(status & (SIGP_STATUS_CHECK_STOP|SIGP_STATUS_STOPPED));
}
static inline int pcpu_running(struct pcpu *pcpu)
{
	if (__pcpu_sigp(pcpu->address, SIGP_SENSE_RUNNING,
			0, NULL) != SIGP_CC_STATUS_STORED)
		return 1;
	/* Status stored condition code is equivalent to cpu not running. */
	return 0;
}
/*
 * Find struct pcpu by cpu address.
 */
static struct pcpu *pcpu_find_address(const struct cpumask *mask, u16 address)
{
	int cpu;

	for_each_cpu(cpu, mask)
		if (pcpu_devices[cpu].address == address)
			return pcpu_devices + cpu;
	return NULL;
}
static void pcpu_ec_call(struct pcpu *pcpu, int ec_bit)
{
	int order;

	if (test_and_set_bit(ec_bit, &pcpu->ec_mask))
		return;
	order = pcpu_running(pcpu) ? SIGP_EXTERNAL_CALL : SIGP_EMERGENCY_SIGNAL;
	pcpu->ec_clk = get_tod_clock_fast();
	pcpu_sigp_retry(pcpu, order, 0);
}
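
/*
 * The async and panic stack pointers kept in the lowcore point below the
 * top of the respective allocation: STACK_FRAME_OVERHEAD bytes for an
 * initial stack frame plus __PT_SIZE bytes for a struct pt_regs are
 * reserved above them.
 */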
#define ASYNC_FRAME_OFFSET (ASYNC_SIZE - STACK_FRAME_OVERHEAD - __PT_SIZE)
#define PANIC_FRAME_OFFSET (PAGE_SIZE - STACK_FRAME_OVERHEAD - __PT_SIZE)
static int pcpu_alloc_lowcore(struct pcpu *pcpu, int cpu)
{
	unsigned long async_stack, panic_stack;
	unsigned long mcesa_origin, mcesa_bits;
	struct lowcore *lc;

	mcesa_origin = mcesa_bits = 0;
	if (pcpu != &pcpu_devices[0]) {
		pcpu->lowcore = (struct lowcore *)
			__get_free_pages(GFP_KERNEL | GFP_DMA, LC_ORDER);
		async_stack = __get_free_pages(GFP_KERNEL, ASYNC_ORDER);
		panic_stack = __get_free_page(GFP_KERNEL);
		if (!pcpu->lowcore || !panic_stack || !async_stack)
			goto out;
		if (MACHINE_HAS_VX || MACHINE_HAS_GS) {
			mcesa_origin = (unsigned long)
				kmem_cache_alloc(pcpu_mcesa_cache, GFP_KERNEL);
			if (!mcesa_origin)
				goto out;
			/* The pointer is stored with mcesa_bits ORed in */
			kmemleak_not_leak((void *) mcesa_origin);
			mcesa_bits = MACHINE_HAS_GS ? 11 : 0;
		}
	} else {
		async_stack = pcpu->lowcore->async_stack - ASYNC_FRAME_OFFSET;
		panic_stack = pcpu->lowcore->panic_stack - PANIC_FRAME_OFFSET;
		mcesa_origin = pcpu->lowcore->mcesad & MCESA_ORIGIN_MASK;
		mcesa_bits = pcpu->lowcore->mcesad & MCESA_LC_MASK;
	}
	lc = pcpu->lowcore;
	memcpy(lc, &S390_lowcore, 512);
	memset((char *) lc + 512, 0, sizeof(*lc) - 512);
	lc->async_stack = async_stack + ASYNC_FRAME_OFFSET;
	lc->panic_stack = panic_stack + PANIC_FRAME_OFFSET;
	lc->mcesad = mcesa_origin | mcesa_bits;
	lc->cpu_nr = cpu;
	lc->spinlock_lockval = arch_spin_lockval(cpu);
	if (vdso_alloc_per_cpu(lc))
		goto out;
	lowcore_ptr[cpu] = lc;
	pcpu_sigp_retry(pcpu, SIGP_SET_PREFIX, (u32)(unsigned long) lc);
	return 0;
out:
	if (pcpu != &pcpu_devices[0]) {
		if (mcesa_origin)
			kmem_cache_free(pcpu_mcesa_cache,
					(void *) mcesa_origin);
		free_page(panic_stack);
		free_pages(async_stack, ASYNC_ORDER);
		free_pages((unsigned long) pcpu->lowcore, LC_ORDER);
	}
	return -ENOMEM;
}
#ifdef CONFIG_HOTPLUG_CPU

static void pcpu_free_lowcore(struct pcpu *pcpu)
{
	unsigned long mcesa_origin;

	pcpu_sigp_retry(pcpu, SIGP_SET_PREFIX, 0);
	lowcore_ptr[pcpu - pcpu_devices] = NULL;
	vdso_free_per_cpu(pcpu->lowcore);
	if (pcpu == &pcpu_devices[0])
		return;
	if (MACHINE_HAS_VX || MACHINE_HAS_GS) {
		mcesa_origin = pcpu->lowcore->mcesad & MCESA_ORIGIN_MASK;
		kmem_cache_free(pcpu_mcesa_cache, (void *) mcesa_origin);
	}
	free_page(pcpu->lowcore->panic_stack - PANIC_FRAME_OFFSET);
	free_pages(pcpu->lowcore->async_stack - ASYNC_FRAME_OFFSET, ASYNC_ORDER);
	free_pages((unsigned long) pcpu->lowcore, LC_ORDER);
}

#endif /* CONFIG_HOTPLUG_CPU */
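
/*
 * Fill the lowcore of a secondary cpu with the values it needs to run
 * kernel code: per-cpu offset, kernel asce, machine flags, control and
 * access registers and the facility list of the boot cpu.
 */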
static void pcpu_prepare_secondary(struct pcpu *pcpu, int cpu)
{
	struct lowcore *lc = pcpu->lowcore;

	cpumask_set_cpu(cpu, &init_mm.context.cpu_attach_mask);
	cpumask_set_cpu(cpu, mm_cpumask(&init_mm));
	lc->cpu_nr = cpu;
	lc->spinlock_lockval = arch_spin_lockval(cpu);
	lc->percpu_offset = __per_cpu_offset[cpu];
	lc->kernel_asce = S390_lowcore.kernel_asce;
	lc->machine_flags = S390_lowcore.machine_flags;
	lc->user_timer = lc->system_timer = lc->steal_timer = 0;
	__ctl_store(lc->cregs_save_area, 0, 15);
	save_access_regs((unsigned int *) lc->access_regs_save_area);
	memcpy(lc->stfle_fac_list, S390_lowcore.stfle_fac_list,
	       MAX_FACILITY_BIT/8);
}
static void pcpu_attach_task(struct pcpu *pcpu, struct task_struct *tsk)
{
	struct lowcore *lc = pcpu->lowcore;

	lc->kernel_stack = (unsigned long) task_stack_page(tsk)
		+ THREAD_SIZE - STACK_FRAME_OVERHEAD - sizeof(struct pt_regs);
	lc->current_task = (unsigned long) tsk;
	lc->lpp = LPP_MAGIC;
	lc->current_pid = tsk->pid;
	lc->user_timer = tsk->thread.user_timer;
	lc->guest_timer = tsk->thread.guest_timer;
	lc->system_timer = tsk->thread.system_timer;
	lc->hardirq_timer = tsk->thread.hardirq_timer;
	lc->softirq_timer = tsk->thread.softirq_timer;
	lc->steal_timer = 0;
}
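
/*
 * A sigp restart makes the target cpu enter its restart handler, which
 * picks up restart_stack, restart_fn and restart_data from the lowcore.
 */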
static void pcpu_start_fn(struct pcpu *pcpu, void (*func)(void *), void *data)
{
	struct lowcore *lc = pcpu->lowcore;

	lc->restart_stack = lc->kernel_stack;
	lc->restart_fn = (unsigned long) func;
	lc->restart_data = (unsigned long) data;
	lc->restart_source = -1UL;
	pcpu_sigp_retry(pcpu, SIGP_RESTART, 0);
}
/*
 * Call function via PSW restart on pcpu and stop the current cpu.
 */
static void pcpu_delegate(struct pcpu *pcpu, void (*func)(void *),
			  void *data, unsigned long stack)
{
	struct lowcore *lc = lowcore_ptr[pcpu - pcpu_devices];
	unsigned long source_cpu = stap();

	__load_psw_mask(PSW_KERNEL_BITS);
	if (pcpu->address == source_cpu)
		func(data);	/* should not return */
	/* Stop target cpu (if func returns this stops the current cpu). */
	pcpu_sigp_retry(pcpu, SIGP_STOP, 0);
	/* Restart func on the target cpu and stop the current cpu. */
	mem_assign_absolute(lc->restart_stack, stack);
	mem_assign_absolute(lc->restart_fn, (unsigned long) func);
	mem_assign_absolute(lc->restart_data, (unsigned long) data);
	mem_assign_absolute(lc->restart_source, source_cpu);
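	/*
	 * The target cpu is restarted and the current cpu stopped with
	 * two back-to-back sigp orders in inline assembly: after the
	 * restart is triggered the current cpu must not return to C code,
	 * since the target may now be running on the delegated stack.
	 */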
	asm volatile(
		"0:	sigp	0,%0,%2	# sigp restart to target cpu\n"
		"	brc	2,0b	# busy, try again\n"
		"1:	sigp	0,%1,%3	# sigp stop to current cpu\n"
		"	brc	2,1b	# busy, try again\n"
		: : "d" (pcpu->address), "d" (source_cpu),
		    "K" (SIGP_RESTART), "K" (SIGP_STOP));
	for (;;) ;
}
/*
 * Enable additional logical cpus for multi-threading.
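 *
 * For example, with mtid == 1 (two hardware threads per core)
 * smp_cpu_mt_shift ends up as 1, so core id n covers the cpu
 * addresses (n << 1) and (n << 1) + 1.
 */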
static int pcpu_set_smt(unsigned int mtid)
{
	int cc;

	if (smp_cpu_mtid == mtid)
		return 0;
	cc = __pcpu_sigp(0, SIGP_SET_MULTI_THREADING, mtid, NULL);
	if (cc == SIGP_CC_ORDER_CODE_ACCEPTED) {
		smp_cpu_mtid = mtid;
		smp_cpu_mt_shift = 0;
		while (smp_cpu_mtid >= (1U << smp_cpu_mt_shift))
			smp_cpu_mt_shift++;
		pcpu_devices[0].address = stap();
	}
	return cc;
}
/*
 * Call function on an online CPU.
 */
void smp_call_online_cpu(void (*func)(void *), void *data)
{
	struct pcpu *pcpu;

	/* Use the current cpu if it is online. */
	pcpu = pcpu_find_address(cpu_online_mask, stap());
	if (!pcpu)
		/* Use the first online cpu. */
		pcpu = pcpu_devices + cpumask_first(cpu_online_mask);
	pcpu_delegate(pcpu, func, data, (unsigned long) restart_stack);
}
/*
 * Call function on the ipl CPU.
 */
void smp_call_ipl_cpu(void (*func)(void *), void *data)
{
	pcpu_delegate(&pcpu_devices[0], func, data,
		      pcpu_devices->lowcore->panic_stack -
		      PANIC_FRAME_OFFSET + PAGE_SIZE);
}
int smp_find_processor_id(u16 address)
{
	int cpu;

	for_each_present_cpu(cpu)
		if (pcpu_devices[cpu].address == address)
			return cpu;
	return -1;
}
bool arch_vcpu_is_preempted(int cpu)
{
	if (test_cpu_flag_of(CIF_ENABLED_WAIT, cpu))
		return false;
	if (pcpu_running(pcpu_devices + cpu))
		return false;
	return true;
}
EXPORT_SYMBOL(arch_vcpu_is_preempted);
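
/*
 * diag 0x9c asks the hypervisor to preferentially dispatch the target
 * cpu, while diag 0x44 just gives up the caller's remaining time slice.
 */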
void smp_yield_cpu(int cpu)
{
	if (MACHINE_HAS_DIAG9C) {
		diag_stat_inc_norecursion(DIAG_STAT_X09C);
		asm volatile("diag %0,0,0x9c"
			     : : "d" (pcpu_devices[cpu].address));
	} else if (MACHINE_HAS_DIAG44) {
		diag_stat_inc_norecursion(DIAG_STAT_X044);
		asm volatile("diag 0,0,0x44");
	}
}
/*
 * Send cpus emergency shutdown signal. This gives the cpus the
 * opportunity to complete outstanding interrupts.
 */
static void smp_emergency_stop(cpumask_t *cpumask)
{
	u64 end;
	int cpu;

	end = get_tod_clock() + (1000000UL << 12);
	for_each_cpu(cpu, cpumask) {
		struct pcpu *pcpu = pcpu_devices + cpu;
		set_bit(ec_stop_cpu, &pcpu->ec_mask);
		while (__pcpu_sigp(pcpu->address, SIGP_EMERGENCY_SIGNAL,
				   0, NULL) == SIGP_CC_BUSY &&
		       get_tod_clock() < end)
			cpu_relax();
	}
	while (get_tod_clock() < end) {
		for_each_cpu(cpu, cpumask)
			if (pcpu_stopped(pcpu_devices + cpu))
				cpumask_clear_cpu(cpu, cpumask);
		if (cpumask_empty(cpumask))
			break;
		cpu_relax();
	}
}
/*
 * Stop all cpus but the current one.
 */
void smp_send_stop(void)
{
	cpumask_t cpumask;
	int cpu;

	/* Disable all interrupts/machine checks */
	__load_psw_mask(PSW_KERNEL_BITS | PSW_MASK_DAT);
	trace_hardirqs_off();

	debug_set_critical();
	cpumask_copy(&cpumask, cpu_online_mask);
	cpumask_clear_cpu(smp_processor_id(), &cpumask);

	if (oops_in_progress)
		smp_emergency_stop(&cpumask);

	/* stop all processors */
	for_each_cpu(cpu, &cpumask) {
		struct pcpu *pcpu = pcpu_devices + cpu;
		pcpu_sigp_retry(pcpu, SIGP_STOP, 0);
		while (!pcpu_stopped(pcpu))
			cpu_relax();
	}
}
/*
 * This is the main routine where commands issued by other
 * cpus are handled.
 */
static void smp_handle_ext_call(void)
{
	unsigned long bits;

	/* handle bit signal external calls */
	bits = xchg(&pcpu_devices[smp_processor_id()].ec_mask, 0);
	if (test_bit(ec_stop_cpu, &bits))
		smp_stop_cpu();
	if (test_bit(ec_schedule, &bits))
		scheduler_ipi();
	if (test_bit(ec_call_function_single, &bits))
		generic_smp_call_function_single_interrupt();
}

static void do_ext_call_interrupt(struct ext_code ext_code,
				  unsigned int param32, unsigned long param64)
{
	inc_irq_stat(ext_code.code == 0x1202 ? IRQEXT_EXC : IRQEXT_EMS);
	smp_handle_ext_call();
}
void arch_send_call_function_ipi_mask(const struct cpumask *mask)
{
	int cpu;

	for_each_cpu(cpu, mask)
		pcpu_ec_call(pcpu_devices + cpu, ec_call_function_single);
}

void arch_send_call_function_single_ipi(int cpu)
{
	pcpu_ec_call(pcpu_devices + cpu, ec_call_function_single);
}
/*
 * this function sends a 'reschedule' IPI to another CPU.
 * it goes straight through and wastes no time serializing
 * anything. Worst case is that we lose a reschedule ...
 */
void smp_send_reschedule(int cpu)
{
	pcpu_ec_call(pcpu_devices + cpu, ec_schedule);
}
/*
 * parameter area for the set/clear control bit callbacks
 */
struct ec_creg_mask_parms {
	unsigned long orval;
	unsigned long andval;
	int cr;
};
/*
 * callback for setting/clearing control bits
 */
static void smp_ctl_bit_callback(void *info)
{
	struct ec_creg_mask_parms *pp = info;
	unsigned long cregs[16];

	__ctl_store(cregs, 0, 15);
	cregs[pp->cr] = (cregs[pp->cr] & pp->andval) | pp->orval;
	__ctl_load(cregs, 0, 15);
}
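
/*
 * The parameter blocks passed to smp_ctl_bit_callback() below live on
 * the caller's stack; this is safe because on_each_cpu() is invoked
 * with wait == 1 and only returns after all cpus ran the callback.
 */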
/*
 * Set a bit in a control register of all cpus
 */
void smp_ctl_set_bit(int cr, int bit)
{
	struct ec_creg_mask_parms parms = { 1UL << bit, -1UL, cr };

	on_each_cpu(smp_ctl_bit_callback, &parms, 1);
}
EXPORT_SYMBOL(smp_ctl_set_bit);

/*
 * Clear a bit in a control register of all cpus
 */
void smp_ctl_clear_bit(int cr, int bit)
{
	struct ec_creg_mask_parms parms = { 0, ~(1UL << bit), cr };

	on_each_cpu(smp_ctl_bit_callback, &parms, 1);
}
EXPORT_SYMBOL(smp_ctl_clear_bit);
#ifdef CONFIG_CRASH_DUMP

int smp_store_status(int cpu)
{
	struct pcpu *pcpu = pcpu_devices + cpu;
	unsigned long pa;

	pa = __pa(&pcpu->lowcore->floating_pt_save_area);
	if (__pcpu_sigp_relax(pcpu->address, SIGP_STORE_STATUS_AT_ADDRESS,
			      pa) != SIGP_CC_ORDER_CODE_ACCEPTED)
		return -EIO;
	if (!MACHINE_HAS_VX && !MACHINE_HAS_GS)
		return 0;
	pa = __pa(pcpu->lowcore->mcesad & MCESA_ORIGIN_MASK);
	if (MACHINE_HAS_GS)
		pa |= pcpu->lowcore->mcesad & MCESA_LC_MASK;
	if (__pcpu_sigp_relax(pcpu->address, SIGP_STORE_ADDITIONAL_STATUS,
			      pa) != SIGP_CC_ORDER_CODE_ACCEPTED)
		return -EIO;
	return 0;
}
/*
 * Collect CPU state of the previous, crashed system.
 * There are four cases:
 * 1) standard zfcp dump
 *    condition: OLDMEM_BASE == NULL && ipl_info.type == IPL_TYPE_FCP_DUMP
 *    The state for all CPUs except the boot CPU needs to be collected
 *    with sigp stop-and-store-status. The boot CPU state is located in
 *    the absolute lowcore of the memory stored in the HSA. The zcore code
 *    will copy the boot CPU state from the HSA.
 * 2) stand-alone kdump for SCSI (zfcp dump with swapped memory)
 *    condition: OLDMEM_BASE != NULL && ipl_info.type == IPL_TYPE_FCP_DUMP
 *    The state for all CPUs except the boot CPU needs to be collected
 *    with sigp stop-and-store-status. The firmware or the boot-loader
 *    stored the registers of the boot CPU in the absolute lowcore in the
 *    memory of the old system.
 * 3) kdump and the old kernel did not store the CPU state,
 *    or stand-alone kdump for DASD
 *    condition: OLDMEM_BASE != NULL && !is_kdump_kernel()
 *    The state for all CPUs except the boot CPU needs to be collected
 *    with sigp stop-and-store-status. The kexec code or the boot-loader
 *    stored the registers of the boot CPU in the memory of the old system.
 * 4) kdump and the old kernel stored the CPU state
 *    condition: OLDMEM_BASE != NULL && is_kdump_kernel()
 *    This case does not exist for s390 anymore, setup_arch explicitly
 *    deactivates the elfcorehdr= kernel parameter
 */
static __init void smp_save_cpu_vxrs(struct save_area *sa, u16 addr,
				     bool is_boot_cpu, unsigned long page)
{
	__vector128 *vxrs = (__vector128 *) page;

	if (is_boot_cpu)
		vxrs = boot_cpu_vector_save_area;
	else
		__pcpu_sigp_relax(addr, SIGP_STORE_ADDITIONAL_STATUS, page);
	save_area_add_vxrs(sa, vxrs);
}
static __init void smp_save_cpu_regs(struct save_area *sa, u16 addr,
				     bool is_boot_cpu, unsigned long page)
{
	void *regs = (void *) page;

	if (is_boot_cpu)
		copy_oldmem_kernel(regs, (void *) __LC_FPREGS_SAVE_AREA, 512);
	else
		__pcpu_sigp_relax(addr, SIGP_STORE_STATUS_AT_ADDRESS, page);
	save_area_add_regs(sa, regs);
}
void __init smp_save_dump_cpus(void)
{
	int addr, boot_cpu_addr, max_cpu_addr;
	struct save_area *sa;
	unsigned long page;
	bool is_boot_cpu;

	if (!(OLDMEM_BASE || ipl_info.type == IPL_TYPE_FCP_DUMP))
		/* No previous system present, normal boot. */
		return;
	/* Allocate a page as dumping area for the store status sigps */
	page = memblock_alloc_base(PAGE_SIZE, PAGE_SIZE, 1UL << 31);
	/* Set multi-threading state to the previous system. */
	pcpu_set_smt(sclp.mtid_prev);
	boot_cpu_addr = stap();
	max_cpu_addr = SCLP_MAX_CORES << sclp.mtid_prev;
	for (addr = 0; addr <= max_cpu_addr; addr++) {
		if (__pcpu_sigp_relax(addr, SIGP_SENSE, 0) ==
		    SIGP_CC_NOT_OPERATIONAL)
			continue;
		is_boot_cpu = (addr == boot_cpu_addr);
		/* Allocate save area */
		sa = save_area_alloc(is_boot_cpu);
		if (!sa)
			panic("could not allocate memory for save area\n");
		/* Get the vector registers */
		smp_save_cpu_vxrs(sa, addr, is_boot_cpu, page);
		/*
		 * For a zfcp dump OLDMEM_BASE == NULL and the registers
		 * of the boot CPU are stored in the HSA. To retrieve
		 * these registers an SCLP request is required which is
		 * done by drivers/s390/char/zcore.c:init_cpu_info()
		 */
		if (!is_boot_cpu || OLDMEM_BASE)
			/* Get the CPU registers */
			smp_save_cpu_regs(sa, addr, is_boot_cpu, page);
	}
	memblock_free(page, PAGE_SIZE);
	diag308_reset();
	pcpu_set_smt(0);
}

#endif /* CONFIG_CRASH_DUMP */
void smp_cpu_set_polarization(int cpu, int val)
{
	pcpu_devices[cpu].polarization = val;
}

int smp_cpu_get_polarization(int cpu)
{
	return pcpu_devices[cpu].polarization;
}
static void __ref smp_get_core_info(struct sclp_core_info *info, int early)
{
	static int use_sigp_detection;
	int address;

	if (use_sigp_detection || sclp_get_core_info(info, early)) {
		use_sigp_detection = 1;
		for (address = 0;
		     address < (SCLP_MAX_CORES << smp_cpu_mt_shift);
		     address += (1U << smp_cpu_mt_shift)) {
			if (__pcpu_sigp_relax(address, SIGP_SENSE, 0) ==
			    SIGP_CC_NOT_OPERATIONAL)
				continue;
			info->core[info->configured].core_id =
				address >> smp_cpu_mt_shift;
			info->configured++;
		}
		info->combined = info->configured;
	}
}
static int smp_add_present_cpu(int cpu);
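
/*
 * Each core id reported by sclp covers smp_cpu_mtid + 1 consecutive cpu
 * addresses; the rescan loop assigns a logical cpu number to every
 * thread address that is not yet present.
 */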
static int __smp_rescan_cpus(struct sclp_core_info *info, int sysfs_add)
{
	struct pcpu *pcpu;
	cpumask_t avail;
	int cpu, nr, i, j;
	u16 address;

	nr = 0;
	cpumask_xor(&avail, cpu_possible_mask, cpu_present_mask);
	cpu = cpumask_first(&avail);
	for (i = 0; (i < info->combined) && (cpu < nr_cpu_ids); i++) {
		if (sclp.has_core_type && info->core[i].type != boot_core_type)
			continue;
		address = info->core[i].core_id << smp_cpu_mt_shift;
		for (j = 0; j <= smp_cpu_mtid; j++) {
			if (pcpu_find_address(cpu_present_mask, address + j))
				continue;
			pcpu = pcpu_devices + cpu;
			pcpu->address = address + j;
			pcpu->state =
				(cpu >= info->configured*(smp_cpu_mtid + 1)) ?
				CPU_STATE_STANDBY : CPU_STATE_CONFIGURED;
			smp_cpu_set_polarization(cpu, POLARIZATION_UNKNOWN);
			set_cpu_present(cpu, true);
			if (sysfs_add && smp_add_present_cpu(cpu) != 0)
				set_cpu_present(cpu, false);
			else
				nr++;
			cpu = cpumask_next(cpu, &avail);
			if (cpu >= nr_cpu_ids)
				break;
		}
	}
	return nr;
}
void __init smp_detect_cpus(void)
{
	unsigned int cpu, mtid, c_cpus, s_cpus;
	struct sclp_core_info *info;
	u16 address;

	/* Get CPU information */
	info = memblock_virt_alloc(sizeof(*info), 8);
	smp_get_core_info(info, 1);
	/* Find boot CPU type */
	if (sclp.has_core_type) {
		address = stap();
		for (cpu = 0; cpu < info->combined; cpu++)
			if (info->core[cpu].core_id == address) {
				/* The boot cpu dictates the cpu type. */
				boot_core_type = info->core[cpu].type;
				break;
			}
		if (cpu >= info->combined)
			panic("Could not find boot CPU type");
	}

	/* Set multi-threading state for the current system */
	mtid = boot_core_type ? sclp.mtid : sclp.mtid_cp;
	mtid = (mtid < smp_max_threads) ? mtid : smp_max_threads - 1;
	pcpu_set_smt(mtid);

	/* Print number of CPUs */
	c_cpus = s_cpus = 0;
	for (cpu = 0; cpu < info->combined; cpu++) {
		if (sclp.has_core_type &&
		    info->core[cpu].type != boot_core_type)
			continue;
		if (cpu < info->configured)
			c_cpus += smp_cpu_mtid + 1;
		else
			s_cpus += smp_cpu_mtid + 1;
	}
	pr_info("%d configured CPUs, %d standby CPUs\n", c_cpus, s_cpus);

	/* Add CPUs present at boot */
	get_online_cpus();
	__smp_rescan_cpus(info, 0);
	put_online_cpus();
	memblock_free_early((unsigned long)info, sizeof(*info));
}
/*
 * Activate a secondary processor.
 */
static void smp_start_secondary(void *cpuvoid)
{
	S390_lowcore.last_update_clock = get_tod_clock();
	S390_lowcore.restart_stack = (unsigned long) restart_stack;
	S390_lowcore.restart_fn = (unsigned long) do_restart;
	S390_lowcore.restart_data = 0;
	S390_lowcore.restart_source = -1UL;
	restore_access_regs(S390_lowcore.access_regs_save_area);
	__ctl_load(S390_lowcore.cregs_save_area, 0, 15);
	__load_psw_mask(PSW_KERNEL_BITS | PSW_MASK_DAT);
	cpu_init();
	preempt_disable();
	init_cpu_timer();
	vtime_init();
	pfault_init();
	notify_cpu_starting(smp_processor_id());
	set_cpu_online(smp_processor_id(), true);
	inc_irq_stat(CPU_RST);
	local_irq_enable();
	cpu_startup_entry(CPUHP_AP_ONLINE_IDLE);
}
/* Upping and downing of CPUs */
int __cpu_up(unsigned int cpu, struct task_struct *tidle)
{
	struct pcpu *pcpu;
	int base, i, rc;

	pcpu = pcpu_devices + cpu;
	if (pcpu->state != CPU_STATE_CONFIGURED)
		return -EIO;
	base = smp_get_base_cpu(cpu);
	for (i = 0; i <= smp_cpu_mtid; i++) {
		if (base + i < nr_cpu_ids)
			if (cpu_online(base + i))
				break;
	}
	/*
	 * If this is the first CPU of the core to get online
	 * do an initial CPU reset.
	 */
	if (i > smp_cpu_mtid &&
	    pcpu_sigp_retry(pcpu_devices + base, SIGP_INITIAL_CPU_RESET, 0) !=
	    SIGP_CC_ORDER_CODE_ACCEPTED)
		return -EIO;

	rc = pcpu_alloc_lowcore(pcpu, cpu);
	if (rc)
		return rc;
	pcpu_prepare_secondary(pcpu, cpu);
	pcpu_attach_task(pcpu, tidle);
	pcpu_start_fn(pcpu, smp_start_secondary, NULL);
	/* Wait until cpu puts itself in the online & active maps */
	while (!cpu_online(cpu))
		cpu_relax();
	return 0;
}
;
864 static int __init
_setup_possible_cpus(char *s
)
866 get_option(&s
, &setup_possible_cpus
);
869 early_param("possible_cpus", _setup_possible_cpus
);
#ifdef CONFIG_HOTPLUG_CPU

int __cpu_disable(void)
{
	unsigned long cregs[16];

	/* Handle possible pending IPIs */
	smp_handle_ext_call();
	set_cpu_online(smp_processor_id(), false);
	/* Disable pseudo page faults on this cpu. */
	pfault_fini();
	/* Disable interrupt sources via control register. */
	__ctl_store(cregs, 0, 15);
	cregs[0] &= ~0x0000ee70UL;	/* disable all external interrupts */
	cregs[6] &= ~0xff000000UL;	/* disable all I/O interrupts */
	cregs[14] &= ~0x1f000000UL;	/* disable most machine checks */
	__ctl_load(cregs, 0, 15);
	clear_cpu_flag(CIF_NOHZ_DELAY);
	return 0;
}
void __cpu_die(unsigned int cpu)
{
	struct pcpu *pcpu;

	/* Wait until target cpu is down */
	pcpu = pcpu_devices + cpu;
	while (!pcpu_stopped(pcpu))
		cpu_relax();
	pcpu_free_lowcore(pcpu);
	cpumask_clear_cpu(cpu, mm_cpumask(&init_mm));
	cpumask_clear_cpu(cpu, &init_mm.context.cpu_attach_mask);
}

void __noreturn cpu_die(void)
{
	idle_task_exit();
	pcpu_sigp_retry(pcpu_devices + smp_processor_id(), SIGP_STOP, 0);
	for (;;) ;
}

#endif /* CONFIG_HOTPLUG_CPU */
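
/*
 * The number of possible cpus is the sclp thread count per core (the
 * larger of the IFL and CP thread ids, plus one), capped by "smt=",
 * multiplied by the maximum core count, and further limited by
 * "possible_cpus=" and nr_cpu_ids. As an illustrative example,
 * sclp.max_cores == 8 with two threads per core yields 16 possible cpus.
 */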
void __init smp_fill_possible_mask(void)
{
	unsigned int possible, sclp_max, cpu;

	sclp_max = max(sclp.mtid, sclp.mtid_cp) + 1;
	sclp_max = min(smp_max_threads, sclp_max);
	sclp_max = (sclp.max_cores * sclp_max) ?: nr_cpu_ids;
	possible = setup_possible_cpus ?: nr_cpu_ids;
	possible = min(possible, sclp_max);
	for (cpu = 0; cpu < possible && cpu < nr_cpu_ids; cpu++)
		set_cpu_possible(cpu, true);
}
void __init smp_prepare_cpus(unsigned int max_cpus)
{
	unsigned long size;

	/* request the 0x1201 emergency signal external interrupt */
	if (register_external_irq(EXT_IRQ_EMERGENCY_SIG, do_ext_call_interrupt))
		panic("Couldn't request external interrupt 0x1201");
	/* request the 0x1202 external call external interrupt */
	if (register_external_irq(EXT_IRQ_EXTERNAL_CALL, do_ext_call_interrupt))
		panic("Couldn't request external interrupt 0x1202");
	/* create slab cache for the machine-check-extended-save-areas */
	if (MACHINE_HAS_VX || MACHINE_HAS_GS) {
		size = 1UL << (MACHINE_HAS_GS ? 11 : 10);
		pcpu_mcesa_cache = kmem_cache_create("nmi_save_areas",
						     size, size, 0, NULL);
		if (!pcpu_mcesa_cache)
			panic("Couldn't create nmi save area cache");
	}
}
void __init smp_prepare_boot_cpu(void)
{
	struct pcpu *pcpu = pcpu_devices;

	WARN_ON(!cpu_present(0) || !cpu_online(0));
	pcpu->state = CPU_STATE_CONFIGURED;
	pcpu->lowcore = (struct lowcore *)(unsigned long) store_prefix();
	S390_lowcore.percpu_offset = __per_cpu_offset[0];
	smp_cpu_set_polarization(0, POLARIZATION_UNKNOWN);
}
void __init smp_cpus_done(unsigned int max_cpus)
{
}

void __init smp_setup_processor_id(void)
{
	pcpu_devices[0].address = stap();
	S390_lowcore.cpu_nr = 0;
	S390_lowcore.spinlock_lockval = arch_spin_lockval(0);
}
/*
 * the frequency of the profiling timer can be changed
 * by writing a multiplier value into /proc/profile.
 *
 * usually you want to run this on all CPUs ;)
 */
int setup_profiling_timer(unsigned int multiplier)
{
	return 0;
}
#ifdef CONFIG_HOTPLUG_CPU
static ssize_t cpu_configure_show(struct device *dev,
				  struct device_attribute *attr, char *buf)
{
	ssize_t count;

	mutex_lock(&smp_cpu_state_mutex);
	count = sprintf(buf, "%d\n", pcpu_devices[dev->id].state);
	mutex_unlock(&smp_cpu_state_mutex);
	return count;
}
static ssize_t cpu_configure_store(struct device *dev,
				   struct device_attribute *attr,
				   const char *buf, size_t count)
{
	struct pcpu *pcpu;
	int cpu, val, rc, i;
	char delim;

	if (sscanf(buf, "%d %c", &val, &delim) != 1)
		return -EINVAL;
	if (val != 0 && val != 1)
		return -EINVAL;
	get_online_cpus();
	mutex_lock(&smp_cpu_state_mutex);
	rc = -EBUSY;
	/* disallow configuration changes of online cpus and cpu 0 */
	cpu = dev->id;
	cpu = smp_get_base_cpu(cpu);
	if (cpu == 0)
		goto out;
	for (i = 0; i <= smp_cpu_mtid; i++)
		if (cpu_online(cpu + i))
			goto out;
	pcpu = pcpu_devices + cpu;
	rc = 0;
	switch (val) {
	case 0:
		if (pcpu->state != CPU_STATE_CONFIGURED)
			break;
		rc = sclp_core_deconfigure(pcpu->address >> smp_cpu_mt_shift);
		if (rc)
			break;
		for (i = 0; i <= smp_cpu_mtid; i++) {
			if (cpu + i >= nr_cpu_ids || !cpu_present(cpu + i))
				continue;
			pcpu[i].state = CPU_STATE_STANDBY;
			smp_cpu_set_polarization(cpu + i,
						 POLARIZATION_UNKNOWN);
		}
		topology_expect_change();
		break;
	case 1:
		if (pcpu->state != CPU_STATE_STANDBY)
			break;
		rc = sclp_core_configure(pcpu->address >> smp_cpu_mt_shift);
		if (rc)
			break;
		for (i = 0; i <= smp_cpu_mtid; i++) {
			if (cpu + i >= nr_cpu_ids || !cpu_present(cpu + i))
				continue;
			pcpu[i].state = CPU_STATE_CONFIGURED;
			smp_cpu_set_polarization(cpu + i,
						 POLARIZATION_UNKNOWN);
		}
		topology_expect_change();
		break;
	default:
		break;
	}
out:
	mutex_unlock(&smp_cpu_state_mutex);
	put_online_cpus();
	return rc ? rc : count;
}
static DEVICE_ATTR(configure, 0644, cpu_configure_show, cpu_configure_store);
#endif /* CONFIG_HOTPLUG_CPU */
static ssize_t show_cpu_address(struct device *dev,
				struct device_attribute *attr, char *buf)
{
	return sprintf(buf, "%d\n", pcpu_devices[dev->id].address);
}
static DEVICE_ATTR(address, 0444, show_cpu_address, NULL);
static struct attribute *cpu_common_attrs[] = {
#ifdef CONFIG_HOTPLUG_CPU
	&dev_attr_configure.attr,
#endif
	&dev_attr_address.attr,
	NULL,
};

static struct attribute_group cpu_common_attr_group = {
	.attrs = cpu_common_attrs,
};

static struct attribute *cpu_online_attrs[] = {
	&dev_attr_idle_count.attr,
	&dev_attr_idle_time_us.attr,
	NULL,
};

static struct attribute_group cpu_online_attr_group = {
	.attrs = cpu_online_attrs,
};
static int smp_cpu_online(unsigned int cpu)
{
	struct device *s = &per_cpu(cpu_device, cpu)->dev;

	return sysfs_create_group(&s->kobj, &cpu_online_attr_group);
}

static int smp_cpu_pre_down(unsigned int cpu)
{
	struct device *s = &per_cpu(cpu_device, cpu)->dev;

	sysfs_remove_group(&s->kobj, &cpu_online_attr_group);
	return 0;
}
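
/*
 * smp_cpu_online() and smp_cpu_pre_down() are registered as cpuhp
 * callbacks in s390_smp_init(); they create and remove the online
 * attribute group as cpus come up and go down.
 */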
static int smp_add_present_cpu(int cpu)
{
	struct device *s;
	struct cpu *c;
	int rc;

	c = kzalloc(sizeof(*c), GFP_KERNEL);
	if (!c)
		return -ENOMEM;
	per_cpu(cpu_device, cpu) = c;
	s = &c->dev;
	c->hotpluggable = 1;
	rc = register_cpu(c, cpu);
	if (rc)
		goto out;
	rc = sysfs_create_group(&s->kobj, &cpu_common_attr_group);
	if (rc)
		goto out_cpu;
	rc = topology_cpu_init(c);
	if (rc)
		goto out_topology;
	return 0;

out_topology:
	sysfs_remove_group(&s->kobj, &cpu_common_attr_group);
out_cpu:
#ifdef CONFIG_HOTPLUG_CPU
	unregister_cpu(c);
#endif
out:
	return rc;
}

#ifdef CONFIG_HOTPLUG_CPU

int __ref smp_rescan_cpus(void)
{
	struct sclp_core_info *info;
	int nr;

	info = kzalloc(sizeof(*info), GFP_KERNEL);
	if (!info)
		return -ENOMEM;
	smp_get_core_info(info, 0);
	get_online_cpus();
	mutex_lock(&smp_cpu_state_mutex);
	nr = __smp_rescan_cpus(info, 1);
	mutex_unlock(&smp_cpu_state_mutex);
	put_online_cpus();
	kfree(info);
	if (nr)
		topology_schedule_update();
	return 0;
}
static ssize_t __ref rescan_store(struct device *dev,
				  struct device_attribute *attr,
				  const char *buf,
				  size_t count)
{
	int rc;

	rc = smp_rescan_cpus();
	return rc ? rc : count;
}
static DEVICE_ATTR(rescan, 0200, NULL, rescan_store);
#endif /* CONFIG_HOTPLUG_CPU */
static int __init s390_smp_init(void)
{
	int cpu, rc = 0;

#ifdef CONFIG_HOTPLUG_CPU
	rc = device_create_file(cpu_subsys.dev_root, &dev_attr_rescan);
	if (rc)
		return rc;
#endif
	for_each_present_cpu(cpu) {
		rc = smp_add_present_cpu(cpu);
		if (rc)
			goto out;
	}

	rc = cpuhp_setup_state(CPUHP_AP_ONLINE_DYN, "s390/smp:online",
			       smp_cpu_online, smp_cpu_pre_down);
	rc = rc <= 0 ? rc : 0;
out:
	return rc;
}
subsys_initcall(s390_smp_init);