// SPDX-License-Identifier: GPL-2.0
/*
 *  SMP related functions
 *
 *    Copyright IBM Corp. 1999, 2012
 *    Author(s): Denis Joseph Barrow,
 *		 Martin Schwidefsky <schwidefsky@de.ibm.com>,
 *		 Heiko Carstens <heiko.carstens@de.ibm.com>,
 *
 *  based on other smp stuff by
 *    (c) 1995 Alan Cox, CymruNET Ltd  <alan@cymru.net>
 *    (c) 1998 Ingo Molnar
 *
 * The code outside of smp.c uses logical cpu numbers, only smp.c does
 * the translation of logical to physical cpu ids. All new code that
 * operates on physical cpu numbers needs to go into smp.c.
 */
#define KMSG_COMPONENT "cpu"
#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt

#include <linux/workqueue.h>
#include <linux/memblock.h>
#include <linux/export.h>
#include <linux/init.h>
#include <linux/mm.h>
#include <linux/err.h>
#include <linux/spinlock.h>
#include <linux/kernel_stat.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
#include <linux/irqflags.h>
#include <linux/cpu.h>
#include <linux/slab.h>
#include <linux/sched/hotplug.h>
#include <linux/sched/task_stack.h>
#include <linux/crash_dump.h>
#include <linux/kprobes.h>
#include <asm/asm-offsets.h>
#include <asm/diag.h>
#include <asm/switch_to.h>
#include <asm/facility.h>
#include <asm/ipl.h>
#include <asm/setup.h>
#include <asm/irq.h>
#include <asm/tlbflush.h>
#include <asm/vtimer.h>
#include <asm/lowcore.h>
#include <asm/sclp.h>
#include <asm/vdso.h>
#include <asm/debug.h>
#include <asm/os_info.h>
#include <asm/sigp.h>
#include <asm/idle.h>
#include <asm/nmi.h>
#include <asm/stacktrace.h>
#include <asm/topology.h>
#include "entry.h"
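
/*
 * The ec_* bits below select the action that smp_handle_ext_call()
 * performs on the target cpu. pcpu_ec_call() sets a bit in the per-cpu
 * ec_mask and then signals the cpu.
 */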
enum {
	ec_schedule = 0,
	ec_call_function_single,
	ec_stop_cpu,
	ec_mcck_pending,
};

enum {
	CPU_STATE_STANDBY,
	CPU_STATE_CONFIGURED,
};

static DEFINE_PER_CPU(struct cpu *, cpu_device);

struct pcpu {
	struct lowcore *lowcore;	/* lowcore page(s) for the cpu */
	unsigned long ec_mask;		/* bit mask for ec_xxx functions */
	unsigned long ec_clk;		/* sigp timestamp for ec_xxx */
	signed char state;		/* physical cpu state */
	signed char polarization;	/* physical polarization */
	u16 address;			/* physical cpu address */
};
static u8 boot_core_type;
static struct pcpu pcpu_devices[NR_CPUS];

unsigned int smp_cpu_mt_shift;
EXPORT_SYMBOL(smp_cpu_mt_shift);

unsigned int smp_cpu_mtid;
EXPORT_SYMBOL(smp_cpu_mtid);
#ifdef CONFIG_CRASH_DUMP
__vector128 __initdata boot_cpu_vector_save_area[__NUM_VXRS];
#endif

static unsigned int smp_max_threads __initdata = -1U;
static int __init early_nosmt(char *s)
{
	smp_max_threads = 1;
	return 0;
}
early_param("nosmt", early_nosmt);

static int __init early_smt(char *s)
{
	get_option(&s, &smp_max_threads);
	return 0;
}
early_param("smt", early_smt);
/*
 * The smp_cpu_state_mutex must be held when changing the state or polarization
 * member of a pcpu data structure within the pcpu_devices array.
 */
DEFINE_MUTEX(smp_cpu_state_mutex);
/*
 * Signal processor helper functions.
 */
static inline int __pcpu_sigp_relax(u16 addr, u8 order, unsigned long parm)
{
	int cc;

	while (1) {
		cc = __pcpu_sigp(addr, order, parm, NULL);
		if (cc != SIGP_CC_BUSY)
			return cc;
		cpu_relax();
	}
}
static int pcpu_sigp_retry(struct pcpu *pcpu, u8 order, u32 parm)
{
	int cc, retry;

	for (retry = 0; ; retry++) {
		cc = __pcpu_sigp(pcpu->address, order, parm, NULL);
		if (cc != SIGP_CC_BUSY)
			break;
		if (retry >= 3)
			udelay(10);
	}
	return cc;
}
static inline int pcpu_stopped(struct pcpu *pcpu)
{
	u32 status;

	if (__pcpu_sigp(pcpu->address, SIGP_SENSE,
			0, &status) != SIGP_CC_STATUS_STORED)
		return 0;
	return !!(status & (SIGP_STATUS_CHECK_STOP|SIGP_STATUS_STOPPED));
}
static inline int pcpu_running(struct pcpu *pcpu)
{
	if (__pcpu_sigp(pcpu->address, SIGP_SENSE_RUNNING,
			0, NULL) != SIGP_CC_STATUS_STORED)
		return 1;
	/* Status stored condition code is equivalent to cpu not running. */
	return 0;
}
/*
 * Find struct pcpu by cpu address.
 */
static struct pcpu *pcpu_find_address(const struct cpumask *mask, u16 address)
{
	int cpu;

	for_each_cpu(cpu, mask)
		if (pcpu_devices[cpu].address == address)
			return pcpu_devices + cpu;
	return NULL;
}
static void pcpu_ec_call(struct pcpu *pcpu, int ec_bit)
{
	int order;

	if (test_and_set_bit(ec_bit, &pcpu->ec_mask))
		return;
	order = pcpu_running(pcpu) ? SIGP_EXTERNAL_CALL : SIGP_EMERGENCY_SIGNAL;
	pcpu->ec_clk = get_tod_clock_fast();
	pcpu_sigp_retry(pcpu, order, 0);
}
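
/*
 * Allocate the lowcore (prefix page) and the kernel stacks for a new
 * cpu. The lowcore is allocated with GFP_DMA because its address has
 * to fit into the 32-bit parameter of the SIGP set-prefix order used
 * below.
 */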
static int pcpu_alloc_lowcore(struct pcpu *pcpu, int cpu)
{
	unsigned long async_stack, nodat_stack;
	struct lowcore *lc;

	if (pcpu != &pcpu_devices[0]) {
		pcpu->lowcore =	(struct lowcore *)
			__get_free_pages(GFP_KERNEL | GFP_DMA, LC_ORDER);
		nodat_stack = __get_free_pages(GFP_KERNEL, THREAD_SIZE_ORDER);
		if (!pcpu->lowcore || !nodat_stack)
			goto out;
	} else {
		nodat_stack = pcpu->lowcore->nodat_stack - STACK_INIT_OFFSET;
	}
	async_stack = stack_alloc();
	if (!async_stack)
		goto out;
	lc = pcpu->lowcore;
	memcpy(lc, &S390_lowcore, 512);
	memset((char *) lc + 512, 0, sizeof(*lc) - 512);
	lc->async_stack = async_stack + STACK_INIT_OFFSET;
	lc->nodat_stack = nodat_stack + STACK_INIT_OFFSET;
	lc->cpu_nr = cpu;
	lc->spinlock_lockval = arch_spin_lockval(cpu);
	lc->spinlock_index = 0;
	lc->br_r1_trampoline = 0x07f1;	/* br %r1 */
	lc->return_lpswe = gen_lpswe(__LC_RETURN_PSW);
	lc->return_mcck_lpswe = gen_lpswe(__LC_RETURN_MCCK_PSW);
	if (nmi_alloc_per_cpu(lc))
		goto out_async;
	if (vdso_alloc_per_cpu(lc))
		goto out_mcesa;
	lowcore_ptr[cpu] = lc;
	pcpu_sigp_retry(pcpu, SIGP_SET_PREFIX, (u32)(unsigned long) lc);
	return 0;

out_mcesa:
	nmi_free_per_cpu(lc);
out_async:
	stack_free(async_stack);
out:
	if (pcpu != &pcpu_devices[0]) {
		free_pages(nodat_stack, THREAD_SIZE_ORDER);
		free_pages((unsigned long) pcpu->lowcore, LC_ORDER);
	}
	return -ENOMEM;
}
static void pcpu_free_lowcore(struct pcpu *pcpu)
{
	unsigned long async_stack, nodat_stack, lowcore;

	nodat_stack = pcpu->lowcore->nodat_stack - STACK_INIT_OFFSET;
	async_stack = pcpu->lowcore->async_stack - STACK_INIT_OFFSET;
	lowcore = (unsigned long) pcpu->lowcore;

	pcpu_sigp_retry(pcpu, SIGP_SET_PREFIX, 0);
	lowcore_ptr[pcpu - pcpu_devices] = NULL;
	vdso_free_per_cpu(pcpu->lowcore);
	nmi_free_per_cpu(pcpu->lowcore);
	stack_free(async_stack);
	if (pcpu == &pcpu_devices[0])
		return;
	free_pages(nodat_stack, THREAD_SIZE_ORDER);
	free_pages(lowcore, LC_ORDER);
}
static void pcpu_prepare_secondary(struct pcpu *pcpu, int cpu)
{
	struct lowcore *lc = pcpu->lowcore;

	cpumask_set_cpu(cpu, &init_mm.context.cpu_attach_mask);
	cpumask_set_cpu(cpu, mm_cpumask(&init_mm));
	lc->cpu_nr = cpu;
	lc->spinlock_lockval = arch_spin_lockval(cpu);
	lc->spinlock_index = 0;
	lc->percpu_offset = __per_cpu_offset[cpu];
	lc->kernel_asce = S390_lowcore.kernel_asce;
	lc->user_asce = S390_lowcore.kernel_asce;
	lc->machine_flags = S390_lowcore.machine_flags;
	lc->user_timer = lc->system_timer =
		lc->steal_timer = lc->avg_steal_timer = 0;
	__ctl_store(lc->cregs_save_area, 0, 15);
	lc->cregs_save_area[1] = lc->kernel_asce;
	lc->cregs_save_area[7] = lc->vdso_asce;
	save_access_regs((unsigned int *) lc->access_regs_save_area);
	memcpy(lc->stfle_fac_list, S390_lowcore.stfle_fac_list,
	       sizeof(lc->stfle_fac_list));
	memcpy(lc->alt_stfle_fac_list, S390_lowcore.alt_stfle_fac_list,
	       sizeof(lc->alt_stfle_fac_list));
	arch_spin_lock_setup(cpu);
}
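
/*
 * Wire the idle task of the new cpu into its lowcore: kernel stack
 * pointer, current task pointer and the accumulated cputime values.
 */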
static void pcpu_attach_task(struct pcpu *pcpu, struct task_struct *tsk)
{
	struct lowcore *lc = pcpu->lowcore;

	lc->kernel_stack = (unsigned long) task_stack_page(tsk)
		+ THREAD_SIZE - STACK_FRAME_OVERHEAD - sizeof(struct pt_regs);
	lc->current_task = (unsigned long) tsk;
	lc->lpp = LPP_MAGIC;
	lc->current_pid = tsk->pid;
	lc->user_timer = tsk->thread.user_timer;
	lc->guest_timer = tsk->thread.guest_timer;
	lc->system_timer = tsk->thread.system_timer;
	lc->hardirq_timer = tsk->thread.hardirq_timer;
	lc->softirq_timer = tsk->thread.softirq_timer;
	lc->steal_timer = 0;
}
static void pcpu_start_fn(struct pcpu *pcpu, void (*func)(void *), void *data)
{
	struct lowcore *lc = pcpu->lowcore;

	lc->restart_stack = lc->nodat_stack;
	lc->restart_fn = (unsigned long) func;
	lc->restart_data = (unsigned long) data;
	lc->restart_source = -1UL;
	pcpu_sigp_retry(pcpu, SIGP_RESTART, 0);
}
/*
 * Call function via PSW restart on pcpu and stop the current cpu.
 */
static void __pcpu_delegate(void (*func)(void *), void *data)
{
	func(data);	/* should not return */
}

static void __no_sanitize_address pcpu_delegate(struct pcpu *pcpu,
						void (*func)(void *),
						void *data, unsigned long stack)
{
	struct lowcore *lc = lowcore_ptr[pcpu - pcpu_devices];
	unsigned long source_cpu = stap();

	__load_psw_mask(PSW_KERNEL_BITS | PSW_MASK_DAT);
	if (pcpu->address == source_cpu)
		CALL_ON_STACK(__pcpu_delegate, stack, 2, func, data);
	/* Stop target cpu (if func returns this stops the current cpu). */
	pcpu_sigp_retry(pcpu, SIGP_STOP, 0);
	/* Restart func on the target cpu and stop the current cpu. */
	mem_assign_absolute(lc->restart_stack, stack);
	mem_assign_absolute(lc->restart_fn, (unsigned long) func);
	mem_assign_absolute(lc->restart_data, (unsigned long) data);
	mem_assign_absolute(lc->restart_source, source_cpu);
	__bpon();
	asm volatile(
		"0:	sigp	0,%0,%2	# sigp restart to target cpu\n"
		"	brc	2,0b	# busy, try again\n"
		"1:	sigp	0,%1,%3	# sigp stop to current cpu\n"
		"	brc	2,1b	# busy, try again\n"
		: : "d" (pcpu->address), "d" (source_cpu),
		    "K" (SIGP_RESTART), "K" (SIGP_STOP)
		: "0", "1", "cc");
	for (;;) ;
}
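
/*
 * With multi-threading enabled a core provides smp_cpu_mtid + 1
 * hardware threads; the physical cpu address of a thread is
 * (core_id << smp_cpu_mt_shift) + thread id.
 */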
/*
 * Enable additional logical cpus for multi-threading.
 */
static int pcpu_set_smt(unsigned int mtid)
{
	int cc;

	if (smp_cpu_mtid == mtid)
		return 0;
	cc = __pcpu_sigp(0, SIGP_SET_MULTI_THREADING, mtid, NULL);
	if (cc == 0) {
		smp_cpu_mtid = mtid;
		smp_cpu_mt_shift = 0;
		while (smp_cpu_mtid >= (1U << smp_cpu_mt_shift))
			smp_cpu_mt_shift++;
		pcpu_devices[0].address = stap();
	}
	return cc;
}
/*
 * Call function on an online CPU.
 */
void smp_call_online_cpu(void (*func)(void *), void *data)
{
	struct pcpu *pcpu;

	/* Use the current cpu if it is online. */
	pcpu = pcpu_find_address(cpu_online_mask, stap());
	if (!pcpu)
		/* Use the first online cpu. */
		pcpu = pcpu_devices + cpumask_first(cpu_online_mask);
	pcpu_delegate(pcpu, func, data, (unsigned long) restart_stack);
}
/*
 * Call function on the ipl CPU.
 */
void smp_call_ipl_cpu(void (*func)(void *), void *data)
{
	struct lowcore *lc = pcpu_devices->lowcore;

	if (pcpu_devices[0].address == stap())
		lc = &S390_lowcore;

	pcpu_delegate(&pcpu_devices[0], func, data,
		      lc->nodat_stack);
}
int smp_find_processor_id(u16 address)
{
	int cpu;

	for_each_present_cpu(cpu)
		if (pcpu_devices[cpu].address == address)
			return cpu;
	return -1;
}
void schedule_mcck_handler(void)
{
	pcpu_ec_call(pcpu_devices + smp_processor_id(), ec_mcck_pending);
}
bool notrace arch_vcpu_is_preempted(int cpu)
{
	if (test_cpu_flag_of(CIF_ENABLED_WAIT, cpu))
		return false;
	if (pcpu_running(pcpu_devices + cpu))
		return false;
	return true;
}
EXPORT_SYMBOL(arch_vcpu_is_preempted);
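
/*
 * Diagnose 0x9c is a directed-yield hint that asks the hypervisor to
 * give the caller's time slice to the target cpu; it only makes sense
 * when running virtualized, hence the MACHINE_HAS_DIAG9C check below.
 */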
void notrace smp_yield_cpu(int cpu)
{
	if (!MACHINE_HAS_DIAG9C)
		return;
	diag_stat_inc_norecursion(DIAG_STAT_X09C);
	asm volatile("diag %0,0,0x9c"
		     : : "d" (pcpu_devices[cpu].address));
}
/*
 * Send cpus emergency shutdown signal. This gives the cpus the
 * opportunity to complete outstanding interrupts.
 */
void notrace smp_emergency_stop(void)
{
	cpumask_t cpumask;
	u64 end;
	int cpu;

	cpumask_copy(&cpumask, cpu_online_mask);
	cpumask_clear_cpu(smp_processor_id(), &cpumask);

	end = get_tod_clock() + (1000000UL << 12);
	for_each_cpu(cpu, &cpumask) {
		struct pcpu *pcpu = pcpu_devices + cpu;
		set_bit(ec_stop_cpu, &pcpu->ec_mask);
		while (__pcpu_sigp(pcpu->address, SIGP_EMERGENCY_SIGNAL,
				   0, NULL) == SIGP_CC_BUSY &&
		       get_tod_clock() < end)
			cpu_relax();
	}
	while (get_tod_clock() < end) {
		for_each_cpu(cpu, &cpumask)
			if (pcpu_stopped(pcpu_devices + cpu))
				cpumask_clear_cpu(cpu, &cpumask);
		if (cpumask_empty(&cpumask))
			break;
		cpu_relax();
	}
}
NOKPROBE_SYMBOL(smp_emergency_stop);
/*
 * Stop all cpus but the current one.
 */
void smp_send_stop(void)
{
	int cpu;

	/* Disable all interrupts/machine checks */
	__load_psw_mask(PSW_KERNEL_BITS | PSW_MASK_DAT);
	trace_hardirqs_off();

	debug_set_critical();

	if (oops_in_progress)
		smp_emergency_stop();

	/* stop all processors */
	for_each_online_cpu(cpu) {
		if (cpu == smp_processor_id())
			continue;
		pcpu_sigp_retry(pcpu_devices + cpu, SIGP_STOP, 0);
		while (!pcpu_stopped(pcpu_devices + cpu))
			cpu_relax();
	}
}
/*
 * This is the main routine where commands issued by other
 * cpus are handled.
 */
static void smp_handle_ext_call(void)
{
	unsigned long bits;

	/* handle bit signal external calls */
	bits = xchg(&pcpu_devices[smp_processor_id()].ec_mask, 0);
	if (test_bit(ec_stop_cpu, &bits))
		smp_stop_cpu();
	if (test_bit(ec_schedule, &bits))
		scheduler_ipi();
	if (test_bit(ec_call_function_single, &bits))
		generic_smp_call_function_single_interrupt();
	if (test_bit(ec_mcck_pending, &bits))
		s390_handle_mcck();
}

static void do_ext_call_interrupt(struct ext_code ext_code,
				  unsigned int param32, unsigned long param64)
{
	inc_irq_stat(ext_code.code == 0x1202 ? IRQEXT_EXC : IRQEXT_EMS);
	smp_handle_ext_call();
}
void arch_send_call_function_ipi_mask(const struct cpumask *mask)
{
	int cpu;

	for_each_cpu(cpu, mask)
		pcpu_ec_call(pcpu_devices + cpu, ec_call_function_single);
}

void arch_send_call_function_single_ipi(int cpu)
{
	pcpu_ec_call(pcpu_devices + cpu, ec_call_function_single);
}
/*
 * this function sends a 'reschedule' IPI to another CPU.
 * it goes straight through and wastes no time serializing
 * anything. Worst case is that we lose a reschedule ...
 */
void smp_send_reschedule(int cpu)
{
	pcpu_ec_call(pcpu_devices + cpu, ec_schedule);
}
/*
 * parameter area for the set/clear control bit callbacks
 */
struct ec_creg_mask_parms {
	unsigned long orval;
	unsigned long andval;
	int cr;
};
/*
 * callback for setting/clearing control bits
 */
static void smp_ctl_bit_callback(void *info)
{
	struct ec_creg_mask_parms *pp = info;
	unsigned long cregs[16];

	__ctl_store(cregs, 0, 15);
	cregs[pp->cr] = (cregs[pp->cr] & pp->andval) | pp->orval;
	__ctl_load(cregs, 0, 15);
}
/*
 * Set a bit in a control register of all cpus
 */
void smp_ctl_set_bit(int cr, int bit)
{
	struct ec_creg_mask_parms parms = { 1UL << bit, -1UL, cr };

	on_each_cpu(smp_ctl_bit_callback, &parms, 1);
}
EXPORT_SYMBOL(smp_ctl_set_bit);
/*
 * Clear a bit in a control register of all cpus
 */
void smp_ctl_clear_bit(int cr, int bit)
{
	struct ec_creg_mask_parms parms = { 0, ~(1UL << bit), cr };

	on_each_cpu(smp_ctl_bit_callback, &parms, 1);
}
EXPORT_SYMBOL(smp_ctl_clear_bit);
#ifdef CONFIG_CRASH_DUMP

int smp_store_status(int cpu)
{
	struct pcpu *pcpu = pcpu_devices + cpu;
	unsigned long pa;

	pa = __pa(&pcpu->lowcore->floating_pt_save_area);
	if (__pcpu_sigp_relax(pcpu->address, SIGP_STORE_STATUS_AT_ADDRESS,
			      pa) != SIGP_CC_ORDER_CODE_ACCEPTED)
		return -EIO;
	if (!MACHINE_HAS_VX && !MACHINE_HAS_GS)
		return 0;
	pa = __pa(pcpu->lowcore->mcesad & MCESA_ORIGIN_MASK);
	if (MACHINE_HAS_GS)
		pa |= pcpu->lowcore->mcesad & MCESA_LC_MASK;
	if (__pcpu_sigp_relax(pcpu->address, SIGP_STORE_ADDITIONAL_STATUS,
			      pa) != SIGP_CC_ORDER_CODE_ACCEPTED)
		return -EIO;
	return 0;
}
/*
 * Collect CPU state of the previous, crashed system.
 * There are four cases:
 * 1) standard zfcp/nvme dump
 *    condition: OLDMEM_BASE == NULL && is_ipl_type_dump() == true
 *    The state for all CPUs except the boot CPU needs to be collected
 *    with sigp stop-and-store-status. The boot CPU state is located in
 *    the absolute lowcore of the memory stored in the HSA. The zcore code
 *    will copy the boot CPU state from the HSA.
 * 2) stand-alone kdump for SCSI/NVMe (zfcp/nvme dump with swapped memory)
 *    condition: OLDMEM_BASE != NULL && is_ipl_type_dump() == true
 *    The state for all CPUs except the boot CPU needs to be collected
 *    with sigp stop-and-store-status. The firmware or the boot-loader
 *    stored the registers of the boot CPU in the absolute lowcore in the
 *    memory of the old system.
 * 3) kdump and the old kernel did not store the CPU state,
 *    or stand-alone kdump for DASD
 *    condition: OLDMEM_BASE != NULL && !is_kdump_kernel()
 *    The state for all CPUs except the boot CPU needs to be collected
 *    with sigp stop-and-store-status. The kexec code or the boot-loader
 *    stored the registers of the boot CPU in the memory of the old system.
 * 4) kdump and the old kernel stored the CPU state
 *    condition: OLDMEM_BASE != NULL && is_kdump_kernel()
 *    This case does not exist for s390 anymore, setup_arch explicitly
 *    deactivates the elfcorehdr= kernel parameter
 */
static __init void smp_save_cpu_vxrs(struct save_area *sa, u16 addr,
				     bool is_boot_cpu, unsigned long page)
{
	__vector128 *vxrs = (__vector128 *) page;

	if (is_boot_cpu)
		vxrs = boot_cpu_vector_save_area;
	else
		__pcpu_sigp_relax(addr, SIGP_STORE_ADDITIONAL_STATUS, page);
	save_area_add_vxrs(sa, vxrs);
}
static __init void smp_save_cpu_regs(struct save_area *sa, u16 addr,
				     bool is_boot_cpu, unsigned long page)
{
	void *regs = (void *) page;

	if (is_boot_cpu)
		copy_oldmem_kernel(regs, (void *) __LC_FPREGS_SAVE_AREA, 512);
	else
		__pcpu_sigp_relax(addr, SIGP_STORE_STATUS_AT_ADDRESS, page);
	save_area_add_regs(sa, regs);
}
void __init smp_save_dump_cpus(void)
{
	int addr, boot_cpu_addr, max_cpu_addr;
	struct save_area *sa;
	unsigned long page;
	bool is_boot_cpu;

	if (!(OLDMEM_BASE || is_ipl_type_dump()))
		/* No previous system present, normal boot. */
		return;
	/* Allocate a page as dumping area for the store status sigps */
	page = memblock_phys_alloc_range(PAGE_SIZE, PAGE_SIZE, 0, 1UL << 31);
	if (!page)
		panic("ERROR: Failed to allocate %lx bytes below %lx\n",
		      PAGE_SIZE, 1UL << 31);

	/* Set multi-threading state to the previous system. */
	pcpu_set_smt(sclp.mtid_prev);
	boot_cpu_addr = stap();
	max_cpu_addr = SCLP_MAX_CORES << sclp.mtid_prev;
	for (addr = 0; addr <= max_cpu_addr; addr++) {
		if (__pcpu_sigp_relax(addr, SIGP_SENSE, 0) ==
		    SIGP_CC_NOT_OPERATIONAL)
			continue;
		is_boot_cpu = (addr == boot_cpu_addr);
		/* Allocate save area */
		sa = save_area_alloc(is_boot_cpu);
		if (!sa)
			panic("could not allocate memory for save area\n");
		/* Get the vector registers */
		smp_save_cpu_vxrs(sa, addr, is_boot_cpu, page);
		/*
		 * For a zfcp/nvme dump OLDMEM_BASE == NULL and the registers
		 * of the boot CPU are stored in the HSA. To retrieve
		 * these registers an SCLP request is required which is
		 * done by drivers/s390/char/zcore.c:init_cpu_info()
		 */
		if (!is_boot_cpu || OLDMEM_BASE)
			/* Get the CPU registers */
			smp_save_cpu_regs(sa, addr, is_boot_cpu, page);
	}
	memblock_free(page, PAGE_SIZE);
	diag_dma_ops.diag308_reset();
	pcpu_set_smt(0);
}

#endif /* CONFIG_CRASH_DUMP */
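
/*
 * The polarization (horizontal/vertical) of a cpu is maintained by the
 * topology code; the pcpu array merely caches the last value set.
 */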
void smp_cpu_set_polarization(int cpu, int val)
{
	pcpu_devices[cpu].polarization = val;
}

int smp_cpu_get_polarization(int cpu)
{
	return pcpu_devices[cpu].polarization;
}

int smp_cpu_get_cpu_address(int cpu)
{
	return pcpu_devices[cpu].address;
}
static void __ref smp_get_core_info(struct sclp_core_info *info, int early)
{
	static int use_sigp_detection;
	int address;

	if (use_sigp_detection || sclp_get_core_info(info, early)) {
		use_sigp_detection = 1;
		for (address = 0;
		     address < (SCLP_MAX_CORES << smp_cpu_mt_shift);
		     address += (1U << smp_cpu_mt_shift)) {
			if (__pcpu_sigp_relax(address, SIGP_SENSE, 0) ==
			    SIGP_CC_NOT_OPERATIONAL)
				continue;
			info->core[info->configured].core_id =
				address >> smp_cpu_mt_shift;
			info->configured++;
		}
		info->combined = info->configured;
	}
}
static int smp_add_present_cpu(int cpu);
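
/*
 * Map one physical core to logical cpus: each of the smp_cpu_mtid + 1
 * threads of the core gets the next free logical cpu number taken from
 * the avail mask.
 */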
static int smp_add_core(struct sclp_core_entry *core, cpumask_t *avail,
			bool configured, bool early)
{
	struct pcpu *pcpu;
	int cpu, nr, i;
	u16 address;

	nr = 0;
	if (sclp.has_core_type && core->type != boot_core_type)
		return nr;
	cpu = cpumask_first(avail);
	address = core->core_id << smp_cpu_mt_shift;
	for (i = 0; (i <= smp_cpu_mtid) && (cpu < nr_cpu_ids); i++) {
		if (pcpu_find_address(cpu_present_mask, address + i))
			continue;
		pcpu = pcpu_devices + cpu;
		pcpu->address = address + i;
		if (configured)
			pcpu->state = CPU_STATE_CONFIGURED;
		else
			pcpu->state = CPU_STATE_STANDBY;
		smp_cpu_set_polarization(cpu, POLARIZATION_UNKNOWN);
		set_cpu_present(cpu, true);
		if (!early && smp_add_present_cpu(cpu) != 0)
			set_cpu_present(cpu, false);
		else
			nr++;
		cpumask_clear_cpu(cpu, avail);
		cpu = cpumask_next(cpu, avail);
	}
	return nr;
}
static int __smp_rescan_cpus(struct sclp_core_info *info, bool early)
{
	struct sclp_core_entry *core;
	static cpumask_t avail;
	bool configured;
	u16 core_id;
	int nr, i;

	nr = 0;
	cpumask_xor(&avail, cpu_possible_mask, cpu_present_mask);
	/*
	 * Add IPL core first (which got logical CPU number 0) to make sure
	 * that all SMT threads get subsequent logical CPU numbers.
	 */
	if (early) {
		core_id = pcpu_devices[0].address >> smp_cpu_mt_shift;
		for (i = 0; i < info->configured; i++) {
			core = &info->core[i];
			if (core->core_id == core_id) {
				nr += smp_add_core(core, &avail, true, early);
				break;
			}
		}
	}
	for (i = 0; i < info->combined; i++) {
		configured = i < info->configured;
		nr += smp_add_core(&info->core[i], &avail, configured, early);
	}
	return nr;
}
void __init smp_detect_cpus(void)
{
	unsigned int cpu, mtid, c_cpus, s_cpus;
	struct sclp_core_info *info;
	u16 address;

	/* Get CPU information */
	info = memblock_alloc(sizeof(*info), 8);
	if (!info)
		panic("%s: Failed to allocate %zu bytes align=0x%x\n",
		      __func__, sizeof(*info), 8);
	smp_get_core_info(info, 1);
	/* Find boot CPU type */
	if (sclp.has_core_type) {
		address = stap();
		for (cpu = 0; cpu < info->combined; cpu++)
			if (info->core[cpu].core_id == address) {
				/* The boot cpu dictates the cpu type. */
				boot_core_type = info->core[cpu].type;
				break;
			}
		if (cpu >= info->combined)
			panic("Could not find boot CPU type");
	}

	/* Set multi-threading state for the current system */
	mtid = boot_core_type ? sclp.mtid : sclp.mtid_cp;
	mtid = (mtid < smp_max_threads) ? mtid : smp_max_threads - 1;
	pcpu_set_smt(mtid);

	/* Print number of CPUs */
	c_cpus = s_cpus = 0;
	for (cpu = 0; cpu < info->combined; cpu++) {
		if (sclp.has_core_type &&
		    info->core[cpu].type != boot_core_type)
			continue;
		if (cpu < info->configured)
			c_cpus += smp_cpu_mtid + 1;
		else
			s_cpus += smp_cpu_mtid + 1;
	}
	pr_info("%d configured CPUs, %d standby CPUs\n", c_cpus, s_cpus);

	/* Add CPUs present at boot */
	get_online_cpus();
	__smp_rescan_cpus(info, true);
	put_online_cpus();
	memblock_free_early((unsigned long)info, sizeof(*info));
}
static void smp_init_secondary(void)
{
	int cpu = raw_smp_processor_id();

	S390_lowcore.last_update_clock = get_tod_clock();
	restore_access_regs(S390_lowcore.access_regs_save_area);
	set_cpu_flag(CIF_ASCE_PRIMARY);
	set_cpu_flag(CIF_ASCE_SECONDARY);
	cpu_init();
	rcu_cpu_starting(cpu);
	preempt_disable();
	init_cpu_timer();
	vtime_init();
	pfault_init();
	notify_cpu_starting(cpu);
	if (topology_cpu_dedicated(cpu))
		set_cpu_flag(CIF_DEDICATED_CPU);
	else
		clear_cpu_flag(CIF_DEDICATED_CPU);
	set_cpu_online(cpu, true);
	update_cpu_masks();
	inc_irq_stat(CPU_RST);
	local_irq_enable();
	cpu_startup_entry(CPUHP_AP_ONLINE_IDLE);
}
/*
 *	Activate a secondary processor.
 */
static void __no_sanitize_address smp_start_secondary(void *cpuvoid)
{
	S390_lowcore.restart_stack = (unsigned long) restart_stack;
	S390_lowcore.restart_fn = (unsigned long) do_restart;
	S390_lowcore.restart_data = 0;
	S390_lowcore.restart_source = -1UL;
	__ctl_load(S390_lowcore.cregs_save_area, 0, 15);
	__load_psw_mask(PSW_KERNEL_BITS | PSW_MASK_DAT);
	CALL_ON_STACK_NORETURN(smp_init_secondary, S390_lowcore.kernel_stack);
}
/* Upping and downing of CPUs */
int __cpu_up(unsigned int cpu, struct task_struct *tidle)
{
	struct pcpu *pcpu = pcpu_devices + cpu;
	int rc;

	if (pcpu->state != CPU_STATE_CONFIGURED)
		return -EIO;
	if (pcpu_sigp_retry(pcpu, SIGP_INITIAL_CPU_RESET, 0) !=
	    SIGP_CC_ORDER_CODE_ACCEPTED)
		return -EIO;

	rc = pcpu_alloc_lowcore(pcpu, cpu);
	if (rc)
		return rc;
	pcpu_prepare_secondary(pcpu, cpu);
	pcpu_attach_task(pcpu, tidle);
	pcpu_start_fn(pcpu, smp_start_secondary, NULL);
	/* Wait until cpu puts itself in the online & active maps */
	while (!cpu_online(cpu))
		cpu_relax();
	return 0;
}
static unsigned int setup_possible_cpus __initdata;

static int __init _setup_possible_cpus(char *s)
{
	get_option(&s, &setup_possible_cpus);
	return 0;
}
early_param("possible_cpus", _setup_possible_cpus);
int __cpu_disable(void)
{
	unsigned long cregs[16];

	/* Handle possible pending IPIs */
	smp_handle_ext_call();
	set_cpu_online(smp_processor_id(), false);
	update_cpu_masks();
	/* Disable pseudo page faults on this cpu. */
	pfault_fini();
	/* Disable interrupt sources via control register. */
	__ctl_store(cregs, 0, 15);
	cregs[0] &= ~0x0000ee70UL;	/* disable all external interrupts */
	cregs[6] &= ~0xff000000UL;	/* disable all I/O interrupts */
	cregs[14] &= ~0x1f000000UL;	/* disable most machine checks */
	__ctl_load(cregs, 0, 15);
	clear_cpu_flag(CIF_NOHZ_DELAY);
	return 0;
}
void __cpu_die(unsigned int cpu)
{
	struct pcpu *pcpu;

	/* Wait until target cpu is down */
	pcpu = pcpu_devices + cpu;
	while (!pcpu_stopped(pcpu))
		cpu_relax();
	pcpu_free_lowcore(pcpu);
	cpumask_clear_cpu(cpu, mm_cpumask(&init_mm));
	cpumask_clear_cpu(cpu, &init_mm.context.cpu_attach_mask);
}
void __noreturn cpu_die(void)
{
	idle_task_exit();
	__bpon();
	pcpu_sigp_retry(pcpu_devices + smp_processor_id(), SIGP_STOP, 0);
	for (;;) ;
}
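
/*
 * The possible mask is the smaller of the "possible_cpus=" early
 * parameter (if given) and what SCLP reports: max_cores times the
 * number of threads per core.
 */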
void __init smp_fill_possible_mask(void)
{
	unsigned int possible, sclp_max, cpu;

	sclp_max = max(sclp.mtid, sclp.mtid_cp) + 1;
	sclp_max = min(smp_max_threads, sclp_max);
	sclp_max = (sclp.max_cores * sclp_max) ?: nr_cpu_ids;
	possible = setup_possible_cpus ?: nr_cpu_ids;
	possible = min(possible, sclp_max);
	for (cpu = 0; cpu < possible && cpu < nr_cpu_ids; cpu++)
		set_cpu_possible(cpu, true);
}
void __init smp_prepare_cpus(unsigned int max_cpus)
{
	/* request the 0x1201 emergency signal external interrupt */
	if (register_external_irq(EXT_IRQ_EMERGENCY_SIG, do_ext_call_interrupt))
		panic("Couldn't request external interrupt 0x1201");
	/* request the 0x1202 external call external interrupt */
	if (register_external_irq(EXT_IRQ_EXTERNAL_CALL, do_ext_call_interrupt))
		panic("Couldn't request external interrupt 0x1202");
}
void __init smp_prepare_boot_cpu(void)
{
	struct pcpu *pcpu = pcpu_devices;

	WARN_ON(!cpu_present(0) || !cpu_online(0));
	pcpu->state = CPU_STATE_CONFIGURED;
	pcpu->lowcore = (struct lowcore *)(unsigned long) store_prefix();
	S390_lowcore.percpu_offset = __per_cpu_offset[0];
	smp_cpu_set_polarization(0, POLARIZATION_UNKNOWN);
}
void __init smp_setup_processor_id(void)
{
	pcpu_devices[0].address = stap();
	S390_lowcore.cpu_nr = 0;
	S390_lowcore.spinlock_lockval = arch_spin_lockval(0);
	S390_lowcore.spinlock_index = 0;
}
/*
 * the frequency of the profiling timer can be changed
 * by writing a multiplier value into /proc/profile.
 *
 * usually you want to run this on all CPUs ;)
 */
int setup_profiling_timer(unsigned int multiplier)
{
	return 0;
}
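
/*
 * The sysfs attributes below implement configure/deconfigure of
 * standby cpus ("echo 0|1 > /sys/devices/system/cpu/cpuN/configure")
 * and expose the physical cpu address.
 */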
static ssize_t cpu_configure_show(struct device *dev,
				  struct device_attribute *attr, char *buf)
{
	ssize_t count;

	mutex_lock(&smp_cpu_state_mutex);
	count = sprintf(buf, "%d\n", pcpu_devices[dev->id].state);
	mutex_unlock(&smp_cpu_state_mutex);
	return count;
}
static ssize_t cpu_configure_store(struct device *dev,
				   struct device_attribute *attr,
				   const char *buf, size_t count)
{
	struct pcpu *pcpu;
	int cpu, val, rc, i;
	char delim;

	if (sscanf(buf, "%d %c", &val, &delim) != 1)
		return -EINVAL;
	if (val != 0 && val != 1)
		return -EINVAL;
	get_online_cpus();
	mutex_lock(&smp_cpu_state_mutex);
	rc = -EBUSY;
	/* disallow configuration changes of online cpus and cpu 0 */
	cpu = dev->id;
	cpu = smp_get_base_cpu(cpu);
	if (cpu == 0)
		goto out;
	for (i = 0; i <= smp_cpu_mtid; i++)
		if (cpu_online(cpu + i))
			goto out;
	pcpu = pcpu_devices + cpu;
	rc = 0;
	switch (val) {
	case 0:
		if (pcpu->state != CPU_STATE_CONFIGURED)
			break;
		rc = sclp_core_deconfigure(pcpu->address >> smp_cpu_mt_shift);
		if (rc)
			break;
		for (i = 0; i <= smp_cpu_mtid; i++) {
			if (cpu + i >= nr_cpu_ids || !cpu_present(cpu + i))
				continue;
			pcpu[i].state = CPU_STATE_STANDBY;
			smp_cpu_set_polarization(cpu + i,
						 POLARIZATION_UNKNOWN);
		}
		topology_expect_change();
		break;
	case 1:
		if (pcpu->state != CPU_STATE_STANDBY)
			break;
		rc = sclp_core_configure(pcpu->address >> smp_cpu_mt_shift);
		if (rc)
			break;
		for (i = 0; i <= smp_cpu_mtid; i++) {
			if (cpu + i >= nr_cpu_ids || !cpu_present(cpu + i))
				continue;
			pcpu[i].state = CPU_STATE_CONFIGURED;
			smp_cpu_set_polarization(cpu + i,
						 POLARIZATION_UNKNOWN);
		}
		topology_expect_change();
		break;
	default:
		break;
	}
out:
	mutex_unlock(&smp_cpu_state_mutex);
	put_online_cpus();
	return rc ? rc : count;
}
static DEVICE_ATTR(configure, 0644, cpu_configure_show, cpu_configure_store);
static ssize_t show_cpu_address(struct device *dev,
				struct device_attribute *attr, char *buf)
{
	return sprintf(buf, "%d\n", pcpu_devices[dev->id].address);
}
static DEVICE_ATTR(address, 0444, show_cpu_address, NULL);

static struct attribute *cpu_common_attrs[] = {
	&dev_attr_configure.attr,
	&dev_attr_address.attr,
	NULL,
};

static struct attribute_group cpu_common_attr_group = {
	.attrs = cpu_common_attrs,
};

static struct attribute *cpu_online_attrs[] = {
	&dev_attr_idle_count.attr,
	&dev_attr_idle_time_us.attr,
	NULL,
};

static struct attribute_group cpu_online_attr_group = {
	.attrs = cpu_online_attrs,
};
static int smp_cpu_online(unsigned int cpu)
{
	struct device *s = &per_cpu(cpu_device, cpu)->dev;

	return sysfs_create_group(&s->kobj, &cpu_online_attr_group);
}

static int smp_cpu_pre_down(unsigned int cpu)
{
	struct device *s = &per_cpu(cpu_device, cpu)->dev;

	sysfs_remove_group(&s->kobj, &cpu_online_attr_group);
	return 0;
}
static int smp_add_present_cpu(int cpu)
{
	struct device *s;
	struct cpu *c;
	int rc;

	c = kzalloc(sizeof(*c), GFP_KERNEL);
	if (!c)
		return -ENOMEM;
	per_cpu(cpu_device, cpu) = c;
	s = &c->dev;
	c->hotpluggable = 1;
	rc = register_cpu(c, cpu);
	if (rc)
		goto out;
	rc = sysfs_create_group(&s->kobj, &cpu_common_attr_group);
	if (rc)
		goto out_cpu;
	rc = topology_cpu_init(c);
	if (rc)
		goto out_topology;
	return 0;

out_topology:
	sysfs_remove_group(&s->kobj, &cpu_common_attr_group);
out_cpu:
	unregister_cpu(c);
out:
	return rc;
}
int __ref smp_rescan_cpus(void)
{
	struct sclp_core_info *info;
	int nr;

	info = kzalloc(sizeof(*info), GFP_KERNEL);
	if (!info)
		return -ENOMEM;
	smp_get_core_info(info, 0);
	get_online_cpus();
	mutex_lock(&smp_cpu_state_mutex);
	nr = __smp_rescan_cpus(info, false);
	mutex_unlock(&smp_cpu_state_mutex);
	put_online_cpus();
	kfree(info);
	if (nr)
		topology_schedule_update();
	return 0;
}
static ssize_t __ref rescan_store(struct device *dev,
				  struct device_attribute *attr,
				  const char *buf,
				  size_t count)
{
	int rc;

	rc = lock_device_hotplug_sysfs();
	if (rc)
		return rc;
	rc = smp_rescan_cpus();
	unlock_device_hotplug();
	return rc ? rc : count;
}
static DEVICE_ATTR_WO(rescan);
static int __init s390_smp_init(void)
{
	int cpu, rc = 0;

	rc = device_create_file(cpu_subsys.dev_root, &dev_attr_rescan);
	if (rc)
		return rc;
	for_each_present_cpu(cpu) {
		rc = smp_add_present_cpu(cpu);
		if (rc)
			goto out;
	}

	rc = cpuhp_setup_state(CPUHP_AP_ONLINE_DYN, "s390/smp:online",
			       smp_cpu_online, smp_cpu_pre_down);
	rc = rc <= 0 ? rc : 0;
out:
	return rc;
}
subsys_initcall(s390_smp_init);