/*
 *  arch/s390/kernel/smp.c
 *
 *    Copyright IBM Corp. 1999,2007
 *    Author(s): Denis Joseph Barrow (djbarrow@de.ibm.com,barrow_dj@yahoo.com),
 *		 Martin Schwidefsky (schwidefsky@de.ibm.com)
 *		 Heiko Carstens (heiko.carstens@de.ibm.com)
 *
 *  based on other smp stuff by
 *    (c) 1995 Alan Cox, CymruNET Ltd  <alan@cymru.net>
 *    (c) 1998 Ingo Molnar
 *
 * We work with logical cpu numbering everywhere we can. The only
 * functions using the real cpu address (got from STAP) are the sigp
 * functions. For all other functions we use the identity mapping.
 * That means that cpu_number_map[i] == i for every cpu. cpu_number_map is
 * used e.g. to find the idle task belonging to a logical cpu. Every array
 * in the kernel is sorted by the logical cpu number and not by the physical
 * one which is causing all the confusion with __cpu_logical_map and
 * cpu_number_map in other architectures.
 */
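
/*
 * For example (illustrative values only): with two cpus whose physical
 * addresses are 0x0 and 0x11, the logical numbering gives
 * __cpu_logical_map[0] == 0x0 and __cpu_logical_map[1] == 0x11, while
 * every per-cpu array in this file (lowcore_ptr, current_set, ...) is
 * indexed by the logical numbers 0 and 1.
 */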
#include <linux/module.h>
#include <linux/init.h>
#include <linux/mm.h>
#include <linux/spinlock.h>
#include <linux/kernel_stat.h>
#include <linux/smp_lock.h>
#include <linux/delay.h>
#include <linux/cache.h>
#include <linux/interrupt.h>
#include <linux/cpu.h>
#include <linux/timex.h>
#include <linux/bootmem.h>
#include <asm/ipl.h>
#include <asm/setup.h>
#include <asm/sigp.h>
#include <asm/pgalloc.h>
#include <asm/irq.h>
#include <asm/s390_ext.h>
#include <asm/cpcmd.h>
#include <asm/tlbflush.h>
#include <asm/timer.h>
#include <asm/lowcore.h>
/*
 * An array with a pointer to the lowcore of every CPU.
 */
struct _lowcore *lowcore_ptr[NR_CPUS];
EXPORT_SYMBOL(lowcore_ptr);
cpumask_t cpu_online_map = CPU_MASK_NONE;
EXPORT_SYMBOL(cpu_online_map);

cpumask_t cpu_possible_map = CPU_MASK_NONE;
EXPORT_SYMBOL(cpu_possible_map);
static struct task_struct *current_set[NR_CPUS];

static void smp_ext_bitcall(int, ec_bit_sig);
/*
 * Structure and data for __smp_call_function_map(). This is designed to
 * minimise static memory requirements. It also looks cleaner.
 */
static DEFINE_SPINLOCK(call_lock);

struct call_data_struct {
	void (*func) (void *info);
	void *info;
	cpumask_t started;
	cpumask_t finished;
	int wait;
};

static struct call_data_struct *call_data;
/*
 * 'Call function' interrupt callback
 */
static void do_call_function(void)
{
	void (*func) (void *info) = call_data->func;
	void *info = call_data->info;
	int wait = call_data->wait;

	cpu_set(smp_processor_id(), call_data->started);
	(*func)(info);
	if (wait)
		cpu_set(smp_processor_id(), call_data->finished);
}
static void __smp_call_function_map(void (*func) (void *info), void *info,
				    int nonatomic, int wait, cpumask_t map)
{
	struct call_data_struct data;
	int cpu, local = 0;

	/*
	 * Can deadlock when interrupts are disabled or if in wrong context.
	 */
	WARN_ON(irqs_disabled() || in_irq());

	/*
	 * Check for local function call. We have to have the same call order
	 * as in on_each_cpu() because of machine_restart_smp().
	 */
	if (cpu_isset(smp_processor_id(), map)) {
		local = 1;
		cpu_clear(smp_processor_id(), map);
	}

	cpus_and(map, map, cpu_online_map);
	if (cpus_empty(map))
		goto out;

	data.func = func;
	data.info = info;
	data.started = CPU_MASK_NONE;
	data.wait = wait;
	if (wait)
		data.finished = CPU_MASK_NONE;

	spin_lock_bh(&call_lock);
	call_data = &data;

	for_each_cpu_mask(cpu, map)
		smp_ext_bitcall(cpu, ec_call_function);

	/* Wait for response */
	while (!cpus_equal(map, data.started))
		cpu_relax();

	if (wait)
		while (!cpus_equal(map, data.finished))
			cpu_relax();

	spin_unlock_bh(&call_lock);
out:
	if (local) {
		local_irq_disable();
		func(info);
		local_irq_enable();
	}
}
/*
 * smp_call_function:
 * @func: the function to run; this must be fast and non-blocking
 * @info: an arbitrary pointer to pass to the function
 * @nonatomic: unused
 * @wait: if true, wait (atomically) until function has completed on other CPUs
 *
 * Run a function on all other CPUs.
 *
 * You must not call this function with disabled interrupts, from a
 * hardware interrupt handler or from a bottom half.
 */
int smp_call_function(void (*func) (void *info), void *info, int nonatomic,
		      int wait)
{
	cpumask_t map;

	preempt_disable();
	map = cpu_online_map;
	cpu_clear(smp_processor_id(), map);
	__smp_call_function_map(func, info, nonatomic, wait, map);
	preempt_enable();
	return 0;
}
EXPORT_SYMBOL(smp_call_function);
/*
 * smp_call_function_on:
 * @func: the function to run; this must be fast and non-blocking
 * @info: an arbitrary pointer to pass to the function
 * @nonatomic: unused
 * @wait: if true, wait (atomically) until function has completed on other CPUs
 * @cpu: the CPU where func should run
 *
 * Run a function on one processor.
 *
 * You must not call this function with disabled interrupts, from a
 * hardware interrupt handler or from a bottom half.
 */
int smp_call_function_on(void (*func) (void *info), void *info, int nonatomic,
			 int wait, int cpu)
{
	cpumask_t map = CPU_MASK_NONE;

	preempt_disable();
	cpu_set(cpu, map);
	__smp_call_function_map(func, info, nonatomic, wait, map);
	preempt_enable();
	return 0;
}
EXPORT_SYMBOL(smp_call_function_on);
static void do_send_stop(void)
{
	int cpu, rc;

	/* stop all processors */
	for_each_online_cpu(cpu) {
		if (cpu == smp_processor_id())
			continue;
		do {
			rc = signal_processor(cpu, sigp_stop);
		} while (rc == sigp_busy);
	}
}
static void do_store_status(void)
{
	int cpu, rc;

	/* store status of all processors in their lowcores (real 0) */
	for_each_online_cpu(cpu) {
		if (cpu == smp_processor_id())
			continue;
		do {
			rc = signal_processor_p(
				(__u32)(unsigned long) lowcore_ptr[cpu], cpu,
				sigp_store_status_at_address);
		} while (rc == sigp_busy);
	}
}
static void do_wait_for_stop(void)
{
	int cpu;

	/* Wait for all other cpus to enter stopped state */
	for_each_online_cpu(cpu) {
		if (cpu == smp_processor_id())
			continue;
		while (!smp_cpu_not_running(cpu))
			cpu_relax();
	}
}
/*
 * this function sends a 'stop' sigp to all other CPUs in the system.
 * it goes straight through.
 */
void smp_send_stop(void)
{
	/* Disable all interrupts/machine checks */
	__load_psw_mask(psw_kernel_bits & ~PSW_MASK_MCHECK);

	/* write magic number to zero page (absolute 0) */
	lowcore_ptr[smp_processor_id()]->panic_magic = __PANIC_MAGIC;

	/* stop other processors. */
	do_send_stop();

	/* wait until other processors are stopped */
	do_wait_for_stop();

	/* store status of other processors. */
	do_store_status();
}
/*
 * Reboot, halt and power_off routines for SMP.
 */
void machine_restart_smp(char *__unused)
{
	smp_send_stop();
	do_reipl();
}

void machine_halt_smp(void)
{
	smp_send_stop();
	if (MACHINE_IS_VM && strlen(vmhalt_cmd) > 0)
		__cpcmd(vmhalt_cmd, NULL, 0, NULL);
	signal_processor(smp_processor_id(), sigp_stop_and_store_status);
	for (;;);
}
void machine_power_off_smp(void)
{
	smp_send_stop();
	if (MACHINE_IS_VM && strlen(vmpoff_cmd) > 0)
		__cpcmd(vmpoff_cmd, NULL, 0, NULL);
	signal_processor(smp_processor_id(), sigp_stop_and_store_status);
	for (;;);
}
/*
 * This is the main routine where commands issued by other
 * cpus are handled.
 */
static void do_ext_call_interrupt(__u16 code)
{
	unsigned long bits;

	/*
	 * handle bit signal external calls
	 *
	 * For the ec_schedule signal we have to do nothing. All the work
	 * is done automatically when we return from the interrupt.
	 */
	bits = xchg(&S390_lowcore.ext_call_fast, 0);

	if (test_bit(ec_call_function, &bits))
		do_call_function();
}
/*
 * Send an external call sigp to another cpu and return without waiting
 * for its completion.
 */
static void smp_ext_bitcall(int cpu, ec_bit_sig sig)
{
	/*
	 * Set signaling bit in lowcore of target cpu and kick it
	 */
	set_bit(sig, (unsigned long *) &lowcore_ptr[cpu]->ext_call_fast);
	while (signal_processor(cpu, sigp_emergency_signal) == sigp_busy)
		udelay(10);
}
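
/*
 * Note: the receiving cpu picks the signal up in do_ext_call_interrupt()
 * above, which xchg()s ext_call_fast back to zero, so several bit
 * signals sent before the interrupt is delivered are collapsed into a
 * single external call interrupt.
 */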
#ifndef CONFIG_64BIT
/*
 * this function sends a 'purge tlb' signal to another CPU.
 */
void smp_ptlb_callback(void *info)
{
	local_flush_tlb();
}

void smp_ptlb_all(void)
{
	on_each_cpu(smp_ptlb_callback, NULL, 0, 1);
}
EXPORT_SYMBOL(smp_ptlb_all);
#endif /* ! CONFIG_64BIT */
/*
 * this function sends a 'reschedule' IPI to another CPU.
 * it goes straight through and wastes no time serializing
 * anything. Worst case is that we lose a reschedule ...
 */
void smp_send_reschedule(int cpu)
{
	smp_ext_bitcall(cpu, ec_schedule);
}
/*
 * parameter area for the set/clear control bit callbacks
 */
struct ec_creg_mask_parms {
	unsigned long orvals[16];
	unsigned long andvals[16];
};
/*
 * callback for setting/clearing control bits
 */
static void smp_ctl_bit_callback(void *info)
{
	struct ec_creg_mask_parms *pp = info;
	unsigned long cregs[16];
	int i;

	__ctl_store(cregs, 0, 15);
	for (i = 0; i <= 15; i++)
		cregs[i] = (cregs[i] & pp->andvals[i]) | pp->orvals[i];
	__ctl_load(cregs, 0, 15);
}
/*
 * Set a bit in a control register of all cpus
 */
void smp_ctl_set_bit(int cr, int bit)
{
	struct ec_creg_mask_parms parms;

	memset(&parms.orvals, 0, sizeof(parms.orvals));
	memset(&parms.andvals, 0xff, sizeof(parms.andvals));
	parms.orvals[cr] = 1 << bit;
	on_each_cpu(smp_ctl_bit_callback, &parms, 0, 1);
}
EXPORT_SYMBOL(smp_ctl_set_bit);
/*
 * Clear a bit in a control register of all cpus
 */
void smp_ctl_clear_bit(int cr, int bit)
{
	struct ec_creg_mask_parms parms;

	memset(&parms.orvals, 0, sizeof(parms.orvals));
	memset(&parms.andvals, 0xff, sizeof(parms.andvals));
	parms.andvals[cr] = ~(1L << bit);
	on_each_cpu(smp_ctl_bit_callback, &parms, 0, 1);
}
EXPORT_SYMBOL(smp_ctl_clear_bit);
#if defined(CONFIG_ZFCPDUMP) || defined(CONFIG_ZFCPDUMP_MODULE)

/*
 * zfcpdump_prefix_array holds prefix registers for the following scenario:
 * 64 bit zfcpdump kernel and 31 bit kernel which is to be dumped. We have to
 * save its prefix registers, since they get lost, when switching from 31 bit
 * to 64 bit.
 */
unsigned int zfcpdump_prefix_array[NR_CPUS + 1]
	__attribute__((__section__(".data")));

static void __init smp_get_save_areas(void)
{
	unsigned int cpu, cpu_num, rc;
	__u16 boot_cpu_addr;

	if (ipl_info.type != IPL_TYPE_FCP_DUMP)
		return;
	boot_cpu_addr = S390_lowcore.cpu_data.cpu_addr;
	cpu_num = 1;
	for (cpu = 0; cpu <= 65535; cpu++) {
		if ((u16) cpu == boot_cpu_addr)
			continue;
		__cpu_logical_map[1] = (__u16) cpu;
		if (signal_processor(1, sigp_sense) == sigp_not_operational)
			continue;
		if (cpu_num >= NR_CPUS) {
			printk("WARNING: Registers for cpu %i are not "
			       "saved, since dump kernel was compiled with "
			       "NR_CPUS=%i!\n", cpu_num, NR_CPUS);
			continue;
		}
		zfcpdump_save_areas[cpu_num] =
			alloc_bootmem(sizeof(union save_area));
		while (1) {
			rc = signal_processor(1, sigp_stop_and_store_status);
			if (rc != sigp_busy)
				break;
			cpu_relax();
		}
		memcpy(zfcpdump_save_areas[cpu_num],
		       (void *)(unsigned long) store_prefix() +
		       SAVE_AREA_BASE, SAVE_AREA_SIZE);
#ifdef __s390x__
		/* copy original prefix register */
		zfcpdump_save_areas[cpu_num]->s390x.pref_reg =
			zfcpdump_prefix_array[cpu_num];
#endif
		cpu_num++;
	}
}

union save_area *zfcpdump_save_areas[NR_CPUS + 1];
EXPORT_SYMBOL_GPL(zfcpdump_save_areas);

#else

#define smp_get_save_areas() do { } while (0)

#endif /* CONFIG_ZFCPDUMP */
/*
 * Let's check how many CPUs we have.
 */
static unsigned int __init smp_count_cpus(void)
{
	unsigned int cpu, num_cpus;
	__u16 boot_cpu_addr;

	/*
	 * cpu 0 is the boot cpu. See smp_prepare_boot_cpu.
	 */
	boot_cpu_addr = S390_lowcore.cpu_data.cpu_addr;
	current_thread_info()->cpu = 0;
	num_cpus = 1;
	for (cpu = 0; cpu <= 65535; cpu++) {
		if ((__u16) cpu == boot_cpu_addr)
			continue;
		__cpu_logical_map[1] = (__u16) cpu;
		if (signal_processor(1, sigp_sense) == sigp_not_operational)
			continue;
		num_cpus++;
	}
	printk("Detected %d CPUs\n", (int) num_cpus);
	printk("Boot cpu address %2X\n", boot_cpu_addr);
	return num_cpus;
}
/*
 * Activate a secondary processor.
 */
int __devinit start_secondary(void *cpuvoid)
{
	/* Setup the cpu */
	cpu_init();
	preempt_disable();
	/* Enable TOD clock interrupts on the secondary cpu. */
	init_cpu_timer();
#ifdef CONFIG_VIRT_TIMER
	/* Enable cpu timer interrupts on the secondary cpu. */
	init_cpu_vtimer();
#endif
	/* Enable pfault pseudo page faults on this cpu. */
	pfault_init();

	/* Mark this cpu as online */
	cpu_set(smp_processor_id(), cpu_online_map);
	/* Switch on interrupts */
	local_irq_enable();
	/* Print info about this processor */
	print_cpu_info(&S390_lowcore.cpu_data);
	/* cpu_idle will call schedule for us */
	cpu_idle();
	return 0;
}
static void __init smp_create_idle(unsigned int cpu)
{
	struct task_struct *p;

	/*
	 * don't care about the psw and regs settings since we'll never
	 * reschedule the forked task.
	 */
	p = fork_idle(cpu);
	if (IS_ERR(p))
		panic("failed fork for CPU %u: %li", cpu, PTR_ERR(p));
	current_set[cpu] = p;
}
static int cpu_stopped(int cpu)
{
	__u32 status;

	/* Check for stopped state */
	if (signal_processor_ps(&status, 0, cpu, sigp_sense) ==
	    sigp_status_stored) {
		if (status & 0x40)
			return 1;
	}
	return 0;
}
/* Upping and downing of CPUs */

int __cpu_up(unsigned int cpu)
{
	struct task_struct *idle;
	struct _lowcore *cpu_lowcore;
	struct stack_frame *sf;
	sigp_ccode ccode;
	int curr_cpu;

	for (curr_cpu = 0; curr_cpu <= 65535; curr_cpu++) {
		__cpu_logical_map[cpu] = (__u16) curr_cpu;
		if (cpu_stopped(cpu))
			break;
	}

	if (!cpu_stopped(cpu))
		return -ENODEV;

	ccode = signal_processor_p((__u32)(unsigned long)(lowcore_ptr[cpu]),
				   cpu, sigp_set_prefix);
	if (ccode) {
		printk("sigp_set_prefix failed for cpu %d "
		       "with condition code %d\n",
		       (int) cpu, (int) ccode);
		return -EIO;
	}

	idle = current_set[cpu];
	cpu_lowcore = lowcore_ptr[cpu];
	cpu_lowcore->kernel_stack = (unsigned long)
		task_stack_page(idle) + THREAD_SIZE;
	sf = (struct stack_frame *) (cpu_lowcore->kernel_stack
				     - sizeof(struct pt_regs)
				     - sizeof(struct stack_frame));
	memset(sf, 0, sizeof(struct stack_frame));
	sf->gprs[9] = (unsigned long) sf;
	cpu_lowcore->save_area[15] = (unsigned long) sf;
	__ctl_store(cpu_lowcore->cregs_save_area[0], 0, 15);
	asm volatile(
		"	stam	0,15,0(%0)"
		: : "a" (&cpu_lowcore->access_regs_save_area) : "memory");
	cpu_lowcore->percpu_offset = __per_cpu_offset[cpu];
	cpu_lowcore->current_task = (unsigned long) idle;
	cpu_lowcore->cpu_data.cpu_nr = cpu;
	eieio();

	while (signal_processor(cpu, sigp_restart) == sigp_busy)
		udelay(10);

	while (!cpu_online(cpu))
		cpu_relax();
	return 0;
}
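
/*
 * Note on the bring-up above (an assumption based on the s390 restart
 * convention, not spelled out in this file): save_area[15] seeds the
 * new cpu's r15, the s390 stack pointer, so when sigp_restart kicks
 * the target cpu it resumes on the idle task's kernel stack with the
 * freshly zeroed frame sf as its initial stack frame.
 */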
static unsigned int __initdata additional_cpus;
static unsigned int __initdata possible_cpus;
void __init smp_setup_cpu_possible_map(void)
{
	unsigned int phy_cpus, pos_cpus, cpu;

	smp_get_save_areas();
	phy_cpus = smp_count_cpus();
	pos_cpus = min(phy_cpus + additional_cpus, (unsigned int) NR_CPUS);

	if (possible_cpus)
		pos_cpus = min(possible_cpus, (unsigned int) NR_CPUS);

	for (cpu = 0; cpu < pos_cpus; cpu++)
		cpu_set(cpu, cpu_possible_map);

	phy_cpus = min(phy_cpus, pos_cpus);

	for (cpu = 0; cpu < phy_cpus; cpu++)
		cpu_set(cpu, cpu_present_map);
}
#ifdef CONFIG_HOTPLUG_CPU

static int __init setup_additional_cpus(char *s)
{
	additional_cpus = simple_strtoul(s, NULL, 0);
	return 0;
}
early_param("additional_cpus", setup_additional_cpus);

static int __init setup_possible_cpus(char *s)
{
	possible_cpus = simple_strtoul(s, NULL, 0);
	return 0;
}
early_param("possible_cpus", setup_possible_cpus);
int __cpu_disable(void)
{
	struct ec_creg_mask_parms cr_parms;
	int cpu = smp_processor_id();

	cpu_clear(cpu, cpu_online_map);

	/* Disable pfault pseudo page faults on this cpu. */
	pfault_fini();

	memset(&cr_parms.orvals, 0, sizeof(cr_parms.orvals));
	memset(&cr_parms.andvals, 0xff, sizeof(cr_parms.andvals));

	/* disable all external interrupts */
	cr_parms.orvals[0] = 0;
	cr_parms.andvals[0] = ~(1 << 15 | 1 << 14 | 1 << 13 | 1 << 12 |
				1 << 11 | 1 << 10 | 1 << 6 | 1 << 4);
	/* disable all I/O interrupts */
	cr_parms.orvals[6] = 0;
	cr_parms.andvals[6] = ~(1 << 31 | 1 << 30 | 1 << 29 | 1 << 28 |
				1 << 27 | 1 << 26 | 1 << 25 | 1 << 24);
	/* disable most machine checks */
	cr_parms.orvals[14] = 0;
	cr_parms.andvals[14] = ~(1 << 28 | 1 << 27 | 1 << 26 |
				 1 << 25 | 1 << 24);

	smp_ctl_bit_callback(&cr_parms);

	return 0;
}

void __cpu_die(unsigned int cpu)
{
	/* Wait until target cpu is down */
	while (!smp_cpu_not_running(cpu))
		cpu_relax();
	printk("Processor %d spun down\n", cpu);
}

void cpu_die(void)
{
	idle_task_exit();
	signal_processor(smp_processor_id(), sigp_stop);
	BUG();
	for (;;);
}

#endif /* CONFIG_HOTPLUG_CPU */
/*
 * Cycle through the processors and setup structures.
 */
void __init smp_prepare_cpus(unsigned int max_cpus)
{
	unsigned long stack;
	unsigned int cpu;
	int i;

	/* request the 0x1201 emergency signal external interrupt */
	if (register_external_interrupt(0x1201, do_ext_call_interrupt) != 0)
		panic("Couldn't request external interrupt 0x1201");
	memset(lowcore_ptr, 0, sizeof(lowcore_ptr));
	/*
	 * Initialize prefix pages and stacks for all possible cpus
	 */
	print_cpu_info(&S390_lowcore.cpu_data);

	for_each_possible_cpu(i) {
		lowcore_ptr[i] = (struct _lowcore *)
			__get_free_pages(GFP_KERNEL | GFP_DMA,
					 sizeof(void *) == 8 ? 1 : 0);
		stack = __get_free_pages(GFP_KERNEL, ASYNC_ORDER);
		if (!lowcore_ptr[i] || !stack)
			panic("smp_boot_cpus failed to allocate memory\n");

		*(lowcore_ptr[i]) = S390_lowcore;
		lowcore_ptr[i]->async_stack = stack + ASYNC_SIZE;
		stack = __get_free_pages(GFP_KERNEL, 0);
		if (!stack)
			panic("smp_boot_cpus failed to allocate memory\n");
		lowcore_ptr[i]->panic_stack = stack + PAGE_SIZE;
#ifndef CONFIG_64BIT
		if (MACHINE_HAS_IEEE) {
			lowcore_ptr[i]->extended_save_area_addr =
				(__u32) __get_free_pages(GFP_KERNEL, 0);
			if (!lowcore_ptr[i]->extended_save_area_addr)
				panic("smp_boot_cpus failed to "
				      "allocate memory\n");
		}
#endif
	}
#ifndef CONFIG_64BIT
	if (MACHINE_HAS_IEEE)
		ctl_set_bit(14, 29); /* enable extended save area */
#endif
	set_prefix((u32)(unsigned long) lowcore_ptr[smp_processor_id()]);

	for_each_possible_cpu(cpu)
		if (cpu != smp_processor_id())
			smp_create_idle(cpu);
}
void __devinit smp_prepare_boot_cpu(void)
{
	BUG_ON(smp_processor_id() != 0);

	cpu_set(0, cpu_online_map);
	S390_lowcore.percpu_offset = __per_cpu_offset[0];
	current_set[0] = current;
}
void smp_cpus_done(unsigned int max_cpus)
{
	cpu_present_map = cpu_possible_map;
}
/*
 * the frequency of the profiling timer can be changed
 * by writing a multiplier value into /proc/profile.
 *
 * usually you want to run this on all CPUs ;)
 */
int setup_profiling_timer(unsigned int multiplier)
{
	return 0;
}
static DEFINE_PER_CPU(struct cpu, cpu_devices);

static ssize_t show_capability(struct sys_device *dev, char *buf)
{
	unsigned int capability;
	int rc;

	rc = get_cpu_capability(&capability);
	if (rc)
		return rc;
	return sprintf(buf, "%u\n", capability);
}
static SYSDEV_ATTR(capability, 0444, show_capability, NULL);
static int __cpuinit smp_cpu_notify(struct notifier_block *self,
				    unsigned long action, void *hcpu)
{
	unsigned int cpu = (unsigned int)(long)hcpu;
	struct cpu *c = &per_cpu(cpu_devices, cpu);
	struct sys_device *s = &c->sysdev;

	switch (action) {
	case CPU_ONLINE:
	case CPU_ONLINE_FROZEN:
		if (sysdev_create_file(s, &attr_capability))
			return NOTIFY_BAD;
		break;
	case CPU_DEAD:
	case CPU_DEAD_FROZEN:
		sysdev_remove_file(s, &attr_capability);
		break;
	}
	return NOTIFY_OK;
}

static struct notifier_block __cpuinitdata smp_cpu_nb = {
	.notifier_call = smp_cpu_notify,
};
static int __init topology_init(void)
{
	int cpu;

	register_cpu_notifier(&smp_cpu_nb);

	for_each_possible_cpu(cpu) {
		struct cpu *c = &per_cpu(cpu_devices, cpu);
		struct sys_device *s = &c->sysdev;

		c->hotpluggable = 1;
		register_cpu(c, cpu);
		if (!cpu_online(cpu))
			continue;
		sysdev_create_file(s, &attr_capability);
	}
	return 0;
}
subsys_initcall(topology_init);