/* smp.c: Sparc64 SMP support.
 *
 * Copyright (C) 1997 David S. Miller (davem@caip.rutgers.edu)
 */

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/pagemap.h>
#include <linux/threads.h>
#include <linux/smp.h>
#include <linux/smp_lock.h>
#include <linux/interrupt.h>
#include <linux/kernel_stat.h>
#include <linux/delay.h>
#include <linux/init.h>
#include <linux/spinlock.h>
#include <linux/seq_file.h>
#include <linux/cache.h>
#include <linux/jiffies.h>
#include <linux/profile.h>
#include <linux/bootmem.h>

#include <asm/ptrace.h>
#include <asm/atomic.h>
#include <asm/tlbflush.h>
#include <asm/mmu_context.h>
#include <asm/cpudata.h>
#include <asm/pgtable.h>
#include <asm/oplib.h>
#include <asm/uaccess.h>
#include <asm/timer.h>
#include <asm/starfire.h>

extern void calibrate_delay(void);

/* Please don't make this stuff initdata!!!  --DaveM */
static unsigned char boot_cpu_id;

cpumask_t cpu_online_map __read_mostly = CPU_MASK_NONE;
cpumask_t phys_cpu_present_map __read_mostly = CPU_MASK_NONE;
static cpumask_t smp_commenced_mask;
static cpumask_t cpu_callout_map;

void smp_info(struct seq_file *m)
{
	int i;

	seq_printf(m, "State:\n");
	for (i = 0; i < NR_CPUS; i++) {
		if (cpu_online(i))
			seq_printf(m,
				   "CPU%d:\t\tonline\n", i);
	}
}

void smp_bogo(struct seq_file *m)
{
	int i;

	for (i = 0; i < NR_CPUS; i++)
		if (cpu_online(i))
			seq_printf(m,
				   "Cpu%dBogo\t: %lu.%02lu\n"
				   "Cpu%dClkTck\t: %016lx\n",
				   i, cpu_data(i).udelay_val / (500000/HZ),
				   (cpu_data(i).udelay_val / (5000/HZ)) % 100,
				   i, cpu_data(i).clock_tick);
}

void __init smp_store_cpu_info(int id)
{
	int cpu_node;

	/* multiplier and counter set by
	   smp_setup_percpu_timer()  */
	cpu_data(id).udelay_val = loops_per_jiffy;

	cpu_find_by_mid(id, &cpu_node);
	cpu_data(id).clock_tick = prom_getintdefault(cpu_node,
						     "clock-frequency", 0);

	cpu_data(id).pgcache_size = 0;
	cpu_data(id).pte_cache[0] = NULL;
	cpu_data(id).pte_cache[1] = NULL;
	cpu_data(id).pgd_cache = NULL;
	cpu_data(id).idle_volume = 1;

	cpu_data(id).dcache_size = prom_getintdefault(cpu_node, "dcache-size",
						      16 * 1024);
	cpu_data(id).dcache_line_size =
		prom_getintdefault(cpu_node, "dcache-line-size", 32);
	cpu_data(id).icache_size = prom_getintdefault(cpu_node, "icache-size",
						      16 * 1024);
	cpu_data(id).icache_line_size =
		prom_getintdefault(cpu_node, "icache-line-size", 32);
	cpu_data(id).ecache_size = prom_getintdefault(cpu_node, "ecache-size",
						      4 * 1024 * 1024);
	cpu_data(id).ecache_line_size =
		prom_getintdefault(cpu_node, "ecache-line-size", 64);
	printk("CPU[%d]: Caches "
	       "D[sz(%d):line_sz(%d)] "
	       "I[sz(%d):line_sz(%d)] "
	       "E[sz(%d):line_sz(%d)]\n",
	       id,
	       cpu_data(id).dcache_size, cpu_data(id).dcache_line_size,
	       cpu_data(id).icache_size, cpu_data(id).icache_line_size,
	       cpu_data(id).ecache_size, cpu_data(id).ecache_line_size);
}

static void smp_setup_percpu_timer(void);

static volatile unsigned long callin_flag = 0;

extern void inherit_locked_prom_mappings(int save_p);

static inline void cpu_setup_percpu_base(unsigned long cpu_id)
{
	__asm__ __volatile__("mov	%0, %%g5\n\t"
			     "stxa	%0, [%1] %2\n\t"
			     "membar	#Sync"
			     : /* no outputs */
			     : "r" (__per_cpu_offset(cpu_id)),
			       "r" (TSB_REG), "i" (ASI_IMMU));
}
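
/* Slave cpu bring-up path, roughly: a freshly started cpu enters
 * smp_callin() from the sparc64_cpu_startup trampoline.  It inherits the
 * locked PROM TLB entries, installs its per-cpu base in %g5, starts its
 * tick timer, records its cpu_data, signals the master via callin_flag,
 * and then spins until the master sets its bit in smp_commenced_mask
 * before marking itself online.
 */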

void __init smp_callin(void)
{
	int cpuid = hard_smp_processor_id();

	inherit_locked_prom_mappings(0);

	__flush_tlb_all();

	cpu_setup_percpu_base(cpuid);

	smp_setup_percpu_timer();

	if (cheetah_pcache_forced_on)
		cheetah_enable_pcache();

	local_irq_enable();

	calibrate_delay();
	smp_store_cpu_info(cpuid);
	callin_flag = 1;
	__asm__ __volatile__("membar #Sync\n\t"
			     "flush  %%g6" : : : "memory");

	/* Clear this or we will die instantly when we
	 * schedule back to this idler...
	 */
	current_thread_info()->new_child = 0;

	/* Attach to the address space of init_task. */
	atomic_inc(&init_mm.mm_count);
	current->active_mm = &init_mm;

	while (!cpu_isset(cpuid, smp_commenced_mask))
		rmb();

	cpu_set(cpuid, cpu_online_map);

	/* idle thread is expected to have preempt disabled */
	preempt_disable();
}

void cpu_panic(void)
{
	printk("CPU[%d]: Returns from cpu_idle!\n", smp_processor_id());
	panic("SMP bolixed\n");
}

static unsigned long current_tick_offset __read_mostly;

/* This tick register synchronization scheme is taken entirely from
 * the ia64 port, see arch/ia64/kernel/smpboot.c for details and credit.
 *
 * The only change I've made is to rework it so that the master
 * initiates the synchronization instead of the slave.  -DaveM
 */
#define MASTER	0
#define SLAVE	(SMP_CACHE_BYTES/sizeof(unsigned long))

#define NUM_ROUNDS	64	/* magic value */
#define NUM_ITERS	5	/* likewise */

static DEFINE_SPINLOCK(itc_sync_lock);
static unsigned long go[SLAVE + 1];

#define DEBUG_TICK_SYNC	0
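
/* go[MASTER] and go[SLAVE] sit a cache line apart (SLAVE is one cache
 * line worth of longs past MASTER) and implement a simple two-word
 * handshake: the slave raises go[MASTER] to ask for a sample, the master
 * answers by writing its current tick value into go[SLAVE], and the
 * slave measures the round trip with its own tick reads in get_delta()
 * below.
 */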

static inline long get_delta (long *rt, long *master)
{
	unsigned long best_t0 = 0, best_t1 = ~0UL, best_tm = 0;
	unsigned long tcenter, t0, t1, tm;
	unsigned long i;

	for (i = 0; i < NUM_ITERS; i++) {
		t0 = tick_ops->get_tick();
		go[MASTER] = 1;
		membar_storeload();
		while (!(tm = go[SLAVE]))
			rmb();
		go[SLAVE] = 0;
		wmb();
		t1 = tick_ops->get_tick();

		if (t1 - t0 < best_t1 - best_t0)
			best_t0 = t0, best_t1 = t1, best_tm = tm;
	}

	*rt = best_t1 - best_t0;
	*master = best_tm - best_t0;

	/* average best_t0 and best_t1 without overflow: */
	tcenter = (best_t0/2 + best_t1/2);
	if (best_t0 % 2 + best_t1 % 2 == 2)
		tcenter++;
	return tcenter - best_tm;
}

void smp_synchronize_tick_client(void)
{
	long i, delta, adj, adjust_latency = 0, done = 0;
	unsigned long flags, rt, master_time_stamp, bound;
#if DEBUG_TICK_SYNC
	struct {
		long rt;	/* roundtrip time */
		long master;	/* master's timestamp */
		long diff;	/* difference between midpoint and master's timestamp */
		long lat;	/* estimate of itc adjustment latency */
	} t[NUM_ROUNDS];
#endif

	go[MASTER] = 1;

	while (go[MASTER])
		rmb();

	local_irq_save(flags);
	{
		for (i = 0; i < NUM_ROUNDS; i++) {
			delta = get_delta(&rt, &master_time_stamp);
			if (delta == 0) {
				done = 1;	/* let's lock on to this... */
				bound = rt;
			}

			if (!done) {
				if (i > 0) {
					adjust_latency += -delta;
					adj = -delta + adjust_latency/4;
				} else
					adj = -delta;

				tick_ops->add_tick(adj, current_tick_offset);
			}
#if DEBUG_TICK_SYNC
			t[i].rt = rt;
			t[i].master = master_time_stamp;
			t[i].diff = delta;
			t[i].lat = adjust_latency/4;
#endif
		}
	}
	local_irq_restore(flags);

#if DEBUG_TICK_SYNC
	for (i = 0; i < NUM_ROUNDS; i++)
		printk("rt=%5ld master=%5ld diff=%5ld adjlat=%5ld\n",
		       t[i].rt, t[i].master, t[i].diff, t[i].lat);
#endif

	printk(KERN_INFO "CPU %d: synchronized TICK with master CPU "
	       "(last diff %ld cycles, maxerr %lu cycles)\n",
	       smp_processor_id(), delta, rt);
}

static void smp_start_sync_tick_client(int cpu);

static void smp_synchronize_one_tick(int cpu)
{
	unsigned long flags, i;

	go[MASTER] = 0;

	smp_start_sync_tick_client(cpu);

	/* wait for client to be ready */
	while (!go[MASTER])
		rmb();

	/* now let the client proceed into his loop */
	go[MASTER] = 0;
	membar_storeload();

	spin_lock_irqsave(&itc_sync_lock, flags);
	{
		for (i = 0; i < NUM_ROUNDS*NUM_ITERS; i++) {
			while (!go[MASTER])
				rmb();
			go[MASTER] = 0;
			wmb();
			go[SLAVE] = tick_ops->get_tick();
			membar_storeload();
		}
	}
	spin_unlock_irqrestore(&itc_sync_lock, flags);
}
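
/* Secondary cpu boot handshake: smp_boot_one_cpu() below forks an idle
 * thread for the new cpu, publishes its thread_info through
 * cpu_new_thread, and asks OBP to start the cpu at sparc64_cpu_startup.
 * The new cpu announces itself by setting callin_flag from smp_callin(),
 * which the master polls for (with a timeout) before declaring success.
 */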

extern unsigned long sparc64_cpu_startup;

/* The OBP cpu startup callback truncates the 3rd arg cookie to
 * 32-bits (I think) so to be safe we have it read the pointer
 * contained here so we work on >4GB machines. -DaveM
 */
static struct thread_info *cpu_new_thread = NULL;

static int __devinit smp_boot_one_cpu(unsigned int cpu)
{
	unsigned long entry =
		(unsigned long)(&sparc64_cpu_startup);
	unsigned long cookie =
		(unsigned long)(&cpu_new_thread);
	struct task_struct *p;
	int timeout, ret, cpu_node;

	p = fork_idle(cpu);
	callin_flag = 0;
	cpu_new_thread = task_thread_info(p);
	cpu_set(cpu, cpu_callout_map);

	cpu_find_by_mid(cpu, &cpu_node);
	prom_startcpu(cpu_node, entry, cookie);

	for (timeout = 0; timeout < 5000000; timeout++) {
		if (callin_flag)
			break;
		udelay(100);
	}
	if (callin_flag) {
		ret = 0;
	} else {
		printk("Processor %d is stuck.\n", cpu);
		ret = -ENODEV;
		cpu_clear(cpu, cpu_callout_map);
	}
	cpu_new_thread = NULL;

	return ret;
}
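
/* Cross calls ("mondo" interrupts), roughly: the three data words of the
 * interrupt packet are written into the interrupt dispatch registers via
 * ASI_INTR_W, then the dispatch register for the target cpu is poked.
 * Completion is detected by polling ASI_INTR_DISPATCH_STAT until the
 * busy bit clears; a set NACK bit means the target could not take the
 * mondo and the send must be retried.  Spitfire can only dispatch to one
 * cpu at a time, Cheetah can pipeline dispatches to several cpus.
 */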

static void spitfire_xcall_helper(u64 data0, u64 data1, u64 data2, u64 pstate, unsigned long cpu)
{
	u64 result, target;
	int stuck, tmp;

	if (this_is_starfire) {
		/* map to real upaid */
		cpu = (((cpu & 0x3c) << 1) |
			((cpu & 0x40) >> 4) |
			(cpu & 0x3));
	}

	target = (cpu << 14) | 0x70;
again:
	/* Ok, this is the real Spitfire Errata #54.
	 * One must read back from a UDB internal register
	 * after writes to the UDB interrupt dispatch, but
	 * before the membar Sync for that write.
	 * So we use the high UDB control register (ASI 0x7f,
	 * ADDR 0x20) for the dummy read. -DaveM
	 */
	tmp = 0x40;
	__asm__ __volatile__(
	"wrpr	%1, %2, %%pstate\n\t"
	"stxa	%4, [%0] %3\n\t"
	"stxa	%5, [%0+%8] %3\n\t"
	"add	%0, %8, %0\n\t"
	"stxa	%6, [%0+%8] %3\n\t"
	"membar	#Sync\n\t"
	"stxa	%%g0, [%7] %3\n\t"
	"membar	#Sync\n\t"
	"mov	0x20, %%g1\n\t"
	"ldxa	[%%g1] 0x7f, %%g0\n\t"
	"membar	#Sync"
	: "=r" (tmp)
	: "r" (pstate), "i" (PSTATE_IE), "i" (ASI_INTR_W),
	  "r" (data0), "r" (data1), "r" (data2), "r" (target),
	  "r" (0x10), "0" (tmp)
	: "g1");

	/* NOTE: PSTATE_IE is still clear. */
	stuck = 100000;
	do {
		__asm__ __volatile__("ldxa [%%g0] %1, %0"
			: "=r" (result)
			: "i" (ASI_INTR_DISPATCH_STAT));
		if (result == 0) {
			__asm__ __volatile__("wrpr %0, 0x0, %%pstate"
					     : : "r" (pstate));
			return;
		}
		stuck -= 1;
		if (stuck == 0)
			break;
	} while (result & 0x1);
	__asm__ __volatile__("wrpr %0, 0x0, %%pstate"
			     : : "r" (pstate));
	if (stuck == 0) {
		printk("CPU[%d]: mondo stuckage result[%016lx]\n",
		       smp_processor_id(), result);
	} else {
		udelay(2);
		goto again;
	}
}

static __inline__ void spitfire_xcall_deliver(u64 data0, u64 data1, u64 data2, cpumask_t mask)
{
	u64 pstate;
	int i;

	__asm__ __volatile__("rdpr %%pstate, %0" : "=r" (pstate));
	for_each_cpu_mask(i, mask)
		spitfire_xcall_helper(data0, data1, data2, pstate, i);
}

/* Cheetah now allows to send the whole 64-bytes of data in the interrupt
 * packet, but we have no use for that.  However we do take advantage of
 * the new pipelining feature (ie. dispatch to multiple cpus simultaneously).
 */
static void cheetah_xcall_deliver(u64 data0, u64 data1, u64 data2, cpumask_t mask)
{
	u64 pstate, ver;
	int nack_busy_id, is_jalapeno;

	if (cpus_empty(mask))
		return;

	/* Unfortunately, someone at Sun had the brilliant idea to make the
	 * busy/nack fields hard-coded by ITID number for this Ultra-III
	 * derivative processor.
	 */
	__asm__ ("rdpr %%ver, %0" : "=r" (ver));
	is_jalapeno = ((ver >> 32) == 0x003e0016);

	__asm__ __volatile__("rdpr %%pstate, %0" : "=r" (pstate));

retry:
	__asm__ __volatile__("wrpr %0, %1, %%pstate\n\t"
			     : : "r" (pstate), "i" (PSTATE_IE));

	/* Setup the dispatch data registers. */
	__asm__ __volatile__("stxa	%0, [%3] %6\n\t"
			     "stxa	%1, [%4] %6\n\t"
			     "stxa	%2, [%5] %6\n\t"
			     "membar	#Sync\n\t"
			     : /* no outputs */
			     : "r" (data0), "r" (data1), "r" (data2),
			       "r" (0x40), "r" (0x50), "r" (0x60),
			       "i" (ASI_INTR_W));

	nack_busy_id = 0;
	{
		int i;

		for_each_cpu_mask(i, mask) {
			u64 target = (i << 14) | 0x70;

			if (!is_jalapeno)
				target |= (nack_busy_id << 24);
			__asm__ __volatile__(
				"stxa	%%g0, [%0] %1\n\t"
				"membar	#Sync\n\t"
				: /* no outputs */
				: "r" (target), "i" (ASI_INTR_W));
			nack_busy_id++;
		}
	}

	/* Now, poll for completion. */
	{
		u64 dispatch_stat;
		long stuck;

		stuck = 100000 * nack_busy_id;
		do {
			__asm__ __volatile__("ldxa	[%%g0] %1, %0"
					     : "=r" (dispatch_stat)
					     : "i" (ASI_INTR_DISPATCH_STAT));
			if (dispatch_stat == 0UL) {
				__asm__ __volatile__("wrpr %0, 0x0, %%pstate"
						     : : "r" (pstate));
				return;
			}
			if (!--stuck)
				break;
		} while (dispatch_stat & 0x5555555555555555UL);

		__asm__ __volatile__("wrpr %0, 0x0, %%pstate"
				     : : "r" (pstate));

		if ((dispatch_stat & ~(0x5555555555555555UL)) == 0) {
			/* Busy bits will not clear, continue instead
			 * of freezing up on this cpu.
			 */
			printk("CPU[%d]: mondo stuckage result[%016lx]\n",
			       smp_processor_id(), dispatch_stat);
		} else {
			int i, this_busy_nack = 0;

			/* Delay some random time with interrupts enabled
			 * to prevent deadlock.
			 */
			udelay(2 * nack_busy_id);

			/* Clear out the mask bits for cpus which did not
			 * NACK us.
			 */
			for_each_cpu_mask(i, mask) {
				u64 check_mask;

				if (is_jalapeno)
					check_mask = (0x2UL << (2*i));
				else
					check_mask = (0x2UL <<
						      this_busy_nack);
				if ((dispatch_stat & check_mask) == 0)
					cpu_clear(i, mask);
				this_busy_nack += 2;
			}

			goto retry;
		}
	}
}

/* Send cross call to all processors mentioned in MASK
 * except self.
 */
static void smp_cross_call_masked(unsigned long *func, u32 ctx, u64 data1, u64 data2, cpumask_t mask)
{
	u64 data0 = (((u64)ctx)<<32 | (((u64)func) & 0xffffffff));
	int this_cpu = get_cpu();

	cpus_and(mask, mask, cpu_online_map);
	cpu_clear(this_cpu, mask);

	if (tlb_type == spitfire)
		spitfire_xcall_deliver(data0, data1, data2, mask);
	else
		cheetah_xcall_deliver(data0, data1, data2, mask);
	/* NOTE: Caller runs local copy on master. */

	put_cpu();
}
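
/* Mondo payload convention used above: data0 carries the MMU context in
 * its upper 32 bits and the low 32 bits of the xcall_* trap handler's
 * address in its lower half; data1 and data2 are handler-specific
 * arguments (for the TLB flush handlers, the addresses or range to
 * flush).
 */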

extern unsigned long xcall_sync_tick;

static void smp_start_sync_tick_client(int cpu)
{
	cpumask_t mask = cpumask_of_cpu(cpu);

	smp_cross_call_masked(&xcall_sync_tick,
			      0, 0, 0, mask);
}

/* Send cross call to all processors except self. */
#define smp_cross_call(func, ctx, data1, data2) \
	smp_cross_call_masked(func, ctx, data1, data2, cpu_online_map)

struct call_data_struct {
	void (*func) (void *info);
	void *info;
	atomic_t finished;
	int wait;
};

static DEFINE_SPINLOCK(call_lock);
static struct call_data_struct *call_data;

extern unsigned long xcall_call_function;

/*
 * You must not call this function with disabled interrupts or from a
 * hardware interrupt handler or from a bottom half handler.
 */
int smp_call_function(void (*func)(void *info), void *info,
		      int nonatomic, int wait)
{
	struct call_data_struct data;
	int cpus = num_online_cpus() - 1;
	long timeout;

	if (!cpus)
		return 0;

	/* Can deadlock when called with interrupts disabled */
	WARN_ON(irqs_disabled());

	data.func = func;
	data.info = info;
	atomic_set(&data.finished, 0);
	data.wait = wait;

	spin_lock(&call_lock);

	call_data = &data;

	smp_cross_call(&xcall_call_function, 0, 0, 0);

	/*
	 * Wait for other cpus to complete function or at
	 * least snap the call data.
	 */
	timeout = 1000000;
	while (atomic_read(&data.finished) != cpus) {
		if (--timeout <= 0)
			goto out_timeout;
		barrier();
		udelay(1);
	}

	spin_unlock(&call_lock);

	return 0;

out_timeout:
	spin_unlock(&call_lock);
	printk("XCALL: Remote cpus not responding, ncpus=%ld finished=%ld\n",
	       (long) num_online_cpus() - 1L,
	       (long) atomic_read(&data.finished));
	return 0;
}
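
/* smp_call_function_client() below is the receiving end of the
 * xcall_call_function mondo: each target cpu picks up func/info from the
 * shared call_data descriptor, runs the function, and bumps the finished
 * counter so the initiator's wait loop above can make progress.  With
 * wait set, the counter is only bumped after the function returns, so
 * the initiator waits for completion rather than just for delivery.
 */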

void smp_call_function_client(int irq, struct pt_regs *regs)
{
	void (*func) (void *info) = call_data->func;
	void *info = call_data->info;

	clear_softint(1 << irq);
	if (call_data->wait) {
		/* let initiator proceed only after completion */
		func(info);
		atomic_inc(&call_data->finished);
	} else {
		/* let initiator proceed after getting data */
		atomic_inc(&call_data->finished);
		func(info);
	}
}

extern unsigned long xcall_flush_tlb_mm;
extern unsigned long xcall_flush_tlb_pending;
extern unsigned long xcall_flush_tlb_kernel_range;
extern unsigned long xcall_flush_tlb_all_spitfire;
extern unsigned long xcall_flush_tlb_all_cheetah;
extern unsigned long xcall_report_regs;
extern unsigned long xcall_receive_signal;

#ifdef DCACHE_ALIASING_POSSIBLE
extern unsigned long xcall_flush_dcache_page_cheetah;
#endif
extern unsigned long xcall_flush_dcache_page_spitfire;

#ifdef CONFIG_DEBUG_DCFLUSH
extern atomic_t dcpage_flushes;
extern atomic_t dcpage_flushes_xcall;
#endif

static __inline__ void __local_flush_dcache_page(struct page *page)
{
#ifdef DCACHE_ALIASING_POSSIBLE
	__flush_dcache_page(page_address(page),
			    ((tlb_type == spitfire) &&
			     page_mapping(page) != NULL));
#else
	if (page_mapping(page) != NULL &&
	    tlb_type == spitfire)
		__flush_icache_page(__pa(page_address(page)));
#endif
}

void smp_flush_dcache_page_impl(struct page *page, int cpu)
{
	cpumask_t mask = cpumask_of_cpu(cpu);
	int this_cpu = get_cpu();

#ifdef CONFIG_DEBUG_DCFLUSH
	atomic_inc(&dcpage_flushes);
#endif
	if (cpu == this_cpu) {
		__local_flush_dcache_page(page);
	} else if (cpu_online(cpu)) {
		void *pg_addr = page_address(page);
		u64 data0;

		if (tlb_type == spitfire) {
			data0 =
				((u64)&xcall_flush_dcache_page_spitfire);
			if (page_mapping(page) != NULL)
				data0 |= ((u64)1 << 32);
			spitfire_xcall_deliver(data0,
					       __pa(pg_addr),
					       (u64) pg_addr,
					       mask);
		} else {
#ifdef DCACHE_ALIASING_POSSIBLE
			data0 =
				((u64)&xcall_flush_dcache_page_cheetah);
			cheetah_xcall_deliver(data0,
					      __pa(pg_addr),
					      0, mask);
#endif
		}
#ifdef CONFIG_DEBUG_DCFLUSH
		atomic_inc(&dcpage_flushes_xcall);
#endif
	}

	put_cpu();
}

void flush_dcache_page_all(struct mm_struct *mm, struct page *page)
{
	void *pg_addr = page_address(page);
	cpumask_t mask = cpu_online_map;
	u64 data0;
	int this_cpu = get_cpu();

	cpu_clear(this_cpu, mask);

#ifdef CONFIG_DEBUG_DCFLUSH
	atomic_inc(&dcpage_flushes);
#endif
	if (cpus_empty(mask))
		goto flush_self;
	if (tlb_type == spitfire) {
		data0 = ((u64)&xcall_flush_dcache_page_spitfire);
		if (page_mapping(page) != NULL)
			data0 |= ((u64)1 << 32);
		spitfire_xcall_deliver(data0,
				       __pa(pg_addr),
				       (u64) pg_addr,
				       mask);
	} else {
#ifdef DCACHE_ALIASING_POSSIBLE
		data0 = ((u64)&xcall_flush_dcache_page_cheetah);
		cheetah_xcall_deliver(data0,
				      __pa(pg_addr),
				      0, mask);
#endif
	}
#ifdef CONFIG_DEBUG_DCFLUSH
	atomic_inc(&dcpage_flushes_xcall);
#endif
 flush_self:
	__local_flush_dcache_page(page);

	put_cpu();
}

void smp_receive_signal(int cpu)
{
	cpumask_t mask = cpumask_of_cpu(cpu);

	if (cpu_online(cpu)) {
		u64 data0 = (((u64)&xcall_receive_signal) & 0xffffffff);

		if (tlb_type == spitfire)
			spitfire_xcall_deliver(data0, 0, 0, mask);
		else
			cheetah_xcall_deliver(data0, 0, 0, mask);
	}
}

void smp_receive_signal_client(int irq, struct pt_regs *regs)
{
	/* Just return, rtrap takes care of the rest. */
	clear_softint(1 << irq);
}

void smp_report_regs(void)
{
	smp_cross_call(&xcall_report_regs, 0, 0, 0);
}

void smp_flush_tlb_all(void)
{
	if (tlb_type == spitfire)
		smp_cross_call(&xcall_flush_tlb_all_spitfire, 0, 0, 0);
	else
		smp_cross_call(&xcall_flush_tlb_all_cheetah, 0, 0, 0);
	__flush_tlb_all();
}

/* We know that the window frames of the user have been flushed
 * to the stack before we get here because all callers of us
 * are flush_tlb_*() routines, and these run after flush_cache_*()
 * which performs the flushw.
 *
 * The SMP TLB coherency scheme we use works as follows:
 *
 * 1) mm->cpu_vm_mask is a bit mask of which cpus an address
 *    space has (potentially) executed on, this is the heuristic
 *    we use to avoid doing cross calls.
 *
 *    Also, for flushing from kswapd and also for clones, we
 *    use cpu_vm_mask as the list of cpus to make run the TLB.
 *
 * 2) TLB context numbers are shared globally across all processors
 *    in the system, this allows us to play several games to avoid
 *    cross calls.
 *
 *    One invariant is that when a cpu switches to a process, and
 *    that process's tsk->active_mm->cpu_vm_mask does not have the
 *    current cpu's bit set, that tlb context is flushed locally.
 *
 *    If the address space is non-shared (ie. mm->count == 1) we avoid
 *    cross calls when we want to flush the currently running process's
 *    tlb state.  This is done by clearing all cpu bits except the current
 *    processor's in current->active_mm->cpu_vm_mask and performing the
 *    flush locally only.  This will force any subsequent cpus which run
 *    this task to flush the context from the local tlb if the process
 *    migrates to another cpu (again).
 *
 * 3) For shared address spaces (threads) and swapping we bite the
 *    bullet for most cases and perform the cross call (but only to
 *    the cpus listed in cpu_vm_mask).
 *
 * The performance gain from "optimizing" away the cross call for threads is
 * questionable (in theory the big win for threads is the massive sharing of
 * address space state across processors).
 */
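
/* Illustrative example of the scheme above: a single-threaded process
 * (mm_users == 1) running on cpu 2 changes a mapping.
 * smp_flush_tlb_pending() sees that it is the only user of the mm,
 * collapses mm->cpu_vm_mask down to just cpu 2, and does only the local
 * __flush_tlb_pending().  If the task later migrates to cpu 5, the
 * switch_mm() invariant described in (2) notices that cpu 5 is not in
 * cpu_vm_mask and flushes the stale context from cpu 5's TLB locally,
 * so no cross call was ever needed.
 */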

/* This currently is only used by the hugetlb arch pre-fault
 * hook on UltraSPARC-III+ and later when changing the pagesize
 * bits of the context register for an address space.
 */
void smp_flush_tlb_mm(struct mm_struct *mm)
{
	u32 ctx = CTX_HWBITS(mm->context);
	int cpu = get_cpu();

	if (atomic_read(&mm->mm_users) == 1) {
		mm->cpu_vm_mask = cpumask_of_cpu(cpu);
		goto local_flush_and_out;
	}

	smp_cross_call_masked(&xcall_flush_tlb_mm,
			      ctx, 0, 0,
			      mm->cpu_vm_mask);

local_flush_and_out:
	__flush_tlb_mm(ctx, SECONDARY_CONTEXT);

	put_cpu();
}

void smp_flush_tlb_pending(struct mm_struct *mm, unsigned long nr, unsigned long *vaddrs)
{
	u32 ctx = CTX_HWBITS(mm->context);
	int cpu = get_cpu();

	if (mm == current->active_mm && atomic_read(&mm->mm_users) == 1)
		mm->cpu_vm_mask = cpumask_of_cpu(cpu);
	else
		smp_cross_call_masked(&xcall_flush_tlb_pending,
				      ctx, nr, (unsigned long) vaddrs,
				      mm->cpu_vm_mask);

	__flush_tlb_pending(ctx, nr, vaddrs);

	put_cpu();
}

void smp_flush_tlb_kernel_range(unsigned long start, unsigned long end)
{
	start &= PAGE_MASK;
	end = PAGE_ALIGN(end);
	if (start != end) {
		smp_cross_call(&xcall_flush_tlb_kernel_range,
			       0, start, end);

		__flush_tlb_kernel_range(start, end);
	}
}

/* #define CAPTURE_DEBUG */
extern unsigned long xcall_capture;

static atomic_t smp_capture_depth = ATOMIC_INIT(0);
static atomic_t smp_capture_registry = ATOMIC_INIT(0);
static unsigned long penguins_are_doing_time;
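
/* CPU capture: smp_capture() herds every other online cpu into a tight
 * "jail" loop (smp_penguin_jailcell) so globally quiescent work, such as
 * making the PROM active, can be done; smp_release() lets them go again.
 * Captures may nest, hence smp_capture_depth, and smp_capture_registry
 * counts how many cpus have actually checked into or out of the jail.
 */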

void smp_capture(void)
{
	int result = atomic_add_ret(1, &smp_capture_depth);

	if (result == 1) {
		int ncpus = num_online_cpus();

#ifdef CAPTURE_DEBUG
		printk("CPU[%d]: Sending penguins to jail...",
		       smp_processor_id());
#endif
		penguins_are_doing_time = 1;
		membar_storestore_loadstore();
		atomic_inc(&smp_capture_registry);
		smp_cross_call(&xcall_capture, 0, 0, 0);
		while (atomic_read(&smp_capture_registry) != ncpus)
			rmb();
#ifdef CAPTURE_DEBUG
		printk("done\n");
#endif
	}
}

void smp_release(void)
{
	if (atomic_dec_and_test(&smp_capture_depth)) {
#ifdef CAPTURE_DEBUG
		printk("CPU[%d]: Giving pardon to "
		       "imprisoned penguins\n",
		       smp_processor_id());
#endif
		penguins_are_doing_time = 0;
		membar_storeload_storestore();
		atomic_dec(&smp_capture_registry);
	}
}

/* Imprisoned penguins run with %pil == 15, but PSTATE_IE set, so they
 * can service tlb flush xcalls...
 */
extern void prom_world(int);
extern void save_alternate_globals(unsigned long *);
extern void restore_alternate_globals(unsigned long *);
void smp_penguin_jailcell(int irq, struct pt_regs *regs)
{
	unsigned long global_save[24];

	clear_softint(1 << irq);

	preempt_disable();

	__asm__ __volatile__("flushw");
	save_alternate_globals(global_save);
	prom_world(1);
	atomic_inc(&smp_capture_registry);
	membar_storeload_storestore();
	while (penguins_are_doing_time)
		rmb();
	restore_alternate_globals(global_save);
	atomic_dec(&smp_capture_registry);
	prom_world(0);

	preempt_enable();
}

#define prof_multiplier(__cpu)		cpu_data(__cpu).multiplier
#define prof_counter(__cpu)		cpu_data(__cpu).counter
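
/* prof_counter() is decremented on every per-cpu timer tick and reloaded
 * from prof_multiplier() when it hits zero; only then do we run the
 * normal timekeeping and process accounting.  setup_profiling_timer()
 * raises the multiplier (and shrinks current_tick_offset to match) so
 * profiling can sample at a higher rate than HZ without changing the
 * visible tick frequency.
 */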

void smp_percpu_timer_interrupt(struct pt_regs *regs)
{
	unsigned long compare, tick, pstate;
	int cpu = smp_processor_id();
	int user = user_mode(regs);

	/*
	 * Check for level 14 softint.
	 */
	{
		unsigned long tick_mask = tick_ops->softint_mask;

		if (!(get_softint() & tick_mask)) {
			extern void handler_irq(int, struct pt_regs *);

			handler_irq(14, regs);
			return;
		}
		clear_softint(tick_mask);
	}

	do {
		profile_tick(CPU_PROFILING, regs);
		if (!--prof_counter(cpu)) {
			irq_enter();

			if (cpu == boot_cpu_id) {
				kstat_this_cpu.irqs[0]++;
				timer_tick_interrupt(regs);
			}

			update_process_times(user);

			irq_exit();

			prof_counter(cpu) = prof_multiplier(cpu);
		}

		/* Guarantee that the following sequences execute
		 * uninterrupted.
		 */
		__asm__ __volatile__("rdpr	%%pstate, %0\n\t"
				     "wrpr	%0, %1, %%pstate"
				     : "=r" (pstate)
				     : "i" (PSTATE_IE));

		compare = tick_ops->add_compare(current_tick_offset);
		tick = tick_ops->get_tick();

		/* Restore PSTATE_IE. */
		__asm__ __volatile__("wrpr	%0, 0x0, %%pstate"
				     : /* no outputs */
				     : "r" (pstate));
	} while (time_after_eq(tick, compare));
}

static void __init smp_setup_percpu_timer(void)
{
	int cpu = smp_processor_id();
	unsigned long pstate;

	prof_counter(cpu) = prof_multiplier(cpu) = 1;

	/* Guarantee that the following sequences execute
	 * uninterrupted.
	 */
	__asm__ __volatile__("rdpr	%%pstate, %0\n\t"
			     "wrpr	%0, %1, %%pstate"
			     : "=r" (pstate)
			     : "i" (PSTATE_IE));

	tick_ops->init_tick(current_tick_offset);

	/* Restore PSTATE_IE. */
	__asm__ __volatile__("wrpr	%0, 0x0, %%pstate"
			     : /* no outputs */
			     : "r" (pstate));
}

void __init smp_tick_init(void)
{
	boot_cpu_id = hard_smp_processor_id();
	current_tick_offset = timer_tick_offset;

	cpu_set(boot_cpu_id, cpu_online_map);
	prof_counter(boot_cpu_id) = prof_multiplier(boot_cpu_id) = 1;
}

/* /proc/profile writes can call this, don't __init it please. */
static DEFINE_SPINLOCK(prof_setup_lock);

int setup_profiling_timer(unsigned int multiplier)
{
	unsigned long flags;
	int i;

	if ((!multiplier) || (timer_tick_offset / multiplier) < 1000)
		return -EINVAL;

	spin_lock_irqsave(&prof_setup_lock, flags);
	for (i = 0; i < NR_CPUS; i++)
		prof_multiplier(i) = multiplier;
	current_tick_offset = (timer_tick_offset / multiplier);
	spin_unlock_irqrestore(&prof_setup_lock, flags);

	return 0;
}

void __init smp_prepare_cpus(unsigned int max_cpus)
{
	int instance, mid;

	instance = 0;
	while (!cpu_find_by_instance(instance, NULL, &mid)) {
		if (mid < max_cpus)
			cpu_set(mid, phys_cpu_present_map);
		instance++;
	}

	if (num_possible_cpus() > max_cpus) {
		instance = 0;
		while (!cpu_find_by_instance(instance, NULL, &mid)) {
			if (mid != boot_cpu_id) {
				cpu_clear(mid, phys_cpu_present_map);
				if (num_possible_cpus() <= max_cpus)
					break;
			}
			instance++;
		}
	}

	smp_store_cpu_info(boot_cpu_id);
}

void __devinit smp_prepare_boot_cpu(void)
{
	if (hard_smp_processor_id() >= NR_CPUS) {
		prom_printf("Serious problem, boot cpu id >= NR_CPUS\n");
		prom_halt();
	}

	current_thread_info()->cpu = hard_smp_processor_id();

	cpu_set(smp_processor_id(), cpu_online_map);
	cpu_set(smp_processor_id(), phys_cpu_present_map);
}

int __devinit __cpu_up(unsigned int cpu)
{
	int ret = smp_boot_one_cpu(cpu);

	if (!ret) {
		cpu_set(cpu, smp_commenced_mask);
		while (!cpu_isset(cpu, cpu_online_map))
			mb();
		if (!cpu_isset(cpu, cpu_online_map)) {
			ret = -ENODEV;
		} else {
			smp_synchronize_one_tick(cpu);
		}
	}
	return ret;
}

void __init smp_cpus_done(unsigned int max_cpus)
{
	unsigned long bogosum = 0;
	int i;

	for (i = 0; i < NR_CPUS; i++) {
		if (cpu_online(i))
			bogosum += cpu_data(i).udelay_val;
	}
	printk("Total of %ld processors activated "
	       "(%lu.%02lu BogoMIPS).\n",
	       (long) num_online_cpus(),
	       bogosum/(500000/HZ),
	       (bogosum/(5000/HZ))%100);
}

void smp_send_reschedule(int cpu)
{
	smp_receive_signal(cpu);
}

/* This is a nop because we capture all other cpus
 * anyways when making the PROM active.
 */
void smp_send_stop(void)
{
}

unsigned long __per_cpu_base __read_mostly;
unsigned long __per_cpu_shift __read_mostly;

EXPORT_SYMBOL(__per_cpu_base);
EXPORT_SYMBOL(__per_cpu_shift);

void __init setup_per_cpu_areas(void)
{
	unsigned long goal, size, i;
	char *ptr;
	/* Created by linker magic */
	extern char __per_cpu_start[], __per_cpu_end[];

	/* Copy section for each CPU (we discard the original) */
	goal = ALIGN(__per_cpu_end - __per_cpu_start, PAGE_SIZE);
#ifdef CONFIG_MODULES
	if (goal < PERCPU_ENOUGH_ROOM)
		goal = PERCPU_ENOUGH_ROOM;
#endif
	__per_cpu_shift = 0;
	for (size = 1UL; size < goal; size <<= 1UL)
		__per_cpu_shift++;

	/* Make sure the resulting __per_cpu_base value
	 * will fit in the 43-bit sign extended IMMU
	 * TSB register.
	 */
	ptr = __alloc_bootmem(size * NR_CPUS, PAGE_SIZE,
			      (unsigned long) __per_cpu_start);

	__per_cpu_base = ptr - __per_cpu_start;

	if ((__per_cpu_shift < PAGE_SHIFT) ||
	    (__per_cpu_base & ~PAGE_MASK) ||
	    (__per_cpu_base != (((long) __per_cpu_base << 20) >> 20))) {
		prom_printf("PER_CPU: Invalid layout, "
			    "ptr[%p] shift[%lx] base[%lx]\n",
			    ptr, __per_cpu_shift, __per_cpu_base);
		prom_halt();
	}

	for (i = 0; i < NR_CPUS; i++, ptr += size)
		memcpy(ptr, __per_cpu_start, __per_cpu_end - __per_cpu_start);

	/* Finally, load in the boot cpu's base value.
	 * We abuse the IMMU TSB register for trap handler
	 * entry and exit loading of %g5.  That is why it
	 * has to be page aligned.
	 */
	cpu_setup_percpu_base(hard_smp_processor_id());
}