Commit | Line | Data |
---|---|---|
1da177e4 LT |
1 | /* smp.c: Sparc64 SMP support. |
2 | * | |
3 | * Copyright (C) 1997 David S. Miller (davem@caip.rutgers.edu) | |
4 | */ | |
5 | ||
6 | #include <linux/module.h> | |
7 | #include <linux/kernel.h> | |
8 | #include <linux/sched.h> | |
9 | #include <linux/mm.h> | |
10 | #include <linux/pagemap.h> | |
11 | #include <linux/threads.h> | |
12 | #include <linux/smp.h> | |
13 | #include <linux/smp_lock.h> | |
14 | #include <linux/interrupt.h> | |
15 | #include <linux/kernel_stat.h> | |
16 | #include <linux/delay.h> | |
17 | #include <linux/init.h> | |
18 | #include <linux/spinlock.h> | |
19 | #include <linux/fs.h> | |
20 | #include <linux/seq_file.h> | |
21 | #include <linux/cache.h> | |
22 | #include <linux/jiffies.h> | |
23 | #include <linux/profile.h> | |
24 | #include <linux/bootmem.h> | |
25 | ||
26 | #include <asm/head.h> | |
27 | #include <asm/ptrace.h> | |
28 | #include <asm/atomic.h> | |
29 | #include <asm/tlbflush.h> | |
30 | #include <asm/mmu_context.h> | |
31 | #include <asm/cpudata.h> | |
32 | ||
33 | #include <asm/irq.h> | |
6d24c8dc | 34 | #include <asm/irq_regs.h> |
1da177e4 LT |
35 | #include <asm/page.h> |
36 | #include <asm/pgtable.h> | |
37 | #include <asm/oplib.h> | |
38 | #include <asm/uaccess.h> | |
39 | #include <asm/timer.h> | |
40 | #include <asm/starfire.h> | |
41 | #include <asm/tlb.h> | |
56fb4df6 | 42 | #include <asm/sections.h> |
07f8e5f3 | 43 | #include <asm/prom.h> |
1da177e4 | 44 | |
1da177e4 LT |
45 | extern void calibrate_delay(void); |
46 | ||
47 | /* Please don't make this stuff initdata!!! --DaveM */ | |
48 | static unsigned char boot_cpu_id; | |
49 | ||
c12a8289 AM |
50 | cpumask_t cpu_online_map __read_mostly = CPU_MASK_NONE; |
51 | cpumask_t phys_cpu_present_map __read_mostly = CPU_MASK_NONE; | |
8935dced DM |
52 | cpumask_t cpu_sibling_map[NR_CPUS] __read_mostly = |
53 | { [0 ... NR_CPUS-1] = CPU_MASK_NONE }; | |
1da177e4 LT |
54 | static cpumask_t smp_commenced_mask; |
55 | static cpumask_t cpu_callout_map; | |
56 | ||
57 | void smp_info(struct seq_file *m) | |
58 | { | |
59 | int i; | |
60 | ||
61 | seq_printf(m, "State:\n"); | |
394e3902 AM |
62 | for_each_online_cpu(i) |
63 | seq_printf(m, "CPU%d:\t\tonline\n", i); | |
1da177e4 LT |
64 | } |
65 | ||
66 | void smp_bogo(struct seq_file *m) | |
67 | { | |
68 | int i; | |
69 | ||
394e3902 AM |
70 | for_each_online_cpu(i) |
71 | seq_printf(m, | |
72 | "Cpu%dBogo\t: %lu.%02lu\n" | |
73 | "Cpu%dClkTck\t: %016lx\n", | |
74 | i, cpu_data(i).udelay_val / (500000/HZ), | |
75 | (cpu_data(i).udelay_val / (5000/HZ)) % 100, | |
76 | i, cpu_data(i).clock_tick); | |
1da177e4 LT |
77 | } |
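
For readers decoding the two expressions in the seq_printf() above: udelay_val is the CPU's loops_per_jiffy, and BogoMIPS is loops_per_jiffy * HZ / 500000, printed as an integer part plus two decimal digits. A minimal userspace sketch of the same arithmetic (HZ and the sample loops_per_jiffy value below are assumptions, not taken from this file):

```c
/* Illustrative only -- not kernel code.  Shows how the Cpu%dBogo value
 * printed by smp_bogo() is derived from loops_per_jiffy (udelay_val).
 * HZ and the sample loops_per_jiffy are assumed values.
 */
#include <stdio.h>

#define HZ 1000

static void print_bogo(int cpu, unsigned long udelay_val)
{
	/* BogoMIPS = loops_per_jiffy * HZ / 500000, shown with 2 decimals */
	printf("Cpu%dBogo\t: %lu.%02lu\n",
	       cpu,
	       udelay_val / (500000 / HZ),		/* integer part */
	       (udelay_val / (5000 / HZ)) % 100);	/* hundredths   */
}

int main(void)
{
	print_bogo(0, 498073UL);	/* hypothetical loops_per_jiffy */
	return 0;
}
```

With loops_per_jiffy = 498073 and HZ = 1000 this prints "Cpu0Bogo: 996.14", the same style of value smp_bogo() feeds into /proc/cpuinfo.
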
78 | ||
79 | void __init smp_store_cpu_info(int id) | |
80 | { | |
07f8e5f3 DM |
81 | struct device_node *dp; |
82 | int def; | |
1da177e4 LT |
83 | |
84 | /* multiplier and counter set by | |
85 | smp_setup_percpu_timer() */ | |
86 | cpu_data(id).udelay_val = loops_per_jiffy; | |
87 | ||
07f8e5f3 DM |
88 | cpu_find_by_mid(id, &dp); |
89 | cpu_data(id).clock_tick = | |
90 | of_getintprop_default(dp, "clock-frequency", 0); | |
1da177e4 | 91 | |
f03b8a54 | 92 | def = ((tlb_type == hypervisor) ? (8 * 1024) : (16 * 1024)); |
07f8e5f3 DM |
93 | cpu_data(id).dcache_size = |
94 | of_getintprop_default(dp, "dcache-size", def); | |
f03b8a54 DM |
95 | |
96 | def = 32; | |
80dc0d6b | 97 | cpu_data(id).dcache_line_size = |
07f8e5f3 | 98 | of_getintprop_default(dp, "dcache-line-size", def); |
f03b8a54 DM |
99 | |
100 | def = 16 * 1024; | |
07f8e5f3 DM |
101 | cpu_data(id).icache_size = |
102 | of_getintprop_default(dp, "icache-size", def); | |
f03b8a54 DM |
103 | |
104 | def = 32; | |
80dc0d6b | 105 | cpu_data(id).icache_line_size = |
07f8e5f3 | 106 | of_getintprop_default(dp, "icache-line-size", def); |
f03b8a54 DM |
107 | |
108 | def = ((tlb_type == hypervisor) ? | |
109 | (3 * 1024 * 1024) : | |
110 | (4 * 1024 * 1024)); | |
07f8e5f3 DM |
111 | cpu_data(id).ecache_size = |
112 | of_getintprop_default(dp, "ecache-size", def); | |
f03b8a54 DM |
113 | |
114 | def = 64; | |
80dc0d6b | 115 | cpu_data(id).ecache_line_size = |
07f8e5f3 | 116 | of_getintprop_default(dp, "ecache-line-size", def); |
f03b8a54 | 117 | |
80dc0d6b DM |
118 | printk("CPU[%d]: Caches " |
119 | "D[sz(%d):line_sz(%d)] " | |
120 | "I[sz(%d):line_sz(%d)] " | |
121 | "E[sz(%d):line_sz(%d)]\n", | |
122 | id, | |
123 | cpu_data(id).dcache_size, cpu_data(id).dcache_line_size, | |
124 | cpu_data(id).icache_size, cpu_data(id).icache_line_size, | |
125 | cpu_data(id).ecache_size, cpu_data(id).ecache_line_size); | |
1da177e4 LT |
126 | } |
127 | ||
128 | static void smp_setup_percpu_timer(void); | |
129 | ||
130 | static volatile unsigned long callin_flag = 0; | |
131 | ||
1da177e4 LT |
132 | void __init smp_callin(void) |
133 | { | |
134 | int cpuid = hard_smp_processor_id(); | |
135 | ||
56fb4df6 | 136 | __local_per_cpu_offset = __per_cpu_offset(cpuid); |
1da177e4 | 137 | |
4a07e646 | 138 | if (tlb_type == hypervisor) |
490384e7 | 139 | sun4v_ktsb_register(); |
481295f9 | 140 | |
56fb4df6 | 141 | __flush_tlb_all(); |
1da177e4 LT |
142 | |
143 | smp_setup_percpu_timer(); | |
144 | ||
816242da DM |
145 | if (cheetah_pcache_forced_on) |
146 | cheetah_enable_pcache(); | |
147 | ||
1da177e4 LT |
148 | local_irq_enable(); |
149 | ||
150 | calibrate_delay(); | |
151 | smp_store_cpu_info(cpuid); | |
152 | callin_flag = 1; | |
153 | __asm__ __volatile__("membar #Sync\n\t" | |
154 | "flush %%g6" : : : "memory"); | |
155 | ||
156 | /* Clear this or we will die instantly when we | |
157 | * schedule back to this idler... | |
158 | */ | |
db7d9a4e | 159 | current_thread_info()->new_child = 0; |
1da177e4 LT |
160 | |
161 | /* Attach to the address space of init_task. */ | |
162 | atomic_inc(&init_mm.mm_count); | |
163 | current->active_mm = &init_mm; | |
164 | ||
165 | while (!cpu_isset(cpuid, smp_commenced_mask)) | |
4f07118f | 166 | rmb(); |
1da177e4 LT |
167 | |
168 | cpu_set(cpuid, cpu_online_map); | |
5bfb5d69 NP |
169 | |
170 | /* idle thread is expected to have preempt disabled */ | |
171 | preempt_disable(); | |
1da177e4 LT |
172 | } |
173 | ||
174 | void cpu_panic(void) | |
175 | { | |
176 | printk("CPU[%d]: Returns from cpu_idle!\n", smp_processor_id()); | |
177 | panic("SMP bolixed\n"); | |
178 | } | |
179 | ||
d369ddd2 | 180 | static unsigned long current_tick_offset __read_mostly; |
1da177e4 LT |
181 | |
182 | /* This tick register synchronization scheme is taken entirely from | |
183 | * the ia64 port, see arch/ia64/kernel/smpboot.c for details and credit. | |
184 | * | |
185 | * The only change I've made is to rework it so that the master | |
186 | * initiates the synchronization instead of the slave. -DaveM | |
187 | */ | |
188 | ||
189 | #define MASTER 0 | |
190 | #define SLAVE (SMP_CACHE_BYTES/sizeof(unsigned long)) | |
191 | ||
192 | #define NUM_ROUNDS 64 /* magic value */ | |
193 | #define NUM_ITERS 5 /* likewise */ | |
194 | ||
195 | static DEFINE_SPINLOCK(itc_sync_lock); | |
196 | static unsigned long go[SLAVE + 1]; | |
197 | ||
198 | #define DEBUG_TICK_SYNC 0 | |
199 | ||
200 | static inline long get_delta (long *rt, long *master) | |
201 | { | |
202 | unsigned long best_t0 = 0, best_t1 = ~0UL, best_tm = 0; | |
203 | unsigned long tcenter, t0, t1, tm; | |
204 | unsigned long i; | |
205 | ||
206 | for (i = 0; i < NUM_ITERS; i++) { | |
207 | t0 = tick_ops->get_tick(); | |
208 | go[MASTER] = 1; | |
4f07118f | 209 | membar_storeload(); |
1da177e4 | 210 | while (!(tm = go[SLAVE])) |
4f07118f | 211 | rmb(); |
1da177e4 | 212 | go[SLAVE] = 0; |
4f07118f | 213 | wmb(); |
1da177e4 LT |
214 | t1 = tick_ops->get_tick(); |
215 | ||
216 | if (t1 - t0 < best_t1 - best_t0) | |
217 | best_t0 = t0, best_t1 = t1, best_tm = tm; | |
218 | } | |
219 | ||
220 | *rt = best_t1 - best_t0; | |
221 | *master = best_tm - best_t0; | |
222 | ||
223 | /* average best_t0 and best_t1 without overflow: */ | |
224 | tcenter = (best_t0/2 + best_t1/2); | |
225 | if (best_t0 % 2 + best_t1 % 2 == 2) | |
226 | tcenter++; | |
227 | return tcenter - best_tm; | |
228 | } | |
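
The "average ... without overflow" step above is the only subtle arithmetic in get_delta(): adding best_t0 and best_t1 directly could wrap for large tick values, so the midpoint is built from the two halves with a correction when both operands are odd. A small standalone sketch of that trick (illustrative, not kernel code):

```c
/* Illustrative only -- the overflow-safe averaging used by get_delta().
 * (a + b) / 2 could wrap for large tick values, so the midpoint is
 * built from the halves, plus 1 when both operands are odd.
 */
#include <assert.h>

static unsigned long midpoint(unsigned long a, unsigned long b)
{
	unsigned long mid = a / 2 + b / 2;

	if ((a % 2) + (b % 2) == 2)	/* both odd: each half lost 0.5 */
		mid++;
	return mid;
}

int main(void)
{
	assert(midpoint(7, 9) == 8);		/* exact, same as (7 + 9) / 2 */
	assert(midpoint(~0UL, ~0UL) == ~0UL);	/* (a + b) would have wrapped */
	return 0;
}
```

get_delta() then returns tcenter - best_tm, i.e. how far the slave's midpoint tick sits from the master's reported tick for the lowest-round-trip sample.
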
229 | ||
230 | void smp_synchronize_tick_client(void) | |
231 | { | |
232 | long i, delta, adj, adjust_latency = 0, done = 0; | |
233 | unsigned long flags, rt, master_time_stamp, bound; | |
234 | #if DEBUG_TICK_SYNC | |
235 | struct { | |
236 | long rt; /* roundtrip time */ | |
237 | long master; /* master's timestamp */ | |
238 | long diff; /* difference between midpoint and master's timestamp */ | |
239 | long lat; /* estimate of itc adjustment latency */ | |
240 | } t[NUM_ROUNDS]; | |
241 | #endif | |
242 | ||
243 | go[MASTER] = 1; | |
244 | ||
245 | while (go[MASTER]) | |
4f07118f | 246 | rmb(); |
1da177e4 LT |
247 | |
248 | local_irq_save(flags); | |
249 | { | |
250 | for (i = 0; i < NUM_ROUNDS; i++) { | |
251 | delta = get_delta(&rt, &master_time_stamp); | |
252 | if (delta == 0) { | |
253 | done = 1; /* let's lock on to this... */ | |
254 | bound = rt; | |
255 | } | |
256 | ||
257 | if (!done) { | |
258 | if (i > 0) { | |
259 | adjust_latency += -delta; | |
260 | adj = -delta + adjust_latency/4; | |
261 | } else | |
262 | adj = -delta; | |
263 | ||
264 | tick_ops->add_tick(adj, current_tick_offset); | |
265 | } | |
266 | #if DEBUG_TICK_SYNC | |
267 | t[i].rt = rt; | |
268 | t[i].master = master_time_stamp; | |
269 | t[i].diff = delta; | |
270 | t[i].lat = adjust_latency/4; | |
271 | #endif | |
272 | } | |
273 | } | |
274 | local_irq_restore(flags); | |
275 | ||
276 | #if DEBUG_TICK_SYNC | |
277 | for (i = 0; i < NUM_ROUNDS; i++) | |
278 | printk("rt=%5ld master=%5ld diff=%5ld adjlat=%5ld\n", | |
279 | t[i].rt, t[i].master, t[i].diff, t[i].lat); | |
280 | #endif | |
281 | ||
282 | printk(KERN_INFO "CPU %d: synchronized TICK with master CPU (last diff %ld cycles," | |
283 | "maxerr %lu cycles)\n", smp_processor_id(), delta, rt); | |
284 | } | |
285 | ||
286 | static void smp_start_sync_tick_client(int cpu); | |
287 | ||
288 | static void smp_synchronize_one_tick(int cpu) | |
289 | { | |
290 | unsigned long flags, i; | |
291 | ||
292 | go[MASTER] = 0; | |
293 | ||
294 | smp_start_sync_tick_client(cpu); | |
295 | ||
296 | /* wait for client to be ready */ | |
297 | while (!go[MASTER]) | |
4f07118f | 298 | rmb(); |
1da177e4 LT |
299 | |
300 | /* now let the client proceed into his loop */ | |
301 | go[MASTER] = 0; | |
4f07118f | 302 | membar_storeload(); |
1da177e4 LT |
303 | |
304 | spin_lock_irqsave(&itc_sync_lock, flags); | |
305 | { | |
306 | for (i = 0; i < NUM_ROUNDS*NUM_ITERS; i++) { | |
307 | while (!go[MASTER]) | |
4f07118f | 308 | rmb(); |
1da177e4 | 309 | go[MASTER] = 0; |
4f07118f | 310 | wmb(); |
1da177e4 | 311 | go[SLAVE] = tick_ops->get_tick(); |
4f07118f | 312 | membar_storeload(); |
1da177e4 LT |
313 | } |
314 | } | |
315 | spin_unlock_irqrestore(&itc_sync_lock, flags); | |
316 | } | |
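
The master loop here and the slave loop in get_delta() interlock through the shared go[] array, so the per-iteration handshake is easier to follow side by side. The condensed trace below is illustrative only and not part of the file:

```
slave (get_delta)                      master (smp_synchronize_one_tick)
-----------------                      ---------------------------------
t0 = tick_ops->get_tick();
go[MASTER] = 1;              ----->    while (!go[MASTER]) rmb();
                                       go[MASTER] = 0; wmb();
while (!(tm = go[SLAVE]))    <-----    go[SLAVE] = tick_ops->get_tick();
        rmb();
go[SLAVE] = 0; wmb();
t1 = tick_ops->get_tick();
```

The slave keeps the (t0, t1, tm) triple with the smallest t1 - t0, i.e. the sample with the least round-trip noise, and that sample is what feeds the midpoint calculation shown earlier.
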
317 | ||
72aff53f DM |
318 | extern void sun4v_init_mondo_queues(int use_bootmem, int cpu, int alloc, int load); |
319 | ||
1da177e4 LT |
320 | extern unsigned long sparc64_cpu_startup; |
321 | ||
322 | /* The OBP cpu startup callback truncates the 3rd arg cookie to | |
323 | * 32-bits (I think) so to be safe we have it read the pointer | |
324 | * contained here so we work on >4GB machines. -DaveM | |
325 | */ | |
326 | static struct thread_info *cpu_new_thread = NULL; | |
327 | ||
328 | static int __devinit smp_boot_one_cpu(unsigned int cpu) | |
329 | { | |
330 | unsigned long entry = | |
331 | (unsigned long)(&sparc64_cpu_startup); | |
332 | unsigned long cookie = | |
333 | (unsigned long)(&cpu_new_thread); | |
334 | struct task_struct *p; | |
7890f794 | 335 | int timeout, ret; |
1da177e4 LT |
336 | |
337 | p = fork_idle(cpu); | |
338 | callin_flag = 0; | |
f3169641 | 339 | cpu_new_thread = task_thread_info(p); |
1da177e4 LT |
340 | cpu_set(cpu, cpu_callout_map); |
341 | ||
7890f794 | 342 | if (tlb_type == hypervisor) { |
72aff53f DM |
343 | /* Alloc the mondo queues, cpu will load them. */ |
344 | sun4v_init_mondo_queues(0, cpu, 1, 0); | |
345 | ||
7890f794 DM |
346 | prom_startcpu_cpuid(cpu, entry, cookie); |
347 | } else { | |
07f8e5f3 | 348 | struct device_node *dp; |
7890f794 | 349 | |
07f8e5f3 DM |
350 | cpu_find_by_mid(cpu, &dp); |
351 | prom_startcpu(dp->node, entry, cookie); | |
7890f794 | 352 | } |
1da177e4 LT |
353 | |
354 | for (timeout = 0; timeout < 5000000; timeout++) { | |
355 | if (callin_flag) | |
356 | break; | |
357 | udelay(100); | |
358 | } | |
72aff53f | 359 | |
1da177e4 LT |
360 | if (callin_flag) { |
361 | ret = 0; | |
362 | } else { | |
363 | printk("Processor %d is stuck.\n", cpu); | |
364 | cpu_clear(cpu, cpu_callout_map); | |
365 | ret = -ENODEV; | |
366 | } | |
367 | cpu_new_thread = NULL; | |
368 | ||
369 | return ret; | |
370 | } | |
371 | ||
372 | static void spitfire_xcall_helper(u64 data0, u64 data1, u64 data2, u64 pstate, unsigned long cpu) | |
373 | { | |
374 | u64 result, target; | |
375 | int stuck, tmp; | |
376 | ||
377 | if (this_is_starfire) { | |
378 | /* map to real upaid */ | |
379 | cpu = (((cpu & 0x3c) << 1) | | |
380 | ((cpu & 0x40) >> 4) | | |
381 | (cpu & 0x3)); | |
382 | } | |
383 | ||
384 | target = (cpu << 14) | 0x70; | |
385 | again: | |
386 | /* Ok, this is the real Spitfire Errata #54. | |
387 | * One must read back from a UDB internal register | |
388 | * after writes to the UDB interrupt dispatch, but | |
389 | * before the membar Sync for that write. | |
390 | * So we use the high UDB control register (ASI 0x7f, | |
391 | * ADDR 0x20) for the dummy read. -DaveM | |
392 | */ | |
393 | tmp = 0x40; | |
394 | __asm__ __volatile__( | |
395 | "wrpr %1, %2, %%pstate\n\t" | |
396 | "stxa %4, [%0] %3\n\t" | |
397 | "stxa %5, [%0+%8] %3\n\t" | |
398 | "add %0, %8, %0\n\t" | |
399 | "stxa %6, [%0+%8] %3\n\t" | |
400 | "membar #Sync\n\t" | |
401 | "stxa %%g0, [%7] %3\n\t" | |
402 | "membar #Sync\n\t" | |
403 | "mov 0x20, %%g1\n\t" | |
404 | "ldxa [%%g1] 0x7f, %%g0\n\t" | |
405 | "membar #Sync" | |
406 | : "=r" (tmp) | |
407 | : "r" (pstate), "i" (PSTATE_IE), "i" (ASI_INTR_W), | |
408 | "r" (data0), "r" (data1), "r" (data2), "r" (target), | |
409 | "r" (0x10), "0" (tmp) | |
410 | : "g1"); | |
411 | ||
412 | /* NOTE: PSTATE_IE is still clear. */ | |
413 | stuck = 100000; | |
414 | do { | |
415 | __asm__ __volatile__("ldxa [%%g0] %1, %0" | |
416 | : "=r" (result) | |
417 | : "i" (ASI_INTR_DISPATCH_STAT)); | |
418 | if (result == 0) { | |
419 | __asm__ __volatile__("wrpr %0, 0x0, %%pstate" | |
420 | : : "r" (pstate)); | |
421 | return; | |
422 | } | |
423 | stuck -= 1; | |
424 | if (stuck == 0) | |
425 | break; | |
426 | } while (result & 0x1); | |
427 | __asm__ __volatile__("wrpr %0, 0x0, %%pstate" | |
428 | : : "r" (pstate)); | |
429 | if (stuck == 0) { | |
430 | printk("CPU[%d]: mondo stuckage result[%016lx]\n", | |
431 | smp_processor_id(), result); | |
432 | } else { | |
433 | udelay(2); | |
434 | goto again; | |
435 | } | |
436 | } | |
437 | ||
438 | static __inline__ void spitfire_xcall_deliver(u64 data0, u64 data1, u64 data2, cpumask_t mask) | |
439 | { | |
440 | u64 pstate; | |
441 | int i; | |
442 | ||
443 | __asm__ __volatile__("rdpr %%pstate, %0" : "=r" (pstate)); | |
444 | for_each_cpu_mask(i, mask) | |
445 | spitfire_xcall_helper(data0, data1, data2, pstate, i); | |
446 | } | |
447 | ||
448 | /* Cheetah now allows to send the whole 64-bytes of data in the interrupt | |
449 | * packet, but we have no use for that. However we do take advantage of | |
450 | * the new pipelining feature (ie. dispatch to multiple cpus simultaneously). | |
451 | */ | |
452 | static void cheetah_xcall_deliver(u64 data0, u64 data1, u64 data2, cpumask_t mask) | |
453 | { | |
454 | u64 pstate, ver; | |
92704a1c | 455 | int nack_busy_id, is_jbus; |
1da177e4 LT |
456 | |
457 | if (cpus_empty(mask)) | |
458 | return; | |
459 | ||
460 | /* Unfortunately, someone at Sun had the brilliant idea to make the | |
461 | * busy/nack fields hard-coded by ITID number for this Ultra-III | |
462 | * derivative processor. | |
463 | */ | |
464 | __asm__ ("rdpr %%ver, %0" : "=r" (ver)); | |
92704a1c DM |
465 | is_jbus = ((ver >> 32) == __JALAPENO_ID || |
466 | (ver >> 32) == __SERRANO_ID); | |
1da177e4 LT |
467 | |
468 | __asm__ __volatile__("rdpr %%pstate, %0" : "=r" (pstate)); | |
469 | ||
470 | retry: | |
471 | __asm__ __volatile__("wrpr %0, %1, %%pstate\n\t" | |
472 | : : "r" (pstate), "i" (PSTATE_IE)); | |
473 | ||
474 | /* Setup the dispatch data registers. */ | |
475 | __asm__ __volatile__("stxa %0, [%3] %6\n\t" | |
476 | "stxa %1, [%4] %6\n\t" | |
477 | "stxa %2, [%5] %6\n\t" | |
478 | "membar #Sync\n\t" | |
479 | : /* no outputs */ | |
480 | : "r" (data0), "r" (data1), "r" (data2), | |
481 | "r" (0x40), "r" (0x50), "r" (0x60), | |
482 | "i" (ASI_INTR_W)); | |
483 | ||
484 | nack_busy_id = 0; | |
485 | { | |
486 | int i; | |
487 | ||
488 | for_each_cpu_mask(i, mask) { | |
489 | u64 target = (i << 14) | 0x70; | |
490 | ||
92704a1c | 491 | if (!is_jbus) |
1da177e4 LT |
492 | target |= (nack_busy_id << 24); |
493 | __asm__ __volatile__( | |
494 | "stxa %%g0, [%0] %1\n\t" | |
495 | "membar #Sync\n\t" | |
496 | : /* no outputs */ | |
497 | : "r" (target), "i" (ASI_INTR_W)); | |
498 | nack_busy_id++; | |
499 | } | |
500 | } | |
501 | ||
502 | /* Now, poll for completion. */ | |
503 | { | |
504 | u64 dispatch_stat; | |
505 | long stuck; | |
506 | ||
507 | stuck = 100000 * nack_busy_id; | |
508 | do { | |
509 | __asm__ __volatile__("ldxa [%%g0] %1, %0" | |
510 | : "=r" (dispatch_stat) | |
511 | : "i" (ASI_INTR_DISPATCH_STAT)); | |
512 | if (dispatch_stat == 0UL) { | |
513 | __asm__ __volatile__("wrpr %0, 0x0, %%pstate" | |
514 | : : "r" (pstate)); | |
515 | return; | |
516 | } | |
517 | if (!--stuck) | |
518 | break; | |
519 | } while (dispatch_stat & 0x5555555555555555UL); | |
520 | ||
521 | __asm__ __volatile__("wrpr %0, 0x0, %%pstate" | |
522 | : : "r" (pstate)); | |
523 | ||
524 | if ((dispatch_stat & ~(0x5555555555555555UL)) == 0) { | |
525 | /* Busy bits will not clear, continue instead | |
526 | * of freezing up on this cpu. | |
527 | */ | |
528 | printk("CPU[%d]: mondo stuckage result[%016lx]\n", | |
529 | smp_processor_id(), dispatch_stat); | |
530 | } else { | |
531 | int i, this_busy_nack = 0; | |
532 | ||
533 | /* Delay some random time with interrupts enabled | |
534 | * to prevent deadlock. | |
535 | */ | |
536 | udelay(2 * nack_busy_id); | |
537 | ||
538 | /* Clear out the mask bits for cpus which did not | |
539 | * NACK us. | |
540 | */ | |
541 | for_each_cpu_mask(i, mask) { | |
542 | u64 check_mask; | |
543 | ||
92704a1c | 544 | if (is_jbus) |
1da177e4 LT |
545 | check_mask = (0x2UL << (2*i)); |
546 | else | |
547 | check_mask = (0x2UL << | |
548 | this_busy_nack); | |
549 | if ((dispatch_stat & check_mask) == 0) | |
550 | cpu_clear(i, mask); | |
551 | this_busy_nack += 2; | |
552 | } | |
553 | ||
554 | goto retry; | |
555 | } | |
556 | } | |
557 | } | |
558 | ||
1d2f1f90 | 559 | /* Multi-cpu list version. */ |
a43fe0e7 DM |
560 | static void hypervisor_xcall_deliver(u64 data0, u64 data1, u64 data2, cpumask_t mask) |
561 | { | |
b830ab66 DM |
562 | struct trap_per_cpu *tb; |
563 | u16 *cpu_list; | |
564 | u64 *mondo; | |
565 | cpumask_t error_mask; | |
566 | unsigned long flags, status; | |
3cab0c3e | 567 | int cnt, retries, this_cpu, prev_sent, i; |
b830ab66 DM |
568 | |
569 | /* We have to do this whole thing with interrupts fully disabled. | |
570 | * Otherwise if we send an xcall from interrupt context it will | |
571 | * corrupt both our mondo block and cpu list state. | |
572 | * | |
573 | * One consequence of this is that we cannot use timeout mechanisms | |
574 | * that depend upon interrupts being delivered locally. So, for | |
575 | * example, we cannot sample jiffies and expect it to advance. | |
576 | * | |
577 | * Fortunately, udelay() uses %stick/%tick so we can use that. | |
578 | */ | |
579 | local_irq_save(flags); | |
580 | ||
581 | this_cpu = smp_processor_id(); | |
582 | tb = &trap_block[this_cpu]; | |
1d2f1f90 | 583 | |
b830ab66 | 584 | mondo = __va(tb->cpu_mondo_block_pa); |
1d2f1f90 DM |
585 | mondo[0] = data0; |
586 | mondo[1] = data1; | |
587 | mondo[2] = data2; | |
588 | wmb(); | |
589 | ||
b830ab66 DM |
590 | cpu_list = __va(tb->cpu_list_pa); |
591 | ||
592 | /* Setup the initial cpu list. */ | |
593 | cnt = 0; | |
594 | for_each_cpu_mask(i, mask) | |
595 | cpu_list[cnt++] = i; | |
596 | ||
597 | cpus_clear(error_mask); | |
1d2f1f90 | 598 | retries = 0; |
3cab0c3e | 599 | prev_sent = 0; |
1d2f1f90 | 600 | do { |
3cab0c3e | 601 | int forward_progress, n_sent; |
1d2f1f90 | 602 | |
b830ab66 DM |
603 | status = sun4v_cpu_mondo_send(cnt, |
604 | tb->cpu_list_pa, | |
605 | tb->cpu_mondo_block_pa); | |
606 | ||
607 | /* HV_EOK means all cpus received the xcall, we're done. */ | |
608 | if (likely(status == HV_EOK)) | |
1d2f1f90 | 609 | break; |
b830ab66 | 610 | |
3cab0c3e DM |
611 | /* First, see if we made any forward progress. |
612 | * | |
613 | * The hypervisor indicates successful sends by setting | |
614 | * cpu list entries to the value 0xffff. | |
b830ab66 | 615 | */ |
3cab0c3e | 616 | n_sent = 0; |
b830ab66 | 617 | for (i = 0; i < cnt; i++) { |
3cab0c3e DM |
618 | if (likely(cpu_list[i] == 0xffff)) |
619 | n_sent++; | |
1d2f1f90 DM |
620 | } |
621 | ||
3cab0c3e DM |
622 | forward_progress = 0; |
623 | if (n_sent > prev_sent) | |
624 | forward_progress = 1; | |
625 | ||
626 | prev_sent = n_sent; | |
627 | ||
b830ab66 DM |
628 | /* If we get a HV_ECPUERROR, then one or more of the cpus |
629 | * in the list are in error state. Use the cpu_state() | |
630 | * hypervisor call to find out which cpus are in error state. | |
631 | */ | |
632 | if (unlikely(status == HV_ECPUERROR)) { | |
633 | for (i = 0; i < cnt; i++) { | |
634 | long err; | |
635 | u16 cpu; | |
636 | ||
637 | cpu = cpu_list[i]; | |
638 | if (cpu == 0xffff) | |
639 | continue; | |
640 | ||
641 | err = sun4v_cpu_state(cpu); | |
642 | if (err >= 0 && | |
643 | err == HV_CPU_STATE_ERROR) { | |
3cab0c3e | 644 | cpu_list[i] = 0xffff; |
b830ab66 DM |
645 | cpu_set(cpu, error_mask); |
646 | } | |
647 | } | |
648 | } else if (unlikely(status != HV_EWOULDBLOCK)) | |
649 | goto fatal_mondo_error; | |
650 | ||
3cab0c3e DM |
651 | /* Don't bother rewriting the CPU list, just leave the |
652 | * 0xffff and non-0xffff entries in there and the | |
653 | * hypervisor will do the right thing. | |
654 | * | |
655 | * Only advance timeout state if we didn't make any | |
656 | * forward progress. | |
657 | */ | |
b830ab66 DM |
658 | if (unlikely(!forward_progress)) { |
659 | if (unlikely(++retries > 10000)) | |
660 | goto fatal_mondo_timeout; | |
661 | ||
662 | /* Delay a little bit to let other cpus catch up | |
663 | * on their cpu mondo queue work. | |
664 | */ | |
665 | udelay(2 * cnt); | |
666 | } | |
1d2f1f90 DM |
667 | } while (1); |
668 | ||
b830ab66 DM |
669 | local_irq_restore(flags); |
670 | ||
671 | if (unlikely(!cpus_empty(error_mask))) | |
672 | goto fatal_mondo_cpu_error; | |
673 | ||
674 | return; | |
675 | ||
676 | fatal_mondo_cpu_error: | |
677 | printk(KERN_CRIT "CPU[%d]: SUN4V mondo cpu error, some target cpus " | |
678 | "were in error state\n", | |
679 | this_cpu); | |
680 | printk(KERN_CRIT "CPU[%d]: Error mask [ ", this_cpu); | |
681 | for_each_cpu_mask(i, error_mask) | |
682 | printk("%d ", i); | |
683 | printk("]\n"); | |
684 | return; | |
685 | ||
686 | fatal_mondo_timeout: | |
687 | local_irq_restore(flags); | |
688 | printk(KERN_CRIT "CPU[%d]: SUN4V mondo timeout, no forward " | |
689 | " progress after %d retries.\n", | |
690 | this_cpu, retries); | |
691 | goto dump_cpu_list_and_out; | |
692 | ||
693 | fatal_mondo_error: | |
694 | local_irq_restore(flags); | |
695 | printk(KERN_CRIT "CPU[%d]: Unexpected SUN4V mondo error %lu\n", | |
696 | this_cpu, status); | |
697 | printk(KERN_CRIT "CPU[%d]: Args were cnt(%d) cpulist_pa(%lx) " | |
698 | "mondo_block_pa(%lx)\n", | |
699 | this_cpu, cnt, tb->cpu_list_pa, tb->cpu_mondo_block_pa); | |
700 | ||
701 | dump_cpu_list_and_out: | |
702 | printk(KERN_CRIT "CPU[%d]: CPU list [ ", this_cpu); | |
703 | for (i = 0; i < cnt; i++) | |
704 | printk("%u ", cpu_list[i]); | |
705 | printk("]\n"); | |
1d2f1f90 | 706 | } |
a43fe0e7 | 707 | |
1da177e4 LT |
708 | /* Send cross call to all processors mentioned in MASK |
709 | * except self. | |
710 | */ | |
711 | static void smp_cross_call_masked(unsigned long *func, u32 ctx, u64 data1, u64 data2, cpumask_t mask) | |
712 | { | |
713 | u64 data0 = (((u64)ctx)<<32 | (((u64)func) & 0xffffffff)); | |
714 | int this_cpu = get_cpu(); | |
715 | ||
716 | cpus_and(mask, mask, cpu_online_map); | |
717 | cpu_clear(this_cpu, mask); | |
718 | ||
719 | if (tlb_type == spitfire) | |
720 | spitfire_xcall_deliver(data0, data1, data2, mask); | |
a43fe0e7 | 721 | else if (tlb_type == cheetah || tlb_type == cheetah_plus) |
1da177e4 | 722 | cheetah_xcall_deliver(data0, data1, data2, mask); |
a43fe0e7 DM |
723 | else |
724 | hypervisor_xcall_deliver(data0, data1, data2, mask); | |
1da177e4 LT |
725 | /* NOTE: Caller runs local copy on master. */ |
726 | ||
727 | put_cpu(); | |
728 | } | |
729 | ||
730 | extern unsigned long xcall_sync_tick; | |
731 | ||
732 | static void smp_start_sync_tick_client(int cpu) | |
733 | { | |
734 | cpumask_t mask = cpumask_of_cpu(cpu); | |
735 | ||
736 | smp_cross_call_masked(&xcall_sync_tick, | |
737 | 0, 0, 0, mask); | |
738 | } | |
739 | ||
740 | /* Send cross call to all processors except self. */ | |
741 | #define smp_cross_call(func, ctx, data1, data2) \ | |
742 | smp_cross_call_masked(func, ctx, data1, data2, cpu_online_map) | |
743 | ||
744 | struct call_data_struct { | |
745 | void (*func) (void *info); | |
746 | void *info; | |
747 | atomic_t finished; | |
748 | int wait; | |
749 | }; | |
750 | ||
aa1d1a0a | 751 | static __cacheline_aligned_in_smp DEFINE_SPINLOCK(call_lock); |
1da177e4 LT |
752 | static struct call_data_struct *call_data; |
753 | ||
754 | extern unsigned long xcall_call_function; | |
755 | ||
aa1d1a0a DM |
756 | /** |
757 | * smp_call_function(): Run a function on all other CPUs. | |
758 | * @func: The function to run. This must be fast and non-blocking. | |
759 | * @info: An arbitrary pointer to pass to the function. | |
760 | * @nonatomic: currently unused. | |
761 | * @wait: If true, wait (atomically) until function has completed on other CPUs. | |
762 | * | |
763 | * Returns 0 on success, else a negative status code. Does not return until | |
764 | * remote CPUs are nearly ready to execute <<func>> or have already executed it. | |
765 | * | |
1da177e4 LT |
766 | * You must not call this function with disabled interrupts or from a |
767 | * hardware interrupt handler or from a bottom half handler. | |
768 | */ | |
bd40791e DM |
769 | static int smp_call_function_mask(void (*func)(void *info), void *info, |
770 | int nonatomic, int wait, cpumask_t mask) | |
1da177e4 LT |
771 | { |
772 | struct call_data_struct data; | |
ee29074d | 773 | int cpus; |
1da177e4 | 774 | |
1da177e4 LT |
775 | /* Can deadlock when called with interrupts disabled */ |
776 | WARN_ON(irqs_disabled()); | |
777 | ||
778 | data.func = func; | |
779 | data.info = info; | |
780 | atomic_set(&data.finished, 0); | |
781 | data.wait = wait; | |
782 | ||
783 | spin_lock(&call_lock); | |
784 | ||
ee29074d DM |
785 | cpu_clear(smp_processor_id(), mask); |
786 | cpus = cpus_weight(mask); | |
787 | if (!cpus) | |
788 | goto out_unlock; | |
789 | ||
1da177e4 | 790 | call_data = &data; |
aa1d1a0a | 791 | mb(); |
1da177e4 | 792 | |
bd40791e | 793 | smp_cross_call_masked(&xcall_call_function, 0, 0, 0, mask); |
1da177e4 | 794 | |
aa1d1a0a DM |
795 | /* Wait for response */ |
796 | while (atomic_read(&data.finished) != cpus) | |
797 | cpu_relax(); | |
1da177e4 | 798 | |
ee29074d | 799 | out_unlock: |
1da177e4 LT |
800 | spin_unlock(&call_lock); |
801 | ||
802 | return 0; | |
1da177e4 LT |
803 | } |
804 | ||
bd40791e DM |
805 | int smp_call_function(void (*func)(void *info), void *info, |
806 | int nonatomic, int wait) | |
807 | { | |
808 | return smp_call_function_mask(func, info, nonatomic, wait, | |
809 | cpu_online_map); | |
810 | } | |
811 | ||
1da177e4 LT |
812 | void smp_call_function_client(int irq, struct pt_regs *regs) |
813 | { | |
814 | void (*func) (void *info) = call_data->func; | |
815 | void *info = call_data->info; | |
816 | ||
817 | clear_softint(1 << irq); | |
818 | if (call_data->wait) { | |
819 | /* let initiator proceed only after completion */ | |
820 | func(info); | |
821 | atomic_inc(&call_data->finished); | |
822 | } else { | |
823 | /* let initiator proceed after getting data */ | |
824 | atomic_inc(&call_data->finished); | |
825 | func(info); | |
826 | } | |
827 | } | |
828 | ||
bd40791e DM |
829 | static void tsb_sync(void *info) |
830 | { | |
6f25f398 | 831 | struct trap_per_cpu *tp = &trap_block[raw_smp_processor_id()]; |
bd40791e DM |
832 | struct mm_struct *mm = info; |
833 | ||
6f25f398 DM |
834 | /* It is not valid to test "current->active_mm == mm" here. |
835 | * | |
836 | * The value of "current" is not changed atomically with | |
837 | * switch_mm(). But that's OK, we just need to check the | |
838 | * current cpu's trap block PGD physical address. | |
839 | */ | |
840 | if (tp->pgd_paddr == __pa(mm->pgd)) | |
bd40791e DM |
841 | tsb_context_switch(mm); |
842 | } | |
843 | ||
844 | void smp_tsb_sync(struct mm_struct *mm) | |
845 | { | |
846 | smp_call_function_mask(tsb_sync, mm, 0, 1, mm->cpu_vm_mask); | |
847 | } | |
848 | ||
1da177e4 LT |
849 | extern unsigned long xcall_flush_tlb_mm; |
850 | extern unsigned long xcall_flush_tlb_pending; | |
851 | extern unsigned long xcall_flush_tlb_kernel_range; | |
1da177e4 LT |
852 | extern unsigned long xcall_report_regs; |
853 | extern unsigned long xcall_receive_signal; | |
ee29074d | 854 | extern unsigned long xcall_new_mmu_context_version; |
1da177e4 LT |
855 | |
856 | #ifdef DCACHE_ALIASING_POSSIBLE | |
857 | extern unsigned long xcall_flush_dcache_page_cheetah; | |
858 | #endif | |
859 | extern unsigned long xcall_flush_dcache_page_spitfire; | |
860 | ||
861 | #ifdef CONFIG_DEBUG_DCFLUSH | |
862 | extern atomic_t dcpage_flushes; | |
863 | extern atomic_t dcpage_flushes_xcall; | |
864 | #endif | |
865 | ||
866 | static __inline__ void __local_flush_dcache_page(struct page *page) | |
867 | { | |
868 | #ifdef DCACHE_ALIASING_POSSIBLE | |
869 | __flush_dcache_page(page_address(page), | |
870 | ((tlb_type == spitfire) && | |
871 | page_mapping(page) != NULL)); | |
872 | #else | |
873 | if (page_mapping(page) != NULL && | |
874 | tlb_type == spitfire) | |
875 | __flush_icache_page(__pa(page_address(page))); | |
876 | #endif | |
877 | } | |
878 | ||
879 | void smp_flush_dcache_page_impl(struct page *page, int cpu) | |
880 | { | |
881 | cpumask_t mask = cpumask_of_cpu(cpu); | |
a43fe0e7 DM |
882 | int this_cpu; |
883 | ||
884 | if (tlb_type == hypervisor) | |
885 | return; | |
1da177e4 LT |
886 | |
887 | #ifdef CONFIG_DEBUG_DCFLUSH | |
888 | atomic_inc(&dcpage_flushes); | |
889 | #endif | |
a43fe0e7 DM |
890 | |
891 | this_cpu = get_cpu(); | |
892 | ||
1da177e4 LT |
893 | if (cpu == this_cpu) { |
894 | __local_flush_dcache_page(page); | |
895 | } else if (cpu_online(cpu)) { | |
896 | void *pg_addr = page_address(page); | |
897 | u64 data0; | |
898 | ||
899 | if (tlb_type == spitfire) { | |
900 | data0 = | |
901 | ((u64)&xcall_flush_dcache_page_spitfire); | |
902 | if (page_mapping(page) != NULL) | |
903 | data0 |= ((u64)1 << 32); | |
904 | spitfire_xcall_deliver(data0, | |
905 | __pa(pg_addr), | |
906 | (u64) pg_addr, | |
907 | mask); | |
a43fe0e7 | 908 | } else if (tlb_type == cheetah || tlb_type == cheetah_plus) { |
1da177e4 LT |
909 | #ifdef DCACHE_ALIASING_POSSIBLE |
910 | data0 = | |
911 | ((u64)&xcall_flush_dcache_page_cheetah); | |
912 | cheetah_xcall_deliver(data0, | |
913 | __pa(pg_addr), | |
914 | 0, mask); | |
915 | #endif | |
916 | } | |
917 | #ifdef CONFIG_DEBUG_DCFLUSH | |
918 | atomic_inc(&dcpage_flushes_xcall); | |
919 | #endif | |
920 | } | |
921 | ||
922 | put_cpu(); | |
923 | } | |
924 | ||
925 | void flush_dcache_page_all(struct mm_struct *mm, struct page *page) | |
926 | { | |
927 | void *pg_addr = page_address(page); | |
928 | cpumask_t mask = cpu_online_map; | |
929 | u64 data0; | |
a43fe0e7 DM |
930 | int this_cpu; |
931 | ||
932 | if (tlb_type == hypervisor) | |
933 | return; | |
934 | ||
935 | this_cpu = get_cpu(); | |
1da177e4 LT |
936 | |
937 | cpu_clear(this_cpu, mask); | |
938 | ||
939 | #ifdef CONFIG_DEBUG_DCFLUSH | |
940 | atomic_inc(&dcpage_flushes); | |
941 | #endif | |
942 | if (cpus_empty(mask)) | |
943 | goto flush_self; | |
944 | if (tlb_type == spitfire) { | |
945 | data0 = ((u64)&xcall_flush_dcache_page_spitfire); | |
946 | if (page_mapping(page) != NULL) | |
947 | data0 |= ((u64)1 << 32); | |
948 | spitfire_xcall_deliver(data0, | |
949 | __pa(pg_addr), | |
950 | (u64) pg_addr, | |
951 | mask); | |
a43fe0e7 | 952 | } else if (tlb_type == cheetah || tlb_type == cheetah_plus) { |
1da177e4 LT |
953 | #ifdef DCACHE_ALIASING_POSSIBLE |
954 | data0 = ((u64)&xcall_flush_dcache_page_cheetah); | |
955 | cheetah_xcall_deliver(data0, | |
956 | __pa(pg_addr), | |
957 | 0, mask); | |
958 | #endif | |
959 | } | |
960 | #ifdef CONFIG_DEBUG_DCFLUSH | |
961 | atomic_inc(&dcpage_flushes_xcall); | |
962 | #endif | |
963 | flush_self: | |
964 | __local_flush_dcache_page(page); | |
965 | ||
966 | put_cpu(); | |
967 | } | |
968 | ||
a0663a79 DM |
969 | static void __smp_receive_signal_mask(cpumask_t mask) |
970 | { | |
971 | smp_cross_call_masked(&xcall_receive_signal, 0, 0, 0, mask); | |
972 | } | |
973 | ||
1da177e4 LT |
974 | void smp_receive_signal(int cpu) |
975 | { | |
976 | cpumask_t mask = cpumask_of_cpu(cpu); | |
977 | ||
a0663a79 DM |
978 | if (cpu_online(cpu)) |
979 | __smp_receive_signal_mask(mask); | |
1da177e4 LT |
980 | } |
981 | ||
982 | void smp_receive_signal_client(int irq, struct pt_regs *regs) | |
ee29074d DM |
983 | { |
984 | clear_softint(1 << irq); | |
985 | } | |
986 | ||
987 | void smp_new_mmu_context_version_client(int irq, struct pt_regs *regs) | |
1da177e4 | 988 | { |
a0663a79 | 989 | struct mm_struct *mm; |
ee29074d | 990 | unsigned long flags; |
a0663a79 | 991 | |
1da177e4 | 992 | clear_softint(1 << irq); |
a0663a79 DM |
993 | |
994 | /* See if we need to allocate a new TLB context because | |
995 | * the version of the one we are using is now out of date. | |
996 | */ | |
997 | mm = current->active_mm; | |
ee29074d DM |
998 | if (unlikely(!mm || (mm == &init_mm))) |
999 | return; | |
a0663a79 | 1000 | |
ee29074d | 1001 | spin_lock_irqsave(&mm->context.lock, flags); |
aac0aadf | 1002 | |
ee29074d DM |
1003 | if (unlikely(!CTX_VALID(mm->context))) |
1004 | get_new_mmu_context(mm); | |
aac0aadf | 1005 | |
ee29074d | 1006 | spin_unlock_irqrestore(&mm->context.lock, flags); |
aac0aadf | 1007 | |
ee29074d DM |
1008 | load_secondary_context(mm); |
1009 | __flush_tlb_mm(CTX_HWBITS(mm->context), | |
1010 | SECONDARY_CONTEXT); | |
a0663a79 DM |
1011 | } |
1012 | ||
1013 | void smp_new_mmu_context_version(void) | |
1014 | { | |
ee29074d | 1015 | smp_cross_call(&xcall_new_mmu_context_version, 0, 0, 0); |
1da177e4 LT |
1016 | } |
1017 | ||
1018 | void smp_report_regs(void) | |
1019 | { | |
1020 | smp_cross_call(&xcall_report_regs, 0, 0, 0); | |
1021 | } | |
1022 | ||
1da177e4 LT |
1023 | /* We know that the window frames of the user have been flushed |
1024 | * to the stack before we get here because all callers of us | |
1025 | * are flush_tlb_*() routines, and these run after flush_cache_*() | |
1026 | * which performs the flushw. | |
1027 | * | |
1028 | * The SMP TLB coherency scheme we use works as follows: | |
1029 | * | |
1030 | * 1) mm->cpu_vm_mask is a bit mask of which cpus an address | |
1031 | * space has (potentially) executed on, this is the heuristic | |
1032 | * we use to avoid doing cross calls. | |
1033 | * | |
1034 | * Also, for flushing from kswapd and also for clones, we | |
1035 | * use cpu_vm_mask as the list of cpus to make run the TLB. | |
1036 | * | |
1037 | * 2) TLB context numbers are shared globally across all processors | |
1038 | * in the system, this allows us to play several games to avoid | |
1039 | * cross calls. | |
1040 | * | |
1041 | * One invariant is that when a cpu switches to a process, and | |
1042 | * that process's tsk->active_mm->cpu_vm_mask does not have the | |
1043 | * current cpu's bit set, that tlb context is flushed locally. | |
1044 | * | |
1045 | * If the address space is non-shared (ie. mm->count == 1) we avoid | |
1046 | * cross calls when we want to flush the currently running process's | |
1047 | * tlb state. This is done by clearing all cpu bits except the current | |
1048 | * processor's in current->active_mm->cpu_vm_mask and performing the | |
1049 | * flush locally only. This will force any subsequent cpus which run | |
1050 | * this task to flush the context from the local tlb if the process | |
1051 | * migrates to another cpu (again). | |
1052 | * | |
1053 | * 3) For shared address spaces (threads) and swapping we bite the | |
1054 | * bullet for most cases and perform the cross call (but only to | |
1055 | * the cpus listed in cpu_vm_mask). | |
1056 | * | |
1057 | * The performance gain from "optimizing" away the cross call for threads is | |
1058 | * questionable (in theory the big win for threads is the massive sharing of | |
1059 | * address space state across processors). | |
1060 | */ | |
62dbec78 DM |
1061 | |
1062 | /* This currently is only used by the hugetlb arch pre-fault | |
1063 | * hook on UltraSPARC-III+ and later when changing the pagesize | |
1064 | * bits of the context register for an address space. | |
1065 | */ | |
1da177e4 LT |
1066 | void smp_flush_tlb_mm(struct mm_struct *mm) |
1067 | { | |
62dbec78 DM |
1068 | u32 ctx = CTX_HWBITS(mm->context); |
1069 | int cpu = get_cpu(); | |
1da177e4 | 1070 | |
62dbec78 DM |
1071 | if (atomic_read(&mm->mm_users) == 1) { |
1072 | mm->cpu_vm_mask = cpumask_of_cpu(cpu); | |
1073 | goto local_flush_and_out; | |
1074 | } | |
1da177e4 | 1075 | |
62dbec78 DM |
1076 | smp_cross_call_masked(&xcall_flush_tlb_mm, |
1077 | ctx, 0, 0, | |
1078 | mm->cpu_vm_mask); | |
1da177e4 | 1079 | |
62dbec78 DM |
1080 | local_flush_and_out: |
1081 | __flush_tlb_mm(ctx, SECONDARY_CONTEXT); | |
1da177e4 | 1082 | |
62dbec78 | 1083 | put_cpu(); |
1da177e4 LT |
1084 | } |
1085 | ||
1086 | void smp_flush_tlb_pending(struct mm_struct *mm, unsigned long nr, unsigned long *vaddrs) | |
1087 | { | |
1088 | u32 ctx = CTX_HWBITS(mm->context); | |
1089 | int cpu = get_cpu(); | |
1090 | ||
dedeb002 | 1091 | if (mm == current->active_mm && atomic_read(&mm->mm_users) == 1) |
1da177e4 | 1092 | mm->cpu_vm_mask = cpumask_of_cpu(cpu); |
dedeb002 HD |
1093 | else |
1094 | smp_cross_call_masked(&xcall_flush_tlb_pending, | |
1095 | ctx, nr, (unsigned long) vaddrs, | |
1096 | mm->cpu_vm_mask); | |
1da177e4 | 1097 | |
1da177e4 LT |
1098 | __flush_tlb_pending(ctx, nr, vaddrs); |
1099 | ||
1100 | put_cpu(); | |
1101 | } | |
1102 | ||
1103 | void smp_flush_tlb_kernel_range(unsigned long start, unsigned long end) | |
1104 | { | |
1105 | start &= PAGE_MASK; | |
1106 | end = PAGE_ALIGN(end); | |
1107 | if (start != end) { | |
1108 | smp_cross_call(&xcall_flush_tlb_kernel_range, | |
1109 | 0, start, end); | |
1110 | ||
1111 | __flush_tlb_kernel_range(start, end); | |
1112 | } | |
1113 | } | |
1114 | ||
1115 | /* CPU capture. */ | |
1116 | /* #define CAPTURE_DEBUG */ | |
1117 | extern unsigned long xcall_capture; | |
1118 | ||
1119 | static atomic_t smp_capture_depth = ATOMIC_INIT(0); | |
1120 | static atomic_t smp_capture_registry = ATOMIC_INIT(0); | |
1121 | static unsigned long penguins_are_doing_time; | |
1122 | ||
1123 | void smp_capture(void) | |
1124 | { | |
1125 | int result = atomic_add_ret(1, &smp_capture_depth); | |
1126 | ||
1127 | if (result == 1) { | |
1128 | int ncpus = num_online_cpus(); | |
1129 | ||
1130 | #ifdef CAPTURE_DEBUG | |
1131 | printk("CPU[%d]: Sending penguins to jail...", | |
1132 | smp_processor_id()); | |
1133 | #endif | |
1134 | penguins_are_doing_time = 1; | |
4f07118f | 1135 | membar_storestore_loadstore(); |
1da177e4 LT |
1136 | atomic_inc(&smp_capture_registry); |
1137 | smp_cross_call(&xcall_capture, 0, 0, 0); | |
1138 | while (atomic_read(&smp_capture_registry) != ncpus) | |
4f07118f | 1139 | rmb(); |
1da177e4 LT |
1140 | #ifdef CAPTURE_DEBUG |
1141 | printk("done\n"); | |
1142 | #endif | |
1143 | } | |
1144 | } | |
1145 | ||
1146 | void smp_release(void) | |
1147 | { | |
1148 | if (atomic_dec_and_test(&smp_capture_depth)) { | |
1149 | #ifdef CAPTURE_DEBUG | |
1150 | printk("CPU[%d]: Giving pardon to " | |
1151 | "imprisoned penguins\n", | |
1152 | smp_processor_id()); | |
1153 | #endif | |
1154 | penguins_are_doing_time = 0; | |
4f07118f | 1155 | membar_storeload_storestore(); |
1da177e4 LT |
1156 | atomic_dec(&smp_capture_registry); |
1157 | } | |
1158 | } | |
1159 | ||
1160 | /* Imprisoned penguins run with %pil == 15, but PSTATE_IE set, so they | |
1161 | * can service tlb flush xcalls... | |
1162 | */ | |
1163 | extern void prom_world(int); | |
96c6e0d8 | 1164 | |
1da177e4 LT |
1165 | void smp_penguin_jailcell(int irq, struct pt_regs *regs) |
1166 | { | |
1da177e4 LT |
1167 | clear_softint(1 << irq); |
1168 | ||
1169 | preempt_disable(); | |
1170 | ||
1171 | __asm__ __volatile__("flushw"); | |
1da177e4 LT |
1172 | prom_world(1); |
1173 | atomic_inc(&smp_capture_registry); | |
4f07118f | 1174 | membar_storeload_storestore(); |
1da177e4 | 1175 | while (penguins_are_doing_time) |
4f07118f | 1176 | rmb(); |
1da177e4 LT |
1177 | atomic_dec(&smp_capture_registry); |
1178 | prom_world(0); | |
1179 | ||
1180 | preempt_enable(); | |
1181 | } | |
1182 | ||
1da177e4 LT |
1183 | #define prof_multiplier(__cpu) cpu_data(__cpu).multiplier |
1184 | #define prof_counter(__cpu) cpu_data(__cpu).counter | |
1185 | ||
1186 | void smp_percpu_timer_interrupt(struct pt_regs *regs) | |
1187 | { | |
1188 | unsigned long compare, tick, pstate; | |
1189 | int cpu = smp_processor_id(); | |
1190 | int user = user_mode(regs); | |
6d24c8dc | 1191 | struct pt_regs *old_regs; |
1da177e4 LT |
1192 | |
1193 | /* | |
1194 | * Check for level 14 softint. | |
1195 | */ | |
1196 | { | |
1197 | unsigned long tick_mask = tick_ops->softint_mask; | |
1198 | ||
1199 | if (!(get_softint() & tick_mask)) { | |
1200 | extern void handler_irq(int, struct pt_regs *); | |
1201 | ||
1202 | handler_irq(14, regs); | |
1203 | return; | |
1204 | } | |
1205 | clear_softint(tick_mask); | |
1206 | } | |
1207 | ||
6d24c8dc | 1208 | old_regs = set_irq_regs(regs); |
1da177e4 | 1209 | do { |
6d24c8dc | 1210 | profile_tick(CPU_PROFILING); |
1da177e4 LT |
1211 | if (!--prof_counter(cpu)) { |
1212 | irq_enter(); | |
1213 | ||
1214 | if (cpu == boot_cpu_id) { | |
1215 | kstat_this_cpu.irqs[0]++; | |
1216 | timer_tick_interrupt(regs); | |
1217 | } | |
1218 | ||
1219 | update_process_times(user); | |
1220 | ||
1221 | irq_exit(); | |
1222 | ||
1223 | prof_counter(cpu) = prof_multiplier(cpu); | |
1224 | } | |
1225 | ||
1226 | /* Guarantee that the following sequences execute | |
1227 | * uninterrupted. | |
1228 | */ | |
1229 | __asm__ __volatile__("rdpr %%pstate, %0\n\t" | |
1230 | "wrpr %0, %1, %%pstate" | |
1231 | : "=r" (pstate) | |
1232 | : "i" (PSTATE_IE)); | |
1233 | ||
1234 | compare = tick_ops->add_compare(current_tick_offset); | |
1235 | tick = tick_ops->get_tick(); | |
1236 | ||
1237 | /* Restore PSTATE_IE. */ | |
1238 | __asm__ __volatile__("wrpr %0, 0x0, %%pstate" | |
1239 | : /* no outputs */ | |
1240 | : "r" (pstate)); | |
1241 | } while (time_after_eq(tick, compare)); | |
6d24c8dc | 1242 | set_irq_regs(old_regs); |
1da177e4 LT |
1243 | } |
1244 | ||
1245 | static void __init smp_setup_percpu_timer(void) | |
1246 | { | |
1247 | int cpu = smp_processor_id(); | |
1248 | unsigned long pstate; | |
1249 | ||
1250 | prof_counter(cpu) = prof_multiplier(cpu) = 1; | |
1251 | ||
1252 | /* Guarantee that the following sequences execute | |
1253 | * uninterrupted. | |
1254 | */ | |
1255 | __asm__ __volatile__("rdpr %%pstate, %0\n\t" | |
1256 | "wrpr %0, %1, %%pstate" | |
1257 | : "=r" (pstate) | |
1258 | : "i" (PSTATE_IE)); | |
1259 | ||
1260 | tick_ops->init_tick(current_tick_offset); | |
1261 | ||
1262 | /* Restore PSTATE_IE. */ | |
1263 | __asm__ __volatile__("wrpr %0, 0x0, %%pstate" | |
1264 | : /* no outputs */ | |
1265 | : "r" (pstate)); | |
1266 | } | |
1267 | ||
1268 | void __init smp_tick_init(void) | |
1269 | { | |
1270 | boot_cpu_id = hard_smp_processor_id(); | |
1271 | current_tick_offset = timer_tick_offset; | |
1272 | ||
1da177e4 LT |
1273 | prof_counter(boot_cpu_id) = prof_multiplier(boot_cpu_id) = 1; |
1274 | } | |
1275 | ||
1276 | /* /proc/profile writes can call this, don't __init it please. */ | |
1277 | static DEFINE_SPINLOCK(prof_setup_lock); | |
1278 | ||
1279 | int setup_profiling_timer(unsigned int multiplier) | |
1280 | { | |
1281 | unsigned long flags; | |
1282 | int i; | |
1283 | ||
1284 | if ((!multiplier) || (timer_tick_offset / multiplier) < 1000) | |
1285 | return -EINVAL; | |
1286 | ||
1287 | spin_lock_irqsave(&prof_setup_lock, flags); | |
a283a525 | 1288 | for_each_possible_cpu(i) |
1da177e4 LT |
1289 | prof_multiplier(i) = multiplier; |
1290 | current_tick_offset = (timer_tick_offset / multiplier); | |
1291 | spin_unlock_irqrestore(&prof_setup_lock, flags); | |
1292 | ||
1293 | return 0; | |
1294 | } | |
1295 | ||
9145bcf6 DM |
1296 | static void __init smp_tune_scheduling(void) |
1297 | { | |
07f8e5f3 DM |
1298 | struct device_node *dp; |
1299 | int instance; | |
9145bcf6 DM |
1300 | unsigned int def, smallest = ~0U; |
1301 | ||
1302 | def = ((tlb_type == hypervisor) ? | |
1303 | (3 * 1024 * 1024) : | |
1304 | (4 * 1024 * 1024)); | |
1305 | ||
1306 | instance = 0; | |
07f8e5f3 | 1307 | while (!cpu_find_by_instance(instance, &dp, NULL)) { |
9145bcf6 DM |
1308 | unsigned int val; |
1309 | ||
07f8e5f3 | 1310 | val = of_getintprop_default(dp, "ecache-size", def); |
9145bcf6 DM |
1311 | if (val < smallest) |
1312 | smallest = val; | |
1313 | ||
1314 | instance++; | |
1315 | } | |
1316 | ||
1317 | /* Any value less than 256K is nonsense. */ | |
1318 | if (smallest < (256U * 1024U)) | |
1319 | smallest = 256 * 1024; | |
1320 | ||
1321 | max_cache_size = smallest; | |
1322 | ||
1323 | if (smallest < 1U * 1024U * 1024U) | |
1324 | printk(KERN_INFO "Using max_cache_size of %uKB\n", | |
1325 | smallest / 1024U); | |
1326 | else | |
1327 | printk(KERN_INFO "Using max_cache_size of %uMB\n", | |
1328 | smallest / 1024U / 1024U); | |
1329 | } | |
1330 | ||
7abea921 | 1331 | /* Constrain the number of cpus to max_cpus. */ |
1da177e4 LT |
1332 | void __init smp_prepare_cpus(unsigned int max_cpus) |
1333 | { | |
8935dced DM |
1334 | int i; |
1335 | ||
1da177e4 | 1336 | if (num_possible_cpus() > max_cpus) { |
7abea921 DM |
1337 | int instance, mid; |
1338 | ||
1da177e4 LT |
1339 | instance = 0; |
1340 | while (!cpu_find_by_instance(instance, NULL, &mid)) { | |
1341 | if (mid != boot_cpu_id) { | |
1342 | cpu_clear(mid, phys_cpu_present_map); | |
7d3aee9a | 1343 | cpu_clear(mid, cpu_present_map); |
1da177e4 LT |
1344 | if (num_possible_cpus() <= max_cpus) |
1345 | break; | |
1346 | } | |
1347 | instance++; | |
1348 | } | |
1349 | } | |
1350 | ||
a283a525 | 1351 | for_each_possible_cpu(i) { |
8935dced DM |
1352 | if (tlb_type == hypervisor) { |
1353 | int j; | |
1354 | ||
1355 | /* XXX get this mapping from machine description */ | |
a283a525 | 1356 | for_each_possible_cpu(j) { |
8935dced DM |
1357 | if ((j >> 2) == (i >> 2)) |
1358 | cpu_set(j, cpu_sibling_map[i]); | |
1359 | } | |
1360 | } else { | |
1361 | cpu_set(i, cpu_sibling_map[i]); | |
1362 | } | |
1363 | } | |
1364 | ||
1da177e4 | 1365 | smp_store_cpu_info(boot_cpu_id); |
9145bcf6 | 1366 | smp_tune_scheduling(); |
1da177e4 LT |
1367 | } |
1368 | ||
7abea921 DM |
1369 | /* Set this up early so that things like the scheduler can init |
1370 | * properly. We use the same cpu mask for both the present and | |
1371 | * possible cpu map. | |
1372 | */ | |
1373 | void __init smp_setup_cpu_possible_map(void) | |
1374 | { | |
1375 | int instance, mid; | |
1376 | ||
1377 | instance = 0; | |
1378 | while (!cpu_find_by_instance(instance, NULL, &mid)) { | |
7d3aee9a | 1379 | if (mid < NR_CPUS) { |
7abea921 | 1380 | cpu_set(mid, phys_cpu_present_map); |
7d3aee9a DM |
1381 | cpu_set(mid, cpu_present_map); |
1382 | } | |
7abea921 DM |
1383 | instance++; |
1384 | } | |
1385 | } | |
1386 | ||
1da177e4 LT |
1387 | void __devinit smp_prepare_boot_cpu(void) |
1388 | { | |
1da177e4 LT |
1389 | } |
1390 | ||
b282b6f8 | 1391 | int __cpuinit __cpu_up(unsigned int cpu) |
1da177e4 LT |
1392 | { |
1393 | int ret = smp_boot_one_cpu(cpu); | |
1394 | ||
1395 | if (!ret) { | |
1396 | cpu_set(cpu, smp_commenced_mask); | |
1397 | while (!cpu_isset(cpu, cpu_online_map)) | |
1398 | mb(); | |
1399 | if (!cpu_isset(cpu, cpu_online_map)) { | |
1400 | ret = -ENODEV; | |
1401 | } else { | |
02fead75 DM |
1402 | /* On SUN4V, writes to %tick and %stick are |
1403 | * not allowed. | |
1404 | */ | |
1405 | if (tlb_type != hypervisor) | |
1406 | smp_synchronize_one_tick(cpu); | |
1da177e4 LT |
1407 | } |
1408 | } | |
1409 | return ret; | |
1410 | } | |
1411 | ||
1412 | void __init smp_cpus_done(unsigned int max_cpus) | |
1413 | { | |
1414 | unsigned long bogosum = 0; | |
1415 | int i; | |
1416 | ||
394e3902 AM |
1417 | for_each_online_cpu(i) |
1418 | bogosum += cpu_data(i).udelay_val; | |
1da177e4 LT |
1419 | printk("Total of %ld processors activated " |
1420 | "(%lu.%02lu BogoMIPS).\n", | |
1421 | (long) num_online_cpus(), | |
1422 | bogosum/(500000/HZ), | |
1423 | (bogosum/(5000/HZ))%100); | |
1424 | } | |
1425 | ||
1da177e4 LT |
1426 | void smp_send_reschedule(int cpu) |
1427 | { | |
64c7c8f8 | 1428 | smp_receive_signal(cpu); |
1da177e4 LT |
1429 | } |
1430 | ||
1431 | /* This is a nop because we capture all other cpus | |
1432 | * anyways when making the PROM active. | |
1433 | */ | |
1434 | void smp_send_stop(void) | |
1435 | { | |
1436 | } | |
1437 | ||
d369ddd2 DM |
1438 | unsigned long __per_cpu_base __read_mostly; |
1439 | unsigned long __per_cpu_shift __read_mostly; | |
1da177e4 LT |
1440 | |
1441 | EXPORT_SYMBOL(__per_cpu_base); | |
1442 | EXPORT_SYMBOL(__per_cpu_shift); | |
1443 | ||
1444 | void __init setup_per_cpu_areas(void) | |
1445 | { | |
1446 | unsigned long goal, size, i; | |
1447 | char *ptr; | |
1da177e4 LT |
1448 | |
1449 | /* Copy section for each CPU (we discard the original) */ | |
5a089006 DM |
1450 | goal = PERCPU_ENOUGH_ROOM; |
1451 | ||
1da177e4 LT |
1452 | __per_cpu_shift = 0; |
1453 | for (size = 1UL; size < goal; size <<= 1UL) | |
1454 | __per_cpu_shift++; | |
1455 | ||
56fb4df6 | 1456 | ptr = alloc_bootmem(size * NR_CPUS); |
1da177e4 LT |
1457 | |
1458 | __per_cpu_base = ptr - __per_cpu_start; | |
1459 | ||
1da177e4 LT |
1460 | for (i = 0; i < NR_CPUS; i++, ptr += size) |
1461 | memcpy(ptr, __per_cpu_start, __per_cpu_end - __per_cpu_start); | |
951bc82c DM |
1462 | |
1463 | /* Setup %g5 for the boot cpu. */ | |
1464 | __local_per_cpu_offset = __per_cpu_offset(smp_processor_id()); | |
1da177e4 | 1465 | } |
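
The loop above rounds PERCPU_ENOUGH_ROOM up to a power of two and records the exponent in __per_cpu_shift, so CPU n's copy of the per-cpu section lives at __per_cpu_base + (n << __per_cpu_shift); that is what the __per_cpu_offset() macro used in smp_callin() is assumed to expand to (it lives in asm-sparc64/percpu.h, not in this file). A minimal userspace sketch of the layout arithmetic, with a hypothetical goal value:

```c
/* Illustrative only -- the layout arithmetic behind setup_per_cpu_areas().
 * per_cpu_offset() below mirrors what the kernel's __per_cpu_offset()
 * macro is assumed to compute; 'goal' stands in for PERCPU_ENOUGH_ROOM.
 */
#include <stdio.h>

#define NR_CPUS 4

static unsigned long per_cpu_base;	/* kernel: ptr - __per_cpu_start */
static unsigned long per_cpu_shift;	/* log2 of the rounded-up size   */

static unsigned long per_cpu_offset(unsigned int cpu)
{
	return per_cpu_base + ((unsigned long)cpu << per_cpu_shift);
}

int main(void)
{
	unsigned long goal = 70000;	/* hypothetical PERCPU_ENOUGH_ROOM */
	unsigned long size;
	unsigned int cpu;

	for (size = 1UL; size < goal; size <<= 1UL)
		per_cpu_shift++;	/* size ends up 128K, shift ends up 17 */

	for (cpu = 0; cpu < NR_CPUS; cpu++)
		printf("cpu%u copy starts at base + %#lx\n",
		       cpu, per_cpu_offset(cpu));
	return 0;
}
```

Because the per-CPU stride is a power of two, each offset can be formed with a single shift-and-add, which is why the kernel stores a shift rather than the raw section size.
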