1 /*
2 * linux/arch/m32r/kernel/smp.c
3 *
4 * M32R SMP support routines.
5 *
6 * Copyright (c) 2001, 2002 Hitoshi Yamamoto
7 *
8 * Taken from i386 version.
9 * (c) 1995 Alan Cox, Building #3 <alan@redhat.com>
10 * (c) 1998-99, 2000 Ingo Molnar <mingo@redhat.com>
11 *
12 * This code is released under the GNU General Public License version 2 or
13 * later.
14 */
15
16 #undef DEBUG_SMP
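/* Define DEBUG_SMP instead to enable the extra sanity checks below. */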
17
18 #include <linux/irq.h>
19 #include <linux/interrupt.h>
20 #include <linux/sched.h>
21 #include <linux/spinlock.h>
22 #include <linux/mm.h>
23 #include <linux/smp.h>
24 #include <linux/profile.h>
25 #include <linux/cpu.h>
26
27 #include <asm/cacheflush.h>
28 #include <asm/pgalloc.h>
29 #include <asm/atomic.h>
30 #include <asm/io.h>
31 #include <asm/mmu_context.h>
32 #include <asm/m32r.h>
33
34 /*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*/
35 /* Data structures and variables */
36 /*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*/
37
38 /*
39 * For flush_cache_all()
40 */
41 static DEFINE_SPINLOCK(flushcache_lock);
42 static volatile unsigned long flushcache_cpumask = 0;
43
44 /*
45 * For flush_tlb_others()
46 */
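/*
 * Shared parameters for a TLB shootdown: the initiating CPU fills these in
 * under tlbstate_lock, sets the target bits in flush_cpumask and sends
 * 'INVALIDATE_TLB_IPI'; each target CPU clears its own bit from
 * flush_cpumask in smp_invalidate_interrupt() once its local flush is done.
 */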
47 static volatile cpumask_t flush_cpumask;
48 static struct mm_struct *flush_mm;
49 static struct vm_area_struct *flush_vma;
50 static volatile unsigned long flush_va;
51 static DEFINE_SPINLOCK(tlbstate_lock);
52 #define FLUSH_ALL 0xffffffff
53
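/* Per-CPU profiling state consumed by smp_local_timer_interrupt() below. */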
54 DECLARE_PER_CPU(int, prof_multiplier);
55 DECLARE_PER_CPU(int, prof_old_multiplier);
56 DECLARE_PER_CPU(int, prof_counter);
57
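/*
 * Per-IPI locks (defined elsewhere) used by send_IPI_mask_phys() to
 * serialize access to the IPICRi registers.
 */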
58 extern spinlock_t ipi_lock[];
59
60 /*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*/
61 /* Function Prototypes */
62 /*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*/
63
64 void smp_send_reschedule(int);
65 void smp_reschedule_interrupt(void);
66
67 void smp_flush_cache_all(void);
68 void smp_flush_cache_all_interrupt(void);
69
70 void smp_flush_tlb_all(void);
71 static void flush_tlb_all_ipi(void *);
72
73 void smp_flush_tlb_mm(struct mm_struct *);
74 void smp_flush_tlb_range(struct vm_area_struct *, unsigned long,
75 unsigned long);
76 void smp_flush_tlb_page(struct vm_area_struct *, unsigned long);
77 static void flush_tlb_others(cpumask_t, struct mm_struct *,
78 struct vm_area_struct *, unsigned long);
79 void smp_invalidate_interrupt(void);
80
81 void smp_send_stop(void);
82 static void stop_this_cpu(void *);
83
84 void smp_send_timer(void);
85 void smp_ipi_timer_interrupt(struct pt_regs *);
86 void smp_local_timer_interrupt(void);
87
88 static void send_IPI_allbutself(int, int);
89 static void send_IPI_mask(const struct cpumask *, int, int);
90 unsigned long send_IPI_mask_phys(cpumask_t, int, int);
91
92 /*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*/
93 /* Rescheduling request Routines */
94 /*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*/
95
96 /*==========================================================================*
97 * Name: smp_send_reschedule
98 *
99  * Description:  This routine requests another CPU to reschedule.
100  *               1. Send 'RESCHEDULE_IPI' to the target CPU, requesting it
101  *                  to execute 'smp_reschedule_interrupt()'.
102 *
103 * Born on Date: 2002.02.05
104 *
105 * Arguments: cpu_id - Target CPU ID
106 *
107 * Returns: void (cannot fail)
108 *
109 * Modification log:
110 * Date Who Description
111 * ---------- --- --------------------------------------------------------
112 *
113 *==========================================================================*/
114 void smp_send_reschedule(int cpu_id)
115 {
116 WARN_ON(cpu_is_offline(cpu_id));
117 send_IPI_mask(cpumask_of(cpu_id), RESCHEDULE_IPI, 1);
118 }
119
120 /*==========================================================================*
121 * Name: smp_reschedule_interrupt
122 *
123  * Description:  This routine executes on the CPU which received
124 * 'RESCHEDULE_IPI'.
125 * Rescheduling is processed at the exit of interrupt
126 * operation.
127 *
128 * Born on Date: 2002.02.05
129 *
130 * Arguments: NONE
131 *
132 * Returns: void (cannot fail)
133 *
134 * Modification log:
135 * Date Who Description
136 * ---------- --- --------------------------------------------------------
137 *
138 *==========================================================================*/
139 void smp_reschedule_interrupt(void)
140 {
141 /* nothing to do */
142 }
143
144 /*==========================================================================*
145 * Name: smp_flush_cache_all
146 *
147  * Description:  This routine sends an 'INVALIDATE_CACHE_IPI' to all other
148 * CPUs in the system.
149 *
150 * Born on Date: 2003-05-28
151 *
152 * Arguments: NONE
153 *
154 * Returns: void (cannot fail)
155 *
156 * Modification log:
157 * Date Who Description
158 * ---------- --- --------------------------------------------------------
159 *
160 *==========================================================================*/
161 void smp_flush_cache_all(void)
162 {
163 cpumask_t cpumask;
164 unsigned long *mask;
165
166 preempt_disable();
167 cpumask = cpu_online_map;
168 cpu_clear(smp_processor_id(), cpumask);
169 spin_lock(&flushcache_lock);
170         mask = cpus_addr(cpumask);
171 atomic_set_mask(*mask, (atomic_t *)&flushcache_cpumask);
172 send_IPI_mask(&cpumask, INVALIDATE_CACHE_IPI, 0);
173 _flush_cache_copyback_all();
174 while (flushcache_cpumask)
175 mb();
176 spin_unlock(&flushcache_lock);
177 preempt_enable();
178 }
179
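/*
 * Handler for 'INVALIDATE_CACHE_IPI': write back and invalidate the local
 * cache, then clear this CPU's bit in flushcache_cpumask so that the
 * initiator in smp_flush_cache_all() can stop waiting.
 */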
180 void smp_flush_cache_all_interrupt(void)
181 {
182 _flush_cache_copyback_all();
183 clear_bit(smp_processor_id(), &flushcache_cpumask);
184 }
185
186 /*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*/
187 /* TLB flush request Routines */
188 /*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*/
189
190 /*==========================================================================*
191 * Name: smp_flush_tlb_all
192 *
193  * Description:  This routine flushes all processes' TLBs.
194  *               1. Request the other CPUs to execute 'flush_tlb_all_ipi()'.
195  *               2. Flush the local TLB via '__flush_tlb_all()'.
196 *
197 * Born on Date: 2002.02.05
198 *
199 * Arguments: NONE
200 *
201 * Returns: void (cannot fail)
202 *
203 * Modification log:
204 * Date Who Description
205 * ---------- --- --------------------------------------------------------
206 *
207 *==========================================================================*/
208 void smp_flush_tlb_all(void)
209 {
210 unsigned long flags;
211
212 preempt_disable();
213 local_irq_save(flags);
214 __flush_tlb_all();
215 local_irq_restore(flags);
216 smp_call_function(flush_tlb_all_ipi, NULL, 1);
217 preempt_enable();
218 }
219
220 /*==========================================================================*
221 * Name: flush_tlb_all_ipi
222 *
223  * Description:  This routine flushes the local TLB.
224  *               1. Execute '__flush_tlb_all()'.
225 *
226 * Born on Date: 2002.02.05
227 *
228 * Arguments: *info - not used
229 *
230 * Returns: void (cannot fail)
231 *
232 * Modification log:
233 * Date Who Description
234 * ---------- --- --------------------------------------------------------
235 *
236 *==========================================================================*/
237 static void flush_tlb_all_ipi(void *info)
238 {
239 __flush_tlb_all();
240 }
241
242 /*==========================================================================*
243 * Name: smp_flush_tlb_mm
244 *
245  * Description:  This routine flushes the TLB entries of the specified mm context.
246 *
247 * Born on Date: 2002.02.05
248 *
249  * Arguments:    *mm - a pointer to the mm struct whose TLB entries are flushed
250 *
251 * Returns: void (cannot fail)
252 *
253 * Modification log:
254 * Date Who Description
255 * ---------- --- --------------------------------------------------------
256 *
257 *==========================================================================*/
258 void smp_flush_tlb_mm(struct mm_struct *mm)
259 {
260 int cpu_id;
261 cpumask_t cpu_mask;
262 unsigned long *mmc;
263 unsigned long flags;
264
265 preempt_disable();
266 cpu_id = smp_processor_id();
267 mmc = &mm->context[cpu_id];
268 cpu_mask = *mm_cpumask(mm);
269 cpu_clear(cpu_id, cpu_mask);
270
271 if (*mmc != NO_CONTEXT) {
272 local_irq_save(flags);
273 *mmc = NO_CONTEXT;
274 if (mm == current->mm)
275 activate_context(mm);
276 else
277 cpumask_clear_cpu(cpu_id, mm_cpumask(mm));
278 local_irq_restore(flags);
279 }
280 if (!cpus_empty(cpu_mask))
281 flush_tlb_others(cpu_mask, mm, NULL, FLUSH_ALL);
282
283 preempt_enable();
284 }
285
286 /*==========================================================================*
287 * Name: smp_flush_tlb_range
288 *
289  * Description:  This routine flushes a range of pages (by flushing the whole mm).
290 *
291 * Born on Date: 2002.02.05
292 *
293  * Arguments:    *vma - a pointer to the vma struct covering the range
294 * start - not used
295 * end - not used
296 *
297 * Returns: void (cannot fail)
298 *
299 * Modification log:
300 * Date Who Description
301 * ---------- --- --------------------------------------------------------
302 *
303 *==========================================================================*/
304 void smp_flush_tlb_range(struct vm_area_struct *vma, unsigned long start,
305 unsigned long end)
306 {
307 smp_flush_tlb_mm(vma->vm_mm);
308 }
309
310 /*==========================================================================*
311 * Name: smp_flush_tlb_page
312 *
313 * Description: This routine flushes one page.
314 *
315 * Born on Date: 2002.02.05
316 *
317  * Arguments:    *vma - a pointer to the vma struct that includes va
318  *               va - virtual address to flush from the TLB
319 *
320 * Returns: void (cannot fail)
321 *
322 * Modification log:
323 * Date Who Description
324 * ---------- --- --------------------------------------------------------
325 *
326 *==========================================================================*/
327 void smp_flush_tlb_page(struct vm_area_struct *vma, unsigned long va)
328 {
329 struct mm_struct *mm = vma->vm_mm;
330 int cpu_id;
331 cpumask_t cpu_mask;
332 unsigned long *mmc;
333 unsigned long flags;
334
335 preempt_disable();
336 cpu_id = smp_processor_id();
337 mmc = &mm->context[cpu_id];
338 cpu_mask = *mm_cpumask(mm);
339 cpu_clear(cpu_id, cpu_mask);
340
341 #ifdef DEBUG_SMP
342 if (!mm)
343 BUG();
344 #endif
345
346 if (*mmc != NO_CONTEXT) {
347 local_irq_save(flags);
348 va &= PAGE_MASK;
349 va |= (*mmc & MMU_CONTEXT_ASID_MASK);
350 __flush_tlb_page(va);
351 local_irq_restore(flags);
352 }
353 if (!cpus_empty(cpu_mask))
354 flush_tlb_others(cpu_mask, mm, vma, va);
355
356 preempt_enable();
357 }
358
359 /*==========================================================================*
360 * Name: flush_tlb_others
361 *
362  * Description:  This routine requests the other CPUs to flush their TLBs.
363  *               1. Set up the shared flush parameters.
364  *               2. Send 'INVALIDATE_TLB_IPI' to the target CPUs, requesting
365  *                  them to execute 'smp_invalidate_interrupt()'.
366  *               3. Wait until all target CPUs have finished.
367 *
368 * Born on Date: 2002.02.05
369 *
370 * Arguments: cpumask - bitmap of target CPUs
371  *               *mm - a pointer to the mm struct to flush the TLB for
372  *               *vma - a pointer to the vma struct that includes va
373  *               va - virtual address to flush from the TLB
374 *
375 * Returns: void (cannot fail)
376 *
377 * Modification log:
378 * Date Who Description
379 * ---------- --- --------------------------------------------------------
380 *
381 *==========================================================================*/
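/*
 * Outline of the shootdown handshake between flush_tlb_others() below and
 * smp_invalidate_interrupt() (initiator on the left, targets on the right):
 *
 *	spin_lock(&tlbstate_lock);
 *	flush_mm/flush_vma/flush_va = ...;
 *	set target bits in flush_cpumask;
 *	send INVALIDATE_TLB_IPI;           -->  flush the local TLB entry or
 *	while (!cpus_empty(flush_cpumask))      context, then clear own bit
 *		mb();                      <--  in flush_cpumask;
 *	clear flush_mm/flush_vma/flush_va;
 *	spin_unlock(&tlbstate_lock);
 */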
382 static void flush_tlb_others(cpumask_t cpumask, struct mm_struct *mm,
383 struct vm_area_struct *vma, unsigned long va)
384 {
385 unsigned long *mask;
386 #ifdef DEBUG_SMP
387 unsigned long flags;
388 __save_flags(flags);
389 if (!(flags & 0x0040)) /* Interrupt Disable NONONO */
390 BUG();
391 #endif /* DEBUG_SMP */
392
393 /*
394 * A couple of (to be removed) sanity checks:
395 *
396 * - we do not send IPIs to not-yet booted CPUs.
397 * - current CPU must not be in mask
398 * - mask must exist :)
399 */
400 BUG_ON(cpus_empty(cpumask));
401
402 BUG_ON(cpu_isset(smp_processor_id(), cpumask));
403 BUG_ON(!mm);
404
405 /* If a CPU which we ran on has gone down, OK. */
406 cpus_and(cpumask, cpumask, cpu_online_map);
407 if (cpus_empty(cpumask))
408 return;
409
410 /*
411 * i'm not happy about this global shared spinlock in the
412 * MM hot path, but we'll see how contended it is.
413 * Temporarily this turns IRQs off, so that lockups are
414 * detected by the NMI watchdog.
415 */
416 spin_lock(&tlbstate_lock);
417
418 flush_mm = mm;
419 flush_vma = vma;
420 flush_va = va;
421         mask = cpus_addr(cpumask);
422 atomic_set_mask(*mask, (atomic_t *)&flush_cpumask);
423
424 /*
425 * We have to send the IPI only to
426 * CPUs affected.
427 */
428 send_IPI_mask(&cpumask, INVALIDATE_TLB_IPI, 0);
429
430 while (!cpus_empty(flush_cpumask)) {
431 /* nothing. lockup detection does not belong here */
432 mb();
433 }
434
435 flush_mm = NULL;
436 flush_vma = NULL;
437 flush_va = 0;
438 spin_unlock(&tlbstate_lock);
439 }
440
441 /*==========================================================================*
442 * Name: smp_invalidate_interrupt
443 *
444  * Description:  This routine executes on the CPU which received
445  *               'INVALIDATE_TLB_IPI'.
446  *               1. Flush the local TLB.
447  *               2. Report that the TLB flush has finished.
448 *
449 * Born on Date: 2002.02.05
450 *
451 * Arguments: NONE
452 *
453 * Returns: void (cannot fail)
454 *
455 * Modification log:
456 * Date Who Description
457 * ---------- --- --------------------------------------------------------
458 *
459 *==========================================================================*/
460 void smp_invalidate_interrupt(void)
461 {
462 int cpu_id = smp_processor_id();
463 unsigned long *mmc = &flush_mm->context[cpu_id];
464
465 if (!cpu_isset(cpu_id, flush_cpumask))
466 return;
467
468 if (flush_va == FLUSH_ALL) {
469 *mmc = NO_CONTEXT;
470 if (flush_mm == current->active_mm)
471 activate_context(flush_mm);
472 else
473 cpumask_clear_cpu(cpu_id, mm_cpumask(flush_mm));
474 } else {
475 unsigned long va = flush_va;
476
477 if (*mmc != NO_CONTEXT) {
478 va &= PAGE_MASK;
479 va |= (*mmc & MMU_CONTEXT_ASID_MASK);
480 __flush_tlb_page(va);
481 }
482 }
483 cpu_clear(cpu_id, flush_cpumask);
484 }
485
486 /*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*/
487 /* Stop CPU request Routines */
488 /*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*/
489
490 /*==========================================================================*
491 * Name: smp_send_stop
492 *
493  * Description:  This routine requests all other CPUs to stop.
494  *               1. Request the other CPUs to execute 'stop_this_cpu()'.
495 *
496 * Born on Date: 2002.02.05
497 *
498 * Arguments: NONE
499 *
500 * Returns: void (cannot fail)
501 *
502 * Modification log:
503 * Date Who Description
504 * ---------- --- --------------------------------------------------------
505 *
506 *==========================================================================*/
507 void smp_send_stop(void)
508 {
509 smp_call_function(stop_this_cpu, NULL, 0);
510 }
511
512 /*==========================================================================*
513 * Name: stop_this_cpu
514 *
515  * Description:  This routine halts the CPU.
516 *
517 * Born on Date: 2002.02.05
518 *
519 * Arguments: NONE
520 *
521 * Returns: void (cannot fail)
522 *
523 * Modification log:
524 * Date Who Description
525 * ---------- --- --------------------------------------------------------
526 *
527 *==========================================================================*/
528 static void stop_this_cpu(void *dummy)
529 {
530 int cpu_id = smp_processor_id();
531
532 /*
533 * Remove this CPU:
534 */
535 cpu_clear(cpu_id, cpu_online_map);
536
537 /*
538 * PSW IE = 1;
539 * IMASK = 0;
540 * goto SLEEP
541 */
542 local_irq_disable();
543 outl(0, M32R_ICU_IMASK_PORTL);
544 inl(M32R_ICU_IMASK_PORTL); /* dummy read */
545 local_irq_enable();
546
547 for ( ; ; );
548 }
549
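/*
 * Arch hooks used by the generic smp_call_function*() code: deliver
 * 'CALL_FUNCTION_IPI' / 'CALL_FUNC_SINGLE_IPI' to the requested CPUs.
 */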
550 void arch_send_call_function_ipi_mask(const struct cpumask *mask)
551 {
552 send_IPI_mask(mask, CALL_FUNCTION_IPI, 0);
553 }
554
555 void arch_send_call_function_single_ipi(int cpu)
556 {
557 send_IPI_mask(cpumask_of(cpu), CALL_FUNC_SINGLE_IPI, 0);
558 }
559
560 /*==========================================================================*
561 * Name: smp_call_function_interrupt
562 *
563  * Description:  This routine executes on the CPU which received
564 * 'CALL_FUNCTION_IPI'.
565 *
566 * Born on Date: 2002.02.05
567 *
568 * Arguments: NONE
569 *
570 * Returns: void (cannot fail)
571 *
572 * Modification log:
573 * Date Who Description
574 * ---------- --- --------------------------------------------------------
575 *
576 *==========================================================================*/
577 void smp_call_function_interrupt(void)
578 {
579 irq_enter();
580 generic_smp_call_function_interrupt();
581 irq_exit();
582 }
583
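/*
 * Handler for 'CALL_FUNC_SINGLE_IPI': run the function queued by the
 * generic smp_call_function_single() code.
 */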
584 void smp_call_function_single_interrupt(void)
585 {
586 irq_enter();
587 generic_smp_call_function_single_interrupt();
588 irq_exit();
589 }
590
591 /*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*/
592 /* Timer Routines */
593 /*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*/
594
595 /*==========================================================================*
596 * Name: smp_send_timer
597 *
598 * Description: This routine sends a 'LOCAL_TIMER_IPI' to all other CPUs
599 * in the system.
600 *
601 * Born on Date: 2002.02.05
602 *
603 * Arguments: NONE
604 *
605 * Returns: void (cannot fail)
606 *
607 * Modification log:
608 * Date Who Description
609 * ---------- --- --------------------------------------------------------
610 *
611 *==========================================================================*/
612 void smp_send_timer(void)
613 {
614 send_IPI_allbutself(LOCAL_TIMER_IPI, 1);
615 }
616
617 /*==========================================================================*
618  * Name:         smp_ipi_timer_interrupt
619  *
620  * Description:  This routine executes on the CPU which received
621 * 'LOCAL_TIMER_IPI'.
622 *
623 * Born on Date: 2002.02.05
624 *
625  * Arguments:    *regs - a pointer to the saved register info
626 *
627 * Returns: void (cannot fail)
628 *
629 * Modification log:
630 * Date Who Description
631 * ---------- --- --------------------------------------------------------
632 *
633 *==========================================================================*/
634 void smp_ipi_timer_interrupt(struct pt_regs *regs)
635 {
636 struct pt_regs *old_regs;
637 old_regs = set_irq_regs(regs);
638 irq_enter();
639 smp_local_timer_interrupt();
640 irq_exit();
641 set_irq_regs(old_regs);
642 }
643
644 /*==========================================================================*
645 * Name: smp_local_timer_interrupt
646 *
647 * Description: Local timer interrupt handler. It does both profiling and
648 * process statistics/rescheduling.
649 * We do profiling in every local tick, statistics/rescheduling
650 * happen only every 'profiling multiplier' ticks. The default
651 * multiplier is 1 and it can be changed by writing the new
652 * multiplier value into /proc/profile.
653 *
654 * Born on Date: 2002.02.05
655 *
656  * Arguments:    NONE (the saved register info is taken from get_irq_regs())
657 *
658 * Returns: void (cannot fail)
659 *
660 * Original: arch/i386/kernel/apic.c
661 *
662 * Modification log:
663 * Date Who Description
664 * ---------- --- --------------------------------------------------------
665 * 2003-06-24 hy use per_cpu structure.
666 *==========================================================================*/
667 void smp_local_timer_interrupt(void)
668 {
669 int user = user_mode(get_irq_regs());
670 int cpu_id = smp_processor_id();
671
672 /*
673 * The profiling function is SMP safe. (nothing can mess
674 * around with "current", and the profiling counters are
675 * updated with atomic operations). This is especially
676 * useful with a profiling multiplier != 1
677 */
678
679 profile_tick(CPU_PROFILING);
680
681 if (--per_cpu(prof_counter, cpu_id) <= 0) {
682 /*
683 * The multiplier may have changed since the last time we got
684 * to this point as a result of the user writing to
685 * /proc/profile. In this case we need to adjust the APIC
686 * timer accordingly.
687 *
688 * Interrupts are already masked off at this point.
689 */
690 per_cpu(prof_counter, cpu_id)
691 = per_cpu(prof_multiplier, cpu_id);
692 if (per_cpu(prof_counter, cpu_id)
693 != per_cpu(prof_old_multiplier, cpu_id))
694 {
695 per_cpu(prof_old_multiplier, cpu_id)
696 = per_cpu(prof_counter, cpu_id);
697 }
698
699 update_process_times(user);
700 }
701 }
702
703 /*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*/
704 /* Send IPI Routines */
705 /*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*/
706
707 /*==========================================================================*
708 * Name: send_IPI_allbutself
709 *
710  * Description:  This routine sends an IPI to all other CPUs in the system.
711 *
712 * Born on Date: 2002.02.05
713 *
714  * Arguments:    ipi_num - IPI number
715  *               try - 0 : Send the IPI unconditionally (wait if needed).
716  *                    !0 : Do not send the IPI if the target CPU has
717  *                         not yet accepted the previous one.
718 *
719 * Returns: void (cannot fail)
720 *
721 * Modification log:
722 * Date Who Description
723 * ---------- --- --------------------------------------------------------
724 *
725 *==========================================================================*/
726 static void send_IPI_allbutself(int ipi_num, int try)
727 {
728 cpumask_t cpumask;
729
730 cpumask = cpu_online_map;
731 cpu_clear(smp_processor_id(), cpumask);
732
733 send_IPI_mask(&cpumask, ipi_num, try);
734 }
735
736 /*==========================================================================*
737 * Name: send_IPI_mask
738 *
739  * Description:  This routine sends an IPI to the CPUs in the given mask.
740 *
741 * Born on Date: 2002.02.05
742 *
743  * Arguments:    cpumask - Bitmap of the target CPUs' logical IDs
744  *               ipi_num - IPI number
745  *               try - 0 : Send the IPI unconditionally (wait if needed).
746  *                    !0 : Do not send the IPI if the target CPU has
747  *                         not yet accepted the previous one.
748 *
749 * Returns: void (cannot fail)
750 *
751 * Modification log:
752 * Date Who Description
753 * ---------- --- --------------------------------------------------------
754 *
755 *==========================================================================*/
756 static void send_IPI_mask(const struct cpumask *cpumask, int ipi_num, int try)
757 {
758 cpumask_t physid_mask, tmp;
759 int cpu_id, phys_id;
760 int num_cpus = num_online_cpus();
761
762 if (num_cpus <= 1) /* NO MP */
763 return;
764
765 cpumask_and(&tmp, cpumask, cpu_online_mask);
766 BUG_ON(!cpumask_equal(cpumask, &tmp));
767
768 physid_mask = CPU_MASK_NONE;
769 for_each_cpu(cpu_id, cpumask) {
770 if ((phys_id = cpu_to_physid(cpu_id)) != -1)
771 cpu_set(phys_id, physid_mask);
772 }
773
774 send_IPI_mask_phys(physid_mask, ipi_num, try);
775 }
776
777 /*==========================================================================*
778 * Name: send_IPI_mask_phys
779 *
780  * Description:  This routine sends an IPI to the CPUs specified by their
781 *
782 * Born on Date: 2002.02.05
783 *
784  * Arguments:    physid_mask - Bitmap of the target CPUs' physical IDs
785  *               ipi_num - IPI number
786  *               try - 0 : Send the IPI unconditionally (wait if needed).
787  *                    !0 : Do not send the IPI if the target CPU has
788  *                         not yet accepted the previous one.
789 *
790  * Returns:      IPICRi register value.
791 *
792 * Modification log:
793 * Date Who Description
794 * ---------- --- --------------------------------------------------------
795 *
796 *==========================================================================*/
797 unsigned long send_IPI_mask_phys(cpumask_t physid_mask, int ipi_num,
798 int try)
799 {
800 spinlock_t *ipilock;
801 volatile unsigned long *ipicr_addr;
802 unsigned long ipicr_val;
803 unsigned long my_physid_mask;
804 unsigned long mask = cpus_addr(physid_mask)[0];
805
806
807 if (mask & ~physids_coerce(phys_cpu_present_map))
808 BUG();
809 if (ipi_num >= NR_IPIS)
810 BUG();
811
812 mask <<= IPI_SHIFT;
813 ipilock = &ipi_lock[ipi_num];
814 ipicr_addr = (volatile unsigned long *)(M32R_ICU_IPICR_ADDR
815 + (ipi_num << 2));
816 my_physid_mask = ~(1 << smp_processor_id());
817
818 /*
819 * lock ipi_lock[i]
820 * check IPICRi == 0
821 * write IPICRi (send IPIi)
822 * unlock ipi_lock[i]
823 */
824 spin_lock(ipilock);
825 __asm__ __volatile__ (
826 ";; CHECK IPICRi == 0 \n\t"
827 ".fillinsn \n"
828 "1: \n\t"
829 "ld %0, @%1 \n\t"
830 "and %0, %4 \n\t"
831 "beqz %0, 2f \n\t"
832 "bnez %3, 3f \n\t"
833 "bra 1b \n\t"
834 ";; WRITE IPICRi (send IPIi) \n\t"
835 ".fillinsn \n"
836 "2: \n\t"
837 "st %2, @%1 \n\t"
838 ".fillinsn \n"
839 "3: \n\t"
840 : "=&r"(ipicr_val)
841 : "r"(ipicr_addr), "r"(mask), "r"(try), "r"(my_physid_mask)
842 : "memory"
843 );
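	/*
	 * Illustrative C rendition of the inline assembly above
	 * (an approximation for readability, not compiled):
	 *
	 *	again:
	 *		ipicr_val = *ipicr_addr & my_physid_mask;
	 *		if (ipicr_val == 0)
	 *			*ipicr_addr = mask;	// previous IPIs taken: send IPIi
	 *		else if (!try)
	 *			goto again;		// spin until IPICRi clears
	 *		// otherwise give up; the caller gets the still-pending
	 *		// bits back in ipicr_val
	 */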
844 spin_unlock(ipilock);
845
846 return ipicr_val;
847 }