/* irq.c: UltraSparc IRQ handling/init/registry.
 *
 * Copyright (C) 1997, 2007, 2008 David S. Miller (davem@davemloft.net)
 * Copyright (C) 1998  Eddie C. Dost    (ecd@skynet.be)
 * Copyright (C) 1998  Jakub Jelinek    (jj@ultra.linux.cz)
 */
#include <linux/module.h>
#include <linux/sched.h>
#include <linux/ptrace.h>
#include <linux/errno.h>
#include <linux/kernel_stat.h>
#include <linux/signal.h>
#include <linux/mm.h>
#include <linux/interrupt.h>
#include <linux/slab.h>
#include <linux/random.h>
#include <linux/init.h>
#include <linux/delay.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/bootmem.h>
#include <linux/irq.h>

#include <asm/ptrace.h>
#include <asm/processor.h>
#include <asm/atomic.h>
#include <asm/system.h>
#include <asm/irq.h>
#include <asm/io.h>
#include <asm/iommu.h>
#include <asm/upa.h>
#include <asm/oplib.h>
#include <asm/prom.h>
#include <asm/timer.h>
#include <asm/smp.h>
#include <asm/starfire.h>
#include <asm/uaccess.h>
#include <asm/cache.h>
#include <asm/cpudata.h>
#include <asm/auxio.h>
#include <asm/head.h>
#include <asm/hypervisor.h>
#include <asm/cacheflush.h>
#define NUM_IVECS	(IMAP_INR + 1)

struct ino_bucket *ivector_table;
unsigned long ivector_table_pa;
/* On several sun4u processors, it is illegal to mix bypass and
 * non-bypass accesses.  Therefore we access all INO buckets
 * using bypass accesses only.
 */
static unsigned long bucket_get_chain_pa(unsigned long bucket_pa)
{
        unsigned long ret;

        __asm__ __volatile__("ldxa	[%1] %2, %0"
                             : "=&r" (ret)
                             : "r" (bucket_pa +
                                    offsetof(struct ino_bucket,
                                             __irq_chain_pa)),
                               "i" (ASI_PHYS_USE_EC));

        return ret;
}
static void bucket_clear_chain_pa(unsigned long bucket_pa)
{
        __asm__ __volatile__("stxa	%%g0, [%0] %1"
                             : /* no outputs */
                             : "r" (bucket_pa +
                                    offsetof(struct ino_bucket,
                                             __irq_chain_pa)),
                               "i" (ASI_PHYS_USE_EC));
}
static unsigned int bucket_get_virt_irq(unsigned long bucket_pa)
{
        unsigned int ret;

        __asm__ __volatile__("lduwa	[%1] %2, %0"
                             : "=&r" (ret)
                             : "r" (bucket_pa +
                                    offsetof(struct ino_bucket,
                                             __virt_irq)),
                               "i" (ASI_PHYS_USE_EC));

        return ret;
}
static void bucket_set_virt_irq(unsigned long bucket_pa,
                                unsigned int virt_irq)
{
        __asm__ __volatile__("stwa	%0, [%1] %2"
                             : /* no outputs */
                             : "r" (virt_irq),
                               "r" (bucket_pa +
                                    offsetof(struct ino_bucket,
                                             __virt_irq)),
                               "i" (ASI_PHYS_USE_EC));
}
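
/* A minimal sketch (ours, not part of the original file) of what
 * bucket_get_virt_irq() would look like as an ordinary cacheable
 * load.  It is kept under #if 0 because mixing it with the bypass
 * accessors above is exactly what the comment before them forbids:
 */
#if 0
static unsigned int bucket_get_virt_irq_cacheable(struct ino_bucket *bucket)
{
        return bucket->__virt_irq;      /* non-bypass access */
}
#endif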
#define irq_work_pa(__cpu)	&(trap_block[(__cpu)].irq_worklist_pa)
static struct {
        unsigned int dev_handle;
        unsigned int dev_ino;
        unsigned int in_use;
} virt_irq_table[NR_IRQS];
static DEFINE_SPINLOCK(virt_irq_alloc_lock);
unsigned char virt_irq_alloc(unsigned int dev_handle,
                             unsigned int dev_ino)
{
        unsigned long flags;
        unsigned char ent;

        BUILD_BUG_ON(NR_IRQS >= 256);

        spin_lock_irqsave(&virt_irq_alloc_lock, flags);

        for (ent = 1; ent < NR_IRQS; ent++) {
                if (!virt_irq_table[ent].in_use)
                        break;
        }
        if (ent >= NR_IRQS) {
                printk(KERN_ERR "IRQ: Out of virtual IRQs.\n");
                ent = 0;
        } else {
                virt_irq_table[ent].dev_handle = dev_handle;
                virt_irq_table[ent].dev_ino = dev_ino;
                virt_irq_table[ent].in_use = 1;
        }

        spin_unlock_irqrestore(&virt_irq_alloc_lock, flags);

        return ent;
}
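
/* Note (ours): the scan above starts at slot 1 and returns 0 when the
 * table is full, so callers can treat a zero return as allocation
 * failure.  The BUILD_BUG_ON exists because the slot number travels
 * back in an unsigned char.
 */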
#ifdef CONFIG_PCI_MSI
void virt_irq_free(unsigned int virt_irq)
{
        unsigned long flags;

        if (virt_irq >= NR_IRQS)
                return;

        spin_lock_irqsave(&virt_irq_alloc_lock, flags);

        virt_irq_table[virt_irq].in_use = 0;

        spin_unlock_irqrestore(&virt_irq_alloc_lock, flags);
}
#endif
/*
 * /proc/interrupts printing:
 */

int show_interrupts(struct seq_file *p, void *v)
{
        int i = *(loff_t *) v, j;
        struct irqaction * action;
        unsigned long flags;

        if (i == 0) {
                seq_printf(p, "           ");
                for_each_online_cpu(j)
                        seq_printf(p, "CPU%d       ",j);
                seq_putc(p, '\n');
        }

        if (i < NR_IRQS) {
                spin_lock_irqsave(&irq_desc[i].lock, flags);
                action = irq_desc[i].action;
                if (!action)
                        goto skip;
                seq_printf(p, "%3d: ",i);
#ifndef CONFIG_SMP
                seq_printf(p, "%10u ", kstat_irqs(i));
#else
                for_each_online_cpu(j)
                        seq_printf(p, "%10u ", kstat_cpu(j).irqs[i]);
#endif
                seq_printf(p, " %9s", irq_desc[i].chip->typename);
                seq_printf(p, "  %s", action->name);

                for (action=action->next; action; action = action->next)
                        seq_printf(p, ", %s", action->name);

                seq_putc(p, '\n');
skip:
                spin_unlock_irqrestore(&irq_desc[i].lock, flags);
        }
        return 0;
}
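
/* Illustrative output of the format strings above (values and device
 * names hypothetical, two-cpu machine):
 *
 *	           CPU0       CPU1
 *	  6:        241          0     sun4u  su(serial)
 */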
static unsigned int sun4u_compute_tid(unsigned long imap, unsigned long cpuid)
{
        unsigned int tid;

        if (this_is_starfire) {
                tid = starfire_translate(imap, cpuid);
                tid <<= IMAP_TID_SHIFT;
                tid &= IMAP_TID_UPA;
        } else {
                if (tlb_type == cheetah || tlb_type == cheetah_plus) {
                        unsigned long ver;

                        __asm__ ("rdpr %%ver, %0" : "=r" (ver));
                        if ((ver >> 32UL) == __JALAPENO_ID ||
                            (ver >> 32UL) == __SERRANO_ID) {
                                tid = cpuid << IMAP_TID_SHIFT;
                                tid &= IMAP_TID_JBUS;
                        } else {
                                unsigned int a = cpuid & 0x1f;
                                unsigned int n = (cpuid >> 5) & 0x1f;

                                tid = ((a << IMAP_AID_SHIFT) |
                                       (n << IMAP_NID_SHIFT));
                                tid &= (IMAP_AID_SAFARI |
                                        IMAP_NID_SAFARI);
                        }
                } else {
                        tid = cpuid << IMAP_TID_SHIFT;
                        tid &= IMAP_TID_UPA;
                }
        }

        return tid;
}
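
/* Worked example (ours) for the Safari branch above: cpuid 0x23 gives
 * agent id a = 0x23 & 0x1f = 3 and node id n = (0x23 >> 5) & 0x1f = 1,
 * which are then packed into the IMAP AID and NID fields.
 */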
struct irq_handler_data {
        unsigned long   iclr;
        unsigned long   imap;

        void            (*pre_handler)(unsigned int, void *, void *);
        void            *arg1;
        void            *arg2;
};
#ifdef CONFIG_SMP
static int irq_choose_cpu(unsigned int virt_irq)
{
        cpumask_t mask = irq_desc[virt_irq].affinity;
        int cpuid;

        if (cpus_equal(mask, CPU_MASK_ALL)) {
                static int irq_rover;
                static DEFINE_SPINLOCK(irq_rover_lock);
                unsigned long flags;

                /* Round-robin distribution... */
        do_round_robin:
                spin_lock_irqsave(&irq_rover_lock, flags);

                while (!cpu_online(irq_rover)) {
                        if (++irq_rover >= NR_CPUS)
                                irq_rover = 0;
                }
                cpuid = irq_rover;
                do {
                        if (++irq_rover >= NR_CPUS)
                                irq_rover = 0;
                } while (!cpu_online(irq_rover));

                spin_unlock_irqrestore(&irq_rover_lock, flags);
        } else {
                cpumask_t tmp;

                cpus_and(tmp, cpu_online_map, mask);

                if (cpus_empty(tmp))
                        goto do_round_robin;

                cpuid = first_cpu(tmp);
        }

        return cpuid;
}
#else
static int irq_choose_cpu(unsigned int virt_irq)
{
        return real_hard_smp_processor_id();
}
#endif
static void sun4u_irq_enable(unsigned int virt_irq)
{
        struct irq_handler_data *data = get_irq_chip_data(virt_irq);

        if (likely(data)) {
                unsigned long cpuid, imap, val;
                unsigned int tid;

                cpuid = irq_choose_cpu(virt_irq);
                imap = data->imap;

                tid = sun4u_compute_tid(imap, cpuid);

                val = upa_readq(imap);
                val &= ~(IMAP_TID_UPA | IMAP_TID_JBUS |
                         IMAP_AID_SAFARI | IMAP_NID_SAFARI);
                val |= tid | IMAP_VALID;
                upa_writeq(val, imap);
                upa_writeq(ICLR_IDLE, data->iclr);
        }
}
static void sun4u_set_affinity(unsigned int virt_irq, cpumask_t mask)
{
        sun4u_irq_enable(virt_irq);
}
static void sun4u_irq_disable(unsigned int virt_irq)
{
        struct irq_handler_data *data = get_irq_chip_data(virt_irq);

        if (likely(data)) {
                unsigned long imap = data->imap;
                unsigned long tmp = upa_readq(imap);

                tmp &= ~IMAP_VALID;
                upa_writeq(tmp, imap);
        }
}
static void sun4u_irq_eoi(unsigned int virt_irq)
{
        struct irq_handler_data *data = get_irq_chip_data(virt_irq);
        struct irq_desc *desc = irq_desc + virt_irq;

        if (unlikely(desc->status & (IRQ_DISABLED|IRQ_INPROGRESS)))
                return;

        if (likely(data))
                upa_writeq(ICLR_IDLE, data->iclr);
}
static void sun4v_irq_enable(unsigned int virt_irq)
{
        unsigned int ino = virt_irq_table[virt_irq].dev_ino;
        unsigned long cpuid = irq_choose_cpu(virt_irq);
        int err;

        err = sun4v_intr_settarget(ino, cpuid);
        if (err != HV_EOK)
                printk(KERN_ERR "sun4v_intr_settarget(%x,%lu): "
                       "err(%d)\n", ino, cpuid, err);
        err = sun4v_intr_setstate(ino, HV_INTR_STATE_IDLE);
        if (err != HV_EOK)
                printk(KERN_ERR "sun4v_intr_setstate(%x): "
                       "err(%d)\n", ino, err);
        err = sun4v_intr_setenabled(ino, HV_INTR_ENABLED);
        if (err != HV_EOK)
                printk(KERN_ERR "sun4v_intr_setenabled(%x): err(%d)\n",
                       ino, err);
}
static void sun4v_set_affinity(unsigned int virt_irq, cpumask_t mask)
{
        unsigned int ino = virt_irq_table[virt_irq].dev_ino;
        unsigned long cpuid = irq_choose_cpu(virt_irq);
        int err;

        err = sun4v_intr_settarget(ino, cpuid);
        if (err != HV_EOK)
                printk(KERN_ERR "sun4v_intr_settarget(%x,%lu): "
                       "err(%d)\n", ino, cpuid, err);
}
static void sun4v_irq_disable(unsigned int virt_irq)
{
        unsigned int ino = virt_irq_table[virt_irq].dev_ino;
        int err;

        err = sun4v_intr_setenabled(ino, HV_INTR_DISABLED);
        if (err != HV_EOK)
                printk(KERN_ERR "sun4v_intr_setenabled(%x): "
                       "err(%d)\n", ino, err);
}
static void sun4v_irq_eoi(unsigned int virt_irq)
{
        unsigned int ino = virt_irq_table[virt_irq].dev_ino;
        struct irq_desc *desc = irq_desc + virt_irq;
        int err;

        if (unlikely(desc->status & (IRQ_DISABLED|IRQ_INPROGRESS)))
                return;

        err = sun4v_intr_setstate(ino, HV_INTR_STATE_IDLE);
        if (err != HV_EOK)
                printk(KERN_ERR "sun4v_intr_setstate(%x): "
                       "err(%d)\n", ino, err);
}
static void sun4v_virq_enable(unsigned int virt_irq)
{
        unsigned long cpuid, dev_handle, dev_ino;
        int err;

        cpuid = irq_choose_cpu(virt_irq);

        dev_handle = virt_irq_table[virt_irq].dev_handle;
        dev_ino = virt_irq_table[virt_irq].dev_ino;

        err = sun4v_vintr_set_target(dev_handle, dev_ino, cpuid);
        if (err != HV_EOK)
                printk(KERN_ERR "sun4v_vintr_set_target(%lx,%lx,%lu): "
                       "err(%d)\n",
                       dev_handle, dev_ino, cpuid, err);
        err = sun4v_vintr_set_state(dev_handle, dev_ino,
                                    HV_INTR_STATE_IDLE);
        if (err != HV_EOK)
                printk(KERN_ERR "sun4v_vintr_set_state(%lx,%lx,"
                       "HV_INTR_STATE_IDLE): err(%d)\n",
                       dev_handle, dev_ino, err);
        err = sun4v_vintr_set_valid(dev_handle, dev_ino,
                                    HV_INTR_ENABLED);
        if (err != HV_EOK)
                printk(KERN_ERR "sun4v_vintr_set_valid(%lx,%lx,"
                       "HV_INTR_ENABLED): err(%d)\n",
                       dev_handle, dev_ino, err);
}
static void sun4v_virt_set_affinity(unsigned int virt_irq, cpumask_t mask)
{
        unsigned long cpuid, dev_handle, dev_ino;
        int err;

        cpuid = irq_choose_cpu(virt_irq);

        dev_handle = virt_irq_table[virt_irq].dev_handle;
        dev_ino = virt_irq_table[virt_irq].dev_ino;

        err = sun4v_vintr_set_target(dev_handle, dev_ino, cpuid);
        if (err != HV_EOK)
                printk(KERN_ERR "sun4v_vintr_set_target(%lx,%lx,%lu): "
                       "err(%d)\n",
                       dev_handle, dev_ino, cpuid, err);
}
static void sun4v_virq_disable(unsigned int virt_irq)
{
        unsigned long dev_handle, dev_ino;
        int err;

        dev_handle = virt_irq_table[virt_irq].dev_handle;
        dev_ino = virt_irq_table[virt_irq].dev_ino;

        err = sun4v_vintr_set_valid(dev_handle, dev_ino,
                                    HV_INTR_DISABLED);
        if (err != HV_EOK)
                printk(KERN_ERR "sun4v_vintr_set_valid(%lx,%lx,"
                       "HV_INTR_DISABLED): err(%d)\n",
                       dev_handle, dev_ino, err);
}
static void sun4v_virq_eoi(unsigned int virt_irq)
{
        struct irq_desc *desc = irq_desc + virt_irq;
        unsigned long dev_handle, dev_ino;
        int err;

        if (unlikely(desc->status & (IRQ_DISABLED|IRQ_INPROGRESS)))
                return;

        dev_handle = virt_irq_table[virt_irq].dev_handle;
        dev_ino = virt_irq_table[virt_irq].dev_ino;

        err = sun4v_vintr_set_state(dev_handle, dev_ino,
                                    HV_INTR_STATE_IDLE);
        if (err != HV_EOK)
                printk(KERN_ERR "sun4v_vintr_set_state(%lx,%lx,"
                       "HV_INTR_STATE_IDLE): err(%d)\n",
                       dev_handle, dev_ino, err);
}
static struct irq_chip sun4u_irq = {
        .typename       = "sun4u",
        .enable         = sun4u_irq_enable,
        .disable        = sun4u_irq_disable,
        .eoi            = sun4u_irq_eoi,
        .set_affinity   = sun4u_set_affinity,
};

static struct irq_chip sun4v_irq = {
        .typename       = "sun4v",
        .enable         = sun4v_irq_enable,
        .disable        = sun4v_irq_disable,
        .eoi            = sun4v_irq_eoi,
        .set_affinity   = sun4v_set_affinity,
};

static struct irq_chip sun4v_virq = {
        .typename       = "vsun4v",
        .enable         = sun4v_virq_enable,
        .disable        = sun4v_virq_disable,
        .eoi            = sun4v_virq_eoi,
        .set_affinity   = sun4v_virt_set_affinity,
};
static void pre_flow_handler(unsigned int virt_irq,
                             struct irq_desc *desc)
{
        struct irq_handler_data *data = get_irq_chip_data(virt_irq);
        unsigned int ino = virt_irq_table[virt_irq].dev_ino;

        data->pre_handler(ino, data->arg1, data->arg2);

        handle_fasteoi_irq(virt_irq, desc);
}
void irq_install_pre_handler(int virt_irq,
                             void (*func)(unsigned int, void *, void *),
                             void *arg1, void *arg2)
{
        struct irq_handler_data *data = get_irq_chip_data(virt_irq);
        struct irq_desc *desc = irq_desc + virt_irq;

        data->pre_handler = func;
        data->arg1 = arg1;
        data->arg2 = arg2;

        desc->handle_irq = pre_flow_handler;
}
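
/* Illustrative use (ours; the names are hypothetical): a bus
 * controller needing chip-specific work ahead of the normal flow
 * handler would hook in like this:
 *
 *	irq_install_pre_handler(virt_irq, pbm_intr_pre_handler,
 *				pbm, pbm->controller_regs);
 *
 * pre_flow_handler() then invokes the hook with the INO and both
 * cookies before falling through to handle_fasteoi_irq().
 */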
unsigned int build_irq(int inofixup, unsigned long iclr, unsigned long imap)
{
        struct ino_bucket *bucket;
        struct irq_handler_data *data;
        unsigned int virt_irq;
        int ino;

        BUG_ON(tlb_type == hypervisor);

        ino = (upa_readq(imap) & (IMAP_IGN | IMAP_INO)) + inofixup;
        bucket = &ivector_table[ino];
        virt_irq = bucket_get_virt_irq(__pa(bucket));
        if (!virt_irq) {
                virt_irq = virt_irq_alloc(0, ino);
                bucket_set_virt_irq(__pa(bucket), virt_irq);
                set_irq_chip_and_handler_name(virt_irq,
                                              &sun4u_irq,
                                              handle_fasteoi_irq,
                                              "IVEC");
        }

        data = get_irq_chip_data(virt_irq);
        if (unlikely(data))
                goto out;

        data = kzalloc(sizeof(struct irq_handler_data), GFP_ATOMIC);
        if (unlikely(!data)) {
                prom_printf("IRQ: kzalloc(irq_handler_data) failed.\n");
                prom_halt();
        }
        set_irq_chip_data(virt_irq, data);

        data->imap  = imap;
        data->iclr  = iclr;

out:
        return virt_irq;
}
static unsigned int sun4v_build_common(unsigned long sysino,
                                       struct irq_chip *chip)
{
        struct ino_bucket *bucket;
        struct irq_handler_data *data;
        unsigned int virt_irq;

        BUG_ON(tlb_type != hypervisor);

        bucket = &ivector_table[sysino];
        virt_irq = bucket_get_virt_irq(__pa(bucket));
        if (!virt_irq) {
                virt_irq = virt_irq_alloc(0, sysino);
                bucket_set_virt_irq(__pa(bucket), virt_irq);
                set_irq_chip_and_handler_name(virt_irq, chip,
                                              handle_fasteoi_irq,
                                              "IVEC");
        }

        data = get_irq_chip_data(virt_irq);
        if (unlikely(data))
                goto out;

        data = kzalloc(sizeof(struct irq_handler_data), GFP_ATOMIC);
        if (unlikely(!data)) {
                prom_printf("IRQ: kzalloc(irq_handler_data) failed.\n");
                prom_halt();
        }
        set_irq_chip_data(virt_irq, data);

        /* Catch accidental accesses to these things.  IMAP/ICLR handling
         * is done by hypervisor calls on sun4v platforms, not by direct
         * register accesses.
         */
        data->imap = ~0UL;
        data->iclr = ~0UL;

out:
        return virt_irq;
}
unsigned int sun4v_build_irq(u32 devhandle, unsigned int devino)
{
        unsigned long sysino = sun4v_devino_to_sysino(devhandle, devino);

        return sun4v_build_common(sysino, &sun4v_irq);
}
unsigned int sun4v_build_virq(u32 devhandle, unsigned int devino)
{
        struct irq_handler_data *data;
        unsigned long hv_err, cookie;
        struct ino_bucket *bucket;
        struct irq_desc *desc;
        unsigned int virt_irq;

        bucket = kzalloc(sizeof(struct ino_bucket), GFP_ATOMIC);
        if (unlikely(!bucket))
                return 0;
        __flush_dcache_range((unsigned long) bucket,
                             ((unsigned long) bucket +
                              sizeof(struct ino_bucket)));

        virt_irq = virt_irq_alloc(devhandle, devino);
        bucket_set_virt_irq(__pa(bucket), virt_irq);

        set_irq_chip_and_handler_name(virt_irq, &sun4v_virq,
                                      handle_fasteoi_irq,
                                      "IVEC");

        data = kzalloc(sizeof(struct irq_handler_data), GFP_ATOMIC);
        if (unlikely(!data))
                return 0;

        /* In order to make the LDC channel startup sequence easier,
         * especially wrt. locking, we do not let request_irq() enable
         * the interrupt.
         */
        desc = irq_desc + virt_irq;
        desc->status |= IRQ_NOAUTOEN;

        set_irq_chip_data(virt_irq, data);

        /* Catch accidental accesses to these things.  IMAP/ICLR handling
         * is done by hypervisor calls on sun4v platforms, not by direct
         * register accesses.
         */
        data->imap = ~0UL;
        data->iclr = ~0UL;

        cookie = ~__pa(bucket);
        hv_err = sun4v_vintr_set_cookie(devhandle, devino, cookie);
        if (hv_err) {
                prom_printf("IRQ: Fatal, cannot set cookie for [%x:%x] "
                            "err=%lu\n", devhandle, devino, hv_err);
                prom_halt();
        }

        return virt_irq;
}
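
/* Consequence of IRQ_NOAUTOEN above (sketch ours; handler and cookie
 * names hypothetical): request_irq() only installs the handler, so an
 * LDC user enables the interrupt explicitly once its locking is ready:
 *
 *	err = request_irq(virt_irq, ldc_rx_handler, 0, "LDC RX", lp);
 *	...
 *	enable_irq(virt_irq);
 */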
void ack_bad_irq(unsigned int virt_irq)
{
        unsigned int ino = virt_irq_table[virt_irq].dev_ino;

        if (!ino)
                ino = 0xdeadbeef;

        printk(KERN_CRIT "Unexpected IRQ from ino[%x] virt_irq[%u]\n",
               ino, virt_irq);
}
void *hardirq_stack[NR_CPUS];
void *softirq_stack[NR_CPUS];

static __attribute__((always_inline)) void *set_hardirq_stack(void)
{
        void *orig_sp, *sp = hardirq_stack[smp_processor_id()];

        __asm__ __volatile__("mov %%sp, %0" : "=r" (orig_sp));
        if (orig_sp < sp ||
            orig_sp > (sp + THREAD_SIZE)) {
                sp += THREAD_SIZE - 192 - STACK_BIAS;
                __asm__ __volatile__("mov %0, %%sp" : : "r" (sp));
        }

        return orig_sp;
}

static __attribute__((always_inline)) void restore_hardirq_stack(void *orig_sp)
{
        __asm__ __volatile__("mov %0, %%sp" : : "r" (orig_sp));
}
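
/* Note on the arithmetic above (ours): sparc64 stack pointers carry
 * the STACK_BIAS (2047) offset, and 192 bytes stay reserved for the
 * register window save area and argument slots, so the usable top of
 * a THREAD_SIZE'd IRQ stack is sp + THREAD_SIZE - 192 - STACK_BIAS.
 */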
void handler_irq(int irq, struct pt_regs *regs)
{
        unsigned long pstate, bucket_pa;
        struct pt_regs *old_regs;
        void *orig_sp;

        clear_softint(1 << irq);

        old_regs = set_irq_regs(regs);
        irq_enter();

        /* Grab an atomic snapshot of the pending IVECs.  */
        __asm__ __volatile__("rdpr	%%pstate, %0\n\t"
                             "wrpr	%0, %3, %%pstate\n\t"
                             "ldx	[%2], %1\n\t"
                             "stx	%%g0, [%2]\n\t"
                             "wrpr	%0, 0x0, %%pstate\n\t"
                             : "=&r" (pstate), "=&r" (bucket_pa)
                             : "r" (irq_work_pa(smp_processor_id())),
                               "i" (PSTATE_IE)
                             : "memory");

        orig_sp = set_hardirq_stack();

        while (bucket_pa) {
                struct irq_desc *desc;
                unsigned long next_pa;
                unsigned int virt_irq;

                next_pa = bucket_get_chain_pa(bucket_pa);
                virt_irq = bucket_get_virt_irq(bucket_pa);
                bucket_clear_chain_pa(bucket_pa);

                desc = irq_desc + virt_irq;

                desc->handle_irq(virt_irq, desc);

                bucket_pa = next_pa;
        }

        restore_hardirq_stack(orig_sp);

        irq_exit();
        set_irq_regs(old_regs);
}
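
/* C-equivalent sketch (ours) of the snapshot sequence above, ignoring
 * the PSTATE_IE toggling that makes it atomic against new vector traps:
 *
 *	bucket_pa = *irq_work_pa(smp_processor_id());
 *	*irq_work_pa(smp_processor_id()) = 0;
 */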
void do_softirq(void)
{
        unsigned long flags;

        if (in_interrupt())
                return;

        local_irq_save(flags);

        if (local_softirq_pending()) {
                void *orig_sp, *sp = softirq_stack[smp_processor_id()];

                sp += THREAD_SIZE - 192 - STACK_BIAS;

                __asm__ __volatile__("mov %%sp, %0\n\t"
                                     "mov %1, %%sp"
                                     : "=&r" (orig_sp)
                                     : "r" (sp));
                __do_softirq();
                __asm__ __volatile__("mov %0, %%sp"
                                     : : "r" (orig_sp));
        }

        local_irq_restore(flags);
}
#ifdef CONFIG_HOTPLUG_CPU
void fixup_irqs(void)
{
        unsigned int irq;

        for (irq = 0; irq < NR_IRQS; irq++) {
                unsigned long flags;

                spin_lock_irqsave(&irq_desc[irq].lock, flags);
                if (irq_desc[irq].action &&
                    !(irq_desc[irq].status & IRQ_PER_CPU)) {
                        if (irq_desc[irq].chip->set_affinity)
                                irq_desc[irq].chip->set_affinity(irq,
                                        irq_desc[irq].affinity);
                }
                spin_unlock_irqrestore(&irq_desc[irq].lock, flags);
        }

        tick_ops->disable_irq();
}
#endif
struct sun5_timer {
        u64     count0, limit0;
        u64     count1, limit1;
};

static struct sun5_timer *prom_timers;
static u64 prom_limit0, prom_limit1;
static void map_prom_timers(void)
{
        struct device_node *dp;
        const unsigned int *addr;

        /* PROM timer node hangs out in the top level of device siblings... */
        dp = of_find_node_by_path("/");
        dp = dp->child;
        while (dp) {
                if (!strcmp(dp->name, "counter-timer"))
                        break;
                dp = dp->sibling;
        }

        /* Assume that if the node is not present, the PROM is using a
         * different tick mechanism which we need not care about.
         */
        if (!dp) {
                prom_timers = (struct sun5_timer *) 0;
                return;
        }

        /* If the PROM is really using this timer, it must have mapped it already. */
        addr = of_get_property(dp, "address", NULL);
        if (!addr) {
                prom_printf("PROM does not have timer mapped, trying to continue.\n");
                prom_timers = (struct sun5_timer *) 0;
                return;
        }
        prom_timers = (struct sun5_timer *) ((unsigned long)addr[0]);
}
static void kill_prom_timer(void)
{
        if (!prom_timers)
                return;

        /* Save them away for later. */
        prom_limit0 = prom_timers->limit0;
        prom_limit1 = prom_timers->limit1;

        /* Just as in sun4c/sun4m PROM uses timer which ticks at IRQ 14.
         * We turn both off here just to be paranoid.
         */
        prom_timers->limit0 = 0;
        prom_timers->limit1 = 0;

        /* Wheee, eat the interrupt packet too... */
        __asm__ __volatile__(
"	mov	0x40, %%g2\n"
"	ldxa	[%%g0] %0, %%g1\n"
"	ldxa	[%%g2] %1, %%g1\n"
"	stxa	%%g0, [%%g0] %0\n"
"	membar	#Sync\n"
        : /* no outputs */
        : "i" (ASI_INTR_RECEIVE), "i" (ASI_INTR_R)
        : "g1", "g2");
}
void init_irqwork_curcpu(void)
{
        int cpu = hard_smp_processor_id();

        trap_block[cpu].irq_worklist_pa = 0UL;
}
/* Please be very careful with register_one_mondo() and
 * sun4v_register_mondo_queues().
 *
 * On SMP this gets invoked from the CPU trampoline before
 * the cpu has fully taken over the trap table from OBP,
 * and its kernel stack + %g6 thread register state is
 * not fully cooked yet.
 *
 * Therefore you cannot make any OBP calls, not even prom_printf,
 * from these two routines.
 */
static void __cpuinit register_one_mondo(unsigned long paddr, unsigned long type, unsigned long qmask)
{
        unsigned long num_entries = (qmask + 1) / 64;
        unsigned long status;

        status = sun4v_cpu_qconf(type, paddr, num_entries);
        if (status != HV_EOK) {
                prom_printf("SUN4V: sun4v_cpu_qconf(%lu:%lx:%lu) failed, "
                            "err %lu\n", type, paddr, num_entries, status);
                prom_halt();
        }
}
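
/* Worked example (ours): each mondo queue entry is 64 bytes, so a
 * qmask of 0x7ff describes a 2048-byte queue and the hypervisor is
 * told num_entries = (0x7ff + 1) / 64 = 32.
 */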
void __cpuinit sun4v_register_mondo_queues(int this_cpu)
{
        struct trap_per_cpu *tb = &trap_block[this_cpu];

        register_one_mondo(tb->cpu_mondo_pa, HV_CPU_QUEUE_CPU_MONDO,
                           tb->cpu_mondo_qmask);
        register_one_mondo(tb->dev_mondo_pa, HV_CPU_QUEUE_DEVICE_MONDO,
                           tb->dev_mondo_qmask);
        register_one_mondo(tb->resum_mondo_pa, HV_CPU_QUEUE_RES_ERROR,
                           tb->resum_qmask);
        register_one_mondo(tb->nonresum_mondo_pa, HV_CPU_QUEUE_NONRES_ERROR,
                           tb->nonresum_qmask);
}
static void __init alloc_one_mondo(unsigned long *pa_ptr, unsigned long qmask)
{
        unsigned long size = PAGE_ALIGN(qmask + 1);
        void *p = __alloc_bootmem(size, size, 0);

        if (!p) {
                prom_printf("SUN4V: Error, cannot allocate mondo queue.\n");
                prom_halt();
        }

        *pa_ptr = __pa(p);
}
static void __init alloc_one_kbuf(unsigned long *pa_ptr, unsigned long qmask)
{
        unsigned long size = PAGE_ALIGN(qmask + 1);
        void *p = __alloc_bootmem(size, size, 0);

        if (!p) {
                prom_printf("SUN4V: Error, cannot allocate kbuf page.\n");
                prom_halt();
        }

        *pa_ptr = __pa(p);
}
static void __init init_cpu_send_mondo_info(struct trap_per_cpu *tb)
{
#ifdef CONFIG_SMP
        void *page;

        BUILD_BUG_ON((NR_CPUS * sizeof(u16)) > (PAGE_SIZE - 64));

        page = alloc_bootmem_pages(PAGE_SIZE);
        if (!page) {
                prom_printf("SUN4V: Error, cannot allocate cpu mondo page.\n");
                prom_halt();
        }

        tb->cpu_mondo_block_pa = __pa(page);
        tb->cpu_list_pa = __pa(page + 64);
#endif
}
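
/* Page layout assumed above (ours): the first 64 bytes hold the cpu
 * mondo block and the u16 cpu list begins at offset 64, which is what
 * the BUILD_BUG_ON checks: NR_CPUS u16 entries must fit in the
 * remaining PAGE_SIZE - 64 bytes.
 */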
/* Allocate mondo and error queues for all possible cpus.  */
static void __init sun4v_init_mondo_queues(void)
{
        int cpu;

        for_each_possible_cpu(cpu) {
                struct trap_per_cpu *tb = &trap_block[cpu];

                alloc_one_mondo(&tb->cpu_mondo_pa, tb->cpu_mondo_qmask);
                alloc_one_mondo(&tb->dev_mondo_pa, tb->dev_mondo_qmask);
                alloc_one_mondo(&tb->resum_mondo_pa, tb->resum_qmask);
                alloc_one_kbuf(&tb->resum_kernel_buf_pa, tb->resum_qmask);
                alloc_one_mondo(&tb->nonresum_mondo_pa, tb->nonresum_qmask);
                alloc_one_kbuf(&tb->nonresum_kernel_buf_pa,
                               tb->nonresum_qmask);
        }
}
static void __init init_send_mondo_info(void)
{
        int cpu;

        for_each_possible_cpu(cpu) {
                struct trap_per_cpu *tb = &trap_block[cpu];

                init_cpu_send_mondo_info(tb);
        }
}
static struct irqaction timer_irq_action = {
        .name = "timer",
};
/* Only invoked on boot processor. */
void __init init_IRQ(void)
{
        unsigned long size;

        map_prom_timers();
        kill_prom_timer();

        size = sizeof(struct ino_bucket) * NUM_IVECS;
        ivector_table = alloc_bootmem(size);
        if (!ivector_table) {
                prom_printf("Fatal error, cannot allocate ivector_table\n");
                prom_halt();
        }
        __flush_dcache_range((unsigned long) ivector_table,
                             ((unsigned long) ivector_table) + size);

        ivector_table_pa = __pa(ivector_table);

        if (tlb_type == hypervisor)
                sun4v_init_mondo_queues();

        init_send_mondo_info();

        if (tlb_type == hypervisor) {
                /* Load up the boot cpu's entries.  */
                sun4v_register_mondo_queues(hard_smp_processor_id());
        }

        /* We need to clear any IRQ's pending in the soft interrupt
         * registers, a spurious one could be left around from the
         * PROM timer which we just disabled.
         */
        clear_softint(get_softint());

        /* Now that the ivector table is initialized, it is safe
         * to receive IRQ vector traps.  We will normally take
         * one or two right now, in case some device PROM used
         * to boot us wants to speak to us.  We just ignore them.
         */
        __asm__ __volatile__("rdpr	%%pstate, %%g1\n\t"
                             "or	%%g1, %0, %%g1\n\t"
                             "wrpr	%%g1, 0x0, %%pstate"
                             : /* No outputs */
                             : "i" (PSTATE_IE)
                             : "g1");

        irq_desc[0].action = &timer_irq_action;
}