/* $Id: irq.c,v 1.114 2002/01/11 08:45:38 davem Exp $
 * irq.c: UltraSparc IRQ handling/init/registry.
 *
 * Copyright (C) 1997  David S. Miller  (davem@caip.rutgers.edu)
 * Copyright (C) 1998  Eddie C. Dost    (ecd@skynet.be)
 * Copyright (C) 1998  Jakub Jelinek    (jj@ultra.linux.cz)
 */
#include <linux/config.h>
#include <linux/module.h>
#include <linux/sched.h>
#include <linux/ptrace.h>
#include <linux/errno.h>
#include <linux/kernel_stat.h>
#include <linux/signal.h>
#include <linux/mm.h>
#include <linux/interrupt.h>
#include <linux/slab.h>
#include <linux/random.h>
#include <linux/init.h>
#include <linux/delay.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>

#include <asm/ptrace.h>
#include <asm/processor.h>
#include <asm/atomic.h>
#include <asm/system.h>
#include <asm/irq.h>
#include <asm/sbus.h>
#include <asm/iommu.h>
#include <asm/upa.h>
#include <asm/oplib.h>
#include <asm/timer.h>
#include <asm/smp.h>
#include <asm/starfire.h>
#include <asm/uaccess.h>
#include <asm/cache.h>
#include <asm/cpudata.h>
#include <asm/auxio.h>
#ifdef CONFIG_SMP
static void distribute_irqs(void);
#endif
/* UPA nodes send interrupt packet to UltraSparc with first data reg
 * value low 5 (7 on Starfire) bits holding the IRQ identifier being
 * delivered.  We must translate this into a non-vector IRQ so we can
 * set the softint on this cpu.
 *
 * To make processing these packets efficient and race free we use
 * an array of irq buckets below.  The interrupt vector handler in
 * entry.S feeds incoming packets into per-cpu pil-indexed lists.
 * The IVEC handler does not need to act atomically, the PIL dispatch
 * code uses CAS to get an atomic snapshot of the list and clear it.
 */
struct ino_bucket ivector_table[NUM_IVECS] __attribute__ ((aligned (SMP_CACHE_BYTES)));
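/* Illustrative sketch (not part of the original code): the "unsigned int irq"
 * cookies passed around below are just ino_bucket addresses truncated to
 * 32 bits; the asm/irq.h of this era provides the conversions, roughly:
 */
#if 0
#define __irq(bucket)	((unsigned int)(unsigned long)(bucket))
#define __bucket(irq)	((struct ino_bucket *)(unsigned long)(irq))
#define __irq_ino(irq)	(__bucket(irq) - &ivector_table[0])
#endif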
/* This has to be in the main kernel image, it cannot be
 * turned into per-cpu data.  The reason is that the main
 * kernel image is locked into the TLB and this structure
 * is accessed from the vectored interrupt trap handler.  If
 * access to this structure takes a TLB miss it could cause
 * the 5-level sparc v9 trap stack to overflow.
 */
struct irq_work_struct {
	unsigned int	irq_worklists[16];
};
struct irq_work_struct __irq_work[NR_CPUS];
#define irq_work(__cpu, __pil)	&(__irq_work[(__cpu)].irq_worklists[(__pil)])
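/* Usage sketch (illustrative only): handler_irq() below takes an atomic
 * snapshot of a per-cpu worklist by swapping the list head with zero,
 * then walks the chain of buckets:
 */
#if 0
	struct ino_bucket *bp = __bucket(xchg32(irq_work(cpu, pil), 0));

	while (bp != NULL) {
		struct ino_bucket *nbp = __bucket(bp->irq_chain);
		/* ... dispatch bp ... */
		bp = nbp;
	}
#endif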
/* This is a table of physical addresses used to deal with IBF_DMA_SYNC.
 * It is used for PCI only to synchronize DMA transfers with IRQ delivery
 * for devices behind busses other than APB on Sabre systems.
 *
 * Currently these physical addresses are just config space accesses
 * to the command register for that device.
 */
unsigned long pci_dma_wsync;
unsigned long dma_sync_reg_table[256];
unsigned char dma_sync_reg_table_entry = 0;
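/* Illustrative sketch: handler_irq() below performs the IBF_DMA_SYNC flush
 * by reading back through these saved addresses before running the
 * handler, roughly:
 */
#if 0
	if (bucket->flags & IBF_DMA_SYNC) {
		upa_readl(dma_sync_reg_table[bucket->synctab_ent]);
		upa_readq(pci_dma_wsync);
	}
#endif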
/* This is based upon code in the 32-bit Sparc kernel written mostly by
 * David Redman (djhr@tadpole.co.uk).
 */
#define MAX_STATIC_ALLOC	4
static struct irqaction static_irqaction[MAX_STATIC_ALLOC];
static int static_irq_count;
/* This is exported so that fast IRQ handlers can get at it... -DaveM */
struct irqaction *irq_action[NR_IRQS+1] = {
	NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
	NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL
};
/* This only synchronizes entities which modify IRQ handler
 * state and some selected user-level spots that want to
 * read things in the table.  IRQ handler processing orders
 * its accesses such that no locking is needed.
 */
static DEFINE_SPINLOCK(irq_action_lock);
static void register_irq_proc (unsigned int irq);
/*
 * Upper 16 bits of irqaction->flags holds the ino.
 * irqaction->mask holds the smp affinity information.
 */
#define put_ino_in_irqaction(action, irq) \
	action->flags &= 0xffffffffffffUL; \
	if (__bucket(irq) == &pil0_dummy_bucket) \
		action->flags |= 0xdeadUL << 48;  \
	else \
		action->flags |= __irq_ino(irq) << 48;
#define get_ino_in_irqaction(action)	(action->flags >> 48)

#define put_smpaff_in_irqaction(action, smpaff)	(action)->mask = (smpaff)
#define get_smpaff_in_irqaction(action) 	((action)->mask)
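/* Example (illustrative only): for a real bucket the two ino macros above
 * round-trip the INO through the upper 16 bits of ->flags; the dummy PIL0
 * bucket instead reads back as the 0xdead marker:
 */
#if 0
	put_ino_in_irqaction(action, irq);
	BUG_ON(get_ino_in_irqaction(action) != __irq_ino(irq));
#endif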
int show_interrupts(struct seq_file *p, void *v)
{
	unsigned long flags;
	int i = *(loff_t *) v;
	struct irqaction *action;
#ifdef CONFIG_SMP
	int j;
#endif

	spin_lock_irqsave(&irq_action_lock, flags);
	if (i <= NR_IRQS) {
		if (!(action = *(i + irq_action)))
			goto out_unlock;
		seq_printf(p, "%3d: ", i);
#ifndef CONFIG_SMP
		seq_printf(p, "%10u ", kstat_irqs(i));
#else
		for (j = 0; j < NR_CPUS; j++) {
			if (!cpu_online(j))
				continue;
			seq_printf(p, "%10u ",
				   kstat_cpu(j).irqs[i]);
		}
#endif
		seq_printf(p, " %s:%lx", action->name,
			   get_ino_in_irqaction(action));
		for (action = action->next; action; action = action->next) {
			seq_printf(p, ", %s:%lx", action->name,
				   get_ino_in_irqaction(action));
		}
		seq_putc(p, '\n');
	}
out_unlock:
	spin_unlock_irqrestore(&irq_action_lock, flags);

	return 0;
}
/* Now these are always passed a true fully specified sun4u INO. */
void enable_irq(unsigned int irq)
{
	struct ino_bucket *bucket = __bucket(irq);
	unsigned long imap;
	unsigned long tid;

	imap = bucket->imap;
	if (imap == 0UL)
		return;

	preempt_disable();

	if (tlb_type == cheetah || tlb_type == cheetah_plus) {
		unsigned long ver;

		__asm__ ("rdpr %%ver, %0" : "=r" (ver));
		if ((ver >> 32) == 0x003e0016) {
			/* We set it to our JBUS ID. */
			__asm__ __volatile__("ldxa [%%g0] %1, %0"
					     : "=r" (tid)
					     : "i" (ASI_JBUS_CONFIG));
			tid = ((tid & (0x1fUL<<17)) << 9);
			tid &= IMAP_TID_JBUS;
		} else {
			/* We set it to our Safari AID. */
			__asm__ __volatile__("ldxa [%%g0] %1, %0"
					     : "=r" (tid)
					     : "i" (ASI_SAFARI_CONFIG));
			tid = ((tid & (0x3ffUL<<17)) << 9);
			tid &= IMAP_AID_SAFARI;
		}
	} else if (this_is_starfire == 0) {
		/* We set it to our UPA MID. */
		__asm__ __volatile__("ldxa [%%g0] %1, %0"
				     : "=r" (tid)
				     : "i" (ASI_UPA_CONFIG));
		tid = ((tid & UPA_CONFIG_MID) << 9);
		tid &= IMAP_TID_UPA;
	} else {
		tid = (starfire_translate(imap, smp_processor_id()) << 26);
		tid &= IMAP_TID_UPA;
	}

	/* NOTE NOTE NOTE, IGN and INO are read-only, IGN is a product
	 * of this SYSIO's preconfigured IGN in the SYSIO Control
	 * Register, the hardware just mirrors that value here.
	 * However for Graphics and UPA Slave devices the full
	 * IMAP_INR field can be set by the programmer here.
	 *
	 * Things like FFB can now be handled via the new IRQ mechanism.
	 */
	upa_writel(tid | IMAP_VALID, imap);

	preempt_enable();
}
/* This now gets passed true ino's as well. */
void disable_irq(unsigned int irq)
{
	struct ino_bucket *bucket = __bucket(irq);
	unsigned long imap;

	imap = bucket->imap;
	if (imap != 0UL) {
		u32 tmp;

		/* NOTE: We do not want to futz with the IRQ clear registers
		 *       and move the state to IDLE, the SCSI code does call
		 *       disable_irq() to assure atomicity in the queue cmd
		 *       SCSI adapter driver code.  Thus we'd lose interrupts.
		 */
		tmp = upa_readl(imap);
		tmp &= ~IMAP_VALID;
		upa_writel(tmp, imap);
	}
}
/* The timer is the one "weird" interrupt which is generated by
 * the CPU %tick register and not by some normal vectored interrupt
 * source.  To handle this special case, we use this dummy INO bucket.
 */
static struct ino_bucket pil0_dummy_bucket = {
	/* No IMAP/ICLR, no INO: every field stays zero. */
};
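/* Illustrative note: the timer registers itself with pil 0 and no
 * IMAP/ICLR, so build_irq() below simply hands back a cookie for this
 * dummy bucket, e.g.:
 */
#if 0
	unsigned int timer_irq = build_irq(0, 0, 0UL, 0UL);
	/* timer_irq == __irq(&pil0_dummy_bucket) here. */
#endif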
unsigned int build_irq(int pil, int inofixup, unsigned long iclr, unsigned long imap)
{
	struct ino_bucket *bucket;
	int ino;

	if (pil == 0) {
		if (iclr != 0UL || imap != 0UL) {
			prom_printf("Invalid dummy bucket for PIL0 (%lx:%lx)\n",
				    iclr, imap);
			prom_halt();
		}
		return __irq(&pil0_dummy_bucket);
	}

	/* RULE: Both must be specified in all other cases. */
	if (iclr == 0UL || imap == 0UL) {
		prom_printf("Invalid build_irq %d %d %016lx %016lx\n",
			    pil, inofixup, iclr, imap);
		prom_halt();
	}

	ino = (upa_readl(imap) & (IMAP_IGN | IMAP_INO)) + inofixup;
	if (ino >= NUM_IVECS) {
		prom_printf("Invalid INO %04x (%d:%d:%016lx:%016lx)\n",
			    ino, pil, inofixup, iclr, imap);
		prom_halt();
	}

	/* Ok, looks good, set it up.  Don't touch the irq_chain or
	 * the pending flag.
	 */
	bucket = &ivector_table[ino];
	if ((bucket->flags & IBF_ACTIVE) ||
	    (bucket->irq_info != NULL)) {
		/* This is a gross fatal error if it happens here. */
		prom_printf("IRQ: Trying to reinit INO bucket, fatal error.\n");
		prom_printf("IRQ: Request INO %04x (%d:%d:%016lx:%016lx)\n",
			    ino, pil, inofixup, iclr, imap);
		prom_printf("IRQ: Existing (%d:%016lx:%016lx)\n",
			    bucket->pil, bucket->iclr, bucket->imap);
		prom_printf("IRQ: Cannot continue, halting...\n");
		prom_halt();
	}
	bucket->imap  = imap;
	bucket->iclr  = iclr;
	bucket->pil   = pil;
	bucket->flags = 0;

	bucket->irq_info = NULL;

	return __irq(bucket);
}
/* Push a bucket onto this cpu's PIL worklist with interrupts disabled,
 * mirroring what the IVEC handler in entry.S does.
 */
static void atomic_bucket_insert(struct ino_bucket *bucket)
{
	unsigned long pstate;
	unsigned int *ent;

	__asm__ __volatile__("rdpr %%pstate, %0" : "=r" (pstate));
	__asm__ __volatile__("wrpr %0, %1, %%pstate"
			     : : "r" (pstate), "i" (PSTATE_IE));
	ent = irq_work(smp_processor_id(), bucket->pil);
	bucket->irq_chain = *ent;
	*ent = __irq(bucket);
	__asm__ __volatile__("wrpr %0, 0x0, %%pstate" : : "r" (pstate));
}
int request_irq(unsigned int irq, irqreturn_t (*handler)(int, void *, struct pt_regs *),
		unsigned long irqflags, const char *name, void *dev_id)
{
	struct irqaction *action, *tmp = NULL;
	struct ino_bucket *bucket = __bucket(irq);
	unsigned long flags;
	int pending = 0;

	if ((bucket != &pil0_dummy_bucket) &&
	    (bucket < &ivector_table[0] ||
	     bucket >= &ivector_table[NUM_IVECS])) {
		unsigned int *caller;

		__asm__ __volatile__("mov %%i7, %0" : "=r" (caller));
		printk(KERN_CRIT "request_irq: Old style IRQ registry attempt "
		       "from %p, irq %08x.\n", caller, irq);
		return -EINVAL;
	}
	if (!handler)
		return -EINVAL;

	if ((bucket != &pil0_dummy_bucket) && (irqflags & SA_SAMPLE_RANDOM)) {
		/*
		 * This function might sleep, we want to call it first,
		 * outside of the atomic block.  In SA_STATIC_ALLOC case,
		 * random driver's kmalloc will fail, but it is safe.
		 * If already initialized, random driver will not reinit.
		 * Yes, this might clear the entropy pool if the wrong
		 * driver is attempted to be loaded, without actually
		 * installing a new handler, but is this really a problem,
		 * only the sysadmin is able to do this.
		 */
		rand_initialize_irq(irq);
	}

	spin_lock_irqsave(&irq_action_lock, flags);

	action = *(bucket->pil + irq_action);
	if (action) {
		if ((action->flags & SA_SHIRQ) && (irqflags & SA_SHIRQ))
			for (tmp = action; tmp->next; tmp = tmp->next)
				;
		else {
			spin_unlock_irqrestore(&irq_action_lock, flags);
			return -EBUSY;
		}
		action = NULL;		/* Or else! */
	}

	/* If this is flagged as statically allocated then we use our
	 * private struct which is never freed.
	 */
	if (irqflags & SA_STATIC_ALLOC) {
		if (static_irq_count < MAX_STATIC_ALLOC)
			action = &static_irqaction[static_irq_count++];
		else
			printk("Request for IRQ%d (%s) SA_STATIC_ALLOC failed "
			       "using kmalloc\n", irq, name);
	}
	if (action == NULL)
		action = (struct irqaction *)kmalloc(sizeof(struct irqaction),
						     GFP_ATOMIC);

	if (!action) {
		spin_unlock_irqrestore(&irq_action_lock, flags);
		return -ENOMEM;
	}

	if (bucket == &pil0_dummy_bucket) {
		bucket->irq_info = action;
		bucket->flags |= IBF_ACTIVE;
	} else {
		if ((bucket->flags & IBF_ACTIVE) != 0) {
			void *orig = bucket->irq_info;
			void **vector = NULL;

			if ((bucket->flags & IBF_PCI) == 0) {
				printk("IRQ: Trying to share non-PCI bucket.\n");
				goto free_and_ebusy;
			}
			if ((bucket->flags & IBF_MULTI) == 0) {
				vector = kmalloc(sizeof(void *) * 4, GFP_ATOMIC);
				if (vector == NULL)
					goto free_and_enomem;

				/* We might have slept. */
				if ((bucket->flags & IBF_MULTI) != 0) {
					int ent;

					kfree(vector);
					vector = (void **)bucket->irq_info;
					for (ent = 0; ent < 4; ent++) {
						if (vector[ent] == NULL) {
							vector[ent] = action;
							break;
						}
					}
					if (ent == 4)
						goto free_and_ebusy;
				} else {
					vector[0] = orig;
					vector[1] = action;
					vector[2] = NULL;
					vector[3] = NULL;
					bucket->irq_info = vector;
					bucket->flags |= IBF_MULTI;
				}
			} else {
				int ent;

				vector = (void **)orig;
				for (ent = 0; ent < 4; ent++) {
					if (vector[ent] == NULL) {
						vector[ent] = action;
						break;
					}
				}
				if (ent == 4)
					goto free_and_ebusy;
			}
		} else {
			bucket->irq_info = action;
			bucket->flags |= IBF_ACTIVE;
		}
		pending = bucket->pending;
		if (pending)
			bucket->pending = 0;
	}

	action->handler = handler;
	action->flags = irqflags;
	action->name = name;
	action->next = NULL;
	action->dev_id = dev_id;
	put_ino_in_irqaction(action, irq);
	put_smpaff_in_irqaction(action, CPU_MASK_NONE);

	if (tmp)
		tmp->next = action;
	else
		*(bucket->pil + irq_action) = action;

	enable_irq(irq);

	/* We ate the IVEC already, this makes sure it does not get lost. */
	if (pending) {
		atomic_bucket_insert(bucket);
		set_softint(1 << bucket->pil);
	}

	spin_unlock_irqrestore(&irq_action_lock, flags);
	if ((bucket != &pil0_dummy_bucket) && (!(irqflags & SA_STATIC_ALLOC)))
		register_irq_proc(__irq_ino(irq));

#ifdef CONFIG_SMP
	distribute_irqs();
#endif
	return 0;

free_and_ebusy:
	kfree(action);
	spin_unlock_irqrestore(&irq_action_lock, flags);
	return -EBUSY;

free_and_enomem:
	kfree(action);
	spin_unlock_irqrestore(&irq_action_lock, flags);
	return -ENOMEM;
}

EXPORT_SYMBOL(request_irq);
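/* Usage sketch (illustrative, with a hypothetical driver "mydev"): a
 * 2.6-era sparc64 driver would register a shared, entropy-sampled
 * handler against the cookie obtained from the bus probe code:
 */
#if 0
static irqreturn_t mydev_intr(int irq, void *dev_id, struct pt_regs *regs)
{
	/* ... acknowledge and service the device ... */
	return IRQ_HANDLED;
}

	err = request_irq(dev_irq_cookie, mydev_intr,
			  SA_SHIRQ | SA_SAMPLE_RANDOM, "mydev", mydev);
	if (err)
		printk(KERN_ERR "mydev: cannot register IRQ\n");
#endif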
void free_irq(unsigned int irq, void *dev_id)
{
	struct irqaction *action;
	struct irqaction *tmp = NULL;
	unsigned long flags;
	struct ino_bucket *bucket = __bucket(irq), *bp;

	if ((bucket != &pil0_dummy_bucket) &&
	    (bucket < &ivector_table[0] ||
	     bucket >= &ivector_table[NUM_IVECS])) {
		unsigned int *caller;

		__asm__ __volatile__("mov %%i7, %0" : "=r" (caller));
		printk(KERN_CRIT "free_irq: Old style IRQ removal attempt "
		       "from %p, irq %08x.\n", caller, irq);
		return;
	}

	spin_lock_irqsave(&irq_action_lock, flags);

	action = *(bucket->pil + irq_action);
	if (!action->handler) {
		printk("Freeing free IRQ %d\n", bucket->pil);
		spin_unlock_irqrestore(&irq_action_lock, flags);
		return;
	}
	if (dev_id) {
		for ( ; action; action = action->next) {
			if (action->dev_id == dev_id)
				break;
			tmp = action;
		}
		if (!action) {
			printk("Trying to free free shared IRQ %d\n", bucket->pil);
			spin_unlock_irqrestore(&irq_action_lock, flags);
			return;
		}
	} else if (action->flags & SA_SHIRQ) {
		printk("Trying to free shared IRQ %d with NULL device ID\n", bucket->pil);
		spin_unlock_irqrestore(&irq_action_lock, flags);
		return;
	}

	if (action->flags & SA_STATIC_ALLOC) {
		printk("Attempt to free statically allocated IRQ %d (%s)\n",
		       bucket->pil, action->name);
		spin_unlock_irqrestore(&irq_action_lock, flags);
		return;
	}

	if (tmp)
		tmp->next = action->next;
	else
		*(bucket->pil + irq_action) = action->next;

	spin_unlock_irqrestore(&irq_action_lock, flags);

	synchronize_irq(irq);

	spin_lock_irqsave(&irq_action_lock, flags);

	if (bucket != &pil0_dummy_bucket) {
		unsigned long imap = bucket->imap;
		void **vector, *orig;
		int ent;

		orig = bucket->irq_info;
		vector = (void **)orig;

		if ((bucket->flags & IBF_MULTI) != 0) {
			int other = 0;
			void *orphan = NULL;

			for (ent = 0; ent < 4; ent++) {
				if (vector[ent] == action)
					vector[ent] = NULL;
				else if (vector[ent] != NULL) {
					orphan = vector[ent];
					other++;
				}
			}

			/* Only free when no other shared irq
			 * uses this bucket.
			 */
			if (other == 1) {
				/* Convert back to non-shared bucket. */
				bucket->irq_info = orphan;
				bucket->flags &= ~(IBF_MULTI);
				kfree(vector);
			}
		} else {
			bucket->irq_info = NULL;
		}

		/* This unique interrupt source is now inactive. */
		bucket->flags &= ~IBF_ACTIVE;

		/* See if any other buckets share this bucket's IMAP
		 * and are still active.
		 */
		for (ent = 0; ent < NUM_IVECS; ent++) {
			bp = &ivector_table[ent];
			if (bp != bucket	&&
			    bp->imap == imap	&&
			    (bp->flags & IBF_ACTIVE) != 0)
				break;
		}

		/* Only disable when no other sub-irq levels of
		 * the same IMAP are active.
		 */
		if (ent == NUM_IVECS)
			disable_irq(irq);
	}

	spin_unlock_irqrestore(&irq_action_lock, flags);

	kfree(action);
}

EXPORT_SYMBOL(free_irq);
#ifdef CONFIG_SMP
void synchronize_irq(unsigned int irq)
{
	struct ino_bucket *bucket = __bucket(irq);

#if 0
	/* The following is how I wish I could implement this.
	 * Unfortunately the ICLR registers are read-only, you can
	 * only write ICLR_foo values to them.  To get the current
	 * IRQ status you would need to get at the IRQ diag registers
	 * in the PCI/SBUS controller and the layout of those vary
	 * from one controller to the next, sigh... -DaveM
	 */
	unsigned long iclr = bucket->iclr;

	while (1) {
		u32 tmp = upa_readl(iclr);

		if (tmp == ICLR_TRANSMIT ||
		    tmp == ICLR_PENDING) {
			cpu_relax();
			continue;
		}
		break;
	}
#else
	/* So we have to do this with an INPROGRESS bit just like x86. */
	while (bucket->flags & IBF_INPROGRESS)
		cpu_relax();
#endif
}
#endif /* CONFIG_SMP */
void catch_disabled_ivec(struct pt_regs *regs)
{
	int cpu = smp_processor_id();
	struct ino_bucket *bucket = __bucket(*irq_work(cpu, 0));

	/* We can actually see this on Ultra/PCI PCI cards, which are bridges
	 * to other devices.  Here a single IMAP enabled potentially multiple
	 * unique interrupt sources (which each do have a unique ICLR register).
	 *
	 * So what we do is just register that the IVEC arrived, when registered
	 * for real the request_irq() code will check the bit and signal
	 * a local CPU interrupt for it.
	 */
#if 0
	printk("IVEC: Spurious interrupt vector (%x) received at (%016lx)\n",
	       bucket - &ivector_table[0], regs->tpc);
#endif
	*irq_work(cpu, 0) = 0;
	bucket->pending = 1;
}
#define FORWARD_VOLUME		12

#ifdef CONFIG_SMP

static inline void redirect_intr(int cpu, struct ino_bucket *bp)
{
	/* Ok, here is what is going on:
	 * 1) Retargeting IRQs on Starfire is very
	 *    expensive so just forget about it on them.
	 * 2) Moving around very high priority interrupts
	 *    is a losing game.
	 * 3) If the current cpu is idle, interrupts are
	 *    useful work, so keep them here.  But do not
	 *    pass to our neighbour if he is not very idle.
	 * 4) If sysadmin explicitly asks for directed intrs,
	 *    Just Do It.
	 */
	struct irqaction *ap = bp->irq_info;
	cpumask_t cpu_mask;
	unsigned int buddy, ticks;

	cpu_mask = get_smpaff_in_irqaction(ap);
	cpus_and(cpu_mask, cpu_mask, cpu_online_map);
	if (cpus_empty(cpu_mask))
		cpu_mask = cpu_online_map;

	if (this_is_starfire != 0 ||
	    bp->pil >= 10 || current->pid == 0)
		goto out;

	/* 'cpu' is the MID (ie. UPAID), calculate the MID
	 * of our buddy.
	 */
	buddy = cpu + 1;
	if (buddy >= NR_CPUS)
		buddy = 0;

	ticks = 0;
	while (!cpu_isset(buddy, cpu_mask)) {
		if (++buddy >= NR_CPUS)
			buddy = 0;
		if (++ticks > NR_CPUS) {
			put_smpaff_in_irqaction(ap, CPU_MASK_NONE);
			goto out;
		}
	}

	if (buddy == cpu)
		goto out;

	/* Voo-doo programming. */
	if (cpu_data(buddy).idle_volume < FORWARD_VOLUME)
		goto out;

	/* This just so happens to be correct on Cheetah
	 * at the moment.
	 */
	buddy <<= 26;

	/* Push it to our buddy. */
	upa_writel(buddy | IMAP_VALID, bp->imap);

out:
	return;
}

#endif /* CONFIG_SMP */
void handler_irq(int irq, struct pt_regs *regs)
{
	struct ino_bucket *bp, *nbp;
	int cpu = smp_processor_id();

#ifndef CONFIG_SMP
	/*
	 * Check for TICK_INT on level 14 softint.
	 */
	{
		unsigned long clr_mask = 1 << irq;
		unsigned long tick_mask = tick_ops->softint_mask;

		if ((irq == 14) && (get_softint() & tick_mask)) {
			irq = 0;
			clr_mask = tick_mask;
		}
		clear_softint(clr_mask);
	}
#else
	int should_forward = 0;

	clear_softint(1 << irq);
#endif

	irq_enter();
	kstat_this_cpu.irqs[irq]++;

	/* Sliiiick... */
#ifndef CONFIG_SMP
	bp = ((irq != 0) ?
	      __bucket(xchg32(irq_work(cpu, irq), 0)) :
	      &pil0_dummy_bucket);
#else
	bp = __bucket(xchg32(irq_work(cpu, irq), 0));
#endif
	for ( ; bp != NULL; bp = nbp) {
		unsigned char flags = bp->flags;
		unsigned long random = 0;	/* wide enough for SA_SAMPLE_RANDOM */

		nbp = __bucket(bp->irq_chain);
		bp->irq_chain = 0;

		bp->flags |= IBF_INPROGRESS;

		if ((flags & IBF_ACTIVE) != 0) {
#ifdef CONFIG_PCI
			if ((flags & IBF_DMA_SYNC) != 0) {
				upa_readl(dma_sync_reg_table[bp->synctab_ent]);
				upa_readq(pci_dma_wsync);
			}
#endif
			if ((flags & IBF_MULTI) == 0) {
				struct irqaction *ap = bp->irq_info;
				int ret;

				ret = ap->handler(__irq(bp), ap->dev_id, regs);
				if (ret == IRQ_HANDLED)
					random |= ap->flags;
			} else {
				void **vector = (void **)bp->irq_info;
				int ent;

				for (ent = 0; ent < 4; ent++) {
					struct irqaction *ap = vector[ent];
					if (ap != NULL) {
						int ret;

						ret = ap->handler(__irq(bp),
								  ap->dev_id,
								  regs);
						if (ret == IRQ_HANDLED)
							random |= ap->flags;
					}
				}
			}
			/* Only the dummy bucket lacks IMAP/ICLR. */
			if (bp->pil != 0) {
#ifdef CONFIG_SMP
				if (should_forward) {
					redirect_intr(cpu, bp);
					should_forward = 0;
				}
#endif
				upa_writel(ICLR_IDLE, bp->iclr);

				/* Test and add entropy */
				if (random & SA_SAMPLE_RANDOM)
					add_interrupt_randomness(irq);
			}
		} else
			bp->pending = 1;

		bp->flags &= ~IBF_INPROGRESS;
	}
	irq_exit();
}
#ifdef CONFIG_BLK_DEV_FD
extern irqreturn_t floppy_interrupt(int, void *, struct pt_regs *);

/* XXX No easy way to include asm/floppy.h XXX */
extern unsigned char *pdma_vaddr;
extern unsigned long pdma_size;
extern volatile int doing_pdma;
extern unsigned long fdc_status;

irqreturn_t sparc_floppy_irq(int irq, void *dev_cookie, struct pt_regs *regs)
{
	if (likely(doing_pdma)) {
		void __iomem *stat = (void __iomem *) fdc_status;
		unsigned char *vaddr = pdma_vaddr;
		unsigned long size = pdma_size;
		u8 val;

		while (size) {
			val = readb(stat);
			if (unlikely(!(val & 0x80))) {
				pdma_vaddr = vaddr;
				pdma_size = size;
				return IRQ_HANDLED;
			}
			if (unlikely(!(val & 0x20))) {
				pdma_vaddr = vaddr;
				pdma_size = size;
				doing_pdma = 0;
				goto main_interrupt;
			}
			if (val & 0x40) {
				/* read */
				*vaddr++ = readb(stat + 1);
			} else {
				unsigned char data = *vaddr++;

				/* write */
				writeb(data, stat + 1);
			}
			size--;
		}

		pdma_vaddr = vaddr;
		pdma_size = size;

		/* Send Terminal Count pulse to floppy controller. */
		val = readb(auxio_register);
		val |= AUXIO_AUX1_FTCNT;
		writeb(val, auxio_register);
		val &= ~AUXIO_AUX1_FTCNT;
		writeb(val, auxio_register);

		doing_pdma = 0;
	}

main_interrupt:
	return floppy_interrupt(irq, dev_cookie, regs);
}
EXPORT_SYMBOL(sparc_floppy_irq);
#endif
/* We really don't need these at all on the Sparc.  We only have
 * stubs here because they are exported to modules.
 */
unsigned long probe_irq_on(void)
{
	return 0;
}

EXPORT_SYMBOL(probe_irq_on);

int probe_irq_off(unsigned long mask)
{
	return 0;
}

EXPORT_SYMBOL(probe_irq_off);
#ifdef CONFIG_SMP
static int retarget_one_irq(struct irqaction *p, int goal_cpu)
{
	struct ino_bucket *bucket = get_ino_in_irqaction(p) + ivector_table;
	unsigned long imap = bucket->imap;
	unsigned int tid;

	while (!cpu_online(goal_cpu)) {
		if (++goal_cpu >= NR_CPUS)
			goal_cpu = 0;
	}

	if (tlb_type == cheetah || tlb_type == cheetah_plus) {
		tid = goal_cpu << 26;
		tid &= IMAP_AID_SAFARI;
	} else if (this_is_starfire == 0) {
		tid = goal_cpu << 26;
		tid &= IMAP_TID_UPA;
	} else {
		tid = (starfire_translate(imap, goal_cpu) << 26);
		tid &= IMAP_TID_UPA;
	}
	upa_writel(tid | IMAP_VALID, imap);

	do {
		if (++goal_cpu >= NR_CPUS)
			goal_cpu = 0;
	} while (!cpu_online(goal_cpu));

	return goal_cpu;
}
/* Called from request_irq. */
static void distribute_irqs(void)
{
	unsigned long flags;
	int cpu, level;

	spin_lock_irqsave(&irq_action_lock, flags);
	cpu = 0;

	/*
	 * Skip the timer at [0], and very rare error/power intrs at [15].
	 * Also level [12], it causes problems on Ex000 systems.
	 */
	for (level = 1; level < NR_IRQS; level++) {
		struct irqaction *p = irq_action[level];

		if (level == 12)
			continue;
		while (p) {
			cpu = retarget_one_irq(p, cpu);
			p = p->next;
		}
	}
	spin_unlock_irqrestore(&irq_action_lock, flags);
}
#endif /* CONFIG_SMP */
struct sun5_timer *prom_timers;
static u64 prom_limit0, prom_limit1;

static void map_prom_timers(void)
{
	unsigned int addr[3];
	int tnode, err;

	/* PROM timer node hangs out in the top level of device siblings... */
	tnode = prom_finddevice("/counter-timer");

	/* Assume if node is not present, PROM uses different tick mechanism
	 * which we should not care about.
	 */
	if (tnode == 0 || tnode == -1) {
		prom_timers = (struct sun5_timer *) 0;
		return;
	}

	/* If PROM is really using this, it must be mapped by him. */
	err = prom_getproperty(tnode, "address", (char *)addr, sizeof(addr));
	if (err == -1) {
		prom_printf("PROM does not have timer mapped, trying to continue.\n");
		prom_timers = (struct sun5_timer *) 0;
		return;
	}
	prom_timers = (struct sun5_timer *) ((unsigned long)addr[0]);
}
static void kill_prom_timer(void)
{
	if (!prom_timers)
		return;

	/* Save them away for later. */
	prom_limit0 = prom_timers->limit0;
	prom_limit1 = prom_timers->limit1;

	/* Just as in sun4c/sun4m PROM uses timer which ticks at IRQ 14.
	 * We turn both off here just to be paranoid.
	 */
	prom_timers->limit0 = 0;
	prom_timers->limit1 = 0;

	/* Wheee, eat the interrupt packet too... */
	__asm__ __volatile__(
"	mov	0x40, %%g2\n"
"	ldxa	[%%g0] %0, %%g1\n"
"	ldxa	[%%g2] %1, %%g1\n"
"	stxa	%%g0, [%%g0] %0\n"
"	membar	#Sync\n"
	: /* no outputs */
	: "i" (ASI_INTR_RECEIVE), "i" (ASI_INTR_R)
	: "g1", "g2");
}
void enable_prom_timer(void)
{
	if (!prom_timers)
		return;

	/* Set it to whatever was there before. */
	prom_timers->limit1 = prom_limit1;
	prom_timers->count1 = 0;
	prom_timers->limit0 = prom_limit0;
	prom_timers->count0 = 0;
}
void init_irqwork_curcpu(void)
{
	register struct irq_work_struct *workp asm("o2");
	register unsigned long tmp asm("o3");
	int cpu = hard_smp_processor_id();

	memset(__irq_work + cpu, 0, sizeof(*workp));

	/* Make sure we are called with PSTATE_IE disabled. */
	__asm__ __volatile__("rdpr	%%pstate, %0\n\t"
			     : "=r" (tmp));
	if (tmp & PSTATE_IE) {
		prom_printf("BUG: init_irqwork_curcpu() called with "
			    "PSTATE_IE enabled, bailing.\n");
		__asm__ __volatile__("mov	%%i7, %0\n\t"
				     : "=r" (tmp));
		prom_printf("BUG: Called from %lx\n", tmp);
		prom_halt();
	}

	/* Set interrupt globals. */
	workp = &__irq_work[cpu];
	__asm__ __volatile__(
	"rdpr	%%pstate, %0\n\t"
	"wrpr	%0, %1, %%pstate\n\t"
	"mov	%2, %%g6\n\t"
	"wrpr	%0, 0x0, %%pstate\n\t"
	: "=&r" (tmp)
	: "i" (PSTATE_IG), "r" (workp));
}
/* Only invoked on boot processor. */
void __init init_IRQ(void)
{
	map_prom_timers();
	kill_prom_timer();
	memset(&ivector_table[0], 0, sizeof(ivector_table));

	/* We need to clear any IRQ's pending in the soft interrupt
	 * registers, a spurious one could be left around from the
	 * PROM timer which we just disabled.
	 */
	clear_softint(get_softint());

	/* Now that ivector table is initialized, it is safe
	 * to receive IRQ vector traps.  We will normally take
	 * one or two right now, in case some device PROM used
	 * to boot us wants to speak to us.  We just ignore them.
	 */
	__asm__ __volatile__("rdpr %%pstate, %%g1\n\t"
			     "or %%g1, %0, %%g1\n\t"
			     "wrpr %%g1, 0x0, %%pstate"
			     : /* No outputs */
			     : "i" (PSTATE_IE)
			     : "g1");
}
static struct proc_dir_entry * root_irq_dir;
static struct proc_dir_entry * irq_dir [NUM_IVECS];

#ifdef CONFIG_SMP

static int irq_affinity_read_proc (char *page, char **start, off_t off,
			int count, int *eof, void *data)
{
	struct ino_bucket *bp = ivector_table + (long)data;
	struct irqaction *ap = bp->irq_info;
	cpumask_t mask;
	int len;

	mask = get_smpaff_in_irqaction(ap);
	if (cpus_empty(mask))
		mask = cpu_online_map;

	len = cpumask_scnprintf(page, count, mask);
	if (count - len < 2)
		return -EINVAL;
	len += sprintf(page + len, "\n");
	return len;
}

static inline void set_intr_affinity(int irq, cpumask_t hw_aff)
{
	struct ino_bucket *bp = ivector_table + irq;

	/* Users specify affinity in terms of hw cpu ids.
	 * As soon as we do this, handler_irq() might see and take action.
	 */
	put_smpaff_in_irqaction((struct irqaction *)bp->irq_info, hw_aff);

	/* Migration is simply done by the next cpu to service this
	 * interrupt.
	 */
}

static int irq_affinity_write_proc (struct file *file, const char __user *buffer,
					unsigned long count, void *data)
{
	int irq = (long) data, full_count = count, err;
	cpumask_t new_value;

	err = cpumask_parse(buffer, count, new_value);
	if (err)
		return err;

	/*
	 * Do not allow disabling IRQs completely - it's too easy a
	 * way to make the system unusable accidentally :-) At least
	 * one online CPU still has to be targeted.
	 */
	cpus_and(new_value, new_value, cpu_online_map);
	if (cpus_empty(new_value))
		return -EINVAL;

	set_intr_affinity(irq, new_value);

	return full_count;
}

#endif /* CONFIG_SMP */

#define MAX_NAMELEN 10

static void register_irq_proc (unsigned int irq)
{
	char name [MAX_NAMELEN];

	if (!root_irq_dir || irq_dir[irq])
		return;

	memset(name, 0, MAX_NAMELEN);
	sprintf(name, "%x", irq);

	/* create /proc/irq/1234 */
	irq_dir[irq] = proc_mkdir(name, root_irq_dir);

#ifdef CONFIG_SMP
	/* XXX SMP affinity not supported on starfire yet. */
	if (this_is_starfire == 0) {
		struct proc_dir_entry *entry;

		/* create /proc/irq/1234/smp_affinity */
		entry = create_proc_entry("smp_affinity", 0600, irq_dir[irq]);

		if (entry) {
			entry->nlink = 1;
			entry->data = (void *)(long)irq;
			entry->read_proc = irq_affinity_read_proc;
			entry->write_proc = irq_affinity_write_proc;
		}
	}
#endif
}

void init_irq_proc (void)
{
	/* create /proc/irq */
	root_irq_dir = proc_mkdir("irq", NULL);
}