/* hardirq.h: PA-RISC hard IRQ support.
 *
 * Copyright (C) 2001 Matthew Wilcox <matthew@wil.cx>
 *
 * The locking is really quite interesting. There's a cpu-local
 * count of how many interrupts are being handled, and a global
 * lock. An interrupt can only be serviced if the global lock
 * is free. You can't be sure no more interrupts are being
 * serviced until you've acquired the lock and then checked
 * all the per-cpu interrupt counts are all zero. It's a specialised
 * br_lock, and that's exactly how Sparc does it. We don't because
 * it's more locking for us. This way is lock-free in the interrupt path.
 */

#ifndef _PARISC_HARDIRQ_H
#define _PARISC_HARDIRQ_H

#include <linux/threads.h>
#include <linux/irq.h>

21 | typedef struct { | |
22 | unsigned long __softirq_pending; /* set_bit is used on this */ | |
23 | } ____cacheline_aligned irq_cpustat_t; | |

#include <linux/irq_cpustat.h>	/* Standard mappings for irq_cpustat_t above */

/* Invoked for an IRQ with no handler; implementation lives outside this header. */
void ack_bad_irq(unsigned int irq);

#endif /* _PARISC_HARDIRQ_H */