/* linux/irqdesc.h (blame-annotated extraction; table header artifacts removed) */
#ifndef _LINUX_IRQDESC_H
#define _LINUX_IRQDESC_H

/*
 * Core internal functions to deal with irq descriptors
 *
 * This include will move to kernel/irq once we cleaned up the tree.
 * For now it's included from <linux/irq.h>
 */

/* Forward declarations: only pointers to these are used below. */
struct irq_affinity_notify;
struct proc_dir_entry;
struct timer_rand_state;
e144710b TG |
/**
 * struct irq_desc - interrupt descriptor
 * @irq_data:		per irq and chip data passed down to chip functions
 * @timer_rand_state:	pointer to timer rand state struct
 * @kstat_irqs:		irq stats per cpu
 * @handle_irq:		highlevel irq-events handler [if NULL, __do_IRQ()]
 * @action:		the irq action chain
 * @status:		status information
 * @depth:		disable-depth, for nested irq_disable() calls
 * @wake_depth:		enable depth, for multiple set_irq_wake() callers
 * @irq_count:		stats field to detect stalled irqs
 * @last_unhandled:	aging timer for unhandled count
 * @irqs_unhandled:	stats field for spurious unhandled interrupts
 * @lock:		locking for SMP
 * @affinity_hint:	hint to user space for preferred irq affinity
 * @affinity_notify:	context for notification of affinity changes
 * @pending_mask:	pending rebalanced interrupts
 * @threads_active:	number of irqaction threads currently running
 * @wait_for_threads:	wait queue for sync_irq to wait for threaded handlers
 * @dir:		/proc/irq/ procfs entry
 * @name:		flow handler name for /proc/interrupts output
 */
struct irq_desc {

#ifdef CONFIG_GENERIC_HARDIRQS_NO_DEPRECATED
	struct irq_data		irq_data;
#else
	/*
	 * This union will go away, once we fixed the direct access to
	 * irq_desc all over the place. The direct fields are a 1:1
	 * overlay of irq_data.
	 */
	union {
		struct irq_data		irq_data;
		struct {
			unsigned int		irq;
			unsigned int		node;
			struct irq_chip		*chip;
			void			*handler_data;
			void			*chip_data;
			struct msi_desc		*msi_desc;
#ifdef CONFIG_SMP
			cpumask_var_t		affinity;
#endif
		};
	};
#endif

	struct timer_rand_state *timer_rand_state;
	unsigned int __percpu	*kstat_irqs;
	irq_flow_handler_t	handle_irq;
	struct irqaction	*action;	/* IRQ action list */
	unsigned int		status;		/* IRQ status */

	unsigned int		depth;		/* nested irq disables */
	unsigned int		wake_depth;	/* nested wake enables */
	unsigned int		irq_count;	/* For detecting broken IRQs */
	unsigned long		last_unhandled;	/* Aging timer for unhandled count */
	unsigned int		irqs_unhandled;
	raw_spinlock_t		lock;
#ifdef CONFIG_SMP
	const struct cpumask	*affinity_hint;
	struct irq_affinity_notify *affinity_notify;
#ifdef CONFIG_GENERIC_PENDING_IRQ
	cpumask_var_t		pending_mask;
#endif
#endif
	atomic_t		threads_active;
	wait_queue_head_t	wait_for_threads;
#ifdef CONFIG_PROC_FS
	struct proc_dir_entry	*dir;
#endif
	const char		*name;
} ____cacheline_internodealigned_in_smp;
87 | ||
e144710b TG |
/* Statically sized descriptor array, used only when SPARSE_IRQ is off. */
#ifndef CONFIG_SPARSE_IRQ
extern struct irq_desc irq_desc[NR_IRQS];
#endif
91 | ||
/* Will be removed once the last users in power and sh are gone */
extern struct irq_desc *irq_to_desc_alloc_node(unsigned int irq, int node);

/* Descriptors are no longer migrated between nodes: return @desc as-is. */
static inline struct irq_desc *move_irq_desc(struct irq_desc *desc, int node)
{
	return desc;
}
e144710b TG |
98 | |
99 | #ifdef CONFIG_GENERIC_HARDIRQS | |
100 | ||
a0cd9ca2 TG |
101 | static inline struct irq_chip *irq_desc_get_chip(struct irq_desc *desc) |
102 | { | |
103 | return desc->irq_data.chip; | |
104 | } | |
105 | ||
106 | static inline void *irq_desc_get_chip_data(struct irq_desc *desc) | |
107 | { | |
108 | return desc->irq_data.chip_data; | |
109 | } | |
110 | ||
111 | static inline void *irq_desc_get_handler_data(struct irq_desc *desc) | |
112 | { | |
113 | return desc->irq_data.handler_data; | |
114 | } | |
115 | ||
116 | static inline struct msi_desc *irq_desc_get_msi_desc(struct irq_desc *desc) | |
117 | { | |
118 | return desc->irq_data.msi_desc; | |
119 | } | |
120 | ||
#ifndef CONFIG_GENERIC_HARDIRQS_NO_COMPAT
/*
 * Old accessor names, kept for compatibility; thin wrappers around the
 * irq_desc_get_*() helpers above. Compiled out when the architecture
 * selects GENERIC_HARDIRQS_NO_COMPAT.
 */
static inline struct irq_chip *get_irq_desc_chip(struct irq_desc *desc)
{
	return irq_desc_get_chip(desc);
}

static inline void *get_irq_desc_data(struct irq_desc *desc)
{
	return irq_desc_get_handler_data(desc);
}

static inline void *get_irq_desc_chip_data(struct irq_desc *desc)
{
	return irq_desc_get_chip_data(desc);
}

static inline struct msi_desc *get_irq_desc_msi(struct irq_desc *desc)
{
	return irq_desc_get_msi_desc(desc);
}
#endif
e144710b | 141 | |
e144710b TG |
/*
 * Architectures call this to let the generic IRQ layer
 * handle an interrupt. If the descriptor is attached to an
 * irqchip-style controller then we call the ->handle_irq() handler,
 * and it calls __do_IRQ() if it's attached to an irqtype-style controller.
 */
static inline void generic_handle_irq_desc(unsigned int irq, struct irq_desc *desc)
{
	/* Caller guarantees desc->handle_irq is valid for this descriptor. */
	desc->handle_irq(irq, desc);
}
152 | ||
/* Look up the descriptor for @irq and invoke its flow handler. */
static inline void generic_handle_irq(unsigned int irq)
{
	struct irq_desc *desc = irq_to_desc(irq);

	generic_handle_irq_desc(irq, desc);
}
157 | ||
158 | /* Test to see if a driver has successfully requested an irq */ | |
159 | static inline int irq_has_action(unsigned int irq) | |
160 | { | |
161 | struct irq_desc *desc = irq_to_desc(irq); | |
162 | return desc->action != NULL; | |
163 | } | |
164 | ||
165 | static inline int irq_balancing_disabled(unsigned int irq) | |
166 | { | |
167 | struct irq_desc *desc; | |
168 | ||
169 | desc = irq_to_desc(irq); | |
170 | return desc->status & IRQ_NO_BALANCING_MASK; | |
171 | } | |
172 | ||
173 | /* caller has locked the irq_desc and both params are valid */ | |
174 | static inline void __set_irq_handler_unlocked(int irq, | |
175 | irq_flow_handler_t handler) | |
176 | { | |
177 | struct irq_desc *desc; | |
178 | ||
179 | desc = irq_to_desc(irq); | |
180 | desc->handle_irq = handler; | |
181 | } | |
#endif /* CONFIG_GENERIC_HARDIRQS */

#endif /* _LINUX_IRQDESC_H */