/*
 * Shared interrupt handling code for IPR and INTC2 types of IRQs.
 *
 * Copyright (C) 2007, 2008 Magnus Damm
 * Copyright (C) 2009, 2010 Paul Mundt
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License. See the file "COPYING" in the main directory of this archive
 * for more details.
 */
#include <linux/init.h>
#include <linux/irq.h>
#include <linux/spinlock.h>
#include "internals.h"

0f552393 | 16 | static unsigned long ack_handle[INTC_NR_IRQS]; |
2be6bb0c PM |
17 | |
18 | static intc_enum __init intc_grp_id(struct intc_desc *desc, | |
19 | intc_enum enum_id) | |
20 | { | |
21 | struct intc_group *g = desc->hw.groups; | |
22 | unsigned int i, j; | |
23 | ||
24 | for (i = 0; g && enum_id && i < desc->hw.nr_groups; i++) { | |
25 | g = desc->hw.groups + i; | |
26 | ||
27 | for (j = 0; g->enum_ids[j]; j++) { | |
28 | if (g->enum_ids[j] != enum_id) | |
29 | continue; | |
30 | ||
31 | return g->enum_id; | |
32 | } | |
33 | } | |
34 | ||
35 | return 0; | |
36 | } | |
37 | ||
38 | static unsigned int __init _intc_mask_data(struct intc_desc *desc, | |
39 | struct intc_desc_int *d, | |
40 | intc_enum enum_id, | |
41 | unsigned int *reg_idx, | |
42 | unsigned int *fld_idx) | |
43 | { | |
44 | struct intc_mask_reg *mr = desc->hw.mask_regs; | |
45 | unsigned int fn, mode; | |
46 | unsigned long reg_e, reg_d; | |
47 | ||
48 | while (mr && enum_id && *reg_idx < desc->hw.nr_mask_regs) { | |
49 | mr = desc->hw.mask_regs + *reg_idx; | |
50 | ||
51 | for (; *fld_idx < ARRAY_SIZE(mr->enum_ids); (*fld_idx)++) { | |
52 | if (mr->enum_ids[*fld_idx] != enum_id) | |
53 | continue; | |
54 | ||
55 | if (mr->set_reg && mr->clr_reg) { | |
56 | fn = REG_FN_WRITE_BASE; | |
57 | mode = MODE_DUAL_REG; | |
58 | reg_e = mr->clr_reg; | |
59 | reg_d = mr->set_reg; | |
60 | } else { | |
61 | fn = REG_FN_MODIFY_BASE; | |
62 | if (mr->set_reg) { | |
63 | mode = MODE_ENABLE_REG; | |
64 | reg_e = mr->set_reg; | |
65 | reg_d = mr->set_reg; | |
66 | } else { | |
67 | mode = MODE_MASK_REG; | |
68 | reg_e = mr->clr_reg; | |
69 | reg_d = mr->clr_reg; | |
70 | } | |
71 | } | |
72 | ||
73 | fn += (mr->reg_width >> 3) - 1; | |
74 | return _INTC_MK(fn, mode, | |
75 | intc_get_reg(d, reg_e), | |
76 | intc_get_reg(d, reg_d), | |
77 | 1, | |
78 | (mr->reg_width - 1) - *fld_idx); | |
79 | } | |
80 | ||
81 | *fld_idx = 0; | |
82 | (*reg_idx)++; | |
83 | } | |
84 | ||
85 | return 0; | |
86 | } | |
87 | ||
88 | unsigned int __init | |
89 | intc_get_mask_handle(struct intc_desc *desc, struct intc_desc_int *d, | |
90 | intc_enum enum_id, int do_grps) | |
91 | { | |
92 | unsigned int i = 0; | |
93 | unsigned int j = 0; | |
94 | unsigned int ret; | |
95 | ||
96 | ret = _intc_mask_data(desc, d, enum_id, &i, &j); | |
97 | if (ret) | |
98 | return ret; | |
99 | ||
100 | if (do_grps) | |
101 | return intc_get_mask_handle(desc, d, intc_grp_id(desc, enum_id), 0); | |
102 | ||
103 | return 0; | |
104 | } | |
105 | ||
106 | static unsigned int __init _intc_prio_data(struct intc_desc *desc, | |
107 | struct intc_desc_int *d, | |
108 | intc_enum enum_id, | |
109 | unsigned int *reg_idx, | |
110 | unsigned int *fld_idx) | |
111 | { | |
112 | struct intc_prio_reg *pr = desc->hw.prio_regs; | |
113 | unsigned int fn, n, mode, bit; | |
114 | unsigned long reg_e, reg_d; | |
115 | ||
116 | while (pr && enum_id && *reg_idx < desc->hw.nr_prio_regs) { | |
117 | pr = desc->hw.prio_regs + *reg_idx; | |
118 | ||
119 | for (; *fld_idx < ARRAY_SIZE(pr->enum_ids); (*fld_idx)++) { | |
120 | if (pr->enum_ids[*fld_idx] != enum_id) | |
121 | continue; | |
122 | ||
123 | if (pr->set_reg && pr->clr_reg) { | |
124 | fn = REG_FN_WRITE_BASE; | |
125 | mode = MODE_PCLR_REG; | |
126 | reg_e = pr->set_reg; | |
127 | reg_d = pr->clr_reg; | |
128 | } else { | |
129 | fn = REG_FN_MODIFY_BASE; | |
130 | mode = MODE_PRIO_REG; | |
131 | if (!pr->set_reg) | |
132 | BUG(); | |
133 | reg_e = pr->set_reg; | |
134 | reg_d = pr->set_reg; | |
135 | } | |
136 | ||
137 | fn += (pr->reg_width >> 3) - 1; | |
138 | n = *fld_idx + 1; | |
139 | ||
140 | BUG_ON(n * pr->field_width > pr->reg_width); | |
141 | ||
142 | bit = pr->reg_width - (n * pr->field_width); | |
143 | ||
144 | return _INTC_MK(fn, mode, | |
145 | intc_get_reg(d, reg_e), | |
146 | intc_get_reg(d, reg_d), | |
147 | pr->field_width, bit); | |
148 | } | |
149 | ||
150 | *fld_idx = 0; | |
151 | (*reg_idx)++; | |
152 | } | |
153 | ||
154 | return 0; | |
155 | } | |
156 | ||
157 | unsigned int __init | |
158 | intc_get_prio_handle(struct intc_desc *desc, struct intc_desc_int *d, | |
159 | intc_enum enum_id, int do_grps) | |
160 | { | |
161 | unsigned int i = 0; | |
162 | unsigned int j = 0; | |
163 | unsigned int ret; | |
164 | ||
165 | ret = _intc_prio_data(desc, d, enum_id, &i, &j); | |
166 | if (ret) | |
167 | return ret; | |
168 | ||
169 | if (do_grps) | |
170 | return intc_get_prio_handle(desc, d, intc_grp_id(desc, enum_id), 0); | |
171 | ||
172 | return 0; | |
173 | } | |
174 | ||
b448d6ad PM |
175 | static unsigned int intc_ack_data(struct intc_desc *desc, |
176 | struct intc_desc_int *d, intc_enum enum_id) | |
2be6bb0c PM |
177 | { |
178 | struct intc_mask_reg *mr = desc->hw.ack_regs; | |
179 | unsigned int i, j, fn, mode; | |
180 | unsigned long reg_e, reg_d; | |
181 | ||
182 | for (i = 0; mr && enum_id && i < desc->hw.nr_ack_regs; i++) { | |
183 | mr = desc->hw.ack_regs + i; | |
184 | ||
185 | for (j = 0; j < ARRAY_SIZE(mr->enum_ids); j++) { | |
186 | if (mr->enum_ids[j] != enum_id) | |
187 | continue; | |
188 | ||
189 | fn = REG_FN_MODIFY_BASE; | |
190 | mode = MODE_ENABLE_REG; | |
191 | reg_e = mr->set_reg; | |
192 | reg_d = mr->set_reg; | |
193 | ||
194 | fn += (mr->reg_width >> 3) - 1; | |
195 | return _INTC_MK(fn, mode, | |
196 | intc_get_reg(d, reg_e), | |
197 | intc_get_reg(d, reg_d), | |
198 | 1, | |
199 | (mr->reg_width - 1) - j); | |
200 | } | |
201 | } | |
202 | ||
203 | return 0; | |
204 | } | |
205 | ||
206 | static void intc_enable_disable(struct intc_desc_int *d, | |
207 | unsigned long handle, int do_enable) | |
208 | { | |
209 | unsigned long addr; | |
210 | unsigned int cpu; | |
211 | unsigned long (*fn)(unsigned long, unsigned long, | |
212 | unsigned long (*)(unsigned long, unsigned long, | |
213 | unsigned long), | |
214 | unsigned int); | |
215 | ||
216 | if (do_enable) { | |
217 | for (cpu = 0; cpu < SMP_NR(d, _INTC_ADDR_E(handle)); cpu++) { | |
218 | addr = INTC_REG(d, _INTC_ADDR_E(handle), cpu); | |
219 | fn = intc_enable_noprio_fns[_INTC_MODE(handle)]; | |
220 | fn(addr, handle, intc_reg_fns[_INTC_FN(handle)], 0); | |
221 | } | |
222 | } else { | |
223 | for (cpu = 0; cpu < SMP_NR(d, _INTC_ADDR_D(handle)); cpu++) { | |
224 | addr = INTC_REG(d, _INTC_ADDR_D(handle), cpu); | |
225 | fn = intc_disable_fns[_INTC_MODE(handle)]; | |
226 | fn(addr, handle, intc_reg_fns[_INTC_FN(handle)], 0); | |
227 | } | |
228 | } | |
229 | } | |
230 | ||
231 | void __init intc_enable_disable_enum(struct intc_desc *desc, | |
232 | struct intc_desc_int *d, | |
233 | intc_enum enum_id, int enable) | |
234 | { | |
235 | unsigned int i, j, data; | |
236 | ||
237 | /* go through and enable/disable all mask bits */ | |
238 | i = j = 0; | |
239 | do { | |
240 | data = _intc_mask_data(desc, d, enum_id, &i, &j); | |
241 | if (data) | |
242 | intc_enable_disable(d, data, enable); | |
243 | j++; | |
244 | } while (data); | |
245 | ||
246 | /* go through and enable/disable all priority fields */ | |
247 | i = j = 0; | |
248 | do { | |
249 | data = _intc_prio_data(desc, d, enum_id, &i, &j); | |
250 | if (data) | |
251 | intc_enable_disable(d, data, enable); | |
252 | ||
253 | j++; | |
254 | } while (data); | |
255 | } | |
256 | ||
257 | unsigned int __init | |
258 | intc_get_sense_handle(struct intc_desc *desc, struct intc_desc_int *d, | |
259 | intc_enum enum_id) | |
260 | { | |
261 | struct intc_sense_reg *sr = desc->hw.sense_regs; | |
262 | unsigned int i, j, fn, bit; | |
263 | ||
264 | for (i = 0; sr && enum_id && i < desc->hw.nr_sense_regs; i++) { | |
265 | sr = desc->hw.sense_regs + i; | |
266 | ||
267 | for (j = 0; j < ARRAY_SIZE(sr->enum_ids); j++) { | |
268 | if (sr->enum_ids[j] != enum_id) | |
269 | continue; | |
270 | ||
271 | fn = REG_FN_MODIFY_BASE; | |
272 | fn += (sr->reg_width >> 3) - 1; | |
273 | ||
274 | BUG_ON((j + 1) * sr->field_width > sr->reg_width); | |
275 | ||
276 | bit = sr->reg_width - ((j + 1) * sr->field_width); | |
277 | ||
278 | return _INTC_MK(fn, 0, intc_get_reg(d, sr->reg), | |
279 | 0, sr->field_width, bit); | |
280 | } | |
281 | } | |
282 | ||
283 | return 0; | |
284 | } | |
285 | ||
286 | ||
287 | void intc_set_ack_handle(unsigned int irq, struct intc_desc *desc, | |
288 | struct intc_desc_int *d, intc_enum id) | |
289 | { | |
290 | unsigned long flags; | |
291 | ||
292 | /* | |
293 | * Nothing to do for this IRQ. | |
294 | */ | |
295 | if (!desc->hw.ack_regs) | |
296 | return; | |
297 | ||
298 | raw_spin_lock_irqsave(&intc_big_lock, flags); | |
299 | ack_handle[irq] = intc_ack_data(desc, d, id); | |
300 | raw_spin_unlock_irqrestore(&intc_big_lock, flags); | |
301 | } | |
302 | ||
303 | unsigned long intc_get_ack_handle(unsigned int irq) | |
304 | { | |
305 | return ack_handle[irq]; | |
306 | } |