/*
 * Copyright (C) 1992, 1998-2006 Linus Torvalds, Ingo Molnar
 * Copyright (C) 2005-2006, Thomas Gleixner, Russell King
 *
 * This file contains the interrupt descriptor management code. Detailed
 * information is available in Documentation/core-api/genericirq.rst
 *
 */
#include <linux/irq.h>
#include <linux/slab.h>
#include <linux/export.h>
#include <linux/interrupt.h>
#include <linux/kernel_stat.h>
#include <linux/radix-tree.h>
#include <linux/bitmap.h>
#include <linux/irqdomain.h>
#include <linux/sysfs.h>

#include "internals.h"

/*
 * lockdep: we want to handle all irq_desc locks as a single lock-class:
 */
static struct lock_class_key irq_desc_lock_class;

#if defined(CONFIG_SMP)
static int __init irq_affinity_setup(char *str)
{
	alloc_bootmem_cpumask_var(&irq_default_affinity);
	cpulist_parse(str, irq_default_affinity);
	/*
	 * Set at least the boot cpu. We don't want to end up with
	 * bug reports caused by random command line masks.
	 */
	cpumask_set_cpu(smp_processor_id(), irq_default_affinity);
	return 1;
}
__setup("irqaffinity=", irq_affinity_setup);
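
/*
 * Example: booting with "irqaffinity=0-3" restricts the default
 * affinity mask of newly set up interrupts to CPUs 0-3.
 */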

static void __init init_irq_default_affinity(void)
{
	if (!cpumask_available(irq_default_affinity))
		zalloc_cpumask_var(&irq_default_affinity, GFP_NOWAIT);
	if (cpumask_empty(irq_default_affinity))
		cpumask_setall(irq_default_affinity);
}
#else
static void __init init_irq_default_affinity(void)
{
}
#endif

#ifdef CONFIG_SMP
static int alloc_masks(struct irq_desc *desc, int node)
{
	if (!zalloc_cpumask_var_node(&desc->irq_common_data.affinity,
				     GFP_KERNEL, node))
		return -ENOMEM;

#ifdef CONFIG_GENERIC_IRQ_EFFECTIVE_AFF_MASK
	if (!zalloc_cpumask_var_node(&desc->irq_common_data.effective_affinity,
				     GFP_KERNEL, node)) {
		free_cpumask_var(desc->irq_common_data.affinity);
		return -ENOMEM;
	}
#endif

#ifdef CONFIG_GENERIC_PENDING_IRQ
	if (!zalloc_cpumask_var_node(&desc->pending_mask, GFP_KERNEL, node)) {
#ifdef CONFIG_GENERIC_IRQ_EFFECTIVE_AFF_MASK
		free_cpumask_var(desc->irq_common_data.effective_affinity);
#endif
		free_cpumask_var(desc->irq_common_data.affinity);
		return -ENOMEM;
	}
#endif
	return 0;
}

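/*
 * desc_smp_init - Initialize the SMP related fields of a descriptor:
 * copy @affinity (or irq_default_affinity when @affinity is NULL),
 * clear any pending migration mask and record the NUMA @node.
 */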
static void desc_smp_init(struct irq_desc *desc, int node,
			  const struct cpumask *affinity)
{
	if (!affinity)
		affinity = irq_default_affinity;
	cpumask_copy(desc->irq_common_data.affinity, affinity);

#ifdef CONFIG_GENERIC_PENDING_IRQ
	cpumask_clear(desc->pending_mask);
#endif
#ifdef CONFIG_NUMA
	desc->irq_common_data.node = node;
#endif
}

#else
static inline int
alloc_masks(struct irq_desc *desc, int node) { return 0; }
static inline void
desc_smp_init(struct irq_desc *desc, int node, const struct cpumask *affinity) { }
#endif

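/*
 * desc_set_defaults - Reset a descriptor to its default state: no chip,
 * no handler data, handle_bad_irq() as flow handler, disabled and
 * masked at depth 1, and the per cpu statistics cleared.
 */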
static void desc_set_defaults(unsigned int irq, struct irq_desc *desc, int node,
			      const struct cpumask *affinity, struct module *owner)
{
	int cpu;

	desc->irq_common_data.handler_data = NULL;
	desc->irq_common_data.msi_desc = NULL;

	desc->irq_data.common = &desc->irq_common_data;
	desc->irq_data.irq = irq;
	desc->irq_data.chip = &no_irq_chip;
	desc->irq_data.chip_data = NULL;
	irq_settings_clr_and_set(desc, ~0, _IRQ_DEFAULT_INIT_FLAGS);
	irqd_set(&desc->irq_data, IRQD_IRQ_DISABLED);
	irqd_set(&desc->irq_data, IRQD_IRQ_MASKED);
	desc->handle_irq = handle_bad_irq;
	desc->depth = 1;
	desc->irq_count = 0;
	desc->irqs_unhandled = 0;
	desc->name = NULL;
	desc->owner = owner;
	for_each_possible_cpu(cpu)
		*per_cpu_ptr(desc->kstat_irqs, cpu) = 0;
	desc_smp_init(desc, node, affinity);
}

int nr_irqs = NR_IRQS;
EXPORT_SYMBOL_GPL(nr_irqs);

static DEFINE_MUTEX(sparse_irq_lock);
static DECLARE_BITMAP(allocated_irqs, IRQ_BITMAP_BITS);

#ifdef CONFIG_SPARSE_IRQ

static void irq_kobj_release(struct kobject *kobj);

#ifdef CONFIG_SYSFS
static struct kobject *irq_kobj_base;

#define IRQ_ATTR_RO(_name) \
static struct kobj_attribute _name##_attr = __ATTR_RO(_name)

static ssize_t per_cpu_count_show(struct kobject *kobj,
				  struct kobj_attribute *attr, char *buf)
{
	struct irq_desc *desc = container_of(kobj, struct irq_desc, kobj);
	int cpu, irq = desc->irq_data.irq;
	ssize_t ret = 0;
	char *p = "";

	for_each_possible_cpu(cpu) {
		unsigned int c = kstat_irqs_cpu(irq, cpu);

		ret += scnprintf(buf + ret, PAGE_SIZE - ret, "%s%u", p, c);
		p = ",";
	}

	ret += scnprintf(buf + ret, PAGE_SIZE - ret, "\n");
	return ret;
}
IRQ_ATTR_RO(per_cpu_count);

static ssize_t chip_name_show(struct kobject *kobj,
			      struct kobj_attribute *attr, char *buf)
{
	struct irq_desc *desc = container_of(kobj, struct irq_desc, kobj);
	ssize_t ret = 0;

	raw_spin_lock_irq(&desc->lock);
	if (desc->irq_data.chip && desc->irq_data.chip->name) {
		ret = scnprintf(buf, PAGE_SIZE, "%s\n",
				desc->irq_data.chip->name);
	}
	raw_spin_unlock_irq(&desc->lock);

	return ret;
}
IRQ_ATTR_RO(chip_name);

static ssize_t hwirq_show(struct kobject *kobj,
			  struct kobj_attribute *attr, char *buf)
{
	struct irq_desc *desc = container_of(kobj, struct irq_desc, kobj);
	ssize_t ret = 0;

	raw_spin_lock_irq(&desc->lock);
	if (desc->irq_data.domain)
		ret = sprintf(buf, "%d\n", (int)desc->irq_data.hwirq);
	raw_spin_unlock_irq(&desc->lock);

	return ret;
}
IRQ_ATTR_RO(hwirq);

static ssize_t type_show(struct kobject *kobj,
			 struct kobj_attribute *attr, char *buf)
{
	struct irq_desc *desc = container_of(kobj, struct irq_desc, kobj);
	ssize_t ret = 0;

	raw_spin_lock_irq(&desc->lock);
	ret = sprintf(buf, "%s\n",
		      irqd_is_level_type(&desc->irq_data) ? "level" : "edge");
	raw_spin_unlock_irq(&desc->lock);

	return ret;
}
IRQ_ATTR_RO(type);

static ssize_t wakeup_show(struct kobject *kobj,
			   struct kobj_attribute *attr, char *buf)
{
	struct irq_desc *desc = container_of(kobj, struct irq_desc, kobj);
	ssize_t ret = 0;

	raw_spin_lock_irq(&desc->lock);
	ret = sprintf(buf, "%s\n",
		      irqd_is_wakeup_set(&desc->irq_data) ? "enabled" : "disabled");
	raw_spin_unlock_irq(&desc->lock);

	return ret;
}
IRQ_ATTR_RO(wakeup);

static ssize_t name_show(struct kobject *kobj,
			 struct kobj_attribute *attr, char *buf)
{
	struct irq_desc *desc = container_of(kobj, struct irq_desc, kobj);
	ssize_t ret = 0;

	raw_spin_lock_irq(&desc->lock);
	if (desc->name)
		ret = scnprintf(buf, PAGE_SIZE, "%s\n", desc->name);
	raw_spin_unlock_irq(&desc->lock);

	return ret;
}
IRQ_ATTR_RO(name);

static ssize_t actions_show(struct kobject *kobj,
			    struct kobj_attribute *attr, char *buf)
{
	struct irq_desc *desc = container_of(kobj, struct irq_desc, kobj);
	struct irqaction *action;
	ssize_t ret = 0;
	char *p = "";

	raw_spin_lock_irq(&desc->lock);
	for (action = desc->action; action != NULL; action = action->next) {
		ret += scnprintf(buf + ret, PAGE_SIZE - ret, "%s%s",
				 p, action->name);
		p = ",";
	}
	raw_spin_unlock_irq(&desc->lock);

	if (ret)
		ret += scnprintf(buf + ret, PAGE_SIZE - ret, "\n");

	return ret;
}
IRQ_ATTR_RO(actions);

static struct attribute *irq_attrs[] = {
	&per_cpu_count_attr.attr,
	&chip_name_attr.attr,
	&hwirq_attr.attr,
	&type_attr.attr,
	&wakeup_attr.attr,
	&name_attr.attr,
	&actions_attr.attr,
	NULL
};

static struct kobj_type irq_kobj_type = {
	.release	= irq_kobj_release,
	.sysfs_ops	= &kobj_sysfs_ops,
	.default_attrs	= irq_attrs,
};

static void irq_sysfs_add(int irq, struct irq_desc *desc)
{
	if (irq_kobj_base) {
		/*
		 * Continue even in case of failure as this is nothing
		 * crucial.
		 */
		if (kobject_add(&desc->kobj, irq_kobj_base, "%d", irq))
			pr_warn("Failed to add kobject for irq %d\n", irq);
	}
}

static int __init irq_sysfs_init(void)
{
	struct irq_desc *desc;
	int irq;

	/* Prevent concurrent irq alloc/free */
	irq_lock_sparse();

	irq_kobj_base = kobject_create_and_add("irq", kernel_kobj);
	if (!irq_kobj_base) {
		irq_unlock_sparse();
		return -ENOMEM;
	}

	/* Add the already allocated interrupts */
	for_each_irq_desc(irq, desc)
		irq_sysfs_add(irq, desc);
	irq_unlock_sparse();

	return 0;
}
postcore_initcall(irq_sysfs_init);
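/*
 * The attributes above appear under /sys/kernel/irq/<irq>/, e.g.
 * "cat /sys/kernel/irq/9/actions" (any allocated irq number works)
 * lists the names of the installed actions.
 */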

#else /* !CONFIG_SYSFS */

static struct kobj_type irq_kobj_type = {
	.release	= irq_kobj_release,
};

static void irq_sysfs_add(int irq, struct irq_desc *desc) {}

#endif /* CONFIG_SYSFS */

static RADIX_TREE(irq_desc_tree, GFP_KERNEL);

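/*
 * In sparse mode the irq number to descriptor mapping is kept in the
 * radix tree above. The helpers below insert, look up and remove
 * entries.
 */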
static void irq_insert_desc(unsigned int irq, struct irq_desc *desc)
{
	radix_tree_insert(&irq_desc_tree, irq, desc);
}

struct irq_desc *irq_to_desc(unsigned int irq)
{
	return radix_tree_lookup(&irq_desc_tree, irq);
}
EXPORT_SYMBOL(irq_to_desc);

static void delete_irq_desc(unsigned int irq)
{
	radix_tree_delete(&irq_desc_tree, irq);
}

#ifdef CONFIG_SMP
static void free_masks(struct irq_desc *desc)
{
#ifdef CONFIG_GENERIC_PENDING_IRQ
	free_cpumask_var(desc->pending_mask);
#endif
	free_cpumask_var(desc->irq_common_data.affinity);
#ifdef CONFIG_GENERIC_IRQ_EFFECTIVE_AFF_MASK
	free_cpumask_var(desc->irq_common_data.effective_affinity);
#endif
}
#else
static inline void free_masks(struct irq_desc *desc) { }
#endif

void irq_lock_sparse(void)
{
	mutex_lock(&sparse_irq_lock);
}

void irq_unlock_sparse(void)
{
	mutex_unlock(&sparse_irq_lock);
}

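/*
 * alloc_desc - Allocate and initialize a single irq descriptor on
 * @node. @flags are set on the irq_data, @affinity is used as the
 * initial affinity hint and @owner as the owning module. Returns NULL
 * when one of the allocations fails.
 */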
static struct irq_desc *alloc_desc(int irq, int node, unsigned int flags,
				   const struct cpumask *affinity,
				   struct module *owner)
{
	struct irq_desc *desc;

	desc = kzalloc_node(sizeof(*desc), GFP_KERNEL, node);
	if (!desc)
		return NULL;
	/* allocate based on nr_cpu_ids */
	desc->kstat_irqs = alloc_percpu(unsigned int);
	if (!desc->kstat_irqs)
		goto err_desc;

	if (alloc_masks(desc, node))
		goto err_kstat;

	raw_spin_lock_init(&desc->lock);
	lockdep_set_class(&desc->lock, &irq_desc_lock_class);
	mutex_init(&desc->request_mutex);
	init_rcu_head(&desc->rcu);

	desc_set_defaults(irq, desc, node, affinity, owner);
	irqd_set(&desc->irq_data, flags);
	kobject_init(&desc->kobj, &irq_kobj_type);

	return desc;

err_kstat:
	free_percpu(desc->kstat_irqs);
err_desc:
	kfree(desc);
	return NULL;
}

static void irq_kobj_release(struct kobject *kobj)
{
	struct irq_desc *desc = container_of(kobj, struct irq_desc, kobj);

	free_masks(desc);
	free_percpu(desc->kstat_irqs);
	kfree(desc);
}

static void delayed_free_desc(struct rcu_head *rhp)
{
	struct irq_desc *desc = container_of(rhp, struct irq_desc, rcu);

	kobject_put(&desc->kobj);
}

static void free_desc(unsigned int irq)
{
	struct irq_desc *desc = irq_to_desc(irq);

	irq_remove_debugfs_entry(desc);
	unregister_irq_proc(irq, desc);

	/*
	 * sparse_irq_lock also protects show_interrupts() and
	 * kstat_irqs_usr(). Once the descriptor is deleted from the
	 * sparse tree it can be freed; lookups from /proc will then
	 * simply fail.
	 *
	 * The sysfs entry must be serialized against a concurrent
	 * irq_sysfs_init() as well.
	 */
	kobject_del(&desc->kobj);
	delete_irq_desc(irq);

	/*
	 * We free the descriptor, masks and stat fields via RCU. That
	 * allows demultiplex interrupts to do rcu based management of
	 * the child interrupts.
	 */
	call_rcu(&desc->rcu, delayed_free_desc);
}

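/*
 * alloc_descs - Allocate and register @cnt descriptors for the linear
 * irq range starting at @start. When an @affinity array is supplied,
 * each descriptor is allocated on the node of the first cpu in its
 * mask and marked as affinity managed.
 */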
static int alloc_descs(unsigned int start, unsigned int cnt, int node,
		       const struct cpumask *affinity, struct module *owner)
{
	const struct cpumask *mask = NULL;
	struct irq_desc *desc;
	unsigned int flags;
	int i;

	/* Validate affinity mask(s) */
	if (affinity) {
		for (i = 0, mask = affinity; i < cnt; i++, mask++) {
			if (cpumask_empty(mask))
				return -EINVAL;
		}
	}

	flags = affinity ? IRQD_AFFINITY_MANAGED | IRQD_MANAGED_SHUTDOWN : 0;
	mask = NULL;

	for (i = 0; i < cnt; i++) {
		if (affinity) {
			node = cpu_to_node(cpumask_first(affinity));
			mask = affinity;
			affinity++;
		}
		desc = alloc_desc(start + i, node, flags, mask, owner);
		if (!desc)
			goto err;
		irq_insert_desc(start + i, desc);
		irq_sysfs_add(start + i, desc);
		irq_add_debugfs_entry(start + i, desc);
	}
	bitmap_set(allocated_irqs, start, cnt);
	return start;

err:
	for (i--; i >= 0; i--)
		free_desc(start + i);
	return -ENOMEM;
}

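/*
 * irq_expand_nr_irqs - Grow nr_irqs up to the compile time maximum
 * IRQ_BITMAP_BITS. Beyond that the allocation bitmap cannot track the
 * descriptors, so the expansion fails with -ENOMEM.
 */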
static int irq_expand_nr_irqs(unsigned int nr)
{
	if (nr > IRQ_BITMAP_BITS)
		return -ENOMEM;
	nr_irqs = nr;
	return 0;
}

int __init early_irq_init(void)
{
	int i, initcnt, node = first_online_node;
	struct irq_desc *desc;

	init_irq_default_affinity();

	/* Let arch update nr_irqs and return the nr of preallocated irqs */
	initcnt = arch_probe_nr_irqs();
	printk(KERN_INFO "NR_IRQS: %d, nr_irqs: %d, preallocated irqs: %d\n",
	       NR_IRQS, nr_irqs, initcnt);

	if (WARN_ON(nr_irqs > IRQ_BITMAP_BITS))
		nr_irqs = IRQ_BITMAP_BITS;

	if (WARN_ON(initcnt > IRQ_BITMAP_BITS))
		initcnt = IRQ_BITMAP_BITS;

	if (initcnt > nr_irqs)
		nr_irqs = initcnt;

	for (i = 0; i < initcnt; i++) {
		desc = alloc_desc(i, node, 0, NULL, NULL);
		set_bit(i, allocated_irqs);
		irq_insert_desc(i, desc);
	}
	return arch_early_irq_init();
}

#else /* !CONFIG_SPARSE_IRQ */

struct irq_desc irq_desc[NR_IRQS] __cacheline_aligned_in_smp = {
	[0 ... NR_IRQS-1] = {
		.handle_irq	= handle_bad_irq,
		.depth		= 1,
		.lock		= __RAW_SPIN_LOCK_UNLOCKED(irq_desc->lock),
	}
};

int __init early_irq_init(void)
{
	int count, i, node = first_online_node;
	struct irq_desc *desc;

	init_irq_default_affinity();

	printk(KERN_INFO "NR_IRQS: %d\n", NR_IRQS);

	desc = irq_desc;
	count = ARRAY_SIZE(irq_desc);

	for (i = 0; i < count; i++) {
		desc[i].kstat_irqs = alloc_percpu(unsigned int);
		alloc_masks(&desc[i], node);
		raw_spin_lock_init(&desc[i].lock);
		lockdep_set_class(&desc[i].lock, &irq_desc_lock_class);
		desc_set_defaults(i, &desc[i], node, NULL, NULL);
	}
	return arch_early_irq_init();
}

struct irq_desc *irq_to_desc(unsigned int irq)
{
	return (irq < NR_IRQS) ? irq_desc + irq : NULL;
}
EXPORT_SYMBOL(irq_to_desc);

static void free_desc(unsigned int irq)
{
	struct irq_desc *desc = irq_to_desc(irq);
	unsigned long flags;

	raw_spin_lock_irqsave(&desc->lock, flags);
	desc_set_defaults(irq, desc, irq_desc_get_node(desc), NULL, NULL);
	raw_spin_unlock_irqrestore(&desc->lock, flags);
}

static inline int alloc_descs(unsigned int start, unsigned int cnt, int node,
			      const struct cpumask *affinity,
			      struct module *owner)
{
	u32 i;

	for (i = 0; i < cnt; i++) {
		struct irq_desc *desc = irq_to_desc(start + i);

		desc->owner = owner;
	}
	bitmap_set(allocated_irqs, start, cnt);
	return start;
}

static int irq_expand_nr_irqs(unsigned int nr)
{
	return -ENOMEM;
}

void irq_mark_irq(unsigned int irq)
{
	mutex_lock(&sparse_irq_lock);
	bitmap_set(allocated_irqs, irq, 1);
	mutex_unlock(&sparse_irq_lock);
}

#ifdef CONFIG_GENERIC_IRQ_LEGACY
void irq_init_desc(unsigned int irq)
{
	free_desc(irq);
}
#endif

#endif /* !CONFIG_SPARSE_IRQ */

/**
 * generic_handle_irq - Invoke the handler for a particular irq
 * @irq:	The irq number to handle
 *
 * Returns 0 on success, or -EINVAL if there is no descriptor for @irq.
 */
int generic_handle_irq(unsigned int irq)
{
	struct irq_desc *desc = irq_to_desc(irq);

	if (!desc)
		return -EINVAL;
	generic_handle_irq_desc(desc);
	return 0;
}
EXPORT_SYMBOL_GPL(generic_handle_irq);
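/*
 * A typical caller is a chained demultiplex handler which translates
 * the hardware irq number via the irqdomain code first, e.g.:
 *
 *	generic_handle_irq(irq_find_mapping(domain, hwirq));
 */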

#ifdef CONFIG_HANDLE_DOMAIN_IRQ
/**
 * __handle_domain_irq - Invoke the handler for a HW irq belonging to a domain
 * @domain:	The domain where to perform the lookup
 * @hwirq:	The HW irq number to convert to a logical one
 * @lookup:	Whether to perform the domain lookup or not
 * @regs:	Register file coming from the low-level handling code
 *
 * Returns:	0 on success, or -EINVAL if conversion has failed
 */
int __handle_domain_irq(struct irq_domain *domain, unsigned int hwirq,
			bool lookup, struct pt_regs *regs)
{
	struct pt_regs *old_regs = set_irq_regs(regs);
	unsigned int irq = hwirq;
	int ret = 0;

	irq_enter();

#ifdef CONFIG_IRQ_DOMAIN
	if (lookup)
		irq = irq_find_mapping(domain, hwirq);
#endif

	/*
	 * Some hardware gives randomly wrong interrupts. Rather
	 * than crashing, do something sensible.
	 */
	if (unlikely(!irq || irq >= nr_irqs)) {
		ack_bad_irq(irq);
		ret = -EINVAL;
	} else {
		generic_handle_irq(irq);
	}

	irq_exit();
	set_irq_regs(old_regs);
	return ret;
}
#endif

/* Dynamic interrupt handling */

/**
 * irq_free_descs - free irq descriptors
 * @from:	Start of descriptor range
 * @cnt:	Number of consecutive irqs to free
 */
void irq_free_descs(unsigned int from, unsigned int cnt)
{
	int i;

	if (from >= nr_irqs || (from + cnt) > nr_irqs)
		return;

	mutex_lock(&sparse_irq_lock);
	for (i = 0; i < cnt; i++)
		free_desc(from + i);

	bitmap_clear(allocated_irqs, from, cnt);
	mutex_unlock(&sparse_irq_lock);
}
EXPORT_SYMBOL_GPL(irq_free_descs);

/**
 * __irq_alloc_descs - allocate and initialize a range of irq descriptors
 * @irq:	Allocate for specific irq number if irq >= 0
 * @from:	Start the search from this irq number
 * @cnt:	Number of consecutive irqs to allocate.
 * @node:	Preferred node on which the irq descriptor should be allocated
 * @owner:	Owning module (can be NULL)
 * @affinity:	Optional pointer to an affinity mask array of size @cnt which
 *		hints where the irq descriptors should be allocated and which
 *		default affinities to use
 *
 * Returns the first irq number or error code
 */
int __ref
__irq_alloc_descs(int irq, unsigned int from, unsigned int cnt, int node,
		  struct module *owner, const struct cpumask *affinity)
{
	int start, ret;

	if (!cnt)
		return -EINVAL;

	if (irq >= 0) {
		if (from > irq)
			return -EINVAL;
		from = irq;
	} else {
		/*
		 * For interrupts which are freely allocated the
		 * architecture can force a lower bound to the @from
		 * argument. x86 uses this to exclude the GSI space.
		 */
		from = arch_dynirq_lower_bound(from);
	}

	mutex_lock(&sparse_irq_lock);

	start = bitmap_find_next_zero_area(allocated_irqs, IRQ_BITMAP_BITS,
					   from, cnt, 0);
	ret = -EEXIST;
	if (irq >= 0 && start != irq)
		goto unlock;

	if (start + cnt > nr_irqs) {
		ret = irq_expand_nr_irqs(start + cnt);
		if (ret)
			goto unlock;
	}
	ret = alloc_descs(start, cnt, node, affinity, owner);
unlock:
	mutex_unlock(&sparse_irq_lock);
	return ret;
}
EXPORT_SYMBOL_GPL(__irq_alloc_descs);
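/*
 * Most users go through the irq_alloc_descs() / irq_alloc_desc()
 * convenience macros from <linux/irq.h>, for example:
 *
 *	irq = irq_alloc_descs(-1, 0, 4, numa_node_id());
 *
 * which allocates four consecutive descriptors at the first free spot
 * with THIS_MODULE as owner and no affinity hint.
 */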

#ifdef CONFIG_GENERIC_IRQ_LEGACY_ALLOC_HWIRQ
/**
 * irq_alloc_hwirqs - Allocate an irq descriptor and initialize the hardware
 * @cnt:	number of interrupts to allocate
 * @node:	node on which to allocate
 *
 * Returns an interrupt number > 0 or 0, if the allocation fails.
 */
unsigned int irq_alloc_hwirqs(int cnt, int node)
{
	int i, irq = __irq_alloc_descs(-1, 0, cnt, node, NULL, NULL);

	if (irq < 0)
		return 0;

	for (i = irq; cnt > 0; i++, cnt--) {
		if (arch_setup_hwirq(i, node))
			goto err;
		irq_clear_status_flags(i, _IRQ_NOREQUEST);
	}
	return irq;

err:
	for (i--; i >= irq; i--) {
		irq_set_status_flags(i, _IRQ_NOREQUEST | _IRQ_NOPROBE);
		arch_teardown_hwirq(i);
	}
	irq_free_descs(irq, cnt);
	return 0;
}
EXPORT_SYMBOL_GPL(irq_alloc_hwirqs);

/**
 * irq_free_hwirqs - Free irq descriptor and cleanup the hardware
 * @from:	Free from irq number
 * @cnt:	number of interrupts to free
 */
void irq_free_hwirqs(unsigned int from, int cnt)
{
	int i, j;

	for (i = from, j = cnt; j > 0; i++, j--) {
		irq_set_status_flags(i, _IRQ_NOREQUEST | _IRQ_NOPROBE);
		arch_teardown_hwirq(i);
	}
	irq_free_descs(from, cnt);
}
EXPORT_SYMBOL_GPL(irq_free_hwirqs);
#endif

/**
 * irq_get_next_irq - get next allocated irq number
 * @offset:	where to start the search
 *
 * Returns next irq number after offset or nr_irqs if none is found.
 */
unsigned int irq_get_next_irq(unsigned int offset)
{
	return find_next_bit(allocated_irqs, nr_irqs, offset);
}

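/*
 * __irq_get_desc_lock - Look up the descriptor for @irq and lock it.
 * When @check contains _IRQ_DESC_CHECK, the descriptor must match the
 * _IRQ_DESC_PERCPU expectation, otherwise NULL is returned. If @bus is
 * set, the chip bus lock is taken before desc->lock.
 */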
struct irq_desc *
__irq_get_desc_lock(unsigned int irq, unsigned long *flags, bool bus,
		    unsigned int check)
{
	struct irq_desc *desc = irq_to_desc(irq);

	if (desc) {
		if (check & _IRQ_DESC_CHECK) {
			if ((check & _IRQ_DESC_PERCPU) &&
			    !irq_settings_is_per_cpu_devid(desc))
				return NULL;

			if (!(check & _IRQ_DESC_PERCPU) &&
			    irq_settings_is_per_cpu_devid(desc))
				return NULL;
		}

		if (bus)
			chip_bus_lock(desc);
		raw_spin_lock_irqsave(&desc->lock, *flags);
	}
	return desc;
}

void __irq_put_desc_unlock(struct irq_desc *desc, unsigned long flags, bool bus)
{
	raw_spin_unlock_irqrestore(&desc->lock, flags);
	if (bus)
		chip_bus_sync_unlock(desc);
}

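/*
 * irq_set_percpu_devid_partition - Mark @irq as a per cpu devid
 * interrupt, allocate the percpu_enabled tracking mask and record the
 * partition @affinity (cpu_possible_mask when @affinity is NULL).
 */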
int irq_set_percpu_devid_partition(unsigned int irq,
				   const struct cpumask *affinity)
{
	struct irq_desc *desc = irq_to_desc(irq);

	if (!desc)
		return -EINVAL;

	if (desc->percpu_enabled)
		return -EINVAL;

	desc->percpu_enabled = kzalloc(sizeof(*desc->percpu_enabled), GFP_KERNEL);

	if (!desc->percpu_enabled)
		return -ENOMEM;

	if (affinity)
		desc->percpu_affinity = affinity;
	else
		desc->percpu_affinity = cpu_possible_mask;

	irq_set_percpu_devid_flags(irq);
	return 0;
}

int irq_set_percpu_devid(unsigned int irq)
{
	return irq_set_percpu_devid_partition(irq, NULL);
}

int irq_get_percpu_devid_partition(unsigned int irq, struct cpumask *affinity)
{
	struct irq_desc *desc = irq_to_desc(irq);

	if (!desc || !desc->percpu_enabled)
		return -EINVAL;

	if (affinity)
		cpumask_copy(affinity, desc->percpu_affinity);

	return 0;
}
EXPORT_SYMBOL_GPL(irq_get_percpu_devid_partition);

void kstat_incr_irq_this_cpu(unsigned int irq)
{
	kstat_incr_irqs_this_cpu(irq_to_desc(irq));
}

/**
 * kstat_irqs_cpu - Get the statistics for an interrupt on a cpu
 * @irq:	The interrupt number
 * @cpu:	The cpu number
 *
 * Returns the sum of interrupt counts on @cpu since boot for
 * @irq. The caller must ensure that the interrupt is not removed
 * concurrently.
 */
unsigned int kstat_irqs_cpu(unsigned int irq, int cpu)
{
	struct irq_desc *desc = irq_to_desc(irq);

	return desc && desc->kstat_irqs ?
			*per_cpu_ptr(desc->kstat_irqs, cpu) : 0;
}

/**
 * kstat_irqs - Get the statistics for an interrupt
 * @irq:	The interrupt number
 *
 * Returns the sum of interrupt counts on all cpus since boot for
 * @irq. The caller must ensure that the interrupt is not removed
 * concurrently.
 */
unsigned int kstat_irqs(unsigned int irq)
{
	struct irq_desc *desc = irq_to_desc(irq);
	int cpu;
	unsigned int sum = 0;

	if (!desc || !desc->kstat_irqs)
		return 0;
	for_each_possible_cpu(cpu)
		sum += *per_cpu_ptr(desc->kstat_irqs, cpu);
	return sum;
}

/**
 * kstat_irqs_usr - Get the statistics for an interrupt
 * @irq:	The interrupt number
 *
 * Returns the sum of interrupt counts on all cpus since boot for
 * @irq. Contrary to kstat_irqs() this can be called from any
 * preemptible context. It's protected against concurrent removal of
 * an interrupt descriptor when sparse irqs are enabled.
 */
unsigned int kstat_irqs_usr(unsigned int irq)
{
	unsigned int sum;

	irq_lock_sparse();
	sum = kstat_irqs(irq);
	irq_unlock_sparse();
	return sum;
}