// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 1992, 1998-2006 Linus Torvalds, Ingo Molnar
 * Copyright (C) 2005-2006, Thomas Gleixner, Russell King
 *
 * This file contains the interrupt descriptor management code. Detailed
 * information is available in Documentation/core-api/genericirq.rst
 *
 */
#include <linux/irq.h>
#include <linux/slab.h>
#include <linux/export.h>
#include <linux/interrupt.h>
#include <linux/kernel_stat.h>
#include <linux/radix-tree.h>
#include <linux/bitmap.h>
#include <linux/irqdomain.h>
#include <linux/sysfs.h>

#include "internals.h"

/*
 * lockdep: we want to handle all irq_desc locks as a single lock-class:
 */
static struct lock_class_key irq_desc_lock_class;

#if defined(CONFIG_SMP)
static int __init irq_affinity_setup(char *str)
{
        alloc_bootmem_cpumask_var(&irq_default_affinity);
        cpulist_parse(str, irq_default_affinity);
        /*
         * Set at least the boot cpu. We don't want to end up with
         * bug reports caused by random command line masks.
         */
        cpumask_set_cpu(smp_processor_id(), irq_default_affinity);
        return 1;
}
__setup("irqaffinity=", irq_affinity_setup);

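/*
 * Example (illustrative, not part of this file): booting with
 *
 *      irqaffinity=0-3
 *
 * limits the default affinity mask of newly set up interrupts to
 * CPUs 0-3. The argument is parsed with cpulist_parse(), so the usual
 * cpulist notation ("0,4-7", ...) applies; the boot CPU is always
 * added back above to guard against nonsensical masks.
 */
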
static void __init init_irq_default_affinity(void)
{
        if (!cpumask_available(irq_default_affinity))
                zalloc_cpumask_var(&irq_default_affinity, GFP_NOWAIT);
        if (cpumask_empty(irq_default_affinity))
                cpumask_setall(irq_default_affinity);
}
#else
static void __init init_irq_default_affinity(void)
{
}
#endif

#ifdef CONFIG_SMP
static int alloc_masks(struct irq_desc *desc, int node)
{
        if (!zalloc_cpumask_var_node(&desc->irq_common_data.affinity,
                                     GFP_KERNEL, node))
                return -ENOMEM;

#ifdef CONFIG_GENERIC_IRQ_EFFECTIVE_AFF_MASK
        if (!zalloc_cpumask_var_node(&desc->irq_common_data.effective_affinity,
                                     GFP_KERNEL, node)) {
                free_cpumask_var(desc->irq_common_data.affinity);
                return -ENOMEM;
        }
#endif

#ifdef CONFIG_GENERIC_PENDING_IRQ
        if (!zalloc_cpumask_var_node(&desc->pending_mask, GFP_KERNEL, node)) {
#ifdef CONFIG_GENERIC_IRQ_EFFECTIVE_AFF_MASK
                free_cpumask_var(desc->irq_common_data.effective_affinity);
#endif
                free_cpumask_var(desc->irq_common_data.affinity);
                return -ENOMEM;
        }
#endif
        return 0;
}

static void desc_smp_init(struct irq_desc *desc, int node,
                          const struct cpumask *affinity)
{
        if (!affinity)
                affinity = irq_default_affinity;
        cpumask_copy(desc->irq_common_data.affinity, affinity);

#ifdef CONFIG_GENERIC_PENDING_IRQ
        cpumask_clear(desc->pending_mask);
#endif
#ifdef CONFIG_NUMA
        desc->irq_common_data.node = node;
#endif
}

#else
static inline int
alloc_masks(struct irq_desc *desc, int node) { return 0; }
static inline void
desc_smp_init(struct irq_desc *desc, int node, const struct cpumask *affinity) { }
#endif

static void desc_set_defaults(unsigned int irq, struct irq_desc *desc, int node,
                              const struct cpumask *affinity, struct module *owner)
{
        int cpu;

        desc->irq_common_data.handler_data = NULL;
        desc->irq_common_data.msi_desc = NULL;

        desc->irq_data.common = &desc->irq_common_data;
        desc->irq_data.irq = irq;
        desc->irq_data.chip = &no_irq_chip;
        desc->irq_data.chip_data = NULL;
        irq_settings_clr_and_set(desc, ~0, _IRQ_DEFAULT_INIT_FLAGS);
        irqd_set(&desc->irq_data, IRQD_IRQ_DISABLED);
        irqd_set(&desc->irq_data, IRQD_IRQ_MASKED);
        desc->handle_irq = handle_bad_irq;
        desc->depth = 1;
        desc->irq_count = 0;
        desc->irqs_unhandled = 0;
        desc->name = NULL;
        desc->owner = owner;
        for_each_possible_cpu(cpu)
                *per_cpu_ptr(desc->kstat_irqs, cpu) = 0;
        desc_smp_init(desc, node, affinity);
}

int nr_irqs = NR_IRQS;
EXPORT_SYMBOL_GPL(nr_irqs);

static DEFINE_MUTEX(sparse_irq_lock);
static DECLARE_BITMAP(allocated_irqs, IRQ_BITMAP_BITS);

#ifdef CONFIG_SPARSE_IRQ

static void irq_kobj_release(struct kobject *kobj);

#ifdef CONFIG_SYSFS
static struct kobject *irq_kobj_base;

#define IRQ_ATTR_RO(_name) \
static struct kobj_attribute _name##_attr = __ATTR_RO(_name)

static ssize_t per_cpu_count_show(struct kobject *kobj,
                                  struct kobj_attribute *attr, char *buf)
{
        struct irq_desc *desc = container_of(kobj, struct irq_desc, kobj);
        int cpu, irq = desc->irq_data.irq;
        ssize_t ret = 0;
        char *p = "";

        for_each_possible_cpu(cpu) {
                unsigned int c = kstat_irqs_cpu(irq, cpu);

                ret += scnprintf(buf + ret, PAGE_SIZE - ret, "%s%u", p, c);
                p = ",";
        }

        ret += scnprintf(buf + ret, PAGE_SIZE - ret, "\n");
        return ret;
}
IRQ_ATTR_RO(per_cpu_count);

static ssize_t chip_name_show(struct kobject *kobj,
                              struct kobj_attribute *attr, char *buf)
{
        struct irq_desc *desc = container_of(kobj, struct irq_desc, kobj);
        ssize_t ret = 0;

        raw_spin_lock_irq(&desc->lock);
        if (desc->irq_data.chip && desc->irq_data.chip->name) {
                ret = scnprintf(buf, PAGE_SIZE, "%s\n",
                                desc->irq_data.chip->name);
        }
        raw_spin_unlock_irq(&desc->lock);

        return ret;
}
IRQ_ATTR_RO(chip_name);

static ssize_t hwirq_show(struct kobject *kobj,
                          struct kobj_attribute *attr, char *buf)
{
        struct irq_desc *desc = container_of(kobj, struct irq_desc, kobj);
        ssize_t ret = 0;

        raw_spin_lock_irq(&desc->lock);
        if (desc->irq_data.domain)
                ret = sprintf(buf, "%d\n", (int)desc->irq_data.hwirq);
        raw_spin_unlock_irq(&desc->lock);

        return ret;
}
IRQ_ATTR_RO(hwirq);

static ssize_t type_show(struct kobject *kobj,
                         struct kobj_attribute *attr, char *buf)
{
        struct irq_desc *desc = container_of(kobj, struct irq_desc, kobj);
        ssize_t ret = 0;

        raw_spin_lock_irq(&desc->lock);
        ret = sprintf(buf, "%s\n",
                      irqd_is_level_type(&desc->irq_data) ? "level" : "edge");
        raw_spin_unlock_irq(&desc->lock);

        return ret;
}
IRQ_ATTR_RO(type);

static ssize_t wakeup_show(struct kobject *kobj,
                           struct kobj_attribute *attr, char *buf)
{
        struct irq_desc *desc = container_of(kobj, struct irq_desc, kobj);
        ssize_t ret = 0;

        raw_spin_lock_irq(&desc->lock);
        ret = sprintf(buf, "%s\n",
                      irqd_is_wakeup_set(&desc->irq_data) ? "enabled" : "disabled");
        raw_spin_unlock_irq(&desc->lock);

        return ret;
}
IRQ_ATTR_RO(wakeup);

static ssize_t name_show(struct kobject *kobj,
                         struct kobj_attribute *attr, char *buf)
{
        struct irq_desc *desc = container_of(kobj, struct irq_desc, kobj);
        ssize_t ret = 0;

        raw_spin_lock_irq(&desc->lock);
        if (desc->name)
                ret = scnprintf(buf, PAGE_SIZE, "%s\n", desc->name);
        raw_spin_unlock_irq(&desc->lock);

        return ret;
}
IRQ_ATTR_RO(name);

static ssize_t actions_show(struct kobject *kobj,
                            struct kobj_attribute *attr, char *buf)
{
        struct irq_desc *desc = container_of(kobj, struct irq_desc, kobj);
        struct irqaction *action;
        ssize_t ret = 0;
        char *p = "";

        raw_spin_lock_irq(&desc->lock);
        for (action = desc->action; action != NULL; action = action->next) {
                ret += scnprintf(buf + ret, PAGE_SIZE - ret, "%s%s",
                                 p, action->name);
                p = ",";
        }
        raw_spin_unlock_irq(&desc->lock);

        if (ret)
                ret += scnprintf(buf + ret, PAGE_SIZE - ret, "\n");

        return ret;
}
IRQ_ATTR_RO(actions);

static struct attribute *irq_attrs[] = {
        &per_cpu_count_attr.attr,
        &chip_name_attr.attr,
        &hwirq_attr.attr,
        &type_attr.attr,
        &wakeup_attr.attr,
        &name_attr.attr,
        &actions_attr.attr,
        NULL
};

static struct kobj_type irq_kobj_type = {
        .release        = irq_kobj_release,
        .sysfs_ops      = &kobj_sysfs_ops,
        .default_attrs  = irq_attrs,
};

static void irq_sysfs_add(int irq, struct irq_desc *desc)
{
        if (irq_kobj_base) {
                /*
                 * Continue even in case of failure as this is nothing
                 * crucial.
                 */
                if (kobject_add(&desc->kobj, irq_kobj_base, "%d", irq))
                        pr_warn("Failed to add kobject for irq %d\n", irq);
        }
}

static int __init irq_sysfs_init(void)
{
        struct irq_desc *desc;
        int irq;

        /* Prevent concurrent irq alloc/free */
        irq_lock_sparse();

        irq_kobj_base = kobject_create_and_add("irq", kernel_kobj);
        if (!irq_kobj_base) {
                irq_unlock_sparse();
                return -ENOMEM;
        }

        /* Add the already allocated interrupts */
        for_each_irq_desc(irq, desc)
                irq_sysfs_add(irq, desc);
        irq_unlock_sparse();

        return 0;
}
postcore_initcall(irq_sysfs_init);

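/*
 * Resulting layout (illustrative): every allocated interrupt appears
 * as /sys/kernel/irq/<irq>/ carrying the read-only attributes defined
 * above, e.g.
 *
 *      $ cat /sys/kernel/irq/9/type
 *      level
 *
 * The value shown is just an example and depends on the platform.
 */
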
#else /* !CONFIG_SYSFS */

static struct kobj_type irq_kobj_type = {
        .release = irq_kobj_release,
};

static void irq_sysfs_add(int irq, struct irq_desc *desc) {}

#endif /* CONFIG_SYSFS */

static RADIX_TREE(irq_desc_tree, GFP_KERNEL);

static void irq_insert_desc(unsigned int irq, struct irq_desc *desc)
{
        radix_tree_insert(&irq_desc_tree, irq, desc);
}

struct irq_desc *irq_to_desc(unsigned int irq)
{
        return radix_tree_lookup(&irq_desc_tree, irq);
}
EXPORT_SYMBOL(irq_to_desc);

static void delete_irq_desc(unsigned int irq)
{
        radix_tree_delete(&irq_desc_tree, irq);
}

#ifdef CONFIG_SMP
static void free_masks(struct irq_desc *desc)
{
#ifdef CONFIG_GENERIC_PENDING_IRQ
        free_cpumask_var(desc->pending_mask);
#endif
        free_cpumask_var(desc->irq_common_data.affinity);
#ifdef CONFIG_GENERIC_IRQ_EFFECTIVE_AFF_MASK
        free_cpumask_var(desc->irq_common_data.effective_affinity);
#endif
}
#else
static inline void free_masks(struct irq_desc *desc) { }
#endif

/* Protect the allocated_irqs bitmap and the descriptor radix tree */
void irq_lock_sparse(void)
{
        mutex_lock(&sparse_irq_lock);
}

void irq_unlock_sparse(void)
{
        mutex_unlock(&sparse_irq_lock);
}

static struct irq_desc *alloc_desc(int irq, int node, unsigned int flags,
                                   const struct cpumask *affinity,
                                   struct module *owner)
{
        struct irq_desc *desc;

        desc = kzalloc_node(sizeof(*desc), GFP_KERNEL, node);
        if (!desc)
                return NULL;
        /* allocate based on nr_cpu_ids */
        desc->kstat_irqs = alloc_percpu(unsigned int);
        if (!desc->kstat_irqs)
                goto err_desc;

        if (alloc_masks(desc, node))
                goto err_kstat;

        raw_spin_lock_init(&desc->lock);
        lockdep_set_class(&desc->lock, &irq_desc_lock_class);
        mutex_init(&desc->request_mutex);
        init_rcu_head(&desc->rcu);

        desc_set_defaults(irq, desc, node, affinity, owner);
        irqd_set(&desc->irq_data, flags);
        kobject_init(&desc->kobj, &irq_kobj_type);

        return desc;

err_kstat:
        free_percpu(desc->kstat_irqs);
err_desc:
        kfree(desc);
        return NULL;
}

static void irq_kobj_release(struct kobject *kobj)
{
        struct irq_desc *desc = container_of(kobj, struct irq_desc, kobj);

        free_masks(desc);
        free_percpu(desc->kstat_irqs);
        kfree(desc);
}

static void delayed_free_desc(struct rcu_head *rhp)
{
        struct irq_desc *desc = container_of(rhp, struct irq_desc, rcu);

        kobject_put(&desc->kobj);
}

static void free_desc(unsigned int irq)
{
        struct irq_desc *desc = irq_to_desc(irq);

        irq_remove_debugfs_entry(desc);
        unregister_irq_proc(irq, desc);

        /*
         * sparse_irq_lock protects also show_interrupts() and
         * kstat_irqs_usr(). Once we deleted the descriptor from the
         * sparse tree we can free it. Access in proc will fail to
         * lookup the descriptor.
         *
         * The sysfs entry must be serialized against a concurrent
         * irq_sysfs_init() as well.
         */
        kobject_del(&desc->kobj);
        delete_irq_desc(irq);

        /*
         * We free the descriptor, masks and stat fields via RCU. That
         * allows demultiplex interrupts to do rcu based management of
         * the child interrupts.
         * This also allows us to use rcu in kstat_irqs_usr().
         */
        call_rcu(&desc->rcu, delayed_free_desc);
}

static int alloc_descs(unsigned int start, unsigned int cnt, int node,
                       const struct irq_affinity_desc *affinity,
                       struct module *owner)
{
        struct irq_desc *desc;
        int i;

        /* Validate affinity mask(s) */
        if (affinity) {
                for (i = 0; i < cnt; i++) {
                        if (cpumask_empty(&affinity[i].mask))
                                return -EINVAL;
                }
        }

        for (i = 0; i < cnt; i++) {
                const struct cpumask *mask = NULL;
                unsigned int flags = 0;

                if (affinity) {
                        if (affinity->is_managed) {
                                flags = IRQD_AFFINITY_MANAGED |
                                        IRQD_MANAGED_SHUTDOWN;
                        }
                        mask = &affinity->mask;
                        node = cpu_to_node(cpumask_first(mask));
                        affinity++;
                }

                desc = alloc_desc(start + i, node, flags, mask, owner);
                if (!desc)
                        goto err;
                irq_insert_desc(start + i, desc);
                irq_sysfs_add(start + i, desc);
                irq_add_debugfs_entry(start + i, desc);
        }
        bitmap_set(allocated_irqs, start, cnt);
        return start;

err:
        for (i--; i >= 0; i--)
                free_desc(start + i);
        return -ENOMEM;
}

static int irq_expand_nr_irqs(unsigned int nr)
{
        if (nr > IRQ_BITMAP_BITS)
                return -ENOMEM;
        nr_irqs = nr;
        return 0;
}

int __init early_irq_init(void)
{
        int i, initcnt, node = first_online_node;
        struct irq_desc *desc;

        init_irq_default_affinity();

        /* Let arch update nr_irqs and return the nr of preallocated irqs */
        initcnt = arch_probe_nr_irqs();
        printk(KERN_INFO "NR_IRQS: %d, nr_irqs: %d, preallocated irqs: %d\n",
               NR_IRQS, nr_irqs, initcnt);

        if (WARN_ON(nr_irqs > IRQ_BITMAP_BITS))
                nr_irqs = IRQ_BITMAP_BITS;

        if (WARN_ON(initcnt > IRQ_BITMAP_BITS))
                initcnt = IRQ_BITMAP_BITS;

        if (initcnt > nr_irqs)
                nr_irqs = initcnt;

        for (i = 0; i < initcnt; i++) {
                desc = alloc_desc(i, node, 0, NULL, NULL);
                set_bit(i, allocated_irqs);
                irq_insert_desc(i, desc);
        }
        return arch_early_irq_init();
}

#else /* !CONFIG_SPARSE_IRQ */

struct irq_desc irq_desc[NR_IRQS] __cacheline_aligned_in_smp = {
        [0 ... NR_IRQS-1] = {
                .handle_irq     = handle_bad_irq,
                .depth          = 1,
                .lock           = __RAW_SPIN_LOCK_UNLOCKED(irq_desc->lock),
        }
};

int __init early_irq_init(void)
{
        int count, i, node = first_online_node;
        struct irq_desc *desc;

        init_irq_default_affinity();

        printk(KERN_INFO "NR_IRQS: %d\n", NR_IRQS);

        desc = irq_desc;
        count = ARRAY_SIZE(irq_desc);

        for (i = 0; i < count; i++) {
                desc[i].kstat_irqs = alloc_percpu(unsigned int);
                alloc_masks(&desc[i], node);
                raw_spin_lock_init(&desc[i].lock);
                lockdep_set_class(&desc[i].lock, &irq_desc_lock_class);
                desc_set_defaults(i, &desc[i], node, NULL, NULL);
        }
        return arch_early_irq_init();
}

struct irq_desc *irq_to_desc(unsigned int irq)
{
        return (irq < NR_IRQS) ? irq_desc + irq : NULL;
}
EXPORT_SYMBOL(irq_to_desc);

static void free_desc(unsigned int irq)
{
        struct irq_desc *desc = irq_to_desc(irq);
        unsigned long flags;

        raw_spin_lock_irqsave(&desc->lock, flags);
        desc_set_defaults(irq, desc, irq_desc_get_node(desc), NULL, NULL);
        raw_spin_unlock_irqrestore(&desc->lock, flags);
}

static inline int alloc_descs(unsigned int start, unsigned int cnt, int node,
                              const struct irq_affinity_desc *affinity,
                              struct module *owner)
{
        u32 i;

        for (i = 0; i < cnt; i++) {
                struct irq_desc *desc = irq_to_desc(start + i);

                desc->owner = owner;
        }
        bitmap_set(allocated_irqs, start, cnt);
        return start;
}

static int irq_expand_nr_irqs(unsigned int nr)
{
        return -ENOMEM;
}

void irq_mark_irq(unsigned int irq)
{
        mutex_lock(&sparse_irq_lock);
        bitmap_set(allocated_irqs, irq, 1);
        mutex_unlock(&sparse_irq_lock);
}

#ifdef CONFIG_GENERIC_IRQ_LEGACY
void irq_init_desc(unsigned int irq)
{
        free_desc(irq);
}
#endif

#endif /* !CONFIG_SPARSE_IRQ */

/**
 * generic_handle_irq - Invoke the handler for a particular irq
 * @irq: The irq number to handle
 */
int generic_handle_irq(unsigned int irq)
{
        struct irq_desc *desc = irq_to_desc(irq);

        if (!desc)
                return -EINVAL;
        generic_handle_irq_desc(desc);
        return 0;
}
EXPORT_SYMBOL_GPL(generic_handle_irq);

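/*
 * Illustrative only: a typical caller is a demultiplexing ("chained")
 * handler which reads the pending register of a secondary interrupt
 * controller and forwards every set bit as a Linux interrupt. The
 * demo_combiner/DEMO_PENDING names are made up for this sketch.
 *
 *      static void demo_combiner_handler(struct irq_desc *desc)
 *      {
 *              struct demo_combiner *priv = irq_desc_get_handler_data(desc);
 *              unsigned long pending = readl(priv->base + DEMO_PENDING);
 *              int bit;
 *
 *              for_each_set_bit(bit, &pending, 32)
 *                      generic_handle_irq(irq_find_mapping(priv->domain, bit));
 *      }
 */
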
#ifdef CONFIG_HANDLE_DOMAIN_IRQ
/**
 * __handle_domain_irq - Invoke the handler for a HW irq belonging to a domain
 * @domain: The domain where to perform the lookup
 * @hwirq: The HW irq number to convert to a logical one
 * @lookup: Whether to perform the domain lookup or not
 * @regs: Register file coming from the low-level handling code
 *
 * Returns: 0 on success, or -EINVAL if conversion has failed
 */
int __handle_domain_irq(struct irq_domain *domain, unsigned int hwirq,
                        bool lookup, struct pt_regs *regs)
{
        struct pt_regs *old_regs = set_irq_regs(regs);
        unsigned int irq = hwirq;
        int ret = 0;

        irq_enter();

#ifdef CONFIG_IRQ_DOMAIN
        if (lookup)
                irq = irq_find_mapping(domain, hwirq);
#endif

        /*
         * Some hardware gives randomly wrong interrupts. Rather
         * than crashing, do something sensible.
         */
        if (unlikely(!irq || irq >= nr_irqs)) {
                ack_bad_irq(irq);
                ret = -EINVAL;
        } else {
                generic_handle_irq(irq);
        }

        irq_exit();
        set_irq_regs(old_regs);
        return ret;
}
#endif

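/*
 * Illustrative only: architecture entry code usually reaches this via
 * the handle_domain_irq() wrapper from <linux/irqdesc.h>, which passes
 * lookup = true. A root irqchip driver might do (demo_* names are
 * hypothetical):
 *
 *      static void demo_handle_irq(struct pt_regs *regs)
 *      {
 *              u32 hwirq = readl(demo_base + DEMO_CLAIM_REG);
 *
 *              handle_domain_irq(demo_domain, hwirq, regs);
 *      }
 */
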
/* Dynamic interrupt handling */

/**
 * irq_free_descs - free irq descriptors
 * @from: Start of descriptor range
 * @cnt: Number of consecutive irqs to free
 */
void irq_free_descs(unsigned int from, unsigned int cnt)
{
        int i;

        if (from >= nr_irqs || (from + cnt) > nr_irqs)
                return;

        mutex_lock(&sparse_irq_lock);
        for (i = 0; i < cnt; i++)
                free_desc(from + i);

        bitmap_clear(allocated_irqs, from, cnt);
        mutex_unlock(&sparse_irq_lock);
}
EXPORT_SYMBOL_GPL(irq_free_descs);

/**
 * __irq_alloc_descs - allocate and initialize a range of irq descriptors
 * @irq: Allocate for specific irq number if irq >= 0
 * @from: Start the search from this irq number
 * @cnt: Number of consecutive irqs to allocate.
 * @node: Preferred node on which the irq descriptor should be allocated
 * @owner: Owning module (can be NULL)
 * @affinity: Optional pointer to an affinity mask array of size @cnt which
 *            hints where the irq descriptors should be allocated and which
 *            default affinities to use
 *
 * Returns the first irq number or error code
 */
int __ref
__irq_alloc_descs(int irq, unsigned int from, unsigned int cnt, int node,
                  struct module *owner, const struct irq_affinity_desc *affinity)
{
        int start, ret;

        if (!cnt)
                return -EINVAL;

        if (irq >= 0) {
                if (from > irq)
                        return -EINVAL;
                from = irq;
        } else {
                /*
                 * For interrupts which are freely allocated the
                 * architecture can force a lower bound to the @from
                 * argument. x86 uses this to exclude the GSI space.
                 */
                from = arch_dynirq_lower_bound(from);
        }

        mutex_lock(&sparse_irq_lock);

        start = bitmap_find_next_zero_area(allocated_irqs, IRQ_BITMAP_BITS,
                                           from, cnt, 0);
        ret = -EEXIST;
        if (irq >= 0 && start != irq)
                goto unlock;

        if (start + cnt > nr_irqs) {
                ret = irq_expand_nr_irqs(start + cnt);
                if (ret)
                        goto unlock;
        }
        ret = alloc_descs(start, cnt, node, affinity, owner);
unlock:
        mutex_unlock(&sparse_irq_lock);
        return ret;
}
EXPORT_SYMBOL_GPL(__irq_alloc_descs);

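/*
 * Illustrative only: most callers go through the irq_alloc_descs()
 * convenience macro from <linux/irq.h>, which supplies THIS_MODULE and
 * a NULL affinity array. A minimal sketch:
 *
 *      int irq = irq_alloc_descs(-1, 0, 4, numa_node_id());
 *
 *      if (irq < 0)
 *              return irq;     // no free range of 4 descriptors
 *      ...
 *      irq_free_descs(irq, 4);
 */
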
#ifdef CONFIG_GENERIC_IRQ_LEGACY_ALLOC_HWIRQ
/**
 * irq_alloc_hwirqs - Allocate an irq descriptor and initialize the hardware
 * @cnt: number of interrupts to allocate
 * @node: node on which to allocate
 *
 * Returns an interrupt number > 0 or 0, if the allocation fails.
 */
unsigned int irq_alloc_hwirqs(int cnt, int node)
{
        int i, irq = __irq_alloc_descs(-1, 0, cnt, node, NULL, NULL);

        if (irq < 0)
                return 0;

        for (i = irq; cnt > 0; i++, cnt--) {
                if (arch_setup_hwirq(i, node))
                        goto err;
                irq_clear_status_flags(i, _IRQ_NOREQUEST);
        }
        return irq;

err:
        for (i--; i >= irq; i--) {
                irq_set_status_flags(i, _IRQ_NOREQUEST | _IRQ_NOPROBE);
                arch_teardown_hwirq(i);
        }
        irq_free_descs(irq, cnt);
        return 0;
}
EXPORT_SYMBOL_GPL(irq_alloc_hwirqs);

/**
 * irq_free_hwirqs - Free irq descriptor and cleanup the hardware
 * @from: Free from irq number
 * @cnt: number of interrupts to free
 */
void irq_free_hwirqs(unsigned int from, int cnt)
{
        int i, j;

        for (i = from, j = cnt; j > 0; i++, j--) {
                irq_set_status_flags(i, _IRQ_NOREQUEST | _IRQ_NOPROBE);
                arch_teardown_hwirq(i);
        }
        irq_free_descs(from, cnt);
}
EXPORT_SYMBOL_GPL(irq_free_hwirqs);
#endif

/**
 * irq_get_next_irq - get next allocated irq number
 * @offset: where to start the search
 *
 * Returns next irq number after offset or nr_irqs if none is found.
 */
unsigned int irq_get_next_irq(unsigned int offset)
{
        return find_next_bit(allocated_irqs, nr_irqs, offset);
}

struct irq_desc *
__irq_get_desc_lock(unsigned int irq, unsigned long *flags, bool bus,
                    unsigned int check)
{
        struct irq_desc *desc = irq_to_desc(irq);

        if (desc) {
                if (check & _IRQ_DESC_CHECK) {
                        if ((check & _IRQ_DESC_PERCPU) &&
                            !irq_settings_is_per_cpu_devid(desc))
                                return NULL;

                        if (!(check & _IRQ_DESC_PERCPU) &&
                            irq_settings_is_per_cpu_devid(desc))
                                return NULL;
                }

                if (bus)
                        chip_bus_lock(desc);
                raw_spin_lock_irqsave(&desc->lock, *flags);
        }
        return desc;
}

void __irq_put_desc_unlock(struct irq_desc *desc, unsigned long flags, bool bus)
{
        raw_spin_unlock_irqrestore(&desc->lock, flags);
        if (bus)
                chip_bus_sync_unlock(desc);
}

int irq_set_percpu_devid_partition(unsigned int irq,
                                   const struct cpumask *affinity)
{
        struct irq_desc *desc = irq_to_desc(irq);

        if (!desc)
                return -EINVAL;

        if (desc->percpu_enabled)
                return -EINVAL;

        desc->percpu_enabled = kzalloc(sizeof(*desc->percpu_enabled), GFP_KERNEL);

        if (!desc->percpu_enabled)
                return -ENOMEM;

        if (affinity)
                desc->percpu_affinity = affinity;
        else
                desc->percpu_affinity = cpu_possible_mask;

        irq_set_percpu_devid_flags(irq);
        return 0;
}

int irq_set_percpu_devid(unsigned int irq)
{
        return irq_set_percpu_devid_partition(irq, NULL);
}

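/*
 * Illustrative only: per-CPU interrupts (e.g. per-core timer lines)
 * are marked once at probe time and then requested with
 * request_percpu_irq(). The demo_* names are hypothetical:
 *
 *      irq_set_percpu_devid(irq);
 *      err = request_percpu_irq(irq, demo_timer_handler,
 *                               "demo_timer", demo_percpu_device);
 */
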
int irq_get_percpu_devid_partition(unsigned int irq, struct cpumask *affinity)
{
        struct irq_desc *desc = irq_to_desc(irq);

        if (!desc || !desc->percpu_enabled)
                return -EINVAL;

        if (affinity)
                cpumask_copy(affinity, desc->percpu_affinity);

        return 0;
}
EXPORT_SYMBOL_GPL(irq_get_percpu_devid_partition);

void kstat_incr_irq_this_cpu(unsigned int irq)
{
        kstat_incr_irqs_this_cpu(irq_to_desc(irq));
}

/**
 * kstat_irqs_cpu - Get the statistics for an interrupt on a cpu
 * @irq: The interrupt number
 * @cpu: The cpu number
 *
 * Returns the sum of interrupt counts on @cpu since boot for
 * @irq. The caller must ensure that the interrupt is not removed
 * concurrently.
 */
unsigned int kstat_irqs_cpu(unsigned int irq, int cpu)
{
        struct irq_desc *desc = irq_to_desc(irq);

        return desc && desc->kstat_irqs ?
                        *per_cpu_ptr(desc->kstat_irqs, cpu) : 0;
}

/**
 * kstat_irqs - Get the statistics for an interrupt
 * @irq: The interrupt number
 *
 * Returns the sum of interrupt counts on all cpus since boot for
 * @irq. The caller must ensure that the interrupt is not removed
 * concurrently.
 */
unsigned int kstat_irqs(unsigned int irq)
{
        struct irq_desc *desc = irq_to_desc(irq);
        int cpu;
        unsigned int sum = 0;

        if (!desc || !desc->kstat_irqs)
                return 0;
        for_each_possible_cpu(cpu)
                sum += *per_cpu_ptr(desc->kstat_irqs, cpu);
        return sum;
}

/**
 * kstat_irqs_usr - Get the statistics for an interrupt
 * @irq: The interrupt number
 *
 * Returns the sum of interrupt counts on all cpus since boot for @irq.
 * Contrary to kstat_irqs() this can be called from any context.
 * It uses rcu since a concurrent removal of an interrupt descriptor is
 * observing an rcu grace period before delayed_free_desc()/irq_kobj_release().
 */
unsigned int kstat_irqs_usr(unsigned int irq)
{
        unsigned int sum;

        rcu_read_lock();
        sum = kstat_irqs(irq);
        rcu_read_unlock();
        return sum;
}
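
/*
 * Illustrative only: a /proc style consumer would use kstat_irqs_usr()
 * to print per-interrupt totals without any irq-internal locking, e.g.
 *
 *      seq_printf(m, "%u\n", kstat_irqs_usr(irq));
 */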