kernel/irq/irqdesc.c

/*
 * Copyright (C) 1992, 1998-2006 Linus Torvalds, Ingo Molnar
 * Copyright (C) 2005-2006, Thomas Gleixner, Russell King
 *
 * This file contains the interrupt descriptor management code
 *
 * Detailed information is available in Documentation/DocBook/genericirq
 *
 */
#include <linux/irq.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/interrupt.h>
#include <linux/kernel_stat.h>
#include <linux/radix-tree.h>
#include <linux/bitmap.h>

#include "internals.h"

/*
 * lockdep: we want to handle all irq_desc locks as a single lock-class:
 */
struct lock_class_key irq_desc_lock_class;

#if defined(CONFIG_SMP) && defined(CONFIG_GENERIC_HARDIRQS)
static void __init init_irq_default_affinity(void)
{
	alloc_cpumask_var(&irq_default_affinity, GFP_NOWAIT);
	cpumask_setall(irq_default_affinity);
}
#else
static void __init init_irq_default_affinity(void)
{
}
#endif

#ifdef CONFIG_SMP
static int alloc_masks(struct irq_desc *desc, gfp_t gfp, int node)
{
	if (!zalloc_cpumask_var_node(&desc->irq_data.affinity, gfp, node))
		return -ENOMEM;

#ifdef CONFIG_GENERIC_PENDING_IRQ
	if (!zalloc_cpumask_var_node(&desc->pending_mask, gfp, node)) {
		free_cpumask_var(desc->irq_data.affinity);
		return -ENOMEM;
	}
#endif
	return 0;
}

static void desc_smp_init(struct irq_desc *desc, int node)
{
	desc->node = node;
	cpumask_copy(desc->irq_data.affinity, irq_default_affinity);
}

#else
static inline int
alloc_masks(struct irq_desc *desc, gfp_t gfp, int node) { return 0; }
static inline void desc_smp_init(struct irq_desc *desc, int node) { }
#endif

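/*
 * Reset a descriptor to its default state: no chip or per-irq data,
 * handle_bad_irq as handler, disable depth of 1 and cleared statistics.
 */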
static void desc_set_defaults(unsigned int irq, struct irq_desc *desc, int node)
{
	desc->irq_data.irq = irq;
	desc->irq_data.chip = &no_irq_chip;
	desc->irq_data.chip_data = NULL;
	desc->irq_data.handler_data = NULL;
	desc->irq_data.msi_desc = NULL;
	desc->status = IRQ_DEFAULT_INIT_FLAGS;
	desc->handle_irq = handle_bad_irq;
	desc->depth = 1;
	desc->name = NULL;
	memset(desc->kstat_irqs, 0, nr_cpu_ids * sizeof(*(desc->kstat_irqs)));
	desc_smp_init(desc, node);
}

int nr_irqs = NR_IRQS;
EXPORT_SYMBOL_GPL(nr_irqs);

DEFINE_RAW_SPINLOCK(sparse_irq_lock);
static DECLARE_BITMAP(allocated_irqs, NR_IRQS);

#ifdef CONFIG_SPARSE_IRQ

static struct irq_desc irq_desc_init = {
	.status		= IRQ_DEFAULT_INIT_FLAGS,
	.handle_irq	= handle_bad_irq,
	.depth		= 1,
	.lock		= __RAW_SPIN_LOCK_UNLOCKED(irq_desc_init.lock),
};

void __ref init_kstat_irqs(struct irq_desc *desc, int node, int nr)
{
	void *ptr;

	ptr = kzalloc_node(nr * sizeof(*desc->kstat_irqs),
			   GFP_ATOMIC, node);

	/*
	 * Don't overwrite if we could not get a new one;
	 * init_copy_kstat_irqs() could still use the old one.
	 */
	if (ptr) {
		printk(KERN_DEBUG " alloc kstat_irqs on node %d\n", node);
		desc->kstat_irqs = ptr;
	}
}

static void init_one_irq_desc(int irq, struct irq_desc *desc, int node)
{
	memcpy(desc, &irq_desc_init, sizeof(struct irq_desc));

	raw_spin_lock_init(&desc->lock);
	desc->irq_data.irq = irq;
#ifdef CONFIG_SMP
	desc->irq_data.node = node;
#endif
	lockdep_set_class(&desc->lock, &irq_desc_lock_class);
	init_kstat_irqs(desc, node, nr_cpu_ids);
	if (!desc->kstat_irqs) {
		printk(KERN_ERR "can not alloc kstat_irqs\n");
		BUG_ON(1);
	}
	if (!alloc_desc_masks(desc, node, false)) {
		printk(KERN_ERR "can not alloc irq_desc cpumasks\n");
		BUG_ON(1);
	}
	init_desc_masks(desc);
	arch_init_chip_data(desc, node);
}

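/*
 * With CONFIG_SPARSE_IRQ the irq descriptors are kept in a radix tree,
 * indexed by irq number.
 */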
static RADIX_TREE(irq_desc_tree, GFP_ATOMIC);

static void irq_insert_desc(unsigned int irq, struct irq_desc *desc)
{
	radix_tree_insert(&irq_desc_tree, irq, desc);
}

struct irq_desc *irq_to_desc(unsigned int irq)
{
	return radix_tree_lookup(&irq_desc_tree, irq);
}

void replace_irq_desc(unsigned int irq, struct irq_desc *desc)
{
	void **ptr;

	ptr = radix_tree_lookup_slot(&irq_desc_tree, irq);
	if (ptr)
		radix_tree_replace_slot(ptr, desc);
}

static void delete_irq_desc(unsigned int irq)
{
	radix_tree_delete(&irq_desc_tree, irq);
}

#ifdef CONFIG_SMP
static void free_masks(struct irq_desc *desc)
{
#ifdef CONFIG_GENERIC_PENDING_IRQ
	free_cpumask_var(desc->pending_mask);
#endif
	free_cpumask_var(desc->affinity);
}
#else
static inline void free_masks(struct irq_desc *desc) { }
#endif

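/*
 * Allocate and initialize a single descriptor for @irq, preferably on
 * @node, including its kstat_irqs array and cpumasks.
 */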
static struct irq_desc *alloc_desc(int irq, int node)
{
	struct irq_desc *desc;
	gfp_t gfp = GFP_KERNEL;

	desc = kzalloc_node(sizeof(*desc), gfp, node);
	if (!desc)
		return NULL;
	/* allocate based on nr_cpu_ids */
	desc->kstat_irqs = kzalloc_node(nr_cpu_ids * sizeof(*desc->kstat_irqs),
					gfp, node);
	if (!desc->kstat_irqs)
		goto err_desc;

	if (alloc_masks(desc, gfp, node))
		goto err_kstat;

	raw_spin_lock_init(&desc->lock);
	lockdep_set_class(&desc->lock, &irq_desc_lock_class);

	desc_set_defaults(irq, desc, node);

	return desc;

err_kstat:
	kfree(desc->kstat_irqs);
err_desc:
	kfree(desc);
	return NULL;
}

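/*
 * Tear down a dynamically allocated descriptor: remove its /proc entries,
 * take it out of the radix tree and free the attached memory.
 */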
static void free_desc(unsigned int irq)
{
	struct irq_desc *desc = irq_to_desc(irq);
	unsigned long flags;

	unregister_irq_proc(irq, desc);

	raw_spin_lock_irqsave(&sparse_irq_lock, flags);
	delete_irq_desc(irq);
	raw_spin_unlock_irqrestore(&sparse_irq_lock, flags);

	free_masks(desc);
	kfree(desc->kstat_irqs);
	kfree(desc);
}

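/*
 * Allocate and insert descriptors for a consecutive range of irqs. On
 * failure, the already allocated descriptors are freed and the bitmap
 * range is released again.
 */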
static int alloc_descs(unsigned int start, unsigned int cnt, int node)
{
	struct irq_desc *desc;
	unsigned long flags;
	int i;

	for (i = 0; i < cnt; i++) {
		desc = alloc_desc(start + i, node);
		if (!desc)
			goto err;
		raw_spin_lock_irqsave(&sparse_irq_lock, flags);
		irq_insert_desc(start + i, desc);
		raw_spin_unlock_irqrestore(&sparse_irq_lock, flags);
	}
	return start;

err:
	for (i--; i >= 0; i--)
		free_desc(start + i);

	raw_spin_lock_irqsave(&sparse_irq_lock, flags);
	bitmap_clear(allocated_irqs, start, cnt);
	raw_spin_unlock_irqrestore(&sparse_irq_lock, flags);
	return -ENOMEM;
}

static struct irq_desc irq_desc_legacy[NR_IRQS_LEGACY] __cacheline_aligned_in_smp = {
	[0 ... NR_IRQS_LEGACY-1] = {
		.status		= IRQ_DEFAULT_INIT_FLAGS,
		.handle_irq	= handle_bad_irq,
		.depth		= 1,
		.lock		= __RAW_SPIN_LOCK_UNLOCKED(irq_desc_init.lock),
	}
};

static unsigned int *kstat_irqs_legacy;

int __init early_irq_init(void)
{
	struct irq_desc *desc;
	int legacy_count;
	int node;
	int i;

	init_irq_default_affinity();

	/* initialize nr_irqs based on nr_cpu_ids */
	arch_probe_nr_irqs();
	printk(KERN_INFO "NR_IRQS:%d nr_irqs:%d\n", NR_IRQS, nr_irqs);

	desc = irq_desc_legacy;
	legacy_count = ARRAY_SIZE(irq_desc_legacy);
	node = first_online_node;

	/* allocate based on nr_cpu_ids */
	kstat_irqs_legacy = kzalloc_node(NR_IRQS_LEGACY * nr_cpu_ids *
					 sizeof(int), GFP_NOWAIT, node);

	irq_desc_init.irq_data.chip = &no_irq_chip;

	for (i = 0; i < legacy_count; i++) {
		desc[i].irq_data.irq = i;
		desc[i].irq_data.chip = &no_irq_chip;
#ifdef CONFIG_SMP
		desc[i].irq_data.node = node;
#endif
		desc[i].kstat_irqs = kstat_irqs_legacy + i * nr_cpu_ids;
		lockdep_set_class(&desc[i].lock, &irq_desc_lock_class);
		alloc_desc_masks(&desc[i], node, true);
		init_desc_masks(&desc[i]);
		irq_insert_desc(i, &desc[i]);
	}

	return arch_early_irq_init();
}

struct irq_desc * __ref irq_to_desc_alloc_node(unsigned int irq, int node)
{
	struct irq_desc *desc;
	unsigned long flags;

	if (irq >= nr_irqs) {
		WARN(1, "irq (%d) >= nr_irqs (%d) in irq_to_desc_alloc\n",
			irq, nr_irqs);
		return NULL;
	}

	desc = irq_to_desc(irq);
	if (desc)
		return desc;

	raw_spin_lock_irqsave(&sparse_irq_lock, flags);

	/* We have to check it to avoid races with another CPU */
	desc = irq_to_desc(irq);
	if (desc)
		goto out_unlock;

	desc = kzalloc_node(sizeof(*desc), GFP_ATOMIC, node);

	printk(KERN_DEBUG " alloc irq_desc for %d on node %d\n", irq, node);
	if (!desc) {
		printk(KERN_ERR "can not alloc irq_desc\n");
		BUG_ON(1);
	}
	init_one_irq_desc(irq, desc, node);

	irq_insert_desc(irq, desc);

out_unlock:
	raw_spin_unlock_irqrestore(&sparse_irq_lock, flags);

	return desc;
}

#else /* !CONFIG_SPARSE_IRQ */

struct irq_desc irq_desc[NR_IRQS] __cacheline_aligned_in_smp = {
	[0 ... NR_IRQS-1] = {
		.status		= IRQ_DEFAULT_INIT_FLAGS,
		.handle_irq	= handle_bad_irq,
		.depth		= 1,
		.lock		= __RAW_SPIN_LOCK_UNLOCKED(irq_desc->lock),
	}
};

static unsigned int kstat_irqs_all[NR_IRQS][NR_CPUS];
int __init early_irq_init(void)
{
	struct irq_desc *desc;
	int count;
	int i;

	init_irq_default_affinity();

	printk(KERN_INFO "NR_IRQS:%d\n", NR_IRQS);

	desc = irq_desc;
	count = ARRAY_SIZE(irq_desc);

	for (i = 0; i < count; i++) {
		desc[i].irq_data.irq = i;
		desc[i].irq_data.chip = &no_irq_chip;
		alloc_desc_masks(&desc[i], 0, true);
		init_desc_masks(&desc[i]);
		desc[i].kstat_irqs = kstat_irqs_all[i];
		lockdep_set_class(&desc[i].lock, &irq_desc_lock_class);
	}
	return arch_early_irq_init();
}

struct irq_desc *irq_to_desc(unsigned int irq)
{
	return (irq < NR_IRQS) ? irq_desc + irq : NULL;
}

struct irq_desc *irq_to_desc_alloc_node(unsigned int irq, int node)
{
	return irq_to_desc(irq);
}

#ifdef CONFIG_SMP
static inline int desc_node(struct irq_desc *desc)
{
	return desc->irq_data.node;
}
#else
static inline int desc_node(struct irq_desc *desc) { return 0; }
#endif

static void free_desc(unsigned int irq)
{
	struct irq_desc *desc = irq_to_desc(irq);
	unsigned long flags;

	raw_spin_lock_irqsave(&desc->lock, flags);
	desc_set_defaults(irq, desc, desc_node(desc));
	raw_spin_unlock_irqrestore(&desc->lock, flags);
}

static inline int alloc_descs(unsigned int start, unsigned int cnt, int node)
{
	return start;
}
#endif /* !CONFIG_SPARSE_IRQ */

/* Dynamic interrupt handling */

/**
 * irq_free_descs - free irq descriptors
 * @from:	Start of descriptor range
 * @cnt:	Number of consecutive irqs to free
 */
void irq_free_descs(unsigned int from, unsigned int cnt)
{
	unsigned long flags;
	int i;

	if (from >= nr_irqs || (from + cnt) > nr_irqs)
		return;

	for (i = 0; i < cnt; i++)
		free_desc(from + i);

	raw_spin_lock_irqsave(&sparse_irq_lock, flags);
	bitmap_clear(allocated_irqs, from, cnt);
	raw_spin_unlock_irqrestore(&sparse_irq_lock, flags);
}

/**
 * irq_alloc_descs - allocate and initialize a range of irq descriptors
 * @irq:	Allocate for specific irq number if irq >= 0
 * @from:	Start the search from this irq number
 * @cnt:	Number of consecutive irqs to allocate
 * @node:	Preferred node on which the irq descriptor should be allocated
 *
 * Returns the first irq number or error code
 */
int __ref
irq_alloc_descs(int irq, unsigned int from, unsigned int cnt, int node)
{
	unsigned long flags;
	int start, ret;

	if (!cnt)
		return -EINVAL;

	raw_spin_lock_irqsave(&sparse_irq_lock, flags);

	start = bitmap_find_next_zero_area(allocated_irqs, nr_irqs, from, cnt, 0);
	ret = -EEXIST;
	if (irq >= 0 && start != irq)
		goto err;

	ret = -ENOMEM;
	if (start >= nr_irqs)
		goto err;

	bitmap_set(allocated_irqs, start, cnt);
	raw_spin_unlock_irqrestore(&sparse_irq_lock, flags);
	return alloc_descs(start, cnt, node);

err:
	raw_spin_unlock_irqrestore(&sparse_irq_lock, flags);
	return ret;
}

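/*
 * Illustrative usage sketch, not part of this file: a driver needing a
 * block of consecutive interrupt numbers pairs irq_alloc_descs() with
 * irq_free_descs(). The function example_setup_irqs() below is made up
 * for the example.
 */
#if 0
static int example_setup_irqs(int node)
{
	int irq;

	/* Allocate 4 consecutive irqs, searching upwards from irq 16 */
	irq = irq_alloc_descs(-1, 16, 4, node);
	if (irq < 0)
		return irq;		/* -EEXIST or -ENOMEM */

	/* ... set up handlers for irq .. irq + 3 here ... */

	/* Release the range again on teardown */
	irq_free_descs(irq, 4);
	return 0;
}
#endif
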
/**
 * irq_reserve_irqs - mark irqs allocated
 * @from:	mark from irq number
 * @cnt:	number of irqs to mark
 *
 * Returns 0 on success or an appropriate error code
 */
int irq_reserve_irqs(unsigned int from, unsigned int cnt)
{
	unsigned long flags;
	unsigned int start;
	int ret = 0;

	if (!cnt || (from + cnt) > nr_irqs)
		return -EINVAL;

	raw_spin_lock_irqsave(&sparse_irq_lock, flags);
	start = bitmap_find_next_zero_area(allocated_irqs, nr_irqs, from, cnt, 0);
	if (start == from)
		bitmap_set(allocated_irqs, start, cnt);
	else
		ret = -EEXIST;
	raw_spin_unlock_irqrestore(&sparse_irq_lock, flags);
	return ret;
}

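/*
 * Illustrative sketch, not part of this file: setup code can reserve a
 * fixed range, e.g. pre-assigned legacy irq numbers, so that
 * irq_alloc_descs() will not hand those numbers out dynamically. The
 * function example_reserve_legacy_irqs() is made up for the example.
 */
#if 0
static void example_reserve_legacy_irqs(void)
{
	/* Mark irqs 0..15 as in use; fails with -EEXIST if already taken */
	if (irq_reserve_irqs(0, 16))
		printk(KERN_WARNING "legacy irq range already allocated\n");
}
#endif
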
/**
 * irq_get_next_irq - get next allocated irq number
 * @offset:	where to start the search
 *
 * Returns the next allocated irq number at or after @offset, or nr_irqs
 * if none is found.
 */
unsigned int irq_get_next_irq(unsigned int offset)
{
	return find_next_bit(allocated_irqs, nr_irqs, offset);
}

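/*
 * Illustrative sketch, not part of this file: walking all currently
 * allocated irq numbers with irq_get_next_irq(). The function
 * example_walk_allocated_irqs() is made up for the example.
 */
#if 0
static void example_walk_allocated_irqs(void)
{
	unsigned int irq;

	for (irq = irq_get_next_irq(0); irq < nr_irqs;
	     irq = irq_get_next_irq(irq + 1))
		printk(KERN_DEBUG "irq %u is allocated\n", irq);
}
#endif
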
/* Statistics access */
void clear_kstat_irqs(struct irq_desc *desc)
{
	memset(desc->kstat_irqs, 0, nr_cpu_ids * sizeof(*(desc->kstat_irqs)));
}

unsigned int kstat_irqs_cpu(unsigned int irq, int cpu)
{
	struct irq_desc *desc = irq_to_desc(irq);
	return desc ? desc->kstat_irqs[cpu] : 0;
}
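
/*
 * Illustrative sketch, not part of this file: kstat_irqs_cpu() returns the
 * per-CPU count for one irq; a total across CPUs can be accumulated as
 * below. The function example_kstat_irqs_sum() is made up for the example.
 */
#if 0
static unsigned int example_kstat_irqs_sum(unsigned int irq)
{
	unsigned int sum = 0;
	int cpu;

	/* Sum the per-CPU interrupt counts for this irq */
	for_each_possible_cpu(cpu)
		sum += kstat_irqs_cpu(irq, cpu);
	return sum;
}
#endif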