kernel/irq/irqdesc.c (mirror_ubuntu-artful-kernel.git, at commit "genirq: Prevent access beyond allocated_irqs bitmap")
/*
 * Copyright (C) 1992, 1998-2006 Linus Torvalds, Ingo Molnar
 * Copyright (C) 2005-2006, Thomas Gleixner, Russell King
 *
 * This file contains the interrupt descriptor management code
 *
 * Detailed information is available in Documentation/DocBook/genericirq
 *
 */
#include <linux/irq.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/interrupt.h>
#include <linux/kernel_stat.h>
#include <linux/radix-tree.h>
#include <linux/bitmap.h>

#include "internals.h"

/*
 * lockdep: we want to handle all irq_desc locks as a single lock-class:
 */
static struct lock_class_key irq_desc_lock_class;

#if defined(CONFIG_SMP) && defined(CONFIG_GENERIC_HARDIRQS)
static void __init init_irq_default_affinity(void)
{
	alloc_cpumask_var(&irq_default_affinity, GFP_NOWAIT);
	cpumask_setall(irq_default_affinity);
}
#else
static void __init init_irq_default_affinity(void)
{
}
#endif

#ifdef CONFIG_SMP
static int alloc_masks(struct irq_desc *desc, gfp_t gfp, int node)
{
	if (!zalloc_cpumask_var_node(&desc->irq_data.affinity, gfp, node))
		return -ENOMEM;

#ifdef CONFIG_GENERIC_PENDING_IRQ
	if (!zalloc_cpumask_var_node(&desc->pending_mask, gfp, node)) {
		free_cpumask_var(desc->irq_data.affinity);
		return -ENOMEM;
	}
#endif
	return 0;
}

static void desc_smp_init(struct irq_desc *desc, int node)
{
	desc->irq_data.node = node;
	cpumask_copy(desc->irq_data.affinity, irq_default_affinity);
#ifdef CONFIG_GENERIC_PENDING_IRQ
	cpumask_clear(desc->pending_mask);
#endif
}

static inline int desc_node(struct irq_desc *desc)
{
	return desc->irq_data.node;
}

#else
static inline int
alloc_masks(struct irq_desc *desc, gfp_t gfp, int node) { return 0; }
static inline void desc_smp_init(struct irq_desc *desc, int node) { }
static inline int desc_node(struct irq_desc *desc) { return 0; }
#endif

static void desc_set_defaults(unsigned int irq, struct irq_desc *desc, int node)
{
	int cpu;

	desc->irq_data.irq = irq;
	desc->irq_data.chip = &no_irq_chip;
	desc->irq_data.chip_data = NULL;
	desc->irq_data.handler_data = NULL;
	desc->irq_data.msi_desc = NULL;
	desc->status = IRQ_DEFAULT_INIT_FLAGS;
	desc->handle_irq = handle_bad_irq;
	desc->depth = 1;
	desc->irq_count = 0;
	desc->irqs_unhandled = 0;
	desc->name = NULL;
	for_each_possible_cpu(cpu)
		*per_cpu_ptr(desc->kstat_irqs, cpu) = 0;
	desc_smp_init(desc, node);
}

int nr_irqs = NR_IRQS;
EXPORT_SYMBOL_GPL(nr_irqs);

static DEFINE_MUTEX(sparse_irq_lock);
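/*
 * Note: the bitmap is sized IRQ_BITMAP_BITS (defined in internals.h,
 * >= NR_IRQS) rather than NR_IRQS, so nr_irqs may grow at boot via
 * arch_probe_nr_irqs() without indexing past the end of the bitmap
 * (see the WARN_ON clamps in early_irq_init() below).
 */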
static DECLARE_BITMAP(allocated_irqs, IRQ_BITMAP_BITS);

#ifdef CONFIG_SPARSE_IRQ

static RADIX_TREE(irq_desc_tree, GFP_KERNEL);

static void irq_insert_desc(unsigned int irq, struct irq_desc *desc)
{
	radix_tree_insert(&irq_desc_tree, irq, desc);
}

struct irq_desc *irq_to_desc(unsigned int irq)
{
	return radix_tree_lookup(&irq_desc_tree, irq);
}

static void delete_irq_desc(unsigned int irq)
{
	radix_tree_delete(&irq_desc_tree, irq);
}
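
/*
 * Illustrative sketch (not part of this file): irq_to_desc() is the
 * lookup that interrupt flow handling builds on. Roughly what the
 * generic_handle_irq() inline in <linux/irqdesc.h> boils down to in
 * this era:
 *
 *	struct irq_desc *desc = irq_to_desc(irq);
 *
 *	if (desc)
 *		desc->handle_irq(irq, desc);
 */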

#ifdef CONFIG_SMP
static void free_masks(struct irq_desc *desc)
{
#ifdef CONFIG_GENERIC_PENDING_IRQ
	free_cpumask_var(desc->pending_mask);
#endif
	free_cpumask_var(desc->irq_data.affinity);
}
#else
static inline void free_masks(struct irq_desc *desc) { }
#endif

static struct irq_desc *alloc_desc(int irq, int node)
{
	struct irq_desc *desc;
	gfp_t gfp = GFP_KERNEL;

	desc = kzalloc_node(sizeof(*desc), gfp, node);
	if (!desc)
		return NULL;
	/* allocate based on nr_cpu_ids */
	desc->kstat_irqs = alloc_percpu(unsigned int);
	if (!desc->kstat_irqs)
		goto err_desc;

	if (alloc_masks(desc, gfp, node))
		goto err_kstat;

	raw_spin_lock_init(&desc->lock);
	lockdep_set_class(&desc->lock, &irq_desc_lock_class);

	desc_set_defaults(irq, desc, node);

	return desc;

err_kstat:
	free_percpu(desc->kstat_irqs);
err_desc:
	kfree(desc);
	return NULL;
}

static void free_desc(unsigned int irq)
{
	struct irq_desc *desc = irq_to_desc(irq);

	unregister_irq_proc(irq, desc);

	mutex_lock(&sparse_irq_lock);
	delete_irq_desc(irq);
	mutex_unlock(&sparse_irq_lock);

	free_masks(desc);
	free_percpu(desc->kstat_irqs);
	kfree(desc);
}

static int alloc_descs(unsigned int start, unsigned int cnt, int node)
{
	struct irq_desc *desc;
	int i;

	for (i = 0; i < cnt; i++) {
		desc = alloc_desc(start + i, node);
		if (!desc)
			goto err;
		mutex_lock(&sparse_irq_lock);
		irq_insert_desc(start + i, desc);
		mutex_unlock(&sparse_irq_lock);
	}
	return start;

err:
	for (i--; i >= 0; i--)
		free_desc(start + i);

	mutex_lock(&sparse_irq_lock);
	bitmap_clear(allocated_irqs, start, cnt);
	mutex_unlock(&sparse_irq_lock);
	return -ENOMEM;
}

struct irq_desc * __ref irq_to_desc_alloc_node(unsigned int irq, int node)
{
	int res = irq_alloc_descs(irq, irq, 1, node);

	if (res == -EEXIST || res == irq)
		return irq_to_desc(irq);
	return NULL;
}

int __init early_irq_init(void)
{
	int i, initcnt, node = first_online_node;
	struct irq_desc *desc;

	init_irq_default_affinity();

	/* Let arch update nr_irqs and return the nr of preallocated irqs */
	initcnt = arch_probe_nr_irqs();
	printk(KERN_INFO "NR_IRQS:%d nr_irqs:%d %d\n", NR_IRQS, nr_irqs, initcnt);

	if (WARN_ON(nr_irqs > IRQ_BITMAP_BITS))
		nr_irqs = IRQ_BITMAP_BITS;

	if (WARN_ON(initcnt > IRQ_BITMAP_BITS))
		initcnt = IRQ_BITMAP_BITS;

	if (initcnt > nr_irqs)
		nr_irqs = initcnt;

	for (i = 0; i < initcnt; i++) {
		desc = alloc_desc(i, node);
		set_bit(i, allocated_irqs);
		irq_insert_desc(i, desc);
	}
	return arch_early_irq_init();
}

#else /* !CONFIG_SPARSE_IRQ */

struct irq_desc irq_desc[NR_IRQS] __cacheline_aligned_in_smp = {
	[0 ... NR_IRQS-1] = {
		.status		= IRQ_DEFAULT_INIT_FLAGS,
		.handle_irq	= handle_bad_irq,
		.depth		= 1,
		.lock		= __RAW_SPIN_LOCK_UNLOCKED(irq_desc->lock),
	}
};

int __init early_irq_init(void)
{
	int count, i, node = first_online_node;
	struct irq_desc *desc;

	init_irq_default_affinity();

	printk(KERN_INFO "NR_IRQS:%d\n", NR_IRQS);

	desc = irq_desc;
	count = ARRAY_SIZE(irq_desc);

	for (i = 0; i < count; i++) {
		desc[i].irq_data.irq = i;
		desc[i].irq_data.chip = &no_irq_chip;
		/* TODO : do this allocation on-demand ... */
		desc[i].kstat_irqs = alloc_percpu(unsigned int);
		alloc_masks(desc + i, GFP_KERNEL, node);
		desc_smp_init(desc + i, node);
		lockdep_set_class(&desc[i].lock, &irq_desc_lock_class);
	}
	return arch_early_irq_init();
}

struct irq_desc *irq_to_desc(unsigned int irq)
{
	return (irq < NR_IRQS) ? irq_desc + irq : NULL;
}

struct irq_desc *irq_to_desc_alloc_node(unsigned int irq, int node)
{
	return irq_to_desc(irq);
}

static void free_desc(unsigned int irq)
{
	dynamic_irq_cleanup(irq);
}

static inline int alloc_descs(unsigned int start, unsigned int cnt, int node)
{
#if defined(CONFIG_KSTAT_IRQS_ONDEMAND)
	struct irq_desc *desc;
	unsigned int i;

	for (i = 0; i < cnt; i++) {
		desc = irq_to_desc(start + i);
		if (desc && !desc->kstat_irqs) {
			unsigned int __percpu *stats = alloc_percpu(unsigned int);

			if (!stats)
				return -1;
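			/*
			 * Publish the buffer atomically; if another CPU
			 * raced in and installed one first, drop ours.
			 */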
			if (cmpxchg(&desc->kstat_irqs, NULL, stats) != NULL)
				free_percpu(stats);
		}
	}
#endif
	return start;
}
#endif /* !CONFIG_SPARSE_IRQ */

/* Dynamic interrupt handling */

/**
 * irq_free_descs - free irq descriptors
 * @from: Start of descriptor range
 * @cnt: Number of consecutive irqs to free
 */
void irq_free_descs(unsigned int from, unsigned int cnt)
{
	int i;

	if (from >= nr_irqs || (from + cnt) > nr_irqs)
		return;

	for (i = 0; i < cnt; i++)
		free_desc(from + i);

	mutex_lock(&sparse_irq_lock);
	bitmap_clear(allocated_irqs, from, cnt);
	mutex_unlock(&sparse_irq_lock);
}

/**
 * irq_alloc_descs - allocate and initialize a range of irq descriptors
 * @irq: Allocate for specific irq number if irq >= 0
 * @from: Start the search from this irq number
 * @cnt: Number of consecutive irqs to allocate.
 * @node: Preferred node on which the irq descriptor should be allocated
 *
 * Returns the first irq number or error code
 */
int __ref
irq_alloc_descs(int irq, unsigned int from, unsigned int cnt, int node)
{
	int start, ret;

	if (!cnt)
		return -EINVAL;

	mutex_lock(&sparse_irq_lock);

	start = bitmap_find_next_zero_area(allocated_irqs, nr_irqs, from, cnt, 0);
	ret = -EEXIST;
	if (irq >= 0 && start != irq)
		goto err;

	ret = -ENOMEM;
	if (start >= nr_irqs)
		goto err;

	bitmap_set(allocated_irqs, start, cnt);
	mutex_unlock(&sparse_irq_lock);
	return alloc_descs(start, cnt, node);

err:
	mutex_unlock(&sparse_irq_lock);
	return ret;
}
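
/*
 * Illustrative sketch (not part of this file): a typical caller pairs
 * irq_alloc_descs() with irq_free_descs() above. The names my_setup and
 * my_base are hypothetical.
 *
 *	int my_setup(int node)
 *	{
 *		int my_base = irq_alloc_descs(-1, 0, 4, node);
 *
 *		if (my_base < 0)
 *			return my_base;	// -EEXIST or -ENOMEM
 *		// ... set up chips/handlers for my_base .. my_base + 3 ...
 *		// on teardown:
 *		irq_free_descs(my_base, 4);
 *		return 0;
 *	}
 */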

/**
 * irq_reserve_irqs - mark irqs allocated
 * @from: mark from irq number
 * @cnt: number of irqs to mark
 *
 * Returns 0 on success or an appropriate error code
 */
int irq_reserve_irqs(unsigned int from, unsigned int cnt)
{
	unsigned int start;
	int ret = 0;

	if (!cnt || (from + cnt) > nr_irqs)
		return -EINVAL;

	mutex_lock(&sparse_irq_lock);
	start = bitmap_find_next_zero_area(allocated_irqs, nr_irqs, from, cnt, 0);
	if (start == from)
		bitmap_set(allocated_irqs, start, cnt);
	else
		ret = -EEXIST;
	mutex_unlock(&sparse_irq_lock);
	return ret;
}
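
/*
 * Illustrative sketch (not part of this file): arch or platform code can
 * pin a fixed range, e.g. legacy interrupts, before anything else claims
 * it. The range below is only an example.
 *
 *	if (irq_reserve_irqs(0, 16))
 *		pr_warn("legacy irq range already in use\n");
 */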

/**
 * irq_get_next_irq - get next allocated irq number
 * @offset: where to start the search
 *
 * Returns next irq number after offset or nr_irqs if none is found.
 */
unsigned int irq_get_next_irq(unsigned int offset)
{
	return find_next_bit(allocated_irqs, nr_irqs, offset);
}
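
/*
 * Illustrative sketch (not part of this file): walking every allocated
 * irq, the same pattern the for_each_active_irq() helper in internals.h
 * wraps. do_something() is hypothetical.
 *
 *	unsigned int irq;
 *
 *	for (irq = irq_get_next_irq(0); irq < nr_irqs;
 *	     irq = irq_get_next_irq(irq + 1))
 *		do_something(irq);
 */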

/**
 * dynamic_irq_cleanup - cleanup a dynamically allocated irq
 * @irq: irq number to initialize
 */
void dynamic_irq_cleanup(unsigned int irq)
{
	struct irq_desc *desc = irq_to_desc(irq);
	unsigned long flags;

	raw_spin_lock_irqsave(&desc->lock, flags);
	desc_set_defaults(irq, desc, desc_node(desc));
	raw_spin_unlock_irqrestore(&desc->lock, flags);
}

unsigned int kstat_irqs_cpu(unsigned int irq, int cpu)
{
	struct irq_desc *desc = irq_to_desc(irq);

	return desc && desc->kstat_irqs ?
			*per_cpu_ptr(desc->kstat_irqs, cpu) : 0;
}

#ifdef CONFIG_GENERIC_HARDIRQS
unsigned int kstat_irqs(unsigned int irq)
{
	struct irq_desc *desc = irq_to_desc(irq);
	int cpu;
	int sum = 0;

	if (!desc || !desc->kstat_irqs)
		return 0;
	for_each_possible_cpu(cpu)
		sum += *per_cpu_ptr(desc->kstat_irqs, cpu);
	return sum;
}
#endif /* CONFIG_GENERIC_HARDIRQS */
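
/*
 * Illustrative sketch (not part of this file): a per-CPU readout of one
 * interrupt's counts, in the style of the /proc/interrupts rows printed
 * by the arch show_interrupts() implementations of this era. Assumes a
 * struct seq_file *p and an irq number in scope.
 *
 *	int cpu;
 *
 *	for_each_online_cpu(cpu)
 *		seq_printf(p, "%10u ", kstat_irqs_cpu(irq, cpu));
 */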