#include <linux/debugfs.h>
#include <linux/hardirq.h>
#include <linux/interrupt.h>
#include <linux/irq.h>
#include <linux/irqdesc.h>
#include <linux/irqdomain.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/seq_file.h>
#include <linux/slab.h>
#include <linux/smp.h>
#include <linux/fs.h>

static LIST_HEAD(irq_domain_list);
static DEFINE_MUTEX(irq_domain_mutex);

#ifdef CONFIG_PPC
static DEFINE_MUTEX(revmap_trees_mutex);
static unsigned int irq_virq_count = NR_IRQS;
static struct irq_domain *irq_default_domain;

static int default_irq_domain_match(struct irq_domain *d, struct device_node *np)
{
        return d->of_node != NULL && d->of_node == np;
}

/**
 * irq_alloc_host() - Allocate a new irq_domain data structure
 * @of_node: optional device-tree node of the interrupt controller
 * @revmap_type: type of reverse mapping to use
 * @revmap_arg: for IRQ_DOMAIN_MAP_LINEAR only: size of the linear map
 * @ops: map/unmap domain callbacks
 * @inval_irq: provide a hw number in that domain space that is always invalid
 *
 * Allocates and initializes an irq_domain structure. Note that in the case of
 * IRQ_DOMAIN_MAP_LEGACY, the map() callback will be called before this returns
 * for all legacy interrupts except 0 (which is always the invalid irq for
 * a legacy controller). For an IRQ_DOMAIN_MAP_LINEAR, the map is allocated by
 * this call as well. For an IRQ_DOMAIN_MAP_TREE, the radix tree will be
 * allocated later during boot automatically (the reverse mapping will use the
 * slow path until that happens).
 */
struct irq_domain *irq_alloc_host(struct device_node *of_node,
                                  unsigned int revmap_type,
                                  unsigned int revmap_arg,
                                  struct irq_domain_ops *ops,
                                  irq_hw_number_t inval_irq)
{
        struct irq_domain *domain, *h;
        unsigned int size = sizeof(struct irq_domain);
        unsigned int i;
        unsigned int *rmap;

        /* Allocate structure and revmap table if using linear mapping */
        if (revmap_type == IRQ_DOMAIN_MAP_LINEAR)
                size += revmap_arg * sizeof(unsigned int);
        domain = kzalloc(size, GFP_KERNEL);
        if (domain == NULL)
                return NULL;

        /* Fill structure */
        domain->revmap_type = revmap_type;
        domain->inval_irq = inval_irq;
        domain->ops = ops;
        domain->of_node = of_node_get(of_node);

        if (domain->ops->match == NULL)
                domain->ops->match = default_irq_domain_match;

        mutex_lock(&irq_domain_mutex);
        /* Make sure only one legacy controller can be created */
        if (revmap_type == IRQ_DOMAIN_MAP_LEGACY) {
                list_for_each_entry(h, &irq_domain_list, link) {
                        if (WARN_ON(h->revmap_type == IRQ_DOMAIN_MAP_LEGACY)) {
                                mutex_unlock(&irq_domain_mutex);
                                of_node_put(domain->of_node);
                                kfree(domain);
                                return NULL;
                        }
                }
        }
        list_add(&domain->link, &irq_domain_list);
        mutex_unlock(&irq_domain_mutex);

        /* Additional setup per revmap type */
        switch (revmap_type) {
        case IRQ_DOMAIN_MAP_LEGACY:
                /* 0 is always the invalid number for legacy */
                domain->inval_irq = 0;
                /* set us up as the domain for all legacy interrupts */
                for (i = 1; i < NUM_ISA_INTERRUPTS; i++) {
                        struct irq_data *irq_data = irq_get_irq_data(i);
                        irq_data->hwirq = i;
                        irq_data->domain = domain;

                        /* Legacy flags are left to default at this point,
                         * one can then use irq_create_mapping() to
                         * explicitly change them
                         */
                        ops->map(domain, i, i);

                        /* Clear norequest flags */
                        irq_clear_status_flags(i, IRQ_NOREQUEST);
                }
                break;
        case IRQ_DOMAIN_MAP_LINEAR:
                rmap = (unsigned int *)(domain + 1);
                for (i = 0; i < revmap_arg; i++)
                        rmap[i] = 0;
                domain->revmap_data.linear.size = revmap_arg;
                domain->revmap_data.linear.revmap = rmap;
                break;
        case IRQ_DOMAIN_MAP_TREE:
                INIT_RADIX_TREE(&domain->revmap_data.tree, GFP_KERNEL);
                break;
        default:
                break;
        }

        pr_debug("irq: Allocated domain of type %d @0x%p\n", revmap_type, domain);

        return domain;
}

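/*
 * Usage sketch (editor's illustration, not part of the original file): a
 * hypothetical "foo_pic" controller with 32 hardware lines could register a
 * linear-revmap host roughly as follows; foo_pic_map, foo_pic_chip, foo_pic_np
 * and foo_pic_host are assumed names.
 *
 *      static int foo_pic_map(struct irq_domain *d, unsigned int virq,
 *                             irq_hw_number_t hw)
 *      {
 *              irq_set_chip_and_handler(virq, &foo_pic_chip, handle_level_irq);
 *              return 0;
 *      }
 *
 *      static struct irq_domain_ops foo_pic_ops = {
 *              .map = foo_pic_map,
 *      };
 *
 *      foo_pic_host = irq_alloc_host(foo_pic_np, IRQ_DOMAIN_MAP_LINEAR,
 *                                    32, &foo_pic_ops, 0);
 */
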
/**
 * irq_find_host() - Locates a domain for a given device node
 * @node: device-tree node of the interrupt controller
 */
struct irq_domain *irq_find_host(struct device_node *node)
{
        struct irq_domain *h, *found = NULL;

        /* We might want to match the legacy controller last since
         * it might potentially be set to match all interrupts in
         * the absence of a device node. This isn't a problem so
         * far though...
         */
        mutex_lock(&irq_domain_mutex);
        list_for_each_entry(h, &irq_domain_list, link)
                if (h->ops->match(h, node)) {
                        found = h;
                        break;
                }
        mutex_unlock(&irq_domain_mutex);
        return found;
}
EXPORT_SYMBOL_GPL(irq_find_host);

/**
 * irq_set_default_host() - Set a "default" irq domain
 * @domain: default domain pointer
 *
 * For convenience, it's possible to set a "default" domain that will be used
 * whenever NULL is passed to irq_create_mapping(). It makes life easier for
 * platforms that want to manipulate a few hard-coded interrupt numbers that
 * aren't properly represented in the device-tree.
 */
void irq_set_default_host(struct irq_domain *domain)
{
        pr_debug("irq: Default domain set to @0x%p\n", domain);

        irq_default_domain = domain;
}

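/*
 * Usage sketch (editor's illustration): platform setup code for a board with
 * a single top-level controller might make that controller the fallback for
 * mapping requests that pass a NULL domain; foo_pic_host is an assumed name.
 *
 *      irq_set_default_host(foo_pic_host);
 */
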
/**
 * irq_set_virq_count() - Set the maximum number of linux irqs
 * @count: number of linux irqs, capped with NR_IRQS
 *
 * This is mainly for use by platforms like iSeries that want to program
 * the virtual irq number in the controller to avoid the reverse mapping.
 */
void irq_set_virq_count(unsigned int count)
{
        pr_debug("irq: Trying to set virq count to %d\n", count);

        BUG_ON(count < NUM_ISA_INTERRUPTS);
        if (count < NR_IRQS)
                irq_virq_count = count;
}

static int irq_setup_virq(struct irq_domain *domain, unsigned int virq,
                          irq_hw_number_t hwirq)
{
        struct irq_data *irq_data = irq_get_irq_data(virq);

        irq_data->hwirq = hwirq;
        irq_data->domain = domain;
        if (domain->ops->map(domain, virq, hwirq)) {
                pr_debug("irq: -> mapping failed, freeing\n");
                irq_data->domain = NULL;
                irq_data->hwirq = 0;
                return -1;
        }

        irq_clear_status_flags(virq, IRQ_NOREQUEST);

        return 0;
}

/**
 * irq_create_direct_mapping() - Allocate an irq for direct mapping
 * @domain: domain to allocate the irq for or NULL for default domain
 *
 * This routine is used for irq controllers which can choose the hardware
 * interrupt numbers they generate. In such a case it's simplest to use
 * the linux irq as the hardware interrupt number.
 */
unsigned int irq_create_direct_mapping(struct irq_domain *domain)
{
        unsigned int virq;

        if (domain == NULL)
                domain = irq_default_domain;

        BUG_ON(domain == NULL);
        WARN_ON(domain->revmap_type != IRQ_DOMAIN_MAP_NOMAP);

        virq = irq_alloc_desc_from(1, 0);
        if (!virq) {
                pr_debug("irq: create_direct virq allocation failed\n");
                return 0;
        }
        if (virq >= irq_virq_count) {
                pr_err("ERROR: no free irqs available below %i maximum\n",
                       irq_virq_count);
                irq_free_desc(virq);
                return 0;
        }

        pr_debug("irq: create_direct obtained virq %d\n", virq);

        if (irq_setup_virq(domain, virq, virq)) {
                irq_free_desc(virq);
                return 0;
        }

        return virq;
}

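/*
 * Usage sketch (editor's illustration): a controller whose hardware interrupt
 * numbers are programmable (a NOMAP host) can simply ask for a virq and then
 * program that same number into the hardware; foo_msi_host and
 * foo_msi_write_hwirq() are assumed names.
 *
 *      virq = irq_create_direct_mapping(foo_msi_host);
 *      if (virq)
 *              foo_msi_write_hwirq(dev, virq);
 */
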
/**
 * irq_create_mapping() - Map a hardware interrupt into linux irq space
 * @domain: domain owning this hardware interrupt or NULL for default domain
 * @hwirq: hardware irq number in that domain space
 *
 * Only one mapping per hardware interrupt is permitted. Returns a linux
 * irq number.
 * If the sense/trigger is to be specified, irq_set_irq_type() should be
 * called on the number returned from that call.
 */
unsigned int irq_create_mapping(struct irq_domain *domain,
                                irq_hw_number_t hwirq)
{
        unsigned int virq, hint;

        pr_debug("irq: irq_create_mapping(0x%p, 0x%lx)\n", domain, hwirq);

        /* Look for default domain if necessary */
        if (domain == NULL)
                domain = irq_default_domain;
        if (domain == NULL) {
                printk(KERN_WARNING "irq_create_mapping called for"
                       " NULL domain, hwirq=%lx\n", hwirq);
                WARN_ON(1);
                return 0;
        }
        pr_debug("irq: -> using domain @%p\n", domain);

        /* Check if mapping already exists */
        virq = irq_find_mapping(domain, hwirq);
        if (virq) {
                pr_debug("irq: -> existing mapping on virq %d\n", virq);
                return virq;
        }

        /* Get a virtual interrupt number */
        if (domain->revmap_type == IRQ_DOMAIN_MAP_LEGACY) {
                /* Handle legacy */
                virq = (unsigned int)hwirq;
                if (virq == 0 || virq >= NUM_ISA_INTERRUPTS)
                        return 0;
                return virq;
        } else {
                /* Allocate a virtual interrupt number */
                hint = hwirq % irq_virq_count;
                if (hint == 0)
                        hint++;
                virq = irq_alloc_desc_from(hint, 0);
                if (!virq)
                        virq = irq_alloc_desc_from(1, 0);
                if (!virq) {
                        pr_debug("irq: -> virq allocation failed\n");
                        return 0;
                }
        }

        if (irq_setup_virq(domain, virq, hwirq)) {
                if (domain->revmap_type != IRQ_DOMAIN_MAP_LEGACY)
                        irq_free_desc(virq);
                return 0;
        }

        pr_debug("irq: irq %lu on domain %s mapped to virtual irq %u\n",
                 hwirq, domain->of_node ? domain->of_node->full_name : "null", virq);

        return virq;
}
EXPORT_SYMBOL_GPL(irq_create_mapping);

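/*
 * Usage sketch (editor's illustration): a driver that already knows its
 * hardware interrupt number within a host can turn it into a linux irq and
 * request it; foo_pic_host, FOO_DEV_HWIRQ and foo_dev_isr are assumed names.
 *
 *      virq = irq_create_mapping(foo_pic_host, FOO_DEV_HWIRQ);
 *      if (virq) {
 *              irq_set_irq_type(virq, IRQ_TYPE_LEVEL_HIGH);
 *              request_irq(virq, foo_dev_isr, 0, "foo_dev", dev);
 *      }
 */
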
unsigned int irq_create_of_mapping(struct device_node *controller,
                                   const u32 *intspec, unsigned int intsize)
{
        struct irq_domain *domain;
        irq_hw_number_t hwirq;
        unsigned int type = IRQ_TYPE_NONE;
        unsigned int virq;

        domain = controller ? irq_find_host(controller) : irq_default_domain;
        if (!domain) {
                printk(KERN_WARNING "irq: no irq domain found for %s !\n",
                       controller->full_name);
                return 0;
        }

        /* If domain has no translation, then we assume interrupt line */
        if (domain->ops->xlate == NULL)
                hwirq = intspec[0];
        else {
                if (domain->ops->xlate(domain, controller, intspec, intsize,
                                       &hwirq, &type))
                        return 0;
        }

        /* Create mapping */
        virq = irq_create_mapping(domain, hwirq);
        if (!virq)
                return virq;

        /* Set type if specified and different than the current one */
        if (type != IRQ_TYPE_NONE &&
            type != (irqd_get_trigger_type(irq_get_irq_data(virq))))
                irq_set_irq_type(virq, type);
        return virq;
}
EXPORT_SYMBOL_GPL(irq_create_of_mapping);

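/*
 * Usage sketch (editor's illustration): most drivers reach this function
 * indirectly through irq_of_parse_and_map(), which decodes the device-tree
 * "interrupts" property into an interrupt specifier before mapping it;
 * "np" is an assumed struct device_node pointer.
 *
 *      unsigned int virq = irq_of_parse_and_map(np, 0);
 *      if (!virq)
 *              return -ENODEV;
 */
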
/**
 * irq_dispose_mapping() - Unmap an interrupt
 * @virq: linux irq number of the interrupt to unmap
 */
void irq_dispose_mapping(unsigned int virq)
{
        struct irq_data *irq_data = irq_get_irq_data(virq);
        struct irq_domain *domain;
        irq_hw_number_t hwirq;

        if (!virq || !irq_data)
                return;

        domain = irq_data->domain;
        if (WARN_ON(domain == NULL))
                return;

        /* Never unmap legacy interrupts */
        if (domain->revmap_type == IRQ_DOMAIN_MAP_LEGACY)
                return;

        irq_set_status_flags(virq, IRQ_NOREQUEST);

        /* remove chip and handler */
        irq_set_chip_and_handler(virq, NULL, NULL);

        /* Make sure it's completed */
        synchronize_irq(virq);

        /* Tell the PIC about it */
        if (domain->ops->unmap)
                domain->ops->unmap(domain, virq);
        smp_mb();

        /* Clear reverse map */
        hwirq = irq_data->hwirq;
        switch (domain->revmap_type) {
        case IRQ_DOMAIN_MAP_LINEAR:
                if (hwirq < domain->revmap_data.linear.size)
                        domain->revmap_data.linear.revmap[hwirq] = 0;
                break;
        case IRQ_DOMAIN_MAP_TREE:
                mutex_lock(&revmap_trees_mutex);
                radix_tree_delete(&domain->revmap_data.tree, hwirq);
                mutex_unlock(&revmap_trees_mutex);
                break;
        }

        /* Destroy map */
        irq_data->hwirq = domain->inval_irq;

        irq_free_desc(virq);
}
EXPORT_SYMBOL_GPL(irq_dispose_mapping);

/**
 * irq_find_mapping() - Find a linux irq from a hw irq number.
 * @domain: domain owning this hardware interrupt
 * @hwirq: hardware irq number in that domain space
 *
 * This is a slow path, for use by generic code. It's expected that an
 * irq controller implementation directly calls the appropriate low level
 * mapping function.
 */
unsigned int irq_find_mapping(struct irq_domain *domain,
                              irq_hw_number_t hwirq)
{
        unsigned int i;
        unsigned int hint = hwirq % irq_virq_count;

        /* Look for default domain if necessary */
        if (domain == NULL)
                domain = irq_default_domain;
        if (domain == NULL)
                return 0;

        /* legacy -> bail early */
        if (domain->revmap_type == IRQ_DOMAIN_MAP_LEGACY)
                return hwirq;

        /* Slow path does a linear search of the map */
        if (hint == 0)
                hint = 1;
        i = hint;
        do {
                struct irq_data *data = irq_get_irq_data(i);
                if (data && (data->domain == domain) && (data->hwirq == hwirq))
                        return i;
                i++;
                if (i >= irq_virq_count)
                        i = 1;
        } while (i != hint);
        return 0;
}
EXPORT_SYMBOL_GPL(irq_find_mapping);

/**
 * irq_radix_revmap_lookup() - Find a linux irq from a hw irq number.
 * @domain: domain owning this hardware interrupt
 * @hwirq: hardware irq number in that domain space
 *
 * This is a fast path, for use by irq controller code that uses radix tree
 * revmaps.
 */
unsigned int irq_radix_revmap_lookup(struct irq_domain *domain,
                                     irq_hw_number_t hwirq)
{
        struct irq_data *irq_data;

        if (WARN_ON_ONCE(domain->revmap_type != IRQ_DOMAIN_MAP_TREE))
                return irq_find_mapping(domain, hwirq);

        /*
         * Freeing an irq can delete nodes along the path to
         * do the lookup via call_rcu.
         */
        rcu_read_lock();
        irq_data = radix_tree_lookup(&domain->revmap_data.tree, hwirq);
        rcu_read_unlock();

        /*
         * If found in radix tree, then fine.
         * Else fall back to linear lookup - this should not happen in practice
         * as it means that we failed to insert the node in the radix tree.
         */
        return irq_data ? irq_data->irq : irq_find_mapping(domain, hwirq);
}

/**
 * irq_radix_revmap_insert() - Insert a hw irq to linux irq number mapping.
 * @domain: domain owning this hardware interrupt
 * @virq: linux irq number
 * @hwirq: hardware irq number in that domain space
 *
 * This is for use by irq controllers that use a radix tree reverse
 * mapping for fast lookup.
 */
void irq_radix_revmap_insert(struct irq_domain *domain, unsigned int virq,
                             irq_hw_number_t hwirq)
{
        struct irq_data *irq_data = irq_get_irq_data(virq);

        if (WARN_ON(domain->revmap_type != IRQ_DOMAIN_MAP_TREE))
                return;

        if (virq) {
                mutex_lock(&revmap_trees_mutex);
                radix_tree_insert(&domain->revmap_data.tree, hwirq, irq_data);
                mutex_unlock(&revmap_trees_mutex);
        }
}

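/*
 * Usage sketch (editor's illustration): a controller using a tree revmap
 * typically populates the radix tree once a virq is known (for instance from
 * its map() hook) and then uses the fast lookup from its cascade/flow code;
 * foo_host and foo_read_hwirq() are assumed names.
 *
 *      irq_radix_revmap_insert(foo_host, virq, hwirq);
 *      ...
 *      hwirq = foo_read_hwirq();
 *      virq = irq_radix_revmap_lookup(foo_host, hwirq);
 *      if (virq)
 *              generic_handle_irq(virq);
 */
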
/**
 * irq_linear_revmap() - Find a linux irq from a hw irq number.
 * @domain: domain owning this hardware interrupt
 * @hwirq: hardware irq number in that domain space
 *
 * This is a fast path, for use by irq controller code that uses linear
 * revmaps. It falls back to the slow path if the revmap doesn't exist
 * yet and will create the revmap entry with appropriate locking.
 */
unsigned int irq_linear_revmap(struct irq_domain *domain,
                               irq_hw_number_t hwirq)
{
        unsigned int *revmap;

        if (WARN_ON_ONCE(domain->revmap_type != IRQ_DOMAIN_MAP_LINEAR))
                return irq_find_mapping(domain, hwirq);

        /* Check revmap bounds */
        if (unlikely(hwirq >= domain->revmap_data.linear.size))
                return irq_find_mapping(domain, hwirq);

        /* Check if revmap was allocated */
        revmap = domain->revmap_data.linear.revmap;
        if (unlikely(revmap == NULL))
                return irq_find_mapping(domain, hwirq);

        /* Fill up revmap with slow path if no mapping found */
        if (unlikely(!revmap[hwirq]))
                revmap[hwirq] = irq_find_mapping(domain, hwirq);

        return revmap[hwirq];
}

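/*
 * Usage sketch (editor's illustration): a cascaded controller's flow handler
 * would normally use the linear fast path to translate the pending hardware
 * line into a linux irq; foo_pic_host and foo_pic_get_pending() are assumed
 * names, and the handler prototype matches flow handlers of this era.
 *
 *      static void foo_pic_cascade(unsigned int irq, struct irq_desc *desc)
 *      {
 *              irq_hw_number_t hwirq = foo_pic_get_pending();
 *              unsigned int cascade_virq = irq_linear_revmap(foo_pic_host, hwirq);
 *
 *              if (cascade_virq)
 *                      generic_handle_irq(cascade_virq);
 *      }
 */
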
#ifdef CONFIG_VIRQ_DEBUG
static int virq_debug_show(struct seq_file *m, void *private)
{
        unsigned long flags;
        struct irq_desc *desc;
        const char *p;
        static const char none[] = "none";
        void *data;
        int i;

        seq_printf(m, "%-5s %-7s %-15s %-18s %s\n", "virq", "hwirq",
                   "chip name", "chip data", "domain name");

        for (i = 1; i < nr_irqs; i++) {
                desc = irq_to_desc(i);
                if (!desc)
                        continue;

                raw_spin_lock_irqsave(&desc->lock, flags);

                if (desc->action && desc->action->handler) {
                        struct irq_chip *chip;

                        seq_printf(m, "%5d ", i);
                        seq_printf(m, "0x%05lx ", desc->irq_data.hwirq);

                        chip = irq_desc_get_chip(desc);
                        if (chip && chip->name)
                                p = chip->name;
                        else
                                p = none;
                        seq_printf(m, "%-15s ", p);

                        data = irq_desc_get_chip_data(desc);
                        seq_printf(m, "0x%16p ", data);

                        if (desc->irq_data.domain->of_node)
                                p = desc->irq_data.domain->of_node->full_name;
                        else
                                p = none;
                        seq_printf(m, "%s\n", p);
                }

                raw_spin_unlock_irqrestore(&desc->lock, flags);
        }

        return 0;
}

static int virq_debug_open(struct inode *inode, struct file *file)
{
        return single_open(file, virq_debug_show, inode->i_private);
}

static const struct file_operations virq_debug_fops = {
        .open = virq_debug_open,
        .read = seq_read,
        .llseek = seq_lseek,
        .release = single_release,
};

static int __init irq_debugfs_init(void)
{
        if (debugfs_create_file("virq_mapping", S_IRUGO, powerpc_debugfs_root,
                                NULL, &virq_debug_fops) == NULL)
                return -ENOMEM;

        return 0;
}
__initcall(irq_debugfs_init);
#endif /* CONFIG_VIRQ_DEBUG */

#else /* CONFIG_PPC */

/**
 * irq_domain_add() - Register an irq_domain
 * @domain: ptr to initialized irq_domain structure
 *
 * Registers an irq_domain structure. The irq_domain must at a minimum be
 * initialized with an ops structure pointer, and either a ->to_irq hook or
 * a valid irq_base value. Everything else is optional.
 */
void irq_domain_add(struct irq_domain *domain)
{
        struct irq_data *d;
        int hwirq, irq;

        /*
         * This assumes that the irq_domain owner has already allocated
         * the irq_descs. This block will be removed when support for dynamic
         * allocation of irq_descs is added to irq_domain.
         */
        irq_domain_for_each_irq(domain, hwirq, irq) {
                d = irq_get_irq_data(irq);
                if (!d) {
                        WARN(1, "error: assigning domain to non-existent irq_desc");
                        return;
                }
                if (d->domain) {
                        /* things are broken; just report, don't clean up */
                        WARN(1, "error: irq_desc already assigned to a domain");
                        return;
                }
                d->domain = domain;
                d->hwirq = hwirq;
        }

        mutex_lock(&irq_domain_mutex);
        list_add(&domain->link, &irq_domain_list);
        mutex_unlock(&irq_domain_mutex);
}

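/*
 * Usage sketch (editor's illustration): on this !CONFIG_PPC path the caller
 * owns the irq_domain structure and the pre-allocated irq_descs, and only
 * registers the translation; foo_domain, foo_np and FOO_IRQ_BASE are assumed
 * names, and setting nr_irq here is an assumption about the desired range.
 *
 *      static struct irq_domain foo_domain;
 *
 *      foo_domain.irq_base = FOO_IRQ_BASE;
 *      foo_domain.nr_irq = 32;
 *      foo_domain.of_node = of_node_get(foo_np);
 *      foo_domain.ops = &irq_domain_simple_ops;
 *      irq_domain_add(&foo_domain);
 */
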
/**
 * irq_domain_del() - Unregister an irq_domain
 * @domain: ptr to registered irq_domain.
 */
void irq_domain_del(struct irq_domain *domain)
{
        struct irq_data *d;
        int hwirq, irq;

        mutex_lock(&irq_domain_mutex);
        list_del(&domain->link);
        mutex_unlock(&irq_domain_mutex);

        /* Clear the irq_domain assignments */
        irq_domain_for_each_irq(domain, hwirq, irq) {
                d = irq_get_irq_data(irq);
                d->domain = NULL;
        }
}

#if defined(CONFIG_OF_IRQ)
/**
 * irq_create_of_mapping() - Map a linux irq number from a DT interrupt spec
 *
 * Used by the device tree interrupt mapping code to translate a device tree
 * interrupt specifier to a valid linux irq number. Returns either a valid
 * linux IRQ number or 0.
 *
 * When the caller no longer needs the irq number returned by this function it
 * should arrange to call irq_dispose_mapping().
 */
unsigned int irq_create_of_mapping(struct device_node *controller,
                                   const u32 *intspec, unsigned int intsize)
{
        struct irq_domain *domain;
        unsigned long hwirq;
        unsigned int irq, type;
        int rc = -EINVAL;

        /* Find a domain which can translate the irq spec */
        mutex_lock(&irq_domain_mutex);
        list_for_each_entry(domain, &irq_domain_list, link) {
                if (!domain->ops->xlate)
                        continue;
                rc = domain->ops->xlate(domain, controller,
                                        intspec, intsize, &hwirq, &type);
                if (rc == 0)
                        break;
        }
        mutex_unlock(&irq_domain_mutex);

        if (rc != 0)
                return 0;

        irq = irq_domain_to_irq(domain, hwirq);
        if (type != IRQ_TYPE_NONE)
                irq_set_irq_type(irq, type);
        pr_debug("%s: mapped hwirq=%i to irq=%i, flags=%x\n",
                 controller->full_name, (int)hwirq, irq, type);
        return irq;
}
EXPORT_SYMBOL_GPL(irq_create_of_mapping);

/**
 * irq_dispose_mapping() - Discard a mapping created by irq_create_of_mapping()
 * @irq: linux irq number to be discarded
 *
 * Calling this function indicates the caller no longer needs a reference to
 * the linux irq number returned by a prior call to irq_create_of_mapping().
 */
void irq_dispose_mapping(unsigned int irq)
{
        /*
         * nothing yet; will be filled when support for dynamic allocation of
         * irq_descs is added to irq_domain
         */
}
EXPORT_SYMBOL_GPL(irq_dispose_mapping);

int irq_domain_simple_xlate(struct irq_domain *d,
                            struct device_node *controller,
                            const u32 *intspec, unsigned int intsize,
                            unsigned long *out_hwirq, unsigned int *out_type)
{
        if (d->of_node != controller)
                return -EINVAL;
        if (intsize < 1)
                return -EINVAL;
        if (d->nr_irq && ((intspec[0] < d->hwirq_base) ||
            (intspec[0] >= d->hwirq_base + d->nr_irq)))
                return -EINVAL;

        *out_hwirq = intspec[0];
        *out_type = IRQ_TYPE_NONE;
        if (intsize > 1)
                *out_type = intspec[1] & IRQ_TYPE_SENSE_MASK;
        return 0;
}

/**
 * irq_domain_add_simple() - Set up a 'simple' translation range
 */
void irq_domain_add_simple(struct device_node *controller, int irq_base)
{
        struct irq_domain *domain;

        domain = kzalloc(sizeof(*domain), GFP_KERNEL);
        if (!domain) {
                WARN_ON(1);
                return;
        }

        domain->irq_base = irq_base;
        domain->of_node = of_node_get(controller);
        domain->ops = &irq_domain_simple_ops;
        irq_domain_add(domain);
}
EXPORT_SYMBOL_GPL(irq_domain_add_simple);

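/*
 * Usage sketch (editor's illustration): board code that has already reserved
 * a block of irq_descs starting at FOO_IRQ_BASE can hook a device-tree
 * controller node up to them in one call; foo_np and FOO_IRQ_BASE are
 * assumed names.
 *
 *      irq_domain_add_simple(foo_np, FOO_IRQ_BASE);
 */
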
void irq_domain_generate_simple(const struct of_device_id *match,
                                u64 phys_base, unsigned int irq_start)
{
        struct device_node *node;
        pr_debug("looking for phys_base=%llx, irq_start=%i\n",
                 (unsigned long long) phys_base, (int) irq_start);
        node = of_find_matching_node_by_address(NULL, match, phys_base);
        if (node)
                irq_domain_add_simple(node, irq_start);
}
EXPORT_SYMBOL_GPL(irq_domain_generate_simple);
#endif /* CONFIG_OF_IRQ */

struct irq_domain_ops irq_domain_simple_ops = {
#ifdef CONFIG_OF_IRQ
        .xlate = irq_domain_simple_xlate,
#endif /* CONFIG_OF_IRQ */
};
EXPORT_SYMBOL_GPL(irq_domain_simple_ops);

#endif /* !CONFIG_PPC */