/*
 * regmap based irq_chip
 *
 * Copyright 2011 Wolfson Microelectronics plc
 *
 * Author: Mark Brown <broonie@opensource.wolfsonmicro.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/device.h>
#include <linux/export.h>
#include <linux/interrupt.h>
#include <linux/irq.h>
#include <linux/irqdomain.h>
#include <linux/pm_runtime.h>
#include <linux/regmap.h>
#include <linux/slab.h>

#include "internal.h"

struct regmap_irq_chip_data {
	struct mutex lock;
	struct irq_chip irq_chip;

	struct regmap *map;
	const struct regmap_irq_chip *chip;

	int irq_base;
	struct irq_domain *domain;

	int irq;
	int wake_count;

	void *status_reg_buf;		/* raw buffer for bulk status reads */
	unsigned int *status_buf;	/* unpacked status, one word per register */
	unsigned int *mask_buf;		/* current mask state */
	unsigned int *mask_buf_def;	/* default mask (all interrupts masked) */
	unsigned int *wake_buf;		/* wake enable state */
	unsigned int *type_buf;		/* current trigger type settings */
	unsigned int *type_buf_def;	/* trigger type defaults read from the chip */

	unsigned int irq_reg_stride;
	unsigned int type_reg_stride;

	bool clear_status:1;
};

static inline const
struct regmap_irq *irq_to_regmap_irq(struct regmap_irq_chip_data *data,
				     int irq)
{
	return &data->chip->irqs[irq];
}

static void regmap_irq_lock(struct irq_data *data)
{
	struct regmap_irq_chip_data *d = irq_data_get_irq_chip_data(data);

	mutex_lock(&d->lock);
}

static int regmap_irq_update_bits(struct regmap_irq_chip_data *d,
				  unsigned int reg, unsigned int mask,
				  unsigned int val)
{
	if (d->chip->mask_writeonly)
		return regmap_write_bits(d->map, reg, mask, val);
	else
		return regmap_update_bits(d->map, reg, mask, val);
}

static void regmap_irq_sync_unlock(struct irq_data *data)
{
	struct regmap_irq_chip_data *d = irq_data_get_irq_chip_data(data);
	struct regmap *map = d->map;
	int i, ret;
	u32 reg;
	u32 unmask_offset;
	u32 val;

	if (d->chip->runtime_pm) {
		ret = pm_runtime_get_sync(map->dev);
		if (ret < 0)
			dev_err(map->dev, "IRQ sync failed to resume: %d\n",
				ret);
	}

	if (d->clear_status) {
		for (i = 0; i < d->chip->num_regs; i++) {
			reg = d->chip->status_base +
				(i * map->reg_stride * d->irq_reg_stride);

			ret = regmap_read(map, reg, &val);
			if (ret)
				dev_err(d->map->dev,
					"Failed to clear the interrupt status bits\n");
		}

		d->clear_status = false;
	}

	/*
	 * If there's been a change in the mask write it back to the
	 * hardware.  We rely on the use of the regmap core cache to
	 * suppress pointless writes.
	 */
	for (i = 0; i < d->chip->num_regs; i++) {
		reg = d->chip->mask_base +
			(i * map->reg_stride * d->irq_reg_stride);
		if (d->chip->mask_invert) {
			ret = regmap_irq_update_bits(d, reg,
					d->mask_buf_def[i], ~d->mask_buf[i]);
		} else if (d->chip->unmask_base) {
			/* set mask with mask_base register */
			ret = regmap_irq_update_bits(d, reg,
					d->mask_buf_def[i], ~d->mask_buf[i]);
			if (ret < 0)
				dev_err(d->map->dev,
					"Failed to sync unmasks in %x\n",
					reg);
			unmask_offset = d->chip->unmask_base -
					d->chip->mask_base;
			/* clear mask with unmask_base register */
			ret = regmap_irq_update_bits(d,
					reg + unmask_offset,
					d->mask_buf_def[i],
					d->mask_buf[i]);
		} else {
			ret = regmap_irq_update_bits(d, reg,
					d->mask_buf_def[i], d->mask_buf[i]);
		}
		if (ret != 0)
			dev_err(d->map->dev, "Failed to sync masks in %x\n",
				reg);

		reg = d->chip->wake_base +
			(i * map->reg_stride * d->irq_reg_stride);
		if (d->wake_buf) {
			if (d->chip->wake_invert)
				ret = regmap_irq_update_bits(d, reg,
						d->mask_buf_def[i],
						~d->wake_buf[i]);
			else
				ret = regmap_irq_update_bits(d, reg,
						d->mask_buf_def[i],
						d->wake_buf[i]);
			if (ret != 0)
				dev_err(d->map->dev,
					"Failed to sync wakes in %x: %d\n",
					reg, ret);
		}

		if (!d->chip->init_ack_masked)
			continue;
		/*
		 * Ack all masked interrupts unconditionally: a masked
		 * interrupt that is left unacked will be ignored by the
		 * IRQ handler and may cause an interrupt storm.
		 */
		if (d->mask_buf[i] && (d->chip->ack_base || d->chip->use_ack)) {
			reg = d->chip->ack_base +
				(i * map->reg_stride * d->irq_reg_stride);
			/* some chips ack by writing 0 */
			if (d->chip->ack_invert)
				ret = regmap_write(map, reg, ~d->mask_buf[i]);
			else
				ret = regmap_write(map, reg, d->mask_buf[i]);
			if (ret != 0)
				dev_err(d->map->dev, "Failed to ack 0x%x: %d\n",
					reg, ret);
		}
	}

	/* Don't update the type bits if we're using mask bits for irq type. */
	if (!d->chip->type_in_mask) {
		for (i = 0; i < d->chip->num_type_reg; i++) {
			if (!d->type_buf_def[i])
				continue;
			reg = d->chip->type_base +
				(i * map->reg_stride * d->type_reg_stride);
			if (d->chip->type_invert)
				ret = regmap_irq_update_bits(d, reg,
					d->type_buf_def[i], ~d->type_buf[i]);
			else
				ret = regmap_irq_update_bits(d, reg,
					d->type_buf_def[i], d->type_buf[i]);
			if (ret != 0)
				dev_err(d->map->dev, "Failed to sync type in %x\n",
					reg);
		}
	}

	if (d->chip->runtime_pm)
		pm_runtime_put(map->dev);

	/* If we've changed our wakeup count propagate it to the parent */
	if (d->wake_count < 0)
		for (i = d->wake_count; i < 0; i++)
			irq_set_irq_wake(d->irq, 0);
	else if (d->wake_count > 0)
		for (i = 0; i < d->wake_count; i++)
			irq_set_irq_wake(d->irq, 1);

	d->wake_count = 0;

	mutex_unlock(&d->lock);
}

static void regmap_irq_enable(struct irq_data *data)
{
	struct regmap_irq_chip_data *d = irq_data_get_irq_chip_data(data);
	struct regmap *map = d->map;
	const struct regmap_irq *irq_data = irq_to_regmap_irq(d, data->hwirq);
	unsigned int mask, type;

	type = irq_data->type.type_falling_val | irq_data->type.type_rising_val;

	/*
	 * The type_in_mask flag means that the underlying hardware uses
	 * separate mask bits for rising and falling edge interrupts, but
	 * we want to make them into a single virtual interrupt with
	 * configurable edge.
	 *
	 * If the interrupt we're enabling defines the falling or rising
	 * masks then instead of using the regular mask bits for this
	 * interrupt, use the value previously written to the type buffer
	 * at the corresponding offset in regmap_irq_set_type().
	 */
	if (d->chip->type_in_mask && type)
		mask = d->type_buf[irq_data->reg_offset / map->reg_stride];
	else
		mask = irq_data->mask;

	if (d->chip->clear_on_unmask)
		d->clear_status = true;

	d->mask_buf[irq_data->reg_offset / map->reg_stride] &= ~mask;
}

static void regmap_irq_disable(struct irq_data *data)
{
	struct regmap_irq_chip_data *d = irq_data_get_irq_chip_data(data);
	struct regmap *map = d->map;
	const struct regmap_irq *irq_data = irq_to_regmap_irq(d, data->hwirq);

	d->mask_buf[irq_data->reg_offset / map->reg_stride] |= irq_data->mask;
}

static int regmap_irq_set_type(struct irq_data *data, unsigned int type)
{
	struct regmap_irq_chip_data *d = irq_data_get_irq_chip_data(data);
	struct regmap *map = d->map;
	const struct regmap_irq *irq_data = irq_to_regmap_irq(d, data->hwirq);
	int reg;
	const struct regmap_irq_type *t = &irq_data->type;

	if ((t->types_supported & type) != type)
		return -ENOTSUPP;

	reg = t->type_reg_offset / map->reg_stride;

	if (t->type_reg_mask)
		d->type_buf[reg] &= ~t->type_reg_mask;
	else
		d->type_buf[reg] &= ~(t->type_falling_val |
				      t->type_rising_val |
				      t->type_level_low_val |
				      t->type_level_high_val);
	switch (type) {
	case IRQ_TYPE_EDGE_FALLING:
		d->type_buf[reg] |= t->type_falling_val;
		break;

	case IRQ_TYPE_EDGE_RISING:
		d->type_buf[reg] |= t->type_rising_val;
		break;

	case IRQ_TYPE_EDGE_BOTH:
		d->type_buf[reg] |= (t->type_falling_val |
				     t->type_rising_val);
		break;

	case IRQ_TYPE_LEVEL_HIGH:
		d->type_buf[reg] |= t->type_level_high_val;
		break;

	case IRQ_TYPE_LEVEL_LOW:
		d->type_buf[reg] |= t->type_level_low_val;
		break;

	default:
		return -EINVAL;
	}
	return 0;
}

static int regmap_irq_set_wake(struct irq_data *data, unsigned int on)
{
	struct regmap_irq_chip_data *d = irq_data_get_irq_chip_data(data);
	struct regmap *map = d->map;
	const struct regmap_irq *irq_data = irq_to_regmap_irq(d, data->hwirq);

	if (on) {
		if (d->wake_buf)
			d->wake_buf[irq_data->reg_offset / map->reg_stride]
				&= ~irq_data->mask;
		d->wake_count++;
	} else {
		if (d->wake_buf)
			d->wake_buf[irq_data->reg_offset / map->reg_stride]
				|= irq_data->mask;
		d->wake_count--;
	}

	return 0;
}

static const struct irq_chip regmap_irq_chip = {
	.irq_bus_lock = regmap_irq_lock,
	.irq_bus_sync_unlock = regmap_irq_sync_unlock,
	.irq_disable = regmap_irq_disable,
	.irq_enable = regmap_irq_enable,
	.irq_set_type = regmap_irq_set_type,
	.irq_set_wake = regmap_irq_set_wake,
};

static irqreturn_t regmap_irq_thread(int irq, void *d)
{
	struct regmap_irq_chip_data *data = d;
	const struct regmap_irq_chip *chip = data->chip;
	struct regmap *map = data->map;
	int ret, i;
	bool handled = false;
	u32 reg;

	if (chip->handle_pre_irq)
		chip->handle_pre_irq(chip->irq_drv_data);

	if (chip->runtime_pm) {
		ret = pm_runtime_get_sync(map->dev);
		if (ret < 0) {
			dev_err(map->dev, "IRQ thread failed to resume: %d\n",
				ret);
			pm_runtime_put(map->dev);
			goto exit;
		}
	}

	/*
	 * Read in the statuses, using a single bulk read if possible
	 * in order to reduce the I/O overheads.
	 */
	if (!map->use_single_read && map->reg_stride == 1 &&
	    data->irq_reg_stride == 1) {
		u8 *buf8 = data->status_reg_buf;
		u16 *buf16 = data->status_reg_buf;
		u32 *buf32 = data->status_reg_buf;

		BUG_ON(!data->status_reg_buf);

		ret = regmap_bulk_read(map, chip->status_base,
				       data->status_reg_buf,
				       chip->num_regs);
		if (ret != 0) {
			dev_err(map->dev, "Failed to read IRQ status: %d\n",
				ret);
			/* Drop the runtime PM reference, as in the non-bulk path */
			if (chip->runtime_pm)
				pm_runtime_put(map->dev);
			goto exit;
		}

		for (i = 0; i < data->chip->num_regs; i++) {
			switch (map->format.val_bytes) {
			case 1:
				data->status_buf[i] = buf8[i];
				break;
			case 2:
				data->status_buf[i] = buf16[i];
				break;
			case 4:
				data->status_buf[i] = buf32[i];
				break;
			default:
				BUG();
				goto exit;
			}
		}

	} else {
		for (i = 0; i < data->chip->num_regs; i++) {
			ret = regmap_read(map, chip->status_base +
					  (i * map->reg_stride
					   * data->irq_reg_stride),
					  &data->status_buf[i]);

			if (ret != 0) {
				dev_err(map->dev,
					"Failed to read IRQ status: %d\n",
					ret);
				if (chip->runtime_pm)
					pm_runtime_put(map->dev);
				goto exit;
			}
		}
	}

	/*
	 * Ignore masked IRQs and ack if we need to; we ack early so
	 * there is no race between handling and acknowledging the
	 * interrupt.  We assume that typically few of the interrupts
	 * will fire simultaneously so don't worry about overhead from
	 * doing a write per register.
	 */
	for (i = 0; i < data->chip->num_regs; i++) {
		data->status_buf[i] &= ~data->mask_buf[i];

		if (data->status_buf[i] && (chip->ack_base || chip->use_ack)) {
			reg = chip->ack_base +
				(i * map->reg_stride * data->irq_reg_stride);
			ret = regmap_write(map, reg, data->status_buf[i]);
			if (ret != 0)
				dev_err(map->dev, "Failed to ack 0x%x: %d\n",
					reg, ret);
		}
	}

	for (i = 0; i < chip->num_irqs; i++) {
		if (data->status_buf[chip->irqs[i].reg_offset /
				     map->reg_stride] & chip->irqs[i].mask) {
			handle_nested_irq(irq_find_mapping(data->domain, i));
			handled = true;
		}
	}

	if (chip->runtime_pm)
		pm_runtime_put(map->dev);

exit:
	if (chip->handle_post_irq)
		chip->handle_post_irq(chip->irq_drv_data);

	if (handled)
		return IRQ_HANDLED;
	else
		return IRQ_NONE;
}

static int regmap_irq_map(struct irq_domain *h, unsigned int virq,
			  irq_hw_number_t hw)
{
	struct regmap_irq_chip_data *data = h->host_data;

	irq_set_chip_data(virq, data);
	irq_set_chip(virq, &data->irq_chip);
	irq_set_nested_thread(virq, 1);
	irq_set_parent(virq, data->irq);
	irq_set_noprobe(virq);

	return 0;
}

static const struct irq_domain_ops regmap_domain_ops = {
	.map = regmap_irq_map,
	.xlate = irq_domain_xlate_onetwocell,
};

/**
 * regmap_add_irq_chip() - Use standard regmap IRQ controller handling
 *
 * @map: The regmap for the device.
 * @irq: The IRQ the device uses to signal interrupts.
 * @irq_flags: The IRQF_ flags to use for the primary interrupt.
 * @irq_base: Allocate at specific IRQ number if irq_base > 0.
 * @chip: Configuration for the interrupt controller.
 * @data: Runtime data structure for the controller, allocated on success.
 *
 * Returns 0 on success or an errno on failure.
 *
 * In order for this to be efficient the chip really should use a
 * register cache.  The chip driver is responsible for restoring the
 * register values used by the IRQ controller over suspend and resume.
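 *
 * A minimal usage sketch follows; the FOO_* register names, the IRQ
 * indices and the trigger flag are hypothetical, not taken from any
 * real device:
 *
 *	static const struct regmap_irq foo_irqs[] = {
 *		REGMAP_IRQ_REG(FOO_IRQ_ALARM, 0, BIT(0)),
 *		REGMAP_IRQ_REG(FOO_IRQ_FAULT, 0, BIT(1)),
 *	};
 *
 *	static const struct regmap_irq_chip foo_irq_chip = {
 *		.name = "foo",
 *		.status_base = FOO_REG_IRQ_STATUS,
 *		.mask_base = FOO_REG_IRQ_MASK,
 *		.ack_base = FOO_REG_IRQ_STATUS,
 *		.num_regs = 1,
 *		.irqs = foo_irqs,
 *		.num_irqs = ARRAY_SIZE(foo_irqs),
 *	};
 *
 *	ret = regmap_add_irq_chip(map, irq, IRQF_TRIGGER_LOW, 0,
 *				  &foo_irq_chip, &irq_data);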
 */
int regmap_add_irq_chip(struct regmap *map, int irq, int irq_flags,
			int irq_base, const struct regmap_irq_chip *chip,
			struct regmap_irq_chip_data **data)
{
	struct regmap_irq_chip_data *d;
	int i;
	int ret = -ENOMEM;
	int num_type_reg;
	u32 reg;
	u32 unmask_offset;

	if (chip->num_regs <= 0)
		return -EINVAL;

	if (chip->clear_on_unmask && (chip->ack_base || chip->use_ack))
		return -EINVAL;

	for (i = 0; i < chip->num_irqs; i++) {
		if (chip->irqs[i].reg_offset % map->reg_stride)
			return -EINVAL;
		if (chip->irqs[i].reg_offset / map->reg_stride >=
		    chip->num_regs)
			return -EINVAL;
	}

	if (irq_base) {
		irq_base = irq_alloc_descs(irq_base, 0, chip->num_irqs, 0);
		if (irq_base < 0) {
			dev_warn(map->dev, "Failed to allocate IRQs: %d\n",
				 irq_base);
			return irq_base;
		}
	}

	d = kzalloc(sizeof(*d), GFP_KERNEL);
	if (!d)
		return -ENOMEM;

	d->status_buf = kcalloc(chip->num_regs, sizeof(unsigned int),
				GFP_KERNEL);
	if (!d->status_buf)
		goto err_alloc;

	d->mask_buf = kcalloc(chip->num_regs, sizeof(unsigned int),
			      GFP_KERNEL);
	if (!d->mask_buf)
		goto err_alloc;

	d->mask_buf_def = kcalloc(chip->num_regs, sizeof(unsigned int),
				  GFP_KERNEL);
	if (!d->mask_buf_def)
		goto err_alloc;

	if (chip->wake_base) {
		d->wake_buf = kcalloc(chip->num_regs, sizeof(unsigned int),
				      GFP_KERNEL);
		if (!d->wake_buf)
			goto err_alloc;
	}

	num_type_reg = chip->type_in_mask ? chip->num_regs : chip->num_type_reg;
	if (num_type_reg) {
		d->type_buf_def = kcalloc(num_type_reg,
					  sizeof(unsigned int), GFP_KERNEL);
		if (!d->type_buf_def)
			goto err_alloc;

		d->type_buf = kcalloc(num_type_reg, sizeof(unsigned int),
				      GFP_KERNEL);
		if (!d->type_buf)
			goto err_alloc;
	}

	d->irq_chip = regmap_irq_chip;
	d->irq_chip.name = chip->name;
	d->irq = irq;
	d->map = map;
	d->chip = chip;
	d->irq_base = irq_base;

	if (chip->irq_reg_stride)
		d->irq_reg_stride = chip->irq_reg_stride;
	else
		d->irq_reg_stride = 1;

	if (chip->type_reg_stride)
		d->type_reg_stride = chip->type_reg_stride;
	else
		d->type_reg_stride = 1;

	if (!map->use_single_read && map->reg_stride == 1 &&
	    d->irq_reg_stride == 1) {
		d->status_reg_buf = kmalloc_array(chip->num_regs,
						  map->format.val_bytes,
						  GFP_KERNEL);
		if (!d->status_reg_buf)
			goto err_alloc;
	}

	mutex_init(&d->lock);

	for (i = 0; i < chip->num_irqs; i++)
		d->mask_buf_def[chip->irqs[i].reg_offset / map->reg_stride]
			|= chip->irqs[i].mask;

	/* Mask all the interrupts by default */
	for (i = 0; i < chip->num_regs; i++) {
		d->mask_buf[i] = d->mask_buf_def[i];
		reg = chip->mask_base +
			(i * map->reg_stride * d->irq_reg_stride);
		if (chip->mask_invert)
			ret = regmap_irq_update_bits(d, reg,
					d->mask_buf[i], ~d->mask_buf[i]);
		else if (d->chip->unmask_base) {
			unmask_offset = d->chip->unmask_base -
					d->chip->mask_base;
			ret = regmap_irq_update_bits(d,
					reg + unmask_offset,
					d->mask_buf[i],
					d->mask_buf[i]);
		} else
			ret = regmap_irq_update_bits(d, reg,
					d->mask_buf[i], d->mask_buf[i]);
		if (ret != 0) {
			dev_err(map->dev, "Failed to set masks in 0x%x: %d\n",
				reg, ret);
			goto err_alloc;
		}

		if (!chip->init_ack_masked)
			continue;

		/* Ack interrupts that are masked but currently set */
		reg = chip->status_base +
			(i * map->reg_stride * d->irq_reg_stride);
		ret = regmap_read(map, reg, &d->status_buf[i]);
		if (ret != 0) {
			dev_err(map->dev, "Failed to read IRQ status: %d\n",
				ret);
			goto err_alloc;
		}

		if (d->status_buf[i] && (chip->ack_base || chip->use_ack)) {
			reg = chip->ack_base +
				(i * map->reg_stride * d->irq_reg_stride);
			if (chip->ack_invert)
				ret = regmap_write(map, reg,
					~(d->status_buf[i] & d->mask_buf[i]));
			else
				ret = regmap_write(map, reg,
					d->status_buf[i] & d->mask_buf[i]);
			if (ret != 0) {
				dev_err(map->dev, "Failed to ack 0x%x: %d\n",
					reg, ret);
				goto err_alloc;
			}
		}
	}

	/* Wake is disabled by default */
	if (d->wake_buf) {
		for (i = 0; i < chip->num_regs; i++) {
			d->wake_buf[i] = d->mask_buf_def[i];
			reg = chip->wake_base +
				(i * map->reg_stride * d->irq_reg_stride);

			if (chip->wake_invert)
				ret = regmap_irq_update_bits(d, reg,
						d->mask_buf_def[i],
						0);
			else
				ret = regmap_irq_update_bits(d, reg,
						d->mask_buf_def[i],
						d->wake_buf[i]);
			if (ret != 0) {
				dev_err(map->dev, "Failed to set wakes in 0x%x: %d\n",
					reg, ret);
				goto err_alloc;
			}
		}
	}

	if (chip->num_type_reg && !chip->type_in_mask) {
		for (i = 0; i < chip->num_type_reg; ++i) {
			reg = chip->type_base +
				(i * map->reg_stride * d->type_reg_stride);

			ret = regmap_read(map, reg, &d->type_buf_def[i]);
			if (ret) {
				dev_err(map->dev, "Failed to get type defaults at 0x%x: %d\n",
					reg, ret);
				goto err_alloc;
			}

			if (d->chip->type_invert)
				d->type_buf_def[i] = ~d->type_buf_def[i];
		}
	}

	if (irq_base)
		d->domain = irq_domain_add_legacy(map->dev->of_node,
						  chip->num_irqs, irq_base, 0,
						  &regmap_domain_ops, d);
	else
		d->domain = irq_domain_add_linear(map->dev->of_node,
						  chip->num_irqs,
						  &regmap_domain_ops, d);
	if (!d->domain) {
		dev_err(map->dev, "Failed to create IRQ domain\n");
		ret = -ENOMEM;
		goto err_alloc;
	}

	ret = request_threaded_irq(irq, NULL, regmap_irq_thread,
				   irq_flags | IRQF_ONESHOT,
				   chip->name, d);
	if (ret != 0) {
		dev_err(map->dev, "Failed to request IRQ %d for %s: %d\n",
			irq, chip->name, ret);
		goto err_domain;
	}

	*data = d;

	return 0;

err_domain:
	/* Should really dispose of the domain but... */
err_alloc:
	kfree(d->type_buf);
	kfree(d->type_buf_def);
	kfree(d->wake_buf);
	kfree(d->mask_buf_def);
	kfree(d->mask_buf);
	kfree(d->status_buf);
	kfree(d->status_reg_buf);
	kfree(d);
	return ret;
}
EXPORT_SYMBOL_GPL(regmap_add_irq_chip);

/**
 * regmap_del_irq_chip() - Stop interrupt handling for a regmap IRQ chip
 *
 * @irq: Primary IRQ for the device.
 * @d: &regmap_irq_chip_data allocated by regmap_add_irq_chip().
 *
 * This function also disposes of all mapped IRQs on the chip.
 */
void regmap_del_irq_chip(int irq, struct regmap_irq_chip_data *d)
{
	unsigned int virq;
	int hwirq;

	if (!d)
		return;

	free_irq(irq, d);

	/* Dispose of all virtual IRQs from the domain before removing it */
	for (hwirq = 0; hwirq < d->chip->num_irqs; hwirq++) {
		/* Ignore hwirqs that correspond to holes in the IRQ list */
		if (!d->chip->irqs[hwirq].mask)
			continue;

		/*
		 * Find the virtual IRQ mapped to this hwirq, if any,
		 * and dispose of it.
		 */
		virq = irq_find_mapping(d->domain, hwirq);
		if (virq)
			irq_dispose_mapping(virq);
	}

	irq_domain_remove(d->domain);
	kfree(d->type_buf);
	kfree(d->type_buf_def);
	kfree(d->wake_buf);
	kfree(d->mask_buf_def);
	kfree(d->mask_buf);
	kfree(d->status_reg_buf);
	kfree(d->status_buf);
	kfree(d);
}
EXPORT_SYMBOL_GPL(regmap_del_irq_chip);

static void devm_regmap_irq_chip_release(struct device *dev, void *res)
{
	struct regmap_irq_chip_data *d = *(struct regmap_irq_chip_data **)res;

	regmap_del_irq_chip(d->irq, d);
}

static int devm_regmap_irq_chip_match(struct device *dev, void *res, void *data)
{
	struct regmap_irq_chip_data **r = res;

	if (!r || !*r) {
		WARN_ON(!r || !*r);
		return 0;
	}
	return *r == data;
}

/**
 * devm_regmap_add_irq_chip() - Resource managed regmap_add_irq_chip()
 *
 * @dev: The device to which the irq_chip belongs.
 * @map: The regmap for the device.
 * @irq: The IRQ the device uses to signal interrupts.
 * @irq_flags: The IRQF_ flags to use for the primary interrupt.
 * @irq_base: Allocate at specific IRQ number if irq_base > 0.
 * @chip: Configuration for the interrupt controller.
 * @data: Runtime data structure for the controller, allocated on success.
 *
 * Returns 0 on success or an errno on failure.
 *
 * The &regmap_irq_chip_data will be automatically released when the device is
 * unbound.
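 *
 * A typical probe-time call might look like the sketch below; the
 * foo_irq_chip definition is the hypothetical one shown for
 * regmap_add_irq_chip() above:
 *
 *	ret = devm_regmap_add_irq_chip(dev, map, irq, IRQF_TRIGGER_LOW, 0,
 *				       &foo_irq_chip, &irq_data);
 *	if (ret)
 *		return ret;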
 */
int devm_regmap_add_irq_chip(struct device *dev, struct regmap *map, int irq,
			     int irq_flags, int irq_base,
			     const struct regmap_irq_chip *chip,
			     struct regmap_irq_chip_data **data)
{
	struct regmap_irq_chip_data **ptr, *d;
	int ret;

	ptr = devres_alloc(devm_regmap_irq_chip_release, sizeof(*ptr),
			   GFP_KERNEL);
	if (!ptr)
		return -ENOMEM;

	ret = regmap_add_irq_chip(map, irq, irq_flags, irq_base,
				  chip, &d);
	if (ret < 0) {
		devres_free(ptr);
		return ret;
	}

	*ptr = d;
	devres_add(dev, ptr);
	*data = d;
	return 0;
}
EXPORT_SYMBOL_GPL(devm_regmap_add_irq_chip);

/**
 * devm_regmap_del_irq_chip() - Resource managed regmap_del_irq_chip()
 *
 * @dev: Device for which the resource was allocated.
 * @irq: Primary IRQ for the device.
 * @data: &regmap_irq_chip_data allocated by regmap_add_irq_chip().
 *
 * A resource managed version of regmap_del_irq_chip().
 */
void devm_regmap_del_irq_chip(struct device *dev, int irq,
			      struct regmap_irq_chip_data *data)
{
	int rc;

	WARN_ON(irq != data->irq);
	rc = devres_release(dev, devm_regmap_irq_chip_release,
			    devm_regmap_irq_chip_match, data);

	WARN_ON(rc);
}
EXPORT_SYMBOL_GPL(devm_regmap_del_irq_chip);

/**
 * regmap_irq_chip_get_base() - Retrieve interrupt base for a regmap IRQ chip
 *
 * @data: regmap irq controller to operate on.
 *
 * Useful for drivers to request their own IRQs.
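 *
 * For instance, a driver registered with a fixed irq_base can compute
 * Linux IRQ numbers directly (FOO_IRQ_ALARM is a hypothetical index
 * into the chip's IRQ table):
 *
 *	irq = regmap_irq_chip_get_base(data) + FOO_IRQ_ALARM;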
 */
int regmap_irq_chip_get_base(struct regmap_irq_chip_data *data)
{
	WARN_ON(!data->irq_base);
	return data->irq_base;
}
EXPORT_SYMBOL_GPL(regmap_irq_chip_get_base);

/**
 * regmap_irq_get_virq() - Map an interrupt on a chip to a virtual IRQ
 *
 * @data: regmap irq controller to operate on.
 * @irq: index of the interrupt requested in the chip IRQs.
 *
 * Useful for drivers to request their own IRQs.
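 *
 * For example, a driver might map one of the chip's interrupts and
 * request it; FOO_IRQ_ALARM and foo_alarm_handler are hypothetical:
 *
 *	virq = regmap_irq_get_virq(irq_data, FOO_IRQ_ALARM);
 *	if (virq <= 0)
 *		return virq < 0 ? virq : -ENXIO;
 *	ret = devm_request_threaded_irq(dev, virq, NULL, foo_alarm_handler,
 *					IRQF_ONESHOT, "foo-alarm", foo);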
 */
int regmap_irq_get_virq(struct regmap_irq_chip_data *data, int irq)
{
	/* Handle holes in the IRQ list */
	if (!data->chip->irqs[irq].mask)
		return -EINVAL;

	return irq_create_mapping(data->domain, irq);
}
EXPORT_SYMBOL_GPL(regmap_irq_get_virq);

/**
 * regmap_irq_get_domain() - Retrieve the irq_domain for the chip
 *
 * @data: regmap_irq controller to operate on.
 *
 * Useful for drivers to request their own IRQs and for integration
 * with subsystems.  For ease of integration a NULL @data pointer is
 * accepted, in which case NULL is returned, so devices can call this
 * even if no domain was ever allocated.
 */
struct irq_domain *regmap_irq_get_domain(struct regmap_irq_chip_data *data)
{
	if (data)
		return data->domain;
	else
		return NULL;
}
EXPORT_SYMBOL_GPL(regmap_irq_get_domain);