git.proxmox.com - mirror_ubuntu-bionic-kernel.git @ 5c82e3bdafdf0f61f054b7ea14144346a4390002 - drivers/irqchip/exynos-combiner.c
/*
 * Copyright (c) 2010-2011 Samsung Electronics Co., Ltd.
 *		http://www.samsung.com
 *
 * Combiner irqchip for EXYNOS
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/err.h>
#include <linux/export.h>
#include <linux/init.h>
#include <linux/io.h>
#include <linux/slab.h>
#include <linux/syscore_ops.h>
#include <linux/irqdomain.h>
#include <linux/irqchip/chained_irq.h>
#include <linux/interrupt.h>
#include <linux/of_address.h>
#include <linux/of_irq.h>

#include "irqchip.h"

#define COMBINER_ENABLE_SET	0x0
#define COMBINER_ENABLE_CLEAR	0x4
#define COMBINER_INT_STATUS	0xC

#define IRQ_IN_COMBINER		8

static DEFINE_SPINLOCK(irq_controller_lock);

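/*
 * Per-group state. Each combiner group multiplexes up to eight input lines
 * (IRQ_IN_COMBINER) onto one parent GIC interrupt. Four groups share one
 * 32-bit register block: @irq_mask selects this group's byte within those
 * registers and @hwirq_offset is the hwirq that corresponds to bit 0 of the
 * shared status word.
 */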
struct combiner_chip_data {
	unsigned int hwirq_offset;
	unsigned int irq_mask;
	void __iomem *base;
	unsigned int parent_irq;
#ifdef CONFIG_PM
	u32 pm_save;
#endif
};

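/*
 * All groups are allocated as a single array indexed by group number and are
 * served by one linear irq domain. max_nr defaults to 20 groups and can be
 * overridden by the "samsung,combiner-nr" DT property.
 */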
static struct combiner_chip_data *combiner_data;
static struct irq_domain *combiner_irq_domain;
static unsigned int max_nr = 20;

static inline void __iomem *combiner_base(struct irq_data *data)
{
	struct combiner_chip_data *combiner_data =
		irq_data_get_irq_chip_data(data);

	return combiner_data->base;
}

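/*
 * Masking and unmasking go through the separate ENABLE_SET/ENABLE_CLEAR
 * registers, so no read-modify-write (and hence no locking) is needed: each
 * input line is a single bit at position (hwirq % 32) in the shared block.
 */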
static void combiner_mask_irq(struct irq_data *data)
{
	u32 mask = 1 << (data->hwirq % 32);

	__raw_writel(mask, combiner_base(data) + COMBINER_ENABLE_CLEAR);
}

static void combiner_unmask_irq(struct irq_data *data)
{
	u32 mask = 1 << (data->hwirq % 32);

	__raw_writel(mask, combiner_base(data) + COMBINER_ENABLE_SET);
}

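/*
 * Chained handler for the parent GIC interrupt of one combiner group: read
 * the shared status register, keep only this group's byte, translate the
 * lowest pending bit into a hwirq of the combiner domain and hand the
 * corresponding Linux interrupt to its normal flow handler.
 */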
static void combiner_handle_cascade_irq(unsigned int irq, struct irq_desc *desc)
{
	struct combiner_chip_data *chip_data = irq_get_handler_data(irq);
	struct irq_chip *chip = irq_get_chip(irq);
	unsigned int cascade_irq, combiner_irq;
	unsigned long status;

	chained_irq_enter(chip, desc);

	spin_lock(&irq_controller_lock);
	status = __raw_readl(chip_data->base + COMBINER_INT_STATUS);
	spin_unlock(&irq_controller_lock);
	status &= chip_data->irq_mask;

	if (status == 0)
		goto out;

	combiner_irq = chip_data->hwirq_offset + __ffs(status);
	cascade_irq = irq_find_mapping(combiner_irq_domain, combiner_irq);

	if (unlikely(!cascade_irq))
		handle_bad_irq(irq, desc);
	else
		generic_handle_irq(cascade_irq);

 out:
	chained_irq_exit(chip, desc);
}

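/*
 * The combiner has no affinity control of its own; affinity is applied to
 * the parent GIC interrupt, so it always affects the whole group rather
 * than a single combined line.
 */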
#ifdef CONFIG_SMP
static int combiner_set_affinity(struct irq_data *d,
				 const struct cpumask *mask_val, bool force)
{
	struct combiner_chip_data *chip_data = irq_data_get_irq_chip_data(d);
	struct irq_chip *chip = irq_get_chip(chip_data->parent_irq);
	struct irq_data *data = irq_get_irq_data(chip_data->parent_irq);

	if (chip && chip->irq_set_affinity)
		return chip->irq_set_affinity(data, mask_val, force);
	else
		return -EINVAL;
}
#endif

static struct irq_chip combiner_chip = {
	.name			= "COMBINER",
	.irq_mask		= combiner_mask_irq,
	.irq_unmask		= combiner_unmask_irq,
#ifdef CONFIG_SMP
	.irq_set_affinity	= combiner_set_affinity,
#endif
};

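/*
 * Install combiner_handle_cascade_irq() as the chained handler for the
 * group's parent interrupt and attach the per-group data to it.
 */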
static void __init combiner_cascade_irq(struct combiner_chip_data *combiner_data,
					unsigned int irq)
{
	if (irq_set_handler_data(irq, combiner_data) != 0)
		BUG();
	irq_set_chained_handler(irq, combiner_handle_cascade_irq);
}

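/*
 * Register blocks are 0x10 bytes apart and each block serves four groups,
 * one byte per group: group n uses base + (n / 4) * 0x10 and bits
 * (n % 4) * 8 .. (n % 4) * 8 + 7. All inputs start out masked.
 */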
static void __init combiner_init_one(struct combiner_chip_data *combiner_data,
				     unsigned int combiner_nr,
				     void __iomem *base, unsigned int irq)
{
	combiner_data->base = base;
	combiner_data->hwirq_offset = (combiner_nr & ~3) * IRQ_IN_COMBINER;
	combiner_data->irq_mask = 0xff << ((combiner_nr % 4) << 3);
	combiner_data->parent_irq = irq;

	/* Disable all interrupts */
	__raw_writel(combiner_data->irq_mask, base + COMBINER_ENABLE_CLEAR);
}

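/*
 * Translate a two-cell DT interrupt specifier <group pin> into a linear
 * hwirq: hwirq = group * 8 + pin. For example (illustrative values only),
 * "interrupts = <2 4>" maps to hwirq 20.
 */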
static int combiner_irq_domain_xlate(struct irq_domain *d,
				     struct device_node *controller,
				     const u32 *intspec, unsigned int intsize,
				     unsigned long *out_hwirq,
				     unsigned int *out_type)
{
	if (d->of_node != controller)
		return -EINVAL;

	if (intsize < 2)
		return -EINVAL;

	*out_hwirq = intspec[0] * IRQ_IN_COMBINER + intspec[1];
	*out_type = 0;

	return 0;
}

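/*
 * Set up a newly mapped interrupt: level flow handling with this driver's
 * irq_chip, and chip data pointing at the owning group (hw / 8).
 */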
static int combiner_irq_domain_map(struct irq_domain *d, unsigned int irq,
				   irq_hw_number_t hw)
{
	struct combiner_chip_data *combiner_data = d->host_data;

	irq_set_chip_and_handler(irq, &combiner_chip, handle_level_irq);
	irq_set_chip_data(irq, &combiner_data[hw >> 3]);
	set_irq_flags(irq, IRQF_VALID | IRQF_PROBE);

	return 0;
}

static const struct irq_domain_ops combiner_irq_domain_ops = {
	.xlate	= combiner_irq_domain_xlate,
	.map	= combiner_irq_domain_map,
};

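/*
 * Allocate the per-group data, create one linear domain covering
 * max_nr * 8 hwirqs, then initialise each group and chain it onto the
 * parent interrupt taken from the i-th entry of the node's "interrupts"
 * property.
 */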
static void __init combiner_init(void __iomem *combiner_base,
				 struct device_node *np)
{
	int i, irq;
	unsigned int nr_irq;

	nr_irq = max_nr * IRQ_IN_COMBINER;

	combiner_data = kcalloc(max_nr, sizeof (*combiner_data), GFP_KERNEL);
	if (!combiner_data) {
		pr_warning("%s: could not allocate combiner data\n", __func__);
		return;
	}

	combiner_irq_domain = irq_domain_add_linear(np, nr_irq,
				&combiner_irq_domain_ops, combiner_data);
	if (WARN_ON(!combiner_irq_domain)) {
		pr_warning("%s: irq domain init failed\n", __func__);
		return;
	}

	for (i = 0; i < max_nr; i++) {
		irq = irq_of_parse_and_map(np, i);

		combiner_init_one(&combiner_data[i], i,
				  combiner_base + (i >> 2) * 0x10, irq);
		combiner_cascade_irq(&combiner_data[i], irq);
	}
}

#ifdef CONFIG_PM

/**
 * combiner_suspend - save interrupt combiner state before suspend
 *
 * Save the interrupt enable set register for all combiner groups since
 * the state is lost when the system enters into a sleep state.
 *
 */
static int combiner_suspend(void)
{
	int i;

	for (i = 0; i < max_nr; i++)
		combiner_data[i].pm_save =
			__raw_readl(combiner_data[i].base + COMBINER_ENABLE_SET);

	return 0;
}

/**
 * combiner_resume - restore interrupt combiner state after resume
 *
 * Restore the interrupt enable set register for all combiner groups since
 * the state is lost when the system enters into a sleep state on suspend.
 *
 */
static void combiner_resume(void)
{
	int i;

	for (i = 0; i < max_nr; i++) {
		__raw_writel(combiner_data[i].irq_mask,
			     combiner_data[i].base + COMBINER_ENABLE_CLEAR);
		__raw_writel(combiner_data[i].pm_save,
			     combiner_data[i].base + COMBINER_ENABLE_SET);
	}
}

#else
#define combiner_suspend	NULL
#define combiner_resume		NULL
#endif

static struct syscore_ops combiner_syscore_ops = {
	.suspend	= combiner_suspend,
	.resume		= combiner_resume,
};

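/*
 * DT entry point: map the register block (reg index 0), let an optional
 * "samsung,combiner-nr" property override the default group count, set
 * everything up and register the suspend/resume hooks. An illustrative
 * (not board-accurate) node would look like:
 *
 *	interrupt-controller@10440000 {
 *		compatible = "samsung,exynos4210-combiner";
 *		interrupt-controller;
 *		#interrupt-cells = <2>;
 *		reg = <0x10440000 0x1000>;
 *		samsung,combiner-nr = <16>;
 *	};
 */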
static int __init combiner_of_init(struct device_node *np,
				   struct device_node *parent)
{
	void __iomem *combiner_base;

	combiner_base = of_iomap(np, 0);
	if (!combiner_base) {
		pr_err("%s: failed to map combiner registers\n", __func__);
		return -ENXIO;
	}

	if (of_property_read_u32(np, "samsung,combiner-nr", &max_nr)) {
		pr_info("%s: number of combiners not specified, "
			"setting default as %d.\n",
			__func__, max_nr);
	}

	combiner_init(combiner_base, np);

	register_syscore_ops(&combiner_syscore_ops);

	return 0;
}
IRQCHIP_DECLARE(exynos4210_combiner, "samsung,exynos4210-combiner",
		combiner_of_init);