]>
Commit | Line | Data |
---|---|---|
0136afa0 LS |
1 | // SPDX-License-Identifier: GPL-2.0+ |
2 | /* | |
3 | * Copyright 2017 NXP | |
4 | * Copyright (C) 2018 Pengutronix, Lucas Stach <kernel@pengutronix.de> | |
5 | */ | |
6 | ||
7 | #include <linux/clk.h> | |
8 | #include <linux/interrupt.h> | |
9 | #include <linux/irq.h> | |
10 | #include <linux/irqchip/chained_irq.h> | |
11 | #include <linux/irqdomain.h> | |
12 | #include <linux/kernel.h> | |
13 | #include <linux/of_platform.h> | |
14 | #include <linux/spinlock.h> | |
15 | ||
/*
 * Register layout: a single CHANCTRL word at offset 0, followed by banks
 * of per-group registers. _t is the number of irq groups, _r selects the
 * bank (0 = mask, 1 = set, 2 = status; bank 3 holds the single-word
 * MINTDIS/MASTRSTAT registers).
 *
 * Fix: macro arguments must be parenthesized — the original
 * (_t * 8 * _r) mis-expands for compound arguments such as
 * CTRL_STRIDE_OFF(a + b, r).
 */
#define CTRL_STRIDE_OFF(_t, _r)	((_t) * 8 * (_r))
#define CHANCTRL		0x0
#define CHANMASK(n, t)		(CTRL_STRIDE_OFF(t, 0) + 0x4 * (n) + 0x4)
#define CHANSET(n, t)		(CTRL_STRIDE_OFF(t, 1) + 0x4 * (n) + 0x4)
#define CHANSTATUS(n, t)	(CTRL_STRIDE_OFF(t, 2) + 0x4 * (n) + 0x4)
#define CHAN_MINTDIS(t)		(CTRL_STRIDE_OFF(t, 3) + 0x4)
#define CHAN_MASTRSTAT(t)	(CTRL_STRIDE_OFF(t, 3) + 0x8)
23 | ||
/* Per-instance driver state for one irqsteer block. */
struct irqsteer_data {
	void __iomem *regs;		/* mapped register base */
	struct clk *ipg_clk;		/* "ipg" bus clock; enabled around register access */
	int irq;			/* upstream parent interrupt */
	raw_spinlock_t lock;		/* serializes CHANMASK read-modify-write */
	int irq_groups;			/* from "fsl,irq-groups" DT property */
	int channel;			/* from "fsl,channel": output channel written to CHANCTRL */
	struct irq_domain *domain;	/* linear domain of irq_groups * 64 lines */
	u32 *saved_reg;			/* CHANMASK snapshot for suspend/resume (CONFIG_PM_SLEEP only) */
};
34 | ||
35 | static int imx_irqsteer_get_reg_index(struct irqsteer_data *data, | |
36 | unsigned long irqnum) | |
37 | { | |
38 | return (data->irq_groups * 2 - irqnum / 32 - 1); | |
39 | } | |
40 | ||
41 | static void imx_irqsteer_irq_unmask(struct irq_data *d) | |
42 | { | |
43 | struct irqsteer_data *data = d->chip_data; | |
44 | int idx = imx_irqsteer_get_reg_index(data, d->hwirq); | |
45 | unsigned long flags; | |
46 | u32 val; | |
47 | ||
48 | raw_spin_lock_irqsave(&data->lock, flags); | |
49 | val = readl_relaxed(data->regs + CHANMASK(idx, data->irq_groups)); | |
50 | val |= BIT(d->hwirq % 32); | |
51 | writel_relaxed(val, data->regs + CHANMASK(idx, data->irq_groups)); | |
52 | raw_spin_unlock_irqrestore(&data->lock, flags); | |
53 | } | |
54 | ||
55 | static void imx_irqsteer_irq_mask(struct irq_data *d) | |
56 | { | |
57 | struct irqsteer_data *data = d->chip_data; | |
58 | int idx = imx_irqsteer_get_reg_index(data, d->hwirq); | |
59 | unsigned long flags; | |
60 | u32 val; | |
61 | ||
62 | raw_spin_lock_irqsave(&data->lock, flags); | |
63 | val = readl_relaxed(data->regs + CHANMASK(idx, data->irq_groups)); | |
64 | val &= ~BIT(d->hwirq % 32); | |
65 | writel_relaxed(val, data->regs + CHANMASK(idx, data->irq_groups)); | |
66 | raw_spin_unlock_irqrestore(&data->lock, flags); | |
67 | } | |
68 | ||
/* Only mask/unmask are implemented; both operate on the CHANMASK registers. */
static struct irq_chip imx_irqsteer_irq_chip = {
	.name = "irqsteer",
	.irq_mask = imx_irqsteer_irq_mask,
	.irq_unmask = imx_irqsteer_irq_unmask,
};
74 | ||
75 | static int imx_irqsteer_irq_map(struct irq_domain *h, unsigned int irq, | |
76 | irq_hw_number_t hwirq) | |
77 | { | |
78 | irq_set_status_flags(irq, IRQ_LEVEL); | |
79 | irq_set_chip_data(irq, h->host_data); | |
80 | irq_set_chip_and_handler(irq, &imx_irqsteer_irq_chip, handle_level_irq); | |
81 | ||
82 | return 0; | |
83 | } | |
84 | ||
/* One-cell DT interrupt specifier: the cell is the hwirq number. */
static const struct irq_domain_ops imx_irqsteer_domain_ops = {
	.map = imx_irqsteer_irq_map,
	.xlate = irq_domain_xlate_onecell,
};
89 | ||
90 | static void imx_irqsteer_irq_handler(struct irq_desc *desc) | |
91 | { | |
92 | struct irqsteer_data *data = irq_desc_get_handler_data(desc); | |
93 | int i; | |
94 | ||
95 | chained_irq_enter(irq_desc_get_chip(desc), desc); | |
96 | ||
97 | for (i = 0; i < data->irq_groups * 64; i += 32) { | |
98 | int idx = imx_irqsteer_get_reg_index(data, i); | |
99 | unsigned long irqmap; | |
100 | int pos, virq; | |
101 | ||
102 | irqmap = readl_relaxed(data->regs + | |
103 | CHANSTATUS(idx, data->irq_groups)); | |
104 | ||
105 | for_each_set_bit(pos, &irqmap, 32) { | |
106 | virq = irq_find_mapping(data->domain, pos + i); | |
107 | if (virq) | |
108 | generic_handle_irq(virq); | |
109 | } | |
110 | } | |
111 | ||
112 | chained_irq_exit(irq_desc_get_chip(desc), desc); | |
113 | } | |
114 | ||
115 | static int imx_irqsteer_probe(struct platform_device *pdev) | |
116 | { | |
117 | struct device_node *np = pdev->dev.of_node; | |
118 | struct irqsteer_data *data; | |
119 | struct resource *res; | |
120 | int ret; | |
121 | ||
122 | data = devm_kzalloc(&pdev->dev, sizeof(*data), GFP_KERNEL); | |
123 | if (!data) | |
124 | return -ENOMEM; | |
125 | ||
126 | res = platform_get_resource(pdev, IORESOURCE_MEM, 0); | |
127 | data->regs = devm_ioremap_resource(&pdev->dev, res); | |
128 | if (IS_ERR(data->regs)) { | |
129 | dev_err(&pdev->dev, "failed to initialize reg\n"); | |
130 | return PTR_ERR(data->regs); | |
131 | } | |
132 | ||
133 | data->irq = platform_get_irq(pdev, 0); | |
134 | if (data->irq <= 0) { | |
135 | dev_err(&pdev->dev, "failed to get irq\n"); | |
136 | return -ENODEV; | |
137 | } | |
138 | ||
139 | data->ipg_clk = devm_clk_get(&pdev->dev, "ipg"); | |
140 | if (IS_ERR(data->ipg_clk)) { | |
141 | ret = PTR_ERR(data->ipg_clk); | |
142 | if (ret != -EPROBE_DEFER) | |
143 | dev_err(&pdev->dev, "failed to get ipg clk: %d\n", ret); | |
144 | return ret; | |
145 | } | |
146 | ||
147 | raw_spin_lock_init(&data->lock); | |
148 | ||
149 | of_property_read_u32(np, "fsl,irq-groups", &data->irq_groups); | |
150 | of_property_read_u32(np, "fsl,channel", &data->channel); | |
151 | ||
152 | if (IS_ENABLED(CONFIG_PM_SLEEP)) { | |
153 | data->saved_reg = devm_kzalloc(&pdev->dev, | |
154 | sizeof(u32) * data->irq_groups * 2, | |
155 | GFP_KERNEL); | |
156 | if (!data->saved_reg) | |
157 | return -ENOMEM; | |
158 | } | |
159 | ||
160 | ret = clk_prepare_enable(data->ipg_clk); | |
161 | if (ret) { | |
162 | dev_err(&pdev->dev, "failed to enable ipg clk: %d\n", ret); | |
163 | return ret; | |
164 | } | |
165 | ||
166 | /* steer all IRQs into configured channel */ | |
167 | writel_relaxed(BIT(data->channel), data->regs + CHANCTRL); | |
168 | ||
169 | data->domain = irq_domain_add_linear(np, data->irq_groups * 64, | |
170 | &imx_irqsteer_domain_ops, data); | |
171 | if (!data->domain) { | |
172 | dev_err(&pdev->dev, "failed to create IRQ domain\n"); | |
173 | clk_disable_unprepare(data->ipg_clk); | |
174 | return -ENOMEM; | |
175 | } | |
176 | ||
177 | irq_set_chained_handler_and_data(data->irq, imx_irqsteer_irq_handler, | |
178 | data); | |
179 | ||
180 | platform_set_drvdata(pdev, data); | |
181 | ||
182 | return 0; | |
183 | } | |
184 | ||
185 | static int imx_irqsteer_remove(struct platform_device *pdev) | |
186 | { | |
187 | struct irqsteer_data *irqsteer_data = platform_get_drvdata(pdev); | |
188 | ||
189 | irq_set_chained_handler_and_data(irqsteer_data->irq, NULL, NULL); | |
190 | irq_domain_remove(irqsteer_data->domain); | |
191 | ||
192 | clk_disable_unprepare(irqsteer_data->ipg_clk); | |
193 | ||
194 | return 0; | |
195 | } | |
196 | ||
197 | #ifdef CONFIG_PM_SLEEP | |
198 | static void imx_irqsteer_save_regs(struct irqsteer_data *data) | |
199 | { | |
200 | int i; | |
201 | ||
202 | for (i = 0; i < data->irq_groups * 2; i++) | |
203 | data->saved_reg[i] = readl_relaxed(data->regs + | |
204 | CHANMASK(i, data->irq_groups)); | |
205 | } | |
206 | ||
207 | static void imx_irqsteer_restore_regs(struct irqsteer_data *data) | |
208 | { | |
209 | int i; | |
210 | ||
211 | writel_relaxed(BIT(data->channel), data->regs + CHANCTRL); | |
212 | for (i = 0; i < data->irq_groups * 2; i++) | |
213 | writel_relaxed(data->saved_reg[i], | |
214 | data->regs + CHANMASK(i, data->irq_groups)); | |
215 | } | |
216 | ||
217 | static int imx_irqsteer_suspend(struct device *dev) | |
218 | { | |
219 | struct irqsteer_data *irqsteer_data = dev_get_drvdata(dev); | |
220 | ||
221 | imx_irqsteer_save_regs(irqsteer_data); | |
222 | clk_disable_unprepare(irqsteer_data->ipg_clk); | |
223 | ||
224 | return 0; | |
225 | } | |
226 | ||
227 | static int imx_irqsteer_resume(struct device *dev) | |
228 | { | |
229 | struct irqsteer_data *irqsteer_data = dev_get_drvdata(dev); | |
230 | int ret; | |
231 | ||
232 | ret = clk_prepare_enable(irqsteer_data->ipg_clk); | |
233 | if (ret) { | |
234 | dev_err(dev, "failed to enable ipg clk: %d\n", ret); | |
235 | return ret; | |
236 | } | |
237 | imx_irqsteer_restore_regs(irqsteer_data); | |
238 | ||
239 | return 0; | |
240 | } | |
241 | #endif | |
242 | ||
/*
 * noirq-phase sleep ops so the mask state is saved/restored while the
 * steered interrupts are already disabled.
 */
static const struct dev_pm_ops imx_irqsteer_pm_ops = {
	SET_NOIRQ_SYSTEM_SLEEP_PM_OPS(imx_irqsteer_suspend, imx_irqsteer_resume)
};
246 | ||
/* Device-tree match table. */
static const struct of_device_id imx_irqsteer_dt_ids[] = {
	{ .compatible = "fsl,imx-irqsteer", },
	{},
};
251 | ||
/* Built-in platform driver; no module unload path is provided. */
static struct platform_driver imx_irqsteer_driver = {
	.driver = {
		.name = "imx-irqsteer",
		.of_match_table = imx_irqsteer_dt_ids,
		.pm = &imx_irqsteer_pm_ops,
	},
	.probe = imx_irqsteer_probe,
	.remove = imx_irqsteer_remove,
};
builtin_platform_driver(imx_irqsteer_driver);