]>
Commit | Line | Data |
---|---|---|
9f0fd049 AP |
1 | /* |
2 | * This file implements an irqchip for OPAL events. Whenever there is | |
3 | * an interrupt that is handled by OPAL we get passed a list of events | |
4 | * that Linux needs to do something about. These basically look like | |
5 | * interrupts to Linux so we implement an irqchip to handle them. | |
6 | * | |
7 | * Copyright Alistair Popple, IBM Corporation 2014. | |
8 | * | |
9 | * This program is free software; you can redistribute it and/or modify it | |
10 | * under the terms of the GNU General Public License as published by the | |
11 | * Free Software Foundation; either version 2 of the License, or (at your | |
12 | * option) any later version. | |
13 | */ | |
14 | #include <linux/bitops.h> | |
15 | #include <linux/irq.h> | |
16 | #include <linux/irqchip.h> | |
17 | #include <linux/irqdomain.h> | |
18 | #include <linux/interrupt.h> | |
19 | #include <linux/module.h> | |
20 | #include <linux/of.h> | |
21 | #include <linux/platform_device.h> | |
22 | #include <linux/kthread.h> | |
23 | #include <linux/delay.h> | |
24 | #include <linux/slab.h> | |
25 | #include <linux/irq_work.h> | |
26 | ||
27 | #include <asm/machdep.h> | |
28 | #include <asm/opal.h> | |
29 | ||
30 | #include "powernv.h" | |
31 | ||
/* Maximum number of events supported by OPAL firmware */
#define MAX_NUM_EVENTS 64

/*
 * State for the OPAL event "irqchip": the irq_chip callbacks, the linear
 * irq domain mapping OPAL event numbers (0..MAX_NUM_EVENTS-1) to Linux
 * virqs, and a bitmask of the events currently unmasked.
 */
struct opal_event_irqchip {
	struct irq_chip irqchip;
	struct irq_domain *domain;
	unsigned long mask;	/* events Linux currently wants delivered */
};
static struct opal_event_irqchip opal_event_irqchip;

/* Virqs requested for the firmware's "opal-interrupts" sources */
static unsigned int opal_irq_count;
static unsigned int *opal_irqs;
44 | ||
static void opal_handle_irq_work(struct irq_work *work);
/* Events reported by OPAL that have not yet been dispatched to handlers */
static u64 last_outstanding_events;
/* Defers event dispatch into hard-irq context (see opal_handle_events()) */
static struct irq_work opal_event_irq_work = {
	.func = opal_handle_irq_work,
};
50 | ||
/*
 * Dispatch a set of OPAL events to their Linux interrupt handlers.
 *
 * @events: bitmask of OPAL event numbers reported by firmware.
 *
 * Each unmasked event bit behaves as a level interrupt: the mapped virq
 * (if any) is delivered via generic_handle_irq(). When called outside
 * hard interrupt context the events are stashed and replayed through
 * irq_work so the handlers run with the environment they expect.
 */
void opal_handle_events(uint64_t events)
{
	int virq, hwirq = 0;
	u64 mask = opal_event_irqchip.mask;	/* snapshot of unmasked events */

	/* Not in irq context: defer to irq_work rather than handling here */
	if (!in_irq() && (events & mask)) {
		last_outstanding_events = events;
		irq_work_queue(&opal_event_irq_work);
		return;
	}

	/* Walk event bits from highest to lowest */
	while (events & mask) {
		hwirq = fls64(events) - 1;
		if (BIT_ULL(hwirq) & mask) {
			virq = irq_find_mapping(opal_event_irqchip.domain,
						hwirq);
			if (virq)
				generic_handle_irq(virq);
		}
		/* Clear the bit whether it was dispatched or masked */
		events &= ~BIT_ULL(hwirq);
	}
}
73 | ||
/* irq_chip mask callback: stop delivering this OPAL event to its handler */
static void opal_event_mask(struct irq_data *d)
{
	clear_bit(d->hwirq, &opal_event_irqchip.mask);
}
78 | ||
/*
 * irq_chip unmask callback: start delivering this OPAL event. Poll the
 * firmware so an event that became pending while masked is not lost.
 */
static void opal_event_unmask(struct irq_data *d)
{
	__be64 events;

	set_bit(d->hwirq, &opal_event_irqchip.mask);

	opal_poll_events(&events);
	last_outstanding_events = be64_to_cpu(events);

	/*
	 * We can't just handle the events now with opal_handle_events().
	 * If we did we would deadlock when opal_event_unmask() is called from
	 * handle_level_irq() with the irq descriptor lock held, because
	 * calling opal_handle_events() would call generic_handle_irq() and
	 * then handle_level_irq() which would try to take the descriptor lock
	 * again. Instead queue the events for later.
	 */
	if (last_outstanding_events & opal_event_irqchip.mask)
		/* Need to retrigger the interrupt */
		irq_work_queue(&opal_event_irq_work);
}
100 | ||
101 | static int opal_event_set_type(struct irq_data *d, unsigned int flow_type) | |
102 | { | |
103 | /* | |
104 | * For now we only support level triggered events. The irq | |
105 | * handler will be called continuously until the event has | |
106 | * been cleared in OPAL. | |
107 | */ | |
108 | if (flow_type != IRQ_TYPE_LEVEL_HIGH) | |
109 | return -EINVAL; | |
110 | ||
111 | return 0; | |
112 | } | |
113 | ||
/* The single OPAL event irqchip instance; all events start masked */
static struct opal_event_irqchip opal_event_irqchip = {
	.irqchip = {
		.name = "OPAL EVT",
		.irq_mask = opal_event_mask,
		.irq_unmask = opal_event_unmask,
		.irq_set_type = opal_event_set_type,
	},
	.mask = 0,
};
123 | ||
/*
 * irq_domain map callback: wire a freshly created virq to our irqchip
 * with level flow handling (events stay asserted until cleared in OPAL).
 */
static int opal_event_map(struct irq_domain *d, unsigned int irq,
			irq_hw_number_t hwirq)
{
	irq_set_chip_data(irq, &opal_event_irqchip);
	irq_set_chip_and_handler(irq, &opal_event_irqchip.irqchip,
				handle_level_irq);

	return 0;
}
133 | ||
/*
 * Handler for the firmware interrupts listed in "opal-interrupts":
 * hand the interrupt to OPAL, then dispatch whatever events the
 * firmware reports back to us.
 */
static irqreturn_t opal_interrupt(int irq, void *data)
{
	__be64 events;

	opal_handle_interrupt(virq_to_hw(irq), &events);
	opal_handle_events(be64_to_cpu(events));

	return IRQ_HANDLED;
}
143 | ||
/* irq_work callback: replay events that were queued from non-irq context */
static void opal_handle_irq_work(struct irq_work *work)
{
	opal_handle_events(last_outstanding_events);
}
148 | ||
ad3aedfb MZ |
149 | static int opal_event_match(struct irq_domain *h, struct device_node *node, |
150 | enum irq_domain_bus_token bus_token) | |
9f0fd049 | 151 | { |
5d4c9bc7 | 152 | return irq_domain_get_of_node(h) == node; |
9f0fd049 AP |
153 | } |
154 | ||
155 | static int opal_event_xlate(struct irq_domain *h, struct device_node *np, | |
156 | const u32 *intspec, unsigned int intsize, | |
157 | irq_hw_number_t *out_hwirq, unsigned int *out_flags) | |
158 | { | |
159 | *out_hwirq = intspec[0]; | |
160 | *out_flags = IRQ_TYPE_LEVEL_HIGH; | |
161 | ||
162 | return 0; | |
163 | } | |
164 | ||
/* Domain callbacks mapping OPAL event numbers onto Linux virqs */
static const struct irq_domain_ops opal_event_domain_ops = {
	.match = opal_event_match,
	.map = opal_event_map,
	.xlate = opal_event_xlate,
};
170 | ||
171 | void opal_event_shutdown(void) | |
172 | { | |
173 | unsigned int i; | |
174 | ||
175 | /* First free interrupts, which will also mask them */ | |
176 | for (i = 0; i < opal_irq_count; i++) { | |
177 | if (opal_irqs[i]) | |
178 | free_irq(opal_irqs[i], NULL); | |
179 | opal_irqs[i] = 0; | |
180 | } | |
181 | } | |
182 | ||
183 | int __init opal_event_init(void) | |
184 | { | |
185 | struct device_node *dn, *opal_node; | |
186 | const __be32 *irqs; | |
187 | int i, irqlen, rc = 0; | |
188 | ||
189 | opal_node = of_find_node_by_path("/ibm,opal"); | |
190 | if (!opal_node) { | |
191 | pr_warn("opal: Node not found\n"); | |
192 | return -ENODEV; | |
193 | } | |
194 | ||
195 | /* If dn is NULL it means the domain won't be linked to a DT | |
196 | * node so therefore irq_of_parse_and_map(...) wont work. But | |
197 | * that shouldn't be problem because if we're running a | |
198 | * version of skiboot that doesn't have the dn then the | |
199 | * devices won't have the correct properties and will have to | |
200 | * fall back to the legacy method (opal_event_request(...)) | |
201 | * anyway. */ | |
202 | dn = of_find_compatible_node(NULL, NULL, "ibm,opal-event"); | |
203 | opal_event_irqchip.domain = irq_domain_add_linear(dn, MAX_NUM_EVENTS, | |
204 | &opal_event_domain_ops, &opal_event_irqchip); | |
205 | of_node_put(dn); | |
206 | if (!opal_event_irqchip.domain) { | |
207 | pr_warn("opal: Unable to create irq domain\n"); | |
208 | rc = -ENOMEM; | |
209 | goto out; | |
210 | } | |
211 | ||
212 | /* Get interrupt property */ | |
213 | irqs = of_get_property(opal_node, "opal-interrupts", &irqlen); | |
214 | opal_irq_count = irqs ? (irqlen / 4) : 0; | |
215 | pr_debug("Found %d interrupts reserved for OPAL\n", opal_irq_count); | |
216 | ||
217 | /* Install interrupt handlers */ | |
218 | opal_irqs = kcalloc(opal_irq_count, sizeof(*opal_irqs), GFP_KERNEL); | |
219 | for (i = 0; irqs && i < opal_irq_count; i++, irqs++) { | |
220 | unsigned int irq, virq; | |
221 | ||
222 | /* Get hardware and virtual IRQ */ | |
223 | irq = be32_to_cpup(irqs); | |
224 | virq = irq_create_mapping(NULL, irq); | |
225 | if (virq == NO_IRQ) { | |
226 | pr_warn("Failed to map irq 0x%x\n", irq); | |
227 | continue; | |
228 | } | |
229 | ||
230 | /* Install interrupt handler */ | |
880a3d6a BH |
231 | rc = request_irq(virq, opal_interrupt, IRQF_TRIGGER_LOW, |
232 | "opal", NULL); | |
9f0fd049 AP |
233 | if (rc) { |
234 | irq_dispose_mapping(virq); | |
235 | pr_warn("Error %d requesting irq %d (0x%x)\n", | |
236 | rc, virq, irq); | |
237 | continue; | |
238 | } | |
239 | ||
240 | /* Cache IRQ */ | |
241 | opal_irqs[i] = virq; | |
242 | } | |
243 | ||
244 | out: | |
245 | of_node_put(opal_node); | |
246 | return rc; | |
247 | } | |
02b6505c | 248 | machine_arch_initcall(powernv, opal_event_init); |
9f0fd049 AP |
249 | |
/**
 * opal_event_request(unsigned int opal_event_nr) - Request an event
 * @opal_event_nr: the opal event number to request
 *
 * This routine can be used to find the linux virq number which can
 * then be passed to request_irq to assign a handler for a particular
 * opal event. This should only be used by legacy devices which don't
 * have proper device tree bindings. Most devices should use
 * irq_of_parse_and_map() instead.
 */
int opal_event_request(unsigned int opal_event_nr)
{
	/* Domain is created at arch_initcall time; warn if called earlier */
	if (WARN_ON_ONCE(!opal_event_irqchip.domain))
		return NO_IRQ;

	return irq_create_mapping(opal_event_irqchip.domain, opal_event_nr);
}
EXPORT_SYMBOL(opal_event_request);