// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2015 Broadcom Corporation
 */

#include <linux/interrupt.h>
#include <linux/irqchip/chained_irq.h>
#include <linux/irqdomain.h>
#include <linux/msi.h>
#include <linux/of_irq.h>
#include <linux/of_pci.h>
#include <linux/pci.h>

#include "pcie-iproc.h"

#define IPROC_MSI_INTR_EN_SHIFT		11
#define IPROC_MSI_INTR_EN		BIT(IPROC_MSI_INTR_EN_SHIFT)
#define IPROC_MSI_INT_N_EVENT_SHIFT	1
#define IPROC_MSI_INT_N_EVENT		BIT(IPROC_MSI_INT_N_EVENT_SHIFT)
#define IPROC_MSI_EQ_EN_SHIFT		0
#define IPROC_MSI_EQ_EN			BIT(IPROC_MSI_EQ_EN_SHIFT)

#define IPROC_MSI_EQ_MASK		0x3f

/* Max number of GIC interrupts */
#define NR_HW_IRQS			6

/* Number of entries in each event queue */
#define EQ_LEN				64

/* Size of each event queue memory region */
#define EQ_MEM_REGION_SIZE		SZ_4K

/* Size of each MSI address region */
#define MSI_MEM_REGION_SIZE		SZ_4K

enum iproc_msi_reg {
	IPROC_MSI_EQ_PAGE = 0,
	IPROC_MSI_EQ_PAGE_UPPER,
	IPROC_MSI_PAGE,
	IPROC_MSI_PAGE_UPPER,
	IPROC_MSI_CTRL,
	IPROC_MSI_EQ_HEAD,
	IPROC_MSI_EQ_TAIL,
	IPROC_MSI_INTS_EN,
	IPROC_MSI_REG_SIZE,
};

struct iproc_msi;

/**
 * struct iproc_msi_grp - iProc MSI group
 *
 * One MSI group is allocated per GIC interrupt, serviced by one iProc MSI
 * event queue.
 *
 * @msi: pointer to iProc MSI data
 * @gic_irq: GIC interrupt
 * @eq: event queue number
 */
struct iproc_msi_grp {
	struct iproc_msi *msi;
	int gic_irq;
	unsigned int eq;
};

/**
 * struct iproc_msi - iProc event queue based MSI
 *
 * Only meant to be used on platforms without MSI support integrated into the
 * GIC.
 *
 * @pcie: pointer to iProc PCIe data
 * @reg_offsets: MSI register offsets
 * @grps: MSI groups
 * @nr_irqs: number of total interrupts connected to GIC
 * @nr_cpus: number of total CPUs
 * @has_inten_reg: indicates the MSI interrupt enable register needs to be
 * set explicitly (required for some legacy platforms)
 * @bitmap: MSI vector bitmap
 * @bitmap_lock: lock to protect access to the MSI bitmap
 * @nr_msi_vecs: total number of MSI vectors
 * @inner_domain: inner IRQ domain
 * @msi_domain: MSI IRQ domain
 * @nr_eq_region: required number of 4K aligned memory regions for MSI event
 * queues
 * @nr_msi_region: required number of 4K aligned address regions for MSI
 * posted writes
 * @eq_cpu: pointer to allocated memory region for MSI event queues
 * @eq_dma: DMA address of MSI event queues
 * @msi_addr: MSI address
 */
struct iproc_msi {
	struct iproc_pcie *pcie;
	const u16 (*reg_offsets)[IPROC_MSI_REG_SIZE];
	struct iproc_msi_grp *grps;
	int nr_irqs;
	int nr_cpus;
	bool has_inten_reg;
	unsigned long *bitmap;
	struct mutex bitmap_lock;
	unsigned int nr_msi_vecs;
	struct irq_domain *inner_domain;
	struct irq_domain *msi_domain;
	unsigned int nr_eq_region;
	unsigned int nr_msi_region;
	void *eq_cpu;
	dma_addr_t eq_dma;
	phys_addr_t msi_addr;
};

static const u16 iproc_msi_reg_paxb[NR_HW_IRQS][IPROC_MSI_REG_SIZE] = {
	{ 0x200, 0x2c0, 0x204, 0x2c4, 0x210, 0x250, 0x254, 0x208 },
	{ 0x200, 0x2c0, 0x204, 0x2c4, 0x214, 0x258, 0x25c, 0x208 },
	{ 0x200, 0x2c0, 0x204, 0x2c4, 0x218, 0x260, 0x264, 0x208 },
	{ 0x200, 0x2c0, 0x204, 0x2c4, 0x21c, 0x268, 0x26c, 0x208 },
	{ 0x200, 0x2c0, 0x204, 0x2c4, 0x220, 0x270, 0x274, 0x208 },
	{ 0x200, 0x2c0, 0x204, 0x2c4, 0x224, 0x278, 0x27c, 0x208 },
};

static const u16 iproc_msi_reg_paxc[NR_HW_IRQS][IPROC_MSI_REG_SIZE] = {
	{ 0xc00, 0xc04, 0xc08, 0xc0c, 0xc40, 0xc50, 0xc60 },
	{ 0xc10, 0xc14, 0xc18, 0xc1c, 0xc44, 0xc54, 0xc64 },
	{ 0xc20, 0xc24, 0xc28, 0xc2c, 0xc48, 0xc58, 0xc68 },
	{ 0xc30, 0xc34, 0xc38, 0xc3c, 0xc4c, 0xc5c, 0xc6c },
};
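
/*
 * Note the layout difference between the two tables above: in the PAXB
 * table, the EQ/MSI page registers (first four columns) are shared across
 * all groups, so a single memory/address region serves every event queue.
 * PAXC has a separate register set per group, which is why iproc_msi_init()
 * programs one region per group for PAXC but only one overall for PAXB.
 */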

static inline u32 iproc_msi_read_reg(struct iproc_msi *msi,
				     enum iproc_msi_reg reg,
				     unsigned int eq)
{
	struct iproc_pcie *pcie = msi->pcie;

	return readl_relaxed(pcie->base + msi->reg_offsets[eq][reg]);
}

static inline void iproc_msi_write_reg(struct iproc_msi *msi,
				       enum iproc_msi_reg reg,
				       int eq, u32 val)
{
	struct iproc_pcie *pcie = msi->pcie;

	writel_relaxed(val, pcie->base + msi->reg_offsets[eq][reg]);
}

static inline u32 hwirq_to_group(struct iproc_msi *msi, unsigned long hwirq)
{
	return (hwirq % msi->nr_irqs);
}

static inline unsigned int iproc_msi_addr_offset(struct iproc_msi *msi,
						 unsigned long hwirq)
{
	if (msi->nr_msi_region > 1)
		return hwirq_to_group(msi, hwirq) * MSI_MEM_REGION_SIZE;
	else
		return hwirq_to_group(msi, hwirq) * sizeof(u32);
}

static inline unsigned int iproc_msi_eq_offset(struct iproc_msi *msi, u32 eq)
{
	if (msi->nr_eq_region > 1)
		return eq * EQ_MEM_REGION_SIZE;
	else
		return eq * EQ_LEN * sizeof(u32);
}
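
/*
 * Worked example of the offset math above: on PAXB (single shared region),
 * group 2 posts MSI writes at msi_addr + 2 * sizeof(u32) = msi_addr + 8,
 * and its event queue starts 2 * 64 * sizeof(u32) = 512 bytes into eq_cpu.
 * On PAXC (per-group regions), the same group instead uses msi_addr + 8K
 * and eq_cpu + 8K, i.e. one 4K page per group for both kinds of region.
 */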

static struct irq_chip iproc_msi_irq_chip = {
	.name = "iProc-MSI",
};

static struct msi_domain_info iproc_msi_domain_info = {
	.flags = MSI_FLAG_USE_DEF_DOM_OPS | MSI_FLAG_USE_DEF_CHIP_OPS |
		 MSI_FLAG_MULTI_PCI_MSI | MSI_FLAG_PCI_MSIX,
	.chip = &iproc_msi_irq_chip,
};

/*
 * In the iProc PCIe core, each MSI group is serviced by a GIC interrupt and
 * a dedicated event queue.  Each MSI group can support up to 64 MSI vectors.
 *
 * The number of MSI groups varies between different iProc SoCs.  The total
 * number of CPU cores also varies.  To support MSI IRQ affinity, we
 * distribute GIC interrupts across all available CPUs.  An MSI vector is
 * moved from one GIC interrupt to another to steer it to the target CPU.
 *
 * Assuming:
 * - the number of MSI groups is M
 * - the number of CPU cores is N
 * - M is always a multiple of N
 *
 * Total number of raw MSI vectors = M * 64
 * Total number of supported MSI vectors = (M * 64) / N
 */
static inline int hwirq_to_cpu(struct iproc_msi *msi, unsigned long hwirq)
{
	return (hwirq % msi->nr_cpus);
}

static inline unsigned long hwirq_to_canonical_hwirq(struct iproc_msi *msi,
						     unsigned long hwirq)
{
	return (hwirq - hwirq_to_cpu(msi, hwirq));
}
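
/*
 * Example of the mapping above, with M = 4 groups and N = 2 CPUs: raw
 * hwirqs 6 and 7 are the same logical MSI vector, serviced via group
 * 6 % 4 = 2 (affined to CPU 0) and group 7 % 4 = 3 (affined to CPU 1)
 * respectively.  Both canonicalize to hwirq 6, which is the value kept in
 * the allocation bitmap and used for the virq mapping.
 */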

static int iproc_msi_irq_set_affinity(struct irq_data *data,
				      const struct cpumask *mask, bool force)
{
	struct iproc_msi *msi = irq_data_get_irq_chip_data(data);
	int target_cpu = cpumask_first(mask);
	int curr_cpu;

	curr_cpu = hwirq_to_cpu(msi, data->hwirq);
	if (curr_cpu == target_cpu)
		return IRQ_SET_MASK_OK_DONE;

	/* steer MSI to the target CPU */
	data->hwirq = hwirq_to_canonical_hwirq(msi, data->hwirq) + target_cpu;

	return IRQ_SET_MASK_OK;
}

static void iproc_msi_irq_compose_msi_msg(struct irq_data *data,
					  struct msi_msg *msg)
{
	struct iproc_msi *msi = irq_data_get_irq_chip_data(data);
	dma_addr_t addr;

	addr = msi->msi_addr + iproc_msi_addr_offset(msi, data->hwirq);
	msg->address_lo = lower_32_bits(addr);
	msg->address_hi = upper_32_bits(addr);
	msg->data = data->hwirq << 5;
}
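
/*
 * The hwirq is shifted left by 5 bits in the message data, leaving the low
 * 5 bits free for the endpoint's multi-MSI vector offset (PCI multi-MSI
 * allows up to 32 vectors, distinguished by the low bits of the data).
 * decode_msi_hwirq() below reverses this by adding the low bits back:
 * hwirq = (data >> 5) + (data & 0x1f).
 */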

static struct irq_chip iproc_msi_bottom_irq_chip = {
	.name = "MSI",
	.irq_set_affinity = iproc_msi_irq_set_affinity,
	.irq_compose_msi_msg = iproc_msi_irq_compose_msi_msg,
};

static int iproc_msi_irq_domain_alloc(struct irq_domain *domain,
				      unsigned int virq, unsigned int nr_irqs,
				      void *args)
{
	struct iproc_msi *msi = domain->host_data;
	int hwirq, i;

	mutex_lock(&msi->bitmap_lock);

	/* Allocate 'nr_cpus' number of MSI vectors each time */
	hwirq = bitmap_find_next_zero_area(msi->bitmap, msi->nr_msi_vecs, 0,
					   msi->nr_cpus, 0);
	if (hwirq < msi->nr_msi_vecs) {
		bitmap_set(msi->bitmap, hwirq, msi->nr_cpus);
	} else {
		mutex_unlock(&msi->bitmap_lock);
		return -ENOSPC;
	}

	mutex_unlock(&msi->bitmap_lock);
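
	/*
	 * The group of 'nr_cpus' contiguous hwirqs reserved above all refer
	 * to the same logical vector; steering it to a CPU later merely
	 * moves data->hwirq within the group, see
	 * iproc_msi_irq_set_affinity().
	 */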
	for (i = 0; i < nr_irqs; i++) {
		irq_domain_set_info(domain, virq + i, hwirq + i,
				    &iproc_msi_bottom_irq_chip,
				    domain->host_data, handle_simple_irq,
				    NULL, NULL);
	}

	return hwirq;
}

static void iproc_msi_irq_domain_free(struct irq_domain *domain,
				      unsigned int virq, unsigned int nr_irqs)
{
	struct irq_data *data = irq_domain_get_irq_data(domain, virq);
	struct iproc_msi *msi = irq_data_get_irq_chip_data(data);
	unsigned int hwirq;

	mutex_lock(&msi->bitmap_lock);

	hwirq = hwirq_to_canonical_hwirq(msi, data->hwirq);
	bitmap_clear(msi->bitmap, hwirq, msi->nr_cpus);

	mutex_unlock(&msi->bitmap_lock);

	irq_domain_free_irqs_parent(domain, virq, nr_irqs);
}

static const struct irq_domain_ops msi_domain_ops = {
	.alloc = iproc_msi_irq_domain_alloc,
	.free = iproc_msi_irq_domain_free,
};

static inline u32 decode_msi_hwirq(struct iproc_msi *msi, u32 eq, u32 head)
{
	u32 *msg, hwirq;
	unsigned int offs;

	offs = iproc_msi_eq_offset(msi, eq) + head * sizeof(u32);
	msg = (u32 *)(msi->eq_cpu + offs);
	hwirq = readl(msg);
	/* undo the << 5 encoding applied in iproc_msi_irq_compose_msi_msg() */
	hwirq = (hwirq >> 5) + (hwirq & 0x1f);

	/*
	 * Since multiple hwirqs are mapped to a single MSI vector, we now
	 * need to derive the hwirq at CPU0.  It can then be mapped back to
	 * the virq.
	 */
	return hwirq_to_canonical_hwirq(msi, hwirq);
}

static void iproc_msi_handler(struct irq_desc *desc)
{
	struct irq_chip *chip = irq_desc_get_chip(desc);
	struct iproc_msi_grp *grp;
	struct iproc_msi *msi;
	u32 eq, head, tail, nr_events;
	unsigned long hwirq;
	int virq;

	chained_irq_enter(chip, desc);

	grp = irq_desc_get_handler_data(desc);
	msi = grp->msi;
	eq = grp->eq;

	/*
	 * The iProc MSI event queue is tracked by head and tail pointers.
	 * The head pointer indicates the next entry (MSI data) to be
	 * consumed by SW in the queue and needs to be updated by SW.  The
	 * iProc MSI core uses the tail pointer as the next data insertion
	 * point.
	 *
	 * Entries between the head and tail pointers contain valid MSI data.
	 * MSI data is guaranteed to be in the event queue memory before the
	 * tail pointer is updated by the iProc MSI core.
	 */
	head = iproc_msi_read_reg(msi, IPROC_MSI_EQ_HEAD,
				  eq) & IPROC_MSI_EQ_MASK;
	do {
		tail = iproc_msi_read_reg(msi, IPROC_MSI_EQ_TAIL,
					  eq) & IPROC_MSI_EQ_MASK;

		/*
		 * Figure out the total number of events (MSI data) to be
		 * processed.
		 */
		nr_events = (tail < head) ?
			(EQ_LEN - (head - tail)) : (tail - head);
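		/*
		 * For example, with EQ_LEN = 64: head = 62 and tail = 2
		 * means the queue has wrapped and 64 - (62 - 2) = 4 entries
		 * are pending; head = 2 and tail = 6 means 4 entries are
		 * pending without a wrap.
		 */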
		if (!nr_events)
			break;

		/* process all outstanding events */
		while (nr_events--) {
			hwirq = decode_msi_hwirq(msi, eq, head);
			virq = irq_find_mapping(msi->inner_domain, hwirq);
			generic_handle_irq(virq);

			head++;
			head %= EQ_LEN;
		}

		/*
		 * Now all outstanding events have been processed.  Update
		 * the head pointer.
		 */
		iproc_msi_write_reg(msi, IPROC_MSI_EQ_HEAD, eq, head);

		/*
		 * Now go read the tail pointer again to see if there are new
		 * outstanding events that came in during the above window.
		 */
	} while (true);

	chained_irq_exit(chip, desc);
}

static void iproc_msi_enable(struct iproc_msi *msi)
{
	int i, eq;
	u32 val;

	/* Program memory region for each event queue */
	for (i = 0; i < msi->nr_eq_region; i++) {
		dma_addr_t addr = msi->eq_dma + (i * EQ_MEM_REGION_SIZE);

		iproc_msi_write_reg(msi, IPROC_MSI_EQ_PAGE, i,
				    lower_32_bits(addr));
		iproc_msi_write_reg(msi, IPROC_MSI_EQ_PAGE_UPPER, i,
				    upper_32_bits(addr));
	}

	/* Program address region for MSI posted writes */
	for (i = 0; i < msi->nr_msi_region; i++) {
		phys_addr_t addr = msi->msi_addr + (i * MSI_MEM_REGION_SIZE);

		iproc_msi_write_reg(msi, IPROC_MSI_PAGE, i,
				    lower_32_bits(addr));
		iproc_msi_write_reg(msi, IPROC_MSI_PAGE_UPPER, i,
				    upper_32_bits(addr));
	}

	for (eq = 0; eq < msi->nr_irqs; eq++) {
		/* Enable MSI event queue */
		val = IPROC_MSI_INTR_EN | IPROC_MSI_INT_N_EVENT |
		      IPROC_MSI_EQ_EN;
		iproc_msi_write_reg(msi, IPROC_MSI_CTRL, eq, val);

		/*
		 * Some legacy platforms require the MSI interrupt enable
		 * register to be set explicitly.
		 */
		if (msi->has_inten_reg) {
			val = iproc_msi_read_reg(msi, IPROC_MSI_INTS_EN, eq);
			val |= BIT(eq);
			iproc_msi_write_reg(msi, IPROC_MSI_INTS_EN, eq, val);
		}
	}
}

static void iproc_msi_disable(struct iproc_msi *msi)
{
	u32 eq, val;

	for (eq = 0; eq < msi->nr_irqs; eq++) {
		if (msi->has_inten_reg) {
			val = iproc_msi_read_reg(msi, IPROC_MSI_INTS_EN, eq);
			val &= ~BIT(eq);
			iproc_msi_write_reg(msi, IPROC_MSI_INTS_EN, eq, val);
		}

		val = iproc_msi_read_reg(msi, IPROC_MSI_CTRL, eq);
		val &= ~(IPROC_MSI_INTR_EN | IPROC_MSI_INT_N_EVENT |
			 IPROC_MSI_EQ_EN);
		iproc_msi_write_reg(msi, IPROC_MSI_CTRL, eq, val);
	}
}

static int iproc_msi_alloc_domains(struct device_node *node,
				   struct iproc_msi *msi)
{
	msi->inner_domain = irq_domain_add_linear(NULL, msi->nr_msi_vecs,
						  &msi_domain_ops, msi);
	if (!msi->inner_domain)
		return -ENOMEM;

	msi->msi_domain = pci_msi_create_irq_domain(of_node_to_fwnode(node),
						    &iproc_msi_domain_info,
						    msi->inner_domain);
	if (!msi->msi_domain) {
		irq_domain_remove(msi->inner_domain);
		return -ENOMEM;
	}

	return 0;
}

static void iproc_msi_free_domains(struct iproc_msi *msi)
{
	if (msi->msi_domain)
		irq_domain_remove(msi->msi_domain);

	if (msi->inner_domain)
		irq_domain_remove(msi->inner_domain);
}

static void iproc_msi_irq_free(struct iproc_msi *msi, unsigned int cpu)
{
	int i;

	for (i = cpu; i < msi->nr_irqs; i += msi->nr_cpus) {
		irq_set_chained_handler_and_data(msi->grps[i].gic_irq,
						 NULL, NULL);
	}
}

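/*
 * Groups are distributed round-robin across CPUs: CPU 'cpu' owns groups
 * cpu, cpu + nr_cpus, cpu + 2 * nr_cpus, and so on.  For example, with 4
 * GIC interrupts on a 2-core system, CPU 0 services event queues 0 and 2
 * while CPU 1 services event queues 1 and 3.
 */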
static int iproc_msi_irq_setup(struct iproc_msi *msi, unsigned int cpu)
{
	int i, ret;
	cpumask_var_t mask;
	struct iproc_pcie *pcie = msi->pcie;

	for (i = cpu; i < msi->nr_irqs; i += msi->nr_cpus) {
		irq_set_chained_handler_and_data(msi->grps[i].gic_irq,
						 iproc_msi_handler,
						 &msi->grps[i]);
		/* Dedicate GIC interrupt to each CPU core */
		if (alloc_cpumask_var(&mask, GFP_KERNEL)) {
			cpumask_clear(mask);
			cpumask_set_cpu(cpu, mask);
			ret = irq_set_affinity(msi->grps[i].gic_irq, mask);
			if (ret)
				dev_err(pcie->dev,
					"failed to set affinity for IRQ%d\n",
					msi->grps[i].gic_irq);
			free_cpumask_var(mask);
		} else {
			dev_err(pcie->dev, "failed to alloc CPU mask\n");
			ret = -EINVAL;
		}

		if (ret) {
			/* Free all configured/unconfigured IRQs */
			iproc_msi_irq_free(msi, cpu);
			return ret;
		}
	}

	return 0;
}

int iproc_msi_init(struct iproc_pcie *pcie, struct device_node *node)
{
	struct iproc_msi *msi;
	int i, ret;
	unsigned int cpu;

	if (!of_device_is_compatible(node, "brcm,iproc-msi"))
		return -ENODEV;

	if (!of_find_property(node, "msi-controller", NULL))
		return -ENODEV;

	if (pcie->msi)
		return -EBUSY;

	msi = devm_kzalloc(pcie->dev, sizeof(*msi), GFP_KERNEL);
	if (!msi)
		return -ENOMEM;

	msi->pcie = pcie;
	pcie->msi = msi;
	msi->msi_addr = pcie->base_addr;
	mutex_init(&msi->bitmap_lock);
	msi->nr_cpus = num_possible_cpus();

	msi->nr_irqs = of_irq_count(node);
	if (!msi->nr_irqs) {
		dev_err(pcie->dev, "found no MSI GIC interrupt\n");
		return -ENODEV;
	}

	if (msi->nr_irqs > NR_HW_IRQS) {
		dev_warn(pcie->dev, "too many MSI GIC interrupts defined %d\n",
			 msi->nr_irqs);
		msi->nr_irqs = NR_HW_IRQS;
	}

	if (msi->nr_irqs < msi->nr_cpus) {
		dev_err(pcie->dev,
			"not enough GIC interrupts for MSI affinity\n");
		return -EINVAL;
	}

	if (msi->nr_irqs % msi->nr_cpus != 0) {
		msi->nr_irqs -= msi->nr_irqs % msi->nr_cpus;
		dev_warn(pcie->dev, "Reducing number of interrupts to %d\n",
			 msi->nr_irqs);
	}
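
	/*
	 * For example, with 6 GIC interrupts on a 4-core SoC, nr_irqs is
	 * trimmed to 4 so that every CPU services exactly one event queue,
	 * preserving the "M is always a multiple of N" assumption documented
	 * above hwirq_to_cpu().
	 */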

	switch (pcie->type) {
	case IPROC_PCIE_PAXB_BCMA:
	case IPROC_PCIE_PAXB:
		msi->reg_offsets = iproc_msi_reg_paxb;
		msi->nr_eq_region = 1;
		msi->nr_msi_region = 1;
		break;
	case IPROC_PCIE_PAXC:
		msi->reg_offsets = iproc_msi_reg_paxc;
		msi->nr_eq_region = msi->nr_irqs;
		msi->nr_msi_region = msi->nr_irqs;
		break;
	default:
		dev_err(pcie->dev, "incompatible iProc PCIe interface\n");
		return -EINVAL;
	}

	if (of_find_property(node, "brcm,pcie-msi-inten", NULL))
		msi->has_inten_reg = true;

	msi->nr_msi_vecs = msi->nr_irqs * EQ_LEN;
	msi->bitmap = devm_kcalloc(pcie->dev, BITS_TO_LONGS(msi->nr_msi_vecs),
				   sizeof(*msi->bitmap), GFP_KERNEL);
	if (!msi->bitmap)
		return -ENOMEM;

	msi->grps = devm_kcalloc(pcie->dev, msi->nr_irqs, sizeof(*msi->grps),
				 GFP_KERNEL);
	if (!msi->grps)
		return -ENOMEM;

	for (i = 0; i < msi->nr_irqs; i++) {
		unsigned int irq = irq_of_parse_and_map(node, i);

		if (!irq) {
			dev_err(pcie->dev, "unable to parse/map interrupt\n");
			ret = -ENODEV;
			goto free_irqs;
		}
		msi->grps[i].gic_irq = irq;
		msi->grps[i].msi = msi;
		msi->grps[i].eq = i;
	}

	/* Reserve memory for event queue and make sure memories are zeroed */
	msi->eq_cpu = dma_alloc_coherent(pcie->dev,
					 msi->nr_eq_region * EQ_MEM_REGION_SIZE,
					 &msi->eq_dma, GFP_KERNEL);
	if (!msi->eq_cpu) {
		ret = -ENOMEM;
		goto free_irqs;
	}

	ret = iproc_msi_alloc_domains(node, msi);
	if (ret) {
		dev_err(pcie->dev, "failed to create MSI domains\n");
		goto free_eq_dma;
	}

	for_each_online_cpu(cpu) {
		ret = iproc_msi_irq_setup(msi, cpu);
		if (ret)
			goto free_msi_irq;
	}

	iproc_msi_enable(msi);

	return 0;

free_msi_irq:
	for_each_online_cpu(cpu)
		iproc_msi_irq_free(msi, cpu);
	iproc_msi_free_domains(msi);

free_eq_dma:
	dma_free_coherent(pcie->dev, msi->nr_eq_region * EQ_MEM_REGION_SIZE,
			  msi->eq_cpu, msi->eq_dma);

free_irqs:
	for (i = 0; i < msi->nr_irqs; i++) {
		if (msi->grps[i].gic_irq)
			irq_dispose_mapping(msi->grps[i].gic_irq);
	}
	pcie->msi = NULL;
	return ret;
}
EXPORT_SYMBOL(iproc_msi_init);

void iproc_msi_exit(struct iproc_pcie *pcie)
{
	struct iproc_msi *msi = pcie->msi;
	unsigned int i, cpu;

	if (!msi)
		return;

	iproc_msi_disable(msi);

	for_each_online_cpu(cpu)
		iproc_msi_irq_free(msi, cpu);

	iproc_msi_free_domains(msi);

	dma_free_coherent(pcie->dev, msi->nr_eq_region * EQ_MEM_REGION_SIZE,
			  msi->eq_cpu, msi->eq_dma);

	for (i = 0; i < msi->nr_irqs; i++) {
		if (msi->grps[i].gic_irq)
			irq_dispose_mapping(msi->grps[i].gic_irq);
	}
}
EXPORT_SYMBOL(iproc_msi_exit);