/*
 * arch/powerpc/sysdev/ipic.c
 *
 * IPIC routines implementations.
 *
 * Copyright 2005 Freescale Semiconductor, Inc.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the
 * Free Software Foundation; either version 2 of the License, or (at your
 * option) any later version.
 */
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/errno.h>
#include <linux/reboot.h>
#include <linux/slab.h>
#include <linux/stddef.h>
#include <linux/sched.h>
#include <linux/signal.h>
#include <linux/syscore_ops.h>
#include <linux/device.h>
#include <linux/bootmem.h>
#include <linux/spinlock.h>
#include <linux/fsl_devices.h>
#include <asm/irq.h>
#include <asm/io.h>
#include <asm/prom.h>
#include <asm/ipic.h>

#include "ipic.h"

static struct ipic * primary_ipic;
static struct irq_chip ipic_level_irq_chip, ipic_edge_irq_chip;
static DEFINE_RAW_SPINLOCK(ipic_lock);
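/*
 * Per-source register map, indexed by hardware interrupt number.  Each
 * entry gives the offsets of the mask, priority and force registers that
 * control the source, the bit position used within the mask/force
 * registers, and the source's position identifier within its priority
 * register (prio_mask).  Edge-capable sources additionally carry an ack
 * (pending) register offset.
 */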
static struct ipic_info ipic_info[] = {
	[1] = {
		.mask = IPIC_SIMSR_H,
		.prio = IPIC_SIPRR_C,
		.force = IPIC_SIFCR_H,
		.bit = 16,
		.prio_mask = 0,
	},
	[2] = {
		.mask = IPIC_SIMSR_H,
		.prio = IPIC_SIPRR_C,
		.force = IPIC_SIFCR_H,
		.bit = 17,
		.prio_mask = 1,
	},
	[3] = {
		.mask = IPIC_SIMSR_H,
		.prio = IPIC_SIPRR_C,
		.force = IPIC_SIFCR_H,
		.bit = 18,
		.prio_mask = 2,
	},
	[4] = {
		.mask = IPIC_SIMSR_H,
		.prio = IPIC_SIPRR_C,
		.force = IPIC_SIFCR_H,
		.bit = 19,
		.prio_mask = 3,
	},
	[5] = {
		.mask = IPIC_SIMSR_H,
		.prio = IPIC_SIPRR_C,
		.force = IPIC_SIFCR_H,
		.bit = 20,
		.prio_mask = 4,
	},
	[6] = {
		.mask = IPIC_SIMSR_H,
		.prio = IPIC_SIPRR_C,
		.force = IPIC_SIFCR_H,
		.bit = 21,
		.prio_mask = 5,
	},
	[7] = {
		.mask = IPIC_SIMSR_H,
		.prio = IPIC_SIPRR_C,
		.force = IPIC_SIFCR_H,
		.bit = 22,
		.prio_mask = 6,
	},
	[8] = {
		.mask = IPIC_SIMSR_H,
		.prio = IPIC_SIPRR_C,
		.force = IPIC_SIFCR_H,
		.bit = 23,
		.prio_mask = 7,
	},
	[9] = {
		.mask = IPIC_SIMSR_H,
		.prio = IPIC_SIPRR_D,
		.force = IPIC_SIFCR_H,
		.bit = 24,
		.prio_mask = 0,
	},
	[10] = {
		.mask = IPIC_SIMSR_H,
		.prio = IPIC_SIPRR_D,
		.force = IPIC_SIFCR_H,
		.bit = 25,
		.prio_mask = 1,
	},
	[11] = {
		.mask = IPIC_SIMSR_H,
		.prio = IPIC_SIPRR_D,
		.force = IPIC_SIFCR_H,
		.bit = 26,
		.prio_mask = 2,
	},
	[12] = {
		.mask = IPIC_SIMSR_H,
		.prio = IPIC_SIPRR_D,
		.force = IPIC_SIFCR_H,
		.bit = 27,
		.prio_mask = 3,
	},
	[13] = {
		.mask = IPIC_SIMSR_H,
		.prio = IPIC_SIPRR_D,
		.force = IPIC_SIFCR_H,
		.bit = 28,
		.prio_mask = 4,
	},
	[14] = {
		.mask = IPIC_SIMSR_H,
		.prio = IPIC_SIPRR_D,
		.force = IPIC_SIFCR_H,
		.bit = 29,
		.prio_mask = 5,
	},
	[15] = {
		.mask = IPIC_SIMSR_H,
		.prio = IPIC_SIPRR_D,
		.force = IPIC_SIFCR_H,
		.bit = 30,
		.prio_mask = 6,
	},
	[16] = {
		.mask = IPIC_SIMSR_H,
		.prio = IPIC_SIPRR_D,
		.force = IPIC_SIFCR_H,
		.bit = 31,
		.prio_mask = 7,
	},
	[17] = {
		.ack = IPIC_SEPNR,
		.mask = IPIC_SEMSR,
		.prio = IPIC_SMPRR_A,
		.force = IPIC_SEFCR,
		.bit = 1,
		.prio_mask = 5,
	},
	[18] = {
		.ack = IPIC_SEPNR,
		.mask = IPIC_SEMSR,
		.prio = IPIC_SMPRR_A,
		.force = IPIC_SEFCR,
		.bit = 2,
		.prio_mask = 6,
	},
	[19] = {
		.ack = IPIC_SEPNR,
		.mask = IPIC_SEMSR,
		.prio = IPIC_SMPRR_A,
		.force = IPIC_SEFCR,
		.bit = 3,
		.prio_mask = 7,
	},
	[20] = {
		.ack = IPIC_SEPNR,
		.mask = IPIC_SEMSR,
		.prio = IPIC_SMPRR_B,
		.force = IPIC_SEFCR,
		.bit = 4,
		.prio_mask = 4,
	},
	[21] = {
		.ack = IPIC_SEPNR,
		.mask = IPIC_SEMSR,
		.prio = IPIC_SMPRR_B,
		.force = IPIC_SEFCR,
		.bit = 5,
		.prio_mask = 5,
	},
	[22] = {
		.ack = IPIC_SEPNR,
		.mask = IPIC_SEMSR,
		.prio = IPIC_SMPRR_B,
		.force = IPIC_SEFCR,
		.bit = 6,
		.prio_mask = 6,
	},
	[23] = {
		.ack = IPIC_SEPNR,
		.mask = IPIC_SEMSR,
		.prio = IPIC_SMPRR_B,
		.force = IPIC_SEFCR,
		.bit = 7,
		.prio_mask = 7,
	},
	[32] = {
		.mask = IPIC_SIMSR_H,
		.prio = IPIC_SIPRR_A,
		.force = IPIC_SIFCR_H,
		.bit = 0,
		.prio_mask = 0,
	},
	[33] = {
		.mask = IPIC_SIMSR_H,
		.prio = IPIC_SIPRR_A,
		.force = IPIC_SIFCR_H,
		.bit = 1,
		.prio_mask = 1,
	},
	[34] = {
		.mask = IPIC_SIMSR_H,
		.prio = IPIC_SIPRR_A,
		.force = IPIC_SIFCR_H,
		.bit = 2,
		.prio_mask = 2,
	},
	[35] = {
		.mask = IPIC_SIMSR_H,
		.prio = IPIC_SIPRR_A,
		.force = IPIC_SIFCR_H,
		.bit = 3,
		.prio_mask = 3,
	},
	[36] = {
		.mask = IPIC_SIMSR_H,
		.prio = IPIC_SIPRR_A,
		.force = IPIC_SIFCR_H,
		.bit = 4,
		.prio_mask = 4,
	},
	[37] = {
		.mask = IPIC_SIMSR_H,
		.prio = IPIC_SIPRR_A,
		.force = IPIC_SIFCR_H,
		.bit = 5,
		.prio_mask = 5,
	},
	[38] = {
		.mask = IPIC_SIMSR_H,
		.prio = IPIC_SIPRR_A,
		.force = IPIC_SIFCR_H,
		.bit = 6,
		.prio_mask = 6,
	},
	[39] = {
		.mask = IPIC_SIMSR_H,
		.prio = IPIC_SIPRR_A,
		.force = IPIC_SIFCR_H,
		.bit = 7,
		.prio_mask = 7,
	},
	[40] = {
		.mask = IPIC_SIMSR_H,
		.prio = IPIC_SIPRR_B,
		.force = IPIC_SIFCR_H,
		.bit = 8,
		.prio_mask = 0,
	},
	[41] = {
		.mask = IPIC_SIMSR_H,
		.prio = IPIC_SIPRR_B,
		.force = IPIC_SIFCR_H,
		.bit = 9,
		.prio_mask = 1,
	},
	[42] = {
		.mask = IPIC_SIMSR_H,
		.prio = IPIC_SIPRR_B,
		.force = IPIC_SIFCR_H,
		.bit = 10,
		.prio_mask = 2,
	},
	[43] = {
		.mask = IPIC_SIMSR_H,
		.prio = IPIC_SIPRR_B,
		.force = IPIC_SIFCR_H,
		.bit = 11,
		.prio_mask = 3,
	},
	[44] = {
		.mask = IPIC_SIMSR_H,
		.prio = IPIC_SIPRR_B,
		.force = IPIC_SIFCR_H,
		.bit = 12,
		.prio_mask = 4,
	},
	[45] = {
		.mask = IPIC_SIMSR_H,
		.prio = IPIC_SIPRR_B,
		.force = IPIC_SIFCR_H,
		.bit = 13,
		.prio_mask = 5,
	},
	[46] = {
		.mask = IPIC_SIMSR_H,
		.prio = IPIC_SIPRR_B,
		.force = IPIC_SIFCR_H,
		.bit = 14,
		.prio_mask = 6,
	},
	[47] = {
		.mask = IPIC_SIMSR_H,
		.prio = IPIC_SIPRR_B,
		.force = IPIC_SIFCR_H,
		.bit = 15,
		.prio_mask = 7,
	},
	[48] = {
		.mask = IPIC_SEMSR,
		.prio = IPIC_SMPRR_A,
		.force = IPIC_SEFCR,
		.bit = 0,
		.prio_mask = 4,
	},
	[64] = {
		.mask = IPIC_SIMSR_L,
		.prio = IPIC_SMPRR_A,
		.force = IPIC_SIFCR_L,
		.bit = 0,
		.prio_mask = 0,
	},
	[65] = {
		.mask = IPIC_SIMSR_L,
		.prio = IPIC_SMPRR_A,
		.force = IPIC_SIFCR_L,
		.bit = 1,
		.prio_mask = 1,
	},
	[66] = {
		.mask = IPIC_SIMSR_L,
		.prio = IPIC_SMPRR_A,
		.force = IPIC_SIFCR_L,
		.bit = 2,
		.prio_mask = 2,
	},
	[67] = {
		.mask = IPIC_SIMSR_L,
		.prio = IPIC_SMPRR_A,
		.force = IPIC_SIFCR_L,
		.bit = 3,
		.prio_mask = 3,
	},
	[68] = {
		.mask = IPIC_SIMSR_L,
		.prio = IPIC_SMPRR_B,
		.force = IPIC_SIFCR_L,
		.bit = 4,
		.prio_mask = 0,
	},
	[69] = {
		.mask = IPIC_SIMSR_L,
		.prio = IPIC_SMPRR_B,
		.force = IPIC_SIFCR_L,
		.bit = 5,
		.prio_mask = 1,
	},
	[70] = {
		.mask = IPIC_SIMSR_L,
		.prio = IPIC_SMPRR_B,
		.force = IPIC_SIFCR_L,
		.bit = 6,
		.prio_mask = 2,
	},
	[71] = {
		.mask = IPIC_SIMSR_L,
		.prio = IPIC_SMPRR_B,
		.force = IPIC_SIFCR_L,
		.bit = 7,
		.prio_mask = 3,
	},
	[72] = {
		.mask = IPIC_SIMSR_L,
		.prio = 0,
		.force = IPIC_SIFCR_L,
		.bit = 8,
	},
	[73] = {
		.mask = IPIC_SIMSR_L,
		.prio = 0,
		.force = IPIC_SIFCR_L,
		.bit = 9,
	},
	[74] = {
		.mask = IPIC_SIMSR_L,
		.prio = 0,
		.force = IPIC_SIFCR_L,
		.bit = 10,
	},
	[75] = {
		.mask = IPIC_SIMSR_L,
		.prio = 0,
		.force = IPIC_SIFCR_L,
		.bit = 11,
	},
	[76] = {
		.mask = IPIC_SIMSR_L,
		.prio = 0,
		.force = IPIC_SIFCR_L,
		.bit = 12,
	},
	[77] = {
		.mask = IPIC_SIMSR_L,
		.prio = 0,
		.force = IPIC_SIFCR_L,
		.bit = 13,
	},
	[78] = {
		.mask = IPIC_SIMSR_L,
		.prio = 0,
		.force = IPIC_SIFCR_L,
		.bit = 14,
	},
	[79] = {
		.mask = IPIC_SIMSR_L,
		.prio = 0,
		.force = IPIC_SIFCR_L,
		.bit = 15,
	},
	[80] = {
		.mask = IPIC_SIMSR_L,
		.prio = 0,
		.force = IPIC_SIFCR_L,
		.bit = 16,
	},
	[81] = {
		.mask = IPIC_SIMSR_L,
		.prio = 0,
		.force = IPIC_SIFCR_L,
		.bit = 17,
	},
	[82] = {
		.mask = IPIC_SIMSR_L,
		.prio = 0,
		.force = IPIC_SIFCR_L,
		.bit = 18,
	},
	[83] = {
		.mask = IPIC_SIMSR_L,
		.prio = 0,
		.force = IPIC_SIFCR_L,
		.bit = 19,
	},
	[84] = {
		.mask = IPIC_SIMSR_L,
		.prio = 0,
		.force = IPIC_SIFCR_L,
		.bit = 20,
	},
	[85] = {
		.mask = IPIC_SIMSR_L,
		.prio = 0,
		.force = IPIC_SIFCR_L,
		.bit = 21,
	},
	[86] = {
		.mask = IPIC_SIMSR_L,
		.prio = 0,
		.force = IPIC_SIFCR_L,
		.bit = 22,
	},
	[87] = {
		.mask = IPIC_SIMSR_L,
		.prio = 0,
		.force = IPIC_SIFCR_L,
		.bit = 23,
	},
	[88] = {
		.mask = IPIC_SIMSR_L,
		.prio = 0,
		.force = IPIC_SIFCR_L,
		.bit = 24,
	},
	[89] = {
		.mask = IPIC_SIMSR_L,
		.prio = 0,
		.force = IPIC_SIFCR_L,
		.bit = 25,
	},
	[90] = {
		.mask = IPIC_SIMSR_L,
		.prio = 0,
		.force = IPIC_SIFCR_L,
		.bit = 26,
	},
	[91] = {
		.mask = IPIC_SIMSR_L,
		.prio = 0,
		.force = IPIC_SIFCR_L,
		.bit = 27,
	},
	[94] = {
		.mask = IPIC_SIMSR_L,
		.prio = 0,
		.force = IPIC_SIFCR_L,
		.bit = 30,
	},
};
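/* Register accessors: 'reg' is a byte offset from the IPIC base; all
 * IPIC registers are 32-bit big-endian. */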
static inline u32 ipic_read(volatile u32 __iomem *base, unsigned int reg)
{
	return in_be32(base + (reg >> 2));
}

static inline void ipic_write(volatile u32 __iomem *base, unsigned int reg, u32 value)
{
	out_be32(base + (reg >> 2), value);
}

static inline struct ipic * ipic_from_irq(unsigned int virq)
{
	return primary_ipic;
}

static void ipic_unmask_irq(struct irq_data *d)
{
	struct ipic *ipic = ipic_from_irq(d->irq);
	unsigned int src = irqd_to_hwirq(d);
	unsigned long flags;
	u32 temp;

	raw_spin_lock_irqsave(&ipic_lock, flags);

	temp = ipic_read(ipic->regs, ipic_info[src].mask);
	temp |= (1 << (31 - ipic_info[src].bit));
	ipic_write(ipic->regs, ipic_info[src].mask, temp);

	raw_spin_unlock_irqrestore(&ipic_lock, flags);
}

static void ipic_mask_irq(struct irq_data *d)
{
	struct ipic *ipic = ipic_from_irq(d->irq);
	unsigned int src = irqd_to_hwirq(d);
	unsigned long flags;
	u32 temp;

	raw_spin_lock_irqsave(&ipic_lock, flags);

	temp = ipic_read(ipic->regs, ipic_info[src].mask);
	temp &= ~(1 << (31 - ipic_info[src].bit));
	ipic_write(ipic->regs, ipic_info[src].mask, temp);

	/* mb() can't guarantee that masking is finished. But it does finish
	 * for nearly all cases. */
	mb();

	raw_spin_unlock_irqrestore(&ipic_lock, flags);
}

static void ipic_ack_irq(struct irq_data *d)
{
	struct ipic *ipic = ipic_from_irq(d->irq);
	unsigned int src = irqd_to_hwirq(d);
	unsigned long flags;
	u32 temp;

	raw_spin_lock_irqsave(&ipic_lock, flags);

	temp = 1 << (31 - ipic_info[src].bit);
	ipic_write(ipic->regs, ipic_info[src].ack, temp);

	/* mb() can't guarantee that ack is finished. But it does finish
	 * for nearly all cases. */
	mb();

	raw_spin_unlock_irqrestore(&ipic_lock, flags);
}

static void ipic_mask_irq_and_ack(struct irq_data *d)
{
	struct ipic *ipic = ipic_from_irq(d->irq);
	unsigned int src = irqd_to_hwirq(d);
	unsigned long flags;
	u32 temp;

	raw_spin_lock_irqsave(&ipic_lock, flags);

	temp = ipic_read(ipic->regs, ipic_info[src].mask);
	temp &= ~(1 << (31 - ipic_info[src].bit));
	ipic_write(ipic->regs, ipic_info[src].mask, temp);

	temp = 1 << (31 - ipic_info[src].bit);
	ipic_write(ipic->regs, ipic_info[src].ack, temp);

	/* mb() can't guarantee that ack is finished. But it does finish
	 * for nearly all cases. */
	mb();

	raw_spin_unlock_irqrestore(&ipic_lock, flags);
}

static int ipic_set_irq_type(struct irq_data *d, unsigned int flow_type)
{
	struct ipic *ipic = ipic_from_irq(d->irq);
	unsigned int src = irqd_to_hwirq(d);
	unsigned int vold, vnew, edibit;

	if (flow_type == IRQ_TYPE_NONE)
		flow_type = IRQ_TYPE_LEVEL_LOW;

	/* ipic supports only low assertion and high-to-low change senses
	 */
	if (!(flow_type & (IRQ_TYPE_LEVEL_LOW | IRQ_TYPE_EDGE_FALLING))) {
		printk(KERN_ERR "ipic: sense type 0x%x not supported\n",
			flow_type);
		return -EINVAL;
	}
	/* ipic supports only edge mode on external interrupts */
	if ((flow_type & IRQ_TYPE_EDGE_FALLING) && !ipic_info[src].ack) {
		printk(KERN_ERR "ipic: edge sense not supported on internal "
				"interrupts\n");
		return -EINVAL;
	}

	irqd_set_trigger_type(d, flow_type);
	if (flow_type & IRQ_TYPE_LEVEL_LOW) {
		__irq_set_handler_locked(d->irq, handle_level_irq);
		d->chip = &ipic_level_irq_chip;
	} else {
		__irq_set_handler_locked(d->irq, handle_edge_irq);
		d->chip = &ipic_edge_irq_chip;
	}

	/* only EXT IRQ senses are programmable on ipic
	 * internal IRQ senses are LEVEL_LOW
	 */
	if (src == IPIC_IRQ_EXT0)
		edibit = 15;
	else
		if (src >= IPIC_IRQ_EXT1 && src <= IPIC_IRQ_EXT7)
			edibit = (14 - (src - IPIC_IRQ_EXT1));
		else
			return (flow_type & IRQ_TYPE_LEVEL_LOW) ? 0 : -EINVAL;

	vold = ipic_read(ipic->regs, IPIC_SECNR);
	if ((flow_type & IRQ_TYPE_SENSE_MASK) == IRQ_TYPE_EDGE_FALLING) {
		vnew = vold | (1 << edibit);
	} else {
		vnew = vold & ~(1 << edibit);
	}
	if (vold != vnew)
		ipic_write(ipic->regs, IPIC_SECNR, vnew);
	return IRQ_SET_MASK_OK_NOCOPY;
}

/* level interrupts and edge interrupts have different ack operations */
static struct irq_chip ipic_level_irq_chip = {
	.name = "IPIC",
	.irq_unmask = ipic_unmask_irq,
	.irq_mask = ipic_mask_irq,
	.irq_mask_ack = ipic_mask_irq,
	.irq_set_type = ipic_set_irq_type,
};

static struct irq_chip ipic_edge_irq_chip = {
	.name = "IPIC",
	.irq_unmask = ipic_unmask_irq,
	.irq_mask = ipic_mask_irq,
	.irq_mask_ack = ipic_mask_irq_and_ack,
	.irq_ack = ipic_ack_irq,
	.irq_set_type = ipic_set_irq_type,
};

static int ipic_host_match(struct irq_host *h, struct device_node *node)
{
	/* Exact match, unless ipic node is NULL */
	return h->of_node == NULL || h->of_node == node;
}

static int ipic_host_map(struct irq_host *h, unsigned int virq,
			 irq_hw_number_t hw)
{
	struct ipic *ipic = h->host_data;

	irq_set_chip_data(virq, ipic);
	irq_set_chip_and_handler(virq, &ipic_level_irq_chip, handle_level_irq);

	/* Set default irq type */
	irq_set_irq_type(virq, IRQ_TYPE_NONE);

	return 0;
}

static int ipic_host_xlate(struct irq_host *h, struct device_node *ct,
			   const u32 *intspec, unsigned int intsize,
			   irq_hw_number_t *out_hwirq, unsigned int *out_flags)
{
	/* interrupt sense values coming from the device tree equal either
	 * LEVEL_LOW (low assertion) or EDGE_FALLING (high-to-low change)
	 */
	*out_hwirq = intspec[0];
	if (intsize > 1)
		*out_flags = intspec[1];
	else
		*out_flags = IRQ_TYPE_NONE;
	return 0;
}

static struct irq_host_ops ipic_host_ops = {
	.match = ipic_host_match,
	.map = ipic_host_map,
	.xlate = ipic_host_xlate,
};

struct ipic * __init ipic_init(struct device_node *node, unsigned int flags)
{
	struct ipic *ipic;
	struct resource res;
	u32 temp = 0, ret;

	ret = of_address_to_resource(node, 0, &res);
	if (ret)
		return NULL;

	ipic = kzalloc(sizeof(*ipic), GFP_KERNEL);
	if (ipic == NULL)
		return NULL;

	ipic->irqhost = irq_alloc_host(node, IRQ_HOST_MAP_LINEAR,
				       NR_IPIC_INTS,
				       &ipic_host_ops, 0);
	if (ipic->irqhost == NULL) {
		kfree(ipic);
		return NULL;
	}

	ipic->regs = ioremap(res.start, resource_size(&res));

	ipic->irqhost->host_data = ipic;

	/* init hw */
	ipic_write(ipic->regs, IPIC_SICNR, 0x0);

	/* default priority scheme is grouped. If spread mode is required
	 * configure SICFR accordingly */
	if (flags & IPIC_SPREADMODE_GRP_A)
		temp |= SICFR_IPSA;
	if (flags & IPIC_SPREADMODE_GRP_B)
		temp |= SICFR_IPSB;
	if (flags & IPIC_SPREADMODE_GRP_C)
		temp |= SICFR_IPSC;
	if (flags & IPIC_SPREADMODE_GRP_D)
		temp |= SICFR_IPSD;
	if (flags & IPIC_SPREADMODE_MIX_A)
		temp |= SICFR_MPSA;
	if (flags & IPIC_SPREADMODE_MIX_B)
		temp |= SICFR_MPSB;

	ipic_write(ipic->regs, IPIC_SICFR, temp);

	/* handle MCP route */
	temp = 0;
	if (flags & IPIC_DISABLE_MCP_OUT)
		temp = SERCR_MCPR;
	ipic_write(ipic->regs, IPIC_SERCR, temp);

	/* handle routing of IRQ0 to MCP */
	temp = ipic_read(ipic->regs, IPIC_SEMSR);

	if (flags & IPIC_IRQ0_MCP)
		temp |= SEMSR_SIRQ0;
	else
		temp &= ~SEMSR_SIRQ0;

	ipic_write(ipic->regs, IPIC_SEMSR, temp);

	primary_ipic = ipic;
	irq_set_default_host(primary_ipic->irqhost);

	ipic_write(ipic->regs, IPIC_SIMSR_H, 0);
	ipic_write(ipic->regs, IPIC_SIMSR_L, 0);

	printk ("IPIC (%d IRQ sources) at %p\n", NR_IPIC_INTS,
			primary_ipic->regs);

	return ipic;
}
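/*
 * Write a source's position identifier (prio_mask) into the 3-bit slot
 * for the requested priority level: levels 0-3 use the upper half of the
 * source's priority register, levels 4-7 the lower half.
 */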
int ipic_set_priority(unsigned int virq, unsigned int priority)
{
	struct ipic *ipic = ipic_from_irq(virq);
	unsigned int src = virq_to_hw(virq);
	u32 temp;

	if (priority > 7)
		return -EINVAL;
	if (src > 127)
		return -EINVAL;
	if (ipic_info[src].prio == 0)
		return -EINVAL;

	temp = ipic_read(ipic->regs, ipic_info[src].prio);

	if (priority < 4) {
		temp &= ~(0x7 << (20 + (3 - priority) * 3));
		temp |= ipic_info[src].prio_mask << (20 + (3 - priority) * 3);
	} else {
		temp &= ~(0x7 << (4 + (7 - priority) * 3));
		temp |= ipic_info[src].prio_mask << (4 + (7 - priority) * 3);
	}

	ipic_write(ipic->regs, ipic_info[src].prio, temp);

	return 0;
}
void ipic_set_highest_priority(unsigned int virq)
{
	struct ipic *ipic = ipic_from_irq(virq);
	unsigned int src = virq_to_hw(virq);
	u32 temp;

	temp = ipic_read(ipic->regs, IPIC_SICFR);

	/* clear and set HPI */
	temp &= ~0x7f000000;
	temp |= (src & 0x7f) << 24;

	ipic_write(ipic->regs, IPIC_SICFR, temp);
}
void ipic_set_default_priority(void)
{
	ipic_write(primary_ipic->regs, IPIC_SIPRR_A, IPIC_PRIORITY_DEFAULT);
	ipic_write(primary_ipic->regs, IPIC_SIPRR_B, IPIC_PRIORITY_DEFAULT);
	ipic_write(primary_ipic->regs, IPIC_SIPRR_C, IPIC_PRIORITY_DEFAULT);
	ipic_write(primary_ipic->regs, IPIC_SIPRR_D, IPIC_PRIORITY_DEFAULT);
	ipic_write(primary_ipic->regs, IPIC_SMPRR_A, IPIC_PRIORITY_DEFAULT);
	ipic_write(primary_ipic->regs, IPIC_SMPRR_B, IPIC_PRIORITY_DEFAULT);
}

void ipic_enable_mcp(enum ipic_mcp_irq mcp_irq)
{
	struct ipic *ipic = primary_ipic;
	u32 temp;

	temp = ipic_read(ipic->regs, IPIC_SERMR);
	temp |= (1 << (31 - mcp_irq));
	ipic_write(ipic->regs, IPIC_SERMR, temp);
}
void ipic_disable_mcp(enum ipic_mcp_irq mcp_irq)
{
	struct ipic *ipic = primary_ipic;
	u32 temp;

	temp = ipic_read(ipic->regs, IPIC_SERMR);
	temp &= ~(1 << (31 - mcp_irq));
	ipic_write(ipic->regs, IPIC_SERMR, temp);
}
u32 ipic_get_mcp_status(void)
{
	return ipic_read(primary_ipic->regs, IPIC_SERMR);
}

void ipic_clear_mcp_status(u32 mask)
{
	ipic_write(primary_ipic->regs, IPIC_SERMR, mask);
}

/* Return an interrupt vector or NO_IRQ if no interrupt is pending. */
unsigned int ipic_get_irq(void)
{
	int irq;

	BUG_ON(primary_ipic == NULL);

#define IPIC_SIVCR_VECTOR_MASK 0x7f
	irq = ipic_read(primary_ipic->regs, IPIC_SIVCR) & IPIC_SIVCR_VECTOR_MASK;

	if (irq == 0)	/* 0 --> no irq is pending */
		return NO_IRQ;

	return irq_linear_revmap(primary_ipic->irqhost, irq);
}
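/*
 * Suspend/resume support: save the IPIC configuration, priority and mask
 * registers before suspend and restore them on resume.  For deep sleep,
 * every source is masked first so that no interrupt can be left pending
 * (see the 831x note below).
 */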
#ifdef CONFIG_SUSPEND
static struct {
	u32 sicfr;
	u32 siprr[2];
	u32 simsr[2];
	u32 sicnr;
	u32 smprr[2];
	u32 semsr;
	u32 secnr;
	u32 sermr;
	u32 sercr;
} ipic_saved_state;

static int ipic_suspend(void)
{
	struct ipic *ipic = primary_ipic;

	ipic_saved_state.sicfr = ipic_read(ipic->regs, IPIC_SICFR);
	ipic_saved_state.siprr[0] = ipic_read(ipic->regs, IPIC_SIPRR_A);
	ipic_saved_state.siprr[1] = ipic_read(ipic->regs, IPIC_SIPRR_D);
	ipic_saved_state.simsr[0] = ipic_read(ipic->regs, IPIC_SIMSR_H);
	ipic_saved_state.simsr[1] = ipic_read(ipic->regs, IPIC_SIMSR_L);
	ipic_saved_state.sicnr = ipic_read(ipic->regs, IPIC_SICNR);
	ipic_saved_state.smprr[0] = ipic_read(ipic->regs, IPIC_SMPRR_A);
	ipic_saved_state.smprr[1] = ipic_read(ipic->regs, IPIC_SMPRR_B);
	ipic_saved_state.semsr = ipic_read(ipic->regs, IPIC_SEMSR);
	ipic_saved_state.secnr = ipic_read(ipic->regs, IPIC_SECNR);
	ipic_saved_state.sermr = ipic_read(ipic->regs, IPIC_SERMR);
	ipic_saved_state.sercr = ipic_read(ipic->regs, IPIC_SERCR);

	if (fsl_deep_sleep()) {
		/* In deep sleep, make sure there can be no
		 * pending interrupts, as this can cause
		 * problems on 831x.
		 */
		ipic_write(ipic->regs, IPIC_SIMSR_H, 0);
		ipic_write(ipic->regs, IPIC_SIMSR_L, 0);
		ipic_write(ipic->regs, IPIC_SEMSR, 0);
		ipic_write(ipic->regs, IPIC_SERMR, 0);
	}

	return 0;
}

static void ipic_resume(void)
{
	struct ipic *ipic = primary_ipic;

	ipic_write(ipic->regs, IPIC_SICFR, ipic_saved_state.sicfr);
	ipic_write(ipic->regs, IPIC_SIPRR_A, ipic_saved_state.siprr[0]);
	ipic_write(ipic->regs, IPIC_SIPRR_D, ipic_saved_state.siprr[1]);
	ipic_write(ipic->regs, IPIC_SIMSR_H, ipic_saved_state.simsr[0]);
	ipic_write(ipic->regs, IPIC_SIMSR_L, ipic_saved_state.simsr[1]);
	ipic_write(ipic->regs, IPIC_SICNR, ipic_saved_state.sicnr);
	ipic_write(ipic->regs, IPIC_SMPRR_A, ipic_saved_state.smprr[0]);
	ipic_write(ipic->regs, IPIC_SMPRR_B, ipic_saved_state.smprr[1]);
	ipic_write(ipic->regs, IPIC_SEMSR, ipic_saved_state.semsr);
	ipic_write(ipic->regs, IPIC_SECNR, ipic_saved_state.secnr);
	ipic_write(ipic->regs, IPIC_SERMR, ipic_saved_state.sermr);
	ipic_write(ipic->regs, IPIC_SERCR, ipic_saved_state.sercr);
}
#else
#define ipic_suspend NULL
#define ipic_resume NULL
#endif
static struct syscore_ops ipic_syscore_ops = {
	.suspend = ipic_suspend,
	.resume = ipic_resume,
};

static int __init init_ipic_syscore(void)
{
	if (!primary_ipic || !primary_ipic->regs)
		return -ENODEV;

	printk(KERN_DEBUG "Registering ipic system core operations\n");
	register_syscore_ops(&ipic_syscore_ops);

	return 0;
}

subsys_initcall(init_ipic_syscore);