1/*
 2 * arch/powerpc/sysdev/mpic.c
3 *
4 * Driver for interrupt controllers following the OpenPIC standard, the
 5 * common implementation being IBM's MPIC. This driver can also deal
6 * with various broken implementations of this HW.
7 *
8 * Copyright (C) 2004 Benjamin Herrenschmidt, IBM Corp.
9 *
10 * This file is subject to the terms and conditions of the GNU General Public
11 * License. See the file COPYING in the main directory of this archive
12 * for more details.
13 */
14
15#undef DEBUG
16#undef DEBUG_IPI
17#undef DEBUG_IRQ
18#undef DEBUG_LOW
 19
20#include <linux/types.h>
21#include <linux/kernel.h>
22#include <linux/init.h>
23#include <linux/irq.h>
24#include <linux/smp.h>
25#include <linux/interrupt.h>
26#include <linux/bootmem.h>
27#include <linux/spinlock.h>
28#include <linux/pci.h>
29
30#include <asm/ptrace.h>
31#include <asm/signal.h>
32#include <asm/io.h>
33#include <asm/pgtable.h>
34#include <asm/irq.h>
35#include <asm/machdep.h>
36#include <asm/mpic.h>
37#include <asm/smp.h>
38
39#ifdef DEBUG
40#define DBG(fmt...) printk(fmt)
41#else
42#define DBG(fmt...)
43#endif
44
45static struct mpic *mpics;
46static struct mpic *mpic_primary;
47static DEFINE_SPINLOCK(mpic_lock);
48
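/* When CONFIG_IRQ_ALL_CPUS is set, mpic_setup_this_cpu() adds each CPU it is
 * called on to every source's destination mask; otherwise sources keep their
 * boot-time destination (the boot CPU).
 */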
 49#ifdef CONFIG_PPC32 /* XXX for now */
50#ifdef CONFIG_IRQ_ALL_CPUS
51#define distribute_irqs (1)
52#else
53#define distribute_irqs (0)
54#endif
 55#endif
56
57/*
58 * Register accessor functions
59 */
60
61
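/* Register offsets below are byte offsets: the accessors shift right by two
 * to index the u32 __iomem pointer, and use in_be32/in_le32 (or the write
 * equivalents) depending on whether MPIC_BIG_ENDIAN is set for the chip.
 */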
62static inline u32 _mpic_read(unsigned int be, volatile u32 __iomem *base,
63 unsigned int reg)
64{
65 if (be)
66 return in_be32(base + (reg >> 2));
67 else
68 return in_le32(base + (reg >> 2));
69}
70
71static inline void _mpic_write(unsigned int be, volatile u32 __iomem *base,
72 unsigned int reg, u32 value)
73{
74 if (be)
75 out_be32(base + (reg >> 2), value);
76 else
77 out_le32(base + (reg >> 2), value);
78}
79
80static inline u32 _mpic_ipi_read(struct mpic *mpic, unsigned int ipi)
81{
82 unsigned int be = (mpic->flags & MPIC_BIG_ENDIAN) != 0;
83 unsigned int offset = MPIC_GREG_IPI_VECTOR_PRI_0 + (ipi * 0x10);
84
85 if (mpic->flags & MPIC_BROKEN_IPI)
86 be = !be;
87 return _mpic_read(be, mpic->gregs, offset);
88}
89
90static inline void _mpic_ipi_write(struct mpic *mpic, unsigned int ipi, u32 value)
91{
92 unsigned int offset = MPIC_GREG_IPI_VECTOR_PRI_0 + (ipi * 0x10);
93
94 _mpic_write(mpic->flags & MPIC_BIG_ENDIAN, mpic->gregs, offset, value);
95}
96
97static inline u32 _mpic_cpu_read(struct mpic *mpic, unsigned int reg)
98{
99 unsigned int cpu = 0;
100
101 if (mpic->flags & MPIC_PRIMARY)
102 cpu = hard_smp_processor_id();
103 return _mpic_read(mpic->flags & MPIC_BIG_ENDIAN,
104 mpic->cpuregs[cpu], reg);
105}
106
107static inline void _mpic_cpu_write(struct mpic *mpic, unsigned int reg, u32 value)
108{
109 unsigned int cpu = 0;
110
111 if (mpic->flags & MPIC_PRIMARY)
112 cpu = hard_smp_processor_id();
113
114 _mpic_write(mpic->flags & MPIC_BIG_ENDIAN, mpic->cpuregs[cpu], reg, value);
115}
116
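/* An interrupt source number is split into an ISU index (src_no >> isu_shift)
 * and an offset within that ISU (src_no & isu_mask); each source occupies
 * MPIC_IRQ_STRIDE bytes of registers inside its ISU.
 */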
117static inline u32 _mpic_irq_read(struct mpic *mpic, unsigned int src_no, unsigned int reg)
118{
119 unsigned int isu = src_no >> mpic->isu_shift;
120 unsigned int idx = src_no & mpic->isu_mask;
121
122 return _mpic_read(mpic->flags & MPIC_BIG_ENDIAN, mpic->isus[isu],
123 reg + (idx * MPIC_IRQ_STRIDE));
124}
125
126static inline void _mpic_irq_write(struct mpic *mpic, unsigned int src_no,
127 unsigned int reg, u32 value)
128{
129 unsigned int isu = src_no >> mpic->isu_shift;
130 unsigned int idx = src_no & mpic->isu_mask;
131
132 _mpic_write(mpic->flags & MPIC_BIG_ENDIAN, mpic->isus[isu],
133 reg + (idx * MPIC_IRQ_STRIDE), value);
134}
135
136#define mpic_read(b,r) _mpic_read(mpic->flags & MPIC_BIG_ENDIAN,(b),(r))
137#define mpic_write(b,r,v) _mpic_write(mpic->flags & MPIC_BIG_ENDIAN,(b),(r),(v))
138#define mpic_ipi_read(i) _mpic_ipi_read(mpic,(i))
139#define mpic_ipi_write(i,v) _mpic_ipi_write(mpic,(i),(v))
140#define mpic_cpu_read(i) _mpic_cpu_read(mpic,(i))
141#define mpic_cpu_write(i,v) _mpic_cpu_write(mpic,(i),(v))
142#define mpic_irq_read(s,r) _mpic_irq_read(mpic,(s),(r))
143#define mpic_irq_write(s,r,v) _mpic_irq_write(mpic,(s),(r),(v))
144
145
146/*
147 * Low level utility functions
148 */
149
150
151
152/* Check if we have one of those nice broken MPICs with a flipped endian on
153 * reads from IPI registers
154 */
155static void __init mpic_test_broken_ipi(struct mpic *mpic)
156{
157 u32 r;
158
159 mpic_write(mpic->gregs, MPIC_GREG_IPI_VECTOR_PRI_0, MPIC_VECPRI_MASK);
160 r = mpic_read(mpic->gregs, MPIC_GREG_IPI_VECTOR_PRI_0);
161
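	/* If the value reads back byte-swapped relative to what was just
	 * written, this MPIC flips endianness on IPI register reads.
	 */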
162 if (r == le32_to_cpu(MPIC_VECPRI_MASK)) {
163 printk(KERN_INFO "mpic: Detected reversed IPI registers\n");
164 mpic->flags |= MPIC_BROKEN_IPI;
165 }
166}
167
168#ifdef CONFIG_MPIC_BROKEN_U3
169
170/* Test if an interrupt is sourced from HyperTransport (used on broken U3s)
171 * to force the edge setting on the MPIC and do the ack workaround.
172 */
 173static inline int mpic_is_ht_interrupt(struct mpic *mpic, unsigned int source)
 174{
 175 if (source >= 128 || !mpic->fixups)
 176 return 0;
 177 return mpic->fixups[source].base != NULL;
178}
179
 180
 181static inline void mpic_ht_end_irq(struct mpic *mpic, unsigned int source)
 182{
 183 struct mpic_irq_fixup *fixup = &mpic->fixups[source];
 184
185 if (fixup->applebase) {
186 unsigned int soff = (fixup->index >> 3) & ~3;
187 unsigned int mask = 1U << (fixup->index & 0x1f);
188 writel(mask, fixup->applebase + soff);
189 } else {
190 spin_lock(&mpic->fixup_lock);
191 writeb(0x11 + 2 * fixup->index, fixup->base + 2);
192 writel(fixup->data, fixup->base + 4);
193 spin_unlock(&mpic->fixup_lock);
194 }
195}
196
197static void mpic_startup_ht_interrupt(struct mpic *mpic, unsigned int source,
198 unsigned int irqflags)
199{
200 struct mpic_irq_fixup *fixup = &mpic->fixups[source];
201 unsigned long flags;
202 u32 tmp;
203
204 if (fixup->base == NULL)
205 return;
206
207 DBG("startup_ht_interrupt(%u, %u) index: %d\n",
208 source, irqflags, fixup->index);
209 spin_lock_irqsave(&mpic->fixup_lock, flags);
210 /* Enable and configure */
211 writeb(0x10 + 2 * fixup->index, fixup->base + 2);
212 tmp = readl(fixup->base + 4);
213 tmp &= ~(0x23U);
214 if (irqflags & IRQ_LEVEL)
215 tmp |= 0x22;
216 writel(tmp, fixup->base + 4);
217 spin_unlock_irqrestore(&mpic->fixup_lock, flags);
218}
219
220static void mpic_shutdown_ht_interrupt(struct mpic *mpic, unsigned int source,
221 unsigned int irqflags)
222{
223 struct mpic_irq_fixup *fixup = &mpic->fixups[source];
224 unsigned long flags;
225 u32 tmp;
226
227 if (fixup->base == NULL)
228 return;
229
230 DBG("shutdown_ht_interrupt(%u, %u)\n", source, irqflags);
231
232 /* Disable */
233 spin_lock_irqsave(&mpic->fixup_lock, flags);
234 writeb(0x10 + 2 * fixup->index, fixup->base + 2);
235 tmp = readl(fixup->base + 4);
 236 tmp |= 1;
237 writel(tmp, fixup->base + 4);
238 spin_unlock_irqrestore(&mpic->fixup_lock, flags);
239}
 240
241static void __init mpic_scan_ht_pic(struct mpic *mpic, u8 __iomem *devbase,
242 unsigned int devfn, u32 vdid)
 243{
 244 int i, irq, n;
 245 u8 __iomem *base;
 246 u32 tmp;
 247 u8 pos;
 248
249 for (pos = readb(devbase + PCI_CAPABILITY_LIST); pos != 0;
250 pos = readb(devbase + pos + PCI_CAP_LIST_NEXT)) {
251 u8 id = readb(devbase + pos + PCI_CAP_LIST_ID);
252 if (id == PCI_CAP_ID_HT_IRQCONF) {
253 id = readb(devbase + pos + 3);
254 if (id == 0x80)
255 break;
256 }
 257 }
258 if (pos == 0)
259 return;
260
261 base = devbase + pos;
262 writeb(0x01, base + 2);
263 n = (readl(base + 4) >> 16) & 0xff;
 264
265 printk(KERN_INFO "mpic: - HT:%02x.%x [0x%02x] vendor %04x device %04x"
266 " has %d irqs\n",
267 devfn >> 3, devfn & 0x7, pos, vdid & 0xffff, vdid >> 16, n + 1);
268
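	/* Per the HyperTransport interrupt discovery capability layout, +2 is
	 * an index register and +4 a 32-bit dataport: index 0x10 + 2*i selects
	 * the low word of interrupt definition register i, 0x11 + 2*i its
	 * high word.
	 */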
269 for (i = 0; i <= n; i++) {
270 writeb(0x10 + 2 * i, base + 2);
271 tmp = readl(base + 4);
 272 irq = (tmp >> 16) & 0xff;
273 DBG("HT PIC index 0x%x, irq 0x%x, tmp: %08x\n", i, irq, tmp);
 274 /* mask it, will be unmasked later */
275 tmp |= 0x1;
276 writel(tmp, base + 4);
277 mpic->fixups[irq].index = i;
278 mpic->fixups[irq].base = base;
279 /* Apple HT PIC has a non-standard way of doing EOIs */
280 if ((vdid & 0xffff) == 0x106b)
281 mpic->fixups[irq].applebase = devbase + 0x60;
282 else
283 mpic->fixups[irq].applebase = NULL;
284 writeb(0x11 + 2 * i, base + 2);
285 mpic->fixups[irq].data = readl(base + 4) | 0x80000000;
286 }
287}
288
 289
 290static void __init mpic_scan_ht_pics(struct mpic *mpic)
291{
292 unsigned int devfn;
293 u8 __iomem *cfgspace;
294
 295 printk(KERN_INFO "mpic: Setting up HT PICs workarounds for U3/U4\n");
296
297 /* Allocate fixups array */
298 mpic->fixups = alloc_bootmem(128 * sizeof(struct mpic_irq_fixup));
299 BUG_ON(mpic->fixups == NULL);
300 memset(mpic->fixups, 0, 128 * sizeof(struct mpic_irq_fixup));
301
302 /* Init spinlock */
303 spin_lock_init(&mpic->fixup_lock);
304
305 /* Map U3 config space. We assume all IO-APICs are on the primary bus
306 * so we only need to map 64kB.
 307 */
 308 cfgspace = ioremap(0xf2000000, 0x10000);
309 BUG_ON(cfgspace == NULL);
310
 311 /* Now we scan all slots. We do a very quick scan: we read only the
 312 * header type, vendor ID and device ID, which is plenty
 313 */
 314 for (devfn = 0; devfn < 0x100; devfn++) {
315 u8 __iomem *devbase = cfgspace + (devfn << 8);
316 u8 hdr_type = readb(devbase + PCI_HEADER_TYPE);
317 u32 l = readl(devbase + PCI_VENDOR_ID);
 318 u16 s;
319
320 DBG("devfn %x, l: %x\n", devfn, l);
321
322 /* If no device, skip */
323 if (l == 0xffffffff || l == 0x00000000 ||
324 l == 0x0000ffff || l == 0xffff0000)
325 goto next;
 326 /* Check if it supports capability lists */
327 s = readw(devbase + PCI_STATUS);
328 if (!(s & PCI_STATUS_CAP_LIST))
329 goto next;
 330
 331 mpic_scan_ht_pic(mpic, devbase, devfn, l);
 332
333 next:
334 /* next device, if function 0 */
 335 if (PCI_FUNC(devfn) == 0 && (hdr_type & 0x80) == 0)
336 devfn += 7;
337 }
338}
339
340#endif /* CONFIG_MPIC_BROKEN_U3 */
341
342
343#define mpic_irq_to_hw(virq) ((unsigned int)irq_map[virq].hwirq)
344
345/* Find an mpic associated with a given linux interrupt */
346static struct mpic *mpic_find(unsigned int irq, unsigned int *is_ipi)
347{
348 unsigned int src = mpic_irq_to_hw(irq);
349
350 if (irq < NUM_ISA_INTERRUPTS)
351 return NULL;
352 if (is_ipi)
353 *is_ipi = (src >= MPIC_VEC_IPI_0 && src <= MPIC_VEC_IPI_3);
354
355 return irq_desc[irq].chip_data;
356}
357
358/* Convert a cpu mask from logical to physical cpu numbers. */
359static inline u32 mpic_physmask(u32 cpumask)
360{
361 int i;
362 u32 mask = 0;
363
364 for (i = 0; i < NR_CPUS; ++i, cpumask >>= 1)
365 mask |= (cpumask & 1) << get_hard_smp_processor_id(i);
366 return mask;
367}
368
369#ifdef CONFIG_SMP
370/* Get the mpic structure from the IPI number */
371static inline struct mpic * mpic_from_ipi(unsigned int ipi)
372{
 373 return irq_desc[ipi].chip_data;
374}
375#endif
376
377/* Get the mpic structure from the irq number */
378static inline struct mpic * mpic_from_irq(unsigned int irq)
379{
 380 return irq_desc[irq].chip_data;
381}
382
383/* Send an EOI */
384static inline void mpic_eoi(struct mpic *mpic)
385{
386 mpic_cpu_write(MPIC_CPU_EOI, 0);
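	/* The dummy WHOAMI read below pushes the posted EOI write out to the
	 * controller before we return.
	 */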
387 (void)mpic_cpu_read(MPIC_CPU_WHOAMI);
388}
389
390#ifdef CONFIG_SMP
391static irqreturn_t mpic_ipi_action(int irq, void *dev_id, struct pt_regs *regs)
392{
 393 smp_message_recv(mpic_irq_to_hw(irq) - MPIC_VEC_IPI_0, regs);
394 return IRQ_HANDLED;
395}
396#endif /* CONFIG_SMP */
397
398/*
399 * Linux descriptor level callbacks
400 */
401
402
 403static void mpic_unmask_irq(unsigned int irq)
404{
405 unsigned int loops = 100000;
406 struct mpic *mpic = mpic_from_irq(irq);
 407 unsigned int src = mpic_irq_to_hw(irq);
 408 unsigned long flags;
 409
 410 DBG("%p: %s: enable_irq: %d (src %d)\n", mpic, mpic->name, irq, src);
 411
 412 spin_lock_irqsave(&mpic_lock, flags);
 413 mpic_irq_write(src, MPIC_IRQ_VECTOR_PRI,
414 mpic_irq_read(src, MPIC_IRQ_VECTOR_PRI) &
415 ~MPIC_VECPRI_MASK);
416 /* make sure mask gets to controller before we return to user */
417 do {
418 if (!loops--) {
419 printk(KERN_ERR "mpic_enable_irq timeout\n");
420 break;
421 }
422 } while(mpic_irq_read(src, MPIC_IRQ_VECTOR_PRI) & MPIC_VECPRI_MASK);
423 spin_unlock_irqrestore(&mpic_lock, flags);
424}
425
 426static void mpic_mask_irq(unsigned int irq)
427{
428 unsigned int loops = 100000;
429 struct mpic *mpic = mpic_from_irq(irq);
 430 unsigned int src = mpic_irq_to_hw(irq);
 431 unsigned long flags;
432
433 DBG("%s: disable_irq: %d (src %d)\n", mpic->name, irq, src);
434
 435 spin_lock_irqsave(&mpic_lock, flags);
 436 mpic_irq_write(src, MPIC_IRQ_VECTOR_PRI,
437 mpic_irq_read(src, MPIC_IRQ_VECTOR_PRI) |
438 MPIC_VECPRI_MASK);
439
440 /* make sure mask gets to controller before we return to user */
441 do {
442 if (!loops--) {
 443 printk(KERN_ERR "mpic_disable_irq timeout\n");
444 break;
445 }
446 } while(!(mpic_irq_read(src, MPIC_IRQ_VECTOR_PRI) & MPIC_VECPRI_MASK));
 447 spin_unlock_irqrestore(&mpic_lock, flags);
448}
449
 450static void mpic_end_irq(unsigned int irq)
 451{
452 struct mpic *mpic = mpic_from_irq(irq);
453
454#ifdef DEBUG_IRQ
455 DBG("%s: end_irq: %d\n", mpic->name, irq);
456#endif
457 /* We always EOI on end_irq() even for edge interrupts since that
458 * should only lower the priority, the MPIC should have properly
459 * latched another edge interrupt coming in anyway
460 */
461
462 mpic_eoi(mpic);
463}
464
 465#ifdef CONFIG_MPIC_BROKEN_U3
466
467static void mpic_unmask_ht_irq(unsigned int irq)
468{
 469 struct mpic *mpic = mpic_from_irq(irq);
 470 unsigned int src = mpic_irq_to_hw(irq);
 471
 472 mpic_unmask_irq(irq);
 473
474 if (irq_desc[irq].status & IRQ_LEVEL)
475 mpic_ht_end_irq(mpic, src);
476}
477
478static unsigned int mpic_startup_ht_irq(unsigned int irq)
479{
480 struct mpic *mpic = mpic_from_irq(irq);
 481 unsigned int src = mpic_irq_to_hw(irq);
 482
483 mpic_unmask_irq(irq);
484 mpic_startup_ht_interrupt(mpic, src, irq_desc[irq].status);
485
486 return 0;
487}
488
489static void mpic_shutdown_ht_irq(unsigned int irq)
490{
491 struct mpic *mpic = mpic_from_irq(irq);
 492 unsigned int src = mpic_irq_to_hw(irq);
493
494 mpic_shutdown_ht_interrupt(mpic, src, irq_desc[irq].status);
495 mpic_mask_irq(irq);
496}
497
498static void mpic_end_ht_irq(unsigned int irq)
499{
500 struct mpic *mpic = mpic_from_irq(irq);
 501 unsigned int src = mpic_irq_to_hw(irq);
 502
 503#ifdef DEBUG_IRQ
 504 DBG("%s: end_irq: %d\n", mpic->name, irq);
 505#endif
506 /* We always EOI on end_irq() even for edge interrupts since that
507 * should only lower the priority, the MPIC should have properly
508 * latched another edge interrupt coming in anyway
509 */
510
511 if (irq_desc[irq].status & IRQ_LEVEL)
512 mpic_ht_end_irq(mpic, src);
513 mpic_eoi(mpic);
514}
515
516#endif /* CONFIG_MPIC_BROKEN_U3 */
517
518#ifdef CONFIG_SMP
519
 520static void mpic_unmask_ipi(unsigned int irq)
521{
522 struct mpic *mpic = mpic_from_ipi(irq);
 523 unsigned int src = mpic_irq_to_hw(irq) - MPIC_VEC_IPI_0;
524
525 DBG("%s: enable_ipi: %d (ipi %d)\n", mpic->name, irq, src);
526 mpic_ipi_write(src, mpic_ipi_read(src) & ~MPIC_VECPRI_MASK);
527}
528
 529static void mpic_mask_ipi(unsigned int irq)
530{
531 /* NEVER disable an IPI... that's just plain wrong! */
532}
533
534static void mpic_end_ipi(unsigned int irq)
535{
536 struct mpic *mpic = mpic_from_ipi(irq);
537
538 /*
539 * IPIs are marked IRQ_PER_CPU. This has the side effect of
540 * preventing the IRQ_PENDING/IRQ_INPROGRESS logic from
541 * applying to them. We EOI them late to avoid re-entering.
 542 * We mark IPI's with IRQF_DISABLED as they must run with
543 * irqs disabled.
544 */
545 mpic_eoi(mpic);
546}
547
548#endif /* CONFIG_SMP */
549
550static void mpic_set_affinity(unsigned int irq, cpumask_t cpumask)
551{
552 struct mpic *mpic = mpic_from_irq(irq);
 553 unsigned int src = mpic_irq_to_hw(irq);
554
555 cpumask_t tmp;
556
557 cpus_and(tmp, cpumask, cpu_online_map);
558
 559 mpic_irq_write(src, MPIC_IRQ_DESTINATION,
560 mpic_physmask(cpus_addr(tmp)[0]));
561}
562
563static unsigned int mpic_flags_to_vecpri(unsigned int flags, int *level)
564{
565 unsigned int vecpri;
566
567 /* Now convert sense value */
568 switch(flags & IRQ_TYPE_SENSE_MASK) {
569 case IRQ_TYPE_EDGE_RISING:
570 vecpri = MPIC_VECPRI_SENSE_EDGE |
571 MPIC_VECPRI_POLARITY_POSITIVE;
572 *level = 0;
573 break;
574 case IRQ_TYPE_EDGE_FALLING:
575 vecpri = MPIC_VECPRI_SENSE_EDGE |
576 MPIC_VECPRI_POLARITY_NEGATIVE;
577 *level = 0;
578 break;
579 case IRQ_TYPE_LEVEL_HIGH:
580 vecpri = MPIC_VECPRI_SENSE_LEVEL |
581 MPIC_VECPRI_POLARITY_POSITIVE;
582 *level = 1;
583 break;
584 case IRQ_TYPE_LEVEL_LOW:
585 default:
586 vecpri = MPIC_VECPRI_SENSE_LEVEL |
587 MPIC_VECPRI_POLARITY_NEGATIVE;
588 *level = 1;
589 }
590 return vecpri;
591}
592
593static struct irq_chip mpic_irq_chip = {
594 .mask = mpic_mask_irq,
595 .unmask = mpic_unmask_irq,
596 .eoi = mpic_end_irq,
597};
598
599#ifdef CONFIG_SMP
600static struct irq_chip mpic_ipi_chip = {
601 .mask = mpic_mask_ipi,
602 .unmask = mpic_unmask_ipi,
603 .eoi = mpic_end_ipi,
604};
605#endif /* CONFIG_SMP */
606
607#ifdef CONFIG_MPIC_BROKEN_U3
608static struct irq_chip mpic_irq_ht_chip = {
609 .startup = mpic_startup_ht_irq,
610 .shutdown = mpic_shutdown_ht_irq,
611 .mask = mpic_mask_irq,
612 .unmask = mpic_unmask_ht_irq,
613 .eoi = mpic_end_ht_irq,
614};
615#endif /* CONFIG_MPIC_BROKEN_U3 */
616
 617
618static int mpic_host_match(struct irq_host *h, struct device_node *node)
619{
620 struct mpic *mpic = h->host_data;
621
622 /* Exact match, unless mpic node is NULL */
623 return mpic->of_node == NULL || mpic->of_node == node;
624}
625
626static int mpic_host_map(struct irq_host *h, unsigned int virq,
627 irq_hw_number_t hw, unsigned int flags)
628{
629 struct irq_desc *desc = get_irq_desc(virq);
630 struct irq_chip *chip;
631 struct mpic *mpic = h->host_data;
 632 u32 v, vecpri = MPIC_VECPRI_SENSE_LEVEL |
633 MPIC_VECPRI_POLARITY_NEGATIVE;
634 int level;
 635 unsigned long iflags;
636
637 pr_debug("mpic: map virq %d, hwirq 0x%lx, flags: 0x%x\n",
638 virq, hw, flags);
639
640 if (hw == MPIC_VEC_SPURRIOUS)
641 return -EINVAL;
642#ifdef CONFIG_SMP
643 else if (hw >= MPIC_VEC_IPI_0) {
644 WARN_ON(!(mpic->flags & MPIC_PRIMARY));
645
646 pr_debug("mpic: mapping as IPI\n");
647 set_irq_chip_data(virq, mpic);
648 set_irq_chip_and_handler(virq, &mpic->hc_ipi,
649 handle_percpu_irq);
650 return 0;
651 }
652#endif /* CONFIG_SMP */
653
654 if (hw >= mpic->irq_count)
655 return -EINVAL;
656
657 /* If no sense provided, check default sense array */
658 if (((flags & IRQ_TYPE_SENSE_MASK) == IRQ_TYPE_NONE) &&
659 mpic->senses && hw < mpic->senses_count)
660 flags |= mpic->senses[hw];
661
662 vecpri = mpic_flags_to_vecpri(flags, &level);
663 if (level)
664 desc->status |= IRQ_LEVEL;
665 chip = &mpic->hc_irq;
666
667#ifdef CONFIG_MPIC_BROKEN_U3
668 /* Check for HT interrupts, override vecpri */
669 if (mpic_is_ht_interrupt(mpic, hw)) {
670 vecpri &= ~(MPIC_VECPRI_SENSE_MASK |
671 MPIC_VECPRI_POLARITY_MASK);
672 vecpri |= MPIC_VECPRI_POLARITY_POSITIVE;
673 chip = &mpic->hc_ht_irq;
674 }
675#endif
676
677 /* Reconfigure irq. We must preserve the mask bit as we can be called
678 * while the interrupt is still active (This may change in the future
679 * but for now, it is the case).
680 */
681 spin_lock_irqsave(&mpic_lock, iflags);
682 v = mpic_irq_read(hw, MPIC_IRQ_VECTOR_PRI);
683 vecpri = (v &
684 ~(MPIC_VECPRI_POLARITY_MASK | MPIC_VECPRI_SENSE_MASK)) |
685 vecpri;
686 if (vecpri != v)
687 mpic_irq_write(hw, MPIC_IRQ_VECTOR_PRI, vecpri);
688 spin_unlock_irqrestore(&mpic_lock, iflags);
689
690 pr_debug("mpic: mapping as IRQ, vecpri = 0x%08x (was 0x%08x)\n",
691 vecpri, v);
692
693 set_irq_chip_data(virq, mpic);
694 set_irq_chip_and_handler(virq, chip, handle_fasteoi_irq);
695 return 0;
696}
697
698static int mpic_host_xlate(struct irq_host *h, struct device_node *ct,
699 u32 *intspec, unsigned int intsize,
700 irq_hw_number_t *out_hwirq, unsigned int *out_flags)
701
702{
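	/* Device tree sense cells 0..3 are translated into the corresponding
	 * generic IRQ_TYPE_* flags via the table below.
	 */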
703 static unsigned char map_mpic_senses[4] = {
704 IRQ_TYPE_EDGE_RISING,
705 IRQ_TYPE_LEVEL_LOW,
706 IRQ_TYPE_LEVEL_HIGH,
707 IRQ_TYPE_EDGE_FALLING,
708 };
709
710 *out_hwirq = intspec[0];
711 if (intsize > 1 && intspec[1] < 4)
712 *out_flags = map_mpic_senses[intspec[1]];
713 else
714 *out_flags = IRQ_TYPE_NONE;
715
716 return 0;
717}
718
719static struct irq_host_ops mpic_host_ops = {
720 .match = mpic_host_match,
721 .map = mpic_host_map,
722 .xlate = mpic_host_xlate,
723};
724
725/*
726 * Exported functions
727 */
728
729struct mpic * __init mpic_alloc(struct device_node *node,
730 unsigned long phys_addr,
731 unsigned int flags,
732 unsigned int isu_size,
 733 unsigned int irq_count,
734 const char *name)
735{
736 struct mpic *mpic;
737 u32 reg;
738 const char *vers;
739 int i;
740
741 mpic = alloc_bootmem(sizeof(struct mpic));
742 if (mpic == NULL)
743 return NULL;
744
745 memset(mpic, 0, sizeof(struct mpic));
746 mpic->name = name;
 747 mpic->of_node = node ? of_node_get(node) : NULL;
 748
749 mpic->irqhost = irq_alloc_host(IRQ_HOST_MAP_LINEAR, 256,
750 &mpic_host_ops,
751 MPIC_VEC_SPURRIOUS);
752 if (mpic->irqhost == NULL) {
753 of_node_put(node);
754 return NULL;
755 }
756
757 mpic->irqhost->host_data = mpic;
 758 mpic->hc_irq = mpic_irq_chip;
 759 mpic->hc_irq.typename = name;
760 if (flags & MPIC_PRIMARY)
761 mpic->hc_irq.set_affinity = mpic_set_affinity;
762#ifdef CONFIG_MPIC_BROKEN_U3
763 mpic->hc_ht_irq = mpic_irq_ht_chip;
764 mpic->hc_ht_irq.typename = name;
765 if (flags & MPIC_PRIMARY)
766 mpic->hc_ht_irq.set_affinity = mpic_set_affinity;
767#endif /* CONFIG_MPIC_BROKEN_U3 */
 768#ifdef CONFIG_SMP
 769 mpic->hc_ipi = mpic_ipi_chip;
 770 mpic->hc_ipi.typename = name;
771#endif /* CONFIG_SMP */
772
773 mpic->flags = flags;
774 mpic->isu_size = isu_size;
 775 mpic->irq_count = irq_count;
 776 mpic->num_sources = 0; /* so far */
777
778 /* Map the global registers */
779 mpic->gregs = ioremap(phys_addr + MPIC_GREG_BASE, 0x1000);
 780 mpic->tmregs = mpic->gregs + ((MPIC_TIMER_BASE - MPIC_GREG_BASE) >> 2);
781 BUG_ON(mpic->gregs == NULL);
782
783 /* Reset */
784 if (flags & MPIC_WANTS_RESET) {
785 mpic_write(mpic->gregs, MPIC_GREG_GLOBAL_CONF_0,
786 mpic_read(mpic->gregs, MPIC_GREG_GLOBAL_CONF_0)
787 | MPIC_GREG_GCONF_RESET);
788 while( mpic_read(mpic->gregs, MPIC_GREG_GLOBAL_CONF_0)
789 & MPIC_GREG_GCONF_RESET)
790 mb();
791 }
792
793 /* Read feature register, calculate num CPUs and, for non-ISU
794 * MPICs, num sources as well. On ISU MPICs, sources are counted
795 * as ISUs are added
796 */
797 reg = mpic_read(mpic->gregs, MPIC_GREG_FEATURE_0);
798 mpic->num_cpus = ((reg & MPIC_GREG_FEATURE_LAST_CPU_MASK)
799 >> MPIC_GREG_FEATURE_LAST_CPU_SHIFT) + 1;
800 if (isu_size == 0)
801 mpic->num_sources = ((reg & MPIC_GREG_FEATURE_LAST_SRC_MASK)
802 >> MPIC_GREG_FEATURE_LAST_SRC_SHIFT) + 1;
803
804 /* Map the per-CPU registers */
805 for (i = 0; i < mpic->num_cpus; i++) {
806 mpic->cpuregs[i] = ioremap(phys_addr + MPIC_CPU_BASE +
807 i * MPIC_CPU_STRIDE, 0x1000);
808 BUG_ON(mpic->cpuregs[i] == NULL);
809 }
810
811 /* Initialize main ISU if none provided */
812 if (mpic->isu_size == 0) {
813 mpic->isu_size = mpic->num_sources;
814 mpic->isus[0] = ioremap(phys_addr + MPIC_IRQ_BASE,
815 MPIC_IRQ_STRIDE * mpic->isu_size);
816 BUG_ON(mpic->isus[0] == NULL);
817 }
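	/* isu_shift is the ceiling of log2(isu_size), so isu_size need not be
	 * a power of two; sources are then decomposed as isu = src >> isu_shift
	 * and idx = src & isu_mask in _mpic_irq_read()/_mpic_irq_write().
	 */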
818 mpic->isu_shift = 1 + __ilog2(mpic->isu_size - 1);
819 mpic->isu_mask = (1 << mpic->isu_shift) - 1;
820
821 /* Display version */
822 switch (reg & MPIC_GREG_FEATURE_VERSION_MASK) {
823 case 1:
824 vers = "1.0";
825 break;
826 case 2:
827 vers = "1.2";
828 break;
829 case 3:
830 vers = "1.3";
831 break;
832 default:
833 vers = "<unknown>";
834 break;
835 }
836 printk(KERN_INFO "mpic: Setting up MPIC \"%s\" version %s at %lx, max %d CPUs\n",
837 name, vers, phys_addr, mpic->num_cpus);
838 printk(KERN_INFO "mpic: ISU size: %d, shift: %d, mask: %x\n", mpic->isu_size,
839 mpic->isu_shift, mpic->isu_mask);
840
841 mpic->next = mpics;
842 mpics = mpic;
843
 844 if (flags & MPIC_PRIMARY) {
 845 mpic_primary = mpic;
846 irq_set_default_host(mpic->irqhost);
847 }
848
849 return mpic;
850}
851
852void __init mpic_assign_isu(struct mpic *mpic, unsigned int isu_num,
853 unsigned long phys_addr)
854{
855 unsigned int isu_first = isu_num * mpic->isu_size;
856
857 BUG_ON(isu_num >= MPIC_MAX_ISU);
858
859 mpic->isus[isu_num] = ioremap(phys_addr, MPIC_IRQ_STRIDE * mpic->isu_size);
860 if ((isu_first + mpic->isu_size) > mpic->num_sources)
861 mpic->num_sources = isu_first + mpic->isu_size;
862}
863
864void __init mpic_set_default_senses(struct mpic *mpic, u8 *senses, int count)
865{
866 mpic->senses = senses;
867 mpic->senses_count = count;
868}
869
870void __init mpic_init(struct mpic *mpic)
871{
872 int i;
873
874 BUG_ON(mpic->num_sources == 0);
875 WARN_ON(mpic->num_sources > MPIC_VEC_IPI_0);
876
877 /* Sanitize source count */
878 if (mpic->num_sources > MPIC_VEC_IPI_0)
879 mpic->num_sources = MPIC_VEC_IPI_0;
880
881 printk(KERN_INFO "mpic: Initializing for %d sources\n", mpic->num_sources);
882
883 /* Set current processor priority to max */
884 mpic_cpu_write(MPIC_CPU_CURRENT_TASK_PRI, 0xf);
885
886 /* Initialize timers: just disable them all */
887 for (i = 0; i < 4; i++) {
888 mpic_write(mpic->tmregs,
889 i * MPIC_TIMER_STRIDE + MPIC_TIMER_DESTINATION, 0);
890 mpic_write(mpic->tmregs,
891 i * MPIC_TIMER_STRIDE + MPIC_TIMER_VECTOR_PRI,
892 MPIC_VECPRI_MASK |
893 (MPIC_VEC_TIMER_0 + i));
894 }
895
896 /* Initialize IPIs to our reserved vectors and mark them disabled for now */
897 mpic_test_broken_ipi(mpic);
898 for (i = 0; i < 4; i++) {
899 mpic_ipi_write(i,
900 MPIC_VECPRI_MASK |
901 (10 << MPIC_VECPRI_PRIORITY_SHIFT) |
902 (MPIC_VEC_IPI_0 + i));
903 }
904
905 /* Initialize interrupt sources */
906 if (mpic->irq_count == 0)
907 mpic->irq_count = mpic->num_sources;
908
909#ifdef CONFIG_MPIC_BROKEN_U3
 910 /* Do the HT PIC fixups on U3 broken mpic */
911 DBG("MPIC flags: %x\n", mpic->flags);
912 if ((mpic->flags & MPIC_BROKEN_U3) && (mpic->flags & MPIC_PRIMARY))
 913 mpic_scan_ht_pics(mpic);
914#endif /* CONFIG_MPIC_BROKEN_U3 */
915
916 for (i = 0; i < mpic->num_sources; i++) {
917 /* start with vector = source number, and masked */
918 u32 vecpri = MPIC_VECPRI_MASK | i | (8 << MPIC_VECPRI_PRIORITY_SHIFT);
 919 int level = 1;
 920
 921 /* do senses munging */
 922 if (mpic->senses && i < mpic->senses_count)
923 vecpri |= mpic_flags_to_vecpri(mpic->senses[i],
924 &level);
 925 else
926 vecpri |= MPIC_VECPRI_SENSE_LEVEL;
927
928 /* deal with broken U3 */
929 if (mpic->flags & MPIC_BROKEN_U3) {
930#ifdef CONFIG_MPIC_BROKEN_U3
931 if (mpic_is_ht_interrupt(mpic, i)) {
932 vecpri &= ~(MPIC_VECPRI_SENSE_MASK |
933 MPIC_VECPRI_POLARITY_MASK);
934 vecpri |= MPIC_VECPRI_POLARITY_POSITIVE;
935 }
936#else
937 printk(KERN_ERR "mpic: BROKEN_U3 set, but CONFIG doesn't match\n");
938#endif
939 }
940
941 DBG("setup source %d, vecpri: %08x, level: %d\n", i, vecpri,
942 (level != 0));
943
944 /* init hw */
945 mpic_irq_write(i, MPIC_IRQ_VECTOR_PRI, vecpri);
946 mpic_irq_write(i, MPIC_IRQ_DESTINATION,
947 1 << hard_smp_processor_id());
948 }
949
 950 /* Init spurious vector */
951 mpic_write(mpic->gregs, MPIC_GREG_SPURIOUS, MPIC_VEC_SPURRIOUS);
952
953 /* Disable 8259 passthrough */
954 mpic_write(mpic->gregs, MPIC_GREG_GLOBAL_CONF_0,
955 mpic_read(mpic->gregs, MPIC_GREG_GLOBAL_CONF_0)
956 | MPIC_GREG_GCONF_8259_PTHROU_DIS);
957
958 /* Set current processor priority to 0 */
959 mpic_cpu_write(MPIC_CPU_CURRENT_TASK_PRI, 0);
960}
961
962void __init mpic_set_clk_ratio(struct mpic *mpic, u32 clock_ratio)
963{
964 u32 v;
965
966 v = mpic_read(mpic->gregs, MPIC_GREG_GLOBAL_CONF_1);
967 v &= ~MPIC_GREG_GLOBAL_CONF_1_CLK_RATIO_MASK;
968 v |= MPIC_GREG_GLOBAL_CONF_1_CLK_RATIO(clock_ratio);
969 mpic_write(mpic->gregs, MPIC_GREG_GLOBAL_CONF_1, v);
970}
 971
972void __init mpic_set_serial_int(struct mpic *mpic, int enable)
973{
 974 unsigned long flags;
975 u32 v;
976
 977 spin_lock_irqsave(&mpic_lock, flags);
978 v = mpic_read(mpic->gregs, MPIC_GREG_GLOBAL_CONF_1);
979 if (enable)
980 v |= MPIC_GREG_GLOBAL_CONF_1_SIE;
981 else
982 v &= ~MPIC_GREG_GLOBAL_CONF_1_SIE;
983 mpic_write(mpic->gregs, MPIC_GREG_GLOBAL_CONF_1, v);
 984 spin_unlock_irqrestore(&mpic_lock, flags);
 985}
986
987void mpic_irq_set_priority(unsigned int irq, unsigned int pri)
988{
989 int is_ipi;
990 struct mpic *mpic = mpic_find(irq, &is_ipi);
 991 unsigned int src = mpic_irq_to_hw(irq);
992 unsigned long flags;
993 u32 reg;
994
995 spin_lock_irqsave(&mpic_lock, flags);
996 if (is_ipi) {
 997 reg = mpic_ipi_read(src - MPIC_VEC_IPI_0) &
 998 ~MPIC_VECPRI_PRIORITY_MASK;
 999 mpic_ipi_write(src - MPIC_VEC_IPI_0,
1000 reg | (pri << MPIC_VECPRI_PRIORITY_SHIFT));
1001 } else {
 1002 reg = mpic_irq_read(src, MPIC_IRQ_VECTOR_PRI)
 1003 & ~MPIC_VECPRI_PRIORITY_MASK;
 1004 mpic_irq_write(src, MPIC_IRQ_VECTOR_PRI,
1005 reg | (pri << MPIC_VECPRI_PRIORITY_SHIFT));
1006 }
1007 spin_unlock_irqrestore(&mpic_lock, flags);
1008}
1009
1010unsigned int mpic_irq_get_priority(unsigned int irq)
1011{
1012 int is_ipi;
1013 struct mpic *mpic = mpic_find(irq, &is_ipi);
 1014 unsigned int src = mpic_irq_to_hw(irq);
1015 unsigned long flags;
1016 u32 reg;
1017
1018 spin_lock_irqsave(&mpic_lock, flags);
1019 if (is_ipi)
 1020 reg = mpic_ipi_read(src - MPIC_VEC_IPI_0);
 1021 else
 1022 reg = mpic_irq_read(src, MPIC_IRQ_VECTOR_PRI);
1023 spin_unlock_irqrestore(&mpic_lock, flags);
1024 return (reg & MPIC_VECPRI_PRIORITY_MASK) >> MPIC_VECPRI_PRIORITY_SHIFT;
1025}
1026
1027void mpic_setup_this_cpu(void)
1028{
1029#ifdef CONFIG_SMP
1030 struct mpic *mpic = mpic_primary;
1031 unsigned long flags;
1032 u32 msk = 1 << hard_smp_processor_id();
1033 unsigned int i;
1034
1035 BUG_ON(mpic == NULL);
1036
1037 DBG("%s: setup_this_cpu(%d)\n", mpic->name, hard_smp_processor_id());
1038
1039 spin_lock_irqsave(&mpic_lock, flags);
1040
1041 /* let the mpic know we want intrs. default affinity is 0xffffffff
1042 * until changed via /proc. That's how it's done on x86. If we want
1043 * it differently, then we should make sure we also change the default
 1044 * values of irq_desc[].affinity in irq.c.
1045 */
1046 if (distribute_irqs) {
1047 for (i = 0; i < mpic->num_sources ; i++)
1048 mpic_irq_write(i, MPIC_IRQ_DESTINATION,
1049 mpic_irq_read(i, MPIC_IRQ_DESTINATION) | msk);
1050 }
1051
1052 /* Set current processor priority to 0 */
1053 mpic_cpu_write(MPIC_CPU_CURRENT_TASK_PRI, 0);
1054
1055 spin_unlock_irqrestore(&mpic_lock, flags);
1056#endif /* CONFIG_SMP */
1057}
1058
1059int mpic_cpu_get_priority(void)
1060{
1061 struct mpic *mpic = mpic_primary;
1062
1063 return mpic_cpu_read(MPIC_CPU_CURRENT_TASK_PRI);
1064}
1065
1066void mpic_cpu_set_priority(int prio)
1067{
1068 struct mpic *mpic = mpic_primary;
1069
1070 prio &= MPIC_CPU_TASKPRI_MASK;
1071 mpic_cpu_write(MPIC_CPU_CURRENT_TASK_PRI, prio);
1072}
1073
1074/*
1075 * XXX: someone who knows mpic should check this.
1076 * do we need to eoi the ipi including for kexec cpu here (see xics comments)?
1077 * or can we reset the mpic in the new kernel?
1078 */
1079void mpic_teardown_this_cpu(int secondary)
1080{
1081 struct mpic *mpic = mpic_primary;
1082 unsigned long flags;
1083 u32 msk = 1 << hard_smp_processor_id();
1084 unsigned int i;
1085
1086 BUG_ON(mpic == NULL);
1087
1088 DBG("%s: teardown_this_cpu(%d)\n", mpic->name, hard_smp_processor_id());
1089 spin_lock_irqsave(&mpic_lock, flags);
1090
1091 /* let the mpic know we don't want intrs. */
1092 for (i = 0; i < mpic->num_sources ; i++)
1093 mpic_irq_write(i, MPIC_IRQ_DESTINATION,
1094 mpic_irq_read(i, MPIC_IRQ_DESTINATION) & ~msk);
1095
1096 /* Set current processor priority to max */
1097 mpic_cpu_write(MPIC_CPU_CURRENT_TASK_PRI, 0xf);
1098
1099 spin_unlock_irqrestore(&mpic_lock, flags);
1100}
1101
1102
1103void mpic_send_ipi(unsigned int ipi_no, unsigned int cpu_mask)
1104{
1105 struct mpic *mpic = mpic_primary;
1106
1107 BUG_ON(mpic == NULL);
1108
 1109#ifdef DEBUG_IPI
 1110 DBG("%s: send_ipi(ipi_no: %d)\n", mpic->name, ipi_no);
 1111#endif
1112
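	/* Writing a physical CPU mask to the per-CPU IPI dispatch register
	 * raises IPI number ipi_no on every CPU whose bit is set.
	 */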
1113 mpic_cpu_write(MPIC_CPU_IPI_DISPATCH_0 + ipi_no * 0x10,
1114 mpic_physmask(cpu_mask & cpus_addr(cpu_online_map)[0]));
1115}
1116
 1117unsigned int mpic_get_one_irq(struct mpic *mpic, struct pt_regs *regs)
 1118{
 1119 u32 src;
 1120
 1121 src = mpic_cpu_read(MPIC_CPU_INTACK) & MPIC_VECPRI_VECTOR_MASK;
 1122#ifdef DEBUG_LOW
 1123 DBG("%s: get_one_irq(): %d\n", mpic->name, src);
 1124#endif
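	/* The MPIC_CPU_INTACK read above acknowledged the highest-priority
	 * pending source and returned its vector; map it back to a virq via
	 * the linear revmap, treating the spurious vector as nothing pending.
	 */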
1125 if (unlikely(src == MPIC_VEC_SPURRIOUS))
1126 return NO_IRQ;
1127 return irq_linear_revmap(mpic->irqhost, src);
1128}
1129
 1130unsigned int mpic_get_irq(struct pt_regs *regs)
1131{
1132 struct mpic *mpic = mpic_primary;
1133
1134 BUG_ON(mpic == NULL);
1135
1136 return mpic_get_one_irq(mpic, regs);
1137}
1138
1139
1140#ifdef CONFIG_SMP
1141void mpic_request_ipis(void)
1142{
1143 struct mpic *mpic = mpic_primary;
1144 int i;
1145 static char *ipi_names[] = {
1146 "IPI0 (call function)",
1147 "IPI1 (reschedule)",
1148 "IPI2 (unused)",
1149 "IPI3 (debugger break)",
1150 };
 1151 BUG_ON(mpic == NULL);
 1152
1153 printk(KERN_INFO "mpic: requesting IPIs ... \n");
1154
1155 for (i = 0; i < 4; i++) {
1156 unsigned int vipi = irq_create_mapping(mpic->irqhost,
1157 MPIC_VEC_IPI_0 + i, 0);
1158 if (vipi == NO_IRQ) {
1159 printk(KERN_ERR "Failed to map IPI %d\n", i);
1160 break;
1161 }
1162 request_irq(vipi, mpic_ipi_action, IRQF_DISABLED,
1163 ipi_names[i], mpic);
1164 }
 1165}
1166
1167void smp_mpic_message_pass(int target, int msg)
1168{
1169 /* make sure we're sending something that translates to an IPI */
1170 if ((unsigned int)msg > 3) {
1171 printk("SMP %d: smp_message_pass: unknown msg %d\n",
1172 smp_processor_id(), msg);
1173 return;
1174 }
1175 switch (target) {
1176 case MSG_ALL:
1177 mpic_send_ipi(msg, 0xffffffff);
1178 break;
1179 case MSG_ALL_BUT_SELF:
1180 mpic_send_ipi(msg, 0xffffffff & ~(1 << smp_processor_id()));
1181 break;
1182 default:
1183 mpic_send_ipi(msg, 1 << target);
1184 break;
1185 }
1186}
 1187#endif /* CONFIG_SMP */