/*
 * arch/powerpc/sysdev/mpic.c
 *
 * Driver for interrupt controllers following the OpenPIC standard, the
 * common implementation being IBM's MPIC. This driver also can deal
 * with various broken implementations of this HW.
 *
 * Copyright (C) 2004 Benjamin Herrenschmidt, IBM Corp.
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file COPYING in the main directory of this archive
 * for more details.
 */

#undef DEBUG

#include <linux/config.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/irq.h>
#include <linux/smp.h>
#include <linux/interrupt.h>
#include <linux/bootmem.h>
#include <linux/spinlock.h>
#include <linux/pci.h>

#include <asm/ptrace.h>
#include <asm/signal.h>
#include <asm/io.h>
#include <asm/pgtable.h>
#include <asm/irq.h>
#include <asm/machdep.h>
#include <asm/mpic.h>
#include <asm/smp.h>

#ifdef DEBUG
#define DBG(fmt...) printk(fmt)
#else
#define DBG(fmt...)
#endif

static struct mpic *mpics;
static struct mpic *mpic_primary;
static DEFINE_SPINLOCK(mpic_lock);

#ifdef CONFIG_PPC32 /* XXX for now */
#ifdef CONFIG_IRQ_ALL_CPUS
#define distribute_irqs (1)
#else
#define distribute_irqs (0)
#endif
#endif

/*
 * Register accessor functions
 */


static inline u32 _mpic_read(unsigned int be, volatile u32 __iomem *base,
                             unsigned int reg)
{
        if (be)
                return in_be32(base + (reg >> 2));
        else
                return in_le32(base + (reg >> 2));
}

static inline void _mpic_write(unsigned int be, volatile u32 __iomem *base,
                               unsigned int reg, u32 value)
{
        if (be)
                out_be32(base + (reg >> 2), value);
        else
                out_le32(base + (reg >> 2), value);
}
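
/*
 * Note on the accessors above: "reg" is a byte offset into the mapped
 * region while "base" is a u32 __iomem pointer, hence the "reg >> 2"
 * scaling. Illustrative example (the offset is made up): reading the
 * 32-bit register at byte offset 0x10 of a big-endian block is
 * in_be32(base + 4), i.e. _mpic_read(1, base, 0x10).
 */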

static inline u32 _mpic_ipi_read(struct mpic *mpic, unsigned int ipi)
{
        unsigned int be = (mpic->flags & MPIC_BIG_ENDIAN) != 0;
        unsigned int offset = MPIC_GREG_IPI_VECTOR_PRI_0 + (ipi * 0x10);

        if (mpic->flags & MPIC_BROKEN_IPI)
                be = !be;
        return _mpic_read(be, mpic->gregs, offset);
}

static inline void _mpic_ipi_write(struct mpic *mpic, unsigned int ipi, u32 value)
{
        unsigned int offset = MPIC_GREG_IPI_VECTOR_PRI_0 + (ipi * 0x10);

        _mpic_write(mpic->flags & MPIC_BIG_ENDIAN, mpic->gregs, offset, value);
}

static inline u32 _mpic_cpu_read(struct mpic *mpic, unsigned int reg)
{
        unsigned int cpu = 0;

        if (mpic->flags & MPIC_PRIMARY)
                cpu = hard_smp_processor_id();

        return _mpic_read(mpic->flags & MPIC_BIG_ENDIAN, mpic->cpuregs[cpu], reg);
}

static inline void _mpic_cpu_write(struct mpic *mpic, unsigned int reg, u32 value)
{
        unsigned int cpu = 0;

        if (mpic->flags & MPIC_PRIMARY)
                cpu = hard_smp_processor_id();

        _mpic_write(mpic->flags & MPIC_BIG_ENDIAN, mpic->cpuregs[cpu], reg, value);
}

static inline u32 _mpic_irq_read(struct mpic *mpic, unsigned int src_no, unsigned int reg)
{
        unsigned int isu = src_no >> mpic->isu_shift;
        unsigned int idx = src_no & mpic->isu_mask;

        return _mpic_read(mpic->flags & MPIC_BIG_ENDIAN, mpic->isus[isu],
                          reg + (idx * MPIC_IRQ_STRIDE));
}

static inline void _mpic_irq_write(struct mpic *mpic, unsigned int src_no,
                                   unsigned int reg, u32 value)
{
        unsigned int isu = src_no >> mpic->isu_shift;
        unsigned int idx = src_no & mpic->isu_mask;

        _mpic_write(mpic->flags & MPIC_BIG_ENDIAN, mpic->isus[isu],
                    reg + (idx * MPIC_IRQ_STRIDE), value);
}
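
/*
 * Worked example of the source decomposition above (values are
 * illustrative, not from any particular board): with isu_size = 16 the
 * allocator computes isu_shift = 4 and isu_mask = 0xf, so source 21
 * splits into isu = 21 >> 4 = 1 and idx = 21 & 0xf = 5, and its
 * registers live at byte offset reg + 5 * MPIC_IRQ_STRIDE within
 * mpic->isus[1].
 */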

#define mpic_read(b,r)          _mpic_read(mpic->flags & MPIC_BIG_ENDIAN,(b),(r))
#define mpic_write(b,r,v)       _mpic_write(mpic->flags & MPIC_BIG_ENDIAN,(b),(r),(v))
#define mpic_ipi_read(i)        _mpic_ipi_read(mpic,(i))
#define mpic_ipi_write(i,v)     _mpic_ipi_write(mpic,(i),(v))
#define mpic_cpu_read(i)        _mpic_cpu_read(mpic,(i))
#define mpic_cpu_write(i,v)     _mpic_cpu_write(mpic,(i),(v))
#define mpic_irq_read(s,r)      _mpic_irq_read(mpic,(s),(r))
#define mpic_irq_write(s,r,v)   _mpic_irq_write(mpic,(s),(r),(v))


/*
 * Low level utility functions
 */



/* Check if we have one of those nice broken MPICs with a flipped endian on
 * reads from IPI registers
 */
static void __init mpic_test_broken_ipi(struct mpic *mpic)
{
        u32 r;

        mpic_write(mpic->gregs, MPIC_GREG_IPI_VECTOR_PRI_0, MPIC_VECPRI_MASK);
        r = mpic_read(mpic->gregs, MPIC_GREG_IPI_VECTOR_PRI_0);

        if (r == le32_to_cpu(MPIC_VECPRI_MASK)) {
                printk(KERN_INFO "mpic: Detected reversed IPI registers\n");
                mpic->flags |= MPIC_BROKEN_IPI;
        }
}

#ifdef CONFIG_MPIC_BROKEN_U3

/* Test if an interrupt is sourced from HyperTransport (used on broken U3s)
 * to force the edge setting on the MPIC and do the ack workaround.
 */
static inline int mpic_is_ht_interrupt(struct mpic *mpic, unsigned int source_no)
{
        if (source_no >= 128 || !mpic->fixups)
                return 0;
        return mpic->fixups[source_no].base != NULL;
}

static inline void mpic_apic_end_irq(struct mpic *mpic, unsigned int source_no)
{
        struct mpic_irq_fixup *fixup = &mpic->fixups[source_no];
        u32 tmp;

        spin_lock(&mpic->fixup_lock);
        writeb(0x11 + 2 * fixup->irq, fixup->base);
        tmp = readl(fixup->base + 2);
        writel(tmp | 0x80000000ul, fixup->base + 2);
        /* config writes shouldn't be posted but let's be safe ... */
        (void)readl(fixup->base + 2);
        spin_unlock(&mpic->fixup_lock);
}


static void __init mpic_amd8111_read_irq(struct mpic *mpic, u8 __iomem *devbase)
{
        int i, irq;
        u32 tmp;

        printk(KERN_INFO "mpic: - Workarounds on AMD 8111 @ %p\n", devbase);

        for (i = 0; i < 24; i++) {
                writeb(0x10 + 2 * i, devbase + 0xf2);
                tmp = readl(devbase + 0xf4);
                if ((tmp & 0x1) || !(tmp & 0x20))
                        continue;
                irq = (tmp >> 16) & 0xff;
                mpic->fixups[irq].irq = i;
                mpic->fixups[irq].base = devbase + 0xf2;
        }
}

static void __init mpic_amd8131_read_irq(struct mpic *mpic, u8 __iomem *devbase)
{
        int i, irq;
        u32 tmp;

        printk(KERN_INFO "mpic: - Workarounds on AMD 8131 @ %p\n", devbase);

        for (i = 0; i < 4; i++) {
                writeb(0x10 + 2 * i, devbase + 0xba);
                tmp = readl(devbase + 0xbc);
                if ((tmp & 0x1) || !(tmp & 0x20))
                        continue;
                irq = (tmp >> 16) & 0xff;
                mpic->fixups[irq].irq = i;
                mpic->fixups[irq].base = devbase + 0xba;
        }
}

static void __init mpic_scan_ioapics(struct mpic *mpic)
{
        unsigned int devfn;
        u8 __iomem *cfgspace;

        printk(KERN_INFO "mpic: Setting up IO-APICs workarounds for U3\n");

        /* Allocate fixups array */
        mpic->fixups = alloc_bootmem(128 * sizeof(struct mpic_irq_fixup));
        BUG_ON(mpic->fixups == NULL);
        memset(mpic->fixups, 0, 128 * sizeof(struct mpic_irq_fixup));

        /* Init spinlock */
        spin_lock_init(&mpic->fixup_lock);

        /* Map U3 config space. We assume all IO-APICs are on the primary bus
         * and slot will never be above "0xf" so we only need to map 32k
         */
        cfgspace = (unsigned char __iomem *)ioremap(0xf2000000, 0x8000);
        BUG_ON(cfgspace == NULL);

        /* Now we scan all slots. It's a very quick scan: we read only the
         * header type, vendor ID and device ID, which is plenty.
         */
        for (devfn = 0; devfn < PCI_DEVFN(0x10, 0); devfn++) {
                u8 __iomem *devbase = cfgspace + (devfn << 8);
                u8 hdr_type = readb(devbase + PCI_HEADER_TYPE);
                u32 l = readl(devbase + PCI_VENDOR_ID);
                u16 vendor_id, device_id;
                int multifunc = 0;

                DBG("devfn %x, l: %x\n", devfn, l);

                /* If no device, skip */
                if (l == 0xffffffff || l == 0x00000000 ||
                    l == 0x0000ffff || l == 0xffff0000)
                        goto next;

                /* Check if it's a multifunction device (only really
                 * meaningful on function 0)
                 */
                multifunc = !!(hdr_type & 0x80);
                vendor_id = l & 0xffff;
                device_id = (l >> 16) & 0xffff;

                /* If a known device, go to fixup setup code */
                if (vendor_id == PCI_VENDOR_ID_AMD && device_id == 0x7460)
                        mpic_amd8111_read_irq(mpic, devbase);
                if (vendor_id == PCI_VENDOR_ID_AMD && device_id == 0x7450)
                        mpic_amd8131_read_irq(mpic, devbase);
        next:
                /* next device, if function 0 */
                if ((PCI_FUNC(devfn) == 0) && !multifunc)
                        devfn += 7;
        }
}

#endif /* CONFIG_MPIC_BROKEN_U3 */


/* Find an mpic associated with a given linux interrupt */
static struct mpic *mpic_find(unsigned int irq, unsigned int *is_ipi)
{
        struct mpic *mpic = mpics;

        while (mpic) {
                /* search IPIs first since they may override the main interrupts */
                if (irq >= mpic->ipi_offset && irq < (mpic->ipi_offset + 4)) {
                        if (is_ipi)
                                *is_ipi = 1;
                        return mpic;
                }
                if (irq >= mpic->irq_offset &&
                    irq < (mpic->irq_offset + mpic->irq_count)) {
                        if (is_ipi)
                                *is_ipi = 0;
                        return mpic;
                }
                mpic = mpic->next;
        }
        return NULL;
}

/* Convert a cpu mask from logical to physical cpu numbers. */
static inline u32 mpic_physmask(u32 cpumask)
{
        int i;
        u32 mask = 0;

        for (i = 0; i < NR_CPUS; ++i, cpumask >>= 1)
                mask |= (cpumask & 1) << get_hard_smp_processor_id(i);
        return mask;
}
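
/*
 * Illustrative example (hard ids are assumptions): if logical CPUs 0
 * and 2 have hard ids 0 and 4, mpic_physmask(0x5) returns
 * (1 << 0) | (1 << 4) = 0x11.
 */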

#ifdef CONFIG_SMP
/* Get the mpic structure from the IPI number */
static inline struct mpic *mpic_from_ipi(unsigned int ipi)
{
        return container_of(irq_desc[ipi].handler, struct mpic, hc_ipi);
}
#endif

/* Get the mpic structure from the irq number */
static inline struct mpic *mpic_from_irq(unsigned int irq)
{
        return container_of(irq_desc[irq].handler, struct mpic, hc_irq);
}

/* Send an EOI */
static inline void mpic_eoi(struct mpic *mpic)
{
        mpic_cpu_write(MPIC_CPU_EOI, 0);
        (void)mpic_cpu_read(MPIC_CPU_WHOAMI);
}
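
/*
 * The dummy WHOAMI read above presumably serves to flush the posted
 * EOI write all the way out to the controller before we return.
 */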

#ifdef CONFIG_SMP
static irqreturn_t mpic_ipi_action(int irq, void *dev_id, struct pt_regs *regs)
{
        struct mpic *mpic = dev_id;

        smp_message_recv(irq - mpic->ipi_offset, regs);
        return IRQ_HANDLED;
}
#endif /* CONFIG_SMP */

/*
 * Linux descriptor level callbacks
 */


static void mpic_enable_irq(unsigned int irq)
{
        unsigned int loops = 100000;
        struct mpic *mpic = mpic_from_irq(irq);
        unsigned int src = irq - mpic->irq_offset;

        DBG("%p: %s: enable_irq: %d (src %d)\n", mpic, mpic->name, irq, src);

        mpic_irq_write(src, MPIC_IRQ_VECTOR_PRI,
                       mpic_irq_read(src, MPIC_IRQ_VECTOR_PRI) &
                       ~MPIC_VECPRI_MASK);

        /* make sure mask gets to controller before we return to user */
        do {
                if (!loops--) {
                        printk(KERN_ERR "mpic_enable_irq timeout\n");
                        break;
                }
        } while (mpic_irq_read(src, MPIC_IRQ_VECTOR_PRI) & MPIC_VECPRI_MASK);
}

static void mpic_disable_irq(unsigned int irq)
{
        unsigned int loops = 100000;
        struct mpic *mpic = mpic_from_irq(irq);
        unsigned int src = irq - mpic->irq_offset;

        DBG("%s: disable_irq: %d (src %d)\n", mpic->name, irq, src);

        mpic_irq_write(src, MPIC_IRQ_VECTOR_PRI,
                       mpic_irq_read(src, MPIC_IRQ_VECTOR_PRI) |
                       MPIC_VECPRI_MASK);

        /* make sure mask gets to controller before we return to user */
        do {
                if (!loops--) {
                        printk(KERN_ERR "mpic_disable_irq timeout\n");
                        break;
                }
        } while (!(mpic_irq_read(src, MPIC_IRQ_VECTOR_PRI) & MPIC_VECPRI_MASK));
}

static void mpic_end_irq(unsigned int irq)
{
        struct mpic *mpic = mpic_from_irq(irq);

        DBG("%s: end_irq: %d\n", mpic->name, irq);

        /* We always EOI on end_irq() even for edge interrupts, since that
         * should only lower the priority; the MPIC should have properly
         * latched another edge interrupt coming in anyway.
         */

#ifdef CONFIG_MPIC_BROKEN_U3
        if (mpic->flags & MPIC_BROKEN_U3) {
                unsigned int src = irq - mpic->irq_offset;
                if (mpic_is_ht_interrupt(mpic, src))
                        mpic_apic_end_irq(mpic, src);
        }
#endif /* CONFIG_MPIC_BROKEN_U3 */

        mpic_eoi(mpic);
}

#ifdef CONFIG_SMP

static void mpic_enable_ipi(unsigned int irq)
{
        struct mpic *mpic = mpic_from_ipi(irq);
        unsigned int src = irq - mpic->ipi_offset;

        DBG("%s: enable_ipi: %d (ipi %d)\n", mpic->name, irq, src);
        mpic_ipi_write(src, mpic_ipi_read(src) & ~MPIC_VECPRI_MASK);
}

static void mpic_disable_ipi(unsigned int irq)
{
        /* NEVER disable an IPI... that's just plain wrong! */
}

static void mpic_end_ipi(unsigned int irq)
{
        struct mpic *mpic = mpic_from_ipi(irq);

        /*
         * IPIs are marked IRQ_PER_CPU. This has the side effect of
         * preventing the IRQ_PENDING/IRQ_INPROGRESS logic from
         * applying to them. We EOI them late to avoid re-entering.
         * We mark IPIs with SA_INTERRUPT as they must run with
         * irqs disabled.
         */
        mpic_eoi(mpic);
}

#endif /* CONFIG_SMP */

static void mpic_set_affinity(unsigned int irq, cpumask_t cpumask)
{
        struct mpic *mpic = mpic_from_irq(irq);
        cpumask_t tmp;

        cpus_and(tmp, cpumask, cpu_online_map);

        mpic_irq_write(irq - mpic->irq_offset, MPIC_IRQ_DESTINATION,
                       mpic_physmask(cpus_addr(tmp)[0]));
}


/*
 * Exported functions
 */


struct mpic * __init mpic_alloc(unsigned long phys_addr,
                                unsigned int flags,
                                unsigned int isu_size,
                                unsigned int irq_offset,
                                unsigned int irq_count,
                                unsigned int ipi_offset,
                                unsigned char *senses,
                                unsigned int senses_count,
                                const char *name)
{
        struct mpic *mpic;
        u32 reg;
        const char *vers;
        int i;

        mpic = alloc_bootmem(sizeof(struct mpic));
        if (mpic == NULL)
                return NULL;

        memset(mpic, 0, sizeof(struct mpic));
        mpic->name = name;

        mpic->hc_irq.typename = name;
        mpic->hc_irq.enable = mpic_enable_irq;
        mpic->hc_irq.disable = mpic_disable_irq;
        mpic->hc_irq.end = mpic_end_irq;
        if (flags & MPIC_PRIMARY)
                mpic->hc_irq.set_affinity = mpic_set_affinity;
#ifdef CONFIG_SMP
        mpic->hc_ipi.typename = name;
        mpic->hc_ipi.enable = mpic_enable_ipi;
        mpic->hc_ipi.disable = mpic_disable_ipi;
        mpic->hc_ipi.end = mpic_end_ipi;
#endif /* CONFIG_SMP */

        mpic->flags = flags;
        mpic->isu_size = isu_size;
        mpic->irq_offset = irq_offset;
        mpic->irq_count = irq_count;
        mpic->ipi_offset = ipi_offset;
        mpic->num_sources = 0; /* so far */
        mpic->senses = senses;
        mpic->senses_count = senses_count;

        /* Map the global registers */
        mpic->gregs = ioremap(phys_addr + MPIC_GREG_BASE, 0x1000);
        mpic->tmregs = mpic->gregs + ((MPIC_TIMER_BASE - MPIC_GREG_BASE) >> 2);
        BUG_ON(mpic->gregs == NULL);

        /* Reset */
        if (flags & MPIC_WANTS_RESET) {
                mpic_write(mpic->gregs, MPIC_GREG_GLOBAL_CONF_0,
                           mpic_read(mpic->gregs, MPIC_GREG_GLOBAL_CONF_0)
                           | MPIC_GREG_GCONF_RESET);
                while (mpic_read(mpic->gregs, MPIC_GREG_GLOBAL_CONF_0)
                       & MPIC_GREG_GCONF_RESET)
                        mb();
        }

        /* Read feature register, calculate num CPUs and, for non-ISU
         * MPICs, num sources as well. On ISU MPICs, sources are counted
         * as ISUs are added
         */
        reg = mpic_read(mpic->gregs, MPIC_GREG_FEATURE_0);
        mpic->num_cpus = ((reg & MPIC_GREG_FEATURE_LAST_CPU_MASK)
                          >> MPIC_GREG_FEATURE_LAST_CPU_SHIFT) + 1;
        if (isu_size == 0)
                mpic->num_sources = ((reg & MPIC_GREG_FEATURE_LAST_SRC_MASK)
                                     >> MPIC_GREG_FEATURE_LAST_SRC_SHIFT) + 1;

        /* Map the per-CPU registers */
        for (i = 0; i < mpic->num_cpus; i++) {
                mpic->cpuregs[i] = ioremap(phys_addr + MPIC_CPU_BASE +
                                           i * MPIC_CPU_STRIDE, 0x1000);
                BUG_ON(mpic->cpuregs[i] == NULL);
        }

        /* Initialize main ISU if none provided */
        if (mpic->isu_size == 0) {
                mpic->isu_size = mpic->num_sources;
                mpic->isus[0] = ioremap(phys_addr + MPIC_IRQ_BASE,
                                        MPIC_IRQ_STRIDE * mpic->isu_size);
                BUG_ON(mpic->isus[0] == NULL);
        }
        mpic->isu_shift = 1 + __ilog2(mpic->isu_size - 1);
        mpic->isu_mask = (1 << mpic->isu_shift) - 1;

        /* Display version */
        switch (reg & MPIC_GREG_FEATURE_VERSION_MASK) {
        case 1:
                vers = "1.0";
                break;
        case 2:
                vers = "1.2";
                break;
        case 3:
                vers = "1.3";
                break;
        default:
                vers = "<unknown>";
                break;
        }
        printk(KERN_INFO "mpic: Setting up MPIC \"%s\" version %s at %lx, max %d CPUs\n",
               name, vers, phys_addr, mpic->num_cpus);
        printk(KERN_INFO "mpic: ISU size: %d, shift: %d, mask: %x\n", mpic->isu_size,
               mpic->isu_shift, mpic->isu_mask);

        mpic->next = mpics;
        mpics = mpic;

        if (flags & MPIC_PRIMARY)
                mpic_primary = mpic;

        return mpic;
}
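
/*
 * Sketch of typical boot-time usage from platform code (the address,
 * flags and name below are assumptions for illustration, not taken
 * from any real board support file):
 *
 *      struct mpic *mpic;
 *
 *      mpic = mpic_alloc(0x80040000,
 *                        MPIC_PRIMARY | MPIC_WANTS_RESET | MPIC_BIG_ENDIAN,
 *                        0, 0, 0, NR_IRQS - 4, NULL, 0, " MPIC     ");
 *      if (mpic)
 *              mpic_init(mpic);
 *
 * An isu_size of 0 means the sources live in the main register block;
 * platforms with external ISUs pass a non-zero isu_size and call
 *      mpic_assign_isu(mpic, isu_num, isu_phys_addr);
 * once per ISU before mpic_init().
 */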

void __init mpic_assign_isu(struct mpic *mpic, unsigned int isu_num,
                            unsigned long phys_addr)
{
        unsigned int isu_first = isu_num * mpic->isu_size;

        BUG_ON(isu_num >= MPIC_MAX_ISU);

        mpic->isus[isu_num] = ioremap(phys_addr, MPIC_IRQ_STRIDE * mpic->isu_size);
        if ((isu_first + mpic->isu_size) > mpic->num_sources)
                mpic->num_sources = isu_first + mpic->isu_size;
}

void __init mpic_setup_cascade(unsigned int irq, mpic_cascade_t handler,
                               void *data)
{
        struct mpic *mpic = mpic_find(irq, NULL);
        unsigned long flags;

        /* Synchronization here is a bit dodgy, so don't try to replace cascade
         * interrupts on the fly too often ... but normally it's set up at boot.
         */
        spin_lock_irqsave(&mpic_lock, flags);
        if (mpic->cascade)
                mpic_disable_irq(mpic->cascade_vec + mpic->irq_offset);
        mpic->cascade = NULL;
        wmb();
        mpic->cascade_vec = irq - mpic->irq_offset;
        mpic->cascade_data = data;
        wmb();
        mpic->cascade = handler;
        mpic_enable_irq(irq);
        spin_unlock_irqrestore(&mpic_lock, flags);
}

void __init mpic_init(struct mpic *mpic)
{
        int i;

        BUG_ON(mpic->num_sources == 0);

        printk(KERN_INFO "mpic: Initializing for %d sources\n", mpic->num_sources);

        /* Set current processor priority to max */
        mpic_cpu_write(MPIC_CPU_CURRENT_TASK_PRI, 0xf);

        /* Initialize timers: just disable them all */
        for (i = 0; i < 4; i++) {
                mpic_write(mpic->tmregs,
                           i * MPIC_TIMER_STRIDE + MPIC_TIMER_DESTINATION, 0);
                mpic_write(mpic->tmregs,
                           i * MPIC_TIMER_STRIDE + MPIC_TIMER_VECTOR_PRI,
                           MPIC_VECPRI_MASK |
                           (MPIC_VEC_TIMER_0 + i));
        }

        /* Initialize IPIs to our reserved vectors and mark them disabled for now */
        mpic_test_broken_ipi(mpic);
        for (i = 0; i < 4; i++) {
                mpic_ipi_write(i,
                               MPIC_VECPRI_MASK |
                               (10 << MPIC_VECPRI_PRIORITY_SHIFT) |
                               (MPIC_VEC_IPI_0 + i));
#ifdef CONFIG_SMP
                if (!(mpic->flags & MPIC_PRIMARY))
                        continue;
                irq_desc[mpic->ipi_offset+i].status |= IRQ_PER_CPU;
                irq_desc[mpic->ipi_offset+i].handler = &mpic->hc_ipi;
#endif /* CONFIG_SMP */
        }

        /* Initialize interrupt sources */
        if (mpic->irq_count == 0)
                mpic->irq_count = mpic->num_sources;

#ifdef CONFIG_MPIC_BROKEN_U3
        /* Do the ioapic fixups on U3 broken mpic */
        DBG("MPIC flags: %x\n", mpic->flags);
        if ((mpic->flags & MPIC_BROKEN_U3) && (mpic->flags & MPIC_PRIMARY))
                mpic_scan_ioapics(mpic);
#endif /* CONFIG_MPIC_BROKEN_U3 */

        for (i = 0; i < mpic->num_sources; i++) {
                /* start with vector = source number, and masked */
                u32 vecpri = MPIC_VECPRI_MASK | i | (8 << MPIC_VECPRI_PRIORITY_SHIFT);
                int level = 0;

                /* if it's an IPI, we skip it */
                if ((mpic->irq_offset + i) >= mpic->ipi_offset &&
                    (mpic->irq_offset + i) < (mpic->ipi_offset + 4))
                        continue;

                /* do senses munging */
                if (mpic->senses && i < mpic->senses_count) {
                        if (mpic->senses[i] & IRQ_SENSE_LEVEL)
                                vecpri |= MPIC_VECPRI_SENSE_LEVEL;
                        if (mpic->senses[i] & IRQ_POLARITY_POSITIVE)
                                vecpri |= MPIC_VECPRI_POLARITY_POSITIVE;
                } else
                        vecpri |= MPIC_VECPRI_SENSE_LEVEL;

                /* remember whether it was a level interrupt */
                level = (vecpri & MPIC_VECPRI_SENSE_LEVEL);

                /* deal with broken U3 */
                if (mpic->flags & MPIC_BROKEN_U3) {
#ifdef CONFIG_MPIC_BROKEN_U3
                        if (mpic_is_ht_interrupt(mpic, i)) {
                                vecpri &= ~(MPIC_VECPRI_SENSE_MASK |
                                            MPIC_VECPRI_POLARITY_MASK);
                                vecpri |= MPIC_VECPRI_POLARITY_POSITIVE;
                        }
#else
                        printk(KERN_ERR "mpic: BROKEN_U3 set, but CONFIG doesn't match\n");
#endif
                }

                DBG("setup source %d, vecpri: %08x, level: %d\n", i, vecpri,
                    (level != 0));

                /* init hw */
                mpic_irq_write(i, MPIC_IRQ_VECTOR_PRI, vecpri);
                mpic_irq_write(i, MPIC_IRQ_DESTINATION,
                               1 << hard_smp_processor_id());

                /* init linux descriptors */
                if (i < mpic->irq_count) {
                        irq_desc[mpic->irq_offset+i].status = level ? IRQ_LEVEL : 0;
                        irq_desc[mpic->irq_offset+i].handler = &mpic->hc_irq;
                }
        }

        /* Init spurious vector */
        mpic_write(mpic->gregs, MPIC_GREG_SPURIOUS, MPIC_VEC_SPURRIOUS);

        /* Disable 8259 passthrough */
        mpic_write(mpic->gregs, MPIC_GREG_GLOBAL_CONF_0,
                   mpic_read(mpic->gregs, MPIC_GREG_GLOBAL_CONF_0)
                   | MPIC_GREG_GCONF_8259_PTHROU_DIS);

        /* Set current processor priority to 0 */
        mpic_cpu_write(MPIC_CPU_CURRENT_TASK_PRI, 0);
}


void mpic_irq_set_priority(unsigned int irq, unsigned int pri)
{
        unsigned int is_ipi;
        struct mpic *mpic = mpic_find(irq, &is_ipi);
        unsigned long flags;
        u32 reg;

        spin_lock_irqsave(&mpic_lock, flags);
        if (is_ipi) {
                reg = mpic_ipi_read(irq - mpic->ipi_offset) &
                        ~MPIC_VECPRI_PRIORITY_MASK;
                mpic_ipi_write(irq - mpic->ipi_offset,
                               reg | (pri << MPIC_VECPRI_PRIORITY_SHIFT));
        } else {
                reg = mpic_irq_read(irq - mpic->irq_offset, MPIC_IRQ_VECTOR_PRI)
                        & ~MPIC_VECPRI_PRIORITY_MASK;
                mpic_irq_write(irq - mpic->irq_offset, MPIC_IRQ_VECTOR_PRI,
                               reg | (pri << MPIC_VECPRI_PRIORITY_SHIFT));
        }
        spin_unlock_irqrestore(&mpic_lock, flags);
}

unsigned int mpic_irq_get_priority(unsigned int irq)
{
        unsigned int is_ipi;
        struct mpic *mpic = mpic_find(irq, &is_ipi);
        unsigned long flags;
        u32 reg;

        spin_lock_irqsave(&mpic_lock, flags);
        if (is_ipi)
                reg = mpic_ipi_read(irq - mpic->ipi_offset);
        else
                reg = mpic_irq_read(irq - mpic->irq_offset, MPIC_IRQ_VECTOR_PRI);
        spin_unlock_irqrestore(&mpic_lock, flags);
        return (reg & MPIC_VECPRI_PRIORITY_MASK) >> MPIC_VECPRI_PRIORITY_SHIFT;
}

void mpic_setup_this_cpu(void)
{
#ifdef CONFIG_SMP
        struct mpic *mpic = mpic_primary;
        unsigned long flags;
        u32 msk = 1 << hard_smp_processor_id();
        unsigned int i;

        BUG_ON(mpic == NULL);

        DBG("%s: setup_this_cpu(%d)\n", mpic->name, hard_smp_processor_id());

        spin_lock_irqsave(&mpic_lock, flags);

        /* let the mpic know we want intrs. default affinity is 0xffffffff
         * until changed via /proc. That's how it's done on x86. If we want
         * it differently, then we should make sure we also change the default
         * values of irq_affinity in irq.c.
         */
        if (distribute_irqs) {
                for (i = 0; i < mpic->num_sources; i++)
                        mpic_irq_write(i, MPIC_IRQ_DESTINATION,
                                       mpic_irq_read(i, MPIC_IRQ_DESTINATION) | msk);
        }

        /* Set current processor priority to 0 */
        mpic_cpu_write(MPIC_CPU_CURRENT_TASK_PRI, 0);

        spin_unlock_irqrestore(&mpic_lock, flags);
#endif /* CONFIG_SMP */
}

int mpic_cpu_get_priority(void)
{
        struct mpic *mpic = mpic_primary;

        return mpic_cpu_read(MPIC_CPU_CURRENT_TASK_PRI);
}

void mpic_cpu_set_priority(int prio)
{
        struct mpic *mpic = mpic_primary;

        prio &= MPIC_CPU_TASKPRI_MASK;
        mpic_cpu_write(MPIC_CPU_CURRENT_TASK_PRI, prio);
}

/*
 * XXX: someone who knows mpic should check this.
 * do we need to eoi the ipi including for kexec cpu here (see xics comments)?
 * or can we reset the mpic in the new kernel?
 */
void mpic_teardown_this_cpu(int secondary)
{
        struct mpic *mpic = mpic_primary;
        unsigned long flags;
        u32 msk = 1 << hard_smp_processor_id();
        unsigned int i;

        BUG_ON(mpic == NULL);

        DBG("%s: teardown_this_cpu(%d)\n", mpic->name, hard_smp_processor_id());
        spin_lock_irqsave(&mpic_lock, flags);

        /* let the mpic know we don't want intrs. */
        for (i = 0; i < mpic->num_sources; i++)
                mpic_irq_write(i, MPIC_IRQ_DESTINATION,
                               mpic_irq_read(i, MPIC_IRQ_DESTINATION) & ~msk);

        /* Set current processor priority to max */
        mpic_cpu_write(MPIC_CPU_CURRENT_TASK_PRI, 0xf);

        spin_unlock_irqrestore(&mpic_lock, flags);
}


void mpic_send_ipi(unsigned int ipi_no, unsigned int cpu_mask)
{
        struct mpic *mpic = mpic_primary;

        BUG_ON(mpic == NULL);

        DBG("%s: send_ipi(ipi_no: %d)\n", mpic->name, ipi_no);

        mpic_cpu_write(MPIC_CPU_IPI_DISPATCH_0 + ipi_no * 0x10,
                       mpic_physmask(cpu_mask & cpus_addr(cpu_online_map)[0]));
}

int mpic_get_one_irq(struct mpic *mpic, struct pt_regs *regs)
{
        u32 irq;

        irq = mpic_cpu_read(MPIC_CPU_INTACK) & MPIC_VECPRI_VECTOR_MASK;
        DBG("%s: get_one_irq(): %d\n", mpic->name, irq);

        if (mpic->cascade && irq == mpic->cascade_vec) {
                DBG("%s: cascading ...\n", mpic->name);
                irq = mpic->cascade(regs, mpic->cascade_data);
                mpic_eoi(mpic);
                return irq;
        }
        if (unlikely(irq == MPIC_VEC_SPURRIOUS))
                return -1;
        if (irq < MPIC_VEC_IPI_0)
                return irq + mpic->irq_offset;
        DBG("%s: ipi %d !\n", mpic->name, irq - MPIC_VEC_IPI_0);
        return irq - MPIC_VEC_IPI_0 + mpic->ipi_offset;
}

int mpic_get_irq(struct pt_regs *regs)
{
        struct mpic *mpic = mpic_primary;

        BUG_ON(mpic == NULL);

        return mpic_get_one_irq(mpic, regs);
}


#ifdef CONFIG_SMP
void mpic_request_ipis(void)
{
        struct mpic *mpic = mpic_primary;

        BUG_ON(mpic == NULL);

        printk("requesting IPIs ...\n");

        /* IPIs are marked SA_INTERRUPT as they must run with irqs disabled */
        request_irq(mpic->ipi_offset+0, mpic_ipi_action, SA_INTERRUPT,
                    "IPI0 (call function)", mpic);
        request_irq(mpic->ipi_offset+1, mpic_ipi_action, SA_INTERRUPT,
                    "IPI1 (reschedule)", mpic);
        request_irq(mpic->ipi_offset+2, mpic_ipi_action, SA_INTERRUPT,
                    "IPI2 (unused)", mpic);
        request_irq(mpic->ipi_offset+3, mpic_ipi_action, SA_INTERRUPT,
                    "IPI3 (debugger break)", mpic);

        printk("IPIs requested...\n");
}

void smp_mpic_message_pass(int target, int msg)
{
        /* make sure we're sending something that translates to an IPI */
        if ((unsigned int)msg > 3) {
                printk("SMP %d: smp_message_pass: unknown msg %d\n",
                       smp_processor_id(), msg);
                return;
        }
        switch (target) {
        case MSG_ALL:
                mpic_send_ipi(msg, 0xffffffff);
                break;
        case MSG_ALL_BUT_SELF:
                mpic_send_ipi(msg, 0xffffffff & ~(1 << smp_processor_id()));
                break;
        default:
                mpic_send_ipi(msg, 1 << target);
                break;
        }
}
#endif /* CONFIG_SMP */