]> git.proxmox.com Git - mirror_ubuntu-artful-kernel.git/blame - arch/powerpc/platforms/pseries/xics.c
IRQ: Maintain regs pointer globally rather than passing to IRQ handlers
[mirror_ubuntu-artful-kernel.git] / arch / powerpc / platforms / pseries / xics.c
CommitLineData
007e8f51
DG
1/*
2 * arch/powerpc/platforms/pseries/xics.c
1da177e4
LT
3 *
4 * Copyright 2000 IBM Corporation.
5 *
6 * This program is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU General Public License
8 * as published by the Free Software Foundation; either version
9 * 2 of the License, or (at your option) any later version.
10 */
0ebfff14
BH
11
12#undef DEBUG
13
1da177e4
LT
14#include <linux/types.h>
15#include <linux/threads.h>
16#include <linux/kernel.h>
17#include <linux/irq.h>
18#include <linux/smp.h>
19#include <linux/interrupt.h>
20#include <linux/signal.h>
21#include <linux/init.h>
22#include <linux/gfp.h>
23#include <linux/radix-tree.h>
24#include <linux/cpu.h>
0ebfff14 25
57cfb814 26#include <asm/firmware.h>
1da177e4
LT
27#include <asm/prom.h>
28#include <asm/io.h>
29#include <asm/pgtable.h>
30#include <asm/smp.h>
31#include <asm/rtas.h>
1da177e4
LT
32#include <asm/hvcall.h>
33#include <asm/machdep.h>
2227718c 34#include <asm/i8259.h>
1da177e4 35
007e8f51 36#include "xics.h"
b9377ffc 37#include "plpar_wrappers.h"
007e8f51 38
1da177e4
LT
39#define XICS_IPI 2
40#define XICS_IRQ_SPURIOUS 0
41
42/* Want a priority other than 0. Various HW issues require this. */
43#define DEFAULT_PRIORITY 5
44
007e8f51 45/*
1da177e4 46 * Mark IPIs as higher priority so we can take them inside interrupts that
6714465e 47 * arent marked IRQF_DISABLED
1da177e4
LT
48 */
49#define IPI_PRIORITY 4
50
51struct xics_ipl {
52 union {
53 u32 word;
54 u8 bytes[4];
55 } xirr_poll;
56 union {
57 u32 word;
58 u8 bytes[4];
59 } xirr;
60 u32 dummy;
61 union {
62 u32 word;
63 u8 bytes[4];
64 } qirr;
65};
66
67static struct xics_ipl __iomem *xics_per_cpu[NR_CPUS];
68
1da177e4 69static unsigned int default_server = 0xFF;
26370322
AB
70static unsigned int default_distrib_server = 0;
71static unsigned int interrupt_server_size = 8;
1da177e4 72
0ebfff14
BH
73static struct irq_host *xics_host;
74
1da177e4
LT
75/*
76 * XICS only has a single IPI, so encode the messages per CPU
77 */
78struct xics_ipi_struct xics_ipi_message[NR_CPUS] __cacheline_aligned;
79
80/* RTAS service tokens */
26370322
AB
81static int ibm_get_xive;
82static int ibm_set_xive;
83static int ibm_int_on;
84static int ibm_int_off;
1da177e4 85
1da177e4 86
b9e5b4e6 87/* Direct HW low level accessors */
1da177e4 88
1da177e4 89
0ebfff14 90static inline unsigned int direct_xirr_info_get(int n_cpu)
1da177e4
LT
91{
92 return in_be32(&xics_per_cpu[n_cpu]->xirr.word);
93}
94
b9e5b4e6 95static inline void direct_xirr_info_set(int n_cpu, int value)
1da177e4
LT
96{
97 out_be32(&xics_per_cpu[n_cpu]->xirr.word, value);
98}
99
b9e5b4e6 100static inline void direct_cppr_info(int n_cpu, u8 value)
1da177e4
LT
101{
102 out_8(&xics_per_cpu[n_cpu]->xirr.bytes[0], value);
103}
104
b9e5b4e6 105static inline void direct_qirr_info(int n_cpu, u8 value)
1da177e4
LT
106{
107 out_8(&xics_per_cpu[n_cpu]->qirr.bytes[0], value);
108}
109
1da177e4 110
b9e5b4e6 111/* LPAR low level accessors */
1da177e4 112
1da177e4 113
0ebfff14 114static inline unsigned int lpar_xirr_info_get(int n_cpu)
1da177e4
LT
115{
116 unsigned long lpar_rc;
007e8f51 117 unsigned long return_value;
1da177e4
LT
118
119 lpar_rc = plpar_xirr(&return_value);
706c8c93 120 if (lpar_rc != H_SUCCESS)
007e8f51 121 panic(" bad return code xirr - rc = %lx \n", lpar_rc);
0ebfff14 122 return (unsigned int)return_value;
1da177e4
LT
123}
124
b9e5b4e6 125static inline void lpar_xirr_info_set(int n_cpu, int value)
1da177e4
LT
126{
127 unsigned long lpar_rc;
128 unsigned long val64 = value & 0xffffffff;
129
130 lpar_rc = plpar_eoi(val64);
706c8c93 131 if (lpar_rc != H_SUCCESS)
1da177e4 132 panic("bad return code EOI - rc = %ld, value=%lx\n", lpar_rc,
007e8f51 133 val64);
1da177e4
LT
134}
135
b9e5b4e6 136static inline void lpar_cppr_info(int n_cpu, u8 value)
1da177e4
LT
137{
138 unsigned long lpar_rc;
139
140 lpar_rc = plpar_cppr(value);
706c8c93 141 if (lpar_rc != H_SUCCESS)
007e8f51 142 panic("bad return code cppr - rc = %lx\n", lpar_rc);
1da177e4
LT
143}
144
b9e5b4e6 145static inline void lpar_qirr_info(int n_cpu , u8 value)
1da177e4
LT
146{
147 unsigned long lpar_rc;
148
149 lpar_rc = plpar_ipi(get_hard_smp_processor_id(n_cpu), value);
706c8c93 150 if (lpar_rc != H_SUCCESS)
007e8f51 151 panic("bad return code qirr - rc = %lx\n", lpar_rc);
1da177e4
LT
152}
153
1da177e4 154
b9e5b4e6 155/* High level handlers and init code */
1da177e4 156
1da177e4
LT
157
158#ifdef CONFIG_SMP
0ebfff14 159static int get_irq_server(unsigned int virq)
1da177e4
LT
160{
161 unsigned int server;
162 /* For the moment only implement delivery to all cpus or one cpu */
0ebfff14 163 cpumask_t cpumask = irq_desc[virq].affinity;
1da177e4
LT
164 cpumask_t tmp = CPU_MASK_NONE;
165
166 if (!distribute_irqs)
167 return default_server;
168
169 if (cpus_equal(cpumask, CPU_MASK_ALL)) {
170 server = default_distrib_server;
171 } else {
172 cpus_and(tmp, cpu_online_map, cpumask);
173
174 if (cpus_empty(tmp))
175 server = default_distrib_server;
176 else
177 server = get_hard_smp_processor_id(first_cpu(tmp));
178 }
179
180 return server;
181
182}
183#else
0ebfff14 184static int get_irq_server(unsigned int virq)
1da177e4
LT
185{
186 return default_server;
187}
188#endif
189
b9e5b4e6
BH
190
191static void xics_unmask_irq(unsigned int virq)
1da177e4
LT
192{
193 unsigned int irq;
194 int call_status;
195 unsigned int server;
196
0ebfff14
BH
197 pr_debug("xics: unmask virq %d\n", virq);
198
199 irq = (unsigned int)irq_map[virq].hwirq;
200 pr_debug(" -> map to hwirq 0x%x\n", irq);
201 if (irq == XICS_IPI || irq == XICS_IRQ_SPURIOUS)
1da177e4
LT
202 return;
203
204 server = get_irq_server(virq);
b9e5b4e6 205
1da177e4
LT
206 call_status = rtas_call(ibm_set_xive, 3, 1, NULL, irq, server,
207 DEFAULT_PRIORITY);
208 if (call_status != 0) {
26370322
AB
209 printk(KERN_ERR "xics_enable_irq: irq=%u: ibm_set_xive "
210 "returned %d\n", irq, call_status);
211 printk("set_xive %x, server %x\n", ibm_set_xive, server);
1da177e4
LT
212 return;
213 }
214
215 /* Now unmask the interrupt (often a no-op) */
216 call_status = rtas_call(ibm_int_on, 1, 1, NULL, irq);
217 if (call_status != 0) {
26370322
AB
218 printk(KERN_ERR "xics_enable_irq: irq=%u: ibm_int_on "
219 "returned %d\n", irq, call_status);
1da177e4
LT
220 return;
221 }
222}
223
b9e5b4e6 224static void xics_mask_real_irq(unsigned int irq)
1da177e4
LT
225{
226 int call_status;
227 unsigned int server;
228
229 if (irq == XICS_IPI)
230 return;
231
232 call_status = rtas_call(ibm_int_off, 1, 1, NULL, irq);
233 if (call_status != 0) {
26370322
AB
234 printk(KERN_ERR "xics_disable_real_irq: irq=%u: "
235 "ibm_int_off returned %d\n", irq, call_status);
1da177e4
LT
236 return;
237 }
238
239 server = get_irq_server(irq);
240 /* Have to set XIVE to 0xff to be able to remove a slot */
241 call_status = rtas_call(ibm_set_xive, 3, 1, NULL, irq, server, 0xff);
242 if (call_status != 0) {
26370322
AB
243 printk(KERN_ERR "xics_disable_irq: irq=%u: ibm_set_xive(0xff)"
244 " returned %d\n", irq, call_status);
1da177e4
LT
245 return;
246 }
247}
248
b9e5b4e6 249static void xics_mask_irq(unsigned int virq)
1da177e4
LT
250{
251 unsigned int irq;
252
0ebfff14
BH
253 pr_debug("xics: mask virq %d\n", virq);
254
255 irq = (unsigned int)irq_map[virq].hwirq;
256 if (irq == XICS_IPI || irq == XICS_IRQ_SPURIOUS)
257 return;
258 xics_mask_real_irq(irq);
b9e5b4e6
BH
259}
260
0ebfff14 261static unsigned int xics_startup(unsigned int virq)
b9e5b4e6
BH
262{
263 unsigned int irq;
264
0ebfff14
BH
265 /* force a reverse mapping of the interrupt so it gets in the cache */
266 irq = (unsigned int)irq_map[virq].hwirq;
267 irq_radix_revmap(xics_host, irq);
1da177e4 268
0ebfff14 269 /* unmask it */
b9e5b4e6
BH
270 xics_unmask_irq(virq);
271 return 0;
272}
273
0ebfff14 274static void xics_eoi_direct(unsigned int virq)
1da177e4
LT
275{
276 int cpu = smp_processor_id();
0ebfff14 277 unsigned int irq = (unsigned int)irq_map[virq].hwirq;
1da177e4
LT
278
279 iosync();
0ebfff14 280 direct_xirr_info_set(cpu, (0xff << 24) | irq);
1da177e4
LT
281}
282
b9e5b4e6 283
0ebfff14 284static void xics_eoi_lpar(unsigned int virq)
1da177e4
LT
285{
286 int cpu = smp_processor_id();
0ebfff14 287 unsigned int irq = (unsigned int)irq_map[virq].hwirq;
1da177e4 288
b9e5b4e6 289 iosync();
0ebfff14 290 lpar_xirr_info_set(cpu, (0xff << 24) | irq);
1da177e4
LT
291}
292
0ebfff14 293static inline unsigned int xics_remap_irq(unsigned int vec)
1da177e4 294{
0ebfff14 295 unsigned int irq;
1da177e4 296
1da177e4
LT
297 vec &= 0x00ffffff;
298
b9e5b4e6
BH
299 if (vec == XICS_IRQ_SPURIOUS)
300 return NO_IRQ;
0ebfff14 301 irq = irq_radix_revmap(xics_host, vec);
b9e5b4e6 302 if (likely(irq != NO_IRQ))
0ebfff14 303 return irq;
b9e5b4e6
BH
304
305 printk(KERN_ERR "Interrupt %u (real) is invalid,"
306 " disabling it.\n", vec);
307 xics_mask_real_irq(vec);
308 return NO_IRQ;
1da177e4
LT
309}
310
/* ppc_md.get_irq for the direct (non-LPAR) case. */
static unsigned int xics_get_irq_direct(struct pt_regs *regs)
{
	return xics_remap_irq(direct_xirr_info_get(smp_processor_id()));
}
317
/* ppc_md.get_irq for the LPAR case. */
static unsigned int xics_get_irq_lpar(struct pt_regs *regs)
{
	return xics_remap_irq(lpar_xirr_info_get(smp_processor_id()));
}
324
325#ifdef CONFIG_SMP
1da177e4 326
7d12e780 327static irqreturn_t xics_ipi_dispatch(int cpu)
b9e5b4e6 328{
1da177e4
LT
329 WARN_ON(cpu_is_offline(cpu));
330
331 while (xics_ipi_message[cpu].value) {
332 if (test_and_clear_bit(PPC_MSG_CALL_FUNCTION,
333 &xics_ipi_message[cpu].value)) {
334 mb();
7d12e780 335 smp_message_recv(PPC_MSG_CALL_FUNCTION);
1da177e4
LT
336 }
337 if (test_and_clear_bit(PPC_MSG_RESCHEDULE,
338 &xics_ipi_message[cpu].value)) {
339 mb();
7d12e780 340 smp_message_recv(PPC_MSG_RESCHEDULE);
1da177e4
LT
341 }
342#if 0
343 if (test_and_clear_bit(PPC_MSG_MIGRATE_TASK,
344 &xics_ipi_message[cpu].value)) {
345 mb();
7d12e780 346 smp_message_recv(PPC_MSG_MIGRATE_TASK);
1da177e4
LT
347 }
348#endif
cc532915 349#if defined(CONFIG_DEBUGGER) || defined(CONFIG_KEXEC)
1da177e4
LT
350 if (test_and_clear_bit(PPC_MSG_DEBUGGER_BREAK,
351 &xics_ipi_message[cpu].value)) {
352 mb();
7d12e780 353 smp_message_recv(PPC_MSG_DEBUGGER_BREAK);
1da177e4
LT
354 }
355#endif
356 }
357 return IRQ_HANDLED;
358}
359
7d12e780 360static irqreturn_t xics_ipi_action_direct(int irq, void *dev_id)
b9e5b4e6
BH
361{
362 int cpu = smp_processor_id();
363
364 direct_qirr_info(cpu, 0xff);
365
7d12e780 366 return xics_ipi_dispatch(cpu);
b9e5b4e6
BH
367}
368
7d12e780 369static irqreturn_t xics_ipi_action_lpar(int irq, void *dev_id)
b9e5b4e6
BH
370{
371 int cpu = smp_processor_id();
372
373 lpar_qirr_info(cpu, 0xff);
374
7d12e780 375 return xics_ipi_dispatch(cpu);
b9e5b4e6
BH
376}
377
1da177e4
LT
378void xics_cause_IPI(int cpu)
379{
b9e5b4e6
BH
380 if (firmware_has_feature(FW_FEATURE_LPAR))
381 lpar_qirr_info(cpu, IPI_PRIORITY);
382 else
383 direct_qirr_info(cpu, IPI_PRIORITY);
1da177e4 384}
b9e5b4e6 385
6c80a21c 386#endif /* CONFIG_SMP */
1da177e4 387
b9e5b4e6
BH
388static void xics_set_cpu_priority(int cpu, unsigned char cppr)
389{
390 if (firmware_has_feature(FW_FEATURE_LPAR))
391 lpar_cppr_info(cpu, cppr);
392 else
393 direct_cppr_info(cpu, cppr);
394 iosync();
395}
396
397static void xics_set_affinity(unsigned int virq, cpumask_t cpumask)
398{
399 unsigned int irq;
400 int status;
401 int xics_status[2];
402 unsigned long newmask;
403 cpumask_t tmp = CPU_MASK_NONE;
404
0ebfff14
BH
405 irq = (unsigned int)irq_map[virq].hwirq;
406 if (irq == XICS_IPI || irq == XICS_IRQ_SPURIOUS)
b9e5b4e6
BH
407 return;
408
409 status = rtas_call(ibm_get_xive, 1, 3, xics_status, irq);
410
411 if (status) {
412 printk(KERN_ERR "xics_set_affinity: irq=%u ibm,get-xive "
413 "returns %d\n", irq, status);
414 return;
415 }
416
417 /* For the moment only implement delivery to all cpus or one cpu */
418 if (cpus_equal(cpumask, CPU_MASK_ALL)) {
419 newmask = default_distrib_server;
420 } else {
421 cpus_and(tmp, cpu_online_map, cpumask);
422 if (cpus_empty(tmp))
423 return;
424 newmask = get_hard_smp_processor_id(first_cpu(tmp));
425 }
426
427 status = rtas_call(ibm_set_xive, 3, 1, NULL,
428 irq, newmask, xics_status[1]);
429
430 if (status) {
431 printk(KERN_ERR "xics_set_affinity: irq=%u ibm,set-xive "
432 "returns %d\n", irq, status);
433 return;
434 }
435}
436
0ebfff14
BH
437void xics_setup_cpu(void)
438{
439 int cpu = smp_processor_id();
440
441 xics_set_cpu_priority(cpu, 0xff);
442
443 /*
444 * Put the calling processor into the GIQ. This is really only
445 * necessary from a secondary thread as the OF start-cpu interface
446 * performs this function for us on primary threads.
447 *
448 * XXX: undo of teardown on kexec needs this too, as may hotplug
449 */
81b73dd9 450 rtas_set_indicator_fast(GLOBAL_INTERRUPT_QUEUE,
0ebfff14
BH
451 (1UL << interrupt_server_size) - 1 - default_distrib_server, 1);
452}
453
454
b9e5b4e6
BH
455static struct irq_chip xics_pic_direct = {
456 .typename = " XICS ",
457 .startup = xics_startup,
458 .mask = xics_mask_irq,
459 .unmask = xics_unmask_irq,
460 .eoi = xics_eoi_direct,
461 .set_affinity = xics_set_affinity
462};
463
464
465static struct irq_chip xics_pic_lpar = {
466 .typename = " XICS ",
467 .startup = xics_startup,
468 .mask = xics_mask_irq,
469 .unmask = xics_unmask_irq,
470 .eoi = xics_eoi_lpar,
471 .set_affinity = xics_set_affinity
472};
473
474
static int xics_host_match(struct irq_host *h, struct device_node *node)
{
	/* IBM machines have interrupt parents of various funky types for things
	 * like vdevices, events, etc... The trick we use here is to match
	 * everything here except the legacy 8259 which is compatible "chrp,iic"
	 */
	return !device_is_compatible(node, "chrp,iic");
}
1da177e4 483
0ebfff14 484static int xics_host_map_direct(struct irq_host *h, unsigned int virq,
6e99e458 485 irq_hw_number_t hw)
0ebfff14 486{
6e99e458 487 pr_debug("xics: map_direct virq %d, hwirq 0x%lx\n", virq, hw);
0ebfff14
BH
488
489 get_irq_desc(virq)->status |= IRQ_LEVEL;
490 set_irq_chip_and_handler(virq, &xics_pic_direct, handle_fasteoi_irq);
491 return 0;
492}
493
494static int xics_host_map_lpar(struct irq_host *h, unsigned int virq,
6e99e458 495 irq_hw_number_t hw)
0ebfff14 496{
6e99e458 497 pr_debug("xics: map_direct virq %d, hwirq 0x%lx\n", virq, hw);
0ebfff14
BH
498
499 get_irq_desc(virq)->status |= IRQ_LEVEL;
500 set_irq_chip_and_handler(virq, &xics_pic_lpar, handle_fasteoi_irq);
501 return 0;
502}
503
504static int xics_host_xlate(struct irq_host *h, struct device_node *ct,
505 u32 *intspec, unsigned int intsize,
506 irq_hw_number_t *out_hwirq, unsigned int *out_flags)
507
508{
509 /* Current xics implementation translates everything
510 * to level. It is not technically right for MSIs but this
511 * is irrelevant at this point. We might get smarter in the future
6c80a21c 512 */
0ebfff14
BH
513 *out_hwirq = intspec[0];
514 *out_flags = IRQ_TYPE_LEVEL_LOW;
515
516 return 0;
517}
518
519static struct irq_host_ops xics_host_direct_ops = {
520 .match = xics_host_match,
521 .map = xics_host_map_direct,
522 .xlate = xics_host_xlate,
523};
524
525static struct irq_host_ops xics_host_lpar_ops = {
526 .match = xics_host_match,
527 .map = xics_host_map_lpar,
528 .xlate = xics_host_xlate,
529};
530
531static void __init xics_init_host(void)
532{
533 struct irq_host_ops *ops;
534
535 if (firmware_has_feature(FW_FEATURE_LPAR))
536 ops = &xics_host_lpar_ops;
537 else
538 ops = &xics_host_direct_ops;
539 xics_host = irq_alloc_host(IRQ_HOST_MAP_TREE, 0, ops,
540 XICS_IRQ_SPURIOUS);
541 BUG_ON(xics_host == NULL);
542 irq_set_default_host(xics_host);
6c80a21c 543}
1da177e4 544
0ebfff14
BH
545static void __init xics_map_one_cpu(int hw_id, unsigned long addr,
546 unsigned long size)
1da177e4 547{
0ebfff14 548#ifdef CONFIG_SMP
1da177e4 549 int i;
1da177e4 550
0ebfff14
BH
551 /* This may look gross but it's good enough for now, we don't quite
552 * have a hard -> linux processor id matching.
553 */
554 for_each_possible_cpu(i) {
555 if (!cpu_present(i))
556 continue;
557 if (hw_id == get_hard_smp_processor_id(i)) {
558 xics_per_cpu[i] = ioremap(addr, size);
559 return;
560 }
561 }
562#else
563 if (hw_id != 0)
564 return;
565 xics_per_cpu[0] = ioremap(addr, size);
566#endif /* CONFIG_SMP */
567}
1da177e4 568
0ebfff14
BH
569static void __init xics_init_one_node(struct device_node *np,
570 unsigned int *indx)
571{
572 unsigned int ilen;
954a46e2 573 const u32 *ireg;
1da177e4 574
0ebfff14
BH
575 /* This code does the theorically broken assumption that the interrupt
576 * server numbers are the same as the hard CPU numbers.
577 * This happens to be the case so far but we are playing with fire...
578 * should be fixed one of these days. -BenH.
579 */
954a46e2 580 ireg = get_property(np, "ibm,interrupt-server-ranges", NULL);
1da177e4 581
0ebfff14
BH
582 /* Do that ever happen ? we'll know soon enough... but even good'old
583 * f80 does have that property ..
584 */
585 WARN_ON(ireg == NULL);
1da177e4
LT
586 if (ireg) {
587 /*
588 * set node starting index for this node
589 */
0ebfff14 590 *indx = *ireg;
1da177e4 591 }
954a46e2 592 ireg = get_property(np, "reg", &ilen);
1da177e4
LT
593 if (!ireg)
594 panic("xics_init_IRQ: can't find interrupt reg property");
007e8f51 595
0ebfff14
BH
596 while (ilen >= (4 * sizeof(u32))) {
597 unsigned long addr, size;
598
599 /* XXX Use proper OF parsing code here !!! */
600 addr = (unsigned long)*ireg++ << 32;
601 ilen -= sizeof(u32);
602 addr |= *ireg++;
603 ilen -= sizeof(u32);
604 size = (unsigned long)*ireg++ << 32;
605 ilen -= sizeof(u32);
606 size |= *ireg++;
607 ilen -= sizeof(u32);
608 xics_map_one_cpu(*indx, addr, size);
609 (*indx)++;
610 }
611}
612
613
614static void __init xics_setup_8259_cascade(void)
615{
616 struct device_node *np, *old, *found = NULL;
617 int cascade, naddr;
954a46e2 618 const u32 *addrp;
0ebfff14
BH
619 unsigned long intack = 0;
620
621 for_each_node_by_type(np, "interrupt-controller")
622 if (device_is_compatible(np, "chrp,iic")) {
623 found = np;
624 break;
625 }
626 if (found == NULL) {
627 printk(KERN_DEBUG "xics: no ISA interrupt controller\n");
628 return;
1da177e4 629 }
0ebfff14
BH
630 cascade = irq_of_parse_and_map(found, 0);
631 if (cascade == NO_IRQ) {
632 printk(KERN_ERR "xics: failed to map cascade interrupt");
633 return;
634 }
635 pr_debug("xics: cascade mapped to irq %d\n", cascade);
636
637 for (old = of_node_get(found); old != NULL ; old = np) {
638 np = of_get_parent(old);
639 of_node_put(old);
640 if (np == NULL)
641 break;
642 if (strcmp(np->name, "pci") != 0)
643 continue;
954a46e2 644 addrp = get_property(np, "8259-interrupt-acknowledge", NULL);
0ebfff14
BH
645 if (addrp == NULL)
646 continue;
647 naddr = prom_n_addr_cells(np);
648 intack = addrp[naddr-1];
649 if (naddr > 1)
650 intack |= ((unsigned long)addrp[naddr-2]) << 32;
651 }
652 if (intack)
653 printk(KERN_DEBUG "xics: PCI 8259 intack at 0x%016lx\n", intack);
654 i8259_init(found, intack);
655 of_node_put(found);
656 set_irq_chained_handler(cascade, pseries_8259_cascade);
657}
1da177e4 658
0ebfff14
BH
659void __init xics_init_IRQ(void)
660{
661 int i;
662 struct device_node *np;
954a46e2
JK
663 u32 ilen, indx = 0;
664 const u32 *ireg;
0ebfff14
BH
665 int found = 0;
666
667 ppc64_boot_msg(0x20, "XICS Init");
668
669 ibm_get_xive = rtas_token("ibm,get-xive");
670 ibm_set_xive = rtas_token("ibm,set-xive");
671 ibm_int_on = rtas_token("ibm,int-on");
672 ibm_int_off = rtas_token("ibm,int-off");
673
674 for_each_node_by_type(np, "PowerPC-External-Interrupt-Presentation") {
675 found = 1;
676 if (firmware_has_feature(FW_FEATURE_LPAR))
677 break;
678 xics_init_one_node(np, &indx);
679 }
680 if (found == 0)
681 return;
682
683 xics_init_host();
1da177e4
LT
684
685 /* Find the server numbers for the boot cpu. */
686 for (np = of_find_node_by_type(NULL, "cpu");
687 np;
688 np = of_find_node_by_type(np, "cpu")) {
954a46e2 689 ireg = get_property(np, "reg", &ilen);
4df20460 690 if (ireg && ireg[0] == get_hard_smp_processor_id(boot_cpuid)) {
954a46e2
JK
691 ireg = get_property(np,
692 "ibm,ppc-interrupt-gserver#s", &ilen);
1da177e4
LT
693 i = ilen / sizeof(int);
694 if (ireg && i > 0) {
695 default_server = ireg[0];
0ebfff14
BH
696 /* take last element */
697 default_distrib_server = ireg[i-1];
1da177e4 698 }
954a46e2 699 ireg = get_property(np,
1da177e4
LT
700 "ibm,interrupt-server#-size", NULL);
701 if (ireg)
702 interrupt_server_size = *ireg;
703 break;
704 }
705 }
706 of_node_put(np);
707
0ebfff14
BH
708 if (firmware_has_feature(FW_FEATURE_LPAR))
709 ppc_md.get_irq = xics_get_irq_lpar;
710 else
b9e5b4e6 711 ppc_md.get_irq = xics_get_irq_direct;
1da177e4 712
6c80a21c 713 xics_setup_cpu();
1da177e4 714
0ebfff14 715 xics_setup_8259_cascade();
1da177e4 716
0ebfff14 717 ppc64_boot_msg(0x21, "XICS Done");
1da177e4 718}
b9e5b4e6 719
1da177e4
LT
720
#ifdef CONFIG_SMP
/*
 * Map the single XICS IPI vector and request its handler.
 *
 * Fix: the return value of request_irq() was silently ignored; a failed
 * IPI registration is fatal for SMP operation, so BUG on it.
 */
void xics_request_IPIs(void)
{
	unsigned int ipi;
	int rc;

	ipi = irq_create_mapping(xics_host, XICS_IPI);
	BUG_ON(ipi == NO_IRQ);

	/*
	 * IPIs are marked IRQF_DISABLED as they must run with irqs
	 * disabled
	 */
	set_irq_handler(ipi, handle_percpu_irq);
	if (firmware_has_feature(FW_FEATURE_LPAR))
		rc = request_irq(ipi, xics_ipi_action_lpar, IRQF_DISABLED,
				 "IPI", NULL);
	else
		rc = request_irq(ipi, xics_ipi_action_direct, IRQF_DISABLED,
				 "IPI", NULL);
	BUG_ON(rc);
}
#endif /* CONFIG_SMP */
1da177e4 742
6d22d85a 743void xics_teardown_cpu(int secondary)
fce0d574
S
744{
745 int cpu = smp_processor_id();
0ebfff14
BH
746 unsigned int ipi;
747 struct irq_desc *desc;
fce0d574 748
0ebfff14 749 xics_set_cpu_priority(cpu, 0);
81bbbe92 750
6e99e458
BH
751 /*
752 * Clear IPI
753 */
754 if (firmware_has_feature(FW_FEATURE_LPAR))
755 lpar_qirr_info(cpu, 0xff);
756 else
757 direct_qirr_info(cpu, 0xff);
758
81bbbe92
HM
759 /*
760 * we need to EOI the IPI if we got here from kexec down IPI
761 *
762 * probably need to check all the other interrupts too
763 * should we be flagging idle loop instead?
764 * or creating some task to be scheduled?
765 */
0ebfff14
BH
766
767 ipi = irq_find_mapping(xics_host, XICS_IPI);
768 if (ipi == XICS_IRQ_SPURIOUS)
769 return;
770 desc = get_irq_desc(ipi);
b9e5b4e6 771 if (desc->chip && desc->chip->eoi)
6e99e458 772 desc->chip->eoi(ipi);
81bbbe92 773
fce0d574 774 /*
6d22d85a
PM
775 * Some machines need to have at least one cpu in the GIQ,
776 * so leave the master cpu in the group.
fce0d574 777 */
81bbbe92 778 if (secondary)
81b73dd9 779 rtas_set_indicator_fast(GLOBAL_INTERRUPT_QUEUE,
0ebfff14
BH
780 (1UL << interrupt_server_size) - 1 -
781 default_distrib_server, 0);
fce0d574
S
782}
783
1da177e4
LT
#ifdef CONFIG_HOTPLUG_CPU

/* Interrupts are disabled. */
/*
 * CPU-offline helper: pull every single-cpu-targeted irq off this cpu
 * by resetting its affinity to all cpus.
 *
 * Fixes:
 *  - irq_desc[] was indexed with the *hardware* irq number
 *    (irq_desc[irq].affinity) instead of the virq — out-of-sync with the
 *    set_affinity(virq, ...) call on the line above it; index by virq.
 *  - the ibm,get-xive error printk labelled the printed value "irq=" but
 *    passed virq; label it virq= so the message matches the argument.
 */
void xics_migrate_irqs_away(void)
{
	int status;
	unsigned int irq, virq, cpu = smp_processor_id();

	/* Reject any interrupt that was queued to us... */
	xics_set_cpu_priority(cpu, 0);

	/* remove ourselves from the global interrupt queue */
	status = rtas_set_indicator_fast(GLOBAL_INTERRUPT_QUEUE,
		(1UL << interrupt_server_size) - 1 - default_distrib_server, 0);
	WARN_ON(status < 0);

	/* Allow IPIs again... */
	xics_set_cpu_priority(cpu, DEFAULT_PRIORITY);

	for_each_irq(virq) {
		struct irq_desc *desc;
		int xics_status[2];
		unsigned long flags;

		/* We cant set affinity on ISA interrupts */
		if (virq < NUM_ISA_INTERRUPTS)
			continue;
		if (irq_map[virq].host != xics_host)
			continue;
		irq = (unsigned int)irq_map[virq].hwirq;
		/* We need to get IPIs still. */
		if (irq == XICS_IPI || irq == XICS_IRQ_SPURIOUS)
			continue;
		desc = get_irq_desc(virq);

		/* We only need to migrate enabled IRQS */
		if (desc == NULL || desc->chip == NULL
		    || desc->action == NULL
		    || desc->chip->set_affinity == NULL)
			continue;

		spin_lock_irqsave(&desc->lock, flags);

		status = rtas_call(ibm_get_xive, 1, 3, xics_status, irq);
		if (status) {
			printk(KERN_ERR "migrate_irqs_away: virq=%u "
			       "ibm,get-xive returns %d\n",
			       virq, status);
			goto unlock;
		}

		/*
		 * We only support delivery to all cpus or to one cpu.
		 * The irq has to be migrated only in the single cpu
		 * case.
		 */
		if (xics_status[0] != get_hard_smp_processor_id(cpu))
			goto unlock;

		printk(KERN_WARNING "IRQ %u affinity broken off cpu %u\n",
		       virq, cpu);

		/* Reset affinity to all cpus */
		desc->chip->set_affinity(virq, CPU_MASK_ALL);
		irq_desc[virq].affinity = CPU_MASK_ALL;
unlock:
		spin_unlock_irqrestore(&desc->lock, flags);
	}
}
#endif