/*
 * arch/powerpc/sysdev/qe_lib/qe_ic.c
 *
 * Copyright (C) 2006 Freescale Semiconductor, Inc. All rights reserved.
 *
 * Author: Li Yang <leoli@freescale.com>
 * Based on code from Shlomi Gridish <gridish@freescale.com>
 *
 * QUICC ENGINE Interrupt Controller
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the
 * Free Software Foundation; either version 2 of the License, or (at your
 * option) any later version.
 */
17 #include <linux/kernel.h>
18 #include <linux/init.h>
19 #include <linux/errno.h>
20 #include <linux/reboot.h>
21 #include <linux/slab.h>
22 #include <linux/stddef.h>
23 #include <linux/sched.h>
24 #include <linux/signal.h>
25 #include <linux/sysdev.h>
26 #include <linux/device.h>
27 #include <linux/bootmem.h>
28 #include <linux/spinlock.h>
32 #include <asm/qe_ic.h>
/* Serializes read-modify-write access to the QE IC mask registers. */
static DEFINE_SPINLOCK(qe_ic_lock);
38 static struct qe_ic_info qe_ic_info
[] = {
41 .mask_reg
= QEIC_CIMR
,
43 .pri_reg
= QEIC_CIPWCC
,
47 .mask_reg
= QEIC_CIMR
,
49 .pri_reg
= QEIC_CIPWCC
,
53 .mask_reg
= QEIC_CIMR
,
55 .pri_reg
= QEIC_CIPWCC
,
59 .mask_reg
= QEIC_CIMR
,
61 .pri_reg
= QEIC_CIPZCC
,
65 .mask_reg
= QEIC_CIMR
,
67 .pri_reg
= QEIC_CIPZCC
,
71 .mask_reg
= QEIC_CIMR
,
73 .pri_reg
= QEIC_CIPZCC
,
77 .mask_reg
= QEIC_CIMR
,
79 .pri_reg
= QEIC_CIPZCC
,
83 .mask_reg
= QEIC_CIMR
,
85 .pri_reg
= QEIC_CIPZCC
,
89 .mask_reg
= QEIC_CIMR
,
91 .pri_reg
= QEIC_CIPZCC
,
95 .mask_reg
= QEIC_CRIMR
,
97 .pri_reg
= QEIC_CIPRTA
,
101 .mask_reg
= QEIC_CRIMR
,
103 .pri_reg
= QEIC_CIPRTB
,
107 .mask_reg
= QEIC_CRIMR
,
109 .pri_reg
= QEIC_CIPRTB
,
113 .mask_reg
= QEIC_CRIMR
,
115 .pri_reg
= QEIC_CIPRTB
,
119 .mask_reg
= QEIC_CRIMR
,
121 .pri_reg
= QEIC_CIPRTB
,
125 .mask_reg
= QEIC_CIMR
,
127 .pri_reg
= QEIC_CIPXCC
,
131 .mask_reg
= QEIC_CIMR
,
133 .pri_reg
= QEIC_CIPXCC
,
137 .mask_reg
= QEIC_CIMR
,
139 .pri_reg
= QEIC_CIPXCC
,
143 .mask_reg
= QEIC_CIMR
,
145 .pri_reg
= QEIC_CIPXCC
,
149 .mask_reg
= QEIC_CIMR
,
151 .pri_reg
= QEIC_CIPXCC
,
155 .mask_reg
= QEIC_CIMR
,
157 .pri_reg
= QEIC_CIPYCC
,
161 .mask_reg
= QEIC_CIMR
,
163 .pri_reg
= QEIC_CIPYCC
,
167 .mask_reg
= QEIC_CIMR
,
169 .pri_reg
= QEIC_CIPYCC
,
173 .mask_reg
= QEIC_CIMR
,
175 .pri_reg
= QEIC_CIPYCC
,
179 static inline u32
qe_ic_read(volatile __be32 __iomem
* base
, unsigned int reg
)
181 return in_be32(base
+ (reg
>> 2));
184 static inline void qe_ic_write(volatile __be32 __iomem
* base
, unsigned int reg
,
187 out_be32(base
+ (reg
>> 2), value
);
190 static inline struct qe_ic
*qe_ic_from_irq(unsigned int virq
)
192 return irq_desc
[virq
].chip_data
;
/* Translate a linux virtual irq number to the QE IC hardware source number. */
#define virq_to_hw(virq)	((unsigned int)irq_map[virq].hwirq)
197 static void qe_ic_unmask_irq(unsigned int virq
)
199 struct qe_ic
*qe_ic
= qe_ic_from_irq(virq
);
200 unsigned int src
= virq_to_hw(virq
);
204 spin_lock_irqsave(&qe_ic_lock
, flags
);
206 temp
= qe_ic_read(qe_ic
->regs
, qe_ic_info
[src
].mask_reg
);
207 qe_ic_write(qe_ic
->regs
, qe_ic_info
[src
].mask_reg
,
208 temp
| qe_ic_info
[src
].mask
);
210 spin_unlock_irqrestore(&qe_ic_lock
, flags
);
213 static void qe_ic_mask_irq(unsigned int virq
)
215 struct qe_ic
*qe_ic
= qe_ic_from_irq(virq
);
216 unsigned int src
= virq_to_hw(virq
);
220 spin_lock_irqsave(&qe_ic_lock
, flags
);
222 temp
= qe_ic_read(qe_ic
->regs
, qe_ic_info
[src
].mask_reg
);
223 qe_ic_write(qe_ic
->regs
, qe_ic_info
[src
].mask_reg
,
224 temp
& ~qe_ic_info
[src
].mask
);
226 spin_unlock_irqrestore(&qe_ic_lock
, flags
);
229 static void qe_ic_mask_irq_and_ack(unsigned int virq
)
231 struct qe_ic
*qe_ic
= qe_ic_from_irq(virq
);
232 unsigned int src
= virq_to_hw(virq
);
236 spin_lock_irqsave(&qe_ic_lock
, flags
);
238 temp
= qe_ic_read(qe_ic
->regs
, qe_ic_info
[src
].mask_reg
);
239 qe_ic_write(qe_ic
->regs
, qe_ic_info
[src
].mask_reg
,
240 temp
& ~qe_ic_info
[src
].mask
);
242 /* There is nothing to do for ack here, ack is handled in ISR */
244 spin_unlock_irqrestore(&qe_ic_lock
, flags
);
247 static struct irq_chip qe_ic_irq_chip
= {
248 .typename
= " QEIC ",
249 .unmask
= qe_ic_unmask_irq
,
250 .mask
= qe_ic_mask_irq
,
251 .mask_ack
= qe_ic_mask_irq_and_ack
,
254 static int qe_ic_host_match(struct irq_host
*h
, struct device_node
*node
)
256 struct qe_ic
*qe_ic
= h
->host_data
;
258 /* Exact match, unless qe_ic node is NULL */
259 return qe_ic
->of_node
== NULL
|| qe_ic
->of_node
== node
;
262 static int qe_ic_host_map(struct irq_host
*h
, unsigned int virq
,
265 struct qe_ic
*qe_ic
= h
->host_data
;
266 struct irq_chip
*chip
;
268 if (qe_ic_info
[hw
].mask
== 0) {
269 printk(KERN_ERR
"Can't map reserved IRQ \n");
273 chip
= &qe_ic
->hc_irq
;
275 set_irq_chip_data(virq
, qe_ic
);
276 get_irq_desc(virq
)->status
|= IRQ_LEVEL
;
278 set_irq_chip_and_handler(virq
, chip
, handle_level_irq
);
283 static int qe_ic_host_xlate(struct irq_host
*h
, struct device_node
*ct
,
284 u32
* intspec
, unsigned int intsize
,
285 irq_hw_number_t
* out_hwirq
,
286 unsigned int *out_flags
)
288 *out_hwirq
= intspec
[0];
290 *out_flags
= intspec
[1];
292 *out_flags
= IRQ_TYPE_NONE
;
296 static struct irq_host_ops qe_ic_host_ops
= {
297 .match
= qe_ic_host_match
,
298 .map
= qe_ic_host_map
,
299 .xlate
= qe_ic_host_xlate
,
302 /* Return an interrupt vector or NO_IRQ if no interrupt is pending. */
303 unsigned int qe_ic_get_low_irq(struct qe_ic
*qe_ic
, struct pt_regs
*regs
)
307 BUG_ON(qe_ic
== NULL
);
309 /* get the interrupt source vector. */
310 irq
= qe_ic_read(qe_ic
->regs
, QEIC_CIVEC
) >> 26;
315 return irq_linear_revmap(qe_ic
->irqhost
, irq
);
318 /* Return an interrupt vector or NO_IRQ if no interrupt is pending. */
319 unsigned int qe_ic_get_high_irq(struct qe_ic
*qe_ic
, struct pt_regs
*regs
)
323 BUG_ON(qe_ic
== NULL
);
325 /* get the interrupt source vector. */
326 irq
= qe_ic_read(qe_ic
->regs
, QEIC_CHIVEC
) >> 26;
331 return irq_linear_revmap(qe_ic
->irqhost
, irq
);
334 /* FIXME: We mask all the QE Low interrupts while handling. We should
335 * let other interrupt come in, but BAD interrupts are generated */
336 void fastcall
qe_ic_cascade_low(unsigned int irq
, struct irq_desc
*desc
,
337 struct pt_regs
*regs
)
339 struct qe_ic
*qe_ic
= desc
->handler_data
;
340 struct irq_chip
*chip
= irq_desc
[irq
].chip
;
342 unsigned int cascade_irq
= qe_ic_get_low_irq(qe_ic
, regs
);
345 if (cascade_irq
!= NO_IRQ
)
346 generic_handle_irq(cascade_irq
, regs
);
350 /* FIXME: We mask all the QE High interrupts while handling. We should
351 * let other interrupt come in, but BAD interrupts are generated */
352 void fastcall
qe_ic_cascade_high(unsigned int irq
, struct irq_desc
*desc
,
353 struct pt_regs
*regs
)
355 struct qe_ic
*qe_ic
= desc
->handler_data
;
356 struct irq_chip
*chip
= irq_desc
[irq
].chip
;
358 unsigned int cascade_irq
= qe_ic_get_high_irq(qe_ic
, regs
);
361 if (cascade_irq
!= NO_IRQ
)
362 generic_handle_irq(cascade_irq
, regs
);
366 void __init
qe_ic_init(struct device_node
*node
, unsigned int flags
)
370 u32 temp
= 0, ret
, high_active
= 0;
372 qe_ic
= alloc_bootmem(sizeof(struct qe_ic
));
376 memset(qe_ic
, 0, sizeof(struct qe_ic
));
377 qe_ic
->of_node
= node
? of_node_get(node
) : NULL
;
379 qe_ic
->irqhost
= irq_alloc_host(IRQ_HOST_MAP_LINEAR
,
380 NR_QE_IC_INTS
, &qe_ic_host_ops
, 0);
381 if (qe_ic
->irqhost
== NULL
) {
386 ret
= of_address_to_resource(node
, 0, &res
);
390 qe_ic
->regs
= ioremap(res
.start
, res
.end
- res
.start
+ 1);
392 qe_ic
->irqhost
->host_data
= qe_ic
;
393 qe_ic
->hc_irq
= qe_ic_irq_chip
;
395 qe_ic
->virq_high
= irq_of_parse_and_map(node
, 0);
396 qe_ic
->virq_low
= irq_of_parse_and_map(node
, 1);
398 if (qe_ic
->virq_low
== NO_IRQ
) {
399 printk(KERN_ERR
"Failed to map QE_IC low IRQ\n");
403 /* default priority scheme is grouped. If spread mode is */
404 /* required, configure cicr accordingly. */
405 if (flags
& QE_IC_SPREADMODE_GRP_W
)
407 if (flags
& QE_IC_SPREADMODE_GRP_X
)
409 if (flags
& QE_IC_SPREADMODE_GRP_Y
)
411 if (flags
& QE_IC_SPREADMODE_GRP_Z
)
413 if (flags
& QE_IC_SPREADMODE_GRP_RISCA
)
415 if (flags
& QE_IC_SPREADMODE_GRP_RISCB
)
418 /* choose destination signal for highest priority interrupt */
419 if (flags
& QE_IC_HIGH_SIGNAL
) {
420 temp
|= (SIGNAL_HIGH
<< CICR_HPIT_SHIFT
);
424 qe_ic_write(qe_ic
->regs
, QEIC_CICR
, temp
);
426 set_irq_data(qe_ic
->virq_low
, qe_ic
);
427 set_irq_chained_handler(qe_ic
->virq_low
, qe_ic_cascade_low
);
429 if (qe_ic
->virq_high
!= NO_IRQ
) {
430 set_irq_data(qe_ic
->virq_high
, qe_ic
);
431 set_irq_chained_handler(qe_ic
->virq_high
, qe_ic_cascade_high
);
434 printk("QEIC (%d IRQ sources) at %p\n", NR_QE_IC_INTS
, qe_ic
->regs
);
437 void qe_ic_set_highest_priority(unsigned int virq
, int high
)
439 struct qe_ic
*qe_ic
= qe_ic_from_irq(virq
);
440 unsigned int src
= virq_to_hw(virq
);
443 temp
= qe_ic_read(qe_ic
->regs
, QEIC_CICR
);
445 temp
&= ~CICR_HP_MASK
;
446 temp
|= src
<< CICR_HP_SHIFT
;
448 temp
&= ~CICR_HPIT_MASK
;
449 temp
|= (high
? SIGNAL_HIGH
: SIGNAL_LOW
) << CICR_HPIT_SHIFT
;
451 qe_ic_write(qe_ic
->regs
, QEIC_CICR
, temp
);
454 /* Set Priority level within its group, from 1 to 8 */
455 int qe_ic_set_priority(unsigned int virq
, unsigned int priority
)
457 struct qe_ic
*qe_ic
= qe_ic_from_irq(virq
);
458 unsigned int src
= virq_to_hw(virq
);
461 if (priority
> 8 || priority
== 0)
465 if (qe_ic_info
[src
].pri_reg
== 0)
468 temp
= qe_ic_read(qe_ic
->regs
, qe_ic_info
[src
].pri_reg
);
471 temp
&= ~(0x7 << (32 - priority
* 3));
472 temp
|= qe_ic_info
[src
].pri_code
<< (32 - priority
* 3);
474 temp
&= ~(0x7 << (24 - priority
* 3));
475 temp
|= qe_ic_info
[src
].pri_code
<< (24 - priority
* 3);
478 qe_ic_write(qe_ic
->regs
, qe_ic_info
[src
].pri_reg
, temp
);
483 /* Set a QE priority to use high irq, only priority 1~2 can use high irq */
484 int qe_ic_set_high_priority(unsigned int virq
, unsigned int priority
, int high
)
486 struct qe_ic
*qe_ic
= qe_ic_from_irq(virq
);
487 unsigned int src
= virq_to_hw(virq
);
488 u32 temp
, control_reg
= QEIC_CICNR
, shift
= 0;
490 if (priority
> 2 || priority
== 0)
493 switch (qe_ic_info
[src
].pri_reg
) {
495 shift
= CICNR_ZCC1T_SHIFT
;
498 shift
= CICNR_WCC1T_SHIFT
;
501 shift
= CICNR_YCC1T_SHIFT
;
504 shift
= CICNR_XCC1T_SHIFT
;
507 shift
= CRICR_RTA1T_SHIFT
;
508 control_reg
= QEIC_CRICR
;
511 shift
= CRICR_RTB1T_SHIFT
;
512 control_reg
= QEIC_CRICR
;
518 shift
+= (2 - priority
) * 2;
519 temp
= qe_ic_read(qe_ic
->regs
, control_reg
);
520 temp
&= ~(SIGNAL_MASK
<< shift
);
521 temp
|= (high
? SIGNAL_HIGH
: SIGNAL_LOW
) << shift
;
522 qe_ic_write(qe_ic
->regs
, control_reg
, temp
);
527 static struct sysdev_class qe_ic_sysclass
= {
528 set_kset_name("qe_ic"),
531 static struct sys_device device_qe_ic
= {
533 .cls
= &qe_ic_sysclass
,
536 static int __init
init_qe_ic_sysfs(void)
540 printk(KERN_DEBUG
"Registering qe_ic with sysfs...\n");
542 rc
= sysdev_class_register(&qe_ic_sysclass
);
544 printk(KERN_ERR
"Failed registering qe_ic sys class\n");
547 rc
= sysdev_register(&device_qe_ic
);
549 printk(KERN_ERR
"Failed registering qe_ic sys device\n");
555 subsys_initcall(init_qe_ic_sysfs
);