]>
Commit | Line | Data |
---|---|---|
0b05ac6e BH |
1 | /* |
2 | * Copyright 2011 IBM Corporation. | |
3 | * | |
4 | * This program is free software; you can redistribute it and/or | |
5 | * modify it under the terms of the GNU General Public License | |
6 | * as published by the Free Software Foundation; either version | |
7 | * 2 of the License, or (at your option) any later version. | |
8 | * | |
9 | */ | |
10 | #include <linux/types.h> | |
11 | #include <linux/kernel.h> | |
12 | #include <linux/irq.h> | |
13 | #include <linux/smp.h> | |
14 | #include <linux/interrupt.h> | |
0b05ac6e BH |
15 | #include <linux/cpu.h> |
16 | #include <linux/of.h> | |
17 | ||
18 | #include <asm/smp.h> | |
19 | #include <asm/irq.h> | |
20 | #include <asm/errno.h> | |
21 | #include <asm/xics.h> | |
22 | #include <asm/io.h> | |
23 | #include <asm/hvcall.h> | |
24 | ||
25 | static inline unsigned int icp_hv_get_xirr(unsigned char cppr) | |
26 | { | |
27 | unsigned long retbuf[PLPAR_HCALL_BUFSIZE]; | |
28 | long rc; | |
3ce21cdf | 29 | unsigned int ret = XICS_IRQ_SPURIOUS; |
0b05ac6e BH |
30 | |
31 | rc = plpar_hcall(H_XIRR, retbuf, cppr); | |
3ce21cdf AB |
32 | if (rc == H_SUCCESS) { |
33 | ret = (unsigned int)retbuf[0]; | |
34 | } else { | |
35 | pr_err("%s: bad return code xirr cppr=0x%x returned %ld\n", | |
36 | __func__, cppr, rc); | |
37 | WARN_ON_ONCE(1); | |
38 | } | |
39 | ||
40 | return ret; | |
0b05ac6e BH |
41 | } |
42 | ||
90e8f57c | 43 | static inline void icp_hv_set_cppr(u8 value) |
0b05ac6e | 44 | { |
90e8f57c | 45 | long rc = plpar_hcall_norets(H_CPPR, value); |
3ce21cdf | 46 | if (rc != H_SUCCESS) { |
90e8f57c | 47 | pr_err("%s: bad return code cppr cppr=0x%x returned %ld\n", |
3ce21cdf AB |
48 | __func__, value, rc); |
49 | WARN_ON_ONCE(1); | |
50 | } | |
0b05ac6e BH |
51 | } |
52 | ||
/*
 * EOI an interrupt by writing @value (new CPPR in the top byte, vector in
 * the low 24 bits) back via the H_EOI hcall.
 */
static inline void icp_hv_set_xirr(unsigned int value)
{
	long rc = plpar_hcall_norets(H_EOI, value);
	if (rc != H_SUCCESS) {
		pr_err("%s: bad return code eoi xirr=0x%x returned %ld\n",
			__func__, value, rc);
		WARN_ON_ONCE(1);
		/*
		 * The EOI failed, so at least try to restore the CPPR
		 * encoded in the top byte of the XIRR word.
		 */
		icp_hv_set_cppr(value >> 24);
	}
}
63 | ||
64 | static inline void icp_hv_set_qirr(int n_cpu , u8 value) | |
65 | { | |
3ce21cdf | 66 | int hw_cpu = get_hard_smp_processor_id(n_cpu); |
9fb1b36c PM |
67 | long rc; |
68 | ||
69 | /* Make sure all previous accesses are ordered before IPI sending */ | |
70 | mb(); | |
71 | rc = plpar_hcall_norets(H_IPI, hw_cpu, value); | |
3ce21cdf AB |
72 | if (rc != H_SUCCESS) { |
73 | pr_err("%s: bad return code qirr cpu=%d hw_cpu=%d mfrr=0x%x " | |
74 | "returned %ld\n", __func__, n_cpu, hw_cpu, value, rc); | |
75 | WARN_ON_ONCE(1); | |
76 | } | |
0b05ac6e BH |
77 | } |
78 | ||
/*
 * irq_chip EOI hook: pop the saved pre-interrupt CPPR and signal
 * end-of-interrupt for this source to the hypervisor.
 */
static void icp_hv_eoi(struct irq_data *d)
{
	unsigned int vec = (unsigned int)irqd_to_hwirq(d);
	unsigned int xirr;

	iosync();
	xirr = (xics_pop_cppr() << 24) | vec;
	icp_hv_set_xirr(xirr);
}
86 | ||
/* Tear down the local CPU's presentation state before offlining it. */
static void icp_hv_teardown_cpu(void)
{
	/* Clear any pending IPI by resetting this CPU's MFRR to 0xff */
	icp_hv_set_qirr(smp_processor_id(), 0xff);
}
94 | ||
95 | static void icp_hv_flush_ipi(void) | |
96 | { | |
97 | /* We take the ipi irq but and never return so we | |
98 | * need to EOI the IPI, but want to leave our priority 0 | |
99 | * | |
100 | * should we check all the other interrupts too? | |
101 | * should we be flagging idle loop instead? | |
102 | * or creating some task to be scheduled? | |
103 | */ | |
104 | ||
105 | icp_hv_set_xirr((0x00 << 24) | XICS_IPI); | |
106 | } | |
107 | ||
108 | static unsigned int icp_hv_get_irq(void) | |
109 | { | |
110 | unsigned int xirr = icp_hv_get_xirr(xics_cppr_top()); | |
111 | unsigned int vec = xirr & 0x00ffffff; | |
112 | unsigned int irq; | |
113 | ||
114 | if (vec == XICS_IRQ_SPURIOUS) | |
ef24ba70 | 115 | return 0; |
0b05ac6e | 116 | |
d6b0d1f7 | 117 | irq = irq_find_mapping(xics_host, vec); |
ef24ba70 | 118 | if (likely(irq)) { |
0b05ac6e BH |
119 | xics_push_cppr(vec); |
120 | return irq; | |
121 | } | |
122 | ||
123 | /* We don't have a linux mapping, so have rtas mask it. */ | |
124 | xics_mask_unknown_vec(vec); | |
125 | ||
126 | /* We might learn about it later, so EOI it */ | |
127 | icp_hv_set_xirr(xirr); | |
128 | ||
ef24ba70 | 129 | return 0; |
0b05ac6e BH |
130 | } |
131 | ||
/*
 * Set this CPU's base interrupt priority: record it in the common XICS
 * state first, then program the hardware CPPR and flush with iosync().
 */
static void icp_hv_set_cpu_priority(unsigned char cppr)
{
	xics_set_base_cppr(cppr);
	icp_hv_set_cppr(cppr);
	iosync();
}
138 | ||
139 | #ifdef CONFIG_SMP | |
140 | ||
/* Raise an IPI on @cpu at IPI_PRIORITY; @data is unused by this backend. */
static void icp_hv_cause_ipi(int cpu, unsigned long data)
{
	icp_hv_set_qirr(cpu, IPI_PRIORITY);
}
145 | ||
0b05ac6e BH |
146 | static irqreturn_t icp_hv_ipi_action(int irq, void *dev_id) |
147 | { | |
148 | int cpu = smp_processor_id(); | |
149 | ||
150 | icp_hv_set_qirr(cpu, 0xff); | |
151 | ||
23d72bfd | 152 | return smp_ipi_demux(); |
0b05ac6e BH |
153 | } |
154 | ||
155 | #endif /* CONFIG_SMP */ | |
156 | ||
/* Interrupt presentation controller ops backed by PAPR hcalls. */
static const struct icp_ops icp_hv_ops = {
	.get_irq = icp_hv_get_irq,
	.eoi = icp_hv_eoi,
	.set_priority = icp_hv_set_cpu_priority,
	.teardown_cpu = icp_hv_teardown_cpu,
	.flush_ipi = icp_hv_flush_ipi,
#ifdef CONFIG_SMP
	.ipi_action = icp_hv_ipi_action,
	.cause_ipi = icp_hv_cause_ipi,
#endif
};
168 | ||
169 | int icp_hv_init(void) | |
170 | { | |
171 | struct device_node *np; | |
172 | ||
173 | np = of_find_compatible_node(NULL, NULL, "ibm,ppc-xicp"); | |
174 | if (!np) | |
175 | np = of_find_node_by_type(NULL, | |
176 | "PowerPC-External-Interrupt-Presentation"); | |
177 | if (!np) | |
178 | return -ENODEV; | |
179 | ||
180 | icp_ops = &icp_hv_ops; | |
181 | ||
182 | return 0; | |
183 | } | |
184 |