// SPDX-License-Identifier: GPL-2.0
/* irq.c: UltraSparc IRQ handling/init/registry.
 *
 * Copyright (C) 1997, 2007, 2008 David S. Miller (davem@davemloft.net)
 * Copyright (C) 1998 Eddie C. Dost (ecd@skynet.be)
 * Copyright (C) 1998 Jakub Jelinek (jj@ultra.linux.cz)
 */

#include <linux/sched.h>
#include <linux/linkage.h>
#include <linux/ptrace.h>
#include <linux/errno.h>
#include <linux/kernel_stat.h>
#include <linux/signal.h>
#include <linux/mm.h>
#include <linux/interrupt.h>
#include <linux/slab.h>
#include <linux/random.h>
#include <linux/init.h>
#include <linux/delay.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/ftrace.h>
#include <linux/irq.h>
#include <linux/kmemleak.h>

#include <asm/ptrace.h>
#include <asm/processor.h>
#include <linux/atomic.h>
#include <asm/irq.h>
#include <asm/io.h>
#include <asm/iommu.h>
#include <asm/upa.h>
#include <asm/oplib.h>
#include <asm/prom.h>
#include <asm/timer.h>
#include <asm/smp.h>
#include <asm/starfire.h>
#include <linux/uaccess.h>
#include <asm/cache.h>
#include <asm/cpudata.h>
#include <asm/auxio.h>
#include <asm/head.h>
#include <asm/hypervisor.h>
#include <asm/cacheflush.h>

#include "entry.h"
#include "cpumap.h"
#include "kstack.h"

struct ino_bucket *ivector_table;
unsigned long ivector_table_pa;

/* On several sun4u processors, it is illegal to mix bypass and
 * non-bypass accesses.  Therefore we access all INO buckets
 * using bypass accesses only.
 */
static unsigned long bucket_get_chain_pa(unsigned long bucket_pa)
{
	unsigned long ret;

	__asm__ __volatile__("ldxa [%1] %2, %0"
			     : "=&r" (ret)
			     : "r" (bucket_pa +
				    offsetof(struct ino_bucket,
					     __irq_chain_pa)),
			       "i" (ASI_PHYS_USE_EC));

	return ret;
}

static void bucket_clear_chain_pa(unsigned long bucket_pa)
{
	__asm__ __volatile__("stxa %%g0, [%0] %1"
			     : /* no outputs */
			     : "r" (bucket_pa +
				    offsetof(struct ino_bucket,
					     __irq_chain_pa)),
			       "i" (ASI_PHYS_USE_EC));
}

static unsigned int bucket_get_irq(unsigned long bucket_pa)
{
	unsigned int ret;

	__asm__ __volatile__("lduwa [%1] %2, %0"
			     : "=&r" (ret)
			     : "r" (bucket_pa +
				    offsetof(struct ino_bucket,
					     __irq)),
			       "i" (ASI_PHYS_USE_EC));

	return ret;
}

static void bucket_set_irq(unsigned long bucket_pa, unsigned int irq)
{
	__asm__ __volatile__("stwa %0, [%1] %2"
			     : /* no outputs */
			     : "r" (irq),
			       "r" (bucket_pa +
				    offsetof(struct ino_bucket,
					     __irq)),
			       "i" (ASI_PHYS_USE_EC));
}

#define irq_work_pa(__cpu)	&(trap_block[(__cpu)].irq_worklist_pa)

static unsigned long hvirq_major __initdata;
static int __init early_hvirq_major(char *p)
{
	int rc = kstrtoul(p, 10, &hvirq_major);

	return rc;
}
early_param("hvirq", early_hvirq_major);

static int hv_irq_version;

/* Major version 2.0 of HV_GRP_INTR added support for the VIRQ cookie
 * based interfaces, but:
 *
 * 1) Several OSs, Solaris and Linux included, use them even when only
 *    negotiating version 1.0 (or failing to negotiate at all).  So the
 *    hypervisor has a workaround that provides the VIRQ interfaces even
 *    when only version 1.0 of the API is in use.
 *
 * 2) Second, and more importantly, with major version 2.0 these VIRQ
 *    interfaces were only actually hooked up for LDC interrupts, even
 *    though the Hypervisor specification clearly stated:
 *
 *	The new interrupt API functions will be available to a guest
 *	when it negotiates version 2.0 in the interrupt API group 0x2. When
 *	a guest negotiates version 2.0, all interrupt sources will only
 *	support using the cookie interface, and any attempt to use the
 *	version 1.0 interrupt APIs numbered 0xa0 to 0xa6 will result in the
 *	ENOTSUPPORTED error being returned.
 *
 *   with an emphasis on "all interrupt sources".
 *
 * To correct this, major version 3.0 was created, which does actually
 * support VIRQs for all interrupt sources (not just LDC devices).  So
 * if we want to move completely over to the cookie based VIRQs we must
 * negotiate major version 3.0 or later of HV_GRP_INTR.
 */
static bool sun4v_cookie_only_virqs(void)
{
	if (hv_irq_version >= 3)
		return true;
	return false;
}

static void __init irq_init_hv(void)
{
	unsigned long hv_error, major, minor = 0;

	if (tlb_type != hypervisor)
		return;

	if (hvirq_major)
		major = hvirq_major;
	else
		major = 3;

	hv_error = sun4v_hvapi_register(HV_GRP_INTR, major, &minor);
	if (!hv_error)
		hv_irq_version = major;
	else
		hv_irq_version = 1;

	pr_info("SUN4V: Using IRQ API major %d, cookie only virqs %s\n",
		hv_irq_version,
		sun4v_cookie_only_virqs() ? "enabled" : "disabled");
}

/* This function is for the timer interrupt. */
int __init arch_probe_nr_irqs(void)
{
	return 1;
}

#define DEFAULT_NUM_IVECS	(0xfffU)
static unsigned int nr_ivec = DEFAULT_NUM_IVECS;
#define NUM_IVECS	(nr_ivec)

static unsigned int __init size_nr_ivec(void)
{
	if (tlb_type == hypervisor) {
		switch (sun4v_chip_type) {
		/* Athena's devhandle|devino is large. */
		case SUN4V_CHIP_SPARC64X:
			nr_ivec = 0xffff;
			break;
		}
	}
	return nr_ivec;
}

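/* Per-IRQ state attached via irq_set_handler_data().  The union holds
 * either the (dev_handle, dev_ino) pair used by the cookie based sun4v
 * VIRQ interfaces or the flat sysino used by the legacy sun4v APIs;
 * iclr/imap are the sun4u interrupt clear/map register addresses (set
 * to ~0UL for sun4v, see sun4v_build_common()).
 */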
struct irq_handler_data {
	union {
		struct {
			unsigned int dev_handle;
			unsigned int dev_ino;
		};
		unsigned long sysino;
	};
	struct ino_bucket bucket;
	unsigned long	iclr;
	unsigned long	imap;
};

static inline unsigned int irq_data_to_handle(struct irq_data *data)
{
	struct irq_handler_data *ihd = irq_data_get_irq_handler_data(data);

	return ihd->dev_handle;
}

static inline unsigned int irq_data_to_ino(struct irq_data *data)
{
	struct irq_handler_data *ihd = irq_data_get_irq_handler_data(data);

	return ihd->dev_ino;
}

static inline unsigned long irq_data_to_sysino(struct irq_data *data)
{
	struct irq_handler_data *ihd = irq_data_get_irq_handler_data(data);

	return ihd->sysino;
}

void irq_free(unsigned int irq)
{
	void *data = irq_get_handler_data(irq);

	kfree(data);
	irq_set_handler_data(irq, NULL);
	irq_free_descs(irq, 1);
}

unsigned int irq_alloc(unsigned int dev_handle, unsigned int dev_ino)
{
	int irq;

	irq = __irq_alloc_descs(-1, 1, 1, numa_node_id(), NULL, NULL);
	if (irq <= 0)
		goto out;

	return irq;
out:
	return 0;
}

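/* A cookie registered with the hypervisor is the one's complement of the
 * bucket's physical address (see cookie_assign()), so bit 63 being set
 * identifies a cookie style value and ~cookie recovers the bucket.
 */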
static unsigned int cookie_exists(u32 devhandle, unsigned int devino)
{
	unsigned long hv_err, cookie;
	struct ino_bucket *bucket;
	unsigned int irq = 0U;

	hv_err = sun4v_vintr_get_cookie(devhandle, devino, &cookie);
	if (hv_err) {
		pr_err("HV get cookie failed hv_err = %ld\n", hv_err);
		goto out;
	}

	if (cookie & ((1UL << 63UL))) {
		cookie = ~cookie;
		bucket = (struct ino_bucket *) __va(cookie);
		irq = bucket->__irq;
	}
out:
	return irq;
}

static unsigned int sysino_exists(u32 devhandle, unsigned int devino)
{
	unsigned long sysino = sun4v_devino_to_sysino(devhandle, devino);
	struct ino_bucket *bucket;
	unsigned int irq;

	bucket = &ivector_table[sysino];
	irq = bucket_get_irq(__pa(bucket));

	return irq;
}

void ack_bad_irq(unsigned int irq)
{
	pr_crit("BAD IRQ ack %d\n", irq);
}

void irq_install_pre_handler(int irq,
			     void (*func)(unsigned int, void *, void *),
			     void *arg1, void *arg2)
{
	pr_warn("IRQ pre handler NOT supported.\n");
}

/*
 * /proc/interrupts printing:
 */
int arch_show_interrupts(struct seq_file *p, int prec)
{
	int j;

	seq_printf(p, "NMI: ");
	for_each_online_cpu(j)
		seq_printf(p, "%10u ", cpu_data(j).__nmi_count);
	seq_printf(p, "     Non-maskable interrupts\n");
	return 0;
}

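/* Encode the target cpu into the interrupt map TID field.  Starfire
 * needs a firmware translation, JBUS (Jalapeno/Serrano) and Safari
 * (other Cheetah) parts use different field layouts, and everything
 * else is plain UPA.
 */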
static unsigned int sun4u_compute_tid(unsigned long imap, unsigned long cpuid)
{
	unsigned int tid;

	if (this_is_starfire) {
		tid = starfire_translate(imap, cpuid);
		tid <<= IMAP_TID_SHIFT;
		tid &= IMAP_TID_UPA;
	} else {
		if (tlb_type == cheetah || tlb_type == cheetah_plus) {
			unsigned long ver;

			__asm__ ("rdpr %%ver, %0" : "=r" (ver));
			if ((ver >> 32UL) == __JALAPENO_ID ||
			    (ver >> 32UL) == __SERRANO_ID) {
				tid = cpuid << IMAP_TID_SHIFT;
				tid &= IMAP_TID_JBUS;
			} else {
				unsigned int a = cpuid & 0x1f;
				unsigned int n = (cpuid >> 5) & 0x1f;

				tid = ((a << IMAP_AID_SHIFT) |
				       (n << IMAP_NID_SHIFT));
				tid &= (IMAP_AID_SAFARI |
					IMAP_NID_SAFARI);
			}
		} else {
			tid = cpuid << IMAP_TID_SHIFT;
			tid &= IMAP_TID_UPA;
		}
	}

	return tid;
}

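/* Pick a target cpu for an interrupt: if the affinity mask covers all
 * online cpus, let the cpumap code spread the irq (map_to_cpu()),
 * otherwise take the first online cpu in the requested mask, falling
 * back to the cpumap choice if none of them are online.  On UP builds
 * this is simply the boot processor.
 */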
#ifdef CONFIG_SMP
static int irq_choose_cpu(unsigned int irq, const struct cpumask *affinity)
{
	cpumask_t mask;
	int cpuid;

	cpumask_copy(&mask, affinity);
	if (cpumask_equal(&mask, cpu_online_mask)) {
		cpuid = map_to_cpu(irq);
	} else {
		cpumask_t tmp;

		cpumask_and(&tmp, cpu_online_mask, &mask);
		cpuid = cpumask_empty(&tmp) ? map_to_cpu(irq) : cpumask_first(&tmp);
	}

	return cpuid;
}
#else
#define irq_choose_cpu(irq, affinity)	\
	real_hard_smp_processor_id()
#endif

static void sun4u_irq_enable(struct irq_data *data)
{
	struct irq_handler_data *handler_data;

	handler_data = irq_data_get_irq_handler_data(data);
	if (likely(handler_data)) {
		unsigned long cpuid, imap, val;
		unsigned int tid;

		cpuid = irq_choose_cpu(data->irq,
				       irq_data_get_affinity_mask(data));
		imap = handler_data->imap;

		tid = sun4u_compute_tid(imap, cpuid);

		val = upa_readq(imap);
		val &= ~(IMAP_TID_UPA | IMAP_TID_JBUS |
			 IMAP_AID_SAFARI | IMAP_NID_SAFARI);
		val |= tid | IMAP_VALID;
		upa_writeq(val, imap);
		upa_writeq(ICLR_IDLE, handler_data->iclr);
	}
}

static int sun4u_set_affinity(struct irq_data *data,
			      const struct cpumask *mask, bool force)
{
	struct irq_handler_data *handler_data;

	handler_data = irq_data_get_irq_handler_data(data);
	if (likely(handler_data)) {
		unsigned long cpuid, imap, val;
		unsigned int tid;

		cpuid = irq_choose_cpu(data->irq, mask);
		imap = handler_data->imap;

		tid = sun4u_compute_tid(imap, cpuid);

		val = upa_readq(imap);
		val &= ~(IMAP_TID_UPA | IMAP_TID_JBUS |
			 IMAP_AID_SAFARI | IMAP_NID_SAFARI);
		val |= tid | IMAP_VALID;
		upa_writeq(val, imap);
		upa_writeq(ICLR_IDLE, handler_data->iclr);
	}

	return 0;
}

/* Don't do anything.  The desc->status check for IRQ_DISABLED in
 * handler_irq() will skip the handler call and that will leave the
 * interrupt in the sent state.  The next ->enable() call will hit the
 * ICLR register to reset the state machine.
 *
 * This scheme is necessary, instead of clearing the Valid bit in the
 * IMAP register, to handle the case of IMAP registers being shared by
 * multiple INOs (and thus ICLR registers).  Since we use a different
 * virtual IRQ for each shared IMAP instance, the generic code thinks
 * there is only one user so it prematurely calls ->disable() on
 * free_irq().
 *
 * We have to provide an explicit ->disable() method instead of using
 * NULL to get the default.  The reason is that if the generic code
 * sees that, it also hooks up a default ->shutdown method which
 * invokes ->mask() which we do not want.  See irq_chip_set_defaults().
 */
static void sun4u_irq_disable(struct irq_data *data)
{
}

static void sun4u_irq_eoi(struct irq_data *data)
{
	struct irq_handler_data *handler_data;

	handler_data = irq_data_get_irq_handler_data(data);
	if (likely(handler_data))
		upa_writeq(ICLR_IDLE, handler_data->iclr);
}

static void sun4v_irq_enable(struct irq_data *data)
{
	unsigned long cpuid = irq_choose_cpu(data->irq,
					     irq_data_get_affinity_mask(data));
	unsigned int ino = irq_data_to_sysino(data);
	int err;

	err = sun4v_intr_settarget(ino, cpuid);
	if (err != HV_EOK)
		printk(KERN_ERR "sun4v_intr_settarget(%x,%lu): "
		       "err(%d)\n", ino, cpuid, err);
	err = sun4v_intr_setstate(ino, HV_INTR_STATE_IDLE);
	if (err != HV_EOK)
		printk(KERN_ERR "sun4v_intr_setstate(%x): "
		       "err(%d)\n", ino, err);
	err = sun4v_intr_setenabled(ino, HV_INTR_ENABLED);
	if (err != HV_EOK)
		printk(KERN_ERR "sun4v_intr_setenabled(%x): err(%d)\n",
		       ino, err);
}

static int sun4v_set_affinity(struct irq_data *data,
			      const struct cpumask *mask, bool force)
{
	unsigned long cpuid = irq_choose_cpu(data->irq, mask);
	unsigned int ino = irq_data_to_sysino(data);
	int err;

	err = sun4v_intr_settarget(ino, cpuid);
	if (err != HV_EOK)
		printk(KERN_ERR "sun4v_intr_settarget(%x,%lu): "
		       "err(%d)\n", ino, cpuid, err);

	return 0;
}

static void sun4v_irq_disable(struct irq_data *data)
{
	unsigned int ino = irq_data_to_sysino(data);
	int err;

	err = sun4v_intr_setenabled(ino, HV_INTR_DISABLED);
	if (err != HV_EOK)
		printk(KERN_ERR "sun4v_intr_setenabled(%x): "
		       "err(%d)\n", ino, err);
}

static void sun4v_irq_eoi(struct irq_data *data)
{
	unsigned int ino = irq_data_to_sysino(data);
	int err;

	err = sun4v_intr_setstate(ino, HV_INTR_STATE_IDLE);
	if (err != HV_EOK)
		printk(KERN_ERR "sun4v_intr_setstate(%x): "
		       "err(%d)\n", ino, err);
}

static void sun4v_virq_enable(struct irq_data *data)
{
	unsigned long dev_handle = irq_data_to_handle(data);
	unsigned long dev_ino = irq_data_to_ino(data);
	unsigned long cpuid;
	int err;

	cpuid = irq_choose_cpu(data->irq, irq_data_get_affinity_mask(data));

	err = sun4v_vintr_set_target(dev_handle, dev_ino, cpuid);
	if (err != HV_EOK)
		printk(KERN_ERR "sun4v_vintr_set_target(%lx,%lx,%lu): "
		       "err(%d)\n",
		       dev_handle, dev_ino, cpuid, err);
	err = sun4v_vintr_set_state(dev_handle, dev_ino,
				    HV_INTR_STATE_IDLE);
	if (err != HV_EOK)
		printk(KERN_ERR "sun4v_vintr_set_state(%lx,%lx,"
		       "HV_INTR_STATE_IDLE): err(%d)\n",
		       dev_handle, dev_ino, err);
	err = sun4v_vintr_set_valid(dev_handle, dev_ino,
				    HV_INTR_ENABLED);
	if (err != HV_EOK)
		printk(KERN_ERR "sun4v_vintr_set_state(%lx,%lx,"
		       "HV_INTR_ENABLED): err(%d)\n",
		       dev_handle, dev_ino, err);
}

static int sun4v_virt_set_affinity(struct irq_data *data,
				   const struct cpumask *mask, bool force)
{
	unsigned long dev_handle = irq_data_to_handle(data);
	unsigned long dev_ino = irq_data_to_ino(data);
	unsigned long cpuid;
	int err;

	cpuid = irq_choose_cpu(data->irq, mask);

	err = sun4v_vintr_set_target(dev_handle, dev_ino, cpuid);
	if (err != HV_EOK)
		printk(KERN_ERR "sun4v_vintr_set_target(%lx,%lx,%lu): "
		       "err(%d)\n",
		       dev_handle, dev_ino, cpuid, err);

	return 0;
}

static void sun4v_virq_disable(struct irq_data *data)
{
	unsigned long dev_handle = irq_data_to_handle(data);
	unsigned long dev_ino = irq_data_to_ino(data);
	int err;

	err = sun4v_vintr_set_valid(dev_handle, dev_ino,
				    HV_INTR_DISABLED);
	if (err != HV_EOK)
		printk(KERN_ERR "sun4v_vintr_set_state(%lx,%lx,"
		       "HV_INTR_DISABLED): err(%d)\n",
		       dev_handle, dev_ino, err);
}

static void sun4v_virq_eoi(struct irq_data *data)
{
	unsigned long dev_handle = irq_data_to_handle(data);
	unsigned long dev_ino = irq_data_to_ino(data);
	int err;

	err = sun4v_vintr_set_state(dev_handle, dev_ino,
				    HV_INTR_STATE_IDLE);
	if (err != HV_EOK)
		printk(KERN_ERR "sun4v_vintr_set_state(%lx,%lx,"
		       "HV_INTR_STATE_IDLE): err(%d)\n",
		       dev_handle, dev_ino, err);
}

static struct irq_chip sun4u_irq = {
	.name			= "sun4u",
	.irq_enable		= sun4u_irq_enable,
	.irq_disable		= sun4u_irq_disable,
	.irq_eoi		= sun4u_irq_eoi,
	.irq_set_affinity	= sun4u_set_affinity,
	.flags			= IRQCHIP_EOI_IF_HANDLED,
};

static struct irq_chip sun4v_irq = {
	.name			= "sun4v",
	.irq_enable		= sun4v_irq_enable,
	.irq_disable		= sun4v_irq_disable,
	.irq_eoi		= sun4v_irq_eoi,
	.irq_set_affinity	= sun4v_set_affinity,
	.flags			= IRQCHIP_EOI_IF_HANDLED,
};

static struct irq_chip sun4v_virq = {
	.name			= "vsun4v",
	.irq_enable		= sun4v_virq_enable,
	.irq_disable		= sun4v_virq_disable,
	.irq_eoi		= sun4v_virq_eoi,
	.irq_set_affinity	= sun4v_virt_set_affinity,
	.flags			= IRQCHIP_EOI_IF_HANDLED,
};

unsigned int build_irq(int inofixup, unsigned long iclr, unsigned long imap)
{
	struct irq_handler_data *handler_data;
	struct ino_bucket *bucket;
	unsigned int irq;
	int ino;

	BUG_ON(tlb_type == hypervisor);

	ino = (upa_readq(imap) & (IMAP_IGN | IMAP_INO)) + inofixup;
	bucket = &ivector_table[ino];
	irq = bucket_get_irq(__pa(bucket));
	if (!irq) {
		irq = irq_alloc(0, ino);
		bucket_set_irq(__pa(bucket), irq);
		irq_set_chip_and_handler_name(irq, &sun4u_irq,
					      handle_fasteoi_irq, "IVEC");
	}

	handler_data = irq_get_handler_data(irq);
	if (unlikely(handler_data))
		goto out;

	handler_data = kzalloc(sizeof(struct irq_handler_data), GFP_ATOMIC);
	if (unlikely(!handler_data)) {
		prom_printf("IRQ: kzalloc(irq_handler_data) failed.\n");
		prom_halt();
	}
	irq_set_handler_data(irq, handler_data);

	handler_data->imap  = imap;
	handler_data->iclr  = iclr;

out:
	return irq;
}
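
/* Illustrative only: a sun4u bus driver that has located its IMAP/ICLR
 * registers would typically do something like
 *
 *	irq = build_irq(inofixup, iclr, imap);
 *	err = request_irq(irq, my_handler, 0, "mydev", dev);
 *
 * where my_handler, "mydev" and dev are the driver's own names, not
 * anything defined in this file.
 */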

static unsigned int sun4v_build_common(u32 devhandle, unsigned int devino,
		void (*handler_data_init)(struct irq_handler_data *data,
		u32 devhandle, unsigned int devino),
		struct irq_chip *chip)
{
	struct irq_handler_data *data;
	unsigned int irq;

	irq = irq_alloc(devhandle, devino);
	if (!irq)
		goto out;

	data = kzalloc(sizeof(struct irq_handler_data), GFP_ATOMIC);
	if (unlikely(!data)) {
		pr_err("IRQ handler data allocation failed.\n");
		irq_free(irq);
		irq = 0;
		goto out;
	}

	irq_set_handler_data(irq, data);
	handler_data_init(data, devhandle, devino);
	irq_set_chip_and_handler_name(irq, chip, handle_fasteoi_irq, "IVEC");
	data->imap = ~0UL;
	data->iclr = ~0UL;
out:
	return irq;
}

static unsigned long cookie_assign(unsigned int irq, u32 devhandle,
				   unsigned int devino)
{
	struct irq_handler_data *ihd = irq_get_handler_data(irq);
	unsigned long hv_error, cookie;

	/* handler_irq needs to find the irq. cookie is seen signed in
	 * sun4v_dev_mondo and treated as a non ivector_table delivery.
	 */
	ihd->bucket.__irq = irq;
	cookie = ~__pa(&ihd->bucket);

	hv_error = sun4v_vintr_set_cookie(devhandle, devino, cookie);
	if (hv_error)
		pr_err("HV vintr set cookie failed = %ld\n", hv_error);

	return hv_error;
}

static void cookie_handler_data(struct irq_handler_data *data,
				u32 devhandle, unsigned int devino)
{
	data->dev_handle = devhandle;
	data->dev_ino = devino;
}

static unsigned int cookie_build_irq(u32 devhandle, unsigned int devino,
				     struct irq_chip *chip)
{
	unsigned long hv_error;
	unsigned int irq;

	irq = sun4v_build_common(devhandle, devino, cookie_handler_data, chip);

	hv_error = cookie_assign(irq, devhandle, devino);
	if (hv_error) {
		irq_free(irq);
		irq = 0;
	}

	return irq;
}

static unsigned int sun4v_build_cookie(u32 devhandle, unsigned int devino)
{
	unsigned int irq;

	irq = cookie_exists(devhandle, devino);
	if (irq)
		goto out;

	irq = cookie_build_irq(devhandle, devino, &sun4v_virq);

out:
	return irq;
}

static void sysino_set_bucket(unsigned int irq)
{
	struct irq_handler_data *ihd = irq_get_handler_data(irq);
	struct ino_bucket *bucket;
	unsigned long sysino;

	sysino = sun4v_devino_to_sysino(ihd->dev_handle, ihd->dev_ino);
	BUG_ON(sysino >= nr_ivec);
	bucket = &ivector_table[sysino];
	bucket_set_irq(__pa(bucket), irq);
}

static void sysino_handler_data(struct irq_handler_data *data,
				u32 devhandle, unsigned int devino)
{
	unsigned long sysino;

	sysino = sun4v_devino_to_sysino(devhandle, devino);
	data->sysino = sysino;
}

static unsigned int sysino_build_irq(u32 devhandle, unsigned int devino,
				     struct irq_chip *chip)
{
	unsigned int irq;

	irq = sun4v_build_common(devhandle, devino, sysino_handler_data, chip);
	if (!irq)
		goto out;

	sysino_set_bucket(irq);
out:
	return irq;
}

static int sun4v_build_sysino(u32 devhandle, unsigned int devino)
{
	int irq;

	irq = sysino_exists(devhandle, devino);
	if (irq)
		goto out;

	irq = sysino_build_irq(devhandle, devino, &sun4v_irq);
out:
	return irq;
}

unsigned int sun4v_build_irq(u32 devhandle, unsigned int devino)
{
	unsigned int irq;

	if (sun4v_cookie_only_virqs())
		irq = sun4v_build_cookie(devhandle, devino);
	else
		irq = sun4v_build_sysino(devhandle, devino);

	return irq;
}

unsigned int sun4v_build_virq(u32 devhandle, unsigned int devino)
{
	int irq;

	irq = cookie_build_irq(devhandle, devino, &sun4v_virq);
	if (!irq)
		goto out;

	/* This is borrowed from the original function.
	 */
	irq_set_status_flags(irq, IRQ_NOAUTOEN);

out:
	return irq;
}

void *hardirq_stack[NR_CPUS];
void *softirq_stack[NR_CPUS];

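/* Entry point for vectored device interrupts.  The low level interrupt
 * vector trap code queues pending ino_buckets on a per-cpu worklist
 * (trap_block[].irq_worklist_pa, chained through __irq_chain_pa); here
 * we atomically take that whole list with interrupts disabled and run
 * the generic flow handler for each entry on the hard irq stack.
 */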
void __irq_entry handler_irq(int pil, struct pt_regs *regs)
{
	unsigned long pstate, bucket_pa;
	struct pt_regs *old_regs;
	void *orig_sp;

	clear_softint(1 << pil);

	old_regs = set_irq_regs(regs);
	irq_enter();

	/* Grab an atomic snapshot of the pending IVECs.  */
	__asm__ __volatile__("rdpr	%%pstate, %0\n\t"
			     "wrpr	%0, %3, %%pstate\n\t"
			     "ldx	[%2], %1\n\t"
			     "stx	%%g0, [%2]\n\t"
			     "wrpr	%0, 0x0, %%pstate\n\t"
			     : "=&r" (pstate), "=&r" (bucket_pa)
			     : "r" (irq_work_pa(smp_processor_id())),
			       "i" (PSTATE_IE)
			     : "memory");

	orig_sp = set_hardirq_stack();

	while (bucket_pa) {
		unsigned long next_pa;
		unsigned int irq;

		next_pa = bucket_get_chain_pa(bucket_pa);
		irq = bucket_get_irq(bucket_pa);
		bucket_clear_chain_pa(bucket_pa);

		generic_handle_irq(irq);

		bucket_pa = next_pa;
	}

	restore_hardirq_stack(orig_sp);

	irq_exit();
	set_irq_regs(old_regs);
}

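/* Run softirqs on the dedicated per-cpu softirq stack: point %sp just
 * below the top of that stack (leaving a 192 byte frame and applying
 * the sparc64 STACK_BIAS), call __do_softirq(), then switch back to
 * the original stack pointer.
 */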
void do_softirq_own_stack(void)
{
	void *orig_sp, *sp = softirq_stack[smp_processor_id()];

	sp += THREAD_SIZE - 192 - STACK_BIAS;

	__asm__ __volatile__("mov %%sp, %0\n\t"
			     "mov %1, %%sp"
			     : "=&r" (orig_sp)
			     : "r" (sp));
	__do_softirq();
	__asm__ __volatile__("mov %0, %%sp"
			     : : "r" (orig_sp));
}

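/* Used on the cpu hot-unplug path: re-run ->irq_set_affinity() for
 * every active, non per-cpu interrupt so it gets retargeted to a cpu
 * that is still online, then shut off the local timer interrupt.
 */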
#ifdef CONFIG_HOTPLUG_CPU
void fixup_irqs(void)
{
	unsigned int irq;

	for (irq = 0; irq < NR_IRQS; irq++) {
		struct irq_desc *desc = irq_to_desc(irq);
		struct irq_data *data;
		unsigned long flags;

		if (!desc)
			continue;
		data = irq_desc_get_irq_data(desc);
		raw_spin_lock_irqsave(&desc->lock, flags);
		if (desc->action && !irqd_is_per_cpu(data)) {
			if (data->chip->irq_set_affinity)
				data->chip->irq_set_affinity(data,
					irq_data_get_affinity_mask(data),
					false);
		}
		raw_spin_unlock_irqrestore(&desc->lock, flags);
	}

	tick_ops->disable_irq();
}
#endif

struct sun5_timer {
	u64	count0;
	u64	limit0;
	u64	count1;
	u64	limit1;
};

static struct sun5_timer *prom_timers;
static u64 prom_limit0, prom_limit1;

static void map_prom_timers(void)
{
	struct device_node *dp;
	const unsigned int *addr;

	/* PROM timer node hangs out in the top level of device siblings... */
	dp = of_find_node_by_path("/");
	dp = dp->child;
	while (dp) {
		if (!strcmp(dp->name, "counter-timer"))
			break;
		dp = dp->sibling;
	}

	/* Assume if node is not present, PROM uses different tick mechanism
	 * which we should not care about.
	 */
	if (!dp) {
		prom_timers = (struct sun5_timer *) 0;
		return;
	}

	/* If PROM is really using this, it must be mapped by him. */
	addr = of_get_property(dp, "address", NULL);
	if (!addr) {
		prom_printf("PROM does not have timer mapped, trying to continue.\n");
		prom_timers = (struct sun5_timer *) 0;
		return;
	}
	prom_timers = (struct sun5_timer *) ((unsigned long)addr[0]);
}

static void kill_prom_timer(void)
{
	if (!prom_timers)
		return;

	/* Save them away for later. */
	prom_limit0 = prom_timers->limit0;
	prom_limit1 = prom_timers->limit1;

	/* Just as in sun4c, the PROM uses a timer which ticks at IRQ 14.
	 * We turn both off here just to be paranoid.
	 */
	prom_timers->limit0 = 0;
	prom_timers->limit1 = 0;

	/* Wheee, eat the interrupt packet too... */
	__asm__ __volatile__(
"	mov	0x40, %%g2\n"
"	ldxa	[%%g0] %0, %%g1\n"
"	ldxa	[%%g2] %1, %%g1\n"
"	stxa	%%g0, [%%g0] %0\n"
"	membar	#Sync\n"
	: /* no outputs */
	: "i" (ASI_INTR_RECEIVE), "i" (ASI_INTR_R)
	: "g1", "g2");
}

void notrace init_irqwork_curcpu(void)
{
	int cpu = hard_smp_processor_id();

	trap_block[cpu].irq_worklist_pa = 0UL;
}

/* Please be very careful with register_one_mondo() and
 * sun4v_register_mondo_queues().
 *
 * On SMP this gets invoked from the CPU trampoline before
 * the cpu has fully taken over the trap table from OBP,
 * and its kernel stack + %g6 thread register state is
 * not fully cooked yet.
 *
 * Therefore you cannot make any OBP calls, not even prom_printf,
 * from these two routines.
 */
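/* Each mondo/error queue entry is 64 bytes, so a queue spanning
 * (qmask + 1) bytes holds (qmask + 1) / 64 entries.
 */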
static void notrace register_one_mondo(unsigned long paddr, unsigned long type,
				       unsigned long qmask)
{
	unsigned long num_entries = (qmask + 1) / 64;
	unsigned long status;

	status = sun4v_cpu_qconf(type, paddr, num_entries);
	if (status != HV_EOK) {
		prom_printf("SUN4V: sun4v_cpu_qconf(%lu:%lx:%lu) failed, "
			    "err %lu\n", type, paddr, num_entries, status);
		prom_halt();
	}
}

void notrace sun4v_register_mondo_queues(int this_cpu)
{
	struct trap_per_cpu *tb = &trap_block[this_cpu];

	register_one_mondo(tb->cpu_mondo_pa, HV_CPU_QUEUE_CPU_MONDO,
			   tb->cpu_mondo_qmask);
	register_one_mondo(tb->dev_mondo_pa, HV_CPU_QUEUE_DEVICE_MONDO,
			   tb->dev_mondo_qmask);
	register_one_mondo(tb->resum_mondo_pa, HV_CPU_QUEUE_RES_ERROR,
			   tb->resum_qmask);
	register_one_mondo(tb->nonresum_mondo_pa, HV_CPU_QUEUE_NONRES_ERROR,
			   tb->nonresum_qmask);
}

/* Each queue region must be a power of 2 multiple of 64 bytes in
 * size.  The base real address must be aligned to the size of the
 * region.  Thus, an 8KB queue must be 8KB aligned, for example.
 */
static void __init alloc_one_queue(unsigned long *pa_ptr, unsigned long qmask)
{
	unsigned long size = PAGE_ALIGN(qmask + 1);
	unsigned long order = get_order(size);
	unsigned long p;

	p = __get_free_pages(GFP_KERNEL | __GFP_ZERO, order);
	if (!p) {
		prom_printf("SUN4V: Error, cannot allocate queue.\n");
		prom_halt();
	}

	*pa_ptr = __pa(p);
}

static void __init init_cpu_send_mondo_info(struct trap_per_cpu *tb)
{
#ifdef CONFIG_SMP
	unsigned long page;
	void *mondo, *p;

	BUILD_BUG_ON((NR_CPUS * sizeof(u16)) > PAGE_SIZE);

	/* Make sure mondo block is 64byte aligned */
	p = kzalloc(127, GFP_KERNEL);
	if (!p) {
		prom_printf("SUN4V: Error, cannot allocate mondo block.\n");
		prom_halt();
	}
	mondo = (void *)(((unsigned long)p + 63) & ~0x3f);
	tb->cpu_mondo_block_pa = __pa(mondo);

	page = get_zeroed_page(GFP_KERNEL);
	if (!page) {
		prom_printf("SUN4V: Error, cannot allocate cpu list page.\n");
		prom_halt();
	}

	tb->cpu_list_pa = __pa(page);
#endif
}

/* Allocate mondo and error queues for all possible cpus.  */
static void __init sun4v_init_mondo_queues(void)
{
	int cpu;

	for_each_possible_cpu(cpu) {
		struct trap_per_cpu *tb = &trap_block[cpu];

		alloc_one_queue(&tb->cpu_mondo_pa, tb->cpu_mondo_qmask);
		alloc_one_queue(&tb->dev_mondo_pa, tb->dev_mondo_qmask);
		alloc_one_queue(&tb->resum_mondo_pa, tb->resum_qmask);
		alloc_one_queue(&tb->resum_kernel_buf_pa, tb->resum_qmask);
		alloc_one_queue(&tb->nonresum_mondo_pa, tb->nonresum_qmask);
		alloc_one_queue(&tb->nonresum_kernel_buf_pa,
				tb->nonresum_qmask);
	}
}

static void __init init_send_mondo_info(void)
{
	int cpu;

	for_each_possible_cpu(cpu) {
		struct trap_per_cpu *tb = &trap_block[cpu];

		init_cpu_send_mondo_info(tb);
	}
}

static struct irqaction timer_irq_action = {
	.name = "timer",
};

static void __init irq_ivector_init(void)
{
	unsigned long size, order;
	unsigned int ivecs;

	/* If we are doing cookie only VIRQs then we do not need the ivector
	 * table to process interrupts.
	 */
	if (sun4v_cookie_only_virqs())
		return;

	ivecs = size_nr_ivec();
	size = sizeof(struct ino_bucket) * ivecs;
	order = get_order(size);
	ivector_table = (struct ino_bucket *)
		__get_free_pages(GFP_KERNEL | __GFP_ZERO, order);
	if (!ivector_table) {
		prom_printf("Fatal error, cannot allocate ivector_table\n");
		prom_halt();
	}
	__flush_dcache_range((unsigned long) ivector_table,
			     ((unsigned long) ivector_table) + size);

	ivector_table_pa = __pa(ivector_table);
}

/* Only invoked on boot processor. */
void __init init_IRQ(void)
{
	irq_init_hv();
	irq_ivector_init();
	map_prom_timers();
	kill_prom_timer();

	if (tlb_type == hypervisor)
		sun4v_init_mondo_queues();

	init_send_mondo_info();

	if (tlb_type == hypervisor) {
		/* Load up the boot cpu's entries. */
		sun4v_register_mondo_queues(hard_smp_processor_id());
	}

	/* We need to clear any IRQ's pending in the soft interrupt
	 * registers, a spurious one could be left around from the
	 * PROM timer which we just disabled.
	 */
	clear_softint(get_softint());

	/* Now that the ivector table is initialized, it is safe
	 * to receive IRQ vector traps.  We will normally take
	 * one or two right now, in case some device PROM used
	 * to boot us wants to speak to us.  We just ignore them.
	 */
	__asm__ __volatile__("rdpr	%%pstate, %%g1\n\t"
			     "or	%%g1, %0, %%g1\n\t"
			     "wrpr	%%g1, 0x0, %%pstate"
			     : /* No outputs */
			     : "i" (PSTATE_IE)
			     : "g1");

	irq_to_desc(0)->action = &timer_irq_action;
}