/*
 * Low-level SPU handling
 *
 * (C) Copyright IBM Deutschland Entwicklung GmbH 2005
 *
 * Author: Arnd Bergmann <arndb@de.ibm.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2, or (at your option)
 * any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 */

#undef DEBUG

#include <linux/interrupt.h>
#include <linux/list.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/poll.h>
#include <linux/ptrace.h>
#include <linux/slab.h>
#include <linux/wait.h>

#include <asm/io.h>
#include <asm/prom.h>
#include <asm/spu.h>
#include <asm/spu_priv1.h>
#include <asm/mmu_context.h>

#include "interrupt.h"

const struct spu_priv1_ops *spu_priv1_ops;

EXPORT_SYMBOL_GPL(spu_priv1_ops);

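/*
 * Trap helpers: report fatal DMA and SPU error conditions to the
 * task that currently owns the SPU by sending it a signal.
 */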
static int __spu_trap_invalid_dma(struct spu *spu)
{
        pr_debug("%s\n", __func__);
        force_sig(SIGBUS, /* info, */ current);
        return 0;
}

static int __spu_trap_dma_align(struct spu *spu)
{
        pr_debug("%s\n", __func__);
        force_sig(SIGBUS, /* info, */ current);
        return 0;
}

static int __spu_trap_error(struct spu *spu)
{
        pr_debug("%s\n", __func__);
        force_sig(SIGILL, /* info, */ current);
        return 0;
}

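/*
 * Kick the MFC to resume DMA processing after a faulting command has
 * been resolved, unless a context switch is pending (the restart is
 * then issued by the context save/restore code instead).
 */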
static void spu_restart_dma(struct spu *spu)
{
        struct spu_priv2 __iomem *priv2 = spu->priv2;

        if (!test_bit(SPU_CONTEXT_SWITCH_PENDING, &spu->flags))
                out_be64(&priv2->mfc_control_RW, MFC_CNTL_RESTART_DMA_COMMAND);
}

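/*
 * Handle an SPE segment fault: compute and load a new SLB entry for
 * the faulting effective address, using a simple round-robin
 * replacement policy across the eight SLB slots.
 */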
static int __spu_trap_data_seg(struct spu *spu, unsigned long ea)
{
        struct spu_priv2 __iomem *priv2 = spu->priv2;
        struct mm_struct *mm = spu->mm;
        u64 esid, vsid, llp;

        pr_debug("%s\n", __func__);

        if (test_bit(SPU_CONTEXT_SWITCH_ACTIVE, &spu->flags)) {
                /* SLBs are pre-loaded for context switch, so
                 * we should never get here!
                 */
                printk(KERN_ERR "%s: invalid access during switch!\n",
                       __func__);
                return 1;
        }
        if (!mm || (REGION_ID(ea) != USER_REGION_ID)) {
                /* Future: support kernel segments so that drivers
                 * can use SPUs.
                 */
                pr_debug("invalid region access at %016lx\n", ea);
                return 1;
        }

        esid = (ea & ESID_MASK) | SLB_ESID_V;
#ifdef CONFIG_HUGETLB_PAGE
        if (in_hugepage_area(mm->context, ea))
                llp = mmu_psize_defs[mmu_huge_psize].sllp;
        else
#endif
                llp = mmu_psize_defs[mmu_virtual_psize].sllp;
        vsid = (get_vsid(mm->context.id, ea) << SLB_VSID_SHIFT) |
                        SLB_VSID_USER | llp;

        out_be64(&priv2->slb_index_W, spu->slb_replace);
        out_be64(&priv2->slb_vsid_RW, vsid);
        out_be64(&priv2->slb_esid_RW, esid);

        spu->slb_replace++;
        if (spu->slb_replace >= 8)
                spu->slb_replace = 0;

        spu_restart_dma(spu);

        return 0;
}

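/*
 * Handle an SPE mapping fault: kernel hash faults are resolved
 * directly via hash_page(), while user faults are recorded in the
 * spu and deferred to the owning context through stop_callback().
 */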
extern int hash_page(unsigned long ea, unsigned long access,
                     unsigned long trap); /* XXX */

static int __spu_trap_data_map(struct spu *spu, unsigned long ea, u64 dsisr)
{
        pr_debug("%s, %lx, %lx\n", __func__, dsisr, ea);

        /*
         * Handle kernel space hash faults immediately.
         * User hash faults need to be deferred to process context.
         */
        if ((dsisr & MFC_DSISR_PTE_NOT_FOUND)
            && REGION_ID(ea) != USER_REGION_ID
            && hash_page(ea, _PAGE_PRESENT, 0x300) == 0) {
                spu_restart_dma(spu);
                return 0;
        }

        if (test_bit(SPU_CONTEXT_SWITCH_ACTIVE, &spu->flags)) {
                printk(KERN_ERR "%s: invalid access during switch!\n",
                       __func__);
                return 1;
        }

        spu->dar = ea;
        spu->dsisr = dsisr;
        mb();
        if (spu->stop_callback)
                spu->stop_callback(spu);
        return 0;
}

static int __spu_trap_mailbox(struct spu *spu)
{
        if (spu->ibox_callback)
                spu->ibox_callback(spu);

        /* atomically disable SPU mailbox interrupts */
        spin_lock(&spu->register_lock);
        spu_int_mask_and(spu, 2, ~0x1);
        spin_unlock(&spu->register_lock);
        return 0;
}

static int __spu_trap_stop(struct spu *spu)
{
        pr_debug("%s\n", __func__);
        if (spu->stop_callback)
                spu->stop_callback(spu);
        return 0;
}

static int __spu_trap_halt(struct spu *spu)
{
        pr_debug("%s\n", __func__);
        if (spu->stop_callback)
                spu->stop_callback(spu);
        return 0;
}

static int __spu_trap_tag_group(struct spu *spu)
{
        pr_debug("%s\n", __func__);
        spu->mfc_callback(spu);
        return 0;
}

static int __spu_trap_spubox(struct spu *spu)
{
        if (spu->wbox_callback)
                spu->wbox_callback(spu);

        /* atomically disable SPU mailbox interrupts */
        spin_lock(&spu->register_lock);
        spu_int_mask_and(spu, 2, ~0x10);
        spin_unlock(&spu->register_lock);
        return 0;
}

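/*
 * Class 0 interrupts signal fatal errors (invalid MFC DMA, DMA
 * alignment, SPU error); the handler only flags them here and defers
 * the actual processing to spu_irq_class_0_bottom().
 */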
static irqreturn_t
spu_irq_class_0(int irq, void *data, struct pt_regs *regs)
{
        struct spu *spu;

        spu = data;
        spu->class_0_pending = 1;
        if (spu->stop_callback)
                spu->stop_callback(spu);

        return IRQ_HANDLED;
}

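/*
 * Bottom half for class 0 interrupts: decode the masked status bits,
 * deliver the corresponding signals and acknowledge the interrupt.
 */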
int
spu_irq_class_0_bottom(struct spu *spu)
{
        unsigned long stat, mask;

        spu->class_0_pending = 0;

        mask = spu_int_mask_get(spu, 0);
        stat = spu_int_stat_get(spu, 0);

        stat &= mask;

        if (stat & 1) /* invalid MFC DMA */
                __spu_trap_invalid_dma(spu);

        if (stat & 2) /* invalid DMA alignment */
                __spu_trap_dma_align(spu);

        if (stat & 4) /* error on SPU */
                __spu_trap_error(spu);

        spu_int_stat_clear(spu, 0, stat);

        return (stat & 0x7) ? -EIO : 0;
}
EXPORT_SYMBOL_GPL(spu_irq_class_0_bottom);

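/*
 * Class 1 interrupts report translation faults (segment and mapping
 * faults) from the MFC; status and fault registers are read and
 * cleared atomically under the register lock.
 */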
static irqreturn_t
spu_irq_class_1(int irq, void *data, struct pt_regs *regs)
{
        struct spu *spu;
        unsigned long stat, mask, dar, dsisr;

        spu = data;

        /* atomically read & clear class1 status. */
        spin_lock(&spu->register_lock);
        mask = spu_int_mask_get(spu, 1);
        stat = spu_int_stat_get(spu, 1) & mask;
        dar = spu_mfc_dar_get(spu);
        dsisr = spu_mfc_dsisr_get(spu);
        if (stat & 2) /* mapping fault */
                spu_mfc_dsisr_set(spu, 0ul);
        spu_int_stat_clear(spu, 1, stat);
        spin_unlock(&spu->register_lock);
        pr_debug("%s: %lx %lx %lx %lx\n", __func__, mask, stat,
                 dar, dsisr);

        if (stat & 1) /* segment fault */
                __spu_trap_data_seg(spu, dar);

        if (stat & 2) /* mapping fault */
                __spu_trap_data_map(spu, dar, dsisr);

        if (stat & 4) /* ls compare & suspend on get */
                ;

        if (stat & 8) /* ls compare & suspend on put */
                ;

        return stat ? IRQ_HANDLED : IRQ_NONE;
}
EXPORT_SYMBOL_GPL(spu_irq_class_1_bottom);

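/*
 * Class 2 interrupts cover mailbox events, SPU stop-and-signal, halt
 * and DMA tag-group completion; each one is forwarded to the
 * corresponding callback.
 */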
static irqreturn_t
spu_irq_class_2(int irq, void *data, struct pt_regs *regs)
{
        struct spu *spu;
        unsigned long stat;
        unsigned long mask;

        spu = data;
        stat = spu_int_stat_get(spu, 2);
        mask = spu_int_mask_get(spu, 2);

        pr_debug("class 2 interrupt %d, %lx, %lx\n", irq, stat, mask);

        stat &= mask;

        if (stat & 1) /* PPC core mailbox */
                __spu_trap_mailbox(spu);

        if (stat & 2) /* SPU stop-and-signal */
                __spu_trap_stop(spu);

        if (stat & 4) /* SPU halted */
                __spu_trap_halt(spu);

        if (stat & 8) /* DMA tag group complete */
                __spu_trap_tag_group(spu);

        if (stat & 0x10) /* SPU mailbox threshold */
                __spu_trap_spubox(spu);

        spu_int_stat_clear(spu, 2, stat);
        return stat ? IRQ_HANDLED : IRQ_NONE;
}

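/*
 * Each SPE exposes one interrupt line per class; the interrupt
 * numbers are derived from the node, the per-class stride and the
 * SPE's interrupt source number (isrc).
 */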
static int
spu_request_irqs(struct spu *spu)
{
        int ret;
        int irq_base;

        irq_base = IIC_NODE_STRIDE * spu->node + IIC_SPE_OFFSET;

        snprintf(spu->irq_c0, sizeof(spu->irq_c0), "spe%02d.0", spu->number);
        ret = request_irq(irq_base + spu->isrc,
                          spu_irq_class_0, SA_INTERRUPT, spu->irq_c0, spu);
        if (ret)
                goto out;

        snprintf(spu->irq_c1, sizeof(spu->irq_c1), "spe%02d.1", spu->number);
        ret = request_irq(irq_base + IIC_CLASS_STRIDE + spu->isrc,
                          spu_irq_class_1, SA_INTERRUPT, spu->irq_c1, spu);
        if (ret)
                goto out1;

        snprintf(spu->irq_c2, sizeof(spu->irq_c2), "spe%02d.2", spu->number);
        ret = request_irq(irq_base + 2 * IIC_CLASS_STRIDE + spu->isrc,
                          spu_irq_class_2, SA_INTERRUPT, spu->irq_c2, spu);
        if (ret)
                goto out2;
        goto out;

out2:
        free_irq(irq_base + IIC_CLASS_STRIDE + spu->isrc, spu);
out1:
        free_irq(irq_base + spu->isrc, spu);
out:
        return ret;
}

static void
spu_free_irqs(struct spu *spu)
{
        int irq_base;

        irq_base = IIC_NODE_STRIDE * spu->node + IIC_SPE_OFFSET;

        free_irq(irq_base + spu->isrc, spu);
        free_irq(irq_base + IIC_CLASS_STRIDE + spu->isrc, spu);
        free_irq(irq_base + 2 * IIC_CLASS_STRIDE + spu->isrc, spu);
}

static LIST_HEAD(spu_list);
static DEFINE_MUTEX(spu_mutex);

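/*
 * Bring the SPU channels into a clean state: zero out the data of
 * all readable channels and reset the channel counts to meaningful
 * defaults.
 */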
static void spu_init_channels(struct spu *spu)
{
        static const struct {
                unsigned channel;
                unsigned count;
        } zero_list[] = {
                { 0x00, 1, }, { 0x01, 1, }, { 0x03, 1, }, { 0x04, 1, },
                { 0x18, 1, }, { 0x19, 1, }, { 0x1b, 1, }, { 0x1d, 1, },
        }, count_list[] = {
                { 0x00, 0, }, { 0x03, 0, }, { 0x04, 0, }, { 0x15, 16, },
                { 0x17, 1, }, { 0x18, 0, }, { 0x19, 0, }, { 0x1b, 0, },
                { 0x1c, 1, }, { 0x1d, 0, }, { 0x1e, 1, },
        };
        struct spu_priv2 __iomem *priv2;
        int i;

        priv2 = spu->priv2;

        /* initialize all channel data to zero */
        for (i = 0; i < ARRAY_SIZE(zero_list); i++) {
                int count;

                out_be64(&priv2->spu_chnlcntptr_RW, zero_list[i].channel);
                for (count = 0; count < zero_list[i].count; count++)
                        out_be64(&priv2->spu_chnldata_RW, 0);
        }

        /* initialize channel counts to meaningful values */
        for (i = 0; i < ARRAY_SIZE(count_list); i++) {
                out_be64(&priv2->spu_chnlcntptr_RW, count_list[i].channel);
                out_be64(&priv2->spu_chnlcnt_RW, count_list[i].count);
        }
}

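/*
 * Take an unused SPU off the global free list, or return NULL when
 * none is available. The channels are (re)initialized before the SPU
 * is handed out.
 */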
struct spu *spu_alloc(void)
{
        struct spu *spu;

        mutex_lock(&spu_mutex);
        if (!list_empty(&spu_list)) {
                spu = list_entry(spu_list.next, struct spu, list);
                list_del_init(&spu->list);
                pr_debug("Got SPU %x %d\n", spu->isrc, spu->number);
        } else {
                pr_debug("No SPU left\n");
                spu = NULL;
        }
        mutex_unlock(&spu_mutex);

        if (spu)
                spu_init_channels(spu);

        return spu;
}
EXPORT_SYMBOL_GPL(spu_alloc);

void spu_free(struct spu *spu)
{
        mutex_lock(&spu_mutex);
        list_add_tail(&spu->list, &spu_list);
        mutex_unlock(&spu_mutex);
}
EXPORT_SYMBOL_GPL(spu_free);

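/*
 * Resolve a deferred user-space fault on behalf of the SPU: look up
 * the VMA for the faulting address, check access permissions and let
 * handle_mm_fault() establish the mapping.
 */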
static int spu_handle_mm_fault(struct spu *spu)
{
        struct mm_struct *mm = spu->mm;
        struct vm_area_struct *vma;
        u64 ea, dsisr, is_write;
        int ret;

        ea = spu->dar;
        dsisr = spu->dsisr;
#if 0
        if (!IS_VALID_EA(ea))
                return -EFAULT;
#endif /* XXX */
        if (mm == NULL)
                return -EFAULT;
        if (mm->pgd == NULL)
                return -EFAULT;

        down_read(&mm->mmap_sem);
        vma = find_vma(mm, ea);
        if (!vma)
                goto bad_area;
        if (vma->vm_start <= ea)
                goto good_area;
        if (!(vma->vm_flags & VM_GROWSDOWN))
                goto bad_area;
#if 0
        if (expand_stack(vma, ea))
                goto bad_area;
#endif /* XXX */
good_area:
        is_write = dsisr & MFC_DSISR_ACCESS_PUT;
        if (is_write) {
                if (!(vma->vm_flags & VM_WRITE))
                        goto bad_area;
        } else {
                if (dsisr & MFC_DSISR_ACCESS_DENIED)
                        goto bad_area;
                if (!(vma->vm_flags & (VM_READ | VM_EXEC)))
                        goto bad_area;
        }
        ret = 0;
        switch (handle_mm_fault(mm, vma, ea, is_write)) {
        case VM_FAULT_MINOR:
                current->min_flt++;
                break;
        case VM_FAULT_MAJOR:
                current->maj_flt++;
                break;
        case VM_FAULT_SIGBUS:
                ret = -EFAULT;
                goto bad_area;
        case VM_FAULT_OOM:
                ret = -ENOMEM;
                goto bad_area;
        default:
                BUG();
        }
        up_read(&mm->mmap_sem);
        return ret;

bad_area:
        up_read(&mm->mmap_sem);
        return -EFAULT;
}

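/*
 * Process-context bottom half for class 1 interrupts: retry the
 * hashtable lookup, fall back to the full page fault path if needed
 * and restart the MFC on success.
 */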
int spu_irq_class_1_bottom(struct spu *spu)
{
        u64 ea, dsisr, access, error = 0UL;
        int ret = 0;

        ea = spu->dar;
        dsisr = spu->dsisr;
        if (dsisr & (MFC_DSISR_PTE_NOT_FOUND | MFC_DSISR_ACCESS_DENIED)) {
                u64 flags;

                access = (_PAGE_PRESENT | _PAGE_USER);
                access |= (dsisr & MFC_DSISR_ACCESS_PUT) ? _PAGE_RW : 0UL;
                local_irq_save(flags);
                if (hash_page(ea, access, 0x300) != 0)
                        error |= CLASS1_ENABLE_STORAGE_FAULT_INTR;
                local_irq_restore(flags);
        }
        if (error & CLASS1_ENABLE_STORAGE_FAULT_INTR) {
                if ((ret = spu_handle_mm_fault(spu)) != 0)
                        error |= CLASS1_ENABLE_STORAGE_FAULT_INTR;
                else
                        error &= ~CLASS1_ENABLE_STORAGE_FAULT_INTR;
        }
        spu->dar = 0UL;
        spu->dsisr = 0UL;
        if (!error)
                spu_restart_dma(spu);
        else
                __spu_trap_invalid_dma(spu);
        return ret;
}

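/* Look up the "node-id" property of the CBE node that owns this SPE. */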
static int __init find_spu_node_id(struct device_node *spe)
{
        unsigned int *id;
        struct device_node *cpu;

        cpu = spe->parent->parent;
        id = (unsigned int *)get_property(cpu, "node-id", NULL);
        return id ? *id : 0;
}

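/*
 * Register the memory region described by an SPE address property
 * (physical address and length) with the memory hotplug code, so
 * that struct pages exist for it.
 */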
static int __init cell_spuprop_present(struct spu *spu, struct device_node *spe,
                const char *prop)
{
        static DEFINE_MUTEX(add_spumem_mutex);

        struct address_prop {
                unsigned long address;
                unsigned int len;
        } __attribute__((packed)) *p;
        int proplen;

        unsigned long start_pfn, nr_pages;
        struct pglist_data *pgdata;
        struct zone *zone;
        int ret;

        p = (void *)get_property(spe, prop, &proplen);
        WARN_ON(proplen != sizeof(*p));

        start_pfn = p->address >> PAGE_SHIFT;
        nr_pages = ((unsigned long)p->len + PAGE_SIZE - 1) >> PAGE_SHIFT;

        pgdata = NODE_DATA(spu->nid);
        zone = pgdata->node_zones;

        /* XXX rethink locking here */
        mutex_lock(&add_spumem_mutex);
        ret = __add_pages(zone, start_pfn, nr_pages);
        mutex_unlock(&add_spumem_mutex);

        return ret;
}

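/*
 * Map one of the SPE register areas described by a device tree
 * property into the kernel address space.
 */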
static void __iomem * __init map_spe_prop(struct spu *spu,
                struct device_node *n, const char *name)
{
        struct address_prop {
                unsigned long address;
                unsigned int len;
        } __attribute__((packed)) *prop;

        void *p;
        int proplen;
        void *ret = NULL;
        int err = 0;

        p = get_property(n, name, &proplen);
        if (proplen != sizeof(struct address_prop))
                return NULL;

        prop = p;

        err = cell_spuprop_present(spu, n, name);
        if (err && (err != -EEXIST))
                goto out;

        ret = ioremap(prop->address, prop->len);

out:
        return ret;
}

static void spu_unmap(struct spu *spu)
{
        iounmap(spu->priv2);
        iounmap(spu->priv1);
        iounmap(spu->problem);
        iounmap((u8 __iomem *)spu->local_store);
}

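/*
 * Read the SPE's device tree properties and map local store, problem
 * state and the privileged register areas. The priv1 area may be
 * absent when running under a hypervisor.
 */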
static int __init spu_map_device(struct spu *spu, struct device_node *node)
{
        char *prop;
        int ret;

        ret = -ENODEV;
        prop = get_property(node, "isrc", NULL);
        if (!prop)
                goto out;
        spu->isrc = *(unsigned int *)prop;

        spu->name = get_property(node, "name", NULL);
        if (!spu->name)
                goto out;

        prop = get_property(node, "local-store", NULL);
        if (!prop)
                goto out;
        spu->local_store_phys = *(unsigned long *)prop;

        /* we use local store as ram, not io memory */
        spu->local_store = (void __force *)
                map_spe_prop(spu, node, "local-store");
        if (!spu->local_store)
                goto out;

        prop = get_property(node, "problem", NULL);
        if (!prop)
                goto out_unmap;
        spu->problem_phys = *(unsigned long *)prop;

        spu->problem = map_spe_prop(spu, node, "problem");
        if (!spu->problem)
                goto out_unmap;

        /* priv1 is not available on a hypervisor */
        spu->priv1 = map_spe_prop(spu, node, "priv1");

        spu->priv2 = map_spe_prop(spu, node, "priv2");
        if (!spu->priv2)
                goto out_unmap;
        ret = 0;
        goto out;

out_unmap:
        spu_unmap(spu);
out:
        return ret;
}

struct sysdev_class spu_sysdev_class = {
        set_kset_name("spu")
};

static ssize_t spu_show_isrc(struct sys_device *sysdev, char *buf)
{
        struct spu *spu = container_of(sysdev, struct spu, sysdev);

        return sprintf(buf, "%d\n", spu->isrc);
}
static SYSDEV_ATTR(isrc, 0400, spu_show_isrc, NULL);

extern int attach_sysdev_to_node(struct sys_device *dev, int nid);

static int spu_create_sysdev(struct spu *spu)
{
        int ret;

        spu->sysdev.id = spu->number;
        spu->sysdev.cls = &spu_sysdev_class;
        ret = sysdev_register(&spu->sysdev);
        if (ret) {
                printk(KERN_ERR "Can't register SPU %d with sysfs\n",
                       spu->number);
                return ret;
        }

        sysdev_create_file(&spu->sysdev, &attr_isrc);
        sysfs_add_device_to_node(&spu->sysdev, spu->nid);

        return 0;
}

static void spu_destroy_sysdev(struct spu *spu)
{
        sysdev_remove_file(&spu->sysdev, &attr_isrc);
        sysfs_remove_device_from_node(&spu->sysdev, spu->nid);
        sysdev_unregister(&spu->sysdev);
}

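/*
 * Set up one SPU: map its registers, program SDR1 and SR1, request
 * the three interrupt lines, register it with sysfs and add it to
 * the global list of available SPUs.
 */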
static int __init create_spu(struct device_node *spe)
{
        struct spu *spu;
        int ret;
        static int number;

        ret = -ENOMEM;
        spu = kzalloc(sizeof(*spu), GFP_KERNEL);
        if (!spu)
                goto out;

        ret = spu_map_device(spu, spe);
        if (ret)
                goto out_free;

        spu->node = find_spu_node_id(spe);
        spu->nid = of_node_to_nid(spe);
        if (spu->nid == -1)
                spu->nid = 0;
        spin_lock_init(&spu->register_lock);
        spu_mfc_sdr_set(spu, mfspr(SPRN_SDR1));
        spu_mfc_sr1_set(spu, 0x33);
        mutex_lock(&spu_mutex);

        spu->number = number++;
        ret = spu_request_irqs(spu);
        if (ret)
                goto out_unmap;

        ret = spu_create_sysdev(spu);
        if (ret)
                goto out_free_irqs;

        list_add(&spu->list, &spu_list);
        mutex_unlock(&spu_mutex);

        pr_debug("Using SPE %s %02x %p %p %p %p %d\n",
                 spu->name, spu->isrc, spu->local_store,
                 spu->problem, spu->priv1, spu->priv2, spu->number);
        goto out;

out_free_irqs:
        spu_free_irqs(spu);
out_unmap:
        mutex_unlock(&spu_mutex);
        spu_unmap(spu);
out_free:
        kfree(spu);
out:
        return ret;
}

static void destroy_spu(struct spu *spu)
{
        list_del_init(&spu->list);

        spu_destroy_sysdev(spu);
        spu_free_irqs(spu);
        spu_unmap(spu);
        kfree(spu);
}

static void cleanup_spu_base(void)
{
        struct spu *spu, *tmp;

        mutex_lock(&spu_mutex);
        list_for_each_entry_safe(spu, tmp, &spu_list, list)
                destroy_spu(spu);
        mutex_unlock(&spu_mutex);
        sysdev_class_unregister(&spu_sysdev_class);
}
module_exit(cleanup_spu_base);

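/*
 * Scan the device tree for SPE nodes and create an spu structure for
 * each one found.
 */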
static int __init init_spu_base(void)
{
        struct device_node *node;
        int ret;

        /* create sysdev class for spus */
        ret = sysdev_class_register(&spu_sysdev_class);
        if (ret)
                return ret;

        ret = -ENODEV;
        for (node = of_find_node_by_type(NULL, "spe");
             node; node = of_find_node_by_type(node, "spe")) {
                ret = create_spu(node);
                if (ret) {
                        printk(KERN_WARNING "%s: Error initializing %s\n",
                               __func__, node->name);
                        cleanup_spu_base();
                        break;
                }
        }
        /*
         * In some old firmware versions, the spe is called 'spc', so we
         * look for that as well.
         */
        for (node = of_find_node_by_type(NULL, "spc");
             node; node = of_find_node_by_type(node, "spc")) {
                ret = create_spu(node);
                if (ret) {
                        printk(KERN_WARNING "%s: Error initializing %s\n",
                               __func__, node->name);
                        cleanup_spu_base();
                        break;
                }
        }
        return ret;
}
module_init(init_spu_base);

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Arnd Bergmann <arndb@de.ibm.com>");