/*
 * Low-level SPU handling
 *
 * (C) Copyright IBM Deutschland Entwicklung GmbH 2005
 *
 * Author: Arnd Bergmann <arndb@de.ibm.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2, or (at your option)
 * any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 */

#undef DEBUG

#include <linux/interrupt.h>
#include <linux/list.h>
#include <linux/module.h>
#include <linux/poll.h>
#include <linux/ptrace.h>
#include <linux/slab.h>
#include <linux/wait.h>

#include <asm/io.h>
#include <asm/prom.h>
#include <asm/semaphore.h>
#include <asm/spu.h>
#include <asm/mmu_context.h>

#include "interrupt.h"

static int __spu_trap_invalid_dma(struct spu *spu)
{
	pr_debug("%s\n", __FUNCTION__);
	force_sig(SIGBUS, /* info, */ current);
	return 0;
}

static int __spu_trap_dma_align(struct spu *spu)
{
	pr_debug("%s\n", __FUNCTION__);
	force_sig(SIGBUS, /* info, */ current);
	return 0;
}

static int __spu_trap_error(struct spu *spu)
{
	pr_debug("%s\n", __FUNCTION__);
	force_sig(SIGILL, /* info, */ current);
	return 0;
}

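/* Restart the MFC DMA queue, unless a context switch is currently
 * pending for this SPU. */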
static void spu_restart_dma(struct spu *spu)
{
	struct spu_priv2 __iomem *priv2 = spu->priv2;

	if (!test_bit(SPU_CONTEXT_SWITCH_PENDING, &spu->flags))
		out_be64(&priv2->mfc_control_RW, MFC_CNTL_RESTART_DMA_COMMAND);
}

static int __spu_trap_data_seg(struct spu *spu, unsigned long ea)
{
	struct spu_priv2 __iomem *priv2 = spu->priv2;
	struct mm_struct *mm = spu->mm;
	u64 esid, vsid;

	pr_debug("%s\n", __FUNCTION__);

	if (test_bit(SPU_CONTEXT_SWITCH_ACTIVE, &spu->flags)) {
		/* SLBs are pre-loaded for context switch, so
		 * we should never get here!
		 */
		printk("%s: invalid access during switch!\n", __func__);
		return 1;
	}
	if (!mm || (REGION_ID(ea) != USER_REGION_ID)) {
		/* Future: support kernel segments so that drivers
		 * can use SPUs.
		 */
		pr_debug("invalid region access at %016lx\n", ea);
		return 1;
	}

	esid = (ea & ESID_MASK) | SLB_ESID_V;
	vsid = (get_vsid(mm->context.id, ea) << SLB_VSID_SHIFT) | SLB_VSID_USER;
	if (in_hugepage_area(mm->context, ea))
		vsid |= SLB_VSID_L;

	out_be64(&priv2->slb_index_W, spu->slb_replace);
	out_be64(&priv2->slb_vsid_RW, vsid);
	out_be64(&priv2->slb_esid_RW, esid);

	spu->slb_replace++;
	if (spu->slb_replace >= 8)
		spu->slb_replace = 0;

	spu_restart_dma(spu);

	return 0;
}

extern int hash_page(unsigned long ea, unsigned long access, unsigned long trap); //XXX
static int __spu_trap_data_map(struct spu *spu, unsigned long ea, u64 dsisr)
{
	pr_debug("%s\n", __FUNCTION__);

	/* Handle kernel space hash faults immediately.
	   User hash faults need to be deferred to process context. */
	if ((dsisr & MFC_DSISR_PTE_NOT_FOUND)
	    && REGION_ID(ea) != USER_REGION_ID
	    && hash_page(ea, _PAGE_PRESENT, 0x300) == 0) {
		spu_restart_dma(spu);
		return 0;
	}

	if (test_bit(SPU_CONTEXT_SWITCH_ACTIVE, &spu->flags)) {
		printk("%s: invalid access during switch!\n", __func__);
		return 1;
	}

	spu->dar = ea;
	spu->dsisr = dsisr;
	mb();
	if (spu->stop_callback)
		spu->stop_callback(spu);
	return 0;
}

static int __spu_trap_mailbox(struct spu *spu)
{
	if (spu->ibox_callback)
		spu->ibox_callback(spu);

	/* atomically disable SPU mailbox interrupts */
	spin_lock(&spu->register_lock);
	out_be64(&spu->priv1->int_mask_class2_RW,
		 in_be64(&spu->priv1->int_mask_class2_RW) & ~0x1);
	spin_unlock(&spu->register_lock);
	return 0;
}

static int __spu_trap_stop(struct spu *spu)
{
	pr_debug("%s\n", __FUNCTION__);
	spu->stop_code = in_be32(&spu->problem->spu_status_R);
	if (spu->stop_callback)
		spu->stop_callback(spu);
	return 0;
}

static int __spu_trap_halt(struct spu *spu)
{
	pr_debug("%s\n", __FUNCTION__);
	spu->stop_code = in_be32(&spu->problem->spu_status_R);
	if (spu->stop_callback)
		spu->stop_callback(spu);
	return 0;
}

static int __spu_trap_tag_group(struct spu *spu)
{
	pr_debug("%s\n", __FUNCTION__);
	/* wake_up(&spu->dma_wq); */
	return 0;
}

static int __spu_trap_spubox(struct spu *spu)
{
	if (spu->wbox_callback)
		spu->wbox_callback(spu);

	/* atomically disable SPU mailbox interrupts */
	spin_lock(&spu->register_lock);
	out_be64(&spu->priv1->int_mask_class2_RW,
		 in_be64(&spu->priv1->int_mask_class2_RW) & ~0x10);
	spin_unlock(&spu->register_lock);
	return 0;
}

static irqreturn_t
spu_irq_class_0(int irq, void *data, struct pt_regs *regs)
{
	struct spu *spu;

	spu = data;
	spu->class_0_pending = 1;
	if (spu->stop_callback)
		spu->stop_callback(spu);

	return IRQ_HANDLED;
}

int
spu_irq_class_0_bottom(struct spu *spu)
{
	unsigned long stat, mask;

	spu->class_0_pending = 0;

	mask = in_be64(&spu->priv1->int_mask_class0_RW);
	stat = in_be64(&spu->priv1->int_stat_class0_RW);

	stat &= mask;

	if (stat & 1) /* invalid MFC DMA */
		__spu_trap_invalid_dma(spu);

	if (stat & 2) /* invalid DMA alignment */
		__spu_trap_dma_align(spu);

	if (stat & 4) /* error on SPU */
		__spu_trap_error(spu);

	out_be64(&spu->priv1->int_stat_class0_RW, stat);

	return (stat & 0x7) ? -EIO : 0;
}
EXPORT_SYMBOL_GPL(spu_irq_class_0_bottom);

static irqreturn_t
spu_irq_class_1(int irq, void *data, struct pt_regs *regs)
{
	struct spu *spu;
	unsigned long stat, mask, dar, dsisr;

	spu = data;

	/* atomically read & clear class1 status. */
	spin_lock(&spu->register_lock);
	mask = in_be64(&spu->priv1->int_mask_class1_RW);
	stat = in_be64(&spu->priv1->int_stat_class1_RW) & mask;
	dar = in_be64(&spu->priv1->mfc_dar_RW);
	dsisr = in_be64(&spu->priv1->mfc_dsisr_RW);
	if (stat & 2) /* mapping fault */
		out_be64(&spu->priv1->mfc_dsisr_RW, 0UL);
	out_be64(&spu->priv1->int_stat_class1_RW, stat);
	spin_unlock(&spu->register_lock);

	if (stat & 1) /* segment fault */
		__spu_trap_data_seg(spu, dar);

	if (stat & 2) { /* mapping fault */
		__spu_trap_data_map(spu, dar, dsisr);
	}

	if (stat & 4) /* ls compare & suspend on get */
		;

	if (stat & 8) /* ls compare & suspend on put */
		;

	return stat ? IRQ_HANDLED : IRQ_NONE;
}
EXPORT_SYMBOL_GPL(spu_irq_class_1_bottom);

static irqreturn_t
spu_irq_class_2(int irq, void *data, struct pt_regs *regs)
{
	struct spu *spu;
	unsigned long stat;
	unsigned long mask;

	spu = data;
	stat = in_be64(&spu->priv1->int_stat_class2_RW);
	mask = in_be64(&spu->priv1->int_mask_class2_RW);

	pr_debug("class 2 interrupt %d, %lx, %lx\n", irq, stat, mask);

	stat &= mask;

	if (stat & 1) /* PPC core mailbox */
		__spu_trap_mailbox(spu);

	if (stat & 2) /* SPU stop-and-signal */
		__spu_trap_stop(spu);

	if (stat & 4) /* SPU halted */
		__spu_trap_halt(spu);

	if (stat & 8) /* DMA tag group complete */
		__spu_trap_tag_group(spu);

	if (stat & 0x10) /* SPU mailbox threshold */
		__spu_trap_spubox(spu);

	out_be64(&spu->priv1->int_stat_class2_RW, stat);
	return stat ? IRQ_HANDLED : IRQ_NONE;
}

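/* Each SPE exposes three interrupt classes: class 0 for errors,
 * class 1 for translation (segment/mapping) faults, and class 2 for
 * mailbox, stop-and-signal, halt and DMA tag-group events. */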
static int
spu_request_irqs(struct spu *spu)
{
	int ret;
	int irq_base;

	irq_base = IIC_NODE_STRIDE * spu->node + IIC_SPE_OFFSET;

	snprintf(spu->irq_c0, sizeof (spu->irq_c0), "spe%02d.0", spu->number);
	ret = request_irq(irq_base + spu->isrc,
			  spu_irq_class_0, 0, spu->irq_c0, spu);
	if (ret)
		goto out;
	out_be64(&spu->priv1->int_mask_class0_RW, 0x7);

	snprintf(spu->irq_c1, sizeof (spu->irq_c1), "spe%02d.1", spu->number);
	ret = request_irq(irq_base + IIC_CLASS_STRIDE + spu->isrc,
			  spu_irq_class_1, 0, spu->irq_c1, spu);
	if (ret)
		goto out1;
	out_be64(&spu->priv1->int_mask_class1_RW, 0x3);

	snprintf(spu->irq_c2, sizeof (spu->irq_c2), "spe%02d.2", spu->number);
	ret = request_irq(irq_base + 2*IIC_CLASS_STRIDE + spu->isrc,
			  spu_irq_class_2, 0, spu->irq_c2, spu);
	if (ret)
		goto out2;
	out_be64(&spu->priv1->int_mask_class2_RW, 0xe);
	goto out;

out2:
	free_irq(irq_base + IIC_CLASS_STRIDE + spu->isrc, spu);
out1:
	free_irq(irq_base + spu->isrc, spu);
out:
	return ret;
}

static void
spu_free_irqs(struct spu *spu)
{
	int irq_base;

	irq_base = IIC_NODE_STRIDE * spu->node + IIC_SPE_OFFSET;

	free_irq(irq_base + spu->isrc, spu);
	free_irq(irq_base + IIC_CLASS_STRIDE + spu->isrc, spu);
	free_irq(irq_base + 2*IIC_CLASS_STRIDE + spu->isrc, spu);
}

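/* List of currently unused SPUs; spu_alloc() and spu_free() take
 * entries off and put them back under spu_mutex. */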
static LIST_HEAD(spu_list);
static DECLARE_MUTEX(spu_mutex);

static void spu_init_channels(struct spu *spu)
{
	static const struct {
		unsigned channel;
		unsigned count;
	} zero_list[] = {
		{ 0x00, 1, }, { 0x01, 1, }, { 0x03, 1, }, { 0x04, 1, },
		{ 0x18, 1, }, { 0x19, 1, }, { 0x1b, 1, }, { 0x1d, 1, },
	}, count_list[] = {
		{ 0x00, 0, }, { 0x03, 0, }, { 0x04, 0, }, { 0x15, 16, },
		{ 0x17, 1, }, { 0x18, 0, }, { 0x19, 0, }, { 0x1b, 0, },
		{ 0x1c, 1, }, { 0x1d, 0, }, { 0x1e, 1, },
	};
	struct spu_priv2 *priv2;
	int i;

	priv2 = spu->priv2;

	/* initialize all channel data to zero */
	for (i = 0; i < ARRAY_SIZE(zero_list); i++) {
		int count;

		out_be64(&priv2->spu_chnlcntptr_RW, zero_list[i].channel);
		for (count = 0; count < zero_list[i].count; count++)
			out_be64(&priv2->spu_chnldata_RW, 0);
	}

	/* initialize channel counts to meaningful values */
	for (i = 0; i < ARRAY_SIZE(count_list); i++) {
		out_be64(&priv2->spu_chnlcntptr_RW, count_list[i].channel);
		out_be64(&priv2->spu_chnlcnt_RW, count_list[i].count);
	}
}

static void spu_init_regs(struct spu *spu)
{
	out_be64(&spu->priv1->int_mask_class0_RW, 0x7);
	out_be64(&spu->priv1->int_mask_class1_RW, 0x3);
	out_be64(&spu->priv1->int_mask_class2_RW, 0xe);
}

struct spu *spu_alloc(void)
{
	struct spu *spu;

	down(&spu_mutex);
	if (!list_empty(&spu_list)) {
		spu = list_entry(spu_list.next, struct spu, list);
		list_del_init(&spu->list);
		pr_debug("Got SPU %x %d\n", spu->isrc, spu->number);
	} else {
		pr_debug("No SPU left\n");
		spu = NULL;
	}
	up(&spu_mutex);

	if (spu) {
		spu_init_channels(spu);
		spu_init_regs(spu);
	}

	return spu;
}
EXPORT_SYMBOL_GPL(spu_alloc);

void spu_free(struct spu *spu)
{
	down(&spu_mutex);
	list_add_tail(&spu->list, &spu_list);
	up(&spu_mutex);
}
EXPORT_SYMBOL_GPL(spu_free);

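/* Resolve an MFC translation fault against the owning process' mm:
 * look up the VMA, check access permissions and call handle_mm_fault(). */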
static int spu_handle_mm_fault(struct spu *spu)
{
	struct mm_struct *mm = spu->mm;
	struct vm_area_struct *vma;
	u64 ea, dsisr, is_write;
	int ret;

	ea = spu->dar;
	dsisr = spu->dsisr;
#if 0
	if (!IS_VALID_EA(ea)) {
		return -EFAULT;
	}
#endif /* XXX */
	if (mm == NULL) {
		return -EFAULT;
	}
	if (mm->pgd == NULL) {
		return -EFAULT;
	}

	down_read(&mm->mmap_sem);
	vma = find_vma(mm, ea);
	if (!vma)
		goto bad_area;
	if (vma->vm_start <= ea)
		goto good_area;
	if (!(vma->vm_flags & VM_GROWSDOWN))
		goto bad_area;
#if 0
	if (expand_stack(vma, ea))
		goto bad_area;
#endif /* XXX */
good_area:
	is_write = dsisr & MFC_DSISR_ACCESS_PUT;
	if (is_write) {
		if (!(vma->vm_flags & VM_WRITE))
			goto bad_area;
	} else {
		if (dsisr & MFC_DSISR_ACCESS_DENIED)
			goto bad_area;
		if (!(vma->vm_flags & (VM_READ | VM_EXEC)))
			goto bad_area;
	}
	ret = 0;
	switch (handle_mm_fault(mm, vma, ea, is_write)) {
	case VM_FAULT_MINOR:
		current->min_flt++;
		break;
	case VM_FAULT_MAJOR:
		current->maj_flt++;
		break;
	case VM_FAULT_SIGBUS:
		ret = -EFAULT;
		goto bad_area;
	case VM_FAULT_OOM:
		ret = -ENOMEM;
		goto bad_area;
	default:
		BUG();
	}
	up_read(&mm->mmap_sem);
	return ret;

bad_area:
	up_read(&mm->mmap_sem);
	return -EFAULT;
}

int spu_irq_class_1_bottom(struct spu *spu)
{
	u64 ea, dsisr, access, error = 0UL;
	int ret = 0;

	ea = spu->dar;
	dsisr = spu->dsisr;
	if (dsisr & MFC_DSISR_PTE_NOT_FOUND) {
		access = (_PAGE_PRESENT | _PAGE_USER);
		access |= (dsisr & MFC_DSISR_ACCESS_PUT) ? _PAGE_RW : 0UL;
		if (hash_page(ea, access, 0x300) != 0)
			error |= CLASS1_ENABLE_STORAGE_FAULT_INTR;
	}
	if ((error & CLASS1_ENABLE_STORAGE_FAULT_INTR) ||
	    (dsisr & MFC_DSISR_ACCESS_DENIED)) {
		if ((ret = spu_handle_mm_fault(spu)) != 0)
			error |= CLASS1_ENABLE_STORAGE_FAULT_INTR;
		else
			error &= ~CLASS1_ENABLE_STORAGE_FAULT_INTR;
	}
	spu->dar = 0UL;
	spu->dsisr = 0UL;
	if (!error) {
		spu_restart_dma(spu);
	} else {
		__spu_trap_invalid_dma(spu);
	}
	return ret;
}

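/* Device tree helpers: each SPE node carries "local-store", "problem",
 * "priv1" and "priv2" properties holding an address/length pair that
 * gets mapped here. */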
static void __iomem * __init map_spe_prop(struct device_node *n,
						 const char *name)
{
	struct address_prop {
		unsigned long address;
		unsigned int len;
	} __attribute__((packed)) *prop;

	void *p;
	int proplen;

	p = get_property(n, name, &proplen);
	if (proplen != sizeof (struct address_prop))
		return NULL;

	prop = p;

	return ioremap(prop->address, prop->len);
}

static void spu_unmap(struct spu *spu)
{
	iounmap(spu->priv2);
	iounmap(spu->priv1);
	iounmap(spu->problem);
	iounmap((u8 __iomem *)spu->local_store);
}

static int __init spu_map_device(struct spu *spu, struct device_node *spe)
{
	char *prop;
	int ret;

	ret = -ENODEV;
	prop = get_property(spe, "isrc", NULL);
	if (!prop)
		goto out;
	spu->isrc = *(unsigned int *)prop;

	spu->name = get_property(spe, "name", NULL);
	if (!spu->name)
		goto out;

	prop = get_property(spe, "local-store", NULL);
	if (!prop)
		goto out;
	spu->local_store_phys = *(unsigned long *)prop;

	/* we use local store as ram, not io memory */
	spu->local_store = (void __force *)map_spe_prop(spe, "local-store");
	if (!spu->local_store)
		goto out;

	spu->problem = map_spe_prop(spe, "problem");
	if (!spu->problem)
		goto out_unmap;

	spu->priv1 = map_spe_prop(spe, "priv1");
	if (!spu->priv1)
		goto out_unmap;

	spu->priv2 = map_spe_prop(spe, "priv2");
	if (!spu->priv2)
		goto out_unmap;
	ret = 0;
	goto out;

out_unmap:
	spu_unmap(spu);
out:
	return ret;
}

static int __init find_spu_node_id(struct device_node *spe)
{
	unsigned int *id;
	struct device_node *cpu;

	cpu = spe->parent->parent;
	id = (unsigned int *)get_property(cpu, "node-id", NULL);

	return id ? *id : 0;
}

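/* Set up one SPU: map its register areas, initialize the MFC
 * registers, request its interrupts and add it to the free list. */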
static int __init create_spu(struct device_node *spe)
{
	struct spu *spu;
	int ret;
	static int number;

	ret = -ENOMEM;
	spu = kmalloc(sizeof (*spu), GFP_KERNEL);
	if (!spu)
		goto out;

	ret = spu_map_device(spu, spe);
	if (ret)
		goto out_free;

	spu->node = find_spu_node_id(spe);
	spu->stop_code = 0;
	spu->slb_replace = 0;
	spu->mm = NULL;
	spu->ctx = NULL;
	spu->rq = NULL;
	spu->pid = 0;
	spu->class_0_pending = 0;
	spu->flags = 0UL;
	spu->dar = 0UL;
	spu->dsisr = 0UL;
	spin_lock_init(&spu->register_lock);

	out_be64(&spu->priv1->mfc_sdr_RW, mfspr(SPRN_SDR1));
	out_be64(&spu->priv1->mfc_sr1_RW, 0x33);

	spu->ibox_callback = NULL;
	spu->wbox_callback = NULL;
	spu->stop_callback = NULL;

	down(&spu_mutex);
	spu->number = number++;
	ret = spu_request_irqs(spu);
	if (ret)
		goto out_unmap;

	list_add(&spu->list, &spu_list);
	up(&spu_mutex);

	pr_debug("Using SPE %s %02x %p %p %p %p %d\n",
		spu->name, spu->isrc, spu->local_store,
		spu->problem, spu->priv1, spu->priv2, spu->number);
	goto out;

out_unmap:
	up(&spu_mutex);
	spu_unmap(spu);
out_free:
	kfree(spu);
out:
	return ret;
}

static void destroy_spu(struct spu *spu)
{
	list_del_init(&spu->list);

	spu_free_irqs(spu);
	spu_unmap(spu);
	kfree(spu);
}

static void cleanup_spu_base(void)
{
	struct spu *spu, *tmp;
	down(&spu_mutex);
	list_for_each_entry_safe(spu, tmp, &spu_list, list)
		destroy_spu(spu);
	up(&spu_mutex);
}
module_exit(cleanup_spu_base);

static int __init init_spu_base(void)
{
	struct device_node *node;
	int ret;

	ret = -ENODEV;
	for (node = of_find_node_by_type(NULL, "spe");
			node; node = of_find_node_by_type(node, "spe")) {
		ret = create_spu(node);
		if (ret) {
			printk(KERN_WARNING "%s: Error initializing %s\n",
				__FUNCTION__, node->name);
			cleanup_spu_base();
			break;
		}
	}
	/* in some old firmware versions, the spe is called 'spc', so we
	   look for that as well */
	for (node = of_find_node_by_type(NULL, "spc");
			node; node = of_find_node_by_type(node, "spc")) {
		ret = create_spu(node);
		if (ret) {
			printk(KERN_WARNING "%s: Error initializing %s\n",
				__FUNCTION__, node->name);
			cleanup_spu_base();
			break;
		}
	}
	return ret;
}
module_init(init_spu_base);

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Arnd Bergmann <arndb@de.ibm.com>");