arch/powerpc/platforms/cell/spufs/file.c
1/*
2 * SPU file system -- file contents
3 *
4 * (C) Copyright IBM Deutschland Entwicklung GmbH 2005
5 *
6 * Author: Arnd Bergmann <arndb@de.ibm.com>
7 *
8 * This program is free software; you can redistribute it and/or modify
9 * it under the terms of the GNU General Public License as published by
10 * the Free Software Foundation; either version 2, or (at your option)
11 * any later version.
12 *
13 * This program is distributed in the hope that it will be useful,
14 * but WITHOUT ANY WARRANTY; without even the implied warranty of
15 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
16 * GNU General Public License for more details.
17 *
18 * You should have received a copy of the GNU General Public License
19 * along with this program; if not, write to the Free Software
20 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
21 */
22
a33a7d73
AB
23#undef DEBUG
24
67207b96
AB
25#include <linux/fs.h>
26#include <linux/ioctl.h>
27#include <linux/module.h>
d88cfffa 28#include <linux/pagemap.h>
67207b96 29#include <linux/poll.h>
5110459f 30#include <linux/ptrace.h>
67207b96
AB
31
32#include <asm/io.h>
33#include <asm/semaphore.h>
34#include <asm/spu.h>
b9e3bd77 35#include <asm/spu_info.h>
67207b96
AB
36#include <asm/uaccess.h>
37
38#include "spufs.h"
39
27d5bf2a
BH
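/*
 * Most of the problem state sub-areas are only one 4k page in size,
 * so they can only be mmapped individually when the kernel page size
 * is 4k as well.  With 64k pages the mmap handlers guarded by
 * SPUFS_MMAP_4K are compiled out and the corresponding fops get a
 * NULL .mmap instead.
 */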
40#define SPUFS_MMAP_4K (PAGE_SIZE == 0x1000)
41
67207b96
AB
42static int
43spufs_mem_open(struct inode *inode, struct file *file)
44{
45 struct spufs_inode_info *i = SPUFS_I(inode);
6df10a82 46 struct spu_context *ctx = i->i_ctx;
43c2bbd9
CH
47
48 spin_lock(&ctx->mapping_lock);
6df10a82 49 file->private_data = ctx;
43c2bbd9
CH
50 if (!i->i_openers++)
51 ctx->local_store = inode->i_mapping;
52 spin_unlock(&ctx->mapping_lock);
53 smp_wmb();
54 return 0;
55}
56
57static int
58spufs_mem_release(struct inode *inode, struct file *file)
59{
60 struct spufs_inode_info *i = SPUFS_I(inode);
61 struct spu_context *ctx = i->i_ctx;
62
63 spin_lock(&ctx->mapping_lock);
64 if (!--i->i_openers)
65 ctx->local_store = NULL;
66 spin_unlock(&ctx->mapping_lock);
17e0e270 67 smp_wmb();
67207b96
AB
68 return 0;
69}
70
bf1ab978
DGM
71static ssize_t
72__spufs_mem_read(struct spu_context *ctx, char __user *buffer,
73 size_t size, loff_t *pos)
74{
75 char *local_store = ctx->ops->get_ls(ctx);
76 return simple_read_from_buffer(buffer, size, pos, local_store,
77 LS_SIZE);
78}
79
67207b96
AB
80static ssize_t
81spufs_mem_read(struct file *file, char __user *buffer,
82 size_t size, loff_t *pos)
83{
bf1ab978 84 struct spu_context *ctx = file->private_data;
aa0ed2bd 85 ssize_t ret;
67207b96 86
8b3d6663 87 spu_acquire(ctx);
bf1ab978 88 ret = __spufs_mem_read(ctx, buffer, size, pos);
8b3d6663 89 spu_release(ctx);
67207b96
AB
90 return ret;
91}
92
93static ssize_t
94spufs_mem_write(struct file *file, const char __user *buffer,
aa0ed2bd 95 size_t size, loff_t *ppos)
67207b96
AB
96{
97 struct spu_context *ctx = file->private_data;
8b3d6663 98 char *local_store;
aa0ed2bd 99 loff_t pos = *ppos;
8b3d6663 100 int ret;
67207b96 101
aa0ed2bd
AB
102 if (pos < 0)
103 return -EINVAL;
104 if (pos > LS_SIZE)
67207b96 105 return -EFBIG;
aa0ed2bd
AB
106 if (size > LS_SIZE - pos)
107 size = LS_SIZE - pos;
8b3d6663
AB
108
109 spu_acquire(ctx);
8b3d6663 110 local_store = ctx->ops->get_ls(ctx);
aa0ed2bd 111 ret = copy_from_user(local_store + pos, buffer, size);
8b3d6663 112 spu_release(ctx);
aa0ed2bd
AB
113
114 if (ret)
115 return -EFAULT;
116 *ppos = pos + size;
117 return size;
67207b96
AB
118}
119
78bde53e
BH
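/*
 * Fault handler for mappings of the local store: while the context
 * is saved we map its copy in the vmalloc'ed CSA (cacheable), while
 * it is running we map the physical local store of the SPU it runs
 * on (uncacheable).
 */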
120static unsigned long spufs_mem_mmap_nopfn(struct vm_area_struct *vma,
121 unsigned long address)
8b3d6663 122{
8b3d6663 123 struct spu_context *ctx = vma->vm_file->private_data;
78bde53e
BH
124 unsigned long pfn, offset = address - vma->vm_start;
125
8b3d6663
AB
126 offset += vma->vm_pgoff << PAGE_SHIFT;
127
128b8546
MN
128 if (offset >= LS_SIZE)
129 return NOPFN_SIGBUS;
130
8b3d6663
AB
131 spu_acquire(ctx);
132
ac91cb8d
AB
133 if (ctx->state == SPU_STATE_SAVED) {
134 vma->vm_page_prot = __pgprot(pgprot_val(vma->vm_page_prot)
932f535d 135 & ~_PAGE_NO_CACHE);
78bde53e 136 pfn = vmalloc_to_pfn(ctx->csa.lscsa->ls + offset);
ac91cb8d
AB
137 } else {
138 vma->vm_page_prot = __pgprot(pgprot_val(vma->vm_page_prot)
78bde53e
BH
139 | _PAGE_NO_CACHE);
140 pfn = (ctx->spu->local_store_phys + offset) >> PAGE_SHIFT;
ac91cb8d 141 }
78bde53e 142 vm_insert_pfn(vma, address, pfn);
8b3d6663 143
78bde53e 144 spu_release(ctx);
8b3d6663 145
78bde53e 146 return NOPFN_REFAULT;
8b3d6663
AB
147}
148
78bde53e 149
8b3d6663 150static struct vm_operations_struct spufs_mem_mmap_vmops = {
78bde53e 151 .nopfn = spufs_mem_mmap_nopfn,
8b3d6663
AB
152};
153
67207b96
AB
154static int
155spufs_mem_mmap(struct file *file, struct vm_area_struct *vma)
156{
8b3d6663
AB
157 if (!(vma->vm_flags & VM_SHARED))
158 return -EINVAL;
67207b96 159
78bde53e 160 vma->vm_flags |= VM_IO | VM_PFNMAP;
8b3d6663
AB
161 vma->vm_page_prot = __pgprot(pgprot_val(vma->vm_page_prot)
162 | _PAGE_NO_CACHE);
163
164 vma->vm_ops = &spufs_mem_mmap_vmops;
67207b96
AB
165 return 0;
166}
167
5dfe4c96 168static const struct file_operations spufs_mem_fops = {
67207b96 169 .open = spufs_mem_open,
43c2bbd9 170 .release = spufs_mem_release,
67207b96
AB
171 .read = spufs_mem_read,
172 .write = spufs_mem_write,
8b3d6663 173 .llseek = generic_file_llseek,
67207b96 174 .mmap = spufs_mem_mmap,
8b3d6663
AB
175};
176
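/*
 * Common fault handler for mappings of the problem state area: wait
 * until the context is loaded onto an SPU, then insert the pfn of
 * the faulting page within [ps_offs, ps_offs + ps_size).
 */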
78bde53e 177static unsigned long spufs_ps_nopfn(struct vm_area_struct *vma,
6df10a82 178 unsigned long address,
78bde53e 179 unsigned long ps_offs,
27d5bf2a 180 unsigned long ps_size)
6df10a82 181{
6df10a82 182 struct spu_context *ctx = vma->vm_file->private_data;
78bde53e 183 unsigned long area, offset = address - vma->vm_start;
6df10a82
MN
184 int ret;
185
186 offset += vma->vm_pgoff << PAGE_SHIFT;
27d5bf2a 187 if (offset >= ps_size)
78bde53e 188 return NOPFN_SIGBUS;
6df10a82 189
78bde53e
BH
 190 /* an error here usually means a signal arrived; we might want
 191 * to test the error code more precisely though
 192 */
26bec673 193 ret = spu_acquire_runnable(ctx, 0);
6df10a82 194 if (ret)
78bde53e 195 return NOPFN_REFAULT;
6df10a82
MN
196
197 area = ctx->spu->problem_phys + ps_offs;
78bde53e 198 vm_insert_pfn(vma, address, (area + offset) >> PAGE_SHIFT);
6df10a82
MN
199 spu_release(ctx);
200
78bde53e 201 return NOPFN_REFAULT;
6df10a82
MN
202}
203
27d5bf2a 204#if SPUFS_MMAP_4K
78bde53e
BH
205static unsigned long spufs_cntl_mmap_nopfn(struct vm_area_struct *vma,
206 unsigned long address)
6df10a82 207{
78bde53e 208 return spufs_ps_nopfn(vma, address, 0x4000, 0x1000);
6df10a82
MN
209}
210
211static struct vm_operations_struct spufs_cntl_mmap_vmops = {
78bde53e 212 .nopfn = spufs_cntl_mmap_nopfn,
6df10a82
MN
213};
214
215/*
216 * mmap support for problem state control area [0x4000 - 0x4fff].
6df10a82
MN
217 */
218static int spufs_cntl_mmap(struct file *file, struct vm_area_struct *vma)
219{
220 if (!(vma->vm_flags & VM_SHARED))
221 return -EINVAL;
222
78bde53e 223 vma->vm_flags |= VM_IO | VM_PFNMAP;
6df10a82 224 vma->vm_page_prot = __pgprot(pgprot_val(vma->vm_page_prot)
23cc7701 225 | _PAGE_NO_CACHE | _PAGE_GUARDED);
6df10a82
MN
226
227 vma->vm_ops = &spufs_cntl_mmap_vmops;
228 return 0;
229}
27d5bf2a
BH
230#else /* SPUFS_MMAP_4K */
231#define spufs_cntl_mmap NULL
232#endif /* !SPUFS_MMAP_4K */
6df10a82 233
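/*
 * The cntl file is a simple attribute: reads return the SPU status
 * register, writes go to the SPU run control register.
 */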
e1dbff2b 234static u64 spufs_cntl_get(void *data)
6df10a82 235{
e1dbff2b
AB
236 struct spu_context *ctx = data;
237 u64 val;
6df10a82 238
e1dbff2b
AB
239 spu_acquire(ctx);
240 val = ctx->ops->status_read(ctx);
241 spu_release(ctx);
242
243 return val;
6df10a82
MN
244}
245
e1dbff2b 246static void spufs_cntl_set(void *data, u64 val)
6df10a82 247{
e1dbff2b
AB
248 struct spu_context *ctx = data;
249
250 spu_acquire(ctx);
251 ctx->ops->runcntl_write(ctx, val);
252 spu_release(ctx);
6df10a82
MN
253}
254
e1dbff2b 255static int spufs_cntl_open(struct inode *inode, struct file *file)
6df10a82 256{
e1dbff2b
AB
257 struct spufs_inode_info *i = SPUFS_I(inode);
258 struct spu_context *ctx = i->i_ctx;
259
43c2bbd9 260 spin_lock(&ctx->mapping_lock);
e1dbff2b 261 file->private_data = ctx;
43c2bbd9
CH
262 if (!i->i_openers++)
263 ctx->cntl = inode->i_mapping;
264 spin_unlock(&ctx->mapping_lock);
17e0e270 265 smp_wmb();
e1dbff2b
AB
266 return simple_attr_open(inode, file, spufs_cntl_get,
267 spufs_cntl_set, "0x%08lx");
6df10a82
MN
268}
269
43c2bbd9
CH
270static int
271spufs_cntl_release(struct inode *inode, struct file *file)
272{
273 struct spufs_inode_info *i = SPUFS_I(inode);
274 struct spu_context *ctx = i->i_ctx;
275
276 simple_attr_close(inode, file);
277
278 spin_lock(&ctx->mapping_lock);
279 if (!--i->i_openers)
280 ctx->cntl = NULL;
281 spin_unlock(&ctx->mapping_lock);
282 smp_wmb();
283 return 0;
284}
285
5dfe4c96 286static const struct file_operations spufs_cntl_fops = {
6df10a82 287 .open = spufs_cntl_open,
43c2bbd9 288 .release = spufs_cntl_release,
e1dbff2b
AB
289 .read = simple_attr_read,
290 .write = simple_attr_write,
6df10a82 291 .mmap = spufs_cntl_mmap,
6df10a82
MN
292};
293
8b3d6663
AB
294static int
295spufs_regs_open(struct inode *inode, struct file *file)
296{
297 struct spufs_inode_info *i = SPUFS_I(inode);
298 file->private_data = i->i_ctx;
299 return 0;
300}
301
bf1ab978
DGM
302static ssize_t
303__spufs_regs_read(struct spu_context *ctx, char __user *buffer,
304 size_t size, loff_t *pos)
305{
306 struct spu_lscsa *lscsa = ctx->csa.lscsa;
307 return simple_read_from_buffer(buffer, size, pos,
308 lscsa->gprs, sizeof lscsa->gprs);
309}
310
8b3d6663
AB
311static ssize_t
312spufs_regs_read(struct file *file, char __user *buffer,
313 size_t size, loff_t *pos)
314{
8b3d6663 315 int ret;
bf1ab978 316 struct spu_context *ctx = file->private_data;
8b3d6663
AB
317
318 spu_acquire_saved(ctx);
bf1ab978 319 ret = __spufs_regs_read(ctx, buffer, size, pos);
8b3d6663
AB
320 spu_release(ctx);
321 return ret;
322}
323
324static ssize_t
325spufs_regs_write(struct file *file, const char __user *buffer,
326 size_t size, loff_t *pos)
327{
328 struct spu_context *ctx = file->private_data;
329 struct spu_lscsa *lscsa = ctx->csa.lscsa;
330 int ret;
331
332 size = min_t(ssize_t, sizeof lscsa->gprs - *pos, size);
333 if (size <= 0)
334 return -EFBIG;
335 *pos += size;
336
337 spu_acquire_saved(ctx);
338
339 ret = copy_from_user(lscsa->gprs + *pos - size,
340 buffer, size) ? -EFAULT : size;
341
342 spu_release(ctx);
343 return ret;
344}
345
5dfe4c96 346static const struct file_operations spufs_regs_fops = {
8b3d6663
AB
347 .open = spufs_regs_open,
348 .read = spufs_regs_read,
349 .write = spufs_regs_write,
67207b96
AB
350 .llseek = generic_file_llseek,
351};
352
bf1ab978
DGM
353static ssize_t
354__spufs_fpcr_read(struct spu_context *ctx, char __user * buffer,
355 size_t size, loff_t * pos)
356{
357 struct spu_lscsa *lscsa = ctx->csa.lscsa;
358 return simple_read_from_buffer(buffer, size, pos,
359 &lscsa->fpcr, sizeof(lscsa->fpcr));
360}
361
8b3d6663
AB
362static ssize_t
363spufs_fpcr_read(struct file *file, char __user * buffer,
364 size_t size, loff_t * pos)
365{
8b3d6663 366 int ret;
bf1ab978 367 struct spu_context *ctx = file->private_data;
8b3d6663
AB
368
369 spu_acquire_saved(ctx);
bf1ab978 370 ret = __spufs_fpcr_read(ctx, buffer, size, pos);
8b3d6663
AB
371 spu_release(ctx);
372 return ret;
373}
374
375static ssize_t
376spufs_fpcr_write(struct file *file, const char __user * buffer,
377 size_t size, loff_t * pos)
378{
379 struct spu_context *ctx = file->private_data;
380 struct spu_lscsa *lscsa = ctx->csa.lscsa;
381 int ret;
382
383 size = min_t(ssize_t, sizeof(lscsa->fpcr) - *pos, size);
384 if (size <= 0)
385 return -EFBIG;
386 *pos += size;
387
388 spu_acquire_saved(ctx);
389
390 ret = copy_from_user((char *)&lscsa->fpcr + *pos - size,
391 buffer, size) ? -EFAULT : size;
392
393 spu_release(ctx);
394 return ret;
395}
396
5dfe4c96 397static const struct file_operations spufs_fpcr_fops = {
8b3d6663
AB
398 .open = spufs_regs_open,
399 .read = spufs_fpcr_read,
400 .write = spufs_fpcr_write,
401 .llseek = generic_file_llseek,
402};
403
67207b96
AB
404/* generic open function for all pipe-like files */
405static int spufs_pipe_open(struct inode *inode, struct file *file)
406{
407 struct spufs_inode_info *i = SPUFS_I(inode);
408 file->private_data = i->i_ctx;
409
410 return nonseekable_open(inode, file);
411}
412
cdcc89bb
AB
413/*
414 * Read as many bytes from the mailbox as possible, until
415 * one of the conditions becomes true:
416 *
417 * - no more data available in the mailbox
418 * - end of the user provided buffer
419 * - end of the mapped area
420 */
67207b96
AB
421static ssize_t spufs_mbox_read(struct file *file, char __user *buf,
422 size_t len, loff_t *pos)
423{
8b3d6663 424 struct spu_context *ctx = file->private_data;
cdcc89bb
AB
425 u32 mbox_data, __user *udata;
426 ssize_t count;
67207b96
AB
427
428 if (len < 4)
429 return -EINVAL;
430
cdcc89bb
AB
431 if (!access_ok(VERIFY_WRITE, buf, len))
432 return -EFAULT;
433
434 udata = (void __user *)buf;
435
8b3d6663 436 spu_acquire(ctx);
274cef5e 437 for (count = 0; (count + 4) <= len; count += 4, udata++) {
cdcc89bb
AB
438 int ret;
439 ret = ctx->ops->mbox_read(ctx, &mbox_data);
440 if (ret == 0)
441 break;
442
443 /*
444 * at the end of the mapped area, we can fault
445 * but still need to return the data we have
446 * read successfully so far.
447 */
448 ret = __put_user(mbox_data, udata);
449 if (ret) {
450 if (!count)
451 count = -EFAULT;
452 break;
453 }
454 }
8b3d6663 455 spu_release(ctx);
67207b96 456
cdcc89bb
AB
457 if (!count)
458 count = -EAGAIN;
67207b96 459
cdcc89bb 460 return count;
67207b96
AB
461}
462
5dfe4c96 463static const struct file_operations spufs_mbox_fops = {
67207b96
AB
464 .open = spufs_pipe_open,
465 .read = spufs_mbox_read,
466};
467
468static ssize_t spufs_mbox_stat_read(struct file *file, char __user *buf,
469 size_t len, loff_t *pos)
470{
8b3d6663 471 struct spu_context *ctx = file->private_data;
67207b96
AB
472 u32 mbox_stat;
473
474 if (len < 4)
475 return -EINVAL;
476
8b3d6663
AB
477 spu_acquire(ctx);
478
479 mbox_stat = ctx->ops->mbox_stat_read(ctx) & 0xff;
480
481 spu_release(ctx);
67207b96
AB
482
483 if (copy_to_user(buf, &mbox_stat, sizeof mbox_stat))
484 return -EFAULT;
485
486 return 4;
487}
488
5dfe4c96 489static const struct file_operations spufs_mbox_stat_fops = {
67207b96
AB
490 .open = spufs_pipe_open,
491 .read = spufs_mbox_stat_read,
492};
493
494/* low-level ibox access function */
8b3d6663 495size_t spu_ibox_read(struct spu_context *ctx, u32 *data)
67207b96 496{
8b3d6663
AB
497 return ctx->ops->ibox_read(ctx, data);
498}
67207b96 499
8b3d6663
AB
500static int spufs_ibox_fasync(int fd, struct file *file, int on)
501{
502 struct spu_context *ctx = file->private_data;
67207b96 503
8b3d6663 504 return fasync_helper(fd, file, on, &ctx->ibox_fasync);
67207b96 505}
67207b96 506
8b3d6663
AB
507/* interrupt-level ibox callback function. */
508void spufs_ibox_callback(struct spu *spu)
67207b96 509{
8b3d6663
AB
510 struct spu_context *ctx = spu->ctx;
511
512 wake_up_all(&ctx->ibox_wq);
513 kill_fasync(&ctx->ibox_fasync, SIGIO, POLLIN);
67207b96
AB
514}
515
cdcc89bb
AB
516/*
517 * Read as many bytes from the interrupt mailbox as possible, until
518 * one of the conditions becomes true:
519 *
520 * - no more data available in the mailbox
521 * - end of the user provided buffer
522 * - end of the mapped area
523 *
524 * If the file is opened without O_NONBLOCK, we wait here until
525 * any data is available, but return when we have been able to
526 * read something.
527 */
67207b96
AB
528static ssize_t spufs_ibox_read(struct file *file, char __user *buf,
529 size_t len, loff_t *pos)
530{
8b3d6663 531 struct spu_context *ctx = file->private_data;
cdcc89bb
AB
532 u32 ibox_data, __user *udata;
533 ssize_t count;
67207b96
AB
534
535 if (len < 4)
536 return -EINVAL;
537
cdcc89bb
AB
538 if (!access_ok(VERIFY_WRITE, buf, len))
539 return -EFAULT;
540
541 udata = (void __user *)buf;
542
8b3d6663 543 spu_acquire(ctx);
67207b96 544
cdcc89bb
AB
545 /* wait only for the first element */
546 count = 0;
67207b96 547 if (file->f_flags & O_NONBLOCK) {
8b3d6663 548 if (!spu_ibox_read(ctx, &ibox_data))
cdcc89bb 549 count = -EAGAIN;
67207b96 550 } else {
cdcc89bb 551 count = spufs_wait(ctx->ibox_wq, spu_ibox_read(ctx, &ibox_data));
67207b96 552 }
cdcc89bb
AB
553 if (count)
554 goto out;
67207b96 555
cdcc89bb
AB
556 /* if we can't write at all, return -EFAULT */
557 count = __put_user(ibox_data, udata);
558 if (count)
559 goto out;
8b3d6663 560
cdcc89bb
AB
561 for (count = 4, udata++; (count + 4) <= len; count += 4, udata++) {
562 int ret;
563 ret = ctx->ops->ibox_read(ctx, &ibox_data);
564 if (ret == 0)
565 break;
566 /*
567 * at the end of the mapped area, we can fault
568 * but still need to return the data we have
569 * read successfully so far.
570 */
571 ret = __put_user(ibox_data, udata);
572 if (ret)
573 break;
574 }
67207b96 575
cdcc89bb
AB
576out:
577 spu_release(ctx);
67207b96 578
cdcc89bb 579 return count;
67207b96
AB
580}
581
582static unsigned int spufs_ibox_poll(struct file *file, poll_table *wait)
583{
8b3d6663 584 struct spu_context *ctx = file->private_data;
67207b96
AB
585 unsigned int mask;
586
8b3d6663 587 poll_wait(file, &ctx->ibox_wq, wait);
67207b96 588
3a843d7c
AB
589 spu_acquire(ctx);
590 mask = ctx->ops->mbox_stat_poll(ctx, POLLIN | POLLRDNORM);
591 spu_release(ctx);
67207b96
AB
592
593 return mask;
594}
595
5dfe4c96 596static const struct file_operations spufs_ibox_fops = {
67207b96
AB
597 .open = spufs_pipe_open,
598 .read = spufs_ibox_read,
599 .poll = spufs_ibox_poll,
600 .fasync = spufs_ibox_fasync,
601};
602
603static ssize_t spufs_ibox_stat_read(struct file *file, char __user *buf,
604 size_t len, loff_t *pos)
605{
8b3d6663 606 struct spu_context *ctx = file->private_data;
67207b96
AB
607 u32 ibox_stat;
608
609 if (len < 4)
610 return -EINVAL;
611
8b3d6663
AB
612 spu_acquire(ctx);
613 ibox_stat = (ctx->ops->mbox_stat_read(ctx) >> 16) & 0xff;
614 spu_release(ctx);
67207b96
AB
615
616 if (copy_to_user(buf, &ibox_stat, sizeof ibox_stat))
617 return -EFAULT;
618
619 return 4;
620}
621
5dfe4c96 622static const struct file_operations spufs_ibox_stat_fops = {
67207b96
AB
623 .open = spufs_pipe_open,
624 .read = spufs_ibox_stat_read,
625};
626
627/* low-level mailbox write */
8b3d6663 628size_t spu_wbox_write(struct spu_context *ctx, u32 data)
67207b96 629{
8b3d6663
AB
630 return ctx->ops->wbox_write(ctx, data);
631}
67207b96 632
8b3d6663
AB
633static int spufs_wbox_fasync(int fd, struct file *file, int on)
634{
635 struct spu_context *ctx = file->private_data;
636 int ret;
67207b96 637
8b3d6663 638 ret = fasync_helper(fd, file, on, &ctx->wbox_fasync);
67207b96 639
67207b96
AB
640 return ret;
641}
67207b96 642
8b3d6663
AB
643/* interrupt-level wbox callback function. */
644void spufs_wbox_callback(struct spu *spu)
67207b96 645{
8b3d6663
AB
646 struct spu_context *ctx = spu->ctx;
647
648 wake_up_all(&ctx->wbox_wq);
649 kill_fasync(&ctx->wbox_fasync, SIGIO, POLLOUT);
67207b96
AB
650}
651
cdcc89bb
AB
652/*
 653 * Write as many bytes to the SPU inbound mailbox as possible, until
654 * one of the conditions becomes true:
655 *
656 * - the mailbox is full
657 * - end of the user provided buffer
658 * - end of the mapped area
659 *
660 * If the file is opened without O_NONBLOCK, we wait here until
 661 * space is available, but return when we have been able to
662 * write something.
663 */
67207b96
AB
664static ssize_t spufs_wbox_write(struct file *file, const char __user *buf,
665 size_t len, loff_t *pos)
666{
8b3d6663 667 struct spu_context *ctx = file->private_data;
cdcc89bb
AB
668 u32 wbox_data, __user *udata;
669 ssize_t count;
67207b96
AB
670
671 if (len < 4)
672 return -EINVAL;
673
cdcc89bb
AB
674 udata = (void __user *)buf;
675 if (!access_ok(VERIFY_READ, buf, len))
676 return -EFAULT;
677
678 if (__get_user(wbox_data, udata))
67207b96
AB
679 return -EFAULT;
680
8b3d6663
AB
681 spu_acquire(ctx);
682
cdcc89bb
AB
683 /*
684 * make sure we can at least write one element, by waiting
685 * in case of !O_NONBLOCK
686 */
687 count = 0;
67207b96 688 if (file->f_flags & O_NONBLOCK) {
8b3d6663 689 if (!spu_wbox_write(ctx, wbox_data))
cdcc89bb 690 count = -EAGAIN;
67207b96 691 } else {
cdcc89bb 692 count = spufs_wait(ctx->wbox_wq, spu_wbox_write(ctx, wbox_data));
67207b96
AB
693 }
694
cdcc89bb
AB
695 if (count)
696 goto out;
8b3d6663 697
cdcc89bb
AB
 698 /* write as much as possible */
699 for (count = 4, udata++; (count + 4) <= len; count += 4, udata++) {
700 int ret;
701 ret = __get_user(wbox_data, udata);
702 if (ret)
703 break;
704
705 ret = spu_wbox_write(ctx, wbox_data);
706 if (ret == 0)
707 break;
708 }
709
710out:
711 spu_release(ctx);
712 return count;
67207b96
AB
713}
714
715static unsigned int spufs_wbox_poll(struct file *file, poll_table *wait)
716{
8b3d6663 717 struct spu_context *ctx = file->private_data;
67207b96
AB
718 unsigned int mask;
719
8b3d6663 720 poll_wait(file, &ctx->wbox_wq, wait);
67207b96 721
3a843d7c
AB
722 spu_acquire(ctx);
723 mask = ctx->ops->mbox_stat_poll(ctx, POLLOUT | POLLWRNORM);
724 spu_release(ctx);
67207b96
AB
725
726 return mask;
727}
728
5dfe4c96 729static const struct file_operations spufs_wbox_fops = {
67207b96
AB
730 .open = spufs_pipe_open,
731 .write = spufs_wbox_write,
732 .poll = spufs_wbox_poll,
733 .fasync = spufs_wbox_fasync,
734};
735
736static ssize_t spufs_wbox_stat_read(struct file *file, char __user *buf,
737 size_t len, loff_t *pos)
738{
8b3d6663 739 struct spu_context *ctx = file->private_data;
67207b96
AB
740 u32 wbox_stat;
741
742 if (len < 4)
743 return -EINVAL;
744
8b3d6663
AB
745 spu_acquire(ctx);
746 wbox_stat = (ctx->ops->mbox_stat_read(ctx) >> 8) & 0xff;
747 spu_release(ctx);
67207b96
AB
748
749 if (copy_to_user(buf, &wbox_stat, sizeof wbox_stat))
750 return -EFAULT;
751
752 return 4;
753}
754
5dfe4c96 755static const struct file_operations spufs_wbox_stat_fops = {
67207b96
AB
756 .open = spufs_pipe_open,
757 .read = spufs_wbox_stat_read,
758};
759
6df10a82
MN
760static int spufs_signal1_open(struct inode *inode, struct file *file)
761{
762 struct spufs_inode_info *i = SPUFS_I(inode);
763 struct spu_context *ctx = i->i_ctx;
43c2bbd9
CH
764
765 spin_lock(&ctx->mapping_lock);
6df10a82 766 file->private_data = ctx;
43c2bbd9
CH
767 if (!i->i_openers++)
768 ctx->signal1 = inode->i_mapping;
769 spin_unlock(&ctx->mapping_lock);
17e0e270 770 smp_wmb();
6df10a82
MN
771 return nonseekable_open(inode, file);
772}
773
43c2bbd9
CH
774static int
775spufs_signal1_release(struct inode *inode, struct file *file)
776{
777 struct spufs_inode_info *i = SPUFS_I(inode);
778 struct spu_context *ctx = i->i_ctx;
779
780 spin_lock(&ctx->mapping_lock);
781 if (!--i->i_openers)
782 ctx->signal1 = NULL;
783 spin_unlock(&ctx->mapping_lock);
784 smp_wmb();
785 return 0;
786}
787
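/*
 * Return the saved value of signal notification register 1 (SPU
 * channel 3), or zero bytes when no signal is pending.
 */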
bf1ab978 788static ssize_t __spufs_signal1_read(struct spu_context *ctx, char __user *buf,
67207b96
AB
789 size_t len, loff_t *pos)
790{
17f88ceb 791 int ret = 0;
67207b96
AB
792 u32 data;
793
67207b96
AB
794 if (len < 4)
795 return -EINVAL;
796
17f88ceb
DGM
797 if (ctx->csa.spu_chnlcnt_RW[3]) {
798 data = ctx->csa.spu_chnldata_RW[3];
799 ret = 4;
800 }
8b3d6663 801
17f88ceb
DGM
802 if (!ret)
803 goto out;
804
67207b96
AB
805 if (copy_to_user(buf, &data, 4))
806 return -EFAULT;
807
17f88ceb
DGM
808out:
809 return ret;
67207b96
AB
810}
811
bf1ab978
DGM
812static ssize_t spufs_signal1_read(struct file *file, char __user *buf,
813 size_t len, loff_t *pos)
814{
815 int ret;
816 struct spu_context *ctx = file->private_data;
817
818 spu_acquire_saved(ctx);
819 ret = __spufs_signal1_read(ctx, buf, len, pos);
820 spu_release(ctx);
821
822 return ret;
823}
824
67207b96
AB
825static ssize_t spufs_signal1_write(struct file *file, const char __user *buf,
826 size_t len, loff_t *pos)
827{
828 struct spu_context *ctx;
67207b96
AB
829 u32 data;
830
831 ctx = file->private_data;
67207b96
AB
832
833 if (len < 4)
834 return -EINVAL;
835
836 if (copy_from_user(&data, buf, 4))
837 return -EFAULT;
838
8b3d6663
AB
839 spu_acquire(ctx);
840 ctx->ops->signal1_write(ctx, data);
841 spu_release(ctx);
67207b96
AB
842
843 return 4;
844}
845
78bde53e
BH
846static unsigned long spufs_signal1_mmap_nopfn(struct vm_area_struct *vma,
847 unsigned long address)
6df10a82 848{
27d5bf2a 849#if PAGE_SIZE == 0x1000
78bde53e 850 return spufs_ps_nopfn(vma, address, 0x14000, 0x1000);
27d5bf2a
BH
851#elif PAGE_SIZE == 0x10000
852 /* For 64k pages, both signal1 and signal2 can be used to mmap the whole
853 * signal 1 and 2 area
854 */
78bde53e 855 return spufs_ps_nopfn(vma, address, 0x10000, 0x10000);
27d5bf2a
BH
856#else
857#error unsupported page size
858#endif
6df10a82
MN
859}
860
861static struct vm_operations_struct spufs_signal1_mmap_vmops = {
78bde53e 862 .nopfn = spufs_signal1_mmap_nopfn,
6df10a82
MN
863};
864
865static int spufs_signal1_mmap(struct file *file, struct vm_area_struct *vma)
866{
867 if (!(vma->vm_flags & VM_SHARED))
868 return -EINVAL;
869
78bde53e 870 vma->vm_flags |= VM_IO | VM_PFNMAP;
6df10a82 871 vma->vm_page_prot = __pgprot(pgprot_val(vma->vm_page_prot)
23cc7701 872 | _PAGE_NO_CACHE | _PAGE_GUARDED);
6df10a82
MN
873
874 vma->vm_ops = &spufs_signal1_mmap_vmops;
875 return 0;
876}
6df10a82 877
5dfe4c96 878static const struct file_operations spufs_signal1_fops = {
6df10a82 879 .open = spufs_signal1_open,
43c2bbd9 880 .release = spufs_signal1_release,
67207b96
AB
881 .read = spufs_signal1_read,
882 .write = spufs_signal1_write,
6df10a82 883 .mmap = spufs_signal1_mmap,
67207b96
AB
884};
885
6df10a82
MN
886static int spufs_signal2_open(struct inode *inode, struct file *file)
887{
888 struct spufs_inode_info *i = SPUFS_I(inode);
889 struct spu_context *ctx = i->i_ctx;
43c2bbd9
CH
890
891 spin_lock(&ctx->mapping_lock);
6df10a82 892 file->private_data = ctx;
43c2bbd9
CH
893 if (!i->i_openers++)
894 ctx->signal2 = inode->i_mapping;
895 spin_unlock(&ctx->mapping_lock);
17e0e270 896 smp_wmb();
6df10a82
MN
897 return nonseekable_open(inode, file);
898}
899
43c2bbd9
CH
900static int
901spufs_signal2_release(struct inode *inode, struct file *file)
902{
903 struct spufs_inode_info *i = SPUFS_I(inode);
904 struct spu_context *ctx = i->i_ctx;
905
906 spin_lock(&ctx->mapping_lock);
907 if (!--i->i_openers)
908 ctx->signal2 = NULL;
909 spin_unlock(&ctx->mapping_lock);
910 smp_wmb();
911 return 0;
912}
913
bf1ab978 914static ssize_t __spufs_signal2_read(struct spu_context *ctx, char __user *buf,
67207b96
AB
915 size_t len, loff_t *pos)
916{
17f88ceb 917 int ret = 0;
67207b96
AB
918 u32 data;
919
67207b96
AB
920 if (len < 4)
921 return -EINVAL;
922
17f88ceb
DGM
923 if (ctx->csa.spu_chnlcnt_RW[4]) {
924 data = ctx->csa.spu_chnldata_RW[4];
925 ret = 4;
926 }
8b3d6663 927
17f88ceb
DGM
928 if (!ret)
929 goto out;
930
67207b96
AB
931 if (copy_to_user(buf, &data, 4))
932 return -EFAULT;
933
17f88ceb 934out:
bf1ab978
DGM
935 return ret;
936}
937
938static ssize_t spufs_signal2_read(struct file *file, char __user *buf,
939 size_t len, loff_t *pos)
940{
941 struct spu_context *ctx = file->private_data;
942 int ret;
943
944 spu_acquire_saved(ctx);
945 ret = __spufs_signal2_read(ctx, buf, len, pos);
946 spu_release(ctx);
947
948 return ret;
67207b96
AB
949}
950
951static ssize_t spufs_signal2_write(struct file *file, const char __user *buf,
952 size_t len, loff_t *pos)
953{
954 struct spu_context *ctx;
67207b96
AB
955 u32 data;
956
957 ctx = file->private_data;
67207b96
AB
958
959 if (len < 4)
960 return -EINVAL;
961
962 if (copy_from_user(&data, buf, 4))
963 return -EFAULT;
964
8b3d6663
AB
965 spu_acquire(ctx);
966 ctx->ops->signal2_write(ctx, data);
967 spu_release(ctx);
67207b96
AB
968
969 return 4;
970}
971
27d5bf2a 972#if SPUFS_MMAP_4K
78bde53e
BH
973static unsigned long spufs_signal2_mmap_nopfn(struct vm_area_struct *vma,
974 unsigned long address)
6df10a82 975{
27d5bf2a 976#if PAGE_SIZE == 0x1000
78bde53e 977 return spufs_ps_nopfn(vma, address, 0x1c000, 0x1000);
27d5bf2a
BH
978#elif PAGE_SIZE == 0x10000
979 /* For 64k pages, both signal1 and signal2 can be used to mmap the whole
980 * signal 1 and 2 area
981 */
78bde53e 982 return spufs_ps_nopfn(vma, address, 0x10000, 0x10000);
27d5bf2a
BH
983#else
984#error unsupported page size
985#endif
6df10a82
MN
986}
987
988static struct vm_operations_struct spufs_signal2_mmap_vmops = {
78bde53e 989 .nopfn = spufs_signal2_mmap_nopfn,
6df10a82
MN
990};
991
992static int spufs_signal2_mmap(struct file *file, struct vm_area_struct *vma)
993{
994 if (!(vma->vm_flags & VM_SHARED))
995 return -EINVAL;
996
78bde53e 997 vma->vm_flags |= VM_IO | VM_PFNMAP;
6df10a82 998 vma->vm_page_prot = __pgprot(pgprot_val(vma->vm_page_prot)
23cc7701 999 | _PAGE_NO_CACHE | _PAGE_GUARDED);
6df10a82
MN
1000
1001 vma->vm_ops = &spufs_signal2_mmap_vmops;
1002 return 0;
1003}
27d5bf2a
BH
1004#else /* SPUFS_MMAP_4K */
1005#define spufs_signal2_mmap NULL
1006#endif /* !SPUFS_MMAP_4K */
6df10a82 1007
5dfe4c96 1008static const struct file_operations spufs_signal2_fops = {
6df10a82 1009 .open = spufs_signal2_open,
43c2bbd9 1010 .release = spufs_signal2_release,
67207b96
AB
1011 .read = spufs_signal2_read,
1012 .write = spufs_signal2_write,
6df10a82 1013 .mmap = spufs_signal2_mmap,
67207b96
AB
1014};
1015
1016static void spufs_signal1_type_set(void *data, u64 val)
1017{
1018 struct spu_context *ctx = data;
67207b96 1019
8b3d6663
AB
1020 spu_acquire(ctx);
1021 ctx->ops->signal1_type_set(ctx, val);
1022 spu_release(ctx);
67207b96
AB
1023}
1024
bf1ab978
DGM
1025static u64 __spufs_signal1_type_get(void *data)
1026{
1027 struct spu_context *ctx = data;
1028 return ctx->ops->signal1_type_get(ctx);
1029}
1030
67207b96
AB
1031static u64 spufs_signal1_type_get(void *data)
1032{
1033 struct spu_context *ctx = data;
8b3d6663
AB
1034 u64 ret;
1035
1036 spu_acquire(ctx);
bf1ab978 1037 ret = __spufs_signal1_type_get(data);
8b3d6663
AB
1038 spu_release(ctx);
1039
1040 return ret;
67207b96
AB
1041}
1042DEFINE_SIMPLE_ATTRIBUTE(spufs_signal1_type, spufs_signal1_type_get,
1043 spufs_signal1_type_set, "%llu");
1044
1045static void spufs_signal2_type_set(void *data, u64 val)
1046{
1047 struct spu_context *ctx = data;
67207b96 1048
8b3d6663
AB
1049 spu_acquire(ctx);
1050 ctx->ops->signal2_type_set(ctx, val);
1051 spu_release(ctx);
67207b96
AB
1052}
1053
bf1ab978
DGM
1054static u64 __spufs_signal2_type_get(void *data)
1055{
1056 struct spu_context *ctx = data;
1057 return ctx->ops->signal2_type_get(ctx);
1058}
1059
67207b96
AB
1060static u64 spufs_signal2_type_get(void *data)
1061{
1062 struct spu_context *ctx = data;
8b3d6663
AB
1063 u64 ret;
1064
1065 spu_acquire(ctx);
bf1ab978 1066 ret = __spufs_signal2_type_get(data);
8b3d6663
AB
1067 spu_release(ctx);
1068
1069 return ret;
67207b96
AB
1070}
1071DEFINE_SIMPLE_ATTRIBUTE(spufs_signal2_type, spufs_signal2_type_get,
1072 spufs_signal2_type_set, "%llu");
1073
27d5bf2a 1074#if SPUFS_MMAP_4K
78bde53e
BH
1075static unsigned long spufs_mss_mmap_nopfn(struct vm_area_struct *vma,
1076 unsigned long address)
d9379c4b 1077{
78bde53e 1078 return spufs_ps_nopfn(vma, address, 0x0000, 0x1000);
d9379c4b
AB
1079}
1080
1081static struct vm_operations_struct spufs_mss_mmap_vmops = {
78bde53e 1082 .nopfn = spufs_mss_mmap_nopfn,
d9379c4b
AB
1083};
1084
1085/*
 1086 * mmap support for problem state multisource sync area [0x0000 - 0x0fff].
d9379c4b
AB
1087 */
1088static int spufs_mss_mmap(struct file *file, struct vm_area_struct *vma)
1089{
1090 if (!(vma->vm_flags & VM_SHARED))
1091 return -EINVAL;
1092
78bde53e 1093 vma->vm_flags |= VM_IO | VM_PFNMAP;
d9379c4b 1094 vma->vm_page_prot = __pgprot(pgprot_val(vma->vm_page_prot)
23cc7701 1095 | _PAGE_NO_CACHE | _PAGE_GUARDED);
d9379c4b
AB
1096
1097 vma->vm_ops = &spufs_mss_mmap_vmops;
1098 return 0;
1099}
27d5bf2a
BH
1100#else /* SPUFS_MMAP_4K */
1101#define spufs_mss_mmap NULL
1102#endif /* !SPUFS_MMAP_4K */
d9379c4b
AB
1103
1104static int spufs_mss_open(struct inode *inode, struct file *file)
1105{
1106 struct spufs_inode_info *i = SPUFS_I(inode);
17e0e270 1107 struct spu_context *ctx = i->i_ctx;
d9379c4b
AB
1108
1109 file->private_data = i->i_ctx;
43c2bbd9
CH
1110
1111 spin_lock(&ctx->mapping_lock);
1112 if (!i->i_openers++)
1113 ctx->mss = inode->i_mapping;
1114 spin_unlock(&ctx->mapping_lock);
17e0e270 1115 smp_wmb();
d9379c4b
AB
1116 return nonseekable_open(inode, file);
1117}
1118
43c2bbd9
CH
1119static int
1120spufs_mss_release(struct inode *inode, struct file *file)
1121{
1122 struct spufs_inode_info *i = SPUFS_I(inode);
1123 struct spu_context *ctx = i->i_ctx;
1124
1125 spin_lock(&ctx->mapping_lock);
1126 if (!--i->i_openers)
1127 ctx->mss = NULL;
1128 spin_unlock(&ctx->mapping_lock);
1129 smp_wmb();
1130 return 0;
1131}
1132
5dfe4c96 1133static const struct file_operations spufs_mss_fops = {
d9379c4b 1134 .open = spufs_mss_open,
43c2bbd9 1135 .release = spufs_mss_release,
d9379c4b 1136 .mmap = spufs_mss_mmap,
27d5bf2a
BH
1137};
1138
78bde53e
BH
1139static unsigned long spufs_psmap_mmap_nopfn(struct vm_area_struct *vma,
1140 unsigned long address)
27d5bf2a 1141{
78bde53e 1142 return spufs_ps_nopfn(vma, address, 0x0000, 0x20000);
27d5bf2a
BH
1143}
1144
1145static struct vm_operations_struct spufs_psmap_mmap_vmops = {
78bde53e 1146 .nopfn = spufs_psmap_mmap_nopfn,
27d5bf2a
BH
1147};
1148
1149/*
1150 * mmap support for full problem state area [0x00000 - 0x1ffff].
1151 */
1152static int spufs_psmap_mmap(struct file *file, struct vm_area_struct *vma)
1153{
1154 if (!(vma->vm_flags & VM_SHARED))
1155 return -EINVAL;
1156
78bde53e 1157 vma->vm_flags |= VM_IO | VM_PFNMAP;
27d5bf2a
BH
1158 vma->vm_page_prot = __pgprot(pgprot_val(vma->vm_page_prot)
1159 | _PAGE_NO_CACHE | _PAGE_GUARDED);
1160
1161 vma->vm_ops = &spufs_psmap_mmap_vmops;
1162 return 0;
1163}
1164
1165static int spufs_psmap_open(struct inode *inode, struct file *file)
1166{
1167 struct spufs_inode_info *i = SPUFS_I(inode);
17e0e270 1168 struct spu_context *ctx = i->i_ctx;
27d5bf2a 1169
43c2bbd9 1170 spin_lock(&ctx->mapping_lock);
27d5bf2a 1171 file->private_data = i->i_ctx;
43c2bbd9
CH
1172 if (!i->i_openers++)
1173 ctx->psmap = inode->i_mapping;
1174 spin_unlock(&ctx->mapping_lock);
17e0e270 1175 smp_wmb();
27d5bf2a
BH
1176 return nonseekable_open(inode, file);
1177}
1178
43c2bbd9
CH
1179static int
1180spufs_psmap_release(struct inode *inode, struct file *file)
1181{
1182 struct spufs_inode_info *i = SPUFS_I(inode);
1183 struct spu_context *ctx = i->i_ctx;
1184
1185 spin_lock(&ctx->mapping_lock);
1186 if (!--i->i_openers)
1187 ctx->psmap = NULL;
1188 spin_unlock(&ctx->mapping_lock);
1189 smp_wmb();
1190 return 0;
1191}
1192
5dfe4c96 1193static const struct file_operations spufs_psmap_fops = {
27d5bf2a 1194 .open = spufs_psmap_open,
43c2bbd9 1195 .release = spufs_psmap_release,
27d5bf2a 1196 .mmap = spufs_psmap_mmap,
d9379c4b
AB
1197};
1198
1199
27d5bf2a 1200#if SPUFS_MMAP_4K
78bde53e
BH
1201static unsigned long spufs_mfc_mmap_nopfn(struct vm_area_struct *vma,
1202 unsigned long address)
6df10a82 1203{
78bde53e 1204 return spufs_ps_nopfn(vma, address, 0x3000, 0x1000);
6df10a82
MN
1205}
1206
1207static struct vm_operations_struct spufs_mfc_mmap_vmops = {
78bde53e 1208 .nopfn = spufs_mfc_mmap_nopfn,
6df10a82
MN
1209};
1210
1211/*
 1212 * mmap support for problem state MFC DMA area [0x3000 - 0x3fff].
6df10a82
MN
1213 */
1214static int spufs_mfc_mmap(struct file *file, struct vm_area_struct *vma)
1215{
1216 if (!(vma->vm_flags & VM_SHARED))
1217 return -EINVAL;
1218
78bde53e 1219 vma->vm_flags |= VM_IO | VM_PFNMAP;
6df10a82 1220 vma->vm_page_prot = __pgprot(pgprot_val(vma->vm_page_prot)
23cc7701 1221 | _PAGE_NO_CACHE | _PAGE_GUARDED);
6df10a82
MN
1222
1223 vma->vm_ops = &spufs_mfc_mmap_vmops;
1224 return 0;
1225}
27d5bf2a
BH
1226#else /* SPUFS_MMAP_4K */
1227#define spufs_mfc_mmap NULL
1228#endif /* !SPUFS_MMAP_4K */
a33a7d73
AB
1229
1230static int spufs_mfc_open(struct inode *inode, struct file *file)
1231{
1232 struct spufs_inode_info *i = SPUFS_I(inode);
1233 struct spu_context *ctx = i->i_ctx;
1234
1235 /* we don't want to deal with DMA into other processes */
1236 if (ctx->owner != current->mm)
1237 return -EINVAL;
1238
1239 if (atomic_read(&inode->i_count) != 1)
1240 return -EBUSY;
1241
43c2bbd9 1242 spin_lock(&ctx->mapping_lock);
a33a7d73 1243 file->private_data = ctx;
43c2bbd9
CH
1244 if (!i->i_openers++)
1245 ctx->mfc = inode->i_mapping;
1246 spin_unlock(&ctx->mapping_lock);
17e0e270 1247 smp_wmb();
a33a7d73
AB
1248 return nonseekable_open(inode, file);
1249}
1250
43c2bbd9
CH
1251static int
1252spufs_mfc_release(struct inode *inode, struct file *file)
1253{
1254 struct spufs_inode_info *i = SPUFS_I(inode);
1255 struct spu_context *ctx = i->i_ctx;
1256
1257 spin_lock(&ctx->mapping_lock);
1258 if (!--i->i_openers)
1259 ctx->mfc = NULL;
1260 spin_unlock(&ctx->mapping_lock);
1261 smp_wmb();
1262 return 0;
1263}
1264
a33a7d73
AB
1265/* interrupt-level mfc callback function. */
1266void spufs_mfc_callback(struct spu *spu)
1267{
1268 struct spu_context *ctx = spu->ctx;
1269
1270 wake_up_all(&ctx->mfc_wq);
1271
1272 pr_debug("%s %s\n", __FUNCTION__, spu->name);
1273 if (ctx->mfc_fasync) {
1274 u32 free_elements, tagstatus;
1275 unsigned int mask;
1276
1277 /* no need for spu_acquire in interrupt context */
1278 free_elements = ctx->ops->get_mfc_free_elements(ctx);
1279 tagstatus = ctx->ops->read_mfc_tagstatus(ctx);
1280
1281 mask = 0;
1282 if (free_elements & 0xffff)
1283 mask |= POLLOUT;
1284 if (tagstatus & ctx->tagwait)
1285 mask |= POLLIN;
1286
1287 kill_fasync(&ctx->mfc_fasync, SIGIO, mask);
1288 }
1289}
1290
1291static int spufs_read_mfc_tagstatus(struct spu_context *ctx, u32 *status)
1292{
 1293 /* See if at least one tag group has completed */
1294 /* FIXME we need locking around tagwait */
1295 *status = ctx->ops->read_mfc_tagstatus(ctx) & ctx->tagwait;
1296 ctx->tagwait &= ~*status;
1297 if (*status)
1298 return 1;
1299
1300 /* enable interrupt waiting for any tag group,
1301 may silently fail if interrupts are already enabled */
1302 ctx->ops->set_mfc_query(ctx, ctx->tagwait, 1);
1303 return 0;
1304}
1305
1306static ssize_t spufs_mfc_read(struct file *file, char __user *buffer,
1307 size_t size, loff_t *pos)
1308{
1309 struct spu_context *ctx = file->private_data;
1310 int ret = -EINVAL;
1311 u32 status;
1312
1313 if (size != 4)
1314 goto out;
1315
1316 spu_acquire(ctx);
1317 if (file->f_flags & O_NONBLOCK) {
1318 status = ctx->ops->read_mfc_tagstatus(ctx);
1319 if (!(status & ctx->tagwait))
1320 ret = -EAGAIN;
1321 else
1322 ctx->tagwait &= ~status;
1323 } else {
1324 ret = spufs_wait(ctx->mfc_wq,
1325 spufs_read_mfc_tagstatus(ctx, &status));
1326 }
1327 spu_release(ctx);
1328
1329 if (ret)
1330 goto out;
1331
1332 ret = 4;
1333 if (copy_to_user(buffer, &status, 4))
1334 ret = -EFAULT;
1335
1336out:
1337 return ret;
1338}
1339
1340static int spufs_check_valid_dma(struct mfc_dma_command *cmd)
1341{
1342 pr_debug("queueing DMA %x %lx %x %x %x\n", cmd->lsa,
1343 cmd->ea, cmd->size, cmd->tag, cmd->cmd);
1344
1345 switch (cmd->cmd) {
1346 case MFC_PUT_CMD:
1347 case MFC_PUTF_CMD:
1348 case MFC_PUTB_CMD:
1349 case MFC_GET_CMD:
1350 case MFC_GETF_CMD:
1351 case MFC_GETB_CMD:
1352 break;
1353 default:
1354 pr_debug("invalid DMA opcode %x\n", cmd->cmd);
1355 return -EIO;
1356 }
1357
 1358 if ((cmd->lsa & 0xf) != (cmd->ea & 0xf)) {
1359 pr_debug("invalid DMA alignment, ea %lx lsa %x\n",
1360 cmd->ea, cmd->lsa);
1361 return -EIO;
1362 }
1363
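	/*
	 * Check the MFC alignment rules: transfers of 1, 2, 4 or 8
	 * bytes need a naturally aligned local store address, larger
	 * transfers a 16-byte aligned one.
	 */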
1364 switch (cmd->size & 0xf) {
1365 case 1:
1366 break;
1367 case 2:
1368 if (cmd->lsa & 1)
1369 goto error;
1370 break;
1371 case 4:
1372 if (cmd->lsa & 3)
1373 goto error;
1374 break;
1375 case 8:
1376 if (cmd->lsa & 7)
1377 goto error;
1378 break;
1379 case 0:
1380 if (cmd->lsa & 15)
1381 goto error;
1382 break;
1383 error:
1384 default:
1385 pr_debug("invalid DMA alignment %x for size %x\n",
1386 cmd->lsa & 0xf, cmd->size);
1387 return -EIO;
1388 }
1389
1390 if (cmd->size > 16 * 1024) {
1391 pr_debug("invalid DMA size %x\n", cmd->size);
1392 return -EIO;
1393 }
1394
1395 if (cmd->tag & 0xfff0) {
1396 /* we reserve the higher tag numbers for kernel use */
1397 pr_debug("invalid DMA tag\n");
1398 return -EIO;
1399 }
1400
1401 if (cmd->class) {
1402 /* not supported in this version */
1403 pr_debug("invalid DMA class\n");
1404 return -EIO;
1405 }
1406
1407 return 0;
1408}
1409
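/*
 * Helper for spufs_wait(): try to queue the DMA command, retrying
 * once after enabling the tag group query interrupt.  Returns 1 when
 * done (queued successfully or failed with a hard error), 0 to keep
 * waiting for queue space.
 */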
1410static int spu_send_mfc_command(struct spu_context *ctx,
1411 struct mfc_dma_command cmd,
1412 int *error)
1413{
1414 *error = ctx->ops->send_mfc_command(ctx, &cmd);
1415 if (*error == -EAGAIN) {
1416 /* wait for any tag group to complete
1417 so we have space for the new command */
1418 ctx->ops->set_mfc_query(ctx, ctx->tagwait, 1);
1419 /* try again, because the queue might be
1420 empty again */
1421 *error = ctx->ops->send_mfc_command(ctx, &cmd);
1422 if (*error == -EAGAIN)
1423 return 0;
1424 }
1425 return 1;
1426}
1427
1428static ssize_t spufs_mfc_write(struct file *file, const char __user *buffer,
1429 size_t size, loff_t *pos)
1430{
1431 struct spu_context *ctx = file->private_data;
1432 struct mfc_dma_command cmd;
1433 int ret = -EINVAL;
1434
1435 if (size != sizeof cmd)
1436 goto out;
1437
1438 ret = -EFAULT;
1439 if (copy_from_user(&cmd, buffer, sizeof cmd))
1440 goto out;
1441
1442 ret = spufs_check_valid_dma(&cmd);
1443 if (ret)
1444 goto out;
1445
26bec673 1446 spu_acquire_runnable(ctx, 0);
a33a7d73
AB
1447 if (file->f_flags & O_NONBLOCK) {
1448 ret = ctx->ops->send_mfc_command(ctx, &cmd);
1449 } else {
1450 int status;
1451 ret = spufs_wait(ctx->mfc_wq,
1452 spu_send_mfc_command(ctx, cmd, &status));
1453 if (status)
1454 ret = status;
1455 }
1456 spu_release(ctx);
1457
1458 if (ret)
1459 goto out;
1460
1461 ctx->tagwait |= 1 << cmd.tag;
3692dc66 1462 ret = size;
a33a7d73
AB
1463
1464out:
1465 return ret;
1466}
1467
 1468static unsigned int spufs_mfc_poll(struct file *file, poll_table *wait)
1469{
1470 struct spu_context *ctx = file->private_data;
1471 u32 free_elements, tagstatus;
1472 unsigned int mask;
1473
1474 spu_acquire(ctx);
1475 ctx->ops->set_mfc_query(ctx, ctx->tagwait, 2);
1476 free_elements = ctx->ops->get_mfc_free_elements(ctx);
1477 tagstatus = ctx->ops->read_mfc_tagstatus(ctx);
1478 spu_release(ctx);
1479
1480 poll_wait(file, &ctx->mfc_wq, wait);
1481
1482 mask = 0;
1483 if (free_elements & 0xffff)
1484 mask |= POLLOUT | POLLWRNORM;
1485 if (tagstatus & ctx->tagwait)
1486 mask |= POLLIN | POLLRDNORM;
1487
1488 pr_debug("%s: free %d tagstatus %d tagwait %d\n", __FUNCTION__,
1489 free_elements, tagstatus, ctx->tagwait);
1490
1491 return mask;
1492}
1493
73b6af8a 1494static int spufs_mfc_flush(struct file *file, fl_owner_t id)
a33a7d73
AB
1495{
1496 struct spu_context *ctx = file->private_data;
1497 int ret;
1498
1499 spu_acquire(ctx);
1500#if 0
1501/* this currently hangs */
1502 ret = spufs_wait(ctx->mfc_wq,
1503 ctx->ops->set_mfc_query(ctx, ctx->tagwait, 2));
1504 if (ret)
1505 goto out;
1506 ret = spufs_wait(ctx->mfc_wq,
1507 ctx->ops->read_mfc_tagstatus(ctx) == ctx->tagwait);
1508out:
1509#else
1510 ret = 0;
1511#endif
1512 spu_release(ctx);
1513
1514 return ret;
1515}
1516
1517static int spufs_mfc_fsync(struct file *file, struct dentry *dentry,
1518 int datasync)
1519{
73b6af8a 1520 return spufs_mfc_flush(file, NULL);
a33a7d73
AB
1521}
1522
1523static int spufs_mfc_fasync(int fd, struct file *file, int on)
1524{
1525 struct spu_context *ctx = file->private_data;
1526
1527 return fasync_helper(fd, file, on, &ctx->mfc_fasync);
1528}
1529
5dfe4c96 1530static const struct file_operations spufs_mfc_fops = {
a33a7d73 1531 .open = spufs_mfc_open,
43c2bbd9 1532 .release = spufs_mfc_release,
a33a7d73
AB
1533 .read = spufs_mfc_read,
1534 .write = spufs_mfc_write,
1535 .poll = spufs_mfc_poll,
1536 .flush = spufs_mfc_flush,
1537 .fsync = spufs_mfc_fsync,
1538 .fasync = spufs_mfc_fasync,
6df10a82 1539 .mmap = spufs_mfc_mmap,
a33a7d73
AB
1540};
1541
67207b96
AB
1542static void spufs_npc_set(void *data, u64 val)
1543{
1544 struct spu_context *ctx = data;
8b3d6663
AB
1545 spu_acquire(ctx);
1546 ctx->ops->npc_write(ctx, val);
1547 spu_release(ctx);
67207b96
AB
1548}
1549
1550static u64 spufs_npc_get(void *data)
1551{
1552 struct spu_context *ctx = data;
1553 u64 ret;
8b3d6663
AB
1554 spu_acquire(ctx);
1555 ret = ctx->ops->npc_read(ctx);
1556 spu_release(ctx);
67207b96
AB
1557 return ret;
1558}
9b5047e2
DGM
1559DEFINE_SIMPLE_ATTRIBUTE(spufs_npc_ops, spufs_npc_get, spufs_npc_set,
1560 "0x%llx\n")
67207b96 1561
8b3d6663
AB
1562static void spufs_decr_set(void *data, u64 val)
1563{
1564 struct spu_context *ctx = data;
1565 struct spu_lscsa *lscsa = ctx->csa.lscsa;
1566 spu_acquire_saved(ctx);
1567 lscsa->decr.slot[0] = (u32) val;
1568 spu_release(ctx);
1569}
1570
bf1ab978 1571static u64 __spufs_decr_get(void *data)
8b3d6663
AB
1572{
1573 struct spu_context *ctx = data;
1574 struct spu_lscsa *lscsa = ctx->csa.lscsa;
bf1ab978
DGM
1575 return lscsa->decr.slot[0];
1576}
1577
1578static u64 spufs_decr_get(void *data)
1579{
1580 struct spu_context *ctx = data;
8b3d6663
AB
1581 u64 ret;
1582 spu_acquire_saved(ctx);
bf1ab978 1583 ret = __spufs_decr_get(data);
8b3d6663
AB
1584 spu_release(ctx);
1585 return ret;
1586}
1587DEFINE_SIMPLE_ATTRIBUTE(spufs_decr_ops, spufs_decr_get, spufs_decr_set,
9b5047e2 1588 "0x%llx\n")
8b3d6663
AB
1589
1590static void spufs_decr_status_set(void *data, u64 val)
1591{
1592 struct spu_context *ctx = data;
1593 struct spu_lscsa *lscsa = ctx->csa.lscsa;
1594 spu_acquire_saved(ctx);
1595 lscsa->decr_status.slot[0] = (u32) val;
1596 spu_release(ctx);
1597}
1598
bf1ab978 1599static u64 __spufs_decr_status_get(void *data)
8b3d6663
AB
1600{
1601 struct spu_context *ctx = data;
1602 struct spu_lscsa *lscsa = ctx->csa.lscsa;
bf1ab978
DGM
1603 return lscsa->decr_status.slot[0];
1604}
1605
1606static u64 spufs_decr_status_get(void *data)
1607{
1608 struct spu_context *ctx = data;
8b3d6663
AB
1609 u64 ret;
1610 spu_acquire_saved(ctx);
bf1ab978 1611 ret = __spufs_decr_status_get(data);
8b3d6663
AB
1612 spu_release(ctx);
1613 return ret;
1614}
1615DEFINE_SIMPLE_ATTRIBUTE(spufs_decr_status_ops, spufs_decr_status_get,
9b5047e2 1616 spufs_decr_status_set, "0x%llx\n")
8b3d6663 1617
8b3d6663
AB
1618static void spufs_event_mask_set(void *data, u64 val)
1619{
1620 struct spu_context *ctx = data;
1621 struct spu_lscsa *lscsa = ctx->csa.lscsa;
1622 spu_acquire_saved(ctx);
1623 lscsa->event_mask.slot[0] = (u32) val;
1624 spu_release(ctx);
1625}
1626
bf1ab978 1627static u64 __spufs_event_mask_get(void *data)
8b3d6663
AB
1628{
1629 struct spu_context *ctx = data;
1630 struct spu_lscsa *lscsa = ctx->csa.lscsa;
bf1ab978
DGM
1631 return lscsa->event_mask.slot[0];
1632}
1633
1634static u64 spufs_event_mask_get(void *data)
1635{
1636 struct spu_context *ctx = data;
8b3d6663
AB
1637 u64 ret;
1638 spu_acquire_saved(ctx);
bf1ab978 1639 ret = __spufs_event_mask_get(data);
8b3d6663
AB
1640 spu_release(ctx);
1641 return ret;
1642}
1643DEFINE_SIMPLE_ATTRIBUTE(spufs_event_mask_ops, spufs_event_mask_get,
9b5047e2 1644 spufs_event_mask_set, "0x%llx\n")
8b3d6663 1645
bf1ab978 1646static u64 __spufs_event_status_get(void *data)
b9e3bd77
DGM
1647{
1648 struct spu_context *ctx = data;
1649 struct spu_state *state = &ctx->csa;
b9e3bd77 1650 u64 stat;
b9e3bd77
DGM
1651 stat = state->spu_chnlcnt_RW[0];
1652 if (stat)
bf1ab978
DGM
1653 return state->spu_chnldata_RW[0];
1654 return 0;
1655}
1656
1657static u64 spufs_event_status_get(void *data)
1658{
1659 struct spu_context *ctx = data;
1660 u64 ret = 0;
1661
1662 spu_acquire_saved(ctx);
1663 ret = __spufs_event_status_get(data);
b9e3bd77
DGM
1664 spu_release(ctx);
1665 return ret;
1666}
1667DEFINE_SIMPLE_ATTRIBUTE(spufs_event_status_ops, spufs_event_status_get,
1668 NULL, "0x%llx\n")
1669
8b3d6663
AB
1670static void spufs_srr0_set(void *data, u64 val)
1671{
1672 struct spu_context *ctx = data;
1673 struct spu_lscsa *lscsa = ctx->csa.lscsa;
1674 spu_acquire_saved(ctx);
1675 lscsa->srr0.slot[0] = (u32) val;
1676 spu_release(ctx);
1677}
1678
1679static u64 spufs_srr0_get(void *data)
1680{
1681 struct spu_context *ctx = data;
1682 struct spu_lscsa *lscsa = ctx->csa.lscsa;
1683 u64 ret;
1684 spu_acquire_saved(ctx);
1685 ret = lscsa->srr0.slot[0];
1686 spu_release(ctx);
1687 return ret;
1688}
1689DEFINE_SIMPLE_ATTRIBUTE(spufs_srr0_ops, spufs_srr0_get, spufs_srr0_set,
9b5047e2 1690 "0x%llx\n")
8b3d6663 1691
7b1a7014
AB
1692static u64 spufs_id_get(void *data)
1693{
1694 struct spu_context *ctx = data;
1695 u64 num;
1696
1697 spu_acquire(ctx);
1698 if (ctx->state == SPU_STATE_RUNNABLE)
1699 num = ctx->spu->number;
1700 else
1701 num = (unsigned int)-1;
1702 spu_release(ctx);
1703
1704 return num;
1705}
e45d6634 1706DEFINE_SIMPLE_ATTRIBUTE(spufs_id_ops, spufs_id_get, NULL, "0x%llx\n")
7b1a7014 1707
bf1ab978 1708static u64 __spufs_object_id_get(void *data)
86767277
AB
1709{
1710 struct spu_context *ctx = data;
1711 return ctx->object_id;
1712}
1713
bf1ab978
DGM
1714static u64 spufs_object_id_get(void *data)
1715{
1716 /* FIXME: Should there really be no locking here? */
1717 return __spufs_object_id_get(data);
1718}
1719
86767277
AB
1720static void spufs_object_id_set(void *data, u64 id)
1721{
1722 struct spu_context *ctx = data;
1723 ctx->object_id = id;
1724}
1725
1726DEFINE_SIMPLE_ATTRIBUTE(spufs_object_id_ops, spufs_object_id_get,
1727 spufs_object_id_set, "0x%llx\n");
1728
bf1ab978
DGM
1729static u64 __spufs_lslr_get(void *data)
1730{
1731 struct spu_context *ctx = data;
1732 return ctx->csa.priv2.spu_lslr_RW;
1733}
1734
b9e3bd77
DGM
1735static u64 spufs_lslr_get(void *data)
1736{
1737 struct spu_context *ctx = data;
1738 u64 ret;
1739
1740 spu_acquire_saved(ctx);
bf1ab978 1741 ret = __spufs_lslr_get(data);
b9e3bd77
DGM
1742 spu_release(ctx);
1743
1744 return ret;
1745}
1746DEFINE_SIMPLE_ATTRIBUTE(spufs_lslr_ops, spufs_lslr_get, NULL, "0x%llx\n")
1747
1748static int spufs_info_open(struct inode *inode, struct file *file)
1749{
1750 struct spufs_inode_info *i = SPUFS_I(inode);
1751 struct spu_context *ctx = i->i_ctx;
1752 file->private_data = ctx;
1753 return 0;
1754}
1755
bf1ab978
DGM
1756static ssize_t __spufs_mbox_info_read(struct spu_context *ctx,
1757 char __user *buf, size_t len, loff_t *pos)
1758{
1759 u32 mbox_stat;
1760 u32 data;
1761
1762 mbox_stat = ctx->csa.prob.mb_stat_R;
1763 if (mbox_stat & 0x0000ff) {
1764 data = ctx->csa.prob.pu_mb_R;
1765 }
1766
1767 return simple_read_from_buffer(buf, len, pos, &data, sizeof data);
1768}
1769
69a2f00c
DGM
1770static ssize_t spufs_mbox_info_read(struct file *file, char __user *buf,
1771 size_t len, loff_t *pos)
1772{
bf1ab978 1773 int ret;
69a2f00c 1774 struct spu_context *ctx = file->private_data;
69a2f00c
DGM
1775
1776 if (!access_ok(VERIFY_WRITE, buf, len))
1777 return -EFAULT;
1778
1779 spu_acquire_saved(ctx);
1780 spin_lock(&ctx->csa.register_lock);
bf1ab978 1781 ret = __spufs_mbox_info_read(ctx, buf, len, pos);
69a2f00c
DGM
1782 spin_unlock(&ctx->csa.register_lock);
1783 spu_release(ctx);
1784
bf1ab978 1785 return ret;
69a2f00c
DGM
1786}
1787
5dfe4c96 1788static const struct file_operations spufs_mbox_info_fops = {
69a2f00c
DGM
1789 .open = spufs_info_open,
1790 .read = spufs_mbox_info_read,
1791 .llseek = generic_file_llseek,
1792};
1793
bf1ab978
DGM
1794static ssize_t __spufs_ibox_info_read(struct spu_context *ctx,
1795 char __user *buf, size_t len, loff_t *pos)
1796{
1797 u32 ibox_stat;
1798 u32 data;
1799
1800 ibox_stat = ctx->csa.prob.mb_stat_R;
1801 if (ibox_stat & 0xff0000) {
1802 data = ctx->csa.priv2.puint_mb_R;
1803 }
1804
1805 return simple_read_from_buffer(buf, len, pos, &data, sizeof data);
1806}
1807
69a2f00c
DGM
1808static ssize_t spufs_ibox_info_read(struct file *file, char __user *buf,
1809 size_t len, loff_t *pos)
1810{
1811 struct spu_context *ctx = file->private_data;
bf1ab978 1812 int ret;
69a2f00c
DGM
1813
1814 if (!access_ok(VERIFY_WRITE, buf, len))
1815 return -EFAULT;
1816
1817 spu_acquire_saved(ctx);
1818 spin_lock(&ctx->csa.register_lock);
bf1ab978 1819 ret = __spufs_ibox_info_read(ctx, buf, len, pos);
69a2f00c
DGM
1820 spin_unlock(&ctx->csa.register_lock);
1821 spu_release(ctx);
1822
bf1ab978 1823 return ret;
69a2f00c
DGM
1824}
1825
5dfe4c96 1826static const struct file_operations spufs_ibox_info_fops = {
69a2f00c
DGM
1827 .open = spufs_info_open,
1828 .read = spufs_ibox_info_read,
1829 .llseek = generic_file_llseek,
1830};
1831
bf1ab978
DGM
1832static ssize_t __spufs_wbox_info_read(struct spu_context *ctx,
1833 char __user *buf, size_t len, loff_t *pos)
69a2f00c 1834{
69a2f00c
DGM
1835 int i, cnt;
1836 u32 data[4];
1837 u32 wbox_stat;
1838
bf1ab978
DGM
1839 wbox_stat = ctx->csa.prob.mb_stat_R;
1840 cnt = 4 - ((wbox_stat & 0x00ff00) >> 8);
1841 for (i = 0; i < cnt; i++) {
1842 data[i] = ctx->csa.spu_mailbox_data[i];
1843 }
1844
1845 return simple_read_from_buffer(buf, len, pos, &data,
1846 cnt * sizeof(u32));
1847}
1848
1849static ssize_t spufs_wbox_info_read(struct file *file, char __user *buf,
1850 size_t len, loff_t *pos)
1851{
1852 struct spu_context *ctx = file->private_data;
1853 int ret;
1854
69a2f00c
DGM
1855 if (!access_ok(VERIFY_WRITE, buf, len))
1856 return -EFAULT;
1857
1858 spu_acquire_saved(ctx);
1859 spin_lock(&ctx->csa.register_lock);
bf1ab978 1860 ret = __spufs_wbox_info_read(ctx, buf, len, pos);
69a2f00c
DGM
1861 spin_unlock(&ctx->csa.register_lock);
1862 spu_release(ctx);
1863
bf1ab978 1864 return ret;
69a2f00c
DGM
1865}
1866
5dfe4c96 1867static const struct file_operations spufs_wbox_info_fops = {
69a2f00c
DGM
1868 .open = spufs_info_open,
1869 .read = spufs_wbox_info_read,
1870 .llseek = generic_file_llseek,
1871};
1872
bf1ab978
DGM
1873static ssize_t __spufs_dma_info_read(struct spu_context *ctx,
1874 char __user *buf, size_t len, loff_t *pos)
b9e3bd77 1875{
b9e3bd77
DGM
1876 struct spu_dma_info info;
1877 struct mfc_cq_sr *qp, *spuqp;
1878 int i;
1879
b9e3bd77
DGM
1880 info.dma_info_type = ctx->csa.priv2.spu_tag_status_query_RW;
1881 info.dma_info_mask = ctx->csa.lscsa->tag_mask.slot[0];
1882 info.dma_info_status = ctx->csa.spu_chnldata_RW[24];
1883 info.dma_info_stall_and_notify = ctx->csa.spu_chnldata_RW[25];
1884 info.dma_info_atomic_command_status = ctx->csa.spu_chnldata_RW[27];
1885 for (i = 0; i < 16; i++) {
1886 qp = &info.dma_info_command_data[i];
1887 spuqp = &ctx->csa.priv2.spuq[i];
1888
1889 qp->mfc_cq_data0_RW = spuqp->mfc_cq_data0_RW;
1890 qp->mfc_cq_data1_RW = spuqp->mfc_cq_data1_RW;
1891 qp->mfc_cq_data2_RW = spuqp->mfc_cq_data2_RW;
1892 qp->mfc_cq_data3_RW = spuqp->mfc_cq_data3_RW;
1893 }
b9e3bd77
DGM
1894
1895 return simple_read_from_buffer(buf, len, pos, &info,
1896 sizeof info);
1897}
1898
bf1ab978
DGM
1899static ssize_t spufs_dma_info_read(struct file *file, char __user *buf,
1900 size_t len, loff_t *pos)
1901{
1902 struct spu_context *ctx = file->private_data;
1903 int ret;
1904
1905 if (!access_ok(VERIFY_WRITE, buf, len))
1906 return -EFAULT;
1907
1908 spu_acquire_saved(ctx);
1909 spin_lock(&ctx->csa.register_lock);
1910 ret = __spufs_dma_info_read(ctx, buf, len, pos);
1911 spin_unlock(&ctx->csa.register_lock);
1912 spu_release(ctx);
1913
1914 return ret;
1915}
1916
5dfe4c96 1917static const struct file_operations spufs_dma_info_fops = {
b9e3bd77
DGM
1918 .open = spufs_info_open,
1919 .read = spufs_dma_info_read,
1920};
1921
bf1ab978
DGM
1922static ssize_t __spufs_proxydma_info_read(struct spu_context *ctx,
1923 char __user *buf, size_t len, loff_t *pos)
b9e3bd77 1924{
b9e3bd77 1925 struct spu_proxydma_info info;
b9e3bd77 1926 struct mfc_cq_sr *qp, *puqp;
bf1ab978 1927 int ret = sizeof info;
b9e3bd77
DGM
1928 int i;
1929
1930 if (len < ret)
1931 return -EINVAL;
1932
1933 if (!access_ok(VERIFY_WRITE, buf, len))
1934 return -EFAULT;
1935
b9e3bd77
DGM
1936 info.proxydma_info_type = ctx->csa.prob.dma_querytype_RW;
1937 info.proxydma_info_mask = ctx->csa.prob.dma_querymask_RW;
1938 info.proxydma_info_status = ctx->csa.prob.dma_tagstatus_R;
1939 for (i = 0; i < 8; i++) {
1940 qp = &info.proxydma_info_command_data[i];
1941 puqp = &ctx->csa.priv2.puq[i];
1942
1943 qp->mfc_cq_data0_RW = puqp->mfc_cq_data0_RW;
1944 qp->mfc_cq_data1_RW = puqp->mfc_cq_data1_RW;
1945 qp->mfc_cq_data2_RW = puqp->mfc_cq_data2_RW;
1946 qp->mfc_cq_data3_RW = puqp->mfc_cq_data3_RW;
1947 }
bf1ab978
DGM
1948
1949 return simple_read_from_buffer(buf, len, pos, &info,
1950 sizeof info);
1951}
1952
1953static ssize_t spufs_proxydma_info_read(struct file *file, char __user *buf,
1954 size_t len, loff_t *pos)
1955{
1956 struct spu_context *ctx = file->private_data;
1957 int ret;
1958
1959 spu_acquire_saved(ctx);
1960 spin_lock(&ctx->csa.register_lock);
1961 ret = __spufs_proxydma_info_read(ctx, buf, len, pos);
b9e3bd77
DGM
1962 spin_unlock(&ctx->csa.register_lock);
1963 spu_release(ctx);
1964
b9e3bd77
DGM
1965 return ret;
1966}
1967
5dfe4c96 1968static const struct file_operations spufs_proxydma_info_fops = {
b9e3bd77
DGM
1969 .open = spufs_info_open,
1970 .read = spufs_proxydma_info_read,
1971};
1972
67207b96
AB
1973struct tree_descr spufs_dir_contents[] = {
1974 { "mem", &spufs_mem_fops, 0666, },
8b3d6663 1975 { "regs", &spufs_regs_fops, 0666, },
67207b96
AB
1976 { "mbox", &spufs_mbox_fops, 0444, },
1977 { "ibox", &spufs_ibox_fops, 0444, },
1978 { "wbox", &spufs_wbox_fops, 0222, },
1979 { "mbox_stat", &spufs_mbox_stat_fops, 0444, },
1980 { "ibox_stat", &spufs_ibox_stat_fops, 0444, },
1981 { "wbox_stat", &spufs_wbox_stat_fops, 0444, },
1982 { "signal1", &spufs_signal1_fops, 0666, },
1983 { "signal2", &spufs_signal2_fops, 0666, },
1984 { "signal1_type", &spufs_signal1_type, 0666, },
1985 { "signal2_type", &spufs_signal2_type, 0666, },
6df10a82 1986 { "cntl", &spufs_cntl_fops, 0666, },
8b3d6663 1987 { "fpcr", &spufs_fpcr_fops, 0666, },
b9e3bd77
DGM
1988 { "lslr", &spufs_lslr_ops, 0444, },
1989 { "mfc", &spufs_mfc_fops, 0666, },
1990 { "mss", &spufs_mss_fops, 0666, },
1991 { "npc", &spufs_npc_ops, 0666, },
1992 { "srr0", &spufs_srr0_ops, 0666, },
8b3d6663
AB
1993 { "decr", &spufs_decr_ops, 0666, },
1994 { "decr_status", &spufs_decr_status_ops, 0666, },
8b3d6663 1995 { "event_mask", &spufs_event_mask_ops, 0666, },
b9e3bd77 1996 { "event_status", &spufs_event_status_ops, 0444, },
27d5bf2a 1997 { "psmap", &spufs_psmap_fops, 0666, },
86767277
AB
1998 { "phys-id", &spufs_id_ops, 0666, },
1999 { "object-id", &spufs_object_id_ops, 0666, },
69a2f00c
DGM
2000 { "mbox_info", &spufs_mbox_info_fops, 0444, },
2001 { "ibox_info", &spufs_ibox_info_fops, 0444, },
2002 { "wbox_info", &spufs_wbox_info_fops, 0444, },
b9e3bd77
DGM
2003 { "dma_info", &spufs_dma_info_fops, 0444, },
2004 { "proxydma_info", &spufs_proxydma_info_fops, 0444, },
67207b96
AB
2005 {},
2006};
5737edd1
MN
2007
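/*
 * Reduced set of files for contexts created without scheduler
 * support ("nosched"); files that would require access to saved
 * register state (regs, fpcr, decr, ...) are left out here.
 */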
2008struct tree_descr spufs_dir_nosched_contents[] = {
2009 { "mem", &spufs_mem_fops, 0666, },
2010 { "mbox", &spufs_mbox_fops, 0444, },
2011 { "ibox", &spufs_ibox_fops, 0444, },
2012 { "wbox", &spufs_wbox_fops, 0222, },
2013 { "mbox_stat", &spufs_mbox_stat_fops, 0444, },
2014 { "ibox_stat", &spufs_ibox_stat_fops, 0444, },
2015 { "wbox_stat", &spufs_wbox_stat_fops, 0444, },
2016 { "signal1", &spufs_signal1_fops, 0666, },
2017 { "signal2", &spufs_signal2_fops, 0666, },
2018 { "signal1_type", &spufs_signal1_type, 0666, },
2019 { "signal2_type", &spufs_signal2_type, 0666, },
2020 { "mss", &spufs_mss_fops, 0666, },
2021 { "mfc", &spufs_mfc_fops, 0666, },
2022 { "cntl", &spufs_cntl_fops, 0666, },
2023 { "npc", &spufs_npc_ops, 0666, },
2024 { "psmap", &spufs_psmap_fops, 0666, },
2025 { "phys-id", &spufs_id_ops, 0666, },
2026 { "object-id", &spufs_object_id_ops, 0666, },
2027 {},
2028};
bf1ab978
DGM
2029
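/*
 * Files that are written out as notes in a core dump: each entry
 * names the file, a read callback for buffer-like files or a get
 * callback for single-value attributes, and the size of the data
 * to dump.
 */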
2030struct spufs_coredump_reader spufs_coredump_read[] = {
2031 { "regs", __spufs_regs_read, NULL, 128 * 16 },
2032 { "fpcr", __spufs_fpcr_read, NULL, 16 },
2033 { "lslr", NULL, __spufs_lslr_get, 11 },
2034 { "decr", NULL, __spufs_decr_get, 11 },
2035 { "decr_status", NULL, __spufs_decr_status_get, 11 },
2036 { "mem", __spufs_mem_read, NULL, 256 * 1024, },
2037 { "signal1", __spufs_signal1_read, NULL, 4 },
2038 { "signal1_type", NULL, __spufs_signal1_type_get, 2 },
2039 { "signal2", __spufs_signal2_read, NULL, 4 },
2040 { "signal2_type", NULL, __spufs_signal2_type_get, 2 },
2041 { "event_mask", NULL, __spufs_event_mask_get, 8 },
2042 { "event_status", NULL, __spufs_event_status_get, 8 },
2043 { "mbox_info", __spufs_mbox_info_read, NULL, 4 },
2044 { "ibox_info", __spufs_ibox_info_read, NULL, 4 },
2045 { "wbox_info", __spufs_wbox_info_read, NULL, 16 },
2046 { "dma_info", __spufs_dma_info_read, NULL, 69 * 8 },
2047 { "proxydma_info", __spufs_proxydma_info_read, NULL, 35 * 8 },
2048 { "object-id", NULL, __spufs_object_id_get, 19 },
2049 { },
2050};
2051int spufs_coredump_num_notes = ARRAY_SIZE(spufs_coredump_read) - 1;
2052
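For illustration, a minimal user-space sketch of how the files defined above are used. It assumes spufs is mounted at the conventional /spu mount point and that a context directory has already been created with the spu_create system call; the directory name /spu/example is made up for the example, and only the mem, npc and mbox_stat files from the tables above are touched.

#include <fcntl.h>
#include <stdint.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	/* hypothetical context directory, normally created via spu_create() */
	int mem_fd = open("/spu/example/mem", O_RDWR);
	int npc_fd = open("/spu/example/npc", O_RDWR);
	int stat_fd = open("/spu/example/mbox_stat", O_RDONLY);
	uint32_t word = 0;	/* example payload for the first local store word */
	uint32_t mbox_stat;

	if (mem_fd < 0 || npc_fd < 0 || stat_fd < 0) {
		perror("open");
		return 1;
	}

	/* "mem" is the local store; spufs_mem_write() copies this in */
	pwrite(mem_fd, &word, sizeof(word), 0);

	/* "npc" is a simple hex attribute handled by spufs_npc_set() */
	write(npc_fd, "0x0", 3);

	/* "mbox_stat" returns the low byte of the mailbox status register */
	if (read(stat_fd, &mbox_stat, sizeof(mbox_stat)) == sizeof(mbox_stat))
		printf("mbox status: %u\n", (unsigned int)mbox_stat);

	close(mem_fd);
	close(npc_fd);
	close(stat_fd);
	return 0;
}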