arch/powerpc/platforms/cell/spufs/file.c (git blame)
[POWERPC] spufs: don't set reserved bits in spu interrupt status
67207b96
AB
1/*
2 * SPU file system -- file contents
3 *
4 * (C) Copyright IBM Deutschland Entwicklung GmbH 2005
5 *
6 * Author: Arnd Bergmann <arndb@de.ibm.com>
7 *
8 * This program is free software; you can redistribute it and/or modify
9 * it under the terms of the GNU General Public License as published by
10 * the Free Software Foundation; either version 2, or (at your option)
11 * any later version.
12 *
13 * This program is distributed in the hope that it will be useful,
14 * but WITHOUT ANY WARRANTY; without even the implied warranty of
15 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
16 * GNU General Public License for more details.
17 *
18 * You should have received a copy of the GNU General Public License
19 * along with this program; if not, write to the Free Software
20 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
21 */
22
a33a7d73
AB
23#undef DEBUG
24
67207b96
AB
25#include <linux/fs.h>
26#include <linux/ioctl.h>
27#include <linux/module.h>
d88cfffa 28#include <linux/pagemap.h>
67207b96 29#include <linux/poll.h>
5110459f 30#include <linux/ptrace.h>
cbe709c1 31#include <linux/seq_file.h>
67207b96
AB
32
33#include <asm/io.h>
34#include <asm/semaphore.h>
35#include <asm/spu.h>
b9e3bd77 36#include <asm/spu_info.h>
67207b96
AB
37#include <asm/uaccess.h>
38
39#include "spufs.h"
40
27d5bf2a
BH
41#define SPUFS_MMAP_4K (PAGE_SIZE == 0x1000)
42
cbe709c1 43
67207b96
AB
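/*
 * Opening "mem" gives access to the SPU local store.  The inode's
 * address_space is cached in ctx->local_store for the first opener so
 * that user mappings of the local store can later be invalidated when
 * the context is scheduled out.
 */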
44static int
45spufs_mem_open(struct inode *inode, struct file *file)
46{
47 struct spufs_inode_info *i = SPUFS_I(inode);
6df10a82 48 struct spu_context *ctx = i->i_ctx;
43c2bbd9 49
47d3a5fa 50 mutex_lock(&ctx->mapping_lock);
6df10a82 51 file->private_data = ctx;
43c2bbd9
CH
52 if (!i->i_openers++)
53 ctx->local_store = inode->i_mapping;
47d3a5fa 54 mutex_unlock(&ctx->mapping_lock);
43c2bbd9
CH
55 return 0;
56}
57
58static int
59spufs_mem_release(struct inode *inode, struct file *file)
60{
61 struct spufs_inode_info *i = SPUFS_I(inode);
62 struct spu_context *ctx = i->i_ctx;
63
47d3a5fa 64 mutex_lock(&ctx->mapping_lock);
43c2bbd9
CH
65 if (!--i->i_openers)
66 ctx->local_store = NULL;
47d3a5fa 67 mutex_unlock(&ctx->mapping_lock);
67207b96
AB
68 return 0;
69}
70
bf1ab978
DGM
71static ssize_t
72__spufs_mem_read(struct spu_context *ctx, char __user *buffer,
73 size_t size, loff_t *pos)
74{
75 char *local_store = ctx->ops->get_ls(ctx);
76 return simple_read_from_buffer(buffer, size, pos, local_store,
77 LS_SIZE);
78}
79
67207b96
AB
80static ssize_t
81spufs_mem_read(struct file *file, char __user *buffer,
82 size_t size, loff_t *pos)
83{
bf1ab978 84 struct spu_context *ctx = file->private_data;
aa0ed2bd 85 ssize_t ret;
67207b96 86
8b3d6663 87 spu_acquire(ctx);
bf1ab978 88 ret = __spufs_mem_read(ctx, buffer, size, pos);
8b3d6663 89 spu_release(ctx);
67207b96
AB
90 return ret;
91}
92
93static ssize_t
94spufs_mem_write(struct file *file, const char __user *buffer,
aa0ed2bd 95 size_t size, loff_t *ppos)
67207b96
AB
96{
97 struct spu_context *ctx = file->private_data;
8b3d6663 98 char *local_store;
aa0ed2bd 99 loff_t pos = *ppos;
8b3d6663 100 int ret;
67207b96 101
aa0ed2bd
AB
102 if (pos < 0)
103 return -EINVAL;
104 if (pos > LS_SIZE)
67207b96 105 return -EFBIG;
aa0ed2bd
AB
106 if (size > LS_SIZE - pos)
107 size = LS_SIZE - pos;
8b3d6663
AB
108
109 spu_acquire(ctx);
8b3d6663 110 local_store = ctx->ops->get_ls(ctx);
aa0ed2bd 111 ret = copy_from_user(local_store + pos, buffer, size);
8b3d6663 112 spu_release(ctx);
aa0ed2bd
AB
113
114 if (ret)
115 return -EFAULT;
116 *ppos = pos + size;
117 return size;
67207b96
AB
118}
119
78bde53e
BH
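/*
 * Fault handler for mmap()ed local store: while the context is loaded
 * on an SPU the fault is filled from the physical local store,
 * otherwise from the saved copy in the context save area (csa).
 */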
120static unsigned long spufs_mem_mmap_nopfn(struct vm_area_struct *vma,
121 unsigned long address)
8b3d6663 122{
f1fa74f4
BH
123 struct spu_context *ctx = vma->vm_file->private_data;
124 unsigned long pfn, offset, addr0 = address;
125#ifdef CONFIG_SPU_FS_64K_LS
126 struct spu_state *csa = &ctx->csa;
127 int psize;
128
129 /* Check what page size we are using */
130 psize = get_slice_psize(vma->vm_mm, address);
131
132 /* Some sanity checking */
133 BUG_ON(csa->use_big_pages != (psize == MMU_PAGE_64K));
134
135 /* Wow, 64K, cool, we need to align the address though */
136 if (csa->use_big_pages) {
137 BUG_ON(vma->vm_start & 0xffff);
138 address &= ~0xfffful;
139 }
140#endif /* CONFIG_SPU_FS_64K_LS */
8b3d6663 141
f1fa74f4 142 offset = (address - vma->vm_start) + (vma->vm_pgoff << PAGE_SHIFT);
128b8546
MN
143 if (offset >= LS_SIZE)
144 return NOPFN_SIGBUS;
145
f1fa74f4
BH
146 pr_debug("spufs_mem_mmap_nopfn address=0x%lx -> 0x%lx, offset=0x%lx\n",
147 addr0, address, offset);
148
8b3d6663
AB
149 spu_acquire(ctx);
150
ac91cb8d
AB
151 if (ctx->state == SPU_STATE_SAVED) {
152 vma->vm_page_prot = __pgprot(pgprot_val(vma->vm_page_prot)
932f535d 153 & ~_PAGE_NO_CACHE);
78bde53e 154 pfn = vmalloc_to_pfn(ctx->csa.lscsa->ls + offset);
ac91cb8d
AB
155 } else {
156 vma->vm_page_prot = __pgprot(pgprot_val(vma->vm_page_prot)
78bde53e
BH
157 | _PAGE_NO_CACHE);
158 pfn = (ctx->spu->local_store_phys + offset) >> PAGE_SHIFT;
ac91cb8d 159 }
78bde53e 160 vm_insert_pfn(vma, address, pfn);
8b3d6663 161
78bde53e 162 spu_release(ctx);
8b3d6663 163
78bde53e 164 return NOPFN_REFAULT;
8b3d6663
AB
165}
166
78bde53e 167
8b3d6663 168static struct vm_operations_struct spufs_mem_mmap_vmops = {
78bde53e 169 .nopfn = spufs_mem_mmap_nopfn,
8b3d6663
AB
170};
171
f1fa74f4 172static int spufs_mem_mmap(struct file *file, struct vm_area_struct *vma)
67207b96 173{
f1fa74f4
BH
174#ifdef CONFIG_SPU_FS_64K_LS
175 struct spu_context *ctx = file->private_data;
176 struct spu_state *csa = &ctx->csa;
177
178 /* Sanity check VMA alignment */
179 if (csa->use_big_pages) {
180 pr_debug("spufs_mem_mmap 64K, start=0x%lx, end=0x%lx,"
181 " pgoff=0x%lx\n", vma->vm_start, vma->vm_end,
182 vma->vm_pgoff);
183 if (vma->vm_start & 0xffff)
184 return -EINVAL;
185 if (vma->vm_pgoff & 0xf)
186 return -EINVAL;
187 }
188#endif /* CONFIG_SPU_FS_64K_LS */
189
8b3d6663
AB
190 if (!(vma->vm_flags & VM_SHARED))
191 return -EINVAL;
67207b96 192
78bde53e 193 vma->vm_flags |= VM_IO | VM_PFNMAP;
8b3d6663
AB
194 vma->vm_page_prot = __pgprot(pgprot_val(vma->vm_page_prot)
195 | _PAGE_NO_CACHE);
196
197 vma->vm_ops = &spufs_mem_mmap_vmops;
67207b96
AB
198 return 0;
199}
200
f1fa74f4 201#ifdef CONFIG_SPU_FS_64K_LS
1238819a
SS
202static unsigned long spufs_get_unmapped_area(struct file *file,
203 unsigned long addr, unsigned long len, unsigned long pgoff,
204 unsigned long flags)
f1fa74f4
BH
205{
206 struct spu_context *ctx = file->private_data;
207 struct spu_state *csa = &ctx->csa;
208
209 /* If not using big pages, fallback to normal MM g_u_a */
210 if (!csa->use_big_pages)
211 return current->mm->get_unmapped_area(file, addr, len,
212 pgoff, flags);
213
214 /* Else, try to obtain a 64K pages slice */
215 return slice_get_unmapped_area(addr, len, flags,
216 MMU_PAGE_64K, 1, 0);
217}
218#endif /* CONFIG_SPU_FS_64K_LS */
219
5dfe4c96 220static const struct file_operations spufs_mem_fops = {
7022543e
JK
221 .open = spufs_mem_open,
222 .release = spufs_mem_release,
223 .read = spufs_mem_read,
224 .write = spufs_mem_write,
225 .llseek = generic_file_llseek,
226 .mmap = spufs_mem_mmap,
f1fa74f4
BH
227#ifdef CONFIG_SPU_FS_64K_LS
228 .get_unmapped_area = spufs_get_unmapped_area,
229#endif
8b3d6663
AB
230};
231
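/*
 * Common fault handler for mappings of the problem state area.
 * ps_offs and ps_size select a window inside the problem state space.
 * Pages can only be handed out while the context is loaded on a
 * physical SPU; for a saved context we wait until it becomes runnable
 * and let the fault be retried.
 */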
78bde53e 232static unsigned long spufs_ps_nopfn(struct vm_area_struct *vma,
6df10a82 233 unsigned long address,
78bde53e 234 unsigned long ps_offs,
27d5bf2a 235 unsigned long ps_size)
6df10a82 236{
6df10a82 237 struct spu_context *ctx = vma->vm_file->private_data;
78bde53e 238 unsigned long area, offset = address - vma->vm_start;
6df10a82
MN
239
240 offset += vma->vm_pgoff << PAGE_SHIFT;
27d5bf2a 241 if (offset >= ps_size)
78bde53e 242 return NOPFN_SIGBUS;
6df10a82 243
33bfd7a7
AB
244 /*
245 * We have to wait for context to be loaded before we have
246 * pages to hand out to the user, but we don't want to wait
247 * with the mmap_sem held.
248 * It is possible to drop the mmap_sem here, but then we need
249 * to return NOPFN_REFAULT because the mappings may have
 250 * changed.
78bde53e 251 */
33bfd7a7
AB
252 spu_acquire(ctx);
253 if (ctx->state == SPU_STATE_SAVED) {
254 up_read(&current->mm->mmap_sem);
255 spufs_wait(ctx->run_wq, ctx->state == SPU_STATE_RUNNABLE);
256 down_read(&current->mm->mmap_sem);
257 goto out;
258 }
6df10a82
MN
259
260 area = ctx->spu->problem_phys + ps_offs;
78bde53e 261 vm_insert_pfn(vma, address, (area + offset) >> PAGE_SHIFT);
33bfd7a7
AB
262
263out:
6df10a82
MN
264 spu_release(ctx);
265
78bde53e 266 return NOPFN_REFAULT;
6df10a82
MN
267}
268
27d5bf2a 269#if SPUFS_MMAP_4K
78bde53e
BH
270static unsigned long spufs_cntl_mmap_nopfn(struct vm_area_struct *vma,
271 unsigned long address)
6df10a82 272{
78bde53e 273 return spufs_ps_nopfn(vma, address, 0x4000, 0x1000);
6df10a82
MN
274}
275
276static struct vm_operations_struct spufs_cntl_mmap_vmops = {
78bde53e 277 .nopfn = spufs_cntl_mmap_nopfn,
6df10a82
MN
278};
279
280/*
281 * mmap support for problem state control area [0x4000 - 0x4fff].
6df10a82
MN
282 */
283static int spufs_cntl_mmap(struct file *file, struct vm_area_struct *vma)
284{
285 if (!(vma->vm_flags & VM_SHARED))
286 return -EINVAL;
287
78bde53e 288 vma->vm_flags |= VM_IO | VM_PFNMAP;
6df10a82 289 vma->vm_page_prot = __pgprot(pgprot_val(vma->vm_page_prot)
23cc7701 290 | _PAGE_NO_CACHE | _PAGE_GUARDED);
6df10a82
MN
291
292 vma->vm_ops = &spufs_cntl_mmap_vmops;
293 return 0;
294}
27d5bf2a
BH
295#else /* SPUFS_MMAP_4K */
296#define spufs_cntl_mmap NULL
297#endif /* !SPUFS_MMAP_4K */
6df10a82 298
e1dbff2b 299static u64 spufs_cntl_get(void *data)
6df10a82 300{
e1dbff2b
AB
301 struct spu_context *ctx = data;
302 u64 val;
6df10a82 303
e1dbff2b
AB
304 spu_acquire(ctx);
305 val = ctx->ops->status_read(ctx);
306 spu_release(ctx);
307
308 return val;
6df10a82
MN
309}
310
e1dbff2b 311static void spufs_cntl_set(void *data, u64 val)
6df10a82 312{
e1dbff2b
AB
313 struct spu_context *ctx = data;
314
315 spu_acquire(ctx);
316 ctx->ops->runcntl_write(ctx, val);
317 spu_release(ctx);
6df10a82
MN
318}
319
e1dbff2b 320static int spufs_cntl_open(struct inode *inode, struct file *file)
6df10a82 321{
e1dbff2b
AB
322 struct spufs_inode_info *i = SPUFS_I(inode);
323 struct spu_context *ctx = i->i_ctx;
324
47d3a5fa 325 mutex_lock(&ctx->mapping_lock);
e1dbff2b 326 file->private_data = ctx;
43c2bbd9
CH
327 if (!i->i_openers++)
328 ctx->cntl = inode->i_mapping;
47d3a5fa 329 mutex_unlock(&ctx->mapping_lock);
e1dbff2b
AB
330 return simple_attr_open(inode, file, spufs_cntl_get,
331 spufs_cntl_set, "0x%08lx");
6df10a82
MN
332}
333
43c2bbd9
CH
334static int
335spufs_cntl_release(struct inode *inode, struct file *file)
336{
337 struct spufs_inode_info *i = SPUFS_I(inode);
338 struct spu_context *ctx = i->i_ctx;
339
340 simple_attr_close(inode, file);
341
47d3a5fa 342 mutex_lock(&ctx->mapping_lock);
43c2bbd9
CH
343 if (!--i->i_openers)
344 ctx->cntl = NULL;
47d3a5fa 345 mutex_unlock(&ctx->mapping_lock);
43c2bbd9
CH
346 return 0;
347}
348
5dfe4c96 349static const struct file_operations spufs_cntl_fops = {
6df10a82 350 .open = spufs_cntl_open,
43c2bbd9 351 .release = spufs_cntl_release,
e1dbff2b
AB
352 .read = simple_attr_read,
353 .write = simple_attr_write,
6df10a82 354 .mmap = spufs_cntl_mmap,
6df10a82
MN
355};
356
8b3d6663
AB
357static int
358spufs_regs_open(struct inode *inode, struct file *file)
359{
360 struct spufs_inode_info *i = SPUFS_I(inode);
361 file->private_data = i->i_ctx;
362 return 0;
363}
364
bf1ab978
DGM
365static ssize_t
366__spufs_regs_read(struct spu_context *ctx, char __user *buffer,
367 size_t size, loff_t *pos)
368{
369 struct spu_lscsa *lscsa = ctx->csa.lscsa;
370 return simple_read_from_buffer(buffer, size, pos,
371 lscsa->gprs, sizeof lscsa->gprs);
372}
373
8b3d6663
AB
374static ssize_t
375spufs_regs_read(struct file *file, char __user *buffer,
376 size_t size, loff_t *pos)
377{
8b3d6663 378 int ret;
bf1ab978 379 struct spu_context *ctx = file->private_data;
8b3d6663
AB
380
381 spu_acquire_saved(ctx);
bf1ab978 382 ret = __spufs_regs_read(ctx, buffer, size, pos);
27b1ea09 383 spu_release_saved(ctx);
8b3d6663
AB
384 return ret;
385}
386
387static ssize_t
388spufs_regs_write(struct file *file, const char __user *buffer,
389 size_t size, loff_t *pos)
390{
391 struct spu_context *ctx = file->private_data;
392 struct spu_lscsa *lscsa = ctx->csa.lscsa;
393 int ret;
394
395 size = min_t(ssize_t, sizeof lscsa->gprs - *pos, size);
396 if (size <= 0)
397 return -EFBIG;
398 *pos += size;
399
400 spu_acquire_saved(ctx);
401
402 ret = copy_from_user(lscsa->gprs + *pos - size,
403 buffer, size) ? -EFAULT : size;
404
27b1ea09 405 spu_release_saved(ctx);
8b3d6663
AB
406 return ret;
407}
408
5dfe4c96 409static const struct file_operations spufs_regs_fops = {
8b3d6663
AB
410 .open = spufs_regs_open,
411 .read = spufs_regs_read,
412 .write = spufs_regs_write,
67207b96
AB
413 .llseek = generic_file_llseek,
414};
415
bf1ab978
DGM
416static ssize_t
417__spufs_fpcr_read(struct spu_context *ctx, char __user * buffer,
418 size_t size, loff_t * pos)
419{
420 struct spu_lscsa *lscsa = ctx->csa.lscsa;
421 return simple_read_from_buffer(buffer, size, pos,
422 &lscsa->fpcr, sizeof(lscsa->fpcr));
423}
424
8b3d6663
AB
425static ssize_t
426spufs_fpcr_read(struct file *file, char __user * buffer,
427 size_t size, loff_t * pos)
428{
8b3d6663 429 int ret;
bf1ab978 430 struct spu_context *ctx = file->private_data;
8b3d6663
AB
431
432 spu_acquire_saved(ctx);
bf1ab978 433 ret = __spufs_fpcr_read(ctx, buffer, size, pos);
27b1ea09 434 spu_release_saved(ctx);
8b3d6663
AB
435 return ret;
436}
437
438static ssize_t
439spufs_fpcr_write(struct file *file, const char __user * buffer,
440 size_t size, loff_t * pos)
441{
442 struct spu_context *ctx = file->private_data;
443 struct spu_lscsa *lscsa = ctx->csa.lscsa;
444 int ret;
445
446 size = min_t(ssize_t, sizeof(lscsa->fpcr) - *pos, size);
447 if (size <= 0)
448 return -EFBIG;
449 *pos += size;
450
451 spu_acquire_saved(ctx);
452
453 ret = copy_from_user((char *)&lscsa->fpcr + *pos - size,
454 buffer, size) ? -EFAULT : size;
455
27b1ea09 456 spu_release_saved(ctx);
8b3d6663
AB
457 return ret;
458}
459
5dfe4c96 460static const struct file_operations spufs_fpcr_fops = {
8b3d6663
AB
461 .open = spufs_regs_open,
462 .read = spufs_fpcr_read,
463 .write = spufs_fpcr_write,
464 .llseek = generic_file_llseek,
465};
466
67207b96
AB
467/* generic open function for all pipe-like files */
468static int spufs_pipe_open(struct inode *inode, struct file *file)
469{
470 struct spufs_inode_info *i = SPUFS_I(inode);
471 file->private_data = i->i_ctx;
472
473 return nonseekable_open(inode, file);
474}
475
cdcc89bb
AB
476/*
477 * Read as many bytes from the mailbox as possible, until
478 * one of the conditions becomes true:
479 *
480 * - no more data available in the mailbox
481 * - end of the user provided buffer
482 * - end of the mapped area
483 */
67207b96
AB
484static ssize_t spufs_mbox_read(struct file *file, char __user *buf,
485 size_t len, loff_t *pos)
486{
8b3d6663 487 struct spu_context *ctx = file->private_data;
cdcc89bb
AB
488 u32 mbox_data, __user *udata;
489 ssize_t count;
67207b96
AB
490
491 if (len < 4)
492 return -EINVAL;
493
cdcc89bb
AB
494 if (!access_ok(VERIFY_WRITE, buf, len))
495 return -EFAULT;
496
497 udata = (void __user *)buf;
498
8b3d6663 499 spu_acquire(ctx);
274cef5e 500 for (count = 0; (count + 4) <= len; count += 4, udata++) {
cdcc89bb
AB
501 int ret;
502 ret = ctx->ops->mbox_read(ctx, &mbox_data);
503 if (ret == 0)
504 break;
505
506 /*
507 * at the end of the mapped area, we can fault
508 * but still need to return the data we have
509 * read successfully so far.
510 */
511 ret = __put_user(mbox_data, udata);
512 if (ret) {
513 if (!count)
514 count = -EFAULT;
515 break;
516 }
517 }
8b3d6663 518 spu_release(ctx);
67207b96 519
cdcc89bb
AB
520 if (!count)
521 count = -EAGAIN;
67207b96 522
cdcc89bb 523 return count;
67207b96
AB
524}
525
5dfe4c96 526static const struct file_operations spufs_mbox_fops = {
67207b96
AB
527 .open = spufs_pipe_open,
528 .read = spufs_mbox_read,
529};
530
531static ssize_t spufs_mbox_stat_read(struct file *file, char __user *buf,
532 size_t len, loff_t *pos)
533{
8b3d6663 534 struct spu_context *ctx = file->private_data;
67207b96
AB
535 u32 mbox_stat;
536
537 if (len < 4)
538 return -EINVAL;
539
8b3d6663
AB
540 spu_acquire(ctx);
541
542 mbox_stat = ctx->ops->mbox_stat_read(ctx) & 0xff;
543
544 spu_release(ctx);
67207b96
AB
545
546 if (copy_to_user(buf, &mbox_stat, sizeof mbox_stat))
547 return -EFAULT;
548
549 return 4;
550}
551
5dfe4c96 552static const struct file_operations spufs_mbox_stat_fops = {
67207b96
AB
553 .open = spufs_pipe_open,
554 .read = spufs_mbox_stat_read,
555};
556
557/* low-level ibox access function */
8b3d6663 558size_t spu_ibox_read(struct spu_context *ctx, u32 *data)
67207b96 559{
8b3d6663
AB
560 return ctx->ops->ibox_read(ctx, data);
561}
67207b96 562
8b3d6663
AB
563static int spufs_ibox_fasync(int fd, struct file *file, int on)
564{
565 struct spu_context *ctx = file->private_data;
67207b96 566
8b3d6663 567 return fasync_helper(fd, file, on, &ctx->ibox_fasync);
67207b96 568}
67207b96 569
8b3d6663
AB
570/* interrupt-level ibox callback function. */
571void spufs_ibox_callback(struct spu *spu)
67207b96 572{
8b3d6663
AB
573 struct spu_context *ctx = spu->ctx;
574
575 wake_up_all(&ctx->ibox_wq);
576 kill_fasync(&ctx->ibox_fasync, SIGIO, POLLIN);
67207b96
AB
577}
578
cdcc89bb
AB
579/*
580 * Read as many bytes from the interrupt mailbox as possible, until
581 * one of the conditions becomes true:
582 *
583 * - no more data available in the mailbox
584 * - end of the user provided buffer
585 * - end of the mapped area
586 *
587 * If the file is opened without O_NONBLOCK, we wait here until
588 * any data is available, but return when we have been able to
589 * read something.
590 */
67207b96
AB
591static ssize_t spufs_ibox_read(struct file *file, char __user *buf,
592 size_t len, loff_t *pos)
593{
8b3d6663 594 struct spu_context *ctx = file->private_data;
cdcc89bb
AB
595 u32 ibox_data, __user *udata;
596 ssize_t count;
67207b96
AB
597
598 if (len < 4)
599 return -EINVAL;
600
cdcc89bb
AB
601 if (!access_ok(VERIFY_WRITE, buf, len))
602 return -EFAULT;
603
604 udata = (void __user *)buf;
605
8b3d6663 606 spu_acquire(ctx);
67207b96 607
cdcc89bb
AB
608 /* wait only for the first element */
609 count = 0;
67207b96 610 if (file->f_flags & O_NONBLOCK) {
8b3d6663 611 if (!spu_ibox_read(ctx, &ibox_data))
cdcc89bb 612 count = -EAGAIN;
67207b96 613 } else {
cdcc89bb 614 count = spufs_wait(ctx->ibox_wq, spu_ibox_read(ctx, &ibox_data));
67207b96 615 }
cdcc89bb
AB
616 if (count)
617 goto out;
67207b96 618
cdcc89bb
AB
619 /* if we can't write at all, return -EFAULT */
620 count = __put_user(ibox_data, udata);
621 if (count)
622 goto out;
8b3d6663 623
cdcc89bb
AB
624 for (count = 4, udata++; (count + 4) <= len; count += 4, udata++) {
625 int ret;
626 ret = ctx->ops->ibox_read(ctx, &ibox_data);
627 if (ret == 0)
628 break;
629 /*
630 * at the end of the mapped area, we can fault
631 * but still need to return the data we have
632 * read successfully so far.
633 */
634 ret = __put_user(ibox_data, udata);
635 if (ret)
636 break;
637 }
67207b96 638
cdcc89bb
AB
639out:
640 spu_release(ctx);
67207b96 641
cdcc89bb 642 return count;
67207b96
AB
643}
644
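/* poll support for the ibox file: report POLLIN when interrupt mailbox
 * data is available to read */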
645static unsigned int spufs_ibox_poll(struct file *file, poll_table *wait)
646{
8b3d6663 647 struct spu_context *ctx = file->private_data;
67207b96
AB
648 unsigned int mask;
649
8b3d6663 650 poll_wait(file, &ctx->ibox_wq, wait);
67207b96 651
3a843d7c
AB
652 spu_acquire(ctx);
653 mask = ctx->ops->mbox_stat_poll(ctx, POLLIN | POLLRDNORM);
654 spu_release(ctx);
67207b96
AB
655
656 return mask;
657}
658
5dfe4c96 659static const struct file_operations spufs_ibox_fops = {
67207b96
AB
660 .open = spufs_pipe_open,
661 .read = spufs_ibox_read,
662 .poll = spufs_ibox_poll,
663 .fasync = spufs_ibox_fasync,
664};
665
666static ssize_t spufs_ibox_stat_read(struct file *file, char __user *buf,
667 size_t len, loff_t *pos)
668{
8b3d6663 669 struct spu_context *ctx = file->private_data;
67207b96
AB
670 u32 ibox_stat;
671
672 if (len < 4)
673 return -EINVAL;
674
8b3d6663
AB
675 spu_acquire(ctx);
676 ibox_stat = (ctx->ops->mbox_stat_read(ctx) >> 16) & 0xff;
677 spu_release(ctx);
67207b96
AB
678
679 if (copy_to_user(buf, &ibox_stat, sizeof ibox_stat))
680 return -EFAULT;
681
682 return 4;
683}
684
5dfe4c96 685static const struct file_operations spufs_ibox_stat_fops = {
67207b96
AB
686 .open = spufs_pipe_open,
687 .read = spufs_ibox_stat_read,
688};
689
690/* low-level mailbox write */
8b3d6663 691size_t spu_wbox_write(struct spu_context *ctx, u32 data)
67207b96 692{
8b3d6663
AB
693 return ctx->ops->wbox_write(ctx, data);
694}
67207b96 695
8b3d6663
AB
696static int spufs_wbox_fasync(int fd, struct file *file, int on)
697{
698 struct spu_context *ctx = file->private_data;
699 int ret;
67207b96 700
8b3d6663 701 ret = fasync_helper(fd, file, on, &ctx->wbox_fasync);
67207b96 702
67207b96
AB
703 return ret;
704}
67207b96 705
8b3d6663
AB
706/* interrupt-level wbox callback function. */
707void spufs_wbox_callback(struct spu *spu)
67207b96 708{
8b3d6663
AB
709 struct spu_context *ctx = spu->ctx;
710
711 wake_up_all(&ctx->wbox_wq);
712 kill_fasync(&ctx->wbox_fasync, SIGIO, POLLOUT);
67207b96
AB
713}
714
cdcc89bb
AB
715/*
716 * Write as many bytes to the interrupt mailbox as possible, until
717 * one of the conditions becomes true:
718 *
719 * - the mailbox is full
720 * - end of the user provided buffer
721 * - end of the mapped area
722 *
723 * If the file is opened without O_NONBLOCK, we wait here until
 724 * space is available, but return when we have been able to
725 * write something.
726 */
67207b96
AB
727static ssize_t spufs_wbox_write(struct file *file, const char __user *buf,
728 size_t len, loff_t *pos)
729{
8b3d6663 730 struct spu_context *ctx = file->private_data;
cdcc89bb
AB
731 u32 wbox_data, __user *udata;
732 ssize_t count;
67207b96
AB
733
734 if (len < 4)
735 return -EINVAL;
736
cdcc89bb
AB
737 udata = (void __user *)buf;
738 if (!access_ok(VERIFY_READ, buf, len))
739 return -EFAULT;
740
741 if (__get_user(wbox_data, udata))
67207b96
AB
742 return -EFAULT;
743
8b3d6663
AB
744 spu_acquire(ctx);
745
cdcc89bb
AB
746 /*
747 * make sure we can at least write one element, by waiting
748 * in case of !O_NONBLOCK
749 */
750 count = 0;
67207b96 751 if (file->f_flags & O_NONBLOCK) {
8b3d6663 752 if (!spu_wbox_write(ctx, wbox_data))
cdcc89bb 753 count = -EAGAIN;
67207b96 754 } else {
cdcc89bb 755 count = spufs_wait(ctx->wbox_wq, spu_wbox_write(ctx, wbox_data));
67207b96
AB
756 }
757
cdcc89bb
AB
758 if (count)
759 goto out;
8b3d6663 760
96de0e25 761 /* write as much as possible */
cdcc89bb
AB
762 for (count = 4, udata++; (count + 4) <= len; count += 4, udata++) {
763 int ret;
764 ret = __get_user(wbox_data, udata);
765 if (ret)
766 break;
767
768 ret = spu_wbox_write(ctx, wbox_data);
769 if (ret == 0)
770 break;
771 }
772
773out:
774 spu_release(ctx);
775 return count;
67207b96
AB
776}
777
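/* poll support for the wbox file: report POLLOUT when there is room to
 * write another mailbox entry */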
778static unsigned int spufs_wbox_poll(struct file *file, poll_table *wait)
779{
8b3d6663 780 struct spu_context *ctx = file->private_data;
67207b96
AB
781 unsigned int mask;
782
8b3d6663 783 poll_wait(file, &ctx->wbox_wq, wait);
67207b96 784
3a843d7c
AB
785 spu_acquire(ctx);
786 mask = ctx->ops->mbox_stat_poll(ctx, POLLOUT | POLLWRNORM);
787 spu_release(ctx);
67207b96
AB
788
789 return mask;
790}
791
5dfe4c96 792static const struct file_operations spufs_wbox_fops = {
67207b96
AB
793 .open = spufs_pipe_open,
794 .write = spufs_wbox_write,
795 .poll = spufs_wbox_poll,
796 .fasync = spufs_wbox_fasync,
797};
798
799static ssize_t spufs_wbox_stat_read(struct file *file, char __user *buf,
800 size_t len, loff_t *pos)
801{
8b3d6663 802 struct spu_context *ctx = file->private_data;
67207b96
AB
803 u32 wbox_stat;
804
805 if (len < 4)
806 return -EINVAL;
807
8b3d6663
AB
808 spu_acquire(ctx);
809 wbox_stat = (ctx->ops->mbox_stat_read(ctx) >> 8) & 0xff;
810 spu_release(ctx);
67207b96
AB
811
812 if (copy_to_user(buf, &wbox_stat, sizeof wbox_stat))
813 return -EFAULT;
814
815 return 4;
816}
817
5dfe4c96 818static const struct file_operations spufs_wbox_stat_fops = {
67207b96
AB
819 .open = spufs_pipe_open,
820 .read = spufs_wbox_stat_read,
821};
822
6df10a82
MN
823static int spufs_signal1_open(struct inode *inode, struct file *file)
824{
825 struct spufs_inode_info *i = SPUFS_I(inode);
826 struct spu_context *ctx = i->i_ctx;
43c2bbd9 827
47d3a5fa 828 mutex_lock(&ctx->mapping_lock);
6df10a82 829 file->private_data = ctx;
43c2bbd9
CH
830 if (!i->i_openers++)
831 ctx->signal1 = inode->i_mapping;
47d3a5fa 832 mutex_unlock(&ctx->mapping_lock);
6df10a82
MN
833 return nonseekable_open(inode, file);
834}
835
43c2bbd9
CH
836static int
837spufs_signal1_release(struct inode *inode, struct file *file)
838{
839 struct spufs_inode_info *i = SPUFS_I(inode);
840 struct spu_context *ctx = i->i_ctx;
841
47d3a5fa 842 mutex_lock(&ctx->mapping_lock);
43c2bbd9
CH
843 if (!--i->i_openers)
844 ctx->signal1 = NULL;
47d3a5fa 845 mutex_unlock(&ctx->mapping_lock);
43c2bbd9
CH
846 return 0;
847}
848
bf1ab978 849static ssize_t __spufs_signal1_read(struct spu_context *ctx, char __user *buf,
67207b96
AB
850 size_t len, loff_t *pos)
851{
17f88ceb 852 int ret = 0;
67207b96
AB
853 u32 data;
854
67207b96
AB
855 if (len < 4)
856 return -EINVAL;
857
17f88ceb
DGM
858 if (ctx->csa.spu_chnlcnt_RW[3]) {
859 data = ctx->csa.spu_chnldata_RW[3];
860 ret = 4;
861 }
8b3d6663 862
17f88ceb
DGM
863 if (!ret)
864 goto out;
865
67207b96
AB
866 if (copy_to_user(buf, &data, 4))
867 return -EFAULT;
868
17f88ceb
DGM
869out:
870 return ret;
67207b96
AB
871}
872
bf1ab978
DGM
873static ssize_t spufs_signal1_read(struct file *file, char __user *buf,
874 size_t len, loff_t *pos)
875{
876 int ret;
877 struct spu_context *ctx = file->private_data;
878
879 spu_acquire_saved(ctx);
880 ret = __spufs_signal1_read(ctx, buf, len, pos);
27b1ea09 881 spu_release_saved(ctx);
bf1ab978
DGM
882
883 return ret;
884}
885
67207b96
AB
886static ssize_t spufs_signal1_write(struct file *file, const char __user *buf,
887 size_t len, loff_t *pos)
888{
889 struct spu_context *ctx;
67207b96
AB
890 u32 data;
891
892 ctx = file->private_data;
67207b96
AB
893
894 if (len < 4)
895 return -EINVAL;
896
897 if (copy_from_user(&data, buf, 4))
898 return -EFAULT;
899
8b3d6663
AB
900 spu_acquire(ctx);
901 ctx->ops->signal1_write(ctx, data);
902 spu_release(ctx);
67207b96
AB
903
904 return 4;
905}
906
78bde53e
BH
907static unsigned long spufs_signal1_mmap_nopfn(struct vm_area_struct *vma,
908 unsigned long address)
6df10a82 909{
27d5bf2a 910#if PAGE_SIZE == 0x1000
78bde53e 911 return spufs_ps_nopfn(vma, address, 0x14000, 0x1000);
27d5bf2a
BH
912#elif PAGE_SIZE == 0x10000
913 /* For 64k pages, both signal1 and signal2 can be used to mmap the whole
914 * signal 1 and 2 area
915 */
78bde53e 916 return spufs_ps_nopfn(vma, address, 0x10000, 0x10000);
27d5bf2a
BH
917#else
918#error unsupported page size
919#endif
6df10a82
MN
920}
921
922static struct vm_operations_struct spufs_signal1_mmap_vmops = {
78bde53e 923 .nopfn = spufs_signal1_mmap_nopfn,
6df10a82
MN
924};
925
926static int spufs_signal1_mmap(struct file *file, struct vm_area_struct *vma)
927{
928 if (!(vma->vm_flags & VM_SHARED))
929 return -EINVAL;
930
78bde53e 931 vma->vm_flags |= VM_IO | VM_PFNMAP;
6df10a82 932 vma->vm_page_prot = __pgprot(pgprot_val(vma->vm_page_prot)
23cc7701 933 | _PAGE_NO_CACHE | _PAGE_GUARDED);
6df10a82
MN
934
935 vma->vm_ops = &spufs_signal1_mmap_vmops;
936 return 0;
937}
6df10a82 938
5dfe4c96 939static const struct file_operations spufs_signal1_fops = {
6df10a82 940 .open = spufs_signal1_open,
43c2bbd9 941 .release = spufs_signal1_release,
67207b96
AB
942 .read = spufs_signal1_read,
943 .write = spufs_signal1_write,
6df10a82 944 .mmap = spufs_signal1_mmap,
67207b96
AB
945};
946
d054b36f
JK
947static const struct file_operations spufs_signal1_nosched_fops = {
948 .open = spufs_signal1_open,
949 .release = spufs_signal1_release,
950 .write = spufs_signal1_write,
951 .mmap = spufs_signal1_mmap,
952};
953
6df10a82
MN
954static int spufs_signal2_open(struct inode *inode, struct file *file)
955{
956 struct spufs_inode_info *i = SPUFS_I(inode);
957 struct spu_context *ctx = i->i_ctx;
43c2bbd9 958
47d3a5fa 959 mutex_lock(&ctx->mapping_lock);
6df10a82 960 file->private_data = ctx;
43c2bbd9
CH
961 if (!i->i_openers++)
962 ctx->signal2 = inode->i_mapping;
47d3a5fa 963 mutex_unlock(&ctx->mapping_lock);
6df10a82
MN
964 return nonseekable_open(inode, file);
965}
966
43c2bbd9
CH
967static int
968spufs_signal2_release(struct inode *inode, struct file *file)
969{
970 struct spufs_inode_info *i = SPUFS_I(inode);
971 struct spu_context *ctx = i->i_ctx;
972
47d3a5fa 973 mutex_lock(&ctx->mapping_lock);
43c2bbd9
CH
974 if (!--i->i_openers)
975 ctx->signal2 = NULL;
47d3a5fa 976 mutex_unlock(&ctx->mapping_lock);
43c2bbd9
CH
977 return 0;
978}
979
bf1ab978 980static ssize_t __spufs_signal2_read(struct spu_context *ctx, char __user *buf,
67207b96
AB
981 size_t len, loff_t *pos)
982{
17f88ceb 983 int ret = 0;
67207b96
AB
984 u32 data;
985
67207b96
AB
986 if (len < 4)
987 return -EINVAL;
988
17f88ceb
DGM
989 if (ctx->csa.spu_chnlcnt_RW[4]) {
990 data = ctx->csa.spu_chnldata_RW[4];
991 ret = 4;
992 }
8b3d6663 993
17f88ceb
DGM
994 if (!ret)
995 goto out;
996
67207b96
AB
997 if (copy_to_user(buf, &data, 4))
998 return -EFAULT;
999
17f88ceb 1000out:
bf1ab978
DGM
1001 return ret;
1002}
1003
1004static ssize_t spufs_signal2_read(struct file *file, char __user *buf,
1005 size_t len, loff_t *pos)
1006{
1007 struct spu_context *ctx = file->private_data;
1008 int ret;
1009
1010 spu_acquire_saved(ctx);
1011 ret = __spufs_signal2_read(ctx, buf, len, pos);
27b1ea09 1012 spu_release_saved(ctx);
bf1ab978
DGM
1013
1014 return ret;
67207b96
AB
1015}
1016
1017static ssize_t spufs_signal2_write(struct file *file, const char __user *buf,
1018 size_t len, loff_t *pos)
1019{
1020 struct spu_context *ctx;
67207b96
AB
1021 u32 data;
1022
1023 ctx = file->private_data;
67207b96
AB
1024
1025 if (len < 4)
1026 return -EINVAL;
1027
1028 if (copy_from_user(&data, buf, 4))
1029 return -EFAULT;
1030
8b3d6663
AB
1031 spu_acquire(ctx);
1032 ctx->ops->signal2_write(ctx, data);
1033 spu_release(ctx);
67207b96
AB
1034
1035 return 4;
1036}
1037
27d5bf2a 1038#if SPUFS_MMAP_4K
78bde53e
BH
1039static unsigned long spufs_signal2_mmap_nopfn(struct vm_area_struct *vma,
1040 unsigned long address)
6df10a82 1041{
27d5bf2a 1042#if PAGE_SIZE == 0x1000
78bde53e 1043 return spufs_ps_nopfn(vma, address, 0x1c000, 0x1000);
27d5bf2a
BH
1044#elif PAGE_SIZE == 0x10000
1045 /* For 64k pages, both signal1 and signal2 can be used to mmap the whole
1046 * signal 1 and 2 area
1047 */
78bde53e 1048 return spufs_ps_nopfn(vma, address, 0x10000, 0x10000);
27d5bf2a
BH
1049#else
1050#error unsupported page size
1051#endif
6df10a82
MN
1052}
1053
1054static struct vm_operations_struct spufs_signal2_mmap_vmops = {
78bde53e 1055 .nopfn = spufs_signal2_mmap_nopfn,
6df10a82
MN
1056};
1057
1058static int spufs_signal2_mmap(struct file *file, struct vm_area_struct *vma)
1059{
1060 if (!(vma->vm_flags & VM_SHARED))
1061 return -EINVAL;
1062
78bde53e 1063 vma->vm_flags |= VM_IO | VM_PFNMAP;
6df10a82 1064 vma->vm_page_prot = __pgprot(pgprot_val(vma->vm_page_prot)
23cc7701 1065 | _PAGE_NO_CACHE | _PAGE_GUARDED);
6df10a82
MN
1066
1067 vma->vm_ops = &spufs_signal2_mmap_vmops;
1068 return 0;
1069}
27d5bf2a
BH
1070#else /* SPUFS_MMAP_4K */
1071#define spufs_signal2_mmap NULL
1072#endif /* !SPUFS_MMAP_4K */
6df10a82 1073
5dfe4c96 1074static const struct file_operations spufs_signal2_fops = {
6df10a82 1075 .open = spufs_signal2_open,
43c2bbd9 1076 .release = spufs_signal2_release,
67207b96
AB
1077 .read = spufs_signal2_read,
1078 .write = spufs_signal2_write,
6df10a82 1079 .mmap = spufs_signal2_mmap,
67207b96
AB
1080};
1081
d054b36f
JK
1082static const struct file_operations spufs_signal2_nosched_fops = {
1083 .open = spufs_signal2_open,
1084 .release = spufs_signal2_release,
1085 .write = spufs_signal2_write,
1086 .mmap = spufs_signal2_mmap,
1087};
1088
104f0cc2
ME
1089/*
1090 * This is a wrapper around DEFINE_SIMPLE_ATTRIBUTE which does the
1091 * work of acquiring (or not) the SPU context before calling through
1092 * to the actual get routine. The set routine is called directly.
1093 */
1094#define SPU_ATTR_NOACQUIRE 0
1095#define SPU_ATTR_ACQUIRE 1
1096#define SPU_ATTR_ACQUIRE_SAVED 2
1097
1098#define DEFINE_SPUFS_ATTRIBUTE(__name, __get, __set, __fmt, __acquire) \
1099static u64 __##__get(void *data) \
1100{ \
1101 struct spu_context *ctx = data; \
1102 u64 ret; \
1103 \
1104 if (__acquire == SPU_ATTR_ACQUIRE) { \
1105 spu_acquire(ctx); \
1106 ret = __get(ctx); \
1107 spu_release(ctx); \
1108 } else if (__acquire == SPU_ATTR_ACQUIRE_SAVED) { \
1109 spu_acquire_saved(ctx); \
1110 ret = __get(ctx); \
1111 spu_release_saved(ctx); \
1112 } else \
1113 ret = __get(ctx); \
1114 \
1115 return ret; \
1116} \
1117DEFINE_SIMPLE_ATTRIBUTE(__name, __##__get, __set, __fmt);
1118
67207b96
AB
1119static void spufs_signal1_type_set(void *data, u64 val)
1120{
1121 struct spu_context *ctx = data;
67207b96 1122
8b3d6663
AB
1123 spu_acquire(ctx);
1124 ctx->ops->signal1_type_set(ctx, val);
1125 spu_release(ctx);
67207b96
AB
1126}
1127
104f0cc2 1128static u64 spufs_signal1_type_get(struct spu_context *ctx)
bf1ab978 1129{
bf1ab978
DGM
1130 return ctx->ops->signal1_type_get(ctx);
1131}
104f0cc2
ME
1132DEFINE_SPUFS_ATTRIBUTE(spufs_signal1_type, spufs_signal1_type_get,
1133 spufs_signal1_type_set, "%llu", SPU_ATTR_ACQUIRE);
bf1ab978 1134
67207b96
AB
1135
1136static void spufs_signal2_type_set(void *data, u64 val)
1137{
1138 struct spu_context *ctx = data;
67207b96 1139
8b3d6663
AB
1140 spu_acquire(ctx);
1141 ctx->ops->signal2_type_set(ctx, val);
1142 spu_release(ctx);
67207b96
AB
1143}
1144
104f0cc2 1145static u64 spufs_signal2_type_get(struct spu_context *ctx)
bf1ab978 1146{
bf1ab978
DGM
1147 return ctx->ops->signal2_type_get(ctx);
1148}
104f0cc2
ME
1149DEFINE_SPUFS_ATTRIBUTE(spufs_signal2_type, spufs_signal2_type_get,
1150 spufs_signal2_type_set, "%llu", SPU_ATTR_ACQUIRE);
67207b96 1151
27d5bf2a 1152#if SPUFS_MMAP_4K
78bde53e
BH
1153static unsigned long spufs_mss_mmap_nopfn(struct vm_area_struct *vma,
1154 unsigned long address)
d9379c4b 1155{
78bde53e 1156 return spufs_ps_nopfn(vma, address, 0x0000, 0x1000);
d9379c4b
AB
1157}
1158
1159static struct vm_operations_struct spufs_mss_mmap_vmops = {
78bde53e 1160 .nopfn = spufs_mss_mmap_nopfn,
d9379c4b
AB
1161};
1162
1163/*
1164 * mmap support for problem state MFC DMA area [0x0000 - 0x0fff].
d9379c4b
AB
1165 */
1166static int spufs_mss_mmap(struct file *file, struct vm_area_struct *vma)
1167{
1168 if (!(vma->vm_flags & VM_SHARED))
1169 return -EINVAL;
1170
78bde53e 1171 vma->vm_flags |= VM_IO | VM_PFNMAP;
d9379c4b 1172 vma->vm_page_prot = __pgprot(pgprot_val(vma->vm_page_prot)
23cc7701 1173 | _PAGE_NO_CACHE | _PAGE_GUARDED);
d9379c4b
AB
1174
1175 vma->vm_ops = &spufs_mss_mmap_vmops;
1176 return 0;
1177}
27d5bf2a
BH
1178#else /* SPUFS_MMAP_4K */
1179#define spufs_mss_mmap NULL
1180#endif /* !SPUFS_MMAP_4K */
d9379c4b
AB
1181
1182static int spufs_mss_open(struct inode *inode, struct file *file)
1183{
1184 struct spufs_inode_info *i = SPUFS_I(inode);
17e0e270 1185 struct spu_context *ctx = i->i_ctx;
d9379c4b
AB
1186
1187 file->private_data = i->i_ctx;
43c2bbd9 1188
47d3a5fa 1189 mutex_lock(&ctx->mapping_lock);
43c2bbd9
CH
1190 if (!i->i_openers++)
1191 ctx->mss = inode->i_mapping;
47d3a5fa 1192 mutex_unlock(&ctx->mapping_lock);
d9379c4b
AB
1193 return nonseekable_open(inode, file);
1194}
1195
43c2bbd9
CH
1196static int
1197spufs_mss_release(struct inode *inode, struct file *file)
1198{
1199 struct spufs_inode_info *i = SPUFS_I(inode);
1200 struct spu_context *ctx = i->i_ctx;
1201
47d3a5fa 1202 mutex_lock(&ctx->mapping_lock);
43c2bbd9
CH
1203 if (!--i->i_openers)
1204 ctx->mss = NULL;
47d3a5fa 1205 mutex_unlock(&ctx->mapping_lock);
43c2bbd9
CH
1206 return 0;
1207}
1208
5dfe4c96 1209static const struct file_operations spufs_mss_fops = {
d9379c4b 1210 .open = spufs_mss_open,
43c2bbd9 1211 .release = spufs_mss_release,
d9379c4b 1212 .mmap = spufs_mss_mmap,
27d5bf2a
BH
1213};
1214
78bde53e
BH
1215static unsigned long spufs_psmap_mmap_nopfn(struct vm_area_struct *vma,
1216 unsigned long address)
27d5bf2a 1217{
78bde53e 1218 return spufs_ps_nopfn(vma, address, 0x0000, 0x20000);
27d5bf2a
BH
1219}
1220
1221static struct vm_operations_struct spufs_psmap_mmap_vmops = {
78bde53e 1222 .nopfn = spufs_psmap_mmap_nopfn,
27d5bf2a
BH
1223};
1224
1225/*
1226 * mmap support for full problem state area [0x00000 - 0x1ffff].
1227 */
1228static int spufs_psmap_mmap(struct file *file, struct vm_area_struct *vma)
1229{
1230 if (!(vma->vm_flags & VM_SHARED))
1231 return -EINVAL;
1232
78bde53e 1233 vma->vm_flags |= VM_IO | VM_PFNMAP;
27d5bf2a
BH
1234 vma->vm_page_prot = __pgprot(pgprot_val(vma->vm_page_prot)
1235 | _PAGE_NO_CACHE | _PAGE_GUARDED);
1236
1237 vma->vm_ops = &spufs_psmap_mmap_vmops;
1238 return 0;
1239}
1240
1241static int spufs_psmap_open(struct inode *inode, struct file *file)
1242{
1243 struct spufs_inode_info *i = SPUFS_I(inode);
17e0e270 1244 struct spu_context *ctx = i->i_ctx;
27d5bf2a 1245
47d3a5fa 1246 mutex_lock(&ctx->mapping_lock);
27d5bf2a 1247 file->private_data = i->i_ctx;
43c2bbd9
CH
1248 if (!i->i_openers++)
1249 ctx->psmap = inode->i_mapping;
47d3a5fa 1250 mutex_unlock(&ctx->mapping_lock);
27d5bf2a
BH
1251 return nonseekable_open(inode, file);
1252}
1253
43c2bbd9
CH
1254static int
1255spufs_psmap_release(struct inode *inode, struct file *file)
1256{
1257 struct spufs_inode_info *i = SPUFS_I(inode);
1258 struct spu_context *ctx = i->i_ctx;
1259
47d3a5fa 1260 mutex_lock(&ctx->mapping_lock);
43c2bbd9
CH
1261 if (!--i->i_openers)
1262 ctx->psmap = NULL;
47d3a5fa 1263 mutex_unlock(&ctx->mapping_lock);
43c2bbd9
CH
1264 return 0;
1265}
1266
5dfe4c96 1267static const struct file_operations spufs_psmap_fops = {
27d5bf2a 1268 .open = spufs_psmap_open,
43c2bbd9 1269 .release = spufs_psmap_release,
27d5bf2a 1270 .mmap = spufs_psmap_mmap,
d9379c4b
AB
1271};
1272
1273
27d5bf2a 1274#if SPUFS_MMAP_4K
78bde53e
BH
1275static unsigned long spufs_mfc_mmap_nopfn(struct vm_area_struct *vma,
1276 unsigned long address)
6df10a82 1277{
78bde53e 1278 return spufs_ps_nopfn(vma, address, 0x3000, 0x1000);
6df10a82
MN
1279}
1280
1281static struct vm_operations_struct spufs_mfc_mmap_vmops = {
78bde53e 1282 .nopfn = spufs_mfc_mmap_nopfn,
6df10a82
MN
1283};
1284
1285/*
1286 * mmap support for problem state MFC DMA area [0x0000 - 0x0fff].
6df10a82
MN
1287 */
1288static int spufs_mfc_mmap(struct file *file, struct vm_area_struct *vma)
1289{
1290 if (!(vma->vm_flags & VM_SHARED))
1291 return -EINVAL;
1292
78bde53e 1293 vma->vm_flags |= VM_IO | VM_PFNMAP;
6df10a82 1294 vma->vm_page_prot = __pgprot(pgprot_val(vma->vm_page_prot)
23cc7701 1295 | _PAGE_NO_CACHE | _PAGE_GUARDED);
6df10a82
MN
1296
1297 vma->vm_ops = &spufs_mfc_mmap_vmops;
1298 return 0;
1299}
27d5bf2a
BH
1300#else /* SPUFS_MMAP_4K */
1301#define spufs_mfc_mmap NULL
1302#endif /* !SPUFS_MMAP_4K */
a33a7d73
AB
1303
1304static int spufs_mfc_open(struct inode *inode, struct file *file)
1305{
1306 struct spufs_inode_info *i = SPUFS_I(inode);
1307 struct spu_context *ctx = i->i_ctx;
1308
1309 /* we don't want to deal with DMA into other processes */
1310 if (ctx->owner != current->mm)
1311 return -EINVAL;
1312
1313 if (atomic_read(&inode->i_count) != 1)
1314 return -EBUSY;
1315
47d3a5fa 1316 mutex_lock(&ctx->mapping_lock);
a33a7d73 1317 file->private_data = ctx;
43c2bbd9
CH
1318 if (!i->i_openers++)
1319 ctx->mfc = inode->i_mapping;
47d3a5fa 1320 mutex_unlock(&ctx->mapping_lock);
a33a7d73
AB
1321 return nonseekable_open(inode, file);
1322}
1323
43c2bbd9
CH
1324static int
1325spufs_mfc_release(struct inode *inode, struct file *file)
1326{
1327 struct spufs_inode_info *i = SPUFS_I(inode);
1328 struct spu_context *ctx = i->i_ctx;
1329
47d3a5fa 1330 mutex_lock(&ctx->mapping_lock);
43c2bbd9
CH
1331 if (!--i->i_openers)
1332 ctx->mfc = NULL;
47d3a5fa 1333 mutex_unlock(&ctx->mapping_lock);
43c2bbd9
CH
1334 return 0;
1335}
1336
a33a7d73
AB
1337/* interrupt-level mfc callback function. */
1338void spufs_mfc_callback(struct spu *spu)
1339{
1340 struct spu_context *ctx = spu->ctx;
1341
1342 wake_up_all(&ctx->mfc_wq);
1343
1344 pr_debug("%s %s\n", __FUNCTION__, spu->name);
1345 if (ctx->mfc_fasync) {
1346 u32 free_elements, tagstatus;
1347 unsigned int mask;
1348
1349 /* no need for spu_acquire in interrupt context */
1350 free_elements = ctx->ops->get_mfc_free_elements(ctx);
1351 tagstatus = ctx->ops->read_mfc_tagstatus(ctx);
1352
1353 mask = 0;
1354 if (free_elements & 0xffff)
1355 mask |= POLLOUT;
1356 if (tagstatus & ctx->tagwait)
1357 mask |= POLLIN;
1358
1359 kill_fasync(&ctx->mfc_fasync, SIGIO, mask);
1360 }
1361}
1362
1363static int spufs_read_mfc_tagstatus(struct spu_context *ctx, u32 *status)
1364{
 1365 /* See if at least one tag group is complete */
1366 /* FIXME we need locking around tagwait */
1367 *status = ctx->ops->read_mfc_tagstatus(ctx) & ctx->tagwait;
1368 ctx->tagwait &= ~*status;
1369 if (*status)
1370 return 1;
1371
1372 /* enable interrupt waiting for any tag group,
1373 may silently fail if interrupts are already enabled */
1374 ctx->ops->set_mfc_query(ctx, ctx->tagwait, 1);
1375 return 0;
1376}
1377
1378static ssize_t spufs_mfc_read(struct file *file, char __user *buffer,
1379 size_t size, loff_t *pos)
1380{
1381 struct spu_context *ctx = file->private_data;
1382 int ret = -EINVAL;
1383 u32 status;
1384
1385 if (size != 4)
1386 goto out;
1387
1388 spu_acquire(ctx);
1389 if (file->f_flags & O_NONBLOCK) {
1390 status = ctx->ops->read_mfc_tagstatus(ctx);
1391 if (!(status & ctx->tagwait))
1392 ret = -EAGAIN;
1393 else
1394 ctx->tagwait &= ~status;
1395 } else {
1396 ret = spufs_wait(ctx->mfc_wq,
1397 spufs_read_mfc_tagstatus(ctx, &status));
1398 }
1399 spu_release(ctx);
1400
1401 if (ret)
1402 goto out;
1403
1404 ret = 4;
1405 if (copy_to_user(buffer, &status, 4))
1406 ret = -EFAULT;
1407
1408out:
1409 return ret;
1410}
1411
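/*
 * Validate a DMA command passed in from user space: only the plain
 * put/get variants are allowed, local store address and effective
 * address must be equally aligned, the transfer size is limited to
 * 16k, and the high tag numbers as well as the class field are
 * reserved.
 */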
1412static int spufs_check_valid_dma(struct mfc_dma_command *cmd)
1413{
1414 pr_debug("queueing DMA %x %lx %x %x %x\n", cmd->lsa,
1415 cmd->ea, cmd->size, cmd->tag, cmd->cmd);
1416
1417 switch (cmd->cmd) {
1418 case MFC_PUT_CMD:
1419 case MFC_PUTF_CMD:
1420 case MFC_PUTB_CMD:
1421 case MFC_GET_CMD:
1422 case MFC_GETF_CMD:
1423 case MFC_GETB_CMD:
1424 break;
1425 default:
1426 pr_debug("invalid DMA opcode %x\n", cmd->cmd);
1427 return -EIO;
1428 }
1429
 1430 if ((cmd->lsa & 0xf) != (cmd->ea & 0xf)) {
1431 pr_debug("invalid DMA alignment, ea %lx lsa %x\n",
1432 cmd->ea, cmd->lsa);
1433 return -EIO;
1434 }
1435
1436 switch (cmd->size & 0xf) {
1437 case 1:
1438 break;
1439 case 2:
1440 if (cmd->lsa & 1)
1441 goto error;
1442 break;
1443 case 4:
1444 if (cmd->lsa & 3)
1445 goto error;
1446 break;
1447 case 8:
1448 if (cmd->lsa & 7)
1449 goto error;
1450 break;
1451 case 0:
1452 if (cmd->lsa & 15)
1453 goto error;
1454 break;
1455 error:
1456 default:
1457 pr_debug("invalid DMA alignment %x for size %x\n",
1458 cmd->lsa & 0xf, cmd->size);
1459 return -EIO;
1460 }
1461
1462 if (cmd->size > 16 * 1024) {
1463 pr_debug("invalid DMA size %x\n", cmd->size);
1464 return -EIO;
1465 }
1466
1467 if (cmd->tag & 0xfff0) {
1468 /* we reserve the higher tag numbers for kernel use */
1469 pr_debug("invalid DMA tag\n");
1470 return -EIO;
1471 }
1472
1473 if (cmd->class) {
1474 /* not supported in this version */
1475 pr_debug("invalid DMA class\n");
1476 return -EIO;
1477 }
1478
1479 return 0;
1480}
1481
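/*
 * Try to queue one MFC command.  Returns nonzero when the attempt is
 * finished (either queued successfully or failed with a hard error);
 * returns zero when the queue was still full so that spufs_wait()
 * keeps waiting and retries later.
 */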
1482static int spu_send_mfc_command(struct spu_context *ctx,
1483 struct mfc_dma_command cmd,
1484 int *error)
1485{
1486 *error = ctx->ops->send_mfc_command(ctx, &cmd);
1487 if (*error == -EAGAIN) {
1488 /* wait for any tag group to complete
1489 so we have space for the new command */
1490 ctx->ops->set_mfc_query(ctx, ctx->tagwait, 1);
1491 /* try again, because the queue might be
1492 empty again */
1493 *error = ctx->ops->send_mfc_command(ctx, &cmd);
1494 if (*error == -EAGAIN)
1495 return 0;
1496 }
1497 return 1;
1498}
1499
1500static ssize_t spufs_mfc_write(struct file *file, const char __user *buffer,
1501 size_t size, loff_t *pos)
1502{
1503 struct spu_context *ctx = file->private_data;
1504 struct mfc_dma_command cmd;
1505 int ret = -EINVAL;
1506
1507 if (size != sizeof cmd)
1508 goto out;
1509
1510 ret = -EFAULT;
1511 if (copy_from_user(&cmd, buffer, sizeof cmd))
1512 goto out;
1513
1514 ret = spufs_check_valid_dma(&cmd);
1515 if (ret)
1516 goto out;
1517
33bfd7a7
AB
1518 spu_acquire(ctx);
1519 ret = spufs_wait(ctx->run_wq, ctx->state == SPU_STATE_RUNNABLE);
577f8f10
AM
1520 if (ret)
1521 goto out;
1522
a33a7d73
AB
1523 if (file->f_flags & O_NONBLOCK) {
1524 ret = ctx->ops->send_mfc_command(ctx, &cmd);
1525 } else {
1526 int status;
1527 ret = spufs_wait(ctx->mfc_wq,
1528 spu_send_mfc_command(ctx, cmd, &status));
1529 if (status)
1530 ret = status;
1531 }
a33a7d73
AB
1532
1533 if (ret)
933b0e35 1534 goto out_unlock;
a33a7d73
AB
1535
1536 ctx->tagwait |= 1 << cmd.tag;
3692dc66 1537 ret = size;
a33a7d73 1538
933b0e35
KA
1539out_unlock:
1540 spu_release(ctx);
a33a7d73
AB
1541out:
1542 return ret;
1543}
1544
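/*
 * poll support for the mfc file: writable when the command queue has
 * free slots, readable when one of the tag groups we are waiting for
 * has completed.
 */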
1545static unsigned int spufs_mfc_poll(struct file *file,poll_table *wait)
1546{
1547 struct spu_context *ctx = file->private_data;
1548 u32 free_elements, tagstatus;
1549 unsigned int mask;
1550
933b0e35
KA
1551 poll_wait(file, &ctx->mfc_wq, wait);
1552
a33a7d73
AB
1553 spu_acquire(ctx);
1554 ctx->ops->set_mfc_query(ctx, ctx->tagwait, 2);
1555 free_elements = ctx->ops->get_mfc_free_elements(ctx);
1556 tagstatus = ctx->ops->read_mfc_tagstatus(ctx);
1557 spu_release(ctx);
1558
a33a7d73
AB
1559 mask = 0;
1560 if (free_elements & 0xffff)
1561 mask |= POLLOUT | POLLWRNORM;
1562 if (tagstatus & ctx->tagwait)
1563 mask |= POLLIN | POLLRDNORM;
1564
1565 pr_debug("%s: free %d tagstatus %d tagwait %d\n", __FUNCTION__,
1566 free_elements, tagstatus, ctx->tagwait);
1567
1568 return mask;
1569}
1570
73b6af8a 1571static int spufs_mfc_flush(struct file *file, fl_owner_t id)
a33a7d73
AB
1572{
1573 struct spu_context *ctx = file->private_data;
1574 int ret;
1575
1576 spu_acquire(ctx);
1577#if 0
1578/* this currently hangs */
1579 ret = spufs_wait(ctx->mfc_wq,
1580 ctx->ops->set_mfc_query(ctx, ctx->tagwait, 2));
1581 if (ret)
1582 goto out;
1583 ret = spufs_wait(ctx->mfc_wq,
1584 ctx->ops->read_mfc_tagstatus(ctx) == ctx->tagwait);
1585out:
1586#else
1587 ret = 0;
1588#endif
1589 spu_release(ctx);
1590
1591 return ret;
1592}
1593
1594static int spufs_mfc_fsync(struct file *file, struct dentry *dentry,
1595 int datasync)
1596{
73b6af8a 1597 return spufs_mfc_flush(file, NULL);
a33a7d73
AB
1598}
1599
1600static int spufs_mfc_fasync(int fd, struct file *file, int on)
1601{
1602 struct spu_context *ctx = file->private_data;
1603
1604 return fasync_helper(fd, file, on, &ctx->mfc_fasync);
1605}
1606
5dfe4c96 1607static const struct file_operations spufs_mfc_fops = {
a33a7d73 1608 .open = spufs_mfc_open,
43c2bbd9 1609 .release = spufs_mfc_release,
a33a7d73
AB
1610 .read = spufs_mfc_read,
1611 .write = spufs_mfc_write,
1612 .poll = spufs_mfc_poll,
1613 .flush = spufs_mfc_flush,
1614 .fsync = spufs_mfc_fsync,
1615 .fasync = spufs_mfc_fasync,
6df10a82 1616 .mmap = spufs_mfc_mmap,
a33a7d73
AB
1617};
1618
67207b96
AB
1619static void spufs_npc_set(void *data, u64 val)
1620{
1621 struct spu_context *ctx = data;
8b3d6663
AB
1622 spu_acquire(ctx);
1623 ctx->ops->npc_write(ctx, val);
1624 spu_release(ctx);
67207b96
AB
1625}
1626
104f0cc2 1627static u64 spufs_npc_get(struct spu_context *ctx)
78810ff6
ME
1628{
1629 return ctx->ops->npc_read(ctx);
1630}
104f0cc2
ME
1631DEFINE_SPUFS_ATTRIBUTE(spufs_npc_ops, spufs_npc_get, spufs_npc_set,
1632 "0x%llx\n", SPU_ATTR_ACQUIRE);
67207b96 1633
8b3d6663
AB
1634static void spufs_decr_set(void *data, u64 val)
1635{
1636 struct spu_context *ctx = data;
1637 struct spu_lscsa *lscsa = ctx->csa.lscsa;
1638 spu_acquire_saved(ctx);
1639 lscsa->decr.slot[0] = (u32) val;
27b1ea09 1640 spu_release_saved(ctx);
8b3d6663
AB
1641}
1642
104f0cc2 1643static u64 spufs_decr_get(struct spu_context *ctx)
8b3d6663 1644{
8b3d6663 1645 struct spu_lscsa *lscsa = ctx->csa.lscsa;
bf1ab978
DGM
1646 return lscsa->decr.slot[0];
1647}
104f0cc2
ME
1648DEFINE_SPUFS_ATTRIBUTE(spufs_decr_ops, spufs_decr_get, spufs_decr_set,
1649 "0x%llx\n", SPU_ATTR_ACQUIRE_SAVED);
8b3d6663
AB
1650
1651static void spufs_decr_status_set(void *data, u64 val)
1652{
1653 struct spu_context *ctx = data;
8b3d6663 1654 spu_acquire_saved(ctx);
d40a01d4
MN
1655 if (val)
1656 ctx->csa.priv2.mfc_control_RW |= MFC_CNTL_DECREMENTER_RUNNING;
1657 else
1658 ctx->csa.priv2.mfc_control_RW &= ~MFC_CNTL_DECREMENTER_RUNNING;
27b1ea09 1659 spu_release_saved(ctx);
8b3d6663
AB
1660}
1661
104f0cc2 1662static u64 spufs_decr_status_get(struct spu_context *ctx)
8b3d6663 1663{
d40a01d4
MN
1664 if (ctx->csa.priv2.mfc_control_RW & MFC_CNTL_DECREMENTER_RUNNING)
1665 return SPU_DECR_STATUS_RUNNING;
1666 else
1667 return 0;
bf1ab978 1668}
104f0cc2
ME
1669DEFINE_SPUFS_ATTRIBUTE(spufs_decr_status_ops, spufs_decr_status_get,
1670 spufs_decr_status_set, "0x%llx\n",
1671 SPU_ATTR_ACQUIRE_SAVED);
8b3d6663 1672
8b3d6663
AB
1673static void spufs_event_mask_set(void *data, u64 val)
1674{
1675 struct spu_context *ctx = data;
1676 struct spu_lscsa *lscsa = ctx->csa.lscsa;
1677 spu_acquire_saved(ctx);
1678 lscsa->event_mask.slot[0] = (u32) val;
27b1ea09 1679 spu_release_saved(ctx);
8b3d6663
AB
1680}
1681
104f0cc2 1682static u64 spufs_event_mask_get(struct spu_context *ctx)
8b3d6663 1683{
8b3d6663 1684 struct spu_lscsa *lscsa = ctx->csa.lscsa;
bf1ab978
DGM
1685 return lscsa->event_mask.slot[0];
1686}
1687
104f0cc2
ME
1688DEFINE_SPUFS_ATTRIBUTE(spufs_event_mask_ops, spufs_event_mask_get,
1689 spufs_event_mask_set, "0x%llx\n",
1690 SPU_ATTR_ACQUIRE_SAVED);
8b3d6663 1691
104f0cc2 1692static u64 spufs_event_status_get(struct spu_context *ctx)
b9e3bd77 1693{
b9e3bd77 1694 struct spu_state *state = &ctx->csa;
b9e3bd77 1695 u64 stat;
b9e3bd77
DGM
1696 stat = state->spu_chnlcnt_RW[0];
1697 if (stat)
bf1ab978
DGM
1698 return state->spu_chnldata_RW[0];
1699 return 0;
1700}
104f0cc2
ME
1701DEFINE_SPUFS_ATTRIBUTE(spufs_event_status_ops, spufs_event_status_get,
1702 NULL, "0x%llx\n", SPU_ATTR_ACQUIRE_SAVED)
b9e3bd77 1703
8b3d6663
AB
1704static void spufs_srr0_set(void *data, u64 val)
1705{
1706 struct spu_context *ctx = data;
1707 struct spu_lscsa *lscsa = ctx->csa.lscsa;
1708 spu_acquire_saved(ctx);
1709 lscsa->srr0.slot[0] = (u32) val;
27b1ea09 1710 spu_release_saved(ctx);
8b3d6663
AB
1711}
1712
104f0cc2 1713static u64 spufs_srr0_get(struct spu_context *ctx)
8b3d6663 1714{
8b3d6663 1715 struct spu_lscsa *lscsa = ctx->csa.lscsa;
104f0cc2 1716 return lscsa->srr0.slot[0];
8b3d6663 1717}
104f0cc2
ME
1718DEFINE_SPUFS_ATTRIBUTE(spufs_srr0_ops, spufs_srr0_get, spufs_srr0_set,
1719 "0x%llx\n", SPU_ATTR_ACQUIRE_SAVED)
8b3d6663 1720
104f0cc2 1721static u64 spufs_id_get(struct spu_context *ctx)
7b1a7014 1722{
7b1a7014
AB
1723 u64 num;
1724
7b1a7014
AB
1725 if (ctx->state == SPU_STATE_RUNNABLE)
1726 num = ctx->spu->number;
1727 else
1728 num = (unsigned int)-1;
7b1a7014
AB
1729
1730 return num;
1731}
104f0cc2
ME
1732DEFINE_SPUFS_ATTRIBUTE(spufs_id_ops, spufs_id_get, NULL, "0x%llx\n",
1733 SPU_ATTR_ACQUIRE)
7b1a7014 1734
104f0cc2 1735static u64 spufs_object_id_get(struct spu_context *ctx)
bf1ab978
DGM
1736{
1737 /* FIXME: Should there really be no locking here? */
104f0cc2 1738 return ctx->object_id;
bf1ab978
DGM
1739}
1740
86767277
AB
1741static void spufs_object_id_set(void *data, u64 id)
1742{
1743 struct spu_context *ctx = data;
1744 ctx->object_id = id;
1745}
1746
104f0cc2
ME
1747DEFINE_SPUFS_ATTRIBUTE(spufs_object_id_ops, spufs_object_id_get,
1748 spufs_object_id_set, "0x%llx\n", SPU_ATTR_NOACQUIRE);
86767277 1749
104f0cc2 1750static u64 spufs_lslr_get(struct spu_context *ctx)
bf1ab978 1751{
bf1ab978
DGM
1752 return ctx->csa.priv2.spu_lslr_RW;
1753}
104f0cc2
ME
1754DEFINE_SPUFS_ATTRIBUTE(spufs_lslr_ops, spufs_lslr_get, NULL, "0x%llx\n",
1755 SPU_ATTR_ACQUIRE_SAVED);
b9e3bd77
DGM
1756
1757static int spufs_info_open(struct inode *inode, struct file *file)
1758{
1759 struct spufs_inode_info *i = SPUFS_I(inode);
1760 struct spu_context *ctx = i->i_ctx;
1761 file->private_data = ctx;
1762 return 0;
1763}
1764
cbe709c1
BH
1765static int spufs_caps_show(struct seq_file *s, void *private)
1766{
1767 struct spu_context *ctx = s->private;
1768
1769 if (!(ctx->flags & SPU_CREATE_NOSCHED))
1770 seq_puts(s, "sched\n");
1771 if (!(ctx->flags & SPU_CREATE_ISOLATE))
1772 seq_puts(s, "step\n");
1773 return 0;
1774}
1775
1776static int spufs_caps_open(struct inode *inode, struct file *file)
1777{
1778 return single_open(file, spufs_caps_show, SPUFS_I(inode)->i_ctx);
1779}
1780
1781static const struct file_operations spufs_caps_fops = {
1782 .open = spufs_caps_open,
1783 .read = seq_read,
1784 .llseek = seq_lseek,
1785 .release = single_release,
1786};
1787
bf1ab978
DGM
1788static ssize_t __spufs_mbox_info_read(struct spu_context *ctx,
1789 char __user *buf, size_t len, loff_t *pos)
1790{
1791 u32 mbox_stat;
1792 u32 data;
1793
1794 mbox_stat = ctx->csa.prob.mb_stat_R;
1795 if (mbox_stat & 0x0000ff) {
1796 data = ctx->csa.prob.pu_mb_R;
1797 }
1798
1799 return simple_read_from_buffer(buf, len, pos, &data, sizeof data);
1800}
1801
69a2f00c
DGM
1802static ssize_t spufs_mbox_info_read(struct file *file, char __user *buf,
1803 size_t len, loff_t *pos)
1804{
bf1ab978 1805 int ret;
69a2f00c 1806 struct spu_context *ctx = file->private_data;
69a2f00c
DGM
1807
1808 if (!access_ok(VERIFY_WRITE, buf, len))
1809 return -EFAULT;
1810
1811 spu_acquire_saved(ctx);
1812 spin_lock(&ctx->csa.register_lock);
bf1ab978 1813 ret = __spufs_mbox_info_read(ctx, buf, len, pos);
69a2f00c 1814 spin_unlock(&ctx->csa.register_lock);
27b1ea09 1815 spu_release_saved(ctx);
69a2f00c 1816
bf1ab978 1817 return ret;
69a2f00c
DGM
1818}
1819
5dfe4c96 1820static const struct file_operations spufs_mbox_info_fops = {
69a2f00c
DGM
1821 .open = spufs_info_open,
1822 .read = spufs_mbox_info_read,
1823 .llseek = generic_file_llseek,
1824};
1825
bf1ab978
DGM
1826static ssize_t __spufs_ibox_info_read(struct spu_context *ctx,
1827 char __user *buf, size_t len, loff_t *pos)
1828{
1829 u32 ibox_stat;
1830 u32 data;
1831
1832 ibox_stat = ctx->csa.prob.mb_stat_R;
1833 if (ibox_stat & 0xff0000) {
1834 data = ctx->csa.priv2.puint_mb_R;
1835 }
1836
1837 return simple_read_from_buffer(buf, len, pos, &data, sizeof data);
1838}
1839
69a2f00c
DGM
1840static ssize_t spufs_ibox_info_read(struct file *file, char __user *buf,
1841 size_t len, loff_t *pos)
1842{
1843 struct spu_context *ctx = file->private_data;
bf1ab978 1844 int ret;
69a2f00c
DGM
1845
1846 if (!access_ok(VERIFY_WRITE, buf, len))
1847 return -EFAULT;
1848
1849 spu_acquire_saved(ctx);
1850 spin_lock(&ctx->csa.register_lock);
bf1ab978 1851 ret = __spufs_ibox_info_read(ctx, buf, len, pos);
69a2f00c 1852 spin_unlock(&ctx->csa.register_lock);
27b1ea09 1853 spu_release_saved(ctx);
69a2f00c 1854
bf1ab978 1855 return ret;
69a2f00c
DGM
1856}
1857
5dfe4c96 1858static const struct file_operations spufs_ibox_info_fops = {
69a2f00c
DGM
1859 .open = spufs_info_open,
1860 .read = spufs_ibox_info_read,
1861 .llseek = generic_file_llseek,
1862};
1863
bf1ab978
DGM
1864static ssize_t __spufs_wbox_info_read(struct spu_context *ctx,
1865 char __user *buf, size_t len, loff_t *pos)
69a2f00c 1866{
69a2f00c
DGM
1867 int i, cnt;
1868 u32 data[4];
1869 u32 wbox_stat;
1870
bf1ab978
DGM
1871 wbox_stat = ctx->csa.prob.mb_stat_R;
1872 cnt = 4 - ((wbox_stat & 0x00ff00) >> 8);
1873 for (i = 0; i < cnt; i++) {
1874 data[i] = ctx->csa.spu_mailbox_data[i];
1875 }
1876
1877 return simple_read_from_buffer(buf, len, pos, &data,
1878 cnt * sizeof(u32));
1879}
1880
1881static ssize_t spufs_wbox_info_read(struct file *file, char __user *buf,
1882 size_t len, loff_t *pos)
1883{
1884 struct spu_context *ctx = file->private_data;
1885 int ret;
1886
69a2f00c
DGM
1887 if (!access_ok(VERIFY_WRITE, buf, len))
1888 return -EFAULT;
1889
1890 spu_acquire_saved(ctx);
1891 spin_lock(&ctx->csa.register_lock);
bf1ab978 1892 ret = __spufs_wbox_info_read(ctx, buf, len, pos);
69a2f00c 1893 spin_unlock(&ctx->csa.register_lock);
27b1ea09 1894 spu_release_saved(ctx);
69a2f00c 1895
bf1ab978 1896 return ret;
69a2f00c
DGM
1897}
1898
static const struct file_operations spufs_wbox_info_fops = {
	.open = spufs_info_open,
	.read = spufs_wbox_info_read,
	.llseek = generic_file_llseek,
};

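/*
 * dma_info reports the SPU-side MFC state captured in the saved context:
 * the tag status query type and mask, the saved values of the tag status,
 * list stall-and-notify and atomic command status channels (channels 24,
 * 25 and 27 below), and the 16 entries of the SPU MFC command queue.
 */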
static ssize_t __spufs_dma_info_read(struct spu_context *ctx,
			char __user *buf, size_t len, loff_t *pos)
{
	struct spu_dma_info info;
	struct mfc_cq_sr *qp, *spuqp;
	int i;

	info.dma_info_type = ctx->csa.priv2.spu_tag_status_query_RW;
	info.dma_info_mask = ctx->csa.lscsa->tag_mask.slot[0];
	info.dma_info_status = ctx->csa.spu_chnldata_RW[24];
	info.dma_info_stall_and_notify = ctx->csa.spu_chnldata_RW[25];
	info.dma_info_atomic_command_status = ctx->csa.spu_chnldata_RW[27];
	for (i = 0; i < 16; i++) {
		qp = &info.dma_info_command_data[i];
		spuqp = &ctx->csa.priv2.spuq[i];

		qp->mfc_cq_data0_RW = spuqp->mfc_cq_data0_RW;
		qp->mfc_cq_data1_RW = spuqp->mfc_cq_data1_RW;
		qp->mfc_cq_data2_RW = spuqp->mfc_cq_data2_RW;
		qp->mfc_cq_data3_RW = spuqp->mfc_cq_data3_RW;
	}

	return simple_read_from_buffer(buf, len, pos, &info,
				sizeof info);
}

static ssize_t spufs_dma_info_read(struct file *file, char __user *buf,
			size_t len, loff_t *pos)
{
	struct spu_context *ctx = file->private_data;
	int ret;

	if (!access_ok(VERIFY_WRITE, buf, len))
		return -EFAULT;

	spu_acquire_saved(ctx);
	spin_lock(&ctx->csa.register_lock);
	ret = __spufs_dma_info_read(ctx, buf, len, pos);
	spin_unlock(&ctx->csa.register_lock);
	spu_release_saved(ctx);

	return ret;
}

static const struct file_operations spufs_dma_info_fops = {
	.open = spufs_info_open,
	.read = spufs_dma_info_read,
};

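/*
 * proxydma_info is the PU-side ("proxy") counterpart of dma_info: it
 * captures the proxy DMA query registers from the problem state area and
 * the eight entries of the PU-initiated MFC command queue.
 */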
static ssize_t __spufs_proxydma_info_read(struct spu_context *ctx,
			char __user *buf, size_t len, loff_t *pos)
{
	struct spu_proxydma_info info;
	struct mfc_cq_sr *qp, *puqp;
	int ret = sizeof info;
	int i;

	if (len < ret)
		return -EINVAL;

	if (!access_ok(VERIFY_WRITE, buf, len))
		return -EFAULT;

	info.proxydma_info_type = ctx->csa.prob.dma_querytype_RW;
	info.proxydma_info_mask = ctx->csa.prob.dma_querymask_RW;
	info.proxydma_info_status = ctx->csa.prob.dma_tagstatus_R;
	for (i = 0; i < 8; i++) {
		qp = &info.proxydma_info_command_data[i];
		puqp = &ctx->csa.priv2.puq[i];

		qp->mfc_cq_data0_RW = puqp->mfc_cq_data0_RW;
		qp->mfc_cq_data1_RW = puqp->mfc_cq_data1_RW;
		qp->mfc_cq_data2_RW = puqp->mfc_cq_data2_RW;
		qp->mfc_cq_data3_RW = puqp->mfc_cq_data3_RW;
	}

	return simple_read_from_buffer(buf, len, pos, &info,
				sizeof info);
}

static ssize_t spufs_proxydma_info_read(struct file *file, char __user *buf,
			size_t len, loff_t *pos)
{
	struct spu_context *ctx = file->private_data;
	int ret;

	spu_acquire_saved(ctx);
	spin_lock(&ctx->csa.register_lock);
	ret = __spufs_proxydma_info_read(ctx, buf, len, pos);
	spin_unlock(&ctx->csa.register_lock);
	spu_release_saved(ctx);

	return ret;
}

static const struct file_operations spufs_proxydma_info_fops = {
	.open = spufs_info_open,
	.read = spufs_proxydma_info_read,
};

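/*
 * tid reports the pid of the controlling thread; it is assumed here that
 * ctx->tid is recorded by the spu_run path when the context is run.
 */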
static int spufs_show_tid(struct seq_file *s, void *private)
{
	struct spu_context *ctx = s->private;

	seq_printf(s, "%d\n", ctx->tid);
	return 0;
}

static int spufs_tid_open(struct inode *inode, struct file *file)
{
	return single_open(file, spufs_show_tid, SPUFS_I(inode)->i_ctx);
}

static const struct file_operations spufs_tid_fops = {
	.open = spufs_tid_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
};

static const char *ctx_state_names[] = {
	"user", "system", "iowait", "loaded"
};

static unsigned long long spufs_acct_time(struct spu_context *ctx,
		enum spu_utilization_state state)
{
	struct timespec ts;
	unsigned long long time = ctx->stats.times[state];

	/*
	 * In general, utilization statistics are updated by the controlling
	 * thread as the spu context moves through various well defined
	 * state transitions, but if the context is lazily loaded its
	 * utilization statistics are not updated as the controlling thread
	 * is not tightly coupled with the execution of the spu context.  We
	 * calculate and apply the time delta from the last recorded state
	 * of the spu context.
	 */
	if (ctx->spu && ctx->stats.util_state == state) {
		ktime_get_ts(&ts);
		time += timespec_to_ns(&ts) - ctx->stats.tstamp;
	}

	return time / NSEC_PER_MSEC;
}

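/*
 * While a context is loaded on a physical SPU, its SLB fault and class 2
 * interrupt counts accumulate in the per-spu statistics.  The two helpers
 * below fold the delta since load time (the *_base snapshots) into the
 * per-context totals so that readers always see a consistent sum.
 */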
static unsigned long long spufs_slb_flts(struct spu_context *ctx)
{
	unsigned long long slb_flts = ctx->stats.slb_flt;

	if (ctx->state == SPU_STATE_RUNNABLE) {
		slb_flts += (ctx->spu->stats.slb_flt -
			     ctx->stats.slb_flt_base);
	}

	return slb_flts;
}

static unsigned long long spufs_class2_intrs(struct spu_context *ctx)
{
	unsigned long long class2_intrs = ctx->stats.class2_intr;

	if (ctx->state == SPU_STATE_RUNNABLE) {
		class2_intrs += (ctx->spu->stats.class2_intr -
				 ctx->stats.class2_intr_base);
	}

	return class2_intrs;
}

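/*
 * The stat file emits a single line per read:
 *   <state> <user> <system> <iowait> <loaded> <voluntary ctx switches>
 *   <involuntary ctx switches> <slb faults> <hash faults> <minor faults>
 *   <major faults> <class 2 interrupts> <libassist>
 * where the four utilization times are reported in milliseconds.
 */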
static int spufs_show_stat(struct seq_file *s, void *private)
{
	struct spu_context *ctx = s->private;

	spu_acquire(ctx);
	seq_printf(s, "%s %llu %llu %llu %llu "
		"%llu %llu %llu %llu %llu %llu %llu %llu\n",
		ctx_state_names[ctx->stats.util_state],
		spufs_acct_time(ctx, SPU_UTIL_USER),
		spufs_acct_time(ctx, SPU_UTIL_SYSTEM),
		spufs_acct_time(ctx, SPU_UTIL_IOWAIT),
		spufs_acct_time(ctx, SPU_UTIL_IDLE_LOADED),
		ctx->stats.vol_ctx_switch,
		ctx->stats.invol_ctx_switch,
		spufs_slb_flts(ctx),
		ctx->stats.hash_flt,
		ctx->stats.min_flt,
		ctx->stats.maj_flt,
		spufs_class2_intrs(ctx),
		ctx->stats.libassist);
	spu_release(ctx);
	return 0;
}

static int spufs_stat_open(struct inode *inode, struct file *file)
{
	return single_open(file, spufs_show_stat, SPUFS_I(inode)->i_ctx);
}

static const struct file_operations spufs_stat_fops = {
	.open = spufs_stat_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
};

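/*
 * Files created in a regular (scheduled) context directory: one
 * tree_descr entry per file, giving its name, file operations and
 * access mode.
 */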
struct tree_descr spufs_dir_contents[] = {
	{ "capabilities", &spufs_caps_fops, 0444, },
	{ "mem", &spufs_mem_fops, 0666, },
	{ "regs", &spufs_regs_fops, 0666, },
	{ "mbox", &spufs_mbox_fops, 0444, },
	{ "ibox", &spufs_ibox_fops, 0444, },
	{ "wbox", &spufs_wbox_fops, 0222, },
	{ "mbox_stat", &spufs_mbox_stat_fops, 0444, },
	{ "ibox_stat", &spufs_ibox_stat_fops, 0444, },
	{ "wbox_stat", &spufs_wbox_stat_fops, 0444, },
	{ "signal1", &spufs_signal1_fops, 0666, },
	{ "signal2", &spufs_signal2_fops, 0666, },
	{ "signal1_type", &spufs_signal1_type, 0666, },
	{ "signal2_type", &spufs_signal2_type, 0666, },
	{ "cntl", &spufs_cntl_fops, 0666, },
	{ "fpcr", &spufs_fpcr_fops, 0666, },
	{ "lslr", &spufs_lslr_ops, 0444, },
	{ "mfc", &spufs_mfc_fops, 0666, },
	{ "mss", &spufs_mss_fops, 0666, },
	{ "npc", &spufs_npc_ops, 0666, },
	{ "srr0", &spufs_srr0_ops, 0666, },
	{ "decr", &spufs_decr_ops, 0666, },
	{ "decr_status", &spufs_decr_status_ops, 0666, },
	{ "event_mask", &spufs_event_mask_ops, 0666, },
	{ "event_status", &spufs_event_status_ops, 0444, },
	{ "psmap", &spufs_psmap_fops, 0666, },
	{ "phys-id", &spufs_id_ops, 0666, },
	{ "object-id", &spufs_object_id_ops, 0666, },
	{ "mbox_info", &spufs_mbox_info_fops, 0444, },
	{ "ibox_info", &spufs_ibox_info_fops, 0444, },
	{ "wbox_info", &spufs_wbox_info_fops, 0444, },
	{ "dma_info", &spufs_dma_info_fops, 0444, },
	{ "proxydma_info", &spufs_proxydma_info_fops, 0444, },
	{ "tid", &spufs_tid_fops, 0444, },
	{ "stat", &spufs_stat_fops, 0444, },
	{},
};

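/*
 * Files created for a context opened with SPU_CREATE_NOSCHED: entries
 * whose implementation requires scheduling the context in and out
 * (regs, fpcr, decr, the *_info snapshots, ...) are omitted, and the
 * signal notification files use the non-schedulable variants.
 */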
struct tree_descr spufs_dir_nosched_contents[] = {
	{ "capabilities", &spufs_caps_fops, 0444, },
	{ "mem", &spufs_mem_fops, 0666, },
	{ "mbox", &spufs_mbox_fops, 0444, },
	{ "ibox", &spufs_ibox_fops, 0444, },
	{ "wbox", &spufs_wbox_fops, 0222, },
	{ "mbox_stat", &spufs_mbox_stat_fops, 0444, },
	{ "ibox_stat", &spufs_ibox_stat_fops, 0444, },
	{ "wbox_stat", &spufs_wbox_stat_fops, 0444, },
	{ "signal1", &spufs_signal1_nosched_fops, 0222, },
	{ "signal2", &spufs_signal2_nosched_fops, 0222, },
	{ "signal1_type", &spufs_signal1_type, 0666, },
	{ "signal2_type", &spufs_signal2_type, 0666, },
	{ "mss", &spufs_mss_fops, 0666, },
	{ "mfc", &spufs_mfc_fops, 0666, },
	{ "cntl", &spufs_cntl_fops, 0666, },
	{ "npc", &spufs_npc_ops, 0666, },
	{ "psmap", &spufs_psmap_fops, 0666, },
	{ "phys-id", &spufs_id_ops, 0666, },
	{ "object-id", &spufs_object_id_ops, 0666, },
	{ "tid", &spufs_tid_fops, 0444, },
	{ "stat", &spufs_stat_fops, 0444, },
	{},
};

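/*
 * Coredump reader table: each entry names a context file, the routine
 * used to dump it (either a __spufs_*_read copy-out helper or a simple
 * attribute getter) and the space reserved for it in the core file.
 * The 19-byte entries presumably correspond to a "0x"-prefixed 16-digit
 * hex attribute value plus terminator.
 */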
struct spufs_coredump_reader spufs_coredump_read[] = {
	{ "regs", __spufs_regs_read, NULL, sizeof(struct spu_reg128[128])},
	{ "fpcr", __spufs_fpcr_read, NULL, sizeof(struct spu_reg128) },
	{ "lslr", NULL, spufs_lslr_get, 19 },
	{ "decr", NULL, spufs_decr_get, 19 },
	{ "decr_status", NULL, spufs_decr_status_get, 19 },
	{ "mem", __spufs_mem_read, NULL, LS_SIZE, },
	{ "signal1", __spufs_signal1_read, NULL, sizeof(u32) },
	{ "signal1_type", NULL, spufs_signal1_type_get, 19 },
	{ "signal2", __spufs_signal2_read, NULL, sizeof(u32) },
	{ "signal2_type", NULL, spufs_signal2_type_get, 19 },
	{ "event_mask", NULL, spufs_event_mask_get, 19 },
	{ "event_status", NULL, spufs_event_status_get, 19 },
	{ "mbox_info", __spufs_mbox_info_read, NULL, sizeof(u32) },
	{ "ibox_info", __spufs_ibox_info_read, NULL, sizeof(u32) },
	{ "wbox_info", __spufs_wbox_info_read, NULL, 4 * sizeof(u32)},
	{ "dma_info", __spufs_dma_info_read, NULL, sizeof(struct spu_dma_info)},
	{ "proxydma_info", __spufs_proxydma_info_read,
			   NULL, sizeof(struct spu_proxydma_info)},
	{ "object-id", NULL, spufs_object_id_get, 19 },
	{ "npc", NULL, spufs_npc_get, 19 },
	{ NULL },
};