]> git.proxmox.com Git - mirror_ubuntu-bionic-kernel.git/blame - arch/powerpc/platforms/cell/spufs/file.c
Replace <asm/uaccess.h> with <linux/uaccess.h> globally
[mirror_ubuntu-bionic-kernel.git] / arch / powerpc / platforms / cell / spufs / file.c
CommitLineData
67207b96
AB
1/*
2 * SPU file system -- file contents
3 *
4 * (C) Copyright IBM Deutschland Entwicklung GmbH 2005
5 *
6 * Author: Arnd Bergmann <arndb@de.ibm.com>
7 *
8 * This program is free software; you can redistribute it and/or modify
9 * it under the terms of the GNU General Public License as published by
10 * the Free Software Foundation; either version 2, or (at your option)
11 * any later version.
12 *
13 * This program is distributed in the hope that it will be useful,
14 * but WITHOUT ANY WARRANTY; without even the implied warranty of
15 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
16 * GNU General Public License for more details.
17 *
18 * You should have received a copy of the GNU General Public License
19 * along with this program; if not, write to the Free Software
20 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
21 */
22
a33a7d73
AB
23#undef DEBUG
24
67207b96
AB
25#include <linux/fs.h>
26#include <linux/ioctl.h>
4b16f8e2 27#include <linux/export.h>
d88cfffa 28#include <linux/pagemap.h>
67207b96 29#include <linux/poll.h>
5110459f 30#include <linux/ptrace.h>
cbe709c1 31#include <linux/seq_file.h>
5a0e3ad6 32#include <linux/slab.h>
67207b96
AB
33
34#include <asm/io.h>
dfe1e09f 35#include <asm/time.h>
67207b96 36#include <asm/spu.h>
b9e3bd77 37#include <asm/spu_info.h>
7c0f6ba6 38#include <linux/uaccess.h>
67207b96
AB
39
40#include "spufs.h"
ae142e0c 41#include "sputrace.h"
67207b96 42
27d5bf2a
BH
43#define SPUFS_MMAP_4K (PAGE_SIZE == 0x1000)
44
197b1a82
CH
45/* Simple attribute files */
46struct spufs_attr {
47 int (*get)(void *, u64 *);
48 int (*set)(void *, u64);
49 char get_buf[24]; /* enough to store a u64 and "\n\0" */
50 char set_buf[24];
51 void *data;
52 const char *fmt; /* format for read operation */
53 struct mutex mutex; /* protects access to these buffers */
54};
55
56static int spufs_attr_open(struct inode *inode, struct file *file,
57 int (*get)(void *, u64 *), int (*set)(void *, u64),
58 const char *fmt)
59{
60 struct spufs_attr *attr;
61
62 attr = kmalloc(sizeof(*attr), GFP_KERNEL);
63 if (!attr)
64 return -ENOMEM;
65
66 attr->get = get;
67 attr->set = set;
68 attr->data = inode->i_private;
69 attr->fmt = fmt;
70 mutex_init(&attr->mutex);
71 file->private_data = attr;
72
73 return nonseekable_open(inode, file);
74}
75
76static int spufs_attr_release(struct inode *inode, struct file *file)
77{
78 kfree(file->private_data);
79 return 0;
80}
81
82static ssize_t spufs_attr_read(struct file *file, char __user *buf,
83 size_t len, loff_t *ppos)
84{
85 struct spufs_attr *attr;
86 size_t size;
87 ssize_t ret;
88
89 attr = file->private_data;
90 if (!attr->get)
91 return -EACCES;
92
93 ret = mutex_lock_interruptible(&attr->mutex);
94 if (ret)
95 return ret;
96
97 if (*ppos) { /* continued read */
98 size = strlen(attr->get_buf);
99 } else { /* first read */
100 u64 val;
101 ret = attr->get(attr->data, &val);
102 if (ret)
103 goto out;
104
105 size = scnprintf(attr->get_buf, sizeof(attr->get_buf),
106 attr->fmt, (unsigned long long)val);
107 }
108
109 ret = simple_read_from_buffer(buf, len, ppos, attr->get_buf, size);
110out:
111 mutex_unlock(&attr->mutex);
112 return ret;
113}
114
115static ssize_t spufs_attr_write(struct file *file, const char __user *buf,
116 size_t len, loff_t *ppos)
117{
118 struct spufs_attr *attr;
119 u64 val;
120 size_t size;
121 ssize_t ret;
122
123 attr = file->private_data;
124 if (!attr->set)
125 return -EACCES;
126
127 ret = mutex_lock_interruptible(&attr->mutex);
128 if (ret)
129 return ret;
130
131 ret = -EFAULT;
132 size = min(sizeof(attr->set_buf) - 1, len);
133 if (copy_from_user(attr->set_buf, buf, size))
134 goto out;
135
136 ret = len; /* claim we got the whole input */
137 attr->set_buf[size] = '\0';
138 val = simple_strtol(attr->set_buf, NULL, 0);
139 attr->set(attr->data, val);
140out:
141 mutex_unlock(&attr->mutex);
142 return ret;
143}
144
145#define DEFINE_SPUFS_SIMPLE_ATTRIBUTE(__fops, __get, __set, __fmt) \
146static int __fops ## _open(struct inode *inode, struct file *file) \
147{ \
148 __simple_attr_check_format(__fmt, 0ull); \
149 return spufs_attr_open(inode, file, __get, __set, __fmt); \
150} \
828c0950 151static const struct file_operations __fops = { \
197b1a82
CH
152 .open = __fops ## _open, \
153 .release = spufs_attr_release, \
154 .read = spufs_attr_read, \
155 .write = spufs_attr_write, \
fc15351d 156 .llseek = generic_file_llseek, \
197b1a82
CH
157};
158
cbe709c1 159
67207b96
AB
160static int
161spufs_mem_open(struct inode *inode, struct file *file)
162{
163 struct spufs_inode_info *i = SPUFS_I(inode);
6df10a82 164 struct spu_context *ctx = i->i_ctx;
43c2bbd9 165
47d3a5fa 166 mutex_lock(&ctx->mapping_lock);
6df10a82 167 file->private_data = ctx;
43c2bbd9
CH
168 if (!i->i_openers++)
169 ctx->local_store = inode->i_mapping;
47d3a5fa 170 mutex_unlock(&ctx->mapping_lock);
43c2bbd9
CH
171 return 0;
172}
173
174static int
175spufs_mem_release(struct inode *inode, struct file *file)
176{
177 struct spufs_inode_info *i = SPUFS_I(inode);
178 struct spu_context *ctx = i->i_ctx;
179
47d3a5fa 180 mutex_lock(&ctx->mapping_lock);
43c2bbd9
CH
181 if (!--i->i_openers)
182 ctx->local_store = NULL;
47d3a5fa 183 mutex_unlock(&ctx->mapping_lock);
67207b96
AB
184 return 0;
185}
186
bf1ab978
DGM
187static ssize_t
188__spufs_mem_read(struct spu_context *ctx, char __user *buffer,
189 size_t size, loff_t *pos)
190{
191 char *local_store = ctx->ops->get_ls(ctx);
192 return simple_read_from_buffer(buffer, size, pos, local_store,
193 LS_SIZE);
194}
195
67207b96
AB
196static ssize_t
197spufs_mem_read(struct file *file, char __user *buffer,
198 size_t size, loff_t *pos)
199{
bf1ab978 200 struct spu_context *ctx = file->private_data;
aa0ed2bd 201 ssize_t ret;
67207b96 202
c9101bdb
CH
203 ret = spu_acquire(ctx);
204 if (ret)
205 return ret;
bf1ab978 206 ret = __spufs_mem_read(ctx, buffer, size, pos);
8b3d6663 207 spu_release(ctx);
c9101bdb 208
67207b96
AB
209 return ret;
210}
211
212static ssize_t
213spufs_mem_write(struct file *file, const char __user *buffer,
aa0ed2bd 214 size_t size, loff_t *ppos)
67207b96
AB
215{
216 struct spu_context *ctx = file->private_data;
8b3d6663 217 char *local_store;
aa0ed2bd 218 loff_t pos = *ppos;
8b3d6663 219 int ret;
67207b96 220
aa0ed2bd 221 if (pos > LS_SIZE)
67207b96 222 return -EFBIG;
8b3d6663 223
c9101bdb
CH
224 ret = spu_acquire(ctx);
225 if (ret)
226 return ret;
227
8b3d6663 228 local_store = ctx->ops->get_ls(ctx);
63c3b9d7 229 size = simple_write_to_buffer(local_store, LS_SIZE, ppos, buffer, size);
8b3d6663 230 spu_release(ctx);
aa0ed2bd 231
aa0ed2bd 232 return size;
67207b96
AB
233}
234
b1e2270f
NP
235static int
236spufs_mem_mmap_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
8b3d6663 237{
f1fa74f4 238 struct spu_context *ctx = vma->vm_file->private_data;
b1e2270f
NP
239 unsigned long pfn, offset;
240
b1e2270f 241 offset = vmf->pgoff << PAGE_SHIFT;
128b8546 242 if (offset >= LS_SIZE)
b1e2270f 243 return VM_FAULT_SIGBUS;
128b8546 244
b1e2270f 245 pr_debug("spufs_mem_mmap_fault address=0x%lx, offset=0x%lx\n",
1a29d85e 246 vmf->address, offset);
f1fa74f4 247
c9101bdb 248 if (spu_acquire(ctx))
b1e2270f 249 return VM_FAULT_NOPAGE;
8b3d6663 250
ac91cb8d 251 if (ctx->state == SPU_STATE_SAVED) {
64b3d0e8 252 vma->vm_page_prot = pgprot_cached(vma->vm_page_prot);
78bde53e 253 pfn = vmalloc_to_pfn(ctx->csa.lscsa->ls + offset);
ac91cb8d 254 } else {
64b3d0e8 255 vma->vm_page_prot = pgprot_noncached_wc(vma->vm_page_prot);
78bde53e 256 pfn = (ctx->spu->local_store_phys + offset) >> PAGE_SHIFT;
ac91cb8d 257 }
1a29d85e 258 vm_insert_pfn(vma, vmf->address, pfn);
8b3d6663 259
78bde53e 260 spu_release(ctx);
8b3d6663 261
b1e2270f 262 return VM_FAULT_NOPAGE;
8b3d6663
AB
263}
264
a352894d
BH
265static int spufs_mem_mmap_access(struct vm_area_struct *vma,
266 unsigned long address,
267 void *buf, int len, int write)
268{
269 struct spu_context *ctx = vma->vm_file->private_data;
270 unsigned long offset = address - vma->vm_start;
271 char *local_store;
272
273 if (write && !(vma->vm_flags & VM_WRITE))
274 return -EACCES;
275 if (spu_acquire(ctx))
276 return -EINTR;
277 if ((offset + len) > vma->vm_end)
278 len = vma->vm_end - offset;
279 local_store = ctx->ops->get_ls(ctx);
280 if (write)
281 memcpy_toio(local_store + offset, buf, len);
282 else
283 memcpy_fromio(buf, local_store + offset, len);
284 spu_release(ctx);
285 return len;
286}
78bde53e 287
f0f37e2f 288static const struct vm_operations_struct spufs_mem_mmap_vmops = {
b1e2270f 289 .fault = spufs_mem_mmap_fault,
a352894d 290 .access = spufs_mem_mmap_access,
8b3d6663
AB
291};
292
f1fa74f4 293static int spufs_mem_mmap(struct file *file, struct vm_area_struct *vma)
67207b96 294{
8b3d6663
AB
295 if (!(vma->vm_flags & VM_SHARED))
296 return -EINVAL;
67207b96 297
78bde53e 298 vma->vm_flags |= VM_IO | VM_PFNMAP;
64b3d0e8 299 vma->vm_page_prot = pgprot_noncached_wc(vma->vm_page_prot);
8b3d6663
AB
300
301 vma->vm_ops = &spufs_mem_mmap_vmops;
67207b96
AB
302 return 0;
303}
304
5dfe4c96 305static const struct file_operations spufs_mem_fops = {
7022543e
JK
306 .open = spufs_mem_open,
307 .release = spufs_mem_release,
308 .read = spufs_mem_read,
309 .write = spufs_mem_write,
310 .llseek = generic_file_llseek,
311 .mmap = spufs_mem_mmap,
8b3d6663
AB
312};
313
b1e2270f
NP
314static int spufs_ps_fault(struct vm_area_struct *vma,
315 struct vm_fault *vmf,
78bde53e 316 unsigned long ps_offs,
27d5bf2a 317 unsigned long ps_size)
6df10a82 318{
6df10a82 319 struct spu_context *ctx = vma->vm_file->private_data;
b1e2270f 320 unsigned long area, offset = vmf->pgoff << PAGE_SHIFT;
eebead5b 321 int ret = 0;
6df10a82 322
b1e2270f 323 spu_context_nospu_trace(spufs_ps_fault__enter, ctx);
038200cf 324
27d5bf2a 325 if (offset >= ps_size)
b1e2270f 326 return VM_FAULT_SIGBUS;
6df10a82 327
60657263
JK
328 if (fatal_signal_pending(current))
329 return VM_FAULT_SIGBUS;
330
d5883137
JK
331 /*
332 * Because we release the mmap_sem, the context may be destroyed while
333 * we're in spu_wait. Grab an extra reference so it isn't destroyed
334 * in the meantime.
335 */
336 get_spu_context(ctx);
337
33bfd7a7
AB
338 /*
339 * We have to wait for context to be loaded before we have
340 * pages to hand out to the user, but we don't want to wait
341 * with the mmap_sem held.
342 * It is possible to drop the mmap_sem here, but then we need
b1e2270f 343 * to return VM_FAULT_NOPAGE because the mappings may have
33bfd7a7 344 * hanged.
78bde53e 345 */
c9101bdb 346 if (spu_acquire(ctx))
d5883137 347 goto refault;
c9101bdb 348
33bfd7a7
AB
349 if (ctx->state == SPU_STATE_SAVED) {
350 up_read(&current->mm->mmap_sem);
b1e2270f 351 spu_context_nospu_trace(spufs_ps_fault__sleep, ctx);
eebead5b 352 ret = spufs_wait(ctx->run_wq, ctx->state == SPU_STATE_RUNNABLE);
b1e2270f 353 spu_context_trace(spufs_ps_fault__wake, ctx, ctx->spu);
33bfd7a7 354 down_read(&current->mm->mmap_sem);
c9101bdb
CH
355 } else {
356 area = ctx->spu->problem_phys + ps_offs;
1a29d85e 357 vm_insert_pfn(vma, vmf->address, (area + offset) >> PAGE_SHIFT);
b1e2270f 358 spu_context_trace(spufs_ps_fault__insert, ctx, ctx->spu);
33bfd7a7 359 }
6df10a82 360
eebead5b
CH
361 if (!ret)
362 spu_release(ctx);
d5883137
JK
363
364refault:
365 put_spu_context(ctx);
b1e2270f 366 return VM_FAULT_NOPAGE;
6df10a82
MN
367}
368
27d5bf2a 369#if SPUFS_MMAP_4K
b1e2270f
NP
370static int spufs_cntl_mmap_fault(struct vm_area_struct *vma,
371 struct vm_fault *vmf)
6df10a82 372{
87ff6090 373 return spufs_ps_fault(vma, vmf, 0x4000, SPUFS_CNTL_MAP_SIZE);
6df10a82
MN
374}
375
f0f37e2f 376static const struct vm_operations_struct spufs_cntl_mmap_vmops = {
b1e2270f 377 .fault = spufs_cntl_mmap_fault,
6df10a82
MN
378};
379
380/*
381 * mmap support for problem state control area [0x4000 - 0x4fff].
6df10a82
MN
382 */
383static int spufs_cntl_mmap(struct file *file, struct vm_area_struct *vma)
384{
385 if (!(vma->vm_flags & VM_SHARED))
386 return -EINVAL;
387
78bde53e 388 vma->vm_flags |= VM_IO | VM_PFNMAP;
64b3d0e8 389 vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
6df10a82
MN
390
391 vma->vm_ops = &spufs_cntl_mmap_vmops;
392 return 0;
393}
27d5bf2a
BH
394#else /* SPUFS_MMAP_4K */
395#define spufs_cntl_mmap NULL
396#endif /* !SPUFS_MMAP_4K */
6df10a82 397
197b1a82 398static int spufs_cntl_get(void *data, u64 *val)
6df10a82 399{
e1dbff2b 400 struct spu_context *ctx = data;
c9101bdb 401 int ret;
6df10a82 402
c9101bdb
CH
403 ret = spu_acquire(ctx);
404 if (ret)
405 return ret;
197b1a82 406 *val = ctx->ops->status_read(ctx);
e1dbff2b
AB
407 spu_release(ctx);
408
197b1a82 409 return 0;
6df10a82
MN
410}
411
197b1a82 412static int spufs_cntl_set(void *data, u64 val)
6df10a82 413{
e1dbff2b 414 struct spu_context *ctx = data;
c9101bdb 415 int ret;
e1dbff2b 416
c9101bdb
CH
417 ret = spu_acquire(ctx);
418 if (ret)
419 return ret;
e1dbff2b
AB
420 ctx->ops->runcntl_write(ctx, val);
421 spu_release(ctx);
197b1a82
CH
422
423 return 0;
6df10a82
MN
424}
425
e1dbff2b 426static int spufs_cntl_open(struct inode *inode, struct file *file)
6df10a82 427{
e1dbff2b
AB
428 struct spufs_inode_info *i = SPUFS_I(inode);
429 struct spu_context *ctx = i->i_ctx;
430
47d3a5fa 431 mutex_lock(&ctx->mapping_lock);
e1dbff2b 432 file->private_data = ctx;
43c2bbd9
CH
433 if (!i->i_openers++)
434 ctx->cntl = inode->i_mapping;
47d3a5fa 435 mutex_unlock(&ctx->mapping_lock);
8b88b099 436 return simple_attr_open(inode, file, spufs_cntl_get,
e1dbff2b 437 spufs_cntl_set, "0x%08lx");
6df10a82
MN
438}
439
43c2bbd9
CH
440static int
441spufs_cntl_release(struct inode *inode, struct file *file)
442{
443 struct spufs_inode_info *i = SPUFS_I(inode);
444 struct spu_context *ctx = i->i_ctx;
445
74bedc4d 446 simple_attr_release(inode, file);
43c2bbd9 447
47d3a5fa 448 mutex_lock(&ctx->mapping_lock);
43c2bbd9
CH
449 if (!--i->i_openers)
450 ctx->cntl = NULL;
47d3a5fa 451 mutex_unlock(&ctx->mapping_lock);
43c2bbd9
CH
452 return 0;
453}
454
5dfe4c96 455static const struct file_operations spufs_cntl_fops = {
6df10a82 456 .open = spufs_cntl_open,
43c2bbd9 457 .release = spufs_cntl_release,
8b88b099
CH
458 .read = simple_attr_read,
459 .write = simple_attr_write,
fc15351d 460 .llseek = generic_file_llseek,
6df10a82 461 .mmap = spufs_cntl_mmap,
6df10a82
MN
462};
463
8b3d6663
AB
464static int
465spufs_regs_open(struct inode *inode, struct file *file)
466{
467 struct spufs_inode_info *i = SPUFS_I(inode);
468 file->private_data = i->i_ctx;
469 return 0;
470}
471
bf1ab978
DGM
472static ssize_t
473__spufs_regs_read(struct spu_context *ctx, char __user *buffer,
474 size_t size, loff_t *pos)
475{
476 struct spu_lscsa *lscsa = ctx->csa.lscsa;
477 return simple_read_from_buffer(buffer, size, pos,
478 lscsa->gprs, sizeof lscsa->gprs);
479}
480
8b3d6663
AB
481static ssize_t
482spufs_regs_read(struct file *file, char __user *buffer,
483 size_t size, loff_t *pos)
484{
8b3d6663 485 int ret;
bf1ab978 486 struct spu_context *ctx = file->private_data;
8b3d6663 487
f027faa2
JK
488 /* pre-check for file position: if we'd return EOF, there's no point
489 * causing a deschedule */
490 if (*pos >= sizeof(ctx->csa.lscsa->gprs))
491 return 0;
492
c9101bdb
CH
493 ret = spu_acquire_saved(ctx);
494 if (ret)
495 return ret;
bf1ab978 496 ret = __spufs_regs_read(ctx, buffer, size, pos);
27b1ea09 497 spu_release_saved(ctx);
8b3d6663
AB
498 return ret;
499}
500
501static ssize_t
502spufs_regs_write(struct file *file, const char __user *buffer,
503 size_t size, loff_t *pos)
504{
505 struct spu_context *ctx = file->private_data;
506 struct spu_lscsa *lscsa = ctx->csa.lscsa;
507 int ret;
508
d219889b 509 if (*pos >= sizeof(lscsa->gprs))
8b3d6663 510 return -EFBIG;
d219889b 511
c9101bdb
CH
512 ret = spu_acquire_saved(ctx);
513 if (ret)
514 return ret;
8b3d6663 515
63c3b9d7
AM
516 size = simple_write_to_buffer(lscsa->gprs, sizeof(lscsa->gprs), pos,
517 buffer, size);
8b3d6663 518
27b1ea09 519 spu_release_saved(ctx);
63c3b9d7 520 return size;
8b3d6663
AB
521}
522
5dfe4c96 523static const struct file_operations spufs_regs_fops = {
8b3d6663
AB
524 .open = spufs_regs_open,
525 .read = spufs_regs_read,
526 .write = spufs_regs_write,
67207b96
AB
527 .llseek = generic_file_llseek,
528};
529
bf1ab978
DGM
530static ssize_t
531__spufs_fpcr_read(struct spu_context *ctx, char __user * buffer,
532 size_t size, loff_t * pos)
533{
534 struct spu_lscsa *lscsa = ctx->csa.lscsa;
535 return simple_read_from_buffer(buffer, size, pos,
536 &lscsa->fpcr, sizeof(lscsa->fpcr));
537}
538
8b3d6663
AB
539static ssize_t
540spufs_fpcr_read(struct file *file, char __user * buffer,
541 size_t size, loff_t * pos)
542{
8b3d6663 543 int ret;
bf1ab978 544 struct spu_context *ctx = file->private_data;
8b3d6663 545
c9101bdb
CH
546 ret = spu_acquire_saved(ctx);
547 if (ret)
548 return ret;
bf1ab978 549 ret = __spufs_fpcr_read(ctx, buffer, size, pos);
27b1ea09 550 spu_release_saved(ctx);
8b3d6663
AB
551 return ret;
552}
553
554static ssize_t
555spufs_fpcr_write(struct file *file, const char __user * buffer,
556 size_t size, loff_t * pos)
557{
558 struct spu_context *ctx = file->private_data;
559 struct spu_lscsa *lscsa = ctx->csa.lscsa;
560 int ret;
561
d219889b 562 if (*pos >= sizeof(lscsa->fpcr))
8b3d6663 563 return -EFBIG;
8b3d6663 564
c9101bdb
CH
565 ret = spu_acquire_saved(ctx);
566 if (ret)
567 return ret;
8b3d6663 568
63c3b9d7
AM
569 size = simple_write_to_buffer(&lscsa->fpcr, sizeof(lscsa->fpcr), pos,
570 buffer, size);
8b3d6663 571
27b1ea09 572 spu_release_saved(ctx);
63c3b9d7 573 return size;
8b3d6663
AB
574}
575
5dfe4c96 576static const struct file_operations spufs_fpcr_fops = {
8b3d6663
AB
577 .open = spufs_regs_open,
578 .read = spufs_fpcr_read,
579 .write = spufs_fpcr_write,
580 .llseek = generic_file_llseek,
581};
582
67207b96
AB
583/* generic open function for all pipe-like files */
584static int spufs_pipe_open(struct inode *inode, struct file *file)
585{
586 struct spufs_inode_info *i = SPUFS_I(inode);
587 file->private_data = i->i_ctx;
588
589 return nonseekable_open(inode, file);
590}
591
cdcc89bb
AB
592/*
593 * Read as many bytes from the mailbox as possible, until
594 * one of the conditions becomes true:
595 *
596 * - no more data available in the mailbox
597 * - end of the user provided buffer
598 * - end of the mapped area
599 */
67207b96
AB
600static ssize_t spufs_mbox_read(struct file *file, char __user *buf,
601 size_t len, loff_t *pos)
602{
8b3d6663 603 struct spu_context *ctx = file->private_data;
cdcc89bb
AB
604 u32 mbox_data, __user *udata;
605 ssize_t count;
67207b96
AB
606
607 if (len < 4)
608 return -EINVAL;
609
cdcc89bb
AB
610 if (!access_ok(VERIFY_WRITE, buf, len))
611 return -EFAULT;
612
613 udata = (void __user *)buf;
614
c9101bdb
CH
615 count = spu_acquire(ctx);
616 if (count)
617 return count;
618
274cef5e 619 for (count = 0; (count + 4) <= len; count += 4, udata++) {
cdcc89bb
AB
620 int ret;
621 ret = ctx->ops->mbox_read(ctx, &mbox_data);
622 if (ret == 0)
623 break;
624
625 /*
626 * at the end of the mapped area, we can fault
627 * but still need to return the data we have
628 * read successfully so far.
629 */
630 ret = __put_user(mbox_data, udata);
631 if (ret) {
632 if (!count)
633 count = -EFAULT;
634 break;
635 }
636 }
8b3d6663 637 spu_release(ctx);
67207b96 638
cdcc89bb
AB
639 if (!count)
640 count = -EAGAIN;
67207b96 641
cdcc89bb 642 return count;
67207b96
AB
643}
644
5dfe4c96 645static const struct file_operations spufs_mbox_fops = {
67207b96
AB
646 .open = spufs_pipe_open,
647 .read = spufs_mbox_read,
fc15351d 648 .llseek = no_llseek,
67207b96
AB
649};
650
651static ssize_t spufs_mbox_stat_read(struct file *file, char __user *buf,
652 size_t len, loff_t *pos)
653{
8b3d6663 654 struct spu_context *ctx = file->private_data;
c9101bdb 655 ssize_t ret;
67207b96
AB
656 u32 mbox_stat;
657
658 if (len < 4)
659 return -EINVAL;
660
c9101bdb
CH
661 ret = spu_acquire(ctx);
662 if (ret)
663 return ret;
8b3d6663
AB
664
665 mbox_stat = ctx->ops->mbox_stat_read(ctx) & 0xff;
666
667 spu_release(ctx);
67207b96
AB
668
669 if (copy_to_user(buf, &mbox_stat, sizeof mbox_stat))
670 return -EFAULT;
671
672 return 4;
673}
674
5dfe4c96 675static const struct file_operations spufs_mbox_stat_fops = {
67207b96
AB
676 .open = spufs_pipe_open,
677 .read = spufs_mbox_stat_read,
fc15351d 678 .llseek = no_llseek,
67207b96
AB
679};
680
681/* low-level ibox access function */
8b3d6663 682size_t spu_ibox_read(struct spu_context *ctx, u32 *data)
67207b96 683{
8b3d6663
AB
684 return ctx->ops->ibox_read(ctx, data);
685}
67207b96 686
8b3d6663
AB
687static int spufs_ibox_fasync(int fd, struct file *file, int on)
688{
689 struct spu_context *ctx = file->private_data;
67207b96 690
8b3d6663 691 return fasync_helper(fd, file, on, &ctx->ibox_fasync);
67207b96 692}
67207b96 693
8b3d6663
AB
694/* interrupt-level ibox callback function. */
695void spufs_ibox_callback(struct spu *spu)
67207b96 696{
8b3d6663
AB
697 struct spu_context *ctx = spu->ctx;
698
e65c2f6f
LB
699 if (!ctx)
700 return;
701
8b3d6663
AB
702 wake_up_all(&ctx->ibox_wq);
703 kill_fasync(&ctx->ibox_fasync, SIGIO, POLLIN);
67207b96
AB
704}
705
cdcc89bb
AB
706/*
707 * Read as many bytes from the interrupt mailbox as possible, until
708 * one of the conditions becomes true:
709 *
710 * - no more data available in the mailbox
711 * - end of the user provided buffer
712 * - end of the mapped area
713 *
714 * If the file is opened without O_NONBLOCK, we wait here until
715 * any data is available, but return when we have been able to
716 * read something.
717 */
67207b96
AB
718static ssize_t spufs_ibox_read(struct file *file, char __user *buf,
719 size_t len, loff_t *pos)
720{
8b3d6663 721 struct spu_context *ctx = file->private_data;
cdcc89bb
AB
722 u32 ibox_data, __user *udata;
723 ssize_t count;
67207b96
AB
724
725 if (len < 4)
726 return -EINVAL;
727
cdcc89bb
AB
728 if (!access_ok(VERIFY_WRITE, buf, len))
729 return -EFAULT;
730
731 udata = (void __user *)buf;
732
c9101bdb
CH
733 count = spu_acquire(ctx);
734 if (count)
eebead5b 735 goto out;
67207b96 736
cdcc89bb
AB
737 /* wait only for the first element */
738 count = 0;
67207b96 739 if (file->f_flags & O_NONBLOCK) {
eebead5b 740 if (!spu_ibox_read(ctx, &ibox_data)) {
cdcc89bb 741 count = -EAGAIN;
eebead5b
CH
742 goto out_unlock;
743 }
67207b96 744 } else {
cdcc89bb 745 count = spufs_wait(ctx->ibox_wq, spu_ibox_read(ctx, &ibox_data));
eebead5b
CH
746 if (count)
747 goto out;
67207b96
AB
748 }
749
cdcc89bb
AB
750 /* if we can't write at all, return -EFAULT */
751 count = __put_user(ibox_data, udata);
752 if (count)
eebead5b 753 goto out_unlock;
8b3d6663 754
cdcc89bb
AB
755 for (count = 4, udata++; (count + 4) <= len; count += 4, udata++) {
756 int ret;
757 ret = ctx->ops->ibox_read(ctx, &ibox_data);
758 if (ret == 0)
759 break;
760 /*
761 * at the end of the mapped area, we can fault
762 * but still need to return the data we have
763 * read successfully so far.
764 */
765 ret = __put_user(ibox_data, udata);
766 if (ret)
767 break;
768 }
67207b96 769
eebead5b 770out_unlock:
cdcc89bb 771 spu_release(ctx);
eebead5b 772out:
cdcc89bb 773 return count;
67207b96
AB
774}
775
776static unsigned int spufs_ibox_poll(struct file *file, poll_table *wait)
777{
8b3d6663 778 struct spu_context *ctx = file->private_data;
67207b96
AB
779 unsigned int mask;
780
8b3d6663 781 poll_wait(file, &ctx->ibox_wq, wait);
67207b96 782
c9101bdb
CH
783 /*
784 * For now keep this uninterruptible and also ignore the rule
785 * that poll should not sleep. Will be fixed later.
786 */
787 mutex_lock(&ctx->state_mutex);
3a843d7c
AB
788 mask = ctx->ops->mbox_stat_poll(ctx, POLLIN | POLLRDNORM);
789 spu_release(ctx);
67207b96
AB
790
791 return mask;
792}
793
5dfe4c96 794static const struct file_operations spufs_ibox_fops = {
67207b96
AB
795 .open = spufs_pipe_open,
796 .read = spufs_ibox_read,
797 .poll = spufs_ibox_poll,
798 .fasync = spufs_ibox_fasync,
fc15351d 799 .llseek = no_llseek,
67207b96
AB
800};
801
802static ssize_t spufs_ibox_stat_read(struct file *file, char __user *buf,
803 size_t len, loff_t *pos)
804{
8b3d6663 805 struct spu_context *ctx = file->private_data;
c9101bdb 806 ssize_t ret;
67207b96
AB
807 u32 ibox_stat;
808
809 if (len < 4)
810 return -EINVAL;
811
c9101bdb
CH
812 ret = spu_acquire(ctx);
813 if (ret)
814 return ret;
8b3d6663
AB
815 ibox_stat = (ctx->ops->mbox_stat_read(ctx) >> 16) & 0xff;
816 spu_release(ctx);
67207b96
AB
817
818 if (copy_to_user(buf, &ibox_stat, sizeof ibox_stat))
819 return -EFAULT;
820
821 return 4;
822}
823
5dfe4c96 824static const struct file_operations spufs_ibox_stat_fops = {
67207b96
AB
825 .open = spufs_pipe_open,
826 .read = spufs_ibox_stat_read,
fc15351d 827 .llseek = no_llseek,
67207b96
AB
828};
829
830/* low-level mailbox write */
8b3d6663 831size_t spu_wbox_write(struct spu_context *ctx, u32 data)
67207b96 832{
8b3d6663
AB
833 return ctx->ops->wbox_write(ctx, data);
834}
67207b96 835
8b3d6663
AB
836static int spufs_wbox_fasync(int fd, struct file *file, int on)
837{
838 struct spu_context *ctx = file->private_data;
839 int ret;
67207b96 840
8b3d6663 841 ret = fasync_helper(fd, file, on, &ctx->wbox_fasync);
67207b96 842
67207b96
AB
843 return ret;
844}
67207b96 845
8b3d6663
AB
846/* interrupt-level wbox callback function. */
847void spufs_wbox_callback(struct spu *spu)
67207b96 848{
8b3d6663
AB
849 struct spu_context *ctx = spu->ctx;
850
e65c2f6f
LB
851 if (!ctx)
852 return;
853
8b3d6663
AB
854 wake_up_all(&ctx->wbox_wq);
855 kill_fasync(&ctx->wbox_fasync, SIGIO, POLLOUT);
67207b96
AB
856}
857
cdcc89bb
AB
858/*
859 * Write as many bytes to the interrupt mailbox as possible, until
860 * one of the conditions becomes true:
861 *
862 * - the mailbox is full
863 * - end of the user provided buffer
864 * - end of the mapped area
865 *
866 * If the file is opened without O_NONBLOCK, we wait here until
027dfac6 867 * space is available, but return when we have been able to
cdcc89bb
AB
868 * write something.
869 */
67207b96
AB
870static ssize_t spufs_wbox_write(struct file *file, const char __user *buf,
871 size_t len, loff_t *pos)
872{
8b3d6663 873 struct spu_context *ctx = file->private_data;
cdcc89bb
AB
874 u32 wbox_data, __user *udata;
875 ssize_t count;
67207b96
AB
876
877 if (len < 4)
878 return -EINVAL;
879
cdcc89bb
AB
880 udata = (void __user *)buf;
881 if (!access_ok(VERIFY_READ, buf, len))
882 return -EFAULT;
883
884 if (__get_user(wbox_data, udata))
67207b96
AB
885 return -EFAULT;
886
c9101bdb
CH
887 count = spu_acquire(ctx);
888 if (count)
eebead5b 889 goto out;
8b3d6663 890
cdcc89bb
AB
891 /*
892 * make sure we can at least write one element, by waiting
893 * in case of !O_NONBLOCK
894 */
895 count = 0;
67207b96 896 if (file->f_flags & O_NONBLOCK) {
eebead5b 897 if (!spu_wbox_write(ctx, wbox_data)) {
cdcc89bb 898 count = -EAGAIN;
eebead5b
CH
899 goto out_unlock;
900 }
67207b96 901 } else {
cdcc89bb 902 count = spufs_wait(ctx->wbox_wq, spu_wbox_write(ctx, wbox_data));
eebead5b
CH
903 if (count)
904 goto out;
67207b96
AB
905 }
906
8b3d6663 907
96de0e25 908 /* write as much as possible */
cdcc89bb
AB
909 for (count = 4, udata++; (count + 4) <= len; count += 4, udata++) {
910 int ret;
911 ret = __get_user(wbox_data, udata);
912 if (ret)
913 break;
914
915 ret = spu_wbox_write(ctx, wbox_data);
916 if (ret == 0)
917 break;
918 }
919
eebead5b 920out_unlock:
cdcc89bb 921 spu_release(ctx);
eebead5b 922out:
cdcc89bb 923 return count;
67207b96
AB
924}
925
926static unsigned int spufs_wbox_poll(struct file *file, poll_table *wait)
927{
8b3d6663 928 struct spu_context *ctx = file->private_data;
67207b96
AB
929 unsigned int mask;
930
8b3d6663 931 poll_wait(file, &ctx->wbox_wq, wait);
67207b96 932
c9101bdb
CH
933 /*
934 * For now keep this uninterruptible and also ignore the rule
935 * that poll should not sleep. Will be fixed later.
936 */
937 mutex_lock(&ctx->state_mutex);
3a843d7c
AB
938 mask = ctx->ops->mbox_stat_poll(ctx, POLLOUT | POLLWRNORM);
939 spu_release(ctx);
67207b96
AB
940
941 return mask;
942}
943
5dfe4c96 944static const struct file_operations spufs_wbox_fops = {
67207b96
AB
945 .open = spufs_pipe_open,
946 .write = spufs_wbox_write,
947 .poll = spufs_wbox_poll,
948 .fasync = spufs_wbox_fasync,
fc15351d 949 .llseek = no_llseek,
67207b96
AB
950};
951
952static ssize_t spufs_wbox_stat_read(struct file *file, char __user *buf,
953 size_t len, loff_t *pos)
954{
8b3d6663 955 struct spu_context *ctx = file->private_data;
c9101bdb 956 ssize_t ret;
67207b96
AB
957 u32 wbox_stat;
958
959 if (len < 4)
960 return -EINVAL;
961
c9101bdb
CH
962 ret = spu_acquire(ctx);
963 if (ret)
964 return ret;
8b3d6663
AB
965 wbox_stat = (ctx->ops->mbox_stat_read(ctx) >> 8) & 0xff;
966 spu_release(ctx);
67207b96
AB
967
968 if (copy_to_user(buf, &wbox_stat, sizeof wbox_stat))
969 return -EFAULT;
970
971 return 4;
972}
973
5dfe4c96 974static const struct file_operations spufs_wbox_stat_fops = {
67207b96
AB
975 .open = spufs_pipe_open,
976 .read = spufs_wbox_stat_read,
fc15351d 977 .llseek = no_llseek,
67207b96
AB
978};
979
6df10a82
MN
980static int spufs_signal1_open(struct inode *inode, struct file *file)
981{
982 struct spufs_inode_info *i = SPUFS_I(inode);
983 struct spu_context *ctx = i->i_ctx;
43c2bbd9 984
47d3a5fa 985 mutex_lock(&ctx->mapping_lock);
6df10a82 986 file->private_data = ctx;
43c2bbd9
CH
987 if (!i->i_openers++)
988 ctx->signal1 = inode->i_mapping;
47d3a5fa 989 mutex_unlock(&ctx->mapping_lock);
6df10a82
MN
990 return nonseekable_open(inode, file);
991}
992
43c2bbd9
CH
993static int
994spufs_signal1_release(struct inode *inode, struct file *file)
995{
996 struct spufs_inode_info *i = SPUFS_I(inode);
997 struct spu_context *ctx = i->i_ctx;
998
47d3a5fa 999 mutex_lock(&ctx->mapping_lock);
43c2bbd9
CH
1000 if (!--i->i_openers)
1001 ctx->signal1 = NULL;
47d3a5fa 1002 mutex_unlock(&ctx->mapping_lock);
43c2bbd9
CH
1003 return 0;
1004}
1005
bf1ab978 1006static ssize_t __spufs_signal1_read(struct spu_context *ctx, char __user *buf,
67207b96
AB
1007 size_t len, loff_t *pos)
1008{
17f88ceb 1009 int ret = 0;
67207b96
AB
1010 u32 data;
1011
67207b96
AB
1012 if (len < 4)
1013 return -EINVAL;
1014
17f88ceb
DGM
1015 if (ctx->csa.spu_chnlcnt_RW[3]) {
1016 data = ctx->csa.spu_chnldata_RW[3];
1017 ret = 4;
1018 }
8b3d6663 1019
17f88ceb
DGM
1020 if (!ret)
1021 goto out;
1022
67207b96
AB
1023 if (copy_to_user(buf, &data, 4))
1024 return -EFAULT;
1025
17f88ceb
DGM
1026out:
1027 return ret;
67207b96
AB
1028}
1029
bf1ab978
DGM
1030static ssize_t spufs_signal1_read(struct file *file, char __user *buf,
1031 size_t len, loff_t *pos)
1032{
1033 int ret;
1034 struct spu_context *ctx = file->private_data;
1035
c9101bdb
CH
1036 ret = spu_acquire_saved(ctx);
1037 if (ret)
1038 return ret;
bf1ab978 1039 ret = __spufs_signal1_read(ctx, buf, len, pos);
27b1ea09 1040 spu_release_saved(ctx);
bf1ab978
DGM
1041
1042 return ret;
1043}
1044
67207b96
AB
1045static ssize_t spufs_signal1_write(struct file *file, const char __user *buf,
1046 size_t len, loff_t *pos)
1047{
1048 struct spu_context *ctx;
c9101bdb 1049 ssize_t ret;
67207b96
AB
1050 u32 data;
1051
1052 ctx = file->private_data;
67207b96
AB
1053
1054 if (len < 4)
1055 return -EINVAL;
1056
1057 if (copy_from_user(&data, buf, 4))
1058 return -EFAULT;
1059
c9101bdb
CH
1060 ret = spu_acquire(ctx);
1061 if (ret)
1062 return ret;
8b3d6663
AB
1063 ctx->ops->signal1_write(ctx, data);
1064 spu_release(ctx);
67207b96
AB
1065
1066 return 4;
1067}
1068
b1e2270f
NP
1069static int
1070spufs_signal1_mmap_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
6df10a82 1071{
87ff6090
JK
1072#if SPUFS_SIGNAL_MAP_SIZE == 0x1000
1073 return spufs_ps_fault(vma, vmf, 0x14000, SPUFS_SIGNAL_MAP_SIZE);
1074#elif SPUFS_SIGNAL_MAP_SIZE == 0x10000
27d5bf2a
BH
1075 /* For 64k pages, both signal1 and signal2 can be used to mmap the whole
1076 * signal 1 and 2 area
1077 */
87ff6090 1078 return spufs_ps_fault(vma, vmf, 0x10000, SPUFS_SIGNAL_MAP_SIZE);
27d5bf2a
BH
1079#else
1080#error unsupported page size
1081#endif
6df10a82
MN
1082}
1083
f0f37e2f 1084static const struct vm_operations_struct spufs_signal1_mmap_vmops = {
b1e2270f 1085 .fault = spufs_signal1_mmap_fault,
6df10a82
MN
1086};
1087
1088static int spufs_signal1_mmap(struct file *file, struct vm_area_struct *vma)
1089{
1090 if (!(vma->vm_flags & VM_SHARED))
1091 return -EINVAL;
1092
78bde53e 1093 vma->vm_flags |= VM_IO | VM_PFNMAP;
64b3d0e8 1094 vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
6df10a82
MN
1095
1096 vma->vm_ops = &spufs_signal1_mmap_vmops;
1097 return 0;
1098}
6df10a82 1099
5dfe4c96 1100static const struct file_operations spufs_signal1_fops = {
6df10a82 1101 .open = spufs_signal1_open,
43c2bbd9 1102 .release = spufs_signal1_release,
67207b96
AB
1103 .read = spufs_signal1_read,
1104 .write = spufs_signal1_write,
6df10a82 1105 .mmap = spufs_signal1_mmap,
fc15351d 1106 .llseek = no_llseek,
67207b96
AB
1107};
1108
d054b36f
JK
1109static const struct file_operations spufs_signal1_nosched_fops = {
1110 .open = spufs_signal1_open,
1111 .release = spufs_signal1_release,
1112 .write = spufs_signal1_write,
1113 .mmap = spufs_signal1_mmap,
fc15351d 1114 .llseek = no_llseek,
d054b36f
JK
1115};
1116
6df10a82
MN
1117static int spufs_signal2_open(struct inode *inode, struct file *file)
1118{
1119 struct spufs_inode_info *i = SPUFS_I(inode);
1120 struct spu_context *ctx = i->i_ctx;
43c2bbd9 1121
47d3a5fa 1122 mutex_lock(&ctx->mapping_lock);
6df10a82 1123 file->private_data = ctx;
43c2bbd9
CH
1124 if (!i->i_openers++)
1125 ctx->signal2 = inode->i_mapping;
47d3a5fa 1126 mutex_unlock(&ctx->mapping_lock);
6df10a82
MN
1127 return nonseekable_open(inode, file);
1128}
1129
43c2bbd9
CH
1130static int
1131spufs_signal2_release(struct inode *inode, struct file *file)
1132{
1133 struct spufs_inode_info *i = SPUFS_I(inode);
1134 struct spu_context *ctx = i->i_ctx;
1135
47d3a5fa 1136 mutex_lock(&ctx->mapping_lock);
43c2bbd9
CH
1137 if (!--i->i_openers)
1138 ctx->signal2 = NULL;
47d3a5fa 1139 mutex_unlock(&ctx->mapping_lock);
43c2bbd9
CH
1140 return 0;
1141}
1142
bf1ab978 1143static ssize_t __spufs_signal2_read(struct spu_context *ctx, char __user *buf,
67207b96
AB
1144 size_t len, loff_t *pos)
1145{
17f88ceb 1146 int ret = 0;
67207b96
AB
1147 u32 data;
1148
67207b96
AB
1149 if (len < 4)
1150 return -EINVAL;
1151
17f88ceb
DGM
1152 if (ctx->csa.spu_chnlcnt_RW[4]) {
1153 data = ctx->csa.spu_chnldata_RW[4];
1154 ret = 4;
1155 }
8b3d6663 1156
17f88ceb
DGM
1157 if (!ret)
1158 goto out;
1159
67207b96
AB
1160 if (copy_to_user(buf, &data, 4))
1161 return -EFAULT;
1162
17f88ceb 1163out:
bf1ab978
DGM
1164 return ret;
1165}
1166
1167static ssize_t spufs_signal2_read(struct file *file, char __user *buf,
1168 size_t len, loff_t *pos)
1169{
1170 struct spu_context *ctx = file->private_data;
1171 int ret;
1172
c9101bdb
CH
1173 ret = spu_acquire_saved(ctx);
1174 if (ret)
1175 return ret;
bf1ab978 1176 ret = __spufs_signal2_read(ctx, buf, len, pos);
27b1ea09 1177 spu_release_saved(ctx);
bf1ab978
DGM
1178
1179 return ret;
67207b96
AB
1180}
1181
1182static ssize_t spufs_signal2_write(struct file *file, const char __user *buf,
1183 size_t len, loff_t *pos)
1184{
1185 struct spu_context *ctx;
c9101bdb 1186 ssize_t ret;
67207b96
AB
1187 u32 data;
1188
1189 ctx = file->private_data;
67207b96
AB
1190
1191 if (len < 4)
1192 return -EINVAL;
1193
1194 if (copy_from_user(&data, buf, 4))
1195 return -EFAULT;
1196
c9101bdb
CH
1197 ret = spu_acquire(ctx);
1198 if (ret)
1199 return ret;
8b3d6663
AB
1200 ctx->ops->signal2_write(ctx, data);
1201 spu_release(ctx);
67207b96
AB
1202
1203 return 4;
1204}
1205
27d5bf2a 1206#if SPUFS_MMAP_4K
b1e2270f
NP
1207static int
1208spufs_signal2_mmap_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
6df10a82 1209{
87ff6090
JK
1210#if SPUFS_SIGNAL_MAP_SIZE == 0x1000
1211 return spufs_ps_fault(vma, vmf, 0x1c000, SPUFS_SIGNAL_MAP_SIZE);
1212#elif SPUFS_SIGNAL_MAP_SIZE == 0x10000
27d5bf2a
BH
1213 /* For 64k pages, both signal1 and signal2 can be used to mmap the whole
1214 * signal 1 and 2 area
1215 */
87ff6090 1216 return spufs_ps_fault(vma, vmf, 0x10000, SPUFS_SIGNAL_MAP_SIZE);
27d5bf2a
BH
1217#else
1218#error unsupported page size
1219#endif
6df10a82
MN
1220}
1221
f0f37e2f 1222static const struct vm_operations_struct spufs_signal2_mmap_vmops = {
b1e2270f 1223 .fault = spufs_signal2_mmap_fault,
6df10a82
MN
1224};
1225
1226static int spufs_signal2_mmap(struct file *file, struct vm_area_struct *vma)
1227{
1228 if (!(vma->vm_flags & VM_SHARED))
1229 return -EINVAL;
1230
78bde53e 1231 vma->vm_flags |= VM_IO | VM_PFNMAP;
64b3d0e8 1232 vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
6df10a82
MN
1233
1234 vma->vm_ops = &spufs_signal2_mmap_vmops;
1235 return 0;
1236}
27d5bf2a
BH
1237#else /* SPUFS_MMAP_4K */
1238#define spufs_signal2_mmap NULL
1239#endif /* !SPUFS_MMAP_4K */
6df10a82 1240
5dfe4c96 1241static const struct file_operations spufs_signal2_fops = {
6df10a82 1242 .open = spufs_signal2_open,
43c2bbd9 1243 .release = spufs_signal2_release,
67207b96
AB
1244 .read = spufs_signal2_read,
1245 .write = spufs_signal2_write,
6df10a82 1246 .mmap = spufs_signal2_mmap,
fc15351d 1247 .llseek = no_llseek,
67207b96
AB
1248};
1249
d054b36f
JK
1250static const struct file_operations spufs_signal2_nosched_fops = {
1251 .open = spufs_signal2_open,
1252 .release = spufs_signal2_release,
1253 .write = spufs_signal2_write,
1254 .mmap = spufs_signal2_mmap,
fc15351d 1255 .llseek = no_llseek,
d054b36f
JK
1256};
1257
/*
 * This is a wrapper around DEFINE_SIMPLE_ATTRIBUTE which does the
 * work of acquiring (or not) the SPU context before calling through
 * to the actual get routine. The set routine is called directly.
 */
#define SPU_ATTR_NOACQUIRE	0
#define SPU_ATTR_ACQUIRE	1
#define SPU_ATTR_ACQUIRE_SAVED	2

#define DEFINE_SPUFS_ATTRIBUTE(__name, __get, __set, __fmt, __acquire)	\
static int __##__get(void *data, u64 *val)				\
{									\
	struct spu_context *ctx = data;					\
	int rc;								\
									\
	switch (__acquire) {						\
	case SPU_ATTR_ACQUIRE:						\
		rc = spu_acquire(ctx);					\
		if (rc)							\
			return rc;					\
		*val = __get(ctx);					\
		spu_release(ctx);					\
		break;							\
	case SPU_ATTR_ACQUIRE_SAVED:					\
		rc = spu_acquire_saved(ctx);				\
		if (rc)							\
			return rc;					\
		*val = __get(ctx);					\
		spu_release_saved(ctx);					\
		break;							\
	default:							\
		*val = __get(ctx);					\
		break;							\
	}								\
									\
	return 0;							\
}									\
DEFINE_SPUFS_SIMPLE_ATTRIBUTE(__name, __##__get, __set, __fmt);
104f0cc2 1291
197b1a82 1292static int spufs_signal1_type_set(void *data, u64 val)
67207b96
AB
1293{
1294 struct spu_context *ctx = data;
c9101bdb 1295 int ret;
67207b96 1296
c9101bdb
CH
1297 ret = spu_acquire(ctx);
1298 if (ret)
1299 return ret;
8b3d6663
AB
1300 ctx->ops->signal1_type_set(ctx, val);
1301 spu_release(ctx);
197b1a82
CH
1302
1303 return 0;
67207b96
AB
1304}
1305
104f0cc2 1306static u64 spufs_signal1_type_get(struct spu_context *ctx)
bf1ab978 1307{
bf1ab978
DGM
1308 return ctx->ops->signal1_type_get(ctx);
1309}
104f0cc2 1310DEFINE_SPUFS_ATTRIBUTE(spufs_signal1_type, spufs_signal1_type_get,
af8b44e0 1311 spufs_signal1_type_set, "%llu\n", SPU_ATTR_ACQUIRE);
bf1ab978 1312
67207b96 1313
197b1a82 1314static int spufs_signal2_type_set(void *data, u64 val)
67207b96
AB
1315{
1316 struct spu_context *ctx = data;
c9101bdb 1317 int ret;
67207b96 1318
c9101bdb
CH
1319 ret = spu_acquire(ctx);
1320 if (ret)
1321 return ret;
8b3d6663
AB
1322 ctx->ops->signal2_type_set(ctx, val);
1323 spu_release(ctx);
197b1a82
CH
1324
1325 return 0;
67207b96
AB
1326}
1327
104f0cc2 1328static u64 spufs_signal2_type_get(struct spu_context *ctx)
bf1ab978 1329{
bf1ab978
DGM
1330 return ctx->ops->signal2_type_get(ctx);
1331}
104f0cc2 1332DEFINE_SPUFS_ATTRIBUTE(spufs_signal2_type, spufs_signal2_type_get,
af8b44e0 1333 spufs_signal2_type_set, "%llu\n", SPU_ATTR_ACQUIRE);
67207b96 1334
27d5bf2a 1335#if SPUFS_MMAP_4K
b1e2270f
NP
1336static int
1337spufs_mss_mmap_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
d9379c4b 1338{
87ff6090 1339 return spufs_ps_fault(vma, vmf, 0x0000, SPUFS_MSS_MAP_SIZE);
d9379c4b
AB
1340}
1341
f0f37e2f 1342static const struct vm_operations_struct spufs_mss_mmap_vmops = {
b1e2270f 1343 .fault = spufs_mss_mmap_fault,
d9379c4b
AB
1344};
1345
1346/*
1347 * mmap support for problem state MFC DMA area [0x0000 - 0x0fff].
d9379c4b
AB
1348 */
1349static int spufs_mss_mmap(struct file *file, struct vm_area_struct *vma)
1350{
1351 if (!(vma->vm_flags & VM_SHARED))
1352 return -EINVAL;
1353
78bde53e 1354 vma->vm_flags |= VM_IO | VM_PFNMAP;
64b3d0e8 1355 vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
d9379c4b
AB
1356
1357 vma->vm_ops = &spufs_mss_mmap_vmops;
1358 return 0;
1359}
27d5bf2a
BH
1360#else /* SPUFS_MMAP_4K */
1361#define spufs_mss_mmap NULL
1362#endif /* !SPUFS_MMAP_4K */
d9379c4b
AB
1363
1364static int spufs_mss_open(struct inode *inode, struct file *file)
1365{
1366 struct spufs_inode_info *i = SPUFS_I(inode);
17e0e270 1367 struct spu_context *ctx = i->i_ctx;
d9379c4b
AB
1368
1369 file->private_data = i->i_ctx;
43c2bbd9 1370
47d3a5fa 1371 mutex_lock(&ctx->mapping_lock);
43c2bbd9
CH
1372 if (!i->i_openers++)
1373 ctx->mss = inode->i_mapping;
47d3a5fa 1374 mutex_unlock(&ctx->mapping_lock);
d9379c4b
AB
1375 return nonseekable_open(inode, file);
1376}
1377
43c2bbd9
CH
1378static int
1379spufs_mss_release(struct inode *inode, struct file *file)
1380{
1381 struct spufs_inode_info *i = SPUFS_I(inode);
1382 struct spu_context *ctx = i->i_ctx;
1383
47d3a5fa 1384 mutex_lock(&ctx->mapping_lock);
43c2bbd9
CH
1385 if (!--i->i_openers)
1386 ctx->mss = NULL;
47d3a5fa 1387 mutex_unlock(&ctx->mapping_lock);
43c2bbd9
CH
1388 return 0;
1389}
1390
5dfe4c96 1391static const struct file_operations spufs_mss_fops = {
d9379c4b 1392 .open = spufs_mss_open,
43c2bbd9 1393 .release = spufs_mss_release,
d9379c4b 1394 .mmap = spufs_mss_mmap,
fc15351d 1395 .llseek = no_llseek,
27d5bf2a
BH
1396};
1397
b1e2270f
NP
1398static int
1399spufs_psmap_mmap_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
27d5bf2a 1400{
87ff6090 1401 return spufs_ps_fault(vma, vmf, 0x0000, SPUFS_PS_MAP_SIZE);
27d5bf2a
BH
1402}
1403
f0f37e2f 1404static const struct vm_operations_struct spufs_psmap_mmap_vmops = {
b1e2270f 1405 .fault = spufs_psmap_mmap_fault,
27d5bf2a
BH
1406};
1407
1408/*
1409 * mmap support for full problem state area [0x00000 - 0x1ffff].
1410 */
1411static int spufs_psmap_mmap(struct file *file, struct vm_area_struct *vma)
1412{
1413 if (!(vma->vm_flags & VM_SHARED))
1414 return -EINVAL;
1415
78bde53e 1416 vma->vm_flags |= VM_IO | VM_PFNMAP;
64b3d0e8 1417 vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
27d5bf2a
BH
1418
1419 vma->vm_ops = &spufs_psmap_mmap_vmops;
1420 return 0;
1421}
1422
1423static int spufs_psmap_open(struct inode *inode, struct file *file)
1424{
1425 struct spufs_inode_info *i = SPUFS_I(inode);
17e0e270 1426 struct spu_context *ctx = i->i_ctx;
27d5bf2a 1427
47d3a5fa 1428 mutex_lock(&ctx->mapping_lock);
27d5bf2a 1429 file->private_data = i->i_ctx;
43c2bbd9
CH
1430 if (!i->i_openers++)
1431 ctx->psmap = inode->i_mapping;
47d3a5fa 1432 mutex_unlock(&ctx->mapping_lock);
27d5bf2a
BH
1433 return nonseekable_open(inode, file);
1434}
1435
43c2bbd9
CH
1436static int
1437spufs_psmap_release(struct inode *inode, struct file *file)
1438{
1439 struct spufs_inode_info *i = SPUFS_I(inode);
1440 struct spu_context *ctx = i->i_ctx;
1441
47d3a5fa 1442 mutex_lock(&ctx->mapping_lock);
43c2bbd9
CH
1443 if (!--i->i_openers)
1444 ctx->psmap = NULL;
47d3a5fa 1445 mutex_unlock(&ctx->mapping_lock);
43c2bbd9
CH
1446 return 0;
1447}
1448
5dfe4c96 1449static const struct file_operations spufs_psmap_fops = {
27d5bf2a 1450 .open = spufs_psmap_open,
43c2bbd9 1451 .release = spufs_psmap_release,
27d5bf2a 1452 .mmap = spufs_psmap_mmap,
fc15351d 1453 .llseek = no_llseek,
d9379c4b
AB
1454};
1455
1456
27d5bf2a 1457#if SPUFS_MMAP_4K
b1e2270f
NP
1458static int
1459spufs_mfc_mmap_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
6df10a82 1460{
87ff6090 1461 return spufs_ps_fault(vma, vmf, 0x3000, SPUFS_MFC_MAP_SIZE);
6df10a82
MN
1462}
1463
f0f37e2f 1464static const struct vm_operations_struct spufs_mfc_mmap_vmops = {
b1e2270f 1465 .fault = spufs_mfc_mmap_fault,
6df10a82
MN
1466};
1467
1468/*
1469 * mmap support for problem state MFC DMA area [0x0000 - 0x0fff].
6df10a82
MN
1470 */
1471static int spufs_mfc_mmap(struct file *file, struct vm_area_struct *vma)
1472{
1473 if (!(vma->vm_flags & VM_SHARED))
1474 return -EINVAL;
1475
78bde53e 1476 vma->vm_flags |= VM_IO | VM_PFNMAP;
64b3d0e8 1477 vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
6df10a82
MN
1478
1479 vma->vm_ops = &spufs_mfc_mmap_vmops;
1480 return 0;
1481}
27d5bf2a
BH
1482#else /* SPUFS_MMAP_4K */
1483#define spufs_mfc_mmap NULL
1484#endif /* !SPUFS_MMAP_4K */
a33a7d73
AB
1485
1486static int spufs_mfc_open(struct inode *inode, struct file *file)
1487{
1488 struct spufs_inode_info *i = SPUFS_I(inode);
1489 struct spu_context *ctx = i->i_ctx;
1490
1491 /* we don't want to deal with DMA into other processes */
1492 if (ctx->owner != current->mm)
1493 return -EINVAL;
1494
1495 if (atomic_read(&inode->i_count) != 1)
1496 return -EBUSY;
1497
47d3a5fa 1498 mutex_lock(&ctx->mapping_lock);
a33a7d73 1499 file->private_data = ctx;
43c2bbd9
CH
1500 if (!i->i_openers++)
1501 ctx->mfc = inode->i_mapping;
47d3a5fa 1502 mutex_unlock(&ctx->mapping_lock);
a33a7d73
AB
1503 return nonseekable_open(inode, file);
1504}
1505
43c2bbd9
CH
1506static int
1507spufs_mfc_release(struct inode *inode, struct file *file)
1508{
1509 struct spufs_inode_info *i = SPUFS_I(inode);
1510 struct spu_context *ctx = i->i_ctx;
1511
47d3a5fa 1512 mutex_lock(&ctx->mapping_lock);
43c2bbd9
CH
1513 if (!--i->i_openers)
1514 ctx->mfc = NULL;
47d3a5fa 1515 mutex_unlock(&ctx->mapping_lock);
43c2bbd9
CH
1516 return 0;
1517}
1518
a33a7d73
AB
1519/* interrupt-level mfc callback function. */
1520void spufs_mfc_callback(struct spu *spu)
1521{
1522 struct spu_context *ctx = spu->ctx;
1523
e65c2f6f
LB
1524 if (!ctx)
1525 return;
1526
a33a7d73
AB
1527 wake_up_all(&ctx->mfc_wq);
1528
e48b1b45 1529 pr_debug("%s %s\n", __func__, spu->name);
a33a7d73
AB
1530 if (ctx->mfc_fasync) {
1531 u32 free_elements, tagstatus;
1532 unsigned int mask;
1533
1534 /* no need for spu_acquire in interrupt context */
1535 free_elements = ctx->ops->get_mfc_free_elements(ctx);
1536 tagstatus = ctx->ops->read_mfc_tagstatus(ctx);
1537
1538 mask = 0;
1539 if (free_elements & 0xffff)
1540 mask |= POLLOUT;
1541 if (tagstatus & ctx->tagwait)
1542 mask |= POLLIN;
1543
1544 kill_fasync(&ctx->mfc_fasync, SIGIO, mask);
1545 }
1546}
1547
1548static int spufs_read_mfc_tagstatus(struct spu_context *ctx, u32 *status)
1549{
1550 /* See if there is one tag group is complete */
1551 /* FIXME we need locking around tagwait */
1552 *status = ctx->ops->read_mfc_tagstatus(ctx) & ctx->tagwait;
1553 ctx->tagwait &= ~*status;
1554 if (*status)
1555 return 1;
1556
1557 /* enable interrupt waiting for any tag group,
1558 may silently fail if interrupts are already enabled */
1559 ctx->ops->set_mfc_query(ctx, ctx->tagwait, 1);
1560 return 0;
1561}
1562
1563static ssize_t spufs_mfc_read(struct file *file, char __user *buffer,
1564 size_t size, loff_t *pos)
1565{
1566 struct spu_context *ctx = file->private_data;
1567 int ret = -EINVAL;
1568 u32 status;
1569
1570 if (size != 4)
1571 goto out;
1572
c9101bdb
CH
1573 ret = spu_acquire(ctx);
1574 if (ret)
1575 return ret;
1576
1577 ret = -EINVAL;
a33a7d73
AB
1578 if (file->f_flags & O_NONBLOCK) {
1579 status = ctx->ops->read_mfc_tagstatus(ctx);
1580 if (!(status & ctx->tagwait))
1581 ret = -EAGAIN;
1582 else
c9101bdb 1583 /* XXX(hch): shouldn't we clear ret here? */
a33a7d73
AB
1584 ctx->tagwait &= ~status;
1585 } else {
1586 ret = spufs_wait(ctx->mfc_wq,
1587 spufs_read_mfc_tagstatus(ctx, &status));
eebead5b
CH
1588 if (ret)
1589 goto out;
a33a7d73
AB
1590 }
1591 spu_release(ctx);
1592
a33a7d73
AB
1593 ret = 4;
1594 if (copy_to_user(buffer, &status, 4))
1595 ret = -EFAULT;
1596
1597out:
1598 return ret;
1599}
1600
1601static int spufs_check_valid_dma(struct mfc_dma_command *cmd)
1602{
9477e455 1603 pr_debug("queueing DMA %x %llx %x %x %x\n", cmd->lsa,
a33a7d73
AB
1604 cmd->ea, cmd->size, cmd->tag, cmd->cmd);
1605
1606 switch (cmd->cmd) {
1607 case MFC_PUT_CMD:
1608 case MFC_PUTF_CMD:
1609 case MFC_PUTB_CMD:
1610 case MFC_GET_CMD:
1611 case MFC_GETF_CMD:
1612 case MFC_GETB_CMD:
1613 break;
1614 default:
1615 pr_debug("invalid DMA opcode %x\n", cmd->cmd);
1616 return -EIO;
1617 }
1618
1619 if ((cmd->lsa & 0xf) != (cmd->ea &0xf)) {
9477e455 1620 pr_debug("invalid DMA alignment, ea %llx lsa %x\n",
a33a7d73
AB
1621 cmd->ea, cmd->lsa);
1622 return -EIO;
1623 }
1624
1625 switch (cmd->size & 0xf) {
1626 case 1:
1627 break;
1628 case 2:
1629 if (cmd->lsa & 1)
1630 goto error;
1631 break;
1632 case 4:
1633 if (cmd->lsa & 3)
1634 goto error;
1635 break;
1636 case 8:
1637 if (cmd->lsa & 7)
1638 goto error;
1639 break;
1640 case 0:
1641 if (cmd->lsa & 15)
1642 goto error;
1643 break;
1644 error:
1645 default:
1646 pr_debug("invalid DMA alignment %x for size %x\n",
1647 cmd->lsa & 0xf, cmd->size);
1648 return -EIO;
1649 }
1650
1651 if (cmd->size > 16 * 1024) {
1652 pr_debug("invalid DMA size %x\n", cmd->size);
1653 return -EIO;
1654 }
1655
1656 if (cmd->tag & 0xfff0) {
1657 /* we reserve the higher tag numbers for kernel use */
1658 pr_debug("invalid DMA tag\n");
1659 return -EIO;
1660 }
1661
1662 if (cmd->class) {
1663 /* not supported in this version */
1664 pr_debug("invalid DMA class\n");
1665 return -EIO;
1666 }
1667
1668 return 0;
1669}
1670
1671static int spu_send_mfc_command(struct spu_context *ctx,
1672 struct mfc_dma_command cmd,
1673 int *error)
1674{
1675 *error = ctx->ops->send_mfc_command(ctx, &cmd);
1676 if (*error == -EAGAIN) {
1677 /* wait for any tag group to complete
1678 so we have space for the new command */
1679 ctx->ops->set_mfc_query(ctx, ctx->tagwait, 1);
1680 /* try again, because the queue might be
1681 empty again */
1682 *error = ctx->ops->send_mfc_command(ctx, &cmd);
1683 if (*error == -EAGAIN)
1684 return 0;
1685 }
1686 return 1;
1687}
1688
1689static ssize_t spufs_mfc_write(struct file *file, const char __user *buffer,
1690 size_t size, loff_t *pos)
1691{
1692 struct spu_context *ctx = file->private_data;
1693 struct mfc_dma_command cmd;
1694 int ret = -EINVAL;
1695
1696 if (size != sizeof cmd)
1697 goto out;
1698
1699 ret = -EFAULT;
1700 if (copy_from_user(&cmd, buffer, sizeof cmd))
1701 goto out;
1702
1703 ret = spufs_check_valid_dma(&cmd);
1704 if (ret)
1705 goto out;
1706
c9101bdb
CH
1707 ret = spu_acquire(ctx);
1708 if (ret)
1709 goto out;
1710
33bfd7a7 1711 ret = spufs_wait(ctx->run_wq, ctx->state == SPU_STATE_RUNNABLE);
577f8f10
AM
1712 if (ret)
1713 goto out;
1714
a33a7d73
AB
1715 if (file->f_flags & O_NONBLOCK) {
1716 ret = ctx->ops->send_mfc_command(ctx, &cmd);
1717 } else {
1718 int status;
1719 ret = spufs_wait(ctx->mfc_wq,
1720 spu_send_mfc_command(ctx, cmd, &status));
eebead5b
CH
1721 if (ret)
1722 goto out;
a33a7d73
AB
1723 if (status)
1724 ret = status;
1725 }
a33a7d73
AB
1726
1727 if (ret)
933b0e35 1728 goto out_unlock;
a33a7d73
AB
1729
1730 ctx->tagwait |= 1 << cmd.tag;
3692dc66 1731 ret = size;
a33a7d73 1732
933b0e35
KA
1733out_unlock:
1734 spu_release(ctx);
a33a7d73
AB
1735out:
1736 return ret;
1737}
1738
1739static unsigned int spufs_mfc_poll(struct file *file,poll_table *wait)
1740{
1741 struct spu_context *ctx = file->private_data;
1742 u32 free_elements, tagstatus;
1743 unsigned int mask;
1744
933b0e35
KA
1745 poll_wait(file, &ctx->mfc_wq, wait);
1746
c9101bdb
CH
1747 /*
1748 * For now keep this uninterruptible and also ignore the rule
1749 * that poll should not sleep. Will be fixed later.
1750 */
1751 mutex_lock(&ctx->state_mutex);
a33a7d73
AB
1752 ctx->ops->set_mfc_query(ctx, ctx->tagwait, 2);
1753 free_elements = ctx->ops->get_mfc_free_elements(ctx);
1754 tagstatus = ctx->ops->read_mfc_tagstatus(ctx);
1755 spu_release(ctx);
1756
a33a7d73
AB
1757 mask = 0;
1758 if (free_elements & 0xffff)
1759 mask |= POLLOUT | POLLWRNORM;
1760 if (tagstatus & ctx->tagwait)
1761 mask |= POLLIN | POLLRDNORM;
1762
e48b1b45 1763 pr_debug("%s: free %d tagstatus %d tagwait %d\n", __func__,
a33a7d73
AB
1764 free_elements, tagstatus, ctx->tagwait);
1765
1766 return mask;
1767}
1768
73b6af8a 1769static int spufs_mfc_flush(struct file *file, fl_owner_t id)
a33a7d73
AB
1770{
1771 struct spu_context *ctx = file->private_data;
1772 int ret;
1773
c9101bdb
CH
1774 ret = spu_acquire(ctx);
1775 if (ret)
eebead5b 1776 goto out;
a33a7d73
AB
1777#if 0
1778/* this currently hangs */
1779 ret = spufs_wait(ctx->mfc_wq,
1780 ctx->ops->set_mfc_query(ctx, ctx->tagwait, 2));
1781 if (ret)
1782 goto out;
1783 ret = spufs_wait(ctx->mfc_wq,
1784 ctx->ops->read_mfc_tagstatus(ctx) == ctx->tagwait);
eebead5b
CH
1785 if (ret)
1786 goto out;
a33a7d73
AB
1787#else
1788 ret = 0;
1789#endif
1790 spu_release(ctx);
eebead5b 1791out:
a33a7d73
AB
1792 return ret;
1793}
1794
02c24a82
JB
1795static int spufs_mfc_fsync(struct file *file, loff_t start, loff_t end, int datasync)
1796{
496ad9aa 1797 struct inode *inode = file_inode(file);
02c24a82
JB
1798 int err = filemap_write_and_wait_range(inode->i_mapping, start, end);
1799 if (!err) {
5955102c 1800 inode_lock(inode);
02c24a82 1801 err = spufs_mfc_flush(file, NULL);
5955102c 1802 inode_unlock(inode);
02c24a82
JB
1803 }
1804 return err;
a33a7d73
AB
1805}
1806
1807static int spufs_mfc_fasync(int fd, struct file *file, int on)
1808{
1809 struct spu_context *ctx = file->private_data;
1810
1811 return fasync_helper(fd, file, on, &ctx->mfc_fasync);
1812}
1813
5dfe4c96 1814static const struct file_operations spufs_mfc_fops = {
a33a7d73 1815 .open = spufs_mfc_open,
43c2bbd9 1816 .release = spufs_mfc_release,
a33a7d73
AB
1817 .read = spufs_mfc_read,
1818 .write = spufs_mfc_write,
1819 .poll = spufs_mfc_poll,
1820 .flush = spufs_mfc_flush,
1821 .fsync = spufs_mfc_fsync,
1822 .fasync = spufs_mfc_fasync,
6df10a82 1823 .mmap = spufs_mfc_mmap,
fc15351d 1824 .llseek = no_llseek,
a33a7d73
AB
1825};
1826
197b1a82 1827static int spufs_npc_set(void *data, u64 val)
67207b96
AB
1828{
1829 struct spu_context *ctx = data;
c9101bdb
CH
1830 int ret;
1831
1832 ret = spu_acquire(ctx);
1833 if (ret)
1834 return ret;
8b3d6663
AB
1835 ctx->ops->npc_write(ctx, val);
1836 spu_release(ctx);
197b1a82
CH
1837
1838 return 0;
67207b96
AB
1839}
1840
104f0cc2 1841static u64 spufs_npc_get(struct spu_context *ctx)
78810ff6
ME
1842{
1843 return ctx->ops->npc_read(ctx);
1844}
104f0cc2
ME
1845DEFINE_SPUFS_ATTRIBUTE(spufs_npc_ops, spufs_npc_get, spufs_npc_set,
1846 "0x%llx\n", SPU_ATTR_ACQUIRE);
67207b96 1847
197b1a82 1848static int spufs_decr_set(void *data, u64 val)
8b3d6663
AB
1849{
1850 struct spu_context *ctx = data;
1851 struct spu_lscsa *lscsa = ctx->csa.lscsa;
c9101bdb
CH
1852 int ret;
1853
1854 ret = spu_acquire_saved(ctx);
1855 if (ret)
1856 return ret;
8b3d6663 1857 lscsa->decr.slot[0] = (u32) val;
27b1ea09 1858 spu_release_saved(ctx);
197b1a82
CH
1859
1860 return 0;
8b3d6663
AB
1861}
1862
104f0cc2 1863static u64 spufs_decr_get(struct spu_context *ctx)
8b3d6663 1864{
8b3d6663 1865 struct spu_lscsa *lscsa = ctx->csa.lscsa;
bf1ab978
DGM
1866 return lscsa->decr.slot[0];
1867}
104f0cc2
ME
1868DEFINE_SPUFS_ATTRIBUTE(spufs_decr_ops, spufs_decr_get, spufs_decr_set,
1869 "0x%llx\n", SPU_ATTR_ACQUIRE_SAVED);
8b3d6663 1870
197b1a82 1871static int spufs_decr_status_set(void *data, u64 val)
8b3d6663
AB
1872{
1873 struct spu_context *ctx = data;
c9101bdb
CH
1874 int ret;
1875
1876 ret = spu_acquire_saved(ctx);
1877 if (ret)
1878 return ret;
d40a01d4
MN
1879 if (val)
1880 ctx->csa.priv2.mfc_control_RW |= MFC_CNTL_DECREMENTER_RUNNING;
1881 else
1882 ctx->csa.priv2.mfc_control_RW &= ~MFC_CNTL_DECREMENTER_RUNNING;
27b1ea09 1883 spu_release_saved(ctx);
197b1a82
CH
1884
1885 return 0;
8b3d6663
AB
1886}
1887
104f0cc2 1888static u64 spufs_decr_status_get(struct spu_context *ctx)
8b3d6663 1889{
d40a01d4
MN
1890 if (ctx->csa.priv2.mfc_control_RW & MFC_CNTL_DECREMENTER_RUNNING)
1891 return SPU_DECR_STATUS_RUNNING;
1892 else
1893 return 0;
bf1ab978 1894}
104f0cc2
ME
1895DEFINE_SPUFS_ATTRIBUTE(spufs_decr_status_ops, spufs_decr_status_get,
1896 spufs_decr_status_set, "0x%llx\n",
1897 SPU_ATTR_ACQUIRE_SAVED);
8b3d6663 1898
197b1a82 1899static int spufs_event_mask_set(void *data, u64 val)
8b3d6663
AB
1900{
1901 struct spu_context *ctx = data;
1902 struct spu_lscsa *lscsa = ctx->csa.lscsa;
c9101bdb
CH
1903 int ret;
1904
1905 ret = spu_acquire_saved(ctx);
1906 if (ret)
1907 return ret;
8b3d6663 1908 lscsa->event_mask.slot[0] = (u32) val;
27b1ea09 1909 spu_release_saved(ctx);
197b1a82
CH
1910
1911 return 0;
8b3d6663
AB
1912}
1913
104f0cc2 1914static u64 spufs_event_mask_get(struct spu_context *ctx)
8b3d6663 1915{
8b3d6663 1916 struct spu_lscsa *lscsa = ctx->csa.lscsa;
bf1ab978
DGM
1917 return lscsa->event_mask.slot[0];
1918}
1919
104f0cc2
ME
1920DEFINE_SPUFS_ATTRIBUTE(spufs_event_mask_ops, spufs_event_mask_get,
1921 spufs_event_mask_set, "0x%llx\n",
1922 SPU_ATTR_ACQUIRE_SAVED);
8b3d6663 1923
104f0cc2 1924static u64 spufs_event_status_get(struct spu_context *ctx)
b9e3bd77 1925{
b9e3bd77 1926 struct spu_state *state = &ctx->csa;
b9e3bd77 1927 u64 stat;
b9e3bd77
DGM
1928 stat = state->spu_chnlcnt_RW[0];
1929 if (stat)
bf1ab978
DGM
1930 return state->spu_chnldata_RW[0];
1931 return 0;
1932}
104f0cc2
ME
1933DEFINE_SPUFS_ATTRIBUTE(spufs_event_status_ops, spufs_event_status_get,
1934 NULL, "0x%llx\n", SPU_ATTR_ACQUIRE_SAVED)
b9e3bd77 1935
197b1a82 1936static int spufs_srr0_set(void *data, u64 val)
8b3d6663
AB
1937{
1938 struct spu_context *ctx = data;
1939 struct spu_lscsa *lscsa = ctx->csa.lscsa;
c9101bdb
CH
1940 int ret;
1941
1942 ret = spu_acquire_saved(ctx);
1943 if (ret)
1944 return ret;
8b3d6663 1945 lscsa->srr0.slot[0] = (u32) val;
27b1ea09 1946 spu_release_saved(ctx);
197b1a82
CH
1947
1948 return 0;
8b3d6663
AB
1949}
1950
104f0cc2 1951static u64 spufs_srr0_get(struct spu_context *ctx)
8b3d6663 1952{
8b3d6663 1953 struct spu_lscsa *lscsa = ctx->csa.lscsa;
104f0cc2 1954 return lscsa->srr0.slot[0];
8b3d6663 1955}
104f0cc2
ME
1956DEFINE_SPUFS_ATTRIBUTE(spufs_srr0_ops, spufs_srr0_get, spufs_srr0_set,
1957 "0x%llx\n", SPU_ATTR_ACQUIRE_SAVED)
8b3d6663 1958
104f0cc2 1959static u64 spufs_id_get(struct spu_context *ctx)
7b1a7014 1960{
7b1a7014
AB
1961 u64 num;
1962
7b1a7014
AB
1963 if (ctx->state == SPU_STATE_RUNNABLE)
1964 num = ctx->spu->number;
1965 else
1966 num = (unsigned int)-1;
7b1a7014
AB
1967
1968 return num;
1969}
104f0cc2
ME
1970DEFINE_SPUFS_ATTRIBUTE(spufs_id_ops, spufs_id_get, NULL, "0x%llx\n",
1971 SPU_ATTR_ACQUIRE)
7b1a7014 1972
104f0cc2 1973static u64 spufs_object_id_get(struct spu_context *ctx)
bf1ab978
DGM
1974{
1975 /* FIXME: Should there really be no locking here? */
104f0cc2 1976 return ctx->object_id;
bf1ab978
DGM
1977}
1978
197b1a82 1979static int spufs_object_id_set(void *data, u64 id)
86767277
AB
1980{
1981 struct spu_context *ctx = data;
1982 ctx->object_id = id;
197b1a82
CH
1983
1984 return 0;
86767277
AB
1985}
1986
104f0cc2
ME
1987DEFINE_SPUFS_ATTRIBUTE(spufs_object_id_ops, spufs_object_id_get,
1988 spufs_object_id_set, "0x%llx\n", SPU_ATTR_NOACQUIRE);
86767277 1989
104f0cc2 1990static u64 spufs_lslr_get(struct spu_context *ctx)
bf1ab978 1991{
bf1ab978
DGM
1992 return ctx->csa.priv2.spu_lslr_RW;
1993}
104f0cc2
ME
1994DEFINE_SPUFS_ATTRIBUTE(spufs_lslr_ops, spufs_lslr_get, NULL, "0x%llx\n",
1995 SPU_ATTR_ACQUIRE_SAVED);
b9e3bd77
DGM
1996
1997static int spufs_info_open(struct inode *inode, struct file *file)
1998{
1999 struct spufs_inode_info *i = SPUFS_I(inode);
2000 struct spu_context *ctx = i->i_ctx;
2001 file->private_data = ctx;
2002 return 0;
2003}
2004
cbe709c1
BH
2005static int spufs_caps_show(struct seq_file *s, void *private)
2006{
2007 struct spu_context *ctx = s->private;
2008
2009 if (!(ctx->flags & SPU_CREATE_NOSCHED))
2010 seq_puts(s, "sched\n");
2011 if (!(ctx->flags & SPU_CREATE_ISOLATE))
2012 seq_puts(s, "step\n");
2013 return 0;
2014}
2015
2016static int spufs_caps_open(struct inode *inode, struct file *file)
2017{
2018 return single_open(file, spufs_caps_show, SPUFS_I(inode)->i_ctx);
2019}
2020
2021static const struct file_operations spufs_caps_fops = {
2022 .open = spufs_caps_open,
2023 .read = seq_read,
2024 .llseek = seq_lseek,
2025 .release = single_release,
2026};
2027
bf1ab978
DGM
2028static ssize_t __spufs_mbox_info_read(struct spu_context *ctx,
2029 char __user *buf, size_t len, loff_t *pos)
2030{
bf1ab978
DGM
2031 u32 data;
2032
cbea9238
JK
2033 /* EOF if there's no entry in the mbox */
2034 if (!(ctx->csa.prob.mb_stat_R & 0x0000ff))
2035 return 0;
2036
2037 data = ctx->csa.prob.pu_mb_R;
bf1ab978
DGM
2038
2039 return simple_read_from_buffer(buf, len, pos, &data, sizeof data);
2040}
2041
69a2f00c
DGM
2042static ssize_t spufs_mbox_info_read(struct file *file, char __user *buf,
2043 size_t len, loff_t *pos)
2044{
bf1ab978 2045 int ret;
69a2f00c 2046 struct spu_context *ctx = file->private_data;
69a2f00c
DGM
2047
2048 if (!access_ok(VERIFY_WRITE, buf, len))
2049 return -EFAULT;
2050
c9101bdb
CH
2051 ret = spu_acquire_saved(ctx);
2052 if (ret)
2053 return ret;
69a2f00c 2054 spin_lock(&ctx->csa.register_lock);
bf1ab978 2055 ret = __spufs_mbox_info_read(ctx, buf, len, pos);
69a2f00c 2056 spin_unlock(&ctx->csa.register_lock);
27b1ea09 2057 spu_release_saved(ctx);
69a2f00c 2058
bf1ab978 2059 return ret;
69a2f00c
DGM
2060}
2061
5dfe4c96 2062static const struct file_operations spufs_mbox_info_fops = {
69a2f00c
DGM
2063 .open = spufs_info_open,
2064 .read = spufs_mbox_info_read,
2065 .llseek = generic_file_llseek,
2066};
2067
bf1ab978
DGM
2068static ssize_t __spufs_ibox_info_read(struct spu_context *ctx,
2069 char __user *buf, size_t len, loff_t *pos)
2070{
bf1ab978
DGM
2071 u32 data;
2072
cbea9238
JK
2073 /* EOF if there's no entry in the ibox */
2074 if (!(ctx->csa.prob.mb_stat_R & 0xff0000))
2075 return 0;
2076
2077 data = ctx->csa.priv2.puint_mb_R;
bf1ab978
DGM
2078
2079 return simple_read_from_buffer(buf, len, pos, &data, sizeof data);
2080}
2081
69a2f00c
DGM
2082static ssize_t spufs_ibox_info_read(struct file *file, char __user *buf,
2083 size_t len, loff_t *pos)
2084{
2085 struct spu_context *ctx = file->private_data;
bf1ab978 2086 int ret;
69a2f00c
DGM
2087
2088 if (!access_ok(VERIFY_WRITE, buf, len))
2089 return -EFAULT;
2090
c9101bdb
CH
2091 ret = spu_acquire_saved(ctx);
2092 if (ret)
2093 return ret;
69a2f00c 2094 spin_lock(&ctx->csa.register_lock);
bf1ab978 2095 ret = __spufs_ibox_info_read(ctx, buf, len, pos);
69a2f00c 2096 spin_unlock(&ctx->csa.register_lock);
27b1ea09 2097 spu_release_saved(ctx);
69a2f00c 2098
bf1ab978 2099 return ret;
69a2f00c
DGM
2100}
2101
5dfe4c96 2102static const struct file_operations spufs_ibox_info_fops = {
69a2f00c
DGM
2103 .open = spufs_info_open,
2104 .read = spufs_ibox_info_read,
2105 .llseek = generic_file_llseek,
2106};
2107
bf1ab978
DGM
2108static ssize_t __spufs_wbox_info_read(struct spu_context *ctx,
2109 char __user *buf, size_t len, loff_t *pos)
69a2f00c 2110{
69a2f00c
DGM
2111 int i, cnt;
2112 u32 data[4];
2113 u32 wbox_stat;
2114
bf1ab978
DGM
2115 wbox_stat = ctx->csa.prob.mb_stat_R;
2116 cnt = 4 - ((wbox_stat & 0x00ff00) >> 8);
2117 for (i = 0; i < cnt; i++) {
2118 data[i] = ctx->csa.spu_mailbox_data[i];
2119 }
2120
2121 return simple_read_from_buffer(buf, len, pos, &data,
2122 cnt * sizeof(u32));
2123}
2124
2125static ssize_t spufs_wbox_info_read(struct file *file, char __user *buf,
2126 size_t len, loff_t *pos)
2127{
2128 struct spu_context *ctx = file->private_data;
2129 int ret;
2130
69a2f00c
DGM
2131 if (!access_ok(VERIFY_WRITE, buf, len))
2132 return -EFAULT;
2133
c9101bdb
CH
2134 ret = spu_acquire_saved(ctx);
2135 if (ret)
2136 return ret;
69a2f00c 2137 spin_lock(&ctx->csa.register_lock);
bf1ab978 2138 ret = __spufs_wbox_info_read(ctx, buf, len, pos);
69a2f00c 2139 spin_unlock(&ctx->csa.register_lock);
27b1ea09 2140 spu_release_saved(ctx);
69a2f00c 2141
bf1ab978 2142 return ret;
69a2f00c
DGM
2143}
2144
5dfe4c96 2145static const struct file_operations spufs_wbox_info_fops = {
69a2f00c
DGM
2146 .open = spufs_info_open,
2147 .read = spufs_wbox_info_read,
2148 .llseek = generic_file_llseek,
2149};
2150
bf1ab978
DGM
2151static ssize_t __spufs_dma_info_read(struct spu_context *ctx,
2152 char __user *buf, size_t len, loff_t *pos)
b9e3bd77 2153{
b9e3bd77
DGM
2154 struct spu_dma_info info;
2155 struct mfc_cq_sr *qp, *spuqp;
2156 int i;
2157
b9e3bd77
DGM
2158 info.dma_info_type = ctx->csa.priv2.spu_tag_status_query_RW;
2159 info.dma_info_mask = ctx->csa.lscsa->tag_mask.slot[0];
2160 info.dma_info_status = ctx->csa.spu_chnldata_RW[24];
2161 info.dma_info_stall_and_notify = ctx->csa.spu_chnldata_RW[25];
2162 info.dma_info_atomic_command_status = ctx->csa.spu_chnldata_RW[27];
2163 for (i = 0; i < 16; i++) {
2164 qp = &info.dma_info_command_data[i];
2165 spuqp = &ctx->csa.priv2.spuq[i];
2166
2167 qp->mfc_cq_data0_RW = spuqp->mfc_cq_data0_RW;
2168 qp->mfc_cq_data1_RW = spuqp->mfc_cq_data1_RW;
2169 qp->mfc_cq_data2_RW = spuqp->mfc_cq_data2_RW;
2170 qp->mfc_cq_data3_RW = spuqp->mfc_cq_data3_RW;
2171 }
b9e3bd77
DGM
2172
2173 return simple_read_from_buffer(buf, len, pos, &info,
2174 sizeof info);
2175}
2176
bf1ab978
DGM
2177static ssize_t spufs_dma_info_read(struct file *file, char __user *buf,
2178 size_t len, loff_t *pos)
2179{
2180 struct spu_context *ctx = file->private_data;
2181 int ret;
2182
2183 if (!access_ok(VERIFY_WRITE, buf, len))
2184 return -EFAULT;
2185
c9101bdb
CH
2186 ret = spu_acquire_saved(ctx);
2187 if (ret)
2188 return ret;
bf1ab978
DGM
2189 spin_lock(&ctx->csa.register_lock);
2190 ret = __spufs_dma_info_read(ctx, buf, len, pos);
2191 spin_unlock(&ctx->csa.register_lock);
27b1ea09 2192 spu_release_saved(ctx);
bf1ab978
DGM
2193
2194 return ret;
2195}
2196
5dfe4c96 2197static const struct file_operations spufs_dma_info_fops = {
b9e3bd77
DGM
2198 .open = spufs_info_open,
2199 .read = spufs_dma_info_read,
fc15351d 2200 .llseek = no_llseek,
b9e3bd77
DGM
2201};
2202
bf1ab978
DGM
2203static ssize_t __spufs_proxydma_info_read(struct spu_context *ctx,
2204 char __user *buf, size_t len, loff_t *pos)
b9e3bd77 2205{
b9e3bd77 2206 struct spu_proxydma_info info;
b9e3bd77 2207 struct mfc_cq_sr *qp, *puqp;
bf1ab978 2208 int ret = sizeof info;
b9e3bd77
DGM
2209 int i;
2210
2211 if (len < ret)
2212 return -EINVAL;
2213
2214 if (!access_ok(VERIFY_WRITE, buf, len))
2215 return -EFAULT;
2216
b9e3bd77
DGM
2217 info.proxydma_info_type = ctx->csa.prob.dma_querytype_RW;
2218 info.proxydma_info_mask = ctx->csa.prob.dma_querymask_RW;
2219 info.proxydma_info_status = ctx->csa.prob.dma_tagstatus_R;
2220 for (i = 0; i < 8; i++) {
2221 qp = &info.proxydma_info_command_data[i];
2222 puqp = &ctx->csa.priv2.puq[i];
2223
2224 qp->mfc_cq_data0_RW = puqp->mfc_cq_data0_RW;
2225 qp->mfc_cq_data1_RW = puqp->mfc_cq_data1_RW;
2226 qp->mfc_cq_data2_RW = puqp->mfc_cq_data2_RW;
2227 qp->mfc_cq_data3_RW = puqp->mfc_cq_data3_RW;
2228 }
bf1ab978
DGM
2229
2230 return simple_read_from_buffer(buf, len, pos, &info,
2231 sizeof info);
2232}
2233
2234static ssize_t spufs_proxydma_info_read(struct file *file, char __user *buf,
2235 size_t len, loff_t *pos)
2236{
2237 struct spu_context *ctx = file->private_data;
2238 int ret;
2239
c9101bdb
CH
2240 ret = spu_acquire_saved(ctx);
2241 if (ret)
2242 return ret;
bf1ab978
DGM
2243 spin_lock(&ctx->csa.register_lock);
2244 ret = __spufs_proxydma_info_read(ctx, buf, len, pos);
b9e3bd77 2245 spin_unlock(&ctx->csa.register_lock);
27b1ea09 2246 spu_release_saved(ctx);
b9e3bd77 2247
b9e3bd77
DGM
2248 return ret;
2249}
2250
5dfe4c96 2251static const struct file_operations spufs_proxydma_info_fops = {
b9e3bd77
DGM
2252 .open = spufs_info_open,
2253 .read = spufs_proxydma_info_read,
fc15351d 2254 .llseek = no_llseek,
b9e3bd77
DGM
2255};
2256
476273ad
CH
2257static int spufs_show_tid(struct seq_file *s, void *private)
2258{
2259 struct spu_context *ctx = s->private;
2260
2261 seq_printf(s, "%d\n", ctx->tid);
2262 return 0;
2263}
2264
2265static int spufs_tid_open(struct inode *inode, struct file *file)
2266{
2267 return single_open(file, spufs_show_tid, SPUFS_I(inode)->i_ctx);
2268}
2269
2270static const struct file_operations spufs_tid_fops = {
2271 .open = spufs_tid_open,
2272 .read = seq_read,
2273 .llseek = seq_lseek,
2274 .release = single_release,
2275};
2276
e9f8a0b6
CH
2277static const char *ctx_state_names[] = {
2278 "user", "system", "iowait", "loaded"
2279};
2280
2281static unsigned long long spufs_acct_time(struct spu_context *ctx,
27ec41d3 2282 enum spu_utilization_state state)
e9f8a0b6 2283{
27ec41d3 2284 unsigned long long time = ctx->stats.times[state];
e9f8a0b6 2285
27ec41d3
AD
2286 /*
2287 * In general, utilization statistics are updated by the controlling
2288 * thread as the spu context moves through various well defined
2289 * state transitions, but if the context is lazily loaded its
2290 * utilization statistics are not updated as the controlling thread
2291 * is not tightly coupled with the execution of the spu context. We
2292 * calculate and apply the time delta from the last recorded state
2293 * of the spu context.
2294 */
2295 if (ctx->spu && ctx->stats.util_state == state) {
f2dec1ea 2296 time += ktime_get_ns() - ctx->stats.tstamp;
27ec41d3 2297 }
e9f8a0b6 2298
27ec41d3 2299 return time / NSEC_PER_MSEC;
e9f8a0b6
CH
2300}
2301
2302static unsigned long long spufs_slb_flts(struct spu_context *ctx)
2303{
2304 unsigned long long slb_flts = ctx->stats.slb_flt;
2305
2306 if (ctx->state == SPU_STATE_RUNNABLE) {
2307 slb_flts += (ctx->spu->stats.slb_flt -
2308 ctx->stats.slb_flt_base);
2309 }
2310
2311 return slb_flts;
2312}
2313
2314static unsigned long long spufs_class2_intrs(struct spu_context *ctx)
2315{
2316 unsigned long long class2_intrs = ctx->stats.class2_intr;
2317
2318 if (ctx->state == SPU_STATE_RUNNABLE) {
2319 class2_intrs += (ctx->spu->stats.class2_intr -
2320 ctx->stats.class2_intr_base);
2321 }
2322
2323 return class2_intrs;
2324}
2325
2326
/*
 * seq_file show callback for "stat": one line of aggregate statistics for
 * the context (utilization state, per-state times in ms, context switches,
 * fault counts, class-2 interrupts, libassist calls).  Takes the context's
 * state mutex via spu_acquire(); may fail with -ERESTARTSYS.
 */
static int spufs_show_stat(struct seq_file *s, void *private)
{
	struct spu_context *ctx = s->private;
	int ret;

	ret = spu_acquire(ctx);
	if (ret)
		return ret;

	seq_printf(s, "%s %llu %llu %llu %llu "
		"%llu %llu %llu %llu %llu %llu %llu %llu\n",
		ctx_state_names[ctx->stats.util_state],
		spufs_acct_time(ctx, SPU_UTIL_USER),
		spufs_acct_time(ctx, SPU_UTIL_SYSTEM),
		spufs_acct_time(ctx, SPU_UTIL_IOWAIT),
		spufs_acct_time(ctx, SPU_UTIL_IDLE_LOADED),
		ctx->stats.vol_ctx_switch,
		ctx->stats.invol_ctx_switch,
		spufs_slb_flts(ctx),
		ctx->stats.hash_flt,
		ctx->stats.min_flt,
		ctx->stats.maj_flt,
		spufs_class2_intrs(ctx),
		ctx->stats.libassist);
	spu_release(ctx);
	return 0;
}

static int spufs_stat_open(struct inode *inode, struct file *file)
{
	return single_open(file, spufs_show_stat, SPUFS_I(inode)->i_ctx);
}

/* Read-only "stat" attribute, backed by seq_file. */
static const struct file_operations spufs_stat_fops = {
	.open = spufs_stat_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
};
2366
5158e9b5
CH
/*
 * Number of unread entries in the switch-log ring buffer.
 * NOTE(review): relies on (head - tail) % SWITCH_LOG_BUFSIZE being
 * non-negative — confirm head/tail are unsigned in struct switch_log,
 * since C's % on a negative difference would go negative here.
 */
static inline int spufs_switch_log_used(struct spu_context *ctx)
{
	return (ctx->switch_log->head - ctx->switch_log->tail) %
		SWITCH_LOG_BUFSIZE;
}

/* Free slots remaining in the switch-log ring buffer. */
static inline int spufs_switch_log_avail(struct spu_context *ctx)
{
	return SWITCH_LOG_BUFSIZE - spufs_switch_log_used(ctx);
}
2377
2378static int spufs_switch_log_open(struct inode *inode, struct file *file)
2379{
2380 struct spu_context *ctx = SPUFS_I(inode)->i_ctx;
f5ed0eb6
JK
2381 int rc;
2382
2383 rc = spu_acquire(ctx);
2384 if (rc)
2385 return rc;
5158e9b5 2386
5158e9b5 2387 if (ctx->switch_log) {
f5ed0eb6
JK
2388 rc = -EBUSY;
2389 goto out;
5158e9b5 2390 }
f5ed0eb6 2391
837ef884 2392 ctx->switch_log = kmalloc(sizeof(struct switch_log) +
f5ed0eb6
JK
2393 SWITCH_LOG_BUFSIZE * sizeof(struct switch_log_entry),
2394 GFP_KERNEL);
2395
2396 if (!ctx->switch_log) {
2397 rc = -ENOMEM;
2398 goto out;
2399 }
2400
837ef884 2401 ctx->switch_log->head = ctx->switch_log->tail = 0;
f5ed0eb6
JK
2402 init_waitqueue_head(&ctx->switch_log->wait);
2403 rc = 0;
2404
2405out:
2406 spu_release(ctx);
2407 return rc;
2408}
2409
2410static int spufs_switch_log_release(struct inode *inode, struct file *file)
2411{
2412 struct spu_context *ctx = SPUFS_I(inode)->i_ctx;
2413 int rc;
2414
2415 rc = spu_acquire(ctx);
2416 if (rc)
2417 return rc;
2418
2419 kfree(ctx->switch_log);
2420 ctx->switch_log = NULL;
2421 spu_release(ctx);
5158e9b5
CH
2422
2423 return 0;
5158e9b5
CH
2424}
2425
2426static int switch_log_sprint(struct spu_context *ctx, char *tbuf, int n)
2427{
2428 struct switch_log_entry *p;
2429
2430 p = ctx->switch_log->log + ctx->switch_log->tail % SWITCH_LOG_BUFSIZE;
2431
2432 return snprintf(tbuf, n, "%u.%09u %d %u %u %llu\n",
2433 (unsigned int) p->tstamp.tv_sec,
2434 (unsigned int) p->tstamp.tv_nsec,
2435 p->spu_id,
2436 (unsigned int) p->type,
2437 (unsigned int) p->val,
2438 (unsigned long long) p->timebase);
2439}
2440
2441static ssize_t spufs_switch_log_read(struct file *file, char __user *buf,
2442 size_t len, loff_t *ppos)
2443{
496ad9aa 2444 struct inode *inode = file_inode(file);
5158e9b5
CH
2445 struct spu_context *ctx = SPUFS_I(inode)->i_ctx;
2446 int error = 0, cnt = 0;
2447
17e37675 2448 if (!buf)
5158e9b5
CH
2449 return -EINVAL;
2450
f5ed0eb6
JK
2451 error = spu_acquire(ctx);
2452 if (error)
2453 return error;
2454
5158e9b5
CH
2455 while (cnt < len) {
2456 char tbuf[128];
2457 int width;
2458
14f693ee
JK
2459 if (spufs_switch_log_used(ctx) == 0) {
2460 if (cnt > 0) {
2461 /* If there's data ready to go, we can
2462 * just return straight away */
2463 break;
2464
2465 } else if (file->f_flags & O_NONBLOCK) {
f5ed0eb6
JK
2466 error = -EAGAIN;
2467 break;
14f693ee
JK
2468
2469 } else {
2470 /* spufs_wait will drop the mutex and
2471 * re-acquire, but since we're in read(), the
2472 * file cannot be _released (and so
2473 * ctx->switch_log is stable).
2474 */
2475 error = spufs_wait(ctx->switch_log->wait,
2476 spufs_switch_log_used(ctx) > 0);
2477
2478 /* On error, spufs_wait returns without the
2479 * state mutex held */
2480 if (error)
2481 return error;
2482
2483 /* We may have had entries read from underneath
2484 * us while we dropped the mutex in spufs_wait,
2485 * so re-check */
2486 if (spufs_switch_log_used(ctx) == 0)
2487 continue;
f5ed0eb6 2488 }
5158e9b5
CH
2489 }
2490
5158e9b5 2491 width = switch_log_sprint(ctx, tbuf, sizeof(tbuf));
f5ed0eb6 2492 if (width < len)
5158e9b5
CH
2493 ctx->switch_log->tail =
2494 (ctx->switch_log->tail + 1) %
2495 SWITCH_LOG_BUFSIZE;
f5ed0eb6
JK
2496 else
2497 /* If the record is greater than space available return
2498 * partial buffer (so far) */
5158e9b5
CH
2499 break;
2500
2501 error = copy_to_user(buf + cnt, tbuf, width);
2502 if (error)
2503 break;
2504 cnt += width;
2505 }
2506
f5ed0eb6
JK
2507 spu_release(ctx);
2508
5158e9b5
CH
2509 return cnt == 0 ? error : cnt;
2510}
2511
/*
 * poll() method for "switch_log": report POLLIN when at least one entry is
 * queued in the ring buffer.
 */
static unsigned int spufs_switch_log_poll(struct file *file, poll_table *wait)
{
	struct inode *inode = file_inode(file);
	struct spu_context *ctx = SPUFS_I(inode)->i_ctx;
	unsigned int mask = 0;
	int rc;

	poll_wait(file, &ctx->switch_log->wait, wait);

	rc = spu_acquire(ctx);
	if (rc)
		/* NOTE(review): poll methods return an event mask, not an
		 * errno; returning a negative rc here is questionable —
		 * confirm callers handle it. */
		return rc;

	if (spufs_switch_log_used(ctx) > 0)
		mask |= POLLIN;

	spu_release(ctx);

	return mask;
}

/* "switch_log" attribute: blocking reader of context-switch events. */
static const struct file_operations spufs_switch_log_fops = {
	.open = spufs_switch_log_open,
	.read = spufs_switch_log_read,
	.poll = spufs_switch_log_poll,
	.release = spufs_switch_log_release,
	.llseek = no_llseek,
};
2540
f5ed0eb6
JK
/**
 * Log a context switch event to a switch log reader.
 *
 * Must be called with ctx->state_mutex held.
 */
void spu_switch_log_notify(struct spu *spu, struct spu_context *ctx,
		u32 type, u32 val)
{
	/* No-op unless a reader has the switch_log file open. */
	if (!ctx->switch_log)
		return;

	/* Keep one slot free (avail > 1) so head never catches tail;
	 * events arriving while the ring is full are silently dropped. */
	if (spufs_switch_log_avail(ctx) > 1) {
		struct switch_log_entry *p;

		p = ctx->switch_log->log + ctx->switch_log->head;
		ktime_get_ts(&p->tstamp);
		p->timebase = get_tb();
		p->spu_id = spu ? spu->number : -1;
		p->type = type;
		p->val = val;

		ctx->switch_log->head =
			(ctx->switch_log->head + 1) % SWITCH_LOG_BUFSIZE;
	}

	/* Wake readers even if the entry was dropped. */
	wake_up(&ctx->switch_log->wait);
}
e9f8a0b6 2568
46deed69
LB
/*
 * Debug file ".ctx": dump one line of scheduler/run state for a context.
 * When the context is loaded on an SPU, MFC_CNTL is read live over MMIO
 * under the SPU's register_lock; otherwise it comes from the saved
 * context-save area.
 */
static int spufs_show_ctx(struct seq_file *s, void *private)
{
	struct spu_context *ctx = s->private;
	u64 mfc_control_RW;

	mutex_lock(&ctx->state_mutex);
	if (ctx->spu) {
		struct spu *spu = ctx->spu;
		struct spu_priv2 __iomem *priv2 = spu->priv2;

		spin_lock_irq(&spu->register_lock);
		mfc_control_RW = in_be64(&priv2->mfc_control_RW);
		spin_unlock_irq(&spu->register_lock);
	} else {
		struct spu_state *csa = &ctx->csa;

		mfc_control_RW = csa->priv2.mfc_control_RW;
	}

	/* 'S'aved or 'R'unnable; spu(-1) when not loaded; 'q' if queued */
	seq_printf(s, "%c flgs(%lx) sflgs(%lx) pri(%d) ts(%d) spu(%02d)"
		" %c %llx %llx %llx %llx %x %x\n",
		ctx->state == SPU_STATE_SAVED ? 'S' : 'R',
		ctx->flags,
		ctx->sched_flags,
		ctx->prio,
		ctx->time_slice,
		ctx->spu ? ctx->spu->number : -1,
		!list_empty(&ctx->rq) ? 'q' : ' ',
		ctx->csa.class_0_pending,
		ctx->csa.class_0_dar,
		ctx->csa.class_1_dsisr,
		mfc_control_RW,
		ctx->ops->runcntl_read(ctx),
		ctx->ops->status_read(ctx));

	mutex_unlock(&ctx->state_mutex);

	return 0;
}

static int spufs_ctx_open(struct inode *inode, struct file *file)
{
	return single_open(file, spufs_show_ctx, SPUFS_I(inode)->i_ctx);
}

/* Read-only ".ctx" debug attribute, backed by seq_file. */
static const struct file_operations spufs_ctx_fops = {
	.open = spufs_ctx_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
};
2620
/*
 * Files created in every scheduled (normal) spufs context directory:
 * { name, file_operations, mode, [size] }.  Terminated by an empty entry.
 */
const struct spufs_tree_descr spufs_dir_contents[] = {
	{ "capabilities", &spufs_caps_fops, 0444, },
	{ "mem",  &spufs_mem_fops,  0666, LS_SIZE, },
	{ "regs", &spufs_regs_fops, 0666, sizeof(struct spu_reg128[128]), },
	{ "mbox", &spufs_mbox_fops, 0444, },
	{ "ibox", &spufs_ibox_fops, 0444, },
	{ "wbox", &spufs_wbox_fops, 0222, },
	{ "mbox_stat", &spufs_mbox_stat_fops, 0444, sizeof(u32), },
	{ "ibox_stat", &spufs_ibox_stat_fops, 0444, sizeof(u32), },
	{ "wbox_stat", &spufs_wbox_stat_fops, 0444, sizeof(u32), },
	{ "signal1", &spufs_signal1_fops, 0666, },
	{ "signal2", &spufs_signal2_fops, 0666, },
	{ "signal1_type", &spufs_signal1_type, 0666, },
	{ "signal2_type", &spufs_signal2_type, 0666, },
	{ "cntl", &spufs_cntl_fops,  0666, },
	{ "fpcr", &spufs_fpcr_fops, 0666, sizeof(struct spu_reg128), },
	{ "lslr", &spufs_lslr_ops, 0444, },
	{ "mfc", &spufs_mfc_fops, 0666, },
	{ "mss", &spufs_mss_fops, 0666, },
	{ "npc", &spufs_npc_ops, 0666, },
	{ "srr0", &spufs_srr0_ops, 0666, },
	{ "decr", &spufs_decr_ops, 0666, },
	{ "decr_status", &spufs_decr_status_ops, 0666, },
	{ "event_mask", &spufs_event_mask_ops, 0666, },
	{ "event_status", &spufs_event_status_ops, 0444, },
	{ "psmap", &spufs_psmap_fops, 0666, SPUFS_PS_MAP_SIZE, },
	{ "phys-id", &spufs_id_ops, 0666, },
	{ "object-id", &spufs_object_id_ops, 0666, },
	{ "mbox_info", &spufs_mbox_info_fops, 0444, sizeof(u32), },
	{ "ibox_info", &spufs_ibox_info_fops, 0444, sizeof(u32), },
	{ "wbox_info", &spufs_wbox_info_fops, 0444, sizeof(u32), },
	{ "dma_info", &spufs_dma_info_fops, 0444,
		sizeof(struct spu_dma_info), },
	{ "proxydma_info", &spufs_proxydma_info_fops, 0444,
		sizeof(struct spu_proxydma_info)},
	{ "tid", &spufs_tid_fops, 0444, },
	{ "stat", &spufs_stat_fops, 0444, },
	{ "switch_log", &spufs_switch_log_fops, 0444 },
	{},
};
5737edd1 2661
/*
 * Files created in a NOSCHED context directory (contexts that are never
 * scheduled out): a reduced set without regs/decr/event/run-state files.
 */
const struct spufs_tree_descr spufs_dir_nosched_contents[] = {
	{ "capabilities", &spufs_caps_fops, 0444, },
	{ "mem",  &spufs_mem_fops,  0666, LS_SIZE, },
	{ "mbox", &spufs_mbox_fops, 0444, },
	{ "ibox", &spufs_ibox_fops, 0444, },
	{ "wbox", &spufs_wbox_fops, 0222, },
	{ "mbox_stat", &spufs_mbox_stat_fops, 0444, sizeof(u32), },
	{ "ibox_stat", &spufs_ibox_stat_fops, 0444, sizeof(u32), },
	{ "wbox_stat", &spufs_wbox_stat_fops, 0444, sizeof(u32), },
	{ "signal1", &spufs_signal1_nosched_fops, 0222, },
	{ "signal2", &spufs_signal2_nosched_fops, 0222, },
	{ "signal1_type", &spufs_signal1_type, 0666, },
	{ "signal2_type", &spufs_signal2_type, 0666, },
	{ "mss", &spufs_mss_fops, 0666, },
	{ "mfc", &spufs_mfc_fops, 0666, },
	{ "cntl", &spufs_cntl_fops,  0666, },
	{ "npc", &spufs_npc_ops, 0666, },
	{ "psmap", &spufs_psmap_fops, 0666, SPUFS_PS_MAP_SIZE, },
	{ "phys-id", &spufs_id_ops, 0666, },
	{ "object-id", &spufs_object_id_ops, 0666, },
	{ "tid", &spufs_tid_fops, 0444, },
	{ "stat", &spufs_stat_fops, 0444, },
	{},
};
2686
/* Extra debug-only files added to a context directory. */
const struct spufs_tree_descr spufs_dir_debug_contents[] = {
	{ ".ctx", &spufs_ctx_fops, 0444, },
	{},
};
bf1ab978 2691
/*
 * Per-context notes written into a coredump: { name, read callback,
 * get callback, size }.  Entries with a read callback dump raw buffers;
 * entries with a get callback dump a single u64 value (size 19 —
 * presumably the formatted text length of one u64; confirm against the
 * coredump writer).
 */
const struct spufs_coredump_reader spufs_coredump_read[] = {
	{ "regs", __spufs_regs_read, NULL, sizeof(struct spu_reg128[128])},
	{ "fpcr", __spufs_fpcr_read, NULL, sizeof(struct spu_reg128) },
	{ "lslr", NULL, spufs_lslr_get, 19 },
	{ "decr", NULL, spufs_decr_get, 19 },
	{ "decr_status", NULL, spufs_decr_status_get, 19 },
	{ "mem", __spufs_mem_read, NULL, LS_SIZE, },
	{ "signal1", __spufs_signal1_read, NULL, sizeof(u32) },
	{ "signal1_type", NULL, spufs_signal1_type_get, 19 },
	{ "signal2", __spufs_signal2_read, NULL, sizeof(u32) },
	{ "signal2_type", NULL, spufs_signal2_type_get, 19 },
	{ "event_mask", NULL, spufs_event_mask_get, 19 },
	{ "event_status", NULL, spufs_event_status_get, 19 },
	{ "mbox_info", __spufs_mbox_info_read, NULL, sizeof(u32) },
	{ "ibox_info", __spufs_ibox_info_read, NULL, sizeof(u32) },
	{ "wbox_info", __spufs_wbox_info_read, NULL, 4 * sizeof(u32)},
	{ "dma_info", __spufs_dma_info_read, NULL, sizeof(struct spu_dma_info)},
	{ "proxydma_info", __spufs_proxydma_info_read,
			   NULL, sizeof(struct spu_proxydma_info)},
	{ "object-id", NULL, spufs_object_id_get, 19 },
	{ "npc", NULL, spufs_npc_get, 19 },
	{ NULL },
};