/*
 * SPU file system -- file contents
 *
 * (C) Copyright IBM Deutschland Entwicklung GmbH 2005
 *
 * Author: Arnd Bergmann <arndb@de.ibm.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2, or (at your option)
 * any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 */
#include <linux/fs.h>
#include <linux/ioctl.h>
#include <linux/export.h>
#include <linux/pagemap.h>
#include <linux/poll.h>
#include <linux/ptrace.h>
#include <linux/seq_file.h>
#include <linux/slab.h>

#include <asm/io.h>
#include <asm/time.h>
#include <asm/spu.h>
#include <asm/spu_info.h>
#include <linux/uaccess.h>

#include "spufs.h"
#include "sputrace.h"

#define SPUFS_MMAP_4K (PAGE_SIZE == 0x1000)
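
/*
 * SPUFS_MMAP_4K is true only on 4K-page kernels; several mmap handlers
 * below are compiled out on 64K pages, where a 4K problem state window
 * cannot be mapped on a page of its own.
 */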
/* Simple attribute files */
struct spufs_attr {
	int (*get)(void *, u64 *);
	int (*set)(void *, u64);
	char get_buf[24];	/* enough to store a u64 and "\n\0" */
	char set_buf[24];
	void *data;
	const char *fmt;	/* format for read operation */
	struct mutex mutex;	/* protects access to these buffers */
};

static int spufs_attr_open(struct inode *inode, struct file *file,
		int (*get)(void *, u64 *), int (*set)(void *, u64),
		const char *fmt)
{
	struct spufs_attr *attr;

	attr = kmalloc(sizeof(*attr), GFP_KERNEL);
	if (!attr)
		return -ENOMEM;

	attr->get = get;
	attr->set = set;
	attr->data = inode->i_private;
	attr->fmt = fmt;
	mutex_init(&attr->mutex);
	file->private_data = attr;

	return nonseekable_open(inode, file);
}

static int spufs_attr_release(struct inode *inode, struct file *file)
{
	kfree(file->private_data);
	return 0;
}

static ssize_t spufs_attr_read(struct file *file, char __user *buf,
		size_t len, loff_t *ppos)
{
	struct spufs_attr *attr;
	size_t size;
	ssize_t ret;

	attr = file->private_data;
	if (!attr->get)
		return -EACCES;

	ret = mutex_lock_interruptible(&attr->mutex);
	if (ret)
		return ret;

	if (*ppos) {	/* continued read */
		size = strlen(attr->get_buf);
	} else {	/* first read */
		u64 val;
		ret = attr->get(attr->data, &val);
		if (ret)
			goto out;

		size = scnprintf(attr->get_buf, sizeof(attr->get_buf),
				 attr->fmt, (unsigned long long)val);
	}

	ret = simple_read_from_buffer(buf, len, ppos, attr->get_buf, size);
out:
	mutex_unlock(&attr->mutex);
	return ret;
}

static ssize_t spufs_attr_write(struct file *file, const char __user *buf,
		size_t len, loff_t *ppos)
{
	struct spufs_attr *attr;
	u64 val;
	size_t size;
	ssize_t ret;

	attr = file->private_data;
	if (!attr->set)
		return -EACCES;

	ret = mutex_lock_interruptible(&attr->mutex);
	if (ret)
		return ret;

	ret = -EFAULT;
	size = min(sizeof(attr->set_buf) - 1, len);
	if (copy_from_user(attr->set_buf, buf, size))
		goto out;

	ret = len; /* claim we got the whole input */
	attr->set_buf[size] = '\0';
	val = simple_strtol(attr->set_buf, NULL, 0);
	attr->set(attr->data, val);
out:
	mutex_unlock(&attr->mutex);
	return ret;
}
#define DEFINE_SPUFS_SIMPLE_ATTRIBUTE(__fops, __get, __set, __fmt)	\
static int __fops ## _open(struct inode *inode, struct file *file)	\
{									\
	__simple_attr_check_format(__fmt, 0ull);			\
	return spufs_attr_open(inode, file, __get, __set, __fmt);	\
}									\
static const struct file_operations __fops = {				\
	.open	 = __fops ## _open,					\
	.release = spufs_attr_release,					\
	.read	 = spufs_attr_read,					\
	.write	 = spufs_attr_write,					\
	.llseek  = generic_file_llseek,					\
};
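
/*
 * Each attribute file instantiated from this macro parses and formats a
 * single u64 through the supplied get/set callbacks, using the given
 * printf format for the read path.
 */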
static int
spufs_mem_open(struct inode *inode, struct file *file)
{
	struct spufs_inode_info *i = SPUFS_I(inode);
	struct spu_context *ctx = i->i_ctx;

	mutex_lock(&ctx->mapping_lock);
	file->private_data = ctx;
	if (!i->i_openers++)
		ctx->local_store = inode->i_mapping;
	mutex_unlock(&ctx->mapping_lock);
	return 0;
}

static int
spufs_mem_release(struct inode *inode, struct file *file)
{
	struct spufs_inode_info *i = SPUFS_I(inode);
	struct spu_context *ctx = i->i_ctx;

	mutex_lock(&ctx->mapping_lock);
	if (!--i->i_openers)
		ctx->local_store = NULL;
	mutex_unlock(&ctx->mapping_lock);
	return 0;
}

static ssize_t
__spufs_mem_read(struct spu_context *ctx, char __user *buffer,
			size_t size, loff_t *pos)
{
	char *local_store = ctx->ops->get_ls(ctx);
	return simple_read_from_buffer(buffer, size, pos, local_store,
					LS_SIZE);
}

static ssize_t
spufs_mem_read(struct file *file, char __user *buffer,
				size_t size, loff_t *pos)
{
	struct spu_context *ctx = file->private_data;
	ssize_t ret;

	ret = spu_acquire(ctx);
	if (ret)
		return ret;
	ret = __spufs_mem_read(ctx, buffer, size, pos);
	spu_release(ctx);

	return ret;
}

static ssize_t
spufs_mem_write(struct file *file, const char __user *buffer,
					size_t size, loff_t *ppos)
{
	struct spu_context *ctx = file->private_data;
	char *local_store;
	int ret;

	ret = spu_acquire(ctx);
	if (ret)
		return ret;

	local_store = ctx->ops->get_ls(ctx);
	size = simple_write_to_buffer(local_store, LS_SIZE, ppos, buffer, size);
	spu_release(ctx);

	return size;
}
static vm_fault_t
spufs_mem_mmap_fault(struct vm_fault *vmf)
{
	struct vm_area_struct *vma = vmf->vma;
	struct spu_context *ctx	= vma->vm_file->private_data;
	unsigned long pfn, offset;
	vm_fault_t ret;

	offset = vmf->pgoff << PAGE_SHIFT;
	if (offset >= LS_SIZE)
		return VM_FAULT_SIGBUS;

	pr_debug("spufs_mem_mmap_fault address=0x%lx, offset=0x%lx\n",
			vmf->address, offset);

	if (spu_acquire(ctx))
		return VM_FAULT_NOPAGE;

	if (ctx->state == SPU_STATE_SAVED) {
		/* saved context: back the page with the kernel-side copy */
		vma->vm_page_prot = pgprot_cached(vma->vm_page_prot);
		pfn = vmalloc_to_pfn(ctx->csa.lscsa->ls + offset);
	} else {
		/* running context: map the physical local store */
		vma->vm_page_prot = pgprot_noncached_wc(vma->vm_page_prot);
		pfn = (ctx->spu->local_store_phys + offset) >> PAGE_SHIFT;
	}
	ret = vmf_insert_pfn(vma, vmf->address, pfn);

	spu_release(ctx);

	return ret;
}

static int spufs_mem_mmap_access(struct vm_area_struct *vma,
				unsigned long address,
				void *buf, int len, int write)
{
	struct spu_context *ctx = vma->vm_file->private_data;
	unsigned long offset = address - vma->vm_start;
	char *local_store;

	if (write && !(vma->vm_flags & VM_WRITE))
		return -EACCES;
	if (spu_acquire(ctx))
		return -EINTR;
	if ((offset + len) > vma->vm_end)
		len = vma->vm_end - offset;
	local_store = ctx->ops->get_ls(ctx);
	if (write)
		memcpy_toio(local_store + offset, buf, len);
	else
		memcpy_fromio(buf, local_store + offset, len);
	spu_release(ctx);

	return len;
}
static const struct vm_operations_struct spufs_mem_mmap_vmops = {
	.fault = spufs_mem_mmap_fault,
	.access = spufs_mem_mmap_access,
};

static int spufs_mem_mmap(struct file *file, struct vm_area_struct *vma)
{
	if (!(vma->vm_flags & VM_SHARED))
		return -EINVAL;

	vma->vm_flags |= VM_IO | VM_PFNMAP;
	vma->vm_page_prot = pgprot_noncached_wc(vma->vm_page_prot);

	vma->vm_ops = &spufs_mem_mmap_vmops;
	return 0;
}

static const struct file_operations spufs_mem_fops = {
	.open			= spufs_mem_open,
	.release		= spufs_mem_release,
	.read			= spufs_mem_read,
	.write			= spufs_mem_write,
	.llseek			= generic_file_llseek,
	.mmap			= spufs_mem_mmap,
};
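
/*
 * Common fault handler for the files that map the SPU problem state
 * area: the context must be loaded on a physical SPU before a page of
 * the registers at [ps_offs, ps_offs + ps_size) can be handed out.
 */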
static vm_fault_t spufs_ps_fault(struct vm_fault *vmf,
				    unsigned long ps_offs,
				    unsigned long ps_size)
{
	struct spu_context *ctx = vmf->vma->vm_file->private_data;
	unsigned long area, offset = vmf->pgoff << PAGE_SHIFT;
	int err = 0;
	vm_fault_t ret = VM_FAULT_NOPAGE;

	spu_context_nospu_trace(spufs_ps_fault__enter, ctx);

	if (offset >= ps_size)
		return VM_FAULT_SIGBUS;

	if (fatal_signal_pending(current))
		return VM_FAULT_SIGBUS;

	/*
	 * Because we release the mmap_sem, the context may be destroyed while
	 * we're in spu_wait. Grab an extra reference so it isn't destroyed
	 * while we fault in the page.
	 */
	get_spu_context(ctx);

	/*
	 * We have to wait for context to be loaded before we have
	 * pages to hand out to the user, but we don't want to wait
	 * with the mmap_sem held.
	 * It is possible to drop the mmap_sem here, but then we need
	 * to return VM_FAULT_NOPAGE because the mappings may have
	 * changed.
	 */
	if (spu_acquire(ctx))
		goto refault;

	if (ctx->state == SPU_STATE_SAVED) {
		up_read(&current->mm->mmap_sem);
		spu_context_nospu_trace(spufs_ps_fault__sleep, ctx);
		err = spufs_wait(ctx->run_wq, ctx->state == SPU_STATE_RUNNABLE);
		spu_context_trace(spufs_ps_fault__wake, ctx, ctx->spu);
		down_read(&current->mm->mmap_sem);
	} else {
		area = ctx->spu->problem_phys + ps_offs;
		ret = vmf_insert_pfn(vmf->vma, vmf->address,
				(area + offset) >> PAGE_SHIFT);
		spu_context_trace(spufs_ps_fault__insert, ctx, ctx->spu);
	}

	if (!err)
		spu_release(ctx);

refault:
	put_spu_context(ctx);
	return ret;
}
#if SPUFS_MMAP_4K
static vm_fault_t spufs_cntl_mmap_fault(struct vm_fault *vmf)
{
	return spufs_ps_fault(vmf, 0x4000, SPUFS_CNTL_MAP_SIZE);
}

static const struct vm_operations_struct spufs_cntl_mmap_vmops = {
	.fault = spufs_cntl_mmap_fault,
};

/*
 * mmap support for problem state control area [0x4000 - 0x4fff].
 */
static int spufs_cntl_mmap(struct file *file, struct vm_area_struct *vma)
{
	if (!(vma->vm_flags & VM_SHARED))
		return -EINVAL;

	vma->vm_flags |= VM_IO | VM_PFNMAP;
	vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);

	vma->vm_ops = &spufs_cntl_mmap_vmops;
	return 0;
}
#else /* SPUFS_MMAP_4K */
#define spufs_cntl_mmap NULL
#endif /* !SPUFS_MMAP_4K */
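
/*
 * On 64K-page kernels the 4K control area cannot be given a mapping of
 * its own, so the cntl file simply has no mmap operation there.
 */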
static int spufs_cntl_get(void *data, u64 *val)
{
	struct spu_context *ctx = data;
	int ret;

	ret = spu_acquire(ctx);
	if (ret)
		return ret;
	*val = ctx->ops->status_read(ctx);
	spu_release(ctx);

	return 0;
}

static int spufs_cntl_set(void *data, u64 val)
{
	struct spu_context *ctx = data;
	int ret;

	ret = spu_acquire(ctx);
	if (ret)
		return ret;
	ctx->ops->runcntl_write(ctx, val);
	spu_release(ctx);

	return 0;
}

static int spufs_cntl_open(struct inode *inode, struct file *file)
{
	struct spufs_inode_info *i = SPUFS_I(inode);
	struct spu_context *ctx = i->i_ctx;

	mutex_lock(&ctx->mapping_lock);
	file->private_data = ctx;
	if (!i->i_openers++)
		ctx->cntl = inode->i_mapping;
	mutex_unlock(&ctx->mapping_lock);
	return simple_attr_open(inode, file, spufs_cntl_get,
					spufs_cntl_set, "0x%08lx");
}

static int
spufs_cntl_release(struct inode *inode, struct file *file)
{
	struct spufs_inode_info *i = SPUFS_I(inode);
	struct spu_context *ctx = i->i_ctx;

	simple_attr_release(inode, file);

	mutex_lock(&ctx->mapping_lock);
	if (!--i->i_openers)
		ctx->cntl = NULL;
	mutex_unlock(&ctx->mapping_lock);
	return 0;
}

static const struct file_operations spufs_cntl_fops = {
	.open = spufs_cntl_open,
	.release = spufs_cntl_release,
	.read = simple_attr_read,
	.write = simple_attr_write,
	.llseek	= generic_file_llseek,
	.mmap = spufs_cntl_mmap,
};
static int
spufs_regs_open(struct inode *inode, struct file *file)
{
	struct spufs_inode_info *i = SPUFS_I(inode);
	file->private_data = i->i_ctx;
	return 0;
}

static ssize_t
__spufs_regs_read(struct spu_context *ctx, char __user *buffer,
			size_t size, loff_t *pos)
{
	struct spu_lscsa *lscsa = ctx->csa.lscsa;
	return simple_read_from_buffer(buffer, size, pos,
				      lscsa->gprs, sizeof lscsa->gprs);
}

static ssize_t
spufs_regs_read(struct file *file, char __user *buffer,
		size_t size, loff_t *pos)
{
	int ret;
	struct spu_context *ctx = file->private_data;

	/* pre-check for file position: if we'd return EOF, there's no point
	 * causing a deschedule */
	if (*pos >= sizeof(ctx->csa.lscsa->gprs))
		return 0;

	ret = spu_acquire_saved(ctx);
	if (ret)
		return ret;
	ret = __spufs_regs_read(ctx, buffer, size, pos);
	spu_release_saved(ctx);

	return ret;
}

static ssize_t
spufs_regs_write(struct file *file, const char __user *buffer,
		size_t size, loff_t *pos)
{
	struct spu_context *ctx = file->private_data;
	struct spu_lscsa *lscsa = ctx->csa.lscsa;
	int ret;

	if (*pos >= sizeof(lscsa->gprs))
		return -EFBIG;

	ret = spu_acquire_saved(ctx);
	if (ret)
		return ret;

	size = simple_write_to_buffer(lscsa->gprs, sizeof(lscsa->gprs), pos,
					buffer, size);

	spu_release_saved(ctx);
	return size;
}

static const struct file_operations spufs_regs_fops = {
	.open	 = spufs_regs_open,
	.read    = spufs_regs_read,
	.write   = spufs_regs_write,
	.llseek  = generic_file_llseek,
};
static ssize_t
__spufs_fpcr_read(struct spu_context *ctx, char __user * buffer,
			size_t size, loff_t * pos)
{
	struct spu_lscsa *lscsa = ctx->csa.lscsa;
	return simple_read_from_buffer(buffer, size, pos,
				      &lscsa->fpcr, sizeof(lscsa->fpcr));
}

static ssize_t
spufs_fpcr_read(struct file *file, char __user * buffer,
		size_t size, loff_t * pos)
{
	int ret;
	struct spu_context *ctx = file->private_data;

	ret = spu_acquire_saved(ctx);
	if (ret)
		return ret;
	ret = __spufs_fpcr_read(ctx, buffer, size, pos);
	spu_release_saved(ctx);

	return ret;
}

static ssize_t
spufs_fpcr_write(struct file *file, const char __user * buffer,
		size_t size, loff_t * pos)
{
	struct spu_context *ctx = file->private_data;
	struct spu_lscsa *lscsa = ctx->csa.lscsa;
	int ret;

	if (*pos >= sizeof(lscsa->fpcr))
		return -EFBIG;

	ret = spu_acquire_saved(ctx);
	if (ret)
		return ret;

	size = simple_write_to_buffer(&lscsa->fpcr, sizeof(lscsa->fpcr), pos,
					buffer, size);

	spu_release_saved(ctx);
	return size;
}

static const struct file_operations spufs_fpcr_fops = {
	.open = spufs_regs_open,
	.read = spufs_fpcr_read,
	.write = spufs_fpcr_write,
	.llseek = generic_file_llseek,
};
/* generic open function for all pipe-like files */
static int spufs_pipe_open(struct inode *inode, struct file *file)
{
	struct spufs_inode_info *i = SPUFS_I(inode);
	file->private_data = i->i_ctx;

	return stream_open(inode, file);
}
/*
 * Read as many bytes from the mailbox as possible, until
 * one of the conditions becomes true:
 *
 * - no more data available in the mailbox
 * - end of the user provided buffer
 * - end of the mapped area
 */
static ssize_t spufs_mbox_read(struct file *file, char __user *buf,
			size_t len, loff_t *pos)
{
	struct spu_context *ctx = file->private_data;
	u32 mbox_data, __user *udata;
	ssize_t count;

	if (len < 4)
		return -EINVAL;

	if (!access_ok(buf, len))
		return -EFAULT;

	udata = (void __user *)buf;

	count = spu_acquire(ctx);
	if (count)
		return count;

	for (count = 0; (count + 4) <= len; count += 4, udata++) {
		int ret;
		ret = ctx->ops->mbox_read(ctx, &mbox_data);
		if (ret == 0)
			break;

		/*
		 * at the end of the mapped area, we can fault
		 * but still need to return the data we have
		 * read successfully so far.
		 */
		ret = __put_user(mbox_data, udata);
		if (ret) {
			if (!count)
				count = -EFAULT;
			break;
		}
	}
	spu_release(ctx);

	if (!count)
		count = -EAGAIN;

	return count;
}

static const struct file_operations spufs_mbox_fops = {
	.open	= spufs_pipe_open,
	.read	= spufs_mbox_read,
	.llseek	= no_llseek,
};
static ssize_t spufs_mbox_stat_read(struct file *file, char __user *buf,
			size_t len, loff_t *pos)
{
	struct spu_context *ctx = file->private_data;
	ssize_t ret;
	u32 mbox_stat;

	if (len < 4)
		return -EINVAL;

	ret = spu_acquire(ctx);
	if (ret)
		return ret;

	mbox_stat = ctx->ops->mbox_stat_read(ctx) & 0xff;

	spu_release(ctx);

	if (copy_to_user(buf, &mbox_stat, sizeof mbox_stat))
		return -EFAULT;

	return 4;
}

static const struct file_operations spufs_mbox_stat_fops = {
	.open	= spufs_pipe_open,
	.read	= spufs_mbox_stat_read,
	.llseek = no_llseek,
};
/* low-level ibox access function */
size_t spu_ibox_read(struct spu_context *ctx, u32 *data)
{
	return ctx->ops->ibox_read(ctx, data);
}

/* interrupt-level ibox callback function. */
void spufs_ibox_callback(struct spu *spu)
{
	struct spu_context *ctx = spu->ctx;

	if (ctx)
		wake_up_all(&ctx->ibox_wq);
}
/*
 * Read as many bytes from the interrupt mailbox as possible, until
 * one of the conditions becomes true:
 *
 * - no more data available in the mailbox
 * - end of the user provided buffer
 * - end of the mapped area
 *
 * If the file is opened without O_NONBLOCK, we wait here until
 * any data is available, but return when we have been able to
 * read something.
 */
static ssize_t spufs_ibox_read(struct file *file, char __user *buf,
			size_t len, loff_t *pos)
{
	struct spu_context *ctx = file->private_data;
	u32 ibox_data, __user *udata;
	ssize_t count;

	if (len < 4)
		return -EINVAL;

	if (!access_ok(buf, len))
		return -EFAULT;

	udata = (void __user *)buf;

	count = spu_acquire(ctx);
	if (count)
		goto out;

	/* wait only for the first element */
	count = 0;
	if (file->f_flags & O_NONBLOCK) {
		if (!spu_ibox_read(ctx, &ibox_data)) {
			count = -EAGAIN;
			goto out_unlock;
		}
	} else {
		count = spufs_wait(ctx->ibox_wq, spu_ibox_read(ctx, &ibox_data));
		if (count)
			goto out;
	}

	/* if we can't write at all, return -EFAULT */
	count = __put_user(ibox_data, udata);
	if (count)
		goto out_unlock;

	for (count = 4, udata++; (count + 4) <= len; count += 4, udata++) {
		int ret;
		ret = ctx->ops->ibox_read(ctx, &ibox_data);
		if (ret == 0)
			break;
		/*
		 * at the end of the mapped area, we can fault
		 * but still need to return the data we have
		 * read successfully so far.
		 */
		ret = __put_user(ibox_data, udata);
		if (ret)
			break;
	}

out_unlock:
	spu_release(ctx);
out:
	return count;
}
static __poll_t spufs_ibox_poll(struct file *file, poll_table *wait)
{
	struct spu_context *ctx = file->private_data;
	__poll_t mask;

	poll_wait(file, &ctx->ibox_wq, wait);

	/*
	 * For now keep this uninterruptible and also ignore the rule
	 * that poll should not sleep.  Will be fixed later.
	 */
	mutex_lock(&ctx->state_mutex);
	mask = ctx->ops->mbox_stat_poll(ctx, EPOLLIN | EPOLLRDNORM);
	spu_release(ctx);

	return mask;
}

static const struct file_operations spufs_ibox_fops = {
	.open	= spufs_pipe_open,
	.read	= spufs_ibox_read,
	.poll	= spufs_ibox_poll,
	.llseek = no_llseek,
};
static ssize_t spufs_ibox_stat_read(struct file *file, char __user *buf,
			size_t len, loff_t *pos)
{
	struct spu_context *ctx = file->private_data;
	ssize_t ret;
	u32 ibox_stat;

	if (len < 4)
		return -EINVAL;

	ret = spu_acquire(ctx);
	if (ret)
		return ret;
	ibox_stat = (ctx->ops->mbox_stat_read(ctx) >> 16) & 0xff;
	spu_release(ctx);

	if (copy_to_user(buf, &ibox_stat, sizeof ibox_stat))
		return -EFAULT;

	return 4;
}

static const struct file_operations spufs_ibox_stat_fops = {
	.open	= spufs_pipe_open,
	.read	= spufs_ibox_stat_read,
	.llseek = no_llseek,
};
/* low-level mailbox write */
size_t spu_wbox_write(struct spu_context *ctx, u32 data)
{
	return ctx->ops->wbox_write(ctx, data);
}

/* interrupt-level wbox callback function. */
void spufs_wbox_callback(struct spu *spu)
{
	struct spu_context *ctx = spu->ctx;

	if (ctx)
		wake_up_all(&ctx->wbox_wq);
}
/*
 * Write as many bytes to the interrupt mailbox as possible, until
 * one of the conditions becomes true:
 *
 * - the mailbox is full
 * - end of the user provided buffer
 * - end of the mapped area
 *
 * If the file is opened without O_NONBLOCK, we wait here until
 * space is available, but return when we have been able to
 * write something.
 */
static ssize_t spufs_wbox_write(struct file *file, const char __user *buf,
			size_t len, loff_t *pos)
{
	struct spu_context *ctx = file->private_data;
	u32 wbox_data, __user *udata;
	ssize_t count;

	if (len < 4)
		return -EINVAL;

	udata = (void __user *)buf;
	if (!access_ok(buf, len))
		return -EFAULT;

	if (__get_user(wbox_data, udata))
		return -EFAULT;

	count = spu_acquire(ctx);
	if (count)
		goto out;

	/*
	 * make sure we can at least write one element, by waiting
	 * in case of !O_NONBLOCK
	 */
	count = 0;
	if (file->f_flags & O_NONBLOCK) {
		if (!spu_wbox_write(ctx, wbox_data)) {
			count = -EAGAIN;
			goto out_unlock;
		}
	} else {
		count = spufs_wait(ctx->wbox_wq, spu_wbox_write(ctx, wbox_data));
		if (count)
			goto out;
	}

	/* write as much as possible */
	for (count = 4, udata++; (count + 4) <= len; count += 4, udata++) {
		int ret;
		ret = __get_user(wbox_data, udata);
		if (ret)
			break;

		ret = spu_wbox_write(ctx, wbox_data);
		if (ret == 0)
			break;
	}

out_unlock:
	spu_release(ctx);
out:
	return count;
}
static __poll_t spufs_wbox_poll(struct file *file, poll_table *wait)
{
	struct spu_context *ctx = file->private_data;
	__poll_t mask;

	poll_wait(file, &ctx->wbox_wq, wait);

	/*
	 * For now keep this uninterruptible and also ignore the rule
	 * that poll should not sleep.  Will be fixed later.
	 */
	mutex_lock(&ctx->state_mutex);
	mask = ctx->ops->mbox_stat_poll(ctx, EPOLLOUT | EPOLLWRNORM);
	spu_release(ctx);

	return mask;
}

static const struct file_operations spufs_wbox_fops = {
	.open	= spufs_pipe_open,
	.write	= spufs_wbox_write,
	.poll	= spufs_wbox_poll,
	.llseek = no_llseek,
};
static ssize_t spufs_wbox_stat_read(struct file *file, char __user *buf,
			size_t len, loff_t *pos)
{
	struct spu_context *ctx = file->private_data;
	ssize_t ret;
	u32 wbox_stat;

	if (len < 4)
		return -EINVAL;

	ret = spu_acquire(ctx);
	if (ret)
		return ret;
	wbox_stat = (ctx->ops->mbox_stat_read(ctx) >> 8) & 0xff;
	spu_release(ctx);

	if (copy_to_user(buf, &wbox_stat, sizeof wbox_stat))
		return -EFAULT;

	return 4;
}

static const struct file_operations spufs_wbox_stat_fops = {
	.open	= spufs_pipe_open,
	.read	= spufs_wbox_stat_read,
	.llseek = no_llseek,
};
static int spufs_signal1_open(struct inode *inode, struct file *file)
{
	struct spufs_inode_info *i = SPUFS_I(inode);
	struct spu_context *ctx = i->i_ctx;

	mutex_lock(&ctx->mapping_lock);
	file->private_data = ctx;
	if (!i->i_openers++)
		ctx->signal1 = inode->i_mapping;
	mutex_unlock(&ctx->mapping_lock);
	return nonseekable_open(inode, file);
}

static int
spufs_signal1_release(struct inode *inode, struct file *file)
{
	struct spufs_inode_info *i = SPUFS_I(inode);
	struct spu_context *ctx = i->i_ctx;

	mutex_lock(&ctx->mapping_lock);
	if (!--i->i_openers)
		ctx->signal1 = NULL;
	mutex_unlock(&ctx->mapping_lock);
	return 0;
}

static ssize_t __spufs_signal1_read(struct spu_context *ctx, char __user *buf,
			size_t len, loff_t *pos)
{
	int ret = 0;
	u32 data;

	if (len < 4)
		return -EINVAL;

	if (ctx->csa.spu_chnlcnt_RW[3]) {
		data = ctx->csa.spu_chnldata_RW[3];
		ret = 4;
	}

	if (!ret)
		goto out;

	if (copy_to_user(buf, &data, 4))
		return -EFAULT;

out:
	return ret;
}

static ssize_t spufs_signal1_read(struct file *file, char __user *buf,
			size_t len, loff_t *pos)
{
	int ret;
	struct spu_context *ctx = file->private_data;

	ret = spu_acquire_saved(ctx);
	if (ret)
		return ret;
	ret = __spufs_signal1_read(ctx, buf, len, pos);
	spu_release_saved(ctx);

	return ret;
}

static ssize_t spufs_signal1_write(struct file *file,
			const char __user *buf, size_t len, loff_t *pos)
{
	struct spu_context *ctx;
	ssize_t ret;
	u32 data;

	ctx = file->private_data;

	if (len < 4)
		return -EINVAL;

	if (copy_from_user(&data, buf, 4))
		return -EFAULT;

	ret = spu_acquire(ctx);
	if (ret)
		return ret;
	ctx->ops->signal1_write(ctx, data);
	spu_release(ctx);

	return 4;
}
static vm_fault_t
spufs_signal1_mmap_fault(struct vm_fault *vmf)
{
#if SPUFS_SIGNAL_MAP_SIZE == 0x1000
	return spufs_ps_fault(vmf, 0x14000, SPUFS_SIGNAL_MAP_SIZE);
#elif SPUFS_SIGNAL_MAP_SIZE == 0x10000
	/* For 64k pages, both signal1 and signal2 can be used to mmap the whole
	 * signal 1 and 2 area
	 */
	return spufs_ps_fault(vmf, 0x10000, SPUFS_SIGNAL_MAP_SIZE);
#else
#error unsupported page size
#endif
}

static const struct vm_operations_struct spufs_signal1_mmap_vmops = {
	.fault = spufs_signal1_mmap_fault,
};

static int spufs_signal1_mmap(struct file *file, struct vm_area_struct *vma)
{
	if (!(vma->vm_flags & VM_SHARED))
		return -EINVAL;

	vma->vm_flags |= VM_IO | VM_PFNMAP;
	vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);

	vma->vm_ops = &spufs_signal1_mmap_vmops;
	return 0;
}

static const struct file_operations spufs_signal1_fops = {
	.open = spufs_signal1_open,
	.release = spufs_signal1_release,
	.read = spufs_signal1_read,
	.write = spufs_signal1_write,
	.mmap = spufs_signal1_mmap,
	.llseek = no_llseek,
};

static const struct file_operations spufs_signal1_nosched_fops = {
	.open = spufs_signal1_open,
	.release = spufs_signal1_release,
	.write = spufs_signal1_write,
	.mmap = spufs_signal1_mmap,
	.llseek = no_llseek,
};
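
/*
 * The nosched variants omit .read: reading the saved signal value goes
 * through spu_acquire_saved(), which would schedule the context out and
 * is not permitted for SPU_CREATE_NOSCHED contexts.
 */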
static int spufs_signal2_open(struct inode *inode, struct file *file)
{
	struct spufs_inode_info *i = SPUFS_I(inode);
	struct spu_context *ctx = i->i_ctx;

	mutex_lock(&ctx->mapping_lock);
	file->private_data = ctx;
	if (!i->i_openers++)
		ctx->signal2 = inode->i_mapping;
	mutex_unlock(&ctx->mapping_lock);
	return nonseekable_open(inode, file);
}

static int
spufs_signal2_release(struct inode *inode, struct file *file)
{
	struct spufs_inode_info *i = SPUFS_I(inode);
	struct spu_context *ctx = i->i_ctx;

	mutex_lock(&ctx->mapping_lock);
	if (!--i->i_openers)
		ctx->signal2 = NULL;
	mutex_unlock(&ctx->mapping_lock);
	return 0;
}

static ssize_t __spufs_signal2_read(struct spu_context *ctx, char __user *buf,
			size_t len, loff_t *pos)
{
	int ret = 0;
	u32 data;

	if (len < 4)
		return -EINVAL;

	if (ctx->csa.spu_chnlcnt_RW[4]) {
		data = ctx->csa.spu_chnldata_RW[4];
		ret = 4;
	}

	if (!ret)
		goto out;

	if (copy_to_user(buf, &data, 4))
		return -EFAULT;

out:
	return ret;
}

static ssize_t spufs_signal2_read(struct file *file, char __user *buf,
			size_t len, loff_t *pos)
{
	struct spu_context *ctx = file->private_data;
	int ret;

	ret = spu_acquire_saved(ctx);
	if (ret)
		return ret;
	ret = __spufs_signal2_read(ctx, buf, len, pos);
	spu_release_saved(ctx);

	return ret;
}

static ssize_t spufs_signal2_write(struct file *file,
			const char __user *buf, size_t len, loff_t *pos)
{
	struct spu_context *ctx;
	ssize_t ret;
	u32 data;

	ctx = file->private_data;

	if (len < 4)
		return -EINVAL;

	if (copy_from_user(&data, buf, 4))
		return -EFAULT;

	ret = spu_acquire(ctx);
	if (ret)
		return ret;
	ctx->ops->signal2_write(ctx, data);
	spu_release(ctx);

	return 4;
}
#if SPUFS_MMAP_4K
static vm_fault_t
spufs_signal2_mmap_fault(struct vm_fault *vmf)
{
#if SPUFS_SIGNAL_MAP_SIZE == 0x1000
	return spufs_ps_fault(vmf, 0x1c000, SPUFS_SIGNAL_MAP_SIZE);
#elif SPUFS_SIGNAL_MAP_SIZE == 0x10000
	/* For 64k pages, both signal1 and signal2 can be used to mmap the whole
	 * signal 1 and 2 area
	 */
	return spufs_ps_fault(vmf, 0x10000, SPUFS_SIGNAL_MAP_SIZE);
#else
#error unsupported page size
#endif
}

static const struct vm_operations_struct spufs_signal2_mmap_vmops = {
	.fault = spufs_signal2_mmap_fault,
};

static int spufs_signal2_mmap(struct file *file, struct vm_area_struct *vma)
{
	if (!(vma->vm_flags & VM_SHARED))
		return -EINVAL;

	vma->vm_flags |= VM_IO | VM_PFNMAP;
	vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);

	vma->vm_ops = &spufs_signal2_mmap_vmops;
	return 0;
}
#else /* SPUFS_MMAP_4K */
#define spufs_signal2_mmap NULL
#endif /* !SPUFS_MMAP_4K */

static const struct file_operations spufs_signal2_fops = {
	.open = spufs_signal2_open,
	.release = spufs_signal2_release,
	.read = spufs_signal2_read,
	.write = spufs_signal2_write,
	.mmap = spufs_signal2_mmap,
	.llseek = no_llseek,
};

static const struct file_operations spufs_signal2_nosched_fops = {
	.open = spufs_signal2_open,
	.release = spufs_signal2_release,
	.write = spufs_signal2_write,
	.mmap = spufs_signal2_mmap,
	.llseek = no_llseek,
};
/*
 * This is a wrapper around DEFINE_SIMPLE_ATTRIBUTE which does the
 * work of acquiring (or not) the SPU context before calling through
 * to the actual get routine. The set routine is called directly.
 */
#define SPU_ATTR_NOACQUIRE	0
#define SPU_ATTR_ACQUIRE	1
#define SPU_ATTR_ACQUIRE_SAVED	2

#define DEFINE_SPUFS_ATTRIBUTE(__name, __get, __set, __fmt, __acquire)	\
static int __##__get(void *data, u64 *val)				\
{									\
	struct spu_context *ctx = data;					\
	int ret = 0;							\
									\
	if (__acquire == SPU_ATTR_ACQUIRE) {				\
		ret = spu_acquire(ctx);					\
		if (ret)						\
			return ret;					\
		*val = __get(ctx);					\
		spu_release(ctx);					\
	} else if (__acquire == SPU_ATTR_ACQUIRE_SAVED)	{		\
		ret = spu_acquire_saved(ctx);				\
		if (ret)						\
			return ret;					\
		*val = __get(ctx);					\
		spu_release_saved(ctx);					\
	} else								\
		*val = __get(ctx);					\
									\
	return 0;							\
}									\
DEFINE_SPUFS_SIMPLE_ATTRIBUTE(__name, __##__get, __set, __fmt);
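
/*
 * The three acquire modes: SPU_ATTR_NOACQUIRE reads with no locking,
 * SPU_ATTR_ACQUIRE takes the context in its current state, and
 * SPU_ATTR_ACQUIRE_SAVED forces the context into the saved state so
 * the value can be read from the context save area.
 */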
static int spufs_signal1_type_set(void *data, u64 val)
{
	struct spu_context *ctx = data;
	int ret;

	ret = spu_acquire(ctx);
	if (ret)
		return ret;
	ctx->ops->signal1_type_set(ctx, val);
	spu_release(ctx);

	return 0;
}

static u64 spufs_signal1_type_get(struct spu_context *ctx)
{
	return ctx->ops->signal1_type_get(ctx);
}
DEFINE_SPUFS_ATTRIBUTE(spufs_signal1_type, spufs_signal1_type_get,
		       spufs_signal1_type_set, "%llu\n", SPU_ATTR_ACQUIRE);

static int spufs_signal2_type_set(void *data, u64 val)
{
	struct spu_context *ctx = data;
	int ret;

	ret = spu_acquire(ctx);
	if (ret)
		return ret;
	ctx->ops->signal2_type_set(ctx, val);
	spu_release(ctx);

	return 0;
}

static u64 spufs_signal2_type_get(struct spu_context *ctx)
{
	return ctx->ops->signal2_type_get(ctx);
}
DEFINE_SPUFS_ATTRIBUTE(spufs_signal2_type, spufs_signal2_type_get,
		       spufs_signal2_type_set, "%llu\n", SPU_ATTR_ACQUIRE);
#if SPUFS_MMAP_4K
static vm_fault_t
spufs_mss_mmap_fault(struct vm_fault *vmf)
{
	return spufs_ps_fault(vmf, 0x0000, SPUFS_MSS_MAP_SIZE);
}

static const struct vm_operations_struct spufs_mss_mmap_vmops = {
	.fault = spufs_mss_mmap_fault,
};

/*
 * mmap support for problem state MFC DMA area [0x0000 - 0x0fff].
 */
static int spufs_mss_mmap(struct file *file, struct vm_area_struct *vma)
{
	if (!(vma->vm_flags & VM_SHARED))
		return -EINVAL;

	vma->vm_flags |= VM_IO | VM_PFNMAP;
	vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);

	vma->vm_ops = &spufs_mss_mmap_vmops;
	return 0;
}
#else /* SPUFS_MMAP_4K */
#define spufs_mss_mmap NULL
#endif /* !SPUFS_MMAP_4K */
static int spufs_mss_open(struct inode *inode, struct file *file)
{
	struct spufs_inode_info *i = SPUFS_I(inode);
	struct spu_context *ctx = i->i_ctx;

	file->private_data = i->i_ctx;

	mutex_lock(&ctx->mapping_lock);
	if (!i->i_openers++)
		ctx->mss = inode->i_mapping;
	mutex_unlock(&ctx->mapping_lock);
	return nonseekable_open(inode, file);
}

static int
spufs_mss_release(struct inode *inode, struct file *file)
{
	struct spufs_inode_info *i = SPUFS_I(inode);
	struct spu_context *ctx = i->i_ctx;

	mutex_lock(&ctx->mapping_lock);
	if (!--i->i_openers)
		ctx->mss = NULL;
	mutex_unlock(&ctx->mapping_lock);
	return 0;
}

static const struct file_operations spufs_mss_fops = {
	.open	 = spufs_mss_open,
	.release = spufs_mss_release,
	.mmap	 = spufs_mss_mmap,
	.llseek  = no_llseek,
};
static vm_fault_t
spufs_psmap_mmap_fault(struct vm_fault *vmf)
{
	return spufs_ps_fault(vmf, 0x0000, SPUFS_PS_MAP_SIZE);
}

static const struct vm_operations_struct spufs_psmap_mmap_vmops = {
	.fault = spufs_psmap_mmap_fault,
};

/*
 * mmap support for full problem state area [0x00000 - 0x1ffff].
 */
static int spufs_psmap_mmap(struct file *file, struct vm_area_struct *vma)
{
	if (!(vma->vm_flags & VM_SHARED))
		return -EINVAL;

	vma->vm_flags |= VM_IO | VM_PFNMAP;
	vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);

	vma->vm_ops = &spufs_psmap_mmap_vmops;
	return 0;
}

static int spufs_psmap_open(struct inode *inode, struct file *file)
{
	struct spufs_inode_info *i = SPUFS_I(inode);
	struct spu_context *ctx = i->i_ctx;

	mutex_lock(&ctx->mapping_lock);
	file->private_data = i->i_ctx;
	if (!i->i_openers++)
		ctx->psmap = inode->i_mapping;
	mutex_unlock(&ctx->mapping_lock);
	return nonseekable_open(inode, file);
}

static int
spufs_psmap_release(struct inode *inode, struct file *file)
{
	struct spufs_inode_info *i = SPUFS_I(inode);
	struct spu_context *ctx = i->i_ctx;

	mutex_lock(&ctx->mapping_lock);
	if (!--i->i_openers)
		ctx->psmap = NULL;
	mutex_unlock(&ctx->mapping_lock);
	return 0;
}

static const struct file_operations spufs_psmap_fops = {
	.open	 = spufs_psmap_open,
	.release = spufs_psmap_release,
	.mmap	 = spufs_psmap_mmap,
	.llseek  = no_llseek,
};
#if SPUFS_MMAP_4K
static vm_fault_t
spufs_mfc_mmap_fault(struct vm_fault *vmf)
{
	return spufs_ps_fault(vmf, 0x3000, SPUFS_MFC_MAP_SIZE);
}

static const struct vm_operations_struct spufs_mfc_mmap_vmops = {
	.fault = spufs_mfc_mmap_fault,
};

/*
 * mmap support for problem state MFC DMA area [0x0000 - 0x0fff].
 */
static int spufs_mfc_mmap(struct file *file, struct vm_area_struct *vma)
{
	if (!(vma->vm_flags & VM_SHARED))
		return -EINVAL;

	vma->vm_flags |= VM_IO | VM_PFNMAP;
	vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);

	vma->vm_ops = &spufs_mfc_mmap_vmops;
	return 0;
}
#else /* SPUFS_MMAP_4K */
#define spufs_mfc_mmap NULL
#endif /* !SPUFS_MMAP_4K */
static int spufs_mfc_open(struct inode *inode, struct file *file)
{
	struct spufs_inode_info *i = SPUFS_I(inode);
	struct spu_context *ctx = i->i_ctx;

	/* we don't want to deal with DMA into other processes */
	if (ctx->owner != current->mm)
		return -EINVAL;

	if (atomic_read(&inode->i_count) != 1)
		return -EBUSY;

	mutex_lock(&ctx->mapping_lock);
	file->private_data = ctx;
	if (!i->i_openers++)
		ctx->mfc = inode->i_mapping;
	mutex_unlock(&ctx->mapping_lock);
	return nonseekable_open(inode, file);
}

static int
spufs_mfc_release(struct inode *inode, struct file *file)
{
	struct spufs_inode_info *i = SPUFS_I(inode);
	struct spu_context *ctx = i->i_ctx;

	mutex_lock(&ctx->mapping_lock);
	if (!--i->i_openers)
		ctx->mfc = NULL;
	mutex_unlock(&ctx->mapping_lock);
	return 0;
}

/* interrupt-level mfc callback function. */
void spufs_mfc_callback(struct spu *spu)
{
	struct spu_context *ctx = spu->ctx;

	if (ctx)
		wake_up_all(&ctx->mfc_wq);
}
static int spufs_read_mfc_tagstatus(struct spu_context *ctx, u32 *status)
{
	/* See if there is one tag group is complete */
	/* FIXME we need locking around tagwait */
	*status = ctx->ops->read_mfc_tagstatus(ctx) & ctx->tagwait;
	ctx->tagwait &= ~*status;
	if (*status)
		return 1;

	/* enable interrupt waiting for any tag group,
	   may silently fail if interrupts are already enabled */
	ctx->ops->set_mfc_query(ctx, ctx->tagwait, 1);
	return 0;
}

static ssize_t spufs_mfc_read(struct file *file, char __user *buffer,
			size_t size, loff_t *pos)
{
	struct spu_context *ctx = file->private_data;
	int ret = -EINVAL;
	u32 status;

	if (size != 4)
		goto out;

	ret = spu_acquire(ctx);
	if (ret)
		return ret;

	ret = -EINVAL;
	if (file->f_flags & O_NONBLOCK) {
		status = ctx->ops->read_mfc_tagstatus(ctx);
		if (!(status & ctx->tagwait))
			ret = -EAGAIN;
		else
			/* XXX(hch): shouldn't we clear ret here? */
			ctx->tagwait &= ~status;
	} else {
		ret = spufs_wait(ctx->mfc_wq,
			   spufs_read_mfc_tagstatus(ctx, &status));
		if (ret)
			goto out;
	}
	spu_release(ctx);

	ret = 4;
	if (copy_to_user(buffer, &status, 4))
		ret = -EFAULT;

out:
	return ret;
}
*cmd
)
1560 pr_debug("queueing DMA %x %llx %x %x %x\n", cmd
->lsa
,
1561 cmd
->ea
, cmd
->size
, cmd
->tag
, cmd
->cmd
);
1572 pr_debug("invalid DMA opcode %x\n", cmd
->cmd
);
1576 if ((cmd
->lsa
& 0xf) != (cmd
->ea
&0xf)) {
1577 pr_debug("invalid DMA alignment, ea %llx lsa %x\n",
1582 switch (cmd
->size
& 0xf) {
1603 pr_debug("invalid DMA alignment %x for size %x\n",
1604 cmd
->lsa
& 0xf, cmd
->size
);
1608 if (cmd
->size
> 16 * 1024) {
1609 pr_debug("invalid DMA size %x\n", cmd
->size
);
1613 if (cmd
->tag
& 0xfff0) {
1614 /* we reserve the higher tag numbers for kernel use */
1615 pr_debug("invalid DMA tag\n");
1620 /* not supported in this version */
1621 pr_debug("invalid DMA class\n");
static int spu_send_mfc_command(struct spu_context *ctx,
				struct mfc_dma_command cmd,
				int *error)
{
	*error = ctx->ops->send_mfc_command(ctx, &cmd);
	if (*error == -EAGAIN) {
		/* wait for any tag group to complete
		   so we have space for the new command */
		ctx->ops->set_mfc_query(ctx, ctx->tagwait, 1);
		/* try again, because the queue might be
		   empty again */
		*error = ctx->ops->send_mfc_command(ctx, &cmd);
		if (*error == -EAGAIN)
			return 0;
	}
	return 1;
}

static ssize_t spufs_mfc_write(struct file *file, const char __user *buffer,
			size_t size, loff_t *pos)
{
	struct spu_context *ctx = file->private_data;
	struct mfc_dma_command cmd;
	int ret = -EINVAL;

	if (size != sizeof cmd)
		goto out;

	ret = -EFAULT;
	if (copy_from_user(&cmd, buffer, sizeof cmd))
		goto out;

	ret = spufs_check_valid_dma(&cmd);
	if (ret)
		goto out;

	ret = spu_acquire(ctx);
	if (ret)
		goto out;

	ret = spufs_wait(ctx->run_wq, ctx->state == SPU_STATE_RUNNABLE);
	if (ret)
		goto out;

	if (file->f_flags & O_NONBLOCK) {
		ret = ctx->ops->send_mfc_command(ctx, &cmd);
	} else {
		int status;
		ret = spufs_wait(ctx->mfc_wq,
				 spu_send_mfc_command(ctx, cmd, &status));
		if (ret)
			goto out;
		if (status)
			ret = status;
	}

	if (ret)
		goto out_unlock;

	ctx->tagwait |= 1 << cmd.tag;
	ret = size;

out_unlock:
	spu_release(ctx);
out:
	return ret;
}
static __poll_t spufs_mfc_poll(struct file *file, poll_table *wait)
{
	struct spu_context *ctx = file->private_data;
	u32 free_elements, tagstatus;
	__poll_t mask;

	poll_wait(file, &ctx->mfc_wq, wait);

	/*
	 * For now keep this uninterruptible and also ignore the rule
	 * that poll should not sleep.  Will be fixed later.
	 */
	mutex_lock(&ctx->state_mutex);
	ctx->ops->set_mfc_query(ctx, ctx->tagwait, 2);
	free_elements = ctx->ops->get_mfc_free_elements(ctx);
	tagstatus = ctx->ops->read_mfc_tagstatus(ctx);
	spu_release(ctx);

	mask = 0;
	if (free_elements & 0xffff)
		mask |= EPOLLOUT | EPOLLWRNORM;
	if (tagstatus & ctx->tagwait)
		mask |= EPOLLIN | EPOLLRDNORM;

	pr_debug("%s: free %d tagstatus %d tagwait %d\n", __func__,
		free_elements, tagstatus, ctx->tagwait);

	return mask;
}
static int spufs_mfc_flush(struct file *file, fl_owner_t id)
{
	struct spu_context *ctx = file->private_data;
	int ret;

	ret = spu_acquire(ctx);
	if (ret)
		goto out;
#if 0
/* this currently hangs */
	ret = spufs_wait(ctx->mfc_wq,
			 ctx->ops->set_mfc_query(ctx, ctx->tagwait, 2));
	if (ret)
		goto out;
	ret = spufs_wait(ctx->mfc_wq,
			 ctx->ops->read_mfc_tagstatus(ctx) == ctx->tagwait);
	if (ret)
		goto out;
#else
	ret = 0;
#endif
	spu_release(ctx);
out:
	return ret;
}

static int spufs_mfc_fsync(struct file *file, loff_t start, loff_t end, int datasync)
{
	struct inode *inode = file_inode(file);
	int err = file_write_and_wait_range(file, start, end);
	if (!err) {
		inode_lock(inode);
		err = spufs_mfc_flush(file, NULL);
		inode_unlock(inode);
	}
	return err;
}

static const struct file_operations spufs_mfc_fops = {
	.open	 = spufs_mfc_open,
	.release = spufs_mfc_release,
	.read	 = spufs_mfc_read,
	.write	 = spufs_mfc_write,
	.poll	 = spufs_mfc_poll,
	.flush	 = spufs_mfc_flush,
	.fsync	 = spufs_mfc_fsync,
	.mmap	 = spufs_mfc_mmap,
	.llseek  = no_llseek,
};
static int spufs_npc_set(void *data, u64 val)
{
	struct spu_context *ctx = data;
	int ret;

	ret = spu_acquire(ctx);
	if (ret)
		return ret;
	ctx->ops->npc_write(ctx, val);
	spu_release(ctx);

	return 0;
}

static u64 spufs_npc_get(struct spu_context *ctx)
{
	return ctx->ops->npc_read(ctx);
}
DEFINE_SPUFS_ATTRIBUTE(spufs_npc_ops, spufs_npc_get, spufs_npc_set,
		       "0x%llx\n", SPU_ATTR_ACQUIRE);
static int spufs_decr_set(void *data, u64 val)
{
	struct spu_context *ctx = data;
	struct spu_lscsa *lscsa = ctx->csa.lscsa;
	int ret;

	ret = spu_acquire_saved(ctx);
	if (ret)
		return ret;
	lscsa->decr.slot[0] = (u32) val;
	spu_release_saved(ctx);

	return 0;
}

static u64 spufs_decr_get(struct spu_context *ctx)
{
	struct spu_lscsa *lscsa = ctx->csa.lscsa;
	return lscsa->decr.slot[0];
}
DEFINE_SPUFS_ATTRIBUTE(spufs_decr_ops, spufs_decr_get, spufs_decr_set,
		       "0x%llx\n", SPU_ATTR_ACQUIRE_SAVED);
static int spufs_decr_status_set(void *data, u64 val)
{
	struct spu_context *ctx = data;
	int ret;

	ret = spu_acquire_saved(ctx);
	if (ret)
		return ret;
	if (val)
		ctx->csa.priv2.mfc_control_RW |= MFC_CNTL_DECREMENTER_RUNNING;
	else
		ctx->csa.priv2.mfc_control_RW &= ~MFC_CNTL_DECREMENTER_RUNNING;
	spu_release_saved(ctx);

	return 0;
}

static u64 spufs_decr_status_get(struct spu_context *ctx)
{
	if (ctx->csa.priv2.mfc_control_RW & MFC_CNTL_DECREMENTER_RUNNING)
		return SPU_DECR_STATUS_RUNNING;
	else
		return 0;
}
DEFINE_SPUFS_ATTRIBUTE(spufs_decr_status_ops, spufs_decr_status_get,
		       spufs_decr_status_set, "0x%llx\n",
		       SPU_ATTR_ACQUIRE_SAVED);
static int spufs_event_mask_set(void *data, u64 val)
{
	struct spu_context *ctx = data;
	struct spu_lscsa *lscsa = ctx->csa.lscsa;
	int ret;

	ret = spu_acquire_saved(ctx);
	if (ret)
		return ret;
	lscsa->event_mask.slot[0] = (u32) val;
	spu_release_saved(ctx);

	return 0;
}

static u64 spufs_event_mask_get(struct spu_context *ctx)
{
	struct spu_lscsa *lscsa = ctx->csa.lscsa;
	return lscsa->event_mask.slot[0];
}

DEFINE_SPUFS_ATTRIBUTE(spufs_event_mask_ops, spufs_event_mask_get,
		       spufs_event_mask_set, "0x%llx\n",
		       SPU_ATTR_ACQUIRE_SAVED);
static u64 spufs_event_status_get(struct spu_context *ctx)
{
	struct spu_state *state = &ctx->csa;
	u64 stat;
	stat = state->spu_chnlcnt_RW[0];
	if (stat)
		return state->spu_chnldata_RW[0];
	return 0;
}
DEFINE_SPUFS_ATTRIBUTE(spufs_event_status_ops, spufs_event_status_get,
		       NULL, "0x%llx\n", SPU_ATTR_ACQUIRE_SAVED)
static int spufs_srr0_set(void *data, u64 val)
{
	struct spu_context *ctx = data;
	struct spu_lscsa *lscsa = ctx->csa.lscsa;
	int ret;

	ret = spu_acquire_saved(ctx);
	if (ret)
		return ret;
	lscsa->srr0.slot[0] = (u32) val;
	spu_release_saved(ctx);

	return 0;
}

static u64 spufs_srr0_get(struct spu_context *ctx)
{
	struct spu_lscsa *lscsa = ctx->csa.lscsa;
	return lscsa->srr0.slot[0];
}
DEFINE_SPUFS_ATTRIBUTE(spufs_srr0_ops, spufs_srr0_get, spufs_srr0_set,
		       "0x%llx\n", SPU_ATTR_ACQUIRE_SAVED)
static u64 spufs_id_get(struct spu_context *ctx)
{
	u64 num;

	if (ctx->state == SPU_STATE_RUNNABLE)
		num = ctx->spu->number;
	else
		num = (unsigned int)-1;

	return num;
}
DEFINE_SPUFS_ATTRIBUTE(spufs_id_ops, spufs_id_get, NULL, "0x%llx\n",
		       SPU_ATTR_ACQUIRE)
spufs_object_id_get(struct spu_context
*ctx
)
1924 /* FIXME: Should there really be no locking here? */
1925 return ctx
->object_id
;
1928 static int spufs_object_id_set(void *data
, u64 id
)
1930 struct spu_context
*ctx
= data
;
1931 ctx
->object_id
= id
;
1936 DEFINE_SPUFS_ATTRIBUTE(spufs_object_id_ops
, spufs_object_id_get
,
1937 spufs_object_id_set
, "0x%llx\n", SPU_ATTR_NOACQUIRE
);
1939 static u64
spufs_lslr_get(struct spu_context
*ctx
)
1941 return ctx
->csa
.priv2
.spu_lslr_RW
;
1943 DEFINE_SPUFS_ATTRIBUTE(spufs_lslr_ops
, spufs_lslr_get
, NULL
, "0x%llx\n",
1944 SPU_ATTR_ACQUIRE_SAVED
);
static int spufs_info_open(struct inode *inode, struct file *file)
{
	struct spufs_inode_info *i = SPUFS_I(inode);
	struct spu_context *ctx = i->i_ctx;
	file->private_data = ctx;
	return 0;
}

static int spufs_caps_show(struct seq_file *s, void *private)
{
	struct spu_context *ctx = s->private;

	if (!(ctx->flags & SPU_CREATE_NOSCHED))
		seq_puts(s, "sched\n");
	if (!(ctx->flags & SPU_CREATE_ISOLATE))
		seq_puts(s, "step\n");
	return 0;
}

static int spufs_caps_open(struct inode *inode, struct file *file)
{
	return single_open(file, spufs_caps_show, SPUFS_I(inode)->i_ctx);
}

static const struct file_operations spufs_caps_fops = {
	.open		= spufs_caps_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};
static ssize_t __spufs_mbox_info_read(struct spu_context *ctx,
			char __user *buf, size_t len, loff_t *pos)
{
	u32 data;

	/* EOF if there's no entry in the mbox */
	if (!(ctx->csa.prob.mb_stat_R & 0x0000ff))
		return 0;

	data = ctx->csa.prob.pu_mb_R;

	return simple_read_from_buffer(buf, len, pos, &data, sizeof data);
}

static ssize_t spufs_mbox_info_read(struct file *file, char __user *buf,
				   size_t len, loff_t *pos)
{
	int ret;
	struct spu_context *ctx = file->private_data;

	if (!access_ok(buf, len))
		return -EFAULT;

	ret = spu_acquire_saved(ctx);
	if (ret)
		return ret;
	spin_lock(&ctx->csa.register_lock);
	ret = __spufs_mbox_info_read(ctx, buf, len, pos);
	spin_unlock(&ctx->csa.register_lock);
	spu_release_saved(ctx);

	return ret;
}

static const struct file_operations spufs_mbox_info_fops = {
	.open = spufs_info_open,
	.read = spufs_mbox_info_read,
	.llseek  = generic_file_llseek,
};
static ssize_t __spufs_ibox_info_read(struct spu_context *ctx,
			char __user *buf, size_t len, loff_t *pos)
{
	u32 data;

	/* EOF if there's no entry in the ibox */
	if (!(ctx->csa.prob.mb_stat_R & 0xff0000))
		return 0;

	data = ctx->csa.priv2.puint_mb_R;

	return simple_read_from_buffer(buf, len, pos, &data, sizeof data);
}

static ssize_t spufs_ibox_info_read(struct file *file, char __user *buf,
				   size_t len, loff_t *pos)
{
	struct spu_context *ctx = file->private_data;
	int ret;

	if (!access_ok(buf, len))
		return -EFAULT;

	ret = spu_acquire_saved(ctx);
	if (ret)
		return ret;
	spin_lock(&ctx->csa.register_lock);
	ret = __spufs_ibox_info_read(ctx, buf, len, pos);
	spin_unlock(&ctx->csa.register_lock);
	spu_release_saved(ctx);

	return ret;
}

static const struct file_operations spufs_ibox_info_fops = {
	.open = spufs_info_open,
	.read = spufs_ibox_info_read,
	.llseek  = generic_file_llseek,
};
static ssize_t __spufs_wbox_info_read(struct spu_context *ctx,
			char __user *buf, size_t len, loff_t *pos)
{
	int i, cnt;
	u32 data[4];
	u32 wbox_stat;

	wbox_stat = ctx->csa.prob.mb_stat_R;
	cnt = 4 - ((wbox_stat & 0x00ff00) >> 8);
	for (i = 0; i < cnt; i++) {
		data[i] = ctx->csa.spu_mailbox_data[i];
	}

	return simple_read_from_buffer(buf, len, pos, &data,
				cnt * sizeof(u32));
}

static ssize_t spufs_wbox_info_read(struct file *file, char __user *buf,
				   size_t len, loff_t *pos)
{
	struct spu_context *ctx = file->private_data;
	int ret;

	if (!access_ok(buf, len))
		return -EFAULT;

	ret = spu_acquire_saved(ctx);
	if (ret)
		return ret;
	spin_lock(&ctx->csa.register_lock);
	ret = __spufs_wbox_info_read(ctx, buf, len, pos);
	spin_unlock(&ctx->csa.register_lock);
	spu_release_saved(ctx);

	return ret;
}

static const struct file_operations spufs_wbox_info_fops = {
	.open = spufs_info_open,
	.read = spufs_wbox_info_read,
	.llseek  = generic_file_llseek,
};
static ssize_t __spufs_dma_info_read(struct spu_context *ctx,
			char __user *buf, size_t len, loff_t *pos)
{
	struct spu_dma_info info;
	struct mfc_cq_sr *qp, *spuqp;
	int i;

	info.dma_info_type = ctx->csa.priv2.spu_tag_status_query_RW;
	info.dma_info_mask = ctx->csa.lscsa->tag_mask.slot[0];
	info.dma_info_status = ctx->csa.spu_chnldata_RW[24];
	info.dma_info_stall_and_notify = ctx->csa.spu_chnldata_RW[25];
	info.dma_info_atomic_command_status = ctx->csa.spu_chnldata_RW[27];
	for (i = 0; i < 16; i++) {
		qp = &info.dma_info_command_data[i];
		spuqp = &ctx->csa.priv2.spuq[i];

		qp->mfc_cq_data0_RW = spuqp->mfc_cq_data0_RW;
		qp->mfc_cq_data1_RW = spuqp->mfc_cq_data1_RW;
		qp->mfc_cq_data2_RW = spuqp->mfc_cq_data2_RW;
		qp->mfc_cq_data3_RW = spuqp->mfc_cq_data3_RW;
	}

	return simple_read_from_buffer(buf, len, pos, &info,
				sizeof info);
}

static ssize_t spufs_dma_info_read(struct file *file, char __user *buf,
			      size_t len, loff_t *pos)
{
	struct spu_context *ctx = file->private_data;
	int ret;

	if (!access_ok(buf, len))
		return -EFAULT;

	ret = spu_acquire_saved(ctx);
	if (ret)
		return ret;
	spin_lock(&ctx->csa.register_lock);
	ret = __spufs_dma_info_read(ctx, buf, len, pos);
	spin_unlock(&ctx->csa.register_lock);
	spu_release_saved(ctx);

	return ret;
}

static const struct file_operations spufs_dma_info_fops = {
	.open = spufs_info_open,
	.read = spufs_dma_info_read,
	.llseek = no_llseek,
};
static ssize_t __spufs_proxydma_info_read(struct spu_context *ctx,
			char __user *buf, size_t len, loff_t *pos)
{
	struct spu_proxydma_info info;
	struct mfc_cq_sr *qp, *puqp;
	int ret = sizeof info;
	int i;

	if (len < ret)
		return -EINVAL;

	if (!access_ok(buf, len))
		return -EFAULT;

	info.proxydma_info_type = ctx->csa.prob.dma_querytype_RW;
	info.proxydma_info_mask = ctx->csa.prob.dma_querymask_RW;
	info.proxydma_info_status = ctx->csa.prob.dma_tagstatus_R;
	for (i = 0; i < 8; i++) {
		qp = &info.proxydma_info_command_data[i];
		puqp = &ctx->csa.priv2.puq[i];

		qp->mfc_cq_data0_RW = puqp->mfc_cq_data0_RW;
		qp->mfc_cq_data1_RW = puqp->mfc_cq_data1_RW;
		qp->mfc_cq_data2_RW = puqp->mfc_cq_data2_RW;
		qp->mfc_cq_data3_RW = puqp->mfc_cq_data3_RW;
	}

	return simple_read_from_buffer(buf, len, pos, &info,
				sizeof info);
}

static ssize_t spufs_proxydma_info_read(struct file *file, char __user *buf,
				   size_t len, loff_t *pos)
{
	struct spu_context *ctx = file->private_data;
	int ret;

	ret = spu_acquire_saved(ctx);
	if (ret)
		return ret;
	spin_lock(&ctx->csa.register_lock);
	ret = __spufs_proxydma_info_read(ctx, buf, len, pos);
	spin_unlock(&ctx->csa.register_lock);
	spu_release_saved(ctx);

	return ret;
}

static const struct file_operations spufs_proxydma_info_fops = {
	.open = spufs_info_open,
	.read = spufs_proxydma_info_read,
	.llseek = no_llseek,
};
static int spufs_show_tid(struct seq_file *s, void *private)
{
	struct spu_context *ctx = s->private;

	seq_printf(s, "%d\n", ctx->tid);
	return 0;
}

static int spufs_tid_open(struct inode *inode, struct file *file)
{
	return single_open(file, spufs_show_tid, SPUFS_I(inode)->i_ctx);
}

static const struct file_operations spufs_tid_fops = {
	.open		= spufs_tid_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};
static const char *ctx_state_names[] = {
	"user", "system", "iowait", "loaded"
};

static unsigned long long spufs_acct_time(struct spu_context *ctx,
		enum spu_utilization_state state)
{
	unsigned long long time = ctx->stats.times[state];

	/*
	 * In general, utilization statistics are updated by the controlling
	 * thread as the spu context moves through various well defined
	 * state transitions, but if the context is lazily loaded its
	 * utilization statistics are not updated as the controlling thread
	 * is not tightly coupled with the execution of the spu context.  We
	 * calculate and apply the time delta from the last recorded state
	 * of the spu context.
	 */
	if (ctx->spu && ctx->stats.util_state == state) {
		time += ktime_get_ns() - ctx->stats.tstamp;
	}

	return time / NSEC_PER_MSEC;
}
static unsigned long long spufs_slb_flts(struct spu_context *ctx)
{
	unsigned long long slb_flts = ctx->stats.slb_flt;

	if (ctx->state == SPU_STATE_RUNNABLE) {
		slb_flts += (ctx->spu->stats.slb_flt -
			     ctx->stats.slb_flt_base);
	}

	return slb_flts;
}

static unsigned long long spufs_class2_intrs(struct spu_context *ctx)
{
	unsigned long long class2_intrs = ctx->stats.class2_intr;

	if (ctx->state == SPU_STATE_RUNNABLE) {
		class2_intrs += (ctx->spu->stats.class2_intr -
				 ctx->stats.class2_intr_base);
	}

	return class2_intrs;
}
static int spufs_show_stat(struct seq_file *s, void *private)
{
	struct spu_context *ctx = s->private;
	int ret;

	ret = spu_acquire(ctx);
	if (ret)
		return ret;

	seq_printf(s, "%s %llu %llu %llu %llu "
		"%llu %llu %llu %llu %llu %llu %llu %llu\n",
		ctx_state_names[ctx->stats.util_state],
		spufs_acct_time(ctx, SPU_UTIL_USER),
		spufs_acct_time(ctx, SPU_UTIL_SYSTEM),
		spufs_acct_time(ctx, SPU_UTIL_IOWAIT),
		spufs_acct_time(ctx, SPU_UTIL_IDLE_LOADED),
		ctx->stats.vol_ctx_switch,
		ctx->stats.invol_ctx_switch,
		spufs_slb_flts(ctx),
		ctx->stats.hash_flt,
		0, /* min flt */
		0, /* maj flt */
		spufs_class2_intrs(ctx),
		ctx->stats.libassist);
	spu_release(ctx);
	return 0;
}

static int spufs_stat_open(struct inode *inode, struct file *file)
{
	return single_open(file, spufs_show_stat, SPUFS_I(inode)->i_ctx);
}

static const struct file_operations spufs_stat_fops = {
	.open		= spufs_stat_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};
static inline int spufs_switch_log_used(struct spu_context *ctx)
{
	return (ctx->switch_log->head - ctx->switch_log->tail) %
		SWITCH_LOG_BUFSIZE;
}

static inline int spufs_switch_log_avail(struct spu_context *ctx)
{
	return SWITCH_LOG_BUFSIZE - spufs_switch_log_used(ctx);
}
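
/*
 * head and tail stay in [0, SWITCH_LOG_BUFSIZE); their difference
 * modulo the buffer size is the number of unread entries. The writer
 * (spu_switch_log_notify) only appends while more than one slot is
 * free, so a full ring is never confused with an empty one.
 */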
static int spufs_switch_log_open(struct inode *inode, struct file *file)
{
	struct spu_context *ctx = SPUFS_I(inode)->i_ctx;
	int rc;

	rc = spu_acquire(ctx);
	if (rc)
		return rc;

	if (ctx->switch_log) {
		rc = -EBUSY;
		goto out;
	}

	ctx->switch_log = kmalloc(struct_size(ctx->switch_log, log,
				  SWITCH_LOG_BUFSIZE), GFP_KERNEL);

	if (!ctx->switch_log) {
		rc = -ENOMEM;
		goto out;
	}

	ctx->switch_log->head = ctx->switch_log->tail = 0;
	init_waitqueue_head(&ctx->switch_log->wait);
	rc = 0;

out:
	spu_release(ctx);
	return rc;
}

static int spufs_switch_log_release(struct inode *inode, struct file *file)
{
	struct spu_context *ctx = SPUFS_I(inode)->i_ctx;
	int rc;

	rc = spu_acquire(ctx);
	if (rc)
		return rc;

	kfree(ctx->switch_log);
	ctx->switch_log = NULL;
	spu_release(ctx);

	return 0;
}
static int switch_log_sprint(struct spu_context *ctx, char *tbuf, int n)
{
	struct switch_log_entry *p;

	p = ctx->switch_log->log + ctx->switch_log->tail % SWITCH_LOG_BUFSIZE;

	return snprintf(tbuf, n, "%llu.%09u %d %u %u %llu\n",
			(unsigned long long) p->tstamp.tv_sec,
			(unsigned int) p->tstamp.tv_nsec,
			p->spu_id,
			(unsigned int) p->type,
			(unsigned int) p->val,
			(unsigned long long) p->timebase);
}
static ssize_t spufs_switch_log_read(struct file *file, char __user *buf,
			     size_t len, loff_t *ppos)
{
	struct inode *inode = file_inode(file);
	struct spu_context *ctx = SPUFS_I(inode)->i_ctx;
	int error = 0, cnt = 0;

	if (!buf)
		return -EINVAL;

	error = spu_acquire(ctx);
	if (error)
		return error;

	while (cnt < len) {
		char tbuf[128];
		int width;

		if (spufs_switch_log_used(ctx) == 0) {
			if (cnt > 0) {
				/* If there's data ready to go, we can
				 * just return straight away */
				break;

			} else if (file->f_flags & O_NONBLOCK) {
				error = -EAGAIN;
				break;

			} else {
				/* spufs_wait will drop the mutex and
				 * re-acquire, but since we're in read(), the
				 * file cannot be _released (and so
				 * ctx->switch_log is stable).
				 */
				error = spufs_wait(ctx->switch_log->wait,
						spufs_switch_log_used(ctx) > 0);

				/* On error, spufs_wait returns without the
				 * state mutex held */
				if (error)
					return error;

				/* We may have had entries read from underneath
				 * us while we dropped the mutex in spufs_wait,
				 * so re-check */
				if (spufs_switch_log_used(ctx) == 0)
					continue;
			}
		}

		width = switch_log_sprint(ctx, tbuf, sizeof(tbuf));
		if (width < len)
			ctx->switch_log->tail =
				(ctx->switch_log->tail + 1) %
				 SWITCH_LOG_BUFSIZE;
		else
			/* If the record is greater than space available return
			 * partial buffer (so far) */
			break;

		error = copy_to_user(buf + cnt, tbuf, width);
		if (error)
			break;
		cnt += width;
	}

	spu_release(ctx);

	return cnt == 0 ? error : cnt;
}
static __poll_t spufs_switch_log_poll(struct file *file, poll_table *wait)
{
	struct inode *inode = file_inode(file);
	struct spu_context *ctx = SPUFS_I(inode)->i_ctx;
	__poll_t mask = 0;
	int rc;

	poll_wait(file, &ctx->switch_log->wait, wait);

	rc = spu_acquire(ctx);
	if (rc)
		return rc;

	if (spufs_switch_log_used(ctx) > 0)
		mask |= EPOLLIN;

	spu_release(ctx);

	return mask;
}

static const struct file_operations spufs_switch_log_fops = {
	.open		= spufs_switch_log_open,
	.read		= spufs_switch_log_read,
	.poll		= spufs_switch_log_poll,
	.release	= spufs_switch_log_release,
	.llseek		= no_llseek,
};
/**
 * Log a context switch event to a switch log reader.
 *
 * Must be called with ctx->state_mutex held.
 */
void spu_switch_log_notify(struct spu *spu, struct spu_context *ctx,
		u32 type, u32 val)
{
	if (!ctx->switch_log)
		return;

	if (spufs_switch_log_avail(ctx) > 1) {
		struct switch_log_entry *p;

		p = ctx->switch_log->log + ctx->switch_log->head;
		ktime_get_ts64(&p->tstamp);
		p->timebase = get_tb();
		p->spu_id = spu ? spu->number : -1;
		p->type = type;
		p->val = val;

		ctx->switch_log->head =
			(ctx->switch_log->head + 1) % SWITCH_LOG_BUFSIZE;
	}

	wake_up(&ctx->switch_log->wait);
}
static int spufs_show_ctx(struct seq_file *s, void *private)
{
	struct spu_context *ctx = s->private;
	u64 mfc_control_RW;

	mutex_lock(&ctx->state_mutex);
	if (ctx->spu) {
		struct spu *spu = ctx->spu;
		struct spu_priv2 __iomem *priv2 = spu->priv2;

		spin_lock_irq(&spu->register_lock);
		mfc_control_RW = in_be64(&priv2->mfc_control_RW);
		spin_unlock_irq(&spu->register_lock);
	} else {
		struct spu_state *csa = &ctx->csa;

		mfc_control_RW = csa->priv2.mfc_control_RW;
	}

	seq_printf(s, "%c flgs(%lx) sflgs(%lx) pri(%d) ts(%d) spu(%02d)"
		" %c %llx %llx %llx %llx %x %x\n",
		ctx->state == SPU_STATE_SAVED ? 'S' : 'R',
		ctx->flags,
		ctx->sched_flags,
		ctx->prio,
		ctx->time_slice,
		ctx->spu ? ctx->spu->number : -1,
		!list_empty(&ctx->rq) ? 'q' : ' ',
		ctx->csa.class_0_pending,
		ctx->csa.class_0_dar,
		ctx->csa.class_1_dsisr,
		mfc_control_RW,
		ctx->ops->runcntl_read(ctx),
		ctx->ops->status_read(ctx));

	mutex_unlock(&ctx->state_mutex);

	return 0;
}
static int spufs_ctx_open(struct inode *inode, struct file *file)
{
	return single_open(file, spufs_show_ctx, SPUFS_I(inode)->i_ctx);
}

static const struct file_operations spufs_ctx_fops = {
	.open		= spufs_ctx_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};
const struct spufs_tree_descr spufs_dir_contents[] = {
	{ "capabilities", &spufs_caps_fops, 0444, },
	{ "mem",  &spufs_mem_fops,  0666, LS_SIZE, },
	{ "regs", &spufs_regs_fops,  0666, sizeof(struct spu_reg128[128]), },
	{ "mbox", &spufs_mbox_fops, 0444, },
	{ "ibox", &spufs_ibox_fops, 0444, },
	{ "wbox", &spufs_wbox_fops, 0222, },
	{ "mbox_stat", &spufs_mbox_stat_fops, 0444, sizeof(u32), },
	{ "ibox_stat", &spufs_ibox_stat_fops, 0444, sizeof(u32), },
	{ "wbox_stat", &spufs_wbox_stat_fops, 0444, sizeof(u32), },
	{ "signal1", &spufs_signal1_fops, 0666, },
	{ "signal2", &spufs_signal2_fops, 0666, },
	{ "signal1_type", &spufs_signal1_type, 0666, },
	{ "signal2_type", &spufs_signal2_type, 0666, },
	{ "cntl", &spufs_cntl_fops,  0666, },
	{ "fpcr", &spufs_fpcr_fops, 0666, sizeof(struct spu_reg128), },
	{ "lslr", &spufs_lslr_ops, 0444, },
	{ "mfc", &spufs_mfc_fops, 0666, },
	{ "mss", &spufs_mss_fops, 0666, },
	{ "npc", &spufs_npc_ops, 0666, },
	{ "srr0", &spufs_srr0_ops, 0666, },
	{ "decr", &spufs_decr_ops, 0666, },
	{ "decr_status", &spufs_decr_status_ops, 0666, },
	{ "event_mask", &spufs_event_mask_ops, 0666, },
	{ "event_status", &spufs_event_status_ops, 0444, },
	{ "psmap", &spufs_psmap_fops, 0666, SPUFS_PS_MAP_SIZE, },
	{ "phys-id", &spufs_id_ops, 0666, },
	{ "object-id", &spufs_object_id_ops, 0666, },
	{ "mbox_info", &spufs_mbox_info_fops, 0444, sizeof(u32), },
	{ "ibox_info", &spufs_ibox_info_fops, 0444, sizeof(u32), },
	{ "wbox_info", &spufs_wbox_info_fops, 0444, sizeof(u32), },
	{ "dma_info", &spufs_dma_info_fops, 0444,
		sizeof(struct spu_dma_info), },
	{ "proxydma_info", &spufs_proxydma_info_fops, 0444,
		sizeof(struct spu_proxydma_info)},
	{ "tid", &spufs_tid_fops, 0444, },
	{ "stat", &spufs_stat_fops, 0444, },
	{ "switch_log", &spufs_switch_log_fops, 0444 },
	{},
};
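
/*
 * These tree descriptors are walked by the spufs inode code when a
 * context directory is created; each entry names a file, its fops,
 * mode, and (optionally) its nominal size.
 */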
const struct spufs_tree_descr spufs_dir_nosched_contents[] = {
	{ "capabilities", &spufs_caps_fops, 0444, },
	{ "mem",  &spufs_mem_fops,  0666, LS_SIZE, },
	{ "mbox", &spufs_mbox_fops, 0444, },
	{ "ibox", &spufs_ibox_fops, 0444, },
	{ "wbox", &spufs_wbox_fops, 0222, },
	{ "mbox_stat", &spufs_mbox_stat_fops, 0444, sizeof(u32), },
	{ "ibox_stat", &spufs_ibox_stat_fops, 0444, sizeof(u32), },
	{ "wbox_stat", &spufs_wbox_stat_fops, 0444, sizeof(u32), },
	{ "signal1", &spufs_signal1_nosched_fops, 0222, },
	{ "signal2", &spufs_signal2_nosched_fops, 0222, },
	{ "signal1_type", &spufs_signal1_type, 0666, },
	{ "signal2_type", &spufs_signal2_type, 0666, },
	{ "mss", &spufs_mss_fops, 0666, },
	{ "mfc", &spufs_mfc_fops, 0666, },
	{ "cntl", &spufs_cntl_fops,  0666, },
	{ "npc", &spufs_npc_ops, 0666, },
	{ "psmap", &spufs_psmap_fops, 0666, SPUFS_PS_MAP_SIZE, },
	{ "phys-id", &spufs_id_ops, 0666, },
	{ "object-id", &spufs_object_id_ops, 0666, },
	{ "tid", &spufs_tid_fops, 0444, },
	{ "stat", &spufs_stat_fops, 0444, },
	{},
};
const struct spufs_tree_descr spufs_dir_debug_contents[] = {
	{ ".ctx", &spufs_ctx_fops, 0444, },
	{},
};

const struct spufs_coredump_reader spufs_coredump_read[] = {
	{ "regs", __spufs_regs_read, NULL, sizeof(struct spu_reg128[128])},
	{ "fpcr", __spufs_fpcr_read, NULL, sizeof(struct spu_reg128) },
	{ "lslr", NULL, spufs_lslr_get, 19 },
	{ "decr", NULL, spufs_decr_get, 19 },
	{ "decr_status", NULL, spufs_decr_status_get, 19 },
	{ "mem", __spufs_mem_read, NULL, LS_SIZE, },
	{ "signal1", __spufs_signal1_read, NULL, sizeof(u32) },
	{ "signal1_type", NULL, spufs_signal1_type_get, 19 },
	{ "signal2", __spufs_signal2_read, NULL, sizeof(u32) },
	{ "signal2_type", NULL, spufs_signal2_type_get, 19 },
	{ "event_mask", NULL, spufs_event_mask_get, 19 },
	{ "event_status", NULL, spufs_event_status_get, 19 },
	{ "mbox_info", __spufs_mbox_info_read, NULL, sizeof(u32) },
	{ "ibox_info", __spufs_ibox_info_read, NULL, sizeof(u32) },
	{ "wbox_info", __spufs_wbox_info_read, NULL, 4 * sizeof(u32)},
	{ "dma_info", __spufs_dma_info_read, NULL, sizeof(struct spu_dma_info)},
	{ "proxydma_info", __spufs_proxydma_info_read,
			   NULL, sizeof(struct spu_proxydma_info)},
	{ "object-id", NULL, spufs_object_id_get, 19 },
	{ "npc", NULL, spufs_npc_get, 19 },
	{ NULL },
};