/*
 * SPU file system -- file contents
 *
 * (C) Copyright IBM Deutschland Entwicklung GmbH 2005
 *
 * Author: Arnd Bergmann <arndb@de.ibm.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2, or (at your option)
 * any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 */
#include <linux/fs.h>
#include <linux/ioctl.h>
#include <linux/module.h>
#include <linux/pagemap.h>
#include <linux/poll.h>
#include <linux/ptrace.h>
#include <linux/seq_file.h>

#include <asm/io.h>
#include <asm/semaphore.h>
#include <asm/spu.h>
#include <asm/spu_info.h>
#include <asm/uaccess.h>

#include "spufs.h"

#define SPUFS_MMAP_4K (PAGE_SIZE == 0x1000)
static int
spufs_mem_open(struct inode *inode, struct file *file)
{
	struct spufs_inode_info *i = SPUFS_I(inode);
	struct spu_context *ctx = i->i_ctx;

	mutex_lock(&ctx->mapping_lock);
	file->private_data = ctx;
	if (!i->i_openers++)
		ctx->local_store = inode->i_mapping;
	mutex_unlock(&ctx->mapping_lock);
	return 0;
}

static int
spufs_mem_release(struct inode *inode, struct file *file)
{
	struct spufs_inode_info *i = SPUFS_I(inode);
	struct spu_context *ctx = i->i_ctx;

	mutex_lock(&ctx->mapping_lock);
	if (!--i->i_openers)
		ctx->local_store = NULL;
	mutex_unlock(&ctx->mapping_lock);
	return 0;
}
static ssize_t
__spufs_mem_read(struct spu_context *ctx, char __user *buffer,
			size_t size, loff_t *pos)
{
	char *local_store = ctx->ops->get_ls(ctx);
	return simple_read_from_buffer(buffer, size, pos, local_store,
					LS_SIZE);
}

static ssize_t
spufs_mem_read(struct file *file, char __user *buffer,
				size_t size, loff_t *pos)
{
	struct spu_context *ctx = file->private_data;
	ssize_t ret;

	spu_acquire(ctx);
	ret = __spufs_mem_read(ctx, buffer, size, pos);
	spu_release(ctx);

	return ret;
}

static ssize_t
spufs_mem_write(struct file *file, const char __user *buffer,
					size_t size, loff_t *ppos)
{
	struct spu_context *ctx = file->private_data;
	char *local_store;
	loff_t pos = *ppos;
	int ret;

	if (pos < 0)
		return -EINVAL;
	if (pos > LS_SIZE)
		return -EFBIG;
	if (size > LS_SIZE - pos)
		size = LS_SIZE - pos;

	spu_acquire(ctx);

	local_store = ctx->ops->get_ls(ctx);
	ret = copy_from_user(local_store + pos, buffer, size);
	spu_release(ctx);

	if (ret)
		return -EFAULT;

	*ppos = pos + size;
	return size;
}
static unsigned long spufs_mem_mmap_nopfn(struct vm_area_struct *vma,
					  unsigned long address)
{
	struct spu_context *ctx	= vma->vm_file->private_data;
	unsigned long pfn, offset, addr0 = address;
#ifdef CONFIG_SPU_FS_64K_LS
	struct spu_state *csa = &ctx->csa;
	int psize;

	/* Check what page size we are using */
	psize = get_slice_psize(vma->vm_mm, address);

	/* Some sanity checking */
	BUG_ON(csa->use_big_pages != (psize == MMU_PAGE_64K));

	/* Wow, 64K, cool, we need to align the address though */
	if (csa->use_big_pages) {
		BUG_ON(vma->vm_start & 0xffff);
		address &= ~0xfffful;
	}
#endif /* CONFIG_SPU_FS_64K_LS */

	offset = (address - vma->vm_start) + (vma->vm_pgoff << PAGE_SHIFT);
	if (offset >= LS_SIZE)
		return NOPFN_SIGBUS;

	pr_debug("spufs_mem_mmap_nopfn address=0x%lx -> 0x%lx, offset=0x%lx\n",
		 addr0, address, offset);

	spu_acquire(ctx);

	if (ctx->state == SPU_STATE_SAVED) {
		vma->vm_page_prot = __pgprot(pgprot_val(vma->vm_page_prot)
							& ~_PAGE_NO_CACHE);
		pfn = vmalloc_to_pfn(ctx->csa.lscsa->ls + offset);
	} else {
		vma->vm_page_prot = __pgprot(pgprot_val(vma->vm_page_prot)
					     | _PAGE_NO_CACHE);
		pfn = (ctx->spu->local_store_phys + offset) >> PAGE_SHIFT;
	}
	vm_insert_pfn(vma, address, pfn);

	spu_release(ctx);

	return NOPFN_REFAULT;
}
static struct vm_operations_struct spufs_mem_mmap_vmops = {
	.nopfn = spufs_mem_mmap_nopfn,
};

static int spufs_mem_mmap(struct file *file, struct vm_area_struct *vma)
{
#ifdef CONFIG_SPU_FS_64K_LS
	struct spu_context *ctx = file->private_data;
	struct spu_state *csa = &ctx->csa;

	/* Sanity check VMA alignment */
	if (csa->use_big_pages) {
		pr_debug("spufs_mem_mmap 64K, start=0x%lx, end=0x%lx,"
			 " pgoff=0x%lx\n", vma->vm_start, vma->vm_end,
			 vma->vm_pgoff);
		if (vma->vm_start & 0xffff)
			return -EINVAL;
		if (vma->vm_pgoff & 0xf)
			return -EINVAL;
	}
#endif /* CONFIG_SPU_FS_64K_LS */

	if (!(vma->vm_flags & VM_SHARED))
		return -EINVAL;

	vma->vm_flags |= VM_IO | VM_PFNMAP;
	vma->vm_page_prot = __pgprot(pgprot_val(vma->vm_page_prot)
				     | _PAGE_NO_CACHE);

	vma->vm_ops = &spufs_mem_mmap_vmops;
	return 0;
}

#ifdef CONFIG_SPU_FS_64K_LS
static unsigned long spufs_get_unmapped_area(struct file *file,
		unsigned long addr, unsigned long len, unsigned long pgoff,
		unsigned long flags)
{
	struct spu_context *ctx = file->private_data;
	struct spu_state *csa = &ctx->csa;

	/* If not using big pages, fallback to normal MM g_u_a */
	if (!csa->use_big_pages)
		return current->mm->get_unmapped_area(file, addr, len,
						      pgoff, flags);

	/* Else, try to obtain a 64K pages slice */
	return slice_get_unmapped_area(addr, len, flags,
				       MMU_PAGE_64K, 1, 0);
}
#endif /* CONFIG_SPU_FS_64K_LS */
static const struct file_operations spufs_mem_fops = {
	.open			= spufs_mem_open,
	.release		= spufs_mem_release,
	.read			= spufs_mem_read,
	.write			= spufs_mem_write,
	.llseek			= generic_file_llseek,
	.mmap			= spufs_mem_mmap,
#ifdef CONFIG_SPU_FS_64K_LS
	.get_unmapped_area	= spufs_get_unmapped_area,
#endif
};
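/*
 * Illustrative (hypothetical) userspace sketch of how the "mem" file is
 * typically consumed: the SPE local store is mapped with mmap() on the
 * per-context "mem" file created by spu_create().  The path, descriptor
 * names and error handling below are assumptions for illustration only,
 * not part of this file.
 *
 *	int fd = open("/spu/myctx/mem", O_RDWR);
 *	void *ls = mmap(NULL, LS_SIZE, PROT_READ | PROT_WRITE,
 *			MAP_SHARED, fd, 0);
 *	if (ls != MAP_FAILED)
 *		memcpy(ls, program_image, program_size);
 *
 * The MAP_SHARED requirement is enforced by spufs_mem_mmap() above, and
 * faults are resolved lazily by spufs_mem_mmap_nopfn() against either the
 * saved CSA copy or the physical local store.
 */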
static unsigned long spufs_ps_nopfn(struct vm_area_struct *vma,
				    unsigned long address,
				    unsigned long ps_offs,
				    unsigned long ps_size)
{
	struct spu_context *ctx = vma->vm_file->private_data;
	unsigned long area, offset = address - vma->vm_start;
	int ret;

	offset += vma->vm_pgoff << PAGE_SHIFT;
	if (offset >= ps_size)
		return NOPFN_SIGBUS;

	/* error here usually means a signal.. we might want to test
	 * the error code more precisely though
	 */
	ret = spu_acquire_runnable(ctx, 0);
	if (ret)
		return NOPFN_REFAULT;

	area = ctx->spu->problem_phys + ps_offs;
	vm_insert_pfn(vma, address, (area + offset) >> PAGE_SHIFT);
	spu_release(ctx);

	return NOPFN_REFAULT;
}
#if SPUFS_MMAP_4K
static unsigned long spufs_cntl_mmap_nopfn(struct vm_area_struct *vma,
					   unsigned long address)
{
	return spufs_ps_nopfn(vma, address, 0x4000, 0x1000);
}

static struct vm_operations_struct spufs_cntl_mmap_vmops = {
	.nopfn = spufs_cntl_mmap_nopfn,
};

/*
 * mmap support for problem state control area [0x4000 - 0x4fff].
 */
static int spufs_cntl_mmap(struct file *file, struct vm_area_struct *vma)
{
	if (!(vma->vm_flags & VM_SHARED))
		return -EINVAL;

	vma->vm_flags |= VM_IO | VM_PFNMAP;
	vma->vm_page_prot = __pgprot(pgprot_val(vma->vm_page_prot)
				     | _PAGE_NO_CACHE | _PAGE_GUARDED);

	vma->vm_ops = &spufs_cntl_mmap_vmops;
	return 0;
}
#else /* SPUFS_MMAP_4K */
#define spufs_cntl_mmap NULL
#endif /* !SPUFS_MMAP_4K */
static u64 spufs_cntl_get(void *data)
{
	struct spu_context *ctx = data;
	u64 val;

	spu_acquire(ctx);
	val = ctx->ops->status_read(ctx);
	spu_release(ctx);

	return val;
}

static void spufs_cntl_set(void *data, u64 val)
{
	struct spu_context *ctx = data;

	spu_acquire(ctx);
	ctx->ops->runcntl_write(ctx, val);
	spu_release(ctx);
}

static int spufs_cntl_open(struct inode *inode, struct file *file)
{
	struct spufs_inode_info *i = SPUFS_I(inode);
	struct spu_context *ctx = i->i_ctx;

	mutex_lock(&ctx->mapping_lock);
	file->private_data = ctx;
	if (!i->i_openers++)
		ctx->cntl = inode->i_mapping;
	mutex_unlock(&ctx->mapping_lock);
	return simple_attr_open(inode, file, spufs_cntl_get,
					spufs_cntl_set, "0x%08lx");
}

static int
spufs_cntl_release(struct inode *inode, struct file *file)
{
	struct spufs_inode_info *i = SPUFS_I(inode);
	struct spu_context *ctx = i->i_ctx;

	simple_attr_close(inode, file);

	mutex_lock(&ctx->mapping_lock);
	if (!--i->i_openers)
		ctx->cntl = NULL;
	mutex_unlock(&ctx->mapping_lock);
	return 0;
}
static const struct file_operations spufs_cntl_fops = {
	.open = spufs_cntl_open,
	.release = spufs_cntl_release,
	.read = simple_attr_read,
	.write = simple_attr_write,
	.mmap = spufs_cntl_mmap,
};
static int
spufs_regs_open(struct inode *inode, struct file *file)
{
	struct spufs_inode_info *i = SPUFS_I(inode);
	file->private_data = i->i_ctx;
	return 0;
}

static ssize_t
__spufs_regs_read(struct spu_context *ctx, char __user *buffer,
			size_t size, loff_t *pos)
{
	struct spu_lscsa *lscsa = ctx->csa.lscsa;
	return simple_read_from_buffer(buffer, size, pos,
				      lscsa->gprs, sizeof lscsa->gprs);
}

static ssize_t
spufs_regs_read(struct file *file, char __user *buffer,
		size_t size, loff_t *pos)
{
	int ret;
	struct spu_context *ctx = file->private_data;

	spu_acquire_saved(ctx);
	ret = __spufs_regs_read(ctx, buffer, size, pos);
	spu_release_saved(ctx);
	return ret;
}

static ssize_t
spufs_regs_write(struct file *file, const char __user *buffer,
		size_t size, loff_t *pos)
{
	struct spu_context *ctx = file->private_data;
	struct spu_lscsa *lscsa = ctx->csa.lscsa;
	int ret;

	size = min_t(ssize_t, sizeof lscsa->gprs - *pos, size);
	if (size <= 0)
		return -EFBIG;
	*pos += size;

	spu_acquire_saved(ctx);

	ret = copy_from_user(lscsa->gprs + *pos - size,
			     buffer, size) ? -EFAULT : size;

	spu_release_saved(ctx);
	return ret;
}
static const struct file_operations spufs_regs_fops = {
	.open	 = spufs_regs_open,
	.read    = spufs_regs_read,
	.write   = spufs_regs_write,
	.llseek  = generic_file_llseek,
};
static ssize_t
__spufs_fpcr_read(struct spu_context *ctx, char __user * buffer,
			size_t size, loff_t * pos)
{
	struct spu_lscsa *lscsa = ctx->csa.lscsa;
	return simple_read_from_buffer(buffer, size, pos,
				      &lscsa->fpcr, sizeof(lscsa->fpcr));
}

static ssize_t
spufs_fpcr_read(struct file *file, char __user * buffer,
		size_t size, loff_t * pos)
{
	int ret;
	struct spu_context *ctx = file->private_data;

	spu_acquire_saved(ctx);
	ret = __spufs_fpcr_read(ctx, buffer, size, pos);
	spu_release_saved(ctx);
	return ret;
}

static ssize_t
spufs_fpcr_write(struct file *file, const char __user * buffer,
		size_t size, loff_t * pos)
{
	struct spu_context *ctx = file->private_data;
	struct spu_lscsa *lscsa = ctx->csa.lscsa;
	int ret;

	size = min_t(ssize_t, sizeof(lscsa->fpcr) - *pos, size);
	if (size <= 0)
		return -EFBIG;

	spu_acquire_saved(ctx);

	*pos += size;
	ret = copy_from_user((char *)&lscsa->fpcr + *pos - size,
			     buffer, size) ? -EFAULT : size;

	spu_release_saved(ctx);
	return ret;
}
static const struct file_operations spufs_fpcr_fops = {
	.open = spufs_regs_open,
	.read = spufs_fpcr_read,
	.write = spufs_fpcr_write,
	.llseek = generic_file_llseek,
};
/* generic open function for all pipe-like files */
static int spufs_pipe_open(struct inode *inode, struct file *file)
{
	struct spufs_inode_info *i = SPUFS_I(inode);
	file->private_data = i->i_ctx;

	return nonseekable_open(inode, file);
}
/*
 * Read as many bytes from the mailbox as possible, until
 * one of the conditions becomes true:
 *
 * - no more data available in the mailbox
 * - end of the user provided buffer
 * - end of the mapped area
 */
static ssize_t spufs_mbox_read(struct file *file, char __user *buf,
			size_t len, loff_t *pos)
{
	struct spu_context *ctx = file->private_data;
	u32 mbox_data, __user *udata;
	ssize_t count;

	if (len < 4)
		return -EINVAL;

	if (!access_ok(VERIFY_WRITE, buf, len))
		return -EFAULT;

	udata = (void __user *)buf;

	spu_acquire(ctx);
	for (count = 0; (count + 4) <= len; count += 4, udata++) {
		int ret;
		ret = ctx->ops->mbox_read(ctx, &mbox_data);
		if (ret == 0)
			break;

		/*
		 * at the end of the mapped area, we can fault
		 * but still need to return the data we have
		 * read successfully so far.
		 */
		ret = __put_user(mbox_data, udata);
		if (ret) {
			if (!count)
				count = -EFAULT;
			break;
		}
	}
	spu_release(ctx);

	if (!count)
		count = -EAGAIN;

	return count;
}
static const struct file_operations spufs_mbox_fops = {
	.open	= spufs_pipe_open,
	.read	= spufs_mbox_read,
};
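/*
 * Hypothetical userspace sketch (not part of this file) of draining the
 * SPU-to-PPE mailbox through the "mbox" file: reads are in multiples of
 * four bytes and fail with -EAGAIN when the mailbox is empty, matching
 * spufs_mbox_read() above.  The descriptor name is an assumption.
 *
 *	u32 val;
 *	ssize_t n = read(mbox_fd, &val, 4);
 *	if (n == 4)
 *		handle_mailbox_word(val);
 *	else if (n < 0 && errno == EAGAIN)
 *		;	// nothing pending, try again later
 */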
static ssize_t spufs_mbox_stat_read(struct file *file, char __user *buf,
			size_t len, loff_t *pos)
{
	struct spu_context *ctx = file->private_data;
	u32 mbox_stat;

	if (len < 4)
		return -EINVAL;

	spu_acquire(ctx);
	mbox_stat = ctx->ops->mbox_stat_read(ctx) & 0xff;
	spu_release(ctx);

	if (copy_to_user(buf, &mbox_stat, sizeof mbox_stat))
		return -EFAULT;

	return 4;
}

static const struct file_operations spufs_mbox_stat_fops = {
	.open	= spufs_pipe_open,
	.read	= spufs_mbox_stat_read,
};
/* low-level ibox access function */
size_t spu_ibox_read(struct spu_context *ctx, u32 *data)
{
	return ctx->ops->ibox_read(ctx, data);
}

static int spufs_ibox_fasync(int fd, struct file *file, int on)
{
	struct spu_context *ctx = file->private_data;

	return fasync_helper(fd, file, on, &ctx->ibox_fasync);
}

/* interrupt-level ibox callback function. */
void spufs_ibox_callback(struct spu *spu)
{
	struct spu_context *ctx = spu->ctx;

	wake_up_all(&ctx->ibox_wq);
	kill_fasync(&ctx->ibox_fasync, SIGIO, POLLIN);
}
/*
 * Read as many bytes from the interrupt mailbox as possible, until
 * one of the conditions becomes true:
 *
 * - no more data available in the mailbox
 * - end of the user provided buffer
 * - end of the mapped area
 *
 * If the file is opened without O_NONBLOCK, we wait here until
 * any data is available, but return when we have been able to
 * read something.
 */
static ssize_t spufs_ibox_read(struct file *file, char __user *buf,
			size_t len, loff_t *pos)
{
	struct spu_context *ctx = file->private_data;
	u32 ibox_data, __user *udata;
	ssize_t count;

	if (len < 4)
		return -EINVAL;

	if (!access_ok(VERIFY_WRITE, buf, len))
		return -EFAULT;

	udata = (void __user *)buf;

	spu_acquire(ctx);

	/* wait only for the first element */
	count = 0;
	if (file->f_flags & O_NONBLOCK) {
		if (!spu_ibox_read(ctx, &ibox_data))
			count = -EAGAIN;
	} else {
		count = spufs_wait(ctx->ibox_wq, spu_ibox_read(ctx, &ibox_data));
	}
	if (count)
		goto out;

	/* if we can't write at all, return -EFAULT */
	count = __put_user(ibox_data, udata);
	if (count)
		goto out;

	for (count = 4, udata++; (count + 4) <= len; count += 4, udata++) {
		int ret;
		ret = ctx->ops->ibox_read(ctx, &ibox_data);
		if (ret == 0)
			break;
		/*
		 * at the end of the mapped area, we can fault
		 * but still need to return the data we have
		 * read successfully so far.
		 */
		ret = __put_user(ibox_data, udata);
		if (ret)
			break;
	}

out:
	spu_release(ctx);

	return count;
}
static unsigned int spufs_ibox_poll(struct file *file, poll_table *wait)
{
	struct spu_context *ctx = file->private_data;
	unsigned int mask;

	poll_wait(file, &ctx->ibox_wq, wait);

	spu_acquire(ctx);
	mask = ctx->ops->mbox_stat_poll(ctx, POLLIN | POLLRDNORM);
	spu_release(ctx);

	return mask;
}
static const struct file_operations spufs_ibox_fops = {
	.open	= spufs_pipe_open,
	.read	= spufs_ibox_read,
	.poll	= spufs_ibox_poll,
	.fasync	= spufs_ibox_fasync,
};
static ssize_t spufs_ibox_stat_read(struct file *file, char __user *buf,
			size_t len, loff_t *pos)
{
	struct spu_context *ctx = file->private_data;
	u32 ibox_stat;

	if (len < 4)
		return -EINVAL;

	spu_acquire(ctx);
	ibox_stat = (ctx->ops->mbox_stat_read(ctx) >> 16) & 0xff;
	spu_release(ctx);

	if (copy_to_user(buf, &ibox_stat, sizeof ibox_stat))
		return -EFAULT;

	return 4;
}

static const struct file_operations spufs_ibox_stat_fops = {
	.open	= spufs_pipe_open,
	.read	= spufs_ibox_stat_read,
};
/* low-level mailbox write */
size_t spu_wbox_write(struct spu_context *ctx, u32 data)
{
	return ctx->ops->wbox_write(ctx, data);
}

static int spufs_wbox_fasync(int fd, struct file *file, int on)
{
	struct spu_context *ctx = file->private_data;
	int ret;

	ret = fasync_helper(fd, file, on, &ctx->wbox_fasync);

	return ret;
}

/* interrupt-level wbox callback function. */
void spufs_wbox_callback(struct spu *spu)
{
	struct spu_context *ctx = spu->ctx;

	wake_up_all(&ctx->wbox_wq);
	kill_fasync(&ctx->wbox_fasync, SIGIO, POLLOUT);
}
/*
 * Write as many bytes to the interrupt mailbox as possible, until
 * one of the conditions becomes true:
 *
 * - the mailbox is full
 * - end of the user provided buffer
 * - end of the mapped area
 *
 * If the file is opened without O_NONBLOCK, we wait here until
 * space is available, but return when we have been able to
 * write something.
 */
static ssize_t spufs_wbox_write(struct file *file, const char __user *buf,
			size_t len, loff_t *pos)
{
	struct spu_context *ctx = file->private_data;
	u32 wbox_data, __user *udata;
	ssize_t count;

	if (len < 4)
		return -EINVAL;

	udata = (void __user *)buf;
	if (!access_ok(VERIFY_READ, buf, len))
		return -EFAULT;

	if (__get_user(wbox_data, udata))
		return -EFAULT;

	spu_acquire(ctx);

	/*
	 * make sure we can at least write one element, by waiting
	 * in case of !O_NONBLOCK
	 */
	count = 0;
	if (file->f_flags & O_NONBLOCK) {
		if (!spu_wbox_write(ctx, wbox_data))
			count = -EAGAIN;
	} else {
		count = spufs_wait(ctx->wbox_wq, spu_wbox_write(ctx, wbox_data));
	}

	if (count)
		goto out;

	/* write as much as possible */
	for (count = 4, udata++; (count + 4) <= len; count += 4, udata++) {
		int ret;
		ret = __get_user(wbox_data, udata);
		if (ret)
			break;

		ret = spu_wbox_write(ctx, wbox_data);
		if (ret == 0)
			break;
	}

out:
	spu_release(ctx);
	return count;
}
static unsigned int spufs_wbox_poll(struct file *file, poll_table *wait)
{
	struct spu_context *ctx = file->private_data;
	unsigned int mask;

	poll_wait(file, &ctx->wbox_wq, wait);

	spu_acquire(ctx);
	mask = ctx->ops->mbox_stat_poll(ctx, POLLOUT | POLLWRNORM);
	spu_release(ctx);

	return mask;
}
static const struct file_operations spufs_wbox_fops = {
	.open	= spufs_pipe_open,
	.write	= spufs_wbox_write,
	.poll	= spufs_wbox_poll,
	.fasync	= spufs_wbox_fasync,
};
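/*
 * Hypothetical userspace sketch (assumed descriptor name and helpers) of
 * feeding the PPE-to-SPU write mailbox via "wbox": with O_NONBLOCK the
 * write fails with -EAGAIN when the mailbox is full, otherwise
 * spufs_wbox_write() above blocks in spufs_wait() until space appears.
 *
 *	u32 cmd = make_command();
 *	if (write(wbox_fd, &cmd, 4) != 4 && errno == EAGAIN)
 *		poll_for_space(wbox_fd);	// e.g. poll() for POLLOUT
 */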
static ssize_t spufs_wbox_stat_read(struct file *file, char __user *buf,
			size_t len, loff_t *pos)
{
	struct spu_context *ctx = file->private_data;
	u32 wbox_stat;

	if (len < 4)
		return -EINVAL;

	spu_acquire(ctx);
	wbox_stat = (ctx->ops->mbox_stat_read(ctx) >> 8) & 0xff;
	spu_release(ctx);

	if (copy_to_user(buf, &wbox_stat, sizeof wbox_stat))
		return -EFAULT;

	return 4;
}

static const struct file_operations spufs_wbox_stat_fops = {
	.open	= spufs_pipe_open,
	.read	= spufs_wbox_stat_read,
};
static int spufs_signal1_open(struct inode *inode, struct file *file)
{
	struct spufs_inode_info *i = SPUFS_I(inode);
	struct spu_context *ctx = i->i_ctx;

	mutex_lock(&ctx->mapping_lock);
	file->private_data = ctx;
	if (!i->i_openers++)
		ctx->signal1 = inode->i_mapping;
	mutex_unlock(&ctx->mapping_lock);
	return nonseekable_open(inode, file);
}

static int
spufs_signal1_release(struct inode *inode, struct file *file)
{
	struct spufs_inode_info *i = SPUFS_I(inode);
	struct spu_context *ctx = i->i_ctx;

	mutex_lock(&ctx->mapping_lock);
	if (!--i->i_openers)
		ctx->signal1 = NULL;
	mutex_unlock(&ctx->mapping_lock);
	return 0;
}
static ssize_t __spufs_signal1_read(struct spu_context *ctx, char __user *buf,
			size_t len, loff_t *pos)
{
	int ret = 0;
	u32 data;

	if (len < 4)
		return -EINVAL;

	if (ctx->csa.spu_chnlcnt_RW[3]) {
		data = ctx->csa.spu_chnldata_RW[3];
		ret = 4;
	}

	if (!ret)
		goto out;

	if (copy_to_user(buf, &data, 4))
		return -EFAULT;

out:
	return ret;
}

static ssize_t spufs_signal1_read(struct file *file, char __user *buf,
			size_t len, loff_t *pos)
{
	int ret;
	struct spu_context *ctx = file->private_data;

	spu_acquire_saved(ctx);
	ret = __spufs_signal1_read(ctx, buf, len, pos);
	spu_release_saved(ctx);

	return ret;
}

static ssize_t spufs_signal1_write(struct file *file, const char __user *buf,
			size_t len, loff_t *pos)
{
	struct spu_context *ctx;
	u32 data;

	ctx = file->private_data;

	if (len < 4)
		return -EINVAL;

	if (copy_from_user(&data, buf, 4))
		return -EFAULT;

	spu_acquire(ctx);
	ctx->ops->signal1_write(ctx, data);
	spu_release(ctx);

	return 4;
}
static unsigned long spufs_signal1_mmap_nopfn(struct vm_area_struct *vma,
					  unsigned long address)
{
#if PAGE_SIZE == 0x1000
	return spufs_ps_nopfn(vma, address, 0x14000, 0x1000);
#elif PAGE_SIZE == 0x10000
	/* For 64k pages, both signal1 and signal2 can be used to mmap the whole
	 * signal 1 and 2 area
	 */
	return spufs_ps_nopfn(vma, address, 0x10000, 0x10000);
#else
#error unsupported page size
#endif
}

static struct vm_operations_struct spufs_signal1_mmap_vmops = {
	.nopfn = spufs_signal1_mmap_nopfn,
};

static int spufs_signal1_mmap(struct file *file, struct vm_area_struct *vma)
{
	if (!(vma->vm_flags & VM_SHARED))
		return -EINVAL;

	vma->vm_flags |= VM_IO | VM_PFNMAP;
	vma->vm_page_prot = __pgprot(pgprot_val(vma->vm_page_prot)
				     | _PAGE_NO_CACHE | _PAGE_GUARDED);

	vma->vm_ops = &spufs_signal1_mmap_vmops;
	return 0;
}
static const struct file_operations spufs_signal1_fops = {
	.open = spufs_signal1_open,
	.release = spufs_signal1_release,
	.read = spufs_signal1_read,
	.write = spufs_signal1_write,
	.mmap = spufs_signal1_mmap,
};
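/*
 * Hypothetical usage sketch for the signal notification files: a 32-bit
 * value written to "signal1"/"signal2" is forwarded to the SPE signal
 * notification register by spufs_signal1_write()/spufs_signal2_write(),
 * and a read returns the last pending value from the saved context.
 * The descriptor name below is an assumption for illustration.
 *
 *	u32 token = 1;
 *	write(signal1_fd, &token, 4);	// raise signal 1 on the SPE
 */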
static const struct file_operations spufs_signal1_nosched_fops = {
	.open = spufs_signal1_open,
	.release = spufs_signal1_release,
	.write = spufs_signal1_write,
	.mmap = spufs_signal1_mmap,
};
static int spufs_signal2_open(struct inode *inode, struct file *file)
{
	struct spufs_inode_info *i = SPUFS_I(inode);
	struct spu_context *ctx = i->i_ctx;

	mutex_lock(&ctx->mapping_lock);
	file->private_data = ctx;
	if (!i->i_openers++)
		ctx->signal2 = inode->i_mapping;
	mutex_unlock(&ctx->mapping_lock);
	return nonseekable_open(inode, file);
}

static int
spufs_signal2_release(struct inode *inode, struct file *file)
{
	struct spufs_inode_info *i = SPUFS_I(inode);
	struct spu_context *ctx = i->i_ctx;

	mutex_lock(&ctx->mapping_lock);
	if (!--i->i_openers)
		ctx->signal2 = NULL;
	mutex_unlock(&ctx->mapping_lock);
	return 0;
}
static ssize_t __spufs_signal2_read(struct spu_context *ctx, char __user *buf,
			size_t len, loff_t *pos)
{
	int ret = 0;
	u32 data;

	if (len < 4)
		return -EINVAL;

	if (ctx->csa.spu_chnlcnt_RW[4]) {
		data = ctx->csa.spu_chnldata_RW[4];
		ret = 4;
	}

	if (!ret)
		goto out;

	if (copy_to_user(buf, &data, 4))
		return -EFAULT;

out:
	return ret;
}

static ssize_t spufs_signal2_read(struct file *file, char __user *buf,
			size_t len, loff_t *pos)
{
	struct spu_context *ctx = file->private_data;
	int ret;

	spu_acquire_saved(ctx);
	ret = __spufs_signal2_read(ctx, buf, len, pos);
	spu_release_saved(ctx);

	return ret;
}

static ssize_t spufs_signal2_write(struct file *file, const char __user *buf,
			size_t len, loff_t *pos)
{
	struct spu_context *ctx;
	u32 data;

	ctx = file->private_data;

	if (len < 4)
		return -EINVAL;

	if (copy_from_user(&data, buf, 4))
		return -EFAULT;

	spu_acquire(ctx);
	ctx->ops->signal2_write(ctx, data);
	spu_release(ctx);

	return 4;
}
#if SPUFS_MMAP_4K
static unsigned long spufs_signal2_mmap_nopfn(struct vm_area_struct *vma,
					   unsigned long address)
{
#if PAGE_SIZE == 0x1000
	return spufs_ps_nopfn(vma, address, 0x1c000, 0x1000);
#elif PAGE_SIZE == 0x10000
	/* For 64k pages, both signal1 and signal2 can be used to mmap the whole
	 * signal 1 and 2 area
	 */
	return spufs_ps_nopfn(vma, address, 0x10000, 0x10000);
#else
#error unsupported page size
#endif
}

static struct vm_operations_struct spufs_signal2_mmap_vmops = {
	.nopfn = spufs_signal2_mmap_nopfn,
};

static int spufs_signal2_mmap(struct file *file, struct vm_area_struct *vma)
{
	if (!(vma->vm_flags & VM_SHARED))
		return -EINVAL;

	vma->vm_flags |= VM_IO | VM_PFNMAP;
	vma->vm_page_prot = __pgprot(pgprot_val(vma->vm_page_prot)
				     | _PAGE_NO_CACHE | _PAGE_GUARDED);

	vma->vm_ops = &spufs_signal2_mmap_vmops;
	return 0;
}
#else /* SPUFS_MMAP_4K */
#define spufs_signal2_mmap NULL
#endif /* !SPUFS_MMAP_4K */
static const struct file_operations spufs_signal2_fops = {
	.open = spufs_signal2_open,
	.release = spufs_signal2_release,
	.read = spufs_signal2_read,
	.write = spufs_signal2_write,
	.mmap = spufs_signal2_mmap,
};

static const struct file_operations spufs_signal2_nosched_fops = {
	.open = spufs_signal2_open,
	.release = spufs_signal2_release,
	.write = spufs_signal2_write,
	.mmap = spufs_signal2_mmap,
};
/*
 * This is a wrapper around DEFINE_SIMPLE_ATTRIBUTE which does the
 * work of acquiring (or not) the SPU context before calling through
 * to the actual get routine. The set routine is called directly.
 */
#define SPU_ATTR_NOACQUIRE	0
#define SPU_ATTR_ACQUIRE	1
#define SPU_ATTR_ACQUIRE_SAVED	2

#define DEFINE_SPUFS_ATTRIBUTE(__name, __get, __set, __fmt, __acquire)	\
static u64 __##__get(void *data)					\
{									\
	struct spu_context *ctx = data;					\
	u64 ret;							\
									\
	if (__acquire == SPU_ATTR_ACQUIRE) {				\
		spu_acquire(ctx);					\
		ret = __get(ctx);					\
		spu_release(ctx);					\
	} else if (__acquire == SPU_ATTR_ACQUIRE_SAVED) {		\
		spu_acquire_saved(ctx);					\
		ret = __get(ctx);					\
		spu_release_saved(ctx);					\
	} else								\
		ret = __get(ctx);					\
									\
	return ret;							\
}									\
DEFINE_SIMPLE_ATTRIBUTE(__name, __##__get, __set, __fmt);
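/*
 * For reference, an invocation such as
 *
 *	DEFINE_SPUFS_ATTRIBUTE(spufs_npc_ops, spufs_npc_get, spufs_npc_set,
 *			       "0x%llx\n", SPU_ATTR_ACQUIRE);
 *
 * expands to a __spufs_npc_get() wrapper that brackets spufs_npc_get()
 * with spu_acquire()/spu_release(), and then feeds that wrapper to
 * DEFINE_SIMPLE_ATTRIBUTE() to build the spufs_npc_ops file operations.
 */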
static void spufs_signal1_type_set(void *data, u64 val)
{
	struct spu_context *ctx = data;

	spu_acquire(ctx);
	ctx->ops->signal1_type_set(ctx, val);
	spu_release(ctx);
}

static u64 spufs_signal1_type_get(struct spu_context *ctx)
{
	return ctx->ops->signal1_type_get(ctx);
}
DEFINE_SPUFS_ATTRIBUTE(spufs_signal1_type, spufs_signal1_type_get,
		       spufs_signal1_type_set, "%llu", SPU_ATTR_ACQUIRE);

static void spufs_signal2_type_set(void *data, u64 val)
{
	struct spu_context *ctx = data;

	spu_acquire(ctx);
	ctx->ops->signal2_type_set(ctx, val);
	spu_release(ctx);
}

static u64 spufs_signal2_type_get(struct spu_context *ctx)
{
	return ctx->ops->signal2_type_get(ctx);
}
DEFINE_SPUFS_ATTRIBUTE(spufs_signal2_type, spufs_signal2_type_get,
		       spufs_signal2_type_set, "%llu", SPU_ATTR_ACQUIRE);
#if SPUFS_MMAP_4K
static unsigned long spufs_mss_mmap_nopfn(struct vm_area_struct *vma,
					   unsigned long address)
{
	return spufs_ps_nopfn(vma, address, 0x0000, 0x1000);
}

static struct vm_operations_struct spufs_mss_mmap_vmops = {
	.nopfn = spufs_mss_mmap_nopfn,
};

/*
 * mmap support for problem state MFC DMA area [0x0000 - 0x0fff].
 */
static int spufs_mss_mmap(struct file *file, struct vm_area_struct *vma)
{
	if (!(vma->vm_flags & VM_SHARED))
		return -EINVAL;

	vma->vm_flags |= VM_IO | VM_PFNMAP;
	vma->vm_page_prot = __pgprot(pgprot_val(vma->vm_page_prot)
				     | _PAGE_NO_CACHE | _PAGE_GUARDED);

	vma->vm_ops = &spufs_mss_mmap_vmops;
	return 0;
}
#else /* SPUFS_MMAP_4K */
#define spufs_mss_mmap NULL
#endif /* !SPUFS_MMAP_4K */
static int spufs_mss_open(struct inode *inode, struct file *file)
{
	struct spufs_inode_info *i = SPUFS_I(inode);
	struct spu_context *ctx = i->i_ctx;

	file->private_data = i->i_ctx;

	mutex_lock(&ctx->mapping_lock);
	if (!i->i_openers++)
		ctx->mss = inode->i_mapping;
	mutex_unlock(&ctx->mapping_lock);
	return nonseekable_open(inode, file);
}

static int
spufs_mss_release(struct inode *inode, struct file *file)
{
	struct spufs_inode_info *i = SPUFS_I(inode);
	struct spu_context *ctx = i->i_ctx;

	mutex_lock(&ctx->mapping_lock);
	if (!--i->i_openers)
		ctx->mss = NULL;
	mutex_unlock(&ctx->mapping_lock);
	return 0;
}
static const struct file_operations spufs_mss_fops = {
	.open	 = spufs_mss_open,
	.release = spufs_mss_release,
	.mmap	 = spufs_mss_mmap,
};
static unsigned long spufs_psmap_mmap_nopfn(struct vm_area_struct *vma,
					   unsigned long address)
{
	return spufs_ps_nopfn(vma, address, 0x0000, 0x20000);
}

static struct vm_operations_struct spufs_psmap_mmap_vmops = {
	.nopfn = spufs_psmap_mmap_nopfn,
};

/*
 * mmap support for full problem state area [0x00000 - 0x1ffff].
 */
static int spufs_psmap_mmap(struct file *file, struct vm_area_struct *vma)
{
	if (!(vma->vm_flags & VM_SHARED))
		return -EINVAL;

	vma->vm_flags |= VM_IO | VM_PFNMAP;
	vma->vm_page_prot = __pgprot(pgprot_val(vma->vm_page_prot)
				     | _PAGE_NO_CACHE | _PAGE_GUARDED);

	vma->vm_ops = &spufs_psmap_mmap_vmops;
	return 0;
}
static int spufs_psmap_open(struct inode *inode, struct file *file)
{
	struct spufs_inode_info *i = SPUFS_I(inode);
	struct spu_context *ctx = i->i_ctx;

	mutex_lock(&ctx->mapping_lock);
	file->private_data = i->i_ctx;
	if (!i->i_openers++)
		ctx->psmap = inode->i_mapping;
	mutex_unlock(&ctx->mapping_lock);
	return nonseekable_open(inode, file);
}

static int
spufs_psmap_release(struct inode *inode, struct file *file)
{
	struct spufs_inode_info *i = SPUFS_I(inode);
	struct spu_context *ctx = i->i_ctx;

	mutex_lock(&ctx->mapping_lock);
	if (!--i->i_openers)
		ctx->psmap = NULL;
	mutex_unlock(&ctx->mapping_lock);
	return 0;
}
static const struct file_operations spufs_psmap_fops = {
	.open	 = spufs_psmap_open,
	.release = spufs_psmap_release,
	.mmap	 = spufs_psmap_mmap,
};
#if SPUFS_MMAP_4K
static unsigned long spufs_mfc_mmap_nopfn(struct vm_area_struct *vma,
					   unsigned long address)
{
	return spufs_ps_nopfn(vma, address, 0x3000, 0x1000);
}

static struct vm_operations_struct spufs_mfc_mmap_vmops = {
	.nopfn = spufs_mfc_mmap_nopfn,
};

/*
 * mmap support for problem state MFC DMA area [0x0000 - 0x0fff].
 */
static int spufs_mfc_mmap(struct file *file, struct vm_area_struct *vma)
{
	if (!(vma->vm_flags & VM_SHARED))
		return -EINVAL;

	vma->vm_flags |= VM_IO | VM_PFNMAP;
	vma->vm_page_prot = __pgprot(pgprot_val(vma->vm_page_prot)
				     | _PAGE_NO_CACHE | _PAGE_GUARDED);

	vma->vm_ops = &spufs_mfc_mmap_vmops;
	return 0;
}
#else /* SPUFS_MMAP_4K */
#define spufs_mfc_mmap NULL
#endif /* !SPUFS_MMAP_4K */
static int spufs_mfc_open(struct inode *inode, struct file *file)
{
	struct spufs_inode_info *i = SPUFS_I(inode);
	struct spu_context *ctx = i->i_ctx;

	/* we don't want to deal with DMA into other processes */
	if (ctx->owner != current->mm)
		return -EINVAL;

	if (atomic_read(&inode->i_count) != 1)
		return -EBUSY;

	mutex_lock(&ctx->mapping_lock);
	file->private_data = ctx;
	if (!i->i_openers++)
		ctx->mfc = inode->i_mapping;
	mutex_unlock(&ctx->mapping_lock);
	return nonseekable_open(inode, file);
}

static int
spufs_mfc_release(struct inode *inode, struct file *file)
{
	struct spufs_inode_info *i = SPUFS_I(inode);
	struct spu_context *ctx = i->i_ctx;

	mutex_lock(&ctx->mapping_lock);
	if (!--i->i_openers)
		ctx->mfc = NULL;
	mutex_unlock(&ctx->mapping_lock);
	return 0;
}
/* interrupt-level mfc callback function. */
void spufs_mfc_callback(struct spu *spu)
{
	struct spu_context *ctx = spu->ctx;

	wake_up_all(&ctx->mfc_wq);

	pr_debug("%s %s\n", __FUNCTION__, spu->name);
	if (ctx->mfc_fasync) {
		u32 free_elements, tagstatus;
		unsigned int mask;

		/* no need for spu_acquire in interrupt context */
		free_elements = ctx->ops->get_mfc_free_elements(ctx);
		tagstatus = ctx->ops->read_mfc_tagstatus(ctx);

		mask = 0;
		if (free_elements & 0xffff)
			mask |= POLLOUT;
		if (tagstatus & ctx->tagwait)
			mask |= POLLIN;

		kill_fasync(&ctx->mfc_fasync, SIGIO, mask);
	}
}
static int spufs_read_mfc_tagstatus(struct spu_context *ctx, u32 *status)
{
	/* See if there is at least one tag group that is complete */
	/* FIXME we need locking around tagwait */
	*status = ctx->ops->read_mfc_tagstatus(ctx) & ctx->tagwait;
	ctx->tagwait &= ~*status;
	if (*status)
		return 1;

	/* enable interrupt waiting for any tag group,
	   may silently fail if interrupts are already enabled */
	ctx->ops->set_mfc_query(ctx, ctx->tagwait, 1);
	return 0;
}
static ssize_t spufs_mfc_read(struct file *file, char __user *buffer,
			size_t size, loff_t *pos)
{
	struct spu_context *ctx = file->private_data;
	int ret = -EINVAL;
	u32 status;

	if (size != 4)
		goto out;

	spu_acquire(ctx);

	ret = 0;
	if (file->f_flags & O_NONBLOCK) {
		status = ctx->ops->read_mfc_tagstatus(ctx);
		if (!(status & ctx->tagwait))
			ret = -EAGAIN;
		else
			/* return the completed tag groups */
			ctx->tagwait &= ~status;
	} else {
		ret = spufs_wait(ctx->mfc_wq,
			   spufs_read_mfc_tagstatus(ctx, &status));
	}
	spu_release(ctx);

	if (ret)
		goto out;

	ret = 4;
	if (copy_to_user(buffer, &status, 4))
		ret = -EFAULT;

out:
	return ret;
}
static int spufs_check_valid_dma(struct mfc_dma_command *cmd)
{
	pr_debug("queueing DMA %x %lx %x %x %x\n", cmd->lsa,
		 cmd->ea, cmd->size, cmd->tag, cmd->cmd);

	switch (cmd->cmd) {
	case MFC_PUT_CMD:
	case MFC_PUTF_CMD:
	case MFC_PUTB_CMD:
	case MFC_GET_CMD:
	case MFC_GETF_CMD:
	case MFC_GETB_CMD:
		break;
	default:
		pr_debug("invalid DMA opcode %x\n", cmd->cmd);
		return -EIO;
	}

	if ((cmd->lsa & 0xf) != (cmd->ea &0xf)) {
		pr_debug("invalid DMA alignment, ea %lx lsa %x\n",
				cmd->ea, cmd->lsa);
		return -EIO;
	}

	switch (cmd->size & 0xf) {
	case 1:
	case 2:
	case 4:
	case 8:
	case 0:
		break;
	default:
		pr_debug("invalid DMA alignment %x for size %x\n",
			cmd->lsa & 0xf, cmd->size);
		return -EIO;
	}

	if (cmd->size > 16 * 1024) {
		pr_debug("invalid DMA size %x\n", cmd->size);
		return -EIO;
	}

	if (cmd->tag & 0xfff0) {
		/* we reserve the higher tag numbers for kernel use */
		pr_debug("invalid DMA tag\n");
		return -EIO;
	}

	if (cmd->class) {
		/* not supported in this version */
		pr_debug("invalid DMA class\n");
		return -EIO;
	}

	return 0;
}
static int spu_send_mfc_command(struct spu_context *ctx,
				struct mfc_dma_command cmd,
				int *error)
{
	*error = ctx->ops->send_mfc_command(ctx, &cmd);
	if (*error == -EAGAIN) {
		/* wait for any tag group to complete
		   so we have space for the new command */
		ctx->ops->set_mfc_query(ctx, ctx->tagwait, 1);
		/* try again, because the queue might be
		   empty again */
		*error = ctx->ops->send_mfc_command(ctx, &cmd);
		if (*error == -EAGAIN)
			return 0;
	}
	return 1;
}
static ssize_t spufs_mfc_write(struct file *file, const char __user *buffer,
			size_t size, loff_t *pos)
{
	struct spu_context *ctx = file->private_data;
	struct mfc_dma_command cmd;
	int ret = -EINVAL;

	if (size != sizeof cmd)
		goto out;

	ret = -EFAULT;
	if (copy_from_user(&cmd, buffer, sizeof cmd))
		goto out;

	ret = spufs_check_valid_dma(&cmd);
	if (ret)
		goto out;

	ret = spu_acquire_runnable(ctx, 0);
	if (ret)
		goto out;

	if (file->f_flags & O_NONBLOCK) {
		ret = ctx->ops->send_mfc_command(ctx, &cmd);
	} else {
		int status;
		ret = spufs_wait(ctx->mfc_wq,
				 spu_send_mfc_command(ctx, cmd, &status));
		if (status)
			ret = status;
	}

	if (ret)
		goto out_unlock;

	ctx->tagwait |= 1 << cmd.tag;
	ret = size;

out_unlock:
	spu_release(ctx);
out:
	return ret;
}
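/*
 * Hypothetical userspace sketch (descriptor name and addresses assumed) of
 * queueing a DMA transfer by writing a struct mfc_dma_command to the "mfc"
 * file; spufs_check_valid_dma() above rejects bad opcodes, mismatched
 * alignment, sizes over 16KB and reserved tag numbers.
 *
 *	struct mfc_dma_command cmd = {
 *		.lsa  = ls_offset,		// local store address
 *		.ea   = (uint64_t)buffer,	// effective address
 *		.size = transfer_bytes,		// <= 16 * 1024
 *		.tag  = 5,			// tags 0..15 are for users
 *		.cmd  = MFC_GET_CMD,		// pull data into local store
 *	};
 *	write(mfc_fd, &cmd, sizeof(cmd));
 *	read(mfc_fd, &tag_status, 4);		// wait for tag completion
 */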
static unsigned int spufs_mfc_poll(struct file *file, poll_table *wait)
{
	struct spu_context *ctx = file->private_data;
	u32 free_elements, tagstatus;
	unsigned int mask;

	poll_wait(file, &ctx->mfc_wq, wait);

	spu_acquire(ctx);
	ctx->ops->set_mfc_query(ctx, ctx->tagwait, 2);
	free_elements = ctx->ops->get_mfc_free_elements(ctx);
	tagstatus = ctx->ops->read_mfc_tagstatus(ctx);
	spu_release(ctx);

	mask = 0;
	if (free_elements & 0xffff)
		mask |= POLLOUT | POLLWRNORM;
	if (tagstatus & ctx->tagwait)
		mask |= POLLIN | POLLRDNORM;

	pr_debug("%s: free %d tagstatus %d tagwait %d\n", __FUNCTION__,
		free_elements, tagstatus, ctx->tagwait);

	return mask;
}
static int spufs_mfc_flush(struct file *file, fl_owner_t id)
{
	struct spu_context *ctx = file->private_data;
	int ret;

	spu_acquire(ctx);
#if 0
/* this currently hangs */
	ret = spufs_wait(ctx->mfc_wq,
			 ctx->ops->set_mfc_query(ctx, ctx->tagwait, 2));
	if (ret)
		goto out;
	ret = spufs_wait(ctx->mfc_wq,
			 ctx->ops->read_mfc_tagstatus(ctx) == ctx->tagwait);
out:
#else
	ret = 0;
#endif
	spu_release(ctx);

	return ret;
}

static int spufs_mfc_fsync(struct file *file, struct dentry *dentry,
			   int datasync)
{
	return spufs_mfc_flush(file, NULL);
}

static int spufs_mfc_fasync(int fd, struct file *file, int on)
{
	struct spu_context *ctx = file->private_data;

	return fasync_helper(fd, file, on, &ctx->mfc_fasync);
}
static const struct file_operations spufs_mfc_fops = {
	.open	 = spufs_mfc_open,
	.release = spufs_mfc_release,
	.read	 = spufs_mfc_read,
	.write	 = spufs_mfc_write,
	.poll	 = spufs_mfc_poll,
	.flush	 = spufs_mfc_flush,
	.fsync	 = spufs_mfc_fsync,
	.fasync	 = spufs_mfc_fasync,
	.mmap	 = spufs_mfc_mmap,
};
static void spufs_npc_set(void *data, u64 val)
{
	struct spu_context *ctx = data;

	spu_acquire(ctx);
	ctx->ops->npc_write(ctx, val);
	spu_release(ctx);
}

static u64 spufs_npc_get(struct spu_context *ctx)
{
	return ctx->ops->npc_read(ctx);
}
DEFINE_SPUFS_ATTRIBUTE(spufs_npc_ops, spufs_npc_get, spufs_npc_set,
		       "0x%llx\n", SPU_ATTR_ACQUIRE);
static void spufs_decr_set(void *data, u64 val)
{
	struct spu_context *ctx = data;
	struct spu_lscsa *lscsa = ctx->csa.lscsa;
	spu_acquire_saved(ctx);
	lscsa->decr.slot[0] = (u32) val;
	spu_release_saved(ctx);
}

static u64 spufs_decr_get(struct spu_context *ctx)
{
	struct spu_lscsa *lscsa = ctx->csa.lscsa;
	return lscsa->decr.slot[0];
}
DEFINE_SPUFS_ATTRIBUTE(spufs_decr_ops, spufs_decr_get, spufs_decr_set,
		       "0x%llx\n", SPU_ATTR_ACQUIRE_SAVED);

static void spufs_decr_status_set(void *data, u64 val)
{
	struct spu_context *ctx = data;
	spu_acquire_saved(ctx);
	if (val)
		ctx->csa.priv2.mfc_control_RW |= MFC_CNTL_DECREMENTER_RUNNING;
	else
		ctx->csa.priv2.mfc_control_RW &= ~MFC_CNTL_DECREMENTER_RUNNING;
	spu_release_saved(ctx);
}

static u64 spufs_decr_status_get(struct spu_context *ctx)
{
	if (ctx->csa.priv2.mfc_control_RW & MFC_CNTL_DECREMENTER_RUNNING)
		return SPU_DECR_STATUS_RUNNING;
	else
		return 0;
}
DEFINE_SPUFS_ATTRIBUTE(spufs_decr_status_ops, spufs_decr_status_get,
		       spufs_decr_status_set, "0x%llx\n",
		       SPU_ATTR_ACQUIRE_SAVED);
static void spufs_event_mask_set(void *data, u64 val)
{
	struct spu_context *ctx = data;
	struct spu_lscsa *lscsa = ctx->csa.lscsa;
	spu_acquire_saved(ctx);
	lscsa->event_mask.slot[0] = (u32) val;
	spu_release_saved(ctx);
}

static u64 spufs_event_mask_get(struct spu_context *ctx)
{
	struct spu_lscsa *lscsa = ctx->csa.lscsa;
	return lscsa->event_mask.slot[0];
}

DEFINE_SPUFS_ATTRIBUTE(spufs_event_mask_ops, spufs_event_mask_get,
		       spufs_event_mask_set, "0x%llx\n",
		       SPU_ATTR_ACQUIRE_SAVED);

static u64 spufs_event_status_get(struct spu_context *ctx)
{
	struct spu_state *state = &ctx->csa;
	u64 stat;
	stat = state->spu_chnlcnt_RW[0];
	if (stat)
		return state->spu_chnldata_RW[0];
	return 0;
}
DEFINE_SPUFS_ATTRIBUTE(spufs_event_status_ops, spufs_event_status_get,
		       NULL, "0x%llx\n", SPU_ATTR_ACQUIRE_SAVED)

static void spufs_srr0_set(void *data, u64 val)
{
	struct spu_context *ctx = data;
	struct spu_lscsa *lscsa = ctx->csa.lscsa;
	spu_acquire_saved(ctx);
	lscsa->srr0.slot[0] = (u32) val;
	spu_release_saved(ctx);
}

static u64 spufs_srr0_get(struct spu_context *ctx)
{
	struct spu_lscsa *lscsa = ctx->csa.lscsa;
	return lscsa->srr0.slot[0];
}
DEFINE_SPUFS_ATTRIBUTE(spufs_srr0_ops, spufs_srr0_get, spufs_srr0_set,
		       "0x%llx\n", SPU_ATTR_ACQUIRE_SAVED)
static u64 spufs_id_get(struct spu_context *ctx)
{
	u64 num;

	if (ctx->state == SPU_STATE_RUNNABLE)
		num = ctx->spu->number;
	else
		num = (unsigned int)-1;

	return num;
}
DEFINE_SPUFS_ATTRIBUTE(spufs_id_ops, spufs_id_get, NULL, "0x%llx\n",
		       SPU_ATTR_ACQUIRE)

static u64 spufs_object_id_get(struct spu_context *ctx)
{
	/* FIXME: Should there really be no locking here? */
	return ctx->object_id;
}

static void spufs_object_id_set(void *data, u64 id)
{
	struct spu_context *ctx = data;
	ctx->object_id = id;
}

DEFINE_SPUFS_ATTRIBUTE(spufs_object_id_ops, spufs_object_id_get,
		       spufs_object_id_set, "0x%llx\n", SPU_ATTR_NOACQUIRE);

static u64 spufs_lslr_get(struct spu_context *ctx)
{
	return ctx->csa.priv2.spu_lslr_RW;
}
DEFINE_SPUFS_ATTRIBUTE(spufs_lslr_ops, spufs_lslr_get, NULL, "0x%llx\n",
		       SPU_ATTR_ACQUIRE_SAVED);
static int spufs_info_open(struct inode *inode, struct file *file)
{
	struct spufs_inode_info *i = SPUFS_I(inode);
	struct spu_context *ctx = i->i_ctx;
	file->private_data = ctx;
	return 0;
}

static int spufs_caps_show(struct seq_file *s, void *private)
{
	struct spu_context *ctx = s->private;

	if (!(ctx->flags & SPU_CREATE_NOSCHED))
		seq_puts(s, "sched\n");
	if (!(ctx->flags & SPU_CREATE_ISOLATE))
		seq_puts(s, "step\n");
	return 0;
}

static int spufs_caps_open(struct inode *inode, struct file *file)
{
	return single_open(file, spufs_caps_show, SPUFS_I(inode)->i_ctx);
}

static const struct file_operations spufs_caps_fops = {
	.open		= spufs_caps_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};
static ssize_t __spufs_mbox_info_read(struct spu_context *ctx,
			char __user *buf, size_t len, loff_t *pos)
{
	u32 mbox_stat;
	u32 data;

	mbox_stat = ctx->csa.prob.mb_stat_R;
	if (mbox_stat & 0x0000ff) {
		data = ctx->csa.prob.pu_mb_R;
	}

	return simple_read_from_buffer(buf, len, pos, &data, sizeof data);
}

static ssize_t spufs_mbox_info_read(struct file *file, char __user *buf,
				   size_t len, loff_t *pos)
{
	int ret;
	struct spu_context *ctx = file->private_data;

	if (!access_ok(VERIFY_WRITE, buf, len))
		return -EFAULT;

	spu_acquire_saved(ctx);
	spin_lock(&ctx->csa.register_lock);
	ret = __spufs_mbox_info_read(ctx, buf, len, pos);
	spin_unlock(&ctx->csa.register_lock);
	spu_release_saved(ctx);

	return ret;
}

static const struct file_operations spufs_mbox_info_fops = {
	.open = spufs_info_open,
	.read = spufs_mbox_info_read,
	.llseek  = generic_file_llseek,
};
static ssize_t __spufs_ibox_info_read(struct spu_context *ctx,
				char __user *buf, size_t len, loff_t *pos)
{
	u32 ibox_stat;
	u32 data;

	ibox_stat = ctx->csa.prob.mb_stat_R;
	if (ibox_stat & 0xff0000) {
		data = ctx->csa.priv2.puint_mb_R;
	}

	return simple_read_from_buffer(buf, len, pos, &data, sizeof data);
}

static ssize_t spufs_ibox_info_read(struct file *file, char __user *buf,
				   size_t len, loff_t *pos)
{
	struct spu_context *ctx = file->private_data;
	int ret;

	if (!access_ok(VERIFY_WRITE, buf, len))
		return -EFAULT;

	spu_acquire_saved(ctx);
	spin_lock(&ctx->csa.register_lock);
	ret = __spufs_ibox_info_read(ctx, buf, len, pos);
	spin_unlock(&ctx->csa.register_lock);
	spu_release_saved(ctx);

	return ret;
}

static const struct file_operations spufs_ibox_info_fops = {
	.open = spufs_info_open,
	.read = spufs_ibox_info_read,
	.llseek  = generic_file_llseek,
};
static ssize_t __spufs_wbox_info_read(struct spu_context *ctx,
			char __user *buf, size_t len, loff_t *pos)
{
	int i, cnt;
	u32 data[4];
	u32 wbox_stat;

	wbox_stat = ctx->csa.prob.mb_stat_R;
	cnt = 4 - ((wbox_stat & 0x00ff00) >> 8);
	for (i = 0; i < cnt; i++) {
		data[i] = ctx->csa.spu_mailbox_data[i];
	}

	return simple_read_from_buffer(buf, len, pos, &data,
				cnt * sizeof(u32));
}

static ssize_t spufs_wbox_info_read(struct file *file, char __user *buf,
				   size_t len, loff_t *pos)
{
	struct spu_context *ctx = file->private_data;
	int ret;

	if (!access_ok(VERIFY_WRITE, buf, len))
		return -EFAULT;

	spu_acquire_saved(ctx);
	spin_lock(&ctx->csa.register_lock);
	ret = __spufs_wbox_info_read(ctx, buf, len, pos);
	spin_unlock(&ctx->csa.register_lock);
	spu_release_saved(ctx);

	return ret;
}

static const struct file_operations spufs_wbox_info_fops = {
	.open = spufs_info_open,
	.read = spufs_wbox_info_read,
	.llseek  = generic_file_llseek,
};
static ssize_t __spufs_dma_info_read(struct spu_context *ctx,
			char __user *buf, size_t len, loff_t *pos)
{
	struct spu_dma_info info;
	struct mfc_cq_sr *qp, *spuqp;
	int i;

	info.dma_info_type = ctx->csa.priv2.spu_tag_status_query_RW;
	info.dma_info_mask = ctx->csa.lscsa->tag_mask.slot[0];
	info.dma_info_status = ctx->csa.spu_chnldata_RW[24];
	info.dma_info_stall_and_notify = ctx->csa.spu_chnldata_RW[25];
	info.dma_info_atomic_command_status = ctx->csa.spu_chnldata_RW[27];
	for (i = 0; i < 16; i++) {
		qp = &info.dma_info_command_data[i];
		spuqp = &ctx->csa.priv2.spuq[i];

		qp->mfc_cq_data0_RW = spuqp->mfc_cq_data0_RW;
		qp->mfc_cq_data1_RW = spuqp->mfc_cq_data1_RW;
		qp->mfc_cq_data2_RW = spuqp->mfc_cq_data2_RW;
		qp->mfc_cq_data3_RW = spuqp->mfc_cq_data3_RW;
	}

	return simple_read_from_buffer(buf, len, pos, &info,
				sizeof info);
}

static ssize_t spufs_dma_info_read(struct file *file, char __user *buf,
			      size_t len, loff_t *pos)
{
	struct spu_context *ctx = file->private_data;
	int ret;

	if (!access_ok(VERIFY_WRITE, buf, len))
		return -EFAULT;

	spu_acquire_saved(ctx);
	spin_lock(&ctx->csa.register_lock);
	ret = __spufs_dma_info_read(ctx, buf, len, pos);
	spin_unlock(&ctx->csa.register_lock);
	spu_release_saved(ctx);

	return ret;
}

static const struct file_operations spufs_dma_info_fops = {
	.open = spufs_info_open,
	.read = spufs_dma_info_read,
};
static ssize_t __spufs_proxydma_info_read(struct spu_context *ctx,
			char __user *buf, size_t len, loff_t *pos)
{
	struct spu_proxydma_info info;
	struct mfc_cq_sr *qp, *puqp;
	int ret = sizeof info;
	int i;

	if (len < ret)
		return -EINVAL;

	if (!access_ok(VERIFY_WRITE, buf, len))
		return -EFAULT;

	info.proxydma_info_type = ctx->csa.prob.dma_querytype_RW;
	info.proxydma_info_mask = ctx->csa.prob.dma_querymask_RW;
	info.proxydma_info_status = ctx->csa.prob.dma_tagstatus_R;
	for (i = 0; i < 8; i++) {
		qp = &info.proxydma_info_command_data[i];
		puqp = &ctx->csa.priv2.puq[i];

		qp->mfc_cq_data0_RW = puqp->mfc_cq_data0_RW;
		qp->mfc_cq_data1_RW = puqp->mfc_cq_data1_RW;
		qp->mfc_cq_data2_RW = puqp->mfc_cq_data2_RW;
		qp->mfc_cq_data3_RW = puqp->mfc_cq_data3_RW;
	}

	return simple_read_from_buffer(buf, len, pos, &info,
				sizeof info);
}

static ssize_t spufs_proxydma_info_read(struct file *file, char __user *buf,
				   size_t len, loff_t *pos)
{
	struct spu_context *ctx = file->private_data;
	int ret;

	spu_acquire_saved(ctx);
	spin_lock(&ctx->csa.register_lock);
	ret = __spufs_proxydma_info_read(ctx, buf, len, pos);
	spin_unlock(&ctx->csa.register_lock);
	spu_release_saved(ctx);

	return ret;
}

static const struct file_operations spufs_proxydma_info_fops = {
	.open = spufs_info_open,
	.read = spufs_proxydma_info_read,
};
static int spufs_show_tid(struct seq_file *s, void *private)
{
	struct spu_context *ctx = s->private;

	seq_printf(s, "%d\n", ctx->tid);
	return 0;
}

static int spufs_tid_open(struct inode *inode, struct file *file)
{
	return single_open(file, spufs_show_tid, SPUFS_I(inode)->i_ctx);
}

static const struct file_operations spufs_tid_fops = {
	.open		= spufs_tid_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};
static const char *ctx_state_names[] = {
	"user", "system", "iowait", "loaded"
};
static unsigned long long spufs_acct_time(struct spu_context *ctx,
		enum spu_utilization_state state)
{
	struct timespec ts;
	unsigned long long time = ctx->stats.times[state];

	/*
	 * In general, utilization statistics are updated by the controlling
	 * thread as the spu context moves through various well defined
	 * state transitions, but if the context is lazily loaded its
	 * utilization statistics are not updated as the controlling thread
	 * is not tightly coupled with the execution of the spu context.  We
	 * calculate and apply the time delta from the last recorded state
	 * of the spu context.
	 */
	if (ctx->spu && ctx->stats.util_state == state) {
		ktime_get_ts(&ts);
		time += timespec_to_ns(&ts) - ctx->stats.tstamp;
	}

	return time / NSEC_PER_MSEC;
}
static unsigned long long spufs_slb_flts(struct spu_context *ctx)
{
	unsigned long long slb_flts = ctx->stats.slb_flt;

	if (ctx->state == SPU_STATE_RUNNABLE) {
		slb_flts += (ctx->spu->stats.slb_flt -
			     ctx->stats.slb_flt_base);
	}

	return slb_flts;
}

static unsigned long long spufs_class2_intrs(struct spu_context *ctx)
{
	unsigned long long class2_intrs = ctx->stats.class2_intr;

	if (ctx->state == SPU_STATE_RUNNABLE) {
		class2_intrs += (ctx->spu->stats.class2_intr -
				 ctx->stats.class2_intr_base);
	}

	return class2_intrs;
}
static int spufs_show_stat(struct seq_file *s, void *private)
{
	struct spu_context *ctx = s->private;

	spu_acquire(ctx);
	seq_printf(s, "%s %llu %llu %llu %llu "
		"%llu %llu %llu %llu %llu %llu %llu %llu\n",
		ctx_state_names[ctx->stats.util_state],
		spufs_acct_time(ctx, SPU_UTIL_USER),
		spufs_acct_time(ctx, SPU_UTIL_SYSTEM),
		spufs_acct_time(ctx, SPU_UTIL_IOWAIT),
		spufs_acct_time(ctx, SPU_UTIL_IDLE_LOADED),
		ctx->stats.vol_ctx_switch,
		ctx->stats.invol_ctx_switch,
		spufs_slb_flts(ctx),
		ctx->stats.hash_flt,
		ctx->stats.min_flt,
		ctx->stats.maj_flt,
		spufs_class2_intrs(ctx),
		ctx->stats.libassist);
	spu_release(ctx);
	return 0;
}
static int spufs_stat_open(struct inode *inode, struct file *file)
{
	return single_open(file, spufs_show_stat, SPUFS_I(inode)->i_ctx);
}

static const struct file_operations spufs_stat_fops = {
	.open		= spufs_stat_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};
struct tree_descr spufs_dir_contents[] = {
	{ "capabilities", &spufs_caps_fops, 0444, },
	{ "mem",  &spufs_mem_fops,  0666, },
	{ "regs", &spufs_regs_fops,  0666, },
	{ "mbox", &spufs_mbox_fops, 0444, },
	{ "ibox", &spufs_ibox_fops, 0444, },
	{ "wbox", &spufs_wbox_fops, 0222, },
	{ "mbox_stat", &spufs_mbox_stat_fops, 0444, },
	{ "ibox_stat", &spufs_ibox_stat_fops, 0444, },
	{ "wbox_stat", &spufs_wbox_stat_fops, 0444, },
	{ "signal1", &spufs_signal1_fops, 0666, },
	{ "signal2", &spufs_signal2_fops, 0666, },
	{ "signal1_type", &spufs_signal1_type, 0666, },
	{ "signal2_type", &spufs_signal2_type, 0666, },
	{ "cntl", &spufs_cntl_fops,  0666, },
	{ "fpcr", &spufs_fpcr_fops, 0666, },
	{ "lslr", &spufs_lslr_ops, 0444, },
	{ "mfc", &spufs_mfc_fops, 0666, },
	{ "mss", &spufs_mss_fops, 0666, },
	{ "npc", &spufs_npc_ops, 0666, },
	{ "srr0", &spufs_srr0_ops, 0666, },
	{ "decr", &spufs_decr_ops, 0666, },
	{ "decr_status", &spufs_decr_status_ops, 0666, },
	{ "event_mask", &spufs_event_mask_ops, 0666, },
	{ "event_status", &spufs_event_status_ops, 0444, },
	{ "psmap", &spufs_psmap_fops, 0666, },
	{ "phys-id", &spufs_id_ops, 0666, },
	{ "object-id", &spufs_object_id_ops, 0666, },
	{ "mbox_info", &spufs_mbox_info_fops, 0444, },
	{ "ibox_info", &spufs_ibox_info_fops, 0444, },
	{ "wbox_info", &spufs_wbox_info_fops, 0444, },
	{ "dma_info", &spufs_dma_info_fops, 0444, },
	{ "proxydma_info", &spufs_proxydma_info_fops, 0444, },
	{ "tid", &spufs_tid_fops, 0444, },
	{ "stat", &spufs_stat_fops, 0444, },
	{},
};
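/*
 * Hypothetical usage note: a context directory populated from
 * spufs_dir_contents[] is created by the spu_create() syscall, e.g.
 *
 *	int ctx_fd = spu_create("/spu/myctx", 0, 0755);
 *
 * after which the files above ("mem", "regs", "mbox", ...) can be opened
 * relative to that directory.  Contexts created with SPU_CREATE_NOSCHED
 * get the reduced set in spufs_dir_nosched_contents[] below instead.
 */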
struct tree_descr spufs_dir_nosched_contents[] = {
	{ "capabilities", &spufs_caps_fops, 0444, },
	{ "mem",  &spufs_mem_fops,  0666, },
	{ "mbox", &spufs_mbox_fops, 0444, },
	{ "ibox", &spufs_ibox_fops, 0444, },
	{ "wbox", &spufs_wbox_fops, 0222, },
	{ "mbox_stat", &spufs_mbox_stat_fops, 0444, },
	{ "ibox_stat", &spufs_ibox_stat_fops, 0444, },
	{ "wbox_stat", &spufs_wbox_stat_fops, 0444, },
	{ "signal1", &spufs_signal1_nosched_fops, 0222, },
	{ "signal2", &spufs_signal2_nosched_fops, 0222, },
	{ "signal1_type", &spufs_signal1_type, 0666, },
	{ "signal2_type", &spufs_signal2_type, 0666, },
	{ "mss", &spufs_mss_fops, 0666, },
	{ "mfc", &spufs_mfc_fops, 0666, },
	{ "cntl", &spufs_cntl_fops,  0666, },
	{ "npc", &spufs_npc_ops, 0666, },
	{ "psmap", &spufs_psmap_fops, 0666, },
	{ "phys-id", &spufs_id_ops, 0666, },
	{ "object-id", &spufs_object_id_ops, 0666, },
	{ "tid", &spufs_tid_fops, 0444, },
	{ "stat", &spufs_stat_fops, 0444, },
	{},
};
struct spufs_coredump_reader spufs_coredump_read[] = {
	{ "regs", __spufs_regs_read, NULL, sizeof(struct spu_reg128[128])},
	{ "fpcr", __spufs_fpcr_read, NULL, sizeof(struct spu_reg128) },
	{ "lslr", NULL, spufs_lslr_get, 19 },
	{ "decr", NULL, spufs_decr_get, 19 },
	{ "decr_status", NULL, spufs_decr_status_get, 19 },
	{ "mem", __spufs_mem_read, NULL, LS_SIZE, },
	{ "signal1", __spufs_signal1_read, NULL, sizeof(u32) },
	{ "signal1_type", NULL, spufs_signal1_type_get, 19 },
	{ "signal2", __spufs_signal2_read, NULL, sizeof(u32) },
	{ "signal2_type", NULL, spufs_signal2_type_get, 19 },
	{ "event_mask", NULL, spufs_event_mask_get, 19 },
	{ "event_status", NULL, spufs_event_status_get, 19 },
	{ "mbox_info", __spufs_mbox_info_read, NULL, sizeof(u32) },
	{ "ibox_info", __spufs_ibox_info_read, NULL, sizeof(u32) },
	{ "wbox_info", __spufs_wbox_info_read, NULL, 4 * sizeof(u32)},
	{ "dma_info", __spufs_dma_info_read, NULL, sizeof(struct spu_dma_info)},
	{ "proxydma_info", __spufs_proxydma_info_read,
			   NULL, sizeof(struct spu_proxydma_info)},
	{ "object-id", NULL, spufs_object_id_get, 19 },
	{ "npc", NULL, spufs_npc_get, 19 },
	{ NULL },
};