/*
 * SPU file system -- file contents
 *
 * (C) Copyright IBM Deutschland Entwicklung GmbH 2005
 *
 * Author: Arnd Bergmann <arndb@de.ibm.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2, or (at your option)
 * any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 */
#include <linux/fs.h>
#include <linux/ioctl.h>
#include <linux/module.h>
#include <linux/pagemap.h>
#include <linux/poll.h>
#include <linux/ptrace.h>
#include <linux/seq_file.h>
#include <linux/marker.h>

#include <asm/io.h>
#include <asm/time.h>
#include <asm/spu.h>
#include <asm/spu_info.h>
#include <asm/uaccess.h>

#include "spufs.h"
#include "sputrace.h"
#define SPUFS_MMAP_4K (PAGE_SIZE == 0x1000)

/* Simple attribute files */
struct spufs_attr {
	int (*get)(void *, u64 *);
	int (*set)(void *, u64);
	char get_buf[24];	/* enough to store a u64 and "\n\0" */
	char set_buf[24];
	void *data;
	const char *fmt;	/* format for read operation */
	struct mutex mutex;	/* protects access to these buffers */
};
static int spufs_attr_open(struct inode *inode, struct file *file,
		int (*get)(void *, u64 *), int (*set)(void *, u64),
		const char *fmt)
{
	struct spufs_attr *attr;

	attr = kmalloc(sizeof(*attr), GFP_KERNEL);
	if (!attr)
		return -ENOMEM;

	attr->get = get;
	attr->set = set;
	attr->data = inode->i_private;
	attr->fmt = fmt;
	mutex_init(&attr->mutex);
	file->private_data = attr;

	return nonseekable_open(inode, file);
}
static int spufs_attr_release(struct inode *inode, struct file *file)
{
	kfree(file->private_data);
	return 0;
}
static ssize_t spufs_attr_read(struct file *file, char __user *buf,
		size_t len, loff_t *ppos)
{
	struct spufs_attr *attr;
	size_t size;
	ssize_t ret;

	attr = file->private_data;
	if (!attr->get)
		return -EACCES;

	ret = mutex_lock_interruptible(&attr->mutex);
	if (ret)
		return ret;

	if (*ppos) {		/* continued read */
		size = strlen(attr->get_buf);
	} else {		/* first read */
		u64 val;
		ret = attr->get(attr->data, &val);
		if (ret)
			goto out;

		size = scnprintf(attr->get_buf, sizeof(attr->get_buf),
				 attr->fmt, (unsigned long long)val);
	}

	ret = simple_read_from_buffer(buf, len, ppos, attr->get_buf, size);
out:
	mutex_unlock(&attr->mutex);
	return ret;
}
static ssize_t spufs_attr_write(struct file *file, const char __user *buf,
		size_t len, loff_t *ppos)
{
	struct spufs_attr *attr;
	u64 val;
	size_t size;
	ssize_t ret;

	attr = file->private_data;
	if (!attr->set)
		return -EACCES;

	ret = mutex_lock_interruptible(&attr->mutex);
	if (ret)
		return ret;

	ret = -EFAULT;
	size = min(sizeof(attr->set_buf) - 1, len);
	if (copy_from_user(attr->set_buf, buf, size))
		goto out;

	ret = len; /* claim we got the whole input */
	attr->set_buf[size] = '\0';
	val = simple_strtol(attr->set_buf, NULL, 0);
	attr->set(attr->data, val);
out:
	mutex_unlock(&attr->mutex);
	return ret;
}
#define DEFINE_SPUFS_SIMPLE_ATTRIBUTE(__fops, __get, __set, __fmt)	\
static int __fops ## _open(struct inode *inode, struct file *file)	\
{									\
	__simple_attr_check_format(__fmt, 0ull);			\
	return spufs_attr_open(inode, file, __get, __set, __fmt);	\
}									\
static struct file_operations __fops = {				\
	.owner	 = THIS_MODULE,						\
	.open	 = __fops ## _open,					\
	.release = spufs_attr_release,					\
	.read	 = spufs_attr_read,					\
	.write	 = spufs_attr_write,					\
};
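/*
 * Usage sketch (not part of this file): DEFINE_SPUFS_SIMPLE_ATTRIBUTE is
 * normally reached through the DEFINE_SPUFS_ATTRIBUTE wrapper further down,
 * but a direct, hand-written use would look roughly like the hypothetical
 * example below.  The example_* names are illustrative only; the get/set
 * callbacks just expose some 64-bit quantity from the context.
 */
#if 0
static int example_get(void *data, u64 *val)
{
	struct spu_context *ctx = data;

	*val = ctx->object_id;		/* any 64-bit quantity to expose */
	return 0;
}

static int example_set(void *data, u64 val)
{
	struct spu_context *ctx = data;

	ctx->object_id = val;
	return 0;
}

/* Generates example_ops plus example_ops_open(); "0x%llx\n" is the read format. */
DEFINE_SPUFS_SIMPLE_ATTRIBUTE(example_ops, example_get, example_set, "0x%llx\n");
#endif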
static int
spufs_mem_open(struct inode *inode, struct file *file)
{
	struct spufs_inode_info *i = SPUFS_I(inode);
	struct spu_context *ctx = i->i_ctx;

	mutex_lock(&ctx->mapping_lock);
	file->private_data = ctx;
	if (!i->i_openers++)
		ctx->local_store = inode->i_mapping;
	mutex_unlock(&ctx->mapping_lock);
	return 0;
}

static int
spufs_mem_release(struct inode *inode, struct file *file)
{
	struct spufs_inode_info *i = SPUFS_I(inode);
	struct spu_context *ctx = i->i_ctx;

	mutex_lock(&ctx->mapping_lock);
	if (!--i->i_openers)
		ctx->local_store = NULL;
	mutex_unlock(&ctx->mapping_lock);
	return 0;
}
187 __spufs_mem_read(struct spu_context
*ctx
, char __user
*buffer
,
188 size_t size
, loff_t
*pos
)
190 char *local_store
= ctx
->ops
->get_ls(ctx
);
191 return simple_read_from_buffer(buffer
, size
, pos
, local_store
,
196 spufs_mem_read(struct file
*file
, char __user
*buffer
,
197 size_t size
, loff_t
*pos
)
199 struct spu_context
*ctx
= file
->private_data
;
202 ret
= spu_acquire(ctx
);
205 ret
= __spufs_mem_read(ctx
, buffer
, size
, pos
);
212 spufs_mem_write(struct file
*file
, const char __user
*buffer
,
213 size_t size
, loff_t
*ppos
)
215 struct spu_context
*ctx
= file
->private_data
;
224 if (size
> LS_SIZE
- pos
)
225 size
= LS_SIZE
- pos
;
227 ret
= spu_acquire(ctx
);
231 local_store
= ctx
->ops
->get_ls(ctx
);
232 ret
= copy_from_user(local_store
+ pos
, buffer
, size
);
242 spufs_mem_mmap_fault(struct vm_area_struct
*vma
, struct vm_fault
*vmf
)
244 struct spu_context
*ctx
= vma
->vm_file
->private_data
;
245 unsigned long address
= (unsigned long)vmf
->virtual_address
;
246 unsigned long pfn
, offset
;
248 #ifdef CONFIG_SPU_FS_64K_LS
249 struct spu_state
*csa
= &ctx
->csa
;
252 /* Check what page size we are using */
253 psize
= get_slice_psize(vma
->vm_mm
, address
);
255 /* Some sanity checking */
256 BUG_ON(csa
->use_big_pages
!= (psize
== MMU_PAGE_64K
));
258 /* Wow, 64K, cool, we need to align the address though */
259 if (csa
->use_big_pages
) {
260 BUG_ON(vma
->vm_start
& 0xffff);
261 address
&= ~0xfffful
;
263 #endif /* CONFIG_SPU_FS_64K_LS */
265 offset
= vmf
->pgoff
<< PAGE_SHIFT
;
266 if (offset
>= LS_SIZE
)
267 return VM_FAULT_SIGBUS
;
269 pr_debug("spufs_mem_mmap_fault address=0x%lx, offset=0x%lx\n",
272 if (spu_acquire(ctx
))
273 return VM_FAULT_NOPAGE
;
275 if (ctx
->state
== SPU_STATE_SAVED
) {
276 vma
->vm_page_prot
= __pgprot(pgprot_val(vma
->vm_page_prot
)
278 pfn
= vmalloc_to_pfn(ctx
->csa
.lscsa
->ls
+ offset
);
280 vma
->vm_page_prot
= __pgprot(pgprot_val(vma
->vm_page_prot
)
282 pfn
= (ctx
->spu
->local_store_phys
+ offset
) >> PAGE_SHIFT
;
284 vm_insert_pfn(vma
, address
, pfn
);
288 return VM_FAULT_NOPAGE
;
291 static int spufs_mem_mmap_access(struct vm_area_struct
*vma
,
292 unsigned long address
,
293 void *buf
, int len
, int write
)
295 struct spu_context
*ctx
= vma
->vm_file
->private_data
;
296 unsigned long offset
= address
- vma
->vm_start
;
299 if (write
&& !(vma
->vm_flags
& VM_WRITE
))
301 if (spu_acquire(ctx
))
303 if ((offset
+ len
) > vma
->vm_end
)
304 len
= vma
->vm_end
- offset
;
305 local_store
= ctx
->ops
->get_ls(ctx
);
307 memcpy_toio(local_store
+ offset
, buf
, len
);
309 memcpy_fromio(buf
, local_store
+ offset
, len
);
static struct vm_operations_struct spufs_mem_mmap_vmops = {
	.fault = spufs_mem_mmap_fault,
	.access = spufs_mem_mmap_access,
};
319 static int spufs_mem_mmap(struct file
*file
, struct vm_area_struct
*vma
)
321 #ifdef CONFIG_SPU_FS_64K_LS
322 struct spu_context
*ctx
= file
->private_data
;
323 struct spu_state
*csa
= &ctx
->csa
;
325 /* Sanity check VMA alignment */
326 if (csa
->use_big_pages
) {
327 pr_debug("spufs_mem_mmap 64K, start=0x%lx, end=0x%lx,"
328 " pgoff=0x%lx\n", vma
->vm_start
, vma
->vm_end
,
330 if (vma
->vm_start
& 0xffff)
332 if (vma
->vm_pgoff
& 0xf)
335 #endif /* CONFIG_SPU_FS_64K_LS */
337 if (!(vma
->vm_flags
& VM_SHARED
))
340 vma
->vm_flags
|= VM_IO
| VM_PFNMAP
;
341 vma
->vm_page_prot
= __pgprot(pgprot_val(vma
->vm_page_prot
)
344 vma
->vm_ops
= &spufs_mem_mmap_vmops
;
348 #ifdef CONFIG_SPU_FS_64K_LS
349 static unsigned long spufs_get_unmapped_area(struct file
*file
,
350 unsigned long addr
, unsigned long len
, unsigned long pgoff
,
353 struct spu_context
*ctx
= file
->private_data
;
354 struct spu_state
*csa
= &ctx
->csa
;
356 /* If not using big pages, fallback to normal MM g_u_a */
357 if (!csa
->use_big_pages
)
358 return current
->mm
->get_unmapped_area(file
, addr
, len
,
361 /* Else, try to obtain a 64K pages slice */
362 return slice_get_unmapped_area(addr
, len
, flags
,
365 #endif /* CONFIG_SPU_FS_64K_LS */
static const struct file_operations spufs_mem_fops = {
	.open			= spufs_mem_open,
	.release		= spufs_mem_release,
	.read			= spufs_mem_read,
	.write			= spufs_mem_write,
	.llseek			= generic_file_llseek,
	.mmap			= spufs_mem_mmap,
#ifdef CONFIG_SPU_FS_64K_LS
	.get_unmapped_area	= spufs_get_unmapped_area,
#endif
};
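/*
 * Userspace sketch (not built here): the "mem" file gives read/write and
 * mmap access to the 256KB of SPU local store.  The context path below is
 * an example only; contexts are created with spu_create(2).
 */
#if 0
#include <fcntl.h>
#include <stdio.h>
#include <sys/mman.h>
#include <unistd.h>

#define EXAMPLE_LS_SIZE 0x40000		/* 256KB of local store */

int main(void)
{
	int fd = open("/spu/example-ctx/mem", O_RDWR);
	if (fd < 0) {
		perror("open");
		return 1;
	}

	/* A shared mapping is required; see the VM_SHARED check in spufs_mem_mmap(). */
	unsigned char *ls = mmap(NULL, EXAMPLE_LS_SIZE, PROT_READ | PROT_WRITE,
				 MAP_SHARED, fd, 0);
	if (ls == MAP_FAILED) {
		perror("mmap");
		return 1;
	}

	printf("first word of local store: 0x%02x%02x%02x%02x\n",
	       ls[0], ls[1], ls[2], ls[3]);

	munmap(ls, EXAMPLE_LS_SIZE);
	close(fd);
	return 0;
}
#endif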
static int spufs_ps_fault(struct vm_area_struct *vma,
				    struct vm_fault *vmf,
				    unsigned long ps_offs,
				    unsigned long ps_size)
{
	struct spu_context *ctx = vma->vm_file->private_data;
	unsigned long area, offset = vmf->pgoff << PAGE_SHIFT;
	int ret = 0;

	spu_context_nospu_trace(spufs_ps_fault__enter, ctx);

	if (offset >= ps_size)
		return VM_FAULT_SIGBUS;

	if (fatal_signal_pending(current))
		return VM_FAULT_SIGBUS;

	/*
	 * Because we release the mmap_sem, the context may be destroyed while
	 * we're in spu_wait. Grab an extra reference so it isn't destroyed
	 * while we're in this function.
	 */
	get_spu_context(ctx);

	/*
	 * We have to wait for context to be loaded before we have
	 * pages to hand out to the user, but we don't want to wait
	 * with the mmap_sem held.
	 * It is possible to drop the mmap_sem here, but then we need
	 * to return VM_FAULT_NOPAGE because the mappings may have
	 * changed.
	 */
	if (spu_acquire(ctx))
		goto refault;

	if (ctx->state == SPU_STATE_SAVED) {
		up_read(&current->mm->mmap_sem);
		spu_context_nospu_trace(spufs_ps_fault__sleep, ctx);
		ret = spufs_wait(ctx->run_wq, ctx->state == SPU_STATE_RUNNABLE);
		spu_context_trace(spufs_ps_fault__wake, ctx, ctx->spu);
		down_read(&current->mm->mmap_sem);
	} else {
		area = ctx->spu->problem_phys + ps_offs;
		vm_insert_pfn(vma, (unsigned long)vmf->virtual_address,
			      (area + offset) >> PAGE_SHIFT);
		spu_context_trace(spufs_ps_fault__insert, ctx, ctx->spu);
	}

	if (!ret)
		spu_release(ctx);

refault:
	put_spu_context(ctx);
	return VM_FAULT_NOPAGE;
}

#if SPUFS_MMAP_4K
436 static int spufs_cntl_mmap_fault(struct vm_area_struct
*vma
,
437 struct vm_fault
*vmf
)
439 return spufs_ps_fault(vma
, vmf
, 0x4000, SPUFS_CNTL_MAP_SIZE
);
442 static struct vm_operations_struct spufs_cntl_mmap_vmops
= {
443 .fault
= spufs_cntl_mmap_fault
,
447 * mmap support for problem state control area [0x4000 - 0x4fff].
449 static int spufs_cntl_mmap(struct file
*file
, struct vm_area_struct
*vma
)
451 if (!(vma
->vm_flags
& VM_SHARED
))
454 vma
->vm_flags
|= VM_IO
| VM_PFNMAP
;
455 vma
->vm_page_prot
= __pgprot(pgprot_val(vma
->vm_page_prot
)
456 | _PAGE_NO_CACHE
| _PAGE_GUARDED
);
458 vma
->vm_ops
= &spufs_cntl_mmap_vmops
;
461 #else /* SPUFS_MMAP_4K */
462 #define spufs_cntl_mmap NULL
463 #endif /* !SPUFS_MMAP_4K */
465 static int spufs_cntl_get(void *data
, u64
*val
)
467 struct spu_context
*ctx
= data
;
470 ret
= spu_acquire(ctx
);
473 *val
= ctx
->ops
->status_read(ctx
);
479 static int spufs_cntl_set(void *data
, u64 val
)
481 struct spu_context
*ctx
= data
;
484 ret
= spu_acquire(ctx
);
487 ctx
->ops
->runcntl_write(ctx
, val
);
493 static int spufs_cntl_open(struct inode
*inode
, struct file
*file
)
495 struct spufs_inode_info
*i
= SPUFS_I(inode
);
496 struct spu_context
*ctx
= i
->i_ctx
;
498 mutex_lock(&ctx
->mapping_lock
);
499 file
->private_data
= ctx
;
501 ctx
->cntl
= inode
->i_mapping
;
502 mutex_unlock(&ctx
->mapping_lock
);
503 return simple_attr_open(inode
, file
, spufs_cntl_get
,
504 spufs_cntl_set
, "0x%08lx");
508 spufs_cntl_release(struct inode
*inode
, struct file
*file
)
510 struct spufs_inode_info
*i
= SPUFS_I(inode
);
511 struct spu_context
*ctx
= i
->i_ctx
;
513 simple_attr_release(inode
, file
);
515 mutex_lock(&ctx
->mapping_lock
);
518 mutex_unlock(&ctx
->mapping_lock
);
522 static const struct file_operations spufs_cntl_fops
= {
523 .open
= spufs_cntl_open
,
524 .release
= spufs_cntl_release
,
525 .read
= simple_attr_read
,
526 .write
= simple_attr_write
,
527 .mmap
= spufs_cntl_mmap
,
531 spufs_regs_open(struct inode
*inode
, struct file
*file
)
533 struct spufs_inode_info
*i
= SPUFS_I(inode
);
534 file
->private_data
= i
->i_ctx
;
539 __spufs_regs_read(struct spu_context
*ctx
, char __user
*buffer
,
540 size_t size
, loff_t
*pos
)
542 struct spu_lscsa
*lscsa
= ctx
->csa
.lscsa
;
543 return simple_read_from_buffer(buffer
, size
, pos
,
544 lscsa
->gprs
, sizeof lscsa
->gprs
);
548 spufs_regs_read(struct file
*file
, char __user
*buffer
,
549 size_t size
, loff_t
*pos
)
552 struct spu_context
*ctx
= file
->private_data
;
554 /* pre-check for file position: if we'd return EOF, there's no point
555 * causing a deschedule */
556 if (*pos
>= sizeof(ctx
->csa
.lscsa
->gprs
))
559 ret
= spu_acquire_saved(ctx
);
562 ret
= __spufs_regs_read(ctx
, buffer
, size
, pos
);
563 spu_release_saved(ctx
);
568 spufs_regs_write(struct file
*file
, const char __user
*buffer
,
569 size_t size
, loff_t
*pos
)
571 struct spu_context
*ctx
= file
->private_data
;
572 struct spu_lscsa
*lscsa
= ctx
->csa
.lscsa
;
575 size
= min_t(ssize_t
, sizeof lscsa
->gprs
- *pos
, size
);
580 ret
= spu_acquire_saved(ctx
);
584 ret
= copy_from_user(lscsa
->gprs
+ *pos
- size
,
585 buffer
, size
) ? -EFAULT
: size
;
587 spu_release_saved(ctx
);
static const struct file_operations spufs_regs_fops = {
	.open	 = spufs_regs_open,
	.read	 = spufs_regs_read,
	.write	 = spufs_regs_write,
	.llseek	 = generic_file_llseek,
};
599 __spufs_fpcr_read(struct spu_context
*ctx
, char __user
* buffer
,
600 size_t size
, loff_t
* pos
)
602 struct spu_lscsa
*lscsa
= ctx
->csa
.lscsa
;
603 return simple_read_from_buffer(buffer
, size
, pos
,
604 &lscsa
->fpcr
, sizeof(lscsa
->fpcr
));
608 spufs_fpcr_read(struct file
*file
, char __user
* buffer
,
609 size_t size
, loff_t
* pos
)
612 struct spu_context
*ctx
= file
->private_data
;
614 ret
= spu_acquire_saved(ctx
);
617 ret
= __spufs_fpcr_read(ctx
, buffer
, size
, pos
);
618 spu_release_saved(ctx
);
623 spufs_fpcr_write(struct file
*file
, const char __user
* buffer
,
624 size_t size
, loff_t
* pos
)
626 struct spu_context
*ctx
= file
->private_data
;
627 struct spu_lscsa
*lscsa
= ctx
->csa
.lscsa
;
630 size
= min_t(ssize_t
, sizeof(lscsa
->fpcr
) - *pos
, size
);
634 ret
= spu_acquire_saved(ctx
);
639 ret
= copy_from_user((char *)&lscsa
->fpcr
+ *pos
- size
,
640 buffer
, size
) ? -EFAULT
: size
;
642 spu_release_saved(ctx
);
static const struct file_operations spufs_fpcr_fops = {
	.open	 = spufs_regs_open,
	.read	 = spufs_fpcr_read,
	.write	 = spufs_fpcr_write,
	.llseek	 = generic_file_llseek,
};
653 /* generic open function for all pipe-like files */
654 static int spufs_pipe_open(struct inode
*inode
, struct file
*file
)
656 struct spufs_inode_info
*i
= SPUFS_I(inode
);
657 file
->private_data
= i
->i_ctx
;
659 return nonseekable_open(inode
, file
);
663 * Read as many bytes from the mailbox as possible, until
664 * one of the conditions becomes true:
666 * - no more data available in the mailbox
667 * - end of the user provided buffer
668 * - end of the mapped area
670 static ssize_t
spufs_mbox_read(struct file
*file
, char __user
*buf
,
671 size_t len
, loff_t
*pos
)
673 struct spu_context
*ctx
= file
->private_data
;
674 u32 mbox_data
, __user
*udata
;
680 if (!access_ok(VERIFY_WRITE
, buf
, len
))
683 udata
= (void __user
*)buf
;
685 count
= spu_acquire(ctx
);
689 for (count
= 0; (count
+ 4) <= len
; count
+= 4, udata
++) {
691 ret
= ctx
->ops
->mbox_read(ctx
, &mbox_data
);
696 * at the end of the mapped area, we can fault
697 * but still need to return the data we have
698 * read successfully so far.
700 ret
= __put_user(mbox_data
, udata
);
static const struct file_operations spufs_mbox_fops = {
	.open	= spufs_pipe_open,
	.read	= spufs_mbox_read,
};
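/*
 * Userspace sketch (not built here): "mbox" returns the SPU-to-PPE mailbox
 * contents in 4-byte units and never blocks; -EAGAIN means the mailbox is
 * empty.  The context path is an example only.
 */
#if 0
#include <errno.h>
#include <fcntl.h>
#include <stdint.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	uint32_t data;
	int fd = open("/spu/example-ctx/mbox", O_RDONLY);

	if (fd < 0) {
		perror("open");
		return 1;
	}

	ssize_t n = read(fd, &data, sizeof(data));	/* must be a multiple of 4 bytes */
	if (n == sizeof(data))
		printf("mailbox word: 0x%08x\n", data);
	else if (n < 0 && errno == EAGAIN)
		printf("mailbox empty\n");

	close(fd);
	return 0;
}
#endif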
720 static ssize_t
spufs_mbox_stat_read(struct file
*file
, char __user
*buf
,
721 size_t len
, loff_t
*pos
)
723 struct spu_context
*ctx
= file
->private_data
;
730 ret
= spu_acquire(ctx
);
734 mbox_stat
= ctx
->ops
->mbox_stat_read(ctx
) & 0xff;
738 if (copy_to_user(buf
, &mbox_stat
, sizeof mbox_stat
))
744 static const struct file_operations spufs_mbox_stat_fops
= {
745 .open
= spufs_pipe_open
,
746 .read
= spufs_mbox_stat_read
,
749 /* low-level ibox access function */
750 size_t spu_ibox_read(struct spu_context
*ctx
, u32
*data
)
752 return ctx
->ops
->ibox_read(ctx
, data
);
755 static int spufs_ibox_fasync(int fd
, struct file
*file
, int on
)
757 struct spu_context
*ctx
= file
->private_data
;
759 return fasync_helper(fd
, file
, on
, &ctx
->ibox_fasync
);
762 /* interrupt-level ibox callback function. */
763 void spufs_ibox_callback(struct spu
*spu
)
765 struct spu_context
*ctx
= spu
->ctx
;
770 wake_up_all(&ctx
->ibox_wq
);
771 kill_fasync(&ctx
->ibox_fasync
, SIGIO
, POLLIN
);
775 * Read as many bytes from the interrupt mailbox as possible, until
776 * one of the conditions becomes true:
778 * - no more data available in the mailbox
779 * - end of the user provided buffer
780 * - end of the mapped area
782 * If the file is opened without O_NONBLOCK, we wait here until
783 * any data is available, but return when we have been able to
786 static ssize_t
spufs_ibox_read(struct file
*file
, char __user
*buf
,
787 size_t len
, loff_t
*pos
)
789 struct spu_context
*ctx
= file
->private_data
;
790 u32 ibox_data
, __user
*udata
;
796 if (!access_ok(VERIFY_WRITE
, buf
, len
))
799 udata
= (void __user
*)buf
;
801 count
= spu_acquire(ctx
);
805 /* wait only for the first element */
807 if (file
->f_flags
& O_NONBLOCK
) {
808 if (!spu_ibox_read(ctx
, &ibox_data
)) {
813 count
= spufs_wait(ctx
->ibox_wq
, spu_ibox_read(ctx
, &ibox_data
));
818 /* if we can't write at all, return -EFAULT */
819 count
= __put_user(ibox_data
, udata
);
823 for (count
= 4, udata
++; (count
+ 4) <= len
; count
+= 4, udata
++) {
825 ret
= ctx
->ops
->ibox_read(ctx
, &ibox_data
);
829 * at the end of the mapped area, we can fault
830 * but still need to return the data we have
831 * read successfully so far.
833 ret
= __put_user(ibox_data
, udata
);
844 static unsigned int spufs_ibox_poll(struct file
*file
, poll_table
*wait
)
846 struct spu_context
*ctx
= file
->private_data
;
849 poll_wait(file
, &ctx
->ibox_wq
, wait
);
852 * For now keep this uninterruptible and also ignore the rule
853 * that poll should not sleep. Will be fixed later.
855 mutex_lock(&ctx
->state_mutex
);
856 mask
= ctx
->ops
->mbox_stat_poll(ctx
, POLLIN
| POLLRDNORM
);
862 static const struct file_operations spufs_ibox_fops
= {
863 .open
= spufs_pipe_open
,
864 .read
= spufs_ibox_read
,
865 .poll
= spufs_ibox_poll
,
866 .fasync
= spufs_ibox_fasync
,
869 static ssize_t
spufs_ibox_stat_read(struct file
*file
, char __user
*buf
,
870 size_t len
, loff_t
*pos
)
872 struct spu_context
*ctx
= file
->private_data
;
879 ret
= spu_acquire(ctx
);
882 ibox_stat
= (ctx
->ops
->mbox_stat_read(ctx
) >> 16) & 0xff;
885 if (copy_to_user(buf
, &ibox_stat
, sizeof ibox_stat
))
891 static const struct file_operations spufs_ibox_stat_fops
= {
892 .open
= spufs_pipe_open
,
893 .read
= spufs_ibox_stat_read
,
896 /* low-level mailbox write */
897 size_t spu_wbox_write(struct spu_context
*ctx
, u32 data
)
899 return ctx
->ops
->wbox_write(ctx
, data
);
902 static int spufs_wbox_fasync(int fd
, struct file
*file
, int on
)
904 struct spu_context
*ctx
= file
->private_data
;
907 ret
= fasync_helper(fd
, file
, on
, &ctx
->wbox_fasync
);
912 /* interrupt-level wbox callback function. */
913 void spufs_wbox_callback(struct spu
*spu
)
915 struct spu_context
*ctx
= spu
->ctx
;
920 wake_up_all(&ctx
->wbox_wq
);
921 kill_fasync(&ctx
->wbox_fasync
, SIGIO
, POLLOUT
);
/*
 * Write as many bytes to the interrupt mailbox as possible, until
 * one of the conditions becomes true:
 *
 * - the mailbox is full
 * - end of the user provided buffer
 * - end of the mapped area
 *
 * If the file is opened without O_NONBLOCK, we wait here until
 * space is available, but return when we have been able to
 * write something.
 */
936 static ssize_t
spufs_wbox_write(struct file
*file
, const char __user
*buf
,
937 size_t len
, loff_t
*pos
)
939 struct spu_context
*ctx
= file
->private_data
;
940 u32 wbox_data
, __user
*udata
;
946 udata
= (void __user
*)buf
;
947 if (!access_ok(VERIFY_READ
, buf
, len
))
950 if (__get_user(wbox_data
, udata
))
953 count
= spu_acquire(ctx
);
958 * make sure we can at least write one element, by waiting
959 * in case of !O_NONBLOCK
962 if (file
->f_flags
& O_NONBLOCK
) {
963 if (!spu_wbox_write(ctx
, wbox_data
)) {
968 count
= spufs_wait(ctx
->wbox_wq
, spu_wbox_write(ctx
, wbox_data
));
974 /* write as much as possible */
975 for (count
= 4, udata
++; (count
+ 4) <= len
; count
+= 4, udata
++) {
977 ret
= __get_user(wbox_data
, udata
);
981 ret
= spu_wbox_write(ctx
, wbox_data
);
992 static unsigned int spufs_wbox_poll(struct file
*file
, poll_table
*wait
)
994 struct spu_context
*ctx
= file
->private_data
;
997 poll_wait(file
, &ctx
->wbox_wq
, wait
);
1000 * For now keep this uninterruptible and also ignore the rule
1001 * that poll should not sleep. Will be fixed later.
1003 mutex_lock(&ctx
->state_mutex
);
1004 mask
= ctx
->ops
->mbox_stat_poll(ctx
, POLLOUT
| POLLWRNORM
);
static const struct file_operations spufs_wbox_fops = {
	.open	= spufs_pipe_open,
	.write	= spufs_wbox_write,
	.poll	= spufs_wbox_poll,
	.fasync	= spufs_wbox_fasync,
};
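/*
 * Userspace sketch (not built here): "wbox" is written in 4-byte units and,
 * when opened with O_NONBLOCK, fails with EAGAIN while the mailbox is full;
 * poll() reports POLLOUT once space is available again.  The context path
 * is an example only.
 */
#if 0
#include <errno.h>
#include <fcntl.h>
#include <poll.h>
#include <stdint.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	uint32_t value = 0x1234;
	int fd = open("/spu/example-ctx/wbox", O_WRONLY | O_NONBLOCK);

	if (fd < 0) {
		perror("open");
		return 1;
	}

	while (write(fd, &value, sizeof(value)) != sizeof(value)) {
		struct pollfd pfd = { .fd = fd, .events = POLLOUT };

		if (errno != EAGAIN) {
			perror("write");
			break;
		}
		poll(&pfd, 1, -1);	/* wait until the SPU drains the mailbox */
	}

	close(fd);
	return 0;
}
#endif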
1017 static ssize_t
spufs_wbox_stat_read(struct file
*file
, char __user
*buf
,
1018 size_t len
, loff_t
*pos
)
1020 struct spu_context
*ctx
= file
->private_data
;
1027 ret
= spu_acquire(ctx
);
1030 wbox_stat
= (ctx
->ops
->mbox_stat_read(ctx
) >> 8) & 0xff;
1033 if (copy_to_user(buf
, &wbox_stat
, sizeof wbox_stat
))
1039 static const struct file_operations spufs_wbox_stat_fops
= {
1040 .open
= spufs_pipe_open
,
1041 .read
= spufs_wbox_stat_read
,
1044 static int spufs_signal1_open(struct inode
*inode
, struct file
*file
)
1046 struct spufs_inode_info
*i
= SPUFS_I(inode
);
1047 struct spu_context
*ctx
= i
->i_ctx
;
1049 mutex_lock(&ctx
->mapping_lock
);
1050 file
->private_data
= ctx
;
1051 if (!i
->i_openers
++)
1052 ctx
->signal1
= inode
->i_mapping
;
1053 mutex_unlock(&ctx
->mapping_lock
);
1054 return nonseekable_open(inode
, file
);
1058 spufs_signal1_release(struct inode
*inode
, struct file
*file
)
1060 struct spufs_inode_info
*i
= SPUFS_I(inode
);
1061 struct spu_context
*ctx
= i
->i_ctx
;
1063 mutex_lock(&ctx
->mapping_lock
);
1064 if (!--i
->i_openers
)
1065 ctx
->signal1
= NULL
;
1066 mutex_unlock(&ctx
->mapping_lock
);
1070 static ssize_t
__spufs_signal1_read(struct spu_context
*ctx
, char __user
*buf
,
1071 size_t len
, loff_t
*pos
)
1079 if (ctx
->csa
.spu_chnlcnt_RW
[3]) {
1080 data
= ctx
->csa
.spu_chnldata_RW
[3];
1087 if (copy_to_user(buf
, &data
, 4))
1094 static ssize_t
spufs_signal1_read(struct file
*file
, char __user
*buf
,
1095 size_t len
, loff_t
*pos
)
1098 struct spu_context
*ctx
= file
->private_data
;
1100 ret
= spu_acquire_saved(ctx
);
1103 ret
= __spufs_signal1_read(ctx
, buf
, len
, pos
);
1104 spu_release_saved(ctx
);
1109 static ssize_t
spufs_signal1_write(struct file
*file
, const char __user
*buf
,
1110 size_t len
, loff_t
*pos
)
1112 struct spu_context
*ctx
;
1116 ctx
= file
->private_data
;
1121 if (copy_from_user(&data
, buf
, 4))
1124 ret
= spu_acquire(ctx
);
1127 ctx
->ops
->signal1_write(ctx
, data
);
static int
spufs_signal1_mmap_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
#if SPUFS_SIGNAL_MAP_SIZE == 0x1000
	return spufs_ps_fault(vma, vmf, 0x14000, SPUFS_SIGNAL_MAP_SIZE);
#elif SPUFS_SIGNAL_MAP_SIZE == 0x10000
	/* For 64k pages, both signal1 and signal2 can be used to mmap the whole
	 * signal 1 and 2 area
	 */
	return spufs_ps_fault(vma, vmf, 0x10000, SPUFS_SIGNAL_MAP_SIZE);
#else
#error unsupported page size
#endif
}
1148 static struct vm_operations_struct spufs_signal1_mmap_vmops
= {
1149 .fault
= spufs_signal1_mmap_fault
,
1152 static int spufs_signal1_mmap(struct file
*file
, struct vm_area_struct
*vma
)
1154 if (!(vma
->vm_flags
& VM_SHARED
))
1157 vma
->vm_flags
|= VM_IO
| VM_PFNMAP
;
1158 vma
->vm_page_prot
= __pgprot(pgprot_val(vma
->vm_page_prot
)
1159 | _PAGE_NO_CACHE
| _PAGE_GUARDED
);
1161 vma
->vm_ops
= &spufs_signal1_mmap_vmops
;
1165 static const struct file_operations spufs_signal1_fops
= {
1166 .open
= spufs_signal1_open
,
1167 .release
= spufs_signal1_release
,
1168 .read
= spufs_signal1_read
,
1169 .write
= spufs_signal1_write
,
1170 .mmap
= spufs_signal1_mmap
,
1173 static const struct file_operations spufs_signal1_nosched_fops
= {
1174 .open
= spufs_signal1_open
,
1175 .release
= spufs_signal1_release
,
1176 .write
= spufs_signal1_write
,
1177 .mmap
= spufs_signal1_mmap
,
1180 static int spufs_signal2_open(struct inode
*inode
, struct file
*file
)
1182 struct spufs_inode_info
*i
= SPUFS_I(inode
);
1183 struct spu_context
*ctx
= i
->i_ctx
;
1185 mutex_lock(&ctx
->mapping_lock
);
1186 file
->private_data
= ctx
;
1187 if (!i
->i_openers
++)
1188 ctx
->signal2
= inode
->i_mapping
;
1189 mutex_unlock(&ctx
->mapping_lock
);
1190 return nonseekable_open(inode
, file
);
1194 spufs_signal2_release(struct inode
*inode
, struct file
*file
)
1196 struct spufs_inode_info
*i
= SPUFS_I(inode
);
1197 struct spu_context
*ctx
= i
->i_ctx
;
1199 mutex_lock(&ctx
->mapping_lock
);
1200 if (!--i
->i_openers
)
1201 ctx
->signal2
= NULL
;
1202 mutex_unlock(&ctx
->mapping_lock
);
1206 static ssize_t
__spufs_signal2_read(struct spu_context
*ctx
, char __user
*buf
,
1207 size_t len
, loff_t
*pos
)
1215 if (ctx
->csa
.spu_chnlcnt_RW
[4]) {
1216 data
= ctx
->csa
.spu_chnldata_RW
[4];
1223 if (copy_to_user(buf
, &data
, 4))
1230 static ssize_t
spufs_signal2_read(struct file
*file
, char __user
*buf
,
1231 size_t len
, loff_t
*pos
)
1233 struct spu_context
*ctx
= file
->private_data
;
1236 ret
= spu_acquire_saved(ctx
);
1239 ret
= __spufs_signal2_read(ctx
, buf
, len
, pos
);
1240 spu_release_saved(ctx
);
1245 static ssize_t
spufs_signal2_write(struct file
*file
, const char __user
*buf
,
1246 size_t len
, loff_t
*pos
)
1248 struct spu_context
*ctx
;
1252 ctx
= file
->private_data
;
1257 if (copy_from_user(&data
, buf
, 4))
1260 ret
= spu_acquire(ctx
);
1263 ctx
->ops
->signal2_write(ctx
, data
);
1271 spufs_signal2_mmap_fault(struct vm_area_struct
*vma
, struct vm_fault
*vmf
)
1273 #if SPUFS_SIGNAL_MAP_SIZE == 0x1000
1274 return spufs_ps_fault(vma
, vmf
, 0x1c000, SPUFS_SIGNAL_MAP_SIZE
);
1275 #elif SPUFS_SIGNAL_MAP_SIZE == 0x10000
1276 /* For 64k pages, both signal1 and signal2 can be used to mmap the whole
1277 * signal 1 and 2 area
1279 return spufs_ps_fault(vma
, vmf
, 0x10000, SPUFS_SIGNAL_MAP_SIZE
);
1281 #error unsupported page size
1285 static struct vm_operations_struct spufs_signal2_mmap_vmops
= {
1286 .fault
= spufs_signal2_mmap_fault
,
1289 static int spufs_signal2_mmap(struct file
*file
, struct vm_area_struct
*vma
)
1291 if (!(vma
->vm_flags
& VM_SHARED
))
1294 vma
->vm_flags
|= VM_IO
| VM_PFNMAP
;
1295 vma
->vm_page_prot
= __pgprot(pgprot_val(vma
->vm_page_prot
)
1296 | _PAGE_NO_CACHE
| _PAGE_GUARDED
);
1298 vma
->vm_ops
= &spufs_signal2_mmap_vmops
;
1301 #else /* SPUFS_MMAP_4K */
1302 #define spufs_signal2_mmap NULL
1303 #endif /* !SPUFS_MMAP_4K */
1305 static const struct file_operations spufs_signal2_fops
= {
1306 .open
= spufs_signal2_open
,
1307 .release
= spufs_signal2_release
,
1308 .read
= spufs_signal2_read
,
1309 .write
= spufs_signal2_write
,
1310 .mmap
= spufs_signal2_mmap
,
1313 static const struct file_operations spufs_signal2_nosched_fops
= {
1314 .open
= spufs_signal2_open
,
1315 .release
= spufs_signal2_release
,
1316 .write
= spufs_signal2_write
,
1317 .mmap
= spufs_signal2_mmap
,
1321 * This is a wrapper around DEFINE_SIMPLE_ATTRIBUTE which does the
1322 * work of acquiring (or not) the SPU context before calling through
1323 * to the actual get routine. The set routine is called directly.
1325 #define SPU_ATTR_NOACQUIRE 0
1326 #define SPU_ATTR_ACQUIRE 1
1327 #define SPU_ATTR_ACQUIRE_SAVED 2
1329 #define DEFINE_SPUFS_ATTRIBUTE(__name, __get, __set, __fmt, __acquire) \
1330 static int __##__get(void *data, u64 *val) \
1332 struct spu_context *ctx = data; \
1335 if (__acquire == SPU_ATTR_ACQUIRE) { \
1336 ret = spu_acquire(ctx); \
1339 *val = __get(ctx); \
1341 } else if (__acquire == SPU_ATTR_ACQUIRE_SAVED) { \
1342 ret = spu_acquire_saved(ctx); \
1345 *val = __get(ctx); \
1346 spu_release_saved(ctx); \
1348 *val = __get(ctx); \
1352 DEFINE_SPUFS_SIMPLE_ATTRIBUTE(__name, __##__get, __set, __fmt);
1354 static int spufs_signal1_type_set(void *data
, u64 val
)
1356 struct spu_context
*ctx
= data
;
1359 ret
= spu_acquire(ctx
);
1362 ctx
->ops
->signal1_type_set(ctx
, val
);
1368 static u64
spufs_signal1_type_get(struct spu_context
*ctx
)
1370 return ctx
->ops
->signal1_type_get(ctx
);
1372 DEFINE_SPUFS_ATTRIBUTE(spufs_signal1_type
, spufs_signal1_type_get
,
1373 spufs_signal1_type_set
, "%llu\n", SPU_ATTR_ACQUIRE
);
1376 static int spufs_signal2_type_set(void *data
, u64 val
)
1378 struct spu_context
*ctx
= data
;
1381 ret
= spu_acquire(ctx
);
1384 ctx
->ops
->signal2_type_set(ctx
, val
);
1390 static u64
spufs_signal2_type_get(struct spu_context
*ctx
)
1392 return ctx
->ops
->signal2_type_get(ctx
);
1394 DEFINE_SPUFS_ATTRIBUTE(spufs_signal2_type
, spufs_signal2_type_get
,
1395 spufs_signal2_type_set
, "%llu\n", SPU_ATTR_ACQUIRE
);
1399 spufs_mss_mmap_fault(struct vm_area_struct
*vma
, struct vm_fault
*vmf
)
1401 return spufs_ps_fault(vma
, vmf
, 0x0000, SPUFS_MSS_MAP_SIZE
);
1404 static struct vm_operations_struct spufs_mss_mmap_vmops
= {
1405 .fault
= spufs_mss_mmap_fault
,
1409 * mmap support for problem state MFC DMA area [0x0000 - 0x0fff].
1411 static int spufs_mss_mmap(struct file
*file
, struct vm_area_struct
*vma
)
1413 if (!(vma
->vm_flags
& VM_SHARED
))
1416 vma
->vm_flags
|= VM_IO
| VM_PFNMAP
;
1417 vma
->vm_page_prot
= __pgprot(pgprot_val(vma
->vm_page_prot
)
1418 | _PAGE_NO_CACHE
| _PAGE_GUARDED
);
1420 vma
->vm_ops
= &spufs_mss_mmap_vmops
;
1423 #else /* SPUFS_MMAP_4K */
1424 #define spufs_mss_mmap NULL
1425 #endif /* !SPUFS_MMAP_4K */
1427 static int spufs_mss_open(struct inode
*inode
, struct file
*file
)
1429 struct spufs_inode_info
*i
= SPUFS_I(inode
);
1430 struct spu_context
*ctx
= i
->i_ctx
;
1432 file
->private_data
= i
->i_ctx
;
1434 mutex_lock(&ctx
->mapping_lock
);
1435 if (!i
->i_openers
++)
1436 ctx
->mss
= inode
->i_mapping
;
1437 mutex_unlock(&ctx
->mapping_lock
);
1438 return nonseekable_open(inode
, file
);
1442 spufs_mss_release(struct inode
*inode
, struct file
*file
)
1444 struct spufs_inode_info
*i
= SPUFS_I(inode
);
1445 struct spu_context
*ctx
= i
->i_ctx
;
1447 mutex_lock(&ctx
->mapping_lock
);
1448 if (!--i
->i_openers
)
1450 mutex_unlock(&ctx
->mapping_lock
);
1454 static const struct file_operations spufs_mss_fops
= {
1455 .open
= spufs_mss_open
,
1456 .release
= spufs_mss_release
,
1457 .mmap
= spufs_mss_mmap
,
1461 spufs_psmap_mmap_fault(struct vm_area_struct
*vma
, struct vm_fault
*vmf
)
1463 return spufs_ps_fault(vma
, vmf
, 0x0000, SPUFS_PS_MAP_SIZE
);
1466 static struct vm_operations_struct spufs_psmap_mmap_vmops
= {
1467 .fault
= spufs_psmap_mmap_fault
,
1471 * mmap support for full problem state area [0x00000 - 0x1ffff].
1473 static int spufs_psmap_mmap(struct file
*file
, struct vm_area_struct
*vma
)
1475 if (!(vma
->vm_flags
& VM_SHARED
))
1478 vma
->vm_flags
|= VM_IO
| VM_PFNMAP
;
1479 vma
->vm_page_prot
= __pgprot(pgprot_val(vma
->vm_page_prot
)
1480 | _PAGE_NO_CACHE
| _PAGE_GUARDED
);
1482 vma
->vm_ops
= &spufs_psmap_mmap_vmops
;
1486 static int spufs_psmap_open(struct inode
*inode
, struct file
*file
)
1488 struct spufs_inode_info
*i
= SPUFS_I(inode
);
1489 struct spu_context
*ctx
= i
->i_ctx
;
1491 mutex_lock(&ctx
->mapping_lock
);
1492 file
->private_data
= i
->i_ctx
;
1493 if (!i
->i_openers
++)
1494 ctx
->psmap
= inode
->i_mapping
;
1495 mutex_unlock(&ctx
->mapping_lock
);
1496 return nonseekable_open(inode
, file
);
1500 spufs_psmap_release(struct inode
*inode
, struct file
*file
)
1502 struct spufs_inode_info
*i
= SPUFS_I(inode
);
1503 struct spu_context
*ctx
= i
->i_ctx
;
1505 mutex_lock(&ctx
->mapping_lock
);
1506 if (!--i
->i_openers
)
1508 mutex_unlock(&ctx
->mapping_lock
);
1512 static const struct file_operations spufs_psmap_fops
= {
1513 .open
= spufs_psmap_open
,
1514 .release
= spufs_psmap_release
,
1515 .mmap
= spufs_psmap_mmap
,
1521 spufs_mfc_mmap_fault(struct vm_area_struct
*vma
, struct vm_fault
*vmf
)
1523 return spufs_ps_fault(vma
, vmf
, 0x3000, SPUFS_MFC_MAP_SIZE
);
1526 static struct vm_operations_struct spufs_mfc_mmap_vmops
= {
1527 .fault
= spufs_mfc_mmap_fault
,
1531 * mmap support for problem state MFC DMA area [0x0000 - 0x0fff].
1533 static int spufs_mfc_mmap(struct file
*file
, struct vm_area_struct
*vma
)
1535 if (!(vma
->vm_flags
& VM_SHARED
))
1538 vma
->vm_flags
|= VM_IO
| VM_PFNMAP
;
1539 vma
->vm_page_prot
= __pgprot(pgprot_val(vma
->vm_page_prot
)
1540 | _PAGE_NO_CACHE
| _PAGE_GUARDED
);
1542 vma
->vm_ops
= &spufs_mfc_mmap_vmops
;
1545 #else /* SPUFS_MMAP_4K */
1546 #define spufs_mfc_mmap NULL
1547 #endif /* !SPUFS_MMAP_4K */
1549 static int spufs_mfc_open(struct inode
*inode
, struct file
*file
)
1551 struct spufs_inode_info
*i
= SPUFS_I(inode
);
1552 struct spu_context
*ctx
= i
->i_ctx
;
1554 /* we don't want to deal with DMA into other processes */
1555 if (ctx
->owner
!= current
->mm
)
1558 if (atomic_read(&inode
->i_count
) != 1)
1561 mutex_lock(&ctx
->mapping_lock
);
1562 file
->private_data
= ctx
;
1563 if (!i
->i_openers
++)
1564 ctx
->mfc
= inode
->i_mapping
;
1565 mutex_unlock(&ctx
->mapping_lock
);
1566 return nonseekable_open(inode
, file
);
1570 spufs_mfc_release(struct inode
*inode
, struct file
*file
)
1572 struct spufs_inode_info
*i
= SPUFS_I(inode
);
1573 struct spu_context
*ctx
= i
->i_ctx
;
1575 mutex_lock(&ctx
->mapping_lock
);
1576 if (!--i
->i_openers
)
1578 mutex_unlock(&ctx
->mapping_lock
);
1582 /* interrupt-level mfc callback function. */
1583 void spufs_mfc_callback(struct spu
*spu
)
1585 struct spu_context
*ctx
= spu
->ctx
;
1590 wake_up_all(&ctx
->mfc_wq
);
1592 pr_debug("%s %s\n", __func__
, spu
->name
);
1593 if (ctx
->mfc_fasync
) {
1594 u32 free_elements
, tagstatus
;
1597 /* no need for spu_acquire in interrupt context */
1598 free_elements
= ctx
->ops
->get_mfc_free_elements(ctx
);
1599 tagstatus
= ctx
->ops
->read_mfc_tagstatus(ctx
);
1602 if (free_elements
& 0xffff)
1604 if (tagstatus
& ctx
->tagwait
)
1607 kill_fasync(&ctx
->mfc_fasync
, SIGIO
, mask
);
1611 static int spufs_read_mfc_tagstatus(struct spu_context
*ctx
, u32
*status
)
1613 /* See if there is one tag group is complete */
1614 /* FIXME we need locking around tagwait */
1615 *status
= ctx
->ops
->read_mfc_tagstatus(ctx
) & ctx
->tagwait
;
1616 ctx
->tagwait
&= ~*status
;
1620 /* enable interrupt waiting for any tag group,
1621 may silently fail if interrupts are already enabled */
1622 ctx
->ops
->set_mfc_query(ctx
, ctx
->tagwait
, 1);
1626 static ssize_t
spufs_mfc_read(struct file
*file
, char __user
*buffer
,
1627 size_t size
, loff_t
*pos
)
1629 struct spu_context
*ctx
= file
->private_data
;
1636 ret
= spu_acquire(ctx
);
1641 if (file
->f_flags
& O_NONBLOCK
) {
1642 status
= ctx
->ops
->read_mfc_tagstatus(ctx
);
1643 if (!(status
& ctx
->tagwait
))
1646 /* XXX(hch): shouldn't we clear ret here? */
1647 ctx
->tagwait
&= ~status
;
1649 ret
= spufs_wait(ctx
->mfc_wq
,
1650 spufs_read_mfc_tagstatus(ctx
, &status
));
1657 if (copy_to_user(buffer
, &status
, 4))
1664 static int spufs_check_valid_dma(struct mfc_dma_command
*cmd
)
1666 pr_debug("queueing DMA %x %lx %x %x %x\n", cmd
->lsa
,
1667 cmd
->ea
, cmd
->size
, cmd
->tag
, cmd
->cmd
);
1678 pr_debug("invalid DMA opcode %x\n", cmd
->cmd
);
1682 if ((cmd
->lsa
& 0xf) != (cmd
->ea
&0xf)) {
1683 pr_debug("invalid DMA alignment, ea %lx lsa %x\n",
1688 switch (cmd
->size
& 0xf) {
1709 pr_debug("invalid DMA alignment %x for size %x\n",
1710 cmd
->lsa
& 0xf, cmd
->size
);
1714 if (cmd
->size
> 16 * 1024) {
1715 pr_debug("invalid DMA size %x\n", cmd
->size
);
1719 if (cmd
->tag
& 0xfff0) {
1720 /* we reserve the higher tag numbers for kernel use */
1721 pr_debug("invalid DMA tag\n");
1726 /* not supported in this version */
1727 pr_debug("invalid DMA class\n");
1734 static int spu_send_mfc_command(struct spu_context
*ctx
,
1735 struct mfc_dma_command cmd
,
1738 *error
= ctx
->ops
->send_mfc_command(ctx
, &cmd
);
1739 if (*error
== -EAGAIN
) {
1740 /* wait for any tag group to complete
1741 so we have space for the new command */
1742 ctx
->ops
->set_mfc_query(ctx
, ctx
->tagwait
, 1);
1743 /* try again, because the queue might be
1745 *error
= ctx
->ops
->send_mfc_command(ctx
, &cmd
);
1746 if (*error
== -EAGAIN
)
1752 static ssize_t
spufs_mfc_write(struct file
*file
, const char __user
*buffer
,
1753 size_t size
, loff_t
*pos
)
1755 struct spu_context
*ctx
= file
->private_data
;
1756 struct mfc_dma_command cmd
;
1759 if (size
!= sizeof cmd
)
1763 if (copy_from_user(&cmd
, buffer
, sizeof cmd
))
1766 ret
= spufs_check_valid_dma(&cmd
);
1770 ret
= spu_acquire(ctx
);
1774 ret
= spufs_wait(ctx
->run_wq
, ctx
->state
== SPU_STATE_RUNNABLE
);
1778 if (file
->f_flags
& O_NONBLOCK
) {
1779 ret
= ctx
->ops
->send_mfc_command(ctx
, &cmd
);
1782 ret
= spufs_wait(ctx
->mfc_wq
,
1783 spu_send_mfc_command(ctx
, cmd
, &status
));
1793 ctx
->tagwait
|= 1 << cmd
.tag
;
1802 static unsigned int spufs_mfc_poll(struct file
*file
,poll_table
*wait
)
1804 struct spu_context
*ctx
= file
->private_data
;
1805 u32 free_elements
, tagstatus
;
1808 poll_wait(file
, &ctx
->mfc_wq
, wait
);
1811 * For now keep this uninterruptible and also ignore the rule
1812 * that poll should not sleep. Will be fixed later.
1814 mutex_lock(&ctx
->state_mutex
);
1815 ctx
->ops
->set_mfc_query(ctx
, ctx
->tagwait
, 2);
1816 free_elements
= ctx
->ops
->get_mfc_free_elements(ctx
);
1817 tagstatus
= ctx
->ops
->read_mfc_tagstatus(ctx
);
1821 if (free_elements
& 0xffff)
1822 mask
|= POLLOUT
| POLLWRNORM
;
1823 if (tagstatus
& ctx
->tagwait
)
1824 mask
|= POLLIN
| POLLRDNORM
;
1826 pr_debug("%s: free %d tagstatus %d tagwait %d\n", __func__
,
1827 free_elements
, tagstatus
, ctx
->tagwait
);
1832 static int spufs_mfc_flush(struct file
*file
, fl_owner_t id
)
1834 struct spu_context
*ctx
= file
->private_data
;
1837 ret
= spu_acquire(ctx
);
1841 /* this currently hangs */
1842 ret
= spufs_wait(ctx
->mfc_wq
,
1843 ctx
->ops
->set_mfc_query(ctx
, ctx
->tagwait
, 2));
1846 ret
= spufs_wait(ctx
->mfc_wq
,
1847 ctx
->ops
->read_mfc_tagstatus(ctx
) == ctx
->tagwait
);
1858 static int spufs_mfc_fsync(struct file
*file
, struct dentry
*dentry
,
1861 return spufs_mfc_flush(file
, NULL
);
1864 static int spufs_mfc_fasync(int fd
, struct file
*file
, int on
)
1866 struct spu_context
*ctx
= file
->private_data
;
1868 return fasync_helper(fd
, file
, on
, &ctx
->mfc_fasync
);
static const struct file_operations spufs_mfc_fops = {
	.open	 = spufs_mfc_open,
	.release = spufs_mfc_release,
	.read	 = spufs_mfc_read,
	.write	 = spufs_mfc_write,
	.poll	 = spufs_mfc_poll,
	.flush	 = spufs_mfc_flush,
	.fsync	 = spufs_mfc_fsync,
	.fasync	 = spufs_mfc_fasync,
	.mmap	 = spufs_mfc_mmap,
};
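/*
 * Userspace sketch (not built here): a DMA between local store and process
 * memory is queued by writing one struct mfc_dma_command (declared in the
 * spufs headers; assumed visible below) to "mfc", and completion is
 * collected by reading a 4-byte tag-status mask back.  The path, tag
 * number and buffer are examples only; the limits in the comments mirror
 * spufs_check_valid_dma() above.
 */
#if 0
#include <fcntl.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>

int main(void)
{
	struct mfc_dma_command cmd;
	uint32_t tag_status;
	static char buf[16384] __attribute__((aligned(128)));
	int fd = open("/spu/example-ctx/mfc", O_RDWR);

	if (fd < 0) {
		perror("open");
		return 1;
	}

	memset(&cmd, 0, sizeof(cmd));
	cmd.lsa  = 0x0;				/* local store offset, same 16-byte alignment as ea */
	cmd.ea   = (uint64_t)(unsigned long)buf;
	cmd.size = sizeof(buf);			/* at most 16KB per command */
	cmd.tag  = 1;				/* tags 0-15 are available to userspace */
	cmd.cmd  = MFC_GET_CMD;			/* transfer from process memory to local store */

	if (write(fd, &cmd, sizeof(cmd)) != sizeof(cmd)) {
		perror("write");
		return 1;
	}

	/* Blocks until at least one of the waited-for tag groups completes. */
	if (read(fd, &tag_status, sizeof(tag_status)) == sizeof(tag_status))
		printf("completed tag groups: 0x%08x\n", tag_status);

	close(fd);
	return 0;
}
#endif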
1883 static int spufs_npc_set(void *data
, u64 val
)
1885 struct spu_context
*ctx
= data
;
1888 ret
= spu_acquire(ctx
);
1891 ctx
->ops
->npc_write(ctx
, val
);
1897 static u64
spufs_npc_get(struct spu_context
*ctx
)
1899 return ctx
->ops
->npc_read(ctx
);
1901 DEFINE_SPUFS_ATTRIBUTE(spufs_npc_ops
, spufs_npc_get
, spufs_npc_set
,
1902 "0x%llx\n", SPU_ATTR_ACQUIRE
);
1904 static int spufs_decr_set(void *data
, u64 val
)
1906 struct spu_context
*ctx
= data
;
1907 struct spu_lscsa
*lscsa
= ctx
->csa
.lscsa
;
1910 ret
= spu_acquire_saved(ctx
);
1913 lscsa
->decr
.slot
[0] = (u32
) val
;
1914 spu_release_saved(ctx
);
1919 static u64
spufs_decr_get(struct spu_context
*ctx
)
1921 struct spu_lscsa
*lscsa
= ctx
->csa
.lscsa
;
1922 return lscsa
->decr
.slot
[0];
1924 DEFINE_SPUFS_ATTRIBUTE(spufs_decr_ops
, spufs_decr_get
, spufs_decr_set
,
1925 "0x%llx\n", SPU_ATTR_ACQUIRE_SAVED
);
1927 static int spufs_decr_status_set(void *data
, u64 val
)
1929 struct spu_context
*ctx
= data
;
1932 ret
= spu_acquire_saved(ctx
);
1936 ctx
->csa
.priv2
.mfc_control_RW
|= MFC_CNTL_DECREMENTER_RUNNING
;
1938 ctx
->csa
.priv2
.mfc_control_RW
&= ~MFC_CNTL_DECREMENTER_RUNNING
;
1939 spu_release_saved(ctx
);
1944 static u64
spufs_decr_status_get(struct spu_context
*ctx
)
1946 if (ctx
->csa
.priv2
.mfc_control_RW
& MFC_CNTL_DECREMENTER_RUNNING
)
1947 return SPU_DECR_STATUS_RUNNING
;
1951 DEFINE_SPUFS_ATTRIBUTE(spufs_decr_status_ops
, spufs_decr_status_get
,
1952 spufs_decr_status_set
, "0x%llx\n",
1953 SPU_ATTR_ACQUIRE_SAVED
);
1955 static int spufs_event_mask_set(void *data
, u64 val
)
1957 struct spu_context
*ctx
= data
;
1958 struct spu_lscsa
*lscsa
= ctx
->csa
.lscsa
;
1961 ret
= spu_acquire_saved(ctx
);
1964 lscsa
->event_mask
.slot
[0] = (u32
) val
;
1965 spu_release_saved(ctx
);
1970 static u64
spufs_event_mask_get(struct spu_context
*ctx
)
1972 struct spu_lscsa
*lscsa
= ctx
->csa
.lscsa
;
1973 return lscsa
->event_mask
.slot
[0];
1976 DEFINE_SPUFS_ATTRIBUTE(spufs_event_mask_ops
, spufs_event_mask_get
,
1977 spufs_event_mask_set
, "0x%llx\n",
1978 SPU_ATTR_ACQUIRE_SAVED
);
1980 static u64
spufs_event_status_get(struct spu_context
*ctx
)
1982 struct spu_state
*state
= &ctx
->csa
;
1984 stat
= state
->spu_chnlcnt_RW
[0];
1986 return state
->spu_chnldata_RW
[0];
1989 DEFINE_SPUFS_ATTRIBUTE(spufs_event_status_ops
, spufs_event_status_get
,
1990 NULL
, "0x%llx\n", SPU_ATTR_ACQUIRE_SAVED
)
1992 static int spufs_srr0_set(void *data
, u64 val
)
1994 struct spu_context
*ctx
= data
;
1995 struct spu_lscsa
*lscsa
= ctx
->csa
.lscsa
;
1998 ret
= spu_acquire_saved(ctx
);
2001 lscsa
->srr0
.slot
[0] = (u32
) val
;
2002 spu_release_saved(ctx
);
2007 static u64
spufs_srr0_get(struct spu_context
*ctx
)
2009 struct spu_lscsa
*lscsa
= ctx
->csa
.lscsa
;
2010 return lscsa
->srr0
.slot
[0];
2012 DEFINE_SPUFS_ATTRIBUTE(spufs_srr0_ops
, spufs_srr0_get
, spufs_srr0_set
,
2013 "0x%llx\n", SPU_ATTR_ACQUIRE_SAVED
)
2015 static u64
spufs_id_get(struct spu_context
*ctx
)
2019 if (ctx
->state
== SPU_STATE_RUNNABLE
)
2020 num
= ctx
->spu
->number
;
2022 num
= (unsigned int)-1;
2026 DEFINE_SPUFS_ATTRIBUTE(spufs_id_ops
, spufs_id_get
, NULL
, "0x%llx\n",
2029 static u64
spufs_object_id_get(struct spu_context
*ctx
)
2031 /* FIXME: Should there really be no locking here? */
2032 return ctx
->object_id
;
2035 static int spufs_object_id_set(void *data
, u64 id
)
2037 struct spu_context
*ctx
= data
;
2038 ctx
->object_id
= id
;
2043 DEFINE_SPUFS_ATTRIBUTE(spufs_object_id_ops
, spufs_object_id_get
,
2044 spufs_object_id_set
, "0x%llx\n", SPU_ATTR_NOACQUIRE
);
2046 static u64
spufs_lslr_get(struct spu_context
*ctx
)
2048 return ctx
->csa
.priv2
.spu_lslr_RW
;
2050 DEFINE_SPUFS_ATTRIBUTE(spufs_lslr_ops
, spufs_lslr_get
, NULL
, "0x%llx\n",
2051 SPU_ATTR_ACQUIRE_SAVED
);
2053 static int spufs_info_open(struct inode
*inode
, struct file
*file
)
2055 struct spufs_inode_info
*i
= SPUFS_I(inode
);
2056 struct spu_context
*ctx
= i
->i_ctx
;
2057 file
->private_data
= ctx
;
2061 static int spufs_caps_show(struct seq_file
*s
, void *private)
2063 struct spu_context
*ctx
= s
->private;
2065 if (!(ctx
->flags
& SPU_CREATE_NOSCHED
))
2066 seq_puts(s
, "sched\n");
2067 if (!(ctx
->flags
& SPU_CREATE_ISOLATE
))
2068 seq_puts(s
, "step\n");
2072 static int spufs_caps_open(struct inode
*inode
, struct file
*file
)
2074 return single_open(file
, spufs_caps_show
, SPUFS_I(inode
)->i_ctx
);
2077 static const struct file_operations spufs_caps_fops
= {
2078 .open
= spufs_caps_open
,
2080 .llseek
= seq_lseek
,
2081 .release
= single_release
,
2084 static ssize_t
__spufs_mbox_info_read(struct spu_context
*ctx
,
2085 char __user
*buf
, size_t len
, loff_t
*pos
)
2089 /* EOF if there's no entry in the mbox */
2090 if (!(ctx
->csa
.prob
.mb_stat_R
& 0x0000ff))
2093 data
= ctx
->csa
.prob
.pu_mb_R
;
2095 return simple_read_from_buffer(buf
, len
, pos
, &data
, sizeof data
);
2098 static ssize_t
spufs_mbox_info_read(struct file
*file
, char __user
*buf
,
2099 size_t len
, loff_t
*pos
)
2102 struct spu_context
*ctx
= file
->private_data
;
2104 if (!access_ok(VERIFY_WRITE
, buf
, len
))
2107 ret
= spu_acquire_saved(ctx
);
2110 spin_lock(&ctx
->csa
.register_lock
);
2111 ret
= __spufs_mbox_info_read(ctx
, buf
, len
, pos
);
2112 spin_unlock(&ctx
->csa
.register_lock
);
2113 spu_release_saved(ctx
);
2118 static const struct file_operations spufs_mbox_info_fops
= {
2119 .open
= spufs_info_open
,
2120 .read
= spufs_mbox_info_read
,
2121 .llseek
= generic_file_llseek
,
2124 static ssize_t
__spufs_ibox_info_read(struct spu_context
*ctx
,
2125 char __user
*buf
, size_t len
, loff_t
*pos
)
2129 /* EOF if there's no entry in the ibox */
2130 if (!(ctx
->csa
.prob
.mb_stat_R
& 0xff0000))
2133 data
= ctx
->csa
.priv2
.puint_mb_R
;
2135 return simple_read_from_buffer(buf
, len
, pos
, &data
, sizeof data
);
2138 static ssize_t
spufs_ibox_info_read(struct file
*file
, char __user
*buf
,
2139 size_t len
, loff_t
*pos
)
2141 struct spu_context
*ctx
= file
->private_data
;
2144 if (!access_ok(VERIFY_WRITE
, buf
, len
))
2147 ret
= spu_acquire_saved(ctx
);
2150 spin_lock(&ctx
->csa
.register_lock
);
2151 ret
= __spufs_ibox_info_read(ctx
, buf
, len
, pos
);
2152 spin_unlock(&ctx
->csa
.register_lock
);
2153 spu_release_saved(ctx
);
2158 static const struct file_operations spufs_ibox_info_fops
= {
2159 .open
= spufs_info_open
,
2160 .read
= spufs_ibox_info_read
,
2161 .llseek
= generic_file_llseek
,
2164 static ssize_t
__spufs_wbox_info_read(struct spu_context
*ctx
,
2165 char __user
*buf
, size_t len
, loff_t
*pos
)
2171 wbox_stat
= ctx
->csa
.prob
.mb_stat_R
;
2172 cnt
= 4 - ((wbox_stat
& 0x00ff00) >> 8);
2173 for (i
= 0; i
< cnt
; i
++) {
2174 data
[i
] = ctx
->csa
.spu_mailbox_data
[i
];
2177 return simple_read_from_buffer(buf
, len
, pos
, &data
,
2181 static ssize_t
spufs_wbox_info_read(struct file
*file
, char __user
*buf
,
2182 size_t len
, loff_t
*pos
)
2184 struct spu_context
*ctx
= file
->private_data
;
2187 if (!access_ok(VERIFY_WRITE
, buf
, len
))
2190 ret
= spu_acquire_saved(ctx
);
2193 spin_lock(&ctx
->csa
.register_lock
);
2194 ret
= __spufs_wbox_info_read(ctx
, buf
, len
, pos
);
2195 spin_unlock(&ctx
->csa
.register_lock
);
2196 spu_release_saved(ctx
);
2201 static const struct file_operations spufs_wbox_info_fops
= {
2202 .open
= spufs_info_open
,
2203 .read
= spufs_wbox_info_read
,
2204 .llseek
= generic_file_llseek
,
2207 static ssize_t
__spufs_dma_info_read(struct spu_context
*ctx
,
2208 char __user
*buf
, size_t len
, loff_t
*pos
)
2210 struct spu_dma_info info
;
2211 struct mfc_cq_sr
*qp
, *spuqp
;
2214 info
.dma_info_type
= ctx
->csa
.priv2
.spu_tag_status_query_RW
;
2215 info
.dma_info_mask
= ctx
->csa
.lscsa
->tag_mask
.slot
[0];
2216 info
.dma_info_status
= ctx
->csa
.spu_chnldata_RW
[24];
2217 info
.dma_info_stall_and_notify
= ctx
->csa
.spu_chnldata_RW
[25];
2218 info
.dma_info_atomic_command_status
= ctx
->csa
.spu_chnldata_RW
[27];
2219 for (i
= 0; i
< 16; i
++) {
2220 qp
= &info
.dma_info_command_data
[i
];
2221 spuqp
= &ctx
->csa
.priv2
.spuq
[i
];
2223 qp
->mfc_cq_data0_RW
= spuqp
->mfc_cq_data0_RW
;
2224 qp
->mfc_cq_data1_RW
= spuqp
->mfc_cq_data1_RW
;
2225 qp
->mfc_cq_data2_RW
= spuqp
->mfc_cq_data2_RW
;
2226 qp
->mfc_cq_data3_RW
= spuqp
->mfc_cq_data3_RW
;
2229 return simple_read_from_buffer(buf
, len
, pos
, &info
,
2233 static ssize_t
spufs_dma_info_read(struct file
*file
, char __user
*buf
,
2234 size_t len
, loff_t
*pos
)
2236 struct spu_context
*ctx
= file
->private_data
;
2239 if (!access_ok(VERIFY_WRITE
, buf
, len
))
2242 ret
= spu_acquire_saved(ctx
);
2245 spin_lock(&ctx
->csa
.register_lock
);
2246 ret
= __spufs_dma_info_read(ctx
, buf
, len
, pos
);
2247 spin_unlock(&ctx
->csa
.register_lock
);
2248 spu_release_saved(ctx
);
2253 static const struct file_operations spufs_dma_info_fops
= {
2254 .open
= spufs_info_open
,
2255 .read
= spufs_dma_info_read
,
2258 static ssize_t
__spufs_proxydma_info_read(struct spu_context
*ctx
,
2259 char __user
*buf
, size_t len
, loff_t
*pos
)
2261 struct spu_proxydma_info info
;
2262 struct mfc_cq_sr
*qp
, *puqp
;
2263 int ret
= sizeof info
;
2269 if (!access_ok(VERIFY_WRITE
, buf
, len
))
2272 info
.proxydma_info_type
= ctx
->csa
.prob
.dma_querytype_RW
;
2273 info
.proxydma_info_mask
= ctx
->csa
.prob
.dma_querymask_RW
;
2274 info
.proxydma_info_status
= ctx
->csa
.prob
.dma_tagstatus_R
;
2275 for (i
= 0; i
< 8; i
++) {
2276 qp
= &info
.proxydma_info_command_data
[i
];
2277 puqp
= &ctx
->csa
.priv2
.puq
[i
];
2279 qp
->mfc_cq_data0_RW
= puqp
->mfc_cq_data0_RW
;
2280 qp
->mfc_cq_data1_RW
= puqp
->mfc_cq_data1_RW
;
2281 qp
->mfc_cq_data2_RW
= puqp
->mfc_cq_data2_RW
;
2282 qp
->mfc_cq_data3_RW
= puqp
->mfc_cq_data3_RW
;
2285 return simple_read_from_buffer(buf
, len
, pos
, &info
,
2289 static ssize_t
spufs_proxydma_info_read(struct file
*file
, char __user
*buf
,
2290 size_t len
, loff_t
*pos
)
2292 struct spu_context
*ctx
= file
->private_data
;
2295 ret
= spu_acquire_saved(ctx
);
2298 spin_lock(&ctx
->csa
.register_lock
);
2299 ret
= __spufs_proxydma_info_read(ctx
, buf
, len
, pos
);
2300 spin_unlock(&ctx
->csa
.register_lock
);
2301 spu_release_saved(ctx
);
2306 static const struct file_operations spufs_proxydma_info_fops
= {
2307 .open
= spufs_info_open
,
2308 .read
= spufs_proxydma_info_read
,
2311 static int spufs_show_tid(struct seq_file
*s
, void *private)
2313 struct spu_context
*ctx
= s
->private;
2315 seq_printf(s
, "%d\n", ctx
->tid
);
2319 static int spufs_tid_open(struct inode
*inode
, struct file
*file
)
2321 return single_open(file
, spufs_show_tid
, SPUFS_I(inode
)->i_ctx
);
2324 static const struct file_operations spufs_tid_fops
= {
2325 .open
= spufs_tid_open
,
2327 .llseek
= seq_lseek
,
2328 .release
= single_release
,
2331 static const char *ctx_state_names
[] = {
2332 "user", "system", "iowait", "loaded"
2335 static unsigned long long spufs_acct_time(struct spu_context
*ctx
,
2336 enum spu_utilization_state state
)
2339 unsigned long long time
= ctx
->stats
.times
[state
];
2342 * In general, utilization statistics are updated by the controlling
2343 * thread as the spu context moves through various well defined
2344 * state transitions, but if the context is lazily loaded its
2345 * utilization statistics are not updated as the controlling thread
2346 * is not tightly coupled with the execution of the spu context. We
2347 * calculate and apply the time delta from the last recorded state
2348 * of the spu context.
2350 if (ctx
->spu
&& ctx
->stats
.util_state
== state
) {
2352 time
+= timespec_to_ns(&ts
) - ctx
->stats
.tstamp
;
2355 return time
/ NSEC_PER_MSEC
;
2358 static unsigned long long spufs_slb_flts(struct spu_context
*ctx
)
2360 unsigned long long slb_flts
= ctx
->stats
.slb_flt
;
2362 if (ctx
->state
== SPU_STATE_RUNNABLE
) {
2363 slb_flts
+= (ctx
->spu
->stats
.slb_flt
-
2364 ctx
->stats
.slb_flt_base
);
2370 static unsigned long long spufs_class2_intrs(struct spu_context
*ctx
)
2372 unsigned long long class2_intrs
= ctx
->stats
.class2_intr
;
2374 if (ctx
->state
== SPU_STATE_RUNNABLE
) {
2375 class2_intrs
+= (ctx
->spu
->stats
.class2_intr
-
2376 ctx
->stats
.class2_intr_base
);
2379 return class2_intrs
;
2383 static int spufs_show_stat(struct seq_file
*s
, void *private)
2385 struct spu_context
*ctx
= s
->private;
2388 ret
= spu_acquire(ctx
);
2392 seq_printf(s
, "%s %llu %llu %llu %llu "
2393 "%llu %llu %llu %llu %llu %llu %llu %llu\n",
2394 ctx_state_names
[ctx
->stats
.util_state
],
2395 spufs_acct_time(ctx
, SPU_UTIL_USER
),
2396 spufs_acct_time(ctx
, SPU_UTIL_SYSTEM
),
2397 spufs_acct_time(ctx
, SPU_UTIL_IOWAIT
),
2398 spufs_acct_time(ctx
, SPU_UTIL_IDLE_LOADED
),
2399 ctx
->stats
.vol_ctx_switch
,
2400 ctx
->stats
.invol_ctx_switch
,
2401 spufs_slb_flts(ctx
),
2402 ctx
->stats
.hash_flt
,
2405 spufs_class2_intrs(ctx
),
2406 ctx
->stats
.libassist
);
2411 static int spufs_stat_open(struct inode
*inode
, struct file
*file
)
2413 return single_open(file
, spufs_show_stat
, SPUFS_I(inode
)->i_ctx
);
2416 static const struct file_operations spufs_stat_fops
= {
2417 .open
= spufs_stat_open
,
2419 .llseek
= seq_lseek
,
2420 .release
= single_release
,
2423 static inline int spufs_switch_log_used(struct spu_context
*ctx
)
2425 return (ctx
->switch_log
->head
- ctx
->switch_log
->tail
) %
2429 static inline int spufs_switch_log_avail(struct spu_context
*ctx
)
2431 return SWITCH_LOG_BUFSIZE
- spufs_switch_log_used(ctx
);
2434 static int spufs_switch_log_open(struct inode
*inode
, struct file
*file
)
2436 struct spu_context
*ctx
= SPUFS_I(inode
)->i_ctx
;
2439 rc
= spu_acquire(ctx
);
2443 if (ctx
->switch_log
) {
2448 ctx
->switch_log
= kmalloc(sizeof(struct switch_log
) +
2449 SWITCH_LOG_BUFSIZE
* sizeof(struct switch_log_entry
),
2452 if (!ctx
->switch_log
) {
2457 ctx
->switch_log
->head
= ctx
->switch_log
->tail
= 0;
2458 init_waitqueue_head(&ctx
->switch_log
->wait
);
2466 static int spufs_switch_log_release(struct inode
*inode
, struct file
*file
)
2468 struct spu_context
*ctx
= SPUFS_I(inode
)->i_ctx
;
2471 rc
= spu_acquire(ctx
);
2475 kfree(ctx
->switch_log
);
2476 ctx
->switch_log
= NULL
;
2482 static int switch_log_sprint(struct spu_context
*ctx
, char *tbuf
, int n
)
2484 struct switch_log_entry
*p
;
2486 p
= ctx
->switch_log
->log
+ ctx
->switch_log
->tail
% SWITCH_LOG_BUFSIZE
;
2488 return snprintf(tbuf
, n
, "%u.%09u %d %u %u %llu\n",
2489 (unsigned int) p
->tstamp
.tv_sec
,
2490 (unsigned int) p
->tstamp
.tv_nsec
,
2492 (unsigned int) p
->type
,
2493 (unsigned int) p
->val
,
2494 (unsigned long long) p
->timebase
);
2497 static ssize_t
spufs_switch_log_read(struct file
*file
, char __user
*buf
,
2498 size_t len
, loff_t
*ppos
)
2500 struct inode
*inode
= file
->f_path
.dentry
->d_inode
;
2501 struct spu_context
*ctx
= SPUFS_I(inode
)->i_ctx
;
2502 int error
= 0, cnt
= 0;
2504 if (!buf
|| len
< 0)
2507 error
= spu_acquire(ctx
);
2515 if (spufs_switch_log_used(ctx
) == 0) {
2517 /* If there's data ready to go, we can
2518 * just return straight away */
2521 } else if (file
->f_flags
& O_NONBLOCK
) {
2526 /* spufs_wait will drop the mutex and
2527 * re-acquire, but since we're in read(), the
2528 * file cannot be _released (and so
2529 * ctx->switch_log is stable).
2531 error
= spufs_wait(ctx
->switch_log
->wait
,
2532 spufs_switch_log_used(ctx
) > 0);
2534 /* On error, spufs_wait returns without the
2535 * state mutex held */
2539 /* We may have had entries read from underneath
2540 * us while we dropped the mutex in spufs_wait,
2542 if (spufs_switch_log_used(ctx
) == 0)
2547 width
= switch_log_sprint(ctx
, tbuf
, sizeof(tbuf
));
2549 ctx
->switch_log
->tail
=
2550 (ctx
->switch_log
->tail
+ 1) %
2553 /* If the record is greater than space available return
2554 * partial buffer (so far) */
2557 error
= copy_to_user(buf
+ cnt
, tbuf
, width
);
2565 return cnt
== 0 ? error
: cnt
;
2568 static unsigned int spufs_switch_log_poll(struct file
*file
, poll_table
*wait
)
2570 struct inode
*inode
= file
->f_path
.dentry
->d_inode
;
2571 struct spu_context
*ctx
= SPUFS_I(inode
)->i_ctx
;
2572 unsigned int mask
= 0;
2575 poll_wait(file
, &ctx
->switch_log
->wait
, wait
);
2577 rc
= spu_acquire(ctx
);
2581 if (spufs_switch_log_used(ctx
) > 0)
static const struct file_operations spufs_switch_log_fops = {
	.owner		= THIS_MODULE,
	.open		= spufs_switch_log_open,
	.read		= spufs_switch_log_read,
	.poll		= spufs_switch_log_poll,
	.release	= spufs_switch_log_release,
};
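/*
 * Userspace sketch (not built here): each "switch_log" line is produced by
 * switch_log_sprint() above as "<sec>.<nsec> <spu> <type> <val> <timebase>";
 * the sketch assumes the single %d field is the spu number recorded by
 * spu_switch_log_notify().  Reads block until an entry is logged unless
 * O_NONBLOCK is used.  The context path is an example only.
 */
#if 0
#include <stdio.h>

int main(void)
{
	FILE *f = fopen("/spu/example-ctx/switch_log", "r");
	unsigned int sec, nsec, type, val;
	unsigned long long timebase;
	int spu;

	if (!f) {
		perror("fopen");
		return 1;
	}

	while (fscanf(f, "%u.%u %d %u %u %llu", &sec, &nsec, &spu,
		      &type, &val, &timebase) == 6)
		printf("spu %d: event %u val %u at %u.%09u (tb %llu)\n",
		       spu, type, val, sec, nsec, timebase);

	fclose(f);
	return 0;
}
#endif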
2598 * Log a context switch event to a switch log reader.
2600 * Must be called with ctx->state_mutex held.
2602 void spu_switch_log_notify(struct spu
*spu
, struct spu_context
*ctx
,
2605 if (!ctx
->switch_log
)
2608 if (spufs_switch_log_avail(ctx
) > 1) {
2609 struct switch_log_entry
*p
;
2611 p
= ctx
->switch_log
->log
+ ctx
->switch_log
->head
;
2612 ktime_get_ts(&p
->tstamp
);
2613 p
->timebase
= get_tb();
2614 p
->spu_id
= spu
? spu
->number
: -1;
2618 ctx
->switch_log
->head
=
2619 (ctx
->switch_log
->head
+ 1) % SWITCH_LOG_BUFSIZE
;
2622 wake_up(&ctx
->switch_log
->wait
);
2625 static int spufs_show_ctx(struct seq_file
*s
, void *private)
2627 struct spu_context
*ctx
= s
->private;
2630 mutex_lock(&ctx
->state_mutex
);
2632 struct spu
*spu
= ctx
->spu
;
2633 struct spu_priv2 __iomem
*priv2
= spu
->priv2
;
2635 spin_lock_irq(&spu
->register_lock
);
2636 mfc_control_RW
= in_be64(&priv2
->mfc_control_RW
);
2637 spin_unlock_irq(&spu
->register_lock
);
2639 struct spu_state
*csa
= &ctx
->csa
;
2641 mfc_control_RW
= csa
->priv2
.mfc_control_RW
;
2644 seq_printf(s
, "%c flgs(%lx) sflgs(%lx) pri(%d) ts(%d) spu(%02d)"
2645 " %c %lx %lx %lx %lx %x %x\n",
2646 ctx
->state
== SPU_STATE_SAVED
? 'S' : 'R',
2651 ctx
->spu
? ctx
->spu
->number
: -1,
2652 !list_empty(&ctx
->rq
) ? 'q' : ' ',
2653 ctx
->csa
.class_0_pending
,
2654 ctx
->csa
.class_0_dar
,
2655 ctx
->csa
.class_1_dsisr
,
2657 ctx
->ops
->runcntl_read(ctx
),
2658 ctx
->ops
->status_read(ctx
));
2660 mutex_unlock(&ctx
->state_mutex
);
2665 static int spufs_ctx_open(struct inode
*inode
, struct file
*file
)
2667 return single_open(file
, spufs_show_ctx
, SPUFS_I(inode
)->i_ctx
);
2670 static const struct file_operations spufs_ctx_fops
= {
2671 .open
= spufs_ctx_open
,
2673 .llseek
= seq_lseek
,
2674 .release
= single_release
,
2677 struct spufs_tree_descr spufs_dir_contents
[] = {
2678 { "capabilities", &spufs_caps_fops
, 0444, },
2679 { "mem", &spufs_mem_fops
, 0666, LS_SIZE
, },
2680 { "regs", &spufs_regs_fops
, 0666, sizeof(struct spu_reg128
[128]), },
2681 { "mbox", &spufs_mbox_fops
, 0444, },
2682 { "ibox", &spufs_ibox_fops
, 0444, },
2683 { "wbox", &spufs_wbox_fops
, 0222, },
2684 { "mbox_stat", &spufs_mbox_stat_fops
, 0444, sizeof(u32
), },
2685 { "ibox_stat", &spufs_ibox_stat_fops
, 0444, sizeof(u32
), },
2686 { "wbox_stat", &spufs_wbox_stat_fops
, 0444, sizeof(u32
), },
2687 { "signal1", &spufs_signal1_fops
, 0666, },
2688 { "signal2", &spufs_signal2_fops
, 0666, },
2689 { "signal1_type", &spufs_signal1_type
, 0666, },
2690 { "signal2_type", &spufs_signal2_type
, 0666, },
2691 { "cntl", &spufs_cntl_fops
, 0666, },
2692 { "fpcr", &spufs_fpcr_fops
, 0666, sizeof(struct spu_reg128
), },
2693 { "lslr", &spufs_lslr_ops
, 0444, },
2694 { "mfc", &spufs_mfc_fops
, 0666, },
2695 { "mss", &spufs_mss_fops
, 0666, },
2696 { "npc", &spufs_npc_ops
, 0666, },
2697 { "srr0", &spufs_srr0_ops
, 0666, },
2698 { "decr", &spufs_decr_ops
, 0666, },
2699 { "decr_status", &spufs_decr_status_ops
, 0666, },
2700 { "event_mask", &spufs_event_mask_ops
, 0666, },
2701 { "event_status", &spufs_event_status_ops
, 0444, },
2702 { "psmap", &spufs_psmap_fops
, 0666, SPUFS_PS_MAP_SIZE
, },
2703 { "phys-id", &spufs_id_ops
, 0666, },
2704 { "object-id", &spufs_object_id_ops
, 0666, },
2705 { "mbox_info", &spufs_mbox_info_fops
, 0444, sizeof(u32
), },
2706 { "ibox_info", &spufs_ibox_info_fops
, 0444, sizeof(u32
), },
2707 { "wbox_info", &spufs_wbox_info_fops
, 0444, sizeof(u32
), },
2708 { "dma_info", &spufs_dma_info_fops
, 0444,
2709 sizeof(struct spu_dma_info
), },
2710 { "proxydma_info", &spufs_proxydma_info_fops
, 0444,
2711 sizeof(struct spu_proxydma_info
)},
2712 { "tid", &spufs_tid_fops
, 0444, },
2713 { "stat", &spufs_stat_fops
, 0444, },
2714 { "switch_log", &spufs_switch_log_fops
, 0444 },
2718 struct spufs_tree_descr spufs_dir_nosched_contents
[] = {
2719 { "capabilities", &spufs_caps_fops
, 0444, },
2720 { "mem", &spufs_mem_fops
, 0666, LS_SIZE
, },
2721 { "mbox", &spufs_mbox_fops
, 0444, },
2722 { "ibox", &spufs_ibox_fops
, 0444, },
2723 { "wbox", &spufs_wbox_fops
, 0222, },
2724 { "mbox_stat", &spufs_mbox_stat_fops
, 0444, sizeof(u32
), },
2725 { "ibox_stat", &spufs_ibox_stat_fops
, 0444, sizeof(u32
), },
2726 { "wbox_stat", &spufs_wbox_stat_fops
, 0444, sizeof(u32
), },
2727 { "signal1", &spufs_signal1_nosched_fops
, 0222, },
2728 { "signal2", &spufs_signal2_nosched_fops
, 0222, },
2729 { "signal1_type", &spufs_signal1_type
, 0666, },
2730 { "signal2_type", &spufs_signal2_type
, 0666, },
2731 { "mss", &spufs_mss_fops
, 0666, },
2732 { "mfc", &spufs_mfc_fops
, 0666, },
2733 { "cntl", &spufs_cntl_fops
, 0666, },
2734 { "npc", &spufs_npc_ops
, 0666, },
2735 { "psmap", &spufs_psmap_fops
, 0666, SPUFS_PS_MAP_SIZE
, },
2736 { "phys-id", &spufs_id_ops
, 0666, },
2737 { "object-id", &spufs_object_id_ops
, 0666, },
2738 { "tid", &spufs_tid_fops
, 0444, },
2739 { "stat", &spufs_stat_fops
, 0444, },
2743 struct spufs_tree_descr spufs_dir_debug_contents
[] = {
2744 { ".ctx", &spufs_ctx_fops
, 0444, },
2748 struct spufs_coredump_reader spufs_coredump_read
[] = {
2749 { "regs", __spufs_regs_read
, NULL
, sizeof(struct spu_reg128
[128])},
2750 { "fpcr", __spufs_fpcr_read
, NULL
, sizeof(struct spu_reg128
) },
2751 { "lslr", NULL
, spufs_lslr_get
, 19 },
2752 { "decr", NULL
, spufs_decr_get
, 19 },
2753 { "decr_status", NULL
, spufs_decr_status_get
, 19 },
2754 { "mem", __spufs_mem_read
, NULL
, LS_SIZE
, },
2755 { "signal1", __spufs_signal1_read
, NULL
, sizeof(u32
) },
2756 { "signal1_type", NULL
, spufs_signal1_type_get
, 19 },
2757 { "signal2", __spufs_signal2_read
, NULL
, sizeof(u32
) },
2758 { "signal2_type", NULL
, spufs_signal2_type_get
, 19 },
2759 { "event_mask", NULL
, spufs_event_mask_get
, 19 },
2760 { "event_status", NULL
, spufs_event_status_get
, 19 },
2761 { "mbox_info", __spufs_mbox_info_read
, NULL
, sizeof(u32
) },
2762 { "ibox_info", __spufs_ibox_info_read
, NULL
, sizeof(u32
) },
2763 { "wbox_info", __spufs_wbox_info_read
, NULL
, 4 * sizeof(u32
)},
2764 { "dma_info", __spufs_dma_info_read
, NULL
, sizeof(struct spu_dma_info
)},
2765 { "proxydma_info", __spufs_proxydma_info_read
,
2766 NULL
, sizeof(struct spu_proxydma_info
)},
2767 { "object-id", NULL
, spufs_object_id_get
, 19 },
2768 { "npc", NULL
, spufs_npc_get
, 19 },