/*
 * SPU file system -- file contents
 *
 * (C) Copyright IBM Deutschland Entwicklung GmbH 2005
 *
 * Author: Arnd Bergmann <arndb@de.ibm.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2, or (at your option)
 * any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 */

#undef DEBUG

#include <linux/fs.h>
#include <linux/ioctl.h>
#include <linux/module.h>
#include <linux/pagemap.h>
#include <linux/poll.h>
#include <linux/ptrace.h>

#include <asm/io.h>
#include <asm/semaphore.h>
#include <asm/spu.h>
#include <asm/uaccess.h>

#include "spufs.h"

#define SPUFS_MMAP_4K (PAGE_SIZE == 0x1000)

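/*
 * The "mem" file gives user space access to the SPU local store.
 * Opening it records the backing mapping in the context so that the
 * read, write and mmap handlers below can reach the local store either
 * through the saved context or through the running SPU.
 */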
static int
spufs_mem_open(struct inode *inode, struct file *file)
{
        struct spufs_inode_info *i = SPUFS_I(inode);
        struct spu_context *ctx = i->i_ctx;
        file->private_data = ctx;
        file->f_mapping = inode->i_mapping;
        ctx->local_store = inode->i_mapping;
        return 0;
}

static ssize_t
spufs_mem_read(struct file *file, char __user *buffer,
               size_t size, loff_t *pos)
{
        struct spu_context *ctx = file->private_data;
        char *local_store;
        int ret;

        spu_acquire(ctx);

        local_store = ctx->ops->get_ls(ctx);
        ret = simple_read_from_buffer(buffer, size, pos, local_store, LS_SIZE);

        spu_release(ctx);
        return ret;
}

static ssize_t
spufs_mem_write(struct file *file, const char __user *buffer,
                size_t size, loff_t *pos)
{
        struct spu_context *ctx = file->private_data;
        char *local_store;
        int ret;

        size = min_t(ssize_t, LS_SIZE - *pos, size);
        if (size <= 0)
                return -EFBIG;
        *pos += size;

        spu_acquire(ctx);

        local_store = ctx->ops->get_ls(ctx);
        ret = copy_from_user(local_store + *pos - size,
                             buffer, size) ? -EFAULT : size;

        spu_release(ctx);
        return ret;
}

static struct page *
spufs_mem_mmap_nopage(struct vm_area_struct *vma,
                      unsigned long address, int *type)
{
        struct page *page = NOPAGE_SIGBUS;

        struct spu_context *ctx = vma->vm_file->private_data;
        unsigned long offset = address - vma->vm_start;
        offset += vma->vm_pgoff << PAGE_SHIFT;

        spu_acquire(ctx);

        if (ctx->state == SPU_STATE_SAVED)
                page = vmalloc_to_page(ctx->csa.lscsa->ls + offset);
        else
                page = pfn_to_page((ctx->spu->local_store_phys + offset)
                                   >> PAGE_SHIFT);

        spu_release(ctx);

        if (type)
                *type = VM_FAULT_MINOR;

        page_cache_get(page);
        return page;
}

static struct vm_operations_struct spufs_mem_mmap_vmops = {
        .nopage = spufs_mem_mmap_nopage,
};

static int
spufs_mem_mmap(struct file *file, struct vm_area_struct *vma)
{
        if (!(vma->vm_flags & VM_SHARED))
                return -EINVAL;

        /* FIXME: */
        vma->vm_page_prot = __pgprot(pgprot_val(vma->vm_page_prot)
                                     | _PAGE_NO_CACHE);

        vma->vm_ops = &spufs_mem_mmap_vmops;
        return 0;
}

static struct file_operations spufs_mem_fops = {
        .open   = spufs_mem_open,
        .read   = spufs_mem_read,
        .write  = spufs_mem_write,
        .llseek = generic_file_llseek,
        .mmap   = spufs_mem_mmap,
};

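/*
 * Common nopage handler for the problem state mappings below.  The
 * callers pass the offset and size of their area within the problem
 * state; the fault is resolved against the physical problem state of
 * the SPU once the context has been made runnable.
 */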
static struct page *spufs_ps_nopage(struct vm_area_struct *vma,
                                    unsigned long address,
                                    int *type, unsigned long ps_offs,
                                    unsigned long ps_size)
{
        struct page *page = NOPAGE_SIGBUS;
        int fault_type = VM_FAULT_SIGBUS;
        struct spu_context *ctx = vma->vm_file->private_data;
        unsigned long offset = address - vma->vm_start;
        unsigned long area;
        int ret;

        offset += vma->vm_pgoff << PAGE_SHIFT;
        if (offset >= ps_size)
                goto out;

        ret = spu_acquire_runnable(ctx);
        if (ret)
                goto out;

        area = ctx->spu->problem_phys + ps_offs;
        page = pfn_to_page((area + offset) >> PAGE_SHIFT);
        fault_type = VM_FAULT_MINOR;
        page_cache_get(page);

        spu_release(ctx);

 out:
        if (type)
                *type = fault_type;

        return page;
}

#if SPUFS_MMAP_4K
static struct page *spufs_cntl_mmap_nopage(struct vm_area_struct *vma,
                                           unsigned long address, int *type)
{
        return spufs_ps_nopage(vma, address, type, 0x4000, 0x1000);
}

static struct vm_operations_struct spufs_cntl_mmap_vmops = {
        .nopage = spufs_cntl_mmap_nopage,
};

/*
 * mmap support for problem state control area [0x4000 - 0x4fff].
 */
static int spufs_cntl_mmap(struct file *file, struct vm_area_struct *vma)
{
        if (!(vma->vm_flags & VM_SHARED))
                return -EINVAL;

        vma->vm_flags |= VM_RESERVED;
        vma->vm_page_prot = __pgprot(pgprot_val(vma->vm_page_prot)
                                     | _PAGE_NO_CACHE | _PAGE_GUARDED);

        vma->vm_ops = &spufs_cntl_mmap_vmops;
        return 0;
}
#else /* SPUFS_MMAP_4K */
#define spufs_cntl_mmap NULL
#endif /* !SPUFS_MMAP_4K */

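/*
 * The "cntl" file exposes the SPU run control area.  Only the mmap
 * path is functional at this point; the read and write handlers are
 * still stubbed out (see the FIXMEs below).
 */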
static int spufs_cntl_open(struct inode *inode, struct file *file)
{
        struct spufs_inode_info *i = SPUFS_I(inode);
        struct spu_context *ctx = i->i_ctx;

        file->private_data = ctx;
        file->f_mapping = inode->i_mapping;
        ctx->cntl = inode->i_mapping;
        return 0;
}

static ssize_t
spufs_cntl_read(struct file *file, char __user *buffer,
                size_t size, loff_t *pos)
{
        /* FIXME: read from spu status */
        return -EINVAL;
}

static ssize_t
spufs_cntl_write(struct file *file, const char __user *buffer,
                 size_t size, loff_t *pos)
{
        /* FIXME: write to runctl bit */
        return -EINVAL;
}

static struct file_operations spufs_cntl_fops = {
        .open  = spufs_cntl_open,
        .read  = spufs_cntl_read,
        .write = spufs_cntl_write,
        .mmap  = spufs_cntl_mmap,
};

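/*
 * The "regs" and "fpcr" files operate on register state kept in the
 * local store context save area, so the context is forced into the
 * saved state (spu_acquire_saved) before the data is copied.
 */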
static int
spufs_regs_open(struct inode *inode, struct file *file)
{
        struct spufs_inode_info *i = SPUFS_I(inode);
        file->private_data = i->i_ctx;
        return 0;
}

static ssize_t
spufs_regs_read(struct file *file, char __user *buffer,
                size_t size, loff_t *pos)
{
        struct spu_context *ctx = file->private_data;
        struct spu_lscsa *lscsa = ctx->csa.lscsa;
        int ret;

        spu_acquire_saved(ctx);

        ret = simple_read_from_buffer(buffer, size, pos,
                                      lscsa->gprs, sizeof lscsa->gprs);

        spu_release(ctx);
        return ret;
}

static ssize_t
spufs_regs_write(struct file *file, const char __user *buffer,
                 size_t size, loff_t *pos)
{
        struct spu_context *ctx = file->private_data;
        struct spu_lscsa *lscsa = ctx->csa.lscsa;
        int ret;

        size = min_t(ssize_t, sizeof lscsa->gprs - *pos, size);
        if (size <= 0)
                return -EFBIG;
        *pos += size;

        spu_acquire_saved(ctx);

        ret = copy_from_user(lscsa->gprs + *pos - size,
                             buffer, size) ? -EFAULT : size;

        spu_release(ctx);
        return ret;
}

static struct file_operations spufs_regs_fops = {
        .open   = spufs_regs_open,
        .read   = spufs_regs_read,
        .write  = spufs_regs_write,
        .llseek = generic_file_llseek,
};

static ssize_t
spufs_fpcr_read(struct file *file, char __user * buffer,
                size_t size, loff_t * pos)
{
        struct spu_context *ctx = file->private_data;
        struct spu_lscsa *lscsa = ctx->csa.lscsa;
        int ret;

        spu_acquire_saved(ctx);

        ret = simple_read_from_buffer(buffer, size, pos,
                                      &lscsa->fpcr, sizeof(lscsa->fpcr));

        spu_release(ctx);
        return ret;
}

static ssize_t
spufs_fpcr_write(struct file *file, const char __user * buffer,
                 size_t size, loff_t * pos)
{
        struct spu_context *ctx = file->private_data;
        struct spu_lscsa *lscsa = ctx->csa.lscsa;
        int ret;

        size = min_t(ssize_t, sizeof(lscsa->fpcr) - *pos, size);
        if (size <= 0)
                return -EFBIG;
        *pos += size;

        spu_acquire_saved(ctx);

        ret = copy_from_user((char *)&lscsa->fpcr + *pos - size,
                             buffer, size) ? -EFAULT : size;

        spu_release(ctx);
        return ret;
}

static struct file_operations spufs_fpcr_fops = {
        .open   = spufs_regs_open,
        .read   = spufs_fpcr_read,
        .write  = spufs_fpcr_write,
        .llseek = generic_file_llseek,
};

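/*
 * Mailbox files (mbox, ibox, wbox and their *_stat counterparts) are
 * pipe-like: they transfer exactly four bytes per access and are not
 * seekable.
 */
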
/* generic open function for all pipe-like files */
static int spufs_pipe_open(struct inode *inode, struct file *file)
{
        struct spufs_inode_info *i = SPUFS_I(inode);
        file->private_data = i->i_ctx;

        return nonseekable_open(inode, file);
}

static ssize_t spufs_mbox_read(struct file *file, char __user *buf,
                               size_t len, loff_t *pos)
{
        struct spu_context *ctx = file->private_data;
        u32 mbox_data;
        int ret;

        if (len < 4)
                return -EINVAL;

        spu_acquire(ctx);
        ret = ctx->ops->mbox_read(ctx, &mbox_data);
        spu_release(ctx);

        if (!ret)
                return -EAGAIN;

        if (copy_to_user(buf, &mbox_data, sizeof mbox_data))
                return -EFAULT;

        return 4;
}

static struct file_operations spufs_mbox_fops = {
        .open = spufs_pipe_open,
        .read = spufs_mbox_read,
};

static ssize_t spufs_mbox_stat_read(struct file *file, char __user *buf,
                                    size_t len, loff_t *pos)
{
        struct spu_context *ctx = file->private_data;
        u32 mbox_stat;

        if (len < 4)
                return -EINVAL;

        spu_acquire(ctx);

        mbox_stat = ctx->ops->mbox_stat_read(ctx) & 0xff;

        spu_release(ctx);

        if (copy_to_user(buf, &mbox_stat, sizeof mbox_stat))
                return -EFAULT;

        return 4;
}

static struct file_operations spufs_mbox_stat_fops = {
        .open = spufs_pipe_open,
        .read = spufs_mbox_stat_read,
};

/* low-level ibox access function */
size_t spu_ibox_read(struct spu_context *ctx, u32 *data)
{
        return ctx->ops->ibox_read(ctx, data);
}

static int spufs_ibox_fasync(int fd, struct file *file, int on)
{
        struct spu_context *ctx = file->private_data;

        return fasync_helper(fd, file, on, &ctx->ibox_fasync);
}

/* interrupt-level ibox callback function. */
void spufs_ibox_callback(struct spu *spu)
{
        struct spu_context *ctx = spu->ctx;

        wake_up_all(&ctx->ibox_wq);
        kill_fasync(&ctx->ibox_fasync, SIGIO, POLLIN);
}

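/*
 * Read from the interrupt mailbox.  With O_NONBLOCK the read returns
 * -EAGAIN if no data is pending; otherwise it sleeps on ibox_wq until
 * the interrupt-level callback above wakes it up.
 */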
static ssize_t spufs_ibox_read(struct file *file, char __user *buf,
                               size_t len, loff_t *pos)
{
        struct spu_context *ctx = file->private_data;
        u32 ibox_data;
        ssize_t ret;

        if (len < 4)
                return -EINVAL;

        spu_acquire(ctx);

        ret = 0;
        if (file->f_flags & O_NONBLOCK) {
                if (!spu_ibox_read(ctx, &ibox_data))
                        ret = -EAGAIN;
        } else {
                ret = spufs_wait(ctx->ibox_wq, spu_ibox_read(ctx, &ibox_data));
        }

        spu_release(ctx);

        if (ret)
                return ret;

        ret = 4;
        if (copy_to_user(buf, &ibox_data, sizeof ibox_data))
                ret = -EFAULT;

        return ret;
}

static unsigned int spufs_ibox_poll(struct file *file, poll_table *wait)
{
        struct spu_context *ctx = file->private_data;
        unsigned int mask;

        poll_wait(file, &ctx->ibox_wq, wait);

        spu_acquire(ctx);
        mask = ctx->ops->mbox_stat_poll(ctx, POLLIN | POLLRDNORM);
        spu_release(ctx);

        return mask;
}

static struct file_operations spufs_ibox_fops = {
        .open   = spufs_pipe_open,
        .read   = spufs_ibox_read,
        .poll   = spufs_ibox_poll,
        .fasync = spufs_ibox_fasync,
};

static ssize_t spufs_ibox_stat_read(struct file *file, char __user *buf,
                                    size_t len, loff_t *pos)
{
        struct spu_context *ctx = file->private_data;
        u32 ibox_stat;

        if (len < 4)
                return -EINVAL;

        spu_acquire(ctx);
        ibox_stat = (ctx->ops->mbox_stat_read(ctx) >> 16) & 0xff;
        spu_release(ctx);

        if (copy_to_user(buf, &ibox_stat, sizeof ibox_stat))
                return -EFAULT;

        return 4;
}

static struct file_operations spufs_ibox_stat_fops = {
        .open = spufs_pipe_open,
        .read = spufs_ibox_stat_read,
};

/* low-level mailbox write */
size_t spu_wbox_write(struct spu_context *ctx, u32 data)
{
        return ctx->ops->wbox_write(ctx, data);
}

static int spufs_wbox_fasync(int fd, struct file *file, int on)
{
        struct spu_context *ctx = file->private_data;
        int ret;

        ret = fasync_helper(fd, file, on, &ctx->wbox_fasync);

        return ret;
}

/* interrupt-level wbox callback function. */
void spufs_wbox_callback(struct spu *spu)
{
        struct spu_context *ctx = spu->ctx;

        wake_up_all(&ctx->wbox_wq);
        kill_fasync(&ctx->wbox_fasync, SIGIO, POLLOUT);
}

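/*
 * Write to the SPU-bound mailbox.  With O_NONBLOCK a full mailbox
 * returns -EAGAIN; otherwise the caller sleeps on wbox_wq until the
 * wbox callback signals free space.
 */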
static ssize_t spufs_wbox_write(struct file *file, const char __user *buf,
                                size_t len, loff_t *pos)
{
        struct spu_context *ctx = file->private_data;
        u32 wbox_data;
        int ret;

        if (len < 4)
                return -EINVAL;

        if (copy_from_user(&wbox_data, buf, sizeof wbox_data))
                return -EFAULT;

        spu_acquire(ctx);

        ret = 0;
        if (file->f_flags & O_NONBLOCK) {
                if (!spu_wbox_write(ctx, wbox_data))
                        ret = -EAGAIN;
        } else {
                ret = spufs_wait(ctx->wbox_wq, spu_wbox_write(ctx, wbox_data));
        }

        spu_release(ctx);

        return ret ? ret : sizeof wbox_data;
}

static unsigned int spufs_wbox_poll(struct file *file, poll_table *wait)
{
        struct spu_context *ctx = file->private_data;
        unsigned int mask;

        poll_wait(file, &ctx->wbox_wq, wait);

        spu_acquire(ctx);
        mask = ctx->ops->mbox_stat_poll(ctx, POLLOUT | POLLWRNORM);
        spu_release(ctx);

        return mask;
}

static struct file_operations spufs_wbox_fops = {
        .open   = spufs_pipe_open,
        .write  = spufs_wbox_write,
        .poll   = spufs_wbox_poll,
        .fasync = spufs_wbox_fasync,
};

static ssize_t spufs_wbox_stat_read(struct file *file, char __user *buf,
                                    size_t len, loff_t *pos)
{
        struct spu_context *ctx = file->private_data;
        u32 wbox_stat;

        if (len < 4)
                return -EINVAL;

        spu_acquire(ctx);
        wbox_stat = (ctx->ops->mbox_stat_read(ctx) >> 8) & 0xff;
        spu_release(ctx);

        if (copy_to_user(buf, &wbox_stat, sizeof wbox_stat))
                return -EFAULT;

        return 4;
}

static struct file_operations spufs_wbox_stat_fops = {
        .open = spufs_pipe_open,
        .read = spufs_wbox_stat_read,
};

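/*
 * The signal1 and signal2 files give access to the two signal
 * notification registers, both through read/write and by mapping the
 * corresponding problem state pages.
 */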
static int spufs_signal1_open(struct inode *inode, struct file *file)
{
        struct spufs_inode_info *i = SPUFS_I(inode);
        struct spu_context *ctx = i->i_ctx;
        file->private_data = ctx;
        file->f_mapping = inode->i_mapping;
        ctx->signal1 = inode->i_mapping;
        return nonseekable_open(inode, file);
}

static ssize_t spufs_signal1_read(struct file *file, char __user *buf,
                                  size_t len, loff_t *pos)
{
        struct spu_context *ctx = file->private_data;
        u32 data;

        if (len < 4)
                return -EINVAL;

        spu_acquire(ctx);
        data = ctx->ops->signal1_read(ctx);
        spu_release(ctx);

        if (copy_to_user(buf, &data, 4))
                return -EFAULT;

        return 4;
}

static ssize_t spufs_signal1_write(struct file *file, const char __user *buf,
                                   size_t len, loff_t *pos)
{
        struct spu_context *ctx;
        u32 data;

        ctx = file->private_data;

        if (len < 4)
                return -EINVAL;

        if (copy_from_user(&data, buf, 4))
                return -EFAULT;

        spu_acquire(ctx);
        ctx->ops->signal1_write(ctx, data);
        spu_release(ctx);

        return 4;
}

static struct page *spufs_signal1_mmap_nopage(struct vm_area_struct *vma,
                                              unsigned long address, int *type)
{
#if PAGE_SIZE == 0x1000
        return spufs_ps_nopage(vma, address, type, 0x14000, 0x1000);
#elif PAGE_SIZE == 0x10000
        /* For 64k pages, both signal1 and signal2 can be used to mmap the whole
         * signal 1 and 2 area
         */
        return spufs_ps_nopage(vma, address, type, 0x10000, 0x10000);
#else
#error unsupported page size
#endif
}

static struct vm_operations_struct spufs_signal1_mmap_vmops = {
        .nopage = spufs_signal1_mmap_nopage,
};

static int spufs_signal1_mmap(struct file *file, struct vm_area_struct *vma)
{
        if (!(vma->vm_flags & VM_SHARED))
                return -EINVAL;

        vma->vm_flags |= VM_RESERVED;
        vma->vm_page_prot = __pgprot(pgprot_val(vma->vm_page_prot)
                                     | _PAGE_NO_CACHE | _PAGE_GUARDED);

        vma->vm_ops = &spufs_signal1_mmap_vmops;
        return 0;
}

static struct file_operations spufs_signal1_fops = {
        .open  = spufs_signal1_open,
        .read  = spufs_signal1_read,
        .write = spufs_signal1_write,
        .mmap  = spufs_signal1_mmap,
};

static int spufs_signal2_open(struct inode *inode, struct file *file)
{
        struct spufs_inode_info *i = SPUFS_I(inode);
        struct spu_context *ctx = i->i_ctx;
        file->private_data = ctx;
        file->f_mapping = inode->i_mapping;
        ctx->signal2 = inode->i_mapping;
        return nonseekable_open(inode, file);
}

static ssize_t spufs_signal2_read(struct file *file, char __user *buf,
                                  size_t len, loff_t *pos)
{
        struct spu_context *ctx;
        u32 data;

        ctx = file->private_data;

        if (len < 4)
                return -EINVAL;

        spu_acquire(ctx);
        data = ctx->ops->signal2_read(ctx);
        spu_release(ctx);

        if (copy_to_user(buf, &data, 4))
                return -EFAULT;

        return 4;
}

static ssize_t spufs_signal2_write(struct file *file, const char __user *buf,
                                   size_t len, loff_t *pos)
{
        struct spu_context *ctx;
        u32 data;

        ctx = file->private_data;

        if (len < 4)
                return -EINVAL;

        if (copy_from_user(&data, buf, 4))
                return -EFAULT;

        spu_acquire(ctx);
        ctx->ops->signal2_write(ctx, data);
        spu_release(ctx);

        return 4;
}

#if SPUFS_MMAP_4K
static struct page *spufs_signal2_mmap_nopage(struct vm_area_struct *vma,
                                              unsigned long address, int *type)
{
#if PAGE_SIZE == 0x1000
        return spufs_ps_nopage(vma, address, type, 0x1c000, 0x1000);
#elif PAGE_SIZE == 0x10000
        /* For 64k pages, both signal1 and signal2 can be used to mmap the whole
         * signal 1 and 2 area
         */
        return spufs_ps_nopage(vma, address, type, 0x10000, 0x10000);
#else
#error unsupported page size
#endif
}

static struct vm_operations_struct spufs_signal2_mmap_vmops = {
        .nopage = spufs_signal2_mmap_nopage,
};

static int spufs_signal2_mmap(struct file *file, struct vm_area_struct *vma)
{
        if (!(vma->vm_flags & VM_SHARED))
                return -EINVAL;

        /* FIXME: */
        vma->vm_flags |= VM_RESERVED;
        vma->vm_page_prot = __pgprot(pgprot_val(vma->vm_page_prot)
                                     | _PAGE_NO_CACHE | _PAGE_GUARDED);

        vma->vm_ops = &spufs_signal2_mmap_vmops;
        return 0;
}
#else /* SPUFS_MMAP_4K */
#define spufs_signal2_mmap NULL
#endif /* !SPUFS_MMAP_4K */

static struct file_operations spufs_signal2_fops = {
        .open  = spufs_signal2_open,
        .read  = spufs_signal2_read,
        .write = spufs_signal2_write,
        .mmap  = spufs_signal2_mmap,
};

static void spufs_signal1_type_set(void *data, u64 val)
{
        struct spu_context *ctx = data;

        spu_acquire(ctx);
        ctx->ops->signal1_type_set(ctx, val);
        spu_release(ctx);
}

static u64 spufs_signal1_type_get(void *data)
{
        struct spu_context *ctx = data;
        u64 ret;

        spu_acquire(ctx);
        ret = ctx->ops->signal1_type_get(ctx);
        spu_release(ctx);

        return ret;
}
DEFINE_SIMPLE_ATTRIBUTE(spufs_signal1_type, spufs_signal1_type_get,
                        spufs_signal1_type_set, "%llu");

static void spufs_signal2_type_set(void *data, u64 val)
{
        struct spu_context *ctx = data;

        spu_acquire(ctx);
        ctx->ops->signal2_type_set(ctx, val);
        spu_release(ctx);
}

static u64 spufs_signal2_type_get(void *data)
{
        struct spu_context *ctx = data;
        u64 ret;

        spu_acquire(ctx);
        ret = ctx->ops->signal2_type_get(ctx);
        spu_release(ctx);

        return ret;
}
DEFINE_SIMPLE_ATTRIBUTE(spufs_signal2_type, spufs_signal2_type_get,
                        spufs_signal2_type_set, "%llu");

#if SPUFS_MMAP_4K
static struct page *spufs_mss_mmap_nopage(struct vm_area_struct *vma,
                                          unsigned long address, int *type)
{
        return spufs_ps_nopage(vma, address, type, 0x0000, 0x1000);
}

static struct vm_operations_struct spufs_mss_mmap_vmops = {
        .nopage = spufs_mss_mmap_nopage,
};

/*
 * mmap support for problem state MSS area [0x0000 - 0x0fff].
 */
static int spufs_mss_mmap(struct file *file, struct vm_area_struct *vma)
{
        if (!(vma->vm_flags & VM_SHARED))
                return -EINVAL;

        vma->vm_flags |= VM_RESERVED;
        vma->vm_page_prot = __pgprot(pgprot_val(vma->vm_page_prot)
                                     | _PAGE_NO_CACHE | _PAGE_GUARDED);

        vma->vm_ops = &spufs_mss_mmap_vmops;
        return 0;
}
#else /* SPUFS_MMAP_4K */
#define spufs_mss_mmap NULL
#endif /* !SPUFS_MMAP_4K */

static int spufs_mss_open(struct inode *inode, struct file *file)
{
        struct spufs_inode_info *i = SPUFS_I(inode);

        file->private_data = i->i_ctx;
        return nonseekable_open(inode, file);
}

static struct file_operations spufs_mss_fops = {
        .open = spufs_mss_open,
        .mmap = spufs_mss_mmap,
};

static struct page *spufs_psmap_mmap_nopage(struct vm_area_struct *vma,
                                            unsigned long address, int *type)
{
        return spufs_ps_nopage(vma, address, type, 0x0000, 0x20000);
}

static struct vm_operations_struct spufs_psmap_mmap_vmops = {
        .nopage = spufs_psmap_mmap_nopage,
};

/*
 * mmap support for full problem state area [0x00000 - 0x1ffff].
 */
static int spufs_psmap_mmap(struct file *file, struct vm_area_struct *vma)
{
        if (!(vma->vm_flags & VM_SHARED))
                return -EINVAL;

        vma->vm_flags |= VM_RESERVED;
        vma->vm_page_prot = __pgprot(pgprot_val(vma->vm_page_prot)
                                     | _PAGE_NO_CACHE | _PAGE_GUARDED);

        vma->vm_ops = &spufs_psmap_mmap_vmops;
        return 0;
}

static int spufs_psmap_open(struct inode *inode, struct file *file)
{
        struct spufs_inode_info *i = SPUFS_I(inode);

        file->private_data = i->i_ctx;
        return nonseekable_open(inode, file);
}

static struct file_operations spufs_psmap_fops = {
        .open = spufs_psmap_open,
        .mmap = spufs_psmap_mmap,
};


#if SPUFS_MMAP_4K
static struct page *spufs_mfc_mmap_nopage(struct vm_area_struct *vma,
                                          unsigned long address, int *type)
{
        return spufs_ps_nopage(vma, address, type, 0x3000, 0x1000);
}

static struct vm_operations_struct spufs_mfc_mmap_vmops = {
        .nopage = spufs_mfc_mmap_nopage,
};

/*
 * mmap support for problem state MFC DMA area [0x3000 - 0x3fff].
 */
static int spufs_mfc_mmap(struct file *file, struct vm_area_struct *vma)
{
        if (!(vma->vm_flags & VM_SHARED))
                return -EINVAL;

        vma->vm_flags |= VM_RESERVED;
        vma->vm_page_prot = __pgprot(pgprot_val(vma->vm_page_prot)
                                     | _PAGE_NO_CACHE | _PAGE_GUARDED);

        vma->vm_ops = &spufs_mfc_mmap_vmops;
        return 0;
}
#else /* SPUFS_MMAP_4K */
#define spufs_mfc_mmap NULL
#endif /* !SPUFS_MMAP_4K */

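/*
 * The "mfc" file lets user space queue DMA commands directly to the
 * MFC of its own context and wait for tag group completion.
 */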
static int spufs_mfc_open(struct inode *inode, struct file *file)
{
        struct spufs_inode_info *i = SPUFS_I(inode);
        struct spu_context *ctx = i->i_ctx;

        /* we don't want to deal with DMA into other processes */
        if (ctx->owner != current->mm)
                return -EINVAL;

        if (atomic_read(&inode->i_count) != 1)
                return -EBUSY;

        file->private_data = ctx;
        return nonseekable_open(inode, file);
}

/* interrupt-level mfc callback function. */
void spufs_mfc_callback(struct spu *spu)
{
        struct spu_context *ctx = spu->ctx;

        wake_up_all(&ctx->mfc_wq);

        pr_debug("%s %s\n", __FUNCTION__, spu->name);
        if (ctx->mfc_fasync) {
                u32 free_elements, tagstatus;
                unsigned int mask;

                /* no need for spu_acquire in interrupt context */
                free_elements = ctx->ops->get_mfc_free_elements(ctx);
                tagstatus = ctx->ops->read_mfc_tagstatus(ctx);

                mask = 0;
                if (free_elements & 0xffff)
                        mask |= POLLOUT;
                if (tagstatus & ctx->tagwait)
                        mask |= POLLIN;

                kill_fasync(&ctx->mfc_fasync, SIGIO, mask);
        }
}

static int spufs_read_mfc_tagstatus(struct spu_context *ctx, u32 *status)
{
        /* See if one of the tag groups we are waiting for has completed */
        /* FIXME we need locking around tagwait */
        *status = ctx->ops->read_mfc_tagstatus(ctx) & ctx->tagwait;
        ctx->tagwait &= ~*status;
        if (*status)
                return 1;

        /* enable interrupt waiting for any tag group,
           may silently fail if interrupts are already enabled */
        ctx->ops->set_mfc_query(ctx, ctx->tagwait, 1);
        return 0;
}

static ssize_t spufs_mfc_read(struct file *file, char __user *buffer,
                              size_t size, loff_t *pos)
{
        struct spu_context *ctx = file->private_data;
        int ret = -EINVAL;
        u32 status;

        if (size != 4)
                goto out;

        spu_acquire(ctx);
        if (file->f_flags & O_NONBLOCK) {
                status = ctx->ops->read_mfc_tagstatus(ctx);
                if (!(status & ctx->tagwait))
                        ret = -EAGAIN;
                else
                        ctx->tagwait &= ~status;
        } else {
                ret = spufs_wait(ctx->mfc_wq,
                                 spufs_read_mfc_tagstatus(ctx, &status));
        }
        spu_release(ctx);

        if (ret)
                goto out;

        ret = 4;
        if (copy_to_user(buffer, &status, 4))
                ret = -EFAULT;

out:
        return ret;
}

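/*
 * Sanity checks applied to a DMA command before it is passed on to the
 * MFC: only plain put/get variants are accepted, local store and
 * effective addresses must be equally aligned, the transfer size is
 * limited to 16k, and only tags 0-15 and class 0 are allowed.
 */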
static int spufs_check_valid_dma(struct mfc_dma_command *cmd)
{
        pr_debug("queueing DMA %x %lx %x %x %x\n", cmd->lsa,
                 cmd->ea, cmd->size, cmd->tag, cmd->cmd);

        switch (cmd->cmd) {
        case MFC_PUT_CMD:
        case MFC_PUTF_CMD:
        case MFC_PUTB_CMD:
        case MFC_GET_CMD:
        case MFC_GETF_CMD:
        case MFC_GETB_CMD:
                break;
        default:
                pr_debug("invalid DMA opcode %x\n", cmd->cmd);
                return -EIO;
        }

        if ((cmd->lsa & 0xf) != (cmd->ea & 0xf)) {
                pr_debug("invalid DMA alignment, ea %lx lsa %x\n",
                         cmd->ea, cmd->lsa);
                return -EIO;
        }

        switch (cmd->size & 0xf) {
        case 1:
                break;
        case 2:
                if (cmd->lsa & 1)
                        goto error;
                break;
        case 4:
                if (cmd->lsa & 3)
                        goto error;
                break;
        case 8:
                if (cmd->lsa & 7)
                        goto error;
                break;
        case 0:
                if (cmd->lsa & 15)
                        goto error;
                break;
        error:
        default:
                pr_debug("invalid DMA alignment %x for size %x\n",
                         cmd->lsa & 0xf, cmd->size);
                return -EIO;
        }

        if (cmd->size > 16 * 1024) {
                pr_debug("invalid DMA size %x\n", cmd->size);
                return -EIO;
        }

        if (cmd->tag & 0xfff0) {
                /* we reserve the higher tag numbers for kernel use */
                pr_debug("invalid DMA tag\n");
                return -EIO;
        }

        if (cmd->class) {
                /* not supported in this version */
                pr_debug("invalid DMA class\n");
                return -EIO;
        }

        return 0;
}

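/*
 * Helper for the blocking write path: try to queue the command, and if
 * the MFC queue is full, enable completion interrupts and retry once.
 * A nonzero return tells spufs_wait() to stop waiting.
 */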
static int spu_send_mfc_command(struct spu_context *ctx,
                                struct mfc_dma_command cmd,
                                int *error)
{
        *error = ctx->ops->send_mfc_command(ctx, &cmd);
        if (*error == -EAGAIN) {
                /* wait for any tag group to complete
                   so we have space for the new command */
                ctx->ops->set_mfc_query(ctx, ctx->tagwait, 1);
                /* try again, because the queue might be
                   empty again */
                *error = ctx->ops->send_mfc_command(ctx, &cmd);
                if (*error == -EAGAIN)
                        return 0;
        }
        return 1;
}

static ssize_t spufs_mfc_write(struct file *file, const char __user *buffer,
                               size_t size, loff_t *pos)
{
        struct spu_context *ctx = file->private_data;
        struct mfc_dma_command cmd;
        int ret = -EINVAL;

        if (size != sizeof cmd)
                goto out;

        ret = -EFAULT;
        if (copy_from_user(&cmd, buffer, sizeof cmd))
                goto out;

        ret = spufs_check_valid_dma(&cmd);
        if (ret)
                goto out;

        spu_acquire_runnable(ctx);
        if (file->f_flags & O_NONBLOCK) {
                ret = ctx->ops->send_mfc_command(ctx, &cmd);
        } else {
                int status;
                ret = spufs_wait(ctx->mfc_wq,
                                 spu_send_mfc_command(ctx, cmd, &status));
                if (status)
                        ret = status;
        }
        spu_release(ctx);

        if (ret)
                goto out;

        ctx->tagwait |= 1 << cmd.tag;

out:
        return ret;
}

static unsigned int spufs_mfc_poll(struct file *file, poll_table *wait)
{
        struct spu_context *ctx = file->private_data;
        u32 free_elements, tagstatus;
        unsigned int mask;

        spu_acquire(ctx);
        ctx->ops->set_mfc_query(ctx, ctx->tagwait, 2);
        free_elements = ctx->ops->get_mfc_free_elements(ctx);
        tagstatus = ctx->ops->read_mfc_tagstatus(ctx);
        spu_release(ctx);

        poll_wait(file, &ctx->mfc_wq, wait);

        mask = 0;
        if (free_elements & 0xffff)
                mask |= POLLOUT | POLLWRNORM;
        if (tagstatus & ctx->tagwait)
                mask |= POLLIN | POLLRDNORM;

        pr_debug("%s: free %d tagstatus %d tagwait %d\n", __FUNCTION__,
                 free_elements, tagstatus, ctx->tagwait);

        return mask;
}

static int spufs_mfc_flush(struct file *file, fl_owner_t id)
{
        struct spu_context *ctx = file->private_data;
        int ret;

        spu_acquire(ctx);
#if 0
/* this currently hangs */
        ret = spufs_wait(ctx->mfc_wq,
                         ctx->ops->set_mfc_query(ctx, ctx->tagwait, 2));
        if (ret)
                goto out;
        ret = spufs_wait(ctx->mfc_wq,
                         ctx->ops->read_mfc_tagstatus(ctx) == ctx->tagwait);
out:
#else
        ret = 0;
#endif
        spu_release(ctx);

        return ret;
}

static int spufs_mfc_fsync(struct file *file, struct dentry *dentry,
                           int datasync)
{
        return spufs_mfc_flush(file, NULL);
}

static int spufs_mfc_fasync(int fd, struct file *file, int on)
{
        struct spu_context *ctx = file->private_data;

        return fasync_helper(fd, file, on, &ctx->mfc_fasync);
}

static struct file_operations spufs_mfc_fops = {
        .open   = spufs_mfc_open,
        .read   = spufs_mfc_read,
        .write  = spufs_mfc_write,
        .poll   = spufs_mfc_poll,
        .flush  = spufs_mfc_flush,
        .fsync  = spufs_mfc_fsync,
        .fasync = spufs_mfc_fasync,
        .mmap   = spufs_mfc_mmap,
};

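/*
 * The remaining register files (npc, decr, decr_status, spu_tag_mask,
 * event_mask, srr0, phys-id) are single-value attributes built with
 * DEFINE_SIMPLE_ATTRIBUTE.  Those backed by the local store context
 * save area force the context into the saved state before access.
 */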
static void spufs_npc_set(void *data, u64 val)
{
        struct spu_context *ctx = data;
        spu_acquire(ctx);
        ctx->ops->npc_write(ctx, val);
        spu_release(ctx);
}

static u64 spufs_npc_get(void *data)
{
        struct spu_context *ctx = data;
        u64 ret;
        spu_acquire(ctx);
        ret = ctx->ops->npc_read(ctx);
        spu_release(ctx);
        return ret;
}
DEFINE_SIMPLE_ATTRIBUTE(spufs_npc_ops, spufs_npc_get, spufs_npc_set, "%llx\n")

static void spufs_decr_set(void *data, u64 val)
{
        struct spu_context *ctx = data;
        struct spu_lscsa *lscsa = ctx->csa.lscsa;
        spu_acquire_saved(ctx);
        lscsa->decr.slot[0] = (u32) val;
        spu_release(ctx);
}

static u64 spufs_decr_get(void *data)
{
        struct spu_context *ctx = data;
        struct spu_lscsa *lscsa = ctx->csa.lscsa;
        u64 ret;
        spu_acquire_saved(ctx);
        ret = lscsa->decr.slot[0];
        spu_release(ctx);
        return ret;
}
DEFINE_SIMPLE_ATTRIBUTE(spufs_decr_ops, spufs_decr_get, spufs_decr_set,
                        "%llx\n")

static void spufs_decr_status_set(void *data, u64 val)
{
        struct spu_context *ctx = data;
        struct spu_lscsa *lscsa = ctx->csa.lscsa;
        spu_acquire_saved(ctx);
        lscsa->decr_status.slot[0] = (u32) val;
        spu_release(ctx);
}

static u64 spufs_decr_status_get(void *data)
{
        struct spu_context *ctx = data;
        struct spu_lscsa *lscsa = ctx->csa.lscsa;
        u64 ret;
        spu_acquire_saved(ctx);
        ret = lscsa->decr_status.slot[0];
        spu_release(ctx);
        return ret;
}
DEFINE_SIMPLE_ATTRIBUTE(spufs_decr_status_ops, spufs_decr_status_get,
                        spufs_decr_status_set, "%llx\n")

static void spufs_spu_tag_mask_set(void *data, u64 val)
{
        struct spu_context *ctx = data;
        struct spu_lscsa *lscsa = ctx->csa.lscsa;
        spu_acquire_saved(ctx);
        lscsa->tag_mask.slot[0] = (u32) val;
        spu_release(ctx);
}

static u64 spufs_spu_tag_mask_get(void *data)
{
        struct spu_context *ctx = data;
        struct spu_lscsa *lscsa = ctx->csa.lscsa;
        u64 ret;
        spu_acquire_saved(ctx);
        ret = lscsa->tag_mask.slot[0];
        spu_release(ctx);
        return ret;
}
DEFINE_SIMPLE_ATTRIBUTE(spufs_spu_tag_mask_ops, spufs_spu_tag_mask_get,
                        spufs_spu_tag_mask_set, "%llx\n")

static void spufs_event_mask_set(void *data, u64 val)
{
        struct spu_context *ctx = data;
        struct spu_lscsa *lscsa = ctx->csa.lscsa;
        spu_acquire_saved(ctx);
        lscsa->event_mask.slot[0] = (u32) val;
        spu_release(ctx);
}

static u64 spufs_event_mask_get(void *data)
{
        struct spu_context *ctx = data;
        struct spu_lscsa *lscsa = ctx->csa.lscsa;
        u64 ret;
        spu_acquire_saved(ctx);
        ret = lscsa->event_mask.slot[0];
        spu_release(ctx);
        return ret;
}
DEFINE_SIMPLE_ATTRIBUTE(spufs_event_mask_ops, spufs_event_mask_get,
                        spufs_event_mask_set, "%llx\n")

static void spufs_srr0_set(void *data, u64 val)
{
        struct spu_context *ctx = data;
        struct spu_lscsa *lscsa = ctx->csa.lscsa;
        spu_acquire_saved(ctx);
        lscsa->srr0.slot[0] = (u32) val;
        spu_release(ctx);
}

static u64 spufs_srr0_get(void *data)
{
        struct spu_context *ctx = data;
        struct spu_lscsa *lscsa = ctx->csa.lscsa;
        u64 ret;
        spu_acquire_saved(ctx);
        ret = lscsa->srr0.slot[0];
        spu_release(ctx);
        return ret;
}
DEFINE_SIMPLE_ATTRIBUTE(spufs_srr0_ops, spufs_srr0_get, spufs_srr0_set,
                        "%llx\n")

static u64 spufs_id_get(void *data)
{
        struct spu_context *ctx = data;
        u64 num;

        spu_acquire(ctx);
        if (ctx->state == SPU_STATE_RUNNABLE)
                num = ctx->spu->number;
        else
                num = (unsigned int)-1;
        spu_release(ctx);

        return num;
}
DEFINE_SIMPLE_ATTRIBUTE(spufs_id_ops, spufs_id_get, NULL, "0x%llx\n")

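/*
 * Files created in each spufs context directory, with their default
 * permissions.
 */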
struct tree_descr spufs_dir_contents[] = {
        { "mem",  &spufs_mem_fops,  0666, },
        { "regs", &spufs_regs_fops,  0666, },
        { "mbox", &spufs_mbox_fops, 0444, },
        { "ibox", &spufs_ibox_fops, 0444, },
        { "wbox", &spufs_wbox_fops, 0222, },
        { "mbox_stat", &spufs_mbox_stat_fops, 0444, },
        { "ibox_stat", &spufs_ibox_stat_fops, 0444, },
        { "wbox_stat", &spufs_wbox_stat_fops, 0444, },
        { "signal1", &spufs_signal1_fops, 0666, },
        { "signal2", &spufs_signal2_fops, 0666, },
        { "signal1_type", &spufs_signal1_type, 0666, },
        { "signal2_type", &spufs_signal2_type, 0666, },
        { "mss", &spufs_mss_fops, 0666, },
        { "mfc", &spufs_mfc_fops, 0666, },
        { "cntl", &spufs_cntl_fops,  0666, },
        { "npc", &spufs_npc_ops, 0666, },
        { "fpcr", &spufs_fpcr_fops, 0666, },
        { "decr", &spufs_decr_ops, 0666, },
        { "decr_status", &spufs_decr_status_ops, 0666, },
        { "spu_tag_mask", &spufs_spu_tag_mask_ops, 0666, },
        { "event_mask", &spufs_event_mask_ops, 0666, },
        { "srr0", &spufs_srr0_ops, 0666, },
        { "phys-id", &spufs_id_ops, 0666, },
        { "psmap", &spufs_psmap_fops, 0666, },
        {},
};