/*
 * SPU file system -- file contents
 *
 * (C) Copyright IBM Deutschland Entwicklung GmbH 2005
 *
 * Author: Arnd Bergmann <arndb@de.ibm.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2, or (at your option)
 * any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 */

#undef DEBUG

#include <linux/fs.h>
#include <linux/ioctl.h>
#include <linux/module.h>
#include <linux/pagemap.h>
#include <linux/poll.h>
#include <linux/ptrace.h>

#include <asm/io.h>
#include <asm/semaphore.h>
#include <asm/spu.h>
#include <asm/uaccess.h>

#include "spufs.h"


static int
spufs_mem_open(struct inode *inode, struct file *file)
{
        struct spufs_inode_info *i = SPUFS_I(inode);
        struct spu_context *ctx = i->i_ctx;
        file->private_data = ctx;
        file->f_mapping = inode->i_mapping;
        ctx->local_store = inode->i_mapping;
        return 0;
}

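/*
 * The 'mem' file gives user space read/write access to the LS_SIZE bytes
 * of SPU local store.  Access goes through ctx->ops, so it works both
 * while the context is loaded on a physical SPU and while it is saved
 * in memory.
 */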
static ssize_t
spufs_mem_read(struct file *file, char __user *buffer,
               size_t size, loff_t *pos)
{
        struct spu_context *ctx = file->private_data;
        char *local_store;
        int ret;

        spu_acquire(ctx);

        local_store = ctx->ops->get_ls(ctx);
        ret = simple_read_from_buffer(buffer, size, pos, local_store, LS_SIZE);

        spu_release(ctx);
        return ret;
}

static ssize_t
spufs_mem_write(struct file *file, const char __user *buffer,
                size_t size, loff_t *pos)
{
        struct spu_context *ctx = file->private_data;
        char *local_store;
        int ret;

        size = min_t(ssize_t, LS_SIZE - *pos, size);
        if (size <= 0)
                return -EFBIG;
        *pos += size;

        spu_acquire(ctx);

        local_store = ctx->ops->get_ls(ctx);
        ret = copy_from_user(local_store + *pos - size,
                             buffer, size) ? -EFAULT : size;

        spu_release(ctx);
        return ret;
}

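/*
 * Fault handler for mmap()ed local store: resolve the faulting page
 * either from the vmalloc'ed save area (context currently saved) or
 * from the physical local store of the SPU the context runs on.
 */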
#ifdef CONFIG_SPUFS_MMAP
static struct page *
spufs_mem_mmap_nopage(struct vm_area_struct *vma,
                      unsigned long address, int *type)
{
        struct page *page = NOPAGE_SIGBUS;

        struct spu_context *ctx = vma->vm_file->private_data;
        unsigned long offset = address - vma->vm_start;
        offset += vma->vm_pgoff << PAGE_SHIFT;

        spu_acquire(ctx);

        if (ctx->state == SPU_STATE_SAVED)
                page = vmalloc_to_page(ctx->csa.lscsa->ls + offset);
        else
                page = pfn_to_page((ctx->spu->local_store_phys + offset)
                                   >> PAGE_SHIFT);

        spu_release(ctx);

        if (type)
                *type = VM_FAULT_MINOR;

        page_cache_get(page);
        return page;
}

static struct vm_operations_struct spufs_mem_mmap_vmops = {
        .nopage = spufs_mem_mmap_nopage,
};

static int
spufs_mem_mmap(struct file *file, struct vm_area_struct *vma)
{
        if (!(vma->vm_flags & VM_SHARED))
                return -EINVAL;

        /* FIXME: */
        vma->vm_page_prot = __pgprot(pgprot_val(vma->vm_page_prot)
                                     | _PAGE_NO_CACHE);

        vma->vm_ops = &spufs_mem_mmap_vmops;
        return 0;
}
#endif

static struct file_operations spufs_mem_fops = {
        .open = spufs_mem_open,
        .read = spufs_mem_read,
        .write = spufs_mem_write,
        .llseek = generic_file_llseek,
#ifdef CONFIG_SPUFS_MMAP
        .mmap = spufs_mem_mmap,
#endif
};

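/*
 * Generic fault handler for mappings of the problem state area.
 * ps_offs selects the register block within the area; the context is
 * made runnable first, since the problem state registers only exist
 * while the context is loaded on a physical SPU.
 */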
#ifdef CONFIG_SPUFS_MMAP
static struct page *spufs_ps_nopage(struct vm_area_struct *vma,
                                    unsigned long address,
                                    int *type, unsigned long ps_offs)
{
        struct page *page = NOPAGE_SIGBUS;
        int fault_type = VM_FAULT_SIGBUS;
        struct spu_context *ctx = vma->vm_file->private_data;
        unsigned long offset = address - vma->vm_start;
        unsigned long area;
        int ret;

        offset += vma->vm_pgoff << PAGE_SHIFT;
        if (offset >= 0x4000)
                goto out;

        ret = spu_acquire_runnable(ctx);
        if (ret)
                goto out;

        area = ctx->spu->problem_phys + ps_offs;
        page = pfn_to_page((area + offset) >> PAGE_SHIFT);
        fault_type = VM_FAULT_MINOR;
        page_cache_get(page);

        spu_release(ctx);

 out:
        if (type)
                *type = fault_type;

        return page;
}

static struct page *spufs_cntl_mmap_nopage(struct vm_area_struct *vma,
                                           unsigned long address, int *type)
{
        return spufs_ps_nopage(vma, address, type, 0x4000);
}

static struct vm_operations_struct spufs_cntl_mmap_vmops = {
        .nopage = spufs_cntl_mmap_nopage,
};

/*
 * mmap support for problem state control area [0x4000 - 0x4fff].
 * Mapping this area requires that the application have CAP_SYS_RAWIO,
 * as these registers require special care when reading or writing.
 */
static int spufs_cntl_mmap(struct file *file, struct vm_area_struct *vma)
{
        if (!(vma->vm_flags & VM_SHARED))
                return -EINVAL;

        if (!capable(CAP_SYS_RAWIO))
                return -EPERM;

        vma->vm_flags |= VM_RESERVED;
        vma->vm_page_prot = __pgprot(pgprot_val(vma->vm_page_prot)
                                     | _PAGE_NO_CACHE);

        vma->vm_ops = &spufs_cntl_mmap_vmops;
        return 0;
}
#endif

static int spufs_cntl_open(struct inode *inode, struct file *file)
{
        struct spufs_inode_info *i = SPUFS_I(inode);
        struct spu_context *ctx = i->i_ctx;

        file->private_data = ctx;
        file->f_mapping = inode->i_mapping;
        ctx->cntl = inode->i_mapping;
        return 0;
}

static ssize_t
spufs_cntl_read(struct file *file, char __user *buffer,
                size_t size, loff_t *pos)
{
        /* FIXME: read from spu status */
        return -EINVAL;
}

static ssize_t
spufs_cntl_write(struct file *file, const char __user *buffer,
                 size_t size, loff_t *pos)
{
        /* FIXME: write to runctl bit */
        return -EINVAL;
}

static struct file_operations spufs_cntl_fops = {
        .open = spufs_cntl_open,
        .read = spufs_cntl_read,
        .write = spufs_cntl_write,
#ifdef CONFIG_SPUFS_MMAP
        .mmap = spufs_cntl_mmap,
#endif
};

static int
spufs_regs_open(struct inode *inode, struct file *file)
{
        struct spufs_inode_info *i = SPUFS_I(inode);
        file->private_data = i->i_ctx;
        return 0;
}

static ssize_t
spufs_regs_read(struct file *file, char __user *buffer,
                size_t size, loff_t *pos)
{
        struct spu_context *ctx = file->private_data;
        struct spu_lscsa *lscsa = ctx->csa.lscsa;
        int ret;

        spu_acquire_saved(ctx);

        ret = simple_read_from_buffer(buffer, size, pos,
                                      lscsa->gprs, sizeof lscsa->gprs);

        spu_release(ctx);
        return ret;
}

static ssize_t
spufs_regs_write(struct file *file, const char __user *buffer,
                 size_t size, loff_t *pos)
{
        struct spu_context *ctx = file->private_data;
        struct spu_lscsa *lscsa = ctx->csa.lscsa;
        int ret;

        size = min_t(ssize_t, sizeof lscsa->gprs - *pos, size);
        if (size <= 0)
                return -EFBIG;
        *pos += size;

        spu_acquire_saved(ctx);

        ret = copy_from_user(lscsa->gprs + *pos - size,
                             buffer, size) ? -EFAULT : size;

        spu_release(ctx);
        return ret;
}

static struct file_operations spufs_regs_fops = {
        .open = spufs_regs_open,
        .read = spufs_regs_read,
        .write = spufs_regs_write,
        .llseek = generic_file_llseek,
};

static ssize_t
spufs_fpcr_read(struct file *file, char __user * buffer,
                size_t size, loff_t * pos)
{
        struct spu_context *ctx = file->private_data;
        struct spu_lscsa *lscsa = ctx->csa.lscsa;
        int ret;

        spu_acquire_saved(ctx);

        ret = simple_read_from_buffer(buffer, size, pos,
                                      &lscsa->fpcr, sizeof(lscsa->fpcr));

        spu_release(ctx);
        return ret;
}

static ssize_t
spufs_fpcr_write(struct file *file, const char __user * buffer,
                 size_t size, loff_t * pos)
{
        struct spu_context *ctx = file->private_data;
        struct spu_lscsa *lscsa = ctx->csa.lscsa;
        int ret;

        size = min_t(ssize_t, sizeof(lscsa->fpcr) - *pos, size);
        if (size <= 0)
                return -EFBIG;
        *pos += size;

        spu_acquire_saved(ctx);

        ret = copy_from_user((char *)&lscsa->fpcr + *pos - size,
                             buffer, size) ? -EFAULT : size;

        spu_release(ctx);
        return ret;
}

static struct file_operations spufs_fpcr_fops = {
        .open = spufs_regs_open,
        .read = spufs_fpcr_read,
        .write = spufs_fpcr_write,
        .llseek = generic_file_llseek,
};

/* generic open function for all pipe-like files */
static int spufs_pipe_open(struct inode *inode, struct file *file)
{
        struct spufs_inode_info *i = SPUFS_I(inode);
        file->private_data = i->i_ctx;

        return nonseekable_open(inode, file);
}

static ssize_t spufs_mbox_read(struct file *file, char __user *buf,
                               size_t len, loff_t *pos)
{
        struct spu_context *ctx = file->private_data;
        u32 mbox_data;
        int ret;

        if (len < 4)
                return -EINVAL;

        spu_acquire(ctx);
        ret = ctx->ops->mbox_read(ctx, &mbox_data);
        spu_release(ctx);

        if (!ret)
                return -EAGAIN;

        if (copy_to_user(buf, &mbox_data, sizeof mbox_data))
                return -EFAULT;

        return 4;
}

static struct file_operations spufs_mbox_fops = {
        .open = spufs_pipe_open,
        .read = spufs_mbox_read,
};

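/*
 * The mailbox status word packs three byte-wide counts: bits 0-7 hold
 * the SPU-to-CPU mailbox count, bits 8-15 the free space in the
 * CPU-to-SPU (wbox) mailbox, and bits 16-23 the interrupting (ibox)
 * mailbox count.  The *_stat files below each expose one of them.
 */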
static ssize_t spufs_mbox_stat_read(struct file *file, char __user *buf,
                                    size_t len, loff_t *pos)
{
        struct spu_context *ctx = file->private_data;
        u32 mbox_stat;

        if (len < 4)
                return -EINVAL;

        spu_acquire(ctx);

        mbox_stat = ctx->ops->mbox_stat_read(ctx) & 0xff;

        spu_release(ctx);

        if (copy_to_user(buf, &mbox_stat, sizeof mbox_stat))
                return -EFAULT;

        return 4;
}

static struct file_operations spufs_mbox_stat_fops = {
        .open = spufs_pipe_open,
        .read = spufs_mbox_stat_read,
};

/* low-level ibox access function */
size_t spu_ibox_read(struct spu_context *ctx, u32 *data)
{
        return ctx->ops->ibox_read(ctx, data);
}

static int spufs_ibox_fasync(int fd, struct file *file, int on)
{
        struct spu_context *ctx = file->private_data;

        return fasync_helper(fd, file, on, &ctx->ibox_fasync);
}

/* interrupt-level ibox callback function. */
void spufs_ibox_callback(struct spu *spu)
{
        struct spu_context *ctx = spu->ctx;

        wake_up_all(&ctx->ibox_wq);
        kill_fasync(&ctx->ibox_fasync, SIGIO, POLLIN);
}

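/*
 * Read one 32-bit word from the interrupting mailbox.  With O_NONBLOCK
 * the read returns -EAGAIN when no data is pending; otherwise it sleeps
 * on ibox_wq until the interrupt callback signals new data.
 */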
static ssize_t spufs_ibox_read(struct file *file, char __user *buf,
                               size_t len, loff_t *pos)
{
        struct spu_context *ctx = file->private_data;
        u32 ibox_data;
        ssize_t ret;

        if (len < 4)
                return -EINVAL;

        spu_acquire(ctx);

        ret = 0;
        if (file->f_flags & O_NONBLOCK) {
                if (!spu_ibox_read(ctx, &ibox_data))
                        ret = -EAGAIN;
        } else {
                ret = spufs_wait(ctx->ibox_wq, spu_ibox_read(ctx, &ibox_data));
        }

        spu_release(ctx);

        if (ret)
                return ret;

        ret = 4;
        if (copy_to_user(buf, &ibox_data, sizeof ibox_data))
                ret = -EFAULT;

        return ret;
}

static unsigned int spufs_ibox_poll(struct file *file, poll_table *wait)
{
        struct spu_context *ctx = file->private_data;
        unsigned int mask;

        poll_wait(file, &ctx->ibox_wq, wait);

        spu_acquire(ctx);
        mask = ctx->ops->mbox_stat_poll(ctx, POLLIN | POLLRDNORM);
        spu_release(ctx);

        return mask;
}

static struct file_operations spufs_ibox_fops = {
        .open = spufs_pipe_open,
        .read = spufs_ibox_read,
        .poll = spufs_ibox_poll,
        .fasync = spufs_ibox_fasync,
};

static ssize_t spufs_ibox_stat_read(struct file *file, char __user *buf,
                                    size_t len, loff_t *pos)
{
        struct spu_context *ctx = file->private_data;
        u32 ibox_stat;

        if (len < 4)
                return -EINVAL;

        spu_acquire(ctx);
        ibox_stat = (ctx->ops->mbox_stat_read(ctx) >> 16) & 0xff;
        spu_release(ctx);

        if (copy_to_user(buf, &ibox_stat, sizeof ibox_stat))
                return -EFAULT;

        return 4;
}

static struct file_operations spufs_ibox_stat_fops = {
        .open = spufs_pipe_open,
        .read = spufs_ibox_stat_read,
};

/* low-level mailbox write */
size_t spu_wbox_write(struct spu_context *ctx, u32 data)
{
        return ctx->ops->wbox_write(ctx, data);
}

static int spufs_wbox_fasync(int fd, struct file *file, int on)
{
        struct spu_context *ctx = file->private_data;
        int ret;

        ret = fasync_helper(fd, file, on, &ctx->wbox_fasync);

        return ret;
}

/* interrupt-level wbox callback function. */
void spufs_wbox_callback(struct spu *spu)
{
        struct spu_context *ctx = spu->ctx;

        wake_up_all(&ctx->wbox_wq);
        kill_fasync(&ctx->wbox_fasync, SIGIO, POLLOUT);
}

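/*
 * Write one 32-bit word into the SPU's inbound mailbox.  With O_NONBLOCK
 * the write fails with -EAGAIN when the mailbox is full; otherwise it
 * sleeps on wbox_wq until space becomes available.
 */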
static ssize_t spufs_wbox_write(struct file *file, const char __user *buf,
                                size_t len, loff_t *pos)
{
        struct spu_context *ctx = file->private_data;
        u32 wbox_data;
        int ret;

        if (len < 4)
                return -EINVAL;

        if (copy_from_user(&wbox_data, buf, sizeof wbox_data))
                return -EFAULT;

        spu_acquire(ctx);

        ret = 0;
        if (file->f_flags & O_NONBLOCK) {
                if (!spu_wbox_write(ctx, wbox_data))
                        ret = -EAGAIN;
        } else {
                ret = spufs_wait(ctx->wbox_wq, spu_wbox_write(ctx, wbox_data));
        }

        spu_release(ctx);

        return ret ? ret : sizeof wbox_data;
}

static unsigned int spufs_wbox_poll(struct file *file, poll_table *wait)
{
        struct spu_context *ctx = file->private_data;
        unsigned int mask;

        poll_wait(file, &ctx->wbox_wq, wait);

        spu_acquire(ctx);
        mask = ctx->ops->mbox_stat_poll(ctx, POLLOUT | POLLWRNORM);
        spu_release(ctx);

        return mask;
}

static struct file_operations spufs_wbox_fops = {
        .open = spufs_pipe_open,
        .write = spufs_wbox_write,
        .poll = spufs_wbox_poll,
        .fasync = spufs_wbox_fasync,
};

static ssize_t spufs_wbox_stat_read(struct file *file, char __user *buf,
                                    size_t len, loff_t *pos)
{
        struct spu_context *ctx = file->private_data;
        u32 wbox_stat;

        if (len < 4)
                return -EINVAL;

        spu_acquire(ctx);
        wbox_stat = (ctx->ops->mbox_stat_read(ctx) >> 8) & 0xff;
        spu_release(ctx);

        if (copy_to_user(buf, &wbox_stat, sizeof wbox_stat))
                return -EFAULT;

        return 4;
}

static struct file_operations spufs_wbox_stat_fops = {
        .open = spufs_pipe_open,
        .read = spufs_wbox_stat_read,
};

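/*
 * signal1/signal2 expose the two SPU signal notification channels.
 * Reading returns the current register value, writing raises a signal
 * towards the SPU program.
 */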
static int spufs_signal1_open(struct inode *inode, struct file *file)
{
        struct spufs_inode_info *i = SPUFS_I(inode);
        struct spu_context *ctx = i->i_ctx;
        file->private_data = ctx;
        file->f_mapping = inode->i_mapping;
        ctx->signal1 = inode->i_mapping;
        return nonseekable_open(inode, file);
}

static ssize_t spufs_signal1_read(struct file *file, char __user *buf,
                                  size_t len, loff_t *pos)
{
        struct spu_context *ctx = file->private_data;
        u32 data;

        if (len < 4)
                return -EINVAL;

        spu_acquire(ctx);
        data = ctx->ops->signal1_read(ctx);
        spu_release(ctx);

        if (copy_to_user(buf, &data, 4))
                return -EFAULT;

        return 4;
}

static ssize_t spufs_signal1_write(struct file *file, const char __user *buf,
                                   size_t len, loff_t *pos)
{
        struct spu_context *ctx;
        u32 data;

        ctx = file->private_data;

        if (len < 4)
                return -EINVAL;

        if (copy_from_user(&data, buf, 4))
                return -EFAULT;

        spu_acquire(ctx);
        ctx->ops->signal1_write(ctx, data);
        spu_release(ctx);

        return 4;
}

#ifdef CONFIG_SPUFS_MMAP
static struct page *spufs_signal1_mmap_nopage(struct vm_area_struct *vma,
                                              unsigned long address, int *type)
{
        return spufs_ps_nopage(vma, address, type, 0x14000);
}

static struct vm_operations_struct spufs_signal1_mmap_vmops = {
        .nopage = spufs_signal1_mmap_nopage,
};

static int spufs_signal1_mmap(struct file *file, struct vm_area_struct *vma)
{
        if (!(vma->vm_flags & VM_SHARED))
                return -EINVAL;

        vma->vm_flags |= VM_RESERVED;
        vma->vm_page_prot = __pgprot(pgprot_val(vma->vm_page_prot)
                                     | _PAGE_NO_CACHE);

        vma->vm_ops = &spufs_signal1_mmap_vmops;
        return 0;
}
#endif

static struct file_operations spufs_signal1_fops = {
        .open = spufs_signal1_open,
        .read = spufs_signal1_read,
        .write = spufs_signal1_write,
#ifdef CONFIG_SPUFS_MMAP
        .mmap = spufs_signal1_mmap,
#endif
};

static int spufs_signal2_open(struct inode *inode, struct file *file)
{
        struct spufs_inode_info *i = SPUFS_I(inode);
        struct spu_context *ctx = i->i_ctx;
        file->private_data = ctx;
        file->f_mapping = inode->i_mapping;
        ctx->signal2 = inode->i_mapping;
        return nonseekable_open(inode, file);
}

static ssize_t spufs_signal2_read(struct file *file, char __user *buf,
                                  size_t len, loff_t *pos)
{
        struct spu_context *ctx;
        u32 data;

        ctx = file->private_data;

        if (len < 4)
                return -EINVAL;

        spu_acquire(ctx);
        data = ctx->ops->signal2_read(ctx);
        spu_release(ctx);

        if (copy_to_user(buf, &data, 4))
                return -EFAULT;

        return 4;
}

static ssize_t spufs_signal2_write(struct file *file, const char __user *buf,
                                   size_t len, loff_t *pos)
{
        struct spu_context *ctx;
        u32 data;

        ctx = file->private_data;

        if (len < 4)
                return -EINVAL;

        if (copy_from_user(&data, buf, 4))
                return -EFAULT;

        spu_acquire(ctx);
        ctx->ops->signal2_write(ctx, data);
        spu_release(ctx);

        return 4;
}

#ifdef CONFIG_SPUFS_MMAP
static struct page *spufs_signal2_mmap_nopage(struct vm_area_struct *vma,
                                              unsigned long address, int *type)
{
        return spufs_ps_nopage(vma, address, type, 0x1c000);
}

static struct vm_operations_struct spufs_signal2_mmap_vmops = {
        .nopage = spufs_signal2_mmap_nopage,
};

static int spufs_signal2_mmap(struct file *file, struct vm_area_struct *vma)
{
        if (!(vma->vm_flags & VM_SHARED))
                return -EINVAL;

        /* FIXME: */
        vma->vm_flags |= VM_RESERVED;
        vma->vm_page_prot = __pgprot(pgprot_val(vma->vm_page_prot)
                                     | _PAGE_NO_CACHE);

        vma->vm_ops = &spufs_signal2_mmap_vmops;
        return 0;
}
#endif

static struct file_operations spufs_signal2_fops = {
        .open = spufs_signal2_open,
        .read = spufs_signal2_read,
        .write = spufs_signal2_write,
#ifdef CONFIG_SPUFS_MMAP
        .mmap = spufs_signal2_mmap,
#endif
};

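/*
 * signal1_type/signal2_type select how successive writes to a signal
 * notification register are combined (overwrite mode vs. logical OR
 * mode on the Cell hardware).
 */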
static void spufs_signal1_type_set(void *data, u64 val)
{
        struct spu_context *ctx = data;

        spu_acquire(ctx);
        ctx->ops->signal1_type_set(ctx, val);
        spu_release(ctx);
}

static u64 spufs_signal1_type_get(void *data)
{
        struct spu_context *ctx = data;
        u64 ret;

        spu_acquire(ctx);
        ret = ctx->ops->signal1_type_get(ctx);
        spu_release(ctx);

        return ret;
}
DEFINE_SIMPLE_ATTRIBUTE(spufs_signal1_type, spufs_signal1_type_get,
                        spufs_signal1_type_set, "%llu");

static void spufs_signal2_type_set(void *data, u64 val)
{
        struct spu_context *ctx = data;

        spu_acquire(ctx);
        ctx->ops->signal2_type_set(ctx, val);
        spu_release(ctx);
}

static u64 spufs_signal2_type_get(void *data)
{
        struct spu_context *ctx = data;
        u64 ret;

        spu_acquire(ctx);
        ret = ctx->ops->signal2_type_get(ctx);
        spu_release(ctx);

        return ret;
}
DEFINE_SIMPLE_ATTRIBUTE(spufs_signal2_type, spufs_signal2_type_get,
                        spufs_signal2_type_set, "%llu");

#ifdef CONFIG_SPUFS_MMAP
static struct page *spufs_mss_mmap_nopage(struct vm_area_struct *vma,
                                          unsigned long address, int *type)
{
        return spufs_ps_nopage(vma, address, type, 0x0000);
}

static struct vm_operations_struct spufs_mss_mmap_vmops = {
        .nopage = spufs_mss_mmap_nopage,
};

/*
 * mmap support for problem state MFC DMA area [0x0000 - 0x0fff].
 * Mapping this area requires that the application have CAP_SYS_RAWIO,
 * as these registers require special care when reading or writing.
 */
static int spufs_mss_mmap(struct file *file, struct vm_area_struct *vma)
{
        if (!(vma->vm_flags & VM_SHARED))
                return -EINVAL;

        if (!capable(CAP_SYS_RAWIO))
                return -EPERM;

        vma->vm_flags |= VM_RESERVED;
        vma->vm_page_prot = __pgprot(pgprot_val(vma->vm_page_prot)
                                     | _PAGE_NO_CACHE);

        vma->vm_ops = &spufs_mss_mmap_vmops;
        return 0;
}
#endif

static int spufs_mss_open(struct inode *inode, struct file *file)
{
        struct spufs_inode_info *i = SPUFS_I(inode);

        file->private_data = i->i_ctx;
        return nonseekable_open(inode, file);
}

static struct file_operations spufs_mss_fops = {
        .open = spufs_mss_open,
#ifdef CONFIG_SPUFS_MMAP
        .mmap = spufs_mss_mmap,
#endif
};


#ifdef CONFIG_SPUFS_MMAP
static struct page *spufs_mfc_mmap_nopage(struct vm_area_struct *vma,
                                          unsigned long address, int *type)
{
        return spufs_ps_nopage(vma, address, type, 0x3000);
}

static struct vm_operations_struct spufs_mfc_mmap_vmops = {
        .nopage = spufs_mfc_mmap_nopage,
};

/*
 * mmap support for problem state MFC DMA area [0x3000 - 0x3fff].
 * Mapping this area requires that the application have CAP_SYS_RAWIO,
 * as these registers require special care when reading or writing.
 */
static int spufs_mfc_mmap(struct file *file, struct vm_area_struct *vma)
{
        if (!(vma->vm_flags & VM_SHARED))
                return -EINVAL;

        if (!capable(CAP_SYS_RAWIO))
                return -EPERM;

        vma->vm_flags |= VM_RESERVED;
        vma->vm_page_prot = __pgprot(pgprot_val(vma->vm_page_prot)
                                     | _PAGE_NO_CACHE);

        vma->vm_ops = &spufs_mfc_mmap_vmops;
        return 0;
}
#endif

static int spufs_mfc_open(struct inode *inode, struct file *file)
{
        struct spufs_inode_info *i = SPUFS_I(inode);
        struct spu_context *ctx = i->i_ctx;

        /* we don't want to deal with DMA into other processes */
        if (ctx->owner != current->mm)
                return -EINVAL;

        if (atomic_read(&inode->i_count) != 1)
                return -EBUSY;

        file->private_data = ctx;
        return nonseekable_open(inode, file);
}

/* interrupt-level mfc callback function. */
void spufs_mfc_callback(struct spu *spu)
{
        struct spu_context *ctx = spu->ctx;

        wake_up_all(&ctx->mfc_wq);

        pr_debug("%s %s\n", __FUNCTION__, spu->name);
        if (ctx->mfc_fasync) {
                u32 free_elements, tagstatus;
                unsigned int mask;

                /* no need for spu_acquire in interrupt context */
                free_elements = ctx->ops->get_mfc_free_elements(ctx);
                tagstatus = ctx->ops->read_mfc_tagstatus(ctx);

                mask = 0;
                if (free_elements & 0xffff)
                        mask |= POLLOUT;
                if (tagstatus & ctx->tagwait)
                        mask |= POLLIN;

                kill_fasync(&ctx->mfc_fasync, SIGIO, mask);
        }
}

static int spufs_read_mfc_tagstatus(struct spu_context *ctx, u32 *status)
{
        /* See if any tag group is complete */
        /* FIXME we need locking around tagwait */
        *status = ctx->ops->read_mfc_tagstatus(ctx) & ctx->tagwait;
        ctx->tagwait &= ~*status;
        if (*status)
                return 1;

        /* enable interrupt waiting for any tag group,
           may silently fail if interrupts are already enabled */
        ctx->ops->set_mfc_query(ctx, ctx->tagwait, 1);
        return 0;
}

static ssize_t spufs_mfc_read(struct file *file, char __user *buffer,
                              size_t size, loff_t *pos)
{
        struct spu_context *ctx = file->private_data;
        int ret = -EINVAL;
        u32 status;

        if (size != 4)
                goto out;

        spu_acquire(ctx);
        if (file->f_flags & O_NONBLOCK) {
                status = ctx->ops->read_mfc_tagstatus(ctx);
                if (!(status & ctx->tagwait))
                        ret = -EAGAIN;
                else
                        ctx->tagwait &= ~status;
        } else {
                ret = spufs_wait(ctx->mfc_wq,
                                 spufs_read_mfc_tagstatus(ctx, &status));
        }
        spu_release(ctx);

        if (ret)
                goto out;

        ret = 4;
        if (copy_to_user(buffer, &status, 4))
                ret = -EFAULT;

out:
        return ret;
}

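/*
 * Sanity-check a DMA command before queueing it: only the plain put/get
 * variants are accepted, the local store and effective addresses must
 * agree in their low four bits and be naturally aligned for the
 * transfer size, the size is limited to 16 KB, tags above 15 are
 * reserved for the kernel, and class IDs are not supported here.
 */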
static int spufs_check_valid_dma(struct mfc_dma_command *cmd)
{
        pr_debug("queueing DMA %x %lx %x %x %x\n", cmd->lsa,
                 cmd->ea, cmd->size, cmd->tag, cmd->cmd);

        switch (cmd->cmd) {
        case MFC_PUT_CMD:
        case MFC_PUTF_CMD:
        case MFC_PUTB_CMD:
        case MFC_GET_CMD:
        case MFC_GETF_CMD:
        case MFC_GETB_CMD:
                break;
        default:
                pr_debug("invalid DMA opcode %x\n", cmd->cmd);
                return -EIO;
        }

        if ((cmd->lsa & 0xf) != (cmd->ea & 0xf)) {
                pr_debug("invalid DMA alignment, ea %lx lsa %x\n",
                         cmd->ea, cmd->lsa);
                return -EIO;
        }

        switch (cmd->size & 0xf) {
        case 1:
                break;
        case 2:
                if (cmd->lsa & 1)
                        goto error;
                break;
        case 4:
                if (cmd->lsa & 3)
                        goto error;
                break;
        case 8:
                if (cmd->lsa & 7)
                        goto error;
                break;
        case 0:
                if (cmd->lsa & 15)
                        goto error;
                break;
        error:
        default:
                pr_debug("invalid DMA alignment %x for size %x\n",
                         cmd->lsa & 0xf, cmd->size);
                return -EIO;
        }

        if (cmd->size > 16 * 1024) {
                pr_debug("invalid DMA size %x\n", cmd->size);
                return -EIO;
        }

        if (cmd->tag & 0xfff0) {
                /* we reserve the higher tag numbers for kernel use */
                pr_debug("invalid DMA tag\n");
                return -EIO;
        }

        if (cmd->class) {
                /* not supported in this version */
                pr_debug("invalid DMA class\n");
                return -EIO;
        }

        return 0;
}

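/*
 * Returns non-zero once the command has been dealt with (queued, or
 * failed with a hard error left in *error); returns zero only when the
 * command queue is still full, so that spufs_wait() keeps retrying.
 */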
static int spu_send_mfc_command(struct spu_context *ctx,
                                struct mfc_dma_command cmd,
                                int *error)
{
        *error = ctx->ops->send_mfc_command(ctx, &cmd);
        if (*error == -EAGAIN) {
                /* wait for any tag group to complete
                   so we have space for the new command */
                ctx->ops->set_mfc_query(ctx, ctx->tagwait, 1);
                /* try again, because the queue might be
                   empty again */
                *error = ctx->ops->send_mfc_command(ctx, &cmd);
                if (*error == -EAGAIN)
                        return 0;
        }
        return 1;
}

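/*
 * Rough sketch of how user space is expected to drive this file (field
 * names follow struct mfc_dma_command as used below; the exact struct
 * layout lives in the spu headers):
 *
 *	struct mfc_dma_command cmd = {
 *		.lsa  = ls_offset,          // local store offset
 *		.ea   = (uintptr_t)buf,     // effective address in this process
 *		.size = 16384,              // bytes, at most 16 KB
 *		.tag  = 1,                  // tag group, 0..15
 *		.cmd  = MFC_GET_CMD,        // transfer direction
 *	};
 *	write(mfc_fd, &cmd, sizeof(cmd));   // queue the DMA
 *	read(mfc_fd, &status, 4);           // wait for the tag group
 */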
static ssize_t spufs_mfc_write(struct file *file, const char __user *buffer,
                               size_t size, loff_t *pos)
{
        struct spu_context *ctx = file->private_data;
        struct mfc_dma_command cmd;
        int ret = -EINVAL;

        if (size != sizeof cmd)
                goto out;

        ret = -EFAULT;
        if (copy_from_user(&cmd, buffer, sizeof cmd))
                goto out;

        ret = spufs_check_valid_dma(&cmd);
        if (ret)
                goto out;

        spu_acquire_runnable(ctx);
        if (file->f_flags & O_NONBLOCK) {
                ret = ctx->ops->send_mfc_command(ctx, &cmd);
        } else {
                int status;
                ret = spufs_wait(ctx->mfc_wq,
                                 spu_send_mfc_command(ctx, cmd, &status));
                if (status)
                        ret = status;
        }
        spu_release(ctx);

        if (ret)
                goto out;

        ctx->tagwait |= 1 << cmd.tag;

out:
        return ret;
}

static unsigned int spufs_mfc_poll(struct file *file, poll_table *wait)
{
        struct spu_context *ctx = file->private_data;
        u32 free_elements, tagstatus;
        unsigned int mask;

        spu_acquire(ctx);
        ctx->ops->set_mfc_query(ctx, ctx->tagwait, 2);
        free_elements = ctx->ops->get_mfc_free_elements(ctx);
        tagstatus = ctx->ops->read_mfc_tagstatus(ctx);
        spu_release(ctx);

        poll_wait(file, &ctx->mfc_wq, wait);

        mask = 0;
        if (free_elements & 0xffff)
                mask |= POLLOUT | POLLWRNORM;
        if (tagstatus & ctx->tagwait)
                mask |= POLLIN | POLLRDNORM;

        pr_debug("%s: free %d tagstatus %d tagwait %d\n", __FUNCTION__,
                 free_elements, tagstatus, ctx->tagwait);

        return mask;
}

static int spufs_mfc_flush(struct file *file)
{
        struct spu_context *ctx = file->private_data;
        int ret;

        spu_acquire(ctx);
#if 0
/* this currently hangs */
        ret = spufs_wait(ctx->mfc_wq,
                         ctx->ops->set_mfc_query(ctx, ctx->tagwait, 2));
        if (ret)
                goto out;
        ret = spufs_wait(ctx->mfc_wq,
                         ctx->ops->read_mfc_tagstatus(ctx) == ctx->tagwait);
out:
#else
        ret = 0;
#endif
        spu_release(ctx);

        return ret;
}

static int spufs_mfc_fsync(struct file *file, struct dentry *dentry,
                           int datasync)
{
        return spufs_mfc_flush(file);
}

static int spufs_mfc_fasync(int fd, struct file *file, int on)
{
        struct spu_context *ctx = file->private_data;

        return fasync_helper(fd, file, on, &ctx->mfc_fasync);
}

static struct file_operations spufs_mfc_fops = {
        .open = spufs_mfc_open,
        .read = spufs_mfc_read,
        .write = spufs_mfc_write,
        .poll = spufs_mfc_poll,
        .flush = spufs_mfc_flush,
        .fsync = spufs_mfc_fsync,
        .fasync = spufs_mfc_fasync,
#ifdef CONFIG_SPUFS_MMAP
        .mmap = spufs_mfc_mmap,
#endif
};

static void spufs_npc_set(void *data, u64 val)
{
        struct spu_context *ctx = data;
        spu_acquire(ctx);
        ctx->ops->npc_write(ctx, val);
        spu_release(ctx);
}

static u64 spufs_npc_get(void *data)
{
        struct spu_context *ctx = data;
        u64 ret;
        spu_acquire(ctx);
        ret = ctx->ops->npc_read(ctx);
        spu_release(ctx);
        return ret;
}
DEFINE_SIMPLE_ATTRIBUTE(spufs_npc_ops, spufs_npc_get, spufs_npc_set, "%llx\n")

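/*
 * The attributes below (decr, decr_status, spu_tag_mask, event_mask,
 * srr0) all live in the local store save area, so each access forces
 * the context into the saved state via spu_acquire_saved().
 */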
static void spufs_decr_set(void *data, u64 val)
{
        struct spu_context *ctx = data;
        struct spu_lscsa *lscsa = ctx->csa.lscsa;
        spu_acquire_saved(ctx);
        lscsa->decr.slot[0] = (u32) val;
        spu_release(ctx);
}

static u64 spufs_decr_get(void *data)
{
        struct spu_context *ctx = data;
        struct spu_lscsa *lscsa = ctx->csa.lscsa;
        u64 ret;
        spu_acquire_saved(ctx);
        ret = lscsa->decr.slot[0];
        spu_release(ctx);
        return ret;
}
DEFINE_SIMPLE_ATTRIBUTE(spufs_decr_ops, spufs_decr_get, spufs_decr_set,
                        "%llx\n")

static void spufs_decr_status_set(void *data, u64 val)
{
        struct spu_context *ctx = data;
        struct spu_lscsa *lscsa = ctx->csa.lscsa;
        spu_acquire_saved(ctx);
        lscsa->decr_status.slot[0] = (u32) val;
        spu_release(ctx);
}

static u64 spufs_decr_status_get(void *data)
{
        struct spu_context *ctx = data;
        struct spu_lscsa *lscsa = ctx->csa.lscsa;
        u64 ret;
        spu_acquire_saved(ctx);
        ret = lscsa->decr_status.slot[0];
        spu_release(ctx);
        return ret;
}
DEFINE_SIMPLE_ATTRIBUTE(spufs_decr_status_ops, spufs_decr_status_get,
                        spufs_decr_status_set, "%llx\n")

static void spufs_spu_tag_mask_set(void *data, u64 val)
{
        struct spu_context *ctx = data;
        struct spu_lscsa *lscsa = ctx->csa.lscsa;
        spu_acquire_saved(ctx);
        lscsa->tag_mask.slot[0] = (u32) val;
        spu_release(ctx);
}

static u64 spufs_spu_tag_mask_get(void *data)
{
        struct spu_context *ctx = data;
        struct spu_lscsa *lscsa = ctx->csa.lscsa;
        u64 ret;
        spu_acquire_saved(ctx);
        ret = lscsa->tag_mask.slot[0];
        spu_release(ctx);
        return ret;
}
DEFINE_SIMPLE_ATTRIBUTE(spufs_spu_tag_mask_ops, spufs_spu_tag_mask_get,
                        spufs_spu_tag_mask_set, "%llx\n")

static void spufs_event_mask_set(void *data, u64 val)
{
        struct spu_context *ctx = data;
        struct spu_lscsa *lscsa = ctx->csa.lscsa;
        spu_acquire_saved(ctx);
        lscsa->event_mask.slot[0] = (u32) val;
        spu_release(ctx);
}

static u64 spufs_event_mask_get(void *data)
{
        struct spu_context *ctx = data;
        struct spu_lscsa *lscsa = ctx->csa.lscsa;
        u64 ret;
        spu_acquire_saved(ctx);
        ret = lscsa->event_mask.slot[0];
        spu_release(ctx);
        return ret;
}
DEFINE_SIMPLE_ATTRIBUTE(spufs_event_mask_ops, spufs_event_mask_get,
                        spufs_event_mask_set, "%llx\n")

static void spufs_srr0_set(void *data, u64 val)
{
        struct spu_context *ctx = data;
        struct spu_lscsa *lscsa = ctx->csa.lscsa;
        spu_acquire_saved(ctx);
        lscsa->srr0.slot[0] = (u32) val;
        spu_release(ctx);
}

static u64 spufs_srr0_get(void *data)
{
        struct spu_context *ctx = data;
        struct spu_lscsa *lscsa = ctx->csa.lscsa;
        u64 ret;
        spu_acquire_saved(ctx);
        ret = lscsa->srr0.slot[0];
        spu_release(ctx);
        return ret;
}
DEFINE_SIMPLE_ATTRIBUTE(spufs_srr0_ops, spufs_srr0_get, spufs_srr0_set,
                        "%llx\n")

static u64 spufs_id_get(void *data)
{
        struct spu_context *ctx = data;
        u64 num;

        spu_acquire(ctx);
        if (ctx->state == SPU_STATE_RUNNABLE)
                num = ctx->spu->number;
        else
                num = (unsigned int)-1;
        spu_release(ctx);

        return num;
}
DEFINE_SIMPLE_ATTRIBUTE(spufs_id_ops, spufs_id_get, 0, "0x%llx\n")

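/*
 * Table of the files created in every spufs context directory, together
 * with their access modes.
 */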
struct tree_descr spufs_dir_contents[] = {
        { "mem", &spufs_mem_fops, 0666, },
        { "regs", &spufs_regs_fops, 0666, },
        { "mbox", &spufs_mbox_fops, 0444, },
        { "ibox", &spufs_ibox_fops, 0444, },
        { "wbox", &spufs_wbox_fops, 0222, },
        { "mbox_stat", &spufs_mbox_stat_fops, 0444, },
        { "ibox_stat", &spufs_ibox_stat_fops, 0444, },
        { "wbox_stat", &spufs_wbox_stat_fops, 0444, },
        { "signal1", &spufs_signal1_fops, 0666, },
        { "signal2", &spufs_signal2_fops, 0666, },
        { "signal1_type", &spufs_signal1_type, 0666, },
        { "signal2_type", &spufs_signal2_type, 0666, },
        { "mss", &spufs_mss_fops, 0666, },
        { "mfc", &spufs_mfc_fops, 0666, },
        { "cntl", &spufs_cntl_fops, 0666, },
        { "npc", &spufs_npc_ops, 0666, },
        { "fpcr", &spufs_fpcr_fops, 0666, },
        { "decr", &spufs_decr_ops, 0666, },
        { "decr_status", &spufs_decr_status_ops, 0666, },
        { "spu_tag_mask", &spufs_spu_tag_mask_ops, 0666, },
        { "event_mask", &spufs_event_mask_ops, 0666, },
        { "srr0", &spufs_srr0_ops, 0666, },
        { "phys-id", &spufs_id_ops, 0666, },
        {},
};