[POWERPC] spufs: Hook up spufs_release_mem
[sfrench/cifs-2.6.git] / arch / powerpc / platforms / cell / spufs / file.c
1 /*
2  * SPU file system -- file contents
3  *
4  * (C) Copyright IBM Deutschland Entwicklung GmbH 2005
5  *
6  * Author: Arnd Bergmann <arndb@de.ibm.com>
7  *
8  * This program is free software; you can redistribute it and/or modify
9  * it under the terms of the GNU General Public License as published by
10  * the Free Software Foundation; either version 2, or (at your option)
11  * any later version.
12  *
13  * This program is distributed in the hope that it will be useful,
14  * but WITHOUT ANY WARRANTY; without even the implied warranty of
15  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
16  * GNU General Public License for more details.
17  *
18  * You should have received a copy of the GNU General Public License
19  * along with this program; if not, write to the Free Software
20  * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
21  */
22
23 #undef DEBUG
24
25 #include <linux/fs.h>
26 #include <linux/ioctl.h>
27 #include <linux/module.h>
28 #include <linux/pagemap.h>
29 #include <linux/poll.h>
30 #include <linux/ptrace.h>
31
32 #include <asm/io.h>
33 #include <asm/semaphore.h>
34 #include <asm/spu.h>
35 #include <asm/spu_info.h>
36 #include <asm/uaccess.h>
37
38 #include "spufs.h"
39
40 #define SPUFS_MMAP_4K (PAGE_SIZE == 0x1000)
41
42 static int
43 spufs_mem_open(struct inode *inode, struct file *file)
44 {
45         struct spufs_inode_info *i = SPUFS_I(inode);
46         struct spu_context *ctx = i->i_ctx;
47
48         spin_lock(&ctx->mapping_lock);
49         file->private_data = ctx;
50         if (!i->i_openers++)
51                 ctx->local_store = inode->i_mapping;
52         spin_unlock(&ctx->mapping_lock);
53         return 0;
54 }
55
56 static int
57 spufs_mem_release(struct inode *inode, struct file *file)
58 {
59         struct spufs_inode_info *i = SPUFS_I(inode);
60         struct spu_context *ctx = i->i_ctx;
61
62         spin_lock(&ctx->mapping_lock);
63         if (!--i->i_openers)
64                 ctx->local_store = NULL;
65         spin_unlock(&ctx->mapping_lock);
66         return 0;
67 }
68
69 static ssize_t
70 __spufs_mem_read(struct spu_context *ctx, char __user *buffer,
71                         size_t size, loff_t *pos)
72 {
73         char *local_store = ctx->ops->get_ls(ctx);
74         return simple_read_from_buffer(buffer, size, pos, local_store,
75                                         LS_SIZE);
76 }
77
78 static ssize_t
79 spufs_mem_read(struct file *file, char __user *buffer,
80                                 size_t size, loff_t *pos)
81 {
82         struct spu_context *ctx = file->private_data;
83         ssize_t ret;
84
85         spu_acquire(ctx);
86         ret = __spufs_mem_read(ctx, buffer, size, pos);
87         spu_release(ctx);
88         return ret;
89 }
90
91 static ssize_t
92 spufs_mem_write(struct file *file, const char __user *buffer,
93                                         size_t size, loff_t *ppos)
94 {
95         struct spu_context *ctx = file->private_data;
96         char *local_store;
97         loff_t pos = *ppos;
98         int ret;
99
100         if (pos < 0)
101                 return -EINVAL;
102         if (pos > LS_SIZE)
103                 return -EFBIG;
104         if (size > LS_SIZE - pos)
105                 size = LS_SIZE - pos;
106
107         spu_acquire(ctx);
108         local_store = ctx->ops->get_ls(ctx);
109         ret = copy_from_user(local_store + pos, buffer, size);
110         spu_release(ctx);
111
112         if (ret)
113                 return -EFAULT;
114         *ppos = pos + size;
115         return size;
116 }
117
/*
 * Fault handler for mappings of SPU local store.  Depending on whether
 * the context is currently loaded on a physical SPU, the fault is
 * served from the hardware local store (uncached) or from the saved
 * copy in the context save area (cached vmalloc memory).
 */
static unsigned long spufs_mem_mmap_nopfn(struct vm_area_struct *vma,
					  unsigned long address)
{
	struct spu_context *ctx = vma->vm_file->private_data;
	unsigned long pfn, offset, addr0 = address;
#ifdef CONFIG_SPU_FS_64K_LS
	struct spu_state *csa = &ctx->csa;
	int psize;

	/* Check what page size we are using */
	psize = get_slice_psize(vma->vm_mm, address);

	/* Some sanity checking */
	BUG_ON(csa->use_big_pages != (psize == MMU_PAGE_64K));

	/* Wow, 64K, cool, we need to align the address though */
	if (csa->use_big_pages) {
		BUG_ON(vma->vm_start & 0xffff);
		address &= ~0xfffful;
	}
#endif /* CONFIG_SPU_FS_64K_LS */

	offset = (address - vma->vm_start) + (vma->vm_pgoff << PAGE_SHIFT);
	if (offset >= LS_SIZE)
		return NOPFN_SIGBUS;

	pr_debug("spufs_mem_mmap_nopfn address=0x%lx -> 0x%lx, offset=0x%lx\n",
		 addr0, address, offset);

	spu_acquire(ctx);

	if (ctx->state == SPU_STATE_SAVED) {
		/* Saved context: map the backup copy, cacheable. */
		vma->vm_page_prot = __pgprot(pgprot_val(vma->vm_page_prot)
							& ~_PAGE_NO_CACHE);
		pfn = vmalloc_to_pfn(ctx->csa.lscsa->ls + offset);
	} else {
		/* Running context: map the physical local store, uncached. */
		vma->vm_page_prot = __pgprot(pgprot_val(vma->vm_page_prot)
					     | _PAGE_NO_CACHE);
		pfn = (ctx->spu->local_store_phys + offset) >> PAGE_SHIFT;
	}
	vm_insert_pfn(vma, address, pfn);

	spu_release(ctx);

	return NOPFN_REFAULT;
}


/* Fault handler table for local-store mappings. */
static struct vm_operations_struct spufs_mem_mmap_vmops = {
	.nopfn = spufs_mem_mmap_nopfn,
};
169
/*
 * mmap support for SPU local store.  The mapping must be shared: the
 * backing storage changes as the context is scheduled in and out, so a
 * private copy would go stale.
 */
static int spufs_mem_mmap(struct file *file, struct vm_area_struct *vma)
{
#ifdef CONFIG_SPU_FS_64K_LS
	struct spu_context	*ctx = file->private_data;
	struct spu_state	*csa = &ctx->csa;

	/* Sanity check VMA alignment */
	if (csa->use_big_pages) {
		pr_debug("spufs_mem_mmap 64K, start=0x%lx, end=0x%lx,"
			 " pgoff=0x%lx\n", vma->vm_start, vma->vm_end,
			 vma->vm_pgoff);
		/* 64k mappings must be 64k-aligned in both address and offset */
		if (vma->vm_start & 0xffff)
			return -EINVAL;
		if (vma->vm_pgoff & 0xf)
			return -EINVAL;
	}
#endif /* CONFIG_SPU_FS_64K_LS */

	if (!(vma->vm_flags & VM_SHARED))
		return -EINVAL;

	vma->vm_flags |= VM_IO | VM_PFNMAP;
	vma->vm_page_prot = __pgprot(pgprot_val(vma->vm_page_prot)
				     | _PAGE_NO_CACHE);

	vma->vm_ops = &spufs_mem_mmap_vmops;
	return 0;
}
198
#ifdef CONFIG_SPU_FS_64K_LS
/*
 * Pick a user address range for mapping the local store.  When the
 * context save area uses 64k pages, the mapping must come from a 64k
 * slice; otherwise defer to the mm's default policy.
 */
unsigned long spufs_get_unmapped_area(struct file *file, unsigned long addr,
				      unsigned long len, unsigned long pgoff,
				      unsigned long flags)
{
	struct spu_context	*ctx = file->private_data;
	struct spu_state	*csa = &ctx->csa;

	/* If not using big pages, fallback to normal MM g_u_a */
	if (!csa->use_big_pages)
		return current->mm->get_unmapped_area(file, addr, len,
						      pgoff, flags);

	/* Else, try to obtain a 64K pages slice */
	return slice_get_unmapped_area(addr, len, flags,
				       MMU_PAGE_64K, 1, 0);
}
#endif /* CONFIG_SPU_FS_64K_LS */
217
/* File operations for the "mem" file: direct local store access. */
static const struct file_operations spufs_mem_fops = {
	.open			= spufs_mem_open,
	.release		= spufs_mem_release,
	.read			= spufs_mem_read,
	.write			= spufs_mem_write,
	.llseek			= generic_file_llseek,
	.mmap			= spufs_mem_mmap,
#ifdef CONFIG_SPU_FS_64K_LS
	.get_unmapped_area	= spufs_get_unmapped_area,
#endif
};
229
/*
 * Common fault handler for mappings of the SPU problem state area.
 * ps_offs/ps_size describe the window of the problem state area that
 * the calling file exposes.  The context must be running on a physical
 * SPU for the mapping to exist, so this may sleep in
 * spu_acquire_runnable() until the context is scheduled in.
 */
static unsigned long spufs_ps_nopfn(struct vm_area_struct *vma,
				    unsigned long address,
				    unsigned long ps_offs,
				    unsigned long ps_size)
{
	struct spu_context *ctx = vma->vm_file->private_data;
	unsigned long area, offset = address - vma->vm_start;
	int ret;

	offset += vma->vm_pgoff << PAGE_SHIFT;
	if (offset >= ps_size)
		return NOPFN_SIGBUS;

	/* error here usually means a signal.. we might want to test
	 * the error code more precisely though
	 */
	ret = spu_acquire_runnable(ctx, 0);
	if (ret)
		return NOPFN_REFAULT;

	area = ctx->spu->problem_phys + ps_offs;
	vm_insert_pfn(vma, address, (area + offset) >> PAGE_SHIFT);
	spu_release(ctx);

	return NOPFN_REFAULT;
}
256
#if SPUFS_MMAP_4K
/* Fault handler: window into the problem state control registers. */
static unsigned long spufs_cntl_mmap_nopfn(struct vm_area_struct *vma,
					   unsigned long address)
{
	return spufs_ps_nopfn(vma, address, 0x4000, 0x1000);
}

static struct vm_operations_struct spufs_cntl_mmap_vmops = {
	.nopfn = spufs_cntl_mmap_nopfn,
};

/*
 * mmap support for problem state control area [0x4000 - 0x4fff].
 */
static int spufs_cntl_mmap(struct file *file, struct vm_area_struct *vma)
{
	/* Shared mapping of uncached, guarded MMIO registers only. */
	if (!(vma->vm_flags & VM_SHARED))
		return -EINVAL;

	vma->vm_flags |= VM_IO | VM_PFNMAP;
	vma->vm_page_prot = __pgprot(pgprot_val(vma->vm_page_prot)
				     | _PAGE_NO_CACHE | _PAGE_GUARDED);

	vma->vm_ops = &spufs_cntl_mmap_vmops;
	return 0;
}
#else /* SPUFS_MMAP_4K */
#define spufs_cntl_mmap NULL
#endif /* !SPUFS_MMAP_4K */
286
287 static u64 spufs_cntl_get(void *data)
288 {
289         struct spu_context *ctx = data;
290         u64 val;
291
292         spu_acquire(ctx);
293         val = ctx->ops->status_read(ctx);
294         spu_release(ctx);
295
296         return val;
297 }
298
299 static void spufs_cntl_set(void *data, u64 val)
300 {
301         struct spu_context *ctx = data;
302
303         spu_acquire(ctx);
304         ctx->ops->runcntl_write(ctx, val);
305         spu_release(ctx);
306 }
307
308 static int spufs_cntl_open(struct inode *inode, struct file *file)
309 {
310         struct spufs_inode_info *i = SPUFS_I(inode);
311         struct spu_context *ctx = i->i_ctx;
312
313         spin_lock(&ctx->mapping_lock);
314         file->private_data = ctx;
315         if (!i->i_openers++)
316                 ctx->cntl = inode->i_mapping;
317         spin_unlock(&ctx->mapping_lock);
318         return simple_attr_open(inode, file, spufs_cntl_get,
319                                         spufs_cntl_set, "0x%08lx");
320 }
321
322 static int
323 spufs_cntl_release(struct inode *inode, struct file *file)
324 {
325         struct spufs_inode_info *i = SPUFS_I(inode);
326         struct spu_context *ctx = i->i_ctx;
327
328         simple_attr_close(inode, file);
329
330         spin_lock(&ctx->mapping_lock);
331         if (!--i->i_openers)
332                 ctx->cntl = NULL;
333         spin_unlock(&ctx->mapping_lock);
334         return 0;
335 }
336
/* File operations for the "cntl" file: run control via libfs attrs. */
static const struct file_operations spufs_cntl_fops = {
	.open = spufs_cntl_open,
	.release = spufs_cntl_release,
	.read = simple_attr_read,
	.write = simple_attr_write,
	.mmap = spufs_cntl_mmap,
};
344
345 static int
346 spufs_regs_open(struct inode *inode, struct file *file)
347 {
348         struct spufs_inode_info *i = SPUFS_I(inode);
349         file->private_data = i->i_ctx;
350         return 0;
351 }
352
353 static ssize_t
354 __spufs_regs_read(struct spu_context *ctx, char __user *buffer,
355                         size_t size, loff_t *pos)
356 {
357         struct spu_lscsa *lscsa = ctx->csa.lscsa;
358         return simple_read_from_buffer(buffer, size, pos,
359                                       lscsa->gprs, sizeof lscsa->gprs);
360 }
361
362 static ssize_t
363 spufs_regs_read(struct file *file, char __user *buffer,
364                 size_t size, loff_t *pos)
365 {
366         int ret;
367         struct spu_context *ctx = file->private_data;
368
369         spu_acquire_saved(ctx);
370         ret = __spufs_regs_read(ctx, buffer, size, pos);
371         spu_release(ctx);
372         return ret;
373 }
374
375 static ssize_t
376 spufs_regs_write(struct file *file, const char __user *buffer,
377                  size_t size, loff_t *pos)
378 {
379         struct spu_context *ctx = file->private_data;
380         struct spu_lscsa *lscsa = ctx->csa.lscsa;
381         int ret;
382
383         size = min_t(ssize_t, sizeof lscsa->gprs - *pos, size);
384         if (size <= 0)
385                 return -EFBIG;
386         *pos += size;
387
388         spu_acquire_saved(ctx);
389
390         ret = copy_from_user(lscsa->gprs + *pos - size,
391                              buffer, size) ? -EFAULT : size;
392
393         spu_release(ctx);
394         return ret;
395 }
396
/* File operations for the "regs" file: saved GPR image access. */
static const struct file_operations spufs_regs_fops = {
	.open	 = spufs_regs_open,
	.read	 = spufs_regs_read,
	.write	 = spufs_regs_write,
	.llseek  = generic_file_llseek,
};
403
404 static ssize_t
405 __spufs_fpcr_read(struct spu_context *ctx, char __user * buffer,
406                         size_t size, loff_t * pos)
407 {
408         struct spu_lscsa *lscsa = ctx->csa.lscsa;
409         return simple_read_from_buffer(buffer, size, pos,
410                                       &lscsa->fpcr, sizeof(lscsa->fpcr));
411 }
412
413 static ssize_t
414 spufs_fpcr_read(struct file *file, char __user * buffer,
415                 size_t size, loff_t * pos)
416 {
417         int ret;
418         struct spu_context *ctx = file->private_data;
419
420         spu_acquire_saved(ctx);
421         ret = __spufs_fpcr_read(ctx, buffer, size, pos);
422         spu_release(ctx);
423         return ret;
424 }
425
426 static ssize_t
427 spufs_fpcr_write(struct file *file, const char __user * buffer,
428                  size_t size, loff_t * pos)
429 {
430         struct spu_context *ctx = file->private_data;
431         struct spu_lscsa *lscsa = ctx->csa.lscsa;
432         int ret;
433
434         size = min_t(ssize_t, sizeof(lscsa->fpcr) - *pos, size);
435         if (size <= 0)
436                 return -EFBIG;
437         *pos += size;
438
439         spu_acquire_saved(ctx);
440
441         ret = copy_from_user((char *)&lscsa->fpcr + *pos - size,
442                              buffer, size) ? -EFAULT : size;
443
444         spu_release(ctx);
445         return ret;
446 }
447
/* File operations for the "fpcr" file: saved FPCR register access. */
static const struct file_operations spufs_fpcr_fops = {
	.open = spufs_regs_open,
	.read = spufs_fpcr_read,
	.write = spufs_fpcr_write,
	.llseek = generic_file_llseek,
};
454
455 /* generic open function for all pipe-like files */
456 static int spufs_pipe_open(struct inode *inode, struct file *file)
457 {
458         struct spufs_inode_info *i = SPUFS_I(inode);
459         file->private_data = i->i_ctx;
460
461         return nonseekable_open(inode, file);
462 }
463
/*
 * Read as many bytes from the mailbox as possible, until
 * one of the conditions becomes true:
 *
 * - no more data available in the mailbox
 * - end of the user provided buffer
 * - end of the mapped area
 */
static ssize_t spufs_mbox_read(struct file *file, char __user *buf,
			size_t len, loff_t *pos)
{
	struct spu_context *ctx = file->private_data;
	u32 mbox_data, __user *udata;
	ssize_t count;

	/* Mailbox entries are 32 bits; shorter reads make no sense. */
	if (len < 4)
		return -EINVAL;

	if (!access_ok(VERIFY_WRITE, buf, len))
		return -EFAULT;

	udata = (void __user *)buf;

	spu_acquire(ctx);
	for (count = 0; (count + 4) <= len; count += 4, udata++) {
		int ret;
		/* ret == 0 means the mailbox is empty: stop early. */
		ret = ctx->ops->mbox_read(ctx, &mbox_data);
		if (ret == 0)
			break;

		/*
		 * at the end of the mapped area, we can fault
		 * but still need to return the data we have
		 * read successfully so far.
		 */
		ret = __put_user(mbox_data, udata);
		if (ret) {
			if (!count)
				count = -EFAULT;
			break;
		}
	}
	spu_release(ctx);

	/* Nothing was available at all: ask the caller to retry. */
	if (!count)
		count = -EAGAIN;

	return count;
}
513
/* File operations for the "mbox" file: SPU-to-PPU mailbox, read only. */
static const struct file_operations spufs_mbox_fops = {
	.open	= spufs_pipe_open,
	.read	= spufs_mbox_read,
};
518
519 static ssize_t spufs_mbox_stat_read(struct file *file, char __user *buf,
520                         size_t len, loff_t *pos)
521 {
522         struct spu_context *ctx = file->private_data;
523         u32 mbox_stat;
524
525         if (len < 4)
526                 return -EINVAL;
527
528         spu_acquire(ctx);
529
530         mbox_stat = ctx->ops->mbox_stat_read(ctx) & 0xff;
531
532         spu_release(ctx);
533
534         if (copy_to_user(buf, &mbox_stat, sizeof mbox_stat))
535                 return -EFAULT;
536
537         return 4;
538 }
539
/* File operations for the "mbox_stat" file. */
static const struct file_operations spufs_mbox_stat_fops = {
	.open	= spufs_pipe_open,
	.read	= spufs_mbox_stat_read,
};
544
/* low-level ibox access function */
size_t spu_ibox_read(struct spu_context *ctx, u32 *data)
{
	/*
	 * Backend-specific non-blocking read; callers treat a return of 0
	 * as "no data available".
	 */
	return ctx->ops->ibox_read(ctx, data);
}
550
/* Register/unregister for SIGIO on interrupt mailbox data arrival. */
static int spufs_ibox_fasync(int fd, struct file *file, int on)
{
	struct spu_context *ctx = file->private_data;

	return fasync_helper(fd, file, on, &ctx->ibox_fasync);
}
557
/* interrupt-level ibox callback function. */
void spufs_ibox_callback(struct spu *spu)
{
	struct spu_context *ctx = spu->ctx;

	/* Wake blocked readers/pollers and notify fasync listeners. */
	wake_up_all(&ctx->ibox_wq);
	kill_fasync(&ctx->ibox_fasync, SIGIO, POLLIN);
}
566
/*
 * Read as many bytes from the interrupt mailbox as possible, until
 * one of the conditions becomes true:
 *
 * - no more data available in the mailbox
 * - end of the user provided buffer
 * - end of the mapped area
 *
 * If the file is opened without O_NONBLOCK, we wait here until
 * any data is available, but return when we have been able to
 * read something.
 */
static ssize_t spufs_ibox_read(struct file *file, char __user *buf,
			size_t len, loff_t *pos)
{
	struct spu_context *ctx = file->private_data;
	u32 ibox_data, __user *udata;
	ssize_t count;

	/* Mailbox entries are 32 bits; shorter reads make no sense. */
	if (len < 4)
		return -EINVAL;

	if (!access_ok(VERIFY_WRITE, buf, len))
		return -EFAULT;

	udata = (void __user *)buf;

	spu_acquire(ctx);

	/* wait only for the first element */
	count = 0;
	if (file->f_flags & O_NONBLOCK) {
		if (!spu_ibox_read(ctx, &ibox_data))
			count = -EAGAIN;
	} else {
		/* spufs_wait returns 0 on success or -ERESTARTSYS etc. */
		count = spufs_wait(ctx->ibox_wq, spu_ibox_read(ctx, &ibox_data));
	}
	if (count)
		goto out;

	/* if we can't write at all, return -EFAULT */
	count = __put_user(ibox_data, udata);
	if (count)
		goto out;

	/* Copy further elements opportunistically, without blocking. */
	for (count = 4, udata++; (count + 4) <= len; count += 4, udata++) {
		int ret;
		ret = ctx->ops->ibox_read(ctx, &ibox_data);
		if (ret == 0)
			break;
		/*
		 * at the end of the mapped area, we can fault
		 * but still need to return the data we have
		 * read successfully so far.
		 */
		ret = __put_user(ibox_data, udata);
		if (ret)
			break;
	}

out:
	spu_release(ctx);

	return count;
}
632
633 static unsigned int spufs_ibox_poll(struct file *file, poll_table *wait)
634 {
635         struct spu_context *ctx = file->private_data;
636         unsigned int mask;
637
638         poll_wait(file, &ctx->ibox_wq, wait);
639
640         spu_acquire(ctx);
641         mask = ctx->ops->mbox_stat_poll(ctx, POLLIN | POLLRDNORM);
642         spu_release(ctx);
643
644         return mask;
645 }
646
/* File operations for the "ibox" file: interrupt mailbox, read only. */
static const struct file_operations spufs_ibox_fops = {
	.open	= spufs_pipe_open,
	.read	= spufs_ibox_read,
	.poll	= spufs_ibox_poll,
	.fasync	= spufs_ibox_fasync,
};
653
654 static ssize_t spufs_ibox_stat_read(struct file *file, char __user *buf,
655                         size_t len, loff_t *pos)
656 {
657         struct spu_context *ctx = file->private_data;
658         u32 ibox_stat;
659
660         if (len < 4)
661                 return -EINVAL;
662
663         spu_acquire(ctx);
664         ibox_stat = (ctx->ops->mbox_stat_read(ctx) >> 16) & 0xff;
665         spu_release(ctx);
666
667         if (copy_to_user(buf, &ibox_stat, sizeof ibox_stat))
668                 return -EFAULT;
669
670         return 4;
671 }
672
/* File operations for the "ibox_stat" file. */
static const struct file_operations spufs_ibox_stat_fops = {
	.open	= spufs_pipe_open,
	.read	= spufs_ibox_stat_read,
};
677
/* low-level mailbox write */
size_t spu_wbox_write(struct spu_context *ctx, u32 data)
{
	/*
	 * Backend-specific non-blocking write; callers treat a return of 0
	 * as "mailbox full".
	 */
	return ctx->ops->wbox_write(ctx, data);
}
683
684 static int spufs_wbox_fasync(int fd, struct file *file, int on)
685 {
686         struct spu_context *ctx = file->private_data;
687         int ret;
688
689         ret = fasync_helper(fd, file, on, &ctx->wbox_fasync);
690
691         return ret;
692 }
693
/* interrupt-level wbox callback function. */
void spufs_wbox_callback(struct spu *spu)
{
	struct spu_context *ctx = spu->ctx;

	/* Wake blocked writers/pollers and notify fasync listeners. */
	wake_up_all(&ctx->wbox_wq);
	kill_fasync(&ctx->wbox_fasync, SIGIO, POLLOUT);
}
702
/*
 * Write as many bytes to the interrupt mailbox as possible, until
 * one of the conditions becomes true:
 *
 * - the mailbox is full
 * - end of the user provided buffer
 * - end of the mapped area
 *
 * If the file is opened without O_NONBLOCK, we wait here until
 * space is available, but return when we have been able to
 * write something.
 */
static ssize_t spufs_wbox_write(struct file *file, const char __user *buf,
			size_t len, loff_t *pos)
{
	struct spu_context *ctx = file->private_data;
	u32 wbox_data, __user *udata;
	ssize_t count;

	/* Mailbox entries are 32 bits; shorter writes make no sense. */
	if (len < 4)
		return -EINVAL;

	udata = (void __user *)buf;
	if (!access_ok(VERIFY_READ, buf, len))
		return -EFAULT;

	if (__get_user(wbox_data, udata))
		return -EFAULT;

	spu_acquire(ctx);

	/*
	 * make sure we can at least write one element, by waiting
	 * in case of !O_NONBLOCK
	 */
	count = 0;
	if (file->f_flags & O_NONBLOCK) {
		if (!spu_wbox_write(ctx, wbox_data))
			count = -EAGAIN;
	} else {
		count = spufs_wait(ctx->wbox_wq, spu_wbox_write(ctx, wbox_data));
	}

	if (count)
		goto out;

	/* write as much as possible */
	for (count = 4, udata++; (count + 4) <= len; count += 4, udata++) {
		int ret;
		ret = __get_user(wbox_data, udata);
		if (ret)
			break;

		/* ret == 0 means the mailbox is full: stop early. */
		ret = spu_wbox_write(ctx, wbox_data);
		if (ret == 0)
			break;
	}

out:
	spu_release(ctx);
	return count;
}
765
766 static unsigned int spufs_wbox_poll(struct file *file, poll_table *wait)
767 {
768         struct spu_context *ctx = file->private_data;
769         unsigned int mask;
770
771         poll_wait(file, &ctx->wbox_wq, wait);
772
773         spu_acquire(ctx);
774         mask = ctx->ops->mbox_stat_poll(ctx, POLLOUT | POLLWRNORM);
775         spu_release(ctx);
776
777         return mask;
778 }
779
/* File operations for the "wbox" file: PPU-to-SPU mailbox, write only. */
static const struct file_operations spufs_wbox_fops = {
	.open	= spufs_pipe_open,
	.write	= spufs_wbox_write,
	.poll	= spufs_wbox_poll,
	.fasync	= spufs_wbox_fasync,
};
786
787 static ssize_t spufs_wbox_stat_read(struct file *file, char __user *buf,
788                         size_t len, loff_t *pos)
789 {
790         struct spu_context *ctx = file->private_data;
791         u32 wbox_stat;
792
793         if (len < 4)
794                 return -EINVAL;
795
796         spu_acquire(ctx);
797         wbox_stat = (ctx->ops->mbox_stat_read(ctx) >> 8) & 0xff;
798         spu_release(ctx);
799
800         if (copy_to_user(buf, &wbox_stat, sizeof wbox_stat))
801                 return -EFAULT;
802
803         return 4;
804 }
805
/* File operations for the "wbox_stat" file. */
static const struct file_operations spufs_wbox_stat_fops = {
	.open	= spufs_pipe_open,
	.read	= spufs_wbox_stat_read,
};
810
811 static int spufs_signal1_open(struct inode *inode, struct file *file)
812 {
813         struct spufs_inode_info *i = SPUFS_I(inode);
814         struct spu_context *ctx = i->i_ctx;
815
816         spin_lock(&ctx->mapping_lock);
817         file->private_data = ctx;
818         if (!i->i_openers++)
819                 ctx->signal1 = inode->i_mapping;
820         spin_unlock(&ctx->mapping_lock);
821         return nonseekable_open(inode, file);
822 }
823
824 static int
825 spufs_signal1_release(struct inode *inode, struct file *file)
826 {
827         struct spufs_inode_info *i = SPUFS_I(inode);
828         struct spu_context *ctx = i->i_ctx;
829
830         spin_lock(&ctx->mapping_lock);
831         if (!--i->i_openers)
832                 ctx->signal1 = NULL;
833         spin_unlock(&ctx->mapping_lock);
834         return 0;
835 }
836
837 static ssize_t __spufs_signal1_read(struct spu_context *ctx, char __user *buf,
838                         size_t len, loff_t *pos)
839 {
840         int ret = 0;
841         u32 data;
842
843         if (len < 4)
844                 return -EINVAL;
845
846         if (ctx->csa.spu_chnlcnt_RW[3]) {
847                 data = ctx->csa.spu_chnldata_RW[3];
848                 ret = 4;
849         }
850
851         if (!ret)
852                 goto out;
853
854         if (copy_to_user(buf, &data, 4))
855                 return -EFAULT;
856
857 out:
858         return ret;
859 }
860
861 static ssize_t spufs_signal1_read(struct file *file, char __user *buf,
862                         size_t len, loff_t *pos)
863 {
864         int ret;
865         struct spu_context *ctx = file->private_data;
866
867         spu_acquire_saved(ctx);
868         ret = __spufs_signal1_read(ctx, buf, len, pos);
869         spu_release(ctx);
870
871         return ret;
872 }
873
874 static ssize_t spufs_signal1_write(struct file *file, const char __user *buf,
875                         size_t len, loff_t *pos)
876 {
877         struct spu_context *ctx;
878         u32 data;
879
880         ctx = file->private_data;
881
882         if (len < 4)
883                 return -EINVAL;
884
885         if (copy_from_user(&data, buf, 4))
886                 return -EFAULT;
887
888         spu_acquire(ctx);
889         ctx->ops->signal1_write(ctx, data);
890         spu_release(ctx);
891
892         return 4;
893 }
894
/* Fault handler: window into the signal 1 notification area. */
static unsigned long spufs_signal1_mmap_nopfn(struct vm_area_struct *vma,
					      unsigned long address)
{
#if PAGE_SIZE == 0x1000
	return spufs_ps_nopfn(vma, address, 0x14000, 0x1000);
#elif PAGE_SIZE == 0x10000
	/* For 64k pages, both signal1 and signal2 can be used to mmap the whole
	 * signal 1 and 2 area
	 */
	return spufs_ps_nopfn(vma, address, 0x10000, 0x10000);
#else
#error unsupported page size
#endif
}

static struct vm_operations_struct spufs_signal1_mmap_vmops = {
	.nopfn = spufs_signal1_mmap_nopfn,
};

/* mmap support for the signal 1 notification registers. */
static int spufs_signal1_mmap(struct file *file, struct vm_area_struct *vma)
{
	/* Shared mapping of uncached, guarded MMIO registers only. */
	if (!(vma->vm_flags & VM_SHARED))
		return -EINVAL;

	vma->vm_flags |= VM_IO | VM_PFNMAP;
	vma->vm_page_prot = __pgprot(pgprot_val(vma->vm_page_prot)
				     | _PAGE_NO_CACHE | _PAGE_GUARDED);

	vma->vm_ops = &spufs_signal1_mmap_vmops;
	return 0;
}
926
/* File operations for the "signal1" file. */
static const struct file_operations spufs_signal1_fops = {
	.open = spufs_signal1_open,
	.release = spufs_signal1_release,
	.read = spufs_signal1_read,
	.write = spufs_signal1_write,
	.mmap = spufs_signal1_mmap,
};
934
935 static int spufs_signal2_open(struct inode *inode, struct file *file)
936 {
937         struct spufs_inode_info *i = SPUFS_I(inode);
938         struct spu_context *ctx = i->i_ctx;
939
940         spin_lock(&ctx->mapping_lock);
941         file->private_data = ctx;
942         if (!i->i_openers++)
943                 ctx->signal2 = inode->i_mapping;
944         spin_unlock(&ctx->mapping_lock);
945         return nonseekable_open(inode, file);
946 }
947
948 static int
949 spufs_signal2_release(struct inode *inode, struct file *file)
950 {
951         struct spufs_inode_info *i = SPUFS_I(inode);
952         struct spu_context *ctx = i->i_ctx;
953
954         spin_lock(&ctx->mapping_lock);
955         if (!--i->i_openers)
956                 ctx->signal2 = NULL;
957         spin_unlock(&ctx->mapping_lock);
958         return 0;
959 }
960
961 static ssize_t __spufs_signal2_read(struct spu_context *ctx, char __user *buf,
962                         size_t len, loff_t *pos)
963 {
964         int ret = 0;
965         u32 data;
966
967         if (len < 4)
968                 return -EINVAL;
969
970         if (ctx->csa.spu_chnlcnt_RW[4]) {
971                 data =  ctx->csa.spu_chnldata_RW[4];
972                 ret = 4;
973         }
974
975         if (!ret)
976                 goto out;
977
978         if (copy_to_user(buf, &data, 4))
979                 return -EFAULT;
980
981 out:
982         return ret;
983 }
984
985 static ssize_t spufs_signal2_read(struct file *file, char __user *buf,
986                         size_t len, loff_t *pos)
987 {
988         struct spu_context *ctx = file->private_data;
989         int ret;
990
991         spu_acquire_saved(ctx);
992         ret = __spufs_signal2_read(ctx, buf, len, pos);
993         spu_release(ctx);
994
995         return ret;
996 }
997
998 static ssize_t spufs_signal2_write(struct file *file, const char __user *buf,
999                         size_t len, loff_t *pos)
1000 {
1001         struct spu_context *ctx;
1002         u32 data;
1003
1004         ctx = file->private_data;
1005
1006         if (len < 4)
1007                 return -EINVAL;
1008
1009         if (copy_from_user(&data, buf, 4))
1010                 return -EFAULT;
1011
1012         spu_acquire(ctx);
1013         ctx->ops->signal2_write(ctx, data);
1014         spu_release(ctx);
1015
1016         return 4;
1017 }
1018
#if SPUFS_MMAP_4K
/*
 * Fault handler for mappings of the signal2 area.
 *
 * NOTE(review): this whole section is only compiled when SPUFS_MMAP_4K,
 * i.e. PAGE_SIZE == 0x1000, so the PAGE_SIZE == 0x10000 branch below can
 * never be selected here -- confirm whether the inner #if/#elif was meant
 * to live outside the SPUFS_MMAP_4K guard.
 */
static unsigned long spufs_signal2_mmap_nopfn(struct vm_area_struct *vma,
					      unsigned long address)
{
#if PAGE_SIZE == 0x1000
	return spufs_ps_nopfn(vma, address, 0x1c000, 0x1000);
#elif PAGE_SIZE == 0x10000
	/* For 64k pages, both signal1 and signal2 can be used to mmap the whole
	 * signal 1 and 2 area
	 */
	return spufs_ps_nopfn(vma, address, 0x10000, 0x10000);
#else
#error unsupported page size
#endif
}

static struct vm_operations_struct spufs_signal2_mmap_vmops = {
	.nopfn = spufs_signal2_mmap_nopfn,
};

/*
 * mmap support for the signal2 area.  Only shared mappings are allowed,
 * and the pages are mapped uncached and guarded since they target
 * problem-state registers.
 */
static int spufs_signal2_mmap(struct file *file, struct vm_area_struct *vma)
{
	if (!(vma->vm_flags & VM_SHARED))
		return -EINVAL;

	vma->vm_flags |= VM_IO | VM_PFNMAP;
	vma->vm_page_prot = __pgprot(pgprot_val(vma->vm_page_prot)
				     | _PAGE_NO_CACHE | _PAGE_GUARDED);

	vma->vm_ops = &spufs_signal2_mmap_vmops;
	return 0;
}
#else /* SPUFS_MMAP_4K */
#define spufs_signal2_mmap NULL
#endif /* !SPUFS_MMAP_4K */
1054
/* File operations for the per-context "signal2" file. */
static const struct file_operations spufs_signal2_fops = {
	.open = spufs_signal2_open,
	.release = spufs_signal2_release,
	.read = spufs_signal2_read,
	.write = spufs_signal2_write,
	.mmap = spufs_signal2_mmap,
};
1062
1063 static void spufs_signal1_type_set(void *data, u64 val)
1064 {
1065         struct spu_context *ctx = data;
1066
1067         spu_acquire(ctx);
1068         ctx->ops->signal1_type_set(ctx, val);
1069         spu_release(ctx);
1070 }
1071
1072 static u64 __spufs_signal1_type_get(void *data)
1073 {
1074         struct spu_context *ctx = data;
1075         return ctx->ops->signal1_type_get(ctx);
1076 }
1077
1078 static u64 spufs_signal1_type_get(void *data)
1079 {
1080         struct spu_context *ctx = data;
1081         u64 ret;
1082
1083         spu_acquire(ctx);
1084         ret = __spufs_signal1_type_get(data);
1085         spu_release(ctx);
1086
1087         return ret;
1088 }
1089 DEFINE_SIMPLE_ATTRIBUTE(spufs_signal1_type, spufs_signal1_type_get,
1090                                         spufs_signal1_type_set, "%llu");
1091
1092 static void spufs_signal2_type_set(void *data, u64 val)
1093 {
1094         struct spu_context *ctx = data;
1095
1096         spu_acquire(ctx);
1097         ctx->ops->signal2_type_set(ctx, val);
1098         spu_release(ctx);
1099 }
1100
1101 static u64 __spufs_signal2_type_get(void *data)
1102 {
1103         struct spu_context *ctx = data;
1104         return ctx->ops->signal2_type_get(ctx);
1105 }
1106
1107 static u64 spufs_signal2_type_get(void *data)
1108 {
1109         struct spu_context *ctx = data;
1110         u64 ret;
1111
1112         spu_acquire(ctx);
1113         ret = __spufs_signal2_type_get(data);
1114         spu_release(ctx);
1115
1116         return ret;
1117 }
1118 DEFINE_SIMPLE_ATTRIBUTE(spufs_signal2_type, spufs_signal2_type_get,
1119                                         spufs_signal2_type_set, "%llu");
1120
#if SPUFS_MMAP_4K
/* Fault handler for the 4k mss mapping at problem-state offset 0. */
static unsigned long spufs_mss_mmap_nopfn(struct vm_area_struct *vma,
					  unsigned long address)
{
	return spufs_ps_nopfn(vma, address, 0x0000, 0x1000);
}

static struct vm_operations_struct spufs_mss_mmap_vmops = {
	.nopfn = spufs_mss_mmap_nopfn,
};

/*
 * mmap support for the mss area [0x0000 - 0x0fff].
 * NOTE(review): the old comment called this the "problem state MFC DMA
 * area", but that area is mapped at offset 0x3000 by spufs_mfc_mmap --
 * presumably a copy-paste; confirm what the 0x0000 page actually is.
 */
static int spufs_mss_mmap(struct file *file, struct vm_area_struct *vma)
{
	if (!(vma->vm_flags & VM_SHARED))
		return -EINVAL;

	vma->vm_flags |= VM_IO | VM_PFNMAP;
	vma->vm_page_prot = __pgprot(pgprot_val(vma->vm_page_prot)
				     | _PAGE_NO_CACHE | _PAGE_GUARDED);

	vma->vm_ops = &spufs_mss_mmap_vmops;
	return 0;
}
#else /* SPUFS_MMAP_4K */
#define spufs_mss_mmap NULL
#endif /* !SPUFS_MMAP_4K */
1150
1151 static int spufs_mss_open(struct inode *inode, struct file *file)
1152 {
1153         struct spufs_inode_info *i = SPUFS_I(inode);
1154         struct spu_context *ctx = i->i_ctx;
1155
1156         file->private_data = i->i_ctx;
1157
1158         spin_lock(&ctx->mapping_lock);
1159         if (!i->i_openers++)
1160                 ctx->mss = inode->i_mapping;
1161         spin_unlock(&ctx->mapping_lock);
1162         return nonseekable_open(inode, file);
1163 }
1164
1165 static int
1166 spufs_mss_release(struct inode *inode, struct file *file)
1167 {
1168         struct spufs_inode_info *i = SPUFS_I(inode);
1169         struct spu_context *ctx = i->i_ctx;
1170
1171         spin_lock(&ctx->mapping_lock);
1172         if (!--i->i_openers)
1173                 ctx->mss = NULL;
1174         spin_unlock(&ctx->mapping_lock);
1175         return 0;
1176 }
1177
/* File operations for the per-context "mss" file (mmap-only access). */
static const struct file_operations spufs_mss_fops = {
	.open    = spufs_mss_open,
	.release = spufs_mss_release,
	.mmap    = spufs_mss_mmap,
};
1183
/* Fault handler covering the whole 128k (0x20000) problem state area. */
static unsigned long spufs_psmap_mmap_nopfn(struct vm_area_struct *vma,
					    unsigned long address)
{
	return spufs_ps_nopfn(vma, address, 0x0000, 0x20000);
}

static struct vm_operations_struct spufs_psmap_mmap_vmops = {
	.nopfn = spufs_psmap_mmap_nopfn,
};
1193
1194 /*
1195  * mmap support for full problem state area [0x00000 - 0x1ffff].
1196  */
1197 static int spufs_psmap_mmap(struct file *file, struct vm_area_struct *vma)
1198 {
1199         if (!(vma->vm_flags & VM_SHARED))
1200                 return -EINVAL;
1201
1202         vma->vm_flags |= VM_IO | VM_PFNMAP;
1203         vma->vm_page_prot = __pgprot(pgprot_val(vma->vm_page_prot)
1204                                      | _PAGE_NO_CACHE | _PAGE_GUARDED);
1205
1206         vma->vm_ops = &spufs_psmap_mmap_vmops;
1207         return 0;
1208 }
1209
1210 static int spufs_psmap_open(struct inode *inode, struct file *file)
1211 {
1212         struct spufs_inode_info *i = SPUFS_I(inode);
1213         struct spu_context *ctx = i->i_ctx;
1214
1215         spin_lock(&ctx->mapping_lock);
1216         file->private_data = i->i_ctx;
1217         if (!i->i_openers++)
1218                 ctx->psmap = inode->i_mapping;
1219         spin_unlock(&ctx->mapping_lock);
1220         return nonseekable_open(inode, file);
1221 }
1222
1223 static int
1224 spufs_psmap_release(struct inode *inode, struct file *file)
1225 {
1226         struct spufs_inode_info *i = SPUFS_I(inode);
1227         struct spu_context *ctx = i->i_ctx;
1228
1229         spin_lock(&ctx->mapping_lock);
1230         if (!--i->i_openers)
1231                 ctx->psmap = NULL;
1232         spin_unlock(&ctx->mapping_lock);
1233         return 0;
1234 }
1235
/* File operations for the per-context "psmap" file (mmap-only access). */
static const struct file_operations spufs_psmap_fops = {
	.open    = spufs_psmap_open,
	.release = spufs_psmap_release,
	.mmap    = spufs_psmap_mmap,
};
1241
1242
#if SPUFS_MMAP_4K
/* Fault handler for the 4k MFC command area at problem-state offset 0x3000. */
static unsigned long spufs_mfc_mmap_nopfn(struct vm_area_struct *vma,
					  unsigned long address)
{
	return spufs_ps_nopfn(vma, address, 0x3000, 0x1000);
}

static struct vm_operations_struct spufs_mfc_mmap_vmops = {
	.nopfn = spufs_mfc_mmap_nopfn,
};

/*
 * mmap support for problem state MFC DMA area [0x3000 - 0x3fff].
 * (The old comment said [0x0000 - 0x0fff], but the handler above maps
 * offset 0x3000.)
 */
static int spufs_mfc_mmap(struct file *file, struct vm_area_struct *vma)
{
	if (!(vma->vm_flags & VM_SHARED))
		return -EINVAL;

	vma->vm_flags |= VM_IO | VM_PFNMAP;
	vma->vm_page_prot = __pgprot(pgprot_val(vma->vm_page_prot)
				     | _PAGE_NO_CACHE | _PAGE_GUARDED);

	vma->vm_ops = &spufs_mfc_mmap_vmops;
	return 0;
}
#else /* SPUFS_MMAP_4K */
#define spufs_mfc_mmap NULL
#endif /* !SPUFS_MMAP_4K */
1272
/*
 * Open the per-context MFC command file.  DMA is restricted to the
 * address space that owns the context, and only a single inode
 * reference may exist at open time.
 */
static int spufs_mfc_open(struct inode *inode, struct file *file)
{
	struct spufs_inode_info *i = SPUFS_I(inode);
	struct spu_context *ctx = i->i_ctx;

	/* we don't want to deal with DMA into other processes */
	if (ctx->owner != current->mm)
		return -EINVAL;

	if (atomic_read(&inode->i_count) != 1)
		return -EBUSY;

	spin_lock(&ctx->mapping_lock);
	file->private_data = ctx;
	if (!i->i_openers++)
		ctx->mfc = inode->i_mapping;
	spin_unlock(&ctx->mapping_lock);
	return nonseekable_open(inode, file);
}
1292
1293 static int
1294 spufs_mfc_release(struct inode *inode, struct file *file)
1295 {
1296         struct spufs_inode_info *i = SPUFS_I(inode);
1297         struct spu_context *ctx = i->i_ctx;
1298
1299         spin_lock(&ctx->mapping_lock);
1300         if (!--i->i_openers)
1301                 ctx->mfc = NULL;
1302         spin_unlock(&ctx->mapping_lock);
1303         return 0;
1304 }
1305
/* interrupt-level mfc callback function. */
void spufs_mfc_callback(struct spu *spu)
{
	struct spu_context *ctx = spu->ctx;

	/* Wake anyone sleeping on MFC queue space or tag completion. */
	wake_up_all(&ctx->mfc_wq);

	pr_debug("%s %s\n", __FUNCTION__, spu->name);
	if (ctx->mfc_fasync) {
		u32 free_elements, tagstatus;
		unsigned int mask;

		/* no need for spu_acquire in interrupt context */
		free_elements = ctx->ops->get_mfc_free_elements(ctx);
		tagstatus = ctx->ops->read_mfc_tagstatus(ctx);

		/* Writable when the command queue has free slots (low 16
		 * bits), readable when a waited-on tag group completed. */
		mask = 0;
		if (free_elements & 0xffff)
			mask |= POLLOUT;
		if (tagstatus & ctx->tagwait)
			mask |= POLLIN;

		kill_fasync(&ctx->mfc_fasync, SIGIO, mask);
	}
}
1331
/*
 * Check for completed tag groups.  Returns 1 with *status holding the
 * completed groups (which are also removed from ctx->tagwait); returns
 * 0 after re-arming the completion interrupt so the caller keeps
 * sleeping on mfc_wq.
 */
static int spufs_read_mfc_tagstatus(struct spu_context *ctx, u32 *status)
{
	/* See if there is one tag group is complete */
	/* FIXME we need locking around tagwait */
	*status = ctx->ops->read_mfc_tagstatus(ctx) & ctx->tagwait;
	ctx->tagwait &= ~*status;
	if (*status)
		return 1;

	/* enable interrupt waiting for any tag group,
	   may silently fail if interrupts are already enabled */
	ctx->ops->set_mfc_query(ctx, ctx->tagwait, 1);
	return 0;
}
1346
1347 static ssize_t spufs_mfc_read(struct file *file, char __user *buffer,
1348                         size_t size, loff_t *pos)
1349 {
1350         struct spu_context *ctx = file->private_data;
1351         int ret = -EINVAL;
1352         u32 status;
1353
1354         if (size != 4)
1355                 goto out;
1356
1357         spu_acquire(ctx);
1358         if (file->f_flags & O_NONBLOCK) {
1359                 status = ctx->ops->read_mfc_tagstatus(ctx);
1360                 if (!(status & ctx->tagwait))
1361                         ret = -EAGAIN;
1362                 else
1363                         ctx->tagwait &= ~status;
1364         } else {
1365                 ret = spufs_wait(ctx->mfc_wq,
1366                            spufs_read_mfc_tagstatus(ctx, &status));
1367         }
1368         spu_release(ctx);
1369
1370         if (ret)
1371                 goto out;
1372
1373         ret = 4;
1374         if (copy_to_user(buffer, &status, 4))
1375                 ret = -EFAULT;
1376
1377 out:
1378         return ret;
1379 }
1380
1381 static int spufs_check_valid_dma(struct mfc_dma_command *cmd)
1382 {
1383         pr_debug("queueing DMA %x %lx %x %x %x\n", cmd->lsa,
1384                  cmd->ea, cmd->size, cmd->tag, cmd->cmd);
1385
1386         switch (cmd->cmd) {
1387         case MFC_PUT_CMD:
1388         case MFC_PUTF_CMD:
1389         case MFC_PUTB_CMD:
1390         case MFC_GET_CMD:
1391         case MFC_GETF_CMD:
1392         case MFC_GETB_CMD:
1393                 break;
1394         default:
1395                 pr_debug("invalid DMA opcode %x\n", cmd->cmd);
1396                 return -EIO;
1397         }
1398
1399         if ((cmd->lsa & 0xf) != (cmd->ea &0xf)) {
1400                 pr_debug("invalid DMA alignment, ea %lx lsa %x\n",
1401                                 cmd->ea, cmd->lsa);
1402                 return -EIO;
1403         }
1404
1405         switch (cmd->size & 0xf) {
1406         case 1:
1407                 break;
1408         case 2:
1409                 if (cmd->lsa & 1)
1410                         goto error;
1411                 break;
1412         case 4:
1413                 if (cmd->lsa & 3)
1414                         goto error;
1415                 break;
1416         case 8:
1417                 if (cmd->lsa & 7)
1418                         goto error;
1419                 break;
1420         case 0:
1421                 if (cmd->lsa & 15)
1422                         goto error;
1423                 break;
1424         error:
1425         default:
1426                 pr_debug("invalid DMA alignment %x for size %x\n",
1427                         cmd->lsa & 0xf, cmd->size);
1428                 return -EIO;
1429         }
1430
1431         if (cmd->size > 16 * 1024) {
1432                 pr_debug("invalid DMA size %x\n", cmd->size);
1433                 return -EIO;
1434         }
1435
1436         if (cmd->tag & 0xfff0) {
1437                 /* we reserve the higher tag numbers for kernel use */
1438                 pr_debug("invalid DMA tag\n");
1439                 return -EIO;
1440         }
1441
1442         if (cmd->class) {
1443                 /* not supported in this version */
1444                 pr_debug("invalid DMA class\n");
1445                 return -EIO;
1446         }
1447
1448         return 0;
1449 }
1450
/*
 * Try to queue one MFC command.  Returns 1 when the attempt finished
 * (success, or a hard error left in *error); returns 0 when the queue
 * is still full so the caller should keep sleeping on mfc_wq.
 */
static int spu_send_mfc_command(struct spu_context *ctx,
				struct mfc_dma_command cmd,
				int *error)
{
	*error = ctx->ops->send_mfc_command(ctx, &cmd);
	if (*error == -EAGAIN) {
		/* wait for any tag group to complete
		   so we have space for the new command */
		ctx->ops->set_mfc_query(ctx, ctx->tagwait, 1);
		/* try again, because the queue might be
		   empty again */
		*error = ctx->ops->send_mfc_command(ctx, &cmd);
		if (*error == -EAGAIN)
			return 0;
	}
	return 1;
}
1468
/*
 * Queue one MFC DMA command.  The write must be exactly one struct
 * mfc_dma_command.  On success the command's tag group is added to
 * ctx->tagwait so completion can be observed via read/poll.
 */
static ssize_t spufs_mfc_write(struct file *file, const char __user *buffer,
			size_t size, loff_t *pos)
{
	struct spu_context *ctx = file->private_data;
	struct mfc_dma_command cmd;
	int ret = -EINVAL;

	if (size != sizeof cmd)
		goto out;

	ret = -EFAULT;
	if (copy_from_user(&cmd, buffer, sizeof cmd))
		goto out;

	/* Validate opcode, alignment, size, tag and class before queueing. */
	ret = spufs_check_valid_dma(&cmd);
	if (ret)
		goto out;

	/* The context must be loaded on an SPU for the queue to exist. */
	ret = spu_acquire_runnable(ctx, 0);
	if (ret)
		goto out;

	if (file->f_flags & O_NONBLOCK) {
		ret = ctx->ops->send_mfc_command(ctx, &cmd);
	} else {
		int status;
		/* sleep until the command is accepted by the queue */
		ret = spufs_wait(ctx->mfc_wq,
				 spu_send_mfc_command(ctx, cmd, &status));
		if (status)
			ret = status;
	}
	spu_release(ctx);

	if (ret)
		goto out;

	ctx->tagwait |= 1 << cmd.tag;
	ret = size;

out:
	return ret;
}
1511
/*
 * Poll for MFC queue space (writable) and completed tag groups
 * (readable).  The query is re-armed first so the mfc_wq wait gets
 * woken by the interrupt callback.
 */
static unsigned int spufs_mfc_poll(struct file *file,poll_table *wait)
{
	struct spu_context *ctx = file->private_data;
	u32 free_elements, tagstatus;
	unsigned int mask;

	spu_acquire(ctx);
	ctx->ops->set_mfc_query(ctx, ctx->tagwait, 2);
	free_elements = ctx->ops->get_mfc_free_elements(ctx);
	tagstatus = ctx->ops->read_mfc_tagstatus(ctx);
	spu_release(ctx);

	poll_wait(file, &ctx->mfc_wq, wait);

	/* low 16 bits of free_elements carry the free queue slot count */
	mask = 0;
	if (free_elements & 0xffff)
		mask |= POLLOUT | POLLWRNORM;
	if (tagstatus & ctx->tagwait)
		mask |= POLLIN | POLLRDNORM;

	pr_debug("%s: free %d tagstatus %d tagwait %d\n", __FUNCTION__,
		free_elements, tagstatus, ctx->tagwait);

	return mask;
}
1537
/*
 * Flush: intended to wait until all waited-on tag groups complete.
 * The real wait is compiled out (#if 0) because it currently hangs,
 * so this acquires and releases the context and reports success.
 */
static int spufs_mfc_flush(struct file *file, fl_owner_t id)
{
	struct spu_context *ctx = file->private_data;
	int ret;

	spu_acquire(ctx);
#if 0
/* this currently hangs */
	ret = spufs_wait(ctx->mfc_wq,
			 ctx->ops->set_mfc_query(ctx, ctx->tagwait, 2));
	if (ret)
		goto out;
	ret = spufs_wait(ctx->mfc_wq,
			 ctx->ops->read_mfc_tagstatus(ctx) == ctx->tagwait);
out:
#else
	ret = 0;
#endif
	spu_release(ctx);

	return ret;
}
1560
/* fsync on the mfc file is equivalent to a flush; datasync is ignored. */
static int spufs_mfc_fsync(struct file *file, struct dentry *dentry,
			   int datasync)
{
	return spufs_mfc_flush(file, NULL);
}
1566
/* Register/unregister this file for SIGIO delivery on MFC events
 * (see the kill_fasync call in spufs_mfc_callback). */
static int spufs_mfc_fasync(int fd, struct file *file, int on)
{
	struct spu_context *ctx = file->private_data;

	return fasync_helper(fd, file, on, &ctx->mfc_fasync);
}
1573
/* File operations for the per-context "mfc" DMA command file. */
static const struct file_operations spufs_mfc_fops = {
	.open    = spufs_mfc_open,
	.release = spufs_mfc_release,
	.read    = spufs_mfc_read,
	.write   = spufs_mfc_write,
	.poll    = spufs_mfc_poll,
	.flush   = spufs_mfc_flush,
	.fsync   = spufs_mfc_fsync,
	.fasync  = spufs_mfc_fasync,
	.mmap    = spufs_mfc_mmap,
};
1585
1586 static void spufs_npc_set(void *data, u64 val)
1587 {
1588         struct spu_context *ctx = data;
1589         spu_acquire(ctx);
1590         ctx->ops->npc_write(ctx, val);
1591         spu_release(ctx);
1592 }
1593
1594 static u64 spufs_npc_get(void *data)
1595 {
1596         struct spu_context *ctx = data;
1597         u64 ret;
1598         spu_acquire(ctx);
1599         ret = ctx->ops->npc_read(ctx);
1600         spu_release(ctx);
1601         return ret;
1602 }
1603 DEFINE_SIMPLE_ATTRIBUTE(spufs_npc_ops, spufs_npc_get, spufs_npc_set,
1604                         "0x%llx\n")
1605
1606 static void spufs_decr_set(void *data, u64 val)
1607 {
1608         struct spu_context *ctx = data;
1609         struct spu_lscsa *lscsa = ctx->csa.lscsa;
1610         spu_acquire_saved(ctx);
1611         lscsa->decr.slot[0] = (u32) val;
1612         spu_release(ctx);
1613 }
1614
1615 static u64 __spufs_decr_get(void *data)
1616 {
1617         struct spu_context *ctx = data;
1618         struct spu_lscsa *lscsa = ctx->csa.lscsa;
1619         return lscsa->decr.slot[0];
1620 }
1621
1622 static u64 spufs_decr_get(void *data)
1623 {
1624         struct spu_context *ctx = data;
1625         u64 ret;
1626         spu_acquire_saved(ctx);
1627         ret = __spufs_decr_get(data);
1628         spu_release(ctx);
1629         return ret;
1630 }
1631 DEFINE_SIMPLE_ATTRIBUTE(spufs_decr_ops, spufs_decr_get, spufs_decr_set,
1632                         "0x%llx\n")
1633
1634 static void spufs_decr_status_set(void *data, u64 val)
1635 {
1636         struct spu_context *ctx = data;
1637         struct spu_lscsa *lscsa = ctx->csa.lscsa;
1638         spu_acquire_saved(ctx);
1639         lscsa->decr_status.slot[0] = (u32) val;
1640         spu_release(ctx);
1641 }
1642
1643 static u64 __spufs_decr_status_get(void *data)
1644 {
1645         struct spu_context *ctx = data;
1646         struct spu_lscsa *lscsa = ctx->csa.lscsa;
1647         return lscsa->decr_status.slot[0];
1648 }
1649
1650 static u64 spufs_decr_status_get(void *data)
1651 {
1652         struct spu_context *ctx = data;
1653         u64 ret;
1654         spu_acquire_saved(ctx);
1655         ret = __spufs_decr_status_get(data);
1656         spu_release(ctx);
1657         return ret;
1658 }
1659 DEFINE_SIMPLE_ATTRIBUTE(spufs_decr_status_ops, spufs_decr_status_get,
1660                         spufs_decr_status_set, "0x%llx\n")
1661
1662 static void spufs_event_mask_set(void *data, u64 val)
1663 {
1664         struct spu_context *ctx = data;
1665         struct spu_lscsa *lscsa = ctx->csa.lscsa;
1666         spu_acquire_saved(ctx);
1667         lscsa->event_mask.slot[0] = (u32) val;
1668         spu_release(ctx);
1669 }
1670
1671 static u64 __spufs_event_mask_get(void *data)
1672 {
1673         struct spu_context *ctx = data;
1674         struct spu_lscsa *lscsa = ctx->csa.lscsa;
1675         return lscsa->event_mask.slot[0];
1676 }
1677
1678 static u64 spufs_event_mask_get(void *data)
1679 {
1680         struct spu_context *ctx = data;
1681         u64 ret;
1682         spu_acquire_saved(ctx);
1683         ret = __spufs_event_mask_get(data);
1684         spu_release(ctx);
1685         return ret;
1686 }
1687 DEFINE_SIMPLE_ATTRIBUTE(spufs_event_mask_ops, spufs_event_mask_get,
1688                         spufs_event_mask_set, "0x%llx\n")
1689
1690 static u64 __spufs_event_status_get(void *data)
1691 {
1692         struct spu_context *ctx = data;
1693         struct spu_state *state = &ctx->csa;
1694         u64 stat;
1695         stat = state->spu_chnlcnt_RW[0];
1696         if (stat)
1697                 return state->spu_chnldata_RW[0];
1698         return 0;
1699 }
1700
1701 static u64 spufs_event_status_get(void *data)
1702 {
1703         struct spu_context *ctx = data;
1704         u64 ret = 0;
1705
1706         spu_acquire_saved(ctx);
1707         ret = __spufs_event_status_get(data);
1708         spu_release(ctx);
1709         return ret;
1710 }
1711 DEFINE_SIMPLE_ATTRIBUTE(spufs_event_status_ops, spufs_event_status_get,
1712                         NULL, "0x%llx\n")
1713
1714 static void spufs_srr0_set(void *data, u64 val)
1715 {
1716         struct spu_context *ctx = data;
1717         struct spu_lscsa *lscsa = ctx->csa.lscsa;
1718         spu_acquire_saved(ctx);
1719         lscsa->srr0.slot[0] = (u32) val;
1720         spu_release(ctx);
1721 }
1722
1723 static u64 spufs_srr0_get(void *data)
1724 {
1725         struct spu_context *ctx = data;
1726         struct spu_lscsa *lscsa = ctx->csa.lscsa;
1727         u64 ret;
1728         spu_acquire_saved(ctx);
1729         ret = lscsa->srr0.slot[0];
1730         spu_release(ctx);
1731         return ret;
1732 }
1733 DEFINE_SIMPLE_ATTRIBUTE(spufs_srr0_ops, spufs_srr0_get, spufs_srr0_set,
1734                         "0x%llx\n")
1735
1736 static u64 spufs_id_get(void *data)
1737 {
1738         struct spu_context *ctx = data;
1739         u64 num;
1740
1741         spu_acquire(ctx);
1742         if (ctx->state == SPU_STATE_RUNNABLE)
1743                 num = ctx->spu->number;
1744         else
1745                 num = (unsigned int)-1;
1746         spu_release(ctx);
1747
1748         return num;
1749 }
1750 DEFINE_SIMPLE_ATTRIBUTE(spufs_id_ops, spufs_id_get, NULL, "0x%llx\n")
1751
1752 static u64 __spufs_object_id_get(void *data)
1753 {
1754         struct spu_context *ctx = data;
1755         return ctx->object_id;
1756 }
1757
1758 static u64 spufs_object_id_get(void *data)
1759 {
1760         /* FIXME: Should there really be no locking here? */
1761         return __spufs_object_id_get(data);
1762 }
1763
1764 static void spufs_object_id_set(void *data, u64 id)
1765 {
1766         struct spu_context *ctx = data;
1767         ctx->object_id = id;
1768 }
1769
1770 DEFINE_SIMPLE_ATTRIBUTE(spufs_object_id_ops, spufs_object_id_get,
1771                 spufs_object_id_set, "0x%llx\n");
1772
1773 static u64 __spufs_lslr_get(void *data)
1774 {
1775         struct spu_context *ctx = data;
1776         return ctx->csa.priv2.spu_lslr_RW;
1777 }
1778
1779 static u64 spufs_lslr_get(void *data)
1780 {
1781         struct spu_context *ctx = data;
1782         u64 ret;
1783
1784         spu_acquire_saved(ctx);
1785         ret = __spufs_lslr_get(data);
1786         spu_release(ctx);
1787
1788         return ret;
1789 }
1790 DEFINE_SIMPLE_ATTRIBUTE(spufs_lslr_ops, spufs_lslr_get, NULL, "0x%llx\n")
1791
1792 static int spufs_info_open(struct inode *inode, struct file *file)
1793 {
1794         struct spufs_inode_info *i = SPUFS_I(inode);
1795         struct spu_context *ctx = i->i_ctx;
1796         file->private_data = ctx;
1797         return 0;
1798 }
1799
1800 static ssize_t __spufs_mbox_info_read(struct spu_context *ctx,
1801                         char __user *buf, size_t len, loff_t *pos)
1802 {
1803         u32 mbox_stat;
1804         u32 data;
1805
1806         mbox_stat = ctx->csa.prob.mb_stat_R;
1807         if (mbox_stat & 0x0000ff) {
1808                 data = ctx->csa.prob.pu_mb_R;
1809         }
1810
1811         return simple_read_from_buffer(buf, len, pos, &data, sizeof data);
1812 }
1813
/* Read the mailbox snapshot from the saved state, under register_lock. */
static ssize_t spufs_mbox_info_read(struct file *file, char __user *buf,
				   size_t len, loff_t *pos)
{
	int ret;
	struct spu_context *ctx = file->private_data;

	if (!access_ok(VERIFY_WRITE, buf, len))
		return -EFAULT;

	spu_acquire_saved(ctx);
	spin_lock(&ctx->csa.register_lock);
	ret = __spufs_mbox_info_read(ctx, buf, len, pos);
	spin_unlock(&ctx->csa.register_lock);
	spu_release(ctx);

	return ret;
}
1831
/* File operations for the read-only "mbox_info" debug file. */
static const struct file_operations spufs_mbox_info_fops = {
	.open = spufs_info_open,
	.read = spufs_mbox_info_read,
	.llseek  = generic_file_llseek,
};
1837
1838 static ssize_t __spufs_ibox_info_read(struct spu_context *ctx,
1839                                 char __user *buf, size_t len, loff_t *pos)
1840 {
1841         u32 ibox_stat;
1842         u32 data;
1843
1844         ibox_stat = ctx->csa.prob.mb_stat_R;
1845         if (ibox_stat & 0xff0000) {
1846                 data = ctx->csa.priv2.puint_mb_R;
1847         }
1848
1849         return simple_read_from_buffer(buf, len, pos, &data, sizeof data);
1850 }
1851
/* Read the ibox snapshot from the saved state, under register_lock. */
static ssize_t spufs_ibox_info_read(struct file *file, char __user *buf,
				   size_t len, loff_t *pos)
{
	struct spu_context *ctx = file->private_data;
	int ret;

	if (!access_ok(VERIFY_WRITE, buf, len))
		return -EFAULT;

	spu_acquire_saved(ctx);
	spin_lock(&ctx->csa.register_lock);
	ret = __spufs_ibox_info_read(ctx, buf, len, pos);
	spin_unlock(&ctx->csa.register_lock);
	spu_release(ctx);

	return ret;
}
1869
/* File operations for the read-only "ibox_info" debug file. */
static const struct file_operations spufs_ibox_info_fops = {
	.open = spufs_info_open,
	.read = spufs_ibox_info_read,
	.llseek  = generic_file_llseek,
};
1875
1876 static ssize_t __spufs_wbox_info_read(struct spu_context *ctx,
1877                         char __user *buf, size_t len, loff_t *pos)
1878 {
1879         int i, cnt;
1880         u32 data[4];
1881         u32 wbox_stat;
1882
1883         wbox_stat = ctx->csa.prob.mb_stat_R;
1884         cnt = 4 - ((wbox_stat & 0x00ff00) >> 8);
1885         for (i = 0; i < cnt; i++) {
1886                 data[i] = ctx->csa.spu_mailbox_data[i];
1887         }
1888
1889         return simple_read_from_buffer(buf, len, pos, &data,
1890                                 cnt * sizeof(u32));
1891 }
1892
/* Read the wbox snapshot from the saved state, under register_lock. */
static ssize_t spufs_wbox_info_read(struct file *file, char __user *buf,
				   size_t len, loff_t *pos)
{
	struct spu_context *ctx = file->private_data;
	int ret;

	if (!access_ok(VERIFY_WRITE, buf, len))
		return -EFAULT;

	spu_acquire_saved(ctx);
	spin_lock(&ctx->csa.register_lock);
	ret = __spufs_wbox_info_read(ctx, buf, len, pos);
	spin_unlock(&ctx->csa.register_lock);
	spu_release(ctx);

	return ret;
}
1910
/* File operations for the read-only "wbox_info" debug file. */
static const struct file_operations spufs_wbox_info_fops = {
	.open = spufs_info_open,
	.read = spufs_wbox_info_read,
	.llseek  = generic_file_llseek,
};
1916
1917 static ssize_t __spufs_dma_info_read(struct spu_context *ctx,
1918                         char __user *buf, size_t len, loff_t *pos)
1919 {
1920         struct spu_dma_info info;
1921         struct mfc_cq_sr *qp, *spuqp;
1922         int i;
1923
1924         info.dma_info_type = ctx->csa.priv2.spu_tag_status_query_RW;
1925         info.dma_info_mask = ctx->csa.lscsa->tag_mask.slot[0];
1926         info.dma_info_status = ctx->csa.spu_chnldata_RW[24];
1927         info.dma_info_stall_and_notify = ctx->csa.spu_chnldata_RW[25];
1928         info.dma_info_atomic_command_status = ctx->csa.spu_chnldata_RW[27];
1929         for (i = 0; i < 16; i++) {
1930                 qp = &info.dma_info_command_data[i];
1931                 spuqp = &ctx->csa.priv2.spuq[i];
1932
1933                 qp->mfc_cq_data0_RW = spuqp->mfc_cq_data0_RW;
1934                 qp->mfc_cq_data1_RW = spuqp->mfc_cq_data1_RW;
1935                 qp->mfc_cq_data2_RW = spuqp->mfc_cq_data2_RW;
1936                 qp->mfc_cq_data3_RW = spuqp->mfc_cq_data3_RW;
1937         }
1938
1939         return simple_read_from_buffer(buf, len, pos, &info,
1940                                 sizeof info);
1941 }
1942
/* Read the DMA queue snapshot from the saved state, under register_lock. */
static ssize_t spufs_dma_info_read(struct file *file, char __user *buf,
			      size_t len, loff_t *pos)
{
	struct spu_context *ctx = file->private_data;
	int ret;

	if (!access_ok(VERIFY_WRITE, buf, len))
		return -EFAULT;

	spu_acquire_saved(ctx);
	spin_lock(&ctx->csa.register_lock);
	ret = __spufs_dma_info_read(ctx, buf, len, pos);
	spin_unlock(&ctx->csa.register_lock);
	spu_release(ctx);

	return ret;
}
1960
/* File operations for the read-only "dma_info" debug file.
 * NOTE(review): unlike the mbox/ibox/wbox info files there is no
 * .llseek here -- confirm whether that omission is intentional. */
static const struct file_operations spufs_dma_info_fops = {
	.open = spufs_info_open,
	.read = spufs_dma_info_read,
};
1965
1966 static ssize_t __spufs_proxydma_info_read(struct spu_context *ctx,
1967                         char __user *buf, size_t len, loff_t *pos)
1968 {
1969         struct spu_proxydma_info info;
1970         struct mfc_cq_sr *qp, *puqp;
1971         int ret = sizeof info;
1972         int i;
1973
1974         if (len < ret)
1975                 return -EINVAL;
1976
1977         if (!access_ok(VERIFY_WRITE, buf, len))
1978                 return -EFAULT;
1979
1980         info.proxydma_info_type = ctx->csa.prob.dma_querytype_RW;
1981         info.proxydma_info_mask = ctx->csa.prob.dma_querymask_RW;
1982         info.proxydma_info_status = ctx->csa.prob.dma_tagstatus_R;
1983         for (i = 0; i < 8; i++) {
1984                 qp = &info.proxydma_info_command_data[i];
1985                 puqp = &ctx->csa.priv2.puq[i];
1986
1987                 qp->mfc_cq_data0_RW = puqp->mfc_cq_data0_RW;
1988                 qp->mfc_cq_data1_RW = puqp->mfc_cq_data1_RW;
1989                 qp->mfc_cq_data2_RW = puqp->mfc_cq_data2_RW;
1990                 qp->mfc_cq_data3_RW = puqp->mfc_cq_data3_RW;
1991         }
1992
1993         return simple_read_from_buffer(buf, len, pos, &info,
1994                                 sizeof info);
1995 }
1996
/*
 * read(2) handler for the per-context "proxydma_info" file: copy a
 * snapshot of the proxy DMA state to user space via
 * __spufs_proxydma_info_read(), which is shared with the coredump path.
 */
static ssize_t spufs_proxydma_info_read(struct file *file, char __user *buf,
				   size_t len, loff_t *pos)
{
	struct spu_context *ctx = file->private_data;
	int ret;

	/* Force the context into saved state so the CSA image is current. */
	spu_acquire_saved(ctx);
	spin_lock(&ctx->csa.register_lock);
	/*
	 * NOTE(review): the helper ends in simple_read_from_buffer()
	 * (copy_to_user), which can fault and sleep while register_lock
	 * is held — same concern as spufs_dma_info_read(); confirm
	 * against the helper's definition.
	 */
	ret = __spufs_proxydma_info_read(ctx, buf, len, pos);
	spin_unlock(&ctx->csa.register_lock);
	spu_release(ctx);

	return ret;
}
2011
/* File operations for the read-only "proxydma_info" context file. */
static const struct file_operations spufs_proxydma_info_fops = {
	.open = spufs_info_open,
	.read = spufs_proxydma_info_read,
};
2016
/*
 * Files created in the directory of a regular (schedulable) SPU
 * context.  Each entry is { name, file_operations, mode }; the array
 * is terminated by an empty entry.
 */
struct tree_descr spufs_dir_contents[] = {
	{ "mem",  &spufs_mem_fops,  0666, },
	{ "regs", &spufs_regs_fops,  0666, },
	{ "mbox", &spufs_mbox_fops, 0444, },
	{ "ibox", &spufs_ibox_fops, 0444, },
	{ "wbox", &spufs_wbox_fops, 0222, },
	{ "mbox_stat", &spufs_mbox_stat_fops, 0444, },
	{ "ibox_stat", &spufs_ibox_stat_fops, 0444, },
	{ "wbox_stat", &spufs_wbox_stat_fops, 0444, },
	{ "signal1", &spufs_signal1_fops, 0666, },
	{ "signal2", &spufs_signal2_fops, 0666, },
	{ "signal1_type", &spufs_signal1_type, 0666, },
	{ "signal2_type", &spufs_signal2_type, 0666, },
	{ "cntl", &spufs_cntl_fops,  0666, },
	{ "fpcr", &spufs_fpcr_fops, 0666, },
	{ "lslr", &spufs_lslr_ops, 0444, },
	{ "mfc", &spufs_mfc_fops, 0666, },
	{ "mss", &spufs_mss_fops, 0666, },
	{ "npc", &spufs_npc_ops, 0666, },
	{ "srr0", &spufs_srr0_ops, 0666, },
	{ "decr", &spufs_decr_ops, 0666, },
	{ "decr_status", &spufs_decr_status_ops, 0666, },
	{ "event_mask", &spufs_event_mask_ops, 0666, },
	{ "event_status", &spufs_event_status_ops, 0444, },
	{ "psmap", &spufs_psmap_fops, 0666, },
	{ "phys-id", &spufs_id_ops, 0666, },
	{ "object-id", &spufs_object_id_ops, 0666, },
	{ "mbox_info", &spufs_mbox_info_fops, 0444, },
	{ "ibox_info", &spufs_ibox_info_fops, 0444, },
	{ "wbox_info", &spufs_wbox_info_fops, 0444, },
	{ "dma_info", &spufs_dma_info_fops, 0444, },
	{ "proxydma_info", &spufs_proxydma_info_fops, 0444, },
	{},
};
2051
/*
 * Files created in the directory of a NOSCHED SPU context.  This is a
 * subset of spufs_dir_contents[]; the missing entries (regs, fpcr,
 * decr, the *_info files, ...) presumably all require saving the
 * context, which a NOSCHED context cannot do — confirm against the
 * context-creation flags.
 */
struct tree_descr spufs_dir_nosched_contents[] = {
	{ "mem",  &spufs_mem_fops,  0666, },
	{ "mbox", &spufs_mbox_fops, 0444, },
	{ "ibox", &spufs_ibox_fops, 0444, },
	{ "wbox", &spufs_wbox_fops, 0222, },
	{ "mbox_stat", &spufs_mbox_stat_fops, 0444, },
	{ "ibox_stat", &spufs_ibox_stat_fops, 0444, },
	{ "wbox_stat", &spufs_wbox_stat_fops, 0444, },
	{ "signal1", &spufs_signal1_fops, 0666, },
	{ "signal2", &spufs_signal2_fops, 0666, },
	{ "signal1_type", &spufs_signal1_type, 0666, },
	{ "signal2_type", &spufs_signal2_type, 0666, },
	{ "mss", &spufs_mss_fops, 0666, },
	{ "mfc", &spufs_mfc_fops, 0666, },
	{ "cntl", &spufs_cntl_fops,  0666, },
	{ "npc", &spufs_npc_ops, 0666, },
	{ "psmap", &spufs_psmap_fops, 0666, },
	{ "phys-id", &spufs_id_ops, 0666, },
	{ "object-id", &spufs_object_id_ops, 0666, },
	{},
};
2073
/*
 * Per-context files dumped as notes into an ELF coredump.  Each entry
 * names the file, supplies either a read callback or a get callback,
 * and gives the size of the dumped data in bytes (e.g. 128 * 16 for
 * the 128 sixteen-byte SPU registers; the small sizes such as 11 look
 * like the length of a decimal text representation — TODO confirm
 * against the coredump writer).  The array is NULL-terminated.
 */
struct spufs_coredump_reader spufs_coredump_read[] = {
	{ "regs", __spufs_regs_read, NULL, 128 * 16 },
	{ "fpcr", __spufs_fpcr_read, NULL, 16 },
	{ "lslr", NULL, __spufs_lslr_get, 11 },
	{ "decr", NULL, __spufs_decr_get, 11 },
	{ "decr_status", NULL, __spufs_decr_status_get, 11 },
	{ "mem", __spufs_mem_read, NULL, 256 * 1024, },
	{ "signal1", __spufs_signal1_read, NULL, 4 },
	{ "signal1_type", NULL, __spufs_signal1_type_get, 2 },
	{ "signal2", __spufs_signal2_read, NULL, 4 },
	{ "signal2_type", NULL, __spufs_signal2_type_get, 2 },
	{ "event_mask", NULL, __spufs_event_mask_get, 8 },
	{ "event_status", NULL, __spufs_event_status_get, 8 },
	{ "mbox_info", __spufs_mbox_info_read, NULL, 4 },
	{ "ibox_info", __spufs_ibox_info_read, NULL, 4 },
	{ "wbox_info", __spufs_wbox_info_read, NULL, 16 },
	{ "dma_info", __spufs_dma_info_read, NULL, 69 * 8 },
	{ "proxydma_info", __spufs_proxydma_info_read, NULL, 35 * 8 },
	{ "object-id", NULL, __spufs_object_id_get, 19 },
	{ },
};
2095 int spufs_coredump_num_notes = ARRAY_SIZE(spufs_coredump_read) - 1;
2096