arch/powerpc/platforms/cell/spufs/file.c
1 /*
2 * SPU file system -- file contents
3 *
4 * (C) Copyright IBM Deutschland Entwicklung GmbH 2005
5 *
6 * Author: Arnd Bergmann <arndb@de.ibm.com>
7 *
8 * This program is free software; you can redistribute it and/or modify
9 * it under the terms of the GNU General Public License as published by
10 * the Free Software Foundation; either version 2, or (at your option)
11 * any later version.
12 *
13 * This program is distributed in the hope that it will be useful,
14 * but WITHOUT ANY WARRANTY; without even the implied warranty of
15 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
16 * GNU General Public License for more details.
17 *
18 * You should have received a copy of the GNU General Public License
19 * along with this program; if not, write to the Free Software
20 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
21 */
22
23 #undef DEBUG
24
25 #include <linux/fs.h>
26 #include <linux/ioctl.h>
27 #include <linux/module.h>
28 #include <linux/pagemap.h>
29 #include <linux/poll.h>
30 #include <linux/ptrace.h>
31 #include <linux/seq_file.h>
32 #include <linux/marker.h>
33
34 #include <asm/io.h>
35 #include <asm/time.h>
36 #include <asm/spu.h>
37 #include <asm/spu_info.h>
38 #include <asm/uaccess.h>
39
40 #include "spufs.h"
41
42 #define SPUFS_MMAP_4K (PAGE_SIZE == 0x1000)
43
44 /* Simple attribute files */
45 struct spufs_attr {
46 int (*get)(void *, u64 *);
47 int (*set)(void *, u64);
48 char get_buf[24]; /* enough to store a u64 and "\n\0" */
49 char set_buf[24];
50 void *data;
51 const char *fmt; /* format for read operation */
52 struct mutex mutex; /* protects access to these buffers */
53 };
54
55 static int spufs_attr_open(struct inode *inode, struct file *file,
56 int (*get)(void *, u64 *), int (*set)(void *, u64),
57 const char *fmt)
58 {
59 struct spufs_attr *attr;
60
61 attr = kmalloc(sizeof(*attr), GFP_KERNEL);
62 if (!attr)
63 return -ENOMEM;
64
65 attr->get = get;
66 attr->set = set;
67 attr->data = inode->i_private;
68 attr->fmt = fmt;
69 mutex_init(&attr->mutex);
70 file->private_data = attr;
71
72 return nonseekable_open(inode, file);
73 }
74
75 static int spufs_attr_release(struct inode *inode, struct file *file)
76 {
77 kfree(file->private_data);
78 return 0;
79 }
80
81 static ssize_t spufs_attr_read(struct file *file, char __user *buf,
82 size_t len, loff_t *ppos)
83 {
84 struct spufs_attr *attr;
85 size_t size;
86 ssize_t ret;
87
88 attr = file->private_data;
89 if (!attr->get)
90 return -EACCES;
91
92 ret = mutex_lock_interruptible(&attr->mutex);
93 if (ret)
94 return ret;
95
96 if (*ppos) { /* continued read */
97 size = strlen(attr->get_buf);
98 } else { /* first read */
99 u64 val;
100 ret = attr->get(attr->data, &val);
101 if (ret)
102 goto out;
103
104 size = scnprintf(attr->get_buf, sizeof(attr->get_buf),
105 attr->fmt, (unsigned long long)val);
106 }
107
108 ret = simple_read_from_buffer(buf, len, ppos, attr->get_buf, size);
109 out:
110 mutex_unlock(&attr->mutex);
111 return ret;
112 }
113
114 static ssize_t spufs_attr_write(struct file *file, const char __user *buf,
115 size_t len, loff_t *ppos)
116 {
117 struct spufs_attr *attr;
118 u64 val;
119 size_t size;
120 ssize_t ret;
121
122 attr = file->private_data;
123 if (!attr->set)
124 return -EACCES;
125
126 ret = mutex_lock_interruptible(&attr->mutex);
127 if (ret)
128 return ret;
129
130 ret = -EFAULT;
131 size = min(sizeof(attr->set_buf) - 1, len);
132 if (copy_from_user(attr->set_buf, buf, size))
133 goto out;
134
135 ret = len; /* claim we got the whole input */
136 attr->set_buf[size] = '\0';
137 val = simple_strtol(attr->set_buf, NULL, 0);
138 attr->set(attr->data, val);
139 out:
140 mutex_unlock(&attr->mutex);
141 return ret;
142 }
143
144 #define DEFINE_SPUFS_SIMPLE_ATTRIBUTE(__fops, __get, __set, __fmt) \
145 static int __fops ## _open(struct inode *inode, struct file *file) \
146 { \
147 __simple_attr_check_format(__fmt, 0ull); \
148 return spufs_attr_open(inode, file, __get, __set, __fmt); \
149 } \
150 static struct file_operations __fops = { \
151 .owner = THIS_MODULE, \
152 .open = __fops ## _open, \
153 .release = spufs_attr_release, \
154 .read = spufs_attr_read, \
155 .write = spufs_attr_write, \
156 };
157
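/*
 * Illustrative sketch (editorial addition, not part of the original
 * source): the macro above builds a complete file_operations from a
 * get/set callback pair and a printf-style format.  A hypothetical
 * invocation looks like this; DEFINE_SPUFS_ATTRIBUTE further down
 * layers context acquisition on top of the same mechanism:
 *
 *	static int foo_get(void *data, u64 *val)
 *	{
 *		*val = 42;
 *		return 0;
 *	}
 *
 *	static int foo_set(void *data, u64 val)
 *	{
 *		return 0;
 *	}
 *
 *	DEFINE_SPUFS_SIMPLE_ATTRIBUTE(foo_fops, foo_get, foo_set, "0x%llx\n");
 */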
158
159 static int
160 spufs_mem_open(struct inode *inode, struct file *file)
161 {
162 struct spufs_inode_info *i = SPUFS_I(inode);
163 struct spu_context *ctx = i->i_ctx;
164
165 mutex_lock(&ctx->mapping_lock);
166 file->private_data = ctx;
167 if (!i->i_openers++)
168 ctx->local_store = inode->i_mapping;
169 mutex_unlock(&ctx->mapping_lock);
170 return 0;
171 }
172
173 static int
174 spufs_mem_release(struct inode *inode, struct file *file)
175 {
176 struct spufs_inode_info *i = SPUFS_I(inode);
177 struct spu_context *ctx = i->i_ctx;
178
179 mutex_lock(&ctx->mapping_lock);
180 if (!--i->i_openers)
181 ctx->local_store = NULL;
182 mutex_unlock(&ctx->mapping_lock);
183 return 0;
184 }
185
186 static ssize_t
187 __spufs_mem_read(struct spu_context *ctx, char __user *buffer,
188 size_t size, loff_t *pos)
189 {
190 char *local_store = ctx->ops->get_ls(ctx);
191 return simple_read_from_buffer(buffer, size, pos, local_store,
192 LS_SIZE);
193 }
194
195 static ssize_t
196 spufs_mem_read(struct file *file, char __user *buffer,
197 size_t size, loff_t *pos)
198 {
199 struct spu_context *ctx = file->private_data;
200 ssize_t ret;
201
202 ret = spu_acquire(ctx);
203 if (ret)
204 return ret;
205 ret = __spufs_mem_read(ctx, buffer, size, pos);
206 spu_release(ctx);
207
208 return ret;
209 }
210
211 static ssize_t
212 spufs_mem_write(struct file *file, const char __user *buffer,
213 size_t size, loff_t *ppos)
214 {
215 struct spu_context *ctx = file->private_data;
216 char *local_store;
217 loff_t pos = *ppos;
218 int ret;
219
220 if (pos < 0)
221 return -EINVAL;
222 if (pos > LS_SIZE)
223 return -EFBIG;
224 if (size > LS_SIZE - pos)
225 size = LS_SIZE - pos;
226
227 ret = spu_acquire(ctx);
228 if (ret)
229 return ret;
230
231 local_store = ctx->ops->get_ls(ctx);
232 ret = copy_from_user(local_store + pos, buffer, size);
233 spu_release(ctx);
234
235 if (ret)
236 return -EFAULT;
237 *ppos = pos + size;
238 return size;
239 }
240
241 static int
242 spufs_mem_mmap_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
243 {
244 struct spu_context *ctx = vma->vm_file->private_data;
245 unsigned long address = (unsigned long)vmf->virtual_address;
246 unsigned long pfn, offset;
247
248 #ifdef CONFIG_SPU_FS_64K_LS
249 struct spu_state *csa = &ctx->csa;
250 int psize;
251
252 /* Check what page size we are using */
253 psize = get_slice_psize(vma->vm_mm, address);
254
255 /* Some sanity checking */
256 BUG_ON(csa->use_big_pages != (psize == MMU_PAGE_64K));
257
258 /* Wow, 64K, cool, we need to align the address though */
259 if (csa->use_big_pages) {
260 BUG_ON(vma->vm_start & 0xffff);
261 address &= ~0xfffful;
262 }
263 #endif /* CONFIG_SPU_FS_64K_LS */
264
265 offset = vmf->pgoff << PAGE_SHIFT;
266 if (offset >= LS_SIZE)
267 return VM_FAULT_SIGBUS;
268
269 pr_debug("spufs_mem_mmap_fault address=0x%lx, offset=0x%lx\n",
270 address, offset);
271
272 if (spu_acquire(ctx))
273 return VM_FAULT_NOPAGE;
274
275 if (ctx->state == SPU_STATE_SAVED) {
276 vma->vm_page_prot = pgprot_cached(vma->vm_page_prot);
277 pfn = vmalloc_to_pfn(ctx->csa.lscsa->ls + offset);
278 } else {
279 vma->vm_page_prot = pgprot_noncached_wc(vma->vm_page_prot);
280 pfn = (ctx->spu->local_store_phys + offset) >> PAGE_SHIFT;
281 }
282 vm_insert_pfn(vma, address, pfn);
283
284 spu_release(ctx);
285
286 return VM_FAULT_NOPAGE;
287 }
288
289 static int spufs_mem_mmap_access(struct vm_area_struct *vma,
290 unsigned long address,
291 void *buf, int len, int write)
292 {
293 struct spu_context *ctx = vma->vm_file->private_data;
294 unsigned long offset = address - vma->vm_start;
295 char *local_store;
296
297 if (write && !(vma->vm_flags & VM_WRITE))
298 return -EACCES;
299 if (spu_acquire(ctx))
300 return -EINTR;
301 if (offset + len > vma->vm_end - vma->vm_start)
302 len = vma->vm_end - vma->vm_start - offset;
303 local_store = ctx->ops->get_ls(ctx);
304 if (write)
305 memcpy_toio(local_store + offset, buf, len);
306 else
307 memcpy_fromio(buf, local_store + offset, len);
308 spu_release(ctx);
309 return len;
310 }
311
312 static struct vm_operations_struct spufs_mem_mmap_vmops = {
313 .fault = spufs_mem_mmap_fault,
314 .access = spufs_mem_mmap_access,
315 };
316
317 static int spufs_mem_mmap(struct file *file, struct vm_area_struct *vma)
318 {
319 #ifdef CONFIG_SPU_FS_64K_LS
320 struct spu_context *ctx = file->private_data;
321 struct spu_state *csa = &ctx->csa;
322
323 /* Sanity check VMA alignment */
324 if (csa->use_big_pages) {
325 pr_debug("spufs_mem_mmap 64K, start=0x%lx, end=0x%lx,"
326 " pgoff=0x%lx\n", vma->vm_start, vma->vm_end,
327 vma->vm_pgoff);
328 if (vma->vm_start & 0xffff)
329 return -EINVAL;
330 if (vma->vm_pgoff & 0xf)
331 return -EINVAL;
332 }
333 #endif /* CONFIG_SPU_FS_64K_LS */
334
335 if (!(vma->vm_flags & VM_SHARED))
336 return -EINVAL;
337
338 vma->vm_flags |= VM_IO | VM_PFNMAP;
339 vma->vm_page_prot = pgprot_noncached_wc(vma->vm_page_prot);
340
341 vma->vm_ops = &spufs_mem_mmap_vmops;
342 return 0;
343 }
344
345 #ifdef CONFIG_SPU_FS_64K_LS
346 static unsigned long spufs_get_unmapped_area(struct file *file,
347 unsigned long addr, unsigned long len, unsigned long pgoff,
348 unsigned long flags)
349 {
350 struct spu_context *ctx = file->private_data;
351 struct spu_state *csa = &ctx->csa;
352
353 /* If not using big pages, fall back to the normal mm get_unmapped_area */
354 if (!csa->use_big_pages)
355 return current->mm->get_unmapped_area(file, addr, len,
356 pgoff, flags);
357
358 /* Else, try to obtain a 64K pages slice */
359 return slice_get_unmapped_area(addr, len, flags,
360 MMU_PAGE_64K, 1, 0);
361 }
362 #endif /* CONFIG_SPU_FS_64K_LS */
363
364 static const struct file_operations spufs_mem_fops = {
365 .open = spufs_mem_open,
366 .release = spufs_mem_release,
367 .read = spufs_mem_read,
368 .write = spufs_mem_write,
369 .llseek = generic_file_llseek,
370 .mmap = spufs_mem_mmap,
371 #ifdef CONFIG_SPU_FS_64K_LS
372 .get_unmapped_area = spufs_get_unmapped_area,
373 #endif
374 };
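/*
 * Illustrative sketch (editorial addition, not part of the original
 * source): mapping SPU local store from userspace through the mem file.
 * The mapping must be MAP_SHARED or spufs_mem_mmap() returns -EINVAL.
 * The mount point and context name are assumptions for the example;
 * the 256KiB length mirrors LS_SIZE:
 *
 *	int fd = open("/spu/my_ctx/mem", O_RDWR);
 *	void *ls = mmap(NULL, 256 * 1024, PROT_READ | PROT_WRITE,
 *			MAP_SHARED, fd, 0);
 *	if (ls == MAP_FAILED)
 *		perror("mmap");
 */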
375
376 static int spufs_ps_fault(struct vm_area_struct *vma,
377 struct vm_fault *vmf,
378 unsigned long ps_offs,
379 unsigned long ps_size)
380 {
381 struct spu_context *ctx = vma->vm_file->private_data;
382 unsigned long area, offset = vmf->pgoff << PAGE_SHIFT;
383 int ret = 0;
384
385 spu_context_nospu_trace(spufs_ps_fault__enter, ctx);
386
387 if (offset >= ps_size)
388 return VM_FAULT_SIGBUS;
389
390 if (fatal_signal_pending(current))
391 return VM_FAULT_SIGBUS;
392
393 /*
394 * Because we release the mmap_sem, the context may be destroyed while
395 * we're in spu_wait. Grab an extra reference so it isn't destroyed
396 * in the meantime.
397 */
398 get_spu_context(ctx);
399
400 /*
401 * We have to wait for context to be loaded before we have
402 * pages to hand out to the user, but we don't want to wait
403 * with the mmap_sem held.
404 * It is possible to drop the mmap_sem here, but then we need
405 * to return VM_FAULT_NOPAGE because the mappings may have
406 * hanged.
407 */
408 if (spu_acquire(ctx))
409 goto refault;
410
411 if (ctx->state == SPU_STATE_SAVED) {
412 up_read(&current->mm->mmap_sem);
413 spu_context_nospu_trace(spufs_ps_fault__sleep, ctx);
414 ret = spufs_wait(ctx->run_wq, ctx->state == SPU_STATE_RUNNABLE);
415 spu_context_trace(spufs_ps_fault__wake, ctx, ctx->spu);
416 down_read(&current->mm->mmap_sem);
417 } else {
418 area = ctx->spu->problem_phys + ps_offs;
419 vm_insert_pfn(vma, (unsigned long)vmf->virtual_address,
420 (area + offset) >> PAGE_SHIFT);
421 spu_context_trace(spufs_ps_fault__insert, ctx, ctx->spu);
422 }
423
424 if (!ret)
425 spu_release(ctx);
426
427 refault:
428 put_spu_context(ctx);
429 return VM_FAULT_NOPAGE;
430 }
431
432 #if SPUFS_MMAP_4K
433 static int spufs_cntl_mmap_fault(struct vm_area_struct *vma,
434 struct vm_fault *vmf)
435 {
436 return spufs_ps_fault(vma, vmf, 0x4000, SPUFS_CNTL_MAP_SIZE);
437 }
438
439 static struct vm_operations_struct spufs_cntl_mmap_vmops = {
440 .fault = spufs_cntl_mmap_fault,
441 };
442
443 /*
444 * mmap support for problem state control area [0x4000 - 0x4fff].
445 */
446 static int spufs_cntl_mmap(struct file *file, struct vm_area_struct *vma)
447 {
448 if (!(vma->vm_flags & VM_SHARED))
449 return -EINVAL;
450
451 vma->vm_flags |= VM_IO | VM_PFNMAP;
452 vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
453
454 vma->vm_ops = &spufs_cntl_mmap_vmops;
455 return 0;
456 }
457 #else /* SPUFS_MMAP_4K */
458 #define spufs_cntl_mmap NULL
459 #endif /* !SPUFS_MMAP_4K */
460
461 static int spufs_cntl_get(void *data, u64 *val)
462 {
463 struct spu_context *ctx = data;
464 int ret;
465
466 ret = spu_acquire(ctx);
467 if (ret)
468 return ret;
469 *val = ctx->ops->status_read(ctx);
470 spu_release(ctx);
471
472 return 0;
473 }
474
475 static int spufs_cntl_set(void *data, u64 val)
476 {
477 struct spu_context *ctx = data;
478 int ret;
479
480 ret = spu_acquire(ctx);
481 if (ret)
482 return ret;
483 ctx->ops->runcntl_write(ctx, val);
484 spu_release(ctx);
485
486 return 0;
487 }
488
489 static int spufs_cntl_open(struct inode *inode, struct file *file)
490 {
491 struct spufs_inode_info *i = SPUFS_I(inode);
492 struct spu_context *ctx = i->i_ctx;
493
494 mutex_lock(&ctx->mapping_lock);
495 file->private_data = ctx;
496 if (!i->i_openers++)
497 ctx->cntl = inode->i_mapping;
498 mutex_unlock(&ctx->mapping_lock);
499 return simple_attr_open(inode, file, spufs_cntl_get,
500 spufs_cntl_set, "0x%08lx");
501 }
502
503 static int
504 spufs_cntl_release(struct inode *inode, struct file *file)
505 {
506 struct spufs_inode_info *i = SPUFS_I(inode);
507 struct spu_context *ctx = i->i_ctx;
508
509 simple_attr_release(inode, file);
510
511 mutex_lock(&ctx->mapping_lock);
512 if (!--i->i_openers)
513 ctx->cntl = NULL;
514 mutex_unlock(&ctx->mapping_lock);
515 return 0;
516 }
517
518 static const struct file_operations spufs_cntl_fops = {
519 .open = spufs_cntl_open,
520 .release = spufs_cntl_release,
521 .read = simple_attr_read,
522 .write = simple_attr_write,
523 .mmap = spufs_cntl_mmap,
524 };
525
526 static int
527 spufs_regs_open(struct inode *inode, struct file *file)
528 {
529 struct spufs_inode_info *i = SPUFS_I(inode);
530 file->private_data = i->i_ctx;
531 return 0;
532 }
533
534 static ssize_t
535 __spufs_regs_read(struct spu_context *ctx, char __user *buffer,
536 size_t size, loff_t *pos)
537 {
538 struct spu_lscsa *lscsa = ctx->csa.lscsa;
539 return simple_read_from_buffer(buffer, size, pos,
540 lscsa->gprs, sizeof lscsa->gprs);
541 }
542
543 static ssize_t
544 spufs_regs_read(struct file *file, char __user *buffer,
545 size_t size, loff_t *pos)
546 {
547 int ret;
548 struct spu_context *ctx = file->private_data;
549
550 /* pre-check for file position: if we'd return EOF, there's no point
551 * causing a deschedule */
552 if (*pos >= sizeof(ctx->csa.lscsa->gprs))
553 return 0;
554
555 ret = spu_acquire_saved(ctx);
556 if (ret)
557 return ret;
558 ret = __spufs_regs_read(ctx, buffer, size, pos);
559 spu_release_saved(ctx);
560 return ret;
561 }
562
563 static ssize_t
564 spufs_regs_write(struct file *file, const char __user *buffer,
565 size_t size, loff_t *pos)
566 {
567 struct spu_context *ctx = file->private_data;
568 struct spu_lscsa *lscsa = ctx->csa.lscsa;
569 int ret;
570
571 size = min_t(ssize_t, sizeof lscsa->gprs - *pos, size);
572 if (size <= 0)
573 return -EFBIG;
574 *pos += size;
575
576 ret = spu_acquire_saved(ctx);
577 if (ret)
578 return ret;
579
580 ret = copy_from_user(lscsa->gprs + *pos - size,
581 buffer, size) ? -EFAULT : size;
582
583 spu_release_saved(ctx);
584 return ret;
585 }
586
587 static const struct file_operations spufs_regs_fops = {
588 .open = spufs_regs_open,
589 .read = spufs_regs_read,
590 .write = spufs_regs_write,
591 .llseek = generic_file_llseek,
592 };
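/*
 * Illustrative sketch (editorial addition, not part of the original
 * source): the regs file exposes the saved GPR array as a flat byte
 * stream, and each SPU register is 128 bits wide, so register N can be
 * fetched with pread() at offset N * 16.  The path is an assumption:
 *
 *	unsigned char gpr3[16];
 *	int fd = open("/spu/my_ctx/regs", O_RDONLY);
 *	pread(fd, gpr3, sizeof(gpr3), 3 * 16);
 */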
593
594 static ssize_t
595 __spufs_fpcr_read(struct spu_context *ctx, char __user * buffer,
596 size_t size, loff_t * pos)
597 {
598 struct spu_lscsa *lscsa = ctx->csa.lscsa;
599 return simple_read_from_buffer(buffer, size, pos,
600 &lscsa->fpcr, sizeof(lscsa->fpcr));
601 }
602
603 static ssize_t
604 spufs_fpcr_read(struct file *file, char __user * buffer,
605 size_t size, loff_t * pos)
606 {
607 int ret;
608 struct spu_context *ctx = file->private_data;
609
610 ret = spu_acquire_saved(ctx);
611 if (ret)
612 return ret;
613 ret = __spufs_fpcr_read(ctx, buffer, size, pos);
614 spu_release_saved(ctx);
615 return ret;
616 }
617
618 static ssize_t
619 spufs_fpcr_write(struct file *file, const char __user * buffer,
620 size_t size, loff_t * pos)
621 {
622 struct spu_context *ctx = file->private_data;
623 struct spu_lscsa *lscsa = ctx->csa.lscsa;
624 int ret;
625
626 size = min_t(ssize_t, sizeof(lscsa->fpcr) - *pos, size);
627 if (size <= 0)
628 return -EFBIG;
629
630 ret = spu_acquire_saved(ctx);
631 if (ret)
632 return ret;
633
634 *pos += size;
635 ret = copy_from_user((char *)&lscsa->fpcr + *pos - size,
636 buffer, size) ? -EFAULT : size;
637
638 spu_release_saved(ctx);
639 return ret;
640 }
641
642 static const struct file_operations spufs_fpcr_fops = {
643 .open = spufs_regs_open,
644 .read = spufs_fpcr_read,
645 .write = spufs_fpcr_write,
646 .llseek = generic_file_llseek,
647 };
648
649 /* generic open function for all pipe-like files */
650 static int spufs_pipe_open(struct inode *inode, struct file *file)
651 {
652 struct spufs_inode_info *i = SPUFS_I(inode);
653 file->private_data = i->i_ctx;
654
655 return nonseekable_open(inode, file);
656 }
657
658 /*
659 * Read as many bytes from the mailbox as possible, until
660 * one of the conditions becomes true:
661 *
662 * - no more data available in the mailbox
663 * - end of the user provided buffer
664 * - end of the mapped area
665 */
666 static ssize_t spufs_mbox_read(struct file *file, char __user *buf,
667 size_t len, loff_t *pos)
668 {
669 struct spu_context *ctx = file->private_data;
670 u32 mbox_data, __user *udata;
671 ssize_t count;
672
673 if (len < 4)
674 return -EINVAL;
675
676 if (!access_ok(VERIFY_WRITE, buf, len))
677 return -EFAULT;
678
679 udata = (void __user *)buf;
680
681 count = spu_acquire(ctx);
682 if (count)
683 return count;
684
685 for (count = 0; (count + 4) <= len; count += 4, udata++) {
686 int ret;
687 ret = ctx->ops->mbox_read(ctx, &mbox_data);
688 if (ret == 0)
689 break;
690
691 /*
692 * at the end of the mapped area, we can fault
693 * but still need to return the data we have
694 * read successfully so far.
695 */
696 ret = __put_user(mbox_data, udata);
697 if (ret) {
698 if (!count)
699 count = -EFAULT;
700 break;
701 }
702 }
703 spu_release(ctx);
704
705 if (!count)
706 count = -EAGAIN;
707
708 return count;
709 }
710
711 static const struct file_operations spufs_mbox_fops = {
712 .open = spufs_pipe_open,
713 .read = spufs_mbox_read,
714 };
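/*
 * Illustrative sketch (editorial addition, not part of the original
 * source): reading the mailbox from userspace.  Reads shorter than
 * 4 bytes fail with -EINVAL; otherwise the call returns as many whole
 * 32-bit entries as were available, or -EAGAIN when the mailbox is
 * empty.  The path is an assumption for the example:
 *
 *	u32 data;
 *	int fd = open("/spu/my_ctx/mbox", O_RDONLY);
 *	ssize_t n = read(fd, &data, sizeof(data));
 *	if (n == 4)
 *		handle(data);
 */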
715
716 static ssize_t spufs_mbox_stat_read(struct file *file, char __user *buf,
717 size_t len, loff_t *pos)
718 {
719 struct spu_context *ctx = file->private_data;
720 ssize_t ret;
721 u32 mbox_stat;
722
723 if (len < 4)
724 return -EINVAL;
725
726 ret = spu_acquire(ctx);
727 if (ret)
728 return ret;
729
730 mbox_stat = ctx->ops->mbox_stat_read(ctx) & 0xff;
731
732 spu_release(ctx);
733
734 if (copy_to_user(buf, &mbox_stat, sizeof mbox_stat))
735 return -EFAULT;
736
737 return 4;
738 }
739
740 static const struct file_operations spufs_mbox_stat_fops = {
741 .open = spufs_pipe_open,
742 .read = spufs_mbox_stat_read,
743 };
744
745 /* low-level ibox access function */
746 size_t spu_ibox_read(struct spu_context *ctx, u32 *data)
747 {
748 return ctx->ops->ibox_read(ctx, data);
749 }
750
751 static int spufs_ibox_fasync(int fd, struct file *file, int on)
752 {
753 struct spu_context *ctx = file->private_data;
754
755 return fasync_helper(fd, file, on, &ctx->ibox_fasync);
756 }
757
758 /* interrupt-level ibox callback function. */
759 void spufs_ibox_callback(struct spu *spu)
760 {
761 struct spu_context *ctx = spu->ctx;
762
763 if (!ctx)
764 return;
765
766 wake_up_all(&ctx->ibox_wq);
767 kill_fasync(&ctx->ibox_fasync, SIGIO, POLLIN);
768 }
769
770 /*
771 * Read as many bytes from the interrupt mailbox as possible, until
772 * one of the conditions becomes true:
773 *
774 * - no more data available in the mailbox
775 * - end of the user provided buffer
776 * - end of the mapped area
777 *
778 * If the file is opened without O_NONBLOCK, we wait here until
779 * any data is available, but return when we have been able to
780 * read something.
781 */
782 static ssize_t spufs_ibox_read(struct file *file, char __user *buf,
783 size_t len, loff_t *pos)
784 {
785 struct spu_context *ctx = file->private_data;
786 u32 ibox_data, __user *udata;
787 ssize_t count;
788
789 if (len < 4)
790 return -EINVAL;
791
792 if (!access_ok(VERIFY_WRITE, buf, len))
793 return -EFAULT;
794
795 udata = (void __user *)buf;
796
797 count = spu_acquire(ctx);
798 if (count)
799 goto out;
800
801 /* wait only for the first element */
802 count = 0;
803 if (file->f_flags & O_NONBLOCK) {
804 if (!spu_ibox_read(ctx, &ibox_data)) {
805 count = -EAGAIN;
806 goto out_unlock;
807 }
808 } else {
809 count = spufs_wait(ctx->ibox_wq, spu_ibox_read(ctx, &ibox_data));
810 if (count)
811 goto out;
812 }
813
814 /* if we can't write at all, return -EFAULT */
815 count = __put_user(ibox_data, udata);
816 if (count)
817 goto out_unlock;
818
819 for (count = 4, udata++; (count + 4) <= len; count += 4, udata++) {
820 int ret;
821 ret = ctx->ops->ibox_read(ctx, &ibox_data);
822 if (ret == 0)
823 break;
824 /*
825 * at the end of the mapped area, we can fault
826 * but still need to return the data we have
827 * read successfully so far.
828 */
829 ret = __put_user(ibox_data, udata);
830 if (ret)
831 break;
832 }
833
834 out_unlock:
835 spu_release(ctx);
836 out:
837 return count;
838 }
839
840 static unsigned int spufs_ibox_poll(struct file *file, poll_table *wait)
841 {
842 struct spu_context *ctx = file->private_data;
843 unsigned int mask;
844
845 poll_wait(file, &ctx->ibox_wq, wait);
846
847 /*
848 * For now keep this uninterruptible and also ignore the rule
849 * that poll should not sleep. Will be fixed later.
850 */
851 mutex_lock(&ctx->state_mutex);
852 mask = ctx->ops->mbox_stat_poll(ctx, POLLIN | POLLRDNORM);
853 spu_release(ctx);
854
855 return mask;
856 }
857
858 static const struct file_operations spufs_ibox_fops = {
859 .open = spufs_pipe_open,
860 .read = spufs_ibox_read,
861 .poll = spufs_ibox_poll,
862 .fasync = spufs_ibox_fasync,
863 };
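/*
 * Illustrative sketch (editorial addition, not part of the original
 * source): since the ibox file implements poll(), a reader can wait
 * for interrupt-mailbox data without blocking inside read().  The path
 * is an assumption for the example:
 *
 *	u32 data;
 *	struct pollfd pfd = {
 *		.fd = open("/spu/my_ctx/ibox", O_RDONLY | O_NONBLOCK),
 *		.events = POLLIN,
 *	};
 *	if (poll(&pfd, 1, -1) > 0 && (pfd.revents & POLLIN))
 *		read(pfd.fd, &data, sizeof(data));
 */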
864
865 static ssize_t spufs_ibox_stat_read(struct file *file, char __user *buf,
866 size_t len, loff_t *pos)
867 {
868 struct spu_context *ctx = file->private_data;
869 ssize_t ret;
870 u32 ibox_stat;
871
872 if (len < 4)
873 return -EINVAL;
874
875 ret = spu_acquire(ctx);
876 if (ret)
877 return ret;
878 ibox_stat = (ctx->ops->mbox_stat_read(ctx) >> 16) & 0xff;
879 spu_release(ctx);
880
881 if (copy_to_user(buf, &ibox_stat, sizeof ibox_stat))
882 return -EFAULT;
883
884 return 4;
885 }
886
887 static const struct file_operations spufs_ibox_stat_fops = {
888 .open = spufs_pipe_open,
889 .read = spufs_ibox_stat_read,
890 };
891
892 /* low-level mailbox write */
893 size_t spu_wbox_write(struct spu_context *ctx, u32 data)
894 {
895 return ctx->ops->wbox_write(ctx, data);
896 }
897
898 static int spufs_wbox_fasync(int fd, struct file *file, int on)
899 {
900 struct spu_context *ctx = file->private_data;
901 int ret;
902
903 ret = fasync_helper(fd, file, on, &ctx->wbox_fasync);
904
905 return ret;
906 }
907
908 /* interrupt-level wbox callback function. */
909 void spufs_wbox_callback(struct spu *spu)
910 {
911 struct spu_context *ctx = spu->ctx;
912
913 if (!ctx)
914 return;
915
916 wake_up_all(&ctx->wbox_wq);
917 kill_fasync(&ctx->wbox_fasync, SIGIO, POLLOUT);
918 }
919
920 /*
921 * Write as many bytes to the interrupt mailbox as possible, until
922 * one of the conditions becomes true:
923 *
924 * - the mailbox is full
925 * - end of the user provided buffer
926 * - end of the mapped area
927 *
928 * If the file is opened without O_NONBLOCK, we wait here until
929 * space is available, but return when we have been able to
930 * write something.
931 */
932 static ssize_t spufs_wbox_write(struct file *file, const char __user *buf,
933 size_t len, loff_t *pos)
934 {
935 struct spu_context *ctx = file->private_data;
936 u32 wbox_data, __user *udata;
937 ssize_t count;
938
939 if (len < 4)
940 return -EINVAL;
941
942 udata = (void __user *)buf;
943 if (!access_ok(VERIFY_READ, buf, len))
944 return -EFAULT;
945
946 if (__get_user(wbox_data, udata))
947 return -EFAULT;
948
949 count = spu_acquire(ctx);
950 if (count)
951 goto out;
952
953 /*
954 * make sure we can at least write one element, by waiting
955 * in case of !O_NONBLOCK
956 */
957 count = 0;
958 if (file->f_flags & O_NONBLOCK) {
959 if (!spu_wbox_write(ctx, wbox_data)) {
960 count = -EAGAIN;
961 goto out_unlock;
962 }
963 } else {
964 count = spufs_wait(ctx->wbox_wq, spu_wbox_write(ctx, wbox_data));
965 if (count)
966 goto out;
967 }
968
969
970 /* write as much as possible */
971 for (count = 4, udata++; (count + 4) <= len; count += 4, udata++) {
972 int ret;
973 ret = __get_user(wbox_data, udata);
974 if (ret)
975 break;
976
977 ret = spu_wbox_write(ctx, wbox_data);
978 if (ret == 0)
979 break;
980 }
981
982 out_unlock:
983 spu_release(ctx);
984 out:
985 return count;
986 }
987
988 static unsigned int spufs_wbox_poll(struct file *file, poll_table *wait)
989 {
990 struct spu_context *ctx = file->private_data;
991 unsigned int mask;
992
993 poll_wait(file, &ctx->wbox_wq, wait);
994
995 /*
996 * For now keep this uninterruptible and also ignore the rule
997 * that poll should not sleep. Will be fixed later.
998 */
999 mutex_lock(&ctx->state_mutex);
1000 mask = ctx->ops->mbox_stat_poll(ctx, POLLOUT | POLLWRNORM);
1001 spu_release(ctx);
1002
1003 return mask;
1004 }
1005
1006 static const struct file_operations spufs_wbox_fops = {
1007 .open = spufs_pipe_open,
1008 .write = spufs_wbox_write,
1009 .poll = spufs_wbox_poll,
1010 .fasync = spufs_wbox_fasync,
1011 };
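/*
 * Illustrative sketch (editorial addition, not part of the original
 * source): writing to the SPU's inbound mailbox.  Writes must be at
 * least 4 bytes and are consumed as whole 32-bit words; without
 * O_NONBLOCK the first word blocks until mailbox space is free.  The
 * path is an assumption for the example:
 *
 *	u32 data = 0x1234;
 *	int fd = open("/spu/my_ctx/wbox", O_WRONLY);
 *	write(fd, &data, sizeof(data));
 */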
1012
1013 static ssize_t spufs_wbox_stat_read(struct file *file, char __user *buf,
1014 size_t len, loff_t *pos)
1015 {
1016 struct spu_context *ctx = file->private_data;
1017 ssize_t ret;
1018 u32 wbox_stat;
1019
1020 if (len < 4)
1021 return -EINVAL;
1022
1023 ret = spu_acquire(ctx);
1024 if (ret)
1025 return ret;
1026 wbox_stat = (ctx->ops->mbox_stat_read(ctx) >> 8) & 0xff;
1027 spu_release(ctx);
1028
1029 if (copy_to_user(buf, &wbox_stat, sizeof wbox_stat))
1030 return -EFAULT;
1031
1032 return 4;
1033 }
1034
1035 static const struct file_operations spufs_wbox_stat_fops = {
1036 .open = spufs_pipe_open,
1037 .read = spufs_wbox_stat_read,
1038 };
1039
1040 static int spufs_signal1_open(struct inode *inode, struct file *file)
1041 {
1042 struct spufs_inode_info *i = SPUFS_I(inode);
1043 struct spu_context *ctx = i->i_ctx;
1044
1045 mutex_lock(&ctx->mapping_lock);
1046 file->private_data = ctx;
1047 if (!i->i_openers++)
1048 ctx->signal1 = inode->i_mapping;
1049 mutex_unlock(&ctx->mapping_lock);
1050 return nonseekable_open(inode, file);
1051 }
1052
1053 static int
1054 spufs_signal1_release(struct inode *inode, struct file *file)
1055 {
1056 struct spufs_inode_info *i = SPUFS_I(inode);
1057 struct spu_context *ctx = i->i_ctx;
1058
1059 mutex_lock(&ctx->mapping_lock);
1060 if (!--i->i_openers)
1061 ctx->signal1 = NULL;
1062 mutex_unlock(&ctx->mapping_lock);
1063 return 0;
1064 }
1065
1066 static ssize_t __spufs_signal1_read(struct spu_context *ctx, char __user *buf,
1067 size_t len, loff_t *pos)
1068 {
1069 int ret = 0;
1070 u32 data;
1071
1072 if (len < 4)
1073 return -EINVAL;
1074
1075 if (ctx->csa.spu_chnlcnt_RW[3]) {
1076 data = ctx->csa.spu_chnldata_RW[3];
1077 ret = 4;
1078 }
1079
1080 if (!ret)
1081 goto out;
1082
1083 if (copy_to_user(buf, &data, 4))
1084 return -EFAULT;
1085
1086 out:
1087 return ret;
1088 }
1089
1090 static ssize_t spufs_signal1_read(struct file *file, char __user *buf,
1091 size_t len, loff_t *pos)
1092 {
1093 int ret;
1094 struct spu_context *ctx = file->private_data;
1095
1096 ret = spu_acquire_saved(ctx);
1097 if (ret)
1098 return ret;
1099 ret = __spufs_signal1_read(ctx, buf, len, pos);
1100 spu_release_saved(ctx);
1101
1102 return ret;
1103 }
1104
1105 static ssize_t spufs_signal1_write(struct file *file, const char __user *buf,
1106 size_t len, loff_t *pos)
1107 {
1108 struct spu_context *ctx;
1109 ssize_t ret;
1110 u32 data;
1111
1112 ctx = file->private_data;
1113
1114 if (len < 4)
1115 return -EINVAL;
1116
1117 if (copy_from_user(&data, buf, 4))
1118 return -EFAULT;
1119
1120 ret = spu_acquire(ctx);
1121 if (ret)
1122 return ret;
1123 ctx->ops->signal1_write(ctx, data);
1124 spu_release(ctx);
1125
1126 return 4;
1127 }
1128
1129 static int
1130 spufs_signal1_mmap_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
1131 {
1132 #if SPUFS_SIGNAL_MAP_SIZE == 0x1000
1133 return spufs_ps_fault(vma, vmf, 0x14000, SPUFS_SIGNAL_MAP_SIZE);
1134 #elif SPUFS_SIGNAL_MAP_SIZE == 0x10000
1135 /* For 64k pages, both signal1 and signal2 can be used to mmap the whole
1136 * signal 1 and 2 area
1137 */
1138 return spufs_ps_fault(vma, vmf, 0x10000, SPUFS_SIGNAL_MAP_SIZE);
1139 #else
1140 #error unsupported page size
1141 #endif
1142 }
1143
1144 static struct vm_operations_struct spufs_signal1_mmap_vmops = {
1145 .fault = spufs_signal1_mmap_fault,
1146 };
1147
1148 static int spufs_signal1_mmap(struct file *file, struct vm_area_struct *vma)
1149 {
1150 if (!(vma->vm_flags & VM_SHARED))
1151 return -EINVAL;
1152
1153 vma->vm_flags |= VM_IO | VM_PFNMAP;
1154 vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
1155
1156 vma->vm_ops = &spufs_signal1_mmap_vmops;
1157 return 0;
1158 }
1159
1160 static const struct file_operations spufs_signal1_fops = {
1161 .open = spufs_signal1_open,
1162 .release = spufs_signal1_release,
1163 .read = spufs_signal1_read,
1164 .write = spufs_signal1_write,
1165 .mmap = spufs_signal1_mmap,
1166 };
1167
1168 static const struct file_operations spufs_signal1_nosched_fops = {
1169 .open = spufs_signal1_open,
1170 .release = spufs_signal1_release,
1171 .write = spufs_signal1_write,
1172 .mmap = spufs_signal1_mmap,
1173 };
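/*
 * Illustrative sketch (editorial addition, not part of the original
 * source): a 4-byte write to the signal1 file lands in the SPU's
 * signal notification 1 register via ctx->ops->signal1_write().  The
 * path is an assumption for the example:
 *
 *	u32 sig = 1;
 *	int fd = open("/spu/my_ctx/signal1", O_WRONLY);
 *	write(fd, &sig, sizeof(sig));
 */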
1174
1175 static int spufs_signal2_open(struct inode *inode, struct file *file)
1176 {
1177 struct spufs_inode_info *i = SPUFS_I(inode);
1178 struct spu_context *ctx = i->i_ctx;
1179
1180 mutex_lock(&ctx->mapping_lock);
1181 file->private_data = ctx;
1182 if (!i->i_openers++)
1183 ctx->signal2 = inode->i_mapping;
1184 mutex_unlock(&ctx->mapping_lock);
1185 return nonseekable_open(inode, file);
1186 }
1187
1188 static int
1189 spufs_signal2_release(struct inode *inode, struct file *file)
1190 {
1191 struct spufs_inode_info *i = SPUFS_I(inode);
1192 struct spu_context *ctx = i->i_ctx;
1193
1194 mutex_lock(&ctx->mapping_lock);
1195 if (!--i->i_openers)
1196 ctx->signal2 = NULL;
1197 mutex_unlock(&ctx->mapping_lock);
1198 return 0;
1199 }
1200
1201 static ssize_t __spufs_signal2_read(struct spu_context *ctx, char __user *buf,
1202 size_t len, loff_t *pos)
1203 {
1204 int ret = 0;
1205 u32 data;
1206
1207 if (len < 4)
1208 return -EINVAL;
1209
1210 if (ctx->csa.spu_chnlcnt_RW[4]) {
1211 data = ctx->csa.spu_chnldata_RW[4];
1212 ret = 4;
1213 }
1214
1215 if (!ret)
1216 goto out;
1217
1218 if (copy_to_user(buf, &data, 4))
1219 return -EFAULT;
1220
1221 out:
1222 return ret;
1223 }
1224
1225 static ssize_t spufs_signal2_read(struct file *file, char __user *buf,
1226 size_t len, loff_t *pos)
1227 {
1228 struct spu_context *ctx = file->private_data;
1229 int ret;
1230
1231 ret = spu_acquire_saved(ctx);
1232 if (ret)
1233 return ret;
1234 ret = __spufs_signal2_read(ctx, buf, len, pos);
1235 spu_release_saved(ctx);
1236
1237 return ret;
1238 }
1239
1240 static ssize_t spufs_signal2_write(struct file *file, const char __user *buf,
1241 size_t len, loff_t *pos)
1242 {
1243 struct spu_context *ctx;
1244 ssize_t ret;
1245 u32 data;
1246
1247 ctx = file->private_data;
1248
1249 if (len < 4)
1250 return -EINVAL;
1251
1252 if (copy_from_user(&data, buf, 4))
1253 return -EFAULT;
1254
1255 ret = spu_acquire(ctx);
1256 if (ret)
1257 return ret;
1258 ctx->ops->signal2_write(ctx, data);
1259 spu_release(ctx);
1260
1261 return 4;
1262 }
1263
1264 #if SPUFS_MMAP_4K
1265 static int
1266 spufs_signal2_mmap_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
1267 {
1268 #if SPUFS_SIGNAL_MAP_SIZE == 0x1000
1269 return spufs_ps_fault(vma, vmf, 0x1c000, SPUFS_SIGNAL_MAP_SIZE);
1270 #elif SPUFS_SIGNAL_MAP_SIZE == 0x10000
1271 /* For 64k pages, both signal1 and signal2 can be used to mmap the whole
1272 * signal 1 and 2 area
1273 */
1274 return spufs_ps_fault(vma, vmf, 0x10000, SPUFS_SIGNAL_MAP_SIZE);
1275 #else
1276 #error unsupported page size
1277 #endif
1278 }
1279
1280 static struct vm_operations_struct spufs_signal2_mmap_vmops = {
1281 .fault = spufs_signal2_mmap_fault,
1282 };
1283
1284 static int spufs_signal2_mmap(struct file *file, struct vm_area_struct *vma)
1285 {
1286 if (!(vma->vm_flags & VM_SHARED))
1287 return -EINVAL;
1288
1289 vma->vm_flags |= VM_IO | VM_PFNMAP;
1290 vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
1291
1292 vma->vm_ops = &spufs_signal2_mmap_vmops;
1293 return 0;
1294 }
1295 #else /* SPUFS_MMAP_4K */
1296 #define spufs_signal2_mmap NULL
1297 #endif /* !SPUFS_MMAP_4K */
1298
1299 static const struct file_operations spufs_signal2_fops = {
1300 .open = spufs_signal2_open,
1301 .release = spufs_signal2_release,
1302 .read = spufs_signal2_read,
1303 .write = spufs_signal2_write,
1304 .mmap = spufs_signal2_mmap,
1305 };
1306
1307 static const struct file_operations spufs_signal2_nosched_fops = {
1308 .open = spufs_signal2_open,
1309 .release = spufs_signal2_release,
1310 .write = spufs_signal2_write,
1311 .mmap = spufs_signal2_mmap,
1312 };
1313
1314 /*
1315 * This is a wrapper around DEFINE_SIMPLE_ATTRIBUTE which does the
1316 * work of acquiring (or not) the SPU context before calling through
1317 * to the actual get routine. The set routine is called directly.
1318 */
1319 #define SPU_ATTR_NOACQUIRE 0
1320 #define SPU_ATTR_ACQUIRE 1
1321 #define SPU_ATTR_ACQUIRE_SAVED 2
1322
1323 #define DEFINE_SPUFS_ATTRIBUTE(__name, __get, __set, __fmt, __acquire) \
1324 static int __##__get(void *data, u64 *val) \
1325 { \
1326 struct spu_context *ctx = data; \
1327 int ret = 0; \
1328 \
1329 if (__acquire == SPU_ATTR_ACQUIRE) { \
1330 ret = spu_acquire(ctx); \
1331 if (ret) \
1332 return ret; \
1333 *val = __get(ctx); \
1334 spu_release(ctx); \
1335 } else if (__acquire == SPU_ATTR_ACQUIRE_SAVED) { \
1336 ret = spu_acquire_saved(ctx); \
1337 if (ret) \
1338 return ret; \
1339 *val = __get(ctx); \
1340 spu_release_saved(ctx); \
1341 } else \
1342 *val = __get(ctx); \
1343 \
1344 return 0; \
1345 } \
1346 DEFINE_SPUFS_SIMPLE_ATTRIBUTE(__name, __##__get, __set, __fmt);
1347
1348 static int spufs_signal1_type_set(void *data, u64 val)
1349 {
1350 struct spu_context *ctx = data;
1351 int ret;
1352
1353 ret = spu_acquire(ctx);
1354 if (ret)
1355 return ret;
1356 ctx->ops->signal1_type_set(ctx, val);
1357 spu_release(ctx);
1358
1359 return 0;
1360 }
1361
1362 static u64 spufs_signal1_type_get(struct spu_context *ctx)
1363 {
1364 return ctx->ops->signal1_type_get(ctx);
1365 }
1366 DEFINE_SPUFS_ATTRIBUTE(spufs_signal1_type, spufs_signal1_type_get,
1367 spufs_signal1_type_set, "%llu\n", SPU_ATTR_ACQUIRE);
1368
1369
1370 static int spufs_signal2_type_set(void *data, u64 val)
1371 {
1372 struct spu_context *ctx = data;
1373 int ret;
1374
1375 ret = spu_acquire(ctx);
1376 if (ret)
1377 return ret;
1378 ctx->ops->signal2_type_set(ctx, val);
1379 spu_release(ctx);
1380
1381 return 0;
1382 }
1383
1384 static u64 spufs_signal2_type_get(struct spu_context *ctx)
1385 {
1386 return ctx->ops->signal2_type_get(ctx);
1387 }
1388 DEFINE_SPUFS_ATTRIBUTE(spufs_signal2_type, spufs_signal2_type_get,
1389 spufs_signal2_type_set, "%llu\n", SPU_ATTR_ACQUIRE);
1390
1391 #if SPUFS_MMAP_4K
1392 static int
1393 spufs_mss_mmap_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
1394 {
1395 return spufs_ps_fault(vma, vmf, 0x0000, SPUFS_MSS_MAP_SIZE);
1396 }
1397
1398 static struct vm_operations_struct spufs_mss_mmap_vmops = {
1399 .fault = spufs_mss_mmap_fault,
1400 };
1401
1402 /*
1403 * mmap support for problem state MFC DMA area [0x0000 - 0x0fff].
1404 */
1405 static int spufs_mss_mmap(struct file *file, struct vm_area_struct *vma)
1406 {
1407 if (!(vma->vm_flags & VM_SHARED))
1408 return -EINVAL;
1409
1410 vma->vm_flags |= VM_IO | VM_PFNMAP;
1411 vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
1412
1413 vma->vm_ops = &spufs_mss_mmap_vmops;
1414 return 0;
1415 }
1416 #else /* SPUFS_MMAP_4K */
1417 #define spufs_mss_mmap NULL
1418 #endif /* !SPUFS_MMAP_4K */
1419
1420 static int spufs_mss_open(struct inode *inode, struct file *file)
1421 {
1422 struct spufs_inode_info *i = SPUFS_I(inode);
1423 struct spu_context *ctx = i->i_ctx;
1424
1425 file->private_data = i->i_ctx;
1426
1427 mutex_lock(&ctx->mapping_lock);
1428 if (!i->i_openers++)
1429 ctx->mss = inode->i_mapping;
1430 mutex_unlock(&ctx->mapping_lock);
1431 return nonseekable_open(inode, file);
1432 }
1433
1434 static int
1435 spufs_mss_release(struct inode *inode, struct file *file)
1436 {
1437 struct spufs_inode_info *i = SPUFS_I(inode);
1438 struct spu_context *ctx = i->i_ctx;
1439
1440 mutex_lock(&ctx->mapping_lock);
1441 if (!--i->i_openers)
1442 ctx->mss = NULL;
1443 mutex_unlock(&ctx->mapping_lock);
1444 return 0;
1445 }
1446
1447 static const struct file_operations spufs_mss_fops = {
1448 .open = spufs_mss_open,
1449 .release = spufs_mss_release,
1450 .mmap = spufs_mss_mmap,
1451 };
1452
1453 static int
1454 spufs_psmap_mmap_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
1455 {
1456 return spufs_ps_fault(vma, vmf, 0x0000, SPUFS_PS_MAP_SIZE);
1457 }
1458
1459 static struct vm_operations_struct spufs_psmap_mmap_vmops = {
1460 .fault = spufs_psmap_mmap_fault,
1461 };
1462
1463 /*
1464 * mmap support for full problem state area [0x00000 - 0x1ffff].
1465 */
1466 static int spufs_psmap_mmap(struct file *file, struct vm_area_struct *vma)
1467 {
1468 if (!(vma->vm_flags & VM_SHARED))
1469 return -EINVAL;
1470
1471 vma->vm_flags |= VM_IO | VM_PFNMAP;
1472 vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
1473
1474 vma->vm_ops = &spufs_psmap_mmap_vmops;
1475 return 0;
1476 }
1477
1478 static int spufs_psmap_open(struct inode *inode, struct file *file)
1479 {
1480 struct spufs_inode_info *i = SPUFS_I(inode);
1481 struct spu_context *ctx = i->i_ctx;
1482
1483 mutex_lock(&ctx->mapping_lock);
1484 file->private_data = i->i_ctx;
1485 if (!i->i_openers++)
1486 ctx->psmap = inode->i_mapping;
1487 mutex_unlock(&ctx->mapping_lock);
1488 return nonseekable_open(inode, file);
1489 }
1490
1491 static int
1492 spufs_psmap_release(struct inode *inode, struct file *file)
1493 {
1494 struct spufs_inode_info *i = SPUFS_I(inode);
1495 struct spu_context *ctx = i->i_ctx;
1496
1497 mutex_lock(&ctx->mapping_lock);
1498 if (!--i->i_openers)
1499 ctx->psmap = NULL;
1500 mutex_unlock(&ctx->mapping_lock);
1501 return 0;
1502 }
1503
1504 static const struct file_operations spufs_psmap_fops = {
1505 .open = spufs_psmap_open,
1506 .release = spufs_psmap_release,
1507 .mmap = spufs_psmap_mmap,
1508 };
1509
1510
1511 #if SPUFS_MMAP_4K
1512 static int
1513 spufs_mfc_mmap_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
1514 {
1515 return spufs_ps_fault(vma, vmf, 0x3000, SPUFS_MFC_MAP_SIZE);
1516 }
1517
1518 static struct vm_operations_struct spufs_mfc_mmap_vmops = {
1519 .fault = spufs_mfc_mmap_fault,
1520 };
1521
1522 /*
1523 * mmap support for problem state MFC DMA area [0x0000 - 0x0fff].
1524 */
1525 static int spufs_mfc_mmap(struct file *file, struct vm_area_struct *vma)
1526 {
1527 if (!(vma->vm_flags & VM_SHARED))
1528 return -EINVAL;
1529
1530 vma->vm_flags |= VM_IO | VM_PFNMAP;
1531 vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
1532
1533 vma->vm_ops = &spufs_mfc_mmap_vmops;
1534 return 0;
1535 }
1536 #else /* SPUFS_MMAP_4K */
1537 #define spufs_mfc_mmap NULL
1538 #endif /* !SPUFS_MMAP_4K */
1539
1540 static int spufs_mfc_open(struct inode *inode, struct file *file)
1541 {
1542 struct spufs_inode_info *i = SPUFS_I(inode);
1543 struct spu_context *ctx = i->i_ctx;
1544
1545 /* we don't want to deal with DMA into other processes */
1546 if (ctx->owner != current->mm)
1547 return -EINVAL;
1548
1549 if (atomic_read(&inode->i_count) != 1)
1550 return -EBUSY;
1551
1552 mutex_lock(&ctx->mapping_lock);
1553 file->private_data = ctx;
1554 if (!i->i_openers++)
1555 ctx->mfc = inode->i_mapping;
1556 mutex_unlock(&ctx->mapping_lock);
1557 return nonseekable_open(inode, file);
1558 }
1559
1560 static int
1561 spufs_mfc_release(struct inode *inode, struct file *file)
1562 {
1563 struct spufs_inode_info *i = SPUFS_I(inode);
1564 struct spu_context *ctx = i->i_ctx;
1565
1566 mutex_lock(&ctx->mapping_lock);
1567 if (!--i->i_openers)
1568 ctx->mfc = NULL;
1569 mutex_unlock(&ctx->mapping_lock);
1570 return 0;
1571 }
1572
1573 /* interrupt-level mfc callback function. */
1574 void spufs_mfc_callback(struct spu *spu)
1575 {
1576 struct spu_context *ctx = spu->ctx;
1577
1578 if (!ctx)
1579 return;
1580
1581 wake_up_all(&ctx->mfc_wq);
1582
1583 pr_debug("%s %s\n", __func__, spu->name);
1584 if (ctx->mfc_fasync) {
1585 u32 free_elements, tagstatus;
1586 unsigned int mask;
1587
1588 /* no need for spu_acquire in interrupt context */
1589 free_elements = ctx->ops->get_mfc_free_elements(ctx);
1590 tagstatus = ctx->ops->read_mfc_tagstatus(ctx);
1591
1592 mask = 0;
1593 if (free_elements & 0xffff)
1594 mask |= POLLOUT;
1595 if (tagstatus & ctx->tagwait)
1596 mask |= POLLIN;
1597
1598 kill_fasync(&ctx->mfc_fasync, SIGIO, mask);
1599 }
1600 }
1601
1602 static int spufs_read_mfc_tagstatus(struct spu_context *ctx, u32 *status)
1603 {
1604 /* See if at least one tag group is complete */
1605 /* FIXME we need locking around tagwait */
1606 *status = ctx->ops->read_mfc_tagstatus(ctx) & ctx->tagwait;
1607 ctx->tagwait &= ~*status;
1608 if (*status)
1609 return 1;
1610
1611 /* enable interrupt waiting for any tag group,
1612 may silently fail if interrupts are already enabled */
1613 ctx->ops->set_mfc_query(ctx, ctx->tagwait, 1);
1614 return 0;
1615 }
1616
1617 static ssize_t spufs_mfc_read(struct file *file, char __user *buffer,
1618 size_t size, loff_t *pos)
1619 {
1620 struct spu_context *ctx = file->private_data;
1621 int ret = -EINVAL;
1622 u32 status;
1623
1624 if (size != 4)
1625 goto out;
1626
1627 ret = spu_acquire(ctx);
1628 if (ret)
1629 return ret;
1630
1631 ret = -EINVAL;
1632 if (file->f_flags & O_NONBLOCK) {
1633 status = ctx->ops->read_mfc_tagstatus(ctx);
1634 if (!(status & ctx->tagwait))
1635 ret = -EAGAIN;
1636 else
1637 /* XXX(hch): shouldn't we clear ret here? */
1638 ctx->tagwait &= ~status;
1639 } else {
1640 ret = spufs_wait(ctx->mfc_wq,
1641 spufs_read_mfc_tagstatus(ctx, &status));
1642 if (ret)
1643 goto out;
1644 }
1645 spu_release(ctx);
1646
1647 ret = 4;
1648 if (copy_to_user(buffer, &status, 4))
1649 ret = -EFAULT;
1650
1651 out:
1652 return ret;
1653 }
1654
1655 static int spufs_check_valid_dma(struct mfc_dma_command *cmd)
1656 {
1657 pr_debug("queueing DMA %x %lx %x %x %x\n", cmd->lsa,
1658 cmd->ea, cmd->size, cmd->tag, cmd->cmd);
1659
1660 switch (cmd->cmd) {
1661 case MFC_PUT_CMD:
1662 case MFC_PUTF_CMD:
1663 case MFC_PUTB_CMD:
1664 case MFC_GET_CMD:
1665 case MFC_GETF_CMD:
1666 case MFC_GETB_CMD:
1667 break;
1668 default:
1669 pr_debug("invalid DMA opcode %x\n", cmd->cmd);
1670 return -EIO;
1671 }
1672
1673 if ((cmd->lsa & 0xf) != (cmd->ea & 0xf)) {
1674 pr_debug("invalid DMA alignment, ea %lx lsa %x\n",
1675 cmd->ea, cmd->lsa);
1676 return -EIO;
1677 }
1678
1679 switch (cmd->size & 0xf) {
1680 case 1:
1681 break;
1682 case 2:
1683 if (cmd->lsa & 1)
1684 goto error;
1685 break;
1686 case 4:
1687 if (cmd->lsa & 3)
1688 goto error;
1689 break;
1690 case 8:
1691 if (cmd->lsa & 7)
1692 goto error;
1693 break;
1694 case 0:
1695 if (cmd->lsa & 15)
1696 goto error;
1697 break;
1698 error:
1699 default:
1700 pr_debug("invalid DMA alignment %x for size %x\n",
1701 cmd->lsa & 0xf, cmd->size);
1702 return -EIO;
1703 }
1704
1705 if (cmd->size > 16 * 1024) {
1706 pr_debug("invalid DMA size %x\n", cmd->size);
1707 return -EIO;
1708 }
1709
1710 if (cmd->tag & 0xfff0) {
1711 /* we reserve the higher tag numbers for kernel use */
1712 pr_debug("invalid DMA tag\n");
1713 return -EIO;
1714 }
1715
1716 if (cmd->class) {
1717 /* not supported in this version */
1718 pr_debug("invalid DMA class\n");
1719 return -EIO;
1720 }
1721
1722 return 0;
1723 }
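/*
 * Illustrative sketch (editorial addition, not part of the original
 * source): a DMA request that passes the checks above.  lsa and ea
 * must agree in their low four bits (16-byte alignment here), size is
 * capped at 16KiB, tags above 15 are reserved for the kernel and the
 * class field must be zero.  The buffer and values are examples only:
 *
 *	struct mfc_dma_command cmd = {
 *		.lsa	= 0,
 *		.ea	= (uint64_t)(unsigned long)buf,
 *		.size	= 4096,
 *		.tag	= 1,
 *		.class	= 0,
 *		.cmd	= MFC_GET_CMD,
 *	};
 *	write(mfc_fd, &cmd, sizeof(cmd));
 */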
1724
1725 static int spu_send_mfc_command(struct spu_context *ctx,
1726 struct mfc_dma_command cmd,
1727 int *error)
1728 {
1729 *error = ctx->ops->send_mfc_command(ctx, &cmd);
1730 if (*error == -EAGAIN) {
1731 /* wait for any tag group to complete
1732 so we have space for the new command */
1733 ctx->ops->set_mfc_query(ctx, ctx->tagwait, 1);
1734 /* try again, because the queue might be
1735 empty again */
1736 *error = ctx->ops->send_mfc_command(ctx, &cmd);
1737 if (*error == -EAGAIN)
1738 return 0;
1739 }
1740 return 1;
1741 }
1742
1743 static ssize_t spufs_mfc_write(struct file *file, const char __user *buffer,
1744 size_t size, loff_t *pos)
1745 {
1746 struct spu_context *ctx = file->private_data;
1747 struct mfc_dma_command cmd;
1748 int ret = -EINVAL;
1749
1750 if (size != sizeof cmd)
1751 goto out;
1752
1753 ret = -EFAULT;
1754 if (copy_from_user(&cmd, buffer, sizeof cmd))
1755 goto out;
1756
1757 ret = spufs_check_valid_dma(&cmd);
1758 if (ret)
1759 goto out;
1760
1761 ret = spu_acquire(ctx);
1762 if (ret)
1763 goto out;
1764
1765 ret = spufs_wait(ctx->run_wq, ctx->state == SPU_STATE_RUNNABLE);
1766 if (ret)
1767 goto out;
1768
1769 if (file->f_flags & O_NONBLOCK) {
1770 ret = ctx->ops->send_mfc_command(ctx, &cmd);
1771 } else {
1772 int status;
1773 ret = spufs_wait(ctx->mfc_wq,
1774 spu_send_mfc_command(ctx, cmd, &status));
1775 if (ret)
1776 goto out;
1777 if (status)
1778 ret = status;
1779 }
1780
1781 if (ret)
1782 goto out_unlock;
1783
1784 ctx->tagwait |= 1 << cmd.tag;
1785 ret = size;
1786
1787 out_unlock:
1788 spu_release(ctx);
1789 out:
1790 return ret;
1791 }
1792
1793 static unsigned int spufs_mfc_poll(struct file *file, poll_table *wait)
1794 {
1795 struct spu_context *ctx = file->private_data;
1796 u32 free_elements, tagstatus;
1797 unsigned int mask;
1798
1799 poll_wait(file, &ctx->mfc_wq, wait);
1800
1801 /*
1802 * For now keep this uninterruptible and also ignore the rule
1803 * that poll should not sleep. Will be fixed later.
1804 */
1805 mutex_lock(&ctx->state_mutex);
1806 ctx->ops->set_mfc_query(ctx, ctx->tagwait, 2);
1807 free_elements = ctx->ops->get_mfc_free_elements(ctx);
1808 tagstatus = ctx->ops->read_mfc_tagstatus(ctx);
1809 spu_release(ctx);
1810
1811 mask = 0;
1812 if (free_elements & 0xffff)
1813 mask |= POLLOUT | POLLWRNORM;
1814 if (tagstatus & ctx->tagwait)
1815 mask |= POLLIN | POLLRDNORM;
1816
1817 pr_debug("%s: free %d tagstatus %d tagwait %d\n", __func__,
1818 free_elements, tagstatus, ctx->tagwait);
1819
1820 return mask;
1821 }
1822
1823 static int spufs_mfc_flush(struct file *file, fl_owner_t id)
1824 {
1825 struct spu_context *ctx = file->private_data;
1826 int ret;
1827
1828 ret = spu_acquire(ctx);
1829 if (ret)
1830 goto out;
1831 #if 0
1832 /* this currently hangs */
1833 ret = spufs_wait(ctx->mfc_wq,
1834 ctx->ops->set_mfc_query(ctx, ctx->tagwait, 2));
1835 if (ret)
1836 goto out;
1837 ret = spufs_wait(ctx->mfc_wq,
1838 ctx->ops->read_mfc_tagstatus(ctx) == ctx->tagwait);
1839 if (ret)
1840 goto out;
1841 #else
1842 ret = 0;
1843 #endif
1844 spu_release(ctx);
1845 out:
1846 return ret;
1847 }
1848
1849 static int spufs_mfc_fsync(struct file *file, struct dentry *dentry,
1850 int datasync)
1851 {
1852 return spufs_mfc_flush(file, NULL);
1853 }
1854
1855 static int spufs_mfc_fasync(int fd, struct file *file, int on)
1856 {
1857 struct spu_context *ctx = file->private_data;
1858
1859 return fasync_helper(fd, file, on, &ctx->mfc_fasync);
1860 }
1861
1862 static const struct file_operations spufs_mfc_fops = {
1863 .open = spufs_mfc_open,
1864 .release = spufs_mfc_release,
1865 .read = spufs_mfc_read,
1866 .write = spufs_mfc_write,
1867 .poll = spufs_mfc_poll,
1868 .flush = spufs_mfc_flush,
1869 .fsync = spufs_mfc_fsync,
1870 .fasync = spufs_mfc_fasync,
1871 .mmap = spufs_mfc_mmap,
1872 };
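/*
 * Illustrative sketch (editorial addition, not part of the original
 * source): after queueing a command such as the one above, completion
 * can be awaited by reading 4 bytes back from the same file; the word
 * returned is the mask of completed tag groups the caller was waiting
 * on:
 *
 *	u32 done;
 *	if (read(mfc_fd, &done, sizeof(done)) == 4 &&
 *	    (done & (1 << cmd.tag)))
 *		dma_finished();
 */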
1873
1874 static int spufs_npc_set(void *data, u64 val)
1875 {
1876 struct spu_context *ctx = data;
1877 int ret;
1878
1879 ret = spu_acquire(ctx);
1880 if (ret)
1881 return ret;
1882 ctx->ops->npc_write(ctx, val);
1883 spu_release(ctx);
1884
1885 return 0;
1886 }
1887
1888 static u64 spufs_npc_get(struct spu_context *ctx)
1889 {
1890 return ctx->ops->npc_read(ctx);
1891 }
1892 DEFINE_SPUFS_ATTRIBUTE(spufs_npc_ops, spufs_npc_get, spufs_npc_set,
1893 "0x%llx\n", SPU_ATTR_ACQUIRE);
1894
1895 static int spufs_decr_set(void *data, u64 val)
1896 {
1897 struct spu_context *ctx = data;
1898 struct spu_lscsa *lscsa = ctx->csa.lscsa;
1899 int ret;
1900
1901 ret = spu_acquire_saved(ctx);
1902 if (ret)
1903 return ret;
1904 lscsa->decr.slot[0] = (u32) val;
1905 spu_release_saved(ctx);
1906
1907 return 0;
1908 }
1909
1910 static u64 spufs_decr_get(struct spu_context *ctx)
1911 {
1912 struct spu_lscsa *lscsa = ctx->csa.lscsa;
1913 return lscsa->decr.slot[0];
1914 }
1915 DEFINE_SPUFS_ATTRIBUTE(spufs_decr_ops, spufs_decr_get, spufs_decr_set,
1916 "0x%llx\n", SPU_ATTR_ACQUIRE_SAVED);
1917
1918 static int spufs_decr_status_set(void *data, u64 val)
1919 {
1920 struct spu_context *ctx = data;
1921 int ret;
1922
1923 ret = spu_acquire_saved(ctx);
1924 if (ret)
1925 return ret;
1926 if (val)
1927 ctx->csa.priv2.mfc_control_RW |= MFC_CNTL_DECREMENTER_RUNNING;
1928 else
1929 ctx->csa.priv2.mfc_control_RW &= ~MFC_CNTL_DECREMENTER_RUNNING;
1930 spu_release_saved(ctx);
1931
1932 return 0;
1933 }
1934
1935 static u64 spufs_decr_status_get(struct spu_context *ctx)
1936 {
1937 if (ctx->csa.priv2.mfc_control_RW & MFC_CNTL_DECREMENTER_RUNNING)
1938 return SPU_DECR_STATUS_RUNNING;
1939 else
1940 return 0;
1941 }
1942 DEFINE_SPUFS_ATTRIBUTE(spufs_decr_status_ops, spufs_decr_status_get,
1943 spufs_decr_status_set, "0x%llx\n",
1944 SPU_ATTR_ACQUIRE_SAVED);
1945
1946 static int spufs_event_mask_set(void *data, u64 val)
1947 {
1948 struct spu_context *ctx = data;
1949 struct spu_lscsa *lscsa = ctx->csa.lscsa;
1950 int ret;
1951
1952 ret = spu_acquire_saved(ctx);
1953 if (ret)
1954 return ret;
1955 lscsa->event_mask.slot[0] = (u32) val;
1956 spu_release_saved(ctx);
1957
1958 return 0;
1959 }
1960
1961 static u64 spufs_event_mask_get(struct spu_context *ctx)
1962 {
1963 struct spu_lscsa *lscsa = ctx->csa.lscsa;
1964 return lscsa->event_mask.slot[0];
1965 }
1966
1967 DEFINE_SPUFS_ATTRIBUTE(spufs_event_mask_ops, spufs_event_mask_get,
1968 spufs_event_mask_set, "0x%llx\n",
1969 SPU_ATTR_ACQUIRE_SAVED);
1970
1971 static u64 spufs_event_status_get(struct spu_context *ctx)
1972 {
1973 struct spu_state *state = &ctx->csa;
1974 u64 stat;
1975 stat = state->spu_chnlcnt_RW[0];
1976 if (stat)
1977 return state->spu_chnldata_RW[0];
1978 return 0;
1979 }
1980 DEFINE_SPUFS_ATTRIBUTE(spufs_event_status_ops, spufs_event_status_get,
1981 NULL, "0x%llx\n", SPU_ATTR_ACQUIRE_SAVED)
1982
1983 static int spufs_srr0_set(void *data, u64 val)
1984 {
1985 struct spu_context *ctx = data;
1986 struct spu_lscsa *lscsa = ctx->csa.lscsa;
1987 int ret;
1988
1989 ret = spu_acquire_saved(ctx);
1990 if (ret)
1991 return ret;
1992 lscsa->srr0.slot[0] = (u32) val;
1993 spu_release_saved(ctx);
1994
1995 return 0;
1996 }
1997
1998 static u64 spufs_srr0_get(struct spu_context *ctx)
1999 {
2000 struct spu_lscsa *lscsa = ctx->csa.lscsa;
2001 return lscsa->srr0.slot[0];
2002 }
2003 DEFINE_SPUFS_ATTRIBUTE(spufs_srr0_ops, spufs_srr0_get, spufs_srr0_set,
2004 "0x%llx\n", SPU_ATTR_ACQUIRE_SAVED)
2005
2006 static u64 spufs_id_get(struct spu_context *ctx)
2007 {
2008 u64 num;
2009
2010 if (ctx->state == SPU_STATE_RUNNABLE)
2011 num = ctx->spu->number;
2012 else
2013 num = (unsigned int)-1;
2014
2015 return num;
2016 }
2017 DEFINE_SPUFS_ATTRIBUTE(spufs_id_ops, spufs_id_get, NULL, "0x%llx\n",
2018 SPU_ATTR_ACQUIRE)
2019
2020 static u64 spufs_object_id_get(struct spu_context *ctx)
2021 {
2022 /* FIXME: Should there really be no locking here? */
2023 return ctx->object_id;
2024 }
2025
2026 static int spufs_object_id_set(void *data, u64 id)
2027 {
2028 struct spu_context *ctx = data;
2029 ctx->object_id = id;
2030
2031 return 0;
2032 }
2033
2034 DEFINE_SPUFS_ATTRIBUTE(spufs_object_id_ops, spufs_object_id_get,
2035 spufs_object_id_set, "0x%llx\n", SPU_ATTR_NOACQUIRE);
2036
2037 static u64 spufs_lslr_get(struct spu_context *ctx)
2038 {
2039 return ctx->csa.priv2.spu_lslr_RW;
2040 }
2041 DEFINE_SPUFS_ATTRIBUTE(spufs_lslr_ops, spufs_lslr_get, NULL, "0x%llx\n",
2042 SPU_ATTR_ACQUIRE_SAVED);
2043
2044 static int spufs_info_open(struct inode *inode, struct file *file)
2045 {
2046 struct spufs_inode_info *i = SPUFS_I(inode);
2047 struct spu_context *ctx = i->i_ctx;
2048 file->private_data = ctx;
2049 return 0;
2050 }
2051
2052 static int spufs_caps_show(struct seq_file *s, void *private)
2053 {
2054 struct spu_context *ctx = s->private;
2055
2056 if (!(ctx->flags & SPU_CREATE_NOSCHED))
2057 seq_puts(s, "sched\n");
2058 if (!(ctx->flags & SPU_CREATE_ISOLATE))
2059 seq_puts(s, "step\n");
2060 return 0;
2061 }
2062
2063 static int spufs_caps_open(struct inode *inode, struct file *file)
2064 {
2065 return single_open(file, spufs_caps_show, SPUFS_I(inode)->i_ctx);
2066 }
2067
2068 static const struct file_operations spufs_caps_fops = {
2069 .open = spufs_caps_open,
2070 .read = seq_read,
2071 .llseek = seq_lseek,
2072 .release = single_release,
2073 };
2074
2075 static ssize_t __spufs_mbox_info_read(struct spu_context *ctx,
2076 char __user *buf, size_t len, loff_t *pos)
2077 {
2078 u32 data;
2079
2080 /* EOF if there's no entry in the mbox */
2081 if (!(ctx->csa.prob.mb_stat_R & 0x0000ff))
2082 return 0;
2083
2084 data = ctx->csa.prob.pu_mb_R;
2085
2086 return simple_read_from_buffer(buf, len, pos, &data, sizeof data);
2087 }
2088
2089 static ssize_t spufs_mbox_info_read(struct file *file, char __user *buf,
2090 size_t len, loff_t *pos)
2091 {
2092 int ret;
2093 struct spu_context *ctx = file->private_data;
2094
2095 if (!access_ok(VERIFY_WRITE, buf, len))
2096 return -EFAULT;
2097
2098 ret = spu_acquire_saved(ctx);
2099 if (ret)
2100 return ret;
2101 spin_lock(&ctx->csa.register_lock);
2102 ret = __spufs_mbox_info_read(ctx, buf, len, pos);
2103 spin_unlock(&ctx->csa.register_lock);
2104 spu_release_saved(ctx);
2105
2106 return ret;
2107 }
2108
2109 static const struct file_operations spufs_mbox_info_fops = {
2110 .open = spufs_info_open,
2111 .read = spufs_mbox_info_read,
2112 .llseek = generic_file_llseek,
2113 };
2114
2115 static ssize_t __spufs_ibox_info_read(struct spu_context *ctx,
2116 char __user *buf, size_t len, loff_t *pos)
2117 {
2118 u32 data;
2119
2120 /* EOF if there's no entry in the ibox */
2121 if (!(ctx->csa.prob.mb_stat_R & 0xff0000))
2122 return 0;
2123
2124 data = ctx->csa.priv2.puint_mb_R;
2125
2126 return simple_read_from_buffer(buf, len, pos, &data, sizeof data);
2127 }
2128
2129 static ssize_t spufs_ibox_info_read(struct file *file, char __user *buf,
2130 size_t len, loff_t *pos)
2131 {
2132 struct spu_context *ctx = file->private_data;
2133 int ret;
2134
2135 if (!access_ok(VERIFY_WRITE, buf, len))
2136 return -EFAULT;
2137
2138 ret = spu_acquire_saved(ctx);
2139 if (ret)
2140 return ret;
2141 spin_lock(&ctx->csa.register_lock);
2142 ret = __spufs_ibox_info_read(ctx, buf, len, pos);
2143 spin_unlock(&ctx->csa.register_lock);
2144 spu_release_saved(ctx);
2145
2146 return ret;
2147 }
2148
2149 static const struct file_operations spufs_ibox_info_fops = {
2150 .open = spufs_info_open,
2151 .read = spufs_ibox_info_read,
2152 .llseek = generic_file_llseek,
2153 };
2154
2155 static ssize_t __spufs_wbox_info_read(struct spu_context *ctx,
2156 char __user *buf, size_t len, loff_t *pos)
2157 {
2158 int i, cnt;
2159 u32 data[4];
2160 u32 wbox_stat;
2161
2162 wbox_stat = ctx->csa.prob.mb_stat_R;
2163 cnt = 4 - ((wbox_stat & 0x00ff00) >> 8);
2164 for (i = 0; i < cnt; i++) {
2165 data[i] = ctx->csa.spu_mailbox_data[i];
2166 }
2167
2168 return simple_read_from_buffer(buf, len, pos, &data,
2169 cnt * sizeof(u32));
2170 }
2171
2172 static ssize_t spufs_wbox_info_read(struct file *file, char __user *buf,
2173 size_t len, loff_t *pos)
2174 {
2175 struct spu_context *ctx = file->private_data;
2176 int ret;
2177
2178 if (!access_ok(VERIFY_WRITE, buf, len))
2179 return -EFAULT;
2180
2181 ret = spu_acquire_saved(ctx);
2182 if (ret)
2183 return ret;
2184 spin_lock(&ctx->csa.register_lock);
2185 ret = __spufs_wbox_info_read(ctx, buf, len, pos);
2186 spin_unlock(&ctx->csa.register_lock);
2187 spu_release_saved(ctx);
2188
2189 return ret;
2190 }
2191
2192 static const struct file_operations spufs_wbox_info_fops = {
2193 .open = spufs_info_open,
2194 .read = spufs_wbox_info_read,
2195 .llseek = generic_file_llseek,
2196 };
2197
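/*
 * Per the CBE architecture, SPU channels 24, 25 and 27 are
 * MFC_RdTagStat, MFC_RdListStallStat and MFC_RdAtomicStat; the saved
 * channel data indexed below corresponds to those registers.
 */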
2198 static ssize_t __spufs_dma_info_read(struct spu_context *ctx,
2199 char __user *buf, size_t len, loff_t *pos)
2200 {
2201 struct spu_dma_info info;
2202 struct mfc_cq_sr *qp, *spuqp;
2203 int i;
2204
2205 info.dma_info_type = ctx->csa.priv2.spu_tag_status_query_RW;
2206 info.dma_info_mask = ctx->csa.lscsa->tag_mask.slot[0];
2207 info.dma_info_status = ctx->csa.spu_chnldata_RW[24];
2208 info.dma_info_stall_and_notify = ctx->csa.spu_chnldata_RW[25];
2209 info.dma_info_atomic_command_status = ctx->csa.spu_chnldata_RW[27];
2210 for (i = 0; i < 16; i++) {
2211 qp = &info.dma_info_command_data[i];
2212 spuqp = &ctx->csa.priv2.spuq[i];
2213
2214 qp->mfc_cq_data0_RW = spuqp->mfc_cq_data0_RW;
2215 qp->mfc_cq_data1_RW = spuqp->mfc_cq_data1_RW;
2216 qp->mfc_cq_data2_RW = spuqp->mfc_cq_data2_RW;
2217 qp->mfc_cq_data3_RW = spuqp->mfc_cq_data3_RW;
2218 }
2219
2220 return simple_read_from_buffer(buf, len, pos, &info,
2221 sizeof info);
2222 }
2223
2224 static ssize_t spufs_dma_info_read(struct file *file, char __user *buf,
2225 size_t len, loff_t *pos)
2226 {
2227 struct spu_context *ctx = file->private_data;
2228 int ret;
2229
2230 if (!access_ok(VERIFY_WRITE, buf, len))
2231 return -EFAULT;
2232
2233 ret = spu_acquire_saved(ctx);
2234 if (ret)
2235 return ret;
2236 spin_lock(&ctx->csa.register_lock);
2237 ret = __spufs_dma_info_read(ctx, buf, len, pos);
2238 spin_unlock(&ctx->csa.register_lock);
2239 spu_release_saved(ctx);
2240
2241 return ret;
2242 }
2243
2244 static const struct file_operations spufs_dma_info_fops = {
2245 .open = spufs_info_open,
2246 .read = spufs_dma_info_read,
2247 .llseek = generic_file_llseek, /* seekable like the other _info fops */
2248 };
2248
2249 static ssize_t __spufs_proxydma_info_read(struct spu_context *ctx,
2250 char __user *buf, size_t len, loff_t *pos)
2251 {
2252 struct spu_proxydma_info info;
2253 struct mfc_cq_sr *qp, *puqp;
2254 int ret = sizeof info;
2255 int i;
2256
2257 if (len < ret)
2258 return -EINVAL;
2259
2260 if (!access_ok(VERIFY_WRITE, buf, len))
2261 return -EFAULT;
2262
2263 info.proxydma_info_type = ctx->csa.prob.dma_querytype_RW;
2264 info.proxydma_info_mask = ctx->csa.prob.dma_querymask_RW;
2265 info.proxydma_info_status = ctx->csa.prob.dma_tagstatus_R;
2266 for (i = 0; i < 8; i++) {
2267 qp = &info.proxydma_info_command_data[i];
2268 puqp = &ctx->csa.priv2.puq[i];
2269
2270 qp->mfc_cq_data0_RW = puqp->mfc_cq_data0_RW;
2271 qp->mfc_cq_data1_RW = puqp->mfc_cq_data1_RW;
2272 qp->mfc_cq_data2_RW = puqp->mfc_cq_data2_RW;
2273 qp->mfc_cq_data3_RW = puqp->mfc_cq_data3_RW;
2274 }
2275
2276 return simple_read_from_buffer(buf, len, pos, &info,
2277 sizeof info);
2278 }
2279
2280 static ssize_t spufs_proxydma_info_read(struct file *file, char __user *buf,
2281 size_t len, loff_t *pos)
2282 {
2283 struct spu_context *ctx = file->private_data;
2284 int ret;
2285
2286 ret = spu_acquire_saved(ctx);
2287 if (ret)
2288 return ret;
2289 spin_lock(&ctx->csa.register_lock);
2290 ret = __spufs_proxydma_info_read(ctx, buf, len, pos);
2291 spin_unlock(&ctx->csa.register_lock);
2292 spu_release_saved(ctx);
2293
2294 return ret;
2295 }
2296
2297 static const struct file_operations spufs_proxydma_info_fops = {
2298 .open = spufs_info_open,
2299 .read = spufs_proxydma_info_read,
2300 .llseek = generic_file_llseek, /* seekable like the other _info fops */
2301 };
2301
2302 static int spufs_show_tid(struct seq_file *s, void *private)
2303 {
2304 struct spu_context *ctx = s->private;
2305
2306 seq_printf(s, "%d\n", ctx->tid);
2307 return 0;
2308 }
2309
2310 static int spufs_tid_open(struct inode *inode, struct file *file)
2311 {
2312 return single_open(file, spufs_show_tid, SPUFS_I(inode)->i_ctx);
2313 }
2314
2315 static const struct file_operations spufs_tid_fops = {
2316 .open = spufs_tid_open,
2317 .read = seq_read,
2318 .llseek = seq_lseek,
2319 .release = single_release,
2320 };
2321
2322 static const char *ctx_state_names[] = {
2323 "user", "system", "iowait", "loaded"
2324 };
2325
2326 static unsigned long long spufs_acct_time(struct spu_context *ctx,
2327 enum spu_utilization_state state)
2328 {
2329 struct timespec ts;
2330 unsigned long long time = ctx->stats.times[state];
2331
2332 /*
2333 * In general, utilization statistics are updated by the controlling
2334 * thread as the spu context moves through various well-defined
2335 * state transitions. But if the context is lazily loaded, its
2336 * utilization statistics are not updated, because the controlling
2337 * thread is not tightly coupled with the execution of the spu
2338 * context. In that case, we calculate and apply the time delta from
2339 * the last recorded state of the spu context.
2340 */
2341 if (ctx->spu && ctx->stats.util_state == state) {
2342 ktime_get_ts(&ts);
2343 time += timespec_to_ns(&ts) - ctx->stats.tstamp;
2344 }
2345
2346 return time / NSEC_PER_MSEC;
2347 }
2348
2349 static unsigned long long spufs_slb_flts(struct spu_context *ctx)
2350 {
2351 unsigned long long slb_flts = ctx->stats.slb_flt;
2352
2353 if (ctx->state == SPU_STATE_RUNNABLE) {
2354 slb_flts += (ctx->spu->stats.slb_flt -
2355 ctx->stats.slb_flt_base);
2356 }
2357
2358 return slb_flts;
2359 }
2360
2361 static unsigned long long spufs_class2_intrs(struct spu_context *ctx)
2362 {
2363 unsigned long long class2_intrs = ctx->stats.class2_intr;
2364
2365 if (ctx->state == SPU_STATE_RUNNABLE) {
2366 class2_intrs += (ctx->spu->stats.class2_intr -
2367 ctx->stats.class2_intr_base);
2368 }
2369
2370 return class2_intrs;
2371 }
2372
2373
2374 static int spufs_show_stat(struct seq_file *s, void *private)
2375 {
2376 struct spu_context *ctx = s->private;
2377 int ret;
2378
2379 ret = spu_acquire(ctx);
2380 if (ret)
2381 return ret;
2382
2383 seq_printf(s, "%s %llu %llu %llu %llu "
2384 "%llu %llu %llu %llu %llu %llu %llu %llu\n",
2385 ctx_state_names[ctx->stats.util_state],
2386 spufs_acct_time(ctx, SPU_UTIL_USER),
2387 spufs_acct_time(ctx, SPU_UTIL_SYSTEM),
2388 spufs_acct_time(ctx, SPU_UTIL_IOWAIT),
2389 spufs_acct_time(ctx, SPU_UTIL_IDLE_LOADED),
2390 ctx->stats.vol_ctx_switch,
2391 ctx->stats.invol_ctx_switch,
2392 spufs_slb_flts(ctx),
2393 ctx->stats.hash_flt,
2394 ctx->stats.min_flt,
2395 ctx->stats.maj_flt,
2396 spufs_class2_intrs(ctx),
2397 ctx->stats.libassist);
2398 spu_release(ctx);
2399 return 0;
2400 }
2401
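/*
 * A hypothetical example of the single line produced by the "stat"
 * file, matching the seq_printf() format above:
 *
 *	user 12 0 3 40 5 2 0 0 7 1 0 1
 *
 * i.e. the utilization state, the four accounted times in
 * milliseconds, then the context-switch, fault, class 2 interrupt and
 * libassist counters.
 */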
2402 static int spufs_stat_open(struct inode *inode, struct file *file)
2403 {
2404 return single_open(file, spufs_show_stat, SPUFS_I(inode)->i_ctx);
2405 }
2406
2407 static const struct file_operations spufs_stat_fops = {
2408 .open = spufs_stat_open,
2409 .read = seq_read,
2410 .llseek = seq_lseek,
2411 .release = single_release,
2412 };
2413
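/*
 * The switch log is a head/tail ring buffer of SWITCH_LOG_BUFSIZE
 * entries. The producer, spu_switch_log_notify(), only appends while
 * more than one slot is free, so head == tail unambiguously means
 * "empty" and at most SWITCH_LOG_BUFSIZE - 1 entries are in flight.
 */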
2414 static inline int spufs_switch_log_used(struct spu_context *ctx)
2415 {
2416 return (ctx->switch_log->head - ctx->switch_log->tail) %
2417 SWITCH_LOG_BUFSIZE;
2418 }
2419
2420 static inline int spufs_switch_log_avail(struct spu_context *ctx)
2421 {
2422 return SWITCH_LOG_BUFSIZE - spufs_switch_log_used(ctx);
2423 }
2424
2425 static int spufs_switch_log_open(struct inode *inode, struct file *file)
2426 {
2427 struct spu_context *ctx = SPUFS_I(inode)->i_ctx;
2428 int rc;
2429
2430 rc = spu_acquire(ctx);
2431 if (rc)
2432 return rc;
2433
2434 if (ctx->switch_log) {
2435 rc = -EBUSY;
2436 goto out;
2437 }
2438
2439 ctx->switch_log = kmalloc(sizeof(struct switch_log) +
2440 SWITCH_LOG_BUFSIZE * sizeof(struct switch_log_entry),
2441 GFP_KERNEL);
2442
2443 if (!ctx->switch_log) {
2444 rc = -ENOMEM;
2445 goto out;
2446 }
2447
2448 ctx->switch_log->head = ctx->switch_log->tail = 0;
2449 init_waitqueue_head(&ctx->switch_log->wait);
2450 rc = 0;
2451
2452 out:
2453 spu_release(ctx);
2454 return rc;
2455 }
2456
2457 static int spufs_switch_log_release(struct inode *inode, struct file *file)
2458 {
2459 struct spu_context *ctx = SPUFS_I(inode)->i_ctx;
2460 int rc;
2461
2462 rc = spu_acquire(ctx);
2463 if (rc)
2464 return rc;
2465
2466 kfree(ctx->switch_log);
2467 ctx->switch_log = NULL;
2468 spu_release(ctx);
2469
2470 return 0;
2471 }
2472
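/*
 * Each log entry is emitted to the reader as one text line of the form
 * "<sec>.<nsec> <spu_id> <type> <val> <timebase>".
 */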
2473 static int switch_log_sprint(struct spu_context *ctx, char *tbuf, int n)
2474 {
2475 struct switch_log_entry *p;
2476
2477 p = ctx->switch_log->log + ctx->switch_log->tail % SWITCH_LOG_BUFSIZE;
2478
2479 return snprintf(tbuf, n, "%u.%09u %d %u %u %llu\n",
2480 (unsigned int) p->tstamp.tv_sec,
2481 (unsigned int) p->tstamp.tv_nsec,
2482 p->spu_id,
2483 (unsigned int) p->type,
2484 (unsigned int) p->val,
2485 (unsigned long long) p->timebase);
2486 }
2487
2488 static ssize_t spufs_switch_log_read(struct file *file, char __user *buf,
2489 size_t len, loff_t *ppos)
2490 {
2491 struct inode *inode = file->f_path.dentry->d_inode;
2492 struct spu_context *ctx = SPUFS_I(inode)->i_ctx;
2493 int error = 0, cnt = 0;
2494
2495 if (!buf)
2496 return -EINVAL;
2497
2498 error = spu_acquire(ctx);
2499 if (error)
2500 return error;
2501
2502 while (cnt < len) {
2503 char tbuf[128];
2504 int width;
2505
2506 if (spufs_switch_log_used(ctx) == 0) {
2507 if (cnt > 0) {
2508 /* If there's data ready to go, we can
2509 * just return straight away */
2510 break;
2511
2512 } else if (file->f_flags & O_NONBLOCK) {
2513 error = -EAGAIN;
2514 break;
2515
2516 } else {
2517 /* spufs_wait will drop the mutex and
2518 * re-acquire, but since we're in read(), the
2519 * file cannot be _released (and so
2520 * ctx->switch_log is stable).
2521 */
2522 error = spufs_wait(ctx->switch_log->wait,
2523 spufs_switch_log_used(ctx) > 0);
2524
2525 /* On error, spufs_wait returns without the
2526 * state mutex held */
2527 if (error)
2528 return error;
2529
2530 /* We may have had entries read from underneath
2531 * us while we dropped the mutex in spufs_wait,
2532 * so re-check */
2533 if (spufs_switch_log_used(ctx) == 0)
2534 continue;
2535 }
2536 }
2537
2538 width = switch_log_sprint(ctx, tbuf, sizeof(tbuf));
2539 if (width < len - cnt)
2540 ctx->switch_log->tail =
2541 (ctx->switch_log->tail + 1) %
2542 SWITCH_LOG_BUFSIZE;
2543 else
2544 /* If the record doesn't fit in the space remaining in
2545 * the user buffer, return the partial read so far */
2546 break;
2547
2548 if (copy_to_user(buf + cnt, tbuf, width)) {
2549 error = -EFAULT;
2550 break;
2551 }
2551 cnt += width;
2552 }
2553
2554 spu_release(ctx);
2555
2556 return cnt == 0 ? error : cnt;
2557 }
2558
2559 static unsigned int spufs_switch_log_poll(struct file *file, poll_table *wait)
2560 {
2561 struct inode *inode = file->f_path.dentry->d_inode;
2562 struct spu_context *ctx = SPUFS_I(inode)->i_ctx;
2563 unsigned int mask = 0;
2564 int rc;
2565
2566 poll_wait(file, &ctx->switch_log->wait, wait);
2567
2568 rc = spu_acquire(ctx);
2569 if (rc)
2570 return rc;
2571
2572 if (spufs_switch_log_used(ctx) > 0)
2573 mask |= POLLIN;
2574
2575 spu_release(ctx);
2576
2577 return mask;
2578 }
2579
2580 static const struct file_operations spufs_switch_log_fops = {
2581 .owner = THIS_MODULE,
2582 .open = spufs_switch_log_open,
2583 .read = spufs_switch_log_read,
2584 .poll = spufs_switch_log_poll,
2585 .release = spufs_switch_log_release,
2586 };
2587
2588 /**
2589 * spu_switch_log_notify - log a context switch event to a switch log reader
2590 *
2591 * Must be called with ctx->state_mutex held.
2592 */
2593 void spu_switch_log_notify(struct spu *spu, struct spu_context *ctx,
2594 u32 type, u32 val)
2595 {
2596 if (!ctx->switch_log)
2597 return;
2598
2599 if (spufs_switch_log_avail(ctx) > 1) {
2600 struct switch_log_entry *p;
2601
2602 p = ctx->switch_log->log + ctx->switch_log->head;
2603 ktime_get_ts(&p->tstamp);
2604 p->timebase = get_tb();
2605 p->spu_id = spu ? spu->number : -1;
2606 p->type = type;
2607 p->val = val;
2608
2609 ctx->switch_log->head =
2610 (ctx->switch_log->head + 1) % SWITCH_LOG_BUFSIZE;
2611 }
2612
2613 wake_up(&ctx->switch_log->wait);
2614 }
2615
2616 static int spufs_show_ctx(struct seq_file *s, void *private)
2617 {
2618 struct spu_context *ctx = s->private;
2619 u64 mfc_control_RW;
2620
2621 mutex_lock(&ctx->state_mutex);
2622 if (ctx->spu) {
2623 struct spu *spu = ctx->spu;
2624 struct spu_priv2 __iomem *priv2 = spu->priv2;
2625
2626 spin_lock_irq(&spu->register_lock);
2627 mfc_control_RW = in_be64(&priv2->mfc_control_RW);
2628 spin_unlock_irq(&spu->register_lock);
2629 } else {
2630 struct spu_state *csa = &ctx->csa;
2631
2632 mfc_control_RW = csa->priv2.mfc_control_RW;
2633 }
2634
2635 seq_printf(s, "%c flgs(%lx) sflgs(%lx) pri(%d) ts(%d) spu(%02d)"
2636 " %c %lx %lx %lx %lx %x %x\n",
2637 ctx->state == SPU_STATE_SAVED ? 'S' : 'R',
2638 ctx->flags,
2639 ctx->sched_flags,
2640 ctx->prio,
2641 ctx->time_slice,
2642 ctx->spu ? ctx->spu->number : -1,
2643 !list_empty(&ctx->rq) ? 'q' : ' ',
2644 ctx->csa.class_0_pending,
2645 ctx->csa.class_0_dar,
2646 ctx->csa.class_1_dsisr,
2647 mfc_control_RW,
2648 ctx->ops->runcntl_read(ctx),
2649 ctx->ops->status_read(ctx));
2650
2651 mutex_unlock(&ctx->state_mutex);
2652
2653 return 0;
2654 }
2655
2656 static int spufs_ctx_open(struct inode *inode, struct file *file)
2657 {
2658 return single_open(file, spufs_show_ctx, SPUFS_I(inode)->i_ctx);
2659 }
2660
2661 static const struct file_operations spufs_ctx_fops = {
2662 .open = spufs_ctx_open,
2663 .read = seq_read,
2664 .llseek = seq_lseek,
2665 .release = single_release,
2666 };
2667
2668 struct spufs_tree_descr spufs_dir_contents[] = {
2669 { "capabilities", &spufs_caps_fops, 0444, },
2670 { "mem", &spufs_mem_fops, 0666, LS_SIZE, },
2671 { "regs", &spufs_regs_fops, 0666, sizeof(struct spu_reg128[128]), },
2672 { "mbox", &spufs_mbox_fops, 0444, },
2673 { "ibox", &spufs_ibox_fops, 0444, },
2674 { "wbox", &spufs_wbox_fops, 0222, },
2675 { "mbox_stat", &spufs_mbox_stat_fops, 0444, sizeof(u32), },
2676 { "ibox_stat", &spufs_ibox_stat_fops, 0444, sizeof(u32), },
2677 { "wbox_stat", &spufs_wbox_stat_fops, 0444, sizeof(u32), },
2678 { "signal1", &spufs_signal1_fops, 0666, },
2679 { "signal2", &spufs_signal2_fops, 0666, },
2680 { "signal1_type", &spufs_signal1_type, 0666, },
2681 { "signal2_type", &spufs_signal2_type, 0666, },
2682 { "cntl", &spufs_cntl_fops, 0666, },
2683 { "fpcr", &spufs_fpcr_fops, 0666, sizeof(struct spu_reg128), },
2684 { "lslr", &spufs_lslr_ops, 0444, },
2685 { "mfc", &spufs_mfc_fops, 0666, },
2686 { "mss", &spufs_mss_fops, 0666, },
2687 { "npc", &spufs_npc_ops, 0666, },
2688 { "srr0", &spufs_srr0_ops, 0666, },
2689 { "decr", &spufs_decr_ops, 0666, },
2690 { "decr_status", &spufs_decr_status_ops, 0666, },
2691 { "event_mask", &spufs_event_mask_ops, 0666, },
2692 { "event_status", &spufs_event_status_ops, 0444, },
2693 { "psmap", &spufs_psmap_fops, 0666, SPUFS_PS_MAP_SIZE, },
2694 { "phys-id", &spufs_id_ops, 0666, },
2695 { "object-id", &spufs_object_id_ops, 0666, },
2696 { "mbox_info", &spufs_mbox_info_fops, 0444, sizeof(u32), },
2697 { "ibox_info", &spufs_ibox_info_fops, 0444, sizeof(u32), },
2698 { "wbox_info", &spufs_wbox_info_fops, 0444, sizeof(u32), },
2699 { "dma_info", &spufs_dma_info_fops, 0444,
2700 sizeof(struct spu_dma_info), },
2701 { "proxydma_info", &spufs_proxydma_info_fops, 0444,
2702 sizeof(struct spu_proxydma_info)},
2703 { "tid", &spufs_tid_fops, 0444, },
2704 { "stat", &spufs_stat_fops, 0444, },
2705 { "switch_log", &spufs_switch_log_fops, 0444 },
2706 {},
2707 };
2708
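/*
 * NOSCHED contexts are never scheduled out, so the files that operate
 * on saved register state (regs, fpcr, decr, the *_info dumps, ...)
 * are omitted from this directory.
 */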
2709 struct spufs_tree_descr spufs_dir_nosched_contents[] = {
2710 { "capabilities", &spufs_caps_fops, 0444, },
2711 { "mem", &spufs_mem_fops, 0666, LS_SIZE, },
2712 { "mbox", &spufs_mbox_fops, 0444, },
2713 { "ibox", &spufs_ibox_fops, 0444, },
2714 { "wbox", &spufs_wbox_fops, 0222, },
2715 { "mbox_stat", &spufs_mbox_stat_fops, 0444, sizeof(u32), },
2716 { "ibox_stat", &spufs_ibox_stat_fops, 0444, sizeof(u32), },
2717 { "wbox_stat", &spufs_wbox_stat_fops, 0444, sizeof(u32), },
2718 { "signal1", &spufs_signal1_nosched_fops, 0222, },
2719 { "signal2", &spufs_signal2_nosched_fops, 0222, },
2720 { "signal1_type", &spufs_signal1_type, 0666, },
2721 { "signal2_type", &spufs_signal2_type, 0666, },
2722 { "mss", &spufs_mss_fops, 0666, },
2723 { "mfc", &spufs_mfc_fops, 0666, },
2724 { "cntl", &spufs_cntl_fops, 0666, },
2725 { "npc", &spufs_npc_ops, 0666, },
2726 { "psmap", &spufs_psmap_fops, 0666, SPUFS_PS_MAP_SIZE, },
2727 { "phys-id", &spufs_id_ops, 0666, },
2728 { "object-id", &spufs_object_id_ops, 0666, },
2729 { "tid", &spufs_tid_fops, 0444, },
2730 { "stat", &spufs_stat_fops, 0444, },
2731 {},
2732 };
2733
2734 struct spufs_tree_descr spufs_dir_debug_contents[] = {
2735 { ".ctx", &spufs_ctx_fops, 0444, },
2736 {},
2737 };
2738
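/*
 * For the attribute-style entries below, a size of 19 appears to be
 * the longest text form of a u64 in the "0x%llx\n" format: "0x" plus
 * up to 16 hex digits plus a newline.
 */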
2739 struct spufs_coredump_reader spufs_coredump_read[] = {
2740 { "regs", __spufs_regs_read, NULL, sizeof(struct spu_reg128[128])},
2741 { "fpcr", __spufs_fpcr_read, NULL, sizeof(struct spu_reg128) },
2742 { "lslr", NULL, spufs_lslr_get, 19 },
2743 { "decr", NULL, spufs_decr_get, 19 },
2744 { "decr_status", NULL, spufs_decr_status_get, 19 },
2745 { "mem", __spufs_mem_read, NULL, LS_SIZE, },
2746 { "signal1", __spufs_signal1_read, NULL, sizeof(u32) },
2747 { "signal1_type", NULL, spufs_signal1_type_get, 19 },
2748 { "signal2", __spufs_signal2_read, NULL, sizeof(u32) },
2749 { "signal2_type", NULL, spufs_signal2_type_get, 19 },
2750 { "event_mask", NULL, spufs_event_mask_get, 19 },
2751 { "event_status", NULL, spufs_event_status_get, 19 },
2752 { "mbox_info", __spufs_mbox_info_read, NULL, sizeof(u32) },
2753 { "ibox_info", __spufs_ibox_info_read, NULL, sizeof(u32) },
2754 { "wbox_info", __spufs_wbox_info_read, NULL, 4 * sizeof(u32)},
2755 { "dma_info", __spufs_dma_info_read, NULL, sizeof(struct spu_dma_info)},
2756 { "proxydma_info", __spufs_proxydma_info_read,
2757 NULL, sizeof(struct spu_proxydma_info)},
2758 { "object-id", NULL, spufs_object_id_get, 19 },
2759 { "npc", NULL, spufs_npc_get, 19 },
2760 { NULL },
2761 };