/*
 * SPU file system -- file contents
 *
 * (C) Copyright IBM Deutschland Entwicklung GmbH 2005
 *
 * Author: Arnd Bergmann <arndb@de.ibm.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2, or (at your option)
 * any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 */
#include <linux/fs.h>
#include <linux/ioctl.h>
#include <linux/module.h>
#include <linux/pagemap.h>
#include <linux/poll.h>
#include <linux/ptrace.h>
#include <linux/seq_file.h>
#include <linux/slab.h>

#include <asm/io.h>
#include <asm/time.h>
#include <asm/spu.h>
#include <asm/spu_info.h>
#include <asm/uaccess.h>

#include "spufs.h"
#include "sputrace.h"

#define SPUFS_MMAP_4K (PAGE_SIZE == 0x1000)
/* Simple attribute files */
struct spufs_attr {
        int (*get)(void *, u64 *);
        int (*set)(void *, u64);
        char get_buf[24];       /* enough to store a u64 and "\n\0" */
        char set_buf[24];
        void *data;
        const char *fmt;        /* format for read operation */
        struct mutex mutex;     /* protects access to these buffers */
};
static int spufs_attr_open(struct inode *inode, struct file *file,
                int (*get)(void *, u64 *), int (*set)(void *, u64),
                const char *fmt)
{
        struct spufs_attr *attr;

        attr = kmalloc(sizeof(*attr), GFP_KERNEL);
        if (!attr)
                return -ENOMEM;

        attr->get = get;
        attr->set = set;
        attr->data = inode->i_private;
        attr->fmt = fmt;
        mutex_init(&attr->mutex);
        file->private_data = attr;

        return nonseekable_open(inode, file);
}
static int spufs_attr_release(struct inode *inode, struct file *file)
{
        kfree(file->private_data);
        return 0;
}
static ssize_t spufs_attr_read(struct file *file, char __user *buf,
                size_t len, loff_t *ppos)
{
        struct spufs_attr *attr;
        size_t size;
        ssize_t ret;

        attr = file->private_data;
        if (!attr->get)
                return -EACCES;

        ret = mutex_lock_interruptible(&attr->mutex);
        if (ret)
                return ret;

        if (*ppos) {            /* continued read */
                size = strlen(attr->get_buf);
        } else {                /* first read */
                u64 val;
                ret = attr->get(attr->data, &val);
                if (ret)
                        goto out;

                size = scnprintf(attr->get_buf, sizeof(attr->get_buf),
                                 attr->fmt, (unsigned long long)val);
        }

        ret = simple_read_from_buffer(buf, len, ppos, attr->get_buf, size);
out:
        mutex_unlock(&attr->mutex);
        return ret;
}
static ssize_t spufs_attr_write(struct file *file, const char __user *buf,
                size_t len, loff_t *ppos)
{
        struct spufs_attr *attr;
        u64 val;
        size_t size;
        ssize_t ret;

        attr = file->private_data;
        if (!attr->set)
                return -EACCES;

        ret = mutex_lock_interruptible(&attr->mutex);
        if (ret)
                return ret;

        ret = -EFAULT;
        size = min(sizeof(attr->set_buf) - 1, len);
        if (copy_from_user(attr->set_buf, buf, size))
                goto out;

        ret = len; /* claim we got the whole input */
        attr->set_buf[size] = '\0';
        val = simple_strtol(attr->set_buf, NULL, 0);
        attr->set(attr->data, val);
out:
        mutex_unlock(&attr->mutex);
        return ret;
}
#define DEFINE_SPUFS_SIMPLE_ATTRIBUTE(__fops, __get, __set, __fmt)     \
static int __fops ## _open(struct inode *inode, struct file *file)     \
{                                                                       \
        __simple_attr_check_format(__fmt, 0ull);                        \
        return spufs_attr_open(inode, file, __get, __set, __fmt);      \
}                                                                       \
static const struct file_operations __fops = {                          \
        .owner   = THIS_MODULE,                                         \
        .open    = __fops ## _open,                                     \
        .release = spufs_attr_release,                                  \
        .read    = spufs_attr_read,                                     \
        .write   = spufs_attr_write,                                    \
};
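
/*
 * Illustrative sketch (not part of this file): from user space, a file
 * defined with DEFINE_SPUFS_SIMPLE_ATTRIBUTE behaves like a small text
 * attribute.  Reads return the value rendered with the given format
 * string; writes are parsed with simple_strtol(), so decimal, octal and
 * 0x-prefixed hex strings are all accepted.  The context path below is
 * made up.
 *
 *	char buf[24];
 *	int fd = open("/spu/my_ctx/npc", O_RDWR);
 *	read(fd, buf, sizeof(buf));	-- yields e.g. "0x1234\n"
 *	write(fd, "0x2000", 6);		-- parsed via simple_strtol()
 *	close(fd);
 */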
static int
spufs_mem_open(struct inode *inode, struct file *file)
{
        struct spufs_inode_info *i = SPUFS_I(inode);
        struct spu_context *ctx = i->i_ctx;

        mutex_lock(&ctx->mapping_lock);
        file->private_data = ctx;
        if (!i->i_openers++)
                ctx->local_store = inode->i_mapping;
        mutex_unlock(&ctx->mapping_lock);
        return 0;
}
static int
spufs_mem_release(struct inode *inode, struct file *file)
{
        struct spufs_inode_info *i = SPUFS_I(inode);
        struct spu_context *ctx = i->i_ctx;

        mutex_lock(&ctx->mapping_lock);
        if (!--i->i_openers)
                ctx->local_store = NULL;
        mutex_unlock(&ctx->mapping_lock);
        return 0;
}
static ssize_t
__spufs_mem_read(struct spu_context *ctx, char __user *buffer,
                        size_t size, loff_t *pos)
{
        char *local_store = ctx->ops->get_ls(ctx);
        return simple_read_from_buffer(buffer, size, pos, local_store,
                                        LS_SIZE);
}
static ssize_t
spufs_mem_read(struct file *file, char __user *buffer,
                                size_t size, loff_t *pos)
{
        struct spu_context *ctx = file->private_data;
        ssize_t ret;

        ret = spu_acquire(ctx);
        if (ret)
                return ret;
        ret = __spufs_mem_read(ctx, buffer, size, pos);
        spu_release(ctx);

        return ret;
}
static ssize_t
spufs_mem_write(struct file *file, const char __user *buffer,
                                        size_t size, loff_t *ppos)
{
        struct spu_context *ctx = file->private_data;
        char *local_store;
        loff_t pos = *ppos;
        int ret;

        if (pos > LS_SIZE)
                return -EFBIG;
        if (size > LS_SIZE - pos)
                size = LS_SIZE - pos;

        ret = spu_acquire(ctx);
        if (ret)
                return ret;

        local_store = ctx->ops->get_ls(ctx);
        ret = copy_from_user(local_store + pos, buffer, size);
        spu_release(ctx);

        if (ret)
                return -EFAULT;
        *ppos = pos + size;
        return size;
}
static int
spufs_mem_mmap_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
        struct spu_context *ctx = vma->vm_file->private_data;
        unsigned long address = (unsigned long)vmf->virtual_address;
        unsigned long pfn, offset;

#ifdef CONFIG_SPU_FS_64K_LS
        struct spu_state *csa = &ctx->csa;
        int psize;

        /* Check what page size we are using */
        psize = get_slice_psize(vma->vm_mm, address);

        /* Some sanity checking */
        BUG_ON(csa->use_big_pages != (psize == MMU_PAGE_64K));

        /* Wow, 64K, cool, we need to align the address though */
        if (csa->use_big_pages) {
                BUG_ON(vma->vm_start & 0xffff);
                address &= ~0xfffful;
        }
#endif /* CONFIG_SPU_FS_64K_LS */

        offset = vmf->pgoff << PAGE_SHIFT;
        if (offset >= LS_SIZE)
                return VM_FAULT_SIGBUS;

        pr_debug("spufs_mem_mmap_fault address=0x%lx, offset=0x%lx\n",
                        address, offset);

        if (spu_acquire(ctx))
                return VM_FAULT_NOPAGE;

        if (ctx->state == SPU_STATE_SAVED) {
                vma->vm_page_prot = pgprot_cached(vma->vm_page_prot);
                pfn = vmalloc_to_pfn(ctx->csa.lscsa->ls + offset);
        } else {
                vma->vm_page_prot = pgprot_noncached_wc(vma->vm_page_prot);
                pfn = (ctx->spu->local_store_phys + offset) >> PAGE_SHIFT;
        }
        vm_insert_pfn(vma, address, pfn);

        spu_release(ctx);

        return VM_FAULT_NOPAGE;
}
static int spufs_mem_mmap_access(struct vm_area_struct *vma,
                                unsigned long address,
                                void *buf, int len, int write)
{
        struct spu_context *ctx = vma->vm_file->private_data;
        unsigned long offset = address - vma->vm_start;
        char *local_store;

        if (write && !(vma->vm_flags & VM_WRITE))
                return -EACCES;
        if (spu_acquire(ctx))
                return -EINTR;
        if ((offset + len) > vma->vm_end)
                len = vma->vm_end - offset;
        local_store = ctx->ops->get_ls(ctx);
        if (write)
                memcpy_toio(local_store + offset, buf, len);
        else
                memcpy_fromio(buf, local_store + offset, len);
        spu_release(ctx);
        return len;
}
static const struct vm_operations_struct spufs_mem_mmap_vmops = {
        .fault = spufs_mem_mmap_fault,
        .access = spufs_mem_mmap_access,
};
static int spufs_mem_mmap(struct file *file, struct vm_area_struct *vma)
{
#ifdef CONFIG_SPU_FS_64K_LS
        struct spu_context      *ctx = file->private_data;
        struct spu_state        *csa = &ctx->csa;

        /* Sanity check VMA alignment */
        if (csa->use_big_pages) {
                pr_debug("spufs_mem_mmap 64K, start=0x%lx, end=0x%lx,"
                         " pgoff=0x%lx\n", vma->vm_start, vma->vm_end,
                         vma->vm_pgoff);
                if (vma->vm_start & 0xffff)
                        return -EINVAL;
                if (vma->vm_pgoff & 0xf)
                        return -EINVAL;
        }
#endif /* CONFIG_SPU_FS_64K_LS */

        if (!(vma->vm_flags & VM_SHARED))
                return -EINVAL;

        vma->vm_flags |= VM_IO | VM_PFNMAP;
        vma->vm_page_prot = pgprot_noncached_wc(vma->vm_page_prot);

        vma->vm_ops = &spufs_mem_mmap_vmops;
        return 0;
}
#ifdef CONFIG_SPU_FS_64K_LS
static unsigned long spufs_get_unmapped_area(struct file *file,
                unsigned long addr, unsigned long len, unsigned long pgoff,
                unsigned long flags)
{
        struct spu_context      *ctx = file->private_data;
        struct spu_state        *csa = &ctx->csa;

        /* If not using big pages, fallback to normal MM g_u_a */
        if (!csa->use_big_pages)
                return current->mm->get_unmapped_area(file, addr, len,
                                                      pgoff, flags);

        /* Else, try to obtain a 64K pages slice */
        return slice_get_unmapped_area(addr, len, flags,
                                       MMU_PAGE_64K, 1, 0);
}
#endif /* CONFIG_SPU_FS_64K_LS */
static const struct file_operations spufs_mem_fops = {
        .open                   = spufs_mem_open,
        .release                = spufs_mem_release,
        .read                   = spufs_mem_read,
        .write                  = spufs_mem_write,
        .llseek                 = generic_file_llseek,
        .mmap                   = spufs_mem_mmap,
#ifdef CONFIG_SPU_FS_64K_LS
        .get_unmapped_area      = spufs_get_unmapped_area,
#endif
};
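
/*
 * Illustrative sketch (not part of this file): mapping the SPU local
 * store through the "mem" file from user space.  The context path is
 * made up; LS_SIZE is the 256KB local store size.  The mapping must be
 * MAP_SHARED, since spufs_mem_mmap() above rejects private mappings;
 * faults are then served by spufs_mem_mmap_fault() from either the
 * saved context save area or the physical local store, depending on
 * whether the context is currently loaded on an SPU.
 *
 *	int fd = open("/spu/my_ctx/mem", O_RDWR);
 *	void *ls = mmap(NULL, LS_SIZE, PROT_READ | PROT_WRITE,
 *			MAP_SHARED, fd, 0);
 */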
377 static int spufs_ps_fault(struct vm_area_struct
*vma
,
378 struct vm_fault
*vmf
,
379 unsigned long ps_offs
,
380 unsigned long ps_size
)
382 struct spu_context
*ctx
= vma
->vm_file
->private_data
;
383 unsigned long area
, offset
= vmf
->pgoff
<< PAGE_SHIFT
;
386 spu_context_nospu_trace(spufs_ps_fault__enter
, ctx
);
388 if (offset
>= ps_size
)
389 return VM_FAULT_SIGBUS
;
391 if (fatal_signal_pending(current
))
392 return VM_FAULT_SIGBUS
;
395 * Because we release the mmap_sem, the context may be destroyed while
396 * we're in spu_wait. Grab an extra reference so it isn't destroyed
399 get_spu_context(ctx
);
402 * We have to wait for context to be loaded before we have
403 * pages to hand out to the user, but we don't want to wait
404 * with the mmap_sem held.
405 * It is possible to drop the mmap_sem here, but then we need
406 * to return VM_FAULT_NOPAGE because the mappings may have
409 if (spu_acquire(ctx
))
412 if (ctx
->state
== SPU_STATE_SAVED
) {
413 up_read(¤t
->mm
->mmap_sem
);
414 spu_context_nospu_trace(spufs_ps_fault__sleep
, ctx
);
415 ret
= spufs_wait(ctx
->run_wq
, ctx
->state
== SPU_STATE_RUNNABLE
);
416 spu_context_trace(spufs_ps_fault__wake
, ctx
, ctx
->spu
);
417 down_read(¤t
->mm
->mmap_sem
);
419 area
= ctx
->spu
->problem_phys
+ ps_offs
;
420 vm_insert_pfn(vma
, (unsigned long)vmf
->virtual_address
,
421 (area
+ offset
) >> PAGE_SHIFT
);
422 spu_context_trace(spufs_ps_fault__insert
, ctx
, ctx
->spu
);
429 put_spu_context(ctx
);
430 return VM_FAULT_NOPAGE
;
434 static int spufs_cntl_mmap_fault(struct vm_area_struct
*vma
,
435 struct vm_fault
*vmf
)
437 return spufs_ps_fault(vma
, vmf
, 0x4000, SPUFS_CNTL_MAP_SIZE
);
440 static const struct vm_operations_struct spufs_cntl_mmap_vmops
= {
441 .fault
= spufs_cntl_mmap_fault
,
445 * mmap support for problem state control area [0x4000 - 0x4fff].
447 static int spufs_cntl_mmap(struct file
*file
, struct vm_area_struct
*vma
)
449 if (!(vma
->vm_flags
& VM_SHARED
))
452 vma
->vm_flags
|= VM_IO
| VM_PFNMAP
;
453 vma
->vm_page_prot
= pgprot_noncached(vma
->vm_page_prot
);
455 vma
->vm_ops
= &spufs_cntl_mmap_vmops
;
458 #else /* SPUFS_MMAP_4K */
459 #define spufs_cntl_mmap NULL
460 #endif /* !SPUFS_MMAP_4K */
462 static int spufs_cntl_get(void *data
, u64
*val
)
464 struct spu_context
*ctx
= data
;
467 ret
= spu_acquire(ctx
);
470 *val
= ctx
->ops
->status_read(ctx
);
476 static int spufs_cntl_set(void *data
, u64 val
)
478 struct spu_context
*ctx
= data
;
481 ret
= spu_acquire(ctx
);
484 ctx
->ops
->runcntl_write(ctx
, val
);
490 static int spufs_cntl_open(struct inode
*inode
, struct file
*file
)
492 struct spufs_inode_info
*i
= SPUFS_I(inode
);
493 struct spu_context
*ctx
= i
->i_ctx
;
495 mutex_lock(&ctx
->mapping_lock
);
496 file
->private_data
= ctx
;
498 ctx
->cntl
= inode
->i_mapping
;
499 mutex_unlock(&ctx
->mapping_lock
);
500 return simple_attr_open(inode
, file
, spufs_cntl_get
,
501 spufs_cntl_set
, "0x%08lx");
505 spufs_cntl_release(struct inode
*inode
, struct file
*file
)
507 struct spufs_inode_info
*i
= SPUFS_I(inode
);
508 struct spu_context
*ctx
= i
->i_ctx
;
510 simple_attr_release(inode
, file
);
512 mutex_lock(&ctx
->mapping_lock
);
515 mutex_unlock(&ctx
->mapping_lock
);
519 static const struct file_operations spufs_cntl_fops
= {
520 .open
= spufs_cntl_open
,
521 .release
= spufs_cntl_release
,
522 .read
= simple_attr_read
,
523 .write
= simple_attr_write
,
524 .mmap
= spufs_cntl_mmap
,
528 spufs_regs_open(struct inode
*inode
, struct file
*file
)
530 struct spufs_inode_info
*i
= SPUFS_I(inode
);
531 file
->private_data
= i
->i_ctx
;
536 __spufs_regs_read(struct spu_context
*ctx
, char __user
*buffer
,
537 size_t size
, loff_t
*pos
)
539 struct spu_lscsa
*lscsa
= ctx
->csa
.lscsa
;
540 return simple_read_from_buffer(buffer
, size
, pos
,
541 lscsa
->gprs
, sizeof lscsa
->gprs
);
545 spufs_regs_read(struct file
*file
, char __user
*buffer
,
546 size_t size
, loff_t
*pos
)
549 struct spu_context
*ctx
= file
->private_data
;
551 /* pre-check for file position: if we'd return EOF, there's no point
552 * causing a deschedule */
553 if (*pos
>= sizeof(ctx
->csa
.lscsa
->gprs
))
556 ret
= spu_acquire_saved(ctx
);
559 ret
= __spufs_regs_read(ctx
, buffer
, size
, pos
);
560 spu_release_saved(ctx
);
565 spufs_regs_write(struct file
*file
, const char __user
*buffer
,
566 size_t size
, loff_t
*pos
)
568 struct spu_context
*ctx
= file
->private_data
;
569 struct spu_lscsa
*lscsa
= ctx
->csa
.lscsa
;
572 if (*pos
>= sizeof(lscsa
->gprs
))
575 size
= min_t(ssize_t
, sizeof(lscsa
->gprs
) - *pos
, size
);
578 ret
= spu_acquire_saved(ctx
);
582 ret
= copy_from_user((char *)lscsa
->gprs
+ *pos
- size
,
583 buffer
, size
) ? -EFAULT
: size
;
585 spu_release_saved(ctx
);
589 static const struct file_operations spufs_regs_fops
= {
590 .open
= spufs_regs_open
,
591 .read
= spufs_regs_read
,
592 .write
= spufs_regs_write
,
593 .llseek
= generic_file_llseek
,
597 __spufs_fpcr_read(struct spu_context
*ctx
, char __user
* buffer
,
598 size_t size
, loff_t
* pos
)
600 struct spu_lscsa
*lscsa
= ctx
->csa
.lscsa
;
601 return simple_read_from_buffer(buffer
, size
, pos
,
602 &lscsa
->fpcr
, sizeof(lscsa
->fpcr
));
606 spufs_fpcr_read(struct file
*file
, char __user
* buffer
,
607 size_t size
, loff_t
* pos
)
610 struct spu_context
*ctx
= file
->private_data
;
612 ret
= spu_acquire_saved(ctx
);
615 ret
= __spufs_fpcr_read(ctx
, buffer
, size
, pos
);
616 spu_release_saved(ctx
);
621 spufs_fpcr_write(struct file
*file
, const char __user
* buffer
,
622 size_t size
, loff_t
* pos
)
624 struct spu_context
*ctx
= file
->private_data
;
625 struct spu_lscsa
*lscsa
= ctx
->csa
.lscsa
;
628 if (*pos
>= sizeof(lscsa
->fpcr
))
631 size
= min_t(ssize_t
, sizeof(lscsa
->fpcr
) - *pos
, size
);
633 ret
= spu_acquire_saved(ctx
);
638 ret
= copy_from_user((char *)&lscsa
->fpcr
+ *pos
- size
,
639 buffer
, size
) ? -EFAULT
: size
;
641 spu_release_saved(ctx
);
645 static const struct file_operations spufs_fpcr_fops
= {
646 .open
= spufs_regs_open
,
647 .read
= spufs_fpcr_read
,
648 .write
= spufs_fpcr_write
,
649 .llseek
= generic_file_llseek
,
652 /* generic open function for all pipe-like files */
653 static int spufs_pipe_open(struct inode
*inode
, struct file
*file
)
655 struct spufs_inode_info
*i
= SPUFS_I(inode
);
656 file
->private_data
= i
->i_ctx
;
658 return nonseekable_open(inode
, file
);
662 * Read as many bytes from the mailbox as possible, until
663 * one of the conditions becomes true:
665 * - no more data available in the mailbox
666 * - end of the user provided buffer
667 * - end of the mapped area
669 static ssize_t
spufs_mbox_read(struct file
*file
, char __user
*buf
,
670 size_t len
, loff_t
*pos
)
672 struct spu_context
*ctx
= file
->private_data
;
673 u32 mbox_data
, __user
*udata
;
679 if (!access_ok(VERIFY_WRITE
, buf
, len
))
682 udata
= (void __user
*)buf
;
684 count
= spu_acquire(ctx
);
688 for (count
= 0; (count
+ 4) <= len
; count
+= 4, udata
++) {
690 ret
= ctx
->ops
->mbox_read(ctx
, &mbox_data
);
695 * at the end of the mapped area, we can fault
696 * but still need to return the data we have
697 * read successfully so far.
699 ret
= __put_user(mbox_data
, udata
);
static const struct file_operations spufs_mbox_fops = {
        .open   = spufs_pipe_open,
        .read   = spufs_mbox_read,
};
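
/*
 * Illustrative sketch (not part of this file): draining the SPU-to-PPU
 * mailbox via the "mbox" file.  read() does not block; it returns as
 * many 4-byte entries as are currently pending and fails with EAGAIN
 * when the mailbox is empty.  The context path is made up.
 *
 *	uint32_t data[16];
 *	int fd = open("/spu/my_ctx/mbox", O_RDONLY);
 *	ssize_t n = read(fd, data, sizeof(data));
 *	-- n > 0: n is a multiple of 4, data[0..n/4-1] hold mailbox words
 *	-- n < 0 with errno == EAGAIN: the mailbox was empty
 */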
719 static ssize_t
spufs_mbox_stat_read(struct file
*file
, char __user
*buf
,
720 size_t len
, loff_t
*pos
)
722 struct spu_context
*ctx
= file
->private_data
;
729 ret
= spu_acquire(ctx
);
733 mbox_stat
= ctx
->ops
->mbox_stat_read(ctx
) & 0xff;
737 if (copy_to_user(buf
, &mbox_stat
, sizeof mbox_stat
))
743 static const struct file_operations spufs_mbox_stat_fops
= {
744 .open
= spufs_pipe_open
,
745 .read
= spufs_mbox_stat_read
,
748 /* low-level ibox access function */
749 size_t spu_ibox_read(struct spu_context
*ctx
, u32
*data
)
751 return ctx
->ops
->ibox_read(ctx
, data
);
754 static int spufs_ibox_fasync(int fd
, struct file
*file
, int on
)
756 struct spu_context
*ctx
= file
->private_data
;
758 return fasync_helper(fd
, file
, on
, &ctx
->ibox_fasync
);
761 /* interrupt-level ibox callback function. */
762 void spufs_ibox_callback(struct spu
*spu
)
764 struct spu_context
*ctx
= spu
->ctx
;
769 wake_up_all(&ctx
->ibox_wq
);
770 kill_fasync(&ctx
->ibox_fasync
, SIGIO
, POLLIN
);
/*
 * Read as many bytes from the interrupt mailbox as possible, until
 * one of the conditions becomes true:
 *
 * - no more data available in the mailbox
 * - end of the user provided buffer
 * - end of the mapped area
 *
 * If the file is opened without O_NONBLOCK, we wait here until
 * any data is available, but return when we have been able to
 * read something.
 */
785 static ssize_t
spufs_ibox_read(struct file
*file
, char __user
*buf
,
786 size_t len
, loff_t
*pos
)
788 struct spu_context
*ctx
= file
->private_data
;
789 u32 ibox_data
, __user
*udata
;
795 if (!access_ok(VERIFY_WRITE
, buf
, len
))
798 udata
= (void __user
*)buf
;
800 count
= spu_acquire(ctx
);
804 /* wait only for the first element */
806 if (file
->f_flags
& O_NONBLOCK
) {
807 if (!spu_ibox_read(ctx
, &ibox_data
)) {
812 count
= spufs_wait(ctx
->ibox_wq
, spu_ibox_read(ctx
, &ibox_data
));
817 /* if we can't write at all, return -EFAULT */
818 count
= __put_user(ibox_data
, udata
);
822 for (count
= 4, udata
++; (count
+ 4) <= len
; count
+= 4, udata
++) {
824 ret
= ctx
->ops
->ibox_read(ctx
, &ibox_data
);
828 * at the end of the mapped area, we can fault
829 * but still need to return the data we have
830 * read successfully so far.
832 ret
= __put_user(ibox_data
, udata
);
843 static unsigned int spufs_ibox_poll(struct file
*file
, poll_table
*wait
)
845 struct spu_context
*ctx
= file
->private_data
;
848 poll_wait(file
, &ctx
->ibox_wq
, wait
);
851 * For now keep this uninterruptible and also ignore the rule
852 * that poll should not sleep. Will be fixed later.
854 mutex_lock(&ctx
->state_mutex
);
855 mask
= ctx
->ops
->mbox_stat_poll(ctx
, POLLIN
| POLLRDNORM
);
861 static const struct file_operations spufs_ibox_fops
= {
862 .open
= spufs_pipe_open
,
863 .read
= spufs_ibox_read
,
864 .poll
= spufs_ibox_poll
,
865 .fasync
= spufs_ibox_fasync
,
868 static ssize_t
spufs_ibox_stat_read(struct file
*file
, char __user
*buf
,
869 size_t len
, loff_t
*pos
)
871 struct spu_context
*ctx
= file
->private_data
;
878 ret
= spu_acquire(ctx
);
881 ibox_stat
= (ctx
->ops
->mbox_stat_read(ctx
) >> 16) & 0xff;
884 if (copy_to_user(buf
, &ibox_stat
, sizeof ibox_stat
))
890 static const struct file_operations spufs_ibox_stat_fops
= {
891 .open
= spufs_pipe_open
,
892 .read
= spufs_ibox_stat_read
,
895 /* low-level mailbox write */
896 size_t spu_wbox_write(struct spu_context
*ctx
, u32 data
)
898 return ctx
->ops
->wbox_write(ctx
, data
);
901 static int spufs_wbox_fasync(int fd
, struct file
*file
, int on
)
903 struct spu_context
*ctx
= file
->private_data
;
906 ret
= fasync_helper(fd
, file
, on
, &ctx
->wbox_fasync
);
911 /* interrupt-level wbox callback function. */
912 void spufs_wbox_callback(struct spu
*spu
)
914 struct spu_context
*ctx
= spu
->ctx
;
919 wake_up_all(&ctx
->wbox_wq
);
920 kill_fasync(&ctx
->wbox_fasync
, SIGIO
, POLLOUT
);
/*
 * Write as many bytes to the interrupt mailbox as possible, until
 * one of the conditions becomes true:
 *
 * - the mailbox is full
 * - end of the user provided buffer
 * - end of the mapped area
 *
 * If the file is opened without O_NONBLOCK, we wait here until
 * space is available, but return when we have been able to
 * write something.
 */
935 static ssize_t
spufs_wbox_write(struct file
*file
, const char __user
*buf
,
936 size_t len
, loff_t
*pos
)
938 struct spu_context
*ctx
= file
->private_data
;
939 u32 wbox_data
, __user
*udata
;
945 udata
= (void __user
*)buf
;
946 if (!access_ok(VERIFY_READ
, buf
, len
))
949 if (__get_user(wbox_data
, udata
))
952 count
= spu_acquire(ctx
);
957 * make sure we can at least write one element, by waiting
958 * in case of !O_NONBLOCK
961 if (file
->f_flags
& O_NONBLOCK
) {
962 if (!spu_wbox_write(ctx
, wbox_data
)) {
967 count
= spufs_wait(ctx
->wbox_wq
, spu_wbox_write(ctx
, wbox_data
));
973 /* write as much as possible */
974 for (count
= 4, udata
++; (count
+ 4) <= len
; count
+= 4, udata
++) {
976 ret
= __get_user(wbox_data
, udata
);
980 ret
= spu_wbox_write(ctx
, wbox_data
);
991 static unsigned int spufs_wbox_poll(struct file
*file
, poll_table
*wait
)
993 struct spu_context
*ctx
= file
->private_data
;
996 poll_wait(file
, &ctx
->wbox_wq
, wait
);
999 * For now keep this uninterruptible and also ignore the rule
1000 * that poll should not sleep. Will be fixed later.
1002 mutex_lock(&ctx
->state_mutex
);
1003 mask
= ctx
->ops
->mbox_stat_poll(ctx
, POLLOUT
| POLLWRNORM
);
static const struct file_operations spufs_wbox_fops = {
        .open   = spufs_pipe_open,
        .write  = spufs_wbox_write,
        .poll   = spufs_wbox_poll,
        .fasync = spufs_wbox_fasync,
};
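
/*
 * Illustrative sketch (not part of this file): feeding the PPU-to-SPU
 * mailbox via the "wbox" file.  Data is consumed in 4-byte words; with
 * O_NONBLOCK the write fails with EAGAIN when the mailbox is full,
 * otherwise it sleeps until at least one word could be queued.  poll()
 * reports POLLOUT once there is space again.  The context path is
 * made up.
 *
 *	uint32_t word = 0x12345678;
 *	int fd = open("/spu/my_ctx/wbox", O_WRONLY | O_NONBLOCK);
 *	struct pollfd pfd = { .fd = fd, .events = POLLOUT };
 *	poll(&pfd, 1, -1);			-- wait for free space
 *	write(fd, &word, sizeof(word));		-- queue one mailbox word
 */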
1016 static ssize_t
spufs_wbox_stat_read(struct file
*file
, char __user
*buf
,
1017 size_t len
, loff_t
*pos
)
1019 struct spu_context
*ctx
= file
->private_data
;
1026 ret
= spu_acquire(ctx
);
1029 wbox_stat
= (ctx
->ops
->mbox_stat_read(ctx
) >> 8) & 0xff;
1032 if (copy_to_user(buf
, &wbox_stat
, sizeof wbox_stat
))
1038 static const struct file_operations spufs_wbox_stat_fops
= {
1039 .open
= spufs_pipe_open
,
1040 .read
= spufs_wbox_stat_read
,
1043 static int spufs_signal1_open(struct inode
*inode
, struct file
*file
)
1045 struct spufs_inode_info
*i
= SPUFS_I(inode
);
1046 struct spu_context
*ctx
= i
->i_ctx
;
1048 mutex_lock(&ctx
->mapping_lock
);
1049 file
->private_data
= ctx
;
1050 if (!i
->i_openers
++)
1051 ctx
->signal1
= inode
->i_mapping
;
1052 mutex_unlock(&ctx
->mapping_lock
);
1053 return nonseekable_open(inode
, file
);
1057 spufs_signal1_release(struct inode
*inode
, struct file
*file
)
1059 struct spufs_inode_info
*i
= SPUFS_I(inode
);
1060 struct spu_context
*ctx
= i
->i_ctx
;
1062 mutex_lock(&ctx
->mapping_lock
);
1063 if (!--i
->i_openers
)
1064 ctx
->signal1
= NULL
;
1065 mutex_unlock(&ctx
->mapping_lock
);
1069 static ssize_t
__spufs_signal1_read(struct spu_context
*ctx
, char __user
*buf
,
1070 size_t len
, loff_t
*pos
)
1078 if (ctx
->csa
.spu_chnlcnt_RW
[3]) {
1079 data
= ctx
->csa
.spu_chnldata_RW
[3];
1086 if (copy_to_user(buf
, &data
, 4))
1093 static ssize_t
spufs_signal1_read(struct file
*file
, char __user
*buf
,
1094 size_t len
, loff_t
*pos
)
1097 struct spu_context
*ctx
= file
->private_data
;
1099 ret
= spu_acquire_saved(ctx
);
1102 ret
= __spufs_signal1_read(ctx
, buf
, len
, pos
);
1103 spu_release_saved(ctx
);
1108 static ssize_t
spufs_signal1_write(struct file
*file
, const char __user
*buf
,
1109 size_t len
, loff_t
*pos
)
1111 struct spu_context
*ctx
;
1115 ctx
= file
->private_data
;
1120 if (copy_from_user(&data
, buf
, 4))
1123 ret
= spu_acquire(ctx
);
1126 ctx
->ops
->signal1_write(ctx
, data
);
1133 spufs_signal1_mmap_fault(struct vm_area_struct
*vma
, struct vm_fault
*vmf
)
1135 #if SPUFS_SIGNAL_MAP_SIZE == 0x1000
1136 return spufs_ps_fault(vma
, vmf
, 0x14000, SPUFS_SIGNAL_MAP_SIZE
);
1137 #elif SPUFS_SIGNAL_MAP_SIZE == 0x10000
1138 /* For 64k pages, both signal1 and signal2 can be used to mmap the whole
1139 * signal 1 and 2 area
1141 return spufs_ps_fault(vma
, vmf
, 0x10000, SPUFS_SIGNAL_MAP_SIZE
);
1143 #error unsupported page size
1147 static const struct vm_operations_struct spufs_signal1_mmap_vmops
= {
1148 .fault
= spufs_signal1_mmap_fault
,
1151 static int spufs_signal1_mmap(struct file
*file
, struct vm_area_struct
*vma
)
1153 if (!(vma
->vm_flags
& VM_SHARED
))
1156 vma
->vm_flags
|= VM_IO
| VM_PFNMAP
;
1157 vma
->vm_page_prot
= pgprot_noncached(vma
->vm_page_prot
);
1159 vma
->vm_ops
= &spufs_signal1_mmap_vmops
;
1163 static const struct file_operations spufs_signal1_fops
= {
1164 .open
= spufs_signal1_open
,
1165 .release
= spufs_signal1_release
,
1166 .read
= spufs_signal1_read
,
1167 .write
= spufs_signal1_write
,
1168 .mmap
= spufs_signal1_mmap
,
1171 static const struct file_operations spufs_signal1_nosched_fops
= {
1172 .open
= spufs_signal1_open
,
1173 .release
= spufs_signal1_release
,
1174 .write
= spufs_signal1_write
,
1175 .mmap
= spufs_signal1_mmap
,
1178 static int spufs_signal2_open(struct inode
*inode
, struct file
*file
)
1180 struct spufs_inode_info
*i
= SPUFS_I(inode
);
1181 struct spu_context
*ctx
= i
->i_ctx
;
1183 mutex_lock(&ctx
->mapping_lock
);
1184 file
->private_data
= ctx
;
1185 if (!i
->i_openers
++)
1186 ctx
->signal2
= inode
->i_mapping
;
1187 mutex_unlock(&ctx
->mapping_lock
);
1188 return nonseekable_open(inode
, file
);
1192 spufs_signal2_release(struct inode
*inode
, struct file
*file
)
1194 struct spufs_inode_info
*i
= SPUFS_I(inode
);
1195 struct spu_context
*ctx
= i
->i_ctx
;
1197 mutex_lock(&ctx
->mapping_lock
);
1198 if (!--i
->i_openers
)
1199 ctx
->signal2
= NULL
;
1200 mutex_unlock(&ctx
->mapping_lock
);
1204 static ssize_t
__spufs_signal2_read(struct spu_context
*ctx
, char __user
*buf
,
1205 size_t len
, loff_t
*pos
)
1213 if (ctx
->csa
.spu_chnlcnt_RW
[4]) {
1214 data
= ctx
->csa
.spu_chnldata_RW
[4];
1221 if (copy_to_user(buf
, &data
, 4))
1228 static ssize_t
spufs_signal2_read(struct file
*file
, char __user
*buf
,
1229 size_t len
, loff_t
*pos
)
1231 struct spu_context
*ctx
= file
->private_data
;
1234 ret
= spu_acquire_saved(ctx
);
1237 ret
= __spufs_signal2_read(ctx
, buf
, len
, pos
);
1238 spu_release_saved(ctx
);
1243 static ssize_t
spufs_signal2_write(struct file
*file
, const char __user
*buf
,
1244 size_t len
, loff_t
*pos
)
1246 struct spu_context
*ctx
;
1250 ctx
= file
->private_data
;
1255 if (copy_from_user(&data
, buf
, 4))
1258 ret
= spu_acquire(ctx
);
1261 ctx
->ops
->signal2_write(ctx
, data
);
1269 spufs_signal2_mmap_fault(struct vm_area_struct
*vma
, struct vm_fault
*vmf
)
1271 #if SPUFS_SIGNAL_MAP_SIZE == 0x1000
1272 return spufs_ps_fault(vma
, vmf
, 0x1c000, SPUFS_SIGNAL_MAP_SIZE
);
1273 #elif SPUFS_SIGNAL_MAP_SIZE == 0x10000
1274 /* For 64k pages, both signal1 and signal2 can be used to mmap the whole
1275 * signal 1 and 2 area
1277 return spufs_ps_fault(vma
, vmf
, 0x10000, SPUFS_SIGNAL_MAP_SIZE
);
1279 #error unsupported page size
1283 static const struct vm_operations_struct spufs_signal2_mmap_vmops
= {
1284 .fault
= spufs_signal2_mmap_fault
,
1287 static int spufs_signal2_mmap(struct file
*file
, struct vm_area_struct
*vma
)
1289 if (!(vma
->vm_flags
& VM_SHARED
))
1292 vma
->vm_flags
|= VM_IO
| VM_PFNMAP
;
1293 vma
->vm_page_prot
= pgprot_noncached(vma
->vm_page_prot
);
1295 vma
->vm_ops
= &spufs_signal2_mmap_vmops
;
1298 #else /* SPUFS_MMAP_4K */
1299 #define spufs_signal2_mmap NULL
1300 #endif /* !SPUFS_MMAP_4K */
1302 static const struct file_operations spufs_signal2_fops
= {
1303 .open
= spufs_signal2_open
,
1304 .release
= spufs_signal2_release
,
1305 .read
= spufs_signal2_read
,
1306 .write
= spufs_signal2_write
,
1307 .mmap
= spufs_signal2_mmap
,
1310 static const struct file_operations spufs_signal2_nosched_fops
= {
1311 .open
= spufs_signal2_open
,
1312 .release
= spufs_signal2_release
,
1313 .write
= spufs_signal2_write
,
1314 .mmap
= spufs_signal2_mmap
,
/*
 * This is a wrapper around DEFINE_SIMPLE_ATTRIBUTE which does the
 * work of acquiring (or not) the SPU context before calling through
 * to the actual get routine. The set routine is called directly.
 */
#define SPU_ATTR_NOACQUIRE      0
#define SPU_ATTR_ACQUIRE        1
#define SPU_ATTR_ACQUIRE_SAVED  2

#define DEFINE_SPUFS_ATTRIBUTE(__name, __get, __set, __fmt, __acquire)  \
static int __##__get(void *data, u64 *val)                              \
{                                                                       \
        struct spu_context *ctx = data;                                 \
        int ret = 0;                                                    \
                                                                        \
        if (__acquire == SPU_ATTR_ACQUIRE) {                            \
                ret = spu_acquire(ctx);                                 \
                if (ret)                                                \
                        return ret;                                     \
                *val = __get(ctx);                                      \
                spu_release(ctx);                                       \
        } else if (__acquire == SPU_ATTR_ACQUIRE_SAVED) {               \
                ret = spu_acquire_saved(ctx);                           \
                if (ret)                                                \
                        return ret;                                     \
                *val = __get(ctx);                                      \
                spu_release_saved(ctx);                                 \
        } else                                                          \
                *val = __get(ctx);                                      \
                                                                        \
        return 0;                                                       \
}                                                                       \
DEFINE_SPUFS_SIMPLE_ATTRIBUTE(__name, __##__get, __set, __fmt);
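
/*
 * For example (illustrative; the actual instantiation appears further
 * down in this file):
 *
 *	DEFINE_SPUFS_ATTRIBUTE(spufs_npc_ops, spufs_npc_get, spufs_npc_set,
 *			       "0x%llx\n", SPU_ATTR_ACQUIRE);
 *
 * generates a __spufs_npc_get() wrapper that brackets spufs_npc_get()
 * with spu_acquire()/spu_release(), and then emits the file_operations
 * "spufs_npc_ops" through DEFINE_SPUFS_SIMPLE_ATTRIBUTE.  Set routines
 * such as spufs_npc_set() do their own acquire/release, which is why
 * the macro passes them through unchanged.
 */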
1351 static int spufs_signal1_type_set(void *data
, u64 val
)
1353 struct spu_context
*ctx
= data
;
1356 ret
= spu_acquire(ctx
);
1359 ctx
->ops
->signal1_type_set(ctx
, val
);
1365 static u64
spufs_signal1_type_get(struct spu_context
*ctx
)
1367 return ctx
->ops
->signal1_type_get(ctx
);
1369 DEFINE_SPUFS_ATTRIBUTE(spufs_signal1_type
, spufs_signal1_type_get
,
1370 spufs_signal1_type_set
, "%llu\n", SPU_ATTR_ACQUIRE
);
1373 static int spufs_signal2_type_set(void *data
, u64 val
)
1375 struct spu_context
*ctx
= data
;
1378 ret
= spu_acquire(ctx
);
1381 ctx
->ops
->signal2_type_set(ctx
, val
);
1387 static u64
spufs_signal2_type_get(struct spu_context
*ctx
)
1389 return ctx
->ops
->signal2_type_get(ctx
);
1391 DEFINE_SPUFS_ATTRIBUTE(spufs_signal2_type
, spufs_signal2_type_get
,
1392 spufs_signal2_type_set
, "%llu\n", SPU_ATTR_ACQUIRE
);
1396 spufs_mss_mmap_fault(struct vm_area_struct
*vma
, struct vm_fault
*vmf
)
1398 return spufs_ps_fault(vma
, vmf
, 0x0000, SPUFS_MSS_MAP_SIZE
);
1401 static const struct vm_operations_struct spufs_mss_mmap_vmops
= {
1402 .fault
= spufs_mss_mmap_fault
,
1406 * mmap support for problem state MFC DMA area [0x0000 - 0x0fff].
1408 static int spufs_mss_mmap(struct file
*file
, struct vm_area_struct
*vma
)
1410 if (!(vma
->vm_flags
& VM_SHARED
))
1413 vma
->vm_flags
|= VM_IO
| VM_PFNMAP
;
1414 vma
->vm_page_prot
= pgprot_noncached(vma
->vm_page_prot
);
1416 vma
->vm_ops
= &spufs_mss_mmap_vmops
;
1419 #else /* SPUFS_MMAP_4K */
1420 #define spufs_mss_mmap NULL
1421 #endif /* !SPUFS_MMAP_4K */
1423 static int spufs_mss_open(struct inode
*inode
, struct file
*file
)
1425 struct spufs_inode_info
*i
= SPUFS_I(inode
);
1426 struct spu_context
*ctx
= i
->i_ctx
;
1428 file
->private_data
= i
->i_ctx
;
1430 mutex_lock(&ctx
->mapping_lock
);
1431 if (!i
->i_openers
++)
1432 ctx
->mss
= inode
->i_mapping
;
1433 mutex_unlock(&ctx
->mapping_lock
);
1434 return nonseekable_open(inode
, file
);
1438 spufs_mss_release(struct inode
*inode
, struct file
*file
)
1440 struct spufs_inode_info
*i
= SPUFS_I(inode
);
1441 struct spu_context
*ctx
= i
->i_ctx
;
1443 mutex_lock(&ctx
->mapping_lock
);
1444 if (!--i
->i_openers
)
1446 mutex_unlock(&ctx
->mapping_lock
);
1450 static const struct file_operations spufs_mss_fops
= {
1451 .open
= spufs_mss_open
,
1452 .release
= spufs_mss_release
,
1453 .mmap
= spufs_mss_mmap
,
1457 spufs_psmap_mmap_fault(struct vm_area_struct
*vma
, struct vm_fault
*vmf
)
1459 return spufs_ps_fault(vma
, vmf
, 0x0000, SPUFS_PS_MAP_SIZE
);
1462 static const struct vm_operations_struct spufs_psmap_mmap_vmops
= {
1463 .fault
= spufs_psmap_mmap_fault
,
1467 * mmap support for full problem state area [0x00000 - 0x1ffff].
1469 static int spufs_psmap_mmap(struct file
*file
, struct vm_area_struct
*vma
)
1471 if (!(vma
->vm_flags
& VM_SHARED
))
1474 vma
->vm_flags
|= VM_IO
| VM_PFNMAP
;
1475 vma
->vm_page_prot
= pgprot_noncached(vma
->vm_page_prot
);
1477 vma
->vm_ops
= &spufs_psmap_mmap_vmops
;
1481 static int spufs_psmap_open(struct inode
*inode
, struct file
*file
)
1483 struct spufs_inode_info
*i
= SPUFS_I(inode
);
1484 struct spu_context
*ctx
= i
->i_ctx
;
1486 mutex_lock(&ctx
->mapping_lock
);
1487 file
->private_data
= i
->i_ctx
;
1488 if (!i
->i_openers
++)
1489 ctx
->psmap
= inode
->i_mapping
;
1490 mutex_unlock(&ctx
->mapping_lock
);
1491 return nonseekable_open(inode
, file
);
1495 spufs_psmap_release(struct inode
*inode
, struct file
*file
)
1497 struct spufs_inode_info
*i
= SPUFS_I(inode
);
1498 struct spu_context
*ctx
= i
->i_ctx
;
1500 mutex_lock(&ctx
->mapping_lock
);
1501 if (!--i
->i_openers
)
1503 mutex_unlock(&ctx
->mapping_lock
);
1507 static const struct file_operations spufs_psmap_fops
= {
1508 .open
= spufs_psmap_open
,
1509 .release
= spufs_psmap_release
,
1510 .mmap
= spufs_psmap_mmap
,
1516 spufs_mfc_mmap_fault(struct vm_area_struct
*vma
, struct vm_fault
*vmf
)
1518 return spufs_ps_fault(vma
, vmf
, 0x3000, SPUFS_MFC_MAP_SIZE
);
1521 static const struct vm_operations_struct spufs_mfc_mmap_vmops
= {
1522 .fault
= spufs_mfc_mmap_fault
,
1526 * mmap support for problem state MFC DMA area [0x0000 - 0x0fff].
1528 static int spufs_mfc_mmap(struct file
*file
, struct vm_area_struct
*vma
)
1530 if (!(vma
->vm_flags
& VM_SHARED
))
1533 vma
->vm_flags
|= VM_IO
| VM_PFNMAP
;
1534 vma
->vm_page_prot
= pgprot_noncached(vma
->vm_page_prot
);
1536 vma
->vm_ops
= &spufs_mfc_mmap_vmops
;
1539 #else /* SPUFS_MMAP_4K */
1540 #define spufs_mfc_mmap NULL
1541 #endif /* !SPUFS_MMAP_4K */
1543 static int spufs_mfc_open(struct inode
*inode
, struct file
*file
)
1545 struct spufs_inode_info
*i
= SPUFS_I(inode
);
1546 struct spu_context
*ctx
= i
->i_ctx
;
1548 /* we don't want to deal with DMA into other processes */
1549 if (ctx
->owner
!= current
->mm
)
1552 if (atomic_read(&inode
->i_count
) != 1)
1555 mutex_lock(&ctx
->mapping_lock
);
1556 file
->private_data
= ctx
;
1557 if (!i
->i_openers
++)
1558 ctx
->mfc
= inode
->i_mapping
;
1559 mutex_unlock(&ctx
->mapping_lock
);
1560 return nonseekable_open(inode
, file
);
1564 spufs_mfc_release(struct inode
*inode
, struct file
*file
)
1566 struct spufs_inode_info
*i
= SPUFS_I(inode
);
1567 struct spu_context
*ctx
= i
->i_ctx
;
1569 mutex_lock(&ctx
->mapping_lock
);
1570 if (!--i
->i_openers
)
1572 mutex_unlock(&ctx
->mapping_lock
);
1576 /* interrupt-level mfc callback function. */
1577 void spufs_mfc_callback(struct spu
*spu
)
1579 struct spu_context
*ctx
= spu
->ctx
;
1584 wake_up_all(&ctx
->mfc_wq
);
1586 pr_debug("%s %s\n", __func__
, spu
->name
);
1587 if (ctx
->mfc_fasync
) {
1588 u32 free_elements
, tagstatus
;
1591 /* no need for spu_acquire in interrupt context */
1592 free_elements
= ctx
->ops
->get_mfc_free_elements(ctx
);
1593 tagstatus
= ctx
->ops
->read_mfc_tagstatus(ctx
);
1596 if (free_elements
& 0xffff)
1598 if (tagstatus
& ctx
->tagwait
)
1601 kill_fasync(&ctx
->mfc_fasync
, SIGIO
, mask
);
1605 static int spufs_read_mfc_tagstatus(struct spu_context
*ctx
, u32
*status
)
	/* See if at least one tag group is complete */
1608 /* FIXME we need locking around tagwait */
1609 *status
= ctx
->ops
->read_mfc_tagstatus(ctx
) & ctx
->tagwait
;
1610 ctx
->tagwait
&= ~*status
;
1614 /* enable interrupt waiting for any tag group,
1615 may silently fail if interrupts are already enabled */
1616 ctx
->ops
->set_mfc_query(ctx
, ctx
->tagwait
, 1);
1620 static ssize_t
spufs_mfc_read(struct file
*file
, char __user
*buffer
,
1621 size_t size
, loff_t
*pos
)
1623 struct spu_context
*ctx
= file
->private_data
;
1630 ret
= spu_acquire(ctx
);
1635 if (file
->f_flags
& O_NONBLOCK
) {
1636 status
= ctx
->ops
->read_mfc_tagstatus(ctx
);
1637 if (!(status
& ctx
->tagwait
))
1640 /* XXX(hch): shouldn't we clear ret here? */
1641 ctx
->tagwait
&= ~status
;
1643 ret
= spufs_wait(ctx
->mfc_wq
,
1644 spufs_read_mfc_tagstatus(ctx
, &status
));
1651 if (copy_to_user(buffer
, &status
, 4))
1658 static int spufs_check_valid_dma(struct mfc_dma_command
*cmd
)
1660 pr_debug("queueing DMA %x %llx %x %x %x\n", cmd
->lsa
,
1661 cmd
->ea
, cmd
->size
, cmd
->tag
, cmd
->cmd
);
1672 pr_debug("invalid DMA opcode %x\n", cmd
->cmd
);
1676 if ((cmd
->lsa
& 0xf) != (cmd
->ea
&0xf)) {
1677 pr_debug("invalid DMA alignment, ea %llx lsa %x\n",
1682 switch (cmd
->size
& 0xf) {
1703 pr_debug("invalid DMA alignment %x for size %x\n",
1704 cmd
->lsa
& 0xf, cmd
->size
);
1708 if (cmd
->size
> 16 * 1024) {
1709 pr_debug("invalid DMA size %x\n", cmd
->size
);
1713 if (cmd
->tag
& 0xfff0) {
1714 /* we reserve the higher tag numbers for kernel use */
1715 pr_debug("invalid DMA tag\n");
1720 /* not supported in this version */
1721 pr_debug("invalid DMA class\n");
1728 static int spu_send_mfc_command(struct spu_context
*ctx
,
1729 struct mfc_dma_command cmd
,
1732 *error
= ctx
->ops
->send_mfc_command(ctx
, &cmd
);
1733 if (*error
== -EAGAIN
) {
1734 /* wait for any tag group to complete
1735 so we have space for the new command */
1736 ctx
->ops
->set_mfc_query(ctx
, ctx
->tagwait
, 1);
1737 /* try again, because the queue might be
1739 *error
= ctx
->ops
->send_mfc_command(ctx
, &cmd
);
1740 if (*error
== -EAGAIN
)
1746 static ssize_t
spufs_mfc_write(struct file
*file
, const char __user
*buffer
,
1747 size_t size
, loff_t
*pos
)
1749 struct spu_context
*ctx
= file
->private_data
;
1750 struct mfc_dma_command cmd
;
1753 if (size
!= sizeof cmd
)
1757 if (copy_from_user(&cmd
, buffer
, sizeof cmd
))
1760 ret
= spufs_check_valid_dma(&cmd
);
1764 ret
= spu_acquire(ctx
);
1768 ret
= spufs_wait(ctx
->run_wq
, ctx
->state
== SPU_STATE_RUNNABLE
);
1772 if (file
->f_flags
& O_NONBLOCK
) {
1773 ret
= ctx
->ops
->send_mfc_command(ctx
, &cmd
);
1776 ret
= spufs_wait(ctx
->mfc_wq
,
1777 spu_send_mfc_command(ctx
, cmd
, &status
));
1787 ctx
->tagwait
|= 1 << cmd
.tag
;
1796 static unsigned int spufs_mfc_poll(struct file
*file
,poll_table
*wait
)
1798 struct spu_context
*ctx
= file
->private_data
;
1799 u32 free_elements
, tagstatus
;
1802 poll_wait(file
, &ctx
->mfc_wq
, wait
);
1805 * For now keep this uninterruptible and also ignore the rule
1806 * that poll should not sleep. Will be fixed later.
1808 mutex_lock(&ctx
->state_mutex
);
1809 ctx
->ops
->set_mfc_query(ctx
, ctx
->tagwait
, 2);
1810 free_elements
= ctx
->ops
->get_mfc_free_elements(ctx
);
1811 tagstatus
= ctx
->ops
->read_mfc_tagstatus(ctx
);
1815 if (free_elements
& 0xffff)
1816 mask
|= POLLOUT
| POLLWRNORM
;
1817 if (tagstatus
& ctx
->tagwait
)
1818 mask
|= POLLIN
| POLLRDNORM
;
1820 pr_debug("%s: free %d tagstatus %d tagwait %d\n", __func__
,
1821 free_elements
, tagstatus
, ctx
->tagwait
);
1826 static int spufs_mfc_flush(struct file
*file
, fl_owner_t id
)
1828 struct spu_context
*ctx
= file
->private_data
;
1831 ret
= spu_acquire(ctx
);
1835 /* this currently hangs */
1836 ret
= spufs_wait(ctx
->mfc_wq
,
1837 ctx
->ops
->set_mfc_query(ctx
, ctx
->tagwait
, 2));
1840 ret
= spufs_wait(ctx
->mfc_wq
,
1841 ctx
->ops
->read_mfc_tagstatus(ctx
) == ctx
->tagwait
);
1852 static int spufs_mfc_fsync(struct file
*file
, int datasync
)
1854 return spufs_mfc_flush(file
, NULL
);
1857 static int spufs_mfc_fasync(int fd
, struct file
*file
, int on
)
1859 struct spu_context
*ctx
= file
->private_data
;
1861 return fasync_helper(fd
, file
, on
, &ctx
->mfc_fasync
);
static const struct file_operations spufs_mfc_fops = {
        .open    = spufs_mfc_open,
        .release = spufs_mfc_release,
        .read    = spufs_mfc_read,
        .write   = spufs_mfc_write,
        .poll    = spufs_mfc_poll,
        .flush   = spufs_mfc_flush,
        .fsync   = spufs_mfc_fsync,
        .fasync  = spufs_mfc_fasync,
        .mmap    = spufs_mfc_mmap,
};
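
/*
 * Illustrative sketch (not part of this file): queueing a DMA transfer
 * through the "mfc" file from user space.  The command layout is the
 * struct mfc_dma_command that spufs_check_valid_dma() above validates
 * (user space typically carries its own copy of that definition); the
 * local store address, tag and buffer are made up.  Reading four bytes
 * back blocks until a waited-for tag group completes and returns the
 * bitmask of completed tags.
 *
 *	struct mfc_dma_command cmd = {
 *		.lsa  = 0x1000,		-- local store address
 *		.ea   = (uint64_t)buf,	-- low 4 address bits must match .lsa
 *		.size = 16384,		-- at most 16KB per command
 *		.tag  = 5,		-- tags 0-15 are available to users
 *		.cmd  = MFC_PUT_CMD,
 *	};
 *	uint32_t tagstatus;
 *	int fd = open("/spu/my_ctx/mfc", O_RDWR);
 *	write(fd, &cmd, sizeof(cmd));	-- must be exactly sizeof(cmd)
 *	read(fd, &tagstatus, 4);	-- wait for tag-group completion
 */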
1876 static int spufs_npc_set(void *data
, u64 val
)
1878 struct spu_context
*ctx
= data
;
1881 ret
= spu_acquire(ctx
);
1884 ctx
->ops
->npc_write(ctx
, val
);
1890 static u64
spufs_npc_get(struct spu_context
*ctx
)
1892 return ctx
->ops
->npc_read(ctx
);
1894 DEFINE_SPUFS_ATTRIBUTE(spufs_npc_ops
, spufs_npc_get
, spufs_npc_set
,
1895 "0x%llx\n", SPU_ATTR_ACQUIRE
);
1897 static int spufs_decr_set(void *data
, u64 val
)
1899 struct spu_context
*ctx
= data
;
1900 struct spu_lscsa
*lscsa
= ctx
->csa
.lscsa
;
1903 ret
= spu_acquire_saved(ctx
);
1906 lscsa
->decr
.slot
[0] = (u32
) val
;
1907 spu_release_saved(ctx
);
1912 static u64
spufs_decr_get(struct spu_context
*ctx
)
1914 struct spu_lscsa
*lscsa
= ctx
->csa
.lscsa
;
1915 return lscsa
->decr
.slot
[0];
1917 DEFINE_SPUFS_ATTRIBUTE(spufs_decr_ops
, spufs_decr_get
, spufs_decr_set
,
1918 "0x%llx\n", SPU_ATTR_ACQUIRE_SAVED
);
1920 static int spufs_decr_status_set(void *data
, u64 val
)
1922 struct spu_context
*ctx
= data
;
1925 ret
= spu_acquire_saved(ctx
);
1929 ctx
->csa
.priv2
.mfc_control_RW
|= MFC_CNTL_DECREMENTER_RUNNING
;
1931 ctx
->csa
.priv2
.mfc_control_RW
&= ~MFC_CNTL_DECREMENTER_RUNNING
;
1932 spu_release_saved(ctx
);
1937 static u64
spufs_decr_status_get(struct spu_context
*ctx
)
1939 if (ctx
->csa
.priv2
.mfc_control_RW
& MFC_CNTL_DECREMENTER_RUNNING
)
1940 return SPU_DECR_STATUS_RUNNING
;
1944 DEFINE_SPUFS_ATTRIBUTE(spufs_decr_status_ops
, spufs_decr_status_get
,
1945 spufs_decr_status_set
, "0x%llx\n",
1946 SPU_ATTR_ACQUIRE_SAVED
);
1948 static int spufs_event_mask_set(void *data
, u64 val
)
1950 struct spu_context
*ctx
= data
;
1951 struct spu_lscsa
*lscsa
= ctx
->csa
.lscsa
;
1954 ret
= spu_acquire_saved(ctx
);
1957 lscsa
->event_mask
.slot
[0] = (u32
) val
;
1958 spu_release_saved(ctx
);
1963 static u64
spufs_event_mask_get(struct spu_context
*ctx
)
1965 struct spu_lscsa
*lscsa
= ctx
->csa
.lscsa
;
1966 return lscsa
->event_mask
.slot
[0];
1969 DEFINE_SPUFS_ATTRIBUTE(spufs_event_mask_ops
, spufs_event_mask_get
,
1970 spufs_event_mask_set
, "0x%llx\n",
1971 SPU_ATTR_ACQUIRE_SAVED
);
1973 static u64
spufs_event_status_get(struct spu_context
*ctx
)
1975 struct spu_state
*state
= &ctx
->csa
;
1977 stat
= state
->spu_chnlcnt_RW
[0];
1979 return state
->spu_chnldata_RW
[0];
1982 DEFINE_SPUFS_ATTRIBUTE(spufs_event_status_ops
, spufs_event_status_get
,
1983 NULL
, "0x%llx\n", SPU_ATTR_ACQUIRE_SAVED
)
1985 static int spufs_srr0_set(void *data
, u64 val
)
1987 struct spu_context
*ctx
= data
;
1988 struct spu_lscsa
*lscsa
= ctx
->csa
.lscsa
;
1991 ret
= spu_acquire_saved(ctx
);
1994 lscsa
->srr0
.slot
[0] = (u32
) val
;
1995 spu_release_saved(ctx
);
2000 static u64
spufs_srr0_get(struct spu_context
*ctx
)
2002 struct spu_lscsa
*lscsa
= ctx
->csa
.lscsa
;
2003 return lscsa
->srr0
.slot
[0];
2005 DEFINE_SPUFS_ATTRIBUTE(spufs_srr0_ops
, spufs_srr0_get
, spufs_srr0_set
,
2006 "0x%llx\n", SPU_ATTR_ACQUIRE_SAVED
)
2008 static u64
spufs_id_get(struct spu_context
*ctx
)
2012 if (ctx
->state
== SPU_STATE_RUNNABLE
)
2013 num
= ctx
->spu
->number
;
2015 num
= (unsigned int)-1;
2019 DEFINE_SPUFS_ATTRIBUTE(spufs_id_ops
, spufs_id_get
, NULL
, "0x%llx\n",
2022 static u64
spufs_object_id_get(struct spu_context
*ctx
)
2024 /* FIXME: Should there really be no locking here? */
2025 return ctx
->object_id
;
2028 static int spufs_object_id_set(void *data
, u64 id
)
2030 struct spu_context
*ctx
= data
;
2031 ctx
->object_id
= id
;
2036 DEFINE_SPUFS_ATTRIBUTE(spufs_object_id_ops
, spufs_object_id_get
,
2037 spufs_object_id_set
, "0x%llx\n", SPU_ATTR_NOACQUIRE
);
2039 static u64
spufs_lslr_get(struct spu_context
*ctx
)
2041 return ctx
->csa
.priv2
.spu_lslr_RW
;
2043 DEFINE_SPUFS_ATTRIBUTE(spufs_lslr_ops
, spufs_lslr_get
, NULL
, "0x%llx\n",
2044 SPU_ATTR_ACQUIRE_SAVED
);
2046 static int spufs_info_open(struct inode
*inode
, struct file
*file
)
2048 struct spufs_inode_info
*i
= SPUFS_I(inode
);
2049 struct spu_context
*ctx
= i
->i_ctx
;
2050 file
->private_data
= ctx
;
2054 static int spufs_caps_show(struct seq_file
*s
, void *private)
2056 struct spu_context
*ctx
= s
->private;
2058 if (!(ctx
->flags
& SPU_CREATE_NOSCHED
))
2059 seq_puts(s
, "sched\n");
2060 if (!(ctx
->flags
& SPU_CREATE_ISOLATE
))
2061 seq_puts(s
, "step\n");
2065 static int spufs_caps_open(struct inode
*inode
, struct file
*file
)
2067 return single_open(file
, spufs_caps_show
, SPUFS_I(inode
)->i_ctx
);
2070 static const struct file_operations spufs_caps_fops
= {
2071 .open
= spufs_caps_open
,
2073 .llseek
= seq_lseek
,
2074 .release
= single_release
,
2077 static ssize_t
__spufs_mbox_info_read(struct spu_context
*ctx
,
2078 char __user
*buf
, size_t len
, loff_t
*pos
)
2082 /* EOF if there's no entry in the mbox */
2083 if (!(ctx
->csa
.prob
.mb_stat_R
& 0x0000ff))
2086 data
= ctx
->csa
.prob
.pu_mb_R
;
2088 return simple_read_from_buffer(buf
, len
, pos
, &data
, sizeof data
);
2091 static ssize_t
spufs_mbox_info_read(struct file
*file
, char __user
*buf
,
2092 size_t len
, loff_t
*pos
)
2095 struct spu_context
*ctx
= file
->private_data
;
2097 if (!access_ok(VERIFY_WRITE
, buf
, len
))
2100 ret
= spu_acquire_saved(ctx
);
2103 spin_lock(&ctx
->csa
.register_lock
);
2104 ret
= __spufs_mbox_info_read(ctx
, buf
, len
, pos
);
2105 spin_unlock(&ctx
->csa
.register_lock
);
2106 spu_release_saved(ctx
);
2111 static const struct file_operations spufs_mbox_info_fops
= {
2112 .open
= spufs_info_open
,
2113 .read
= spufs_mbox_info_read
,
2114 .llseek
= generic_file_llseek
,
2117 static ssize_t
__spufs_ibox_info_read(struct spu_context
*ctx
,
2118 char __user
*buf
, size_t len
, loff_t
*pos
)
2122 /* EOF if there's no entry in the ibox */
2123 if (!(ctx
->csa
.prob
.mb_stat_R
& 0xff0000))
2126 data
= ctx
->csa
.priv2
.puint_mb_R
;
2128 return simple_read_from_buffer(buf
, len
, pos
, &data
, sizeof data
);
2131 static ssize_t
spufs_ibox_info_read(struct file
*file
, char __user
*buf
,
2132 size_t len
, loff_t
*pos
)
2134 struct spu_context
*ctx
= file
->private_data
;
2137 if (!access_ok(VERIFY_WRITE
, buf
, len
))
2140 ret
= spu_acquire_saved(ctx
);
2143 spin_lock(&ctx
->csa
.register_lock
);
2144 ret
= __spufs_ibox_info_read(ctx
, buf
, len
, pos
);
2145 spin_unlock(&ctx
->csa
.register_lock
);
2146 spu_release_saved(ctx
);
2151 static const struct file_operations spufs_ibox_info_fops
= {
2152 .open
= spufs_info_open
,
2153 .read
= spufs_ibox_info_read
,
2154 .llseek
= generic_file_llseek
,
2157 static ssize_t
__spufs_wbox_info_read(struct spu_context
*ctx
,
2158 char __user
*buf
, size_t len
, loff_t
*pos
)
2164 wbox_stat
= ctx
->csa
.prob
.mb_stat_R
;
2165 cnt
= 4 - ((wbox_stat
& 0x00ff00) >> 8);
2166 for (i
= 0; i
< cnt
; i
++) {
2167 data
[i
] = ctx
->csa
.spu_mailbox_data
[i
];
2170 return simple_read_from_buffer(buf
, len
, pos
, &data
,
2174 static ssize_t
spufs_wbox_info_read(struct file
*file
, char __user
*buf
,
2175 size_t len
, loff_t
*pos
)
2177 struct spu_context
*ctx
= file
->private_data
;
2180 if (!access_ok(VERIFY_WRITE
, buf
, len
))
2183 ret
= spu_acquire_saved(ctx
);
2186 spin_lock(&ctx
->csa
.register_lock
);
2187 ret
= __spufs_wbox_info_read(ctx
, buf
, len
, pos
);
2188 spin_unlock(&ctx
->csa
.register_lock
);
2189 spu_release_saved(ctx
);
2194 static const struct file_operations spufs_wbox_info_fops
= {
2195 .open
= spufs_info_open
,
2196 .read
= spufs_wbox_info_read
,
2197 .llseek
= generic_file_llseek
,
2200 static ssize_t
__spufs_dma_info_read(struct spu_context
*ctx
,
2201 char __user
*buf
, size_t len
, loff_t
*pos
)
2203 struct spu_dma_info info
;
2204 struct mfc_cq_sr
*qp
, *spuqp
;
2207 info
.dma_info_type
= ctx
->csa
.priv2
.spu_tag_status_query_RW
;
2208 info
.dma_info_mask
= ctx
->csa
.lscsa
->tag_mask
.slot
[0];
2209 info
.dma_info_status
= ctx
->csa
.spu_chnldata_RW
[24];
2210 info
.dma_info_stall_and_notify
= ctx
->csa
.spu_chnldata_RW
[25];
2211 info
.dma_info_atomic_command_status
= ctx
->csa
.spu_chnldata_RW
[27];
2212 for (i
= 0; i
< 16; i
++) {
2213 qp
= &info
.dma_info_command_data
[i
];
2214 spuqp
= &ctx
->csa
.priv2
.spuq
[i
];
2216 qp
->mfc_cq_data0_RW
= spuqp
->mfc_cq_data0_RW
;
2217 qp
->mfc_cq_data1_RW
= spuqp
->mfc_cq_data1_RW
;
2218 qp
->mfc_cq_data2_RW
= spuqp
->mfc_cq_data2_RW
;
2219 qp
->mfc_cq_data3_RW
= spuqp
->mfc_cq_data3_RW
;
2222 return simple_read_from_buffer(buf
, len
, pos
, &info
,
2226 static ssize_t
spufs_dma_info_read(struct file
*file
, char __user
*buf
,
2227 size_t len
, loff_t
*pos
)
2229 struct spu_context
*ctx
= file
->private_data
;
2232 if (!access_ok(VERIFY_WRITE
, buf
, len
))
2235 ret
= spu_acquire_saved(ctx
);
2238 spin_lock(&ctx
->csa
.register_lock
);
2239 ret
= __spufs_dma_info_read(ctx
, buf
, len
, pos
);
2240 spin_unlock(&ctx
->csa
.register_lock
);
2241 spu_release_saved(ctx
);
2246 static const struct file_operations spufs_dma_info_fops
= {
2247 .open
= spufs_info_open
,
2248 .read
= spufs_dma_info_read
,
2251 static ssize_t
__spufs_proxydma_info_read(struct spu_context
*ctx
,
2252 char __user
*buf
, size_t len
, loff_t
*pos
)
2254 struct spu_proxydma_info info
;
2255 struct mfc_cq_sr
*qp
, *puqp
;
2256 int ret
= sizeof info
;
2262 if (!access_ok(VERIFY_WRITE
, buf
, len
))
2265 info
.proxydma_info_type
= ctx
->csa
.prob
.dma_querytype_RW
;
2266 info
.proxydma_info_mask
= ctx
->csa
.prob
.dma_querymask_RW
;
2267 info
.proxydma_info_status
= ctx
->csa
.prob
.dma_tagstatus_R
;
2268 for (i
= 0; i
< 8; i
++) {
2269 qp
= &info
.proxydma_info_command_data
[i
];
2270 puqp
= &ctx
->csa
.priv2
.puq
[i
];
2272 qp
->mfc_cq_data0_RW
= puqp
->mfc_cq_data0_RW
;
2273 qp
->mfc_cq_data1_RW
= puqp
->mfc_cq_data1_RW
;
2274 qp
->mfc_cq_data2_RW
= puqp
->mfc_cq_data2_RW
;
2275 qp
->mfc_cq_data3_RW
= puqp
->mfc_cq_data3_RW
;
2278 return simple_read_from_buffer(buf
, len
, pos
, &info
,
2282 static ssize_t
spufs_proxydma_info_read(struct file
*file
, char __user
*buf
,
2283 size_t len
, loff_t
*pos
)
2285 struct spu_context
*ctx
= file
->private_data
;
2288 ret
= spu_acquire_saved(ctx
);
2291 spin_lock(&ctx
->csa
.register_lock
);
2292 ret
= __spufs_proxydma_info_read(ctx
, buf
, len
, pos
);
2293 spin_unlock(&ctx
->csa
.register_lock
);
2294 spu_release_saved(ctx
);
2299 static const struct file_operations spufs_proxydma_info_fops
= {
2300 .open
= spufs_info_open
,
2301 .read
= spufs_proxydma_info_read
,
2304 static int spufs_show_tid(struct seq_file
*s
, void *private)
2306 struct spu_context
*ctx
= s
->private;
2308 seq_printf(s
, "%d\n", ctx
->tid
);
2312 static int spufs_tid_open(struct inode
*inode
, struct file
*file
)
2314 return single_open(file
, spufs_show_tid
, SPUFS_I(inode
)->i_ctx
);
2317 static const struct file_operations spufs_tid_fops
= {
2318 .open
= spufs_tid_open
,
2320 .llseek
= seq_lseek
,
2321 .release
= single_release
,
2324 static const char *ctx_state_names
[] = {
2325 "user", "system", "iowait", "loaded"
2328 static unsigned long long spufs_acct_time(struct spu_context
*ctx
,
2329 enum spu_utilization_state state
)
2332 unsigned long long time
= ctx
->stats
.times
[state
];
2335 * In general, utilization statistics are updated by the controlling
2336 * thread as the spu context moves through various well defined
2337 * state transitions, but if the context is lazily loaded its
2338 * utilization statistics are not updated as the controlling thread
2339 * is not tightly coupled with the execution of the spu context. We
2340 * calculate and apply the time delta from the last recorded state
2341 * of the spu context.
2343 if (ctx
->spu
&& ctx
->stats
.util_state
== state
) {
2345 time
+= timespec_to_ns(&ts
) - ctx
->stats
.tstamp
;
2348 return time
/ NSEC_PER_MSEC
;
2351 static unsigned long long spufs_slb_flts(struct spu_context
*ctx
)
2353 unsigned long long slb_flts
= ctx
->stats
.slb_flt
;
2355 if (ctx
->state
== SPU_STATE_RUNNABLE
) {
2356 slb_flts
+= (ctx
->spu
->stats
.slb_flt
-
2357 ctx
->stats
.slb_flt_base
);
2363 static unsigned long long spufs_class2_intrs(struct spu_context
*ctx
)
2365 unsigned long long class2_intrs
= ctx
->stats
.class2_intr
;
2367 if (ctx
->state
== SPU_STATE_RUNNABLE
) {
2368 class2_intrs
+= (ctx
->spu
->stats
.class2_intr
-
2369 ctx
->stats
.class2_intr_base
);
2372 return class2_intrs
;
2376 static int spufs_show_stat(struct seq_file
*s
, void *private)
2378 struct spu_context
*ctx
= s
->private;
2381 ret
= spu_acquire(ctx
);
2385 seq_printf(s
, "%s %llu %llu %llu %llu "
2386 "%llu %llu %llu %llu %llu %llu %llu %llu\n",
2387 ctx_state_names
[ctx
->stats
.util_state
],
2388 spufs_acct_time(ctx
, SPU_UTIL_USER
),
2389 spufs_acct_time(ctx
, SPU_UTIL_SYSTEM
),
2390 spufs_acct_time(ctx
, SPU_UTIL_IOWAIT
),
2391 spufs_acct_time(ctx
, SPU_UTIL_IDLE_LOADED
),
2392 ctx
->stats
.vol_ctx_switch
,
2393 ctx
->stats
.invol_ctx_switch
,
2394 spufs_slb_flts(ctx
),
2395 ctx
->stats
.hash_flt
,
2398 spufs_class2_intrs(ctx
),
2399 ctx
->stats
.libassist
);
2404 static int spufs_stat_open(struct inode
*inode
, struct file
*file
)
2406 return single_open(file
, spufs_show_stat
, SPUFS_I(inode
)->i_ctx
);
2409 static const struct file_operations spufs_stat_fops
= {
2410 .open
= spufs_stat_open
,
2412 .llseek
= seq_lseek
,
2413 .release
= single_release
,
2416 static inline int spufs_switch_log_used(struct spu_context
*ctx
)
2418 return (ctx
->switch_log
->head
- ctx
->switch_log
->tail
) %
2422 static inline int spufs_switch_log_avail(struct spu_context
*ctx
)
2424 return SWITCH_LOG_BUFSIZE
- spufs_switch_log_used(ctx
);
2427 static int spufs_switch_log_open(struct inode
*inode
, struct file
*file
)
2429 struct spu_context
*ctx
= SPUFS_I(inode
)->i_ctx
;
2432 rc
= spu_acquire(ctx
);
2436 if (ctx
->switch_log
) {
2441 ctx
->switch_log
= kmalloc(sizeof(struct switch_log
) +
2442 SWITCH_LOG_BUFSIZE
* sizeof(struct switch_log_entry
),
2445 if (!ctx
->switch_log
) {
2450 ctx
->switch_log
->head
= ctx
->switch_log
->tail
= 0;
2451 init_waitqueue_head(&ctx
->switch_log
->wait
);
2459 static int spufs_switch_log_release(struct inode
*inode
, struct file
*file
)
2461 struct spu_context
*ctx
= SPUFS_I(inode
)->i_ctx
;
2464 rc
= spu_acquire(ctx
);
2468 kfree(ctx
->switch_log
);
2469 ctx
->switch_log
= NULL
;
2475 static int switch_log_sprint(struct spu_context
*ctx
, char *tbuf
, int n
)
2477 struct switch_log_entry
*p
;
2479 p
= ctx
->switch_log
->log
+ ctx
->switch_log
->tail
% SWITCH_LOG_BUFSIZE
;
2481 return snprintf(tbuf
, n
, "%u.%09u %d %u %u %llu\n",
2482 (unsigned int) p
->tstamp
.tv_sec
,
2483 (unsigned int) p
->tstamp
.tv_nsec
,
2485 (unsigned int) p
->type
,
2486 (unsigned int) p
->val
,
2487 (unsigned long long) p
->timebase
);
2490 static ssize_t
spufs_switch_log_read(struct file
*file
, char __user
*buf
,
2491 size_t len
, loff_t
*ppos
)
2493 struct inode
*inode
= file
->f_path
.dentry
->d_inode
;
2494 struct spu_context
*ctx
= SPUFS_I(inode
)->i_ctx
;
2495 int error
= 0, cnt
= 0;
2500 error
= spu_acquire(ctx
);
2508 if (spufs_switch_log_used(ctx
) == 0) {
2510 /* If there's data ready to go, we can
2511 * just return straight away */
2514 } else if (file
->f_flags
& O_NONBLOCK
) {
2519 /* spufs_wait will drop the mutex and
2520 * re-acquire, but since we're in read(), the
2521 * file cannot be _released (and so
2522 * ctx->switch_log is stable).
2524 error
= spufs_wait(ctx
->switch_log
->wait
,
2525 spufs_switch_log_used(ctx
) > 0);
2527 /* On error, spufs_wait returns without the
2528 * state mutex held */
2532 /* We may have had entries read from underneath
2533 * us while we dropped the mutex in spufs_wait,
2535 if (spufs_switch_log_used(ctx
) == 0)
2540 width
= switch_log_sprint(ctx
, tbuf
, sizeof(tbuf
));
2542 ctx
->switch_log
->tail
=
2543 (ctx
->switch_log
->tail
+ 1) %
2546 /* If the record is greater than space available return
2547 * partial buffer (so far) */
2550 error
= copy_to_user(buf
+ cnt
, tbuf
, width
);
2558 return cnt
== 0 ? error
: cnt
;
2561 static unsigned int spufs_switch_log_poll(struct file
*file
, poll_table
*wait
)
2563 struct inode
*inode
= file
->f_path
.dentry
->d_inode
;
2564 struct spu_context
*ctx
= SPUFS_I(inode
)->i_ctx
;
2565 unsigned int mask
= 0;
2568 poll_wait(file
, &ctx
->switch_log
->wait
, wait
);
2570 rc
= spu_acquire(ctx
);
2574 if (spufs_switch_log_used(ctx
) > 0)
2582 static const struct file_operations spufs_switch_log_fops
= {
2583 .owner
= THIS_MODULE
,
2584 .open
= spufs_switch_log_open
,
2585 .read
= spufs_switch_log_read
,
2586 .poll
= spufs_switch_log_poll
,
2587 .release
= spufs_switch_log_release
,
2591 * Log a context switch event to a switch log reader.
2593 * Must be called with ctx->state_mutex held.
2595 void spu_switch_log_notify(struct spu
*spu
, struct spu_context
*ctx
,
2598 if (!ctx
->switch_log
)
2601 if (spufs_switch_log_avail(ctx
) > 1) {
2602 struct switch_log_entry
*p
;
2604 p
= ctx
->switch_log
->log
+ ctx
->switch_log
->head
;
2605 ktime_get_ts(&p
->tstamp
);
2606 p
->timebase
= get_tb();
2607 p
->spu_id
= spu
? spu
->number
: -1;
2611 ctx
->switch_log
->head
=
2612 (ctx
->switch_log
->head
+ 1) % SWITCH_LOG_BUFSIZE
;
2615 wake_up(&ctx
->switch_log
->wait
);
2618 static int spufs_show_ctx(struct seq_file
*s
, void *private)
2620 struct spu_context
*ctx
= s
->private;
2623 mutex_lock(&ctx
->state_mutex
);
2625 struct spu
*spu
= ctx
->spu
;
2626 struct spu_priv2 __iomem
*priv2
= spu
->priv2
;
2628 spin_lock_irq(&spu
->register_lock
);
2629 mfc_control_RW
= in_be64(&priv2
->mfc_control_RW
);
2630 spin_unlock_irq(&spu
->register_lock
);
2632 struct spu_state
*csa
= &ctx
->csa
;
2634 mfc_control_RW
= csa
->priv2
.mfc_control_RW
;
2637 seq_printf(s
, "%c flgs(%lx) sflgs(%lx) pri(%d) ts(%d) spu(%02d)"
2638 " %c %llx %llx %llx %llx %x %x\n",
2639 ctx
->state
== SPU_STATE_SAVED
? 'S' : 'R',
2644 ctx
->spu
? ctx
->spu
->number
: -1,
2645 !list_empty(&ctx
->rq
) ? 'q' : ' ',
2646 ctx
->csa
.class_0_pending
,
2647 ctx
->csa
.class_0_dar
,
2648 ctx
->csa
.class_1_dsisr
,
2650 ctx
->ops
->runcntl_read(ctx
),
2651 ctx
->ops
->status_read(ctx
));
2653 mutex_unlock(&ctx
->state_mutex
);
2658 static int spufs_ctx_open(struct inode
*inode
, struct file
*file
)
2660 return single_open(file
, spufs_show_ctx
, SPUFS_I(inode
)->i_ctx
);
2663 static const struct file_operations spufs_ctx_fops
= {
2664 .open
= spufs_ctx_open
,
2666 .llseek
= seq_lseek
,
2667 .release
= single_release
,
2670 const struct spufs_tree_descr spufs_dir_contents
[] = {
2671 { "capabilities", &spufs_caps_fops
, 0444, },
2672 { "mem", &spufs_mem_fops
, 0666, LS_SIZE
, },
2673 { "regs", &spufs_regs_fops
, 0666, sizeof(struct spu_reg128
[128]), },
2674 { "mbox", &spufs_mbox_fops
, 0444, },
2675 { "ibox", &spufs_ibox_fops
, 0444, },
2676 { "wbox", &spufs_wbox_fops
, 0222, },
2677 { "mbox_stat", &spufs_mbox_stat_fops
, 0444, sizeof(u32
), },
2678 { "ibox_stat", &spufs_ibox_stat_fops
, 0444, sizeof(u32
), },
2679 { "wbox_stat", &spufs_wbox_stat_fops
, 0444, sizeof(u32
), },
2680 { "signal1", &spufs_signal1_fops
, 0666, },
2681 { "signal2", &spufs_signal2_fops
, 0666, },
2682 { "signal1_type", &spufs_signal1_type
, 0666, },
2683 { "signal2_type", &spufs_signal2_type
, 0666, },
2684 { "cntl", &spufs_cntl_fops
, 0666, },
2685 { "fpcr", &spufs_fpcr_fops
, 0666, sizeof(struct spu_reg128
), },
2686 { "lslr", &spufs_lslr_ops
, 0444, },
2687 { "mfc", &spufs_mfc_fops
, 0666, },
2688 { "mss", &spufs_mss_fops
, 0666, },
2689 { "npc", &spufs_npc_ops
, 0666, },
2690 { "srr0", &spufs_srr0_ops
, 0666, },
2691 { "decr", &spufs_decr_ops
, 0666, },
2692 { "decr_status", &spufs_decr_status_ops
, 0666, },
2693 { "event_mask", &spufs_event_mask_ops
, 0666, },
2694 { "event_status", &spufs_event_status_ops
, 0444, },
2695 { "psmap", &spufs_psmap_fops
, 0666, SPUFS_PS_MAP_SIZE
, },
2696 { "phys-id", &spufs_id_ops
, 0666, },
2697 { "object-id", &spufs_object_id_ops
, 0666, },
2698 { "mbox_info", &spufs_mbox_info_fops
, 0444, sizeof(u32
), },
2699 { "ibox_info", &spufs_ibox_info_fops
, 0444, sizeof(u32
), },
2700 { "wbox_info", &spufs_wbox_info_fops
, 0444, sizeof(u32
), },
2701 { "dma_info", &spufs_dma_info_fops
, 0444,
2702 sizeof(struct spu_dma_info
), },
2703 { "proxydma_info", &spufs_proxydma_info_fops
, 0444,
2704 sizeof(struct spu_proxydma_info
)},
2705 { "tid", &spufs_tid_fops
, 0444, },
2706 { "stat", &spufs_stat_fops
, 0444, },
2707 { "switch_log", &spufs_switch_log_fops
, 0444 },
2711 const struct spufs_tree_descr spufs_dir_nosched_contents
[] = {
2712 { "capabilities", &spufs_caps_fops
, 0444, },
2713 { "mem", &spufs_mem_fops
, 0666, LS_SIZE
, },
2714 { "mbox", &spufs_mbox_fops
, 0444, },
2715 { "ibox", &spufs_ibox_fops
, 0444, },
2716 { "wbox", &spufs_wbox_fops
, 0222, },
2717 { "mbox_stat", &spufs_mbox_stat_fops
, 0444, sizeof(u32
), },
2718 { "ibox_stat", &spufs_ibox_stat_fops
, 0444, sizeof(u32
), },
2719 { "wbox_stat", &spufs_wbox_stat_fops
, 0444, sizeof(u32
), },
2720 { "signal1", &spufs_signal1_nosched_fops
, 0222, },
2721 { "signal2", &spufs_signal2_nosched_fops
, 0222, },
2722 { "signal1_type", &spufs_signal1_type
, 0666, },
2723 { "signal2_type", &spufs_signal2_type
, 0666, },
2724 { "mss", &spufs_mss_fops
, 0666, },
2725 { "mfc", &spufs_mfc_fops
, 0666, },
2726 { "cntl", &spufs_cntl_fops
, 0666, },
2727 { "npc", &spufs_npc_ops
, 0666, },
2728 { "psmap", &spufs_psmap_fops
, 0666, SPUFS_PS_MAP_SIZE
, },
2729 { "phys-id", &spufs_id_ops
, 0666, },
2730 { "object-id", &spufs_object_id_ops
, 0666, },
2731 { "tid", &spufs_tid_fops
, 0444, },
2732 { "stat", &spufs_stat_fops
, 0444, },
2736 const struct spufs_tree_descr spufs_dir_debug_contents
[] = {
2737 { ".ctx", &spufs_ctx_fops
, 0444, },
2741 const struct spufs_coredump_reader spufs_coredump_read
[] = {
2742 { "regs", __spufs_regs_read
, NULL
, sizeof(struct spu_reg128
[128])},
2743 { "fpcr", __spufs_fpcr_read
, NULL
, sizeof(struct spu_reg128
) },
2744 { "lslr", NULL
, spufs_lslr_get
, 19 },
2745 { "decr", NULL
, spufs_decr_get
, 19 },
2746 { "decr_status", NULL
, spufs_decr_status_get
, 19 },
2747 { "mem", __spufs_mem_read
, NULL
, LS_SIZE
, },
2748 { "signal1", __spufs_signal1_read
, NULL
, sizeof(u32
) },
2749 { "signal1_type", NULL
, spufs_signal1_type_get
, 19 },
2750 { "signal2", __spufs_signal2_read
, NULL
, sizeof(u32
) },
2751 { "signal2_type", NULL
, spufs_signal2_type_get
, 19 },
2752 { "event_mask", NULL
, spufs_event_mask_get
, 19 },
2753 { "event_status", NULL
, spufs_event_status_get
, 19 },
2754 { "mbox_info", __spufs_mbox_info_read
, NULL
, sizeof(u32
) },
2755 { "ibox_info", __spufs_ibox_info_read
, NULL
, sizeof(u32
) },
2756 { "wbox_info", __spufs_wbox_info_read
, NULL
, 4 * sizeof(u32
)},
2757 { "dma_info", __spufs_dma_info_read
, NULL
, sizeof(struct spu_dma_info
)},
2758 { "proxydma_info", __spufs_proxydma_info_read
,
2759 NULL
, sizeof(struct spu_proxydma_info
)},
2760 { "object-id", NULL
, spufs_object_id_get
, 19 },
2761 { "npc", NULL
, spufs_npc_get
, 19 },