/*
 * Copyright (C) 2013 Google, Inc.
 *
 * This software is licensed under the terms of the GNU General Public
 * License version 2, as published by the Free Software Foundation, and
 * may be copied, distributed, and modified under those terms.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 */
#include <linux/bitops.h>
#include <linux/circ_buf.h>
#include <linux/fs.h>
#include <linux/module.h>
#include <linux/poll.h>
#include <linux/slab.h>
#include <linux/uaccess.h>

#include <video/adf_client.h>
#include <video/adf_format.h>

#include "sw_sync.h"
#include "sync.h"

#include "adf.h"
#include "adf_fops.h"
#include "adf_sysfs.h"

#ifdef CONFIG_COMPAT
#include "adf_fops32.h"
#endif
37 static int adf_obj_set_event(struct adf_obj
*obj
, struct adf_file
*file
,
38 struct adf_set_event __user
*arg
)
40 struct adf_set_event data
;
45 if (copy_from_user(&data
, arg
, sizeof(data
)))
48 err
= adf_obj_check_supports_event(obj
, data
.type
);
52 spin_lock_irqsave(&obj
->file_lock
, flags
);
54 enabled
= test_and_set_bit(data
.type
,
55 file
->event_subscriptions
);
57 enabled
= test_and_clear_bit(data
.type
,
58 file
->event_subscriptions
);
59 spin_unlock_irqrestore(&obj
->file_lock
, flags
);
61 if (data
.enabled
== enabled
)
65 adf_event_get(obj
, data
.type
);
67 adf_event_put(obj
, data
.type
);
72 static int adf_obj_copy_custom_data_to_user(struct adf_obj
*obj
,
73 void __user
*dst
, size_t *dst_size
)
76 size_t custom_data_size
;
79 if (!obj
->ops
|| !obj
->ops
->custom_data
) {
80 dev_dbg(&obj
->dev
, "%s: no custom_data op\n", __func__
);
84 custom_data
= kzalloc(ADF_MAX_CUSTOM_DATA_SIZE
, GFP_KERNEL
);
88 ret
= obj
->ops
->custom_data(obj
, custom_data
, &custom_data_size
);
92 if (copy_to_user(dst
, custom_data
, min(*dst_size
, custom_data_size
))) {
96 *dst_size
= custom_data_size
;
103 static int adf_eng_get_data(struct adf_overlay_engine
*eng
,
104 struct adf_overlay_engine_data __user
*arg
)
106 struct adf_device
*dev
= adf_overlay_engine_parent(eng
);
107 struct adf_overlay_engine_data data
;
108 size_t n_supported_formats
;
109 u32
*supported_formats
= NULL
;
112 if (copy_from_user(&data
, arg
, sizeof(data
)))
115 strlcpy(data
.name
, eng
->base
.name
, sizeof(data
.name
));
117 if (data
.n_supported_formats
> ADF_MAX_SUPPORTED_FORMATS
)
120 n_supported_formats
= data
.n_supported_formats
;
121 data
.n_supported_formats
= eng
->ops
->n_supported_formats
;
123 if (n_supported_formats
) {
124 supported_formats
= kzalloc(n_supported_formats
*
125 sizeof(supported_formats
[0]), GFP_KERNEL
);
126 if (!supported_formats
)
130 memcpy(supported_formats
, eng
->ops
->supported_formats
,
131 sizeof(u32
) * min(n_supported_formats
,
132 eng
->ops
->n_supported_formats
));
134 mutex_lock(&dev
->client_lock
);
135 ret
= adf_obj_copy_custom_data_to_user(&eng
->base
, arg
->custom_data
,
136 &data
.custom_data_size
);
137 mutex_unlock(&dev
->client_lock
);
142 if (copy_to_user(arg
, &data
, sizeof(data
))) {
147 if (supported_formats
&& copy_to_user(arg
->supported_formats
,
149 n_supported_formats
* sizeof(supported_formats
[0])))
153 kfree(supported_formats
);
157 static int adf_buffer_import(struct adf_device
*dev
,
158 struct adf_buffer_config __user
*cfg
, struct adf_buffer
*buf
)
160 struct adf_buffer_config user_buf
;
164 if (copy_from_user(&user_buf
, cfg
, sizeof(user_buf
)))
167 memset(buf
, 0, sizeof(*buf
));
169 if (user_buf
.n_planes
> ADF_MAX_PLANES
) {
170 dev_err(&dev
->base
.dev
, "invalid plane count %u\n",
175 buf
->overlay_engine
= idr_find(&dev
->overlay_engines
,
176 user_buf
.overlay_engine
);
177 if (!buf
->overlay_engine
) {
178 dev_err(&dev
->base
.dev
, "invalid overlay engine id %u\n",
179 user_buf
.overlay_engine
);
185 buf
->format
= user_buf
.format
;
186 for (i
= 0; i
< user_buf
.n_planes
; i
++) {
187 buf
->dma_bufs
[i
] = dma_buf_get(user_buf
.fd
[i
]);
188 if (IS_ERR(buf
->dma_bufs
[i
])) {
189 ret
= PTR_ERR(buf
->dma_bufs
[i
]);
190 dev_err(&dev
->base
.dev
, "importing dma_buf fd %d failed: %d\n",
191 user_buf
.fd
[i
], ret
);
192 buf
->dma_bufs
[i
] = NULL
;
195 buf
->offset
[i
] = user_buf
.offset
[i
];
196 buf
->pitch
[i
] = user_buf
.pitch
[i
];
198 buf
->n_planes
= user_buf
.n_planes
;
200 if (user_buf
.acquire_fence
>= 0) {
201 buf
->acquire_fence
= sync_fence_fdget(user_buf
.acquire_fence
);
202 if (!buf
->acquire_fence
) {
203 dev_err(&dev
->base
.dev
, "getting fence fd %d failed\n",
204 user_buf
.acquire_fence
);
212 adf_buffer_cleanup(buf
);
216 static int adf_device_post_config(struct adf_device
*dev
,
217 struct adf_post_config __user
*arg
)
219 struct sync_fence
*complete_fence
;
220 int complete_fence_fd
;
221 struct adf_buffer
*bufs
= NULL
;
222 struct adf_interface
**intfs
= NULL
;
223 size_t n_intfs
, n_bufs
, i
;
224 void *custom_data
= NULL
;
225 size_t custom_data_size
;
228 complete_fence_fd
= get_unused_fd();
229 if (complete_fence_fd
< 0)
230 return complete_fence_fd
;
232 if (get_user(n_intfs
, &arg
->n_interfaces
)) {
237 if (n_intfs
> ADF_MAX_INTERFACES
) {
242 if (get_user(n_bufs
, &arg
->n_bufs
)) {
247 if (n_bufs
> ADF_MAX_BUFFERS
) {
252 if (get_user(custom_data_size
, &arg
->custom_data_size
)) {
257 if (custom_data_size
> ADF_MAX_CUSTOM_DATA_SIZE
) {
263 intfs
= kmalloc(sizeof(intfs
[0]) * n_intfs
, GFP_KERNEL
);
270 for (i
= 0; i
< n_intfs
; i
++) {
272 if (get_user(intf_id
, &arg
->interfaces
[i
])) {
277 intfs
[i
] = idr_find(&dev
->interfaces
, intf_id
);
285 bufs
= kzalloc(sizeof(bufs
[0]) * n_bufs
, GFP_KERNEL
);
292 for (i
= 0; i
< n_bufs
; i
++) {
293 ret
= adf_buffer_import(dev
, &arg
->bufs
[i
], &bufs
[i
]);
295 memset(&bufs
[i
], 0, sizeof(bufs
[i
]));
300 if (custom_data_size
) {
301 custom_data
= kzalloc(custom_data_size
, GFP_KERNEL
);
307 if (copy_from_user(custom_data
, arg
->custom_data
,
314 if (put_user(complete_fence_fd
, &arg
->complete_fence
)) {
319 complete_fence
= adf_device_post_nocopy(dev
, intfs
, n_intfs
, bufs
,
320 n_bufs
, custom_data
, custom_data_size
);
321 if (IS_ERR(complete_fence
)) {
322 ret
= PTR_ERR(complete_fence
);
326 sync_fence_install(complete_fence
, complete_fence_fd
);
330 for (i
= 0; i
< n_bufs
; i
++)
331 adf_buffer_cleanup(&bufs
[i
]);
337 put_unused_fd(complete_fence_fd
);
341 static int adf_intf_simple_post_config(struct adf_interface
*intf
,
342 struct adf_simple_post_config __user
*arg
)
344 struct adf_device
*dev
= intf
->base
.parent
;
345 struct sync_fence
*complete_fence
;
346 int complete_fence_fd
;
347 struct adf_buffer buf
;
350 complete_fence_fd
= get_unused_fd();
351 if (complete_fence_fd
< 0)
352 return complete_fence_fd
;
354 ret
= adf_buffer_import(dev
, &arg
->buf
, &buf
);
358 if (put_user(complete_fence_fd
, &arg
->complete_fence
)) {
363 complete_fence
= adf_interface_simple_post(intf
, &buf
);
364 if (IS_ERR(complete_fence
)) {
365 ret
= PTR_ERR(complete_fence
);
369 sync_fence_install(complete_fence
, complete_fence_fd
);
373 adf_buffer_cleanup(&buf
);
375 put_unused_fd(complete_fence_fd
);
379 static int adf_intf_simple_buffer_alloc(struct adf_interface
*intf
,
380 struct adf_simple_buffer_alloc __user
*arg
)
382 struct adf_simple_buffer_alloc data
;
383 struct dma_buf
*dma_buf
;
386 if (copy_from_user(&data
, arg
, sizeof(data
)))
389 data
.fd
= get_unused_fd_flags(O_CLOEXEC
);
393 ret
= adf_interface_simple_buffer_alloc(intf
, data
.w
, data
.h
,
394 data
.format
, &dma_buf
, &data
.offset
, &data
.pitch
);
398 if (copy_to_user(arg
, &data
, sizeof(*arg
))) {
403 fd_install(data
.fd
, dma_buf
->file
);
407 dma_buf_put(dma_buf
);
410 put_unused_fd(data
.fd
);
414 static int adf_copy_attachment_list_to_user(
415 struct adf_attachment_config __user
*to
, size_t n_to
,
416 struct adf_attachment
*from
, size_t n_from
)
418 struct adf_attachment_config
*temp
;
419 size_t n
= min(n_to
, n_from
);
426 temp
= kzalloc(n
* sizeof(temp
[0]), GFP_KERNEL
);
430 for (i
= 0; i
< n
; i
++) {
431 temp
[i
].interface
= from
[i
].interface
->base
.id
;
432 temp
[i
].overlay_engine
= from
[i
].overlay_engine
->base
.id
;
435 if (copy_to_user(to
, temp
, n
* sizeof(to
[0]))) {
445 static int adf_device_get_data(struct adf_device
*dev
,
446 struct adf_device_data __user
*arg
)
448 struct adf_device_data data
;
450 struct adf_attachment
*attach
= NULL
;
451 size_t n_allowed_attach
;
452 struct adf_attachment
*allowed_attach
= NULL
;
455 if (copy_from_user(&data
, arg
, sizeof(data
)))
458 if (data
.n_attachments
> ADF_MAX_ATTACHMENTS
||
459 data
.n_allowed_attachments
> ADF_MAX_ATTACHMENTS
)
462 strlcpy(data
.name
, dev
->base
.name
, sizeof(data
.name
));
464 if (data
.n_attachments
) {
465 attach
= kzalloc(data
.n_attachments
* sizeof(attach
[0]),
470 n_attach
= adf_device_attachments(dev
, attach
, data
.n_attachments
);
472 if (data
.n_allowed_attachments
) {
473 allowed_attach
= kzalloc(data
.n_allowed_attachments
*
474 sizeof(allowed_attach
[0]), GFP_KERNEL
);
475 if (!allowed_attach
) {
480 n_allowed_attach
= adf_device_attachments_allowed(dev
, allowed_attach
,
481 data
.n_allowed_attachments
);
483 mutex_lock(&dev
->client_lock
);
484 ret
= adf_obj_copy_custom_data_to_user(&dev
->base
, arg
->custom_data
,
485 &data
.custom_data_size
);
486 mutex_unlock(&dev
->client_lock
);
491 ret
= adf_copy_attachment_list_to_user(arg
->attachments
,
492 data
.n_attachments
, attach
, n_attach
);
496 ret
= adf_copy_attachment_list_to_user(arg
->allowed_attachments
,
497 data
.n_allowed_attachments
, allowed_attach
,
502 data
.n_attachments
= n_attach
;
503 data
.n_allowed_attachments
= n_allowed_attach
;
505 if (copy_to_user(arg
, &data
, sizeof(data
)))
509 kfree(allowed_attach
);
514 static int adf_device_handle_attachment(struct adf_device
*dev
,
515 struct adf_attachment_config __user
*arg
, bool attach
)
517 struct adf_attachment_config data
;
518 struct adf_overlay_engine
*eng
;
519 struct adf_interface
*intf
;
521 if (copy_from_user(&data
, arg
, sizeof(data
)))
524 eng
= idr_find(&dev
->overlay_engines
, data
.overlay_engine
);
526 dev_err(&dev
->base
.dev
, "invalid overlay engine id %u\n",
527 data
.overlay_engine
);
531 intf
= idr_find(&dev
->interfaces
, data
.interface
);
533 dev_err(&dev
->base
.dev
, "invalid interface id %u\n",
539 return adf_device_attach(dev
, eng
, intf
);
541 return adf_device_detach(dev
, eng
, intf
);
544 static int adf_intf_set_mode(struct adf_interface
*intf
,
545 struct drm_mode_modeinfo __user
*arg
)
547 struct drm_mode_modeinfo mode
;
549 if (copy_from_user(&mode
, arg
, sizeof(mode
)))
552 return adf_interface_set_mode(intf
, &mode
);
555 static int adf_intf_get_data(struct adf_interface
*intf
,
556 struct adf_interface_data __user
*arg
)
558 struct adf_device
*dev
= adf_interface_parent(intf
);
559 struct adf_interface_data data
;
560 struct drm_mode_modeinfo
*modelist
;
561 size_t modelist_size
;
566 if (copy_from_user(&data
, arg
, sizeof(data
)))
569 strlcpy(data
.name
, intf
->base
.name
, sizeof(data
.name
));
571 data
.type
= intf
->type
;
573 data
.flags
= intf
->flags
;
575 err
= adf_interface_get_screen_size(intf
, &data
.width_mm
,
582 modelist
= kmalloc(sizeof(modelist
[0]) * ADF_MAX_MODES
, GFP_KERNEL
);
586 mutex_lock(&dev
->client_lock
);
587 read_lock_irqsave(&intf
->hotplug_modelist_lock
, flags
);
588 data
.hotplug_detect
= intf
->hotplug_detect
;
589 modelist_size
= min(data
.n_available_modes
, intf
->n_modes
) *
590 sizeof(intf
->modelist
[0]);
591 memcpy(modelist
, intf
->modelist
, modelist_size
);
592 data
.n_available_modes
= intf
->n_modes
;
593 read_unlock_irqrestore(&intf
->hotplug_modelist_lock
, flags
);
595 if (copy_to_user(arg
->available_modes
, modelist
, modelist_size
)) {
600 data
.dpms_state
= intf
->dpms_state
;
601 memcpy(&data
.current_mode
, &intf
->current_mode
,
602 sizeof(intf
->current_mode
));
604 ret
= adf_obj_copy_custom_data_to_user(&intf
->base
, arg
->custom_data
,
605 &data
.custom_data_size
);
607 mutex_unlock(&dev
->client_lock
);
613 if (copy_to_user(arg
, &data
, sizeof(data
)))
619 static inline long adf_obj_custom_ioctl(struct adf_obj
*obj
, unsigned int cmd
,
622 if (obj
->ops
&& obj
->ops
->ioctl
)
623 return obj
->ops
->ioctl(obj
, cmd
, arg
);
627 static long adf_overlay_engine_ioctl(struct adf_overlay_engine
*eng
,
628 struct adf_file
*file
, unsigned int cmd
, unsigned long arg
)
632 return adf_obj_set_event(&eng
->base
, file
,
633 (struct adf_set_event __user
*)arg
);
635 case ADF_GET_OVERLAY_ENGINE_DATA
:
636 return adf_eng_get_data(eng
,
637 (struct adf_overlay_engine_data __user
*)arg
);
640 case ADF_POST_CONFIG
:
642 case ADF_GET_DEVICE_DATA
:
643 case ADF_GET_INTERFACE_DATA
:
644 case ADF_SIMPLE_POST_CONFIG
:
645 case ADF_SIMPLE_BUFFER_ALLOC
:
651 return adf_obj_custom_ioctl(&eng
->base
, cmd
, arg
);
655 static long adf_interface_ioctl(struct adf_interface
*intf
,
656 struct adf_file
*file
, unsigned int cmd
, unsigned long arg
)
660 return adf_obj_set_event(&intf
->base
, file
,
661 (struct adf_set_event __user
*)arg
);
664 return adf_interface_blank(intf
, arg
);
667 return adf_intf_set_mode(intf
,
668 (struct drm_mode_modeinfo __user
*)arg
);
670 case ADF_GET_INTERFACE_DATA
:
671 return adf_intf_get_data(intf
,
672 (struct adf_interface_data __user
*)arg
);
674 case ADF_SIMPLE_POST_CONFIG
:
675 return adf_intf_simple_post_config(intf
,
676 (struct adf_simple_post_config __user
*)arg
);
678 case ADF_SIMPLE_BUFFER_ALLOC
:
679 return adf_intf_simple_buffer_alloc(intf
,
680 (struct adf_simple_buffer_alloc __user
*)arg
);
682 case ADF_POST_CONFIG
:
683 case ADF_GET_DEVICE_DATA
:
684 case ADF_GET_OVERLAY_ENGINE_DATA
:
690 return adf_obj_custom_ioctl(&intf
->base
, cmd
, arg
);
694 static long adf_device_ioctl(struct adf_device
*dev
, struct adf_file
*file
,
695 unsigned int cmd
, unsigned long arg
)
699 return adf_obj_set_event(&dev
->base
, file
,
700 (struct adf_set_event __user
*)arg
);
702 case ADF_POST_CONFIG
:
703 return adf_device_post_config(dev
,
704 (struct adf_post_config __user
*)arg
);
706 case ADF_GET_DEVICE_DATA
:
707 return adf_device_get_data(dev
,
708 (struct adf_device_data __user
*)arg
);
711 return adf_device_handle_attachment(dev
,
712 (struct adf_attachment_config __user
*)arg
,
716 return adf_device_handle_attachment(dev
,
717 (struct adf_attachment_config __user
*)arg
,
722 case ADF_GET_INTERFACE_DATA
:
723 case ADF_GET_OVERLAY_ENGINE_DATA
:
724 case ADF_SIMPLE_POST_CONFIG
:
725 case ADF_SIMPLE_BUFFER_ALLOC
:
729 return adf_obj_custom_ioctl(&dev
->base
, cmd
, arg
);
733 static int adf_file_open(struct inode
*inode
, struct file
*file
)
736 struct adf_file
*fpriv
= NULL
;
740 obj
= adf_obj_sysfs_find(iminor(inode
));
744 dev_dbg(&obj
->dev
, "opening %s\n", dev_name(&obj
->dev
));
746 if (!try_module_get(obj
->parent
->ops
->owner
)) {
747 dev_err(&obj
->dev
, "getting owner module failed\n");
751 fpriv
= kzalloc(sizeof(*fpriv
), GFP_KERNEL
);
757 INIT_LIST_HEAD(&fpriv
->head
);
759 init_waitqueue_head(&fpriv
->event_wait
);
761 file
->private_data
= fpriv
;
763 if (obj
->ops
&& obj
->ops
->open
) {
764 ret
= obj
->ops
->open(obj
, inode
, file
);
769 spin_lock_irqsave(&obj
->file_lock
, flags
);
770 list_add_tail(&fpriv
->head
, &obj
->file_list
);
771 spin_unlock_irqrestore(&obj
->file_lock
, flags
);
776 module_put(obj
->parent
->ops
->owner
);
781 static int adf_file_release(struct inode
*inode
, struct file
*file
)
783 struct adf_file
*fpriv
= file
->private_data
;
784 struct adf_obj
*obj
= fpriv
->obj
;
785 enum adf_event_type event_type
;
788 if (obj
->ops
&& obj
->ops
->release
)
789 obj
->ops
->release(obj
, inode
, file
);
791 spin_lock_irqsave(&obj
->file_lock
, flags
);
792 list_del(&fpriv
->head
);
793 spin_unlock_irqrestore(&obj
->file_lock
, flags
);
795 for_each_set_bit(event_type
, fpriv
->event_subscriptions
,
796 ADF_EVENT_TYPE_MAX
) {
797 adf_event_put(obj
, event_type
);
801 module_put(obj
->parent
->ops
->owner
);
803 dev_dbg(&obj
->dev
, "released %s\n", dev_name(&obj
->dev
));
807 long adf_file_ioctl(struct file
*file
, unsigned int cmd
, unsigned long arg
)
809 struct adf_file
*fpriv
= file
->private_data
;
810 struct adf_obj
*obj
= fpriv
->obj
;
813 dev_dbg(&obj
->dev
, "%s ioctl %u\n", dev_name(&obj
->dev
), _IOC_NR(cmd
));
816 case ADF_OBJ_OVERLAY_ENGINE
:
817 ret
= adf_overlay_engine_ioctl(adf_obj_to_overlay_engine(obj
),
821 case ADF_OBJ_INTERFACE
:
822 ret
= adf_interface_ioctl(adf_obj_to_interface(obj
), fpriv
, cmd
,
827 ret
= adf_device_ioctl(adf_obj_to_device(obj
), fpriv
, cmd
, arg
);
834 static inline bool adf_file_event_available(struct adf_file
*fpriv
)
836 int head
= fpriv
->event_head
;
837 int tail
= fpriv
->event_tail
;
838 return CIRC_CNT(head
, tail
, sizeof(fpriv
->event_buf
)) != 0;
841 void adf_file_queue_event(struct adf_file
*fpriv
, struct adf_event
*event
)
843 int head
= fpriv
->event_head
;
844 int tail
= fpriv
->event_tail
;
845 size_t space
= CIRC_SPACE(head
, tail
, sizeof(fpriv
->event_buf
));
846 size_t space_to_end
=
847 CIRC_SPACE_TO_END(head
, tail
, sizeof(fpriv
->event_buf
));
849 if (space
< event
->length
) {
850 dev_dbg(&fpriv
->obj
->dev
,
851 "insufficient buffer space for event %u\n",
856 if (space_to_end
>= event
->length
) {
857 memcpy(fpriv
->event_buf
+ head
, event
, event
->length
);
859 memcpy(fpriv
->event_buf
+ head
, event
, space_to_end
);
860 memcpy(fpriv
->event_buf
, (u8
*)event
+ space_to_end
,
861 event
->length
- space_to_end
);
865 fpriv
->event_head
= (fpriv
->event_head
+ event
->length
) &
866 (sizeof(fpriv
->event_buf
) - 1);
867 wake_up_interruptible_all(&fpriv
->event_wait
);
870 static ssize_t
adf_file_copy_to_user(struct adf_file
*fpriv
,
871 char __user
*buffer
, size_t buffer_size
)
875 size_t cnt
, cnt_to_end
, copy_size
= 0;
879 event_buf
= kmalloc(min(buffer_size
, sizeof(fpriv
->event_buf
)),
884 spin_lock_irqsave(&fpriv
->obj
->file_lock
, flags
);
886 if (!adf_file_event_available(fpriv
))
889 head
= fpriv
->event_head
;
890 tail
= fpriv
->event_tail
;
892 cnt
= CIRC_CNT(head
, tail
, sizeof(fpriv
->event_buf
));
893 cnt_to_end
= CIRC_CNT_TO_END(head
, tail
, sizeof(fpriv
->event_buf
));
894 copy_size
= min(buffer_size
, cnt
);
896 if (cnt_to_end
>= copy_size
) {
897 memcpy(event_buf
, fpriv
->event_buf
+ tail
, copy_size
);
899 memcpy(event_buf
, fpriv
->event_buf
+ tail
, cnt_to_end
);
900 memcpy(event_buf
+ cnt_to_end
, fpriv
->event_buf
,
901 copy_size
- cnt_to_end
);
904 fpriv
->event_tail
= (fpriv
->event_tail
+ copy_size
) &
905 (sizeof(fpriv
->event_buf
) - 1);
908 spin_unlock_irqrestore(&fpriv
->obj
->file_lock
, flags
);
910 if (copy_to_user(buffer
, event_buf
, copy_size
))
919 ssize_t
adf_file_read(struct file
*filp
, char __user
*buffer
,
920 size_t count
, loff_t
*offset
)
922 struct adf_file
*fpriv
= filp
->private_data
;
925 err
= wait_event_interruptible(fpriv
->event_wait
,
926 adf_file_event_available(fpriv
));
930 return adf_file_copy_to_user(fpriv
, buffer
, count
);
933 unsigned int adf_file_poll(struct file
*filp
, struct poll_table_struct
*wait
)
935 struct adf_file
*fpriv
= filp
->private_data
;
936 unsigned int mask
= 0;
938 poll_wait(filp
, &fpriv
->event_wait
, wait
);
940 if (adf_file_event_available(fpriv
))
941 mask
|= POLLIN
| POLLRDNORM
;
946 const struct file_operations adf_fops
= {
947 .owner
= THIS_MODULE
,
948 .unlocked_ioctl
= adf_file_ioctl
,
950 .compat_ioctl
= adf_file_compat_ioctl
,
952 .open
= adf_file_open
,
953 .release
= adf_file_release
,
954 .llseek
= default_llseek
,
955 .read
= adf_file_read
,
956 .poll
= adf_file_poll
,