/*
 * Copyright (C) 2013 Google, Inc.
 *
 * This software is licensed under the terms of the GNU General Public
 * License version 2, as published by the Free Software Foundation, and
 * may be copied, distributed, and modified under those terms.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 */
#include <linux/kthread.h>
#include <linux/mutex.h>
#include <linux/slab.h>

#include "sw_sync.h"

#include <video/adf.h>
#include <video/adf_client.h>
#include <video/adf_format.h>

#include "adf.h"
27 static inline bool vsync_active(u8 state
)
29 return state
== DRM_MODE_DPMS_ON
|| state
== DRM_MODE_DPMS_STANDBY
;
33 * adf_interface_blank - set interface's DPMS state
35 * @intf: the interface
36 * @state: one of %DRM_MODE_DPMS_*
38 * Returns 0 on success or -errno on failure.
40 int adf_interface_blank(struct adf_interface
*intf
, u8 state
)
42 struct adf_device
*dev
= adf_interface_parent(intf
);
47 struct adf_event_refcount
*vsync_refcount
;
49 if (!intf
->ops
|| !intf
->ops
->blank
)
52 if (state
> DRM_MODE_DPMS_OFF
)
55 mutex_lock(&dev
->client_lock
);
56 if (state
!= DRM_MODE_DPMS_ON
)
57 flush_kthread_worker(&dev
->post_worker
);
58 mutex_lock(&intf
->base
.event_lock
);
60 vsync_refcount
= adf_obj_find_event_refcount(&intf
->base
,
62 if (!vsync_refcount
) {
67 prev_state
= intf
->dpms_state
;
68 if (prev_state
== state
) {
73 disable_vsync
= vsync_active(prev_state
) &&
74 !vsync_active(state
) &&
75 vsync_refcount
->refcount
;
76 enable_vsync
= !vsync_active(prev_state
) &&
77 vsync_active(state
) &&
78 vsync_refcount
->refcount
;
81 intf
->base
.ops
->set_event(&intf
->base
, ADF_EVENT_VSYNC
,
84 ret
= intf
->ops
->blank(intf
, state
);
87 intf
->base
.ops
->set_event(&intf
->base
, ADF_EVENT_VSYNC
,
93 intf
->base
.ops
->set_event(&intf
->base
, ADF_EVENT_VSYNC
,
96 intf
->dpms_state
= state
;
98 mutex_unlock(&intf
->base
.event_lock
);
99 mutex_unlock(&dev
->client_lock
);
102 EXPORT_SYMBOL(adf_interface_blank
);
105 * adf_interface_blank - get interface's current DPMS state
107 * @intf: the interface
109 * Returns one of %DRM_MODE_DPMS_*.
111 u8
adf_interface_dpms_state(struct adf_interface
*intf
)
113 struct adf_device
*dev
= adf_interface_parent(intf
);
116 mutex_lock(&dev
->client_lock
);
117 dpms_state
= intf
->dpms_state
;
118 mutex_unlock(&dev
->client_lock
);
122 EXPORT_SYMBOL(adf_interface_dpms_state
);
125 * adf_interface_current_mode - get interface's current display mode
127 * @intf: the interface
128 * @mode: returns the current mode
130 void adf_interface_current_mode(struct adf_interface
*intf
,
131 struct drm_mode_modeinfo
*mode
)
133 struct adf_device
*dev
= adf_interface_parent(intf
);
135 mutex_lock(&dev
->client_lock
);
136 memcpy(mode
, &intf
->current_mode
, sizeof(*mode
));
137 mutex_unlock(&dev
->client_lock
);
139 EXPORT_SYMBOL(adf_interface_current_mode
);
142 * adf_interface_modelist - get interface's modelist
144 * @intf: the interface
145 * @modelist: storage for the modelist (optional)
146 * @n_modes: length of @modelist
148 * If @modelist is not NULL, adf_interface_modelist() will copy up to @n_modes
149 * modelist entries into @modelist.
151 * Returns the length of the modelist.
153 size_t adf_interface_modelist(struct adf_interface
*intf
,
154 struct drm_mode_modeinfo
*modelist
, size_t n_modes
)
159 read_lock_irqsave(&intf
->hotplug_modelist_lock
, flags
);
161 memcpy(modelist
, intf
->modelist
, sizeof(modelist
[0]) *
162 min(n_modes
, intf
->n_modes
));
163 retval
= intf
->n_modes
;
164 read_unlock_irqrestore(&intf
->hotplug_modelist_lock
, flags
);
168 EXPORT_SYMBOL(adf_interface_modelist
);
171 * adf_interface_set_mode - set interface's display mode
173 * @intf: the interface
174 * @mode: the new mode
176 * Returns 0 on success or -errno on failure.
178 int adf_interface_set_mode(struct adf_interface
*intf
,
179 struct drm_mode_modeinfo
*mode
)
181 struct adf_device
*dev
= adf_interface_parent(intf
);
184 if (!intf
->ops
|| !intf
->ops
->modeset
)
187 mutex_lock(&dev
->client_lock
);
188 flush_kthread_worker(&dev
->post_worker
);
190 ret
= intf
->ops
->modeset(intf
, mode
);
194 memcpy(&intf
->current_mode
, mode
, sizeof(*mode
));
196 mutex_unlock(&dev
->client_lock
);
199 EXPORT_SYMBOL(adf_interface_set_mode
);
202 * adf_interface_screen_size - get size of screen connected to interface
204 * @intf: the interface
205 * @width_mm: returns the screen width in mm
206 * @height_mm: returns the screen width in mm
208 * Returns 0 on success or -errno on failure.
210 int adf_interface_get_screen_size(struct adf_interface
*intf
, u16
*width_mm
,
213 struct adf_device
*dev
= adf_interface_parent(intf
);
216 if (!intf
->ops
|| !intf
->ops
->screen_size
)
219 mutex_lock(&dev
->client_lock
);
220 ret
= intf
->ops
->screen_size(intf
, width_mm
, height_mm
);
221 mutex_unlock(&dev
->client_lock
);
225 EXPORT_SYMBOL(adf_interface_get_screen_size
);
228 * adf_overlay_engine_supports_format - returns whether a format is in an
229 * overlay engine's supported list
231 * @eng: the overlay engine
232 * @format: format fourcc
234 bool adf_overlay_engine_supports_format(struct adf_overlay_engine
*eng
,
238 for (i
= 0; i
< eng
->ops
->n_supported_formats
; i
++)
239 if (format
== eng
->ops
->supported_formats
[i
])
244 EXPORT_SYMBOL(adf_overlay_engine_supports_format
);
246 static int adf_buffer_validate(struct adf_buffer
*buf
)
248 struct adf_overlay_engine
*eng
= buf
->overlay_engine
;
249 struct device
*dev
= &eng
->base
.dev
;
250 struct adf_device
*parent
= adf_overlay_engine_parent(eng
);
251 u8 hsub
, vsub
, num_planes
, cpp
[ADF_MAX_PLANES
], i
;
253 if (!adf_overlay_engine_supports_format(eng
, buf
->format
)) {
254 char format_str
[ADF_FORMAT_STR_SIZE
];
255 adf_format_str(buf
->format
, format_str
);
256 dev_err(dev
, "unsupported format %s\n", format_str
);
260 if (!adf_format_is_standard(buf
->format
))
261 return parent
->ops
->validate_custom_format(parent
, buf
);
263 hsub
= adf_format_horz_chroma_subsampling(buf
->format
);
264 vsub
= adf_format_vert_chroma_subsampling(buf
->format
);
265 num_planes
= adf_format_num_planes(buf
->format
);
266 for (i
= 0; i
< num_planes
; i
++)
267 cpp
[i
] = adf_format_plane_cpp(buf
->format
, i
);
269 return adf_format_validate_yuv(parent
, buf
, num_planes
, hsub
, vsub
,
273 static int adf_buffer_map(struct adf_device
*dev
, struct adf_buffer
*buf
,
274 struct adf_buffer_mapping
*mapping
)
279 for (i
= 0; i
< buf
->n_planes
; i
++) {
280 struct dma_buf_attachment
*attachment
;
281 struct sg_table
*sg_table
;
283 attachment
= dma_buf_attach(buf
->dma_bufs
[i
], dev
->dev
);
284 if (IS_ERR(attachment
)) {
285 ret
= PTR_ERR(attachment
);
286 dev_err(&dev
->base
.dev
, "attaching plane %zu failed: %d\n",
290 mapping
->attachments
[i
] = attachment
;
292 sg_table
= dma_buf_map_attachment(attachment
, DMA_TO_DEVICE
);
293 if (IS_ERR(sg_table
)) {
294 ret
= PTR_ERR(sg_table
);
295 dev_err(&dev
->base
.dev
, "mapping plane %zu failed: %d",
298 } else if (!sg_table
) {
300 dev_err(&dev
->base
.dev
, "mapping plane %zu failed\n",
304 mapping
->sg_tables
[i
] = sg_table
;
309 adf_buffer_mapping_cleanup(mapping
, buf
);
310 memset(mapping
, 0, sizeof(*mapping
));
316 static struct sync_fence
*adf_sw_complete_fence(struct adf_device
*dev
)
319 struct sync_fence
*complete_fence
;
321 if (!dev
->timeline
) {
322 dev
->timeline
= sw_sync_timeline_create(dev
->base
.name
);
324 return ERR_PTR(-ENOMEM
);
325 dev
->timeline_max
= 1;
329 pt
= sw_sync_pt_create(dev
->timeline
, dev
->timeline_max
);
332 complete_fence
= sync_fence_create(dev
->base
.name
, pt
);
334 goto err_fence_create
;
336 return complete_fence
;
342 return ERR_PTR(-ENOSYS
);
346 * adf_device_post - flip to a new set of buffers
348 * @dev: device targeted by the flip
349 * @intfs: interfaces targeted by the flip
350 * @n_intfs: number of targeted interfaces
351 * @bufs: description of buffers displayed
352 * @n_bufs: number of buffers displayed
353 * @custom_data: driver-private data
354 * @custom_data_size: size of driver-private data
356 * adf_device_post() will copy @intfs, @bufs, and @custom_data, so they may
357 * point to variables on the stack. adf_device_post() also takes its own
358 * reference on each of the dma-bufs in @bufs. The adf_device_post_nocopy()
359 * variant transfers ownership of these resources to ADF instead.
361 * On success, returns a sync fence which signals when the buffers are removed
362 * from the screen. On failure, returns ERR_PTR(-errno).
364 struct sync_fence
*adf_device_post(struct adf_device
*dev
,
365 struct adf_interface
**intfs
, size_t n_intfs
,
366 struct adf_buffer
*bufs
, size_t n_bufs
, void *custom_data
,
367 size_t custom_data_size
)
369 struct adf_interface
**intfs_copy
= NULL
;
370 struct adf_buffer
*bufs_copy
= NULL
;
371 void *custom_data_copy
= NULL
;
372 struct sync_fence
*ret
;
375 intfs_copy
= kzalloc(sizeof(intfs_copy
[0]) * n_intfs
, GFP_KERNEL
);
377 return ERR_PTR(-ENOMEM
);
379 bufs_copy
= kzalloc(sizeof(bufs_copy
[0]) * n_bufs
, GFP_KERNEL
);
381 ret
= ERR_PTR(-ENOMEM
);
385 custom_data_copy
= kzalloc(custom_data_size
, GFP_KERNEL
);
386 if (!custom_data_copy
) {
387 ret
= ERR_PTR(-ENOMEM
);
391 for (i
= 0; i
< n_bufs
; i
++) {
393 for (j
= 0; j
< bufs
[i
].n_planes
; j
++)
394 get_dma_buf(bufs
[i
].dma_bufs
[j
]);
397 memcpy(intfs_copy
, intfs
, sizeof(intfs_copy
[0]) * n_intfs
);
398 memcpy(bufs_copy
, bufs
, sizeof(bufs_copy
[0]) * n_bufs
);
399 memcpy(custom_data_copy
, custom_data
, custom_data_size
);
401 ret
= adf_device_post_nocopy(dev
, intfs_copy
, n_intfs
, bufs_copy
,
402 n_bufs
, custom_data_copy
, custom_data_size
);
409 for (i
= 0; i
< n_bufs
; i
++) {
411 for (j
= 0; j
< bufs
[i
].n_planes
; j
++)
412 dma_buf_put(bufs
[i
].dma_bufs
[j
]);
415 kfree(custom_data_copy
);
420 EXPORT_SYMBOL(adf_device_post
);
423 * adf_device_post_nocopy - flip to a new set of buffers
425 * adf_device_post_nocopy() has the same behavior as adf_device_post(),
426 * except ADF does not copy @intfs, @bufs, or @custom_data, and it does
427 * not take an extra reference on the dma-bufs in @bufs.
429 * @intfs, @bufs, and @custom_data must point to buffers allocated by
430 * kmalloc(). On success, ADF takes ownership of these buffers and the dma-bufs
431 * in @bufs, and will kfree()/dma_buf_put() them when they are no longer needed.
432 * On failure, adf_device_post_nocopy() does NOT take ownership of these
433 * buffers or the dma-bufs, and the caller must clean them up.
435 * adf_device_post_nocopy() is mainly intended for implementing ADF's ioctls.
436 * Clients may find the nocopy variant useful in limited cases, but most should
437 * call adf_device_post() instead.
439 struct sync_fence
*adf_device_post_nocopy(struct adf_device
*dev
,
440 struct adf_interface
**intfs
, size_t n_intfs
,
441 struct adf_buffer
*bufs
, size_t n_bufs
,
442 void *custom_data
, size_t custom_data_size
)
444 struct adf_pending_post
*cfg
;
445 struct adf_buffer_mapping
*mappings
;
446 struct sync_fence
*ret
;
450 cfg
= kzalloc(sizeof(*cfg
), GFP_KERNEL
);
452 return ERR_PTR(-ENOMEM
);
454 mappings
= kzalloc(sizeof(mappings
[0]) * n_bufs
, GFP_KERNEL
);
456 ret
= ERR_PTR(-ENOMEM
);
460 mutex_lock(&dev
->client_lock
);
462 for (i
= 0; i
< n_bufs
; i
++) {
463 err
= adf_buffer_validate(&bufs
[i
]);
469 err
= adf_buffer_map(dev
, &bufs
[i
], &mappings
[i
]);
476 INIT_LIST_HEAD(&cfg
->head
);
477 cfg
->config
.n_bufs
= n_bufs
;
478 cfg
->config
.bufs
= bufs
;
479 cfg
->config
.mappings
= mappings
;
480 cfg
->config
.custom_data
= custom_data
;
481 cfg
->config
.custom_data_size
= custom_data_size
;
483 err
= dev
->ops
->validate(dev
, &cfg
->config
, &cfg
->state
);
489 mutex_lock(&dev
->post_lock
);
491 if (dev
->ops
->complete_fence
)
492 ret
= dev
->ops
->complete_fence(dev
, &cfg
->config
,
495 ret
= adf_sw_complete_fence(dev
);
500 list_add_tail(&cfg
->head
, &dev
->post_list
);
501 queue_kthread_work(&dev
->post_worker
, &dev
->post_work
);
502 mutex_unlock(&dev
->post_lock
);
503 mutex_unlock(&dev
->client_lock
);
508 mutex_unlock(&dev
->post_lock
);
511 for (i
= 0; i
< n_bufs
; i
++)
512 adf_buffer_mapping_cleanup(&mappings
[i
], &bufs
[i
]);
514 mutex_unlock(&dev
->client_lock
);
521 EXPORT_SYMBOL(adf_device_post_nocopy
);
523 static void adf_attachment_list_to_array(struct adf_device
*dev
,
524 struct list_head
*src
, struct adf_attachment
*dst
, size_t size
)
526 struct adf_attachment_list
*entry
;
532 list_for_each_entry(entry
, src
, head
) {
535 dst
[i
] = entry
->attachment
;
541 * adf_device_attachments - get device's list of active attachments
544 * @attachments: storage for the attachment list (optional)
545 * @n_attachments: length of @attachments
547 * If @attachments is not NULL, adf_device_attachments() will copy up to
548 * @n_attachments entries into @attachments.
550 * Returns the length of the active attachment list.
552 size_t adf_device_attachments(struct adf_device
*dev
,
553 struct adf_attachment
*attachments
, size_t n_attachments
)
557 mutex_lock(&dev
->client_lock
);
558 adf_attachment_list_to_array(dev
, &dev
->attached
, attachments
,
560 retval
= dev
->n_attached
;
561 mutex_unlock(&dev
->client_lock
);
565 EXPORT_SYMBOL(adf_device_attachments
);
568 * adf_device_attachments_allowed - get device's list of allowed attachments
571 * @attachments: storage for the attachment list (optional)
572 * @n_attachments: length of @attachments
574 * If @attachments is not NULL, adf_device_attachments_allowed() will copy up to
575 * @n_attachments entries into @attachments.
577 * Returns the length of the allowed attachment list.
579 size_t adf_device_attachments_allowed(struct adf_device
*dev
,
580 struct adf_attachment
*attachments
, size_t n_attachments
)
584 mutex_lock(&dev
->client_lock
);
585 adf_attachment_list_to_array(dev
, &dev
->attach_allowed
, attachments
,
587 retval
= dev
->n_attach_allowed
;
588 mutex_unlock(&dev
->client_lock
);
592 EXPORT_SYMBOL(adf_device_attachments_allowed
);
595 * adf_device_attached - return whether an overlay engine and interface are
598 * @dev: the parent device
599 * @eng: the overlay engine
600 * @intf: the interface
602 bool adf_device_attached(struct adf_device
*dev
, struct adf_overlay_engine
*eng
,
603 struct adf_interface
*intf
)
605 struct adf_attachment_list
*attachment
;
607 mutex_lock(&dev
->client_lock
);
608 attachment
= adf_attachment_find(&dev
->attached
, eng
, intf
);
609 mutex_unlock(&dev
->client_lock
);
611 return attachment
!= NULL
;
613 EXPORT_SYMBOL(adf_device_attached
);
616 * adf_device_attach_allowed - return whether the ADF device supports attaching
617 * an overlay engine and interface
619 * @dev: the parent device
620 * @eng: the overlay engine
621 * @intf: the interface
623 bool adf_device_attach_allowed(struct adf_device
*dev
,
624 struct adf_overlay_engine
*eng
, struct adf_interface
*intf
)
626 struct adf_attachment_list
*attachment
;
628 mutex_lock(&dev
->client_lock
);
629 attachment
= adf_attachment_find(&dev
->attach_allowed
, eng
, intf
);
630 mutex_unlock(&dev
->client_lock
);
632 return attachment
!= NULL
;
634 EXPORT_SYMBOL(adf_device_attach_allowed
);
636 * adf_device_attach - attach an overlay engine to an interface
638 * @dev: the parent device
639 * @eng: the overlay engine
640 * @intf: the interface
642 * Returns 0 on success, -%EINVAL if attaching @intf and @eng is not allowed,
643 * -%EALREADY if @intf and @eng are already attached, or -errno on any other
646 int adf_device_attach(struct adf_device
*dev
, struct adf_overlay_engine
*eng
,
647 struct adf_interface
*intf
)
650 struct adf_attachment_list
*attachment
= NULL
;
652 ret
= adf_attachment_validate(dev
, eng
, intf
);
656 mutex_lock(&dev
->client_lock
);
658 if (dev
->n_attached
== ADF_MAX_ATTACHMENTS
) {
663 if (!adf_attachment_find(&dev
->attach_allowed
, eng
, intf
)) {
668 if (adf_attachment_find(&dev
->attached
, eng
, intf
)) {
673 ret
= adf_device_attach_op(dev
, eng
, intf
);
677 attachment
= kzalloc(sizeof(*attachment
), GFP_KERNEL
);
683 attachment
->attachment
.interface
= intf
;
684 attachment
->attachment
.overlay_engine
= eng
;
685 list_add_tail(&attachment
->head
, &dev
->attached
);
689 mutex_unlock(&dev
->client_lock
);
695 EXPORT_SYMBOL(adf_device_attach
);
698 * adf_device_detach - detach an overlay engine from an interface
700 * @dev: the parent device
701 * @eng: the overlay engine
702 * @intf: the interface
704 * Returns 0 on success, -%EINVAL if @intf and @eng are not attached,
705 * or -errno on any other failure.
707 int adf_device_detach(struct adf_device
*dev
, struct adf_overlay_engine
*eng
,
708 struct adf_interface
*intf
)
711 struct adf_attachment_list
*attachment
;
713 ret
= adf_attachment_validate(dev
, eng
, intf
);
717 mutex_lock(&dev
->client_lock
);
719 attachment
= adf_attachment_find(&dev
->attached
, eng
, intf
);
725 ret
= adf_device_detach_op(dev
, eng
, intf
);
729 adf_attachment_free(attachment
);
732 mutex_unlock(&dev
->client_lock
);
735 EXPORT_SYMBOL(adf_device_detach
);
738 * adf_interface_simple_buffer_alloc - allocate a simple buffer
740 * @intf: target interface
741 * @w: width in pixels
742 * @h: height in pixels
743 * @format: format fourcc
744 * @dma_buf: returns the allocated buffer
745 * @offset: returns the byte offset of the allocated buffer's first pixel
746 * @pitch: returns the allocated buffer's pitch
748 * See &struct adf_simple_buffer_alloc for a description of simple buffers and
751 * Returns 0 on success or -errno on failure.
753 int adf_interface_simple_buffer_alloc(struct adf_interface
*intf
, u16 w
, u16 h
,
754 u32 format
, struct dma_buf
**dma_buf
, u32
*offset
, u32
*pitch
)
756 if (!intf
->ops
|| !intf
->ops
->alloc_simple_buffer
)
759 if (!adf_format_is_rgb(format
))
762 return intf
->ops
->alloc_simple_buffer(intf
, w
, h
, format
, dma_buf
,
765 EXPORT_SYMBOL(adf_interface_simple_buffer_alloc
);
768 * adf_interface_simple_post - flip to a single buffer
770 * @intf: interface targeted by the flip
771 * @buf: buffer to display
773 * adf_interface_simple_post() can be used generically for simple display
774 * configurations, since the client does not need to provide any driver-private
775 * configuration data.
777 * adf_interface_simple_post() has the same copying semantics as
780 * On success, returns a sync fence which signals when the buffer is removed
781 * from the screen. On failure, returns ERR_PTR(-errno).
783 struct sync_fence
*adf_interface_simple_post(struct adf_interface
*intf
,
784 struct adf_buffer
*buf
)
786 size_t custom_data_size
= 0;
787 void *custom_data
= NULL
;
788 struct sync_fence
*ret
;
790 if (intf
->ops
&& intf
->ops
->describe_simple_post
) {
793 custom_data
= kzalloc(ADF_MAX_CUSTOM_DATA_SIZE
, GFP_KERNEL
);
795 ret
= ERR_PTR(-ENOMEM
);
799 err
= intf
->ops
->describe_simple_post(intf
, buf
, custom_data
,
807 ret
= adf_device_post(adf_interface_parent(intf
), &intf
, 1, buf
, 1,
808 custom_data
, custom_data_size
);
813 EXPORT_SYMBOL(adf_interface_simple_post
);