Commit | Line | Data |
---|---|---|
6fa3eb70 S |
1 | /* |
2 | * Copyright (C) 2013 Google, Inc. | |
3 | * | |
4 | * This software is licensed under the terms of the GNU General Public | |
5 | * License version 2, as published by the Free Software Foundation, and | |
6 | * may be copied, distributed, and modified under those terms. | |
7 | * | |
8 | * This program is distributed in the hope that it will be useful, | |
9 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | |
10 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | |
11 | * GNU General Public License for more details. | |
12 | * | |
13 | */ | |
14 | ||
15 | #include <linux/kthread.h> | |
16 | #include <linux/mutex.h> | |
17 | #include <linux/slab.h> | |
18 | ||
19 | #include "sw_sync.h" | |
20 | ||
21 | #include <video/adf.h> | |
22 | #include <video/adf_client.h> | |
23 | #include <video/adf_format.h> | |
24 | ||
25 | #include "adf.h" | |
26 | ||
27 | static inline bool vsync_active(u8 state) | |
28 | { | |
29 | return state == DRM_MODE_DPMS_ON || state == DRM_MODE_DPMS_STANDBY; | |
30 | } | |
31 | ||
/**
 * adf_interface_blank - set interface's DPMS state
 *
 * @intf: the interface
 * @state: one of %DRM_MODE_DPMS_*
 *
 * Returns 0 on success or -errno on failure.
 */
int adf_interface_blank(struct adf_interface *intf, u8 state)
{
	struct adf_device *dev = adf_interface_parent(intf);
	u8 prev_state;
	bool disable_vsync;
	bool enable_vsync;
	int ret = 0;
	struct adf_event_refcount *vsync_refcount;

	if (!intf->ops || !intf->ops->blank)
		return -EOPNOTSUPP;

	/* DRM_MODE_DPMS_OFF is the highest-valued DPMS state */
	if (state > DRM_MODE_DPMS_OFF)
		return -EINVAL;

	mutex_lock(&dev->client_lock);
	/* drain queued flips before the display stops updating */
	if (state != DRM_MODE_DPMS_ON)
		flush_kthread_worker(&dev->post_worker);
	mutex_lock(&intf->base.event_lock);

	vsync_refcount = adf_obj_find_event_refcount(&intf->base,
			ADF_EVENT_VSYNC);
	if (!vsync_refcount) {
		ret = -ENOMEM;
		goto done;
	}

	prev_state = intf->dpms_state;
	/* no-op transitions are rejected rather than silently accepted */
	if (prev_state == state) {
		ret = -EBUSY;
		goto done;
	}

	/*
	 * Vsync only runs while the interface is active (see vsync_active()).
	 * If clients currently hold vsync event references, the hardware
	 * event must be turned off before deactivating and turned back on
	 * after activating.
	 */
	disable_vsync = vsync_active(prev_state) &&
			!vsync_active(state) &&
			vsync_refcount->refcount;
	enable_vsync = !vsync_active(prev_state) &&
			vsync_active(state) &&
			vsync_refcount->refcount;

	if (disable_vsync)
		intf->base.ops->set_event(&intf->base, ADF_EVENT_VSYNC,
				false);

	ret = intf->ops->blank(intf, state);
	if (ret < 0) {
		/* blank failed: restore the vsync event disabled above */
		if (disable_vsync)
			intf->base.ops->set_event(&intf->base, ADF_EVENT_VSYNC,
					true);
		goto done;
	}

	if (enable_vsync)
		intf->base.ops->set_event(&intf->base, ADF_EVENT_VSYNC,
				true);

	intf->dpms_state = state;
done:
	mutex_unlock(&intf->base.event_lock);
	mutex_unlock(&dev->client_lock);
	return ret;
}
EXPORT_SYMBOL(adf_interface_blank);
103 | ||
104 | /** | |
105 | * adf_interface_blank - get interface's current DPMS state | |
106 | * | |
107 | * @intf: the interface | |
108 | * | |
109 | * Returns one of %DRM_MODE_DPMS_*. | |
110 | */ | |
111 | u8 adf_interface_dpms_state(struct adf_interface *intf) | |
112 | { | |
113 | struct adf_device *dev = adf_interface_parent(intf); | |
114 | u8 dpms_state; | |
115 | ||
116 | mutex_lock(&dev->client_lock); | |
117 | dpms_state = intf->dpms_state; | |
118 | mutex_unlock(&dev->client_lock); | |
119 | ||
120 | return dpms_state; | |
121 | } | |
122 | EXPORT_SYMBOL(adf_interface_dpms_state); | |
123 | ||
124 | /** | |
125 | * adf_interface_current_mode - get interface's current display mode | |
126 | * | |
127 | * @intf: the interface | |
128 | * @mode: returns the current mode | |
129 | */ | |
130 | void adf_interface_current_mode(struct adf_interface *intf, | |
131 | struct drm_mode_modeinfo *mode) | |
132 | { | |
133 | struct adf_device *dev = adf_interface_parent(intf); | |
134 | ||
135 | mutex_lock(&dev->client_lock); | |
136 | memcpy(mode, &intf->current_mode, sizeof(*mode)); | |
137 | mutex_unlock(&dev->client_lock); | |
138 | } | |
139 | EXPORT_SYMBOL(adf_interface_current_mode); | |
140 | ||
141 | /** | |
142 | * adf_interface_modelist - get interface's modelist | |
143 | * | |
144 | * @intf: the interface | |
145 | * @modelist: storage for the modelist (optional) | |
146 | * @n_modes: length of @modelist | |
147 | * | |
148 | * If @modelist is not NULL, adf_interface_modelist() will copy up to @n_modes | |
149 | * modelist entries into @modelist. | |
150 | * | |
151 | * Returns the length of the modelist. | |
152 | */ | |
153 | size_t adf_interface_modelist(struct adf_interface *intf, | |
154 | struct drm_mode_modeinfo *modelist, size_t n_modes) | |
155 | { | |
156 | unsigned long flags; | |
157 | size_t retval; | |
158 | ||
159 | read_lock_irqsave(&intf->hotplug_modelist_lock, flags); | |
160 | if (modelist) | |
161 | memcpy(modelist, intf->modelist, sizeof(modelist[0]) * | |
162 | min(n_modes, intf->n_modes)); | |
163 | retval = intf->n_modes; | |
164 | read_unlock_irqrestore(&intf->hotplug_modelist_lock, flags); | |
165 | ||
166 | return retval; | |
167 | } | |
168 | EXPORT_SYMBOL(adf_interface_modelist); | |
169 | ||
170 | /** | |
171 | * adf_interface_set_mode - set interface's display mode | |
172 | * | |
173 | * @intf: the interface | |
174 | * @mode: the new mode | |
175 | * | |
176 | * Returns 0 on success or -errno on failure. | |
177 | */ | |
178 | int adf_interface_set_mode(struct adf_interface *intf, | |
179 | struct drm_mode_modeinfo *mode) | |
180 | { | |
181 | struct adf_device *dev = adf_interface_parent(intf); | |
182 | int ret = 0; | |
183 | ||
184 | if (!intf->ops || !intf->ops->modeset) | |
185 | return -EOPNOTSUPP; | |
186 | ||
187 | mutex_lock(&dev->client_lock); | |
188 | flush_kthread_worker(&dev->post_worker); | |
189 | ||
190 | ret = intf->ops->modeset(intf, mode); | |
191 | if (ret < 0) | |
192 | goto done; | |
193 | ||
194 | memcpy(&intf->current_mode, mode, sizeof(*mode)); | |
195 | done: | |
196 | mutex_unlock(&dev->client_lock); | |
197 | return ret; | |
198 | } | |
199 | EXPORT_SYMBOL(adf_interface_set_mode); | |
200 | ||
201 | /** | |
202 | * adf_interface_screen_size - get size of screen connected to interface | |
203 | * | |
204 | * @intf: the interface | |
205 | * @width_mm: returns the screen width in mm | |
206 | * @height_mm: returns the screen width in mm | |
207 | * | |
208 | * Returns 0 on success or -errno on failure. | |
209 | */ | |
210 | int adf_interface_get_screen_size(struct adf_interface *intf, u16 *width_mm, | |
211 | u16 *height_mm) | |
212 | { | |
213 | struct adf_device *dev = adf_interface_parent(intf); | |
214 | int ret; | |
215 | ||
216 | if (!intf->ops || !intf->ops->screen_size) | |
217 | return -EOPNOTSUPP; | |
218 | ||
219 | mutex_lock(&dev->client_lock); | |
220 | ret = intf->ops->screen_size(intf, width_mm, height_mm); | |
221 | mutex_unlock(&dev->client_lock); | |
222 | ||
223 | return ret; | |
224 | } | |
225 | EXPORT_SYMBOL(adf_interface_get_screen_size); | |
226 | ||
227 | /** | |
228 | * adf_overlay_engine_supports_format - returns whether a format is in an | |
229 | * overlay engine's supported list | |
230 | * | |
231 | * @eng: the overlay engine | |
232 | * @format: format fourcc | |
233 | */ | |
234 | bool adf_overlay_engine_supports_format(struct adf_overlay_engine *eng, | |
235 | u32 format) | |
236 | { | |
237 | size_t i; | |
238 | for (i = 0; i < eng->ops->n_supported_formats; i++) | |
239 | if (format == eng->ops->supported_formats[i]) | |
240 | return true; | |
241 | ||
242 | return false; | |
243 | } | |
244 | EXPORT_SYMBOL(adf_overlay_engine_supports_format); | |
245 | ||
246 | static int adf_buffer_validate(struct adf_buffer *buf) | |
247 | { | |
248 | struct adf_overlay_engine *eng = buf->overlay_engine; | |
249 | struct device *dev = &eng->base.dev; | |
250 | struct adf_device *parent = adf_overlay_engine_parent(eng); | |
251 | u8 hsub, vsub, num_planes, cpp[ADF_MAX_PLANES], i; | |
252 | ||
253 | if (!adf_overlay_engine_supports_format(eng, buf->format)) { | |
254 | char format_str[ADF_FORMAT_STR_SIZE]; | |
255 | adf_format_str(buf->format, format_str); | |
256 | dev_err(dev, "unsupported format %s\n", format_str); | |
257 | return -EINVAL; | |
258 | } | |
259 | ||
260 | if (!adf_format_is_standard(buf->format)) | |
261 | return parent->ops->validate_custom_format(parent, buf); | |
262 | ||
263 | hsub = adf_format_horz_chroma_subsampling(buf->format); | |
264 | vsub = adf_format_vert_chroma_subsampling(buf->format); | |
265 | num_planes = adf_format_num_planes(buf->format); | |
266 | for (i = 0; i < num_planes; i++) | |
267 | cpp[i] = adf_format_plane_cpp(buf->format, i); | |
268 | ||
269 | return adf_format_validate_yuv(parent, buf, num_planes, hsub, vsub, | |
270 | cpp); | |
271 | } | |
272 | ||
273 | static int adf_buffer_map(struct adf_device *dev, struct adf_buffer *buf, | |
274 | struct adf_buffer_mapping *mapping) | |
275 | { | |
276 | int ret = 0; | |
277 | size_t i; | |
278 | ||
279 | for (i = 0; i < buf->n_planes; i++) { | |
280 | struct dma_buf_attachment *attachment; | |
281 | struct sg_table *sg_table; | |
282 | ||
283 | attachment = dma_buf_attach(buf->dma_bufs[i], dev->dev); | |
284 | if (IS_ERR(attachment)) { | |
285 | ret = PTR_ERR(attachment); | |
286 | dev_err(&dev->base.dev, "attaching plane %zu failed: %d\n", | |
287 | i, ret); | |
288 | goto done; | |
289 | } | |
290 | mapping->attachments[i] = attachment; | |
291 | ||
292 | sg_table = dma_buf_map_attachment(attachment, DMA_TO_DEVICE); | |
293 | if (IS_ERR(sg_table)) { | |
294 | ret = PTR_ERR(sg_table); | |
295 | dev_err(&dev->base.dev, "mapping plane %zu failed: %d", | |
296 | i, ret); | |
297 | goto done; | |
298 | } else if (!sg_table) { | |
299 | ret = -ENOMEM; | |
300 | dev_err(&dev->base.dev, "mapping plane %zu failed\n", | |
301 | i); | |
302 | goto done; | |
303 | } | |
304 | mapping->sg_tables[i] = sg_table; | |
305 | } | |
306 | ||
307 | done: | |
4b9e9796 | 308 | if (ret < 0) { |
6fa3eb70 | 309 | adf_buffer_mapping_cleanup(mapping, buf); |
4b9e9796 S |
310 | memset(mapping, 0, sizeof(*mapping)); |
311 | } | |
6fa3eb70 S |
312 | |
313 | return ret; | |
314 | } | |
315 | ||
316 | static struct sync_fence *adf_sw_complete_fence(struct adf_device *dev) | |
317 | { | |
318 | struct sync_pt *pt; | |
319 | struct sync_fence *complete_fence; | |
320 | ||
321 | if (!dev->timeline) { | |
322 | dev->timeline = sw_sync_timeline_create(dev->base.name); | |
323 | if (!dev->timeline) | |
324 | return ERR_PTR(-ENOMEM); | |
325 | dev->timeline_max = 1; | |
326 | } | |
327 | ||
328 | dev->timeline_max++; | |
329 | pt = sw_sync_pt_create(dev->timeline, dev->timeline_max); | |
330 | if (!pt) | |
331 | goto err_pt_create; | |
332 | complete_fence = sync_fence_create(dev->base.name, pt); | |
333 | if (!complete_fence) | |
334 | goto err_fence_create; | |
335 | ||
336 | return complete_fence; | |
337 | ||
338 | err_fence_create: | |
339 | sync_pt_free(pt); | |
340 | err_pt_create: | |
341 | dev->timeline_max--; | |
342 | return ERR_PTR(-ENOSYS); | |
343 | } | |
344 | ||
345 | /** | |
346 | * adf_device_post - flip to a new set of buffers | |
347 | * | |
348 | * @dev: device targeted by the flip | |
349 | * @intfs: interfaces targeted by the flip | |
350 | * @n_intfs: number of targeted interfaces | |
351 | * @bufs: description of buffers displayed | |
352 | * @n_bufs: number of buffers displayed | |
353 | * @custom_data: driver-private data | |
354 | * @custom_data_size: size of driver-private data | |
355 | * | |
356 | * adf_device_post() will copy @intfs, @bufs, and @custom_data, so they may | |
357 | * point to variables on the stack. adf_device_post() also takes its own | |
358 | * reference on each of the dma-bufs in @bufs. The adf_device_post_nocopy() | |
359 | * variant transfers ownership of these resources to ADF instead. | |
360 | * | |
361 | * On success, returns a sync fence which signals when the buffers are removed | |
362 | * from the screen. On failure, returns ERR_PTR(-errno). | |
363 | */ | |
364 | struct sync_fence *adf_device_post(struct adf_device *dev, | |
365 | struct adf_interface **intfs, size_t n_intfs, | |
366 | struct adf_buffer *bufs, size_t n_bufs, void *custom_data, | |
367 | size_t custom_data_size) | |
368 | { | |
369 | struct adf_interface **intfs_copy = NULL; | |
370 | struct adf_buffer *bufs_copy = NULL; | |
371 | void *custom_data_copy = NULL; | |
372 | struct sync_fence *ret; | |
373 | size_t i; | |
374 | ||
375 | intfs_copy = kzalloc(sizeof(intfs_copy[0]) * n_intfs, GFP_KERNEL); | |
376 | if (!intfs_copy) | |
377 | return ERR_PTR(-ENOMEM); | |
378 | ||
379 | bufs_copy = kzalloc(sizeof(bufs_copy[0]) * n_bufs, GFP_KERNEL); | |
380 | if (!bufs_copy) { | |
381 | ret = ERR_PTR(-ENOMEM); | |
382 | goto err_alloc; | |
383 | } | |
384 | ||
385 | custom_data_copy = kzalloc(custom_data_size, GFP_KERNEL); | |
386 | if (!custom_data_copy) { | |
387 | ret = ERR_PTR(-ENOMEM); | |
388 | goto err_alloc; | |
389 | } | |
390 | ||
391 | for (i = 0; i < n_bufs; i++) { | |
392 | size_t j; | |
393 | for (j = 0; j < bufs[i].n_planes; j++) | |
394 | get_dma_buf(bufs[i].dma_bufs[j]); | |
395 | } | |
396 | ||
397 | memcpy(intfs_copy, intfs, sizeof(intfs_copy[0]) * n_intfs); | |
398 | memcpy(bufs_copy, bufs, sizeof(bufs_copy[0]) * n_bufs); | |
399 | memcpy(custom_data_copy, custom_data, custom_data_size); | |
400 | ||
401 | ret = adf_device_post_nocopy(dev, intfs_copy, n_intfs, bufs_copy, | |
402 | n_bufs, custom_data_copy, custom_data_size); | |
403 | if (IS_ERR(ret)) | |
404 | goto err_post; | |
405 | ||
406 | return ret; | |
407 | ||
408 | err_post: | |
409 | for (i = 0; i < n_bufs; i++) { | |
410 | size_t j; | |
411 | for (j = 0; j < bufs[i].n_planes; j++) | |
412 | dma_buf_put(bufs[i].dma_bufs[j]); | |
413 | } | |
414 | err_alloc: | |
415 | kfree(custom_data_copy); | |
416 | kfree(bufs_copy); | |
417 | kfree(intfs_copy); | |
418 | return ret; | |
419 | } | |
420 | EXPORT_SYMBOL(adf_device_post); | |
421 | ||
/**
 * adf_device_post_nocopy - flip to a new set of buffers
 *
 * adf_device_post_nocopy() has the same behavior as adf_device_post(),
 * except ADF does not copy @intfs, @bufs, or @custom_data, and it does
 * not take an extra reference on the dma-bufs in @bufs.
 *
 * @intfs, @bufs, and @custom_data must point to buffers allocated by
 * kmalloc().  On success, ADF takes ownership of these buffers and the dma-bufs
 * in @bufs, and will kfree()/dma_buf_put() them when they are no longer needed.
 * On failure, adf_device_post_nocopy() does NOT take ownership of these
 * buffers or the dma-bufs, and the caller must clean them up.
 *
 * adf_device_post_nocopy() is mainly intended for implementing ADF's ioctls.
 * Clients may find the nocopy variant useful in limited cases, but most should
 * call adf_device_post() instead.
 */
struct sync_fence *adf_device_post_nocopy(struct adf_device *dev,
		struct adf_interface **intfs, size_t n_intfs,
		struct adf_buffer *bufs, size_t n_bufs,
		void *custom_data, size_t custom_data_size)
{
	struct adf_pending_post *cfg;
	struct adf_buffer_mapping *mappings;
	struct sync_fence *ret;
	size_t i;
	int err;

	cfg = kzalloc(sizeof(*cfg), GFP_KERNEL);
	if (!cfg)
		return ERR_PTR(-ENOMEM);

	mappings = kzalloc(sizeof(mappings[0]) * n_bufs, GFP_KERNEL);
	if (!mappings) {
		ret = ERR_PTR(-ENOMEM);
		goto err_alloc;
	}

	mutex_lock(&dev->client_lock);

	for (i = 0; i < n_bufs; i++) {
		err = adf_buffer_validate(&bufs[i]);
		if (err < 0) {
			ret = ERR_PTR(err);
			goto err_buf;
		}

		err = adf_buffer_map(dev, &bufs[i], &mappings[i]);
		if (err < 0) {
			ret = ERR_PTR(err);
			goto err_buf;
		}
	}

	INIT_LIST_HEAD(&cfg->head);
	cfg->config.n_bufs = n_bufs;
	cfg->config.bufs = bufs;
	cfg->config.mappings = mappings;
	cfg->config.custom_data = custom_data;
	cfg->config.custom_data_size = custom_data_size;

	/* let the driver veto the whole configuration before queueing */
	err = dev->ops->validate(dev, &cfg->config, &cfg->state);
	if (err < 0) {
		ret = ERR_PTR(err);
		goto err_buf;
	}

	mutex_lock(&dev->post_lock);

	/* drivers may supply their own completion fence; sw_sync otherwise */
	if (dev->ops->complete_fence)
		ret = dev->ops->complete_fence(dev, &cfg->config,
				cfg->state);
	else
		ret = adf_sw_complete_fence(dev);

	if (IS_ERR(ret))
		goto err_fence;

	/* ownership of cfg (and thus bufs/mappings/custom_data) transfers
	 * to the post worker here */
	list_add_tail(&cfg->head, &dev->post_list);
	queue_kthread_work(&dev->post_worker, &dev->post_work);
	mutex_unlock(&dev->post_lock);
	mutex_unlock(&dev->client_lock);
	/* @intfs was only needed for validation bookkeeping; free it now */
	kfree(intfs);
	return ret;

err_fence:
	mutex_unlock(&dev->post_lock);

err_buf:
	/* unmapped entries were zeroed by kzalloc, so cleanup is a no-op
	 * for them */
	for (i = 0; i < n_bufs; i++)
		adf_buffer_mapping_cleanup(&mappings[i], &bufs[i]);

	mutex_unlock(&dev->client_lock);
	kfree(mappings);

err_alloc:
	kfree(cfg);
	return ret;
}
EXPORT_SYMBOL(adf_device_post_nocopy);
522 | ||
523 | static void adf_attachment_list_to_array(struct adf_device *dev, | |
524 | struct list_head *src, struct adf_attachment *dst, size_t size) | |
525 | { | |
526 | struct adf_attachment_list *entry; | |
527 | size_t i = 0; | |
528 | ||
529 | if (!dst) | |
530 | return; | |
531 | ||
532 | list_for_each_entry(entry, src, head) { | |
533 | if (i == size) | |
534 | return; | |
535 | dst[i] = entry->attachment; | |
536 | i++; | |
537 | } | |
538 | } | |
539 | ||
540 | /** | |
541 | * adf_device_attachments - get device's list of active attachments | |
542 | * | |
543 | * @dev: the device | |
544 | * @attachments: storage for the attachment list (optional) | |
545 | * @n_attachments: length of @attachments | |
546 | * | |
547 | * If @attachments is not NULL, adf_device_attachments() will copy up to | |
548 | * @n_attachments entries into @attachments. | |
549 | * | |
550 | * Returns the length of the active attachment list. | |
551 | */ | |
552 | size_t adf_device_attachments(struct adf_device *dev, | |
553 | struct adf_attachment *attachments, size_t n_attachments) | |
554 | { | |
555 | size_t retval; | |
556 | ||
557 | mutex_lock(&dev->client_lock); | |
558 | adf_attachment_list_to_array(dev, &dev->attached, attachments, | |
559 | n_attachments); | |
560 | retval = dev->n_attached; | |
561 | mutex_unlock(&dev->client_lock); | |
562 | ||
563 | return retval; | |
564 | } | |
565 | EXPORT_SYMBOL(adf_device_attachments); | |
566 | ||
567 | /** | |
568 | * adf_device_attachments_allowed - get device's list of allowed attachments | |
569 | * | |
570 | * @dev: the device | |
571 | * @attachments: storage for the attachment list (optional) | |
572 | * @n_attachments: length of @attachments | |
573 | * | |
574 | * If @attachments is not NULL, adf_device_attachments_allowed() will copy up to | |
575 | * @n_attachments entries into @attachments. | |
576 | * | |
577 | * Returns the length of the allowed attachment list. | |
578 | */ | |
579 | size_t adf_device_attachments_allowed(struct adf_device *dev, | |
580 | struct adf_attachment *attachments, size_t n_attachments) | |
581 | { | |
582 | size_t retval; | |
583 | ||
584 | mutex_lock(&dev->client_lock); | |
585 | adf_attachment_list_to_array(dev, &dev->attach_allowed, attachments, | |
586 | n_attachments); | |
587 | retval = dev->n_attach_allowed; | |
588 | mutex_unlock(&dev->client_lock); | |
589 | ||
590 | return retval; | |
591 | } | |
592 | EXPORT_SYMBOL(adf_device_attachments_allowed); | |
593 | ||
594 | /** | |
595 | * adf_device_attached - return whether an overlay engine and interface are | |
596 | * attached | |
597 | * | |
598 | * @dev: the parent device | |
599 | * @eng: the overlay engine | |
600 | * @intf: the interface | |
601 | */ | |
602 | bool adf_device_attached(struct adf_device *dev, struct adf_overlay_engine *eng, | |
603 | struct adf_interface *intf) | |
604 | { | |
605 | struct adf_attachment_list *attachment; | |
606 | ||
607 | mutex_lock(&dev->client_lock); | |
608 | attachment = adf_attachment_find(&dev->attached, eng, intf); | |
609 | mutex_unlock(&dev->client_lock); | |
610 | ||
611 | return attachment != NULL; | |
612 | } | |
613 | EXPORT_SYMBOL(adf_device_attached); | |
614 | ||
615 | /** | |
616 | * adf_device_attach_allowed - return whether the ADF device supports attaching | |
617 | * an overlay engine and interface | |
618 | * | |
619 | * @dev: the parent device | |
620 | * @eng: the overlay engine | |
621 | * @intf: the interface | |
622 | */ | |
623 | bool adf_device_attach_allowed(struct adf_device *dev, | |
624 | struct adf_overlay_engine *eng, struct adf_interface *intf) | |
625 | { | |
626 | struct adf_attachment_list *attachment; | |
627 | ||
628 | mutex_lock(&dev->client_lock); | |
629 | attachment = adf_attachment_find(&dev->attach_allowed, eng, intf); | |
630 | mutex_unlock(&dev->client_lock); | |
631 | ||
632 | return attachment != NULL; | |
633 | } | |
634 | EXPORT_SYMBOL(adf_device_attach_allowed); | |
635 | /** | |
636 | * adf_device_attach - attach an overlay engine to an interface | |
637 | * | |
638 | * @dev: the parent device | |
639 | * @eng: the overlay engine | |
640 | * @intf: the interface | |
641 | * | |
642 | * Returns 0 on success, -%EINVAL if attaching @intf and @eng is not allowed, | |
643 | * -%EALREADY if @intf and @eng are already attached, or -errno on any other | |
644 | * failure. | |
645 | */ | |
646 | int adf_device_attach(struct adf_device *dev, struct adf_overlay_engine *eng, | |
647 | struct adf_interface *intf) | |
648 | { | |
649 | int ret; | |
650 | struct adf_attachment_list *attachment = NULL; | |
651 | ||
652 | ret = adf_attachment_validate(dev, eng, intf); | |
653 | if (ret < 0) | |
654 | return ret; | |
655 | ||
656 | mutex_lock(&dev->client_lock); | |
657 | ||
658 | if (dev->n_attached == ADF_MAX_ATTACHMENTS) { | |
659 | ret = -ENOMEM; | |
660 | goto done; | |
661 | } | |
662 | ||
663 | if (!adf_attachment_find(&dev->attach_allowed, eng, intf)) { | |
664 | ret = -EINVAL; | |
665 | goto done; | |
666 | } | |
667 | ||
668 | if (adf_attachment_find(&dev->attached, eng, intf)) { | |
669 | ret = -EALREADY; | |
670 | goto done; | |
671 | } | |
672 | ||
673 | ret = adf_device_attach_op(dev, eng, intf); | |
674 | if (ret < 0) | |
675 | goto done; | |
676 | ||
677 | attachment = kzalloc(sizeof(*attachment), GFP_KERNEL); | |
678 | if (!attachment) { | |
679 | ret = -ENOMEM; | |
680 | goto done; | |
681 | } | |
682 | ||
683 | attachment->attachment.interface = intf; | |
684 | attachment->attachment.overlay_engine = eng; | |
685 | list_add_tail(&attachment->head, &dev->attached); | |
686 | dev->n_attached++; | |
687 | ||
688 | done: | |
689 | mutex_unlock(&dev->client_lock); | |
690 | if (ret < 0) | |
691 | kfree(attachment); | |
692 | ||
693 | return ret; | |
694 | } | |
695 | EXPORT_SYMBOL(adf_device_attach); | |
696 | ||
697 | /** | |
698 | * adf_device_detach - detach an overlay engine from an interface | |
699 | * | |
700 | * @dev: the parent device | |
701 | * @eng: the overlay engine | |
702 | * @intf: the interface | |
703 | * | |
704 | * Returns 0 on success, -%EINVAL if @intf and @eng are not attached, | |
705 | * or -errno on any other failure. | |
706 | */ | |
707 | int adf_device_detach(struct adf_device *dev, struct adf_overlay_engine *eng, | |
708 | struct adf_interface *intf) | |
709 | { | |
710 | int ret; | |
711 | struct adf_attachment_list *attachment; | |
712 | ||
713 | ret = adf_attachment_validate(dev, eng, intf); | |
714 | if (ret < 0) | |
715 | return ret; | |
716 | ||
717 | mutex_lock(&dev->client_lock); | |
718 | ||
719 | attachment = adf_attachment_find(&dev->attached, eng, intf); | |
720 | if (!attachment) { | |
721 | ret = -EINVAL; | |
722 | goto done; | |
723 | } | |
724 | ||
725 | ret = adf_device_detach_op(dev, eng, intf); | |
726 | if (ret < 0) | |
727 | goto done; | |
728 | ||
729 | adf_attachment_free(attachment); | |
730 | dev->n_attached--; | |
731 | done: | |
732 | mutex_unlock(&dev->client_lock); | |
733 | return ret; | |
734 | } | |
735 | EXPORT_SYMBOL(adf_device_detach); | |
736 | ||
737 | /** | |
738 | * adf_interface_simple_buffer_alloc - allocate a simple buffer | |
739 | * | |
740 | * @intf: target interface | |
741 | * @w: width in pixels | |
742 | * @h: height in pixels | |
743 | * @format: format fourcc | |
744 | * @dma_buf: returns the allocated buffer | |
745 | * @offset: returns the byte offset of the allocated buffer's first pixel | |
746 | * @pitch: returns the allocated buffer's pitch | |
747 | * | |
748 | * See &struct adf_simple_buffer_alloc for a description of simple buffers and | |
749 | * their limitations. | |
750 | * | |
751 | * Returns 0 on success or -errno on failure. | |
752 | */ | |
753 | int adf_interface_simple_buffer_alloc(struct adf_interface *intf, u16 w, u16 h, | |
754 | u32 format, struct dma_buf **dma_buf, u32 *offset, u32 *pitch) | |
755 | { | |
756 | if (!intf->ops || !intf->ops->alloc_simple_buffer) | |
757 | return -EOPNOTSUPP; | |
758 | ||
759 | if (!adf_format_is_rgb(format)) | |
760 | return -EINVAL; | |
761 | ||
762 | return intf->ops->alloc_simple_buffer(intf, w, h, format, dma_buf, | |
763 | offset, pitch); | |
764 | } | |
765 | EXPORT_SYMBOL(adf_interface_simple_buffer_alloc); | |
766 | ||
767 | /** | |
768 | * adf_interface_simple_post - flip to a single buffer | |
769 | * | |
770 | * @intf: interface targeted by the flip | |
771 | * @buf: buffer to display | |
772 | * | |
773 | * adf_interface_simple_post() can be used generically for simple display | |
774 | * configurations, since the client does not need to provide any driver-private | |
775 | * configuration data. | |
776 | * | |
777 | * adf_interface_simple_post() has the same copying semantics as | |
778 | * adf_device_post(). | |
779 | * | |
780 | * On success, returns a sync fence which signals when the buffer is removed | |
781 | * from the screen. On failure, returns ERR_PTR(-errno). | |
782 | */ | |
783 | struct sync_fence *adf_interface_simple_post(struct adf_interface *intf, | |
784 | struct adf_buffer *buf) | |
785 | { | |
786 | size_t custom_data_size = 0; | |
787 | void *custom_data = NULL; | |
788 | struct sync_fence *ret; | |
789 | ||
790 | if (intf->ops && intf->ops->describe_simple_post) { | |
791 | int err; | |
792 | ||
793 | custom_data = kzalloc(ADF_MAX_CUSTOM_DATA_SIZE, GFP_KERNEL); | |
794 | if (!custom_data) { | |
795 | ret = ERR_PTR(-ENOMEM); | |
796 | goto done; | |
797 | } | |
798 | ||
799 | err = intf->ops->describe_simple_post(intf, buf, custom_data, | |
800 | &custom_data_size); | |
801 | if (err < 0) { | |
802 | ret = ERR_PTR(err); | |
803 | goto done; | |
804 | } | |
805 | } | |
806 | ||
807 | ret = adf_device_post(adf_interface_parent(intf), &intf, 1, buf, 1, | |
808 | custom_data, custom_data_size); | |
809 | done: | |
810 | kfree(custom_data); | |
811 | return ret; | |
812 | } | |
813 | EXPORT_SYMBOL(adf_interface_simple_post); |