/*
 * (C) COPYRIGHT 2012-2014 ARM Limited. All rights reserved.
 *
 * This program is free software and is provided to you under the terms of the
 * GNU General Public License version 2 as published by the Free Software
 * Foundation, and any use by you of this program is subject to the terms
 * of such GNU licence.
 *
 * A copy of the licence is included with the program, and can also be obtained
 * from Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor,
 * Boston, MA 02110-1301, USA.
 */
/*
 * Implementation of the CRTC functions for PL111 DRM
 */
#include <linux/amba/bus.h>
#include <linux/amba/clcd.h>
#include <linux/version.h>
#include <linux/shmem_fs.h>
#include <linux/slab.h>
#include <linux/dma-buf.h>
#include <linux/module.h>

#include <drm/drmP.h>
#include <drm/drm_crtc_helper.h>

#include "pl111_drm.h"
/* Index handed to each newly created CRTC (see pl111_crtc_create()). */
static int pl111_crtc_num;
36 #if (LINUX_VERSION_CODE <= KERNEL_VERSION(3, 11, 0))
37 #define export_dma_buf export_dma_buf
39 #define export_dma_buf dma_buf
42 void pl111_common_irq(struct pl111_drm_crtc
*pl111_crtc
)
44 struct drm_device
*dev
= pl111_crtc
->crtc
.dev
;
45 struct pl111_drm_flip_resource
*old_flip_res
;
46 struct pl111_gem_bo
*bo
;
47 unsigned long irq_flags
;
49 #ifdef CONFIG_DMA_SHARED_BUFFER_USES_KDS
53 spin_lock_irqsave(&pl111_crtc
->base_update_lock
, irq_flags
);
56 * Cache the flip resource that caused the IRQ since it will be
57 * dispatched later. Early return if the IRQ isn't associated to
58 * a base register update.
60 * TODO MIDBASE-2790: disable IRQs when a flip is not pending.
62 old_flip_res
= pl111_crtc
->current_update_res
;
64 spin_unlock_irqrestore(&pl111_crtc
->base_update_lock
, irq_flags
);
67 pl111_crtc
->current_update_res
= NULL
;
69 /* Prepare the next flip (if any) of the queue as soon as possible. */
70 if (!list_empty(&pl111_crtc
->update_queue
)) {
71 struct pl111_drm_flip_resource
*flip_res
;
72 /* Remove the head of the list */
73 flip_res
= list_first_entry(&pl111_crtc
->update_queue
,
74 struct pl111_drm_flip_resource
, link
);
75 list_del(&flip_res
->link
);
76 do_flip_to_res(flip_res
);
78 * current_update_res will be set, so guarentees that
79 * another flip_res coming in gets queued instead of
83 spin_unlock_irqrestore(&pl111_crtc
->base_update_lock
, irq_flags
);
85 /* Finalize properly the flip that caused the IRQ */
86 DRM_DEBUG_KMS("DRM Finalizing old_flip_res=%p\n", old_flip_res
);
88 bo
= PL111_BO_FROM_FRAMEBUFFER(old_flip_res
->fb
);
89 #ifdef CONFIG_DMA_SHARED_BUFFER_USES_KDS
90 spin_lock_irqsave(&pl111_crtc
->current_displaying_lock
, flags
);
91 release_kds_resource_and_display(old_flip_res
);
92 spin_unlock_irqrestore(&pl111_crtc
->current_displaying_lock
, flags
);
94 /* Release DMA buffer on this flip */
96 if (bo
->gem_object
.export_dma_buf
!= NULL
)
97 dma_buf_put(bo
->gem_object
.export_dma_buf
);
99 drm_handle_vblank(dev
, pl111_crtc
->crtc_index
);
101 /* Wake up any processes waiting for page flip event */
102 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 8, 0))
103 if (old_flip_res
->event
) {
104 spin_lock_bh(&dev
->event_lock
);
105 drm_send_vblank_event(dev
, pl111_crtc
->crtc_index
,
106 old_flip_res
->event
);
107 spin_unlock_bh(&dev
->event_lock
);
110 if (old_flip_res
->event
) {
111 struct drm_pending_vblank_event
*e
= old_flip_res
->event
;
115 DRM_DEBUG_KMS("%s: wake up page flip event (%p)\n", __func__
,
116 old_flip_res
->event
);
118 spin_lock_bh(&dev
->event_lock
);
119 seq
= drm_vblank_count_and_time(dev
, pl111_crtc
->crtc_index
,
121 e
->pipe
= pl111_crtc
->crtc_index
;
122 e
->event
.sequence
= seq
;
123 e
->event
.tv_sec
= now
.tv_sec
;
124 e
->event
.tv_usec
= now
.tv_usec
;
126 list_add_tail(&e
->base
.link
,
127 &e
->base
.file_priv
->event_list
);
129 wake_up_interruptible(&e
->base
.file_priv
->event_wait
);
130 spin_unlock_bh(&dev
->event_lock
);
134 drm_vblank_put(dev
, pl111_crtc
->crtc_index
);
137 * workqueue.c:process_one_work():
138 * "It is permissible to free the struct work_struct from
139 * inside the function that is called from it"
141 kmem_cache_free(priv
.page_flip_slab
, old_flip_res
);
143 flips_in_flight
= atomic_dec_return(&priv
.nr_flips_in_flight
);
144 if (flips_in_flight
== 0 ||
145 flips_in_flight
== (NR_FLIPS_IN_FLIGHT_THRESHOLD
- 1))
146 wake_up(&priv
.wait_for_flips
);
148 DRM_DEBUG_KMS("DRM release flip_res=%p\n", old_flip_res
);
151 void show_framebuffer_on_crtc_cb(void *cb1
, void *cb2
)
153 struct pl111_drm_flip_resource
*flip_res
= cb1
;
154 struct pl111_drm_crtc
*pl111_crtc
= to_pl111_crtc(flip_res
->crtc
);
156 pl111_crtc
->show_framebuffer_cb(cb1
, cb2
);
159 int show_framebuffer_on_crtc(struct drm_crtc
*crtc
,
160 struct drm_framebuffer
*fb
, bool page_flip
,
161 struct drm_pending_vblank_event
*event
)
163 struct pl111_gem_bo
*bo
;
164 struct pl111_drm_flip_resource
*flip_res
;
166 int old_flips_in_flight
;
168 #if (LINUX_VERSION_CODE <= KERNEL_VERSION(3, 14, 0))
171 crtc
->primary
->fb
= fb
;
174 bo
= PL111_BO_FROM_FRAMEBUFFER(fb
);
176 DRM_DEBUG_KMS("Failed to get pl111_gem_bo object\n");
180 /* If this is a full modeset, wait for all outstanding flips to complete
181 * before continuing. This avoids unnecessary complication from being
182 * able to queue up multiple modesets and queues of mixed modesets and
185 * Modesets should be uncommon and will not be performant anyway, so
186 * making them synchronous should have negligible performance impact.
189 int ret
= wait_event_killable(priv
.wait_for_flips
,
190 atomic_read(&priv
.nr_flips_in_flight
) == 0);
196 * There can be more 'early display' flips in flight than there are
197 * buffers, and there is (currently) no explicit bound on the number of
198 * flips. Hence, we need a new allocation for each one.
200 * Note: this could be optimized down if we knew a bound on the flips,
201 * since an application can only have so many buffers in flight to be
202 * useful/not hog all the memory
204 flip_res
= kmem_cache_alloc(priv
.page_flip_slab
, GFP_KERNEL
);
205 if (flip_res
== NULL
) {
206 pr_err("kmem_cache_alloc failed to alloc - flip ignored\n");
211 * increment flips in flight, whilst blocking when we reach
212 * NR_FLIPS_IN_FLIGHT_THRESHOLD
216 * Note: use of assign-and-then-compare in the condition to set
219 int ret
= wait_event_killable(priv
.wait_for_flips
,
221 atomic_read(&priv
.nr_flips_in_flight
))
222 < NR_FLIPS_IN_FLIGHT_THRESHOLD
);
224 kmem_cache_free(priv
.page_flip_slab
, flip_res
);
228 old_flips_in_flight
= atomic_cmpxchg(&priv
.nr_flips_in_flight
,
229 flips_in_flight
, flips_in_flight
+ 1);
230 } while (old_flips_in_flight
!= flips_in_flight
);
233 flip_res
->crtc
= crtc
;
234 flip_res
->page_flip
= page_flip
;
235 flip_res
->event
= event
;
236 INIT_LIST_HEAD(&flip_res
->link
);
237 DRM_DEBUG_KMS("DRM alloc flip_res=%p\n", flip_res
);
238 #ifdef CONFIG_DMA_SHARED_BUFFER_USES_KDS
239 if (bo
->gem_object
.export_dma_buf
!= NULL
) {
240 struct dma_buf
*buf
= bo
->gem_object
.export_dma_buf
;
241 unsigned long shared
[1] = { 0 };
242 struct kds_resource
*resource_list
[1] = {
243 get_dma_buf_kds_resource(buf
) };
247 DRM_DEBUG_KMS("Got dma_buf %p\n", buf
);
249 /* Wait for the KDS resource associated with this buffer */
250 err
= kds_async_waitall(&flip_res
->kds_res_set
,
251 &priv
.kds_cb
, flip_res
, fb
, 1, shared
,
255 struct pl111_drm_crtc
*pl111_crtc
= to_pl111_crtc(crtc
);
257 DRM_DEBUG_KMS("No dma_buf for this flip\n");
259 /* No dma-buf attached so just call the callback directly */
260 flip_res
->kds_res_set
= NULL
;
261 pl111_crtc
->show_framebuffer_cb(flip_res
, fb
);
264 if (bo
->gem_object
.export_dma_buf
!= NULL
) {
265 struct dma_buf
*buf
= bo
->gem_object
.export_dma_buf
;
268 DRM_DEBUG_KMS("Got dma_buf %p\n", buf
);
270 DRM_DEBUG_KMS("No dma_buf for this flip\n");
273 /* No dma-buf attached to this so just call the callback directly */
275 struct pl111_drm_crtc
*pl111_crtc
= to_pl111_crtc(crtc
);
276 pl111_crtc
->show_framebuffer_cb(flip_res
, fb
);
280 /* For the same reasons as the wait at the start of this function,
281 * wait for the modeset to complete before continuing.
284 int ret
= wait_event_killable(priv
.wait_for_flips
,
285 flips_in_flight
== 0);
293 int pl111_crtc_page_flip(struct drm_crtc
*crtc
, struct drm_framebuffer
*fb
,
294 #if (LINUX_VERSION_CODE <= KERNEL_VERSION(3, 11, 0))
295 struct drm_pending_vblank_event
*event
)
297 struct drm_pending_vblank_event
*event
,
301 DRM_DEBUG_KMS("%s: crtc=%p, fb=%p, event=%p\n",
302 __func__
, crtc
, fb
, event
);
303 return show_framebuffer_on_crtc(crtc
, fb
, true, event
);
306 int pl111_crtc_helper_mode_set(struct drm_crtc
*crtc
,
307 struct drm_display_mode
*mode
,
308 struct drm_display_mode
*adjusted_mode
,
309 int x
, int y
, struct drm_framebuffer
*old_fb
)
312 struct pl111_drm_crtc
*pl111_crtc
= to_pl111_crtc(crtc
);
313 struct drm_display_mode
*duplicated_mode
;
315 DRM_DEBUG_KMS("DRM crtc_helper_mode_set, x=%d y=%d bpp=%d\n",
316 adjusted_mode
->hdisplay
, adjusted_mode
->vdisplay
,
317 #if (LINUX_VERSION_CODE <= KERNEL_VERSION(3, 14, 0))
318 crtc
->fb
->bits_per_pixel
);
320 crtc
->primary
->fb
->bits_per_pixel
);
323 duplicated_mode
= drm_mode_duplicate(crtc
->dev
, adjusted_mode
);
324 if (!duplicated_mode
)
327 pl111_crtc
->new_mode
= duplicated_mode
;
328 #if (LINUX_VERSION_CODE <= KERNEL_VERSION(3, 14, 0))
329 ret
= show_framebuffer_on_crtc(crtc
, crtc
->fb
, false, NULL
);
331 ret
= show_framebuffer_on_crtc(crtc
, crtc
->primary
->fb
, false, NULL
);
334 pl111_crtc
->new_mode
= pl111_crtc
->current_mode
;
335 drm_mode_destroy(crtc
->dev
, duplicated_mode
);
/* drm_crtc_helper_funcs.prepare: nothing to do here beyond logging. */
void pl111_crtc_helper_prepare(struct drm_crtc *crtc)
{
	DRM_DEBUG_KMS("DRM %s on crtc=%p\n", __func__, crtc);
}
/* drm_crtc_helper_funcs.commit: nothing to do here beyond logging. */
void pl111_crtc_helper_commit(struct drm_crtc *crtc)
{
	DRM_DEBUG_KMS("DRM %s on crtc=%p\n", __func__, crtc);
}
351 bool pl111_crtc_helper_mode_fixup(struct drm_crtc
*crtc
,
352 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 6, 0))
353 const struct drm_display_mode
*mode
,
355 struct drm_display_mode
*mode
,
357 struct drm_display_mode
*adjusted_mode
)
359 DRM_DEBUG_KMS("DRM %s on crtc=%p\n", __func__
, crtc
);
361 #ifdef CONFIG_ARCH_VEXPRESS
363 * 1024x768 with more than 16 bits per pixel may not work
364 * correctly on Versatile Express due to bandwidth issues
366 if (mode
->hdisplay
== 1024 && mode
->vdisplay
== 768 &&
367 #if (LINUX_VERSION_CODE <= KERNEL_VERSION(3, 14, 0))
368 crtc
->fb
->bits_per_pixel
> 16) {
370 crtc
->primary
->fb
->bits_per_pixel
> 16) {
372 DRM_INFO("*WARNING* 1024x768 at > 16 bpp may suffer corruption\n");
379 void pl111_crtc_helper_disable(struct drm_crtc
*crtc
)
383 DRM_DEBUG_KMS("DRM %s on crtc=%p\n", __func__
, crtc
);
385 /* don't disable crtc until no flips in flight as irq will be disabled */
386 ret
= wait_event_killable(priv
.wait_for_flips
, atomic_read(&priv
.nr_flips_in_flight
) == 0);
388 pr_err("pl111_crtc_helper_disable failed\n");
/*
 * drm_crtc_funcs.destroy: tear down the DRM core state for this CRTC and
 * free the driver wrapper allocated by pl111_crtc_create().
 */
void pl111_crtc_destroy(struct drm_crtc *crtc)
{
	struct pl111_drm_crtc *pl111_crtc = to_pl111_crtc(crtc);

	DRM_DEBUG_KMS("DRM %s on crtc=%p\n", __func__, crtc);

	drm_crtc_cleanup(crtc);
	kfree(pl111_crtc);	/* restored -- pl111_crtc is otherwise unused; verify */
}
404 const struct drm_crtc_funcs crtc_funcs
= {
405 .cursor_set
= pl111_crtc_cursor_set
,
406 .cursor_move
= pl111_crtc_cursor_move
,
407 .set_config
= drm_crtc_helper_set_config
,
408 .page_flip
= pl111_crtc_page_flip
,
409 .destroy
= pl111_crtc_destroy
412 const struct drm_crtc_helper_funcs crtc_helper_funcs
= {
413 .mode_set
= pl111_crtc_helper_mode_set
,
414 .prepare
= pl111_crtc_helper_prepare
,
415 .commit
= pl111_crtc_helper_commit
,
416 .mode_fixup
= pl111_crtc_helper_mode_fixup
,
417 .disable
= pl111_crtc_helper_disable
,
420 struct pl111_drm_crtc
*pl111_crtc_create(struct drm_device
*dev
)
422 struct pl111_drm_crtc
*pl111_crtc
;
424 pl111_crtc
= kzalloc(sizeof(struct pl111_drm_crtc
), GFP_KERNEL
);
425 if (pl111_crtc
== NULL
) {
426 pr_err("Failed to allocated pl111_drm_crtc\n");
430 drm_crtc_init(dev
, &pl111_crtc
->crtc
, &crtc_funcs
);
431 drm_crtc_helper_add(&pl111_crtc
->crtc
, &crtc_helper_funcs
);
433 pl111_crtc
->crtc_index
= pl111_crtc_num
;
435 pl111_crtc
->crtc
.enabled
= 0;
436 pl111_crtc
->last_bpp
= 0;
437 pl111_crtc
->current_update_res
= NULL
;
438 #ifdef CONFIG_DMA_SHARED_BUFFER_USES_KDS
439 pl111_crtc
->displaying_fb
= NULL
;
440 pl111_crtc
->old_kds_res_set
= NULL
;
441 spin_lock_init(&pl111_crtc
->current_displaying_lock
);
443 pl111_crtc
->show_framebuffer_cb
= show_framebuffer_on_crtc_cb_internal
;
444 INIT_LIST_HEAD(&pl111_crtc
->update_queue
);
445 spin_lock_init(&pl111_crtc
->base_update_lock
);