Revert "sync: fix dEQP-EGL*get_frame_timestamps* "
t83x/kernel/drivers/gpu/drm/pl111/pl111_drm_crtc.c
/*
 *
 * (C) COPYRIGHT 2012-2014 ARM Limited. All rights reserved.
 *
 * This program is free software and is provided to you under the terms of the
 * GNU General Public License version 2 as published by the Free Software
 * Foundation, and any use by you of this program is subject to the terms
 * of such GNU licence.
 *
 * A copy of the licence is included with the program, and can also be obtained
 * from Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor,
 * Boston, MA 02110-1301, USA.
 *
 */


/**
 * pl111_drm_crtc.c
 * Implementation of the CRTC functions for PL111 DRM
 */
#include <linux/amba/bus.h>
#include <linux/amba/clcd.h>
#include <linux/version.h>
#include <linux/shmem_fs.h>
#include <linux/dma-buf.h>
#include <linux/module.h>

#include <drm/drmP.h>
#include <drm/drm_crtc_helper.h>

#include "pl111_drm.h"

static int pl111_crtc_num;

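/*
 * The dma-buf export field of struct drm_gem_object is called export_dma_buf
 * on older kernels and dma_buf on newer ones; alias the old name so the
 * accesses below build against either version.
 */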
#if (LINUX_VERSION_CODE <= KERNEL_VERSION(3, 11, 0))
#define export_dma_buf export_dma_buf
#else
#define export_dma_buf dma_buf
#endif

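/*
 * Completion half of a flip: called when the CLCD base-update IRQ signals
 * that a previously programmed flip has taken effect. Releases the resources
 * held by the completed flip, programs the next queued flip (if any) and
 * delivers the vblank/page-flip event to userspace.
 */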
void pl111_common_irq(struct pl111_drm_crtc *pl111_crtc)
{
	struct drm_device *dev = pl111_crtc->crtc.dev;
	struct pl111_drm_flip_resource *old_flip_res;
	struct pl111_gem_bo *bo;
	unsigned long irq_flags;
	int flips_in_flight;
#ifdef CONFIG_DMA_SHARED_BUFFER_USES_KDS
	unsigned long flags;
#endif

	spin_lock_irqsave(&pl111_crtc->base_update_lock, irq_flags);

	/*
	 * Cache the flip resource that caused the IRQ since it will be
	 * dispatched later. Early return if the IRQ isn't associated to
	 * a base register update.
	 *
	 * TODO MIDBASE-2790: disable IRQs when a flip is not pending.
	 */
	old_flip_res = pl111_crtc->current_update_res;
	if (!old_flip_res) {
		spin_unlock_irqrestore(&pl111_crtc->base_update_lock, irq_flags);
		return;
	}
	pl111_crtc->current_update_res = NULL;

	/* Prepare the next flip (if any) of the queue as soon as possible. */
	if (!list_empty(&pl111_crtc->update_queue)) {
		struct pl111_drm_flip_resource *flip_res;
		/* Remove the head of the list */
		flip_res = list_first_entry(&pl111_crtc->update_queue,
				struct pl111_drm_flip_resource, link);
		list_del(&flip_res->link);
		do_flip_to_res(flip_res);
		/*
		 * current_update_res will be set, which guarantees that
		 * another flip_res coming in gets queued instead of
		 * handled immediately
		 */
	}
	spin_unlock_irqrestore(&pl111_crtc->base_update_lock, irq_flags);

	/* Properly finalize the flip that caused the IRQ */
	DRM_DEBUG_KMS("DRM Finalizing old_flip_res=%p\n", old_flip_res);

	bo = PL111_BO_FROM_FRAMEBUFFER(old_flip_res->fb);
#ifdef CONFIG_DMA_SHARED_BUFFER_USES_KDS
	spin_lock_irqsave(&pl111_crtc->current_displaying_lock, flags);
	release_kds_resource_and_display(old_flip_res);
	spin_unlock_irqrestore(&pl111_crtc->current_displaying_lock, flags);
#endif
	/* Release the DMA buffer reference taken for this flip */
	if (bo->gem_object.export_dma_buf != NULL)
		dma_buf_put(bo->gem_object.export_dma_buf);

	drm_handle_vblank(dev, pl111_crtc->crtc_index);

	/* Wake up any processes waiting for the page flip event */
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 8, 0))
	if (old_flip_res->event) {
		spin_lock_bh(&dev->event_lock);
		drm_send_vblank_event(dev, pl111_crtc->crtc_index,
				old_flip_res->event);
		spin_unlock_bh(&dev->event_lock);
	}
#else
	if (old_flip_res->event) {
		struct drm_pending_vblank_event *e = old_flip_res->event;
		struct timeval now;
		unsigned int seq;

		DRM_DEBUG_KMS("%s: wake up page flip event (%p)\n", __func__,
				old_flip_res->event);

		spin_lock_bh(&dev->event_lock);
		seq = drm_vblank_count_and_time(dev, pl111_crtc->crtc_index,
				&now);
		e->pipe = pl111_crtc->crtc_index;
		e->event.sequence = seq;
		e->event.tv_sec = now.tv_sec;
		e->event.tv_usec = now.tv_usec;

		list_add_tail(&e->base.link,
				&e->base.file_priv->event_list);

		wake_up_interruptible(&e->base.file_priv->event_wait);
		spin_unlock_bh(&dev->event_lock);
	}
#endif

	drm_vblank_put(dev, pl111_crtc->crtc_index);

	/*
	 * workqueue.c:process_one_work():
	 * "It is permissible to free the struct work_struct from
	 * inside the function that is called from it"
	 */
	kmem_cache_free(priv.page_flip_slab, old_flip_res);

	flips_in_flight = atomic_dec_return(&priv.nr_flips_in_flight);
	if (flips_in_flight == 0 ||
	    flips_in_flight == (NR_FLIPS_IN_FLIGHT_THRESHOLD - 1))
		wake_up(&priv.wait_for_flips);

	DRM_DEBUG_KMS("DRM release flip_res=%p\n", old_flip_res);
}

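/*
 * Trampoline used as the KDS callback: cb1 is the flip resource, cb2 the
 * framebuffer. It simply forwards to the per-CRTC show_framebuffer_cb hook.
 */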
void show_framebuffer_on_crtc_cb(void *cb1, void *cb2)
{
	struct pl111_drm_flip_resource *flip_res = cb1;
	struct pl111_drm_crtc *pl111_crtc = to_pl111_crtc(flip_res->crtc);

	pl111_crtc->show_framebuffer_cb(cb1, cb2);
}

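/*
 * Queue a framebuffer to be shown on @crtc. For page flips the display is
 * updated asynchronously once any dma-buf/KDS dependency has been satisfied;
 * for modesets the call waits until all outstanding flips have completed.
 * Returns 0 on success or a negative error code.
 */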
int show_framebuffer_on_crtc(struct drm_crtc *crtc,
			struct drm_framebuffer *fb, bool page_flip,
			struct drm_pending_vblank_event *event)
{
	struct pl111_gem_bo *bo;
	struct pl111_drm_flip_resource *flip_res;
	int flips_in_flight;
	int old_flips_in_flight;

#if (LINUX_VERSION_CODE <= KERNEL_VERSION(3, 14, 0))
	crtc->fb = fb;
#else
	crtc->primary->fb = fb;
#endif

	bo = PL111_BO_FROM_FRAMEBUFFER(fb);
	if (bo == NULL) {
		DRM_DEBUG_KMS("Failed to get pl111_gem_bo object\n");
		return -EINVAL;
	}

	/* If this is a full modeset, wait for all outstanding flips to complete
	 * before continuing. This avoids unnecessary complication from being
	 * able to queue up multiple modesets and queues of mixed modesets and
	 * page flips.
	 *
	 * Modesets should be uncommon and will not be performant anyway, so
	 * making them synchronous should have negligible performance impact.
	 */
	if (!page_flip) {
		int ret = wait_event_killable(priv.wait_for_flips,
				atomic_read(&priv.nr_flips_in_flight) == 0);
		if (ret)
			return ret;
	}

	/*
	 * There can be more 'early display' flips in flight than there are
	 * buffers, and there is (currently) no explicit bound on the number of
	 * flips. Hence, we need a new allocation for each one.
	 *
	 * Note: this could be optimized down if we knew a bound on the flips,
	 * since an application can only have so many buffers in flight to be
	 * useful/not hog all the memory
	 */
	flip_res = kmem_cache_alloc(priv.page_flip_slab, GFP_KERNEL);
	if (flip_res == NULL) {
		pr_err("kmem_cache_alloc failed to alloc - flip ignored\n");
		return -ENOMEM;
	}

	/*
	 * Increment flips in flight, blocking while the count is at
	 * NR_FLIPS_IN_FLIGHT_THRESHOLD
	 */
	do {
		/*
		 * Note: use of assign-and-then-compare in the condition to set
		 * flips_in_flight
		 */
		int ret = wait_event_killable(priv.wait_for_flips,
				(flips_in_flight =
					atomic_read(&priv.nr_flips_in_flight))
				< NR_FLIPS_IN_FLIGHT_THRESHOLD);
		if (ret != 0) {
			kmem_cache_free(priv.page_flip_slab, flip_res);
			return ret;
		}

		old_flips_in_flight = atomic_cmpxchg(&priv.nr_flips_in_flight,
				flips_in_flight, flips_in_flight + 1);
	} while (old_flips_in_flight != flips_in_flight);

	flip_res->fb = fb;
	flip_res->crtc = crtc;
	flip_res->page_flip = page_flip;
	flip_res->event = event;
	INIT_LIST_HEAD(&flip_res->link);
	DRM_DEBUG_KMS("DRM alloc flip_res=%p\n", flip_res);
#ifdef CONFIG_DMA_SHARED_BUFFER_USES_KDS
	if (bo->gem_object.export_dma_buf != NULL) {
		struct dma_buf *buf = bo->gem_object.export_dma_buf;
		unsigned long shared[1] = { 0 };
		struct kds_resource *resource_list[1] = {
			get_dma_buf_kds_resource(buf) };
		int err;

		get_dma_buf(buf);
		DRM_DEBUG_KMS("Got dma_buf %p\n", buf);

		/* Wait for the KDS resource associated with this buffer */
		err = kds_async_waitall(&flip_res->kds_res_set,
					&priv.kds_cb, flip_res, fb, 1, shared,
					resource_list);
		BUG_ON(err);
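		/*
		 * The KDS callback (priv.kds_cb, registered at driver init) is
		 * expected to invoke show_framebuffer_on_crtc_cb() with
		 * flip_res and fb once the resource becomes available, which
		 * performs the actual flip.
		 */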
	} else {
		struct pl111_drm_crtc *pl111_crtc = to_pl111_crtc(crtc);

		DRM_DEBUG_KMS("No dma_buf for this flip\n");

		/* No dma-buf attached so just call the callback directly */
		flip_res->kds_res_set = NULL;
		pl111_crtc->show_framebuffer_cb(flip_res, fb);
	}
#else
	if (bo->gem_object.export_dma_buf != NULL) {
		struct dma_buf *buf = bo->gem_object.export_dma_buf;

		get_dma_buf(buf);
		DRM_DEBUG_KMS("Got dma_buf %p\n", buf);
	} else {
		DRM_DEBUG_KMS("No dma_buf for this flip\n");
	}

	/* Without KDS there is nothing to wait for, so call the callback directly */
	{
		struct pl111_drm_crtc *pl111_crtc = to_pl111_crtc(crtc);
		pl111_crtc->show_framebuffer_cb(flip_res, fb);
	}
#endif

	/* For the same reasons as the wait at the start of this function,
	 * wait for the modeset to complete before continuing.
	 */
	if (!page_flip) {
		int ret = wait_event_killable(priv.wait_for_flips,
				atomic_read(&priv.nr_flips_in_flight) == 0);
		if (ret)
			return ret;
	}

	return 0;
}

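/*
 * DRM .page_flip entry point: flips are funnelled through
 * show_framebuffer_on_crtc() with page_flip set. The extra flags argument
 * introduced in later kernels is accepted but not used.
 */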
int pl111_crtc_page_flip(struct drm_crtc *crtc, struct drm_framebuffer *fb,
#if (LINUX_VERSION_CODE <= KERNEL_VERSION(3, 11, 0))
			struct drm_pending_vblank_event *event)
#else
			struct drm_pending_vblank_event *event,
			uint32_t flags)
#endif
{
	DRM_DEBUG_KMS("%s: crtc=%p, fb=%p, event=%p\n",
			__func__, crtc, fb, event);
	return show_framebuffer_on_crtc(crtc, fb, true, event);
}

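/*
 * Helper .mode_set: duplicate the adjusted mode into pl111_crtc->new_mode and
 * perform a synchronous modeset by showing the current framebuffer; on
 * failure, revert to the previous mode and free the duplicate.
 */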
int pl111_crtc_helper_mode_set(struct drm_crtc *crtc,
			struct drm_display_mode *mode,
			struct drm_display_mode *adjusted_mode,
			int x, int y, struct drm_framebuffer *old_fb)
{
	int ret;
	struct pl111_drm_crtc *pl111_crtc = to_pl111_crtc(crtc);
	struct drm_display_mode *duplicated_mode;

	DRM_DEBUG_KMS("DRM crtc_helper_mode_set, x=%d y=%d bpp=%d\n",
			adjusted_mode->hdisplay, adjusted_mode->vdisplay,
#if (LINUX_VERSION_CODE <= KERNEL_VERSION(3, 14, 0))
			crtc->fb->bits_per_pixel);
#else
			crtc->primary->fb->bits_per_pixel);
#endif

	duplicated_mode = drm_mode_duplicate(crtc->dev, adjusted_mode);
	if (!duplicated_mode)
		return -ENOMEM;

	pl111_crtc->new_mode = duplicated_mode;
#if (LINUX_VERSION_CODE <= KERNEL_VERSION(3, 14, 0))
	ret = show_framebuffer_on_crtc(crtc, crtc->fb, false, NULL);
#else
	ret = show_framebuffer_on_crtc(crtc, crtc->primary->fb, false, NULL);
#endif
	if (ret != 0) {
		pl111_crtc->new_mode = pl111_crtc->current_mode;
		drm_mode_destroy(crtc->dev, duplicated_mode);
	}

	return ret;
}

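/*
 * The prepare and commit helpers are effectively no-ops for the PL111 and
 * only emit debug traces; the hardware update itself is driven by
 * show_framebuffer_on_crtc() from the mode_set and page_flip paths.
 */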
void pl111_crtc_helper_prepare(struct drm_crtc *crtc)
{
	DRM_DEBUG_KMS("DRM %s on crtc=%p\n", __func__, crtc);
}

void pl111_crtc_helper_commit(struct drm_crtc *crtc)
{
	DRM_DEBUG_KMS("DRM %s on crtc=%p\n", __func__, crtc);
}

bool pl111_crtc_helper_mode_fixup(struct drm_crtc *crtc,
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 6, 0))
			const struct drm_display_mode *mode,
#else
			struct drm_display_mode *mode,
#endif
			struct drm_display_mode *adjusted_mode)
{
	DRM_DEBUG_KMS("DRM %s on crtc=%p\n", __func__, crtc);

#ifdef CONFIG_ARCH_VEXPRESS
	/*
	 * 1024x768 with more than 16 bits per pixel may not work
	 * correctly on Versatile Express due to bandwidth issues
	 */
	if (mode->hdisplay == 1024 && mode->vdisplay == 768 &&
#if (LINUX_VERSION_CODE <= KERNEL_VERSION(3, 14, 0))
	    crtc->fb->bits_per_pixel > 16) {
#else
	    crtc->primary->fb->bits_per_pixel > 16) {
#endif
		DRM_INFO("*WARNING* 1024x768 at > 16 bpp may suffer corruption\n");
	}
#endif

	return true;
}

void pl111_crtc_helper_disable(struct drm_crtc *crtc)
{
	int ret;

	DRM_DEBUG_KMS("DRM %s on crtc=%p\n", __func__, crtc);

	/* Don't disable the CRTC until no flips are in flight, as the IRQ
	 * will be disabled along with it */
	ret = wait_event_killable(priv.wait_for_flips,
			atomic_read(&priv.nr_flips_in_flight) == 0);
	if (ret) {
		pr_err("%s: wait for outstanding flips interrupted\n", __func__);
		return;
	}
	clcd_disable(crtc);
}

void pl111_crtc_destroy(struct drm_crtc *crtc)
{
	struct pl111_drm_crtc *pl111_crtc = to_pl111_crtc(crtc);

	DRM_DEBUG_KMS("DRM %s on crtc=%p\n", __func__, crtc);

	drm_crtc_cleanup(crtc);
	kfree(pl111_crtc);
}

const struct drm_crtc_funcs crtc_funcs = {
	.cursor_set = pl111_crtc_cursor_set,
	.cursor_move = pl111_crtc_cursor_move,
	.set_config = drm_crtc_helper_set_config,
	.page_flip = pl111_crtc_page_flip,
	.destroy = pl111_crtc_destroy
};

const struct drm_crtc_helper_funcs crtc_helper_funcs = {
	.mode_set = pl111_crtc_helper_mode_set,
	.prepare = pl111_crtc_helper_prepare,
	.commit = pl111_crtc_helper_commit,
	.mode_fixup = pl111_crtc_helper_mode_fixup,
	.disable = pl111_crtc_helper_disable,
};

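/*
 * Allocate and initialise a pl111_drm_crtc, register it with the DRM core and
 * attach the helper callbacks. Returns NULL on allocation failure.
 */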
struct pl111_drm_crtc *pl111_crtc_create(struct drm_device *dev)
{
	struct pl111_drm_crtc *pl111_crtc;

	pl111_crtc = kzalloc(sizeof(struct pl111_drm_crtc), GFP_KERNEL);
	if (pl111_crtc == NULL) {
		pr_err("Failed to allocate pl111_drm_crtc\n");
		return NULL;
	}

	drm_crtc_init(dev, &pl111_crtc->crtc, &crtc_funcs);
	drm_crtc_helper_add(&pl111_crtc->crtc, &crtc_helper_funcs);

	pl111_crtc->crtc_index = pl111_crtc_num;
	pl111_crtc_num++;
	pl111_crtc->crtc.enabled = 0;
	pl111_crtc->last_bpp = 0;
	pl111_crtc->current_update_res = NULL;
#ifdef CONFIG_DMA_SHARED_BUFFER_USES_KDS
	pl111_crtc->displaying_fb = NULL;
	pl111_crtc->old_kds_res_set = NULL;
	spin_lock_init(&pl111_crtc->current_displaying_lock);
#endif
	pl111_crtc->show_framebuffer_cb = show_framebuffer_on_crtc_cb_internal;
	INIT_LIST_HEAD(&pl111_crtc->update_queue);
	spin_lock_init(&pl111_crtc->base_update_lock);

	return pl111_crtc;
}