drm/amdgpu: Refactor flip into prepare submit and submit. (v3)
author     Andrey Grodzovsky <Andrey.Grodzovsky@amd.com>
Mon, 5 Dec 2016 20:15:33 +0000 (15:15 -0500)
committer  Alex Deucher <alexander.deucher@amd.com>
Fri, 27 Jan 2017 16:13:31 +0000 (11:13 -0500)
Make pflip atomic friendly. Split the function
into a part that can fail (prepare) and the part
that actually submits the flip. Call the prepare
function before atomic states are swapped so that,
in case of an error, we can fail the IOCTL.
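
To illustrate the intended split, here is a minimal sketch of a caller,
modelled on the reworked amdgpu_crtc_page_flip_target() below: the
fallible prepare step runs before any atomic state is swapped, and the
void submit step runs afterwards. Only amdgpu_crtc_prepare_flip() and
amdgpu_crtc_submit_flip() come from this patch; the wrapper function and
the state-swap placeholder are hypothetical.

    /*
     * Illustrative only -- not part of this patch.  Shows the prepare
     * (can fail) / submit (cannot fail) split around a state swap.
     */
    static int example_atomic_flip(struct drm_crtc *crtc,
                                   struct drm_framebuffer *fb,
                                   struct drm_pending_vblank_event *event,
                                   uint32_t flags, uint32_t target)
    {
            struct amdgpu_flip_work *work;
            struct amdgpu_bo *new_abo;
            int r;

            /* Everything that can fail happens here, before any state is
             * swapped, so the ioctl can still be failed cleanly. */
            r = amdgpu_crtc_prepare_flip(crtc, fb, event, flags, target,
                                         &work, &new_abo);
            if (r)
                    return r;

            /* ... swap atomic states here, e.g. via
             * drm_atomic_helper_swap_state() ... */

            /* Past the point of no return: submit cannot fail, hence void. */
            amdgpu_crtc_submit_flip(crtc, fb, work, new_abo);

            return 0;
    }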

v2:
Update due to target_vblank code change.
Fix indentation.
Change return type of amdgpu_crtc_submit_flip to void.
v3: agd: fix formatting

Signed-off-by: Andrey Grodzovsky <Andrey.Grodzovsky@amd.com>
Reviewed-by: Harry Wentland <harry.wentland@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
drivers/gpu/drm/amd/amdgpu/amdgpu_display.c
drivers/gpu/drm/amd/amdgpu/amdgpu_mode.h

diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c
index 4986340326e29c3072efc4e066f7947777baf823..39fc388f222ac61966eb7199e6727f913c745dd3 100644
@@ -138,10 +138,52 @@ static void amdgpu_unpin_work_func(struct work_struct *__work)
        kfree(work);
 }
 
-int amdgpu_crtc_page_flip_target(struct drm_crtc *crtc,
-                                struct drm_framebuffer *fb,
-                                struct drm_pending_vblank_event *event,
-                                uint32_t page_flip_flags, uint32_t target)
+
+static void amdgpu_flip_work_cleanup(struct amdgpu_flip_work *work)
+{
+       int i;
+
+       amdgpu_bo_unref(&work->old_abo);
+       dma_fence_put(work->excl);
+       for (i = 0; i < work->shared_count; ++i)
+               dma_fence_put(work->shared[i]);
+       kfree(work->shared);
+       kfree(work);
+}
+
+static void amdgpu_flip_cleanup_unreserve(struct amdgpu_flip_work *work,
+                                         struct amdgpu_bo *new_abo)
+{
+       amdgpu_bo_unreserve(new_abo);
+       amdgpu_flip_work_cleanup(work);
+}
+
+static void amdgpu_flip_cleanup_unpin(struct amdgpu_flip_work *work,
+                                     struct amdgpu_bo *new_abo)
+{
+       if (unlikely(amdgpu_bo_unpin(new_abo) != 0))
+               DRM_ERROR("failed to unpin new abo in error path\n");
+       amdgpu_flip_cleanup_unreserve(work, new_abo);
+}
+
+void amdgpu_crtc_cleanup_flip_ctx(struct amdgpu_flip_work *work,
+                                 struct amdgpu_bo *new_abo)
+{
+       if (unlikely(amdgpu_bo_reserve(new_abo, false) != 0)) {
+               DRM_ERROR("failed to reserve new abo in error path\n");
+               amdgpu_flip_work_cleanup(work);
+               return;
+       }
+       amdgpu_flip_cleanup_unpin(work, new_abo);
+}
+
+int amdgpu_crtc_prepare_flip(struct drm_crtc *crtc,
+                            struct drm_framebuffer *fb,
+                            struct drm_pending_vblank_event *event,
+                            uint32_t page_flip_flags,
+                            uint32_t target,
+                            struct amdgpu_flip_work **work_p,
+                            struct amdgpu_bo **new_abo_p)
 {
        struct drm_device *dev = crtc->dev;
        struct amdgpu_device *adev = dev->dev_private;
@@ -154,7 +196,7 @@ int amdgpu_crtc_page_flip_target(struct drm_crtc *crtc,
        unsigned long flags;
        u64 tiling_flags;
        u64 base;
-       int i, r;
+       int r;
 
        work = kzalloc(sizeof *work, GFP_KERNEL);
        if (work == NULL)
@@ -215,41 +257,79 @@ int amdgpu_crtc_page_flip_target(struct drm_crtc *crtc,
                spin_unlock_irqrestore(&crtc->dev->event_lock, flags);
                r = -EBUSY;
                goto pflip_cleanup;
+
        }
+       spin_unlock_irqrestore(&crtc->dev->event_lock, flags);
+
+       *work_p = work;
+       *new_abo_p = new_abo;
+
+       return 0;
+
+pflip_cleanup:
+       amdgpu_crtc_cleanup_flip_ctx(work, new_abo);
+       return r;
+
+unpin:
+       amdgpu_flip_cleanup_unpin(work, new_abo);
+       return r;
+
+unreserve:
+       amdgpu_flip_cleanup_unreserve(work, new_abo);
+       return r;
 
+cleanup:
+       amdgpu_flip_work_cleanup(work);
+       return r;
+
+}
+
+void amdgpu_crtc_submit_flip(struct drm_crtc *crtc,
+                            struct drm_framebuffer *fb,
+                            struct amdgpu_flip_work *work,
+                            struct amdgpu_bo *new_abo)
+{
+       unsigned long flags;
+       struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
+
+       spin_lock_irqsave(&crtc->dev->event_lock, flags);
        amdgpu_crtc->pflip_status = AMDGPU_FLIP_PENDING;
        amdgpu_crtc->pflip_works = work;
 
-
-       DRM_DEBUG_DRIVER("crtc:%d[%p], pflip_stat:AMDGPU_FLIP_PENDING, work: %p,\n",
-                                        amdgpu_crtc->crtc_id, amdgpu_crtc, work);
        /* update crtc fb */
        crtc->primary->fb = fb;
        spin_unlock_irqrestore(&crtc->dev->event_lock, flags);
+
+       DRM_DEBUG_DRIVER(
+                       "crtc:%d[%p], pflip_stat:AMDGPU_FLIP_PENDING, work: %p,\n",
+                       amdgpu_crtc->crtc_id, amdgpu_crtc, work);
+
        amdgpu_flip_work_func(&work->flip_work.work);
-       return 0;
+}
 
-pflip_cleanup:
-       if (unlikely(amdgpu_bo_reserve(new_abo, false) != 0)) {
-               DRM_ERROR("failed to reserve new abo in error path\n");
-               goto cleanup;
-       }
-unpin:
-       if (unlikely(amdgpu_bo_unpin(new_abo) != 0)) {
-               DRM_ERROR("failed to unpin new abo in error path\n");
-       }
-unreserve:
-       amdgpu_bo_unreserve(new_abo);
+int amdgpu_crtc_page_flip_target(struct drm_crtc *crtc,
+                                struct drm_framebuffer *fb,
+                                struct drm_pending_vblank_event *event,
+                                uint32_t page_flip_flags,
+                                uint32_t target)
+{
+       struct amdgpu_bo *new_abo;
+       struct amdgpu_flip_work *work;
+       int r;
 
-cleanup:
-       amdgpu_bo_unref(&work->old_abo);
-       dma_fence_put(work->excl);
-       for (i = 0; i < work->shared_count; ++i)
-               dma_fence_put(work->shared[i]);
-       kfree(work->shared);
-       kfree(work);
+       r = amdgpu_crtc_prepare_flip(crtc,
+                                    fb,
+                                    event,
+                                    page_flip_flags,
+                                    target,
+                                    &work,
+                                    &new_abo);
+       if (r)
+               return r;
 
-       return r;
+       amdgpu_crtc_submit_flip(crtc, fb, work, new_abo);
+
+       return 0;
 }
 
 int amdgpu_crtc_set_config(struct drm_mode_set *set)
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_mode.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_mode.h
index b60346792bf8aaab47fd1669a0d7600c503cde34..c12497bd38895e5668351fb689524fb24524ec35 100644
@@ -595,6 +595,21 @@ int amdgpu_crtc_page_flip_target(struct drm_crtc *crtc,
                                 struct drm_framebuffer *fb,
                                 struct drm_pending_vblank_event *event,
                                 uint32_t page_flip_flags, uint32_t target);
+void amdgpu_crtc_cleanup_flip_ctx(struct amdgpu_flip_work *work,
+                                 struct amdgpu_bo *new_abo);
+int amdgpu_crtc_prepare_flip(struct drm_crtc *crtc,
+                            struct drm_framebuffer *fb,
+                            struct drm_pending_vblank_event *event,
+                            uint32_t page_flip_flags,
+                            uint32_t target,
+                            struct amdgpu_flip_work **work,
+                            struct amdgpu_bo **new_abo);
+
+void amdgpu_crtc_submit_flip(struct drm_crtc *crtc,
+                            struct drm_framebuffer *fb,
+                            struct amdgpu_flip_work *work,
+                            struct amdgpu_bo *new_abo);
+
 extern const struct drm_mode_config_funcs amdgpu_mode_funcs;
 
 #endif