drm/radeon: stop trying to suspend UVD sessions
author	Christian König <christian.koenig@amd.com>
Thu, 7 May 2015 13:19:25 +0000 (15:19 +0200)
committer	Alex Deucher <alexander.deucher@amd.com>
Thu, 7 May 2015 15:00:18 +0000 (11:00 -0400)
Saving the current UVD state on suspend and restoring it on resume
just doesn't work reliably. Just close and clean up all sessions on
suspend instead.

Signed-off-by: Christian König <christian.koenig@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
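
For review convenience, this is how radeon_uvd_suspend() reads with the
patch applied, assembled from the radeon_uvd.c hunk below and annotated
with comments; a sketch of the resulting function, not a further change:

	int radeon_uvd_suspend(struct radeon_device *rdev)
	{
		int i, r;

		if (rdev->uvd.vcpu_bo == NULL)
			return 0;

		for (i = 0; i < RADEON_MAX_UVD_HANDLES; ++i) {
			uint32_t handle = atomic_read(&rdev->uvd.handles[i]);
			if (handle != 0) {
				struct radeon_fence *fence;

				radeon_uvd_note_usage(rdev);

				/* send a destroy message to tear down this session */
				r = radeon_uvd_get_destroy_msg(rdev,
					R600_RING_TYPE_UVD_INDEX, handle, &fence);
				if (r) {
					DRM_ERROR("Error destroying UVD (%d)!\n", r);
					continue;
				}

				/* wait for the destroy to be processed */
				radeon_fence_wait(fence, false);
				radeon_fence_unref(&fence);

				/* drop the bookkeeping so resume starts with no sessions */
				rdev->uvd.filp[i] = NULL;
				atomic_set(&rdev->uvd.handles[i], 0);
			}
		}

		return 0;
	}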
drivers/gpu/drm/radeon/radeon.h
drivers/gpu/drm/radeon/radeon_uvd.c

drivers/gpu/drm/radeon/radeon.h
index d2abe481954fc14a8146c0f9db0f0a6e73816900..46eb0fa75a614307286446a99d7c1c2037973ab4 100644
@@ -1673,7 +1673,6 @@ struct radeon_uvd {
        struct radeon_bo        *vcpu_bo;
        void                    *cpu_addr;
        uint64_t                gpu_addr;
-       void                    *saved_bo;
        atomic_t                handles[RADEON_MAX_UVD_HANDLES];
        struct drm_file         *filp[RADEON_MAX_UVD_HANDLES];
        unsigned                img_size[RADEON_MAX_UVD_HANDLES];
drivers/gpu/drm/radeon/radeon_uvd.c
index cd630287cf0a33fb404d36000f27630a40872e72..6edcb54850922a87535a3440a24d0e316977cf05 100644
@@ -204,28 +204,32 @@ void radeon_uvd_fini(struct radeon_device *rdev)
 
 int radeon_uvd_suspend(struct radeon_device *rdev)
 {
-       unsigned size;
-       void *ptr;
-       int i;
+       int i, r;
 
        if (rdev->uvd.vcpu_bo == NULL)
                return 0;
 
-       for (i = 0; i < RADEON_MAX_UVD_HANDLES; ++i)
-               if (atomic_read(&rdev->uvd.handles[i]))
-                       break;
+       for (i = 0; i < RADEON_MAX_UVD_HANDLES; ++i) {
+               uint32_t handle = atomic_read(&rdev->uvd.handles[i]);
+               if (handle != 0) {
+                       struct radeon_fence *fence;
 
-       if (i == RADEON_MAX_UVD_HANDLES)
-               return 0;
+                       radeon_uvd_note_usage(rdev);
 
-       size = radeon_bo_size(rdev->uvd.vcpu_bo);
-       size -= rdev->uvd_fw->size;
+                       r = radeon_uvd_get_destroy_msg(rdev,
+                               R600_RING_TYPE_UVD_INDEX, handle, &fence);
+                       if (r) {
+                               DRM_ERROR("Error destroying UVD (%d)!\n", r);
+                               continue;
+                       }
 
-       ptr = rdev->uvd.cpu_addr;
-       ptr += rdev->uvd_fw->size;
+                       radeon_fence_wait(fence, false);
+                       radeon_fence_unref(&fence);
 
-       rdev->uvd.saved_bo = kmalloc(size, GFP_KERNEL);
-       memcpy(rdev->uvd.saved_bo, ptr, size);
+                       rdev->uvd.filp[i] = NULL;
+                       atomic_set(&rdev->uvd.handles[i], 0);
+               }
+       }
 
        return 0;
 }
@@ -246,12 +250,7 @@ int radeon_uvd_resume(struct radeon_device *rdev)
        ptr = rdev->uvd.cpu_addr;
        ptr += rdev->uvd_fw->size;
 
-       if (rdev->uvd.saved_bo != NULL) {
-               memcpy(ptr, rdev->uvd.saved_bo, size);
-               kfree(rdev->uvd.saved_bo);
-               rdev->uvd.saved_bo = NULL;
-       } else
-               memset(ptr, 0, size);
+       memset(ptr, 0, size);
 
        return 0;
 }
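
On the resume side, with saved_bo gone there is no snapshot to restore,
so the area behind the firmware image in the VCPU buffer is now always
cleared. A minimal sketch of that path, assuming size has been computed
earlier in radeon_uvd_resume() as the VCPU buffer size minus the
firmware size (as in the removed suspend code):

	/* sketch: zero the session/state area that follows the firmware image */
	ptr = rdev->uvd.cpu_addr;
	ptr += rdev->uvd_fw->size;	/* skip past the firmware image */
	memset(ptr, 0, size);		/* nothing saved to restore; start clean */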