From: Christian König
Date: Thu, 7 May 2015 13:19:25 +0000 (+0200)
Subject: drm/amdgpu: stop trying to suspend UVD sessions v2
X-Git-Url: https://git.stricted.de/?a=commitdiff_plain;h=8f8202f771c182a7244caa5880f50def1bedd713;p=GitHub%2Fmoto-9609%2Fandroid_kernel_motorola_exynos9610.git

drm/amdgpu: stop trying to suspend UVD sessions v2

Saving the current UVD state on suspend and restoring it on resume
just doesn't work reliably. Instead, simply close and clean up all
sessions on suspend.

Ported from radeon commit "12e49feadff6d7b7ebbe852b36943a71524d8d34".

v2: rebased

Signed-off-by: Christian König
Reviewed-by: Alex Deucher (v1)
---

diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu.h b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
index 719506808b4a..9fefcd9c1af1 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
@@ -1666,7 +1666,6 @@ struct amdgpu_uvd {
 	struct amdgpu_bo	*vcpu_bo;
 	void			*cpu_addr;
 	uint64_t		gpu_addr;
-	void			*saved_bo;
 	atomic_t		handles[AMDGPU_MAX_UVD_HANDLES];
 	struct drm_file		*filp[AMDGPU_MAX_UVD_HANDLES];
 	struct delayed_work	idle_work;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c
index b87355ccfb1d..3ad4a83c418f 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c
@@ -221,31 +221,32 @@ int amdgpu_uvd_sw_fini(struct amdgpu_device *adev)
 
 int amdgpu_uvd_suspend(struct amdgpu_device *adev)
 {
-	unsigned size;
-	void *ptr;
-	const struct common_firmware_header *hdr;
-	int i;
+	struct amdgpu_ring *ring = &adev->uvd.ring;
+	int i, r;
 
 	if (adev->uvd.vcpu_bo == NULL)
 		return 0;
 
-	for (i = 0; i < AMDGPU_MAX_UVD_HANDLES; ++i)
-		if (atomic_read(&adev->uvd.handles[i]))
-			break;
-
-	if (i == AMDGPU_MAX_UVD_HANDLES)
-		return 0;
+	for (i = 0; i < AMDGPU_MAX_UVD_HANDLES; ++i) {
+		uint32_t handle = atomic_read(&adev->uvd.handles[i]);
+		if (handle != 0) {
+			struct fence *fence;
 
-	hdr = (const struct common_firmware_header *)adev->uvd.fw->data;
+			amdgpu_uvd_note_usage(adev);
 
-	size = amdgpu_bo_size(adev->uvd.vcpu_bo);
-	size -= le32_to_cpu(hdr->ucode_size_bytes);
+			r = amdgpu_uvd_get_destroy_msg(ring, handle, &fence);
+			if (r) {
+				DRM_ERROR("Error destroying UVD (%d)!\n", r);
+				continue;
+			}
 
-	ptr = adev->uvd.cpu_addr;
-	ptr += le32_to_cpu(hdr->ucode_size_bytes);
+			fence_wait(fence, false);
+			fence_put(fence);
 
-	adev->uvd.saved_bo = kmalloc(size, GFP_KERNEL);
-	memcpy(adev->uvd.saved_bo, ptr, size);
+			adev->uvd.filp[i] = NULL;
+			atomic_set(&adev->uvd.handles[i], 0);
+		}
+	}
 
 	return 0;
 }
@@ -270,12 +271,7 @@ int amdgpu_uvd_resume(struct amdgpu_device *adev)
 	ptr = adev->uvd.cpu_addr;
 	ptr += le32_to_cpu(hdr->ucode_size_bytes);
 
-	if (adev->uvd.saved_bo != NULL) {
-		memcpy(ptr, adev->uvd.saved_bo, size);
-		kfree(adev->uvd.saved_bo);
-		adev->uvd.saved_bo = NULL;
-	} else
-		memset(ptr, 0, size);
+	memset(ptr, 0, size);
 
 	return 0;
 }
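
For quick reference, here is the new amdgpu_uvd_suspend() from the second hunk, reassembled into a single listing with editorial comments. The identifiers and control flow are exactly those introduced by the patch; only the comments are added.

int amdgpu_uvd_suspend(struct amdgpu_device *adev)
{
	struct amdgpu_ring *ring = &adev->uvd.ring;
	int i, r;

	/* Nothing to do if the UVD VCPU buffer object was never created. */
	if (adev->uvd.vcpu_bo == NULL)
		return 0;

	for (i = 0; i < AMDGPU_MAX_UVD_HANDLES; ++i) {
		uint32_t handle = atomic_read(&adev->uvd.handles[i]);
		if (handle != 0) {
			struct fence *fence;

			/* Record UVD activity so the block stays powered
			 * while the destroy message is submitted. */
			amdgpu_uvd_note_usage(adev);

			/* Ask the firmware to tear down this session instead
			 * of trying to save and restore its state. */
			r = amdgpu_uvd_get_destroy_msg(ring, handle, &fence);
			if (r) {
				DRM_ERROR("Error destroying UVD (%d)!\n", r);
				continue;
			}

			/* Wait for the destroy message to complete before the
			 * device actually suspends, then drop the fence. */
			fence_wait(fence, false);
			fence_put(fence);

			/* Mark the session slot as free again. */
			adev->uvd.filp[i] = NULL;
			atomic_set(&adev->uvd.handles[i], 0);
		}
	}

	return 0;
}

With every open session already destroyed here, amdgpu_uvd_resume() has no saved state left to restore, which is why the resume hunk reduces to a plain memset() of the message area and the saved_bo pointer can be dropped from struct amdgpu_uvd.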