From 5ceb54c68a28fc8af5cf8d32c4fde29c97dd3c18 Mon Sep 17 00:00:00 2001
From: Alex Deucher
Date: Wed, 5 Aug 2015 12:41:48 -0400
Subject: [PATCH] drm/amdgpu: add fence suspend/resume functions

Added to:
- handle draining the ring on suspend
- properly enable/disable interrupts on suspend and resume

Fix breakages from:
commit 467ee3be53d240d08beed2e82a941e820c1ac323
Author: Chunming Zhou
Date:   Mon Jun 1 14:14:32 2015 +0800

    drm/amdgpu: always enable EOP interrupt v2

Tested-by: Audrey Grodzovsky
Signed-off-by: Alex Deucher
---
 drivers/gpu/drm/amd/amdgpu/amdgpu.h        |  2 +
 drivers/gpu/drm/amd/amdgpu/amdgpu_device.c | 21 ++------
 drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c  | 61 ++++++++++++++++++++++
 3 files changed, 67 insertions(+), 17 deletions(-)

diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu.h b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
index 1ec89d2864dd..551143287698 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
@@ -426,6 +426,8 @@ void amdgpu_fence_driver_init_ring(struct amdgpu_ring *ring);
 int amdgpu_fence_driver_start_ring(struct amdgpu_ring *ring,
 				   struct amdgpu_irq_src *irq_src,
 				   unsigned irq_type);
+void amdgpu_fence_driver_suspend(struct amdgpu_device *adev);
+void amdgpu_fence_driver_resume(struct amdgpu_device *adev);
 int amdgpu_fence_emit(struct amdgpu_ring *ring, void *owner,
 		      struct amdgpu_fence **fence);
 void amdgpu_fence_process(struct amdgpu_ring *ring);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
index 99f158e1baff..a816580fb474 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
@@ -1627,8 +1627,7 @@ int amdgpu_suspend_kms(struct drm_device *dev, bool suspend, bool fbcon)
 	struct amdgpu_device *adev;
 	struct drm_crtc *crtc;
 	struct drm_connector *connector;
-	int i, r;
-	bool force_completion = false;
+	int r;
 	if (dev == NULL || dev->dev_private == NULL) {
 		return -ENODEV;
 	}
@@ -1667,21 +1666,7 @@ int amdgpu_suspend_kms(struct drm_device *dev, bool suspend, bool fbcon)
 	/* evict vram memory */
 	amdgpu_bo_evict_vram(adev);
 
-	/* wait for gpu to finish processing current batch */
-	for (i = 0; i < AMDGPU_MAX_RINGS; i++) {
-		struct amdgpu_ring *ring = adev->rings[i];
-		if (!ring)
-			continue;
-
-		r = amdgpu_fence_wait_empty(ring);
-		if (r) {
-			/* delay GPU reset to resume */
-			force_completion = true;
-		}
-	}
-	if (force_completion) {
-		amdgpu_fence_driver_force_completion(adev);
-	}
+	amdgpu_fence_driver_suspend(adev);
 
 	r = amdgpu_suspend(adev);
 
@@ -1739,6 +1724,8 @@ int amdgpu_resume_kms(struct drm_device *dev, bool resume, bool fbcon)
 
 	r = amdgpu_resume(adev);
 
+	amdgpu_fence_driver_resume(adev);
+
 	r = amdgpu_ib_ring_tests(adev);
 	if (r)
 		DRM_ERROR("ib ring test failed (%d).\n", r);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c
index b89dafec9ecf..1b0bc07d0c01 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c
@@ -955,6 +955,67 @@ void amdgpu_fence_driver_fini(struct amdgpu_device *adev)
 	mutex_unlock(&adev->ring_lock);
 }
 
+/**
+ * amdgpu_fence_driver_suspend - suspend the fence driver
+ * for all possible rings.
+ *
+ * @adev: amdgpu device pointer
+ *
+ * Suspend the fence driver for all possible rings (all asics).
+ */
+void amdgpu_fence_driver_suspend(struct amdgpu_device *adev)
+{
+	int i, r;
+
+	mutex_lock(&adev->ring_lock);
+	for (i = 0; i < AMDGPU_MAX_RINGS; i++) {
+		struct amdgpu_ring *ring = adev->rings[i];
+		if (!ring || !ring->fence_drv.initialized)
+			continue;
+
+		/* wait for gpu to finish processing current batch */
+		r = amdgpu_fence_wait_empty(ring);
+		if (r) {
+			/* delay GPU reset to resume */
+			amdgpu_fence_driver_force_completion(adev);
+		}
+
+		/* disable the interrupt */
+		amdgpu_irq_put(adev, ring->fence_drv.irq_src,
+			       ring->fence_drv.irq_type);
+	}
+	mutex_unlock(&adev->ring_lock);
+}
+
+/**
+ * amdgpu_fence_driver_resume - resume the fence driver
+ * for all possible rings.
+ *
+ * @adev: amdgpu device pointer
+ *
+ * Resume the fence driver for all possible rings (all asics).
+ * Not all asics have all rings, so each asic will only
+ * start the fence driver on the rings it has using
+ * amdgpu_fence_driver_start_ring().
+ * Returns 0 for success.
+ */
+void amdgpu_fence_driver_resume(struct amdgpu_device *adev)
+{
+	int i;
+
+	mutex_lock(&adev->ring_lock);
+	for (i = 0; i < AMDGPU_MAX_RINGS; i++) {
+		struct amdgpu_ring *ring = adev->rings[i];
+		if (!ring || !ring->fence_drv.initialized)
+			continue;
+
+		/* enable the interrupt */
+		amdgpu_irq_get(adev, ring->fence_drv.irq_src,
+			       ring->fence_drv.irq_type);
+	}
+	mutex_unlock(&adev->ring_lock);
+}
+
 /**
  * amdgpu_fence_driver_force_completion - force all fence waiter to complete
  *
-- 
2.20.1
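
For readers following the change outside the driver tree, here is a minimal, self-contained sketch of the ordering the patch establishes: on suspend, drain each ring's outstanding fences (falling back to forced completion if the wait fails) and drop the EOP interrupt reference; on resume, re-take the interrupt reference before new work is submitted. It is plain userspace C, and every toy_* type and helper is a hypothetical stand-in for illustration only, not amdgpu API.

/*
 * Illustrative stand-alone model of the suspend/resume ordering above.
 * All toy_* names are hypothetical; none of this is amdgpu code.
 */
#include <stdbool.h>
#include <stdio.h>

#define TOY_MAX_RINGS 4

struct toy_ring {
	bool present;      /* not all asics have all rings */
	int outstanding;   /* fences emitted but not yet signaled */
	int irq_refcount;  /* models the EOP interrupt get/put reference */
};

/* pretend to wait for the ring to drain; non-zero would mean the wait failed */
static int toy_wait_empty(struct toy_ring *ring)
{
	ring->outstanding = 0;
	return 0;
}

/* mark every fence signaled so waiters are released (recovery is deferred) */
static void toy_force_completion(struct toy_ring *rings)
{
	for (int i = 0; i < TOY_MAX_RINGS; i++)
		rings[i].outstanding = 0;
}

static void toy_fence_suspend(struct toy_ring *rings)
{
	for (int i = 0; i < TOY_MAX_RINGS; i++) {
		if (!rings[i].present)
			continue;
		/* drain the ring; on failure, force-complete and defer recovery */
		if (toy_wait_empty(&rings[i]))
			toy_force_completion(rings);
		/* drop the interrupt reference taken when the ring started */
		rings[i].irq_refcount--;
	}
}

static void toy_fence_resume(struct toy_ring *rings)
{
	for (int i = 0; i < TOY_MAX_RINGS; i++) {
		if (!rings[i].present)
			continue;
		/* re-take the interrupt reference before new submissions */
		rings[i].irq_refcount++;
	}
}

int main(void)
{
	struct toy_ring rings[TOY_MAX_RINGS] = {
		{ .present = true, .outstanding = 2, .irq_refcount = 1 },
		{ .present = true, .outstanding = 0, .irq_refcount = 1 },
	};

	toy_fence_suspend(rings);
	printf("after suspend: outstanding=%d irq_refcount=%d\n",
	       rings[0].outstanding, rings[0].irq_refcount);

	toy_fence_resume(rings);
	printf("after resume:  irq_refcount=%d\n", rings[0].irq_refcount);
	return 0;
}

The reference count in the sketch exists only to show that the get/put pair must stay balanced across a full suspend/resume cycle, which is the "properly enable/disable interrupts" part of the commit message.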