drm/amdgpu: add fence suspend/resume functions
authorAlex Deucher <alexander.deucher@amd.com>
Wed, 5 Aug 2015 16:41:48 +0000 (12:41 -0400)
committerAlex Deucher <alexander.deucher@amd.com>
Mon, 17 Aug 2015 20:50:18 +0000 (16:50 -0400)
These functions are added to:
- handle draining the ring on suspend
- properly enable/disable interrupts on suspend and resume

Fix breakages from:
commit 467ee3be53d240d08beed2e82a941e820c1ac323
Author: Chunming Zhou <david1.zhou@amd.com>
Date:   Mon Jun 1 14:14:32 2015 +0800

    drm/amdgpu: always enable EOP interrupt v2

Tested-by: Audrey Grodzovsky <audrey.grodzovsky@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
drivers/gpu/drm/amd/amdgpu/amdgpu.h
drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c

index 1ec89d2864dd2ed3415f363c0c3a029104686619..55114328769892752ea5d93ed818d2fa954e11f9 100644 (file)
@@ -426,6 +426,8 @@ void amdgpu_fence_driver_init_ring(struct amdgpu_ring *ring);
 int amdgpu_fence_driver_start_ring(struct amdgpu_ring *ring,
                                   struct amdgpu_irq_src *irq_src,
                                   unsigned irq_type);
+void amdgpu_fence_driver_suspend(struct amdgpu_device *adev);
+void amdgpu_fence_driver_resume(struct amdgpu_device *adev);
 int amdgpu_fence_emit(struct amdgpu_ring *ring, void *owner,
                      struct amdgpu_fence **fence);
 void amdgpu_fence_process(struct amdgpu_ring *ring);
index 99f158e1baffa711073d1251d6d05c72cb68976e..a816580fb4745d794c29029b92c16740ee8f93eb 100644 (file)
@@ -1627,8 +1627,7 @@ int amdgpu_suspend_kms(struct drm_device *dev, bool suspend, bool fbcon)
        struct amdgpu_device *adev;
        struct drm_crtc *crtc;
        struct drm_connector *connector;
-       int i, r;
-       bool force_completion = false;
+       int r;
 
        if (dev == NULL || dev->dev_private == NULL) {
                return -ENODEV;
@@ -1667,21 +1666,7 @@ int amdgpu_suspend_kms(struct drm_device *dev, bool suspend, bool fbcon)
        /* evict vram memory */
        amdgpu_bo_evict_vram(adev);
 
-       /* wait for gpu to finish processing current batch */
-       for (i = 0; i < AMDGPU_MAX_RINGS; i++) {
-               struct amdgpu_ring *ring = adev->rings[i];
-               if (!ring)
-                       continue;
-
-               r = amdgpu_fence_wait_empty(ring);
-               if (r) {
-                       /* delay GPU reset to resume */
-                       force_completion = true;
-               }
-       }
-       if (force_completion) {
-               amdgpu_fence_driver_force_completion(adev);
-       }
+       amdgpu_fence_driver_suspend(adev);
 
        r = amdgpu_suspend(adev);
 
@@ -1739,6 +1724,8 @@ int amdgpu_resume_kms(struct drm_device *dev, bool resume, bool fbcon)
 
        r = amdgpu_resume(adev);
 
+       amdgpu_fence_driver_resume(adev);
+
        r = amdgpu_ib_ring_tests(adev);
        if (r)
                DRM_ERROR("ib ring test failed (%d).\n", r);
index b89dafec9ecf54b14589988c9ebe44efb80e33d6..1b0bc07d0c01e82191633cb34102c46a42032cc9 100644 (file)
@@ -955,6 +955,67 @@ void amdgpu_fence_driver_fini(struct amdgpu_device *adev)
        mutex_unlock(&adev->ring_lock);
 }
 
+/**
+ * amdgpu_fence_driver_suspend - suspend the fence driver
+ * for all possible rings.
+ *
+ * @adev: amdgpu device pointer
+ *
+ * Suspend the fence driver for all possible rings (all asics).
+ */
+void amdgpu_fence_driver_suspend(struct amdgpu_device *adev)
+{
+       int i, r;
+
+       mutex_lock(&adev->ring_lock);
+       for (i = 0; i < AMDGPU_MAX_RINGS; i++) {
+               struct amdgpu_ring *ring = adev->rings[i];
+               if (!ring || !ring->fence_drv.initialized)
+                       continue;
+
+               /* wait for gpu to finish processing current batch */
+               r = amdgpu_fence_wait_empty(ring);
+               if (r) {
+                       /* wait failed: delay the GPU reset until resume */
+                       amdgpu_fence_driver_force_completion(adev);
+               }
+
+               /* disable the interrupt */
+               amdgpu_irq_put(adev, ring->fence_drv.irq_src,
+                              ring->fence_drv.irq_type);
+       }
+       mutex_unlock(&adev->ring_lock);
+}
+
+/**
+ * amdgpu_fence_driver_resume - resume the fence driver
+ * for all possible rings.
+ *
+ * @adev: amdgpu device pointer
+ *
+ * Resume the fence driver for all possible rings (all asics).
+ * Not all asics have all rings, so each asic will only
+ * start the fence driver on the rings it has using
+ * amdgpu_fence_driver_start_ring(); this function has no
+ * return value.
+ */
+void amdgpu_fence_driver_resume(struct amdgpu_device *adev)
+{
+       int i;
+
+       mutex_lock(&adev->ring_lock);
+       for (i = 0; i < AMDGPU_MAX_RINGS; i++) {
+               struct amdgpu_ring *ring = adev->rings[i];
+               if (!ring || !ring->fence_drv.initialized)
+                       continue;
+
+               /* enable the interrupt */
+               amdgpu_irq_get(adev, ring->fence_drv.irq_src,
+                              ring->fence_drv.irq_type);
+       }
+       mutex_unlock(&adev->ring_lock);
+}
+
 /**
  * amdgpu_fence_driver_force_completion - force all fence waiter to complete
  *