drm/amdgpu: modify sdma start sequence
author Monk Liu <Monk.Liu@amd.com>
Wed, 25 May 2016 08:57:14 +0000 (16:57 +0800)
committer Alex Deucher <alexander.deucher@amd.com>
Thu, 9 Jun 2016 14:48:59 +0000 (10:48 -0400)
We should first halt the engine, then do the
register programming, then unhalt the engine,
and finally run the ring test.

This helps fix a hang of the SDMA ring when the
driver is reloaded.

The original sequence is wrong because it programs
the engine after unhalting it, which leads to faulty
behavior when the driver is reloaded after having
been unloaded.

Signed-off-by: Monk Liu <Monk.Liu@amd.com>
Reviewed-by: Alex Deucher <alexander.deucher@amd.com>
Reviewed-by: Christian König <christian.koenig@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
drivers/gpu/drm/amd/amdgpu/cik_sdma.c
drivers/gpu/drm/amd/amdgpu/sdma_v2_4.c
drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c

index 76f73ab9294ef14ebe82eab645c6e67b4c7dcad2..0079916e6d936ae8a0b5155f9e5099b23f237678 100644 (file)
@@ -448,7 +448,12 @@ static int cik_sdma_gfx_resume(struct amdgpu_device *adev)
                WREG32(mmSDMA0_GFX_IB_CNTL + sdma_offsets[i], ib_cntl);
 
                ring->ready = true;
+       }
+
+       cik_sdma_enable(adev, true);
 
+       for (i = 0; i < adev->sdma.num_instances; i++) {
+               ring = &adev->sdma.instance[i].ring;
                r = amdgpu_ring_test_ring(ring);
                if (r) {
                        ring->ready = false;
@@ -531,8 +536,8 @@ static int cik_sdma_start(struct amdgpu_device *adev)
        if (r)
                return r;
 
-       /* unhalt the MEs */
-       cik_sdma_enable(adev, true);
+       /* halt the engine before programming */
+       cik_sdma_enable(adev, false);
 
        /* start the gfx rings and rlc compute queues */
        r = cik_sdma_gfx_resume(adev);
index e11a37416e19385843f678a7cf65a7a863a29c37..f6014b024f2ad6e74e65999ce2e8e5b2ee790f10 100644 (file)
@@ -491,7 +491,11 @@ static int sdma_v2_4_gfx_resume(struct amdgpu_device *adev)
                WREG32(mmSDMA0_GFX_IB_CNTL + sdma_offsets[i], ib_cntl);
 
                ring->ready = true;
+       }
 
+       sdma_v2_4_enable(adev, true);
+       for (i = 0; i < adev->sdma.num_instances; i++) {
+               ring = &adev->sdma.instance[i].ring;
                r = amdgpu_ring_test_ring(ring);
                if (r) {
                        ring->ready = false;
@@ -582,8 +586,8 @@ static int sdma_v2_4_start(struct amdgpu_device *adev)
                        return -EINVAL;
        }
 
-       /* unhalt the MEs */
-       sdma_v2_4_enable(adev, true);
+       /* halt the engine before programming */
+       sdma_v2_4_enable(adev, false);
 
        /* start the gfx rings and rlc compute queues */
        r = sdma_v2_4_gfx_resume(adev);
index 585d8fe22baec974d3ace295c057960eb34611a4..33605d4ac2d984c6de8c6875929e8e92d2735296 100644 (file)
@@ -713,7 +713,15 @@ static int sdma_v3_0_gfx_resume(struct amdgpu_device *adev)
                WREG32(mmSDMA0_GFX_IB_CNTL + sdma_offsets[i], ib_cntl);
 
                ring->ready = true;
+       }
 
+       /* unhalt the MEs */
+       sdma_v3_0_enable(adev, true);
+       /* enable sdma ring preemption */
+       sdma_v3_0_ctx_switch_enable(adev, true);
+
+       for (i = 0; i < adev->sdma.num_instances; i++) {
+               ring = &adev->sdma.instance[i].ring;
                r = amdgpu_ring_test_ring(ring);
                if (r) {
                        ring->ready = false;
@@ -806,10 +814,9 @@ static int sdma_v3_0_start(struct amdgpu_device *adev)
                }
        }
 
-       /* unhalt the MEs */
-       sdma_v3_0_enable(adev, true);
-       /* enable sdma ring preemption */
-       sdma_v3_0_ctx_switch_enable(adev, true);
+       /* disable the sdma engine before programming it */
+       sdma_v3_0_ctx_switch_enable(adev, false);
+       sdma_v3_0_enable(adev, false);
 
        /* start the gfx rings and rlc compute queues */
        r = sdma_v3_0_gfx_resume(adev);