--- a/drivers/gpu/drm/amd/amdgpu/uvd_v5_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/uvd_v5_0.c
@@ ... @@
}
#endif
-static void uvd_v5_0_set_bypass_mode(struct amdgpu_device *adev, bool enable)
-{
- u32 tmp = RREG32_SMC(ixGCK_DFS_BYPASS_CNTL);
-
- if (enable)
- tmp |= (GCK_DFS_BYPASS_CNTL__BYPASSDCLK_MASK |
- GCK_DFS_BYPASS_CNTL__BYPASSVCLK_MASK);
- else
- tmp &= ~(GCK_DFS_BYPASS_CNTL__BYPASSDCLK_MASK |
- GCK_DFS_BYPASS_CNTL__BYPASSVCLK_MASK);
-
- WREG32_SMC(ixGCK_DFS_BYPASS_CNTL, tmp);
-}
-
static int uvd_v5_0_set_clockgating_state(void *handle,
enum amd_clockgating_state state)
{
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
bool enable = (state == AMD_CG_STATE_GATE) ? true : false;
static int curstate = -1;
- uvd_v5_0_set_bypass_mode(adev, enable);
-
if (!(adev->cg_flags & AMD_CG_SUPPORT_UVD_MGCG))
return 0;
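
For reference, the helper deleted here is byte-for-byte identical to the one deleted from uvd_v6_0.c further down: both do a read-modify-write of the DCLK and VCLK bypass bits in the SMC register GCK_DFS_BYPASS_CNTL. A single shared helper, sketched below under a hypothetical name, would have expressed the duplicated operation once; the body is exactly the removed code, only the function name is invented:

    /* Hypothetical shared form of the two deleted per-IP helpers; not a
     * real kernel symbol. Sets or clears the DCLK/VCLK DFS bypass bits. */
    static void uvd_set_dfs_bypass(struct amdgpu_device *adev, bool enable)
    {
            u32 tmp = RREG32_SMC(ixGCK_DFS_BYPASS_CNTL);

            if (enable)
                    tmp |= (GCK_DFS_BYPASS_CNTL__BYPASSDCLK_MASK |
                            GCK_DFS_BYPASS_CNTL__BYPASSVCLK_MASK);
            else
                    tmp &= ~(GCK_DFS_BYPASS_CNTL__BYPASSDCLK_MASK |
                             GCK_DFS_BYPASS_CNTL__BYPASSVCLK_MASK);

            WREG32_SMC(ixGCK_DFS_BYPASS_CNTL, tmp);
    }
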
--- a/drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c
@@ ... @@ static int uvd_v6_0_hw_init(void *handle)
uint32_t tmp;
int r;
+ amdgpu_asic_set_uvd_clocks(adev, 10000, 10000);
+
r = uvd_v6_0_start(adev);
if (r)
goto done;
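
The added amdgpu_asic_set_uvd_clocks() call raises VCLK and DCLK before the VCPU is started; the arguments are clock targets in 10 kHz units (the convention inherited from the radeon UVD code), so 10000 requests 100 MHz on both. The wrapper itself is a thin macro over the per-ASIC function table, filled in by vi.c for UVD 5.x/6.x parts; a paraphrased sketch of the amdgpu.h indirection:

    /* Paraphrase of the amdgpu.h "asic" wrapper: dispatches to whatever
     * set_uvd_clocks implementation the ASIC init code installed. */
    #define amdgpu_asic_set_uvd_clocks(adev, v, d) \
            ((adev)->asic_funcs->set_uvd_clocks((adev), (v), (d)))
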
@@ ... @@
}
#endif
-static void uvd_v6_0_set_bypass_mode(struct amdgpu_device *adev, bool enable)
-{
- u32 tmp = RREG32_SMC(ixGCK_DFS_BYPASS_CNTL);
-
- if (enable)
- tmp |= (GCK_DFS_BYPASS_CNTL__BYPASSDCLK_MASK |
- GCK_DFS_BYPASS_CNTL__BYPASSVCLK_MASK);
- else
- tmp &= ~(GCK_DFS_BYPASS_CNTL__BYPASSDCLK_MASK |
- GCK_DFS_BYPASS_CNTL__BYPASSVCLK_MASK);
-
- WREG32_SMC(ixGCK_DFS_BYPASS_CNTL, tmp);
-}
-
static int uvd_v6_0_set_clockgating_state(void *handle,
enum amd_clockgating_state state)
{
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
bool enable = (state == AMD_CG_STATE_GATE) ? true : false;
- uvd_v6_0_set_bypass_mode(adev, enable);
-
if (!(adev->cg_flags & AMD_CG_SUPPORT_UVD_MGCG))
return 0;
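
The AMD_CG_SUPPORT_UVD_MGCG guard that both callbacks keep checks adev->cg_flags, a per-ASIC bitmask assembled once at early-init time, so the whole clockgating callback stays a no-op on parts that never advertised UVD medium-grain clock gating. An illustrative early-init fragment (the chip case and flag set here are examples, not the exact vi.c table):

    /* Illustrative: each ASIC revision advertises the clock-gating
     * features its IP blocks are allowed to program. */
    switch (adev->asic_type) {
    case CHIP_TONGA:
            adev->cg_flags |= AMD_CG_SUPPORT_UVD_MGCG;
            break;
    default:
            break;
    }
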