	struct amdgpu_ring *ring;
	int idx, r;

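+	/* Force VCE clocks on via the clock-gating override, then request a
+	 * fixed VCE clock (10000 in the driver's usual 10 kHz clock units,
+	 * i.e. 100 MHz) before ring setup; APUs manage VCE clocks through
+	 * their own power-management path, so they are skipped here.
+	 */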
+	vce_v3_0_override_vce_clock_gating(adev, true);
+	if (!(adev->flags & AMD_IS_APU))
+		amdgpu_asic_set_vce_clocks(adev, 10000, 10000);
+
	ring = &adev->vce.ring[0];
	WREG32(mmVCE_RB_RPTR, ring->wptr);
	WREG32(mmVCE_RB_WPTR, ring->wptr);
	return 0;
}
-static void vce_v3_0_set_bypass_mode(struct amdgpu_device *adev, bool enable)
-{
-	u32 tmp = RREG32_SMC(ixGCK_DFS_BYPASS_CNTL);
-
-	if (enable)
-		tmp |= GCK_DFS_BYPASS_CNTL__BYPASSECLK_MASK;
-	else
-		tmp &= ~GCK_DFS_BYPASS_CNTL__BYPASSECLK_MASK;
-
-	WREG32_SMC(ixGCK_DFS_BYPASS_CNTL, tmp);
-}
-
static int vce_v3_0_set_clockgating_state(void *handle,
					  enum amd_clockgating_state state)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	bool enable = (state == AMD_CG_STATE_GATE) ? true : false;
	int i;

-	if ((adev->asic_type == CHIP_POLARIS10) ||
-	    (adev->asic_type == CHIP_TONGA) ||
-	    (adev->asic_type == CHIP_FIJI))
-		vce_v3_0_set_bypass_mode(adev, enable);
-
	if (!(adev->cg_flags & AMD_CG_SUPPORT_VCE_MGCG))
		return 0;
static int vi_set_vce_clocks(struct amdgpu_device *adev, u32 evclk, u32 ecclk)
{
-	/* todo */
+	int r, i;
+	struct atom_clock_dividers dividers;
+	u32 tmp;
+
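+	/* Look up the divider settings for the requested ECLK in the
+	 * atombios tables; note that only ecclk is programmed below,
+	 * evclk is currently unused.
+	 */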
+	r = amdgpu_atombios_get_clock_dividers(adev,
+					       COMPUTE_GPUCLK_INPUT_FLAG_DEFAULT_GPUCLK,
+					       ecclk, false, &dividers);
+	if (r)
+		return r;
+
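+	/* Wait (up to ~1 s) for ECLK_STATUS to report ready before
+	 * touching the divider.
+	 */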
+	for (i = 0; i < 100; i++) {
+		if (RREG32_SMC(ixCG_ECLK_STATUS) & CG_ECLK_STATUS__ECLK_STATUS_MASK)
+			break;
+		mdelay(10);
+	}
+	if (i == 100)
+		return -ETIMEDOUT;
+
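+	/* Install the new post divider: clear the directional divider
+	 * control and the old divider field, then OR in the value
+	 * computed by atombios.
+	 */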
+	tmp = RREG32_SMC(ixCG_ECLK_CNTL);
+	tmp &= ~(CG_ECLK_CNTL__ECLK_DIR_CNTL_EN_MASK |
+		CG_ECLK_CNTL__ECLK_DIVIDER_MASK);
+	tmp |= dividers.post_divider;
+	WREG32_SMC(ixCG_ECLK_CNTL, tmp);
+
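+	/* Wait again for the new ECLK setting to stabilize. */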
+	for (i = 0; i < 100; i++) {
+		if (RREG32_SMC(ixCG_ECLK_STATUS) & CG_ECLK_STATUS__ECLK_STATUS_MASK)
+			break;
+		mdelay(10);
+	}
+	if (i == 100)
+		return -ETIMEDOUT;
+
	return 0;
}