	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	vce_v3_0_override_vce_clock_gating(adev, true);
-	if (!(adev->flags & AMD_IS_APU))
-		amdgpu_asic_set_vce_clocks(adev, 10000, 10000);
+
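+	/* set VCE clocks on both dGPUs and APUs */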
+	amdgpu_asic_set_vce_clocks(adev, 10000, 10000);

	for (i = 0; i < adev->vce.num_rings; i++)
		adev->vce.ring[i].ready = false;
#define ixGNB_CLK1_STATUS 0xD822010C
#define ixGNB_CLK2_DFS_CNTL 0xD8220110
#define ixGNB_CLK2_STATUS 0xD822012C
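+/* GNB clock 3 DFS control/status registers, used by the APU path in vi_set_uvd_clocks */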
+#define ixGNB_CLK3_DFS_CNTL 0xD8220130
+#define ixGNB_CLK3_STATUS 0xD822014C
static int vi_set_uvd_clocks(struct amdgpu_device *adev, u32 vclk, u32 dclk)
{
	int r, i;
	struct atom_clock_dividers dividers;
	u32 tmp;
+	u32 reg_ctrl;
+	u32 reg_status;
+	u32 status_mask;
+	u32 reg_mask;
+
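+	/* pick the divider control/status registers: GNB clock block on APUs, SMC CG_ECLK registers on dGPUs */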
+	if (adev->flags & AMD_IS_APU) {
+		reg_ctrl = ixGNB_CLK3_DFS_CNTL;
+		reg_status = ixGNB_CLK3_STATUS;
+		status_mask = 0x00010000;
+		reg_mask = CG_ECLK_CNTL__ECLK_DIVIDER_MASK;
+	} else {
+		reg_ctrl = ixCG_ECLK_CNTL;
+		reg_status = ixCG_ECLK_STATUS;
+		status_mask = CG_ECLK_STATUS__ECLK_STATUS_MASK;
+		reg_mask = CG_ECLK_CNTL__ECLK_DIR_CNTL_EN_MASK | CG_ECLK_CNTL__ECLK_DIVIDER_MASK;
+	}
+
	r = amdgpu_atombios_get_clock_dividers(adev,
			COMPUTE_GPUCLK_INPUT_FLAG_DEFAULT_GPUCLK,
			dclk, false, &dividers);
	if (r)
		return r;

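+	/* poll the status register until the clock status bit is set; give up after ~1 second */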
	for (i = 0; i < 100; i++) {
-		if (RREG32_SMC(ixCG_ECLK_STATUS) & CG_ECLK_STATUS__ECLK_STATUS_MASK)
+		if (RREG32_SMC(reg_status) & status_mask)
			break;
		mdelay(10);
	}
+
	if (i == 100)
		return -ETIMEDOUT;

-	tmp = RREG32_SMC(ixCG_ECLK_CNTL);
-	tmp &= ~(CG_ECLK_CNTL__ECLK_DIR_CNTL_EN_MASK |
-		 CG_ECLK_CNTL__ECLK_DIVIDER_MASK);
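+	/* clear the divider field(s) and program the post divider computed by atombios */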
+	tmp = RREG32_SMC(reg_ctrl);
+	tmp &= ~reg_mask;
	tmp |= dividers.post_divider;
-	WREG32_SMC(ixCG_ECLK_CNTL, tmp);
+	WREG32_SMC(reg_ctrl, tmp);

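+	/* poll the status bit again after writing the new divider */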
	for (i = 0; i < 100; i++) {
-		if (RREG32_SMC(ixCG_ECLK_STATUS) & CG_ECLK_STATUS__ECLK_STATUS_MASK)
+		if (RREG32_SMC(reg_status) & status_mask)
			break;
		mdelay(10);
	}
+
	if (i == 100)
		return -ETIMEDOUT;