From: Christian König Date: Mon, 29 Apr 2013 09:55:02 +0000 (+0200) Subject: drm/radeon: consolidate UVD clock programming X-Git-Url: https://git.stricted.de/?a=commitdiff_plain;h=facd112d1395fb6a0b6e460778aefc32197afcfc;p=GitHub%2Fexynos8895%2Fandroid_kernel_samsung_universal8895.git drm/radeon: consolidate UVD clock programming Instead of duplicating the code over and over again, just use a single function to handle the clock calculations. Signed-off-by: Christian König Signed-off-by: Alex Deucher --- diff --git a/drivers/gpu/drm/radeon/evergreen.c b/drivers/gpu/drm/radeon/evergreen.c index 1531f167d152..105bafb6c29d 100644 --- a/drivers/gpu/drm/radeon/evergreen.c +++ b/drivers/gpu/drm/radeon/evergreen.c @@ -989,62 +989,10 @@ done: return r; } -static int evergreen_uvd_calc_post_div(unsigned target_freq, - unsigned vco_freq, - unsigned *div) -{ - /* target larger than vco frequency ? */ - if (vco_freq < target_freq) - return -1; /* forget it */ - - /* Fclk = Fvco / PDIV */ - *div = vco_freq / target_freq; - - /* we alway need a frequency less than or equal the target */ - if ((vco_freq / *div) > target_freq) - *div += 1; - - /* dividers above 5 must be even */ - if (*div > 5 && *div % 2) - *div += 1; - - /* out of range ? */ - if (*div >= 128) - return -1; /* forget it */ - - return vco_freq / *div; -} - -static int evergreen_uvd_send_upll_ctlreq(struct radeon_device *rdev) -{ - unsigned i; - - /* assert UPLL_CTLREQ */ - WREG32_P(CG_UPLL_FUNC_CNTL, UPLL_CTLREQ_MASK, ~UPLL_CTLREQ_MASK); - - /* wait for CTLACK and CTLACK2 to get asserted */ - for (i = 0; i < 100; ++i) { - uint32_t mask = UPLL_CTLACK_MASK | UPLL_CTLACK2_MASK; - if ((RREG32(CG_UPLL_FUNC_CNTL) & mask) == mask) - break; - mdelay(10); - } - if (i == 100) - return -ETIMEDOUT; - - /* deassert UPLL_CTLREQ */ - WREG32_P(CG_UPLL_FUNC_CNTL, 0, ~UPLL_CTLREQ_MASK); - - return 0; -} - int evergreen_set_uvd_clocks(struct radeon_device *rdev, u32 vclk, u32 dclk) { /* start off with something large */ - int optimal_diff_score = 0x7FFFFFF; - unsigned optimal_fb_div = 0, optimal_vclk_div = 0; - unsigned optimal_dclk_div = 0, optimal_vco_freq = 0; - unsigned vco_freq; + unsigned fb_div = 0, vclk_div = 0, dclk_div = 0; int r; /* bypass vclk and dclk with bclk */ @@ -1061,40 +1009,11 @@ int evergreen_set_uvd_clocks(struct radeon_device *rdev, u32 vclk, u32 dclk) return 0; } - /* loop through vco from low to high */ - for (vco_freq = 125000; vco_freq <= 250000; vco_freq += 100) { - unsigned fb_div = vco_freq / rdev->clock.spll.reference_freq * 16384; - int calc_clk, diff_score, diff_vclk, diff_dclk; - unsigned vclk_div, dclk_div; - - /* fb div out of range ? */ - if (fb_div > 0x03FFFFFF) - break; /* it can oly get worse */ - - /* calc vclk with current vco freq. */ - calc_clk = evergreen_uvd_calc_post_div(vclk, vco_freq, &vclk_div); - if (calc_clk == -1) - break; /* vco is too big, it has to stop. */ - diff_vclk = vclk - calc_clk; - - /* calc dclk with current vco freq. */ - calc_clk = evergreen_uvd_calc_post_div(dclk, vco_freq, &dclk_div); - if (calc_clk == -1) - break; /* vco is too big, it has to stop. 
*/ - diff_dclk = dclk - calc_clk; - - /* determine if this vco setting is better than current optimal settings */ - diff_score = abs(diff_vclk) + abs(diff_dclk); - if (diff_score < optimal_diff_score) { - optimal_fb_div = fb_div; - optimal_vclk_div = vclk_div; - optimal_dclk_div = dclk_div; - optimal_vco_freq = vco_freq; - optimal_diff_score = diff_score; - if (optimal_diff_score == 0) - break; /* it can't get better than this */ - } - } + r = radeon_uvd_calc_upll_dividers(rdev, vclk, dclk, 125000, 250000, + 16384, 0x03FFFFFF, 0, 128, 5, + &fb_div, &vclk_div, &dclk_div); + if (r) + return r; /* set VCO_MODE to 1 */ WREG32_P(CG_UPLL_FUNC_CNTL, UPLL_VCO_MODE_MASK, ~UPLL_VCO_MODE_MASK); @@ -1108,7 +1027,7 @@ int evergreen_set_uvd_clocks(struct radeon_device *rdev, u32 vclk, u32 dclk) mdelay(1); - r = evergreen_uvd_send_upll_ctlreq(rdev); + r = radeon_uvd_send_upll_ctlreq(rdev, CG_UPLL_FUNC_CNTL); if (r) return r; @@ -1119,19 +1038,19 @@ int evergreen_set_uvd_clocks(struct radeon_device *rdev, u32 vclk, u32 dclk) WREG32_P(CG_UPLL_SPREAD_SPECTRUM, 0, ~SSEN_MASK); /* set feedback divider */ - WREG32_P(CG_UPLL_FUNC_CNTL_3, UPLL_FB_DIV(optimal_fb_div), ~UPLL_FB_DIV_MASK); + WREG32_P(CG_UPLL_FUNC_CNTL_3, UPLL_FB_DIV(fb_div), ~UPLL_FB_DIV_MASK); /* set ref divider to 0 */ WREG32_P(CG_UPLL_FUNC_CNTL, 0, ~UPLL_REF_DIV_MASK); - if (optimal_vco_freq < 187500) + if (fb_div < 307200) WREG32_P(CG_UPLL_FUNC_CNTL_4, 0, ~UPLL_SPARE_ISPARE9); else WREG32_P(CG_UPLL_FUNC_CNTL_4, UPLL_SPARE_ISPARE9, ~UPLL_SPARE_ISPARE9); /* set PDIV_A and PDIV_B */ WREG32_P(CG_UPLL_FUNC_CNTL_2, - UPLL_PDIV_A(optimal_vclk_div) | UPLL_PDIV_B(optimal_dclk_div), + UPLL_PDIV_A(vclk_div) | UPLL_PDIV_B(dclk_div), ~(UPLL_PDIV_A_MASK | UPLL_PDIV_B_MASK)); /* give the PLL some time to settle */ @@ -1145,7 +1064,7 @@ int evergreen_set_uvd_clocks(struct radeon_device *rdev, u32 vclk, u32 dclk) /* switch from bypass mode to normal mode */ WREG32_P(CG_UPLL_FUNC_CNTL, 0, ~UPLL_BYPASS_EN_MASK); - r = evergreen_uvd_send_upll_ctlreq(rdev); + r = radeon_uvd_send_upll_ctlreq(rdev, CG_UPLL_FUNC_CNTL); if (r) return r; diff --git a/drivers/gpu/drm/radeon/r600d.h b/drivers/gpu/drm/radeon/r600d.h index 6105b25b18c3..acb146c06973 100644 --- a/drivers/gpu/drm/radeon/r600d.h +++ b/drivers/gpu/drm/radeon/r600d.h @@ -1208,6 +1208,10 @@ #define UVD_CONTEXT_ID 0xf6f4 +# define UPLL_CTLREQ_MASK 0x00000008 +# define UPLL_CTLACK_MASK 0x40000000 +# define UPLL_CTLACK2_MASK 0x80000000 + /* * PM4 */ diff --git a/drivers/gpu/drm/radeon/radeon.h b/drivers/gpu/drm/radeon/radeon.h index 3ef7543a2986..1442ce765d48 100644 --- a/drivers/gpu/drm/radeon/radeon.h +++ b/drivers/gpu/drm/radeon/radeon.h @@ -1162,6 +1162,17 @@ void radeon_uvd_free_handles(struct radeon_device *rdev, struct drm_file *filp); int radeon_uvd_cs_parse(struct radeon_cs_parser *parser); void radeon_uvd_note_usage(struct radeon_device *rdev); +int radeon_uvd_calc_upll_dividers(struct radeon_device *rdev, + unsigned vclk, unsigned dclk, + unsigned vco_min, unsigned vco_max, + unsigned fb_factor, unsigned fb_mask, + unsigned pd_min, unsigned pd_max, + unsigned pd_even, + unsigned *optimal_fb_div, + unsigned *optimal_vclk_div, + unsigned *optimal_dclk_div); +int radeon_uvd_send_upll_ctlreq(struct radeon_device *rdev, + unsigned cg_upll_func_cntl); struct r600_audio { int channels; diff --git a/drivers/gpu/drm/radeon/radeon_uvd.c b/drivers/gpu/drm/radeon/radeon_uvd.c index 0312a7f4d768..906e5c0ca3b9 100644 --- a/drivers/gpu/drm/radeon/radeon_uvd.c +++ b/drivers/gpu/drm/radeon/radeon_uvd.c @@ -692,3 +692,140 
@@ void radeon_uvd_note_usage(struct radeon_device *rdev) if (set_clocks) radeon_set_uvd_clocks(rdev, 53300, 40000); } + +static unsigned radeon_uvd_calc_upll_post_div(unsigned vco_freq, + unsigned target_freq, + unsigned pd_min, + unsigned pd_even) +{ + unsigned post_div = vco_freq / target_freq; + + /* adjust to post divider minimum value */ + if (post_div < pd_min) + post_div = pd_min; + + /* we alway need a frequency less than or equal the target */ + if ((vco_freq / post_div) > target_freq) + post_div += 1; + + /* post dividers above a certain value must be even */ + if (post_div > pd_even && post_div % 2) + post_div += 1; + + return post_div; +} + +/** + * radeon_uvd_calc_upll_dividers - calc UPLL clock dividers + * + * @rdev: radeon_device pointer + * @vclk: wanted VCLK + * @dclk: wanted DCLK + * @vco_min: minimum VCO frequency + * @vco_max: maximum VCO frequency + * @fb_factor: factor to multiply vco freq with + * @fb_mask: limit and bitmask for feedback divider + * @pd_min: post divider minimum + * @pd_max: post divider maximum + * @pd_even: post divider must be even above this value + * @optimal_fb_div: resulting feedback divider + * @optimal_vclk_div: resulting vclk post divider + * @optimal_dclk_div: resulting dclk post divider + * + * Calculate dividers for UVDs UPLL (R6xx-SI, except APUs). + * Returns zero on success -EINVAL on error. + */ +int radeon_uvd_calc_upll_dividers(struct radeon_device *rdev, + unsigned vclk, unsigned dclk, + unsigned vco_min, unsigned vco_max, + unsigned fb_factor, unsigned fb_mask, + unsigned pd_min, unsigned pd_max, + unsigned pd_even, + unsigned *optimal_fb_div, + unsigned *optimal_vclk_div, + unsigned *optimal_dclk_div) +{ + unsigned vco_freq, ref_freq = rdev->clock.spll.reference_freq; + + /* start off with something large */ + unsigned optimal_score = ~0; + + /* loop through vco from low to high */ + vco_min = max(max(vco_min, vclk), dclk); + for (vco_freq = vco_min; vco_freq <= vco_max; vco_freq += 100) { + + uint64_t fb_div = (uint64_t)vco_freq * fb_factor; + unsigned vclk_div, dclk_div, score; + + do_div(fb_div, ref_freq); + + /* fb div out of range ? */ + if (fb_div > fb_mask) + break; /* it can oly get worse */ + + fb_div &= fb_mask; + + /* calc vclk divider with current vco freq */ + vclk_div = radeon_uvd_calc_upll_post_div(vco_freq, vclk, + pd_min, pd_even); + if (vclk_div > pd_max) + break; /* vco is too big, it has to stop */ + + /* calc dclk divider with current vco freq */ + dclk_div = radeon_uvd_calc_upll_post_div(vco_freq, dclk, + pd_min, pd_even); + if (vclk_div > pd_max) + break; /* vco is too big, it has to stop */ + + /* calc score with current vco freq */ + score = vclk - (vco_freq / vclk_div) + dclk - (vco_freq / dclk_div); + + /* determine if this vco setting is better than current optimal settings */ + if (score < optimal_score) { + *optimal_fb_div = fb_div; + *optimal_vclk_div = vclk_div; + *optimal_dclk_div = dclk_div; + optimal_score = score; + if (optimal_score == 0) + break; /* it can't get better than this */ + } + } + + /* did we found a valid setup ? 
*/ + if (optimal_score == ~0) + return -EINVAL; + + return 0; +} + +int radeon_uvd_send_upll_ctlreq(struct radeon_device *rdev, + unsigned cg_upll_func_cntl) +{ + unsigned i; + + /* make sure UPLL_CTLREQ is deasserted */ + WREG32_P(cg_upll_func_cntl, 0, ~UPLL_CTLREQ_MASK); + + mdelay(10); + + /* assert UPLL_CTLREQ */ + WREG32_P(cg_upll_func_cntl, UPLL_CTLREQ_MASK, ~UPLL_CTLREQ_MASK); + + /* wait for CTLACK and CTLACK2 to get asserted */ + for (i = 0; i < 100; ++i) { + uint32_t mask = UPLL_CTLACK_MASK | UPLL_CTLACK2_MASK; + if ((RREG32(cg_upll_func_cntl) & mask) == mask) + break; + mdelay(10); + } + + /* deassert UPLL_CTLREQ */ + WREG32_P(cg_upll_func_cntl, 0, ~UPLL_CTLREQ_MASK); + + if (i == 100) { + DRM_ERROR("Timeout setting UVD clocks!\n"); + return -ETIMEDOUT; + } + + return 0; +} diff --git a/drivers/gpu/drm/radeon/rv770.c b/drivers/gpu/drm/radeon/rv770.c index 91530d4c11c4..83f612a9500b 100644 --- a/drivers/gpu/drm/radeon/rv770.c +++ b/drivers/gpu/drm/radeon/rv770.c @@ -44,56 +44,9 @@ void rv770_fini(struct radeon_device *rdev); static void rv770_pcie_gen2_enable(struct radeon_device *rdev); int evergreen_set_uvd_clocks(struct radeon_device *rdev, u32 vclk, u32 dclk); -static int rv770_uvd_calc_post_div(unsigned target_freq, - unsigned vco_freq, - unsigned *div) -{ - /* Fclk = Fvco / PDIV */ - *div = vco_freq / target_freq; - - /* we alway need a frequency less than or equal the target */ - if ((vco_freq / *div) > target_freq) - *div += 1; - - /* out of range ? */ - if (*div > 30) - return -1; /* forget it */ - - *div -= 1; - return vco_freq / (*div + 1); -} - -static int rv770_uvd_send_upll_ctlreq(struct radeon_device *rdev) -{ - unsigned i; - - /* assert UPLL_CTLREQ */ - WREG32_P(CG_UPLL_FUNC_CNTL, UPLL_CTLREQ_MASK, ~UPLL_CTLREQ_MASK); - - /* wait for CTLACK and CTLACK2 to get asserted */ - for (i = 0; i < 100; ++i) { - uint32_t mask = UPLL_CTLACK_MASK | UPLL_CTLACK2_MASK; - if ((RREG32(CG_UPLL_FUNC_CNTL) & mask) == mask) - break; - mdelay(10); - } - if (i == 100) - return -ETIMEDOUT; - - /* deassert UPLL_CTLREQ */ - WREG32_P(CG_UPLL_FUNC_CNTL, 0, ~UPLL_CTLREQ_MASK); - - return 0; -} - int rv770_set_uvd_clocks(struct radeon_device *rdev, u32 vclk, u32 dclk) { - /* start off with something large */ - int optimal_diff_score = 0x7FFFFFF; - unsigned optimal_fb_div = 0, optimal_vclk_div = 0; - unsigned optimal_dclk_div = 0, optimal_vco_freq = 0; - unsigned vco_freq, vco_min = 50000, vco_max = 160000; - unsigned ref_freq = rdev->clock.spll.reference_freq; + unsigned fb_div = 0, vclk_div = 0, dclk_div = 0; int r; /* RV740 uses evergreen uvd clk programming */ @@ -111,44 +64,15 @@ int rv770_set_uvd_clocks(struct radeon_device *rdev, u32 vclk, u32 dclk) return 0; } - /* loop through vco from low to high */ - vco_min = max(max(vco_min, vclk), dclk); - for (vco_freq = vco_min; vco_freq <= vco_max; vco_freq += 500) { - uint64_t fb_div = (uint64_t)vco_freq * 43663; - int calc_clk, diff_score, diff_vclk, diff_dclk; - unsigned vclk_div, dclk_div; - - do_div(fb_div, ref_freq); - fb_div |= 1; - - /* fb div out of range ? */ - if (fb_div > 0x03FFFFFF) - break; /* it can oly get worse */ - - /* calc vclk with current vco freq. */ - calc_clk = rv770_uvd_calc_post_div(vclk, vco_freq, &vclk_div); - if (calc_clk == -1) - break; /* vco is too big, it has to stop. */ - diff_vclk = vclk - calc_clk; - - /* calc dclk with current vco freq. */ - calc_clk = rv770_uvd_calc_post_div(dclk, vco_freq, &dclk_div); - if (calc_clk == -1) - break; /* vco is too big, it has to stop. 
*/ - diff_dclk = dclk - calc_clk; - - /* determine if this vco setting is better than current optimal settings */ - diff_score = abs(diff_vclk) + abs(diff_dclk); - if (diff_score < optimal_diff_score) { - optimal_fb_div = fb_div; - optimal_vclk_div = vclk_div; - optimal_dclk_div = dclk_div; - optimal_vco_freq = vco_freq; - optimal_diff_score = diff_score; - if (optimal_diff_score == 0) - break; /* it can't get better than this */ - } - } + r = radeon_uvd_calc_upll_dividers(rdev, vclk, dclk, 50000, 160000, + 43663, 0x03FFFFFE, 1, 30, ~0, + &fb_div, &vclk_div, &dclk_div); + if (r) + return r; + + fb_div |= 1; + vclk_div -= 1; + dclk_div -= 1; /* set UPLL_FB_DIV to 0x50000 */ WREG32_P(CG_UPLL_FUNC_CNTL_3, UPLL_FB_DIV(0x50000), ~UPLL_FB_DIV_MASK); @@ -160,7 +84,7 @@ int rv770_set_uvd_clocks(struct radeon_device *rdev, u32 vclk, u32 dclk) WREG32_P(CG_UPLL_FUNC_CNTL, UPLL_BYPASS_EN_MASK, ~UPLL_BYPASS_EN_MASK); WREG32_P(CG_UPLL_FUNC_CNTL_3, UPLL_FB_DIV(1), ~UPLL_FB_DIV(1)); - r = rv770_uvd_send_upll_ctlreq(rdev); + r = radeon_uvd_send_upll_ctlreq(rdev, CG_UPLL_FUNC_CNTL); if (r) return r; @@ -170,13 +94,13 @@ int rv770_set_uvd_clocks(struct radeon_device *rdev, u32 vclk, u32 dclk) /* set the required FB_DIV, REF_DIV, Post divder values */ WREG32_P(CG_UPLL_FUNC_CNTL, UPLL_REF_DIV(1), ~UPLL_REF_DIV_MASK); WREG32_P(CG_UPLL_FUNC_CNTL_2, - UPLL_SW_HILEN(optimal_vclk_div >> 1) | - UPLL_SW_LOLEN((optimal_vclk_div >> 1) + (optimal_vclk_div & 1)) | - UPLL_SW_HILEN2(optimal_dclk_div >> 1) | - UPLL_SW_LOLEN2((optimal_dclk_div >> 1) + (optimal_dclk_div & 1)), + UPLL_SW_HILEN(vclk_div >> 1) | + UPLL_SW_LOLEN((vclk_div >> 1) + (vclk_div & 1)) | + UPLL_SW_HILEN2(dclk_div >> 1) | + UPLL_SW_LOLEN2((dclk_div >> 1) + (dclk_div & 1)), ~UPLL_SW_MASK); - WREG32_P(CG_UPLL_FUNC_CNTL_3, UPLL_FB_DIV(optimal_fb_div), + WREG32_P(CG_UPLL_FUNC_CNTL_3, UPLL_FB_DIV(fb_div), ~UPLL_FB_DIV_MASK); /* give the PLL some time to settle */ @@ -191,7 +115,7 @@ int rv770_set_uvd_clocks(struct radeon_device *rdev, u32 vclk, u32 dclk) WREG32_P(CG_UPLL_FUNC_CNTL, 0, ~UPLL_BYPASS_EN_MASK); WREG32_P(CG_UPLL_FUNC_CNTL_3, 0, ~UPLL_FB_DIV(1)); - r = rv770_uvd_send_upll_ctlreq(rdev); + r = radeon_uvd_send_upll_ctlreq(rdev, CG_UPLL_FUNC_CNTL); if (r) return r; diff --git a/drivers/gpu/drm/radeon/si.c b/drivers/gpu/drm/radeon/si.c index fe6b14e0021c..f0b6c2f87c4d 100644 --- a/drivers/gpu/drm/radeon/si.c +++ b/drivers/gpu/drm/radeon/si.c @@ -5415,62 +5415,9 @@ uint64_t si_get_gpu_clock_counter(struct radeon_device *rdev) return clock; } -static int si_uvd_calc_post_div(unsigned target_freq, - unsigned vco_freq, - unsigned *div) -{ - /* target larger than vco frequency ? */ - if (vco_freq < target_freq) - return -1; /* forget it */ - - /* Fclk = Fvco / PDIV */ - *div = vco_freq / target_freq; - - /* we alway need a frequency less than or equal the target */ - if ((vco_freq / *div) > target_freq) - *div += 1; - - /* dividers above 5 must be even */ - if (*div > 5 && *div % 2) - *div += 1; - - /* out of range ? 
*/ - if (*div >= 128) - return -1; /* forget it */ - - return vco_freq / *div; -} - -static int si_uvd_send_upll_ctlreq(struct radeon_device *rdev) -{ - unsigned i; - - /* assert UPLL_CTLREQ */ - WREG32_P(CG_UPLL_FUNC_CNTL, UPLL_CTLREQ_MASK, ~UPLL_CTLREQ_MASK); - - /* wait for CTLACK and CTLACK2 to get asserted */ - for (i = 0; i < 100; ++i) { - uint32_t mask = UPLL_CTLACK_MASK | UPLL_CTLACK2_MASK; - if ((RREG32(CG_UPLL_FUNC_CNTL) & mask) == mask) - break; - mdelay(10); - } - if (i == 100) - return -ETIMEDOUT; - - /* deassert UPLL_CTLREQ */ - WREG32_P(CG_UPLL_FUNC_CNTL, 0, ~UPLL_CTLREQ_MASK); - - return 0; -} - int si_set_uvd_clocks(struct radeon_device *rdev, u32 vclk, u32 dclk) { - /* start off with something large */ - int optimal_diff_score = 0x7FFFFFF; - unsigned optimal_fb_div = 0, optimal_vclk_div = 0; - unsigned optimal_dclk_div = 0, optimal_vco_freq = 0; - unsigned vco_freq; + unsigned fb_div = 0, vclk_div = 0, dclk_div = 0; int r; /* bypass vclk and dclk with bclk */ @@ -5487,40 +5434,11 @@ int si_set_uvd_clocks(struct radeon_device *rdev, u32 vclk, u32 dclk) return 0; } - /* loop through vco from low to high */ - for (vco_freq = 125000; vco_freq <= 250000; vco_freq += 100) { - unsigned fb_div = vco_freq / rdev->clock.spll.reference_freq * 16384; - int calc_clk, diff_score, diff_vclk, diff_dclk; - unsigned vclk_div, dclk_div; - - /* fb div out of range ? */ - if (fb_div > 0x03FFFFFF) - break; /* it can oly get worse */ - - /* calc vclk with current vco freq. */ - calc_clk = si_uvd_calc_post_div(vclk, vco_freq, &vclk_div); - if (calc_clk == -1) - break; /* vco is too big, it has to stop. */ - diff_vclk = vclk - calc_clk; - - /* calc dclk with current vco freq. */ - calc_clk = si_uvd_calc_post_div(dclk, vco_freq, &dclk_div); - if (calc_clk == -1) - break; /* vco is too big, it has to stop. 
*/ - diff_dclk = dclk - calc_clk; - - /* determine if this vco setting is better than current optimal settings */ - diff_score = abs(diff_vclk) + abs(diff_dclk); - if (diff_score < optimal_diff_score) { - optimal_fb_div = fb_div; - optimal_vclk_div = vclk_div; - optimal_dclk_div = dclk_div; - optimal_vco_freq = vco_freq; - optimal_diff_score = diff_score; - if (optimal_diff_score == 0) - break; /* it can't get better than this */ - } - } + r = radeon_uvd_calc_upll_dividers(rdev, vclk, dclk, 125000, 250000, + 16384, 0x03FFFFFF, 0, 128, 5, + &fb_div, &vclk_div, &dclk_div); + if (r) + return r; /* set RESET_ANTI_MUX to 0 */ WREG32_P(CG_UPLL_FUNC_CNTL_5, 0, ~RESET_ANTI_MUX_MASK); @@ -5537,7 +5455,7 @@ int si_set_uvd_clocks(struct radeon_device *rdev, u32 vclk, u32 dclk) mdelay(1); - r = si_uvd_send_upll_ctlreq(rdev); + r = radeon_uvd_send_upll_ctlreq(rdev, CG_UPLL_FUNC_CNTL); if (r) return r; @@ -5548,19 +5466,19 @@ int si_set_uvd_clocks(struct radeon_device *rdev, u32 vclk, u32 dclk) WREG32_P(CG_UPLL_SPREAD_SPECTRUM, 0, ~SSEN_MASK); /* set feedback divider */ - WREG32_P(CG_UPLL_FUNC_CNTL_3, UPLL_FB_DIV(optimal_fb_div), ~UPLL_FB_DIV_MASK); + WREG32_P(CG_UPLL_FUNC_CNTL_3, UPLL_FB_DIV(fb_div), ~UPLL_FB_DIV_MASK); /* set ref divider to 0 */ WREG32_P(CG_UPLL_FUNC_CNTL, 0, ~UPLL_REF_DIV_MASK); - if (optimal_vco_freq < 187500) + if (fb_div < 307200) WREG32_P(CG_UPLL_FUNC_CNTL_4, 0, ~UPLL_SPARE_ISPARE9); else WREG32_P(CG_UPLL_FUNC_CNTL_4, UPLL_SPARE_ISPARE9, ~UPLL_SPARE_ISPARE9); /* set PDIV_A and PDIV_B */ WREG32_P(CG_UPLL_FUNC_CNTL_2, - UPLL_PDIV_A(optimal_vclk_div) | UPLL_PDIV_B(optimal_dclk_div), + UPLL_PDIV_A(vclk_div) | UPLL_PDIV_B(dclk_div), ~(UPLL_PDIV_A_MASK | UPLL_PDIV_B_MASK)); /* give the PLL some time to settle */ @@ -5574,7 +5492,7 @@ int si_set_uvd_clocks(struct radeon_device *rdev, u32 vclk, u32 dclk) /* switch from bypass mode to normal mode */ WREG32_P(CG_UPLL_FUNC_CNTL, 0, ~UPLL_BYPASS_EN_MASK); - r = si_uvd_send_upll_ctlreq(rdev); + r = radeon_uvd_send_upll_ctlreq(rdev, CG_UPLL_FUNC_CNTL); if (r) return r;
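
For reference, the consolidated helper simply brute-forces the VCO frequency range and scores each candidate by how far the resulting VCLK and DCLK fall below the request. Below is a minimal stand-alone sketch of that search in plain C, using the parameters the patch passes for Evergreen/SI (VCO window 125000-250000, fb_factor 16384, fb_mask 0x03FFFFFF, post dividers 0-128, forced even above 5) and the 53300/40000 VCLK/DCLK request from radeon_uvd_note_usage(); values are in the driver's internal clock units. The reference clock value, the calc_post_div() helper name and the main() driver are assumptions for illustration only and are not part of the patch.

/*
 * Stand-alone sketch of the divider search performed by
 * radeon_uvd_calc_upll_dividers(), stripped of all register access.
 * The reference clock below is an assumed placeholder; the VCO range,
 * fb_factor, fb_mask and post-divider limits are the Evergreen/SI
 * values used in the patch above.
 */
#include <stdio.h>
#include <stdint.h>

static unsigned calc_post_div(unsigned vco, unsigned target,
			      unsigned pd_min, unsigned pd_even)
{
	unsigned post_div = vco / target;

	/* respect the post divider minimum */
	if (post_div < pd_min)
		post_div = pd_min;

	/* never exceed the requested frequency */
	if ((vco / post_div) > target)
		post_div += 1;

	/* dividers above pd_even must be even */
	if (post_div > pd_even && post_div % 2)
		post_div += 1;

	return post_div;
}

int main(void)
{
	/* requested clocks, as issued by radeon_uvd_note_usage() */
	const unsigned vclk = 53300, dclk = 40000;
	/* Evergreen/SI parameters from the patch */
	const unsigned vco_min = 125000, vco_max = 250000;
	const unsigned fb_factor = 16384, fb_mask = 0x03FFFFFF;
	const unsigned pd_min = 0, pd_max = 128, pd_even = 5;
	/* assumed reference clock, for illustration only */
	const unsigned ref_freq = 27000;

	unsigned best_fb = 0, best_vdiv = 0, best_ddiv = 0, best_score = ~0u;
	unsigned vco;

	/* sweep the VCO range from low to high */
	for (vco = vco_min; vco <= vco_max; vco += 100) {
		uint64_t fb_div = (uint64_t)vco * fb_factor / ref_freq;
		unsigned vclk_div, dclk_div, score;

		/* feedback divider out of range, it can only get worse */
		if (fb_div > fb_mask)
			break;

		vclk_div = calc_post_div(vco, vclk, pd_min, pd_even);
		dclk_div = calc_post_div(vco, dclk, pd_min, pd_even);
		if (vclk_div > pd_max || dclk_div > pd_max)
			break; /* VCO is too big */

		/* score = how far below the request both clocks end up */
		score = vclk - (vco / vclk_div) + dclk - (vco / dclk_div);

		if (score < best_score) {
			best_fb = (unsigned)fb_div;
			best_vdiv = vclk_div;
			best_ddiv = dclk_div;
			best_score = score;
			if (!score)
				break; /* exact match, stop searching */
		}
	}

	printf("fb_div=%u vclk_div=%u dclk_div=%u (score %u)\n",
	       best_fb, best_vdiv, best_ddiv, best_score);
	return 0;
}

On RV770 the same helper is called with a 50000-160000 VCO window, fb_factor 43663, fb_mask 0x03FFFFFE, post dividers 1-30 and pd_even ~0 (so post dividers are never forced even), which is the only per-ASIC difference left after this patch; the register programming around the call stays chip-specific.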