From: Archit Taneja Date: Tue, 3 Jan 2017 14:15:43 +0000 (+0530) Subject: drm/msm/dsi: Add PHY/PLL for 8x96 X-Git-Url: https://git.stricted.de/?a=commitdiff_plain;h=f079f6d999cbf857f899732de680f2b62f245b8c;p=GitHub%2FLineageOS%2Fandroid_kernel_motorola_exynos9610.git

drm/msm/dsi: Add PHY/PLL for 8x96

Extend the DSI PHY/PLL drivers to support the DSI 14nm PHY/PLL found on 8x96. These are picked up from the downstream driver.

The PHY part is similar to the other DSI PHYs. The PLL driver requires some trickery so that one DSI PLL can drive both DSIs (i.e., dual DSI mode). In dual DSI mode, one DSI instance becomes the clock master and the other the clock slave; the master PLL's output (byte and pixel clocks) is fed to both DSI hosts/PHYs.

When the DSIs are configured in dual DSI mode, the PHY driver tells the PLL driver, via msm_dsi_pll_set_usecase(), which instance is the master and which is the slave. When setting the rate, the master PLL also configures those slave PLL/PHY registers that need to be identical to the master's for correct dual DSI behaviour.

There are two PLL post-dividers that would ideally have been modelled as generic clk_divider clocks, but they require some customization for dual DSI: when the master PLL's post-dividers are set, the slave PLL's post-dividers need to be set too. The clk_ops for these use clk_divider's helper ops and flags internally to avoid redundant code.

Cc: Stephen Boyd Signed-off-by: Archit Taneja Signed-off-by: Rob Clark

--- diff --git a/drivers/gpu/drm/msm/Kconfig b/drivers/gpu/drm/msm/Kconfig index 7f78da695dff..5b8e23d051f2 100644 --- a/drivers/gpu/drm/msm/Kconfig +++ b/drivers/gpu/drm/msm/Kconfig @@ -72,3 +72,10 @@ config DRM_MSM_DSI_28NM_8960_PHY help Choose this option if the 28nm DSI PHY 8960 variant is used on the platform. + +config DRM_MSM_DSI_14NM_PHY + bool "Enable DSI 14nm PHY driver in MSM DRM (used by MSM8996/APQ8096)" + depends on DRM_MSM_DSI + default y + help + Choose this option if DSI PHY on 8996 is used on the platform. 
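
[Editor's illustrative aside, not part of the patch itself] The master/slave split described in the commit message reaches the PLL through msm_dsi_pll_set_usecase(); the 14nm PHY's enable path calls it with phy->usecase (see dsi_14nm_phy_enable() further down). As a hedged sketch under stated assumptions, a caller such as the DSI manager might pick the use case along these lines; the helper name and its parameters are hypothetical, while the enum values and the msm_dsi_pll_set_usecase() prototype appear in the patch below.

/* Illustration only; assumes "dsi.h" provides enum msm_dsi_phy_usecase. */
static enum msm_dsi_phy_usecase dsi_choose_usecase(bool dual_dsi, int id)
{
	/* assumption: in dual DSI mode, instance 0 acts as the clock
	 * master and instance 1 as the clock slave; otherwise standalone.
	 */
	if (!dual_dsi)
		return MSM_DSI_PHY_STANDALONE;

	return (id == 0) ? MSM_DSI_PHY_MASTER : MSM_DSI_PHY_SLAVE;
}

The chosen value would then be handed to the PLL with msm_dsi_pll_set_usecase(phy->pll, phy->usecase), which this patch implements in dsi_pll_14nm_set_usecase().
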
diff --git a/drivers/gpu/drm/msm/Makefile b/drivers/gpu/drm/msm/Makefile index 028c24df2291..39055362da95 100644 --- a/drivers/gpu/drm/msm/Makefile +++ b/drivers/gpu/drm/msm/Makefile @@ -76,11 +76,13 @@ msm-$(CONFIG_DRM_MSM_DSI) += dsi/dsi.o \ msm-$(CONFIG_DRM_MSM_DSI_28NM_PHY) += dsi/phy/dsi_phy_28nm.o msm-$(CONFIG_DRM_MSM_DSI_20NM_PHY) += dsi/phy/dsi_phy_20nm.o msm-$(CONFIG_DRM_MSM_DSI_28NM_8960_PHY) += dsi/phy/dsi_phy_28nm_8960.o +msm-$(CONFIG_DRM_MSM_DSI_14NM_PHY) += dsi/phy/dsi_phy_14nm.o ifeq ($(CONFIG_DRM_MSM_DSI_PLL),y) msm-y += dsi/pll/dsi_pll.o msm-$(CONFIG_DRM_MSM_DSI_28NM_PHY) += dsi/pll/dsi_pll_28nm.o msm-$(CONFIG_DRM_MSM_DSI_28NM_8960_PHY) += dsi/pll/dsi_pll_28nm_8960.o +msm-$(CONFIG_DRM_MSM_DSI_14NM_PHY) += dsi/pll/dsi_pll_14nm.o endif obj-$(CONFIG_DRM_MSM) += msm.o diff --git a/drivers/gpu/drm/msm/dsi/dsi.h b/drivers/gpu/drm/msm/dsi/dsi.h index 9465996d0869..32369975d155 100644 --- a/drivers/gpu/drm/msm/dsi/dsi.h +++ b/drivers/gpu/drm/msm/dsi/dsi.h @@ -35,6 +35,7 @@ enum msm_dsi_phy_type { MSM_DSI_PHY_28NM_LP, MSM_DSI_PHY_20NM, MSM_DSI_PHY_28NM_8960, + MSM_DSI_PHY_14NM, MSM_DSI_PHY_MAX }; @@ -117,6 +118,8 @@ int msm_dsi_pll_get_clk_provider(struct msm_dsi_pll *pll, struct clk **byte_clk_provider, struct clk **pixel_clk_provider); void msm_dsi_pll_save_state(struct msm_dsi_pll *pll); int msm_dsi_pll_restore_state(struct msm_dsi_pll *pll); +int msm_dsi_pll_set_usecase(struct msm_dsi_pll *pll, + enum msm_dsi_phy_usecase uc); #else static inline struct msm_dsi_pll *msm_dsi_pll_init(struct platform_device *pdev, enum msm_dsi_phy_type type, int id) { @@ -137,6 +140,11 @@ static inline int msm_dsi_pll_restore_state(struct msm_dsi_pll *pll) { return 0; } +static inline int msm_dsi_pll_set_usecase(struct msm_dsi_pll *pll, + enum msm_dsi_phy_usecase uc) +{ + return -ENODEV; +} #endif /* dsi host */ diff --git a/drivers/gpu/drm/msm/dsi/phy/dsi_phy.c b/drivers/gpu/drm/msm/dsi/phy/dsi_phy.c index 541d7dfa13c0..0c2eb9c9a1fc 100644 --- a/drivers/gpu/drm/msm/dsi/phy/dsi_phy.c +++ b/drivers/gpu/drm/msm/dsi/phy/dsi_phy.c @@ -390,6 +390,10 @@ static const struct of_device_id dsi_phy_dt_match[] = { #ifdef CONFIG_DRM_MSM_DSI_28NM_8960_PHY { .compatible = "qcom,dsi-phy-28nm-8960", .data = &dsi_phy_28nm_8960_cfgs }, +#endif +#ifdef CONFIG_DRM_MSM_DSI_14NM_PHY + { .compatible = "qcom,dsi-phy-14nm", + .data = &dsi_phy_14nm_cfgs }, #endif {} }; diff --git a/drivers/gpu/drm/msm/dsi/phy/dsi_phy.h b/drivers/gpu/drm/msm/dsi/phy/dsi_phy.h index feee87094ef5..1733f6608a09 100644 --- a/drivers/gpu/drm/msm/dsi/phy/dsi_phy.h +++ b/drivers/gpu/drm/msm/dsi/phy/dsi_phy.h @@ -47,6 +47,7 @@ extern const struct msm_dsi_phy_cfg dsi_phy_28nm_hpm_cfgs; extern const struct msm_dsi_phy_cfg dsi_phy_28nm_lp_cfgs; extern const struct msm_dsi_phy_cfg dsi_phy_20nm_cfgs; extern const struct msm_dsi_phy_cfg dsi_phy_28nm_8960_cfgs; +extern const struct msm_dsi_phy_cfg dsi_phy_14nm_cfgs; struct msm_dsi_dphy_timing { u32 clk_pre; @@ -77,6 +78,7 @@ struct msm_dsi_phy { struct platform_device *pdev; void __iomem *base; void __iomem *reg_base; + void __iomem *lane_base; int id; struct clk *ahb_clk; diff --git a/drivers/gpu/drm/msm/dsi/phy/dsi_phy_14nm.c b/drivers/gpu/drm/msm/dsi/phy/dsi_phy_14nm.c new file mode 100644 index 000000000000..513f4234adc1 --- /dev/null +++ b/drivers/gpu/drm/msm/dsi/phy/dsi_phy_14nm.c @@ -0,0 +1,169 @@ +/* + * Copyright (c) 2016, The Linux Foundation. All rights reserved. 
+ * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 and + * only version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + */ + +#include "dsi_phy.h" +#include "dsi.xml.h" + +#define PHY_14NM_CKLN_IDX 4 + +static void dsi_14nm_dphy_set_timing(struct msm_dsi_phy *phy, + struct msm_dsi_dphy_timing *timing, + int lane_idx) +{ + void __iomem *base = phy->lane_base; + bool clk_ln = (lane_idx == PHY_14NM_CKLN_IDX); + u32 zero = clk_ln ? timing->clk_zero : timing->hs_zero; + u32 prepare = clk_ln ? timing->clk_prepare : timing->hs_prepare; + u32 trail = clk_ln ? timing->clk_trail : timing->hs_trail; + u32 rqst = clk_ln ? timing->hs_rqst_ckln : timing->hs_rqst; + u32 prep_dly = clk_ln ? timing->hs_prep_dly_ckln : timing->hs_prep_dly; + u32 halfbyte_en = clk_ln ? timing->hs_halfbyte_en_ckln : + timing->hs_halfbyte_en; + + dsi_phy_write(base + REG_DSI_14nm_PHY_LN_TIMING_CTRL_4(lane_idx), + DSI_14nm_PHY_LN_TIMING_CTRL_4_HS_EXIT(timing->hs_exit)); + dsi_phy_write(base + REG_DSI_14nm_PHY_LN_TIMING_CTRL_5(lane_idx), + DSI_14nm_PHY_LN_TIMING_CTRL_5_HS_ZERO(zero)); + dsi_phy_write(base + REG_DSI_14nm_PHY_LN_TIMING_CTRL_6(lane_idx), + DSI_14nm_PHY_LN_TIMING_CTRL_6_HS_PREPARE(prepare)); + dsi_phy_write(base + REG_DSI_14nm_PHY_LN_TIMING_CTRL_7(lane_idx), + DSI_14nm_PHY_LN_TIMING_CTRL_7_HS_TRAIL(trail)); + dsi_phy_write(base + REG_DSI_14nm_PHY_LN_TIMING_CTRL_8(lane_idx), + DSI_14nm_PHY_LN_TIMING_CTRL_8_HS_RQST(rqst)); + dsi_phy_write(base + REG_DSI_14nm_PHY_LN_CFG0(lane_idx), + DSI_14nm_PHY_LN_CFG0_PREPARE_DLY(prep_dly)); + dsi_phy_write(base + REG_DSI_14nm_PHY_LN_CFG1(lane_idx), + halfbyte_en ? DSI_14nm_PHY_LN_CFG1_HALFBYTECLK_EN : 0); + dsi_phy_write(base + REG_DSI_14nm_PHY_LN_TIMING_CTRL_9(lane_idx), + DSI_14nm_PHY_LN_TIMING_CTRL_9_TA_GO(timing->ta_go) | + DSI_14nm_PHY_LN_TIMING_CTRL_9_TA_SURE(timing->ta_sure)); + dsi_phy_write(base + REG_DSI_14nm_PHY_LN_TIMING_CTRL_10(lane_idx), + DSI_14nm_PHY_LN_TIMING_CTRL_10_TA_GET(timing->ta_get)); + dsi_phy_write(base + REG_DSI_14nm_PHY_LN_TIMING_CTRL_11(lane_idx), + DSI_14nm_PHY_LN_TIMING_CTRL_11_TRIG3_CMD(0xa0)); +} + +static int dsi_14nm_phy_enable(struct msm_dsi_phy *phy, int src_pll_id, + struct msm_dsi_phy_clk_request *clk_req) +{ + struct msm_dsi_dphy_timing *timing = &phy->timing; + u32 data; + int i; + int ret; + void __iomem *base = phy->base; + void __iomem *lane_base = phy->lane_base; + + if (msm_dsi_dphy_timing_calc_v2(timing, clk_req)) { + dev_err(&phy->pdev->dev, + "%s: D-PHY timing calculation failed\n", __func__); + return -EINVAL; + } + + data = 0x1c; + if (phy->usecase != MSM_DSI_PHY_STANDALONE) + data |= DSI_14nm_PHY_CMN_LDO_CNTRL_VREG_CTRL(32); + dsi_phy_write(base + REG_DSI_14nm_PHY_CMN_LDO_CNTRL, data); + + dsi_phy_write(base + REG_DSI_14nm_PHY_CMN_GLBL_TEST_CTRL, 0x1); + + /* 4 data lanes + 1 clk lane configuration */ + for (i = 0; i < 5; i++) { + dsi_phy_write(lane_base + REG_DSI_14nm_PHY_LN_VREG_CNTRL(i), + 0x1d); + + dsi_phy_write(lane_base + + REG_DSI_14nm_PHY_LN_STRENGTH_CTRL_0(i), 0xff); + dsi_phy_write(lane_base + + REG_DSI_14nm_PHY_LN_STRENGTH_CTRL_1(i), + (i == PHY_14NM_CKLN_IDX) ? 0x00 : 0x06); + + dsi_phy_write(lane_base + REG_DSI_14nm_PHY_LN_CFG3(i), + (i == PHY_14NM_CKLN_IDX) ? 
0x8f : 0x0f); + dsi_phy_write(lane_base + REG_DSI_14nm_PHY_LN_CFG2(i), 0x10); + dsi_phy_write(lane_base + REG_DSI_14nm_PHY_LN_TEST_DATAPATH(i), + 0); + dsi_phy_write(lane_base + REG_DSI_14nm_PHY_LN_TEST_STR(i), + 0x88); + + dsi_14nm_dphy_set_timing(phy, timing, i); + } + + /* Make sure PLL is not start */ + dsi_phy_write(base + REG_DSI_14nm_PHY_CMN_PLL_CNTRL, 0x00); + + wmb(); /* make sure everything is written before reset and enable */ + + /* reset digital block */ + dsi_phy_write(base + REG_DSI_14nm_PHY_CMN_CTRL_1, 0x80); + wmb(); /* ensure reset is asserted */ + udelay(100); + dsi_phy_write(base + REG_DSI_14nm_PHY_CMN_CTRL_1, 0x00); + + msm_dsi_phy_set_src_pll(phy, src_pll_id, + REG_DSI_14nm_PHY_CMN_GLBL_TEST_CTRL, + DSI_14nm_PHY_CMN_GLBL_TEST_CTRL_BITCLK_HS_SEL); + + ret = msm_dsi_pll_set_usecase(phy->pll, phy->usecase); + if (ret) { + dev_err(&phy->pdev->dev, "%s: set pll usecase failed, %d\n", + __func__, ret); + return ret; + } + + /* Remove power down from PLL and all lanes */ + dsi_phy_write(base + REG_DSI_14nm_PHY_CMN_CTRL_0, 0xff); + + return 0; +} + +static void dsi_14nm_phy_disable(struct msm_dsi_phy *phy) +{ + dsi_phy_write(phy->base + REG_DSI_14nm_PHY_CMN_GLBL_TEST_CTRL, 0); + dsi_phy_write(phy->base + REG_DSI_14nm_PHY_CMN_CTRL_0, 0); + + /* ensure that the phy is completely disabled */ + wmb(); +} + +static int dsi_14nm_phy_init(struct msm_dsi_phy *phy) +{ + struct platform_device *pdev = phy->pdev; + + phy->lane_base = msm_ioremap(pdev, "dsi_phy_lane", + "DSI_PHY_LANE"); + if (IS_ERR(phy->lane_base)) { + dev_err(&pdev->dev, "%s: failed to map phy lane base\n", + __func__); + return -ENOMEM; + } + + return 0; +} + +const struct msm_dsi_phy_cfg dsi_phy_14nm_cfgs = { + .type = MSM_DSI_PHY_14NM, + .src_pll_truthtable = { {false, false}, {true, false} }, + .reg_cfg = { + .num = 1, + .regs = { + {"vcca", 17000, 32}, + }, + }, + .ops = { + .enable = dsi_14nm_phy_enable, + .disable = dsi_14nm_phy_disable, + .init = dsi_14nm_phy_init, + }, + .io_start = { 0x994400, 0x996400 }, + .num_dsi_phy = 2, +}; diff --git a/drivers/gpu/drm/msm/dsi/pll/dsi_pll.c b/drivers/gpu/drm/msm/dsi/pll/dsi_pll.c index 5cd438f91afe..bc289f5c9078 100644 --- a/drivers/gpu/drm/msm/dsi/pll/dsi_pll.c +++ b/drivers/gpu/drm/msm/dsi/pll/dsi_pll.c @@ -140,6 +140,15 @@ int msm_dsi_pll_restore_state(struct msm_dsi_pll *pll) return 0; } +int msm_dsi_pll_set_usecase(struct msm_dsi_pll *pll, + enum msm_dsi_phy_usecase uc) +{ + if (pll->set_usecase) + return pll->set_usecase(pll, uc); + + return 0; +} + struct msm_dsi_pll *msm_dsi_pll_init(struct platform_device *pdev, enum msm_dsi_phy_type type, int id) { @@ -154,6 +163,9 @@ struct msm_dsi_pll *msm_dsi_pll_init(struct platform_device *pdev, case MSM_DSI_PHY_28NM_8960: pll = msm_dsi_pll_28nm_8960_init(pdev, id); break; + case MSM_DSI_PHY_14NM: + pll = msm_dsi_pll_14nm_init(pdev, id); + break; default: pll = ERR_PTR(-ENXIO); break; diff --git a/drivers/gpu/drm/msm/dsi/pll/dsi_pll.h b/drivers/gpu/drm/msm/dsi/pll/dsi_pll.h index 2cf1664723e8..f63e7ada74a8 100644 --- a/drivers/gpu/drm/msm/dsi/pll/dsi_pll.h +++ b/drivers/gpu/drm/msm/dsi/pll/dsi_pll.h @@ -41,6 +41,8 @@ struct msm_dsi_pll { void (*destroy)(struct msm_dsi_pll *pll); void (*save_state)(struct msm_dsi_pll *pll); int (*restore_state)(struct msm_dsi_pll *pll); + int (*set_usecase)(struct msm_dsi_pll *pll, + enum msm_dsi_phy_usecase uc); }; #define hw_clk_to_pll(x) container_of(x, struct msm_dsi_pll, clk_hw) @@ -104,5 +106,14 @@ static inline struct msm_dsi_pll *msm_dsi_pll_28nm_8960_init( } #endif +#ifdef 
CONFIG_DRM_MSM_DSI_14NM_PHY +struct msm_dsi_pll *msm_dsi_pll_14nm_init(struct platform_device *pdev, int id); +#else +static inline struct msm_dsi_pll * +msm_dsi_pll_14nm_init(struct platform_device *pdev, int id) +{ + return ERR_PTR(-ENODEV); +} +#endif #endif /* __DSI_PLL_H__ */ diff --git a/drivers/gpu/drm/msm/dsi/pll/dsi_pll_14nm.c b/drivers/gpu/drm/msm/dsi/pll/dsi_pll_14nm.c new file mode 100644 index 000000000000..fe15aa64086f --- /dev/null +++ b/drivers/gpu/drm/msm/dsi/pll/dsi_pll_14nm.c @@ -0,0 +1,1104 @@ +/* + * Copyright (c) 2016, The Linux Foundation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 and + * only version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + */ + +#include +#include + +#include "dsi_pll.h" +#include "dsi.xml.h" + +/* + * DSI PLL 14nm - clock diagram (eg: DSI0): + * + * dsi0n1_postdiv_clk + * | + * | + * +----+ | +----+ + * dsi0vco_clk ---| n1 |--o--| /8 |-- dsi0pllbyte + * +----+ | +----+ + * | dsi0n1_postdivby2_clk + * | +----+ | + * o---| /2 |--o--|\ + * | +----+ | \ +----+ + * | | |--| n2 |-- dsi0pll + * o--------------| / +----+ + * |/ + */ + +#define POLL_MAX_READS 15 +#define POLL_TIMEOUT_US 1000 + +#define NUM_PROVIDED_CLKS 2 + +#define VCO_REF_CLK_RATE 19200000 +#define VCO_MIN_RATE 1300000000UL +#define VCO_MAX_RATE 2600000000UL + +#define DSI_BYTE_PLL_CLK 0 +#define DSI_PIXEL_PLL_CLK 1 + +#define DSI_PLL_DEFAULT_VCO_POSTDIV 1 + +struct dsi_pll_input { + u32 fref; /* reference clk */ + u32 fdata; /* bit clock rate */ + u32 dsiclk_sel; /* Mux configuration (see diagram) */ + u32 ssc_en; /* SSC enable/disable */ + u32 ldo_en; + + /* fixed params */ + u32 refclk_dbler_en; + u32 vco_measure_time; + u32 kvco_measure_time; + u32 bandgap_timer; + u32 pll_wakeup_timer; + u32 plllock_cnt; + u32 plllock_rng; + u32 ssc_center; + u32 ssc_adj_period; + u32 ssc_spread; + u32 ssc_freq; + u32 pll_ie_trim; + u32 pll_ip_trim; + u32 pll_iptat_trim; + u32 pll_cpcset_cur; + u32 pll_cpmset_cur; + + u32 pll_icpmset; + u32 pll_icpcset; + + u32 pll_icpmset_p; + u32 pll_icpmset_m; + + u32 pll_icpcset_p; + u32 pll_icpcset_m; + + u32 pll_lpf_res1; + u32 pll_lpf_cap1; + u32 pll_lpf_cap2; + u32 pll_c3ctrl; + u32 pll_r3ctrl; +}; + +struct dsi_pll_output { + u32 pll_txclk_en; + u32 dec_start; + u32 div_frac_start; + u32 ssc_period; + u32 ssc_step_size; + u32 plllock_cmp; + u32 pll_vco_div_ref; + u32 pll_vco_count; + u32 pll_kvco_div_ref; + u32 pll_kvco_count; + u32 pll_misc1; + u32 pll_lpf2_postdiv; + u32 pll_resetsm_cntrl; + u32 pll_resetsm_cntrl2; + u32 pll_resetsm_cntrl5; + u32 pll_kvco_code; + + u32 cmn_clk_cfg0; + u32 cmn_clk_cfg1; + u32 cmn_ldo_cntrl; + + u32 pll_postdiv; + u32 fcvo; +}; + +struct pll_14nm_cached_state { + unsigned long vco_rate; + u8 n2postdiv; + u8 n1postdiv; +}; + +struct dsi_pll_14nm { + struct msm_dsi_pll base; + + int id; + struct platform_device *pdev; + + void __iomem *phy_cmn_mmio; + void __iomem *mmio; + + int vco_delay; + + struct dsi_pll_input in; + struct dsi_pll_output out; + + /* protects REG_DSI_14nm_PHY_CMN_CLK_CFG0 register */ + spinlock_t postdiv_lock; + + u64 vco_current_rate; + u64 vco_ref_clk_rate; + + /* private clocks: */ + struct clk_hw *hws[NUM_DSI_CLOCKS_MAX]; + 
u32 num_hws; + + /* clock-provider: */ + struct clk_hw_onecell_data *hw_data; + + struct pll_14nm_cached_state cached_state; + + enum msm_dsi_phy_usecase uc; + struct dsi_pll_14nm *slave; +}; + +#define to_pll_14nm(x) container_of(x, struct dsi_pll_14nm, base) + +/* + * Private struct for N1/N2 post-divider clocks. These clocks are similar to + * the generic clk_divider class of clocks. The only difference is that it + * also sets the slave DSI PLL's post-dividers if in Dual DSI mode + */ +struct dsi_pll_14nm_postdiv { + struct clk_hw hw; + + /* divider params */ + u8 shift; + u8 width; + u8 flags; /* same flags as used by clk_divider struct */ + + struct dsi_pll_14nm *pll; +}; + +#define to_pll_14nm_postdiv(_hw) container_of(_hw, struct dsi_pll_14nm_postdiv, hw) + +/* + * Global list of private DSI PLL struct pointers. We need this for Dual DSI + * mode, where the master PLL's clk_ops needs access the slave's private data + */ +static struct dsi_pll_14nm *pll_14nm_list[DSI_MAX]; + +static bool pll_14nm_poll_for_ready(struct dsi_pll_14nm *pll_14nm, + u32 nb_tries, u32 timeout_us) +{ + bool pll_locked = false; + void __iomem *base = pll_14nm->mmio; + u32 tries, val; + + tries = nb_tries; + while (tries--) { + val = pll_read(base + + REG_DSI_14nm_PHY_PLL_RESET_SM_READY_STATUS); + pll_locked = !!(val & BIT(5)); + + if (pll_locked) + break; + + udelay(timeout_us); + } + + if (!pll_locked) { + tries = nb_tries; + while (tries--) { + val = pll_read(base + + REG_DSI_14nm_PHY_PLL_RESET_SM_READY_STATUS); + pll_locked = !!(val & BIT(0)); + + if (pll_locked) + break; + + udelay(timeout_us); + } + } + + DBG("DSI PLL is %slocked", pll_locked ? "" : "*not* "); + + return pll_locked; +} + +static void dsi_pll_14nm_input_init(struct dsi_pll_14nm *pll) +{ + pll->in.fref = pll->vco_ref_clk_rate; + pll->in.fdata = 0; + pll->in.dsiclk_sel = 1; /* Use the /2 path in Mux */ + pll->in.ldo_en = 0; /* disabled for now */ + + /* fixed input */ + pll->in.refclk_dbler_en = 0; + pll->in.vco_measure_time = 5; + pll->in.kvco_measure_time = 5; + pll->in.bandgap_timer = 4; + pll->in.pll_wakeup_timer = 5; + pll->in.plllock_cnt = 1; + pll->in.plllock_rng = 0; + + /* + * SSC is enabled by default. We might need DT props for configuring + * some SSC params like PPM and center/down spread etc. 
+ */ + pll->in.ssc_en = 1; + pll->in.ssc_center = 0; /* down spread by default */ + pll->in.ssc_spread = 5; /* PPM / 1000 */ + pll->in.ssc_freq = 31500; /* default recommended */ + pll->in.ssc_adj_period = 37; + + pll->in.pll_ie_trim = 4; + pll->in.pll_ip_trim = 4; + pll->in.pll_cpcset_cur = 1; + pll->in.pll_cpmset_cur = 1; + pll->in.pll_icpmset = 4; + pll->in.pll_icpcset = 4; + pll->in.pll_icpmset_p = 0; + pll->in.pll_icpmset_m = 0; + pll->in.pll_icpcset_p = 0; + pll->in.pll_icpcset_m = 0; + pll->in.pll_lpf_res1 = 3; + pll->in.pll_lpf_cap1 = 11; + pll->in.pll_lpf_cap2 = 1; + pll->in.pll_iptat_trim = 7; + pll->in.pll_c3ctrl = 2; + pll->in.pll_r3ctrl = 1; +} + +#define CEIL(x, y) (((x) + ((y) - 1)) / (y)) + +static void pll_14nm_ssc_calc(struct dsi_pll_14nm *pll) +{ + u32 period, ssc_period; + u32 ref, rem; + u64 step_size; + + DBG("vco=%lld ref=%lld", pll->vco_current_rate, pll->vco_ref_clk_rate); + + ssc_period = pll->in.ssc_freq / 500; + period = (u32)pll->vco_ref_clk_rate / 1000; + ssc_period = CEIL(period, ssc_period); + ssc_period -= 1; + pll->out.ssc_period = ssc_period; + + DBG("ssc freq=%d spread=%d period=%d", pll->in.ssc_freq, + pll->in.ssc_spread, pll->out.ssc_period); + + step_size = (u32)pll->vco_current_rate; + ref = pll->vco_ref_clk_rate; + ref /= 1000; + step_size = div_u64(step_size, ref); + step_size <<= 20; + step_size = div_u64(step_size, 1000); + step_size *= pll->in.ssc_spread; + step_size = div_u64(step_size, 1000); + step_size *= (pll->in.ssc_adj_period + 1); + + rem = 0; + step_size = div_u64_rem(step_size, ssc_period + 1, &rem); + if (rem) + step_size++; + + DBG("step_size=%lld", step_size); + + step_size &= 0x0ffff; /* take lower 16 bits */ + + pll->out.ssc_step_size = step_size; +} + +static void pll_14nm_dec_frac_calc(struct dsi_pll_14nm *pll) +{ + struct dsi_pll_input *pin = &pll->in; + struct dsi_pll_output *pout = &pll->out; + u64 multiplier = BIT(20); + u64 dec_start_multiple, dec_start, pll_comp_val; + u32 duration, div_frac_start; + u64 vco_clk_rate = pll->vco_current_rate; + u64 fref = pll->vco_ref_clk_rate; + + DBG("vco_clk_rate=%lld ref_clk_rate=%lld", vco_clk_rate, fref); + + dec_start_multiple = div_u64(vco_clk_rate * multiplier, fref); + div_u64_rem(dec_start_multiple, multiplier, &div_frac_start); + + dec_start = div_u64(dec_start_multiple, multiplier); + + pout->dec_start = (u32)dec_start; + pout->div_frac_start = div_frac_start; + + if (pin->plllock_cnt == 0) + duration = 1024; + else if (pin->plllock_cnt == 1) + duration = 256; + else if (pin->plllock_cnt == 2) + duration = 128; + else + duration = 32; + + pll_comp_val = duration * dec_start_multiple; + pll_comp_val = div_u64(pll_comp_val, multiplier); + do_div(pll_comp_val, 10); + + pout->plllock_cmp = (u32)pll_comp_val; + + pout->pll_txclk_en = 1; + pout->cmn_ldo_cntrl = 0x3c; +} + +static u32 pll_14nm_kvco_slop(u32 vrate) +{ + u32 slop = 0; + + if (vrate > VCO_MIN_RATE && vrate <= 1800000000UL) + slop = 600; + else if (vrate > 1800000000UL && vrate < 2300000000UL) + slop = 400; + else if (vrate > 2300000000UL && vrate < VCO_MAX_RATE) + slop = 280; + + return slop; +} + +static void pll_14nm_calc_vco_count(struct dsi_pll_14nm *pll) +{ + struct dsi_pll_input *pin = &pll->in; + struct dsi_pll_output *pout = &pll->out; + u64 vco_clk_rate = pll->vco_current_rate; + u64 fref = pll->vco_ref_clk_rate; + u64 data; + u32 cnt; + + data = fref * pin->vco_measure_time; + do_div(data, 1000000); + data &= 0x03ff; /* 10 bits */ + data -= 2; + pout->pll_vco_div_ref = data; + + data = div_u64(vco_clk_rate, 
1000000); /* unit is Mhz */ + data *= pin->vco_measure_time; + do_div(data, 10); + pout->pll_vco_count = data; + + data = fref * pin->kvco_measure_time; + do_div(data, 1000000); + data &= 0x03ff; /* 10 bits */ + data -= 1; + pout->pll_kvco_div_ref = data; + + cnt = pll_14nm_kvco_slop(vco_clk_rate); + cnt *= 2; + cnt /= 100; + cnt *= pin->kvco_measure_time; + pout->pll_kvco_count = cnt; + + pout->pll_misc1 = 16; + pout->pll_resetsm_cntrl = 48; + pout->pll_resetsm_cntrl2 = pin->bandgap_timer << 3; + pout->pll_resetsm_cntrl5 = pin->pll_wakeup_timer; + pout->pll_kvco_code = 0; +} + +static void pll_db_commit_ssc(struct dsi_pll_14nm *pll) +{ + void __iomem *base = pll->mmio; + struct dsi_pll_input *pin = &pll->in; + struct dsi_pll_output *pout = &pll->out; + u8 data; + + data = pin->ssc_adj_period; + data &= 0x0ff; + pll_write(base + REG_DSI_14nm_PHY_PLL_SSC_ADJ_PER1, data); + data = (pin->ssc_adj_period >> 8); + data &= 0x03; + pll_write(base + REG_DSI_14nm_PHY_PLL_SSC_ADJ_PER2, data); + + data = pout->ssc_period; + data &= 0x0ff; + pll_write(base + REG_DSI_14nm_PHY_PLL_SSC_PER1, data); + data = (pout->ssc_period >> 8); + data &= 0x0ff; + pll_write(base + REG_DSI_14nm_PHY_PLL_SSC_PER2, data); + + data = pout->ssc_step_size; + data &= 0x0ff; + pll_write(base + REG_DSI_14nm_PHY_PLL_SSC_STEP_SIZE1, data); + data = (pout->ssc_step_size >> 8); + data &= 0x0ff; + pll_write(base + REG_DSI_14nm_PHY_PLL_SSC_STEP_SIZE2, data); + + data = (pin->ssc_center & 0x01); + data <<= 1; + data |= 0x01; /* enable */ + pll_write(base + REG_DSI_14nm_PHY_PLL_SSC_EN_CENTER, data); + + wmb(); /* make sure register committed */ +} + +static void pll_db_commit_common(struct dsi_pll_14nm *pll, + struct dsi_pll_input *pin, + struct dsi_pll_output *pout) +{ + void __iomem *base = pll->mmio; + u8 data; + + /* confgiure the non frequency dependent pll registers */ + data = 0; + pll_write(base + REG_DSI_14nm_PHY_PLL_SYSCLK_EN_RESET, data); + + data = pout->pll_txclk_en; + pll_write(base + REG_DSI_14nm_PHY_PLL_TXCLK_EN, data); + + data = pout->pll_resetsm_cntrl; + pll_write(base + REG_DSI_14nm_PHY_PLL_RESETSM_CNTRL, data); + data = pout->pll_resetsm_cntrl2; + pll_write(base + REG_DSI_14nm_PHY_PLL_RESETSM_CNTRL2, data); + data = pout->pll_resetsm_cntrl5; + pll_write(base + REG_DSI_14nm_PHY_PLL_RESETSM_CNTRL5, data); + + data = pout->pll_vco_div_ref & 0xff; + pll_write(base + REG_DSI_14nm_PHY_PLL_VCO_DIV_REF1, data); + data = (pout->pll_vco_div_ref >> 8) & 0x3; + pll_write(base + REG_DSI_14nm_PHY_PLL_VCO_DIV_REF2, data); + + data = pout->pll_kvco_div_ref & 0xff; + pll_write(base + REG_DSI_14nm_PHY_PLL_KVCO_DIV_REF1, data); + data = (pout->pll_kvco_div_ref >> 8) & 0x3; + pll_write(base + REG_DSI_14nm_PHY_PLL_KVCO_DIV_REF2, data); + + data = pout->pll_misc1; + pll_write(base + REG_DSI_14nm_PHY_PLL_PLL_MISC1, data); + + data = pin->pll_ie_trim; + pll_write(base + REG_DSI_14nm_PHY_PLL_IE_TRIM, data); + + data = pin->pll_ip_trim; + pll_write(base + REG_DSI_14nm_PHY_PLL_IP_TRIM, data); + + data = pin->pll_cpmset_cur << 3 | pin->pll_cpcset_cur; + pll_write(base + REG_DSI_14nm_PHY_PLL_CP_SET_CUR, data); + + data = pin->pll_icpcset_p << 3 | pin->pll_icpcset_m; + pll_write(base + REG_DSI_14nm_PHY_PLL_PLL_ICPCSET, data); + + data = pin->pll_icpmset_p << 3 | pin->pll_icpcset_m; + pll_write(base + REG_DSI_14nm_PHY_PLL_PLL_ICPMSET, data); + + data = pin->pll_icpmset << 3 | pin->pll_icpcset; + pll_write(base + REG_DSI_14nm_PHY_PLL_PLL_ICP_SET, data); + + data = pin->pll_lpf_cap2 << 4 | pin->pll_lpf_cap1; + pll_write(base + 
REG_DSI_14nm_PHY_PLL_PLL_LPF1, data); + + data = pin->pll_iptat_trim; + pll_write(base + REG_DSI_14nm_PHY_PLL_IPTAT_TRIM, data); + + data = pin->pll_c3ctrl | pin->pll_r3ctrl << 4; + pll_write(base + REG_DSI_14nm_PHY_PLL_PLL_CRCTRL, data); +} + +static void pll_14nm_software_reset(struct dsi_pll_14nm *pll_14nm) +{ + void __iomem *cmn_base = pll_14nm->phy_cmn_mmio; + + /* de assert pll start and apply pll sw reset */ + + /* stop pll */ + pll_write(cmn_base + REG_DSI_14nm_PHY_CMN_PLL_CNTRL, 0); + + /* pll sw reset */ + pll_write_udelay(cmn_base + REG_DSI_14nm_PHY_CMN_CTRL_1, 0x20, 10); + wmb(); /* make sure register committed */ + + pll_write(cmn_base + REG_DSI_14nm_PHY_CMN_CTRL_1, 0); + wmb(); /* make sure register committed */ +} + +static void pll_db_commit_14nm(struct dsi_pll_14nm *pll, + struct dsi_pll_input *pin, + struct dsi_pll_output *pout) +{ + void __iomem *base = pll->mmio; + void __iomem *cmn_base = pll->phy_cmn_mmio; + u8 data; + + DBG("DSI%d PLL", pll->id); + + data = pout->cmn_ldo_cntrl; + pll_write(cmn_base + REG_DSI_14nm_PHY_CMN_LDO_CNTRL, data); + + pll_db_commit_common(pll, pin, pout); + + pll_14nm_software_reset(pll); + + data = pin->dsiclk_sel; /* set dsiclk_sel = 1 */ + pll_write(cmn_base + REG_DSI_14nm_PHY_CMN_CLK_CFG1, data); + + data = 0xff; /* data, clk, pll normal operation */ + pll_write(cmn_base + REG_DSI_14nm_PHY_CMN_CTRL_0, data); + + /* configure the frequency dependent pll registers */ + data = pout->dec_start; + pll_write(base + REG_DSI_14nm_PHY_PLL_DEC_START, data); + + data = pout->div_frac_start & 0xff; + pll_write(base + REG_DSI_14nm_PHY_PLL_DIV_FRAC_START1, data); + data = (pout->div_frac_start >> 8) & 0xff; + pll_write(base + REG_DSI_14nm_PHY_PLL_DIV_FRAC_START2, data); + data = (pout->div_frac_start >> 16) & 0xf; + pll_write(base + REG_DSI_14nm_PHY_PLL_DIV_FRAC_START3, data); + + data = pout->plllock_cmp & 0xff; + pll_write(base + REG_DSI_14nm_PHY_PLL_PLLLOCK_CMP1, data); + + data = (pout->plllock_cmp >> 8) & 0xff; + pll_write(base + REG_DSI_14nm_PHY_PLL_PLLLOCK_CMP2, data); + + data = (pout->plllock_cmp >> 16) & 0x3; + pll_write(base + REG_DSI_14nm_PHY_PLL_PLLLOCK_CMP3, data); + + data = pin->plllock_cnt << 1 | pin->plllock_rng << 3; + pll_write(base + REG_DSI_14nm_PHY_PLL_PLLLOCK_CMP_EN, data); + + data = pout->pll_vco_count & 0xff; + pll_write(base + REG_DSI_14nm_PHY_PLL_VCO_COUNT1, data); + data = (pout->pll_vco_count >> 8) & 0xff; + pll_write(base + REG_DSI_14nm_PHY_PLL_VCO_COUNT2, data); + + data = pout->pll_kvco_count & 0xff; + pll_write(base + REG_DSI_14nm_PHY_PLL_KVCO_COUNT1, data); + data = (pout->pll_kvco_count >> 8) & 0x3; + pll_write(base + REG_DSI_14nm_PHY_PLL_KVCO_COUNT2, data); + + data = (pout->pll_postdiv - 1) << 4 | pin->pll_lpf_res1; + pll_write(base + REG_DSI_14nm_PHY_PLL_PLL_LPF2_POSTDIV, data); + + if (pin->ssc_en) + pll_db_commit_ssc(pll); + + wmb(); /* make sure register committed */ +} + +/* + * VCO clock Callbacks + */ +static int dsi_pll_14nm_vco_set_rate(struct clk_hw *hw, unsigned long rate, + unsigned long parent_rate) +{ + struct msm_dsi_pll *pll = hw_clk_to_pll(hw); + struct dsi_pll_14nm *pll_14nm = to_pll_14nm(pll); + struct dsi_pll_input *pin = &pll_14nm->in; + struct dsi_pll_output *pout = &pll_14nm->out; + + DBG("DSI PLL%d rate=%lu, parent's=%lu", pll_14nm->id, rate, + parent_rate); + + pll_14nm->vco_current_rate = rate; + pll_14nm->vco_ref_clk_rate = VCO_REF_CLK_RATE; + + dsi_pll_14nm_input_init(pll_14nm); + + /* + * This configures the post divider internal to the VCO. It's + * fixed to divide by 1 for now. 
+ * + * tx_band = pll_postdiv. + * 0: divided by 1 + * 1: divided by 2 + * 2: divided by 4 + * 3: divided by 8 + */ + pout->pll_postdiv = DSI_PLL_DEFAULT_VCO_POSTDIV; + + pll_14nm_dec_frac_calc(pll_14nm); + + if (pin->ssc_en) + pll_14nm_ssc_calc(pll_14nm); + + pll_14nm_calc_vco_count(pll_14nm); + + /* commit the slave DSI PLL registers if we're master. Note that we + * don't lock the slave PLL. We just ensure that the PLL/PHY registers + * of the master and slave are identical + */ + if (pll_14nm->uc == MSM_DSI_PHY_MASTER) { + struct dsi_pll_14nm *pll_14nm_slave = pll_14nm->slave; + + pll_db_commit_14nm(pll_14nm_slave, pin, pout); + } + + pll_db_commit_14nm(pll_14nm, pin, pout); + + return 0; +} + +static unsigned long dsi_pll_14nm_vco_recalc_rate(struct clk_hw *hw, + unsigned long parent_rate) +{ + struct msm_dsi_pll *pll = hw_clk_to_pll(hw); + struct dsi_pll_14nm *pll_14nm = to_pll_14nm(pll); + void __iomem *base = pll_14nm->mmio; + u64 vco_rate, multiplier = BIT(20); + u32 div_frac_start; + u32 dec_start; + u64 ref_clk = parent_rate; + + dec_start = pll_read(base + REG_DSI_14nm_PHY_PLL_DEC_START); + dec_start &= 0x0ff; + + DBG("dec_start = %x", dec_start); + + div_frac_start = (pll_read(base + REG_DSI_14nm_PHY_PLL_DIV_FRAC_START3) + & 0xf) << 16; + div_frac_start |= (pll_read(base + REG_DSI_14nm_PHY_PLL_DIV_FRAC_START2) + & 0xff) << 8; + div_frac_start |= pll_read(base + REG_DSI_14nm_PHY_PLL_DIV_FRAC_START1) + & 0xff; + + DBG("div_frac_start = %x", div_frac_start); + + vco_rate = ref_clk * dec_start; + + vco_rate += ((ref_clk * div_frac_start) / multiplier); + + /* + * Recalculating the rate from dec_start and frac_start doesn't end up + * the rate we originally set. Convert the freq to KHz, round it up and + * convert it back to MHz. + */ + vco_rate = DIV_ROUND_UP_ULL(vco_rate, 1000) * 1000; + + DBG("returning vco rate = %lu", (unsigned long)vco_rate); + + return (unsigned long)vco_rate; +} + +static const struct clk_ops clk_ops_dsi_pll_14nm_vco = { + .round_rate = msm_dsi_pll_helper_clk_round_rate, + .set_rate = dsi_pll_14nm_vco_set_rate, + .recalc_rate = dsi_pll_14nm_vco_recalc_rate, + .prepare = msm_dsi_pll_helper_clk_prepare, + .unprepare = msm_dsi_pll_helper_clk_unprepare, +}; + +/* + * N1 and N2 post-divider clock callbacks + */ +#define div_mask(width) ((1 << (width)) - 1) +static unsigned long dsi_pll_14nm_postdiv_recalc_rate(struct clk_hw *hw, + unsigned long parent_rate) +{ + struct dsi_pll_14nm_postdiv *postdiv = to_pll_14nm_postdiv(hw); + struct dsi_pll_14nm *pll_14nm = postdiv->pll; + void __iomem *base = pll_14nm->phy_cmn_mmio; + u8 shift = postdiv->shift; + u8 width = postdiv->width; + u32 val; + + DBG("DSI%d PLL parent rate=%lu", pll_14nm->id, parent_rate); + + val = pll_read(base + REG_DSI_14nm_PHY_CMN_CLK_CFG0) >> shift; + val &= div_mask(width); + + return divider_recalc_rate(hw, parent_rate, val, NULL, + postdiv->flags); +} + +static long dsi_pll_14nm_postdiv_round_rate(struct clk_hw *hw, + unsigned long rate, + unsigned long *prate) +{ + struct dsi_pll_14nm_postdiv *postdiv = to_pll_14nm_postdiv(hw); + struct dsi_pll_14nm *pll_14nm = postdiv->pll; + + DBG("DSI%d PLL parent rate=%lu", pll_14nm->id, rate); + + return divider_round_rate(hw, rate, prate, NULL, + postdiv->width, + postdiv->flags); +} + +static int dsi_pll_14nm_postdiv_set_rate(struct clk_hw *hw, unsigned long rate, + unsigned long parent_rate) +{ + struct dsi_pll_14nm_postdiv *postdiv = to_pll_14nm_postdiv(hw); + struct dsi_pll_14nm *pll_14nm = postdiv->pll; + void __iomem *base = 
pll_14nm->phy_cmn_mmio; + spinlock_t *lock = &pll_14nm->postdiv_lock; + u8 shift = postdiv->shift; + u8 width = postdiv->width; + unsigned int value; + unsigned long flags = 0; + u32 val; + + DBG("DSI%d PLL parent rate=%lu parent rate %lu", pll_14nm->id, rate, + parent_rate); + + value = divider_get_val(rate, parent_rate, NULL, postdiv->width, + postdiv->flags); + + spin_lock_irqsave(lock, flags); + + val = pll_read(base + REG_DSI_14nm_PHY_CMN_CLK_CFG0); + val &= ~(div_mask(width) << shift); + + val |= value << shift; + pll_write(base + REG_DSI_14nm_PHY_CMN_CLK_CFG0, val); + + /* If we're master in dual DSI mode, then the slave PLL's post-dividers + * follow the master's post dividers + */ + if (pll_14nm->uc == MSM_DSI_PHY_MASTER) { + struct dsi_pll_14nm *pll_14nm_slave = pll_14nm->slave; + void __iomem *slave_base = pll_14nm_slave->phy_cmn_mmio; + + pll_write(slave_base + REG_DSI_14nm_PHY_CMN_CLK_CFG0, val); + } + + spin_unlock_irqrestore(lock, flags); + + return 0; +} + +static const struct clk_ops clk_ops_dsi_pll_14nm_postdiv = { + .recalc_rate = dsi_pll_14nm_postdiv_recalc_rate, + .round_rate = dsi_pll_14nm_postdiv_round_rate, + .set_rate = dsi_pll_14nm_postdiv_set_rate, +}; + +/* + * PLL Callbacks + */ + +static int dsi_pll_14nm_enable_seq(struct msm_dsi_pll *pll) +{ + struct dsi_pll_14nm *pll_14nm = to_pll_14nm(pll); + void __iomem *base = pll_14nm->mmio; + void __iomem *cmn_base = pll_14nm->phy_cmn_mmio; + bool locked; + + DBG(""); + + pll_write(base + REG_DSI_14nm_PHY_PLL_VREF_CFG1, 0x10); + pll_write(cmn_base + REG_DSI_14nm_PHY_CMN_PLL_CNTRL, 1); + + locked = pll_14nm_poll_for_ready(pll_14nm, POLL_MAX_READS, + POLL_TIMEOUT_US); + + if (unlikely(!locked)) + dev_err(&pll_14nm->pdev->dev, "DSI PLL lock failed\n"); + else + DBG("DSI PLL lock success"); + + return locked ? 0 : -EINVAL; +} + +static void dsi_pll_14nm_disable_seq(struct msm_dsi_pll *pll) +{ + struct dsi_pll_14nm *pll_14nm = to_pll_14nm(pll); + void __iomem *cmn_base = pll_14nm->phy_cmn_mmio; + + DBG(""); + + pll_write(cmn_base + REG_DSI_14nm_PHY_CMN_PLL_CNTRL, 0); +} + +static void dsi_pll_14nm_save_state(struct msm_dsi_pll *pll) +{ + struct dsi_pll_14nm *pll_14nm = to_pll_14nm(pll); + struct pll_14nm_cached_state *cached_state = &pll_14nm->cached_state; + void __iomem *cmn_base = pll_14nm->phy_cmn_mmio; + u32 data; + + data = pll_read(cmn_base + REG_DSI_14nm_PHY_CMN_CLK_CFG0); + + cached_state->n1postdiv = data & 0xf; + cached_state->n2postdiv = (data >> 4) & 0xf; + + DBG("DSI%d PLL save state %x %x", pll_14nm->id, + cached_state->n1postdiv, cached_state->n2postdiv); + + cached_state->vco_rate = clk_hw_get_rate(&pll->clk_hw); +} + +static int dsi_pll_14nm_restore_state(struct msm_dsi_pll *pll) +{ + struct dsi_pll_14nm *pll_14nm = to_pll_14nm(pll); + struct pll_14nm_cached_state *cached_state = &pll_14nm->cached_state; + void __iomem *cmn_base = pll_14nm->phy_cmn_mmio; + u32 data; + int ret; + + ret = dsi_pll_14nm_vco_set_rate(&pll->clk_hw, + cached_state->vco_rate, 0); + if (ret) { + dev_err(&pll_14nm->pdev->dev, + "restore vco rate failed. 
ret=%d\n", ret); + return ret; + } + + data = cached_state->n1postdiv | (cached_state->n2postdiv << 4); + + DBG("DSI%d PLL restore state %x %x", pll_14nm->id, + cached_state->n1postdiv, cached_state->n2postdiv); + + pll_write(cmn_base + REG_DSI_14nm_PHY_CMN_CLK_CFG0, data); + + /* also restore post-dividers for slave DSI PLL */ + if (pll_14nm->uc == MSM_DSI_PHY_MASTER) { + struct dsi_pll_14nm *pll_14nm_slave = pll_14nm->slave; + void __iomem *slave_base = pll_14nm_slave->phy_cmn_mmio; + + pll_write(slave_base + REG_DSI_14nm_PHY_CMN_CLK_CFG0, data); + } + + return 0; +} + +static int dsi_pll_14nm_set_usecase(struct msm_dsi_pll *pll, + enum msm_dsi_phy_usecase uc) +{ + struct dsi_pll_14nm *pll_14nm = to_pll_14nm(pll); + void __iomem *base = pll_14nm->mmio; + u32 clkbuflr_en, bandgap = 0; + + switch (uc) { + case MSM_DSI_PHY_STANDALONE: + clkbuflr_en = 0x1; + break; + case MSM_DSI_PHY_MASTER: + clkbuflr_en = 0x3; + pll_14nm->slave = pll_14nm_list[(pll_14nm->id + 1) % DSI_MAX]; + break; + case MSM_DSI_PHY_SLAVE: + clkbuflr_en = 0x0; + bandgap = 0x3; + break; + default: + return -EINVAL; + } + + pll_write(base + REG_DSI_14nm_PHY_PLL_CLKBUFLR_EN, clkbuflr_en); + if (bandgap) + pll_write(base + REG_DSI_14nm_PHY_PLL_PLL_BANDGAP, bandgap); + + pll_14nm->uc = uc; + + return 0; +} + +static int dsi_pll_14nm_get_provider(struct msm_dsi_pll *pll, + struct clk **byte_clk_provider, + struct clk **pixel_clk_provider) +{ + struct dsi_pll_14nm *pll_14nm = to_pll_14nm(pll); + struct clk_hw_onecell_data *hw_data = pll_14nm->hw_data; + + if (byte_clk_provider) + *byte_clk_provider = hw_data->hws[DSI_BYTE_PLL_CLK]->clk; + if (pixel_clk_provider) + *pixel_clk_provider = hw_data->hws[DSI_PIXEL_PLL_CLK]->clk; + + return 0; +} + +static void dsi_pll_14nm_destroy(struct msm_dsi_pll *pll) +{ + struct dsi_pll_14nm *pll_14nm = to_pll_14nm(pll); + struct platform_device *pdev = pll_14nm->pdev; + int num_hws = pll_14nm->num_hws; + + of_clk_del_provider(pdev->dev.of_node); + + while (num_hws--) + clk_hw_unregister(pll_14nm->hws[num_hws]); +} + +static struct clk_hw *pll_14nm_postdiv_register(struct dsi_pll_14nm *pll_14nm, + const char *name, + const char *parent_name, + unsigned long flags, + u8 shift) +{ + struct dsi_pll_14nm_postdiv *pll_postdiv; + struct device *dev = &pll_14nm->pdev->dev; + struct clk_init_data postdiv_init = { + .parent_names = (const char *[]) { parent_name }, + .num_parents = 1, + .name = name, + .flags = flags, + .ops = &clk_ops_dsi_pll_14nm_postdiv, + }; + int ret; + + pll_postdiv = devm_kzalloc(dev, sizeof(*pll_postdiv), GFP_KERNEL); + if (!pll_postdiv) + return ERR_PTR(-ENOMEM); + + pll_postdiv->pll = pll_14nm; + pll_postdiv->shift = shift; + /* both N1 and N2 postdividers are 4 bits wide */ + pll_postdiv->width = 4; + /* range of each divider is from 1 to 15 */ + pll_postdiv->flags = CLK_DIVIDER_ONE_BASED; + pll_postdiv->hw.init = &postdiv_init; + + ret = clk_hw_register(dev, &pll_postdiv->hw); + if (ret) + return ERR_PTR(ret); + + return &pll_postdiv->hw; +} + +static int pll_14nm_register(struct dsi_pll_14nm *pll_14nm) +{ + char clk_name[32], parent[32], vco_name[32]; + struct clk_init_data vco_init = { + .parent_names = (const char *[]){ "xo" }, + .num_parents = 1, + .name = vco_name, + .flags = CLK_IGNORE_UNUSED, + .ops = &clk_ops_dsi_pll_14nm_vco, + }; + struct device *dev = &pll_14nm->pdev->dev; + struct clk_hw **hws = pll_14nm->hws; + struct clk_hw_onecell_data *hw_data; + struct clk_hw *hw; + int num = 0; + int ret; + + DBG("DSI%d", pll_14nm->id); + + hw_data = devm_kzalloc(dev, 
sizeof(*hw_data) + + NUM_PROVIDED_CLKS * sizeof(struct clk_hw *), + GFP_KERNEL); + if (!hw_data) + return -ENOMEM; + + snprintf(vco_name, 32, "dsi%dvco_clk", pll_14nm->id); + pll_14nm->base.clk_hw.init = &vco_init; + + ret = clk_hw_register(dev, &pll_14nm->base.clk_hw); + if (ret) + return ret; + + hws[num++] = &pll_14nm->base.clk_hw; + + snprintf(clk_name, 32, "dsi%dn1_postdiv_clk", pll_14nm->id); + snprintf(parent, 32, "dsi%dvco_clk", pll_14nm->id); + + /* N1 postdiv, bits 0-3 in REG_DSI_14nm_PHY_CMN_CLK_CFG0 */ + hw = pll_14nm_postdiv_register(pll_14nm, clk_name, parent, + CLK_SET_RATE_PARENT, 0); + if (IS_ERR(hw)) + return PTR_ERR(hw); + + hws[num++] = hw; + + snprintf(clk_name, 32, "dsi%dpllbyte", pll_14nm->id); + snprintf(parent, 32, "dsi%dn1_postdiv_clk", pll_14nm->id); + + /* DSI Byte clock = VCO_CLK / N1 / 8 */ + hw = clk_hw_register_fixed_factor(dev, clk_name, parent, + CLK_SET_RATE_PARENT, 1, 8); + if (IS_ERR(hw)) + return PTR_ERR(hw); + + hws[num++] = hw; + hw_data->hws[DSI_BYTE_PLL_CLK] = hw; + + snprintf(clk_name, 32, "dsi%dn1_postdivby2_clk", pll_14nm->id); + snprintf(parent, 32, "dsi%dn1_postdiv_clk", pll_14nm->id); + + /* + * Skip the mux for now, force DSICLK_SEL to 1, Add a /2 divider + * on the way. Don't let it set parent. + */ + hw = clk_hw_register_fixed_factor(dev, clk_name, parent, 0, 1, 2); + if (IS_ERR(hw)) + return PTR_ERR(hw); + + hws[num++] = hw; + + snprintf(clk_name, 32, "dsi%dpll", pll_14nm->id); + snprintf(parent, 32, "dsi%dn1_postdivby2_clk", pll_14nm->id); + + /* DSI pixel clock = VCO_CLK / N1 / 2 / N2 + * This is the output of N2 post-divider, bits 4-7 in + * REG_DSI_14nm_PHY_CMN_CLK_CFG0. Don't let it set parent. + */ + hw = pll_14nm_postdiv_register(pll_14nm, clk_name, parent, 0, 4); + if (IS_ERR(hw)) + return PTR_ERR(hw); + + hws[num++] = hw; + hw_data->hws[DSI_PIXEL_PLL_CLK] = hw; + + pll_14nm->num_hws = num; + + hw_data->num = NUM_PROVIDED_CLKS; + pll_14nm->hw_data = hw_data; + + ret = of_clk_add_hw_provider(dev->of_node, of_clk_hw_onecell_get, + pll_14nm->hw_data); + if (ret) { + dev_err(dev, "failed to register clk provider: %d\n", ret); + return ret; + } + + return 0; +} + +struct msm_dsi_pll *msm_dsi_pll_14nm_init(struct platform_device *pdev, int id) +{ + struct dsi_pll_14nm *pll_14nm; + struct msm_dsi_pll *pll; + int ret; + + if (!pdev) + return ERR_PTR(-ENODEV); + + pll_14nm = devm_kzalloc(&pdev->dev, sizeof(*pll_14nm), GFP_KERNEL); + if (!pll_14nm) + return ERR_PTR(-ENOMEM); + + DBG("PLL%d", id); + + pll_14nm->pdev = pdev; + pll_14nm->id = id; + pll_14nm_list[id] = pll_14nm; + + pll_14nm->phy_cmn_mmio = msm_ioremap(pdev, "dsi_phy", "DSI_PHY"); + if (IS_ERR_OR_NULL(pll_14nm->phy_cmn_mmio)) { + dev_err(&pdev->dev, "failed to map CMN PHY base\n"); + return ERR_PTR(-ENOMEM); + } + + pll_14nm->mmio = msm_ioremap(pdev, "dsi_pll", "DSI_PLL"); + if (IS_ERR_OR_NULL(pll_14nm->mmio)) { + dev_err(&pdev->dev, "failed to map PLL base\n"); + return ERR_PTR(-ENOMEM); + } + + spin_lock_init(&pll_14nm->postdiv_lock); + + pll = &pll_14nm->base; + pll->min_rate = VCO_MIN_RATE; + pll->max_rate = VCO_MAX_RATE; + pll->get_provider = dsi_pll_14nm_get_provider; + pll->destroy = dsi_pll_14nm_destroy; + pll->disable_seq = dsi_pll_14nm_disable_seq; + pll->save_state = dsi_pll_14nm_save_state; + pll->restore_state = dsi_pll_14nm_restore_state; + pll->set_usecase = dsi_pll_14nm_set_usecase; + + pll_14nm->vco_delay = 1; + + pll->en_seq_cnt = 1; + pll->enable_seqs[0] = dsi_pll_14nm_enable_seq; + + ret = pll_14nm_register(pll_14nm); + if (ret) { + dev_err(&pdev->dev, 
"failed to register PLL: %d\n", ret); + return ERR_PTR(ret); + } + + return pll; +}