/*
 * UFS Host Controller driver for Exynos specific extensions
 *
 * Copyright (C) 2013-2014 Samsung Electronics Co., Ltd.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 */
11 #include <linux/module.h>
12 #include <linux/platform_device.h>
14 #include <linux/of_address.h>
15 #include <linux/clk.h>
16 #include <linux/smc.h>
20 #include "ufshcd-pltfrm.h"
21 #include "ufs-exynos.h"
22 #include "ufs-exynos-fmp.h"
23 #include <soc/samsung/exynos-fsys0-tcxo.h>
24 #include <soc/samsung/exynos-cpupm.h>
25 #include <linux/mfd/syscon.h>
26 #include <linux/regmap.h>
27 #include <linux/soc/samsung/exynos-soc.h>
28 #include <linux/spinlock.h>
31 * Unipro attribute value
33 #define TXTRAILINGCLOCKS 0x10
34 #define TACTIVATE_10_USEC 400 /* unit: 10us */
38 #define PEER_DEV_ID 0x01
39 #define PEER_CPORT_ID 0x00
40 #define TRAFFIC_CLASS 0x00
42 #define IATOVAL_NSEC 20000 /* unit: ns */
44 /* UFS CAL interface */
47 * Debugging information, SFR/attributes/misc
49 static struct exynos_ufs
*ufs_host_backup
[1];
50 static int ufs_host_index
= 0;
51 static spinlock_t fsys0_tcxo_lock
;
53 static struct exynos_ufs_sfr_log ufs_log_std_sfr
[] = {
54 {"CAPABILITIES" , REG_CONTROLLER_CAPABILITIES
, 0},
55 {"UFS VERSION" , REG_UFS_VERSION
, 0},
56 {"PRODUCT ID" , REG_CONTROLLER_DEV_ID
, 0},
57 {"MANUFACTURE ID" , REG_CONTROLLER_PROD_ID
, 0},
58 {"INTERRUPT STATUS" , REG_INTERRUPT_STATUS
, 0},
59 {"INTERRUPT ENABLE" , REG_INTERRUPT_ENABLE
, 0},
60 {"CONTROLLER STATUS" , REG_CONTROLLER_STATUS
, 0},
61 {"CONTROLLER ENABLE" , REG_CONTROLLER_ENABLE
, 0},
62 {"UTP TRANSF REQ INT AGG CNTRL" , REG_UTP_TRANSFER_REQ_INT_AGG_CONTROL
, 0},
63 {"UTP TRANSF REQ LIST BASE L" , REG_UTP_TRANSFER_REQ_LIST_BASE_L
, 0},
64 {"UTP TRANSF REQ LIST BASE H" , REG_UTP_TRANSFER_REQ_LIST_BASE_H
, 0},
65 {"UTP TRANSF REQ DOOR BELL" , REG_UTP_TRANSFER_REQ_DOOR_BELL
, 0},
66 {"UTP TRANSF REQ LIST CLEAR" , REG_UTP_TRANSFER_REQ_LIST_CLEAR
, 0},
67 {"UTP TRANSF REQ LIST RUN STOP" , REG_UTP_TRANSFER_REQ_LIST_RUN_STOP
, 0},
68 {"UTP TASK REQ LIST BASE L" , REG_UTP_TASK_REQ_LIST_BASE_L
, 0},
69 {"UTP TASK REQ LIST BASE H" , REG_UTP_TASK_REQ_LIST_BASE_H
, 0},
70 {"UTP TASK REQ DOOR BELL" , REG_UTP_TASK_REQ_DOOR_BELL
, 0},
71 {"UTP TASK REQ LIST CLEAR" , REG_UTP_TASK_REQ_LIST_CLEAR
, 0},
72 {"UTP TASK REQ LIST RUN STOP" , REG_UTP_TASK_REQ_LIST_RUN_STOP
, 0},
73 {"UIC COMMAND" , REG_UIC_COMMAND
, 0},
74 {"UIC COMMAND ARG1" , REG_UIC_COMMAND_ARG_1
, 0},
75 {"UIC COMMAND ARG2" , REG_UIC_COMMAND_ARG_2
, 0},
76 {"UIC COMMAND ARG3" , REG_UIC_COMMAND_ARG_3
, 0},
81 /* Helper for UFS CAL interface */
82 static inline int ufs_init_cal(struct exynos_ufs
*ufs
, int idx
,
83 struct platform_device
*pdev
)
86 struct device
*dev
= &pdev
->dev
;
87 struct ufs_cal_param
*p
= NULL
;
89 p
= devm_kzalloc(dev
, sizeof(*p
), GFP_KERNEL
);
91 dev_err(ufs
->dev
, "cannot allocate mem for cal param\n");
97 p
->board
= 0; /* ken: need a dt node for board */
98 if ((ret
= ufs_cal_init(p
, idx
)) != UFS_CAL_NO_ERROR
) {
99 dev_err(ufs
->dev
, "ufs_init_cal = %d!!!\n", ret
);
106 static inline int ufs_pre_link(struct exynos_ufs
*ufs
)
109 struct ufs_cal_param
*p
= ufs
->cal_param
;
111 p
->mclk_rate
= ufs
->mclk_rate
;
112 p
->target_lane
= ufs
->num_rx_lanes
;
113 p
->available_lane
= ufs
->num_rx_lanes
;
115 if ((ret
= ufs_cal_pre_link(p
)) != UFS_CAL_NO_ERROR
) {
116 dev_err(ufs
->dev
, "ufs_pre_link = %d!!!\n", ret
);
123 static inline int ufs_post_link(struct exynos_ufs
*ufs
)
127 if ((ret
= ufs_cal_post_link(ufs
->cal_param
)) != UFS_CAL_NO_ERROR
) {
128 dev_err(ufs
->dev
, "ufs_post_link = %d!!!\n", ret
);
135 static inline int ufs_pre_gear_change(struct exynos_ufs
*ufs
,
136 struct uic_pwr_mode
*pmd
)
138 struct ufs_cal_param
*p
= ufs
->cal_param
;
142 p
->target_lane
= pmd
->lane
;
143 if ((ret
= ufs_cal_pre_pmc(p
)) != UFS_CAL_NO_ERROR
) {
144 dev_err(ufs
->dev
, "ufs_pre_gear_change = %d!!!\n", ret
);
151 static inline int ufs_post_gear_change(struct exynos_ufs
*ufs
)
155 if ((ret
= ufs_cal_post_pmc(ufs
->cal_param
)) != UFS_CAL_NO_ERROR
) {
156 dev_err(ufs
->dev
, "ufs_post_gear_change = %d!!!\n", ret
);
163 static inline int ufs_post_h8_enter(struct exynos_ufs
*ufs
)
167 if ((ret
= ufs_cal_post_h8_enter(ufs
->cal_param
)) != UFS_CAL_NO_ERROR
) {
168 dev_err(ufs
->dev
, "ufs_post_h8_enter = %d!!!\n", ret
);
175 static inline int ufs_pre_h8_exit(struct exynos_ufs
*ufs
)
179 if ((ret
= ufs_cal_pre_h8_exit(ufs
->cal_param
)) != UFS_CAL_NO_ERROR
) {
180 dev_err(ufs
->dev
, "ufs_pre_h8_exit = %d!!!\n", ret
);
187 /* Adaptor for UFS CAL */
188 void ufs_lld_dme_set(void *h
, u32 addr
, u32 val
)
190 ufshcd_dme_set(((struct exynos_ufs
*)h
)->hba
, addr
, val
);
193 void ufs_lld_dme_get(void *h
, u32 addr
, u32
*val
)
195 ufshcd_dme_get(((struct exynos_ufs
*)h
)->hba
, addr
, val
);
198 void ufs_lld_dme_peer_set(void *h
, u32 addr
, u32 val
)
200 ufshcd_dme_peer_set(((struct exynos_ufs
*)h
)->hba
, addr
, val
);
203 void ufs_lld_pma_write(void *h
, u32 val
, u32 addr
)
205 phy_pma_writel((struct exynos_ufs
*)h
, val
, addr
);
208 u32
ufs_lld_pma_read(void *h
, u32 addr
)
210 return phy_pma_readl((struct exynos_ufs
*)h
, addr
);
213 void ufs_lld_unipro_write(void *h
, u32 val
, u32 addr
)
215 unipro_writel((struct exynos_ufs
*)h
, val
, addr
);
218 void ufs_lld_udelay(u32 val
)
223 void ufs_lld_usleep_delay(u32 min
, u32 max
)
225 usleep_range(min
, max
);
228 unsigned long ufs_lld_get_time_count(unsigned long offset
)
233 unsigned long ufs_lld_calc_timeout(const unsigned int ms
)
235 return msecs_to_jiffies(ms
);
238 static inline void exynos_ufs_ctrl_phy_pwr(struct exynos_ufs
*ufs
, bool en
)
243 ret
= regmap_update_bits(ufs
->pmureg
, ufs
->cxt_iso
.offset
,
244 ufs
->cxt_iso
.mask
, ufs
->cxt_iso
.val
);
246 ret
= regmap_update_bits(ufs
->pmureg
, ufs
->cxt_iso
.offset
,
247 ufs
->cxt_iso
.mask
, 0);
250 dev_err(ufs
->dev
, "Unable to update PHY ISO control\n");
253 #ifndef __EXYNOS_UFS_VS_DEBUG__
254 static void exynos_ufs_dump_std_sfr(struct ufs_hba
*hba
)
256 struct exynos_ufs
*ufs
= to_exynos_ufs(hba
);
257 struct exynos_ufs_sfr_log
* cfg
= ufs
->debug
.std_sfr
;
259 dev_err(hba
->dev
, ": --------------------------------------------------- \n");
260 dev_err(hba
->dev
, ": \t\tREGISTER DUMP\n");
261 dev_err(hba
->dev
, ": --------------------------------------------------- \n");
266 cfg
->val
= ufshcd_readl(hba
, cfg
->offset
);
269 dev_err(hba
->dev
, ": %s(0x%04x):\t\t\t\t0x%08x\n",
270 cfg
->name
, cfg
->offset
, cfg
->val
);
279 * Exynos debugging main function
281 static void exynos_ufs_dump_debug_info(struct ufs_hba
*hba
)
283 #ifdef __EXYNOS_UFS_VS_DEBUG__
284 exynos_ufs_get_uic_info(hba
);
286 exynos_ufs_dump_std_sfr(hba
);
290 static void exynos_ufs_select_refclk(struct exynos_ufs
*ufs
, bool en
)
293 if (ufs
->hw_rev
!= UFS_VER_0004
)
297 * true : alternative clock path, false : active clock path
299 reg
= hci_readl(ufs
, HCI_MPHY_REFCLK_SEL
);
301 hci_writel(ufs
, reg
| MPHY_REFCLK_SEL
, HCI_MPHY_REFCLK_SEL
);
303 hci_writel(ufs
, reg
& ~MPHY_REFCLK_SEL
, HCI_MPHY_REFCLK_SEL
);
306 inline void exynos_ufs_set_hwacg_control(struct exynos_ufs
*ufs
, bool en
)
309 if ((ufs
->hw_rev
!= UFS_VER_0004
) && (ufs
->hw_rev
!= UFS_VER_0005
))
313 * default value 1->0 at KC. so,
314 * need to set "1(disable HWACG)" during UFS init
316 reg
= hci_readl(ufs
, HCI_UFS_ACG_DISABLE
);
318 hci_writel(ufs
, reg
& (~HCI_UFS_ACG_DISABLE_EN
), HCI_UFS_ACG_DISABLE
);
320 hci_writel(ufs
, reg
| HCI_UFS_ACG_DISABLE_EN
, HCI_UFS_ACG_DISABLE
);
324 inline void exynos_ufs_ctrl_auto_hci_clk(struct exynos_ufs
*ufs
, bool en
)
326 u32 reg
= hci_readl(ufs
, HCI_FORCE_HCS
);
329 hci_writel(ufs
, reg
| HCI_CORECLK_STOP_EN
, HCI_FORCE_HCS
);
331 hci_writel(ufs
, reg
& ~HCI_CORECLK_STOP_EN
, HCI_FORCE_HCS
);
334 static inline void exynos_ufs_ctrl_clk(struct exynos_ufs
*ufs
, bool en
)
336 u32 reg
= hci_readl(ufs
, HCI_FORCE_HCS
);
339 hci_writel(ufs
, reg
| CLK_STOP_CTRL_EN_ALL
, HCI_FORCE_HCS
);
341 hci_writel(ufs
, reg
& ~CLK_STOP_CTRL_EN_ALL
, HCI_FORCE_HCS
);
344 static inline void exynos_ufs_gate_clk(struct exynos_ufs
*ufs
, bool en
)
347 u32 reg
= hci_readl(ufs
, HCI_CLKSTOP_CTRL
);
350 hci_writel(ufs
, reg
| CLK_STOP_ALL
, HCI_CLKSTOP_CTRL
);
352 hci_writel(ufs
, reg
& ~CLK_STOP_ALL
, HCI_CLKSTOP_CTRL
);
355 static void exynos_ufs_set_unipro_mclk(struct exynos_ufs
*ufs
)
357 ufs
->mclk_rate
= (u32
)clk_get_rate(ufs
->clk_unipro
);
360 static void exynos_ufs_fit_aggr_timeout(struct exynos_ufs
*ufs
)
365 /* IA_TICK_SEL : 1(1us_TO_CNT_VAL) */
366 nVal
= hci_readl(ufs
, HCI_UFSHCI_V2P1_CTRL
);
368 hci_writel(ufs
, nVal
, HCI_UFSHCI_V2P1_CTRL
);
370 cnt_val
= ufs
->mclk_rate
/ 1000000 ;
371 hci_writel(ufs
, cnt_val
& CNT_VAL_1US_MASK
, HCI_1US_TO_CNT_VAL
);
374 static void exynos_ufs_init_pmc_req(struct ufs_hba
*hba
,
375 struct ufs_pa_layer_attr
*pwr_max
,
376 struct ufs_pa_layer_attr
*pwr_req
)
379 struct exynos_ufs
*ufs
= to_exynos_ufs(hba
);
380 struct uic_pwr_mode
*req_pmd
= &ufs
->req_pmd_parm
;
381 struct uic_pwr_mode
*act_pmd
= &ufs
->act_pmd_parm
;
383 /* update lane variable after link */
384 ufs
->num_rx_lanes
= pwr_max
->lane_rx
;
385 ufs
->num_tx_lanes
= pwr_max
->lane_tx
;
388 = act_pmd
->gear
= min_t(u8
, pwr_max
->gear_rx
, req_pmd
->gear
);
390 = act_pmd
->gear
= min_t(u8
, pwr_max
->gear_tx
, req_pmd
->gear
);
392 = act_pmd
->lane
= min_t(u8
, pwr_max
->lane_rx
, req_pmd
->lane
);
394 = act_pmd
->lane
= min_t(u8
, pwr_max
->lane_tx
, req_pmd
->lane
);
395 pwr_req
->pwr_rx
= act_pmd
->mode
= req_pmd
->mode
;
396 pwr_req
->pwr_tx
= act_pmd
->mode
= req_pmd
->mode
;
397 pwr_req
->hs_rate
= act_pmd
->hs_series
= req_pmd
->hs_series
;
400 static void exynos_ufs_config_intr(struct exynos_ufs
*ufs
, u32 errs
, u8 index
)
404 hci_writel(ufs
, DFES_ERR_EN
| errs
, HCI_ERROR_EN_PA_LAYER
);
407 hci_writel(ufs
, DFES_ERR_EN
| errs
, HCI_ERROR_EN_DL_LAYER
);
410 hci_writel(ufs
, DFES_ERR_EN
| errs
, HCI_ERROR_EN_N_LAYER
);
413 hci_writel(ufs
, DFES_ERR_EN
| errs
, HCI_ERROR_EN_T_LAYER
);
416 hci_writel(ufs
, DFES_ERR_EN
| errs
, HCI_ERROR_EN_DME_LAYER
);
421 static void exynos_ufs_dev_hw_reset(struct ufs_hba
*hba
)
423 struct exynos_ufs
*ufs
= to_exynos_ufs(hba
);
425 /* bit[1] for resetn */
426 hci_writel(ufs
, 0 << 0, HCI_GPIO_OUT
);
428 hci_writel(ufs
, 1 << 0, HCI_GPIO_OUT
);
431 static void exynos_ufs_init_host(struct exynos_ufs
*ufs
)
435 /* internal clock control */
436 exynos_ufs_ctrl_auto_hci_clk(ufs
, false);
437 exynos_ufs_set_unipro_mclk(ufs
);
439 /* period for interrupt aggregation */
440 exynos_ufs_fit_aggr_timeout(ufs
);
442 /* misc HCI configurations */
443 hci_writel(ufs
, 0xA, HCI_DATA_REORDER
);
444 hci_writel(ufs
, PRDT_PREFECT_EN
| PRDT_SET_SIZE(12),
445 HCI_TXPRDT_ENTRY_SIZE
);
446 hci_writel(ufs
, PRDT_SET_SIZE(12), HCI_RXPRDT_ENTRY_SIZE
);
447 hci_writel(ufs
, 0xFFFFFFFF, HCI_UTRL_NEXUS_TYPE
);
448 hci_writel(ufs
, 0xFFFFFFFF, HCI_UTMRL_NEXUS_TYPE
);
450 reg
= hci_readl(ufs
, HCI_AXIDMA_RWDATA_BURST_LEN
) &
452 hci_writel(ufs
, WLU_EN
| BURST_LEN(3),
453 HCI_AXIDMA_RWDATA_BURST_LEN
);
456 * Enable HWAGC control by IOP
458 * default value 1->0 at KC.
459 * always "0"(controlled by UFS_ACG_DISABLE)
461 reg
= hci_readl(ufs
, HCI_IOP_ACG_DISABLE
);
462 hci_writel(ufs
, reg
& (~HCI_IOP_ACG_DISABLE_EN
), HCI_IOP_ACG_DISABLE
);
465 static void exynos_ufs_pre_hibern8(struct ufs_hba
*hba
, u8 enter
)
469 static void exynos_ufs_post_hibern8(struct ufs_hba
*hba
, u8 enter
)
471 struct exynos_ufs
*ufs
= to_exynos_ufs(hba
);
474 struct uic_pwr_mode
*act_pmd
= &ufs
->act_pmd_parm
;
477 ufshcd_dme_get(hba
, UIC_ARG_MIB(PA_PWRMODE
), &mode
);
478 if (mode
!= (act_pmd
->mode
<< 4 | act_pmd
->mode
)) {
479 dev_warn(hba
->dev
, "%s: power mode not matched, mode : 0x%x, act_mode : 0x%x\n",
480 __func__
, mode
, act_pmd
->mode
);
481 hba
->pwr_info
.pwr_rx
= (mode
>> 4) & 0xf;
482 hba
->pwr_info
.pwr_tx
= mode
& 0xf;
483 ufshcd_config_pwr_mode(hba
, &hba
->max_pwr_info
.info
);
488 static int exynos_ufs_init_system(struct exynos_ufs
*ufs
)
490 struct device
*dev
= ufs
->dev
;
492 bool is_io_coherency
;
493 bool is_dma_coherent
;
495 /* PHY isolation bypass */
496 exynos_ufs_ctrl_phy_pwr(ufs
, true);
499 is_io_coherency
= !IS_ERR(ufs
->sysreg
);
500 is_dma_coherent
= !!of_find_property(dev
->of_node
,
501 "dma-coherent", NULL
);
503 if (is_io_coherency
!= is_dma_coherent
)
506 if (!is_io_coherency
)
507 dev_err(dev
, "Not configured to use IO coherency\n");
509 ret
= regmap_update_bits(ufs
->sysreg
, ufs
->cxt_coherency
.offset
,
510 ufs
->cxt_coherency
.mask
, ufs
->cxt_coherency
.val
);
515 static int exynos_ufs_get_clks(struct ufs_hba
*hba
)
517 struct exynos_ufs
*ufs
= to_exynos_ufs(hba
);
518 struct list_head
*head
= &hba
->clk_list_head
;
519 struct ufs_clk_info
*clki
;
522 ufs_host_backup
[ufs_host_index
++] = ufs
;
523 ufs
->debug
.std_sfr
= ufs_log_std_sfr
;
525 if (!head
|| list_empty(head
))
528 list_for_each_entry(clki
, head
, list
) {
530 * get clock with an order listed in device tree
533 ufs
->clk_hci
= clki
->clk
;
535 ufs
->clk_unipro
= clki
->clk
;
540 if (!ufs
->clk_hci
|| !ufs
->clk_unipro
)
546 static void exynos_ufs_set_features(struct ufs_hba
*hba
, u32 hw_rev
)
549 hba
->caps
= UFSHCD_CAP_CLK_GATING
|
550 UFSHCD_CAP_HIBERN8_WITH_CLK_GATING
|
551 UFSHCD_CAP_INTR_AGGR
;
553 /* quirks of common driver */
554 hba
->quirks
= UFSHCD_QUIRK_PRDT_BYTE_GRAN
|
555 UFSHCI_QUIRK_SKIP_INTR_AGGR
|
556 UFSHCD_QUIRK_UNRESET_INTR_AGGR
|
557 UFSHCD_QUIRK_BROKEN_REQ_LIST_CLR
;
559 hba
->quirks
|= UFSHCD_QUIRK_GET_UPMCRS_DIRECT
|
560 UFSHCD_QUIRK_GET_GENERRCODE_DIRECT
;
562 /* quirks of exynos-specific driver */
566 * Exynos-specific callback functions
568 * init | Pure SW init & system-related init
569 * host_reset | Host SW reset & init
570 * pre_setup_clocks | specific power down
571 * setup_clocks | specific power up
574 * Initializations for software, host controller and system
575 * should be contained only in ->host_reset() as possible.
578 static int exynos_ufs_init(struct ufs_hba
*hba
)
580 struct exynos_ufs
*ufs
= to_exynos_ufs(hba
);
584 /* set features, such as caps or quirks */
585 exynos_ufs_set_features(hba
, ufs
->hw_rev
);
587 /* get some clock sources and debug infomation structures */
588 ret
= exynos_ufs_get_clks(hba
);
593 ret
= exynos_ufs_init_system(ufs
);
597 /* get fmp & smu id */
598 ret
= of_property_read_u32(ufs
->dev
->of_node
, "fmp-id", &id
);
600 ufs
->fmp
= SMU_ID_MAX
;
604 ret
= of_property_read_u32(ufs
->dev
->of_node
, "smu-id", &id
);
606 ufs
->smu
= SMU_ID_MAX
;
610 /* FMPSECURITY & SMU */
611 exynos_ufs_fmp_sec_cfg(ufs
);
612 exynos_ufs_smu_init(ufs
);
615 ret
= exynos_ufs_init_dbg(hba
);
620 ufs
->misc_flags
= EXYNOS_UFS_MISC_TOGGLE_LOG
;
625 static void exynos_ufs_host_reset(struct ufs_hba
*hba
)
627 struct exynos_ufs
*ufs
= to_exynos_ufs(hba
);
628 unsigned long timeout
= jiffies
+ msecs_to_jiffies(1);
630 exynos_ufs_ctrl_auto_hci_clk(ufs
, false);
632 hci_writel(ufs
, UFS_SW_RST_MASK
, HCI_SW_RST
);
635 if (!(hci_readl(ufs
, HCI_SW_RST
) & UFS_SW_RST_MASK
))
637 } while (time_before(jiffies
, timeout
));
639 dev_err(ufs
->dev
, "timeout host sw-reset\n");
641 exynos_ufs_dump_uic_info(hba
);
647 exynos_ufs_init_host(ufs
);
650 exynos_ufs_dev_hw_reset(hba
);
653 #ifdef CONFIG_EXYNOS_SMC_LOGGING
654 exynos_smc(SMC_CMD_UFS_LOG
, 0, 0, 0);
660 static inline void exynos_ufs_dev_reset_ctrl(struct exynos_ufs
*ufs
, bool en
)
664 hci_writel(ufs
, 1 << 0, HCI_GPIO_OUT
);
666 hci_writel(ufs
, 0 << 0, HCI_GPIO_OUT
);
669 static void exynos_ufs_tcxo_ctrl(struct exynos_ufs
*ufs
, bool tcxo_on
)
674 ret
= regmap_read(ufs
->pmureg
, ufs
->cxt_iso
.offset
, &val
);
682 ret
= regmap_write(ufs
->pmureg
, ufs
->cxt_iso
.offset
, val
);
685 dev_err(ufs
->dev
, "Unable to access the pmureg using regmap\n");
689 static bool tcxo_used_by
[OWNER_MAX
];
691 static int exynos_check_shared_resource(int owner
)
693 if (owner
== OWNER_FIRST
)
694 return tcxo_used_by
[OWNER_SECOND
];
696 return tcxo_used_by
[OWNER_FIRST
];
700 static bool exynos_use_shared_resource(int owner
, bool use
)
702 tcxo_used_by
[owner
] = use
;
704 return exynos_check_shared_resource(owner
);
706 static int exynos_ufs_pre_setup_clocks(struct ufs_hba
*hba
, bool on
)
708 struct exynos_ufs
*ufs
= to_exynos_ufs(hba
);
713 #ifdef CONFIG_ARM64_EXYNOS_CPUIDLE
714 exynos_update_ip_idle_status(ufs
->idle_ip_index
, 0);
717 if (ufs
->tcxo_ex_ctrl
) {
718 spin_lock_irqsave(&fsys0_tcxo_lock
, flags
);
719 if (exynos_use_shared_resource(OWNER_FIRST
, on
) == !on
)
720 exynos_ufs_tcxo_ctrl(ufs
, true);
721 spin_unlock_irqrestore(&fsys0_tcxo_lock
, flags
);
725 * Now all used blocks would not be turned off in a host.
727 exynos_ufs_ctrl_auto_hci_clk(ufs
, false);
728 exynos_ufs_gate_clk(ufs
, false);
731 exynos_ufs_set_hwacg_control(ufs
, false);
733 pm_qos_update_request(&ufs
->pm_qos_int
, 0);
734 pm_qos_update_request(&ufs
->pm_qos_fsys0
, 0);
739 ret
= ufs_post_h8_enter(ufs
);
745 static int exynos_ufs_setup_clocks(struct ufs_hba
*hba
, bool on
)
747 struct exynos_ufs
*ufs
= to_exynos_ufs(hba
);
755 ret
= ufs_pre_h8_exit(ufs
);
757 pm_qos_update_request(&ufs
->pm_qos_int
, ufs
->pm_qos_int_value
);
758 pm_qos_update_request(&ufs
->pm_qos_fsys0
, ufs
->pm_qos_fsys0_value
);
762 * Now all used blocks would be turned off in a host.
764 //exynos_ufs_gate_clk(ufs, true);
765 exynos_ufs_ctrl_auto_hci_clk(ufs
, true);
768 exynos_ufs_set_hwacg_control(ufs
, true);
770 if (ufs
->tcxo_ex_ctrl
) {
771 spin_lock_irqsave(&fsys0_tcxo_lock
, flags
);
772 if (exynos_use_shared_resource(OWNER_FIRST
, on
) == on
)
773 exynos_ufs_tcxo_ctrl(ufs
, false);
774 spin_unlock_irqrestore(&fsys0_tcxo_lock
, flags
);
778 #ifdef CONFIG_ARM64_EXYNOS_CPUIDLE
779 exynos_update_ip_idle_status(ufs
->idle_ip_index
, 1);
786 static int exynos_ufs_link_startup_notify(struct ufs_hba
*hba
,
787 enum ufs_notify_change_status status
)
789 struct exynos_ufs
*ufs
= to_exynos_ufs(hba
);
798 exynos_ufs_config_intr(ufs
, DFES_DEF_DL_ERRS
, UNIP_DL_LYR
);
799 exynos_ufs_config_intr(ufs
, DFES_DEF_N_ERRS
, UNIP_N_LYR
);
800 exynos_ufs_config_intr(ufs
, DFES_DEF_T_ERRS
, UNIP_T_LYR
);
802 exynos_ufs_ctrl_clk(ufs
, true);
803 exynos_ufs_select_refclk(ufs
, true);
804 exynos_ufs_gate_clk(ufs
, false);
805 exynos_ufs_set_hwacg_control(ufs
, false);
807 if (ufs
->num_rx_lanes
== 0 || ufs
->num_tx_lanes
== 0) {
808 ufshcd_dme_get(hba
, UIC_ARG_MIB(PA_AVAILRXDATALANES
),
810 ufshcd_dme_get(hba
, UIC_ARG_MIB(PA_AVAILTXDATALANES
),
812 WARN(ufs
->num_rx_lanes
!= ufs
->num_tx_lanes
,
813 "available data lane is not equal(rx:%d, tx:%d)\n",
814 ufs
->num_rx_lanes
, ufs
->num_tx_lanes
);
817 ufs
->mclk_rate
= clk_get_rate(ufs
->clk_unipro
);
819 ret
= ufs_pre_link(ufs
);
822 /* UIC configuration table after link startup */
823 ret
= ufs_post_link(ufs
);
832 static int exynos_ufs_pwr_change_notify(struct ufs_hba
*hba
,
833 enum ufs_notify_change_status status
,
834 struct ufs_pa_layer_attr
*pwr_max
,
835 struct ufs_pa_layer_attr
*pwr_req
)
837 struct exynos_ufs
*ufs
= to_exynos_ufs(hba
);
838 struct uic_pwr_mode
*act_pmd
= &ufs
->act_pmd_parm
;
844 /* Set PMC parameters to be requested */
845 exynos_ufs_init_pmc_req(hba
, pwr_max
, pwr_req
);
847 /* UIC configuration table before power mode change */
848 ret
= ufs_pre_gear_change(ufs
, act_pmd
);
852 /* UIC configuration table after power mode change */
853 ret
= ufs_post_gear_change(ufs
);
856 "Power mode change(%d): M(%d)G(%d)L(%d)HS-series(%d)\n",
857 ret
, act_pmd
->mode
, act_pmd
->gear
,
858 act_pmd
->lane
, act_pmd
->hs_series
);
867 static void exynos_ufs_set_nexus_t_xfer_req(struct ufs_hba
*hba
,
868 int tag
, struct scsi_cmnd
*cmd
)
870 struct exynos_ufs
*ufs
= to_exynos_ufs(hba
);
873 type
= hci_readl(ufs
, HCI_UTRL_NEXUS_TYPE
);
880 hci_writel(ufs
, type
, HCI_UTRL_NEXUS_TYPE
);
883 static void exynos_ufs_set_nexus_t_task_mgmt(struct ufs_hba
*hba
, int tag
, u8 tm_func
)
885 struct exynos_ufs
*ufs
= to_exynos_ufs(hba
);
888 type
= hci_readl(ufs
, HCI_UTMRL_NEXUS_TYPE
);
895 case UFS_ABORT_TASK_SET
:
896 case UFS_CLEAR_TASK_SET
:
897 case UFS_LOGICAL_RESET
:
898 case UFS_QUERY_TASK_SET
:
903 hci_writel(ufs
, type
, HCI_UTMRL_NEXUS_TYPE
);
906 static void exynos_ufs_hibern8_notify(struct ufs_hba
*hba
,
907 u8 enter
, bool notify
)
909 int noti
= (int) notify
;
913 exynos_ufs_pre_hibern8(hba
, enter
);
916 exynos_ufs_post_hibern8(hba
, enter
);
923 static int __exynos_ufs_suspend(struct ufs_hba
*hba
, enum ufs_pm_op pm_op
)
925 struct exynos_ufs
*ufs
= to_exynos_ufs(hba
);
927 pm_qos_update_request(&ufs
->pm_qos_int
, 0);
928 pm_qos_update_request(&ufs
->pm_qos_fsys0
, 0);
930 exynos_ufs_dev_reset_ctrl(ufs
, false);
932 exynos_ufs_ctrl_phy_pwr(ufs
, false);
937 static int __exynos_ufs_resume(struct ufs_hba
*hba
, enum ufs_pm_op pm_op
)
939 struct exynos_ufs
*ufs
= to_exynos_ufs(hba
);
942 exynos_ufs_ctrl_phy_pwr(ufs
, true);
945 ret
= exynos_ufs_init_system(ufs
);
949 if (ufshcd_is_clkgating_allowed(hba
))
950 clk_prepare_enable(ufs
->clk_hci
);
951 exynos_ufs_ctrl_auto_hci_clk(ufs
, false);
953 /* FMPSECURITY & SMU resume */
954 exynos_ufs_fmp_sec_cfg(ufs
);
955 exynos_ufs_smu_resume(ufs
);
958 #ifdef CONFIG_EXYNOS_SMC_LOGGING
959 exynos_smc(SMC_CMD_UFS_LOG
, 0, 0, 0);
962 if (ufshcd_is_clkgating_allowed(hba
))
963 clk_disable_unprepare(ufs
->clk_hci
);
968 static u8
exynos_ufs_get_unipro_direct(struct ufs_hba
*hba
, u32 num
)
971 UNIP_DME_LINKSTARTUP_CNF_RESULT
,
972 UNIP_DME_HIBERN8_ENTER_CNF_RESULT
,
973 UNIP_DME_HIBERN8_EXIT_CNF_RESULT
,
974 UNIP_DME_PWR_IND_RESULT
,
975 UNIP_DME_HIBERN8_ENTER_IND_RESULT
,
976 UNIP_DME_HIBERN8_EXIT_IND_RESULT
979 struct exynos_ufs
*ufs
= to_exynos_ufs(hba
);
981 return unipro_readl(ufs
, offset
[num
]);
984 static int exynos_ufs_crypto_engine_cfg(struct ufs_hba
*hba
,
985 struct ufshcd_lrb
*lrbp
,
986 struct scatterlist
*sg
, int index
,
989 return exynos_ufs_fmp_cfg(hba
, lrbp
, sg
, index
, sector_offset
);
992 static int exynos_ufs_crypto_engine_clear(struct ufs_hba
*hba
,
993 struct ufshcd_lrb
*lrbp
)
995 return exynos_ufs_fmp_clear(hba
, lrbp
);
998 static int exynos_ufs_access_control_abort(struct ufs_hba
*hba
)
1000 struct exynos_ufs
*ufs
= to_exynos_ufs(hba
);
1002 return exynos_ufs_smu_abort(ufs
);
1005 static struct ufs_hba_variant_ops exynos_ufs_ops
= {
1006 .init
= exynos_ufs_init
,
1007 .host_reset
= exynos_ufs_host_reset
,
1008 .pre_setup_clocks
= exynos_ufs_pre_setup_clocks
,
1009 .setup_clocks
= exynos_ufs_setup_clocks
,
1010 .link_startup_notify
= exynos_ufs_link_startup_notify
,
1011 .pwr_change_notify
= exynos_ufs_pwr_change_notify
,
1012 .set_nexus_t_xfer_req
= exynos_ufs_set_nexus_t_xfer_req
,
1013 .set_nexus_t_task_mgmt
= exynos_ufs_set_nexus_t_task_mgmt
,
1014 .hibern8_notify
= exynos_ufs_hibern8_notify
,
1015 .dbg_register_dump
= exynos_ufs_dump_debug_info
,
1016 .suspend
= __exynos_ufs_suspend
,
1017 .resume
= __exynos_ufs_resume
,
1018 .get_unipro_result
= exynos_ufs_get_unipro_direct
,
1019 .crypto_engine_cfg
= exynos_ufs_crypto_engine_cfg
,
1020 .crypto_engine_clear
= exynos_ufs_crypto_engine_clear
,
1021 .access_control_abort
= exynos_ufs_access_control_abort
,
1024 static int exynos_ufs_populate_dt_phy(struct device
*dev
, struct exynos_ufs
*ufs
)
1026 struct device_node
*ufs_phy
;
1027 struct exynos_ufs_phy
*phy
= &ufs
->phy
;
1028 struct resource io_res
;
1031 ufs_phy
= of_get_child_by_name(dev
->of_node
, "ufs-phy");
1033 dev_err(dev
, "failed to get ufs-phy node\n");
1037 ret
= of_address_to_resource(ufs_phy
, 0, &io_res
);
1039 dev_err(dev
, "failed to get i/o address phy pma\n");
1043 phy
->reg_pma
= devm_ioremap_resource(dev
, &io_res
);
1044 if (!phy
->reg_pma
) {
1045 dev_err(dev
, "failed to ioremap for phy pma\n");
1051 of_node_put(ufs_phy
);
1057 * This function is to define offset, mask and shift to access somewhere.
1059 static int exynos_ufs_set_context_for_access(struct device
*dev
,
1060 const char *name
, struct exynos_access_cxt
*cxt
)
1062 struct device_node
*np
;
1065 np
= of_get_child_by_name(dev
->of_node
, name
);
1067 dev_err(dev
, "failed to get node(%s)\n", name
);
1071 ret
= of_property_read_u32(np
, "offset", &cxt
->offset
);
1072 if (IS_ERR(&cxt
->offset
)) {
1073 dev_err(dev
, "failed to set cxt(%s) offset\n", name
);
1077 ret
= of_property_read_u32(np
, "mask", &cxt
->mask
);
1078 if (IS_ERR(&cxt
->mask
)) {
1079 dev_err(dev
, "failed to set cxt(%s) mask\n", name
);
1083 ret
= of_property_read_u32(np
, "val", &cxt
->val
);
1084 if (IS_ERR(&cxt
->val
)) {
1085 dev_err(dev
, "failed to set cxt(%s) val\n", name
);
1092 static int exynos_ufs_populate_dt_system(struct device
*dev
, struct exynos_ufs
*ufs
)
1094 struct device_node
*np
= dev
->of_node
;
1098 ufs
->pmureg
= syscon_regmap_lookup_by_phandle(dev
->of_node
,
1099 "samsung,pmu-phandle");
1100 if (IS_ERR(ufs
->pmureg
)) {
1102 * phy isolation should be available.
1103 * so this case need to be failed.
1105 dev_err(dev
, "pmu regmap lookup failed.\n");
1106 return PTR_ERR(ufs
->pmureg
);
1109 /* Set access context for phy isolation bypass */
1110 ret
= exynos_ufs_set_context_for_access(dev
, "ufs-phy-iso",
1113 /* no device node, default */
1114 ufs
->cxt_iso
.offset
= 0x0724;
1115 ufs
->cxt_iso
.mask
= 0x1;
1116 ufs
->cxt_iso
.val
= 0x1;
1121 ufs
->sysreg
= syscon_regmap_lookup_by_phandle(dev
->of_node
,
1122 "samsung,sysreg-fsys-phandle");
1123 if (IS_ERR(ufs
->sysreg
)) {
1125 * Currently, ufs driver gets sysreg for io coherency.
1126 * Some architecture might not support this feature.
1127 * So the device node might not exist.
1129 dev_err(dev
, "sysreg regmap lookup failed.\n");
1133 /* Set access context for io coherency */
1134 ret
= exynos_ufs_set_context_for_access(dev
, "ufs-dma-coherency",
1135 &ufs
->cxt_coherency
);
1137 /* no device node, default */
1138 ufs
->cxt_coherency
.offset
= 0x0700;
1139 ufs
->cxt_coherency
.mask
= 0x300; /* bit 8,9 */
1140 ufs
->cxt_coherency
.val
= 0x3;
1144 /* TCXO exclusive control */
1145 if (of_property_read_u32(np
, "tcxo-ex-ctrl", &ufs
->tcxo_ex_ctrl
))
1146 ufs
->tcxo_ex_ctrl
= 1;
1151 static int exynos_ufs_get_pwr_mode(struct device_node
*np
,
1152 struct exynos_ufs
*ufs
)
1154 struct uic_pwr_mode
*pmd
= &ufs
->req_pmd_parm
;
1156 pmd
->mode
= FAST_MODE
;
1158 if (of_property_read_u8(np
, "ufs,pmd-attr-lane", &pmd
->lane
))
1161 if (of_property_read_u8(np
, "ufs,pmd-attr-gear", &pmd
->gear
))
1164 pmd
->hs_series
= PA_HS_MODE_B
;
1169 static int exynos_ufs_populate_dt(struct device
*dev
, struct exynos_ufs
*ufs
)
1171 struct device_node
*np
= dev
->of_node
;
1174 /* Get exynos-specific version for featuring */
1175 if (of_property_read_u32(np
, "hw-rev", &ufs
->hw_rev
))
1176 ufs
->hw_rev
= UFS_VER_0004
;
1178 ret
= exynos_ufs_populate_dt_phy(dev
, ufs
);
1180 dev_err(dev
, "failed to populate dt-phy\n");
1184 ret
= exynos_ufs_populate_dt_system(dev
, ufs
);
1186 dev_err(dev
, "failed to populate dt-pmu\n");
1190 exynos_ufs_get_pwr_mode(np
, ufs
);
1192 if (of_property_read_u8(np
, "brd-for-cal", &ufs
->cal_param
->board
))
1193 ufs
->cal_param
->board
= 0;
1195 if (of_property_read_u32(np
, "ufs-pm-qos-int", &ufs
->pm_qos_int_value
))
1196 ufs
->pm_qos_int_value
= 0;
1198 if (of_property_read_u32(np
, "ufs-pm-qos-fsys0", &ufs
->pm_qos_fsys0_value
))
1199 ufs
->pm_qos_fsys0_value
= 0;
1206 static int exynos_ufs_lp_event(struct notifier_block
*nb
, unsigned long event
, void *data
)
1208 struct exynos_ufs
*ufs
=
1209 container_of(nb
, struct exynos_ufs
, tcxo_nb
);
1210 int ret
= NOTIFY_DONE
;
1212 unsigned long flags
;
1214 spin_lock_irqsave(&fsys0_tcxo_lock
, flags
);
1218 if (exynos_use_shared_resource(OWNER_SECOND
, on
) == on
)
1219 exynos_ufs_tcxo_ctrl(ufs
, false);
1222 if (exynos_use_shared_resource(OWNER_SECOND
, on
) == !on
)
1223 exynos_ufs_tcxo_ctrl(ufs
, true);
1226 spin_unlock_irqrestore(&fsys0_tcxo_lock
, flags
);
1231 static u64 exynos_ufs_dma_mask
= DMA_BIT_MASK(32);
1233 static int exynos_ufs_probe(struct platform_device
*pdev
)
1235 struct device
*dev
= &pdev
->dev
;
1236 struct exynos_ufs
*ufs
;
1237 struct resource
*res
;
1240 ufs
= devm_kzalloc(dev
, sizeof(*ufs
), GFP_KERNEL
);
1242 dev_err(dev
, "cannot allocate mem for exynos-ufs\n");
1246 /* exynos-specific hci */
1247 res
= platform_get_resource(pdev
, IORESOURCE_MEM
, 1);
1248 ufs
->reg_hci
= devm_ioremap_resource(dev
, res
);
1249 if (!ufs
->reg_hci
) {
1250 dev_err(dev
, "cannot ioremap for hci vendor register\n");
1255 res
= platform_get_resource(pdev
, IORESOURCE_MEM
, 2);
1256 ufs
->reg_unipro
= devm_ioremap_resource(dev
, res
);
1257 if (!ufs
->reg_unipro
) {
1258 dev_err(dev
, "cannot ioremap for unipro register\n");
1263 res
= platform_get_resource(pdev
, IORESOURCE_MEM
, 3);
1264 ufs
->reg_ufsp
= devm_ioremap_resource(dev
, res
);
1265 if (!ufs
->reg_ufsp
) {
1266 dev_err(dev
, "cannot ioremap for ufs protector register\n");
1270 /* This must be before calling exynos_ufs_populate_dt */
1271 ret
= ufs_init_cal(ufs
, ufs_host_index
, pdev
);
1275 ret
= exynos_ufs_populate_dt(dev
, ufs
);
1277 dev_err(dev
, "failed to get dt info.\n");
1282 * pmu node and txco syscon node should be exclusive
1284 if (ufs
->tcxo_ex_ctrl
) {
1285 ufs
->tcxo_nb
.notifier_call
= exynos_ufs_lp_event
;
1286 ufs
->tcxo_nb
.next
= NULL
;
1287 ufs
->tcxo_nb
.priority
= 0;
1289 ret
= exynos_fsys0_tcxo_register_notifier(&ufs
->tcxo_nb
);
1291 dev_err(dev
, "failed to register fsys0 txco notifier\n");
1295 #ifdef CONFIG_ARM64_EXYNOS_CPUIDLE
1296 ufs
->idle_ip_index
= exynos_get_idle_ip_index(dev_name(&pdev
->dev
));
1297 exynos_update_ip_idle_status(ufs
->idle_ip_index
, 0);
1301 dev
->platform_data
= ufs
;
1302 dev
->dma_mask
= &exynos_ufs_dma_mask
;
1304 pm_qos_add_request(&ufs
->pm_qos_int
, PM_QOS_DEVICE_THROUGHPUT
, 0);
1305 pm_qos_add_request(&ufs
->pm_qos_fsys0
, PM_QOS_BUS_THROUGHPUT
, 0);
1306 if (ufs
->tcxo_ex_ctrl
)
1307 spin_lock_init(&fsys0_tcxo_lock
);
1309 ret
= ufshcd_pltfrm_init(pdev
, &exynos_ufs_ops
);
1314 static int exynos_ufs_remove(struct platform_device
*pdev
)
1316 struct exynos_ufs
*ufs
= dev_get_platdata(&pdev
->dev
);
1318 ufshcd_pltfrm_exit(pdev
);
1320 pm_qos_remove_request(&ufs
->pm_qos_fsys0
);
1321 pm_qos_remove_request(&ufs
->pm_qos_int
);
1323 ufs
->misc_flags
= EXYNOS_UFS_MISC_TOGGLE_LOG
;
1325 exynos_ufs_ctrl_phy_pwr(ufs
, false);
1330 #ifdef CONFIG_PM_SLEEP
1331 static int exynos_ufs_suspend(struct device
*dev
)
1333 struct ufs_hba
*hba
= dev_get_drvdata(dev
);
1335 return ufshcd_system_suspend(hba
);
1338 static int exynos_ufs_resume(struct device
*dev
)
1340 struct ufs_hba
*hba
= dev_get_drvdata(dev
);
1342 return ufshcd_system_resume(hba
);
1345 #define exynos_ufs_suspend NULL
1346 #define exynos_ufs_resume NULL
1347 #endif /* CONFIG_PM_SLEEP */
1349 #ifdef CONFIG_PM_RUNTIME
1350 static int exynos_ufs_runtime_suspend(struct device
*dev
)
1352 return ufshcd_system_suspend(dev_get_drvdata(dev
));
1355 static int exynos_ufs_runtime_resume(struct device
*dev
)
1357 return ufshcd_system_resume(dev_get_drvdata(dev
));
1360 static int exynos_ufs_runtime_idle(struct device
*dev
)
1362 return ufshcd_runtime_idle(dev_get_drvdata(dev
));
1366 #define exynos_ufs_runtime_suspend NULL
1367 #define exynos_ufs_runtime_resume NULL
1368 #define exynos_ufs_runtime_idle NULL
1369 #endif /* CONFIG_PM_RUNTIME */
1371 static void exynos_ufs_shutdown(struct platform_device
*pdev
)
1373 ufshcd_shutdown((struct ufs_hba
*)platform_get_drvdata(pdev
));
1376 static const struct dev_pm_ops exynos_ufs_dev_pm_ops
= {
1377 .suspend
= exynos_ufs_suspend
,
1378 .resume
= exynos_ufs_resume
,
1379 .runtime_suspend
= exynos_ufs_runtime_suspend
,
1380 .runtime_resume
= exynos_ufs_runtime_resume
,
1381 .runtime_idle
= exynos_ufs_runtime_idle
,
1384 static const struct ufs_hba_variant exynos_ufs_drv_data
= {
1385 .ops
= &exynos_ufs_ops
,
1388 static const struct of_device_id exynos_ufs_match
[] = {
1389 { .compatible
= "samsung,exynos-ufs", },
1392 MODULE_DEVICE_TABLE(of
, exynos_ufs_match
);
1394 static struct platform_driver exynos_ufs_driver
= {
1396 .name
= "exynos-ufs",
1397 .owner
= THIS_MODULE
,
1398 .pm
= &exynos_ufs_dev_pm_ops
,
1399 .of_match_table
= exynos_ufs_match
,
1400 .suppress_bind_attrs
= true,
1402 .probe
= exynos_ufs_probe
,
1403 .remove
= exynos_ufs_remove
,
1404 .shutdown
= exynos_ufs_shutdown
,
1407 module_platform_driver(exynos_ufs_driver
);
1408 MODULE_DESCRIPTION("Exynos Specific UFSHCI driver");
1409 MODULE_AUTHOR("Seungwon Jeon <tgih.jun@samsung.com>");
1410 MODULE_AUTHOR("Kiwoong Kim <kwmad.kim@samsung.com>");
1411 MODULE_LICENSE("GPL");