/*
 * UFS Host Controller driver for Exynos specific extensions
 *
 * Copyright (C) 2013-2014 Samsung Electronics Co., Ltd.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 */
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/clk.h>
#include <linux/smc.h>
#include "ufshcd.h"
#include "unipro.h"
#include "mphy.h"
#include "ufshcd-pltfrm.h"
#include "ufs-exynos.h"
#include "ufs-exynos-fmp.h"
#include <soc/samsung/exynos-fsys0-tcxo.h>
#include <soc/samsung/exynos-cpupm.h>
#include <linux/mfd/syscon.h>
#include <linux/regmap.h>
#include <linux/soc/samsung/exynos-soc.h>
#include <linux/spinlock.h>

/*
 * Unipro attribute values
 */
#define TXTRAILINGCLOCKS	0x10
#define TACTIVATE_10_USEC	400	/* unit: 10us */

/* Device ID */
#define DEV_ID		0x00
#define PEER_DEV_ID	0x01
#define PEER_CPORT_ID	0x00
#define TRAFFIC_CLASS	0x00

#define IATOVAL_NSEC	20000	/* unit: ns */

/* UFS CAL interface */

/*
 * Debugging information, SFR/attributes/misc
 */
static struct exynos_ufs *ufs_host_backup[1];
static int ufs_host_index = 0;
static spinlock_t fsys0_tcxo_lock;

static struct exynos_ufs_sfr_log ufs_log_std_sfr[] = {
	{"CAPABILITIES",		REG_CONTROLLER_CAPABILITIES,	0},
	{"UFS VERSION",			REG_UFS_VERSION,		0},
	{"PRODUCT ID",			REG_CONTROLLER_DEV_ID,		0},
	{"MANUFACTURER ID",		REG_CONTROLLER_PROD_ID,		0},
	{"INTERRUPT STATUS",		REG_INTERRUPT_STATUS,		0},
	{"INTERRUPT ENABLE",		REG_INTERRUPT_ENABLE,		0},
	{"CONTROLLER STATUS",		REG_CONTROLLER_STATUS,		0},
	{"CONTROLLER ENABLE",		REG_CONTROLLER_ENABLE,		0},
	{"UTP TRANSF REQ INT AGG CNTRL",	REG_UTP_TRANSFER_REQ_INT_AGG_CONTROL,	0},
	{"UTP TRANSF REQ LIST BASE L",	REG_UTP_TRANSFER_REQ_LIST_BASE_L,	0},
	{"UTP TRANSF REQ LIST BASE H",	REG_UTP_TRANSFER_REQ_LIST_BASE_H,	0},
	{"UTP TRANSF REQ DOOR BELL",	REG_UTP_TRANSFER_REQ_DOOR_BELL,		0},
	{"UTP TRANSF REQ LIST CLEAR",	REG_UTP_TRANSFER_REQ_LIST_CLEAR,	0},
	{"UTP TRANSF REQ LIST RUN STOP",	REG_UTP_TRANSFER_REQ_LIST_RUN_STOP,	0},
	{"UTP TASK REQ LIST BASE L",	REG_UTP_TASK_REQ_LIST_BASE_L,		0},
	{"UTP TASK REQ LIST BASE H",	REG_UTP_TASK_REQ_LIST_BASE_H,		0},
	{"UTP TASK REQ DOOR BELL",	REG_UTP_TASK_REQ_DOOR_BELL,		0},
	{"UTP TASK REQ LIST CLEAR",	REG_UTP_TASK_REQ_LIST_CLEAR,		0},
	{"UTP TASK REQ LIST RUN STOP",	REG_UTP_TASK_REQ_LIST_RUN_STOP,		0},
	{"UIC COMMAND",			REG_UIC_COMMAND,		0},
	{"UIC COMMAND ARG1",		REG_UIC_COMMAND_ARG_1,		0},
	{"UIC COMMAND ARG2",		REG_UIC_COMMAND_ARG_2,		0},
	{"UIC COMMAND ARG3",		REG_UIC_COMMAND_ARG_3,		0},

	{},
};

/* Helper for UFS CAL interface */
static inline int ufs_init_cal(struct exynos_ufs *ufs, int idx,
					struct platform_device *pdev)
{
	int ret = 0;
	struct device *dev = &pdev->dev;
	struct ufs_cal_param *p = NULL;

	p = devm_kzalloc(dev, sizeof(*p), GFP_KERNEL);
	if (!p) {
		dev_err(ufs->dev, "cannot allocate mem for cal param\n");
		return -ENOMEM;
	}
	ufs->cal_param = p;

	p->host = ufs;
	p->board = 0;	/* TODO: get the board type from a DT node */
	ret = ufs_cal_init(p, idx);
	if (ret != UFS_CAL_NO_ERROR) {
		dev_err(ufs->dev, "ufs_cal_init failed: %d\n", ret);
		return -EPERM;
	}

	return 0;
}

static inline int ufs_pre_link(struct exynos_ufs *ufs)
{
	int ret = 0;
	struct ufs_cal_param *p = ufs->cal_param;

	p->mclk_rate = ufs->mclk_rate;
	p->target_lane = ufs->num_rx_lanes;
	p->available_lane = ufs->num_rx_lanes;

	ret = ufs_cal_pre_link(p);
	if (ret != UFS_CAL_NO_ERROR) {
		dev_err(ufs->dev, "ufs_cal_pre_link failed: %d\n", ret);
		return -EPERM;
	}

	return 0;
}

static inline int ufs_post_link(struct exynos_ufs *ufs)
{
	int ret = 0;

	ret = ufs_cal_post_link(ufs->cal_param);
	if (ret != UFS_CAL_NO_ERROR) {
		dev_err(ufs->dev, "ufs_cal_post_link failed: %d\n", ret);
		return -EPERM;
	}

	return 0;
}

static inline int ufs_pre_gear_change(struct exynos_ufs *ufs,
				struct uic_pwr_mode *pmd)
{
	struct ufs_cal_param *p = ufs->cal_param;
	int ret = 0;

	p->pmd = pmd;
	p->target_lane = pmd->lane;
	ret = ufs_cal_pre_pmc(p);
	if (ret != UFS_CAL_NO_ERROR) {
		dev_err(ufs->dev, "ufs_cal_pre_pmc failed: %d\n", ret);
		return -EPERM;
	}

	return 0;
}

static inline int ufs_post_gear_change(struct exynos_ufs *ufs)
{
	int ret = 0;

	ret = ufs_cal_post_pmc(ufs->cal_param);
	if (ret != UFS_CAL_NO_ERROR) {
		dev_err(ufs->dev, "ufs_cal_post_pmc failed: %d\n", ret);
		return -EPERM;
	}

	return 0;
}

static inline int ufs_post_h8_enter(struct exynos_ufs *ufs)
{
	int ret = 0;

	ret = ufs_cal_post_h8_enter(ufs->cal_param);
	if (ret != UFS_CAL_NO_ERROR) {
		dev_err(ufs->dev, "ufs_cal_post_h8_enter failed: %d\n", ret);
		return -EPERM;
	}

	return 0;
}

static inline int ufs_pre_h8_exit(struct exynos_ufs *ufs)
{
	int ret = 0;

	ret = ufs_cal_pre_h8_exit(ufs->cal_param);
	if (ret != UFS_CAL_NO_ERROR) {
		dev_err(ufs->dev, "ufs_cal_pre_h8_exit failed: %d\n", ret);
		return -EPERM;
	}

	return 0;
}

/* Adaptor for UFS CAL */
void ufs_lld_dme_set(void *h, u32 addr, u32 val)
{
	ufshcd_dme_set(((struct exynos_ufs *)h)->hba, addr, val);
}

void ufs_lld_dme_get(void *h, u32 addr, u32 *val)
{
	ufshcd_dme_get(((struct exynos_ufs *)h)->hba, addr, val);
}

void ufs_lld_dme_peer_set(void *h, u32 addr, u32 val)
{
	ufshcd_dme_peer_set(((struct exynos_ufs *)h)->hba, addr, val);
}

void ufs_lld_pma_write(void *h, u32 val, u32 addr)
{
	phy_pma_writel((struct exynos_ufs *)h, val, addr);
}

u32 ufs_lld_pma_read(void *h, u32 addr)
{
	return phy_pma_readl((struct exynos_ufs *)h, addr);
}

void ufs_lld_unipro_write(void *h, u32 val, u32 addr)
{
	unipro_writel((struct exynos_ufs *)h, val, addr);
}

void ufs_lld_udelay(u32 val)
{
	udelay(val);
}

void ufs_lld_usleep_delay(u32 min, u32 max)
{
	usleep_range(min, max);
}

unsigned long ufs_lld_get_time_count(unsigned long offset)
{
	/* offset is unused; the CAL code works directly with jiffies */
	return jiffies;
}

unsigned long ufs_lld_calc_timeout(const unsigned int ms)
{
	return msecs_to_jiffies(ms);
}
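
/*
 * Sketch of how the CAL layer is expected to combine the two timing
 * helpers above (assumed usage, for illustration only; the CAL
 * sources are not part of this file):
 *
 *	unsigned long end = ufs_lld_get_time_count(0) +
 *			    ufs_lld_calc_timeout(100);
 *	while (!done && time_before(jiffies, end))
 *		ufs_lld_udelay(10);
 */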

static inline void exynos_ufs_ctrl_phy_pwr(struct exynos_ufs *ufs, bool en)
{
	int ret = 0;

	if (en)
		ret = regmap_update_bits(ufs->pmureg, ufs->cxt_iso.offset,
				ufs->cxt_iso.mask, ufs->cxt_iso.val);
	else
		ret = regmap_update_bits(ufs->pmureg, ufs->cxt_iso.offset,
				ufs->cxt_iso.mask, 0);

	if (ret)
		dev_err(ufs->dev, "Unable to update PHY ISO control\n");
}

#ifndef __EXYNOS_UFS_VS_DEBUG__
static void exynos_ufs_dump_std_sfr(struct ufs_hba *hba)
{
	struct exynos_ufs *ufs = to_exynos_ufs(hba);
	struct exynos_ufs_sfr_log *cfg = ufs->debug.std_sfr;

	dev_err(hba->dev, ": ---------------------------------------------------\n");
	dev_err(hba->dev, ": \t\tREGISTER DUMP\n");
	dev_err(hba->dev, ": ---------------------------------------------------\n");

	while (cfg) {
		if (!cfg->name)
			break;
		cfg->val = ufshcd_readl(hba, cfg->offset);

		/* Dump */
		dev_err(hba->dev, ": %s(0x%04x):\t\t\t\t0x%08x\n",
				cfg->name, cfg->offset, cfg->val);

		/* Next SFR */
		cfg++;
	}
}
#endif

/*
 * Exynos debugging main function
 */
static void exynos_ufs_dump_debug_info(struct ufs_hba *hba)
{
#ifdef __EXYNOS_UFS_VS_DEBUG__
	exynos_ufs_get_uic_info(hba);
#else
	exynos_ufs_dump_std_sfr(hba);
#endif
}

static void exynos_ufs_select_refclk(struct exynos_ufs *ufs, bool en)
{
	u32 reg;

	if (ufs->hw_rev != UFS_VER_0004)
		return;

	/*
	 * true : alternative clock path, false : active clock path
	 */
	reg = hci_readl(ufs, HCI_MPHY_REFCLK_SEL);
	if (en)
		hci_writel(ufs, reg | MPHY_REFCLK_SEL, HCI_MPHY_REFCLK_SEL);
	else
		hci_writel(ufs, reg & ~MPHY_REFCLK_SEL, HCI_MPHY_REFCLK_SEL);
}

inline void exynos_ufs_set_hwacg_control(struct exynos_ufs *ufs, bool en)
{
	u32 reg;

	if ((ufs->hw_rev != UFS_VER_0004) && (ufs->hw_rev != UFS_VER_0005))
		return;

	/*
	 * The reset default changed from 1 to 0 on KC, so HWACG has to
	 * be disabled explicitly (by writing 1) during UFS init.
	 */
	reg = hci_readl(ufs, HCI_UFS_ACG_DISABLE);
	if (en)
		hci_writel(ufs, reg & (~HCI_UFS_ACG_DISABLE_EN), HCI_UFS_ACG_DISABLE);
	else
		hci_writel(ufs, reg | HCI_UFS_ACG_DISABLE_EN, HCI_UFS_ACG_DISABLE);
}

inline void exynos_ufs_ctrl_auto_hci_clk(struct exynos_ufs *ufs, bool en)
{
	u32 reg = hci_readl(ufs, HCI_FORCE_HCS);

	if (en)
		hci_writel(ufs, reg | HCI_CORECLK_STOP_EN, HCI_FORCE_HCS);
	else
		hci_writel(ufs, reg & ~HCI_CORECLK_STOP_EN, HCI_FORCE_HCS);
}

static inline void exynos_ufs_ctrl_clk(struct exynos_ufs *ufs, bool en)
{
	u32 reg = hci_readl(ufs, HCI_FORCE_HCS);

	if (en)
		hci_writel(ufs, reg | CLK_STOP_CTRL_EN_ALL, HCI_FORCE_HCS);
	else
		hci_writel(ufs, reg & ~CLK_STOP_CTRL_EN_ALL, HCI_FORCE_HCS);
}

static inline void exynos_ufs_gate_clk(struct exynos_ufs *ufs, bool en)
{
	u32 reg = hci_readl(ufs, HCI_CLKSTOP_CTRL);

	if (en)
		hci_writel(ufs, reg | CLK_STOP_ALL, HCI_CLKSTOP_CTRL);
	else
		hci_writel(ufs, reg & ~CLK_STOP_ALL, HCI_CLKSTOP_CTRL);
}

static void exynos_ufs_set_unipro_mclk(struct exynos_ufs *ufs)
{
	ufs->mclk_rate = (u32)clk_get_rate(ufs->clk_unipro);
}

static void exynos_ufs_fit_aggr_timeout(struct exynos_ufs *ufs)
{
	u32 cnt_val;
	unsigned long val;

	/* IA_TICK_SEL : 1 (1us_TO_CNT_VAL) */
	val = hci_readl(ufs, HCI_UFSHCI_V2P1_CTRL);
	val |= IA_TICK_SEL;
	hci_writel(ufs, val, HCI_UFSHCI_V2P1_CTRL);

	cnt_val = ufs->mclk_rate / 1000000;
	hci_writel(ufs, cnt_val & CNT_VAL_1US_MASK, HCI_1US_TO_CNT_VAL);
}
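
/*
 * Worked example (assumed numbers, for illustration only): with a
 * UNIPRO mclk of 166 MHz, cnt_val = 166000000 / 1000000 = 166, so
 * HCI_1US_TO_CNT_VAL holds the number of mclk ticks per microsecond.
 * With IA_TICK_SEL set, the controller uses this as the time base
 * for its interrupt aggregation counters.
 */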

static void exynos_ufs_init_pmc_req(struct ufs_hba *hba,
				struct ufs_pa_layer_attr *pwr_max,
				struct ufs_pa_layer_attr *pwr_req)
{
	struct exynos_ufs *ufs = to_exynos_ufs(hba);
	struct uic_pwr_mode *req_pmd = &ufs->req_pmd_parm;
	struct uic_pwr_mode *act_pmd = &ufs->act_pmd_parm;

	/* update lane variable after link */
	ufs->num_rx_lanes = pwr_max->lane_rx;
	ufs->num_tx_lanes = pwr_max->lane_tx;

	pwr_req->gear_rx
		= act_pmd->gear = min_t(u8, pwr_max->gear_rx, req_pmd->gear);
	pwr_req->gear_tx
		= act_pmd->gear = min_t(u8, pwr_max->gear_tx, req_pmd->gear);
	pwr_req->lane_rx
		= act_pmd->lane = min_t(u8, pwr_max->lane_rx, req_pmd->lane);
	pwr_req->lane_tx
		= act_pmd->lane = min_t(u8, pwr_max->lane_tx, req_pmd->lane);
	pwr_req->pwr_rx = act_pmd->mode = req_pmd->mode;
	pwr_req->pwr_tx = act_pmd->mode = req_pmd->mode;
	pwr_req->hs_rate = act_pmd->hs_series = req_pmd->hs_series;
}
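
/*
 * Example (hypothetical values): if the link reports a maximum of
 * gear 4 on 2 lanes while the DT requested gear 3 on 1 lane, the
 * request above becomes the element-wise minimum, i.e. gear 3 on
 * 1 lane, while mode and HS series are taken verbatim from the DT
 * request.
 */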

static void exynos_ufs_config_intr(struct exynos_ufs *ufs, u32 errs, u8 index)
{
	switch (index) {
	case UNIP_PA_LYR:
		hci_writel(ufs, DFES_ERR_EN | errs, HCI_ERROR_EN_PA_LAYER);
		break;
	case UNIP_DL_LYR:
		hci_writel(ufs, DFES_ERR_EN | errs, HCI_ERROR_EN_DL_LAYER);
		break;
	case UNIP_N_LYR:
		hci_writel(ufs, DFES_ERR_EN | errs, HCI_ERROR_EN_N_LAYER);
		break;
	case UNIP_T_LYR:
		hci_writel(ufs, DFES_ERR_EN | errs, HCI_ERROR_EN_T_LAYER);
		break;
	case UNIP_DME_LYR:
		hci_writel(ufs, DFES_ERR_EN | errs, HCI_ERROR_EN_DME_LAYER);
		break;
	}
}

static void exynos_ufs_dev_hw_reset(struct ufs_hba *hba)
{
	struct exynos_ufs *ufs = to_exynos_ufs(hba);

	/* bit[0] drives device resetn: assert, wait, deassert */
	hci_writel(ufs, 0 << 0, HCI_GPIO_OUT);
	udelay(5);
	hci_writel(ufs, 1 << 0, HCI_GPIO_OUT);
}

static void exynos_ufs_init_host(struct exynos_ufs *ufs)
{
	u32 reg;

	/* internal clock control */
	exynos_ufs_ctrl_auto_hci_clk(ufs, false);
	exynos_ufs_set_unipro_mclk(ufs);

	/* period for interrupt aggregation */
	exynos_ufs_fit_aggr_timeout(ufs);

	/* misc HCI configurations */
	hci_writel(ufs, 0xA, HCI_DATA_REORDER);
	hci_writel(ufs, PRDT_PREFECT_EN | PRDT_SET_SIZE(12),
			HCI_TXPRDT_ENTRY_SIZE);
	hci_writel(ufs, PRDT_SET_SIZE(12), HCI_RXPRDT_ENTRY_SIZE);
	hci_writel(ufs, 0xFFFFFFFF, HCI_UTRL_NEXUS_TYPE);
	hci_writel(ufs, 0xFFFFFFFF, HCI_UTMRL_NEXUS_TYPE);

	reg = hci_readl(ufs, HCI_AXIDMA_RWDATA_BURST_LEN) &
			~BURST_LEN(0);
	hci_writel(ufs, WLU_EN | BURST_LEN(3),
			HCI_AXIDMA_RWDATA_BURST_LEN);

	/*
	 * Enable HWACG control by IOP
	 *
	 * The reset default changed from 1 to 0 on KC; keep it at "0"
	 * (controlled by UFS_ACG_DISABLE).
	 */
	reg = hci_readl(ufs, HCI_IOP_ACG_DISABLE);
	hci_writel(ufs, reg & (~HCI_IOP_ACG_DISABLE_EN), HCI_IOP_ACG_DISABLE);
}

static void exynos_ufs_pre_hibern8(struct ufs_hba *hba, u8 enter)
{
}

static void exynos_ufs_post_hibern8(struct ufs_hba *hba, u8 enter)
{
	struct exynos_ufs *ufs = to_exynos_ufs(hba);

	if (!enter) {
		struct uic_pwr_mode *act_pmd = &ufs->act_pmd_parm;
		u32 mode = 0;

		ufshcd_dme_get(hba, UIC_ARG_MIB(PA_PWRMODE), &mode);
		if (mode != (act_pmd->mode << 4 | act_pmd->mode)) {
			dev_warn(hba->dev, "%s: power mode not matched, mode : 0x%x, act_mode : 0x%x\n",
					__func__, mode, act_pmd->mode);
			hba->pwr_info.pwr_rx = (mode >> 4) & 0xf;
			hba->pwr_info.pwr_tx = mode & 0xf;
			ufshcd_config_pwr_mode(hba, &hba->max_pwr_info.info);
		}
	}
}
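
/*
 * PA_PWRMODE packs the RX mode into bits [7:4] and the TX mode into
 * bits [3:0], which is exactly how the readback above is split; e.g.
 * FAST_MODE (1) in both directions reads back as 0x11. That is why
 * the check compares against (act_pmd->mode << 4 | act_pmd->mode).
 */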

static int exynos_ufs_init_system(struct exynos_ufs *ufs)
{
	struct device *dev = ufs->dev;
	int ret = 0;
	bool is_io_coherency;
	bool is_dma_coherent;

	/* PHY isolation bypass */
	exynos_ufs_ctrl_phy_pwr(ufs, true);

	/* IO coherency */
	is_io_coherency = !IS_ERR(ufs->sysreg);
	is_dma_coherent = !!of_find_property(dev->of_node,
						"dma-coherent", NULL);

	if (is_io_coherency != is_dma_coherent)
		BUG();

	if (!is_io_coherency)
		dev_err(dev, "Not configured to use IO coherency\n");
	else
		ret = regmap_update_bits(ufs->sysreg, ufs->cxt_coherency.offset,
			ufs->cxt_coherency.mask, ufs->cxt_coherency.val);

	return ret;
}

static int exynos_ufs_get_clks(struct ufs_hba *hba)
{
	struct exynos_ufs *ufs = to_exynos_ufs(hba);
	struct list_head *head = &hba->clk_list_head;
	struct ufs_clk_info *clki;
	int i = 0;

	ufs_host_backup[ufs_host_index++] = ufs;
	ufs->debug.std_sfr = ufs_log_std_sfr;

	if (!head || list_empty(head))
		goto out;

	list_for_each_entry(clki, head, list) {
		/*
		 * get the clocks in the order listed in the device tree
		 */
		if (i == 0)
			ufs->clk_hci = clki->clk;
		else if (i == 1)
			ufs->clk_unipro = clki->clk;

		i++;
	}
out:
	if (!ufs->clk_hci || !ufs->clk_unipro)
		return -EINVAL;

	return 0;
}
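
/*
 * The loop above relies purely on ordering: index 0 must be the HCI
 * clock and index 1 the UNIPRO clock. A matching DT fragment could
 * look like the following (the clock names are illustrative
 * assumptions, not taken from a real board file):
 *
 *	clocks = <&clock CLK_GATE_UFS_EMBD>, <&clock CLK_UFS_UNIPRO>;
 *	clock-names = "ufs-hci", "ufs-unipro";
 */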

static void exynos_ufs_set_features(struct ufs_hba *hba, u32 hw_rev)
{
	/* caps */
	hba->caps = UFSHCD_CAP_CLK_GATING |
			UFSHCD_CAP_HIBERN8_WITH_CLK_GATING |
			UFSHCD_CAP_INTR_AGGR;

	/* quirks of common driver */
	hba->quirks = UFSHCD_QUIRK_PRDT_BYTE_GRAN |
			UFSHCI_QUIRK_SKIP_INTR_AGGR |
			UFSHCD_QUIRK_UNRESET_INTR_AGGR |
			UFSHCD_QUIRK_BROKEN_REQ_LIST_CLR;

	hba->quirks |= UFSHCD_QUIRK_GET_UPMCRS_DIRECT |
			UFSHCD_QUIRK_GET_GENERRCODE_DIRECT;

	/* quirks of exynos-specific driver */
}

/*
 * Exynos-specific callback functions
 *
 * init			| Pure SW init & system-related init
 * host_reset		| Host SW reset & init
 * pre_setup_clocks	| specific power down
 * setup_clocks		| specific power up
 * ...
 *
 * Initializations for software, host controller and system
 * should be contained in ->host_reset() as far as possible.
 */

static int exynos_ufs_init(struct ufs_hba *hba)
{
	struct exynos_ufs *ufs = to_exynos_ufs(hba);
	int ret;
	int id;

	/* set features, such as caps or quirks */
	exynos_ufs_set_features(hba, ufs->hw_rev);

	/* get some clock sources and debug information structures */
	ret = exynos_ufs_get_clks(hba);
	if (ret)
		return ret;

	/* system init */
	ret = exynos_ufs_init_system(ufs);
	if (ret)
		return ret;

	/* get fmp & smu id */
	ret = of_property_read_u32(ufs->dev->of_node, "fmp-id", &id);
	if (ret)
		ufs->fmp = SMU_ID_MAX;
	else
		ufs->fmp = id;

	ret = of_property_read_u32(ufs->dev->of_node, "smu-id", &id);
	if (ret)
		ufs->smu = SMU_ID_MAX;
	else
		ufs->smu = id;

	/* FMPSECURITY & SMU */
	exynos_ufs_fmp_sec_cfg(ufs);
	exynos_ufs_smu_init(ufs);

	/* Enable log */
	ret = exynos_ufs_init_dbg(hba);
	if (ret)
		return ret;

	ufs->misc_flags = EXYNOS_UFS_MISC_TOGGLE_LOG;

	return 0;
}

static void exynos_ufs_host_reset(struct ufs_hba *hba)
{
	struct exynos_ufs *ufs = to_exynos_ufs(hba);
	unsigned long timeout = jiffies + msecs_to_jiffies(1);

	exynos_ufs_ctrl_auto_hci_clk(ufs, false);

	hci_writel(ufs, UFS_SW_RST_MASK, HCI_SW_RST);

	do {
		if (!(hci_readl(ufs, HCI_SW_RST) & UFS_SW_RST_MASK))
			goto success;
	} while (time_before(jiffies, timeout));

	dev_err(ufs->dev, "timeout on host sw-reset\n");

	exynos_ufs_dump_uic_info(hba);

	goto out;

success:
	/* host init */
	exynos_ufs_init_host(ufs);

	/* device reset */
	exynos_ufs_dev_hw_reset(hba);

	/* secure log */
#ifdef CONFIG_EXYNOS_SMC_LOGGING
	exynos_smc(SMC_CMD_UFS_LOG, 0, 0, 0);
#endif
out:
	return;
}

static inline void exynos_ufs_dev_reset_ctrl(struct exynos_ufs *ufs, bool en)
{
	if (en)
		hci_writel(ufs, 1 << 0, HCI_GPIO_OUT);
	else
		hci_writel(ufs, 0 << 0, HCI_GPIO_OUT);
}

static void exynos_ufs_tcxo_ctrl(struct exynos_ufs *ufs, bool tcxo_on)
{
	unsigned int val;
	int ret;

	ret = regmap_read(ufs->pmureg, ufs->cxt_iso.offset, &val);

	if (tcxo_on)
		val |= (1 << 16);
	else
		val &= ~(1 << 16);

	if (!ret)
		ret = regmap_write(ufs->pmureg, ufs->cxt_iso.offset, val);

	if (ret)
		dev_err(ufs->dev, "Unable to access the pmureg using regmap\n");
}

static bool tcxo_used_by[OWNER_MAX];

static int exynos_check_shared_resource(int owner)
{
	if (owner == OWNER_FIRST)
		return tcxo_used_by[OWNER_SECOND];
	else
		return tcxo_used_by[OWNER_FIRST];
}

static bool exynos_use_shared_resource(int owner, bool use)
{
	tcxo_used_by[owner] = use;

	return exynos_check_shared_resource(owner);
}
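
/*
 * Ownership handshake, in words: each owner records whether it still
 * needs the TCXO, then reads back the *other* owner's flag. A caller
 * only touches the clock when the peer is not using it, so the last
 * user to drop its flag performs the actual gating. This is why the
 * call sites below compare the return value against `on`.
 */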

static int exynos_ufs_pre_setup_clocks(struct ufs_hba *hba, bool on)
{
	struct exynos_ufs *ufs = to_exynos_ufs(hba);
	int ret = 0;
	unsigned long flags;

	if (on) {
#ifdef CONFIG_ARM64_EXYNOS_CPUIDLE
		exynos_update_ip_idle_status(ufs->idle_ip_index, 0);
#endif

		if (ufs->tcxo_ex_ctrl) {
			spin_lock_irqsave(&fsys0_tcxo_lock, flags);
			if (exynos_use_shared_resource(OWNER_FIRST, on) == !on)
				exynos_ufs_tcxo_ctrl(ufs, true);
			spin_unlock_irqrestore(&fsys0_tcxo_lock, flags);
		}

		/*
		 * Keep the blocks that are in use from being turned off
		 * in the host.
		 */
		exynos_ufs_ctrl_auto_hci_clk(ufs, false);
		exynos_ufs_gate_clk(ufs, false);

		/* HWACG disable */
		exynos_ufs_set_hwacg_control(ufs, false);
	} else {
		pm_qos_update_request(&ufs->pm_qos_int, 0);
		pm_qos_update_request(&ufs->pm_qos_fsys0, 0);

		/*
		 * BG/SQ off
		 */
		ret = ufs_post_h8_enter(ufs);
	}

	return ret;
}

static int exynos_ufs_setup_clocks(struct ufs_hba *hba, bool on)
{
	struct exynos_ufs *ufs = to_exynos_ufs(hba);
	int ret = 0;
	unsigned long flags;

	if (on) {
		/*
		 * BG/SQ on
		 */
		ret = ufs_pre_h8_exit(ufs);

		pm_qos_update_request(&ufs->pm_qos_int, ufs->pm_qos_int_value);
		pm_qos_update_request(&ufs->pm_qos_fsys0, ufs->pm_qos_fsys0_value);
	} else {
		/*
		 * Now the blocks that are in use may be turned off in
		 * the host.
		 */
		//exynos_ufs_gate_clk(ufs, true);
		exynos_ufs_ctrl_auto_hci_clk(ufs, true);

		/* HWACG enable */
		exynos_ufs_set_hwacg_control(ufs, true);

		if (ufs->tcxo_ex_ctrl) {
			spin_lock_irqsave(&fsys0_tcxo_lock, flags);
			if (exynos_use_shared_resource(OWNER_FIRST, on) == on)
				exynos_ufs_tcxo_ctrl(ufs, false);
			spin_unlock_irqrestore(&fsys0_tcxo_lock, flags);
		}

#ifdef CONFIG_ARM64_EXYNOS_CPUIDLE
		exynos_update_ip_idle_status(ufs->idle_ip_index, 1);
#endif
	}

	return ret;
}

static int exynos_ufs_link_startup_notify(struct ufs_hba *hba,
					enum ufs_notify_change_status status)
{
	struct exynos_ufs *ufs = to_exynos_ufs(hba);
	int ret = 0;

	switch (status) {
	case PRE_CHANGE:
		/* refer to hba */
		ufs->hba = hba;

		/* hci */
		exynos_ufs_config_intr(ufs, DFES_DEF_DL_ERRS, UNIP_DL_LYR);
		exynos_ufs_config_intr(ufs, DFES_DEF_N_ERRS, UNIP_N_LYR);
		exynos_ufs_config_intr(ufs, DFES_DEF_T_ERRS, UNIP_T_LYR);

		exynos_ufs_ctrl_clk(ufs, true);
		exynos_ufs_select_refclk(ufs, true);
		exynos_ufs_gate_clk(ufs, false);
		exynos_ufs_set_hwacg_control(ufs, false);

		if (ufs->num_rx_lanes == 0 || ufs->num_tx_lanes == 0) {
			ufshcd_dme_get(hba, UIC_ARG_MIB(PA_AVAILRXDATALANES),
					&ufs->num_rx_lanes);
			ufshcd_dme_get(hba, UIC_ARG_MIB(PA_AVAILTXDATALANES),
					&ufs->num_tx_lanes);
			WARN(ufs->num_rx_lanes != ufs->num_tx_lanes,
				"available data lanes are not equal (rx:%d, tx:%d)\n",
				ufs->num_rx_lanes, ufs->num_tx_lanes);
		}

		ufs->mclk_rate = clk_get_rate(ufs->clk_unipro);

		ret = ufs_pre_link(ufs);
		break;
	case POST_CHANGE:
		/* UIC configuration table after link startup */
		ret = ufs_post_link(ufs);
		break;
	default:
		break;
	}

	return ret;
}

static int exynos_ufs_pwr_change_notify(struct ufs_hba *hba,
					enum ufs_notify_change_status status,
					struct ufs_pa_layer_attr *pwr_max,
					struct ufs_pa_layer_attr *pwr_req)
{
	struct exynos_ufs *ufs = to_exynos_ufs(hba);
	struct uic_pwr_mode *act_pmd = &ufs->act_pmd_parm;
	int ret = 0;

	switch (status) {
	case PRE_CHANGE:
		/* Set PMC parameters to be requested */
		exynos_ufs_init_pmc_req(hba, pwr_max, pwr_req);

		/* UIC configuration table before power mode change */
		ret = ufs_pre_gear_change(ufs, act_pmd);
		break;
	case POST_CHANGE:
		/* UIC configuration table after power mode change */
		ret = ufs_post_gear_change(ufs);

		dev_info(ufs->dev,
			"Power mode change(%d): M(%d)G(%d)L(%d)HS-series(%d)\n",
			ret, act_pmd->mode, act_pmd->gear,
			act_pmd->lane, act_pmd->hs_series);
		break;
	default:
		break;
	}

	return ret;
}

static void exynos_ufs_set_nexus_t_xfer_req(struct ufs_hba *hba,
				int tag, struct scsi_cmnd *cmd)
{
	struct exynos_ufs *ufs = to_exynos_ufs(hba);
	u32 type;

	type = hci_readl(ufs, HCI_UTRL_NEXUS_TYPE);

	if (cmd)
		type |= (1 << tag);
	else
		type &= ~(1 << tag);

	hci_writel(ufs, type, HCI_UTRL_NEXUS_TYPE);
}
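
/*
 * Example (illustrative): a SCSI command issued on tag 5 sets bit 5
 * of HCI_UTRL_NEXUS_TYPE, marking that slot as a command (nexus)
 * transfer; when the slot is reused without a command the bit is
 * cleared again. The task-management variant below applies the same
 * per-tag bitmap, keyed on the TM function instead.
 */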

static void exynos_ufs_set_nexus_t_task_mgmt(struct ufs_hba *hba, int tag, u8 tm_func)
{
	struct exynos_ufs *ufs = to_exynos_ufs(hba);
	u32 type;

	type = hci_readl(ufs, HCI_UTMRL_NEXUS_TYPE);

	switch (tm_func) {
	case UFS_ABORT_TASK:
	case UFS_QUERY_TASK:
		type |= (1 << tag);
		break;
	case UFS_ABORT_TASK_SET:
	case UFS_CLEAR_TASK_SET:
	case UFS_LOGICAL_RESET:
	case UFS_QUERY_TASK_SET:
		type &= ~(1 << tag);
		break;
	}

	hci_writel(ufs, type, HCI_UTMRL_NEXUS_TYPE);
}

static void exynos_ufs_hibern8_notify(struct ufs_hba *hba,
				u8 enter, bool notify)
{
	int noti = (int)notify;

	switch (noti) {
	case PRE_CHANGE:
		exynos_ufs_pre_hibern8(hba, enter);
		break;
	case POST_CHANGE:
		exynos_ufs_post_hibern8(hba, enter);
		break;
	default:
		break;
	}
}

static int __exynos_ufs_suspend(struct ufs_hba *hba, enum ufs_pm_op pm_op)
{
	struct exynos_ufs *ufs = to_exynos_ufs(hba);

	pm_qos_update_request(&ufs->pm_qos_int, 0);
	pm_qos_update_request(&ufs->pm_qos_fsys0, 0);

	exynos_ufs_dev_reset_ctrl(ufs, false);

	exynos_ufs_ctrl_phy_pwr(ufs, false);

	return 0;
}

static int __exynos_ufs_resume(struct ufs_hba *hba, enum ufs_pm_op pm_op)
{
	struct exynos_ufs *ufs = to_exynos_ufs(hba);
	int ret = 0;

	exynos_ufs_ctrl_phy_pwr(ufs, true);

	/* system init */
	ret = exynos_ufs_init_system(ufs);
	if (ret)
		return ret;

	if (ufshcd_is_clkgating_allowed(hba))
		clk_prepare_enable(ufs->clk_hci);
	exynos_ufs_ctrl_auto_hci_clk(ufs, false);

	/* FMPSECURITY & SMU resume */
	exynos_ufs_fmp_sec_cfg(ufs);
	exynos_ufs_smu_resume(ufs);

	/* secure log */
#ifdef CONFIG_EXYNOS_SMC_LOGGING
	exynos_smc(SMC_CMD_UFS_LOG, 0, 0, 0);
#endif

	if (ufshcd_is_clkgating_allowed(hba))
		clk_disable_unprepare(ufs->clk_hci);

	return 0;
}

static u8 exynos_ufs_get_unipro_direct(struct ufs_hba *hba, u32 num)
{
	const u32 offset[] = {
		UNIP_DME_LINKSTARTUP_CNF_RESULT,
		UNIP_DME_HIBERN8_ENTER_CNF_RESULT,
		UNIP_DME_HIBERN8_EXIT_CNF_RESULT,
		UNIP_DME_PWR_IND_RESULT,
		UNIP_DME_HIBERN8_ENTER_IND_RESULT,
		UNIP_DME_HIBERN8_EXIT_IND_RESULT,
	};
	struct exynos_ufs *ufs = to_exynos_ufs(hba);

	return unipro_readl(ufs, offset[num]);
}
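
/*
 * The `num` argument indexes the table above; e.g. (per the order of
 * the entries) 0 reads the link-startup confirmation result and 3
 * the power-mode indication result. There is no bounds check, so
 * callers must pass a value below the six entries listed.
 */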

static int exynos_ufs_crypto_engine_cfg(struct ufs_hba *hba,
				struct ufshcd_lrb *lrbp,
				struct scatterlist *sg, int index,
				int sector_offset)
{
	return exynos_ufs_fmp_cfg(hba, lrbp, sg, index, sector_offset);
}

static int exynos_ufs_crypto_engine_clear(struct ufs_hba *hba,
				struct ufshcd_lrb *lrbp)
{
	return exynos_ufs_fmp_clear(hba, lrbp);
}

static int exynos_ufs_access_control_abort(struct ufs_hba *hba)
{
	struct exynos_ufs *ufs = to_exynos_ufs(hba);

	return exynos_ufs_smu_abort(ufs);
}

static struct ufs_hba_variant_ops exynos_ufs_ops = {
	.init = exynos_ufs_init,
	.host_reset = exynos_ufs_host_reset,
	.pre_setup_clocks = exynos_ufs_pre_setup_clocks,
	.setup_clocks = exynos_ufs_setup_clocks,
	.link_startup_notify = exynos_ufs_link_startup_notify,
	.pwr_change_notify = exynos_ufs_pwr_change_notify,
	.set_nexus_t_xfer_req = exynos_ufs_set_nexus_t_xfer_req,
	.set_nexus_t_task_mgmt = exynos_ufs_set_nexus_t_task_mgmt,
	.hibern8_notify = exynos_ufs_hibern8_notify,
	.dbg_register_dump = exynos_ufs_dump_debug_info,
	.suspend = __exynos_ufs_suspend,
	.resume = __exynos_ufs_resume,
	.get_unipro_result = exynos_ufs_get_unipro_direct,
	.crypto_engine_cfg = exynos_ufs_crypto_engine_cfg,
	.crypto_engine_clear = exynos_ufs_crypto_engine_clear,
	.access_control_abort = exynos_ufs_access_control_abort,
};

static int exynos_ufs_populate_dt_phy(struct device *dev, struct exynos_ufs *ufs)
{
	struct device_node *ufs_phy;
	struct exynos_ufs_phy *phy = &ufs->phy;
	struct resource io_res;
	int ret;

	ufs_phy = of_get_child_by_name(dev->of_node, "ufs-phy");
	if (!ufs_phy) {
		dev_err(dev, "failed to get ufs-phy node\n");
		return -ENODEV;
	}

	ret = of_address_to_resource(ufs_phy, 0, &io_res);
	if (ret) {
		dev_err(dev, "failed to get i/o address of phy pma\n");
		goto err_0;
	}

	phy->reg_pma = devm_ioremap_resource(dev, &io_res);
	if (IS_ERR(phy->reg_pma)) {
		/* devm_ioremap_resource() returns an ERR_PTR on failure */
		dev_err(dev, "failed to ioremap for phy pma\n");
		ret = PTR_ERR(phy->reg_pma);
		goto err_0;
	}

err_0:
	of_node_put(ufs_phy);

	return ret;
}

/*
 * Read the offset, mask and value used to access a system register
 * region from the device tree. Returns 1 when the node is absent so
 * that callers can fall back to hard-coded defaults.
 */
static int exynos_ufs_set_context_for_access(struct device *dev,
				const char *name, struct exynos_access_cxt *cxt)
{
	struct device_node *np;
	int ret;

	np = of_get_child_by_name(dev->of_node, name);
	if (!np) {
		dev_err(dev, "failed to get node(%s)\n", name);
		return 1;
	}

	ret = of_property_read_u32(np, "offset", &cxt->offset);
	if (ret) {
		dev_err(dev, "failed to set cxt(%s) offset\n", name);
		return ret;
	}

	ret = of_property_read_u32(np, "mask", &cxt->mask);
	if (ret) {
		dev_err(dev, "failed to set cxt(%s) mask\n", name);
		return ret;
	}

	ret = of_property_read_u32(np, "val", &cxt->val);
	if (ret) {
		dev_err(dev, "failed to set cxt(%s) val\n", name);
		return ret;
	}

	return 0;
}
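
/*
 * A matching child node in the board DT might look like this; the
 * numbers shown are the driver's own fallback defaults for the
 * "ufs-phy-iso" context, reproduced purely as an illustration of the
 * expected shape:
 *
 *	ufs-phy-iso {
 *		offset = <0x0724>;
 *		mask = <0x1>;
 *		val = <0x1>;
 *	};
 */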

static int exynos_ufs_populate_dt_system(struct device *dev, struct exynos_ufs *ufs)
{
	struct device_node *np = dev->of_node;
	int ret;

	/* regmap pmureg */
	ufs->pmureg = syscon_regmap_lookup_by_phandle(dev->of_node,
					"samsung,pmu-phandle");
	if (IS_ERR(ufs->pmureg)) {
		/*
		 * PHY isolation control must be available,
		 * so fail in this case.
		 */
		dev_err(dev, "pmu regmap lookup failed.\n");
		return PTR_ERR(ufs->pmureg);
	}

	/* Set access context for phy isolation bypass */
	ret = exynos_ufs_set_context_for_access(dev, "ufs-phy-iso",
				&ufs->cxt_iso);
	if (ret == 1) {
		/* no device node, fall back to defaults */
		ufs->cxt_iso.offset = 0x0724;
		ufs->cxt_iso.mask = 0x1;
		ufs->cxt_iso.val = 0x1;
		ret = 0;
	}

	/* regmap sysreg */
	ufs->sysreg = syscon_regmap_lookup_by_phandle(dev->of_node,
					"samsung,sysreg-fsys-phandle");
	if (IS_ERR(ufs->sysreg)) {
		/*
		 * Currently, the driver uses sysreg only for IO coherency.
		 * Some architectures might not support the feature, so the
		 * device node may legitimately be missing.
		 */
		dev_err(dev, "sysreg regmap lookup failed.\n");
		return 0;
	}

	/* Set access context for io coherency */
	ret = exynos_ufs_set_context_for_access(dev, "ufs-dma-coherency",
				&ufs->cxt_coherency);
	if (ret == 1) {
		/* no device node, fall back to defaults */
		ufs->cxt_coherency.offset = 0x0700;
		ufs->cxt_coherency.mask = 0x300;	/* bits 8 and 9 */
		ufs->cxt_coherency.val = 0x3;
		ret = 0;
	}

	/* TCXO exclusive control */
	if (of_property_read_u32(np, "tcxo-ex-ctrl", &ufs->tcxo_ex_ctrl))
		ufs->tcxo_ex_ctrl = 1;

	return ret;
}

static int exynos_ufs_get_pwr_mode(struct device_node *np,
				struct exynos_ufs *ufs)
{
	struct uic_pwr_mode *pmd = &ufs->req_pmd_parm;

	pmd->mode = FAST_MODE;

	if (of_property_read_u8(np, "ufs,pmd-attr-lane", &pmd->lane))
		pmd->lane = 1;

	if (of_property_read_u8(np, "ufs,pmd-attr-gear", &pmd->gear))
		pmd->gear = 1;

	pmd->hs_series = PA_HS_MODE_B;

	return 0;
}
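
/*
 * The requested power mode can be tuned from the DT; absent
 * properties fall back to one lane and gear 1, in fast mode with HS
 * series B. An illustrative fragment (values assumed, not from a
 * real board file):
 *
 *	ufs,pmd-attr-lane = /bits/ 8 <2>;
 *	ufs,pmd-attr-gear = /bits/ 8 <3>;
 */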

static int exynos_ufs_populate_dt(struct device *dev, struct exynos_ufs *ufs)
{
	struct device_node *np = dev->of_node;
	int ret;

	/* Get exynos-specific version for featuring */
	if (of_property_read_u32(np, "hw-rev", &ufs->hw_rev))
		ufs->hw_rev = UFS_VER_0004;

	ret = exynos_ufs_populate_dt_phy(dev, ufs);
	if (ret) {
		dev_err(dev, "failed to populate dt-phy\n");
		goto out;
	}

	ret = exynos_ufs_populate_dt_system(dev, ufs);
	if (ret) {
		dev_err(dev, "failed to populate dt-pmu\n");
		goto out;
	}

	exynos_ufs_get_pwr_mode(np, ufs);

	if (of_property_read_u8(np, "brd-for-cal", &ufs->cal_param->board))
		ufs->cal_param->board = 0;

	if (of_property_read_u32(np, "ufs-pm-qos-int", &ufs->pm_qos_int_value))
		ufs->pm_qos_int_value = 0;

	if (of_property_read_u32(np, "ufs-pm-qos-fsys0", &ufs->pm_qos_fsys0_value))
		ufs->pm_qos_fsys0_value = 0;

out:
	return ret;
}

static int exynos_ufs_lp_event(struct notifier_block *nb, unsigned long event, void *data)
{
	struct exynos_ufs *ufs =
		container_of(nb, struct exynos_ufs, tcxo_nb);
	int ret = NOTIFY_DONE;
	bool on = true;
	unsigned long flags;

	spin_lock_irqsave(&fsys0_tcxo_lock, flags);
	switch (event) {
	case SLEEP_ENTER:
		on = false;
		if (exynos_use_shared_resource(OWNER_SECOND, on) == on)
			exynos_ufs_tcxo_ctrl(ufs, false);
		break;
	case SLEEP_EXIT:
		if (exynos_use_shared_resource(OWNER_SECOND, on) == !on)
			exynos_ufs_tcxo_ctrl(ufs, true);
		break;
	}
	spin_unlock_irqrestore(&fsys0_tcxo_lock, flags);

	return ret;
}

static u64 exynos_ufs_dma_mask = DMA_BIT_MASK(32);

static int exynos_ufs_probe(struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;
	struct exynos_ufs *ufs;
	struct resource *res;
	int ret;

	ufs = devm_kzalloc(dev, sizeof(*ufs), GFP_KERNEL);
	if (!ufs) {
		dev_err(dev, "cannot allocate mem for exynos-ufs\n");
		return -ENOMEM;
	}

	/* exynos-specific hci */
	res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
	ufs->reg_hci = devm_ioremap_resource(dev, res);
	if (IS_ERR(ufs->reg_hci)) {
		dev_err(dev, "cannot ioremap for hci vendor register\n");
		return PTR_ERR(ufs->reg_hci);
	}

	/* unipro */
	res = platform_get_resource(pdev, IORESOURCE_MEM, 2);
	ufs->reg_unipro = devm_ioremap_resource(dev, res);
	if (IS_ERR(ufs->reg_unipro)) {
		dev_err(dev, "cannot ioremap for unipro register\n");
		return PTR_ERR(ufs->reg_unipro);
	}

	/* ufs protector */
	res = platform_get_resource(pdev, IORESOURCE_MEM, 3);
	ufs->reg_ufsp = devm_ioremap_resource(dev, res);
	if (IS_ERR(ufs->reg_ufsp)) {
		dev_err(dev, "cannot ioremap for ufs protector register\n");
		return PTR_ERR(ufs->reg_ufsp);
	}

	/* This must be called before exynos_ufs_populate_dt() */
	ret = ufs_init_cal(ufs, ufs_host_index, pdev);
	if (ret)
		return ret;

	ret = exynos_ufs_populate_dt(dev, ufs);
	if (ret) {
		dev_err(dev, "failed to get dt info.\n");
		return ret;
	}

	/*
	 * The pmu node and the tcxo syscon node should be exclusive.
	 */
	if (ufs->tcxo_ex_ctrl) {
		ufs->tcxo_nb.notifier_call = exynos_ufs_lp_event;
		ufs->tcxo_nb.next = NULL;
		ufs->tcxo_nb.priority = 0;

		ret = exynos_fsys0_tcxo_register_notifier(&ufs->tcxo_nb);
		if (ret) {
			dev_err(dev, "failed to register fsys0 tcxo notifier\n");
			return ret;
		}
	}
#ifdef CONFIG_ARM64_EXYNOS_CPUIDLE
	ufs->idle_ip_index = exynos_get_idle_ip_index(dev_name(&pdev->dev));
	exynos_update_ip_idle_status(ufs->idle_ip_index, 0);
#endif

	ufs->dev = dev;
	dev->platform_data = ufs;
	dev->dma_mask = &exynos_ufs_dma_mask;

	pm_qos_add_request(&ufs->pm_qos_int, PM_QOS_DEVICE_THROUGHPUT, 0);
	pm_qos_add_request(&ufs->pm_qos_fsys0, PM_QOS_BUS_THROUGHPUT, 0);
	if (ufs->tcxo_ex_ctrl)
		spin_lock_init(&fsys0_tcxo_lock);

	ret = ufshcd_pltfrm_init(pdev, &exynos_ufs_ops);

	return ret;
}

static int exynos_ufs_remove(struct platform_device *pdev)
{
	struct exynos_ufs *ufs = dev_get_platdata(&pdev->dev);

	ufshcd_pltfrm_exit(pdev);

	pm_qos_remove_request(&ufs->pm_qos_fsys0);
	pm_qos_remove_request(&ufs->pm_qos_int);

	ufs->misc_flags = EXYNOS_UFS_MISC_TOGGLE_LOG;

	exynos_ufs_ctrl_phy_pwr(ufs, false);

	return 0;
}

#ifdef CONFIG_PM_SLEEP
static int exynos_ufs_suspend(struct device *dev)
{
	struct ufs_hba *hba = dev_get_drvdata(dev);

	return ufshcd_system_suspend(hba);
}

static int exynos_ufs_resume(struct device *dev)
{
	struct ufs_hba *hba = dev_get_drvdata(dev);

	return ufshcd_system_resume(hba);
}
#else
#define exynos_ufs_suspend	NULL
#define exynos_ufs_resume	NULL
#endif /* CONFIG_PM_SLEEP */

#ifdef CONFIG_PM_RUNTIME
static int exynos_ufs_runtime_suspend(struct device *dev)
{
	return ufshcd_system_suspend(dev_get_drvdata(dev));
}

static int exynos_ufs_runtime_resume(struct device *dev)
{
	return ufshcd_system_resume(dev_get_drvdata(dev));
}

static int exynos_ufs_runtime_idle(struct device *dev)
{
	return ufshcd_runtime_idle(dev_get_drvdata(dev));
}
#else
#define exynos_ufs_runtime_suspend	NULL
#define exynos_ufs_runtime_resume	NULL
#define exynos_ufs_runtime_idle		NULL
#endif /* CONFIG_PM_RUNTIME */

static void exynos_ufs_shutdown(struct platform_device *pdev)
{
	ufshcd_shutdown((struct ufs_hba *)platform_get_drvdata(pdev));
}

static const struct dev_pm_ops exynos_ufs_dev_pm_ops = {
	.suspend = exynos_ufs_suspend,
	.resume = exynos_ufs_resume,
	.runtime_suspend = exynos_ufs_runtime_suspend,
	.runtime_resume = exynos_ufs_runtime_resume,
	.runtime_idle = exynos_ufs_runtime_idle,
};

static const struct ufs_hba_variant exynos_ufs_drv_data = {
	.ops = &exynos_ufs_ops,
};

static const struct of_device_id exynos_ufs_match[] = {
	{ .compatible = "samsung,exynos-ufs", },
	{},
};
MODULE_DEVICE_TABLE(of, exynos_ufs_match);

static struct platform_driver exynos_ufs_driver = {
	.driver = {
		.name = "exynos-ufs",
		.owner = THIS_MODULE,
		.pm = &exynos_ufs_dev_pm_ops,
		.of_match_table = exynos_ufs_match,
		.suppress_bind_attrs = true,
	},
	.probe = exynos_ufs_probe,
	.remove = exynos_ufs_remove,
	.shutdown = exynos_ufs_shutdown,
};

module_platform_driver(exynos_ufs_driver);
MODULE_DESCRIPTION("Exynos Specific UFSHCI driver");
MODULE_AUTHOR("Seungwon Jeon <tgih.jun@samsung.com>");
MODULE_AUTHOR("Kiwoong Kim <kwmad.kim@samsung.com>");
MODULE_LICENSE("GPL");