2 * Universal Flash Storage Host controller driver Core
4 * This code is based on drivers/scsi/ufs/ufshcd.c
5 * Copyright (C) 2011-2013 Samsung India Software Operations
6 * Copyright (c) 2013-2016, The Linux Foundation. All rights reserved.
9 * Santosh Yaraganavi <santosh.sy@samsung.com>
10 * Vinayak Holikatti <h.vinayak@samsung.com>
12 * This program is free software; you can redistribute it and/or
13 * modify it under the terms of the GNU General Public License
14 * as published by the Free Software Foundation; either version 2
15 * of the License, or (at your option) any later version.
16 * See the COPYING file in the top-level directory or visit
17 * <http://www.gnu.org/licenses/gpl-2.0.html>
19 * This program is distributed in the hope that it will be useful,
20 * but WITHOUT ANY WARRANTY; without even the implied warranty of
21 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
22 * GNU General Public License for more details.
24 * This program is provided "AS IS" and "WITH ALL FAULTS" and
25 * without warranty of any kind. You are solely responsible for
26 * determining the appropriateness of using and distributing
27 * the program and assume all risks associated with your exercise
28 * of rights with respect to the program, including but not limited
29 * to infringement of third party rights, the risks and costs of
30 * program errors, damage to or loss of data, programs or equipment,
31 * and unavailability or interruption of operations. Under no
32 * circumstances will the contributor of this Program be liable for
33 * any damages of any kind arising from your use or distribution of
36 * The Linux Foundation chooses to take subject only to the GPLv2
37 * license terms, and distributes only under these terms.
40 #include <linux/async.h>
41 #if defined(CONFIG_PM_DEVFREQ)
42 #include <linux/devfreq.h>
44 #include <linux/nls.h>
45 #include <linux/smc.h>
46 #include <scsi/ufs/ioctl.h>
48 #include <linux/blkdev.h>
49 #include <linux/gpio.h>
52 #include "ufs_quirks.h"
54 #include "ufs-exynos.h"
55 #include "ufs_quirks.h"
57 #define CREATE_TRACE_POINTS
58 #include <trace/events/ufs.h>
60 #define UFSHCD_REQ_SENSE_SIZE 18
62 #define UFSHCD_ENABLE_INTRS (UTP_TRANSFER_REQ_COMPL |\
65 /* UIC command timeout, unit: ms */
66 #define UIC_CMD_TIMEOUT 500
68 /* NOP OUT retries waiting for NOP IN response */
69 #define NOP_OUT_RETRIES 10
70 /* Timeout after 30 msecs if NOP OUT hangs without response */
71 #define NOP_OUT_TIMEOUT 30 /* msecs */
73 /* Query request retries */
74 #define QUERY_REQ_RETRIES 3
75 /* Query request timeout */
76 #define QUERY_REQ_TIMEOUT 1500 /* 1.5 seconds */
78 * Query request timeout for fDeviceInit flag
79 * fDeviceInit query response time for some devices is too large that default
80 * QUERY_REQ_TIMEOUT may not be enough for such devices.
82 #define QUERY_FDEVICEINIT_REQ_TIMEOUT 600 /* msec */
84 /* Task management command timeout */
85 #define TM_CMD_TIMEOUT 300 /* msecs */
87 /* maximum number of retries for a general UIC command */
88 #define UFS_UIC_COMMAND_RETRIES 3
90 /* maximum number of link-startup retries */
91 #define DME_LINKSTARTUP_RETRIES 3
93 /* Maximum retries for Hibern8 enter */
94 #define UIC_HIBERN8_ENTER_RETRIES 3
96 /* maximum number of reset retries before giving up */
97 #define MAX_HOST_RESET_RETRIES 5
99 /* Expose the flag value from utp_upiu_query.value */
100 #define MASK_QUERY_UPIU_FLAG_LOC 0xFF
102 /* Interrupt aggregation default timeout, unit: 40us */
103 #define INT_AGGR_DEF_TO 0x01
105 /* Link Hibernation delay, msecs */
106 #define LINK_H8_DELAY 20
108 /* UFS link setup retries */
109 #define UFS_LINK_SETUP_RETRIES 5
111 /* IOCTL opcode for command - ufs set device read only */
112 #define UFS_IOCTL_BLKROSET BLKROSET
114 #define ufshcd_toggle_vreg(_dev, _vreg, _on) \
118 _ret = ufshcd_enable_vreg(_dev, _vreg); \
120 _ret = ufshcd_disable_vreg(_dev, _vreg); \
124 static int ufs_shutdown_state
= 0;
126 #define ufshcd_hex_dump(prefix_str, buf, len) \
127 print_hex_dump(KERN_ERR, prefix_str, DUMP_PREFIX_OFFSET, 16, 4, buf, len, false)
130 UFSHCD_MAX_CHANNEL
= 0,
132 UFSHCD_CMD_PER_LUN
= 32,
133 UFSHCD_CAN_QUEUE
= 32,
140 UFSHCD_STATE_OPERATIONAL
,
141 UFSHCD_STATE_EH_SCHEDULED
,
144 /* UFSHCD error handling flags */
146 UFSHCD_EH_IN_PROGRESS
= (1 << 0),
149 /* UFSHCD UIC layer error flags */
151 UFSHCD_UIC_DL_PA_INIT_ERROR
= (1 << 0), /* Data link layer error */
152 UFSHCD_UIC_DL_NAC_RECEIVED_ERROR
= (1 << 1), /* Data link layer error */
153 UFSHCD_UIC_DL_TCx_REPLAY_ERROR
= (1 << 2), /* Data link layer error */
154 UFSHCD_UIC_NL_ERROR
= (1 << 3), /* Network layer error */
155 UFSHCD_UIC_TL_ERROR
= (1 << 4), /* Transport Layer error */
156 UFSHCD_UIC_DME_ERROR
= (1 << 5), /* DME error */
157 UFSHCD_UIC_DL_ERROR
= (1 << 6), /* Data link layer error */
160 #define ufshcd_set_eh_in_progress(h) \
161 ((h)->eh_flags |= UFSHCD_EH_IN_PROGRESS)
162 #define ufshcd_eh_in_progress(h) \
163 ((h)->eh_flags & UFSHCD_EH_IN_PROGRESS)
164 #define ufshcd_clear_eh_in_progress(h) \
165 ((h)->eh_flags &= ~UFSHCD_EH_IN_PROGRESS)
167 #define ufshcd_set_ufs_dev_active(h) \
168 ((h)->curr_dev_pwr_mode = UFS_ACTIVE_PWR_MODE)
169 #define ufshcd_set_ufs_dev_sleep(h) \
170 ((h)->curr_dev_pwr_mode = UFS_SLEEP_PWR_MODE)
171 #define ufshcd_set_ufs_dev_poweroff(h) \
172 ((h)->curr_dev_pwr_mode = UFS_POWERDOWN_PWR_MODE)
173 #define ufshcd_is_ufs_dev_active(h) \
174 ((h)->curr_dev_pwr_mode == UFS_ACTIVE_PWR_MODE)
175 #define ufshcd_is_ufs_dev_sleep(h) \
176 ((h)->curr_dev_pwr_mode == UFS_SLEEP_PWR_MODE)
177 #define ufshcd_is_ufs_dev_poweroff(h) \
178 ((h)->curr_dev_pwr_mode == UFS_POWERDOWN_PWR_MODE)
180 static struct ufs_pm_lvl_states ufs_pm_lvl_states
[] = {
181 {UFS_ACTIVE_PWR_MODE
, UIC_LINK_ACTIVE_STATE
},
182 {UFS_ACTIVE_PWR_MODE
, UIC_LINK_HIBERN8_STATE
},
183 {UFS_SLEEP_PWR_MODE
, UIC_LINK_ACTIVE_STATE
},
184 {UFS_SLEEP_PWR_MODE
, UIC_LINK_HIBERN8_STATE
},
185 {UFS_POWERDOWN_PWR_MODE
, UIC_LINK_HIBERN8_STATE
},
186 {UFS_POWERDOWN_PWR_MODE
, UIC_LINK_OFF_STATE
},
189 static inline enum ufs_dev_pwr_mode
190 ufs_get_pm_lvl_to_dev_pwr_mode(enum ufs_pm_level lvl
)
192 return ufs_pm_lvl_states
[lvl
].dev_state
;
195 static inline enum uic_link_state
196 ufs_get_pm_lvl_to_link_pwr_state(enum ufs_pm_level lvl
)
198 return ufs_pm_lvl_states
[lvl
].link_state
;
201 static struct ufs_dev_fix ufs_fixups
[] = {
202 /* UFS cards deviations table */
203 UFS_FIX(UFS_VENDOR_SAMSUNG
, UFS_ANY_MODEL
,
204 UFS_DEVICE_QUIRK_DELAY_BEFORE_LPM
),
205 UFS_FIX(UFS_VENDOR_SAMSUNG
, UFS_ANY_MODEL
, UFS_DEVICE_NO_VCCQ
),
206 UFS_FIX(UFS_VENDOR_SAMSUNG
, UFS_ANY_MODEL
,
207 UFS_DEVICE_QUIRK_RECOVERY_FROM_DL_NAC_ERRORS
),
208 UFS_FIX(UFS_VENDOR_SAMSUNG
, UFS_ANY_MODEL
,
209 UFS_DEVICE_NO_FASTAUTO
),
210 UFS_FIX(UFS_VENDOR_SAMSUNG
, UFS_ANY_MODEL
,
211 UFS_DEVICE_QUIRK_HOST_PA_TACTIVATE
),
212 UFS_FIX(UFS_VENDOR_TOSHIBA
, UFS_ANY_MODEL
,
213 UFS_DEVICE_QUIRK_DELAY_BEFORE_LPM
),
214 UFS_FIX(UFS_VENDOR_TOSHIBA
, "THGLF2G9C8KBADG",
215 UFS_DEVICE_QUIRK_PA_TACTIVATE
),
216 UFS_FIX(UFS_VENDOR_TOSHIBA
, "THGLF2G9D8KBADG",
217 UFS_DEVICE_QUIRK_PA_TACTIVATE
),
218 UFS_FIX(UFS_VENDOR_SKHYNIX
, UFS_ANY_MODEL
, UFS_DEVICE_NO_VCCQ
),
219 UFS_FIX(UFS_VENDOR_SKHYNIX
, UFS_ANY_MODEL
,
220 UFS_DEVICE_QUIRK_HOST_PA_SAVECONFIGTIME
),
225 static void ufshcd_tmc_handler(struct ufs_hba
*hba
);
226 static void ufshcd_async_scan(void *data
, async_cookie_t cookie
);
227 static int ufshcd_reset_and_restore(struct ufs_hba
*hba
);
228 static int ufshcd_eh_host_reset_handler(struct scsi_cmnd
*cmd
);
229 static int ufshcd_clear_tm_cmd(struct ufs_hba
*hba
, int tag
);
230 static void ufshcd_hba_exit(struct ufs_hba
*hba
);
231 static int ufshcd_probe_hba(struct ufs_hba
*hba
);
232 static int __ufshcd_setup_clocks(struct ufs_hba
*hba
, bool on
,
234 static int ufshcd_setup_clocks(struct ufs_hba
*hba
, bool on
);
235 static int ufshcd_set_vccq_rail_unused(struct ufs_hba
*hba
, bool unused
);
236 static int ufshcd_uic_hibern8_exit(struct ufs_hba
*hba
);
237 static int ufshcd_uic_hibern8_enter(struct ufs_hba
*hba
);
238 static int ufshcd_link_hibern8_ctrl(struct ufs_hba
*hba
, bool en
);
239 static inline void ufshcd_add_delay_before_dme_cmd(struct ufs_hba
*hba
);
240 static int ufshcd_host_reset_and_restore(struct ufs_hba
*hba
);
241 #if defined(CONFIG_PM_DEVFREQ)
242 static void ufshcd_resume_clkscaling(struct ufs_hba
*hba
);
243 static void ufshcd_suspend_clkscaling(struct ufs_hba
*hba
);
244 static void __ufshcd_suspend_clkscaling(struct ufs_hba
*hba
);
245 static int ufshcd_scale_clks(struct ufs_hba
*hba
, bool scale_up
);
247 static irqreturn_t
ufshcd_intr(int irq
, void *__hba
);
248 static int ufshcd_change_power_mode(struct ufs_hba
*hba
,
249 struct ufs_pa_layer_attr
*pwr_mode
);
250 static int ufshcd_set_dev_pwr_mode(struct ufs_hba
*hba
,
251 enum ufs_dev_pwr_mode pwr_mode
);
252 static int ufshcd_send_request_sense(struct ufs_hba
*hba
,
253 struct scsi_device
*sdp
);
254 static void ufshcd_vreg_set_lpm(struct ufs_hba
*hba
);
255 static int ufshcd_vreg_set_hpm(struct ufs_hba
*hba
);
256 static inline bool ufshcd_valid_tag(struct ufs_hba
*hba
, int tag
)
258 return tag
>= 0 && tag
< hba
->nutrs
;
261 static ssize_t
ufshcd_monitor_show(struct device
*dev
,
262 struct device_attribute
*attr
, char *buf
)
264 struct ufs_hba
*hba
= dev_get_drvdata(dev
);
266 return snprintf(buf
, PAGE_SIZE
, "%lu\n", hba
->monitor
.flag
);
269 static ssize_t
ufshcd_monitor_store(struct device
*dev
,
270 struct device_attribute
*attr
, const char *buf
, size_t count
)
272 struct ufs_hba
*hba
= dev_get_drvdata(dev
);
275 if (kstrtoul(buf
, 0, &value
))
278 hba
->monitor
.flag
= value
;
282 static void ufshcd_init_monitor(struct ufs_hba
*hba
)
284 hba
->monitor
.attrs
.show
= ufshcd_monitor_show
;
285 hba
->monitor
.attrs
.store
= ufshcd_monitor_store
;
286 sysfs_attr_init(&hba
->monitor
.attrs
.attr
);
287 hba
->monitor
.attrs
.attr
.name
= "monitor";
288 hba
->monitor
.attrs
.attr
.mode
= S_IRUGO
| S_IWUSR
;
289 if (device_create_file(hba
->dev
, &hba
->monitor
.attrs
))
290 dev_err(hba
->dev
, "Failed to create sysfs for monitor\n");
294 static inline int ufshcd_enable_irq(struct ufs_hba
*hba
)
298 if (!hba
->is_irq_enabled
) {
299 ret
= request_irq(hba
->irq
, ufshcd_intr
, IRQF_SHARED
, UFSHCD
,
302 dev_err(hba
->dev
, "%s: request_irq failed, ret=%d\n",
304 hba
->is_irq_enabled
= true;
310 static inline void ufshcd_disable_irq(struct ufs_hba
*hba
)
312 if (hba
->is_irq_enabled
) {
313 free_irq(hba
->irq
, hba
);
314 hba
->is_irq_enabled
= false;
318 /* replace non-printable or non-ASCII characters with spaces */
319 static inline void ufshcd_remove_non_printable(char *val
)
324 if (*val
< 0x20 || *val
> 0x7e)
328 static void ufshcd_add_command_trace(struct ufs_hba
*hba
,
329 unsigned int tag
, const char *str
)
334 struct ufshcd_lrb
*lrbp
;
335 int transfer_len
= -1;
337 if (!trace_ufshcd_command_enabled())
340 lrbp
= &hba
->lrb
[tag
];
342 if (lrbp
->cmd
) { /* data phase exists */
343 opcode
= (u8
)(*lrbp
->cmd
->cmnd
);
344 if ((opcode
== READ_10
) || (opcode
== WRITE_10
)) {
346 * Currently we only fully trace read(10) and write(10)
349 if (lrbp
->cmd
->request
&& lrbp
->cmd
->request
->bio
)
351 lrbp
->cmd
->request
->bio
->bi_iter
.bi_sector
;
352 transfer_len
= be32_to_cpu(
353 lrbp
->ucd_req_ptr
->sc
.exp_data_transfer_len
);
357 intr
= ufshcd_readl(hba
, REG_INTERRUPT_STATUS
);
358 doorbell
= ufshcd_readl(hba
, REG_UTP_TRANSFER_REQ_DOOR_BELL
);
359 trace_ufshcd_command(dev_name(hba
->dev
), str
, tag
,
360 doorbell
, transfer_len
, intr
, lba
, opcode
);
363 static void ufshcd_print_clk_freqs(struct ufs_hba
*hba
)
365 struct ufs_clk_info
*clki
;
366 struct list_head
*head
= &hba
->clk_list_head
;
368 if (list_empty(head
))
371 list_for_each_entry(clki
, head
, list
) {
372 if (!IS_ERR_OR_NULL(clki
->clk
) && clki
->min_freq
&&
374 dev_err(hba
->dev
, "clk: %s, rate: %u\n",
375 clki
->name
, clki
->curr_freq
);
379 static void ufshcd_print_uic_err_hist(struct ufs_hba
*hba
,
380 struct ufs_uic_err_reg_hist
*err_hist
, char *err_name
)
384 for (i
= 0; i
< UIC_ERR_REG_HIST_LENGTH
; i
++) {
385 int p
= (i
+ err_hist
->pos
- 1) % UIC_ERR_REG_HIST_LENGTH
;
387 if (err_hist
->reg
[p
] == 0)
389 dev_err(hba
->dev
, "%s[%d] = 0x%x at %lld us\n", err_name
, i
,
390 err_hist
->reg
[p
], ktime_to_us(err_hist
->tstamp
[p
]));
394 static void ufshcd_print_host_regs(struct ufs_hba
*hba
)
397 * hex_dump reads its data without the readl macro. This might
398 * cause inconsistency issues on some platform, as the printed
399 * values may be from cache and not the most recent value.
400 * To know whether you are looking at an un-cached version verify
401 * that IORESOURCE_MEM flag is on when xxx_get_resource() is invoked
402 * during platform/pci probe function.
404 ufshcd_hex_dump("host regs: ", hba
->mmio_base
, UFSHCI_REG_SPACE_SIZE
);
405 dev_err(hba
->dev
, "hba->ufs_version = 0x%x, hba->capabilities = 0x%x\n",
406 hba
->ufs_version
, hba
->capabilities
);
408 "hba->outstanding_reqs = 0x%x, hba->outstanding_tasks = 0x%x\n",
409 (u32
)hba
->outstanding_reqs
, (u32
)hba
->outstanding_tasks
);
411 "last_hibern8_exit_tstamp at %lld us, hibern8_exit_cnt = %d\n",
412 ktime_to_us(hba
->ufs_stats
.last_hibern8_exit_tstamp
),
413 hba
->ufs_stats
.hibern8_exit_cnt
);
415 ufshcd_print_uic_err_hist(hba
, &hba
->ufs_stats
.pa_err
, "pa_err");
416 ufshcd_print_uic_err_hist(hba
, &hba
->ufs_stats
.dl_err
, "dl_err");
417 ufshcd_print_uic_err_hist(hba
, &hba
->ufs_stats
.nl_err
, "nl_err");
418 ufshcd_print_uic_err_hist(hba
, &hba
->ufs_stats
.tl_err
, "tl_err");
419 ufshcd_print_uic_err_hist(hba
, &hba
->ufs_stats
.dme_err
, "dme_err");
421 ufshcd_print_clk_freqs(hba
);
423 if (hba
->vops
&& hba
->vops
->dbg_register_dump
)
424 hba
->vops
->dbg_register_dump(hba
);
428 void ufshcd_print_trs(struct ufs_hba
*hba
, unsigned long bitmap
, bool pr_prdt
)
430 struct ufshcd_lrb
*lrbp
;
434 for_each_set_bit(tag
, &bitmap
, hba
->nutrs
) {
435 lrbp
= &hba
->lrb
[tag
];
437 dev_err(hba
->dev
, "UPIU[%d] - issue time %lld us\n",
438 tag
, ktime_to_us(lrbp
->issue_time_stamp
));
440 "UPIU[%d] - Transfer Request Descriptor phys@0x%llx\n",
441 tag
, (u64
)lrbp
->utrd_dma_addr
);
443 ufshcd_hex_dump("UPIU TRD: ", lrbp
->utr_descriptor_ptr
,
444 sizeof(struct utp_transfer_req_desc
));
445 dev_err(hba
->dev
, "UPIU[%d] - Request UPIU phys@0x%llx\n", tag
,
446 (u64
)lrbp
->ucd_req_dma_addr
);
447 ufshcd_hex_dump("UPIU REQ: ", lrbp
->ucd_req_ptr
,
448 sizeof(struct utp_upiu_req
));
449 dev_err(hba
->dev
, "UPIU[%d] - Response UPIU phys@0x%llx\n", tag
,
450 (u64
)lrbp
->ucd_rsp_dma_addr
);
451 ufshcd_hex_dump("UPIU RSP: ", lrbp
->ucd_rsp_ptr
,
452 sizeof(struct utp_upiu_rsp
));
454 if (hba
->quirks
& UFSHCD_QUIRK_PRDT_BYTE_GRAN
)
455 prdt_length
= le16_to_cpu(lrbp
->utr_descriptor_ptr
->prd_table_length
)
456 / sizeof(struct ufshcd_sg_entry
);
458 prdt_length
= le16_to_cpu(lrbp
->utr_descriptor_ptr
->prd_table_length
);
461 "UPIU[%d] - PRDT - %d entries phys@0x%llx\n",
463 (u64
)lrbp
->ucd_prdt_dma_addr
);
465 ufshcd_hex_dump("UPIU PRDT: ", lrbp
->ucd_prdt_ptr
,
466 sizeof(struct ufshcd_sg_entry
) * prdt_length
);
470 static void ufshcd_print_tmrs(struct ufs_hba
*hba
, unsigned long bitmap
)
472 struct utp_task_req_desc
*tmrdp
;
475 for_each_set_bit(tag
, &bitmap
, hba
->nutmrs
) {
476 tmrdp
= &hba
->utmrdl_base_addr
[tag
];
477 dev_err(hba
->dev
, "TM[%d] - Task Management Header\n", tag
);
478 ufshcd_hex_dump("TM TRD: ", &tmrdp
->header
,
479 sizeof(struct request_desc_header
));
480 dev_err(hba
->dev
, "TM[%d] - Task Management Request UPIU\n",
482 ufshcd_hex_dump("TM REQ: ", tmrdp
->task_req_upiu
,
483 sizeof(struct utp_upiu_req
));
484 dev_err(hba
->dev
, "TM[%d] - Task Management Response UPIU\n",
486 ufshcd_hex_dump("TM RSP: ", tmrdp
->task_rsp_upiu
,
487 sizeof(struct utp_task_req_desc
));
491 static void ufshcd_print_host_state(struct ufs_hba
*hba
)
493 dev_err(hba
->dev
, "UFS Host state=%d\n", hba
->ufshcd_state
);
494 dev_err(hba
->dev
, "lrb in use=0x%lx, outstanding reqs=0x%lx tasks=0x%lx\n",
495 hba
->lrb_in_use
, hba
->outstanding_reqs
, hba
->outstanding_tasks
);
496 dev_err(hba
->dev
, "saved_err=0x%x, saved_uic_err=0x%x\n",
497 hba
->saved_err
, hba
->saved_uic_err
);
498 dev_err(hba
->dev
, "Device power mode=%d, UIC link state=%d\n",
499 hba
->curr_dev_pwr_mode
, hba
->uic_link_state
);
500 dev_err(hba
->dev
, "PM in progress=%d, sys. suspended=%d\n",
501 hba
->pm_op_in_progress
, hba
->is_sys_suspended
);
502 dev_err(hba
->dev
, "Auto BKOPS=%d, Host self-block=%d\n",
503 hba
->auto_bkops_enabled
, hba
->host
->host_self_blocked
);
504 dev_err(hba
->dev
, "Clk gate=%d\n", hba
->clk_gating
.state
);
505 dev_err(hba
->dev
, "error handling flags=0x%x, req. abort count=%d\n",
506 hba
->eh_flags
, hba
->req_abort_count
);
507 dev_err(hba
->dev
, "Host capabilities=0x%x, caps=0x%x\n",
508 hba
->capabilities
, hba
->caps
);
509 dev_err(hba
->dev
, "quirks=0x%x, dev. quirks=0x%x\n", hba
->quirks
,
514 * ufshcd_print_pwr_info - print power params as saved in hba
516 * @hba: per-adapter instance
518 static void ufshcd_print_pwr_info(struct ufs_hba
*hba
)
520 static const char * const names
[] = {
530 dev_err(hba
->dev
, "%s:[RX, TX]: gear=[%d, %d], lane[%d, %d], pwr[%s, %s], rate = %d\n",
532 hba
->pwr_info
.gear_rx
, hba
->pwr_info
.gear_tx
,
533 hba
->pwr_info
.lane_rx
, hba
->pwr_info
.lane_tx
,
534 names
[hba
->pwr_info
.pwr_rx
],
535 names
[hba
->pwr_info
.pwr_tx
],
536 hba
->pwr_info
.hs_rate
);
540 * ufshcd_wait_for_register - wait for register value to change
541 * @hba - per-adapter interface
542 * @reg - mmio register offset
543 * @mask - mask to apply to read register value
544 * @val - wait condition
545 * @interval_us - polling interval in microsecs
546 * @timeout_ms - timeout in millisecs
547 * @can_sleep - perform sleep or just spin
549 * Returns -ETIMEDOUT on error, zero on success
551 int ufshcd_wait_for_register(struct ufs_hba
*hba
, u32 reg
, u32 mask
,
552 u32 val
, unsigned long interval_us
,
553 unsigned long timeout_ms
, bool can_sleep
)
556 unsigned long timeout
= jiffies
+ msecs_to_jiffies(timeout_ms
);
558 /* ignore bits that we don't intend to wait on */
561 while ((ufshcd_readl(hba
, reg
) & mask
) != val
) {
563 usleep_range(interval_us
, interval_us
+ 50);
566 if (time_after(jiffies
, timeout
)) {
567 if ((ufshcd_readl(hba
, reg
) & mask
) != val
)
577 * ufshcd_get_intr_mask - Get the interrupt bit mask
578 * @hba - Pointer to adapter instance
580 * Returns interrupt bit mask per version
582 static inline u32
ufshcd_get_intr_mask(struct ufs_hba
*hba
)
586 switch (hba
->ufs_version
) {
587 case UFSHCI_VERSION_10
:
588 intr_mask
= INTERRUPT_MASK_ALL_VER_10
;
590 case UFSHCI_VERSION_11
:
591 case UFSHCI_VERSION_20
:
592 intr_mask
= INTERRUPT_MASK_ALL_VER_11
;
594 case UFSHCI_VERSION_21
:
596 intr_mask
= INTERRUPT_MASK_ALL_VER_21
;
604 * ufshcd_get_ufs_version - Get the UFS version supported by the HBA
605 * @hba - Pointer to adapter instance
607 * Returns UFSHCI version supported by the controller
609 static inline u32
ufshcd_get_ufs_version(struct ufs_hba
*hba
)
611 if (hba
->quirks
& UFSHCD_QUIRK_BROKEN_UFS_HCI_VERSION
)
612 return ufshcd_vops_get_ufs_hci_version(hba
);
614 return ufshcd_readl(hba
, REG_UFS_VERSION
);
618 * ufshcd_is_device_present - Check if any device connected to
619 * the host controller
620 * @hba: pointer to adapter instance
622 * Returns true if device present, false if no device detected
624 static inline bool ufshcd_is_device_present(struct ufs_hba
*hba
)
626 return (ufshcd_readl(hba
, REG_CONTROLLER_STATUS
) &
627 DEVICE_PRESENT
) ? true : false;
631 * ufshcd_get_tr_ocs - Get the UTRD Overall Command Status
632 * @lrb: pointer to local command reference block
634 * This function is used to get the OCS field from UTRD
635 * Returns the OCS field in the UTRD
637 static inline int ufshcd_get_tr_ocs(struct ufshcd_lrb
*lrbp
)
639 return le32_to_cpu(lrbp
->utr_descriptor_ptr
->header
.dword_2
) & MASK_OCS
;
643 * ufshcd_get_tmr_ocs - Get the UTMRD Overall Command Status
644 * @task_req_descp: pointer to utp_task_req_desc structure
646 * This function is used to get the OCS field from UTMRD
647 * Returns the OCS field in the UTMRD
650 ufshcd_get_tmr_ocs(struct utp_task_req_desc
*task_req_descp
)
652 return le32_to_cpu(task_req_descp
->header
.dword_2
) & MASK_OCS
;
656 * ufshcd_get_tm_free_slot - get a free slot for task management request
657 * @hba: per adapter instance
658 * @free_slot: pointer to variable with available slot value
660 * Get a free tag and lock it until ufshcd_put_tm_slot() is called.
661 * Returns 0 if free slot is not available, else return 1 with tag value
664 static bool ufshcd_get_tm_free_slot(struct ufs_hba
*hba
, int *free_slot
)
673 tag
= find_first_zero_bit(&hba
->tm_slots_in_use
, hba
->nutmrs
);
674 if (tag
>= hba
->nutmrs
)
676 } while (test_and_set_bit_lock(tag
, &hba
->tm_slots_in_use
));
684 static inline void ufshcd_put_tm_slot(struct ufs_hba
*hba
, int slot
)
686 clear_bit_unlock(slot
, &hba
->tm_slots_in_use
);
690 * ufshcd_utrl_clear - Clear a bit in UTRLCLR register
691 * @hba: per adapter instance
692 * @pos: position of the bit to be cleared
694 static inline void ufshcd_utrl_clear(struct ufs_hba
*hba
, u32 pos
)
698 if (hba
->quirks
& UFSHCD_QUIRK_BROKEN_REQ_LIST_CLR
)
703 ufshcd_writel(hba
, clear
, REG_UTP_TRANSFER_REQ_LIST_CLEAR
);
707 * ufshcd_utmrl_clear - Clear a bit in UTRMLCLR register
708 * @hba: per adapter instance
709 * @pos: position of the bit to be cleared
711 static inline void ufshcd_utmrl_clear(struct ufs_hba
*hba
, u32 pos
)
715 if (hba
->quirks
& UFSHCD_QUIRK_BROKEN_REQ_LIST_CLR
)
720 ufshcd_writel(hba
, clear
, REG_UTP_TASK_REQ_LIST_CLEAR
);
724 * ufshcd_outstanding_req_clear - Clear a bit in outstanding request field
725 * @hba: per adapter instance
726 * @tag: position of the bit to be cleared
728 static inline void ufshcd_outstanding_req_clear(struct ufs_hba
*hba
, int tag
)
730 __clear_bit(tag
, &hba
->outstanding_reqs
);
734 * ufshcd_get_lists_status - Check UCRDY, UTRLRDY and UTMRLRDY
735 * @reg: Register value of host controller status
737 * Returns integer, 0 on Success and positive value if failed
739 static inline int ufshcd_get_lists_status(u32 reg
)
742 * The mask 0xFF is for the following HCS register bits
750 return ((reg
& 0xFF) >> 1) ^ 0x07;
754 * ufshcd_get_uic_cmd_result - Get the UIC command result
755 * @hba: Pointer to adapter instance
757 * This function gets the result of UIC command completion
758 * Returns 0 on success, non zero value on error
760 static inline int ufshcd_get_uic_cmd_result(struct ufs_hba
*hba
)
762 return ufshcd_readl(hba
, REG_UIC_COMMAND_ARG_2
) &
763 MASK_UIC_COMMAND_RESULT
;
767 * ufshcd_get_dme_attr_val - Get the value of attribute returned by UIC command
768 * @hba: Pointer to adapter instance
770 * This function gets UIC command argument3
771 * Returns 0 on success, non zero value on error
773 static inline u32
ufshcd_get_dme_attr_val(struct ufs_hba
*hba
)
775 return ufshcd_readl(hba
, REG_UIC_COMMAND_ARG_3
);
779 * ufshcd_get_req_rsp - returns the TR response transaction type
780 * @ucd_rsp_ptr: pointer to response UPIU
783 ufshcd_get_req_rsp(struct utp_upiu_rsp
*ucd_rsp_ptr
)
785 return be32_to_cpu(ucd_rsp_ptr
->header
.dword_0
) >> 24;
789 * ufshcd_get_rsp_upiu_result - Get the result from response UPIU
790 * @ucd_rsp_ptr: pointer to response UPIU
792 * This function gets the response status and scsi_status from response UPIU
793 * Returns the response result code.
796 ufshcd_get_rsp_upiu_result(struct utp_upiu_rsp
*ucd_rsp_ptr
)
798 return be32_to_cpu(ucd_rsp_ptr
->header
.dword_1
) & MASK_RSP_UPIU_RESULT
;
802 * ufshcd_get_rsp_upiu_data_seg_len - Get the data segment length
804 * @ucd_rsp_ptr: pointer to response UPIU
806 * Return the data segment length.
808 static inline unsigned int
809 ufshcd_get_rsp_upiu_data_seg_len(struct utp_upiu_rsp
*ucd_rsp_ptr
)
811 return be32_to_cpu(ucd_rsp_ptr
->header
.dword_2
) &
812 MASK_RSP_UPIU_DATA_SEG_LEN
;
816 * ufshcd_is_exception_event - Check if the device raised an exception event
817 * @ucd_rsp_ptr: pointer to response UPIU
819 * The function checks if the device raised an exception event indicated in
820 * the Device Information field of response UPIU.
822 * Returns true if exception is raised, false otherwise.
824 static inline bool ufshcd_is_exception_event(struct utp_upiu_rsp
*ucd_rsp_ptr
)
826 return be32_to_cpu(ucd_rsp_ptr
->header
.dword_2
) &
827 MASK_RSP_EXCEPTION_EVENT
? true : false;
831 * ufshcd_reset_intr_aggr - Reset interrupt aggregation values.
832 * @hba: per adapter instance
835 ufshcd_reset_intr_aggr(struct ufs_hba
*hba
)
837 ufshcd_writel(hba
, INT_AGGR_ENABLE
|
838 INT_AGGR_COUNTER_AND_TIMER_RESET
,
839 REG_UTP_TRANSFER_REQ_INT_AGG_CONTROL
);
843 * ufshcd_config_intr_aggr - Configure interrupt aggregation values.
844 * @hba: per adapter instance
845 * @cnt: Interrupt aggregation counter threshold
846 * @tmout: Interrupt aggregation timeout value
849 ufshcd_config_intr_aggr(struct ufs_hba
*hba
, u8 cnt
, u8 tmout
)
851 ufshcd_writel(hba
, INT_AGGR_ENABLE
| INT_AGGR_PARAM_WRITE
|
852 INT_AGGR_COUNTER_THLD_VAL(cnt
) |
853 INT_AGGR_TIMEOUT_VAL(tmout
),
854 REG_UTP_TRANSFER_REQ_INT_AGG_CONTROL
);
858 * ufshcd_disable_intr_aggr - Disables interrupt aggregation.
859 * @hba: per adapter instance
861 static inline void ufshcd_disable_intr_aggr(struct ufs_hba
*hba
)
863 ufshcd_writel(hba
, 0, REG_UTP_TRANSFER_REQ_INT_AGG_CONTROL
);
867 * ufshcd_enable_run_stop_reg - Enable run-stop registers,
868 * When run-stop registers are set to 1, it indicates the
869 * host controller that it can process the requests
870 * @hba: per adapter instance
872 static void ufshcd_enable_run_stop_reg(struct ufs_hba
*hba
)
874 ufshcd_writel(hba
, UTP_TASK_REQ_LIST_RUN_STOP_BIT
,
875 REG_UTP_TASK_REQ_LIST_RUN_STOP
);
876 ufshcd_writel(hba
, UTP_TRANSFER_REQ_LIST_RUN_STOP_BIT
,
877 REG_UTP_TRANSFER_REQ_LIST_RUN_STOP
);
881 * ufshcd_hba_start - Start controller initialization sequence
882 * @hba: per adapter instance
884 static inline void ufshcd_hba_start(struct ufs_hba
*hba
)
886 ufshcd_writel(hba
, CONTROLLER_ENABLE
, REG_CONTROLLER_ENABLE
);
890 * ufshcd_is_hba_active - Get controller state
891 * @hba: per adapter instance
893 * Returns false if controller is active, true otherwise
895 static inline bool ufshcd_is_hba_active(struct ufs_hba
*hba
)
897 return (ufshcd_readl(hba
, REG_CONTROLLER_ENABLE
) & CONTROLLER_ENABLE
)
901 static const char *ufschd_uic_link_state_to_string(
902 enum uic_link_state state
)
905 case UIC_LINK_OFF_STATE
: return "OFF";
906 case UIC_LINK_ACTIVE_STATE
: return "ACTIVE";
907 case UIC_LINK_HIBERN8_STATE
: return "HIBERN8";
908 default: return "UNKNOWN";
912 static const char *ufschd_ufs_dev_pwr_mode_to_string(
913 enum ufs_dev_pwr_mode state
)
916 case UFS_ACTIVE_PWR_MODE
: return "ACTIVE";
917 case UFS_SLEEP_PWR_MODE
: return "SLEEP";
918 case UFS_POWERDOWN_PWR_MODE
: return "POWERDOWN";
919 default: return "UNKNOWN";
923 u32
ufshcd_get_local_unipro_ver(struct ufs_hba
*hba
)
925 /* HCI version 1.0 and 1.1 supports UniPro 1.41 */
926 if ((hba
->ufs_version
== UFSHCI_VERSION_10
) ||
927 (hba
->ufs_version
== UFSHCI_VERSION_11
))
928 return UFS_UNIPRO_VER_1_41
;
930 return UFS_UNIPRO_VER_1_6
;
932 EXPORT_SYMBOL(ufshcd_get_local_unipro_ver
);
934 static bool ufshcd_is_unipro_pa_params_tuning_req(struct ufs_hba
*hba
)
937 * If both host and device support UniPro ver1.6 or later, PA layer
938 * parameters tuning happens during link startup itself.
940 * We can manually tune PA layer parameters if either host or device
941 * doesn't support UniPro ver 1.6 or later. But to keep manual tuning
942 * logic simple, we will only do manual tuning if local unipro version
943 * doesn't support ver1.6 or later.
945 if (ufshcd_get_local_unipro_ver(hba
) < UFS_UNIPRO_VER_1_6
)
951 #if defined(CONFIG_PM_DEVFREQ)
952 static int ufshcd_scale_clks(struct ufs_hba
*hba
, bool scale_up
)
955 struct ufs_clk_info
*clki
;
956 struct list_head
*head
= &hba
->clk_list_head
;
957 ktime_t start
= ktime_get();
958 bool clk_state_changed
= false;
960 if (list_empty(head
))
963 ret
= ufshcd_vops_clk_scale_notify(hba
, scale_up
, PRE_CHANGE
);
967 list_for_each_entry(clki
, head
, list
) {
968 if (!IS_ERR_OR_NULL(clki
->clk
)) {
969 if (scale_up
&& clki
->max_freq
) {
970 if (clki
->curr_freq
== clki
->max_freq
)
973 clk_state_changed
= true;
974 ret
= clk_set_rate(clki
->clk
, clki
->max_freq
);
976 dev_err(hba
->dev
, "%s: %s clk set rate(%dHz) failed, %d\n",
977 __func__
, clki
->name
,
978 clki
->max_freq
, ret
);
981 trace_ufshcd_clk_scaling(dev_name(hba
->dev
),
982 "scaled up", clki
->name
,
986 clki
->curr_freq
= clki
->max_freq
;
988 } else if (!scale_up
&& clki
->min_freq
) {
989 if (clki
->curr_freq
== clki
->min_freq
)
992 clk_state_changed
= true;
993 ret
= clk_set_rate(clki
->clk
, clki
->min_freq
);
995 dev_err(hba
->dev
, "%s: %s clk set rate(%dHz) failed, %d\n",
996 __func__
, clki
->name
,
997 clki
->min_freq
, ret
);
1000 trace_ufshcd_clk_scaling(dev_name(hba
->dev
),
1001 "scaled down", clki
->name
,
1004 clki
->curr_freq
= clki
->min_freq
;
1007 dev_dbg(hba
->dev
, "%s: clk: %s, rate: %lu\n", __func__
,
1008 clki
->name
, clk_get_rate(clki
->clk
));
1011 ret
= ufshcd_vops_clk_scale_notify(hba
, scale_up
, POST_CHANGE
);
1014 if (clk_state_changed
)
1015 trace_ufshcd_profile_clk_scaling(dev_name(hba
->dev
),
1016 (scale_up
? "up" : "down"),
1017 ktime_to_us(ktime_sub(ktime_get(), start
)), ret
);
1022 * ufshcd_is_devfreq_scaling_required - check if scaling is required or not
1023 * @hba: per adapter instance
1024 * @scale_up: True if scaling up and false if scaling down
1026 * Returns true if scaling is required, false otherwise.
1028 static bool ufshcd_is_devfreq_scaling_required(struct ufs_hba
*hba
,
1031 struct ufs_clk_info
*clki
;
1032 struct list_head
*head
= &hba
->clk_list_head
;
1034 if (list_empty(head
))
1037 list_for_each_entry(clki
, head
, list
) {
1038 if (!IS_ERR_OR_NULL(clki
->clk
)) {
1039 if (scale_up
&& clki
->max_freq
) {
1040 if (clki
->curr_freq
== clki
->max_freq
)
1043 } else if (!scale_up
&& clki
->min_freq
) {
1044 if (clki
->curr_freq
== clki
->min_freq
)
1054 static int ufshcd_wait_for_doorbell_clr(struct ufs_hba
*hba
,
1055 u64 wait_timeout_us
)
1057 unsigned long flags
;
1061 bool timeout
= false, do_last_check
= false;
1064 ufshcd_hold(hba
, false);
1065 spin_lock_irqsave(hba
->host
->host_lock
, flags
);
1067 * Wait for all the outstanding tasks/transfer requests.
1068 * Verify by checking the doorbell registers are clear.
1070 start
= ktime_get();
1072 if (hba
->ufshcd_state
!= UFSHCD_STATE_OPERATIONAL
) {
1077 tm_doorbell
= ufshcd_readl(hba
, REG_UTP_TASK_REQ_DOOR_BELL
);
1078 tr_doorbell
= ufshcd_readl(hba
, REG_UTP_TRANSFER_REQ_DOOR_BELL
);
1079 if (!tm_doorbell
&& !tr_doorbell
) {
1082 } else if (do_last_check
) {
1086 spin_unlock_irqrestore(hba
->host
->host_lock
, flags
);
1088 if (ktime_to_us(ktime_sub(ktime_get(), start
)) >
1092 * We might have scheduled out for long time so make
1093 * sure to check if doorbells are cleared by this time
1096 do_last_check
= true;
1098 spin_lock_irqsave(hba
->host
->host_lock
, flags
);
1099 } while (tm_doorbell
|| tr_doorbell
);
1103 "%s: timedout waiting for doorbell to clear (tm=0x%x, tr=0x%x)\n",
1104 __func__
, tm_doorbell
, tr_doorbell
);
1108 spin_unlock_irqrestore(hba
->host
->host_lock
, flags
);
1109 ufshcd_release(hba
);
1114 * ufshcd_scale_gear - scale up/down UFS gear
1115 * @hba: per adapter instance
1116 * @scale_up: True for scaling up gear and false for scaling down
1118 * Returns 0 for success,
1119 * Returns -EBUSY if scaling can't happen at this time
1120 * Returns non-zero for any other errors
1122 static int ufshcd_scale_gear(struct ufs_hba
*hba
, bool scale_up
)
1124 #define UFS_MIN_GEAR_TO_SCALE_DOWN UFS_HS_G1
1126 struct ufs_pa_layer_attr new_pwr_info
;
1129 memcpy(&new_pwr_info
, &hba
->clk_scaling
.saved_pwr_info
.info
,
1130 sizeof(struct ufs_pa_layer_attr
));
1132 memcpy(&new_pwr_info
, &hba
->pwr_info
,
1133 sizeof(struct ufs_pa_layer_attr
));
1135 if (hba
->pwr_info
.gear_tx
> UFS_MIN_GEAR_TO_SCALE_DOWN
1136 || hba
->pwr_info
.gear_rx
> UFS_MIN_GEAR_TO_SCALE_DOWN
) {
1137 /* save the current power mode */
1138 memcpy(&hba
->clk_scaling
.saved_pwr_info
.info
,
1140 sizeof(struct ufs_pa_layer_attr
));
1142 /* scale down gear */
1143 new_pwr_info
.gear_tx
= UFS_MIN_GEAR_TO_SCALE_DOWN
;
1144 new_pwr_info
.gear_rx
= UFS_MIN_GEAR_TO_SCALE_DOWN
;
1148 /* check if the power mode needs to be changed or not? */
1149 ret
= ufshcd_change_power_mode(hba
, &new_pwr_info
);
1152 dev_err(hba
->dev
, "%s: failed err %d, old gear: (tx %d rx %d), new gear: (tx %d rx %d)",
1154 hba
->pwr_info
.gear_tx
, hba
->pwr_info
.gear_rx
,
1155 new_pwr_info
.gear_tx
, new_pwr_info
.gear_rx
);
1160 static int ufshcd_clock_scaling_prepare(struct ufs_hba
*hba
)
1162 #define DOORBELL_CLR_TOUT_US (1000 * 1000) /* 1 sec */
1165 * make sure that there are no outstanding requests when
1166 * clock scaling is in progress
1168 scsi_block_requests(hba
->host
);
1169 down_write(&hba
->clk_scaling_lock
);
1170 if (ufshcd_wait_for_doorbell_clr(hba
, DOORBELL_CLR_TOUT_US
)) {
1172 up_write(&hba
->clk_scaling_lock
);
1173 scsi_unblock_requests(hba
->host
);
1179 static void ufshcd_clock_scaling_unprepare(struct ufs_hba
*hba
)
1181 up_write(&hba
->clk_scaling_lock
);
1182 scsi_unblock_requests(hba
->host
);
1186 * ufshcd_devfreq_scale - scale up/down UFS clocks and gear
1187 * @hba: per adapter instance
1188 * @scale_up: True for scaling up and false for scalin down
1190 * Returns 0 for success,
1191 * Returns -EBUSY if scaling can't happen at this time
1192 * Returns non-zero for any other errors
1194 static int ufshcd_devfreq_scale(struct ufs_hba
*hba
, bool scale_up
)
1198 /* let's not get into low power until clock scaling is completed */
1199 ufshcd_hold(hba
, false);
1201 ret
= ufshcd_clock_scaling_prepare(hba
);
1205 /* scale down the gear before scaling down clocks */
1207 ret
= ufshcd_scale_gear(hba
, false);
1212 ret
= ufshcd_scale_clks(hba
, scale_up
);
1215 ufshcd_scale_gear(hba
, true);
1219 /* scale up the gear after scaling up clocks */
1221 ret
= ufshcd_scale_gear(hba
, true);
1223 ufshcd_scale_clks(hba
, false);
1228 ret
= ufshcd_vops_clk_scale_notify(hba
, scale_up
, POST_CHANGE
);
1231 ufshcd_clock_scaling_unprepare(hba
);
1232 ufshcd_release(hba
);
1236 static void ufshcd_clk_scaling_suspend_work(struct work_struct
*work
)
1238 struct ufs_hba
*hba
= container_of(work
, struct ufs_hba
,
1239 clk_scaling
.suspend_work
);
1240 unsigned long irq_flags
;
1242 spin_lock_irqsave(hba
->host
->host_lock
, irq_flags
);
1243 if (hba
->clk_scaling
.active_reqs
|| hba
->clk_scaling
.is_suspended
) {
1244 spin_unlock_irqrestore(hba
->host
->host_lock
, irq_flags
);
1247 hba
->clk_scaling
.is_suspended
= true;
1248 spin_unlock_irqrestore(hba
->host
->host_lock
, irq_flags
);
1250 __ufshcd_suspend_clkscaling(hba
);
1253 static void ufshcd_clk_scaling_resume_work(struct work_struct
*work
)
1255 struct ufs_hba
*hba
= container_of(work
, struct ufs_hba
,
1256 clk_scaling
.resume_work
);
1257 unsigned long irq_flags
;
1259 spin_lock_irqsave(hba
->host
->host_lock
, irq_flags
);
1260 if (!hba
->clk_scaling
.is_suspended
) {
1261 spin_unlock_irqrestore(hba
->host
->host_lock
, irq_flags
);
1264 hba
->clk_scaling
.is_suspended
= false;
1265 spin_unlock_irqrestore(hba
->host
->host_lock
, irq_flags
);
1267 devfreq_resume_device(hba
->devfreq
);
1270 static int ufshcd_devfreq_target(struct device
*dev
,
1271 unsigned long *freq
, u32 flags
)
1274 struct ufs_hba
*hba
= dev_get_drvdata(dev
);
1276 bool scale_up
, sched_clk_scaling_suspend_work
= false;
1277 unsigned long irq_flags
;
1279 if (!ufshcd_is_clkscaling_supported(hba
))
1282 if ((*freq
> 0) && (*freq
< UINT_MAX
)) {
1283 dev_err(hba
->dev
, "%s: invalid freq = %lu\n", __func__
, *freq
);
1287 spin_lock_irqsave(hba
->host
->host_lock
, irq_flags
);
1288 if (ufshcd_eh_in_progress(hba
)) {
1289 spin_unlock_irqrestore(hba
->host
->host_lock
, irq_flags
);
1293 if (!hba
->clk_scaling
.active_reqs
)
1294 sched_clk_scaling_suspend_work
= true;
1296 scale_up
= (*freq
== UINT_MAX
) ? true : false;
1297 if (!ufshcd_is_devfreq_scaling_required(hba
, scale_up
)) {
1298 spin_unlock_irqrestore(hba
->host
->host_lock
, irq_flags
);
1300 goto out
; /* no state change required */
1302 spin_unlock_irqrestore(hba
->host
->host_lock
, irq_flags
);
1304 start
= ktime_get();
1305 ret
= ufshcd_devfreq_scale(hba
, scale_up
);
1307 trace_ufshcd_profile_clk_scaling(dev_name(hba
->dev
),
1308 (scale_up
? "up" : "down"),
1309 ktime_to_us(ktime_sub(ktime_get(), start
)), ret
);
1312 if (sched_clk_scaling_suspend_work
)
1313 queue_work(hba
->clk_scaling
.workq
,
1314 &hba
->clk_scaling
.suspend_work
);
1320 static int ufshcd_devfreq_get_dev_status(struct device
*dev
,
1321 struct devfreq_dev_status
*stat
)
1323 struct ufs_hba
*hba
= dev_get_drvdata(dev
);
1324 struct ufs_clk_scaling
*scaling
= &hba
->clk_scaling
;
1325 unsigned long flags
;
1327 if (!ufshcd_is_clkscaling_supported(hba
))
1330 memset(stat
, 0, sizeof(*stat
));
1332 spin_lock_irqsave(hba
->host
->host_lock
, flags
);
1333 if (!scaling
->window_start_t
)
1336 if (scaling
->is_busy_started
)
1337 scaling
->tot_busy_t
+= ktime_to_us(ktime_sub(ktime_get(),
1338 scaling
->busy_start_t
));
1340 stat
->total_time
= jiffies_to_usecs((long)jiffies
-
1341 (long)scaling
->window_start_t
);
1342 stat
->busy_time
= scaling
->tot_busy_t
;
1344 scaling
->window_start_t
= jiffies
;
1345 scaling
->tot_busy_t
= 0;
1347 if (hba
->outstanding_reqs
) {
1348 scaling
->busy_start_t
= ktime_get();
1349 scaling
->is_busy_started
= true;
1351 scaling
->busy_start_t
= 0;
1352 scaling
->is_busy_started
= false;
1354 spin_unlock_irqrestore(hba
->host
->host_lock
, flags
);
1358 static struct devfreq_dev_profile ufs_devfreq_profile
= {
1360 .target
= ufshcd_devfreq_target
,
1361 .get_dev_status
= ufshcd_devfreq_get_dev_status
,
1364 static void __ufshcd_suspend_clkscaling(struct ufs_hba
*hba
)
1366 unsigned long flags
;
1368 devfreq_suspend_device(hba
->devfreq
);
1369 spin_lock_irqsave(hba
->host
->host_lock
, flags
);
1370 hba
->clk_scaling
.window_start_t
= 0;
1371 spin_unlock_irqrestore(hba
->host
->host_lock
, flags
);
1374 static void ufshcd_suspend_clkscaling(struct ufs_hba
*hba
)
1376 unsigned long flags
;
1377 bool suspend
= false;
1379 if (!ufshcd_is_clkscaling_supported(hba
))
1382 spin_lock_irqsave(hba
->host
->host_lock
, flags
);
1383 if (!hba
->clk_scaling
.is_suspended
) {
1385 hba
->clk_scaling
.is_suspended
= true;
1387 spin_unlock_irqrestore(hba
->host
->host_lock
, flags
);
1390 __ufshcd_suspend_clkscaling(hba
);
1393 static void ufshcd_resume_clkscaling(struct ufs_hba
*hba
)
1395 unsigned long flags
;
1396 bool resume
= false;
1398 if (!ufshcd_is_clkscaling_supported(hba
))
1401 spin_lock_irqsave(hba
->host
->host_lock
, flags
);
1402 if (hba
->clk_scaling
.is_suspended
) {
1404 hba
->clk_scaling
.is_suspended
= false;
1406 spin_unlock_irqrestore(hba
->host
->host_lock
, flags
);
1409 devfreq_resume_device(hba
->devfreq
);
1412 static ssize_t
ufshcd_clkscale_enable_show(struct device
*dev
,
1413 struct device_attribute
*attr
, char *buf
)
1415 struct ufs_hba
*hba
= dev_get_drvdata(dev
);
1417 return snprintf(buf
, PAGE_SIZE
, "%d\n", hba
->clk_scaling
.is_allowed
);
1420 static ssize_t
ufshcd_clkscale_enable_store(struct device
*dev
,
1421 struct device_attribute
*attr
, const char *buf
, size_t count
)
1423 struct ufs_hba
*hba
= dev_get_drvdata(dev
);
1427 if (kstrtou32(buf
, 0, &value
))
1431 if (value
== hba
->clk_scaling
.is_allowed
)
1434 pm_runtime_get_sync(hba
->dev
);
1435 ufshcd_hold(hba
, false);
1437 cancel_work_sync(&hba
->clk_scaling
.suspend_work
);
1438 cancel_work_sync(&hba
->clk_scaling
.resume_work
);
1440 hba
->clk_scaling
.is_allowed
= value
;
1443 ufshcd_resume_clkscaling(hba
);
1445 ufshcd_suspend_clkscaling(hba
);
1446 err
= ufshcd_devfreq_scale(hba
, true);
1448 dev_err(hba
->dev
, "%s: failed to scale clocks up %d\n",
1452 ufshcd_release(hba
);
1453 pm_runtime_put_sync(hba
->dev
);
1458 static void ufshcd_clkscaling_init_sysfs(struct ufs_hba
*hba
)
1460 hba
->clk_scaling
.enable_attr
.show
= ufshcd_clkscale_enable_show
;
1461 hba
->clk_scaling
.enable_attr
.store
= ufshcd_clkscale_enable_store
;
1462 sysfs_attr_init(&hba
->clk_scaling
.enable_attr
.attr
);
1463 hba
->clk_scaling
.enable_attr
.attr
.name
= "clkscale_enable";
1464 hba
->clk_scaling
.enable_attr
.attr
.mode
= 0644;
1465 if (device_create_file(hba
->dev
, &hba
->clk_scaling
.enable_attr
))
1466 dev_err(hba
->dev
, "Failed to create sysfs for clkscale_enable\n");
1470 static void ufshcd_ungate_work(struct work_struct
*work
)
1473 unsigned long flags
;
1474 struct ufs_hba
*hba
= container_of(work
, struct ufs_hba
,
1475 clk_gating
.ungate_work
);
1476 bool gating_allowed
= !ufshcd_can_fake_clkgating(hba
);
1478 cancel_delayed_work_sync(&hba
->clk_gating
.gate_work
);
1480 spin_lock_irqsave(hba
->host
->host_lock
, flags
);
1481 if (hba
->clk_gating
.state
== CLKS_ON
&& gating_allowed
) {
1482 spin_unlock_irqrestore(hba
->host
->host_lock
, flags
);
1486 spin_unlock_irqrestore(hba
->host
->host_lock
, flags
);
1487 if (gating_allowed
) {
1488 ufshcd_setup_clocks(hba
, true);
1490 spin_lock_irqsave(hba
->host
->host_lock
, flags
);
1491 hba
->clk_gating
.state
= CLKS_ON
;
1492 spin_unlock_irqrestore(hba
->host
->host_lock
, flags
);
1495 /* Exit from hibern8 */
1496 if (ufshcd_can_hibern8_during_gating(hba
)) {
1497 /* Prevent gating in this path */
1498 hba
->clk_gating
.is_suspended
= true;
1499 if (ufshcd_is_link_hibern8(hba
)) {
1500 ufshcd_set_link_trans_active(hba
);
1501 ret
= ufshcd_link_hibern8_ctrl(hba
, false);
1503 ufshcd_set_link_off(hba
);
1504 dev_err(hba
->dev
, "%s: hibern8 exit failed %d\n",
1507 ufshcd_set_link_active(hba
);
1510 hba
->clk_gating
.is_suspended
= false;
1513 scsi_unblock_requests(hba
->host
);
1517 * ufshcd_hold - Enable clocks that were gated earlier due to ufshcd_release.
1518 * Also, exit from hibern8 mode and set the link as active.
1519 * @hba: per adapter instance
1520 * @async: This indicates whether caller should ungate clocks asynchronously.
1522 int ufshcd_hold(struct ufs_hba
*hba
, bool async
)
1525 unsigned long flags
;
1527 if (!ufshcd_is_clkgating_allowed(hba
))
1529 spin_lock_irqsave(hba
->host
->host_lock
, flags
);
1530 hba
->clk_gating
.active_reqs
++;
1532 if (ufshcd_eh_in_progress(hba
)) {
1533 spin_unlock_irqrestore(hba
->host
->host_lock
, flags
);
1538 switch (hba
->clk_gating
.state
) {
1542 hba
->clk_gating
.active_reqs
--;
1546 if (cancel_delayed_work(&hba
->clk_gating
.gate_work
)) {
1547 hba
->clk_gating
.state
= CLKS_ON
;
1548 trace_ufshcd_clk_gating(dev_name(hba
->dev
),
1549 hba
->clk_gating
.state
);
1553 * If we are here, it means gating work is either done or
1554 * currently running. Hence, fall through to cancel gating
1555 * work and to enable clocks.
1558 scsi_block_requests(hba
->host
);
1559 hba
->clk_gating
.state
= REQ_CLKS_ON
;
1560 trace_ufshcd_clk_gating(dev_name(hba
->dev
),
1561 hba
->clk_gating
.state
);
1562 queue_work(hba
->ufshcd_workq
, &hba
->clk_gating
.ungate_work
);
1564 * fall through to check if we should wait for this
1565 * work to be done or not.
1570 hba
->clk_gating
.active_reqs
--;
1574 spin_unlock_irqrestore(hba
->host
->host_lock
, flags
);
1575 flush_work(&hba
->clk_gating
.ungate_work
);
1576 /* Make sure state is CLKS_ON before returning */
1577 spin_lock_irqsave(hba
->host
->host_lock
, flags
);
1580 dev_err(hba
->dev
, "%s: clk gating is in invalid state %d\n",
1581 __func__
, hba
->clk_gating
.state
);
1584 spin_unlock_irqrestore(hba
->host
->host_lock
, flags
);
1588 EXPORT_SYMBOL_GPL(ufshcd_hold
);
1590 static void ufshcd_gate_work(struct work_struct
*work
)
1592 struct ufs_hba
*hba
= container_of(work
, struct ufs_hba
,
1593 clk_gating
.gate_work
.work
);
1594 bool gating_allowed
= !ufshcd_can_fake_clkgating(hba
);
1595 unsigned long flags
;
1597 spin_lock_irqsave(hba
->host
->host_lock
, flags
);
1599 * In case you are here to cancel this work the gating state
1600 * would be marked as REQ_CLKS_ON. In this case save time by
1601 * skipping the gating work and exit after changing the clock
1604 if (hba
->clk_gating
.is_suspended
||
1605 (hba
->clk_gating
.state
== REQ_CLKS_ON
)) {
1606 hba
->clk_gating
.state
= CLKS_ON
;
1607 trace_ufshcd_clk_gating(dev_name(hba
->dev
),
1608 hba
->clk_gating
.state
);
1612 if (hba
->clk_gating
.active_reqs
1613 || hba
->ufshcd_state
!= UFSHCD_STATE_OPERATIONAL
1614 || hba
->lrb_in_use
|| hba
->outstanding_tasks
1615 || hba
->active_uic_cmd
|| hba
->uic_async_done
1616 || scsi_host_in_recovery(hba
->host
))
1619 spin_unlock_irqrestore(hba
->host
->host_lock
, flags
);
1621 /* put the link into hibern8 mode before turning off clocks */
1622 if (ufshcd_can_hibern8_during_gating(hba
)) {
1623 ufshcd_set_link_trans_hibern8(hba
);
1624 if (ufshcd_link_hibern8_ctrl(hba
, true)) {
1625 spin_lock_irqsave(hba
->host
->host_lock
, flags
);
1626 hba
->clk_gating
.state
= __CLKS_ON
;
1627 spin_unlock_irqrestore(hba
->host
->host_lock
, flags
);
1628 hba
->clk_gating
.is_suspended
= true;
1629 ufshcd_reset_and_restore(hba
);
1630 spin_lock_irqsave(hba
->host
->host_lock
, flags
);
1631 hba
->clk_gating
.state
= CLKS_ON
;
1632 spin_unlock_irqrestore(hba
->host
->host_lock
, flags
);
1633 hba
->clk_gating
.is_suspended
= false;
1634 scsi_unblock_requests(hba
->host
);
1635 trace_ufshcd_clk_gating(dev_name(hba
->dev
),
1636 hba
->clk_gating
.state
);
1639 ufshcd_set_link_hibern8(hba
);
1642 if (gating_allowed
) {
1643 if (!ufshcd_is_link_active(hba
))
1644 ufshcd_setup_clocks(hba
, false);
1646 /* If link is active, device ref_clk can't be switched off */
1647 __ufshcd_setup_clocks(hba
, false, true);
1651 * In case you are here to cancel this work the gating state
1652 * would be marked as REQ_CLKS_ON. In this case keep the state
1653 * as REQ_CLKS_ON which would anyway imply that clocks are off
1654 * and a request to turn them on is pending. By doing this way,
1655 * we keep the state machine in tact and this would ultimately
1656 * prevent from doing cancel work multiple times when there are
1657 * new requests arriving before the current cancel work is done.
1659 spin_lock_irqsave(hba
->host
->host_lock
, flags
);
1660 if (hba
->clk_gating
.state
== REQ_CLKS_OFF
) {
1661 hba
->clk_gating
.state
= CLKS_OFF
;
1662 trace_ufshcd_clk_gating(dev_name(hba
->dev
),
1663 hba
->clk_gating
.state
);
1666 spin_unlock_irqrestore(hba
->host
->host_lock
, flags
);
1671 /* host lock must be held before calling this variant */
1672 static void __ufshcd_release(struct ufs_hba
*hba
)
1674 if (!ufshcd_is_clkgating_allowed(hba
))
1677 hba
->clk_gating
.active_reqs
--;
1679 if (hba
->clk_gating
.active_reqs
|| hba
->clk_gating
.is_suspended
1680 || hba
->ufshcd_state
!= UFSHCD_STATE_OPERATIONAL
1681 || hba
->lrb_in_use
|| hba
->outstanding_tasks
1682 || hba
->active_uic_cmd
|| hba
->uic_async_done
1683 || scsi_host_in_recovery(hba
->host
)
1684 || ufshcd_eh_in_progress(hba
))
1687 hba
->clk_gating
.state
= REQ_CLKS_OFF
;
1688 trace_ufshcd_clk_gating(dev_name(hba
->dev
), hba
->clk_gating
.state
);
1689 queue_delayed_work(hba
->ufshcd_workq
, &hba
->clk_gating
.gate_work
,
1690 msecs_to_jiffies(hba
->clk_gating
.delay_ms
));
1693 void ufshcd_release(struct ufs_hba
*hba
)
1695 unsigned long flags
;
1697 spin_lock_irqsave(hba
->host
->host_lock
, flags
);
1698 __ufshcd_release(hba
);
1699 spin_unlock_irqrestore(hba
->host
->host_lock
, flags
);
1701 EXPORT_SYMBOL_GPL(ufshcd_release
);
1703 static ssize_t
ufshcd_clkgate_delay_show(struct device
*dev
,
1704 struct device_attribute
*attr
, char *buf
)
1706 struct ufs_hba
*hba
= dev_get_drvdata(dev
);
1708 return snprintf(buf
, PAGE_SIZE
, "%lu\n", hba
->clk_gating
.delay_ms
);
1711 static ssize_t
ufshcd_clkgate_delay_store(struct device
*dev
,
1712 struct device_attribute
*attr
, const char *buf
, size_t count
)
1714 struct ufs_hba
*hba
= dev_get_drvdata(dev
);
1715 unsigned long flags
, value
;
1717 if (kstrtoul(buf
, 0, &value
))
1720 spin_lock_irqsave(hba
->host
->host_lock
, flags
);
1721 hba
->clk_gating
.delay_ms
= value
;
1722 spin_unlock_irqrestore(hba
->host
->host_lock
, flags
);
1726 static ssize_t
ufshcd_clkgate_enable_show(struct device
*dev
,
1727 struct device_attribute
*attr
, char *buf
)
1729 struct ufs_hba
*hba
= dev_get_drvdata(dev
);
1731 return snprintf(buf
, PAGE_SIZE
, "%d\n", hba
->clk_gating
.is_enabled
);
1734 static ssize_t
ufshcd_clkgate_enable_store(struct device
*dev
,
1735 struct device_attribute
*attr
, const char *buf
, size_t count
)
1737 struct ufs_hba
*hba
= dev_get_drvdata(dev
);
1738 unsigned long flags
;
1741 if (kstrtou32(buf
, 0, &value
))
1745 if (value
== hba
->clk_gating
.is_enabled
)
1749 ufshcd_release(hba
);
1751 spin_lock_irqsave(hba
->host
->host_lock
, flags
);
1752 hba
->clk_gating
.active_reqs
++;
1753 spin_unlock_irqrestore(hba
->host
->host_lock
, flags
);
1756 hba
->clk_gating
.is_enabled
= value
;
1761 static int ufshcd_init_clk_gating(struct ufs_hba
*hba
)
1765 if (!ufshcd_is_clkgating_allowed(hba
))
1768 hba
->ufshcd_workq
= alloc_workqueue("ufshcd_wq", WQ_HIGHPRI
, 0);
1769 if (!hba
->ufshcd_workq
) {
1774 hba
->clk_gating
.delay_ms
= LINK_H8_DELAY
;
1775 INIT_DELAYED_WORK(&hba
->clk_gating
.gate_work
, ufshcd_gate_work
);
1776 INIT_WORK(&hba
->clk_gating
.ungate_work
, ufshcd_ungate_work
);
1778 hba
->clk_gating
.is_enabled
= true;
1780 hba
->clk_gating
.delay_attr
.show
= ufshcd_clkgate_delay_show
;
1781 hba
->clk_gating
.delay_attr
.store
= ufshcd_clkgate_delay_store
;
1782 sysfs_attr_init(&hba
->clk_gating
.delay_attr
.attr
);
1783 hba
->clk_gating
.delay_attr
.attr
.name
= "clkgate_delay_ms";
1784 hba
->clk_gating
.delay_attr
.attr
.mode
= 0644;
1785 if (device_create_file(hba
->dev
, &hba
->clk_gating
.delay_attr
))
1786 dev_err(hba
->dev
, "Failed to create sysfs for clkgate_delay\n");
1788 hba
->clk_gating
.enable_attr
.show
= ufshcd_clkgate_enable_show
;
1789 hba
->clk_gating
.enable_attr
.store
= ufshcd_clkgate_enable_store
;
1790 sysfs_attr_init(&hba
->clk_gating
.enable_attr
.attr
);
1791 hba
->clk_gating
.enable_attr
.attr
.name
= "clkgate_enable";
1792 hba
->clk_gating
.enable_attr
.attr
.mode
= 0644;
1793 if (device_create_file(hba
->dev
, &hba
->clk_gating
.enable_attr
))
1794 dev_err(hba
->dev
, "Failed to create sysfs for clkgate_enable\n");
1800 static void ufshcd_exit_clk_gating(struct ufs_hba
*hba
)
1802 if (!ufshcd_is_clkgating_allowed(hba
))
1804 destroy_workqueue(hba
->ufshcd_workq
);
1805 device_remove_file(hba
->dev
, &hba
->clk_gating
.delay_attr
);
1806 device_remove_file(hba
->dev
, &hba
->clk_gating
.enable_attr
);
1809 #if defined(CONFIG_PM_DEVFREQ)
1810 /* Must be called with host lock acquired */
1811 static void ufshcd_clk_scaling_start_busy(struct ufs_hba
*hba
)
1813 bool queue_resume_work
= false;
1815 if (!ufshcd_is_clkscaling_supported(hba
))
1818 if (!hba
->clk_scaling
.active_reqs
++)
1819 queue_resume_work
= true;
1821 if (!hba
->clk_scaling
.is_allowed
|| hba
->pm_op_in_progress
)
1824 if (queue_resume_work
)
1825 queue_work(hba
->clk_scaling
.workq
,
1826 &hba
->clk_scaling
.resume_work
);
1828 if (!hba
->clk_scaling
.window_start_t
) {
1829 hba
->clk_scaling
.window_start_t
= jiffies
;
1830 hba
->clk_scaling
.tot_busy_t
= 0;
1831 hba
->clk_scaling
.is_busy_started
= false;
1834 if (!hba
->clk_scaling
.is_busy_started
) {
1835 hba
->clk_scaling
.busy_start_t
= ktime_get();
1836 hba
->clk_scaling
.is_busy_started
= true;
1840 static void ufshcd_clk_scaling_update_busy(struct ufs_hba
*hba
)
1842 struct ufs_clk_scaling
*scaling
= &hba
->clk_scaling
;
1844 if (!ufshcd_is_clkscaling_supported(hba
))
1847 if (!hba
->outstanding_reqs
&& scaling
->is_busy_started
) {
1848 scaling
->tot_busy_t
+= ktime_to_us(ktime_sub(ktime_get(),
1849 scaling
->busy_start_t
));
1850 scaling
->busy_start_t
= 0;
1851 scaling
->is_busy_started
= false;
1857 * ufshcd_send_command - Send SCSI or device management commands
1858 * @hba: per adapter instance
1859 * @task_tag: Task tag of the command
1862 void ufshcd_send_command(struct ufs_hba
*hba
, unsigned int task_tag
)
1864 hba
->lrb
[task_tag
].issue_time_stamp
= ktime_get();
1865 #if defined(CONFIG_PM_DEVFREQ)
1866 ufshcd_clk_scaling_start_busy(hba
);
1868 __set_bit(task_tag
, &hba
->outstanding_reqs
);
1869 ufshcd_writel(hba
, 1 << task_tag
, REG_UTP_TRANSFER_REQ_DOOR_BELL
);
1870 /* Make sure that doorbell is committed immediately */
1872 ufshcd_add_command_trace(hba
, task_tag
, "send");
1876 * ufshcd_copy_sense_data - Copy sense data in case of check condition
1877 * @lrb - pointer to local reference block
1879 static inline void ufshcd_copy_sense_data(struct ufshcd_lrb
*lrbp
)
1882 if (lrbp
->sense_buffer
&&
1883 ufshcd_get_rsp_upiu_data_seg_len(lrbp
->ucd_rsp_ptr
)) {
1886 len
= be16_to_cpu(lrbp
->ucd_rsp_ptr
->sr
.sense_data_len
);
1887 len_to_copy
= min_t(int, RESPONSE_UPIU_SENSE_DATA_LENGTH
, len
);
1889 memcpy(lrbp
->sense_buffer
,
1890 lrbp
->ucd_rsp_ptr
->sr
.sense_data
,
1891 min_t(int, len_to_copy
, UFSHCD_REQ_SENSE_SIZE
));
1896 * ufshcd_copy_query_response() - Copy the Query Response and the data
1898 * @hba: per adapter instance
1899 * @lrb - pointer to local reference block
1902 int ufshcd_copy_query_response(struct ufs_hba
*hba
, struct ufshcd_lrb
*lrbp
)
1904 struct ufs_query_res
*query_res
= &hba
->dev_cmd
.query
.response
;
1906 memcpy(&query_res
->upiu_res
, &lrbp
->ucd_rsp_ptr
->qr
, QUERY_OSF_SIZE
);
1908 /* Get the descriptor */
1909 if (lrbp
->ucd_rsp_ptr
->qr
.opcode
== UPIU_QUERY_OPCODE_READ_DESC
) {
1910 u8
*descp
= (u8
*)lrbp
->ucd_rsp_ptr
+
1911 GENERAL_UPIU_REQUEST_SIZE
;
1915 /* data segment length */
1916 resp_len
= be32_to_cpu(lrbp
->ucd_rsp_ptr
->header
.dword_2
) &
1917 MASK_QUERY_DATA_SEG_LEN
;
1918 buf_len
= be16_to_cpu(
1919 hba
->dev_cmd
.query
.request
.upiu_req
.length
);
1920 if (likely(buf_len
>= resp_len
)) {
1921 memcpy(hba
->dev_cmd
.query
.descriptor
, descp
, resp_len
);
1924 "%s: Response size is bigger than buffer",
1934 * ufshcd_hba_capabilities - Read controller capabilities
1935 * @hba: per adapter instance
1937 static inline void ufshcd_hba_capabilities(struct ufs_hba
*hba
)
1939 hba
->capabilities
= ufshcd_readl(hba
, REG_CONTROLLER_CAPABILITIES
);
1941 /* nutrs and nutmrs are 0 based values */
1942 hba
->nutrs
= (hba
->capabilities
& MASK_TRANSFER_REQUESTS_SLOTS
) + 1;
1944 ((hba
->capabilities
& MASK_TASK_MANAGEMENT_REQUEST_SLOTS
) >> 16) + 1;
1948 * ufshcd_ready_for_uic_cmd - Check if controller is ready
1949 * to accept UIC commands
1950 * @hba: per adapter instance
1951 * Return true on success, else false
1953 static inline bool ufshcd_ready_for_uic_cmd(struct ufs_hba
*hba
)
1955 if (ufshcd_readl(hba
, REG_CONTROLLER_STATUS
) & UIC_COMMAND_READY
)
1962 * ufshcd_get_upmcrs - Get the power mode change request status
1963 * @hba: Pointer to adapter instance
1965 * This function gets the UPMCRS field of HCS register
1966 * Returns value of UPMCRS field
1968 static inline u8
ufshcd_get_upmcrs(struct ufs_hba
*hba
, struct uic_command
*cmd
)
1970 if (hba
->quirks
& UFSHCD_QUIRK_GET_GENERRCODE_DIRECT
) {
1971 if (cmd
->command
== UIC_CMD_DME_SET
&&
1972 cmd
->argument1
== UIC_ARG_MIB(PA_PWRMODE
))
1973 return ufshcd_vops_get_unipro(hba
, 3);
1974 else if (cmd
->command
== UIC_CMD_DME_HIBER_ENTER
)
1975 return ufshcd_vops_get_unipro(hba
, 4);
1976 else if (cmd
->command
== UIC_CMD_DME_HIBER_EXIT
)
1977 return ufshcd_vops_get_unipro(hba
, 5);
1979 return (ufshcd_readl(hba
, REG_CONTROLLER_STATUS
) >> 8) & 0x7;
1981 return (ufshcd_readl(hba
, REG_CONTROLLER_STATUS
) >> 8) & 0x7;
1985 * ufshcd_dispatch_uic_cmd - Dispatch UIC commands to unipro layers
1986 * @hba: per adapter instance
1987 * @uic_cmd: UIC command
1989 * Mutex must be held.
1992 ufshcd_dispatch_uic_cmd(struct ufs_hba
*hba
, struct uic_command
*uic_cmd
)
1994 WARN_ON(hba
->active_uic_cmd
);
1996 hba
->active_uic_cmd
= uic_cmd
;
1999 ufshcd_writel(hba
, uic_cmd
->argument1
, REG_UIC_COMMAND_ARG_1
);
2000 ufshcd_writel(hba
, uic_cmd
->argument2
, REG_UIC_COMMAND_ARG_2
);
2001 ufshcd_writel(hba
, uic_cmd
->argument3
, REG_UIC_COMMAND_ARG_3
);
2004 ufshcd_writel(hba
, uic_cmd
->command
& COMMAND_OPCODE_MASK
,
/**
 * ufshcd_wait_for_uic_cmd - Wait for completion of a UIC command
 * @hba: per adapter instance
 * @uic_cmd: UIC command
 *
 * Must be called with mutex held.
 * Returns 0 only if success.
 */
static int
ufshcd_wait_for_uic_cmd(struct ufs_hba *hba, struct uic_command *uic_cmd)
{
	int ret;
	unsigned long flags;

	if (wait_for_completion_timeout(&uic_cmd->done,
					msecs_to_jiffies(UIC_CMD_TIMEOUT))) {
		switch (uic_cmd->command) {
		case UIC_CMD_DME_LINK_STARTUP:
		case UIC_CMD_DME_HIBER_ENTER:
		case UIC_CMD_DME_HIBER_EXIT:
			if (hba->quirks & UFSHCD_QUIRK_GET_GENERRCODE_DIRECT)
				ret = ufshcd_vops_get_unipro(hba,
					uic_cmd->command - UIC_CMD_DME_LINK_STARTUP);
			else
				ret = uic_cmd->argument2 & MASK_UIC_COMMAND_RESULT;
			break;
		default:
			ret = uic_cmd->argument2 & MASK_UIC_COMMAND_RESULT;
			break;
		}
	} else {
		ret = -ETIMEDOUT;
	}

	spin_lock_irqsave(hba->host->host_lock, flags);
	hba->active_uic_cmd = NULL;
	spin_unlock_irqrestore(hba->host->host_lock, flags);

	return ret;
}

/**
 * __ufshcd_send_uic_cmd - Send UIC commands and retrieve the result
 * @hba: per adapter instance
 * @uic_cmd: UIC command
 * @completion: initialize the completion only if this is set to true
 *
 * Identical to ufshcd_send_uic_cmd() except the mutex handling. Must be
 * called with mutex held and host_lock locked.
 * Returns 0 only if success.
 */
static int
__ufshcd_send_uic_cmd(struct ufs_hba *hba, struct uic_command *uic_cmd,
		      bool completion)
{
	if (!ufshcd_ready_for_uic_cmd(hba)) {
		dev_err(hba->dev,
			"Controller not ready to accept UIC commands\n");
		return -EIO;
	}

	if (completion)
		init_completion(&uic_cmd->done);

	ufshcd_dispatch_uic_cmd(hba, uic_cmd);

	return 0;
}

/**
 * ufshcd_send_uic_cmd - Send UIC commands and retrieve the result
 * @hba: per adapter instance
 * @uic_cmd: UIC command
 *
 * Returns 0 only if success.
 */
static int
ufshcd_send_uic_cmd(struct ufs_hba *hba, struct uic_command *uic_cmd)
{
	int ret;
	unsigned long flags;

	ufshcd_hold(hba, false);
	mutex_lock(&hba->uic_cmd_mutex);
	ufshcd_add_delay_before_dme_cmd(hba);

	spin_lock_irqsave(hba->host->host_lock, flags);
	ret = __ufshcd_send_uic_cmd(hba, uic_cmd, true);
	spin_unlock_irqrestore(hba->host->host_lock, flags);
	if (!ret)
		ret = ufshcd_wait_for_uic_cmd(hba, uic_cmd);

	mutex_unlock(&hba->uic_cmd_mutex);

	ufshcd_release(hba);
	return ret;
}

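/*
 * Example (illustrative sketch, not part of this driver): issuing a bare
 * DME command through ufshcd_send_uic_cmd(). The helpers further below
 * (ufshcd_dme_reset(), ufshcd_dme_enable()) follow exactly this pattern;
 * the function name here is hypothetical.
 */
static int __maybe_unused ufshcd_example_send_dme_enable(struct ufs_hba *hba)
{
	struct uic_command uic_cmd = {0};

	uic_cmd.command = UIC_CMD_DME_ENABLE;
	/* Sleeps until UCCS is raised or UIC_CMD_TIMEOUT expires */
	return ufshcd_send_uic_cmd(hba, &uic_cmd);
}
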
/**
 * ufshcd_map_sg - Map scatter-gather list to prdt
 * @lrbp - pointer to local reference block
 *
 * Returns 0 in case of success, non-zero value in case of failure
 */
static int ufshcd_map_sg(struct ufs_hba *hba, struct ufshcd_lrb *lrbp)
{
	struct ufshcd_sg_entry *prd_table;
	struct scatterlist *sg;
	struct scsi_cmnd *cmd;
	int sg_segments;
	int i, ret;
	int sector_offset = 0;
	int page_index = 0;

	cmd = lrbp->cmd;
	sg_segments = scsi_dma_map(cmd);
	if (sg_segments < 0)
		return sg_segments;

	if (sg_segments) {
		if (hba->quirks & UFSHCD_QUIRK_PRDT_BYTE_GRAN)
			lrbp->utr_descriptor_ptr->prd_table_length =
				cpu_to_le16((u16)(sg_segments *
					sizeof(struct ufshcd_sg_entry)));
		else
			lrbp->utr_descriptor_ptr->prd_table_length =
				cpu_to_le16((u16) (sg_segments));

		prd_table = (struct ufshcd_sg_entry *)lrbp->ucd_prdt_ptr;

		scsi_for_each_sg(cmd, sg, sg_segments, i) {
			prd_table[i].size =
				cpu_to_le32(((u32) sg_dma_len(sg))-1);
			prd_table[i].base_addr =
				cpu_to_le32(lower_32_bits(sg->dma_address));
			prd_table[i].upper_addr =
				cpu_to_le32(upper_32_bits(sg->dma_address));
			prd_table[i].reserved = 0;
			hba->transferred_sector += prd_table[i].size;

			ret = ufshcd_vops_crypto_engine_cfg(hba, lrbp, sg, i,
					sector_offset, page_index++);
			if (ret) {
				dev_err(hba->dev,
					"%s: failed to configure crypto engine (%d)\n",
					__func__, ret);
				return ret;
			}
			sector_offset += UFSHCI_SECTOR_SIZE / MIN_SECTOR_SIZE;
		}
	} else {
		lrbp->utr_descriptor_ptr->prd_table_length = 0;
	}

	return 0;
}

/**
 * ufshcd_enable_intr - enable interrupts
 * @hba: per adapter instance
 * @intrs: interrupt bits
 */
static void ufshcd_enable_intr(struct ufs_hba *hba, u32 intrs)
{
	u32 set = ufshcd_readl(hba, REG_INTERRUPT_ENABLE);

	if (hba->ufs_version == UFSHCI_VERSION_10) {
		u32 rw;

		rw = set & INTERRUPT_MASK_RW_VER_10;
		set = rw | ((set ^ intrs) & intrs);
	} else {
		set |= intrs;
	}

	ufshcd_writel(hba, set, REG_INTERRUPT_ENABLE);
}

/**
 * ufshcd_disable_intr - disable interrupts
 * @hba: per adapter instance
 * @intrs: interrupt bits
 */
static void ufshcd_disable_intr(struct ufs_hba *hba, u32 intrs)
{
	u32 set = ufshcd_readl(hba, REG_INTERRUPT_ENABLE);

	if (hba->ufs_version == UFSHCI_VERSION_10) {
		u32 rw;

		rw = (set & INTERRUPT_MASK_RW_VER_10) &
			~(intrs & INTERRUPT_MASK_RW_VER_10);
		set = rw | ((set & intrs) & ~INTERRUPT_MASK_RW_VER_10);
	} else {
		set &= ~intrs;
	}

	ufshcd_writel(hba, set, REG_INTERRUPT_ENABLE);
}

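/*
 * Example (illustrative sketch, not part of this driver): the mask argument
 * is a plain bit mask over REG_INTERRUPT_ENABLE, so a caller can gate a
 * single source around a critical section; ufshcd_uic_pwr_ctrl() further
 * below uses exactly this pairing for UIC_COMMAND_COMPL. The function name
 * is hypothetical.
 */
static void __maybe_unused ufshcd_example_toggle_uic_intr(struct ufs_hba *hba)
{
	ufshcd_disable_intr(hba, UIC_COMMAND_COMPL);
	/* ... poll UIC command completion status manually here ... */
	ufshcd_enable_intr(hba, UIC_COMMAND_COMPL);
}
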
/**
 * ufshcd_prepare_req_desc_hdr() - Fills the requests header
 * descriptor according to request
 * @lrbp: pointer to local reference block
 * @upiu_flags: flags required in the header
 * @cmd_dir: requests data direction
 */
static void ufshcd_prepare_req_desc_hdr(struct ufshcd_lrb *lrbp,
			u32 *upiu_flags, enum dma_data_direction cmd_dir)
{
	struct utp_transfer_req_desc *req_desc = lrbp->utr_descriptor_ptr;
	u32 data_direction;
	u32 dword_0;

	if (cmd_dir == DMA_FROM_DEVICE) {
		data_direction = UTP_DEVICE_TO_HOST;
		*upiu_flags = UPIU_CMD_FLAGS_READ;
	} else if (cmd_dir == DMA_TO_DEVICE) {
		data_direction = UTP_HOST_TO_DEVICE;
		*upiu_flags = UPIU_CMD_FLAGS_WRITE;
	} else {
		data_direction = UTP_NO_DATA_TRANSFER;
		*upiu_flags = UPIU_CMD_FLAGS_NONE;
	}

	dword_0 = data_direction | (lrbp->command_type
				<< UPIU_COMMAND_TYPE_OFFSET);
	if (lrbp->intr_cmd)
		dword_0 |= UTP_REQ_DESC_INT_CMD;

	/* Transfer request descriptor header fields */
	req_desc->header.dword_0 = cpu_to_le32(dword_0);
	/* dword_1 is reserved, hence it is set to 0 */
	req_desc->header.dword_1 = 0;
	/*
	 * assigning invalid value for command status. Controller
	 * updates OCS on command completion, with the command
	 * status
	 */
	req_desc->header.dword_2 =
		cpu_to_le32(OCS_INVALID_COMMAND_STATUS);
	/* dword_3 is reserved, hence it is set to 0 */
	req_desc->header.dword_3 = 0;

	req_desc->prd_table_length = 0;
}

/**
 * ufshcd_prepare_utp_scsi_cmd_upiu() - fills the utp_transfer_req_desc
 * for SCSI commands
 * @lrbp - local reference block pointer
 * @upiu_flags - flags
 */
static
void ufshcd_prepare_utp_scsi_cmd_upiu(struct ufshcd_lrb *lrbp, u32 upiu_flags)
{
	struct utp_upiu_req *ucd_req_ptr = lrbp->ucd_req_ptr;
	unsigned short cdb_len;

	/* command descriptor fields */
	ucd_req_ptr->header.dword_0 = UPIU_HEADER_DWORD(
				UPIU_TRANSACTION_COMMAND, upiu_flags,
				lrbp->lun, lrbp->task_tag);
	ucd_req_ptr->header.dword_1 = UPIU_HEADER_DWORD(
				UPIU_COMMAND_SET_TYPE_SCSI, 0, 0, 0);

	/* Total EHS length and Data segment length will be zero */
	ucd_req_ptr->header.dword_2 = 0;

	ucd_req_ptr->sc.exp_data_transfer_len =
		cpu_to_be32(lrbp->cmd->sdb.length);

	cdb_len = min_t(unsigned short, lrbp->cmd->cmd_len, MAX_CDB_SIZE);
	memset(ucd_req_ptr->sc.cdb, 0, MAX_CDB_SIZE);
	memcpy(ucd_req_ptr->sc.cdb, lrbp->cmd->cmnd, cdb_len);

	memset(lrbp->ucd_rsp_ptr, 0, sizeof(struct utp_upiu_rsp));
}

/**
 * ufshcd_prepare_utp_query_req_upiu() - fills the utp_transfer_req_desc
 * for query requests
 * @hba: UFS hba
 * @lrbp: local reference block pointer
 * @upiu_flags: flags
 */
static void ufshcd_prepare_utp_query_req_upiu(struct ufs_hba *hba,
				struct ufshcd_lrb *lrbp, u32 upiu_flags)
{
	struct utp_upiu_req *ucd_req_ptr = lrbp->ucd_req_ptr;
	struct ufs_query *query = &hba->dev_cmd.query;
	u16 len = be16_to_cpu(query->request.upiu_req.length);
	u8 *descp = (u8 *)lrbp->ucd_req_ptr + GENERAL_UPIU_REQUEST_SIZE;

	/* Query request header */
	ucd_req_ptr->header.dword_0 = UPIU_HEADER_DWORD(
			UPIU_TRANSACTION_QUERY_REQ, upiu_flags,
			lrbp->lun, lrbp->task_tag);
	ucd_req_ptr->header.dword_1 = UPIU_HEADER_DWORD(
			0, query->request.query_func, 0, 0);

	if (query->request.upiu_req.opcode == UPIU_QUERY_OPCODE_READ_DESC)
		len = 0;

	/* Data segment length only need for WRITE_DESC */
	if (query->request.upiu_req.opcode == UPIU_QUERY_OPCODE_WRITE_DESC)
		ucd_req_ptr->header.dword_2 =
			UPIU_HEADER_DWORD(0, 0, (len >> 8), (u8)len);
	else
		ucd_req_ptr->header.dword_2 = 0;

	/* Copy the Query Request buffer as is */
	memcpy(&ucd_req_ptr->qr, &query->request.upiu_req,
			QUERY_OSF_SIZE);

	/* Copy the Descriptor */
	if (query->request.upiu_req.opcode == UPIU_QUERY_OPCODE_WRITE_DESC)
		memcpy(descp, query->descriptor, len);

	memset(lrbp->ucd_rsp_ptr, 0, sizeof(struct utp_upiu_rsp));
}

static inline void ufshcd_prepare_utp_nop_upiu(struct ufshcd_lrb *lrbp)
{
	struct utp_upiu_req *ucd_req_ptr = lrbp->ucd_req_ptr;

	memset(ucd_req_ptr, 0, sizeof(struct utp_upiu_req));

	/* command descriptor fields */
	ucd_req_ptr->header.dword_0 =
		UPIU_HEADER_DWORD(
			UPIU_TRANSACTION_NOP_OUT, 0, 0, lrbp->task_tag);
	/* clear rest of the fields of basic header */
	ucd_req_ptr->header.dword_1 = 0;
	ucd_req_ptr->header.dword_2 = 0;

	memset(lrbp->ucd_rsp_ptr, 0, sizeof(struct utp_upiu_rsp));
}

/**
 * ufshcd_comp_devman_upiu - UFS Protocol Information Unit(UPIU)
 *			     for Device Management Purposes
 * @hba - per adapter instance
 * @lrb - pointer to local reference block
 */
static int ufshcd_comp_devman_upiu(struct ufs_hba *hba, struct ufshcd_lrb *lrbp)
{
	u32 upiu_flags;
	int ret = 0;

	if ((hba->ufs_version == UFSHCI_VERSION_10) ||
	    (hba->ufs_version == UFSHCI_VERSION_11))
		lrbp->command_type = UTP_CMD_TYPE_DEV_MANAGE;
	else
		lrbp->command_type = UTP_CMD_TYPE_UFS_STORAGE;

	ufshcd_prepare_req_desc_hdr(lrbp, &upiu_flags, DMA_NONE);
	if (hba->dev_cmd.type == DEV_CMD_TYPE_QUERY)
		ufshcd_prepare_utp_query_req_upiu(hba, lrbp, upiu_flags);
	else if (hba->dev_cmd.type == DEV_CMD_TYPE_NOP)
		ufshcd_prepare_utp_nop_upiu(lrbp);
	else
		ret = -EINVAL;

	return ret;
}

/**
 * ufshcd_comp_scsi_upiu - UFS Protocol Information Unit(UPIU)
 *			   for SCSI Purposes
 * @hba - per adapter instance
 * @lrb - pointer to local reference block
 */
static int ufshcd_comp_scsi_upiu(struct ufs_hba *hba, struct ufshcd_lrb *lrbp)
{
	u32 upiu_flags;
	int ret = 0;

	if ((hba->ufs_version == UFSHCI_VERSION_10) ||
	    (hba->ufs_version == UFSHCI_VERSION_11))
		lrbp->command_type = UTP_CMD_TYPE_SCSI;
	else
		lrbp->command_type = UTP_CMD_TYPE_UFS_STORAGE;

	if (likely(lrbp->cmd)) {
		ufshcd_prepare_req_desc_hdr(lrbp, &upiu_flags,
						lrbp->cmd->sc_data_direction);
		ufshcd_prepare_utp_scsi_cmd_upiu(lrbp, upiu_flags);
	} else {
		ret = -EINVAL;
	}

	return ret;
}

/**
 * ufshcd_scsi_to_upiu_lun - maps scsi LUN to UPIU LUN
 * @scsi_lun: scsi LUN id
 *
 * Returns UPIU LUN id
 */
static inline u8 ufshcd_scsi_to_upiu_lun(unsigned int scsi_lun)
{
	if (scsi_is_wlun(scsi_lun))
		return (scsi_lun & UFS_UPIU_MAX_UNIT_NUM_ID)
			| UFS_UPIU_WLUN_ID;
	else
		return scsi_lun & UFS_UPIU_MAX_UNIT_NUM_ID;
}

static inline unsigned int ufshcd_get_scsi_lun(struct scsi_cmnd *cmd)
{
	if (cmd->cmnd[0] == SECURITY_PROTOCOL_IN ||
	    cmd->cmnd[0] == SECURITY_PROTOCOL_OUT)
		return (SCSI_W_LUN_BASE |
			(UFS_UPIU_RPMB_WLUN & UFS_UPIU_MAX_UNIT_NUM_ID));

	return cmd->device->lun;
}

/**
 * ufshcd_upiu_wlun_to_scsi_wlun - maps UPIU W-LUN id to SCSI W-LUN ID
 * @upiu_wlun_id: UPIU W-LUN id
 *
 * Returns SCSI W-LUN id
 */
static inline u16 ufshcd_upiu_wlun_to_scsi_wlun(u8 upiu_wlun_id)
{
	return (upiu_wlun_id & ~UFS_UPIU_WLUN_ID) | SCSI_W_LUN_BASE;
}

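/*
 * Example (illustrative sketch, not part of this driver): round-trip of the
 * RPMB well-known LUN through the two helpers above. With the usual
 * definitions (SCSI_W_LUN_BASE 0xC100, UFS_UPIU_WLUN_ID 0x80,
 * UFS_UPIU_RPMB_WLUN 0xC4), SCSI W-LUN 0xC144 maps to UPIU LUN 0xC4 and
 * back again. The function name is hypothetical.
 */
static void __maybe_unused ufshcd_example_wlun_round_trip(void)
{
	unsigned int scsi_lun = SCSI_W_LUN_BASE |
			(UFS_UPIU_RPMB_WLUN & UFS_UPIU_MAX_UNIT_NUM_ID);
	u8 upiu_lun = ufshcd_scsi_to_upiu_lun(scsi_lun);

	WARN_ON(upiu_lun != UFS_UPIU_RPMB_WLUN);
	WARN_ON(ufshcd_upiu_wlun_to_scsi_wlun(upiu_lun) != scsi_lun);
}
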
/**
 * ufshcd_queuecommand - main entry point for SCSI requests
 * @host: SCSI host pointer
 * @cmd: command from SCSI Midlayer
 *
 * Returns 0 for success, non-zero in case of failure
 */
static int ufshcd_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *cmd)
{
	struct ufshcd_lrb *lrbp;
	struct ufs_hba *hba;
	unsigned long flags;
	int tag;
	int err = 0;
	unsigned int scsi_lun;

	hba = shost_priv(host);

	tag = cmd->request->tag;
	if (!ufshcd_valid_tag(hba, tag)) {
		dev_err(hba->dev,
			"%s: invalid command tag %d: cmd=0x%p, cmd->request=0x%p",
			__func__, tag, cmd, cmd->request);
		BUG();
	}

	if (!down_read_trylock(&hba->clk_scaling_lock))
		return SCSI_MLQUEUE_HOST_BUSY;

	if ((ufs_shutdown_state == 1) && (cmd->cmnd[0] == START_STOP)) {
		scsi_block_requests(hba->host);
		cancel_work_sync(&hba->clk_gating.ungate_work);
	}

	spin_lock_irqsave(hba->host->host_lock, flags);
	switch (hba->ufshcd_state) {
	case UFSHCD_STATE_OPERATIONAL:
		break;
	case UFSHCD_STATE_EH_SCHEDULED:
	case UFSHCD_STATE_RESET:
		err = SCSI_MLQUEUE_HOST_BUSY;
		goto out_unlock;
	case UFSHCD_STATE_ERROR:
		set_host_byte(cmd, DID_ERROR);
		cmd->scsi_done(cmd);
		goto out_unlock;
	default:
		dev_WARN_ONCE(hba->dev, 1, "%s: invalid state %d\n",
				__func__, hba->ufshcd_state);
		set_host_byte(cmd, DID_BAD_TARGET);
		cmd->scsi_done(cmd);
		goto out_unlock;
	}

	/* if error handling is in progress, don't issue commands */
	if (ufshcd_eh_in_progress(hba)) {
		set_host_byte(cmd, DID_ERROR);
		cmd->scsi_done(cmd);
		goto out_unlock;
	}
	spin_unlock_irqrestore(hba->host->host_lock, flags);

	hba->req_abort_count = 0;

	/* acquire the tag to make sure device cmds don't use it */
	if (test_and_set_bit_lock(tag, &hba->lrb_in_use)) {
		/*
		 * Dev manage command in progress, requeue the command.
		 * Requeuing the command helps in cases where the request *may*
		 * find different tag instead of waiting for dev manage command
		 * completion.
		 */
		err = SCSI_MLQUEUE_HOST_BUSY;
		goto out;
	}

	err = ufshcd_hold(hba, true);
	if (err) {
		err = SCSI_MLQUEUE_HOST_BUSY;
		clear_bit_unlock(tag, &hba->lrb_in_use);
		goto out;
	}
	WARN_ON(hba->clk_gating.state != CLKS_ON);

	lrbp = &hba->lrb[tag];

	WARN_ON(lrbp->cmd);
	lrbp->cmd = cmd;
	lrbp->sense_bufflen = UFSHCD_REQ_SENSE_SIZE;
	lrbp->sense_buffer = cmd->sense_buffer;
	lrbp->task_tag = tag;

	scsi_lun = ufshcd_get_scsi_lun(cmd);
	lrbp->lun = ufshcd_scsi_to_upiu_lun(scsi_lun);
	lrbp->intr_cmd = !ufshcd_is_intr_aggr_allowed(hba) ? true : false;
	lrbp->req_abort_skip = false;

	ufshcd_comp_scsi_upiu(hba, lrbp);

	err = ufshcd_map_sg(hba, lrbp);
	if (err) {
		lrbp->cmd = NULL;
		clear_bit_unlock(tag, &hba->lrb_in_use);
		goto out;
	}

	/* Make sure descriptors are ready before ringing the doorbell */
	wmb();

	/* issue command to the controller */
	spin_lock_irqsave(hba->host->host_lock, flags);
	if (hba->vops && hba->vops->set_nexus_t_xfer_req)
		hba->vops->set_nexus_t_xfer_req(hba, tag, lrbp->cmd);
#ifdef CONFIG_SCSI_UFS_CMD_LOGGING
	exynos_ufs_cmd_log_start(hba, cmd);
#endif
	ufshcd_send_command(hba, tag);

	if (hba->monitor.flag & UFSHCD_MONITOR_LEVEL1)
		dev_info(hba->dev, "IO issued(%d)\n", tag);
out_unlock:
	spin_unlock_irqrestore(hba->host->host_lock, flags);
out:
	up_read(&hba->clk_scaling_lock);
	return err;
}

static int ufshcd_compose_dev_cmd(struct ufs_hba *hba,
		struct ufshcd_lrb *lrbp, enum dev_cmd_type cmd_type, int tag)
{
	lrbp->cmd = NULL;
	lrbp->sense_bufflen = 0;
	lrbp->sense_buffer = NULL;
	lrbp->task_tag = tag;
	lrbp->lun = 0; /* device management cmd is not specific to any LUN */
	lrbp->intr_cmd = true; /* No interrupt aggregation */
	hba->dev_cmd.type = cmd_type;

	return ufshcd_comp_devman_upiu(hba, lrbp);
}

static int
ufshcd_clear_cmd(struct ufs_hba *hba, int tag)
{
	int err = 0;
	unsigned long flags;
	u32 mask = 1 << tag;

	/* clear outstanding transaction before retry */
	spin_lock_irqsave(hba->host->host_lock, flags);
	ufshcd_utrl_clear(hba, tag);
	spin_unlock_irqrestore(hba->host->host_lock, flags);

	/*
	 * wait for h/w to clear corresponding bit in door-bell.
	 * max. wait is 1 sec.
	 */
	err = ufshcd_wait_for_register(hba,
			REG_UTP_TRANSFER_REQ_DOOR_BELL,
			mask, ~mask, 1000, 1000, true);

	return err;
}

static int
ufshcd_check_query_response(struct ufs_hba *hba, struct ufshcd_lrb *lrbp)
{
	struct ufs_query_res *query_res = &hba->dev_cmd.query.response;

	/* Get the UPIU response */
	query_res->response = ufshcd_get_rsp_upiu_result(lrbp->ucd_rsp_ptr) >>
				UPIU_RSP_CODE_OFFSET;
	return query_res->response;
}

/**
 * ufshcd_dev_cmd_completion() - handles device management command responses
 * @hba: per adapter instance
 * @lrbp: pointer to local reference block
 */
static int
ufshcd_dev_cmd_completion(struct ufs_hba *hba, struct ufshcd_lrb *lrbp)
{
	int resp;
	int err = 0;

	hba->ufs_stats.last_hibern8_exit_tstamp = ktime_set(0, 0);
	resp = ufshcd_get_req_rsp(lrbp->ucd_rsp_ptr);

	switch (resp) {
	case UPIU_TRANSACTION_NOP_IN:
		if (hba->dev_cmd.type != DEV_CMD_TYPE_NOP) {
			err = -EINVAL;
			dev_err(hba->dev, "%s: unexpected response %x\n",
					__func__, resp);
		}
		break;
	case UPIU_TRANSACTION_QUERY_RSP:
		err = ufshcd_check_query_response(hba, lrbp);
		if (!err)
			err = ufshcd_copy_query_response(hba, lrbp);
		break;
	case UPIU_TRANSACTION_REJECT_UPIU:
		/* TODO: handle Reject UPIU Response */
		err = -EPERM;
		dev_err(hba->dev, "%s: Reject UPIU not fully implemented\n",
				__func__);
		break;
	default:
		err = -EINVAL;
		dev_err(hba->dev, "%s: Invalid device management cmd response: %x\n",
				__func__, resp);
		break;
	}

	return err;
}

static int ufshcd_wait_for_dev_cmd(struct ufs_hba *hba,
		struct ufshcd_lrb *lrbp, int max_timeout)
{
	int err = 0;
	unsigned long time_left;
	unsigned long flags;

	time_left = wait_for_completion_timeout(hba->dev_cmd.complete,
			msecs_to_jiffies(max_timeout));

	/* Make sure descriptors are ready before ringing the doorbell */
	wmb();
	spin_lock_irqsave(hba->host->host_lock, flags);
	hba->dev_cmd.complete = NULL;
	if (likely(time_left)) {
		err = ufshcd_get_tr_ocs(lrbp);
		if (!err)
			err = ufshcd_dev_cmd_completion(hba, lrbp);
	}
	spin_unlock_irqrestore(hba->host->host_lock, flags);

	if (!time_left) {
		err = -ETIMEDOUT;
		dev_dbg(hba->dev, "%s: dev_cmd request timedout, tag %d\n",
			__func__, lrbp->task_tag);
		if (!ufshcd_clear_cmd(hba, lrbp->task_tag))
			/* successfully cleared the command, retry if needed */
			err = -EAGAIN;
		/*
		 * in case of an error, after clearing the doorbell,
		 * we also need to clear the outstanding_request
		 * field in hba
		 */
		ufshcd_outstanding_req_clear(hba, lrbp->task_tag);
	}

	return err;
}

/**
 * ufshcd_get_dev_cmd_tag - Get device management command tag
 * @hba: per-adapter instance
 * @tag_out: pointer to variable with available slot value
 *
 * Get a free slot and lock it until device management command
 * completes.
 *
 * Returns false if free slot is unavailable for locking, else
 * return true with tag value in @tag_out.
 */
static bool ufshcd_get_dev_cmd_tag(struct ufs_hba *hba, int *tag_out)
{
	int tag;
	bool ret = false;
	unsigned long tmp;

	if (!tag_out)
		goto out;

	do {
		tmp = ~hba->lrb_in_use;
		tag = find_last_bit(&tmp, hba->nutrs);
		if (tag >= hba->nutrs)
			goto out;
	} while (test_and_set_bit_lock(tag, &hba->lrb_in_use));

	*tag_out = tag;
	ret = true;
out:
	return ret;
}

static inline void ufshcd_put_dev_cmd_tag(struct ufs_hba *hba, int tag)
{
	clear_bit_unlock(tag, &hba->lrb_in_use);
}

/**
 * ufshcd_exec_dev_cmd - API for sending device management requests
 * @hba - UFS hba
 * @cmd_type - specifies the type (NOP, Query...)
 * @timeout - timeout in milliseconds
 *
 * NOTE: Since there is only one available tag for device management commands,
 * it is expected you hold the hba->dev_cmd.lock mutex.
 */
static int ufshcd_exec_dev_cmd(struct ufs_hba *hba,
		enum dev_cmd_type cmd_type, int timeout)
{
	struct ufshcd_lrb *lrbp;
	int err;
	int tag;
	struct completion wait;
	unsigned long flags;

	if (!ufshcd_is_link_active(hba)) {
		flush_work(&hba->clk_gating.ungate_work);
		if (!ufshcd_is_link_active(hba))
			return -EPERM;
	}
	down_read(&hba->clk_scaling_lock);

	/*
	 * Get free slot, sleep if slots are unavailable.
	 * Even though we use wait_event() which sleeps indefinitely,
	 * the maximum wait time is bounded by SCSI request timeout.
	 */
	wait_event(hba->dev_cmd.tag_wq, ufshcd_get_dev_cmd_tag(hba, &tag));

	init_completion(&wait);
	lrbp = &hba->lrb[tag];
	WARN_ON(lrbp->cmd);
	err = ufshcd_compose_dev_cmd(hba, lrbp, cmd_type, tag);
	if (unlikely(err))
		goto out_put_tag;

	hba->dev_cmd.complete = &wait;

	/* Make sure descriptors are ready before ringing the doorbell */
	wmb();
	spin_lock_irqsave(hba->host->host_lock, flags);
	if (hba->vops && hba->vops->set_nexus_t_xfer_req)
		hba->vops->set_nexus_t_xfer_req(hba, tag, lrbp->cmd);
	ufshcd_send_command(hba, tag);
	spin_unlock_irqrestore(hba->host->host_lock, flags);

	err = ufshcd_wait_for_dev_cmd(hba, lrbp, timeout);

out_put_tag:
	ufshcd_put_dev_cmd_tag(hba, tag);
	wake_up(&hba->dev_cmd.tag_wq);
	up_read(&hba->clk_scaling_lock);
	return err;
}

/**
 * ufshcd_init_query() - init the query response and request parameters
 * @hba: per-adapter instance
 * @request: address of the request pointer to be initialized
 * @response: address of the response pointer to be initialized
 * @opcode: operation to perform
 * @idn: flag idn to access
 * @index: LU number to access
 * @selector: query/flag/descriptor further identification
 */
static inline void ufshcd_init_query(struct ufs_hba *hba,
		struct ufs_query_req **request, struct ufs_query_res **response,
		enum query_opcode opcode, u8 idn, u8 index, u8 selector)
{
	*request = &hba->dev_cmd.query.request;
	*response = &hba->dev_cmd.query.response;
	memset(*request, 0, sizeof(struct ufs_query_req));
	memset(*response, 0, sizeof(struct ufs_query_res));
	(*request)->upiu_req.opcode = opcode;
	(*request)->upiu_req.idn = idn;
	(*request)->upiu_req.index = index;
	(*request)->upiu_req.selector = selector;
}

static int ufshcd_query_flag_retry(struct ufs_hba *hba,
	enum query_opcode opcode, enum flag_idn idn, bool *flag_res)
{
	int ret;
	int retries;

	for (retries = 0; retries < QUERY_REQ_RETRIES; retries++) {
		ret = ufshcd_query_flag(hba, opcode, idn, flag_res);
		if (ret)
			dev_dbg(hba->dev,
				"%s: failed with error %d, retries %d\n",
				__func__, ret, retries);
		else
			break;
	}

	if (ret)
		dev_err(hba->dev,
			"%s: query flag, opcode %d, idn %d, failed with error %d after %d retries\n",
			__func__, opcode, idn, ret, retries);
	return ret;
}

/**
 * ufshcd_query_flag() - API function for sending flag query requests
 * @hba: per-adapter instance
 * @opcode: flag query to perform
 * @idn: flag idn to access
 * @flag_res: the flag value after the query request completes
 *
 * Returns 0 for success, non-zero in case of failure
 */
int ufshcd_query_flag(struct ufs_hba *hba, enum query_opcode opcode,
			enum flag_idn idn, bool *flag_res)
{
	struct ufs_query_req *request = NULL;
	struct ufs_query_res *response = NULL;
	int err, index = 0, selector = 0;
	int timeout = QUERY_REQ_TIMEOUT;

	BUG_ON(!hba);

	ufshcd_hold(hba, false);
	mutex_lock(&hba->dev_cmd.lock);
	ufshcd_init_query(hba, &request, &response, opcode, idn, index,
			selector);

	switch (opcode) {
	case UPIU_QUERY_OPCODE_SET_FLAG:
	case UPIU_QUERY_OPCODE_CLEAR_FLAG:
	case UPIU_QUERY_OPCODE_TOGGLE_FLAG:
		request->query_func = UPIU_QUERY_FUNC_STANDARD_WRITE_REQUEST;
		break;
	case UPIU_QUERY_OPCODE_READ_FLAG:
		request->query_func = UPIU_QUERY_FUNC_STANDARD_READ_REQUEST;
		if (!flag_res) {
			/* No dummy reads */
			dev_err(hba->dev, "%s: Invalid argument for read request\n",
					__func__);
			err = -EINVAL;
			goto out_unlock;
		}
		break;
	default:
		dev_err(hba->dev,
			"%s: Expected query flag opcode but got = %d\n",
			__func__, opcode);
		err = -EINVAL;
		goto out_unlock;
	}

	if (idn == QUERY_FLAG_IDN_FDEVICEINIT)
		timeout = QUERY_FDEVICEINIT_REQ_TIMEOUT;

	err = ufshcd_exec_dev_cmd(hba, DEV_CMD_TYPE_QUERY, timeout);

	if (err) {
		dev_err(hba->dev,
			"%s: Sending flag query for idn %d failed, err = %d\n",
			__func__, idn, err);
		goto out_unlock;
	}

	if (flag_res)
		*flag_res = (be32_to_cpu(response->upiu_res.value) &
				MASK_QUERY_UPIU_FLAG_LOC) & 0x1;

out_unlock:
	mutex_unlock(&hba->dev_cmd.lock);
	ufshcd_release(hba);
	return err;
}

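/*
 * Example (illustrative sketch, not part of this driver): reading the
 * fDeviceInit flag through the retry wrapper above; this is the same calling
 * convention ufshcd_complete_dev_init() uses later in this file. The
 * function name is hypothetical.
 */
static int __maybe_unused ufshcd_example_read_fdeviceinit(struct ufs_hba *hba)
{
	bool flag_res = false;
	int err;

	err = ufshcd_query_flag_retry(hba, UPIU_QUERY_OPCODE_READ_FLAG,
				      QUERY_FLAG_IDN_FDEVICEINIT, &flag_res);
	if (!err)
		dev_dbg(hba->dev, "fDeviceInit = %d\n", flag_res);
	return err;
}
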
/**
 * ufshcd_query_attr - API function for sending attribute requests
 * @hba: per-adapter instance
 * @opcode: attribute opcode
 * @idn: attribute idn to access
 * @index: index field
 * @selector: selector field
 * @attr_val: the attribute value after the query request completes
 *
 * Returns 0 for success, non-zero in case of failure
 */
static int ufshcd_query_attr(struct ufs_hba *hba, enum query_opcode opcode,
			enum attr_idn idn, u8 index, u8 selector, u32 *attr_val)
{
	struct ufs_query_req *request = NULL;
	struct ufs_query_res *response = NULL;
	int err;

	BUG_ON(!hba);

	ufshcd_hold(hba, false);
	if (!attr_val) {
		dev_err(hba->dev, "%s: attribute value required for opcode 0x%x\n",
				__func__, opcode);
		err = -EINVAL;
		goto out;
	}

	mutex_lock(&hba->dev_cmd.lock);
	ufshcd_init_query(hba, &request, &response, opcode, idn, index,
			selector);

	switch (opcode) {
	case UPIU_QUERY_OPCODE_WRITE_ATTR:
		request->query_func = UPIU_QUERY_FUNC_STANDARD_WRITE_REQUEST;
		request->upiu_req.value = cpu_to_be32(*attr_val);
		break;
	case UPIU_QUERY_OPCODE_READ_ATTR:
		request->query_func = UPIU_QUERY_FUNC_STANDARD_READ_REQUEST;
		break;
	default:
		dev_err(hba->dev, "%s: Expected query attr opcode but got = 0x%.2x\n",
				__func__, opcode);
		err = -EINVAL;
		goto out_unlock;
	}

	err = ufshcd_exec_dev_cmd(hba, DEV_CMD_TYPE_QUERY, QUERY_REQ_TIMEOUT);

	if (err) {
		dev_err(hba->dev, "%s: opcode 0x%.2x for idn %d failed, index %d, err = %d\n",
				__func__, opcode, idn, index, err);
		goto out_unlock;
	}

	*attr_val = be32_to_cpu(response->upiu_res.value);

out_unlock:
	mutex_unlock(&hba->dev_cmd.lock);
out:
	ufshcd_release(hba);
	return err;
}

/**
 * ufshcd_query_attr_retry() - API function for sending query
 * attribute with retries
 * @hba: per-adapter instance
 * @opcode: attribute opcode
 * @idn: attribute idn to access
 * @index: index field
 * @selector: selector field
 * @attr_val: the attribute value after the query request completes
 *
 * Returns 0 for success, non-zero in case of failure
 */
static int ufshcd_query_attr_retry(struct ufs_hba *hba,
	enum query_opcode opcode, enum attr_idn idn, u8 index, u8 selector,
	u32 *attr_val)
{
	int ret = 0;
	int retries;

	for (retries = QUERY_REQ_RETRIES; retries > 0; retries--) {
		ret = ufshcd_query_attr(hba, opcode, idn, index,
						selector, attr_val);
		if (ret)
			dev_dbg(hba->dev, "%s: failed with error %d, retries %d\n",
				__func__, ret, retries);
		else
			break;
	}

	if (ret)
		dev_err(hba->dev,
			"%s: query attribute, idn %d, failed with error %d after %d retries\n",
			__func__, idn, ret, QUERY_REQ_RETRIES);
	return ret;
}

static int __ufshcd_query_descriptor(struct ufs_hba *hba,
			enum query_opcode opcode, enum desc_idn idn, u8 index,
			u8 selector, u8 *desc_buf, int *buf_len)
{
	struct ufs_query_req *request = NULL;
	struct ufs_query_res *response = NULL;
	int err;

	BUG_ON(!hba);

	ufshcd_hold(hba, false);
	if (!desc_buf) {
		dev_err(hba->dev, "%s: descriptor buffer required for opcode 0x%x\n",
				__func__, opcode);
		err = -EINVAL;
		goto out;
	}

	if (*buf_len < QUERY_DESC_MIN_SIZE || *buf_len > QUERY_DESC_MAX_SIZE) {
		dev_err(hba->dev, "%s: descriptor buffer size (%d) is out of range\n",
				__func__, *buf_len);
		err = -EINVAL;
		goto out;
	}

	mutex_lock(&hba->dev_cmd.lock);
	ufshcd_init_query(hba, &request, &response, opcode, idn, index,
			selector);
	hba->dev_cmd.query.descriptor = desc_buf;
	request->upiu_req.length = cpu_to_be16(*buf_len);

	switch (opcode) {
	case UPIU_QUERY_OPCODE_WRITE_DESC:
		request->query_func = UPIU_QUERY_FUNC_STANDARD_WRITE_REQUEST;
		break;
	case UPIU_QUERY_OPCODE_READ_DESC:
		request->query_func = UPIU_QUERY_FUNC_STANDARD_READ_REQUEST;
		break;
	default:
		dev_err(hba->dev,
			"%s: Expected query descriptor opcode but got = 0x%.2x\n",
			__func__, opcode);
		err = -EINVAL;
		goto out_unlock;
	}

	err = ufshcd_exec_dev_cmd(hba, DEV_CMD_TYPE_QUERY, QUERY_REQ_TIMEOUT);

	if (err) {
		dev_err(hba->dev, "%s: opcode 0x%.2x for idn %d failed, index %d, err = %d\n",
				__func__, opcode, idn, index, err);
		goto out_unlock;
	}

	hba->dev_cmd.query.descriptor = NULL;
	*buf_len = be16_to_cpu(response->upiu_res.length);

out_unlock:
	mutex_unlock(&hba->dev_cmd.lock);
out:
	ufshcd_release(hba);
	return err;
}

/**
 * ufshcd_query_descriptor_retry - API function for sending descriptor
 * requests
 * @hba: per-adapter instance
 * @opcode: attribute opcode
 * @idn: attribute idn to access
 * @index: index field
 * @selector: selector field
 * @desc_buf: the buffer that contains the descriptor
 * @buf_len: length parameter passed to the device
 *
 * Returns 0 for success, non-zero in case of failure.
 * The buf_len parameter will contain, on return, the length parameter
 * received on the response.
 */
static int ufshcd_query_descriptor_retry(struct ufs_hba *hba,
					 enum query_opcode opcode,
					 enum desc_idn idn, u8 index,
					 u8 selector,
					 u8 *desc_buf, int *buf_len)
{
	int err;
	int retries;

	for (retries = QUERY_REQ_RETRIES; retries > 0; retries--) {
		err = __ufshcd_query_descriptor(hba, opcode, idn, index,
						selector, desc_buf, buf_len);
		if (!err || err == -EINVAL)
			break;
	}

	return err;
}

/**
 * ufshcd_read_desc_length - read the specified descriptor length from header
 * @hba: Pointer to adapter instance
 * @desc_id: descriptor idn value
 * @desc_index: descriptor index
 * @desc_length: pointer to variable to read the length of descriptor
 *
 * Return 0 in case of success, non-zero otherwise
 */
static int ufshcd_read_desc_length(struct ufs_hba *hba,
	enum desc_idn desc_id,
	int desc_index,
	int *desc_length)
{
	int ret;
	u8 header[QUERY_DESC_HDR_SIZE];
	int header_len = QUERY_DESC_HDR_SIZE;

	if (desc_id >= QUERY_DESC_IDN_MAX)
		return -EINVAL;

	ret = ufshcd_query_descriptor_retry(hba, UPIU_QUERY_OPCODE_READ_DESC,
					desc_id, desc_index, 0, header,
					&header_len);

	if (ret) {
		dev_err(hba->dev, "%s: Failed to get descriptor header id %d",
			__func__, desc_id);
		return ret;
	} else if (desc_id != header[QUERY_DESC_DESC_TYPE_OFFSET]) {
		dev_warn(hba->dev, "%s: descriptor header id %d and desc_id %d mismatch",
			__func__, header[QUERY_DESC_DESC_TYPE_OFFSET],
			desc_id);
		ret = -EINVAL;
	}

	*desc_length = header[QUERY_DESC_LENGTH_OFFSET];
	return ret;
}

/**
 * ufshcd_map_desc_id_to_length - map descriptor IDN to its length
 * @hba: Pointer to adapter instance
 * @desc_id: descriptor idn value
 * @desc_len: mapped desc length (out)
 *
 * Return 0 in case of success, non-zero otherwise
 */
int ufshcd_map_desc_id_to_length(struct ufs_hba *hba,
	enum desc_idn desc_id, int *desc_len)
{
	switch (desc_id) {
	case QUERY_DESC_IDN_DEVICE:
		*desc_len = hba->desc_size.dev_desc;
		break;
	case QUERY_DESC_IDN_POWER:
		*desc_len = hba->desc_size.pwr_desc;
		break;
	case QUERY_DESC_IDN_GEOMETRY:
		*desc_len = hba->desc_size.geom_desc;
		break;
	case QUERY_DESC_IDN_CONFIGURATION:
		*desc_len = hba->desc_size.conf_desc;
		break;
	case QUERY_DESC_IDN_UNIT:
		*desc_len = hba->desc_size.unit_desc;
		break;
	case QUERY_DESC_IDN_INTERCONNECT:
		*desc_len = hba->desc_size.interc_desc;
		break;
	case QUERY_DESC_IDN_STRING:
		*desc_len = QUERY_DESC_MAX_SIZE;
		break;
	case QUERY_DESC_IDN_RFU_0:
	case QUERY_DESC_IDN_RFU_1:
		*desc_len = 0;
		break;
	default:
		*desc_len = 0;
		return -EINVAL;
	}
	return 0;
}
EXPORT_SYMBOL(ufshcd_map_desc_id_to_length);

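/*
 * Example (illustrative sketch, not part of this driver): querying the
 * cached device-descriptor length before sizing a read buffer. The function
 * name is hypothetical.
 */
static int __maybe_unused ufshcd_example_dev_desc_len(struct ufs_hba *hba)
{
	int len = 0;
	int ret;

	ret = ufshcd_map_desc_id_to_length(hba, QUERY_DESC_IDN_DEVICE, &len);
	if (!ret)
		dev_dbg(hba->dev, "device descriptor is %d bytes\n", len);
	return ret;
}
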
/**
 * ufshcd_read_desc_param - read the specified descriptor parameter
 * @hba: Pointer to adapter instance
 * @desc_id: descriptor idn value
 * @desc_index: descriptor index
 * @param_offset: offset of the parameter to read
 * @param_read_buf: pointer to buffer where parameter would be read
 * @param_size: sizeof(param_read_buf)
 *
 * Return 0 in case of success, non-zero otherwise
 */
static int ufshcd_read_desc_param(struct ufs_hba *hba,
				  enum desc_idn desc_id,
				  int desc_index,
				  u8 param_offset,
				  u8 *param_read_buf,
				  u8 param_size)
{
	int ret;
	u8 *desc_buf;
	int buff_len;
	bool is_kmalloc = true;

	/* Safety check */
	if (desc_id >= QUERY_DESC_IDN_MAX || !param_size)
		return -EINVAL;

	/* Get the max length of descriptor from structure filled up at probe
	 * time.
	 */
	ret = ufshcd_map_desc_id_to_length(hba, desc_id, &buff_len);

	/* Sanity checks */
	if (ret || !buff_len) {
		dev_err(hba->dev, "%s: Failed to get full descriptor length",
			__func__);
		return ret;
	}

	/* Check whether we need temp memory */
	if (param_offset != 0 || param_size < buff_len) {
		desc_buf = kmalloc(buff_len, GFP_KERNEL);
		if (!desc_buf)
			return -ENOMEM;
	} else {
		desc_buf = param_read_buf;
		is_kmalloc = false;
	}

	/* Request for full descriptor */
	ret = ufshcd_query_descriptor_retry(hba, UPIU_QUERY_OPCODE_READ_DESC,
					desc_id, desc_index, 0,
					desc_buf, &buff_len);

	if (ret) {
		dev_err(hba->dev, "%s: Failed reading descriptor. desc_id %d, desc_index %d, param_offset %d, ret %d",
			__func__, desc_id, desc_index, param_offset, ret);
		goto out;
	}

	/* Sanity check */
	if (desc_buf[QUERY_DESC_DESC_TYPE_OFFSET] != desc_id) {
		dev_err(hba->dev, "%s: invalid desc_id %d in descriptor header",
			__func__, desc_buf[QUERY_DESC_DESC_TYPE_OFFSET]);
		ret = -EINVAL;
		goto out;
	}

	/*
	 * While reading variable size descriptors (like string descriptor),
	 * some UFS devices may report the "LENGTH" (field in "Transaction
	 * Specific fields" of Query Response UPIU) same as what was requested
	 * in Query Request UPIU instead of reporting the actual size of the
	 * variable size descriptor.
	 * It is safe to ignore the "LENGTH" field for variable size
	 * descriptors since we can always derive the length of the descriptor
	 * from the descriptor header fields. Hence we impose the length match
	 * check only for fixed size descriptors (for which we always request
	 * the correct size as part of Query Request UPIU).
	 */
	if ((desc_id != QUERY_DESC_IDN_STRING) &&
	    (buff_len != desc_buf[QUERY_DESC_LENGTH_OFFSET])) {
		dev_err(hba->dev, "%s: desc_buf length mismatch: buff_len %d, buff_len(desc_header) %d",
			__func__, buff_len, desc_buf[QUERY_DESC_LENGTH_OFFSET]);
		ret = -EINVAL;
		goto out;
	}

	/* Check whether we will not copy more data than available */
	if (is_kmalloc && param_size > buff_len)
		param_size = buff_len;

	if (is_kmalloc)
		memcpy(param_read_buf, &desc_buf[param_offset], param_size);
out:
	if (is_kmalloc)
		kfree(desc_buf);
	return ret;
}

static inline int ufshcd_read_desc(struct ufs_hba *hba,
				   enum desc_idn desc_id,
				   int desc_index,
				   u8 *buf,
				   u32 size)
{
	return ufshcd_read_desc_param(hba, desc_id, desc_index, 0, buf, size);
}

static inline int ufshcd_read_power_desc(struct ufs_hba *hba,
					 u8 *buf,
					 u32 size)
{
	return ufshcd_read_desc(hba, QUERY_DESC_IDN_POWER, 0, buf, size);
}

static int ufshcd_read_device_desc(struct ufs_hba *hba, u8 *buf, u32 size)
{
	return ufshcd_read_desc(hba, QUERY_DESC_IDN_DEVICE, 0, buf, size);
}

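/*
 * Example (illustrative sketch, not part of this driver): pulling one
 * two-byte parameter (the UFS spec version) out of the device descriptor
 * without fetching the whole blob. DEVICE_DESC_PARAM_SPEC_VER is the usual
 * offset constant from ufs.h; the function name is hypothetical.
 */
static int __maybe_unused ufshcd_example_read_spec_ver(struct ufs_hba *hba,
						       u16 *spec_ver)
{
	u8 raw[2];
	int ret;

	ret = ufshcd_read_desc_param(hba, QUERY_DESC_IDN_DEVICE, 0,
				     DEVICE_DESC_PARAM_SPEC_VER,
				     raw, sizeof(raw));
	if (!ret)
		*spec_ver = (raw[0] << 8) | raw[1]; /* descriptor fields are big-endian */
	return ret;
}
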
/**
 * ufshcd_read_string_desc - read string descriptor
 * @hba: pointer to adapter instance
 * @desc_index: descriptor index
 * @buf: pointer to buffer where descriptor would be read
 * @size: size of buf
 * @ascii: if true convert from unicode to ascii characters
 *
 * Return 0 in case of success, non-zero otherwise
 */
#define ASCII_STD true
static int ufshcd_read_string_desc(struct ufs_hba *hba, int desc_index,
				   u8 *buf, u32 size, bool ascii)
{
	int err = 0;

	err = ufshcd_read_desc(hba,
				QUERY_DESC_IDN_STRING, desc_index, buf, size);

	if (err) {
		dev_err(hba->dev, "%s: reading String Desc failed after %d retries. err = %d\n",
			__func__, QUERY_REQ_RETRIES, err);
		goto out;
	}

	if (ascii) {
		int desc_len;
		int ascii_len;
		int i;
		char *buff_ascii;

		desc_len = buf[0];
		/* remove header and divide by 2 to move from UTF16 to UTF8 */
		ascii_len = (desc_len - QUERY_DESC_HDR_SIZE) / 2 + 1;
		if (size < ascii_len + QUERY_DESC_HDR_SIZE) {
			dev_err(hba->dev, "%s: buffer allocated size is too small\n",
					__func__);
			err = -ENOMEM;
			goto out;
		}

		buff_ascii = kmalloc(ascii_len, GFP_KERNEL);
		if (!buff_ascii) {
			err = -ENOMEM;
			goto out;
		}

		/*
		 * the descriptor contains string in UTF16 format
		 * we need to convert to utf-8 so it can be displayed
		 */
		utf16s_to_utf8s((wchar_t *)&buf[QUERY_DESC_HDR_SIZE],
				desc_len - QUERY_DESC_HDR_SIZE,
				UTF16_BIG_ENDIAN, buff_ascii, ascii_len);

		/* replace non-printable or non-ASCII characters with spaces */
		for (i = 0; i < ascii_len; i++)
			ufshcd_remove_non_printable(&buff_ascii[i]);

		memset(buf + QUERY_DESC_HDR_SIZE, 0,
				size - QUERY_DESC_HDR_SIZE);
		memcpy(buf + QUERY_DESC_HDR_SIZE, buff_ascii, ascii_len);
		buf[QUERY_DESC_LENGTH_OFFSET] = ascii_len + QUERY_DESC_HDR_SIZE;
		kfree(buff_ascii);
	}
out:
	return err;
}

/**
 * ufshcd_read_unit_desc_param - read the specified unit descriptor parameter
 * @hba: Pointer to adapter instance
 * @lun: unit descriptor index
 * @param_offset: offset of the parameter to read
 * @param_read_buf: pointer to buffer where parameter would be read
 * @param_size: sizeof(param_read_buf)
 *
 * Return 0 in case of success, non-zero otherwise
 */
static inline int ufshcd_read_unit_desc_param(struct ufs_hba *hba,
					      int lun,
					      enum unit_desc_param param_offset,
					      u8 *param_read_buf,
					      u32 param_size)
{
	/*
	 * Unit descriptors are only available for general purpose LUs (LUN id
	 * from 0 to 7) and RPMB Well known LU.
	 */
	if (lun != UFS_UPIU_RPMB_WLUN && (lun >= UFS_UPIU_MAX_GENERAL_LUN))
		return -EOPNOTSUPP;

	return ufshcd_read_desc_param(hba, QUERY_DESC_IDN_UNIT, lun,
				      param_offset, param_read_buf, param_size);
}

int ufshcd_read_health_desc(struct ufs_hba *hba, u8 *buf, u32 size)
{
	int err = 0;

	err = ufshcd_read_desc(hba,
			       QUERY_DESC_IDN_HEALTH, 0, buf, size);
	if (err)
		dev_err(hba->dev, "%s: reading Device Health Desc failed. err = %d\n",
			__func__, err);

	return err;
}

/**
 * ufshcd_memory_alloc - allocate memory for host memory space data structures
 * @hba: per adapter instance
 *
 * 1. Allocate DMA memory for Command Descriptor array
 *	Each command descriptor consist of Command UPIU, Response UPIU and PRDT
 * 2. Allocate DMA memory for UTP Transfer Request Descriptor List (UTRDL).
 * 3. Allocate DMA memory for UTP Task Management Request Descriptor List
 *	(UTMRDL)
 * 4. Allocate memory for local reference block(lrb).
 *
 * Returns 0 for success, non-zero in case of failure
 */
static int ufshcd_memory_alloc(struct ufs_hba *hba)
{
	size_t utmrdl_size, utrdl_size, ucdl_size;

	/* Allocate memory for UTP command descriptors */
	ucdl_size = (sizeof(struct utp_transfer_cmd_desc) * hba->nutrs);
	hba->ucdl_base_addr = dmam_alloc_coherent(hba->dev,
						  ucdl_size,
						  &hba->ucdl_dma_addr,
						  GFP_KERNEL);

	/*
	 * UFSHCI requires UTP command descriptor to be 128 byte aligned.
	 * make sure hba->ucdl_dma_addr is aligned to PAGE_SIZE
	 * if hba->ucdl_dma_addr is aligned to PAGE_SIZE, then it will
	 * be aligned to 128 bytes as well
	 */
	if (!hba->ucdl_base_addr ||
	    WARN_ON(hba->ucdl_dma_addr & (PAGE_SIZE - 1))) {
		dev_err(hba->dev,
			"Command Descriptor Memory allocation failed\n");
		goto out;
	}

	/*
	 * Allocate memory for UTP Transfer descriptors
	 * UFSHCI requires 1024 byte alignment of UTRD
	 */
	utrdl_size = (sizeof(struct utp_transfer_req_desc) * hba->nutrs);
	hba->utrdl_base_addr = dmam_alloc_coherent(hba->dev,
						   utrdl_size,
						   &hba->utrdl_dma_addr,
						   GFP_KERNEL);
	if (!hba->utrdl_base_addr ||
	    WARN_ON(hba->utrdl_dma_addr & (PAGE_SIZE - 1))) {
		dev_err(hba->dev,
			"Transfer Descriptor Memory allocation failed\n");
		goto out;
	}

	/*
	 * Allocate memory for UTP Task Management descriptors
	 * UFSHCI requires 1024 byte alignment of UTMRD
	 */
	utmrdl_size = sizeof(struct utp_task_req_desc) * hba->nutmrs;
	hba->utmrdl_base_addr = dmam_alloc_coherent(hba->dev,
						    utmrdl_size,
						    &hba->utmrdl_dma_addr,
						    GFP_KERNEL);
	if (!hba->utmrdl_base_addr ||
	    WARN_ON(hba->utmrdl_dma_addr & (PAGE_SIZE - 1))) {
		dev_err(hba->dev,
			"Task Management Descriptor Memory allocation failed\n");
		goto out;
	}

	/* Allocate memory for local reference block */
	hba->lrb = devm_kzalloc(hba->dev,
				hba->nutrs * sizeof(struct ufshcd_lrb),
				GFP_KERNEL);
	if (!hba->lrb) {
		dev_err(hba->dev, "LRB Memory allocation failed\n");
		goto out;
	}
	return 0;
out:
	return -ENOMEM;
}

/**
 * ufshcd_host_memory_configure - configure local reference block with
 *				memory offsets
 * @hba: per adapter instance
 *
 * Configure Host memory space
 * 1. Update Corresponding UTRD.UCDBA and UTRD.UCDBAU with UCD DMA
 *	address.
 * 2. Update each UTRD with Response UPIU offset, Response UPIU length
 *	and PRDT offset.
 * 3. Save the corresponding addresses of UTRD, UCD.CMD, UCD.RSP and UCD.PRDT
 *	into local reference block.
 */
static void ufshcd_host_memory_configure(struct ufs_hba *hba)
{
	struct utp_transfer_cmd_desc *cmd_descp;
	struct utp_transfer_req_desc *utrdlp;
	dma_addr_t cmd_desc_dma_addr;
	dma_addr_t cmd_desc_element_addr;
	u16 response_offset;
	u16 prdt_offset;
	int cmd_desc_size;
	int i;

	utrdlp = hba->utrdl_base_addr;
	cmd_descp = hba->ucdl_base_addr;

	response_offset =
		offsetof(struct utp_transfer_cmd_desc, response_upiu);
	prdt_offset =
		offsetof(struct utp_transfer_cmd_desc, prd_table);

	cmd_desc_size = sizeof(struct utp_transfer_cmd_desc);
	cmd_desc_dma_addr = hba->ucdl_dma_addr;

	for (i = 0; i < hba->nutrs; i++) {
		/* Configure UTRD with command descriptor base address */
		cmd_desc_element_addr =
				(cmd_desc_dma_addr + (cmd_desc_size * i));
		utrdlp[i].command_desc_base_addr_lo =
				cpu_to_le32(lower_32_bits(cmd_desc_element_addr));
		utrdlp[i].command_desc_base_addr_hi =
				cpu_to_le32(upper_32_bits(cmd_desc_element_addr));

		/* Response upiu and prdt offset should be in double words */
		if (hba->quirks & UFSHCD_QUIRK_PRDT_BYTE_GRAN) {
			utrdlp[i].response_upiu_offset =
				cpu_to_le16(response_offset);
			utrdlp[i].prd_table_offset =
				cpu_to_le16(prdt_offset);
			utrdlp[i].response_upiu_length =
				cpu_to_le16(ALIGNED_UPIU_SIZE);
		} else {
			utrdlp[i].response_upiu_offset =
				cpu_to_le16((response_offset >> 2));
			utrdlp[i].prd_table_offset =
				cpu_to_le16((prdt_offset >> 2));
			utrdlp[i].response_upiu_length =
				cpu_to_le16(ALIGNED_UPIU_SIZE >> 2);
		}

		hba->lrb[i].utr_descriptor_ptr = (utrdlp + i);
		hba->lrb[i].utrd_dma_addr = hba->utrdl_dma_addr +
				(i * sizeof(struct utp_transfer_req_desc));
		hba->lrb[i].ucd_req_ptr =
			(struct utp_upiu_req *)(cmd_descp + i);
		hba->lrb[i].ucd_req_dma_addr = cmd_desc_element_addr;
		hba->lrb[i].ucd_rsp_ptr =
			(struct utp_upiu_rsp *)cmd_descp[i].response_upiu;
		hba->lrb[i].ucd_rsp_dma_addr = cmd_desc_element_addr +
				response_offset;
		hba->lrb[i].ucd_prdt_ptr =
			(struct ufshcd_sg_entry *)cmd_descp[i].prd_table;
		hba->lrb[i].ucd_prdt_dma_addr = cmd_desc_element_addr +
				prdt_offset;
	}
}

/**
 * ufshcd_dme_link_startup - Notify Unipro to perform link startup
 * @hba: per adapter instance
 *
 * UIC_CMD_DME_LINK_STARTUP command must be issued to Unipro layer,
 * in order to initialize the Unipro link startup procedure.
 * Once the Unipro links are up, the device connected to the controller
 * is detected.
 *
 * Returns 0 on success, non-zero value on failure
 */
static int ufshcd_dme_link_startup(struct ufs_hba *hba)
{
	struct uic_command uic_cmd = {0};
	int ret;

	uic_cmd.command = UIC_CMD_DME_LINK_STARTUP;

	ret = ufshcd_send_uic_cmd(hba, &uic_cmd);
	if (ret)
		dev_dbg(hba->dev,
			"dme-link-startup: error code %d\n", ret);
	return ret;
}

static inline void ufshcd_add_delay_before_dme_cmd(struct ufs_hba *hba)
{
	#define MIN_DELAY_BEFORE_DME_CMDS_US	1000
	unsigned long min_sleep_time_us;

	if (!(hba->quirks & UFSHCD_QUIRK_DELAY_BEFORE_DME_CMDS))
		return;

	/*
	 * last_dme_cmd_tstamp will be 0 only for 1st call to
	 * this function
	 */
	if (unlikely(!ktime_to_us(hba->last_dme_cmd_tstamp))) {
		min_sleep_time_us = MIN_DELAY_BEFORE_DME_CMDS_US;
	} else {
		unsigned long delta =
			(unsigned long) ktime_to_us(
				ktime_sub(ktime_get(),
				hba->last_dme_cmd_tstamp));

		if (delta < MIN_DELAY_BEFORE_DME_CMDS_US)
			min_sleep_time_us =
				MIN_DELAY_BEFORE_DME_CMDS_US - delta;
		else
			return; /* no more delay required */
	}

	/* allow sleep for extra 50us if needed */
	usleep_range(min_sleep_time_us, min_sleep_time_us + 50);
}

static int ufshcd_dme_reset(struct ufs_hba *hba)
{
	struct uic_command uic_cmd = {0};
	int ret;

	uic_cmd.command = UIC_CMD_DME_RESET;
	uic_cmd.argument1 = 0x1;

	ret = ufshcd_send_uic_cmd(hba, &uic_cmd);
	if (ret)
		dev_err(hba->dev,
			"dme-reset: error code %d\n", ret);

	return ret;
}

static int ufshcd_dme_enable(struct ufs_hba *hba)
{
	struct uic_command uic_cmd = {0};
	int ret;

	uic_cmd.command = UIC_CMD_DME_ENABLE;

	ret = ufshcd_send_uic_cmd(hba, &uic_cmd);
	if (ret)
		dev_err(hba->dev,
			"dme-enable: error code %d\n", ret);

	return ret;
}

/**
 * ufshcd_dme_set_attr - UIC command for DME_SET, DME_PEER_SET
 * @hba: per adapter instance
 * @attr_sel: uic command argument1
 * @attr_set: attribute set type as uic command argument2
 * @mib_val: setting value as uic command argument3
 * @peer: indicate whether peer or local
 *
 * Returns 0 on success, non-zero value on failure
 */
int ufshcd_dme_set_attr(struct ufs_hba *hba, u32 attr_sel,
			u8 attr_set, u32 mib_val, u8 peer)
{
	struct uic_command uic_cmd = {0};
	static const char *const action[] = {
		"dme-set",
		"dme-peer-set"
	};
	const char *set = action[!!peer];
	int ret;
	int retries = UFS_UIC_COMMAND_RETRIES;

	uic_cmd.command = peer ?
		UIC_CMD_DME_PEER_SET : UIC_CMD_DME_SET;
	uic_cmd.argument1 = attr_sel;
	uic_cmd.argument2 = UIC_ARG_ATTR_TYPE(attr_set);
	uic_cmd.argument3 = mib_val;

	do {
		/* for peer attributes we retry upon failure */
		ret = ufshcd_send_uic_cmd(hba, &uic_cmd);
		if (ret)
			dev_dbg(hba->dev, "%s: attr-id 0x%x val 0x%x error code %d\n",
				set, UIC_GET_ATTR_ID(attr_sel), mib_val, ret);
	} while (ret && peer && --retries);

	if (ret)
		dev_err(hba->dev, "%s: attr-id 0x%x val 0x%x failed %d retries\n",
			set, UIC_GET_ATTR_ID(attr_sel), mib_val,
			UFS_UIC_COMMAND_RETRIES - retries);

	return ret;
}
EXPORT_SYMBOL_GPL(ufshcd_dme_set_attr);

/**
 * ufshcd_dme_get_attr - UIC command for DME_GET, DME_PEER_GET
 * @hba: per adapter instance
 * @attr_sel: uic command argument1
 * @mib_val: the value of the attribute as returned by the UIC command
 * @peer: indicate whether peer or local
 *
 * Returns 0 on success, non-zero value on failure
 */
int ufshcd_dme_get_attr(struct ufs_hba *hba, u32 attr_sel,
			u32 *mib_val, u8 peer)
{
	struct uic_command uic_cmd = {0};
	static const char *const action[] = {
		"dme-get",
		"dme-peer-get"
	};
	const char *get = action[!!peer];
	int ret;
	int retries = UFS_UIC_COMMAND_RETRIES;
	struct ufs_pa_layer_attr orig_pwr_info;
	struct ufs_pa_layer_attr temp_pwr_info;
	bool pwr_mode_change = false;

	if (peer && (hba->quirks & UFSHCD_QUIRK_DME_PEER_ACCESS_AUTO_MODE)) {
		orig_pwr_info = hba->pwr_info;
		temp_pwr_info = orig_pwr_info;

		if (orig_pwr_info.pwr_tx == FAST_MODE ||
		    orig_pwr_info.pwr_rx == FAST_MODE) {
			temp_pwr_info.pwr_tx = FASTAUTO_MODE;
			temp_pwr_info.pwr_rx = FASTAUTO_MODE;
			pwr_mode_change = true;
		} else if (orig_pwr_info.pwr_tx == SLOW_MODE ||
		    orig_pwr_info.pwr_rx == SLOW_MODE) {
			temp_pwr_info.pwr_tx = SLOWAUTO_MODE;
			temp_pwr_info.pwr_rx = SLOWAUTO_MODE;
			pwr_mode_change = true;
		}
		if (pwr_mode_change) {
			ret = ufshcd_change_power_mode(hba, &temp_pwr_info);
			if (ret)
				goto out;
		}
	}

	uic_cmd.command = peer ?
		UIC_CMD_DME_PEER_GET : UIC_CMD_DME_GET;
	uic_cmd.argument1 = attr_sel;

	do {
		/* for peer attributes we retry upon failure */
		ret = ufshcd_send_uic_cmd(hba, &uic_cmd);
		if (ret)
			dev_dbg(hba->dev, "%s: attr-id 0x%x error code %d\n",
				get, UIC_GET_ATTR_ID(attr_sel), ret);
	} while (ret && peer && --retries);

	if (ret)
		dev_err(hba->dev, "%s: attr-id 0x%x failed %d retries\n",
			get, UIC_GET_ATTR_ID(attr_sel),
			UFS_UIC_COMMAND_RETRIES - retries);

	if (mib_val && !ret)
		*mib_val = uic_cmd.argument3;

	if (peer && (hba->quirks & UFSHCD_QUIRK_DME_PEER_ACCESS_AUTO_MODE)
	    && pwr_mode_change)
		ufshcd_change_power_mode(hba, &orig_pwr_info);
out:
	return ret;
}
EXPORT_SYMBOL_GPL(ufshcd_dme_get_attr);

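/*
 * Example (illustrative sketch, not part of this driver): reading a local
 * UniPro attribute with the ufshcd_dme_get() convenience wrapper used
 * elsewhere in this file. PA_TACTIVATE is a standard PHY adapter attribute;
 * the function name is hypothetical.
 */
static int __maybe_unused ufshcd_example_read_tactivate(struct ufs_hba *hba)
{
	u32 tactivate = 0;
	int ret;

	ret = ufshcd_dme_get(hba, UIC_ARG_MIB(PA_TACTIVATE), &tactivate);
	if (!ret)
		dev_dbg(hba->dev, "PA_TACTIVATE = %u\n", tactivate);
	return ret;
}
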
/**
 * ufshcd_uic_pwr_ctrl - executes UIC commands (which affects the link power
 * state) and waits for it to take effect.
 *
 * @hba: per adapter instance
 * @cmd: UIC command to execute
 *
 * DME operations like DME_SET(PA_PWRMODE), DME_HIBERNATE_ENTER &
 * DME_HIBERNATE_EXIT commands take some time to take its effect on both host
 * and device UniPro link and hence it's final completion would be indicated by
 * dedicated status bits in Interrupt Status register (UPMS, UHES, UHXS) in
 * addition to normal UIC command completion Status (UCCS). This function only
 * returns after the relevant status bits indicate the completion.
 *
 * Returns 0 on success, non-zero value on failure
 */
static int ufshcd_uic_pwr_ctrl(struct ufs_hba *hba, struct uic_command *cmd)
{
	struct completion uic_async_done;
	unsigned long flags;
	u8 status;
	int ret;
	bool reenable_intr = false;

	mutex_lock(&hba->uic_cmd_mutex);
	init_completion(&uic_async_done);
	ufshcd_add_delay_before_dme_cmd(hba);

	spin_lock_irqsave(hba->host->host_lock, flags);
	hba->uic_async_done = &uic_async_done;
	if (ufshcd_readl(hba, REG_INTERRUPT_ENABLE) & UIC_COMMAND_COMPL) {
		ufshcd_disable_intr(hba, UIC_COMMAND_COMPL);
		/*
		 * Make sure UIC command completion interrupt is disabled before
		 * issuing UIC command.
		 */
		wmb();
		reenable_intr = true;
	}
	ret = __ufshcd_send_uic_cmd(hba, cmd, false);
	spin_unlock_irqrestore(hba->host->host_lock, flags);
	if (ret) {
		dev_err(hba->dev,
			"pwr ctrl cmd 0x%x with mode 0x%x uic error %d\n",
			cmd->command, cmd->argument3, ret);
		goto out;
	}

	if (!wait_for_completion_timeout(hba->uic_async_done,
					 msecs_to_jiffies(UIC_CMD_TIMEOUT))) {
		dev_err(hba->dev,
			"pwr ctrl cmd 0x%x with mode 0x%x completion timeout\n",
			cmd->command, cmd->argument3);
		ret = -ETIMEDOUT;
		goto out;
	}

	status = ufshcd_get_upmcrs(hba, cmd);
	if (status != PWR_LOCAL) {
		dev_err(hba->dev,
			"pwr ctrl cmd 0x%0x failed, host upmcrs:0x%x\n",
			cmd->command, status);
		ret = (status != PWR_OK) ? status : -1;
	}
out:
	if (ret) {
		/* Dump debugging information to system memory */
		ufshcd_vops_dbg_register_dump(hba);
		exynos_ufs_show_uic_info(hba);
		ufshcd_print_host_state(hba);
		ufshcd_print_pwr_info(hba);
		ufshcd_print_host_regs(hba);
	}

	spin_lock_irqsave(hba->host->host_lock, flags);
	hba->active_uic_cmd = NULL;
	hba->uic_async_done = NULL;
	if (reenable_intr)
		ufshcd_enable_intr(hba, UIC_COMMAND_COMPL);
	spin_unlock_irqrestore(hba->host->host_lock, flags);
	mutex_unlock(&hba->uic_cmd_mutex);

	return ret;
}

/**
 * ufshcd_uic_change_pwr_mode - Perform the UIC power mode change
 * using DME_SET primitives.
 * @hba: per adapter instance
 * @mode: power mode value
 *
 * Returns 0 on success, non-zero value on failure
 */
static int ufshcd_uic_change_pwr_mode(struct ufs_hba *hba, u8 mode)
{
	struct uic_command uic_cmd = {0};
	int ret;

	if (hba->quirks & UFSHCD_QUIRK_BROKEN_PA_RXHSUNTERMCAP) {
		ret = ufshcd_dme_set(hba,
				UIC_ARG_MIB_SEL(PA_RXHSUNTERMCAP, 0), 1);
		if (ret) {
			dev_err(hba->dev, "%s: failed to enable PA_RXHSUNTERMCAP ret %d\n",
						__func__, ret);
			goto out;
		}
	}

	uic_cmd.command = UIC_CMD_DME_SET;
	uic_cmd.argument1 = UIC_ARG_MIB(PA_PWRMODE);
	uic_cmd.argument3 = mode;
	ufshcd_hold(hba, false);
	ret = ufshcd_uic_pwr_ctrl(hba, &uic_cmd);
	ufshcd_release(hba);
out:
	return ret;
}

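/*
 * Example (illustrative sketch, not part of this driver): the mode byte
 * handed to ufshcd_uic_change_pwr_mode() carries the RX mode in the upper
 * nibble and the TX mode in the lower nibble, exactly as
 * ufshcd_change_power_mode() composes it further below. The function name is
 * hypothetical.
 */
static int __maybe_unused ufshcd_example_fast_mode_both(struct ufs_hba *hba)
{
	return ufshcd_uic_change_pwr_mode(hba, (FAST_MODE << 4) | FAST_MODE);
}
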
static int ufshcd_link_recovery(struct ufs_hba *hba)
{
	int ret;
	unsigned long flags;

	spin_lock_irqsave(hba->host->host_lock, flags);
	hba->ufshcd_state = UFSHCD_STATE_RESET;
	ufshcd_set_eh_in_progress(hba);
	spin_unlock_irqrestore(hba->host->host_lock, flags);

	ret = ufshcd_host_reset_and_restore(hba);

	spin_lock_irqsave(hba->host->host_lock, flags);
	if (ret)
		hba->ufshcd_state = UFSHCD_STATE_ERROR;
	ufshcd_clear_eh_in_progress(hba);
	spin_unlock_irqrestore(hba->host->host_lock, flags);

	if (ret)
		dev_err(hba->dev, "%s: link recovery failed, err %d",
			__func__, ret);

	return ret;
}

static int __ufshcd_uic_hibern8_enter(struct ufs_hba *hba)
{
	int ret;
	struct uic_command uic_cmd = {0};
	ktime_t start = ktime_get();

	uic_cmd.command = UIC_CMD_DME_HIBER_ENTER;
	ret = ufshcd_uic_pwr_ctrl(hba, &uic_cmd);
	trace_ufshcd_profile_hibern8(dev_name(hba->dev), "enter",
			     ktime_to_us(ktime_sub(ktime_get(), start)), ret);

	if (ret) {
		dev_err(hba->dev, "%s: hibern8 enter failed. ret = %d\n",
			__func__, ret);

		/*
		 * If link recovery fails then return error so that the caller
		 * doesn't retry the hibern8 enter again.
		 */
		if (ufshcd_link_recovery(hba))
			ret = -ENOLINK;
	}

	return ret;
}

static int ufshcd_uic_hibern8_enter(struct ufs_hba *hba)
{
	int ret = 0, retries;

	for (retries = UIC_HIBERN8_ENTER_RETRIES; retries > 0; retries--) {
		ret = __ufshcd_uic_hibern8_enter(hba);
		if (!ret || ret == -ENOLINK)
			goto out;
	}
out:
	return ret;
}

static int ufshcd_uic_hibern8_exit(struct ufs_hba *hba)
{
	struct uic_command uic_cmd = {0};
	int ret;
	ktime_t start = ktime_get();

	uic_cmd.command = UIC_CMD_DME_HIBER_EXIT;
	ret = ufshcd_uic_pwr_ctrl(hba, &uic_cmd);
	trace_ufshcd_profile_hibern8(dev_name(hba->dev), "exit",
			     ktime_to_us(ktime_sub(ktime_get(), start)), ret);

	if (ret) {
		dev_err(hba->dev, "%s: hibern8 exit failed. ret = %d\n",
			__func__, ret);
		ret = ufshcd_link_recovery(hba);
	} else {
		hba->ufs_stats.last_hibern8_exit_tstamp = ktime_get();
		hba->ufs_stats.hibern8_exit_cnt++;
	}

	return ret;
}

/**
 * ufshcd_init_pwr_info - setting the POR (power on reset)
 * values in hba power info
 * @hba: per-adapter instance
 */
static void ufshcd_init_pwr_info(struct ufs_hba *hba)
{
	hba->pwr_info.gear_rx = UFS_PWM_G1;
	hba->pwr_info.gear_tx = UFS_PWM_G1;
	hba->pwr_info.lane_rx = 1;
	hba->pwr_info.lane_tx = 1;
	hba->pwr_info.pwr_rx = SLOWAUTO_MODE;
	hba->pwr_info.pwr_tx = SLOWAUTO_MODE;
	hba->pwr_info.hs_rate = 0;
}

static int ufshcd_link_hibern8_ctrl(struct ufs_hba *hba, bool en)
{
	int ret;

	if (hba->vops && hba->vops->hibern8_notify)
		hba->vops->hibern8_notify(hba, en, PRE_CHANGE);

	if (en)
		ret = ufshcd_uic_hibern8_enter(hba);
	else
		ret = ufshcd_uic_hibern8_exit(hba);

	if (ret)
		goto out;

	if (hba->monitor.flag & UFSHCD_MONITOR_LEVEL2) {
		if (en)
			dev_info(hba->dev, "H8+\n");
		else
			dev_info(hba->dev, "H8-\n");
	}

	if (hba->vops && hba->vops->hibern8_notify)
		hba->vops->hibern8_notify(hba, en, POST_CHANGE);

	hba->tcx_replay_timer_expired_cnt = 0;
	hba->fcx_protection_timer_expired_cnt = 0;
out:
	return ret;
}

/**
 * ufshcd_get_max_pwr_mode - reads the max power mode negotiated with device
 * @hba: per-adapter instance
 */
static int ufshcd_get_max_pwr_mode(struct ufs_hba *hba)
{
	struct ufs_pa_layer_attr *pwr_info = &hba->max_pwr_info.info;

	if (hba->max_pwr_info.is_valid)
		return 0;

	pwr_info->pwr_tx = FAST_MODE;
	pwr_info->pwr_rx = FAST_MODE;
	pwr_info->hs_rate = PA_HS_MODE_B;

	/* Get the connected lane count */
	ufshcd_dme_get(hba, UIC_ARG_MIB(PA_CONNECTEDRXDATALANES),
			&pwr_info->lane_rx);
	ufshcd_dme_get(hba, UIC_ARG_MIB(PA_CONNECTEDTXDATALANES),
			&pwr_info->lane_tx);

	if (!pwr_info->lane_rx || !pwr_info->lane_tx) {
		dev_err(hba->dev, "%s: invalid connected lanes value. rx=%d, tx=%d\n",
				__func__,
				pwr_info->lane_rx,
				pwr_info->lane_tx);
		return -EINVAL;
	}

	hba->tcx_replay_timer_expired_cnt = 0;
	hba->fcx_protection_timer_expired_cnt = 0;

	/* Get the peer available lane count */
	ufshcd_dme_peer_get(hba, UIC_ARG_MIB(PA_AVAILRXDATALANES),
			&pwr_info->peer_available_lane_rx);
	ufshcd_dme_peer_get(hba, UIC_ARG_MIB(PA_AVAILTXDATALANES),
			&pwr_info->peer_available_lane_tx);

	if (!pwr_info->peer_available_lane_rx ||
	    !pwr_info->peer_available_lane_tx) {
		dev_err(hba->dev, "%s: invalid peer available lanes value. rx=%d, tx=%d\n",
				__func__,
				pwr_info->peer_available_lane_rx,
				pwr_info->peer_available_lane_tx);
		return -EINVAL;
	}

	/*
	 * First, get the maximum gears of HS speed.
	 * If a zero value, it means there is no HSGEAR capability.
	 * Then, get the maximum gears of PWM speed.
	 */
	ufshcd_dme_get(hba, UIC_ARG_MIB(PA_MAXRXHSGEAR), &pwr_info->gear_rx);
	if (!pwr_info->gear_rx) {
		ufshcd_dme_get(hba, UIC_ARG_MIB(PA_MAXRXPWMGEAR),
				&pwr_info->gear_rx);
		if (!pwr_info->gear_rx) {
			dev_err(hba->dev, "%s: invalid max pwm rx gear read = %d\n",
				__func__, pwr_info->gear_rx);
			return -EINVAL;
		}
		pwr_info->pwr_rx = SLOW_MODE;
	}

	ufshcd_dme_peer_get(hba, UIC_ARG_MIB(PA_MAXRXHSGEAR),
			&pwr_info->gear_tx);
	if (!pwr_info->gear_tx) {
		ufshcd_dme_peer_get(hba, UIC_ARG_MIB(PA_MAXRXPWMGEAR),
				&pwr_info->gear_tx);
		if (!pwr_info->gear_tx) {
			dev_err(hba->dev, "%s: invalid max pwm tx gear read = %d\n",
				__func__, pwr_info->gear_tx);
			return -EINVAL;
		}
		pwr_info->pwr_tx = SLOW_MODE;
	}

	hba->max_pwr_info.is_valid = true;
	return 0;
}

static int ufshcd_change_power_mode(struct ufs_hba *hba,
			     struct ufs_pa_layer_attr *pwr_mode)
{
	int ret;

	/* if already configured to the requested pwr_mode */
	if (pwr_mode->gear_rx == hba->pwr_info.gear_rx &&
	    pwr_mode->gear_tx == hba->pwr_info.gear_tx &&
	    pwr_mode->lane_rx == hba->pwr_info.lane_rx &&
	    pwr_mode->lane_tx == hba->pwr_info.lane_tx &&
	    pwr_mode->pwr_rx == hba->pwr_info.pwr_rx &&
	    pwr_mode->pwr_tx == hba->pwr_info.pwr_tx &&
	    pwr_mode->hs_rate == hba->pwr_info.hs_rate) {
		dev_dbg(hba->dev, "%s: power already configured\n", __func__);
		return 0;
	}

	/*
	 * Configure attributes for power mode change with below.
	 * - PA_RXGEAR, PA_ACTIVERXDATALANES, PA_RXTERMINATION,
	 * - PA_TXGEAR, PA_ACTIVETXDATALANES, PA_TXTERMINATION,
	 * - PA_HSSERIES
	 */
	ufshcd_dme_set(hba, UIC_ARG_MIB(PA_RXGEAR), pwr_mode->gear_rx);
	ufshcd_dme_set(hba, UIC_ARG_MIB(PA_ACTIVERXDATALANES),
			pwr_mode->lane_rx);
	if (pwr_mode->pwr_rx == FASTAUTO_MODE ||
			pwr_mode->pwr_rx == FAST_MODE)
		ufshcd_dme_set(hba, UIC_ARG_MIB(PA_RXTERMINATION), TRUE);
	else
		ufshcd_dme_set(hba, UIC_ARG_MIB(PA_RXTERMINATION), FALSE);

	ufshcd_dme_set(hba, UIC_ARG_MIB(PA_TXGEAR), pwr_mode->gear_tx);
	ufshcd_dme_set(hba, UIC_ARG_MIB(PA_ACTIVETXDATALANES),
			pwr_mode->lane_tx);
	if (pwr_mode->pwr_tx == FASTAUTO_MODE ||
			pwr_mode->pwr_tx == FAST_MODE)
		ufshcd_dme_set(hba, UIC_ARG_MIB(PA_TXTERMINATION), TRUE);
	else
		ufshcd_dme_set(hba, UIC_ARG_MIB(PA_TXTERMINATION), FALSE);

	if (pwr_mode->pwr_rx == FASTAUTO_MODE ||
	    pwr_mode->pwr_tx == FASTAUTO_MODE ||
	    pwr_mode->pwr_rx == FAST_MODE ||
	    pwr_mode->pwr_tx == FAST_MODE)
		ufshcd_dme_set(hba, UIC_ARG_MIB(PA_HSSERIES),
						pwr_mode->hs_rate);

	ret = ufshcd_uic_change_pwr_mode(hba, pwr_mode->pwr_rx << 4
			| pwr_mode->pwr_tx);

	if (ret) {
		dev_err(hba->dev,
			"%s: power mode change failed %d\n", __func__, ret);
	} else {
		ufshcd_hold(hba, false);
		ret = ufshcd_vops_pwr_change_notify(hba, POST_CHANGE, NULL,
								pwr_mode);
		ufshcd_release(hba);

		memcpy(&hba->pwr_info, pwr_mode,
			sizeof(struct ufs_pa_layer_attr));
	}

	return ret;
}

/**
 * ufshcd_config_pwr_mode - configure a new power mode
 * @hba: per-adapter instance
 * @desired_pwr_mode: desired power configuration
 */
int ufshcd_config_pwr_mode(struct ufs_hba *hba,
		struct ufs_pa_layer_attr *desired_pwr_mode)
{
	struct ufs_pa_layer_attr final_params = { 0 };
	int ret;

	ufshcd_hold(hba, false);
	ret = ufshcd_vops_pwr_change_notify(hba, PRE_CHANGE,
					desired_pwr_mode, &final_params);

	/* PRE_CHANGE notify not supported: use the desired parameters as-is */
	if (ret == -ENOTSUPP)
		memcpy(&final_params, desired_pwr_mode, sizeof(final_params));

	ret = ufshcd_change_power_mode(hba, &final_params);
	if (!ret)
		ufshcd_print_pwr_info(hba);

	ufshcd_release(hba);

	return ret;
}
EXPORT_SYMBOL_GPL(ufshcd_config_pwr_mode);
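/*
 * Usage sketch for ufshcd_config_pwr_mode(): a caller fills a
 * ufs_pa_layer_attr with the desired gear/lane/mode and lets the vendor
 * PRE_CHANGE hook adjust it, e.g.:
 *
 *	struct ufs_pa_layer_attr new_pwr_info = {
 *		.gear_rx = UFS_HS_G3, .gear_tx = UFS_HS_G3,
 *		.lane_rx = 2, .lane_tx = 2,
 *		.pwr_rx = FAST_MODE, .pwr_tx = FAST_MODE,
 *		.hs_rate = PA_HS_MODE_B,
 *	};
 *	ret = ufshcd_config_pwr_mode(hba, &new_pwr_info);
 *
 * The field values above are illustrative only; valid gears and lane counts
 * depend on what ufshcd_get_max_pwr_mode() reported for this link.
 */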
/**
 * ufshcd_complete_dev_init() - checks device readiness
 * @hba: per-adapter instance
 *
 * Set fDeviceInit flag and poll until device toggles it.
 */
static int ufshcd_complete_dev_init(struct ufs_hba *hba)
{
	int i;
	int err;
	bool flag_res = 1;

	err = ufshcd_query_flag_retry(hba, UPIU_QUERY_OPCODE_SET_FLAG,
		QUERY_FLAG_IDN_FDEVICEINIT, NULL);
	if (err) {
		dev_err(hba->dev,
			"%s setting fDeviceInit flag failed with error %d\n",
			__func__, err);
		goto out;
	}

	/* poll for max. 1000 iterations for fDeviceInit flag to clear */
	for (i = 0; i < 1000 && !err && flag_res; i++)
		err = ufshcd_query_flag_retry(hba, UPIU_QUERY_OPCODE_READ_FLAG,
			QUERY_FLAG_IDN_FDEVICEINIT, &flag_res);

	if (err)
		dev_err(hba->dev,
			"%s reading fDeviceInit flag failed with error %d\n",
			__func__, err);
	else if (flag_res)
		dev_err(hba->dev,
			"%s fDeviceInit was not cleared by the device\n",
			__func__);

out:
	return err;
}
/**
 * ufshcd_make_hba_operational - Make UFS controller operational
 * @hba: per adapter instance
 *
 * To bring UFS host controller to operational state,
 * 1. Enable required interrupts
 * 2. Configure interrupt aggregation
 * 3. Program UTRL and UTMRL base address
 * 4. Configure run-stop-registers
 *
 * Returns 0 on success, non-zero value on failure
 */
static int ufshcd_make_hba_operational(struct ufs_hba *hba)
{
	int err = 0;
	u32 reg;

	/* Enable required interrupts */
	ufshcd_enable_intr(hba, UFSHCD_ENABLE_INTRS);

	/* Configure interrupt aggregation */
	if (ufshcd_is_intr_aggr_allowed(hba))
		ufshcd_config_intr_aggr(hba, hba->nutrs - 1, INT_AGGR_DEF_TO);
	else
		ufshcd_disable_intr_aggr(hba);

	/* Configure UTRL and UTMRL base address registers */
	ufshcd_writel(hba, lower_32_bits(hba->utrdl_dma_addr),
			REG_UTP_TRANSFER_REQ_LIST_BASE_L);
	ufshcd_writel(hba, upper_32_bits(hba->utrdl_dma_addr),
			REG_UTP_TRANSFER_REQ_LIST_BASE_H);
	ufshcd_writel(hba, lower_32_bits(hba->utmrdl_dma_addr),
			REG_UTP_TASK_REQ_LIST_BASE_L);
	ufshcd_writel(hba, upper_32_bits(hba->utmrdl_dma_addr),
			REG_UTP_TASK_REQ_LIST_BASE_H);

	/*
	 * Make sure base address and interrupt setup are updated before
	 * enabling the run/stop registers below.
	 */
	wmb();

	/*
	 * UCRDY, UTMRLDY and UTRLRDY bits must be 1
	 */
	reg = ufshcd_readl(hba, REG_CONTROLLER_STATUS);
	if (!(ufshcd_get_lists_status(reg))) {
		ufshcd_enable_run_stop_reg(hba);
	} else {
		dev_err(hba->dev,
			"Host controller not ready to process requests");
		err = -EIO;
		goto out;
	}

out:
	return err;
}
/**
 * ufshcd_hba_stop - Send controller to reset state
 * @hba: per adapter instance
 * @can_sleep: perform sleep or just spin
 */
static inline void ufshcd_hba_stop(struct ufs_hba *hba, bool can_sleep)
{
	int err;

	ufshcd_writel(hba, CONTROLLER_DISABLE, REG_CONTROLLER_ENABLE);
	err = ufshcd_wait_for_register(hba, REG_CONTROLLER_ENABLE,
					CONTROLLER_ENABLE, CONTROLLER_DISABLE,
					10, 1, can_sleep);
	if (err)
		dev_err(hba->dev, "%s: Controller disable failed\n", __func__);
}
/**
 * __ufshcd_hba_enable - initialize the controller
 * @hba: per adapter instance
 *
 * The controller resets itself and controller firmware initialization
 * sequence kicks off. When controller is ready it will set
 * the Host Controller Enable bit to 1.
 *
 * Returns 0 on success, non-zero value on failure
 */
static int __ufshcd_hba_enable(struct ufs_hba *hba)
{
	int retry;

	/*
	 * msleep of 1 and 5 used in this function might result in msleep(20),
	 * but it was necessary to send the UFS FPGA to reset mode during
	 * development and testing of this driver. msleep can be changed to
	 * mdelay and retry count can be reduced based on the controller.
	 */
	if (!ufshcd_is_hba_active(hba))
		/* change controller state to "reset state" */
		ufshcd_hba_stop(hba, true);

	/* UniPro link is disabled at this point */
	ufshcd_set_link_off(hba);

	ufshcd_vops_hce_enable_notify(hba, PRE_CHANGE);

	/* start controller initialization sequence */
	ufshcd_hba_start(hba);

	/*
	 * To initialize a UFS host controller HCE bit must be set to 1.
	 * During initialization the HCE bit value changes from 1->0->1.
	 * When the host controller completes initialization sequence
	 * it sets the value of HCE bit to 1. The same HCE bit is read back
	 * to check if the controller has completed initialization sequence.
	 * So without this delay the value HCE = 1, set in the previous
	 * instruction might be read back.
	 * This delay can be changed based on the controller.
	 */
	msleep(1);

	/* wait for the host controller to complete initialization */
	retry = 10;
	while (ufshcd_is_hba_active(hba)) {
		if (retry) {
			retry--;
		} else {
			dev_err(hba->dev,
				"Controller enable failed\n");
			return -EIO;
		}
		msleep(5);
	}

	/* enable UIC related interrupts */
	ufshcd_enable_intr(hba, UFSHCD_UIC_MASK);

	ufshcd_vops_hce_enable_notify(hba, POST_CHANGE);

	return 0;
}
static int ufshcd_disable_tx_lcc(struct ufs_hba *hba, bool peer)
{
	int tx_lanes, i, err = 0;

	if (!peer)
		ufshcd_dme_get(hba, UIC_ARG_MIB(PA_CONNECTEDTXDATALANES),
			       &tx_lanes);
	else
		ufshcd_dme_peer_get(hba, UIC_ARG_MIB(PA_CONNECTEDTXDATALANES),
				    &tx_lanes);
	for (i = 0; i < tx_lanes; i++) {
		if (!peer)
			err = ufshcd_dme_set(hba,
				UIC_ARG_MIB_SEL(TX_LCC_ENABLE,
					UIC_ARG_MPHY_TX_GEN_SEL_INDEX(i)),
					0);
		else
			err = ufshcd_dme_peer_set(hba,
				UIC_ARG_MIB_SEL(TX_LCC_ENABLE,
					UIC_ARG_MPHY_TX_GEN_SEL_INDEX(i)),
					0);
		if (err) {
			dev_err(hba->dev, "%s: TX LCC Disable failed, peer = %d, lane = %d, err = %d",
				__func__, peer, i, err);
			break;
		}
	}

	return err;
}

static inline int ufshcd_disable_device_tx_lcc(struct ufs_hba *hba)
{
	return ufshcd_disable_tx_lcc(hba, true);
}
static int ufshcd_hba_enable(struct ufs_hba *hba)
{
	int ret;
	unsigned long flags;

	ufshcd_hold(hba, false);

	spin_lock_irqsave(hba->host->host_lock, flags);
	hba->ufshcd_state = UFSHCD_STATE_RESET;
	spin_unlock_irqrestore(hba->host->host_lock, flags);

	if (hba->vops && hba->vops->host_reset)
		hba->vops->host_reset(hba);

	if (hba->quirks & UFSHCD_QUIRK_USE_OF_HCE) {
		ufshcd_set_link_off(hba);

		/* enable UIC related interrupts */
		ufshcd_enable_intr(hba, UFSHCD_UIC_MASK);

		ret = ufshcd_dme_reset(hba);
		if (!ret)
			ret = ufshcd_dme_enable(hba);
	} else {
		ret = __ufshcd_hba_enable(hba);
	}
	ufshcd_release(hba);

	if (ret)
		dev_err(hba->dev, "Host controller enable failed\n");

	return ret;
}
/**
 * ufshcd_link_startup - Initialize unipro link startup
 * @hba: per adapter instance
 *
 * Returns 0 for success, non-zero in case of failure
 */
static int ufshcd_link_startup(struct ufs_hba *hba)
{
	int ret;
	int retries = DME_LINKSTARTUP_RETRIES;

	ufshcd_hold(hba, false);

	do {
		ufshcd_vops_link_startup_notify(hba, PRE_CHANGE);

		ret = ufshcd_dme_link_startup(hba);

		/* check if device is detected by inter-connect layer */
		if (!ret && !ufshcd_is_device_present(hba)) {
			dev_err(hba->dev, "%s: Device not present\n", __func__);
			ret = -ENXIO;
			goto out;
		}

		/*
		 * DME link lost indication is only received when link is up,
		 * but we can't be sure if the link is up until link startup
		 * succeeds. So reset the local Uni-Pro and try again.
		 */
		if ((ret && !retries) || (ret && ufshcd_hba_enable(hba)))
			goto out;
	} while (ret && retries--);

	if (ret)
		/* failed to get the link up... retire */
		goto out;

	/* Mark that link is up in PWM-G1, 1-lane, SLOW-AUTO mode */
	ufshcd_init_pwr_info(hba);
	ufshcd_print_pwr_info(hba);

	if (hba->quirks & UFSHCD_QUIRK_BROKEN_LCC) {
		ret = ufshcd_disable_device_tx_lcc(hba);
		if (ret)
			goto out;
	}

	/* Include any host controller configuration via UIC commands */
	ret = ufshcd_vops_link_startup_notify(hba, POST_CHANGE);
	if (ret)
		goto out;

	ret = ufshcd_make_hba_operational(hba);
out:
	ufshcd_release(hba);

	if (ret) {
		dev_err(hba->dev, "link startup failed %d\n", ret);
		ufshcd_print_host_state(hba);
		ufshcd_print_pwr_info(hba);
		ufshcd_print_host_regs(hba);
	}
	return ret;
}
/**
 * ufshcd_verify_dev_init() - Verify device initialization
 * @hba: per-adapter instance
 *
 * Send NOP OUT UPIU and wait for NOP IN response to check whether the
 * device Transport Protocol (UTP) layer is ready after a reset.
 * If the UTP layer at the device side is not initialized, it may
 * not respond with NOP IN UPIU within timeout of %NOP_OUT_TIMEOUT
 * and we retry sending NOP OUT for %NOP_OUT_RETRIES iterations.
 */
static int ufshcd_verify_dev_init(struct ufs_hba *hba)
{
	int err = 0;
	int retries;

	ufshcd_hold(hba, false);
	mutex_lock(&hba->dev_cmd.lock);
	for (retries = NOP_OUT_RETRIES; retries > 0; retries--) {
		err = ufshcd_exec_dev_cmd(hba, DEV_CMD_TYPE_NOP,
					  NOP_OUT_TIMEOUT);

		if (!err || err == -ETIMEDOUT)
			break;

		dev_dbg(hba->dev, "%s: error %d retrying\n", __func__, err);
	}
	mutex_unlock(&hba->dev_cmd.lock);
	ufshcd_release(hba);

	if (err)
		dev_err(hba->dev, "%s: NOP OUT failed %d\n", __func__, err);
	return err;
}
/**
 * ufshcd_set_queue_depth - set lun queue depth
 * @sdev: pointer to SCSI device
 *
 * Read bLUQueueDepth value and activate scsi tagged command
 * queueing. For WLUN, queue depth is set to 1. For best-effort
 * cases (bLUQueueDepth = 0) the queue depth is set to a maximum
 * value that host can queue.
 */
static void ufshcd_set_queue_depth(struct scsi_device *sdev)
{
	int ret = 0;
	u8 lun_qdepth;
	struct ufs_hba *hba;

	hba = shost_priv(sdev->host);

	lun_qdepth = hba->nutrs;
	ret = ufshcd_read_unit_desc_param(hba,
					  ufshcd_scsi_to_upiu_lun(sdev->lun),
					  UNIT_DESC_PARAM_LU_Q_DEPTH,
					  &lun_qdepth,
					  sizeof(lun_qdepth));

	/* Some WLUNs don't support unit descriptor */
	if (ret == -EOPNOTSUPP)
		lun_qdepth = 1;
	else if (!lun_qdepth)
		/* eventually, we can figure out the real queue depth */
		lun_qdepth = hba->nutrs;
	else
		lun_qdepth = min_t(int, lun_qdepth, hba->nutrs);

	dev_dbg(hba->dev, "%s: activate tcq with queue depth %d\n",
			__func__, lun_qdepth);
	scsi_change_queue_depth(sdev, lun_qdepth);
}
/*
 * ufshcd_get_lu_wp - returns the "b_lu_write_protect" from UNIT DESCRIPTOR
 * @hba: per-adapter instance
 * @lun: UFS device lun id
 * @b_lu_write_protect: pointer to buffer to hold the LU's write protect info
 *
 * Returns 0 in case of success and b_lu_write_protect status would be returned
 * @b_lu_write_protect parameter.
 * Returns -ENOTSUPP if reading b_lu_write_protect is not supported.
 * Returns -EINVAL in case of invalid parameters passed to this function.
 */
static int ufshcd_get_lu_wp(struct ufs_hba *hba,
			    u8 lun,
			    u8 *b_lu_write_protect)
{
	int ret;

	if (!b_lu_write_protect)
		ret = -EINVAL;
	/*
	 * According to UFS device spec, RPMB LU can't be write
	 * protected so skip reading bLUWriteProtect parameter for
	 * it. For other W-LUs, UNIT DESCRIPTOR is not available.
	 */
	else if (lun >= UFS_UPIU_MAX_GENERAL_LUN)
		ret = -ENOTSUPP;
	else
		ret = ufshcd_read_unit_desc_param(hba,
					  lun,
					  UNIT_DESC_PARAM_LU_WR_PROTECT,
					  b_lu_write_protect,
					  sizeof(*b_lu_write_protect));
	return ret;
}
/**
 * ufshcd_get_lu_power_on_wp_status - get LU's power on write protect
 * status
 * @hba: per-adapter instance
 * @sdev: pointer to SCSI device
 */
static inline void ufshcd_get_lu_power_on_wp_status(struct ufs_hba *hba,
						    struct scsi_device *sdev)
{
	if (hba->dev_info.f_power_on_wp_en &&
	    !hba->dev_info.is_lu_power_on_wp) {
		u8 b_lu_write_protect;

		if (!ufshcd_get_lu_wp(hba, ufshcd_scsi_to_upiu_lun(sdev->lun),
				      &b_lu_write_protect) &&
		    (b_lu_write_protect == UFS_LU_POWER_ON_WP))
			hba->dev_info.is_lu_power_on_wp = true;
	}
}
static void ufshcd_done(struct request *rq)
{
	struct scsi_cmnd *cmd = rq->special;

	scsi_dma_unmap(cmd);
	scsi_softirq_done(rq);
}
/**
 * ufshcd_slave_alloc - handle initial SCSI device configurations
 * @sdev: pointer to SCSI device
 *
 * Returns success
 */
static int ufshcd_slave_alloc(struct scsi_device *sdev)
{
	struct ufs_hba *hba;

	hba = shost_priv(sdev->host);

	/* Mode sense(6) is not supported by UFS, so use Mode sense(10) */
	sdev->use_10_for_ms = 1;

	/* allow SCSI layer to restart the device in case of errors */
	sdev->allow_restart = 1;

	/* REPORT SUPPORTED OPERATION CODES is not supported */
	sdev->no_report_opcodes = 1;

	/* WRITE_SAME command is not supported */
	sdev->no_write_same = 1;

	ufshcd_set_queue_depth(sdev);

	ufshcd_get_lu_power_on_wp_status(hba, sdev);

	blk_queue_softirq_done(sdev->request_queue, ufshcd_done);

	blk_queue_update_dma_alignment(sdev->request_queue, PAGE_SIZE - 1);

	return 0;
}
/**
 * ufshcd_change_queue_depth - change queue depth
 * @sdev: pointer to SCSI device
 * @depth: required depth to set
 *
 * Change queue depth and make sure the max. limits are not crossed.
 */
static int ufshcd_change_queue_depth(struct scsi_device *sdev, int depth)
{
	struct ufs_hba *hba = shost_priv(sdev->host);

	if (depth > hba->nutrs)
		depth = hba->nutrs;
	return scsi_change_queue_depth(sdev, depth);
}
/**
 * ufshcd_slave_configure - adjust SCSI device configurations
 * @sdev: pointer to SCSI device
 */
static int ufshcd_slave_configure(struct scsi_device *sdev)
{
	struct request_queue *q = sdev->request_queue;

	blk_queue_update_dma_pad(q, PRDT_DATA_BYTE_COUNT_PAD - 1);
	blk_queue_max_segment_size(q, PRDT_DATA_BYTE_COUNT_MAX);
	blk_queue_update_dma_alignment(q, PAGE_SIZE - 1);

	return 0;
}
/**
 * ufshcd_slave_destroy - remove SCSI device configurations
 * @sdev: pointer to SCSI device
 */
static void ufshcd_slave_destroy(struct scsi_device *sdev)
{
	struct ufs_hba *hba;

	hba = shost_priv(sdev->host);
	/* Drop the reference as it won't be needed anymore */
	if (ufshcd_scsi_to_upiu_lun(sdev->lun) == UFS_UPIU_UFS_DEVICE_WLUN) {
		unsigned long flags;

		spin_lock_irqsave(hba->host->host_lock, flags);
		hba->sdev_ufs_device = NULL;
		spin_unlock_irqrestore(hba->host->host_lock, flags);
	}
}
/**
 * ufshcd_task_req_compl - handle task management request completion
 * @hba: per adapter instance
 * @index: index of the completed request
 * @resp: task management service response
 *
 * Returns non-zero value on error, zero on success
 */
static int ufshcd_task_req_compl(struct ufs_hba *hba, u32 index, u8 *resp)
{
	struct utp_task_req_desc *task_req_descp;
	struct utp_upiu_task_rsp *task_rsp_upiup;
	unsigned long flags;
	int ocs_value;
	int task_result;

	spin_lock_irqsave(hba->host->host_lock, flags);

	task_req_descp = hba->utmrdl_base_addr;
	ocs_value = ufshcd_get_tmr_ocs(&task_req_descp[index]);

	if (ocs_value == OCS_SUCCESS) {
		task_rsp_upiup = (struct utp_upiu_task_rsp *)
				task_req_descp[index].task_rsp_upiu;
		task_result = be32_to_cpu(task_rsp_upiup->output_param1);
		task_result = task_result & MASK_TM_SERVICE_RESP;
		if (resp)
			*resp = (u8)task_result;
	} else {
		dev_err(hba->dev, "%s: failed, ocs = 0x%x\n",
				__func__, ocs_value);
	}
	spin_unlock_irqrestore(hba->host->host_lock, flags);

	return ocs_value;
}
/**
 * ufshcd_scsi_cmd_status - Update SCSI command result based on SCSI status
 * @lrbp: pointer to local reference block of completed command
 * @scsi_status: SCSI command status
 *
 * Returns value based on SCSI command status
 */
static inline int
ufshcd_scsi_cmd_status(struct ufshcd_lrb *lrbp, int scsi_status)
{
	int result = 0;

	switch (scsi_status) {
	case SAM_STAT_CHECK_CONDITION:
		ufshcd_copy_sense_data(lrbp);
		/* fallthrough */
	case SAM_STAT_GOOD:
		result |= DID_OK << 16 |
			  COMMAND_COMPLETE << 8 |
			  scsi_status;
		break;
	case SAM_STAT_TASK_SET_FULL:
	case SAM_STAT_BUSY:
	case SAM_STAT_TASK_ABORTED:
		ufshcd_copy_sense_data(lrbp);
		result |= scsi_status;
		break;
	default:
		result |= DID_ERROR << 16;
		break;
	} /* end of switch */

	return result;
}
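/*
 * The result word assembled above follows the SCSI mid-layer convention:
 * host byte in bits 16-23, message byte in bits 8-15, status byte in bits
 * 0-7. For example, a command that completed with CHECK CONDITION is
 * reported as (DID_OK << 16) | (COMMAND_COMPLETE << 8) |
 * SAM_STAT_CHECK_CONDITION, with the sense data copied out beforehand.
 */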
/**
 * ufshcd_transfer_rsp_status - Get overall status of the response
 * @hba: per adapter instance
 * @lrbp: pointer to local reference block of completed command
 *
 * Returns result of the command to notify SCSI midlayer
 */
static inline int
ufshcd_transfer_rsp_status(struct ufs_hba *hba, struct ufshcd_lrb *lrbp)
{
	int result = 0;
	int scsi_status;
	int ocs;

	/* overall command status of utrd */
	ocs = ufshcd_get_tr_ocs(lrbp);

	switch (ocs) {
	case OCS_SUCCESS:
	case OCS_FATAL_ERROR:
		result = ufshcd_get_req_rsp(lrbp->ucd_rsp_ptr);
		hba->ufs_stats.last_hibern8_exit_tstamp = ktime_set(0, 0);
		switch (result) {
		case UPIU_TRANSACTION_RESPONSE:
			/*
			 * get the response UPIU result to extract
			 * the SCSI command status
			 */
			result = ufshcd_get_rsp_upiu_result(lrbp->ucd_rsp_ptr);

			/*
			 * get the result based on SCSI status response
			 * to notify the SCSI midlayer of the command status
			 */
			scsi_status = result & MASK_SCSI_STATUS;
			result = ufshcd_scsi_cmd_status(lrbp, scsi_status);

			/*
			 * Currently we are only supporting BKOPs exception
			 * events hence we can ignore BKOPs exception event
			 * during power management callbacks. BKOPs exception
			 * event is not expected to be raised in runtime suspend
			 * callback as it allows the urgent bkops.
			 * During system suspend, we are anyway forcefully
			 * disabling the bkops and if urgent bkops is needed
			 * it will be enabled on system resume. Long term
			 * solution could be to abort the system suspend if
			 * UFS device needs urgent BKOPs.
			 */
			if (!hba->pm_op_in_progress &&
			    ufshcd_is_exception_event(lrbp->ucd_rsp_ptr) &&
			    scsi_host_in_recovery(hba->host)) {
				schedule_work(&hba->eeh_work);
				dev_info(hba->dev, "exception event reported\n");
			}
			break;
		case UPIU_TRANSACTION_REJECT_UPIU:
			/* TODO: handle Reject UPIU Response */
			result = DID_ERROR << 16;
			dev_err(hba->dev,
				"Reject UPIU not fully implemented\n");
			break;
		default:
			result = DID_ERROR << 16;
			dev_err(hba->dev,
				"Unexpected request response code = %x\n",
				result);
			break;
		}
		break;
	case OCS_ABORTED:
		result |= DID_ABORT << 16;
		break;
	case OCS_INVALID_COMMAND_STATUS:
		result |= DID_REQUEUE << 16;
		break;
	case OCS_INVALID_CMD_TABLE_ATTR:
	case OCS_INVALID_PRDT_ATTR:
	case OCS_MISMATCH_DATA_BUF_SIZE:
	case OCS_MISMATCH_RESP_UPIU_SIZE:
	case OCS_PEER_COMM_FAILURE:
	default:
		result |= DID_ERROR << 16;
		dev_err(hba->dev,
			"OCS error from controller = %x for tag %d\n",
			ocs, lrbp->task_tag);
		ufshcd_print_host_regs(hba);
		ufshcd_print_host_state(hba);
		break;
	} /* end of switch */

	if (host_byte(result) != DID_OK)
		ufshcd_print_trs(hba, 1 << lrbp->task_tag, true);
	return result;
}
/**
 * ufshcd_uic_cmd_compl - handle completion of uic command
 * @hba: per adapter instance
 * @intr_status: interrupt status generated by the controller
 */
static void ufshcd_uic_cmd_compl(struct ufs_hba *hba, u32 intr_status)
{
	if ((intr_status & UIC_COMMAND_COMPL) && hba->active_uic_cmd) {
		hba->active_uic_cmd->argument2 |=
			ufshcd_get_uic_cmd_result(hba);
		hba->active_uic_cmd->argument3 =
			ufshcd_get_dme_attr_val(hba);
		complete(&hba->active_uic_cmd->done);
	}

	if ((intr_status & UFSHCD_UIC_PWR_MASK) && hba->uic_async_done)
		complete(hba->uic_async_done);
}
/**
 * __ufshcd_transfer_req_compl - handle SCSI and query command completion
 * @hba: per adapter instance
 * @completed_reqs: requests to complete
 */
static void __ufshcd_transfer_req_compl(struct ufs_hba *hba, int reason,
					unsigned long completed_reqs)
{
	struct ufshcd_lrb *lrbp;
	struct scsi_cmnd *cmd;
	int result;
	int index;

	for_each_set_bit(index, &completed_reqs, hba->nutrs) {
		lrbp = &hba->lrb[index];
		cmd = lrbp->cmd;
		if (cmd) {
			ufshcd_add_command_trace(hba, index, "complete");
			result = ufshcd_vops_crypto_engine_clear(hba, lrbp);
			if (result)
				dev_err(hba->dev,
					"%s: failed to clear crypto engine (%d)\n",
					__func__, result);
			result = ufshcd_transfer_rsp_status(hba, lrbp);
			cmd->result = result;
			if (reason)
				set_host_byte(cmd, reason);
			/* Mark completed command as NULL in LRB */
			lrbp->cmd = NULL;
			clear_bit_unlock(index, &hba->lrb_in_use);
			/* Do not touch lrbp after scsi done */
			cmd->scsi_done(cmd);
#ifdef CONFIG_SCSI_UFS_CMD_LOGGING
			exynos_ufs_cmd_log_end(hba, index);
#endif
			__ufshcd_release(hba);

			if (hba->monitor.flag & UFSHCD_MONITOR_LEVEL1)
				dev_info(hba->dev, "Transfer Done(%d)\n",
						index);
		} else if (lrbp->command_type == UTP_CMD_TYPE_DEV_MANAGE ||
			lrbp->command_type == UTP_CMD_TYPE_UFS_STORAGE) {
			if (hba->dev_cmd.complete) {
				ufshcd_add_command_trace(hba, index,
						"dev_complete");
				complete(hba->dev_cmd.complete);
			}
		}
		if (ufshcd_is_clkscaling_supported(hba))
			hba->clk_scaling.active_reqs--;
	}

	/* clear corresponding bits of completed commands */
	hba->outstanding_reqs ^= completed_reqs;
#if defined(CONFIG_PM_DEVFREQ)
	ufshcd_clk_scaling_update_busy(hba);
#endif

	/* we might have free'd some tags above */
	wake_up(&hba->dev_cmd.tag_wq);
}
/**
 * ufshcd_transfer_req_compl - handle SCSI and query command completion
 * @hba: per adapter instance
 */
static void ufshcd_transfer_req_compl(struct ufs_hba *hba, int reason)
{
	unsigned long completed_reqs;
	u32 tr_doorbell;

	/* Resetting interrupt aggregation counters first and reading the
	 * DOOR_BELL afterward allows us to handle all the completed requests.
	 * In order to prevent other interrupts starvation the DB is read once
	 * after reset. The down side of this solution is the possibility of
	 * false interrupt if device completes another request after resetting
	 * aggregation and before reading the DB.
	 */
	if (!ufshcd_can_reset_intr_aggr(hba) && ufshcd_is_intr_aggr_allowed(hba))
		ufshcd_reset_intr_aggr(hba);

	tr_doorbell = ufshcd_readl(hba, REG_UTP_TRANSFER_REQ_DOOR_BELL);
	completed_reqs = tr_doorbell ^ hba->outstanding_reqs;

	__ufshcd_transfer_req_compl(hba, reason, completed_reqs);
}
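/*
 * Worked example of the doorbell XOR above: if outstanding_reqs is 0b1011
 * (tags 0, 1 and 3 issued) and the doorbell now reads 0b0010 (only tag 1
 * still owned by the controller), then completed_reqs = 0b1011 ^ 0b0010 =
 * 0b1001, i.e. tags 0 and 3 are ready to be completed.
 */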
/**
 * ufshcd_disable_ee - disable exception event
 * @hba: per-adapter instance
 * @mask: exception event to disable
 *
 * Disables exception event in the device so that the EVENT_ALERT
 * bit is not set.
 *
 * Returns zero on success, non-zero error value on failure.
 */
static int ufshcd_disable_ee(struct ufs_hba *hba, u16 mask)
{
	int err = 0;
	u32 val;

	if (!(hba->ee_ctrl_mask & mask))
		goto out;

	val = hba->ee_ctrl_mask & ~mask;
	val &= MASK_EE_STATUS;
	err = ufshcd_query_attr_retry(hba, UPIU_QUERY_OPCODE_WRITE_ATTR,
			QUERY_ATTR_IDN_EE_CONTROL, 0, 0, &val);
	if (!err)
		hba->ee_ctrl_mask &= ~mask;
out:
	return err;
}
/**
 * ufshcd_enable_ee - enable exception event
 * @hba: per-adapter instance
 * @mask: exception event to enable
 *
 * Enable corresponding exception event in the device to allow
 * device to alert host in critical scenarios.
 *
 * Returns zero on success, non-zero error value on failure.
 */
static int ufshcd_enable_ee(struct ufs_hba *hba, u16 mask)
{
	int err = 0;
	u32 val;

	if (hba->ee_ctrl_mask & mask)
		goto out;

	val = hba->ee_ctrl_mask | mask;
	val &= MASK_EE_STATUS;
	err = ufshcd_query_attr_retry(hba, UPIU_QUERY_OPCODE_WRITE_ATTR,
			QUERY_ATTR_IDN_EE_CONTROL, 0, 0, &val);
	if (!err)
		hba->ee_ctrl_mask |= mask;
out:
	return err;
}
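/*
 * Both helpers above mirror the device's wExceptionEventControl attribute in
 * hba->ee_ctrl_mask and only update the cached mask once the query succeeds,
 * so host state never diverges from the device on a failed write.
 * MASK_EE_URGENT_BKOPS, for instance, corresponds to the URGENT_BKOPS bit in
 * that attribute.
 */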
/**
 * ufshcd_enable_auto_bkops - Allow device managed BKOPS
 * @hba: per-adapter instance
 *
 * Allow device to manage background operations on its own. Enabling
 * this might lead to inconsistent latencies during normal data transfers
 * as the device is allowed to manage its own way of handling background
 * operations.
 *
 * Returns zero on success, non-zero on failure.
 */
static int ufshcd_enable_auto_bkops(struct ufs_hba *hba)
{
	int err = 0;

	if (hba->auto_bkops_enabled)
		goto out;

	err = ufshcd_query_flag_retry(hba, UPIU_QUERY_OPCODE_SET_FLAG,
			QUERY_FLAG_IDN_BKOPS_EN, NULL);
	if (err) {
		dev_err(hba->dev, "%s: failed to enable bkops %d\n",
				__func__, err);
		goto out;
	}

	hba->auto_bkops_enabled = true;
	trace_ufshcd_auto_bkops_state(dev_name(hba->dev), "Enabled");

	/* No need of URGENT_BKOPS exception from the device */
	err = ufshcd_disable_ee(hba, MASK_EE_URGENT_BKOPS);
	if (err)
		dev_err(hba->dev, "%s: failed to disable exception event %d\n",
				__func__, err);
out:
	return err;
}
/**
 * ufshcd_disable_auto_bkops - block device in doing background operations
 * @hba: per-adapter instance
 *
 * Disabling background operations improves command response latency but
 * has the drawback of the device moving into a critical state where it is
 * not operable. Make sure to call ufshcd_enable_auto_bkops() whenever the
 * host is idle so that BKOPS are managed effectively without any negative
 * impacts.
 *
 * Returns zero on success, non-zero on failure.
 */
static int ufshcd_disable_auto_bkops(struct ufs_hba *hba)
{
	int err = 0;

	if (!hba->auto_bkops_enabled)
		goto out;

	/*
	 * If host assisted BKOPs is to be enabled, make sure
	 * urgent bkops exception is allowed.
	 */
	err = ufshcd_enable_ee(hba, MASK_EE_URGENT_BKOPS);
	if (err) {
		dev_err(hba->dev, "%s: failed to enable exception event %d\n",
				__func__, err);
		goto out;
	}

	err = ufshcd_query_flag_retry(hba, UPIU_QUERY_OPCODE_CLEAR_FLAG,
			QUERY_FLAG_IDN_BKOPS_EN, NULL);
	if (err) {
		dev_err(hba->dev, "%s: failed to disable bkops %d\n",
				__func__, err);
		ufshcd_disable_ee(hba, MASK_EE_URGENT_BKOPS);
		goto out;
	}

	hba->auto_bkops_enabled = false;
	trace_ufshcd_auto_bkops_state(dev_name(hba->dev), "Disabled");
out:
	return err;
}
/**
 * ufshcd_force_reset_auto_bkops - force reset auto bkops state
 * @hba: per adapter instance
 *
 * After a device reset the device may toggle the BKOPS_EN flag
 * to default value. The s/w tracking variables should be updated
 * as well. This function would change the auto-bkops state based on
 * UFSHCD_CAP_KEEP_AUTO_BKOPS_ENABLED_EXCEPT_SUSPEND.
 */
static void ufshcd_force_reset_auto_bkops(struct ufs_hba *hba)
{
	if (ufshcd_keep_autobkops_enabled_except_suspend(hba)) {
		hba->auto_bkops_enabled = false;
		hba->ee_ctrl_mask |= MASK_EE_URGENT_BKOPS;
		ufshcd_enable_auto_bkops(hba);
	} else {
		hba->auto_bkops_enabled = true;
		hba->ee_ctrl_mask &= ~MASK_EE_URGENT_BKOPS;
		ufshcd_disable_auto_bkops(hba);
	}
}
static inline int ufshcd_get_bkops_status(struct ufs_hba *hba, u32 *status)
{
	return ufshcd_query_attr_retry(hba, UPIU_QUERY_OPCODE_READ_ATTR,
			QUERY_ATTR_IDN_BKOPS_STATUS, 0, 0, status);
}
/**
 * ufshcd_bkops_ctrl - control the auto bkops based on current bkops status
 * @hba: per-adapter instance
 * @status: bkops_status value
 *
 * Read the bkops_status from the UFS device and enable the fBackgroundOpsEn
 * flag in the device to permit background operations if the device
 * bkops_status is greater than or equal to the "status" argument passed to
 * this function, disable otherwise.
 *
 * Returns 0 for success, non-zero in case of failure.
 *
 * NOTE: Caller of this function can check the "hba->auto_bkops_enabled" flag
 * to know whether auto bkops is enabled or disabled after this function
 * returns control to it.
 */
static int ufshcd_bkops_ctrl(struct ufs_hba *hba,
			     enum bkops_status status)
{
	int err;
	u32 curr_status = 0;

	err = ufshcd_get_bkops_status(hba, &curr_status);
	if (err) {
		dev_err(hba->dev, "%s: failed to get BKOPS status %d\n",
				__func__, err);
		goto out;
	} else if (curr_status > BKOPS_STATUS_MAX) {
		dev_err(hba->dev, "%s: invalid BKOPS status %d\n",
				__func__, curr_status);
		err = -EINVAL;
		goto out;
	}

	if (curr_status >= status) {
		err = ufshcd_enable_auto_bkops(hba);
		if (!err)
			dev_info(hba->dev, "%s: auto_bkops enabled, status : %d\n",
				__func__, curr_status);
	} else {
		err = ufshcd_disable_auto_bkops(hba);
	}
out:
	return err;
}
/**
 * ufshcd_urgent_bkops - handle urgent bkops exception event
 * @hba: per-adapter instance
 *
 * Enable fBackgroundOpsEn flag in the device to permit background
 * operations.
 *
 * Returns 0 if BKOPs is enabled, 1 if it is not enabled, and a negative
 * error value for any other failure.
 */
static int ufshcd_urgent_bkops(struct ufs_hba *hba)
{
	return ufshcd_bkops_ctrl(hba, hba->urgent_bkops_lvl);
}

static inline int ufshcd_get_ee_status(struct ufs_hba *hba, u32 *status)
{
	return ufshcd_query_attr_retry(hba, UPIU_QUERY_OPCODE_READ_ATTR,
			QUERY_ATTR_IDN_EE_STATUS, 0, 0, status);
}
static void ufshcd_bkops_exception_event_handler(struct ufs_hba *hba)
{
	int err;
	u32 curr_status = 0;

	if (hba->is_urgent_bkops_lvl_checked)
		goto enable_auto_bkops;

	err = ufshcd_get_bkops_status(hba, &curr_status);
	if (err) {
		dev_err(hba->dev, "%s: failed to get BKOPS status %d\n",
				__func__, err);
		goto out;
	}

	/*
	 * We are seeing that some devices are raising the urgent bkops
	 * exception events even when BKOPS status doesn't indicate performance
	 * impacted or critical. Handle these devices by determining their
	 * urgent bkops status at runtime.
	 */
	if (curr_status < BKOPS_STATUS_PERF_IMPACT) {
		dev_err(hba->dev, "%s: device raised urgent BKOPS exception for bkops status %d\n",
				__func__, curr_status);
		/* update the current status as the urgent bkops level */
		hba->urgent_bkops_lvl = curr_status;
		hba->is_urgent_bkops_lvl_checked = true;
	}

enable_auto_bkops:
	err = ufshcd_enable_auto_bkops(hba);
out:
	if (err < 0)
		dev_err(hba->dev, "%s: failed to handle urgent bkops %d\n",
				__func__, err);
}
/**
 * ufshcd_exception_event_handler - handle exceptions raised by device
 * @work: pointer to work data
 *
 * Read bExceptionEventStatus attribute from the device and handle the
 * exception event accordingly.
 */
static void ufshcd_exception_event_handler(struct work_struct *work)
{
	struct ufs_hba *hba;
	int err;
	u32 status = 0;

	hba = container_of(work, struct ufs_hba, eeh_work);

	pm_runtime_get_sync(hba->dev);
	scsi_block_requests(hba->host);
	err = ufshcd_get_ee_status(hba, &status);
	if (err) {
		dev_err(hba->dev, "%s: failed to get exception status %d\n",
				__func__, err);
		goto out;
	}

	status &= hba->ee_ctrl_mask;

	if (status & MASK_EE_URGENT_BKOPS)
		ufshcd_bkops_exception_event_handler(hba);

out:
	scsi_unblock_requests(hba->host);
	pm_runtime_put_sync(hba->dev);
}
/* Complete requests that have door-bell cleared */
static void ufshcd_complete_requests(struct ufs_hba *hba)
{
	ufshcd_transfer_req_compl(hba, 0);
	ufshcd_tmc_handler(hba);
}
/**
 * ufshcd_quirk_dl_nac_errors - This function checks whether error handling
 *				is required to recover from DL NAC errors.
 * @hba: per-adapter instance
 *
 * Returns true if error handling is required, false otherwise
 */
static bool ufshcd_quirk_dl_nac_errors(struct ufs_hba *hba)
{
	unsigned long flags;
	bool err_handling = true;

	spin_lock_irqsave(hba->host->host_lock, flags);
	/*
	 * UFS_DEVICE_QUIRK_RECOVERY_FROM_DL_NAC_ERRORS only works around
	 * device fatal errors and/or DL NAC & REPLAY timeout errors.
	 */
	if (hba->saved_err & (CONTROLLER_FATAL_ERROR | SYSTEM_BUS_FATAL_ERROR))
		goto out;

	if ((hba->saved_err & DEVICE_FATAL_ERROR) ||
	    ((hba->saved_err & UIC_ERROR) &&
	     (hba->saved_uic_err & UFSHCD_UIC_DL_TCx_REPLAY_ERROR)))
		goto out;

	if ((hba->saved_err & UIC_ERROR) &&
	    (hba->saved_uic_err & UFSHCD_UIC_DL_NAC_RECEIVED_ERROR)) {
		int err;
		/*
		 * wait for 50ms to see if we can get any other errors or not.
		 */
		spin_unlock_irqrestore(hba->host->host_lock, flags);
		msleep(50);
		spin_lock_irqsave(hba->host->host_lock, flags);

		/*
		 * now check if we have got any other severe errors other than
		 * DL NAC error?
		 */
		if ((hba->saved_err & INT_FATAL_ERRORS) ||
		    ((hba->saved_err & UIC_ERROR) &&
		    (hba->saved_uic_err & ~UFSHCD_UIC_DL_NAC_RECEIVED_ERROR)))
			goto out;

		/*
		 * As DL NAC is the only error received so far, send out NOP
		 * command to confirm if link is still active or not.
		 *   - If we don't get any response then do error recovery.
		 *   - If we get response then clear the DL NAC error bit.
		 */
		spin_unlock_irqrestore(hba->host->host_lock, flags);
		err = ufshcd_verify_dev_init(hba);
		spin_lock_irqsave(hba->host->host_lock, flags);

		if (err)
			goto out;

		/* Link seems to be alive hence ignore the DL NAC errors */
		if (hba->saved_uic_err == UFSHCD_UIC_DL_NAC_RECEIVED_ERROR)
			hba->saved_err &= ~UIC_ERROR;
		/* clear NAC error */
		hba->saved_uic_err &= ~UFSHCD_UIC_DL_NAC_RECEIVED_ERROR;
		if (!hba->saved_uic_err) {
			err_handling = false;
			goto out;
		}
	}
out:
	spin_unlock_irqrestore(hba->host->host_lock, flags);
	return err_handling;
}
/**
 * ufshcd_err_handler - handle UFS errors that require s/w attention
 * @work: pointer to work structure
 */
static void ufshcd_err_handler(struct work_struct *work)
{
	struct ufs_hba *hba;
	struct ufs_vreg_info *info;
	struct exynos_ufs *ufs;
	unsigned long flags;
	u32 err_xfer = 0;
	u32 err_tm = 0;
	int err = 0;
	int tag;
	bool needs_reset = false;

	hba = container_of(work, struct ufs_hba, eh_work);
	info = &hba->vreg_info;

	pm_runtime_get_sync(hba->dev);
	ufshcd_hold(hba, false);

	ufs = to_exynos_ufs(hba);
	if (hba->saved_err & UIC_ERROR) {
		dev_err(hba->dev, ": CLKSTOP CTRL(0x%04x):\t\t\t\t0x%08x\n",
			HCI_CLKSTOP_CTRL, hci_readl(ufs, HCI_CLKSTOP_CTRL));
		dev_err(hba->dev, ": FORCE HCS(0x%04x):\t\t\t\t0x%08x\n",
			HCI_FORCE_HCS, hci_readl(ufs, HCI_FORCE_HCS));
	}

	/* Dump debugging information to system memory */
	ufshcd_vops_dbg_register_dump(hba);

	/* Dump UFS power & reset_n GPIO status */
	if (gpio_is_valid(info->ufs_power_gpio))
		dev_info(hba->dev, "%s: UFS power pin: 0x%08x\n", __func__,
			 gpio_get_value(info->ufs_power_gpio));
	if (gpio_is_valid(info->ufs_reset_n_gpio))
		dev_info(hba->dev, "%s: RESET_N: 0x%08x\n", __func__,
			 gpio_get_value(info->ufs_reset_n_gpio));

	spin_lock_irqsave(hba->host->host_lock, flags);
	if (hba->ufshcd_state == UFSHCD_STATE_RESET)
		goto out;

	hba->ufshcd_state = UFSHCD_STATE_RESET;
	ufshcd_set_eh_in_progress(hba);
	exynos_ufs_show_uic_info(hba);

	/* Complete requests that have door-bell cleared by h/w */
	ufshcd_complete_requests(hba);

	if (hba->dev_quirks & UFS_DEVICE_QUIRK_RECOVERY_FROM_DL_NAC_ERRORS) {
		bool ret;

		spin_unlock_irqrestore(hba->host->host_lock, flags);
		/* release the lock as ufshcd_quirk_dl_nac_errors() may sleep */
		ret = ufshcd_quirk_dl_nac_errors(hba);
		spin_lock_irqsave(hba->host->host_lock, flags);
		if (!ret)
			goto skip_err_handling;
	}
	if ((hba->saved_err & INT_FATAL_ERRORS) ||
	    ((hba->saved_err & UIC_ERROR) &&
	    (hba->saved_uic_err & (UFSHCD_UIC_DL_PA_INIT_ERROR |
				   UFSHCD_UIC_DL_ERROR |
				   UFSHCD_UIC_DL_NAC_RECEIVED_ERROR |
				   UFSHCD_UIC_DL_TCx_REPLAY_ERROR))))
		needs_reset = true;

	/*
	 * if host reset is required then skip clearing the pending
	 * transfers forcefully because they will automatically get
	 * cleared after link startup.
	 */
	if (needs_reset)
		goto skip_pending_xfer_clear;

	/* release lock as clear command might sleep */
	spin_unlock_irqrestore(hba->host->host_lock, flags);
	/* Clear pending transfer requests */
	for_each_set_bit(tag, &hba->outstanding_reqs, hba->nutrs) {
		if (ufshcd_clear_cmd(hba, tag)) {
			err_xfer = true;
			goto lock_skip_pending_xfer_clear;
		}
	}

	/* Clear pending task management requests */
	for_each_set_bit(tag, &hba->outstanding_tasks, hba->nutmrs) {
		if (ufshcd_clear_tm_cmd(hba, tag)) {
			err_tm = true;
			goto lock_skip_pending_xfer_clear;
		}
	}

lock_skip_pending_xfer_clear:
	spin_lock_irqsave(hba->host->host_lock, flags);

	/* Complete the requests that are cleared by s/w */
	ufshcd_complete_requests(hba);

	if (err_xfer || err_tm)
		needs_reset = true;

skip_pending_xfer_clear:
	/* Fatal errors need reset */
	if (needs_reset) {
		unsigned long max_doorbells = (1UL << hba->nutrs) - 1;

		/*
		 * ufshcd_reset_and_restore() does the link reinitialization
		 * which will need at least one empty doorbell slot to send the
		 * device management commands (NOP and query commands).
		 * If there is no slot empty at this moment then free up last
		 * slot forcefully.
		 */
		if (hba->outstanding_reqs == max_doorbells)
			__ufshcd_transfer_req_compl(hba, 0,
						    (1UL << (hba->nutrs - 1)));

		spin_unlock_irqrestore(hba->host->host_lock, flags);

		if (err_xfer || err_tm || (hba->saved_err & INT_FATAL_ERRORS) ||
		    ((hba->saved_err & UIC_ERROR) &&
		    ((hba->saved_uic_err & UFSHCD_UIC_DL_PA_INIT_ERROR) ||
		     (hba->saved_uic_err & UFSHCD_UIC_DL_ERROR))))
			dev_err(hba->dev,
				"%s: saved_err:0x%x, saved_uic_err:0x%x\n",
				__func__, hba->saved_err, hba->saved_uic_err);

		err = ufshcd_reset_and_restore(hba);
		spin_lock_irqsave(hba->host->host_lock, flags);
		if (err) {
			hba->ufshcd_state = UFSHCD_STATE_ERROR;
			dev_err(hba->dev, "%s: reset and restore failed\n",
					__func__);
		}
		hba->saved_err = 0;
		hba->saved_uic_err = 0;
	}

skip_err_handling:
	if (!needs_reset) {
		hba->ufshcd_state = UFSHCD_STATE_OPERATIONAL;
		if (hba->saved_err || hba->saved_uic_err)
			dev_err_ratelimited(hba->dev, "%s: exit: saved_err 0x%x saved_uic_err 0x%x",
				__func__, hba->saved_err, hba->saved_uic_err);
	}

	ufshcd_clear_eh_in_progress(hba);

out:
	spin_unlock_irqrestore(hba->host->host_lock, flags);
	scsi_unblock_requests(hba->host);
	ufshcd_release(hba);
	pm_runtime_put_sync(hba->dev);
}
static void ufshcd_update_uic_reg_hist(struct ufs_uic_err_reg_hist *reg_hist,
		u32 reg)
{
	reg_hist->reg[reg_hist->pos] = reg;
	reg_hist->tstamp[reg_hist->pos] = ktime_get();
	reg_hist->pos = (reg_hist->pos + 1) % UIC_ERR_REG_HIST_LENGTH;
}
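/*
 * The error history above is a small ring buffer: pos points at the slot to
 * overwrite next and advances modulo UIC_ERR_REG_HIST_LENGTH, so the last
 * UIC_ERR_REG_HIST_LENGTH error register values and their timestamps are
 * retained for post-mortem debugging.
 */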
/**
 * ufshcd_update_uic_error - check and set fatal UIC error flags.
 * @hba: per-adapter instance
 */
static void ufshcd_update_uic_error(struct ufs_hba *hba)
{
	u32 reg;

	/* PHY layer lane error */
	reg = ufshcd_readl(hba, REG_UIC_ERROR_CODE_PHY_ADAPTER_LAYER);
	/* Ignore LINERESET indication, as this is not an error */
	if ((reg & UIC_PHY_ADAPTER_LAYER_ERROR) &&
	    (reg & UIC_PHY_ADAPTER_LAYER_LANE_ERR_MASK)) {
		/*
		 * To know whether this error is fatal or not, DB timeout
		 * must be checked but this error is handled separately.
		 */
		dev_dbg(hba->dev, "%s: UIC Lane error reported\n", __func__);
		ufshcd_update_uic_reg_hist(&hba->ufs_stats.pa_err, reg);
	}

	/* PA_INIT_ERROR is fatal and needs UIC reset */
	reg = ufshcd_readl(hba, REG_UIC_ERROR_CODE_DATA_LINK_LAYER);
	if (reg)
		ufshcd_update_uic_reg_hist(&hba->ufs_stats.dl_err, reg);

	if (reg & UIC_DATA_LINK_LAYER_ERROR_PA_INIT) {
		hba->uic_error |= UFSHCD_UIC_DL_PA_INIT_ERROR;
	} else if (reg & UIC_DATA_LINK_LAYER_ERROR_PA_ERROR_IND_RECEIVED) {
		if (hba->saved_uic_phy_err_cnt > 10) {
			hba->uic_error |= UFSHCD_UIC_DL_PA_INIT_ERROR;
			hba->saved_uic_phy_err_cnt = 0;
		} else {
			hba->saved_uic_phy_err_cnt++;
		}
	} else if (hba->dev_quirks &
		   UFS_DEVICE_QUIRK_RECOVERY_FROM_DL_NAC_ERRORS) {
		if (reg & UIC_DATA_LINK_LAYER_ERROR_NAC_RECEIVED)
			hba->uic_error |=
				UFSHCD_UIC_DL_NAC_RECEIVED_ERROR;
		else if (reg & UIC_DATA_LINK_LAYER_ERROR_TCx_REPLAY_TIMEOUT)
			hba->uic_error |= UFSHCD_UIC_DL_TCx_REPLAY_ERROR;
	}

	if (reg & UIC_DATA_LINK_LAYER_ERROR_TCX_REP_TIMER_EXP)
		hba->tcx_replay_timer_expired_cnt++;

	if (reg & UIC_DATA_LINK_LAYER_ERROR_FCX_PRO_TIMER_EXP)
		hba->fcx_protection_timer_expired_cnt++;

	if (hba->tcx_replay_timer_expired_cnt >= 2 ||
	    hba->fcx_protection_timer_expired_cnt >= 2)
		hba->uic_error |= UFSHCD_UIC_DL_ERROR;

	/* UIC NL/TL/DME errors need software retry */
	reg = ufshcd_readl(hba, REG_UIC_ERROR_CODE_NETWORK_LAYER);
	if (reg) {
		ufshcd_update_uic_reg_hist(&hba->ufs_stats.nl_err, reg);
		hba->uic_error |= UFSHCD_UIC_NL_ERROR;
	}

	reg = ufshcd_readl(hba, REG_UIC_ERROR_CODE_TRANSPORT_LAYER);
	if (reg) {
		ufshcd_update_uic_reg_hist(&hba->ufs_stats.tl_err, reg);
		hba->uic_error |= UFSHCD_UIC_TL_ERROR;
	}

	reg = ufshcd_readl(hba, REG_UIC_ERROR_CODE_DME);
	if (reg) {
		ufshcd_update_uic_reg_hist(&hba->ufs_stats.dme_err, reg);
		hba->uic_error |= UFSHCD_UIC_DME_ERROR;
	}

	dev_dbg(hba->dev, "%s: UIC error flags = 0x%08x\n",
			__func__, hba->uic_error);
}
/**
 * ufshcd_check_errors - Check for errors that need s/w attention
 * @hba: per-adapter instance
 */
static void ufshcd_check_errors(struct ufs_hba *hba)
{
	bool queue_eh_work = false;

	if (hba->errors & INT_FATAL_ERRORS)
		queue_eh_work = true;

	if (hba->errors & UIC_ERROR) {
		hba->uic_error = 0;
		ufshcd_update_uic_error(hba);
		if (hba->uic_error)
			queue_eh_work = true;
	}

	if (queue_eh_work) {
		/*
		 * update the transfer error masks to sticky bits, let's do this
		 * irrespective of current ufshcd_state.
		 */
		hba->saved_err |= hba->errors;
		hba->saved_uic_err |= hba->uic_error;

		/* handle fatal errors only when link is functional */
		if (hba->ufshcd_state == UFSHCD_STATE_OPERATIONAL) {
			/* block commands from scsi mid-layer */
			scsi_block_requests(hba->host);

			hba->ufshcd_state = UFSHCD_STATE_EH_SCHEDULED;

			/* dump controller state before resetting */
			if (hba->saved_err & (INT_FATAL_ERRORS | UIC_ERROR)) {
				bool pr_prdt = !!(hba->saved_err &
						SYSTEM_BUS_FATAL_ERROR);

				dev_err(hba->dev, "%s: saved_err 0x%x saved_uic_err 0x%x\n",
					__func__, hba->saved_err,
					hba->saved_uic_err);

				ufshcd_print_host_regs(hba);
				ufshcd_print_pwr_info(hba);
				ufshcd_print_tmrs(hba, hba->outstanding_tasks);
				ufshcd_print_trs(hba, hba->outstanding_reqs,
							pr_prdt);
			}
			schedule_work(&hba->eh_work);
		}
	}
	/*
	 * if (!queue_eh_work) -
	 * Other errors are either non-fatal where host recovers
	 * itself without s/w intervention or errors that will be
	 * handled by the SCSI core layer.
	 */
}
/**
 * ufshcd_tmc_handler - handle task management function completion
 * @hba: per adapter instance
 */
static void ufshcd_tmc_handler(struct ufs_hba *hba)
{
	u32 tm_doorbell;

	tm_doorbell = ufshcd_readl(hba, REG_UTP_TASK_REQ_DOOR_BELL);
	hba->tm_condition = tm_doorbell ^ hba->outstanding_tasks;
	hba->outstanding_tasks ^= hba->tm_condition;
	wake_up(&hba->tm_wq);
}
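/*
 * The same XOR scheme used for transfer requests applies here: bits set in
 * outstanding_tasks but already cleared in the TM doorbell identify the
 * completed task management slots, which are handed to waiters via
 * hba->tm_condition and the tm_wq wait queue.
 */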
/**
 * ufshcd_sl_intr - Interrupt service routine
 * @hba: per adapter instance
 * @intr_status: contains interrupts generated by the controller
 */
static void ufshcd_sl_intr(struct ufs_hba *hba, u32 intr_status)
{
	hba->errors = UFSHCD_ERROR_MASK & intr_status;
	if (hba->errors)
		ufshcd_check_errors(hba);
	else
		hba->saved_uic_phy_err_cnt = 0;

	if (intr_status & UFSHCD_UIC_MASK)
		ufshcd_uic_cmd_compl(hba, intr_status);

	if (intr_status & UTP_TASK_REQ_COMPL)
		ufshcd_tmc_handler(hba);

	if (intr_status & UTP_TRANSFER_REQ_COMPL)
		ufshcd_transfer_req_compl(hba, 0);

	/* Disable the UIC error interrupt to stop a UIC interrupt storm */
	if (hba->saved_uic_err && (hba->ufshcd_state != UFSHCD_STATE_RESET))
		ufshcd_disable_intr(hba, UIC_ERROR);
}
/**
 * ufshcd_intr - Main interrupt service routine
 * @irq: irq number
 * @__hba: pointer to adapter instance
 *
 * Returns IRQ_HANDLED - If interrupt is valid
 *		IRQ_NONE - If invalid interrupt
 */
static irqreturn_t ufshcd_intr(int irq, void *__hba)
{
	u32 intr_status, enabled_intr_status;
	irqreturn_t retval = IRQ_NONE;
	struct ufs_hba *hba = __hba;

	spin_lock(hba->host->host_lock);
	intr_status = ufshcd_readl(hba, REG_INTERRUPT_STATUS);
	enabled_intr_status =
		intr_status & ufshcd_readl(hba, REG_INTERRUPT_ENABLE);

	if (intr_status)
		ufshcd_writel(hba, intr_status, REG_INTERRUPT_STATUS);

	if (enabled_intr_status) {
		ufshcd_sl_intr(hba, enabled_intr_status);
		retval = IRQ_HANDLED;
	}
	spin_unlock(hba->host->host_lock);

	return retval;
}
static int ufshcd_clear_tm_cmd(struct ufs_hba *hba, int tag)
{
	int err = 0;
	u32 mask = 1 << tag;
	unsigned long flags;

	spin_lock_irqsave(hba->host->host_lock, flags);
	ufshcd_utmrl_clear(hba, tag);
	spin_unlock_irqrestore(hba->host->host_lock, flags);

	/* poll for max. 1 sec to clear door bell register by h/w */
	err = ufshcd_wait_for_register(hba,
			REG_UTP_TASK_REQ_DOOR_BELL,
			mask, 0, 1000, 1000, true);
	return err;
}
/**
 * ufshcd_issue_tm_cmd - issues task management commands to controller
 * @hba: per adapter instance
 * @lun_id: LUN ID to which TM command is sent
 * @task_id: task ID to which the TM command is applicable
 * @tm_function: task management function opcode
 * @tm_response: task management service response return value
 *
 * Returns non-zero value on error, zero on success.
 */
static int ufshcd_issue_tm_cmd(struct ufs_hba *hba, int lun_id, int task_id,
		u8 tm_function, u8 *tm_response)
{
	struct utp_task_req_desc *task_req_descp;
	struct utp_upiu_task_req *task_req_upiup;
	struct Scsi_Host *host;
	unsigned long flags;
	int free_slot;
	int err;
	int task_tag;

	host = hba->host;

	/*
	 * Get free slot, sleep if slots are unavailable.
	 * Even though we use wait_event() which sleeps indefinitely,
	 * the maximum wait time is bounded by %TM_CMD_TIMEOUT.
	 */
	wait_event(hba->tm_tag_wq, ufshcd_get_tm_free_slot(hba, &free_slot));
	ufshcd_hold(hba, false);

	spin_lock_irqsave(host->host_lock, flags);
	task_req_descp = hba->utmrdl_base_addr;
	task_req_descp += free_slot;

	/* Configure task request descriptor */
	task_req_descp->header.dword_0 = cpu_to_le32(UTP_REQ_DESC_INT_CMD);
	task_req_descp->header.dword_2 =
			cpu_to_le32(OCS_INVALID_COMMAND_STATUS);

	/* Configure task request UPIU */
	task_req_upiup =
		(struct utp_upiu_task_req *) task_req_descp->task_req_upiu;
	task_tag = hba->nutrs + free_slot;
	task_req_upiup->header.dword_0 =
		UPIU_HEADER_DWORD(UPIU_TRANSACTION_TASK_REQ, 0,
					      lun_id, task_tag);
	task_req_upiup->header.dword_1 =
		UPIU_HEADER_DWORD(0, tm_function, 0, 0);
	/*
	 * The host shall provide the same value for LUN field in the basic
	 * header and for Input Parameter.
	 */
	task_req_upiup->input_param1 = cpu_to_be32(lun_id);
	task_req_upiup->input_param2 = cpu_to_be32(task_id);

	/* send command to the controller */
	if (hba->vops && hba->vops->set_nexus_t_task_mgmt)
		hba->vops->set_nexus_t_task_mgmt(hba, free_slot, tm_function);
	__set_bit(free_slot, &hba->outstanding_tasks);

	/* Make sure descriptors are ready before ringing the task doorbell */
	wmb();

	ufshcd_writel(hba, 1 << free_slot, REG_UTP_TASK_REQ_DOOR_BELL);
	/* Make sure that doorbell is committed immediately */
	wmb();

	spin_unlock_irqrestore(host->host_lock, flags);

	/* wait until the task management command is completed */
	err = wait_event_timeout(hba->tm_wq,
			test_bit(free_slot, &hba->tm_condition),
			msecs_to_jiffies(TM_CMD_TIMEOUT));
	if (!err) {
		dev_err(hba->dev, "%s: task management cmd 0x%.2x timed-out\n",
				__func__, tm_function);
		if (!ufshcd_clear_tm_cmd(hba, free_slot)) {
			spin_lock_irqsave(hba->host->host_lock, flags);
			__clear_bit(free_slot, &hba->outstanding_tasks);
			spin_unlock_irqrestore(hba->host->host_lock, flags);
		} else {
			dev_WARN(hba->dev, "%s: unable to clear tm cmd (slot %d) after timeout\n",
					__func__, free_slot);
		}
		err = -ETIMEDOUT;
	} else {
		err = ufshcd_task_req_compl(hba, free_slot, tm_response);
	}

	clear_bit(free_slot, &hba->tm_condition);
	ufshcd_put_tm_slot(hba, free_slot);
	wake_up(&hba->tm_tag_wq);

	ufshcd_release(hba);
	return err;
}
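/*
 * Task management slots live in a separate UTMRL, so the task tag written
 * into the UPIU is offset past the transfer-request range: with nutrs = 32
 * transfer slots, TM slot 0 is tagged 32 (task_tag = hba->nutrs + free_slot),
 * keeping TM tags distinct from SCSI command tags.
 */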
/**
 * ufshcd_eh_device_reset_handler - device reset handler registered to
 *                                    scsi layer.
 * @cmd: SCSI command pointer
 *
 * Returns SUCCESS/FAILED
 */
static int ufshcd_eh_device_reset_handler(struct scsi_cmnd *cmd)
{
	struct Scsi_Host *host;
	struct ufs_hba *hba;
	unsigned int tag;
	u32 pos;
	int err;
	u8 resp = 0xF;
	struct ufshcd_lrb *lrbp;
	unsigned long flags;

	host = cmd->device->host;
	hba = shost_priv(host);
	tag = cmd->request->tag;

	/* secure log */
#ifdef CONFIG_EXYNOS_SMC_LOGGING
	exynos_smc(SMC_CMD_UFS_LOG, 1, 0, hba->secure_log.paddr);
#endif

	/* Dump debugging information to system memory */
	ufshcd_vops_dbg_register_dump(hba);
	exynos_ufs_show_uic_info(hba);

	lrbp = &hba->lrb[tag];
	err = ufshcd_issue_tm_cmd(hba, lrbp->lun, 0, UFS_LOGICAL_RESET, &resp);
	if (err || resp != UPIU_TASK_MANAGEMENT_FUNC_COMPL) {
		if (!err)
			err = resp;
		goto out;
	}

	/* clear the commands that were pending for corresponding LUN */
	for_each_set_bit(pos, &hba->outstanding_reqs, hba->nutrs) {
		if (hba->lrb[pos].lun == lrbp->lun) {
			err = ufshcd_clear_cmd(hba, pos);
			if (err)
				break;
		}
	}
	spin_lock_irqsave(host->host_lock, flags);
	ufshcd_transfer_req_compl(hba, DID_RESET);
	spin_unlock_irqrestore(host->host_lock, flags);

out:
	hba->req_abort_count = 0;
	if (!err) {
		dev_info(hba->dev, "%s: LU reset succeeded\n", __func__);
		err = SUCCESS;
	} else {
		dev_err(hba->dev, "%s: failed with err %d\n", __func__, err);
		err = FAILED;
	}
	return err;
}
static void ufshcd_set_req_abort_skip(struct ufs_hba *hba, unsigned long bitmap)
{
	struct ufshcd_lrb *lrbp;
	int tag;

	for_each_set_bit(tag, &bitmap, hba->nutrs) {
		lrbp = &hba->lrb[tag];
		lrbp->req_abort_skip = true;
	}
}
/**
 * ufshcd_abort - abort a specific command
 * @cmd: SCSI command pointer
 *
 * Abort the pending command in device by sending UFS_ABORT_TASK task management
 * command, and in host controller by clearing the door-bell register. There can
 * be race between controller sending the command to the device while abort is
 * issued. To avoid that, first issue UFS_QUERY_TASK to check if the command is
 * really issued and then try to abort it.
 *
 * Returns SUCCESS/FAILED
 */
static int ufshcd_abort(struct scsi_cmnd *cmd)
{
	struct Scsi_Host *host;
	struct ufs_hba *hba;
	unsigned long flags;
	unsigned int tag;
	int err = 0;
	int poll_cnt;
	u8 resp = 0xF;
	struct ufshcd_lrb *lrbp;
	u32 reg;

	host = cmd->device->host;
	hba = shost_priv(host);
	tag = cmd->request->tag;
	lrbp = &hba->lrb[tag];
	if (!ufshcd_valid_tag(hba, tag)) {
		dev_err(hba->dev,
			"%s: invalid command tag %d: cmd=0x%p, cmd->request=0x%p",
			__func__, tag, cmd, cmd->request);
		BUG();
	}

	/*
	 * Task abort to the device W-LUN is illegal. When this command
	 * will fail, due to spec violation, scsi err handling next step
	 * will be to send LU reset which, again, is a spec violation.
	 * To avoid these unnecessary/illegal step we skip to the last error
	 * handling stage: reset and restore.
	 */
	if (lrbp->lun == UFS_UPIU_UFS_DEVICE_WLUN)
		return ufshcd_eh_host_reset_handler(cmd);

	/* secure log */
#ifdef CONFIG_EXYNOS_SMC_LOGGING
	exynos_smc(SMC_CMD_UFS_LOG, 1, 0, hba->secure_log.paddr);
#endif

	if (cmd->cmnd[0] == READ_10 || cmd->cmnd[0] == WRITE_10) {
		unsigned long lba = (unsigned long) ((cmd->cmnd[2] << 24) |
					(cmd->cmnd[3] << 16) |
					(cmd->cmnd[4] << 8) |
					(cmd->cmnd[5] << 0));
		unsigned int sct = (cmd->cmnd[7] << 8) |
					(cmd->cmnd[8] << 0);

		dev_err(hba->dev, "%s: tag:%d, cmd:0x%x, "
				"lba:0x%08lx, sct:0x%04x, retries %d\n",
				__func__, tag, cmd->cmnd[0], lba, sct,
				cmd->retries);
	} else {
		dev_err(hba->dev, "%s: tag:%d, cmd:0x%x, retries %d\n",
				__func__, tag, cmd->cmnd[0], cmd->retries);
	}

	ufshcd_hold(hba, false);

	/* Dump debugging information to system memory */
	ufshcd_vops_dbg_register_dump(hba);
	reg = ufshcd_readl(hba, REG_UTP_TRANSFER_REQ_DOOR_BELL);
	/* If command is already aborted/completed, return SUCCESS */
	if (!(test_bit(tag, &hba->outstanding_reqs))) {
		dev_err(hba->dev,
			"%s: cmd at tag %d already completed, outstanding=0x%lx, doorbell=0x%x\n",
			__func__, tag, hba->outstanding_reqs, reg);
		goto out;
	}

	if (!(reg & (1 << tag))) {
		dev_err(hba->dev,
		"%s: cmd was completed, but without a notifying intr, tag = %d",
		__func__, tag);
	}

	/* Print Transfer Request of aborted task */
	dev_err(hba->dev, "%s: Device abort task at tag %d\n", __func__, tag);

	/*
	 * Print detailed info about aborted request.
	 * As more than one request might get aborted at the same time,
	 * print full information only for the first aborted request in order
	 * to reduce repeated printouts. For other aborted requests only print
	 * basic details.
	 */
	scsi_print_command(hba->lrb[tag].cmd);
	if (!hba->req_abort_count) {
		ufshcd_print_host_regs(hba);
		ufshcd_print_host_state(hba);
		ufshcd_print_pwr_info(hba);
		ufshcd_print_trs(hba, 1 << tag, true);
	} else {
		ufshcd_print_trs(hba, 1 << tag, false);
	}
	hba->req_abort_count++;

	/* Skip task abort in case previous aborts failed and report failure */
	if (lrbp->req_abort_skip) {
		err = -EIO;
		goto out;
	}

	for (poll_cnt = 100; poll_cnt; poll_cnt--) {
		err = ufshcd_issue_tm_cmd(hba, lrbp->lun, lrbp->task_tag,
				UFS_QUERY_TASK, &resp);
		if (!err && resp == UPIU_TASK_MANAGEMENT_FUNC_SUCCEEDED) {
			/* cmd pending in the device */
			dev_err(hba->dev, "%s: cmd pending in the device. tag = %d\n",
				__func__, tag);
			break;
		} else if (!err && resp == UPIU_TASK_MANAGEMENT_FUNC_COMPL) {
			/*
			 * cmd not pending in the device, check if it is
			 * in transition.
			 */
			dev_err(hba->dev, "%s: cmd at tag %d not pending in the device.\n",
				__func__, tag);
			reg = ufshcd_readl(hba, REG_UTP_TRANSFER_REQ_DOOR_BELL);
			if (reg & (1 << tag)) {
				/* sleep for max. 200us to stabilize */
				usleep_range(100, 200);
				continue;
			}
			/* command completed already */
			dev_err(hba->dev, "%s: cmd at tag %d successfully cleared from DB.\n",
				__func__, tag);
			goto out;
		} else {
			dev_err(hba->dev,
				"%s: no response from device. tag = %d, err %d\n",
				__func__, tag, err);
			if (!err)
				err = resp; /* service response error */
			else
				dev_err(hba->dev,
					"%s: query task failed with err %d\n",
					__func__, err);
			goto out;
		}
	}

	if (!poll_cnt) {
		err = -EBUSY;
		dev_err(hba->dev,
			"%s: cmd might be missed, not pending in device\n",
			__func__);
		goto out;
	}

	err = ufshcd_issue_tm_cmd(hba, lrbp->lun, lrbp->task_tag,
			UFS_ABORT_TASK, &resp);
	if (err || resp != UPIU_TASK_MANAGEMENT_FUNC_COMPL) {
		if (!err) {
			err = resp; /* service response error */
			dev_err(hba->dev, "%s: issued. tag = %d, err %d\n",
				__func__, tag, err);
		}
		goto out;
	}

	err = ufshcd_clear_cmd(hba, tag);
	if (err) {
		dev_err(hba->dev, "%s: Failed clearing cmd at tag %d, err %d\n",
			__func__, tag, err);
		goto out;
	}

	scsi_dma_unmap(cmd);

	spin_lock_irqsave(host->host_lock, flags);
	ufshcd_outstanding_req_clear(hba, tag);
	hba->lrb[tag].cmd = NULL;
	spin_unlock_irqrestore(host->host_lock, flags);

	clear_bit_unlock(tag, &hba->lrb_in_use);
	wake_up(&hba->dev_cmd.tag_wq);

out:
	if (!err) {
		err = SUCCESS;
	} else {
		dev_err(hba->dev, "%s: failed with err %d\n", __func__, err);
		ufshcd_set_req_abort_skip(hba, hba->outstanding_reqs);
		err = FAILED;
	}

	/*
	 * This ufshcd_release() corresponds to the original scsi cmd that got
	 * aborted here (as we won't get any IRQ for it).
	 */
	ufshcd_release(hba);
	return err;
}
/**
 * ufshcd_host_reset_and_restore - reset and restore host controller
 * @hba: per-adapter instance
 *
 * Note that host controller reset may issue DME_RESET to
 * local and remote (device) Uni-Pro stack and the attributes
 * are reset to default state.
 *
 * Returns zero on success, non-zero on failure
 */
static int ufshcd_host_reset_and_restore(struct ufs_hba *hba)
{
	int err = 0;
	unsigned long flags;

	/* Reset the host controller */
	spin_lock_irqsave(hba->host->host_lock, flags);
	hba->ufshcd_state = UFSHCD_STATE_RESET;
	ufshcd_set_eh_in_progress(hba);
	ufshcd_hba_stop(hba, false);
	spin_unlock_irqrestore(hba->host->host_lock, flags);

#if defined(CONFIG_PM_DEVFREQ)
	/* scale up clocks to max frequency before full reinitialization */
	ufshcd_scale_clks(hba, true);
#endif

	/* Establish the link again and restore the device */
#ifdef CONFIG_SCSI_UFS_ASYNC_RELINK
	if (hba->pm_op_in_progress)
		async_schedule(ufshcd_async_scan, hba);
	else
#endif
	{
		err = ufshcd_probe_hba(hba);

		if (!err && (hba->ufshcd_state != UFSHCD_STATE_OPERATIONAL)) {
			dev_err(hba->dev, "%s: failed\n", __func__);
			err = -EIO;
		}
	}

	spin_lock_irqsave(hba->host->host_lock, flags);
	ufshcd_clear_eh_in_progress(hba);
	spin_unlock_irqrestore(hba->host->host_lock, flags);

	return err;
}
/**
 * ufshcd_reset_and_restore - reset and re-initialize host/device
 * @hba: per-adapter instance
 *
 * Reset and recover device, host and re-establish link. This
 * is helpful to recover the communication in fatal error conditions.
 *
 * Returns zero on success, non-zero on failure
 */
static int ufshcd_reset_and_restore(struct ufs_hba *hba)
{
	int err = 0;
	unsigned long flags;
	int retries = MAX_HOST_RESET_RETRIES;
	int tag;

	for_each_set_bit(tag, &hba->outstanding_reqs, hba->nutrs)
		ufshcd_clear_cmd(hba, tag);

	spin_lock_irqsave(hba->host->host_lock, flags);
	ufshcd_transfer_req_compl(hba, DID_RESET);
	spin_unlock_irqrestore(hba->host->host_lock, flags);

	do {
		err = ufshcd_host_reset_and_restore(hba);
	} while (err && --retries);

	/*
	 * After reset the door-bell might be cleared, complete
	 * outstanding requests in s/w here.
	 */
	spin_lock_irqsave(hba->host->host_lock, flags);
	ufshcd_transfer_req_compl(hba, DID_RESET);
	ufshcd_tmc_handler(hba);
	spin_unlock_irqrestore(hba->host->host_lock, flags);

	return err;
}
/**
 * ufshcd_eh_host_reset_handler - host reset handler registered to scsi layer
 * @cmd: SCSI command pointer
 *
 * Returns SUCCESS/FAILED
 */
static int ufshcd_eh_host_reset_handler(struct scsi_cmnd *cmd)
{
	int err;
	unsigned long flags;
	struct ufs_hba *hba;

	hba = shost_priv(cmd->device->host);

	ufshcd_hold(hba, false);
	/*
	 * Check if there is any race with fatal error handling.
	 * If so, wait for it to complete. Even though fatal error
	 * handling does reset and restore in some cases, don't assume
	 * anything out of it. We are just avoiding race here.
	 */
	do {
		spin_lock_irqsave(hba->host->host_lock, flags);
		if (!(work_pending(&hba->eh_work) ||
				hba->ufshcd_state == UFSHCD_STATE_RESET ||
				hba->ufshcd_state == UFSHCD_STATE_EH_SCHEDULED))
			break;
		spin_unlock_irqrestore(hba->host->host_lock, flags);
		dev_dbg(hba->dev, "%s: reset in progress\n", __func__);
		flush_work(&hba->eh_work);
	} while (1);

	hba->ufshcd_state = UFSHCD_STATE_RESET;
	ufshcd_set_eh_in_progress(hba);
	spin_unlock_irqrestore(hba->host->host_lock, flags);

	err = ufshcd_reset_and_restore(hba);

	spin_lock_irqsave(hba->host->host_lock, flags);
	if (!err) {
		err = SUCCESS;
		hba->ufshcd_state = UFSHCD_STATE_OPERATIONAL;
	} else {
		err = FAILED;
		hba->ufshcd_state = UFSHCD_STATE_ERROR;
	}
	ufshcd_clear_eh_in_progress(hba);
	spin_unlock_irqrestore(hba->host->host_lock, flags);

	ufshcd_release(hba);
	return err;
}
/**
 * ufshcd_get_max_icc_level - calculate the ICC level
 * @sup_curr_uA: max. current supported by the regulator
 * @start_scan: row at the desc table to start scan from
 * @buff: power descriptor buffer
 *
 * Returns calculated max ICC level for specific regulator
 */
static u32 ufshcd_get_max_icc_level(int sup_curr_uA, u32 start_scan,
				    char *buff)
{
	int i;
	int curr_uA;
	u16 data;
	u16 unit;

	for (i = start_scan; i >= 0; i--) {
		data = be16_to_cpup((__be16 *)&buff[2 * i]);
		unit = (data & ATTR_ICC_LVL_UNIT_MASK) >>
						ATTR_ICC_LVL_UNIT_OFFSET;
		curr_uA = data & ATTR_ICC_LVL_VALUE_MASK;
		switch (unit) {
		case UFSHCD_NANO_AMP:
			curr_uA = curr_uA / 1000;
			break;
		case UFSHCD_MILI_AMP:
			curr_uA = curr_uA * 1000;
			break;
		case UFSHCD_AMP:
			curr_uA = curr_uA * 1000 * 1000;
			break;
		case UFSHCD_MICRO_AMP:
		default:
			break;
		}
		if (sup_curr_uA >= curr_uA)
			break;
	}
	if (i < 0) {
		i = 0;
		pr_err("%s: Couldn't find valid icc_level = %d", __func__, i);
	}

	return (u32)i;
}
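/*
 * Worked example for the unit decode above (bit layout assumed from the
 * usual ATTR_ICC_LVL_* definitions: a 2-bit unit field in bits 15:14 and a
 * 10-bit value field): a descriptor entry of 0x8064 decodes to unit 2
 * (UFSHCD_MILI_AMP) and value 0x64 = 100, i.e. 100 mA = 100000 uA. The
 * scan stops at the first level whose draw fits within sup_curr_uA.
 */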
/**
 * ufshcd_find_max_sup_active_icc_level - calculate the max ICC level
 * In case regulators are not initialized we'll return 0
 * @hba: per-adapter instance
 * @desc_buf: power descriptor buffer to extract ICC levels from.
 * @len: length of desc_buf
 *
 * Returns calculated ICC level
 */
static u32 ufshcd_find_max_sup_active_icc_level(struct ufs_hba *hba,
						u8 *desc_buf, int len)
{
	u32 icc_level = 0;

	if (!hba->vreg_info.vcc || !hba->vreg_info.vccq ||
	    !hba->vreg_info.vccq2) {
		dev_err(hba->dev,
			"%s: Regulator capability was not set, actvIccLevel=%d",
			__func__, icc_level);
		goto out;
	}

	if (hba->vreg_info.vcc)
		icc_level = ufshcd_get_max_icc_level(
				hba->vreg_info.vcc->max_uA,
				POWER_DESC_MAX_ACTV_ICC_LVLS - 1,
				&desc_buf[PWR_DESC_ACTIVE_LVLS_VCC_0]);

	if (hba->vreg_info.vccq)
		icc_level = ufshcd_get_max_icc_level(
				hba->vreg_info.vccq->max_uA,
				icc_level,
				&desc_buf[PWR_DESC_ACTIVE_LVLS_VCCQ_0]);

	if (hba->vreg_info.vccq2)
		icc_level = ufshcd_get_max_icc_level(
				hba->vreg_info.vccq2->max_uA,
				icc_level,
				&desc_buf[PWR_DESC_ACTIVE_LVLS_VCCQ2_0]);
out:
	return icc_level;
}
static void ufshcd_init_icc_levels(struct ufs_hba *hba)
{
	int ret;
	int buff_len = hba->desc_size.pwr_desc;
	u8 desc_buf[hba->desc_size.pwr_desc];

	ret = ufshcd_read_power_desc(hba, desc_buf, buff_len);
	if (ret) {
		dev_err(hba->dev,
			"%s: Failed reading power descriptor.len = %d ret = %d",
			__func__, buff_len, ret);
		return;
	}

	hba->init_prefetch_data.icc_level =
			ufshcd_find_max_sup_active_icc_level(hba,
			desc_buf, buff_len);
	dev_dbg(hba->dev, "%s: setting icc_level 0x%x",
			__func__, hba->init_prefetch_data.icc_level);

	ret = ufshcd_query_attr_retry(hba, UPIU_QUERY_OPCODE_WRITE_ATTR,
			QUERY_ATTR_IDN_ACTIVE_ICC_LVL, 0, 0,
			&hba->init_prefetch_data.icc_level);

	if (ret)
		dev_err(hba->dev,
			"%s: Failed configuring bActiveICCLevel = %d ret = %d",
			__func__, hba->init_prefetch_data.icc_level, ret);
}
/**
 * ufshcd_scsi_add_wlus - Adds required W-LUs
 * @hba: per-adapter instance
 *
 * UFS device specification requires the UFS devices to support 4 well known
 * logical units:
 *	"REPORT_LUNS" (address: 01h)
 *	"UFS Device" (address: 50h)
 *	"RPMB" (address: 44h)
 *	"BOOT" (address: 30h)
 * UFS device's power management needs to be controlled by "POWER CONDITION"
 * field of SSU (START STOP UNIT) command. But this "power condition" field
 * will take effect only when it's sent to "UFS device" well known logical
 * unit, hence we require the scsi_device instance to represent this logical
 * unit in order for the UFS host driver to send the SSU command for power
 * management.
 *
 * We also require the scsi_device instance for "RPMB" (Replay Protected Memory
 * Block) LU so user space process can control this LU. User space may also
 * want to have access to BOOT LU.
 *
 * This function adds scsi device instances for each of all well known LUs
 * (except "REPORT LUNS" LU).
 *
 * Returns zero on success (all required W-LUs are added successfully),
 * non-zero error value on failure (if failed to add any of the required W-LU).
 */
static int ufshcd_scsi_add_wlus(struct ufs_hba *hba)
{
	int ret = 0;
	struct scsi_device *sdev_boot;

	hba->sdev_ufs_device = __scsi_add_device(hba->host, 0, 0,
		ufshcd_upiu_wlun_to_scsi_wlun(UFS_UPIU_UFS_DEVICE_WLUN), NULL);
	if (IS_ERR(hba->sdev_ufs_device)) {
		ret = PTR_ERR(hba->sdev_ufs_device);
		hba->sdev_ufs_device = NULL;
		goto out;
	}
	scsi_device_put(hba->sdev_ufs_device);

	sdev_boot = __scsi_add_device(hba->host, 0, 0,
		ufshcd_upiu_wlun_to_scsi_wlun(UFS_UPIU_BOOT_WLUN), NULL);
	if (IS_ERR(sdev_boot)) {
		ret = PTR_ERR(sdev_boot);
		goto remove_sdev_ufs_device;
	}
	scsi_device_put(sdev_boot);

	hba->sdev_rpmb = __scsi_add_device(hba->host, 0, 0,
		ufshcd_upiu_wlun_to_scsi_wlun(UFS_UPIU_RPMB_WLUN), NULL);
	if (IS_ERR(hba->sdev_rpmb)) {
		ret = PTR_ERR(hba->sdev_rpmb);
		goto remove_sdev_boot;
	}
	scsi_device_put(hba->sdev_rpmb);
	goto out;

remove_sdev_boot:
	scsi_remove_device(sdev_boot);
remove_sdev_ufs_device:
	scsi_remove_device(hba->sdev_ufs_device);
out:
	return ret;
}
static int ufs_get_device_desc(struct ufs_hba *hba,
			       struct ufs_dev_desc *dev_desc)
{
	int err;
	u8 model_index;
	u8 str_desc_buf[QUERY_DESC_MAX_SIZE + 1] = {0};
	u8 desc_buf[hba->desc_size.dev_desc];

	err = ufshcd_read_device_desc(hba, desc_buf, hba->desc_size.dev_desc);
	if (err) {
		dev_err(hba->dev, "%s: Failed reading Device Desc. err = %d\n",
			__func__, err);
		goto out;
	}

	/*
	 * getting vendor (manufacturerID) and Bank Index in big endian
	 * format
	 */
	dev_desc->wmanufacturerid = desc_buf[DEVICE_DESC_PARAM_MANF_ID] << 8 |
				    desc_buf[DEVICE_DESC_PARAM_MANF_ID + 1];

	model_index = desc_buf[DEVICE_DESC_PARAM_PRDCT_NAME];

	err = ufshcd_read_string_desc(hba, model_index, str_desc_buf,
				      QUERY_DESC_MAX_SIZE, ASCII_STD);
	if (err) {
		dev_err(hba->dev, "%s: Failed reading Product Name. err = %d\n",
			__func__, err);
		goto out;
	}

	str_desc_buf[QUERY_DESC_MAX_SIZE] = '\0';
	strlcpy(dev_desc->model, (str_desc_buf + QUERY_DESC_HDR_SIZE),
		min_t(u8, str_desc_buf[QUERY_DESC_LENGTH_OFFSET],
		      MAX_MODEL_LEN));

	/* Null terminate the model string */
	dev_desc->model[MAX_MODEL_LEN] = '\0';

out:
	return err;
}
static void ufs_fixup_device_setup(struct ufs_hba *hba,
				   struct ufs_dev_desc *dev_desc)
{
	struct ufs_dev_fix *f;

	for (f = ufs_fixups; f->quirk; f++) {
		if ((f->card.wmanufacturerid == dev_desc->wmanufacturerid ||
		     f->card.wmanufacturerid == UFS_ANY_VENDOR) &&
		    (STR_PRFX_EQUAL(f->card.model, dev_desc->model) ||
		     !strcmp(f->card.model, UFS_ANY_MODEL)))
			hba->dev_quirks |= f->quirk;
	}
}
/**
 * ufshcd_tune_pa_tactivate - Tunes PA_TActivate of local UniPro
 * @hba: per-adapter instance
 *
 * PA_TActivate parameter can be tuned manually if UniPro version is less than
 * 1.61. PA_TActivate needs to be greater than or equal to peer M-PHY's
 * RX_MIN_ACTIVATETIME_CAPABILITY attribute. This optimal value can help reduce
 * the hibern8 exit latency.
 *
 * Returns zero on success, non-zero error value on failure.
 */
static int ufshcd_tune_pa_tactivate(struct ufs_hba *hba)
{
	int ret = 0;
	u32 peer_rx_min_activatetime = 0, tuned_pa_tactivate;

	ret = ufshcd_dme_peer_get(hba,
				  UIC_ARG_MIB_SEL(
					RX_MIN_ACTIVATETIME_CAPABILITY,
					UIC_ARG_MPHY_RX_GEN_SEL_INDEX(0)),
				  &peer_rx_min_activatetime);
	if (ret)
		goto out;

	/* make sure proper unit conversion is applied */
	tuned_pa_tactivate =
		((peer_rx_min_activatetime * RX_MIN_ACTIVATETIME_UNIT_US)
		 / PA_TACTIVATE_TIME_UNIT_US);
	ret = ufshcd_dme_set(hba, UIC_ARG_MIB(PA_TACTIVATE),
			     tuned_pa_tactivate);

out:
	return ret;
}
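/*
 * Worked example of the conversion above, assuming the usual unit macros
 * (RX_MIN_ACTIVATETIME_UNIT_US = 100, PA_TACTIVATE_TIME_UNIT_US = 10): a
 * peer capability of 3 means 3 * 100us = 300us, which is programmed as
 * PA_TActivate = 300 / 10 = 30 units.
 */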
/**
 * ufshcd_tune_pa_hibern8time - Tunes PA_Hibern8Time of local UniPro
 * @hba: per-adapter instance
 *
 * PA_Hibern8Time parameter can be tuned manually if UniPro version is less
 * than 1.61. PA_Hibern8Time needs to be the maximum of the local M-PHY's
 * TX_HIBERN8TIME_CAPABILITY and the peer M-PHY's RX_HIBERN8TIME_CAPABILITY.
 * This optimal value can help reduce the hibern8 exit latency.
 *
 * Returns zero on success, non-zero error value on failure.
 */
static int ufshcd_tune_pa_hibern8time(struct ufs_hba *hba)
{
	int ret = 0;
	u32 local_tx_hibern8_time_cap = 0, peer_rx_hibern8_time_cap = 0;
	u32 max_hibern8_time, tuned_pa_hibern8time;

	ret = ufshcd_dme_get(hba,
			     UIC_ARG_MIB_SEL(TX_HIBERN8TIME_CAPABILITY,
					UIC_ARG_MPHY_TX_GEN_SEL_INDEX(0)),
			     &local_tx_hibern8_time_cap);
	if (ret)
		goto out;

	ret = ufshcd_dme_peer_get(hba,
				  UIC_ARG_MIB_SEL(RX_HIBERN8TIME_CAPABILITY,
					UIC_ARG_MPHY_RX_GEN_SEL_INDEX(0)),
				  &peer_rx_hibern8_time_cap);
	if (ret)
		goto out;

	max_hibern8_time = max(local_tx_hibern8_time_cap,
			       peer_rx_hibern8_time_cap);
	/* make sure proper unit conversion is applied */
	tuned_pa_hibern8time = ((max_hibern8_time * HIBERN8TIME_UNIT_US)
				/ PA_HIBERN8_TIME_UNIT_US);
	ret = ufshcd_dme_set(hba, UIC_ARG_MIB(PA_HIBERN8TIME),
			     tuned_pa_hibern8time);
out:
	return ret;
}
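/*
 * Worked example, assuming HIBERN8TIME_UNIT_US and PA_HIBERN8_TIME_UNIT_US
 * are both 100us: with a local TX capability of 2 and a peer RX capability
 * of 3, max_hibern8_time is 3 and PA_Hibern8Time is programmed as
 * (3 * 100) / 100 = 3 units.
 */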
static void ufshcd_tune_unipro_params(struct ufs_hba *hba)
{
	if (ufshcd_is_unipro_pa_params_tuning_req(hba)) {
		ufshcd_tune_pa_tactivate(hba);
		ufshcd_tune_pa_hibern8time(hba);
	}

	if (hba->dev_quirks & UFS_DEVICE_QUIRK_PA_TACTIVATE)
		/* set 1ms timeout for PA_TACTIVATE */
		ufshcd_dme_set(hba, UIC_ARG_MIB(PA_TACTIVATE), 10);
}
static void ufshcd_clear_dbg_ufs_stats(struct ufs_hba *hba)
{
	int err_reg_hist_size = sizeof(struct ufs_uic_err_reg_hist);

	hba->ufs_stats.hibern8_exit_cnt = 0;
	hba->ufs_stats.last_hibern8_exit_tstamp = ktime_set(0, 0);

	memset(&hba->ufs_stats.pa_err, 0, err_reg_hist_size);
	memset(&hba->ufs_stats.dl_err, 0, err_reg_hist_size);
	memset(&hba->ufs_stats.nl_err, 0, err_reg_hist_size);
	memset(&hba->ufs_stats.tl_err, 0, err_reg_hist_size);
	memset(&hba->ufs_stats.dme_err, 0, err_reg_hist_size);

	hba->req_abort_count = 0;
}
static void ufshcd_init_desc_sizes(struct ufs_hba *hba)
{
	int err;

	err = ufshcd_read_desc_length(hba, QUERY_DESC_IDN_DEVICE, 0,
		&hba->desc_size.dev_desc);
	if (err)
		hba->desc_size.dev_desc = QUERY_DESC_DEVICE_DEF_SIZE;

	err = ufshcd_read_desc_length(hba, QUERY_DESC_IDN_POWER, 0,
		&hba->desc_size.pwr_desc);
	if (err)
		hba->desc_size.pwr_desc = QUERY_DESC_POWER_DEF_SIZE;

	err = ufshcd_read_desc_length(hba, QUERY_DESC_IDN_INTERCONNECT, 0,
		&hba->desc_size.interc_desc);
	if (err)
		hba->desc_size.interc_desc = QUERY_DESC_INTERCONNECT_DEF_SIZE;

	err = ufshcd_read_desc_length(hba, QUERY_DESC_IDN_CONFIGURATION, 0,
		&hba->desc_size.conf_desc);
	if (err)
		hba->desc_size.conf_desc = QUERY_DESC_CONFIGURATION_DEF_SIZE;

	err = ufshcd_read_desc_length(hba, QUERY_DESC_IDN_UNIT, 0,
		&hba->desc_size.unit_desc);
	if (err)
		hba->desc_size.unit_desc = QUERY_DESC_UNIT_DEF_SIZE;

	err = ufshcd_read_desc_length(hba, QUERY_DESC_IDN_GEOMETRY, 0,
		&hba->desc_size.geom_desc);
	if (err)
		hba->desc_size.geom_desc = QUERY_DESC_GEOMETRY_DEF_SIZE;
}
static void ufshcd_def_desc_sizes(struct ufs_hba *hba)
{
	hba->desc_size.dev_desc = QUERY_DESC_DEVICE_DEF_SIZE;
	hba->desc_size.pwr_desc = QUERY_DESC_POWER_DEF_SIZE;
	hba->desc_size.interc_desc = QUERY_DESC_INTERCONNECT_DEF_SIZE;
	hba->desc_size.conf_desc = QUERY_DESC_CONFIGURATION_DEF_SIZE;
	hba->desc_size.unit_desc = QUERY_DESC_UNIT_DEF_SIZE;
	hba->desc_size.geom_desc = QUERY_DESC_GEOMETRY_DEF_SIZE;
}
/**
 * ufshcd_probe_hba - probe hba to detect device and initialize
 * @hba: per-adapter instance
 *
 * Execute link-startup and verify device initialization
 */
static int ufshcd_probe_hba(struct ufs_hba *hba)
{
	struct ufs_dev_desc card = {0};
	struct ufs_pa_layer_attr *pwr_info = &hba->max_pwr_info.info;
	struct ufs_vreg_info *info = &hba->vreg_info;
	int re_cnt = 0;
	int ret, link_startup_fail = 0, device_reset = 0;
	ktime_t start = ktime_get();
	unsigned long flags;

retry:
	/* For device power control when link startup fails. */
	if (link_startup_fail || device_reset) {
		ufshcd_vreg_set_lpm(hba);
		ret = ufshcd_vreg_set_hpm(hba);

		if (gpio_is_valid(info->ufs_power_gpio))
			dev_info(hba->dev, "%s: UFS power pin: 0x%08x\n", __func__, gpio_get_value(info->ufs_power_gpio));
		if (gpio_is_valid(info->ufs_reset_n_gpio))
			dev_info(hba->dev, "%s: RESET_N: 0x%08x\n", __func__, gpio_get_value(info->ufs_reset_n_gpio));

		if (ret)
			goto out;
	}

	ret = ufshcd_hba_enable(hba);
	if (ret)
		goto out;

	ret = ufshcd_link_startup(hba);
	if (ret) {
		link_startup_fail = 1;
		goto out;
	}
	link_startup_fail = 0;

	dev_info(hba->dev, "UFS link established\n");

	/* set the default level for urgent bkops */
	hba->urgent_bkops_lvl = BKOPS_STATUS_PERF_IMPACT;
	hba->is_urgent_bkops_lvl_checked = false;

	/* Debug counters initialization */
	ufshcd_clear_dbg_ufs_stats(hba);

	/* UniPro link is active now */
	ufshcd_set_link_active(hba);

	ret = ufshcd_verify_dev_init(hba);
	if (ret)
		goto out;

	ret = ufshcd_complete_dev_init(hba);
	if (ret)
		goto out;

	/* Init check for device descriptor sizes */
	ufshcd_init_desc_sizes(hba);

	ret = ufs_get_device_desc(hba, &card);
	if (ret) {
		dev_err(hba->dev, "%s: Failed getting device info. err = %d\n",
			__func__, ret);
		goto out;
	}

	ufs_fixup_device_setup(hba, &card);
	ufshcd_tune_unipro_params(hba);

	ret = ufshcd_set_vccq_rail_unused(hba,
		(hba->dev_quirks & UFS_DEVICE_NO_VCCQ) ? true : false);
	if (ret)
		goto out;

	/* UFS device is also active now */
	ufshcd_set_ufs_dev_active(hba);
	ufshcd_force_reset_auto_bkops(hba);
	hba->wlun_dev_clr_ua = true;

	if (ufshcd_get_max_pwr_mode(hba)) {
		dev_err(hba->dev,
			"%s: Failed getting max supported power mode\n",
			__func__);
	} else {
		if ((pwr_info->lane_rx != pwr_info->peer_available_lane_rx)
		    || (pwr_info->lane_tx != pwr_info->peer_available_lane_tx)) {
			dev_err(hba->dev,
				"%s: available lanes, Host:Device Lane tx %d:%d rx %d:%d\n",
				__func__,
				pwr_info->lane_tx, pwr_info->peer_available_lane_tx,
				pwr_info->lane_rx, pwr_info->peer_available_lane_rx);
		}
		ret = ufshcd_config_pwr_mode(hba, &hba->max_pwr_info.info);
		if (ret) {
			dev_err(hba->dev, "%s: Failed setting power mode, err = %d\n",
				__func__, ret);
			goto out;
		}

		if (hba->max_pwr_info.info.pwr_rx == FAST_MODE ||
		    hba->max_pwr_info.info.pwr_tx == FAST_MODE ||
		    hba->max_pwr_info.info.pwr_rx == FASTAUTO_MODE ||
		    hba->max_pwr_info.info.pwr_tx == FASTAUTO_MODE)
			dev_info(hba->dev, "HS mode configured\n");
	}

	/* set the state as operational after switching to desired gear */
	spin_lock_irqsave(hba->host->host_lock, flags);
	hba->ufshcd_state = UFSHCD_STATE_OPERATIONAL;
	spin_unlock_irqrestore(hba->host->host_lock, flags);

	/*
	 * If we are in error handling context or in power management callbacks
	 * context, no need to scan the host
	 */
	if (!ufshcd_eh_in_progress(hba) && !hba->pm_op_in_progress
	    && !hba->async_resume) {
		bool flag;

		/* clear any previous UFS device information */
		memset(&hba->dev_info, 0, sizeof(hba->dev_info));
		ret = ufshcd_query_flag_retry(hba, UPIU_QUERY_OPCODE_READ_FLAG,
				QUERY_FLAG_IDN_PWR_ON_WPE, &flag);
		if (!ret)
			hba->dev_info.f_power_on_wp_en = flag;

		if (!hba->is_init_prefetch)
			ufshcd_init_icc_levels(hba);

		scsi_scan_host(hba->host);

		/* Add required well known logical units to scsi mid layer */
		ret = ufshcd_scsi_add_wlus(hba);
		if (ret)
			dev_warn(hba->dev, "%s failed to add w-lus %d\n",
				 __func__, ret);

		/* Initialize devfreq after UFS device is detected */
		if (ufshcd_is_clkscaling_supported(hba)) {
			memcpy(&hba->clk_scaling.saved_pwr_info.info,
				&hba->pwr_info,
				sizeof(struct ufs_pa_layer_attr));
			hba->clk_scaling.saved_pwr_info.is_valid = true;
			if (!hba->devfreq) {
#if defined(CONFIG_PM_DEVFREQ)
				hba->devfreq = devm_devfreq_add_device(hba->dev,
							&ufs_devfreq_profile,
							"simple_ondemand",
							NULL);
#endif
				if (IS_ERR(hba->devfreq)) {
					ret = PTR_ERR(hba->devfreq);
					dev_err(hba->dev, "Unable to register with devfreq %d\n",
						ret);
					goto out;
				}
			}
			hba->clk_scaling.is_allowed = true;
		}

		pm_runtime_put_sync(hba->dev);
	}

	hba->host->wlun_clr_uac = true;
	if (!hba->is_init_prefetch)
		hba->is_init_prefetch = true;

out:
	if (ret && re_cnt++ < UFS_LINK_SETUP_RETRIES) {
		dev_err(hba->dev, "%s failed with err %d, retrying:%d\n",
			__func__, ret, re_cnt);
		goto retry;
	} else if (ret && re_cnt >= UFS_LINK_SETUP_RETRIES) {
		dev_err(hba->dev, "%s failed after retries with err %d\n",
			__func__, ret);
		exynos_ufs_dump_uic_info(hba);
		spin_lock_irqsave(hba->host->host_lock, flags);
		hba->ufshcd_state = UFSHCD_STATE_ERROR;
		spin_unlock_irqrestore(hba->host->host_lock, flags);
	}

	/*
	 * If we failed to initialize the device or the device is not
	 * present, turn off the power/clocks etc.
	 */
	if (ret && !ufshcd_eh_in_progress(hba) && !hba->pm_op_in_progress) {
		pm_runtime_put_sync(hba->dev);
		ufshcd_hba_exit(hba);
	}

	trace_ufshcd_init(dev_name(hba->dev), ret,
		ktime_to_us(ktime_sub(ktime_get(), start)),
		hba->curr_dev_pwr_mode, hba->uic_link_state);

	if (!ret) {
		/*
		 * Inform scsi mid-layer that we did reset and allow to handle
		 * Unit Attention properly.
		 */
		spin_lock_irqsave(hba->host->host_lock, flags);
		scsi_report_bus_reset(hba->host, 0);
		spin_unlock_irqrestore(hba->host->host_lock, flags);
	}

	hba->async_resume = false;

	return ret;
}
/**
 * ufshcd_async_scan - asynchronous execution for probing hba
 * @data: data pointer to pass to this function
 * @cookie: cookie data
 */
static void ufshcd_async_scan(void *data, async_cookie_t cookie)
{
	struct ufs_hba *hba = (struct ufs_hba *)data;
	int err = 0;

	if (hba->async_resume) {
		scsi_block_requests(hba->host);
		err = ufshcd_probe_hba(hba);
		if (err)
			goto err;

		if (!ufshcd_is_ufs_dev_active(hba)) {
			scsi_unblock_requests(hba->host);
			ufshcd_set_dev_pwr_mode(hba, UFS_ACTIVE_PWR_MODE);
			scsi_block_requests(hba->host);
		}

		/*
		 * If BKOPs operations are urgently needed at this moment then
		 * keep auto-bkops enabled or else disable it.
		 */
		ufshcd_urgent_bkops(hba);
err:
		scsi_unblock_requests(hba->host);
	} else {
		ufshcd_probe_hba(hba);
	}
}
static enum blk_eh_timer_return ufshcd_eh_timed_out(struct scsi_cmnd *scmd)
{
	unsigned long flags;
	struct Scsi_Host *host;
	struct ufs_hba *hba;
	int index;
	bool found = false;

	if (!scmd || !scmd->device || !scmd->device->host)
		return BLK_EH_NOT_HANDLED;

	host = scmd->device->host;
	hba = shost_priv(host);
	if (!hba)
		return BLK_EH_NOT_HANDLED;

	spin_lock_irqsave(host->host_lock, flags);

	for_each_set_bit(index, &hba->outstanding_reqs, hba->nutrs) {
		if (hba->lrb[index].cmd == scmd) {
			found = true;
			break;
		}
	}

	spin_unlock_irqrestore(host->host_lock, flags);

	/*
	 * Bypass SCSI error handling and reset the block layer timer if this
	 * SCSI command was not actually dispatched to UFS driver, otherwise
	 * let SCSI layer handle the error as usual.
	 */
	return found ? BLK_EH_NOT_HANDLED : BLK_EH_RESET_TIMER;
}
/**
 * ufshcd_query_ioctl - perform user read queries
 * @hba: per-adapter instance
 * @lun: used for lun specific queries
 * @buffer: user space buffer for reading and submitting query data and params
 * @return: 0 for success negative error code otherwise
 *
 * Expected/Submitted buffer structure is struct ufs_ioctl_query_data.
 * It will read the opcode, idn and buf_length parameters, and, put the
 * response in the buffer field while updating the used size in buf_length.
 */
static int ufshcd_query_ioctl(struct ufs_hba *hba, u8 lun, void __user *buffer)
{
	struct ufs_ioctl_query_data *ioctl_data;
	int err = 0;
	int length = 0;
	void *data_ptr;
	bool flag;
	u32 att;
	u8 index = 0;
	u8 *desc = NULL;

	ioctl_data = kzalloc(sizeof(struct ufs_ioctl_query_data), GFP_KERNEL);
	if (!ioctl_data) {
		dev_err(hba->dev, "%s: Failed allocating %zu bytes\n", __func__,
			sizeof(struct ufs_ioctl_query_data));
		err = -ENOMEM;
		goto out;
	}

	/* extract params from user buffer */
	err = copy_from_user(ioctl_data, buffer,
			sizeof(struct ufs_ioctl_query_data));
	if (err) {
		dev_err(hba->dev,
			"%s: Failed copying buffer from user, err %d\n",
			__func__, err);
		goto out_release_mem;
	}

	/* verify legal parameters & send query */
	switch (ioctl_data->opcode) {
	case UPIU_QUERY_OPCODE_READ_DESC:
		switch (ioctl_data->idn) {
		case QUERY_DESC_IDN_DEVICE:
		case QUERY_DESC_IDN_CONFIGURATION:
		case QUERY_DESC_IDN_INTERCONNECT:
		case QUERY_DESC_IDN_GEOMETRY:
		case QUERY_DESC_IDN_POWER:
		case QUERY_DESC_IDN_HEALTH:
			index = 0;
			break;
		case QUERY_DESC_IDN_UNIT:
			if (!ufs_is_valid_unit_desc_lun(lun)) {
				dev_err(hba->dev,
					"%s: No unit descriptor for lun 0x%x\n",
					__func__, lun);
				err = -EINVAL;
				goto out_release_mem;
			}
			index = lun;
			break;
		default:
			goto out_einval;
		}
		length = min_t(int, QUERY_DESC_MAX_SIZE,
				ioctl_data->buf_size);
		desc = kzalloc(length, GFP_KERNEL);
		if (!desc) {
			dev_err(hba->dev, "%s: Failed allocating %d bytes\n",
				__func__, length);
			err = -ENOMEM;
			goto out_release_mem;
		}
		err = ufshcd_query_descriptor_retry(hba, ioctl_data->opcode,
				ioctl_data->idn, index, 0, desc, &length);
		break;
	case UPIU_QUERY_OPCODE_READ_ATTR:
		switch (ioctl_data->idn) {
		case QUERY_ATTR_IDN_BOOT_LU_EN:
		case QUERY_ATTR_IDN_POWER_MODE:
		case QUERY_ATTR_IDN_ACTIVE_ICC_LVL:
		case QUERY_ATTR_IDN_OOO_DATA_EN:
		case QUERY_ATTR_IDN_BKOPS_STATUS:
		case QUERY_ATTR_IDN_PURGE_STATUS:
		case QUERY_ATTR_IDN_MAX_DATA_IN:
		case QUERY_ATTR_IDN_MAX_DATA_OUT:
		case QUERY_ATTR_IDN_REF_CLK_FREQ:
		case QUERY_ATTR_IDN_CONF_DESC_LOCK:
		case QUERY_ATTR_IDN_MAX_NUM_OF_RTT:
		case QUERY_ATTR_IDN_EE_CONTROL:
		case QUERY_ATTR_IDN_EE_STATUS:
		case QUERY_ATTR_IDN_SECONDS_PASSED:
			index = 0;
			break;
		case QUERY_ATTR_IDN_DYN_CAP_NEEDED:
		case QUERY_ATTR_IDN_CORR_PRG_BLK_NUM:
			index = lun;
			break;
		default:
			goto out_einval;
		}
		err = ufshcd_query_attr_retry(hba, ioctl_data->opcode,
				ioctl_data->idn, index, 0, &att);
		break;
	case UPIU_QUERY_OPCODE_READ_FLAG:
		switch (ioctl_data->idn) {
		case QUERY_FLAG_IDN_FDEVICEINIT:
		case QUERY_FLAG_IDN_PERMANENT_WPE:
		case QUERY_FLAG_IDN_PWR_ON_WPE:
		case QUERY_FLAG_IDN_BKOPS_EN:
		case QUERY_FLAG_IDN_PURGE_ENABLE:
		case QUERY_FLAG_IDN_FPHYRESOURCEREMOVAL:
		case QUERY_FLAG_IDN_BUSY_RTC:
			break;
		default:
			goto out_einval;
		}
		err = ufshcd_query_flag_retry(hba, ioctl_data->opcode,
				ioctl_data->idn, &flag);
		break;
	default:
		goto out_einval;
	}

	if (err) {
		dev_err(hba->dev, "%s: Query for idn %d failed\n", __func__,
			ioctl_data->idn);
		goto out_release_mem;
	}

	/*
	 * copy response data
	 * As we might end up reading less data than what is specified in
	 * "ioctl_data->buf_size". So we are updating "ioctl_data->
	 * buf_size" to what exactly we have read.
	 */
	switch (ioctl_data->opcode) {
	case UPIU_QUERY_OPCODE_READ_DESC:
		ioctl_data->buf_size = min_t(int, ioctl_data->buf_size, length);
		data_ptr = desc;
		break;
	case UPIU_QUERY_OPCODE_READ_ATTR:
		ioctl_data->buf_size = sizeof(u32);
		data_ptr = &att;
		break;
	case UPIU_QUERY_OPCODE_READ_FLAG:
		ioctl_data->buf_size = 1;
		data_ptr = &flag;
		break;
	default:
		goto out_einval;
	}

	/* copy to user */
	err = copy_to_user(buffer, ioctl_data,
			sizeof(struct ufs_ioctl_query_data));
	if (err)
		dev_err(hba->dev, "%s: Failed copying back to user.\n",
			__func__);
	err = copy_to_user(buffer + sizeof(struct ufs_ioctl_query_data),
			data_ptr, ioctl_data->buf_size);
	if (err) {
		dev_err(hba->dev, "%s: err %d copying back to user.\n",
			__func__, err);
		goto out_release_mem;
	}
	goto out_release_mem;

out_einval:
	dev_err(hba->dev,
		"%s: illegal ufs query ioctl data, opcode 0x%x, idn 0x%x\n",
		__func__, ioctl_data->opcode, (unsigned int)ioctl_data->idn);
	err = -EINVAL;
out_release_mem:
	kfree(ioctl_data);
	kfree(desc);
out:
	return err;
}
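/*
 * Hedged user-space sketch of UFS_IOCTL_QUERY (the ioctl number and struct
 * layout come from scsi/ufs/ioctl.h; the device node path and the
 * past-the-header response convention shown here are assumptions inferred
 * from the copy_to_user() calls above):
 *
 *	char buf[sizeof(struct ufs_ioctl_query_data) + 4];
 *	struct ufs_ioctl_query_data *qd = (void *)buf;
 *
 *	qd->opcode = UPIU_QUERY_OPCODE_READ_ATTR;
 *	qd->idn = QUERY_ATTR_IDN_BKOPS_STATUS;
 *	qd->buf_size = 4;
 *	fd = open("/dev/sdX", O_RDONLY);
 *	if (!ioctl(fd, UFS_IOCTL_QUERY, qd))
 *		attr = *(u32 *)(buf + sizeof(*qd));
 */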
/**
 * ufshcd_ioctl - ufs ioctl callback registered in scsi_host
 * @dev: scsi device required for per LUN queries
 * @cmd: command opcode
 * @buffer: user space buffer for transferring data
 *
 * Supported commands:
 * UFS_IOCTL_QUERY
 */
static int ufshcd_ioctl(struct scsi_device *dev, int cmd, void __user *buffer)
{
	struct ufs_hba *hba = shost_priv(dev->host);
	int err = 0;

	BUG_ON(!hba);
	if (!buffer) {
		if (cmd != SCSI_UFS_REQUEST_SENSE) {
			dev_err(hba->dev, "%s: User buffer is NULL!\n", __func__);
			return -EINVAL;
		}
	}

	switch (cmd) {
	case SCSI_UFS_REQUEST_SENSE:
		err = ufshcd_send_request_sense(hba, hba->sdev_rpmb);
		if (err)
			dev_warn(hba->dev, "%s failed to clear uac on rpmb(w-lu) %d\n",
				 __func__, err);
		hba->host->wlun_clr_uac = false;
		break;
	case UFS_IOCTL_QUERY:
		//pm_runtime_get_sync(hba->dev);
		err = ufshcd_query_ioctl(hba, ufshcd_scsi_to_upiu_lun(dev->lun),
				buffer);
		//pm_runtime_put_sync(hba->dev);
		break;
	case UFS_IOCTL_BLKROSET:
		err = -ENOIOCTLCMD;
		break;
	default:
		err = -ENOIOCTLCMD;
		dev_err(hba->dev, "%s: Illegal ufs-IOCTL cmd %d\n", __func__,
			cmd);
		break;
	}

	return err;
}
static struct scsi_host_template ufshcd_driver_template = {
	.module			= THIS_MODULE,
	.name			= UFSHCD,
	.proc_name		= UFSHCD,
	.queuecommand		= ufshcd_queuecommand,
	.slave_alloc		= ufshcd_slave_alloc,
	.slave_configure	= ufshcd_slave_configure,
	.slave_destroy		= ufshcd_slave_destroy,
	.change_queue_depth	= ufshcd_change_queue_depth,
	.eh_abort_handler	= ufshcd_abort,
	.eh_device_reset_handler = ufshcd_eh_device_reset_handler,
	.eh_host_reset_handler	= ufshcd_eh_host_reset_handler,
	.eh_timed_out		= ufshcd_eh_timed_out,
	.ioctl			= ufshcd_ioctl,
	.this_id		= -1,
	.sg_tablesize		= SG_ALL,
	.cmd_per_lun		= UFSHCD_CMD_PER_LUN,
	.can_queue		= UFSHCD_CAN_QUEUE,
	.max_host_blocked	= 1,
	.skip_settle_delay	= 1,
	.track_queue_depth	= 1,
};
static int ufshcd_config_vreg_load(struct device *dev, struct ufs_vreg *vreg,
				   int ua)
{
	int ret;

	if (!vreg)
		return 0;

	ret = regulator_set_load(vreg->reg, ua);
	if (ret < 0) {
		dev_err(dev, "%s: %s set load (ua=%d) failed, err=%d\n",
			__func__, vreg->name, ua, ret);
	}

	return ret;
}
static inline int ufshcd_config_vreg_lpm(struct ufs_hba *hba,
					 struct ufs_vreg *vreg)
{
	if (!vreg)
		return 0;
	else if (vreg->unused)
		return 0;
	else
		return ufshcd_config_vreg_load(hba->dev, vreg,
					       UFS_VREG_LPM_LOAD_UA);
}

static inline int ufshcd_config_vreg_hpm(struct ufs_hba *hba,
					 struct ufs_vreg *vreg)
{
	if (!vreg)
		return 0;
	else if (vreg->unused)
		return 0;
	else
		return ufshcd_config_vreg_load(hba->dev, vreg, vreg->max_uA);
}
static int ufshcd_config_vreg(struct device *dev,
		struct ufs_vreg *vreg, bool on)
{
	int ret = 0;
	struct regulator *reg;
	const char *name;
	int min_uV, uA_load;

	BUG_ON(!vreg);

	reg = vreg->reg;
	name = vreg->name;

	if (regulator_count_voltages(reg) > 0) {
		min_uV = on ? vreg->min_uV : 0;
		ret = regulator_set_voltage(reg, min_uV, vreg->max_uV);
		if (ret) {
			dev_err(dev, "%s: %s set voltage failed, err=%d\n",
				__func__, name, ret);
			goto out;
		}

		uA_load = on ? vreg->max_uA : 0;
		ret = ufshcd_config_vreg_load(dev, vreg, uA_load);
		if (ret)
			goto out;
	}
out:
	return ret;
}
static int ufshcd_enable_vreg(struct device *dev, struct ufs_vreg *vreg)
{
	int ret = 0;

	if (!vreg)
		goto out;
	else if (vreg->enabled || vreg->unused)
		goto out;

	ret = ufshcd_config_vreg(dev, vreg, true);
	if (!ret)
		ret = regulator_enable(vreg->reg);

	if (!ret)
		vreg->enabled = true;
	else
		dev_err(dev, "%s: %s enable failed, err=%d\n",
			__func__, vreg->name, ret);
out:
	return ret;
}
static int ufshcd_disable_vreg(struct device *dev, struct ufs_vreg *vreg)
{
	int ret = 0;

	if (!vreg)
		goto out;
	else if (!vreg->enabled || vreg->unused)
		goto out;

	ret = regulator_disable(vreg->reg);

	if (!ret) {
		/* ignore errors on applying disable config */
		ufshcd_config_vreg(dev, vreg, false);
		vreg->enabled = false;
	} else {
		dev_err(dev, "%s: %s disable failed, err=%d\n",
			__func__, vreg->name, ret);
	}
out:
	return ret;
}
static int ufshcd_setup_vreg(struct ufs_hba *hba, bool on)
{
	int ret = 0;
	struct device *dev = hba->dev;
	struct ufs_vreg_info *info = &hba->vreg_info;

	if (!info)
		goto out;

	ret = ufshcd_toggle_vreg(dev, info->vcc, on);
	if (ret)
		goto out;

	ret = ufshcd_toggle_vreg(dev, info->vccq, on);
	if (ret)
		goto out;

	ret = ufshcd_toggle_vreg(dev, info->vccq2, on);
out:
	if (ret) {
		ufshcd_toggle_vreg(dev, info->vccq2, false);
		ufshcd_toggle_vreg(dev, info->vccq, false);
		ufshcd_toggle_vreg(dev, info->vcc, false);
	}
	return ret;
}
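/*
 * Note on ordering: power-up above is vcc -> vccq -> vccq2, and the error
 * path simply toggles all three off in reverse order; that is safe because
 * the enable/disable helpers above ignore rails that are NULL, marked
 * unused, or already in the requested state.
 */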
static int ufshcd_setup_hba_vreg(struct ufs_hba *hba, bool on)
{
	struct ufs_vreg_info *info = &hba->vreg_info;

	if (info)
		return ufshcd_toggle_vreg(hba->dev, info->vdd_hba, on);

	return 0;
}
static int ufshcd_get_vreg(struct device *dev, struct ufs_vreg *vreg)
{
	int ret = 0;

	if (!vreg)
		goto out;

	vreg->reg = devm_regulator_get(dev, vreg->name);
	if (IS_ERR(vreg->reg)) {
		ret = PTR_ERR(vreg->reg);
		dev_err(dev, "%s: %s get failed, err=%d\n",
			__func__, vreg->name, ret);
	}
out:
	return ret;
}
static int ufshcd_init_vreg(struct ufs_hba *hba)
{
	int ret = 0;
	struct device *dev = hba->dev;
	struct ufs_vreg_info *info = &hba->vreg_info;

	if (!info)
		goto out;

	ret = ufshcd_get_vreg(dev, info->vcc);
	if (ret)
		goto out;

	ret = ufshcd_get_vreg(dev, info->vccq);
	if (ret)
		goto out;

	ret = ufshcd_get_vreg(dev, info->vccq2);
out:
	return ret;
}
static int ufshcd_init_hba_vreg(struct ufs_hba *hba)
{
	struct ufs_vreg_info *info = &hba->vreg_info;

	if (info)
		return ufshcd_get_vreg(hba->dev, info->vdd_hba);

	return 0;
}
static int ufshcd_set_vccq_rail_unused(struct ufs_hba *hba, bool unused)
{
	int ret = 0;
	struct ufs_vreg_info *info = &hba->vreg_info;

	if (!info)
		goto out;
	else if (!info->vccq)
		goto out;

	if (unused) {
		/* shut off the rail here */
		ret = ufshcd_toggle_vreg(hba->dev, info->vccq, false);
		/*
		 * Mark this rail as no longer used, so it doesn't get enabled
		 * later by mistake
		 */
		if (!ret)
			info->vccq->unused = true;
	} else {
		/*
		 * rail should have been already enabled hence just make sure
		 * that unused flag is cleared.
		 */
		info->vccq->unused = false;
	}
out:
	return ret;
}
static int __ufshcd_setup_clocks(struct ufs_hba *hba, bool on,
				 bool skip_ref_clk)
{
	int ret = 0;
	struct ufs_clk_info *clki;
	struct list_head *head = &hba->clk_list_head;
	const char *ref_clk = "ref_clk";
	unsigned long flags;
	ktime_t start = ktime_get();
	bool clk_state_changed = false;

	if (list_empty(head))
		goto out;

	ufshcd_vops_pre_setup_clocks(hba, on);

	list_for_each_entry(clki, head, list) {
		if (!IS_ERR_OR_NULL(clki->clk)) {
			if (skip_ref_clk &&
			    !strncmp(clki->name, ref_clk, strlen(ref_clk)))
				continue;

			clk_state_changed = on ^ clki->enabled;
			if (on && !clki->enabled) {
				ret = clk_prepare_enable(clki->clk);
				if (ret) {
					hba->clk_gating.state = CLKS_DISABLE;
					dev_err(hba->dev, "%s: %s prepare enable failed, %d\n",
						__func__, clki->name, ret);
					goto out;
				}
			} else if (!on && clki->enabled) {
				clk_disable_unprepare(clki->clk);
			}
			clki->enabled = on;
			dev_dbg(hba->dev, "%s: clk: %s %sabled\n", __func__,
				clki->name, on ? "en" : "dis");
		}
	}

	ret = ufshcd_vops_setup_clocks(hba, on);

out:
	if (ret) {
		list_for_each_entry(clki, head, list) {
			if (!IS_ERR_OR_NULL(clki->clk) && clki->enabled)
				clk_disable_unprepare(clki->clk);
		}
	} else if (!ret && on) {
		spin_lock_irqsave(hba->host->host_lock, flags);
		hba->clk_gating.state = CLKS_ON;
		trace_ufshcd_clk_gating(dev_name(hba->dev),
					hba->clk_gating.state);
		spin_unlock_irqrestore(hba->host->host_lock, flags);
	}

	if (clk_state_changed)
		trace_ufshcd_profile_clk_gating(dev_name(hba->dev),
			(on ? "on" : "off"),
			ktime_to_us(ktime_sub(ktime_get(), start)), ret);
	return ret;
}
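/*
 * The clock list walked above is normally populated from device tree,
 * along the lines of this illustrative (not board-accurate) snippet:
 *
 *	ufs@15570000 {
 *		clocks = <&clk_fsys ACLK_UFS>, <&clk_fsys SCLK_UFSUNIPRO>;
 *		clock-names = "core_clk", "ref_clk";
 *	};
 *
 * An entry named "ref_clk" is the one skip_ref_clk leaves running while
 * the link is still active.
 */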
static int ufshcd_setup_clocks(struct ufs_hba *hba, bool on)
{
	return __ufshcd_setup_clocks(hba, on, false);
}
static int ufshcd_init_clocks(struct ufs_hba *hba)
{
	int ret = 0;
	struct ufs_clk_info *clki;
	struct device *dev = hba->dev;
	struct list_head *head = &hba->clk_list_head;

	if (list_empty(head))
		goto out;

	list_for_each_entry(clki, head, list) {
		if (!clki->name)
			continue;

		clki->clk = devm_clk_get(dev, clki->name);
		if (IS_ERR(clki->clk)) {
			ret = PTR_ERR(clki->clk);
			dev_err(dev, "%s: %s clk get failed, %d\n",
				__func__, clki->name, ret);
			goto out;
		}

		if (clki->max_freq) {
			ret = clk_set_rate(clki->clk, clki->max_freq);
			if (ret) {
				dev_err(hba->dev, "%s: %s clk set rate(%dHz) failed, %d\n",
					__func__, clki->name,
					clki->max_freq, ret);
				goto out;
			}
#if defined(CONFIG_PM_DEVFREQ)
			clki->curr_freq = clki->max_freq;
#endif
		}
		dev_dbg(dev, "%s: clk: %s, rate: %lu\n", __func__,
			clki->name, clk_get_rate(clki->clk));
	}
out:
	return ret;
}
static int ufshcd_variant_hba_init(struct ufs_hba *hba)
{
	int err = 0;

	if (!hba->vops)
		goto out;

	err = ufshcd_vops_init(hba);
	if (err)
		goto out;

	err = ufshcd_vops_setup_regulators(hba, true);
	if (err)
		goto out_exit;

	goto out;

out_exit:
	ufshcd_vops_exit(hba);
out:
	if (err)
		dev_err(hba->dev, "%s: variant %s init failed err %d\n",
			__func__, ufshcd_get_var_name(hba), err);
	return err;
}
static void ufshcd_variant_hba_exit(struct ufs_hba *hba)
{
	if (!hba->vops)
		return;

	ufshcd_vops_setup_regulators(hba, false);

	ufshcd_vops_exit(hba);
}
static int ufshcd_hba_init(struct ufs_hba *hba)
{
	int err;

	/*
	 * Handle host controller power separately from the UFS device power
	 * rails as it will help controlling the UFS host controller power
	 * collapse easily which is different than UFS device power collapse.
	 * Also, enable the host controller power before we go ahead with rest
	 * of the initialization here.
	 */
	err = ufshcd_init_hba_vreg(hba);
	if (err)
		goto out;

	err = ufshcd_setup_hba_vreg(hba, true);
	if (err)
		goto out;

	err = ufshcd_init_clocks(hba);
	if (err)
		goto out_disable_hba_vreg;

	err = ufshcd_setup_clocks(hba, true);
	if (err)
		goto out_disable_hba_vreg;

	err = ufshcd_init_vreg(hba);
	if (err)
		goto out_disable_clks;

	err = ufshcd_setup_vreg(hba, true);
	if (err)
		goto out_disable_clks;

	err = ufshcd_variant_hba_init(hba);
	if (err)
		goto out_disable_vreg;

	hba->is_powered = true;
	goto out;

out_disable_vreg:
	ufshcd_setup_vreg(hba, false);
out_disable_clks:
	ufshcd_setup_clocks(hba, false);
out_disable_hba_vreg:
	ufshcd_setup_hba_vreg(hba, false);
out:
	return err;
}
static void ufshcd_hba_exit(struct ufs_hba *hba)
{
	if (hba->is_powered) {
		ufshcd_variant_hba_exit(hba);
		ufshcd_setup_vreg(hba, false);
#if defined(CONFIG_PM_DEVFREQ)
		ufshcd_suspend_clkscaling(hba);
#endif
		if (ufshcd_is_clkscaling_supported(hba)) {
#if defined(CONFIG_PM_DEVFREQ)
			if (hba->devfreq)
				ufshcd_suspend_clkscaling(hba);
#endif
			destroy_workqueue(hba->clk_scaling.workq);
		}
		ufshcd_setup_clocks(hba, false);
		ufshcd_setup_hba_vreg(hba, false);
		hba->is_powered = false;
	}
}
static int
ufshcd_send_request_sense(struct ufs_hba *hba, struct scsi_device *sdp)
{
	unsigned char cmd[6] = {REQUEST_SENSE,
				0,
				0,
				0,
				UFSHCD_REQ_SENSE_SIZE,
				0};
	char *buffer;
	int ret;

	buffer = kzalloc(UFSHCD_REQ_SENSE_SIZE, GFP_KERNEL);
	if (!buffer) {
		ret = -ENOMEM;
		goto out;
	}

	ret = scsi_execute(sdp, cmd, DMA_FROM_DEVICE, buffer,
			UFSHCD_REQ_SENSE_SIZE, NULL, NULL,
			msecs_to_jiffies(1000), 3, 0, RQF_PM, NULL);
	if (ret)
		pr_err("%s: failed with err %d\n", __func__, ret);

	kfree(buffer);
out:
	return ret;
}
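/*
 * Shape of the 6-byte CDB built above (REQUEST SENSE, opcode 0x03):
 *
 *	cmd[0] = 0x03                   opcode
 *	cmd[1..3] = 0                   reserved
 *	cmd[4] = UFSHCD_REQ_SENSE_SIZE  allocation length (18 bytes)
 *	cmd[5] = 0                      control
 */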
/**
 * ufshcd_set_dev_pwr_mode - sends START STOP UNIT command to set device
 *			     power mode
 * @hba: per adapter instance
 * @pwr_mode: device power mode to set
 *
 * Returns 0 if requested power mode is set successfully
 * Returns non-zero if failed to set the requested power mode
 */
static int ufshcd_set_dev_pwr_mode(struct ufs_hba *hba,
				   enum ufs_dev_pwr_mode pwr_mode)
{
	unsigned char cmd[6] = { START_STOP };
	struct scsi_sense_hdr sshdr;
	struct scsi_device *sdp;
	unsigned long flags;
	int ret;

	spin_lock_irqsave(hba->host->host_lock, flags);
	sdp = hba->sdev_ufs_device;
	if (sdp) {
		ret = scsi_device_get(sdp);
		if (!ret && !scsi_device_online(sdp)) {
			ret = -ENODEV;
			scsi_device_put(sdp);
		}
	} else {
		ret = -ENODEV;
	}
	spin_unlock_irqrestore(hba->host->host_lock, flags);

	if (ret)
		return ret;

	/*
	 * If scsi commands fail, the scsi mid-layer schedules scsi error-
	 * handling, which would wait for host to be resumed. Since we know
	 * we are functional while we are here, skip host resume in error
	 * handling context.
	 */
	hba->host->eh_noresume = 1;
	if (hba->wlun_dev_clr_ua) {
		ret = ufshcd_send_request_sense(hba, sdp);
		if (ret)
			goto out;
		/* Unit attention condition is cleared now */
		hba->wlun_dev_clr_ua = false;
	}

	cmd[4] = pwr_mode << 4;

	/*
	 * Current function would be generally called from the power management
	 * callbacks hence set the RQF_PM flag so that it doesn't resume the
	 * already suspended children.
	 */
	pr_info("%s %d\n", __func__, __LINE__);
	ret = scsi_execute(sdp, cmd, DMA_NONE, NULL, 0, NULL, &sshdr,
			(23 * HZ), 0, 0, RQF_PM, NULL);
	pr_info("%s %d\n", __func__, __LINE__);
	if (ret) {
		sdev_printk(KERN_WARNING, sdp,
			    "START_STOP failed for power mode: %d, result %x\n",
			    pwr_mode, ret);
		if (driver_byte(ret) & DRIVER_SENSE)
			scsi_print_sense_hdr(sdp, NULL, &sshdr);
	}

	if (!ret)
		hba->curr_dev_pwr_mode = pwr_mode;
out:
	scsi_device_put(sdp);
	hba->host->eh_noresume = 0;
	return ret;
}
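/*
 * Illustration of the power-condition encoding above (enum values assumed
 * from ufs.h): UFS_SLEEP_PWR_MODE = 2 yields cmd[4] = 2 << 4 = 0x20, i.e.
 * the "Sleep" power condition in the upper nibble of START STOP UNIT CDB
 * byte 4.
 */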
static int ufshcd_link_state_transition(struct ufs_hba *hba,
					enum uic_link_state req_link_state,
					int check_for_bkops)
{
	int ret = 0;

	if (req_link_state == hba->uic_link_state)
		return 0;

	if (req_link_state == UIC_LINK_HIBERN8_STATE ||
	    req_link_state == UIC_LINK_OFF_STATE) {
		ufshcd_set_link_trans_hibern8(hba);
		ret = ufshcd_link_hibern8_ctrl(hba, true);
		if (!ret)
			ufshcd_set_link_hibern8(hba);
		else {
			unsigned long flags;
			bool saved_is_suspended = hba->clk_gating.is_suspended;

			spin_lock_irqsave(hba->host->host_lock, flags);
			hba->clk_gating.state = __CLKS_ON;
			spin_unlock_irqrestore(hba->host->host_lock, flags);

			hba->clk_gating.is_suspended = true;
			ufshcd_host_reset_and_restore(hba);
			spin_lock_irqsave(hba->host->host_lock, flags);
			hba->clk_gating.state = CLKS_ON;
			spin_unlock_irqrestore(hba->host->host_lock, flags);
			hba->clk_gating.is_suspended = saved_is_suspended;

			goto out;
		}
	}

	/*
	 * If autobkops is enabled, link can't be turned off because
	 * turning off the link would also turn off the device.
	 */
	if ((req_link_state == UIC_LINK_OFF_STATE) &&
	    (!check_for_bkops || (check_for_bkops &&
	    !hba->auto_bkops_enabled))) {
		unsigned long flags;

		/*
		 * Change controller state to "reset state" which
		 * should also put the link in off/reset state
		 */
		spin_lock_irqsave(hba->host->host_lock, flags);
		hba->ufshcd_state = UFSHCD_STATE_RESET;
		ufshcd_hba_stop(hba, true);
		spin_unlock_irqrestore(hba->host->host_lock, flags);
		/*
		 * TODO: Check if we need any delay to make sure that
		 * controller is reset
		 */
		ufshcd_set_link_off(hba);
	}

out:
	return ret;
}
static void ufshcd_vreg_set_lpm(struct ufs_hba *hba)
{
	/*
	 * It seems some UFS devices may keep drawing more than sleep current
	 * (at least for 500us) from UFS rails (especially from VCCQ rail).
	 * To avoid this situation, add 2ms delay before putting these UFS
	 * rails in LPM mode.
	 */
	if (!ufshcd_is_link_active(hba) &&
	    hba->dev_quirks & UFS_DEVICE_QUIRK_DELAY_BEFORE_LPM)
		usleep_range(2000, 2100);

	/*
	 * If UFS device is in UFS_Sleep state, turn off VCC rail to save some
	 * power.
	 *
	 * If UFS device and link is in OFF state, all power supplies (VCC,
	 * VCCQ, VCCQ2) can be turned off if power on write protect is not
	 * required. If UFS link is inactive (Hibern8 or OFF state) and device
	 * is in sleep state, put VCCQ & VCCQ2 rails in LPM mode.
	 *
	 * Ignore the error returned by ufshcd_toggle_vreg() as device is anyway
	 * in low power state which would save some power.
	 */
	if (ufshcd_is_ufs_dev_poweroff(hba) && ufshcd_is_link_off(hba) &&
	    !hba->dev_info.is_lu_power_on_wp) {
		ufshcd_setup_vreg(hba, false);
	} else if (!ufshcd_is_ufs_dev_active(hba)) {
		ufshcd_toggle_vreg(hba->dev, hba->vreg_info.vcc, false);
		if (!ufshcd_is_link_active(hba)) {
			ufshcd_config_vreg_lpm(hba, hba->vreg_info.vccq);
			ufshcd_config_vreg_lpm(hba, hba->vreg_info.vccq2);
		}
	}
}
static int ufshcd_vreg_set_hpm(struct ufs_hba *hba)
{
	int ret = 0;

	if (ufshcd_is_ufs_dev_poweroff(hba) && ufshcd_is_link_off(hba) &&
	    !hba->dev_info.is_lu_power_on_wp) {
		ret = ufshcd_setup_vreg(hba, true);
	} else if (!ufshcd_is_ufs_dev_active(hba)) {
		if (!ret && !ufshcd_is_link_active(hba)) {
			ret = ufshcd_config_vreg_hpm(hba, hba->vreg_info.vccq);
			if (ret)
				goto vcc_disable;
			ret = ufshcd_config_vreg_hpm(hba, hba->vreg_info.vccq2);
			if (ret)
				goto vccq_lpm;
		}
		ret = ufshcd_toggle_vreg(hba->dev, hba->vreg_info.vcc, true);
	}
	goto out;

vccq_lpm:
	ufshcd_config_vreg_lpm(hba, hba->vreg_info.vccq);
vcc_disable:
	ufshcd_toggle_vreg(hba->dev, hba->vreg_info.vcc, false);
out:
	return ret;
}
static void ufshcd_hba_vreg_set_lpm(struct ufs_hba *hba)
{
	if (ufshcd_is_link_off(hba))
		ufshcd_setup_hba_vreg(hba, false);
}

static void ufshcd_hba_vreg_set_hpm(struct ufs_hba *hba)
{
	if (ufshcd_is_link_off(hba))
		ufshcd_setup_hba_vreg(hba, true);
}
/**
 * ufshcd_suspend - helper function for suspend operations
 * @hba: per adapter instance
 * @pm_op: desired low power operation type
 *
 * This function will try to put the UFS device and link into low power
 * mode based on the "rpm_lvl" (Runtime PM level) or "spm_lvl"
 * (System PM level).
 *
 * If this function is called during shutdown, it will make sure that
 * both UFS device and UFS link is powered off.
 *
 * NOTE: UFS device & link must be active before we enter in this function.
 *
 * Returns 0 for success and non-zero for failure
 */
static int ufshcd_suspend(struct ufs_hba *hba, enum ufs_pm_op pm_op)
{
	int ret = 0;
	enum ufs_pm_level pm_lvl;
	enum ufs_dev_pwr_mode req_dev_pwr_mode;
	enum uic_link_state req_link_state;
	bool gating_allowed = !ufshcd_can_fake_clkgating(hba);

	hba->pm_op_in_progress = 1;
	if (!ufshcd_is_shutdown_pm(pm_op)) {
		pm_lvl = ufshcd_is_runtime_pm(pm_op) ?
			 hba->rpm_lvl : hba->spm_lvl;
		req_dev_pwr_mode = ufs_get_pm_lvl_to_dev_pwr_mode(pm_lvl);
		req_link_state = ufs_get_pm_lvl_to_link_pwr_state(pm_lvl);
	} else {
		req_dev_pwr_mode = UFS_POWERDOWN_PWR_MODE;
		req_link_state = UIC_LINK_OFF_STATE;
	}

	/*
	 * If we can't transition into any of the low power modes
	 * just gate the clocks.
	 */
	ufshcd_hold(hba, false);
	hba->clk_gating.is_suspended = true;

	if (hba->clk_scaling.is_allowed) {
		cancel_work_sync(&hba->clk_scaling.suspend_work);
		cancel_work_sync(&hba->clk_scaling.resume_work);
#if defined(CONFIG_PM_DEVFREQ)
		ufshcd_suspend_clkscaling(hba);
#endif
	}

	if (req_dev_pwr_mode == UFS_ACTIVE_PWR_MODE &&
	    req_link_state == UIC_LINK_ACTIVE_STATE) {
		goto disable_clks;
	}

	if ((req_dev_pwr_mode == hba->curr_dev_pwr_mode) &&
	    (req_link_state == hba->uic_link_state))
		goto enable_gating;

	/* UFS device & link must be active before we enter in this function */
	if (!ufshcd_is_ufs_dev_active(hba) || !ufshcd_is_link_active(hba)) {
		ret = -EINVAL;
		goto enable_gating;
	}

	if (ufshcd_is_runtime_pm(pm_op)) {
		if (ufshcd_can_autobkops_during_suspend(hba)) {
			/*
			 * The device is idle with no requests in the queue,
			 * allow background operations if bkops status shows
			 * that performance might be impacted.
			 */
			ret = ufshcd_urgent_bkops(hba);
			if (ret)
				goto enable_gating;
		} else {
			/* make sure that auto bkops is disabled */
			ufshcd_disable_auto_bkops(hba);
		}
	}

	if (ufshcd_is_shutdown_pm(pm_op))
		ufs_shutdown_state = 1;

	if ((req_dev_pwr_mode != hba->curr_dev_pwr_mode) &&
	    ((ufshcd_is_runtime_pm(pm_op) && !hba->auto_bkops_enabled) ||
	     !ufshcd_is_runtime_pm(pm_op))) {
		/* ensure that bkops is disabled */
		ufshcd_disable_auto_bkops(hba);
		ret = ufshcd_set_dev_pwr_mode(hba, req_dev_pwr_mode);
		if (ret)
			goto enable_gating;
	}

	ret = ufshcd_link_state_transition(hba, req_link_state, 1);
	if (ret)
		goto set_dev_active;

disable_clks:

	/*
	 * Flush pending works before clock is disabled
	 */
	cancel_work_sync(&hba->eh_work);
	cancel_work_sync(&hba->eeh_work);

	/*
	 * Disable the host irq as there won't be any host controller
	 * transaction expected till resume.
	 */
	ufshcd_disable_irq(hba);

	ufshcd_vreg_set_lpm(hba);

	if (gating_allowed) {
		if (!ufshcd_is_link_active(hba))
			ufshcd_setup_clocks(hba, false);
		else
			/* If link is active, device ref_clk can't be switched off */
			__ufshcd_setup_clocks(hba, false, true);
	}

	hba->clk_gating.state = CLKS_OFF;
	trace_ufshcd_clk_gating(dev_name(hba->dev), hba->clk_gating.state);
	/*
	 * Call vendor specific suspend callback. As these callbacks may access
	 * vendor specific host controller register space call them before the
	 * host clocks are ON.
	 */
	ret = ufshcd_vops_suspend(hba, pm_op);
	if (ret)
		goto set_link_active;

	/* Put the host controller in low power mode if possible */
	ufshcd_hba_vreg_set_lpm(hba);
	goto out;

set_link_active:
#if defined(CONFIG_PM_DEVFREQ)
	if (hba->clk_scaling.is_allowed)
		ufshcd_resume_clkscaling(hba);
#endif
	if (ufshcd_is_shutdown_pm(pm_op))
		goto out;

	ret = ufshcd_enable_irq(hba);
	if (ret)
		goto out;

	if (ufshcd_is_link_hibern8(hba)) {
		ufshcd_set_link_trans_active(hba);
		if (!ufshcd_link_hibern8_ctrl(hba, false))
			ufshcd_set_link_active(hba);
		else
			ufshcd_set_link_off(hba);
	} else if (ufshcd_is_link_off(hba))
		ufshcd_host_reset_and_restore(hba);
set_dev_active:
	if (ufshcd_is_shutdown_pm(pm_op))
		goto out;

	if (!ufshcd_set_dev_pwr_mode(hba, UFS_ACTIVE_PWR_MODE))
		ufshcd_disable_auto_bkops(hba);
enable_gating:
#if defined(CONFIG_PM_DEVFREQ)
	if (hba->clk_scaling.is_allowed)
		ufshcd_resume_clkscaling(hba);
#endif
	hba->clk_gating.is_suspended = false;
	ufshcd_release(hba);
out:
	hba->pm_op_in_progress = 0;
	dev_info(hba->dev, "UFS suspend done\n");

	return ret;
}
/**
 * ufshcd_resume - helper function for resume operations
 * @hba: per adapter instance
 * @pm_op: runtime PM or system PM
 *
 * This function basically brings the UFS device, UniPro link and controller
 * back to active state.
 *
 * Returns 0 for success and non-zero for failure
 */
static int ufshcd_resume(struct ufs_hba *hba, enum ufs_pm_op pm_op)
{
	int ret;
	enum uic_link_state old_link_state;
	enum ufs_pm_level pm_lvl;
	bool gating_allowed = !ufshcd_can_fake_clkgating(hba);

	hba->pm_op_in_progress = 1;
	if (ufshcd_is_system_pm(pm_op))
		pm_lvl = hba->spm_lvl;
	else
		pm_lvl = hba->rpm_lvl;

	if (ufs_get_pm_lvl_to_link_pwr_state(pm_lvl) == UIC_LINK_OFF_STATE)
		hba->uic_link_state = UIC_LINK_OFF_STATE;
	old_link_state = hba->uic_link_state;

	ufshcd_hba_vreg_set_hpm(hba);

	ret = ufshcd_vreg_set_hpm(hba);
	if (ret)
		goto disable_irq_and_vops_clks;

	/*
	 * Call vendor specific resume callback. As these callbacks may access
	 * vendor specific host controller register space call them when the
	 * host clocks are ON.
	 */
	ret = ufshcd_vops_resume(hba, pm_op);
	if (ret)
		goto disable_vreg;

	if (gating_allowed) {
		/* Make sure clocks are enabled before accessing controller */
		ret = ufshcd_setup_clocks(hba, true);
		if (ret)
			goto disable_vreg;
	}

	/* enable the host irq as host controller would be active soon */
	ret = ufshcd_enable_irq(hba);
	if (ret)
		goto disable_irq_and_vops_clks;

	if (ufshcd_is_link_hibern8(hba)) {
		ufshcd_set_link_trans_active(hba);
		ret = ufshcd_link_hibern8_ctrl(hba, false);
		if (!ret)
			ufshcd_set_link_active(hba);
		else {
			ufshcd_set_link_off(hba);
			goto vendor_suspend;
		}
	} else if (ufshcd_is_link_off(hba)) {
#ifdef CONFIG_SCSI_UFS_ASYNC_RELINK
		hba->async_resume = true;
		ret = ufshcd_host_reset_and_restore(hba);
		goto async_resume;
#else
		ret = ufshcd_host_reset_and_restore(hba);
#endif

		/*
		 * ufshcd_host_reset_and_restore() should have already
		 * set the link state as active
		 */
		if (ret || !ufshcd_is_link_active(hba))
			goto vendor_suspend;
	}

	if (!ufshcd_is_ufs_dev_active(hba)) {
		ret = ufshcd_set_dev_pwr_mode(hba, UFS_ACTIVE_PWR_MODE);
		if (ret)
			goto set_old_link_state;
	}

	if (ufshcd_keep_autobkops_enabled_except_suspend(hba))
		ufshcd_enable_auto_bkops(hba);
	else
		/*
		 * If BKOPs operations are urgently needed at this moment then
		 * keep auto-bkops enabled or else disable it.
		 */
		ufshcd_urgent_bkops(hba);

#ifdef CONFIG_SCSI_UFS_ASYNC_RELINK
async_resume:
#endif
	hba->clk_gating.is_suspended = false;

#if defined(CONFIG_PM_DEVFREQ)
	if (hba->clk_scaling.is_allowed)
		ufshcd_resume_clkscaling(hba);
#endif

	/* Schedule clock gating in case of no access to UFS device yet */
	ufshcd_release(hba);
	goto out;

set_old_link_state:
	ufshcd_link_state_transition(hba, old_link_state, 0);
vendor_suspend:
	ufshcd_vops_suspend(hba, pm_op);
disable_irq_and_vops_clks:
	ufshcd_disable_irq(hba);
#if defined(CONFIG_PM_DEVFREQ)
	if (hba->clk_scaling.is_allowed)
		ufshcd_suspend_clkscaling(hba);
#endif
	if (gating_allowed)
		ufshcd_setup_clocks(hba, false);
disable_vreg:
	ufshcd_vreg_set_lpm(hba);
out:
	hba->pm_op_in_progress = 0;

	if (hba->monitor.flag & UFSHCD_MONITOR_LEVEL1)
		dev_info(hba->dev, "UFS resume done\n");

	return ret;
}
/**
 * ufshcd_system_suspend - system suspend routine
 * @hba: per adapter instance
 *
 * Check the description of ufshcd_suspend() function for more details.
 *
 * Returns 0 for success and non-zero for failure
 */
int ufshcd_system_suspend(struct ufs_hba *hba)
{
	int ret = 0;
	ktime_t start = ktime_get();

	if (!hba || !hba->is_powered)
		return 0;

	if ((ufs_get_pm_lvl_to_dev_pwr_mode(hba->spm_lvl) ==
	     hba->curr_dev_pwr_mode) &&
	    (ufs_get_pm_lvl_to_link_pwr_state(hba->spm_lvl) ==
	     hba->uic_link_state))
		goto out;

	if (pm_runtime_suspended(hba->dev)) {
		/*
		 * UFS device and/or UFS link low power states during runtime
		 * suspend seems to be different than what is expected during
		 * system suspend. Hence runtime resume the device & link and
		 * let the system suspend low power states to take effect.
		 * TODO: If resume takes longer time, we might have optimize
		 * it in future by not resuming everything if possible.
		 */
		ret = ufshcd_runtime_resume(hba);
		if (ret)
			goto out;
	}

	ret = ufshcd_suspend(hba, UFS_SYSTEM_PM);
out:
	trace_ufshcd_system_suspend(dev_name(hba->dev), ret,
		ktime_to_us(ktime_sub(ktime_get(), start)),
		hba->curr_dev_pwr_mode, hba->uic_link_state);
	if (!ret)
		hba->is_sys_suspended = true;
	return ret;
}
EXPORT_SYMBOL(ufshcd_system_suspend);
/**
 * ufshcd_system_resume - system resume routine
 * @hba: per adapter instance
 *
 * Returns 0 for success and non-zero for failure
 */
int ufshcd_system_resume(struct ufs_hba *hba)
{
	int ret = 0;
	ktime_t start = ktime_get();

	if (!hba)
		return -EINVAL;

	if (!hba->is_powered || pm_runtime_suspended(hba->dev))
		/*
		 * Let the runtime resume take care of resuming
		 * if runtime suspended.
		 */
		goto out;
	else
		ret = ufshcd_resume(hba, UFS_SYSTEM_PM);
out:
	trace_ufshcd_system_resume(dev_name(hba->dev), ret,
		ktime_to_us(ktime_sub(ktime_get(), start)),
		hba->curr_dev_pwr_mode, hba->uic_link_state);
	if (!ret)
		hba->is_sys_suspended = false;
	return ret;
}
EXPORT_SYMBOL(ufshcd_system_resume);
/**
 * ufshcd_runtime_suspend - runtime suspend routine
 * @hba: per adapter instance
 *
 * Check the description of ufshcd_suspend() function for more details.
 *
 * Returns 0 for success and non-zero for failure
 */
int ufshcd_runtime_suspend(struct ufs_hba *hba)
{
	int ret = 0;
	ktime_t start = ktime_get();

	if (!hba)
		return -EINVAL;

	if (!hba->is_powered)
		goto out;
	else
		ret = ufshcd_suspend(hba, UFS_RUNTIME_PM);
out:
	trace_ufshcd_runtime_suspend(dev_name(hba->dev), ret,
		ktime_to_us(ktime_sub(ktime_get(), start)),
		hba->curr_dev_pwr_mode, hba->uic_link_state);
	return ret;
}
EXPORT_SYMBOL(ufshcd_runtime_suspend);
/**
 * ufshcd_runtime_resume - runtime resume routine
 * @hba: per adapter instance
 *
 * This function basically brings the UFS device, UniPro link and controller
 * to active state. Following operations are done in this function:
 *
 * 1. Turn on all the controller related clocks
 * 2. Bring the UniPro link out of Hibernate state
 * 3. If UFS device is in sleep state, turn ON VCC rail and bring the UFS
 *    device to active state.
 * 4. If auto-bkops is enabled on the device, disable it.
 *
 * So following would be the possible power state after this function return
 * successfully:
 *	S1: UFS device in Active state with VCC rail ON
 *	    UniPro link in Active state
 *	    All the UFS/UniPro controller clocks are ON
 *
 * Returns 0 for success and non-zero for failure
 */
int ufshcd_runtime_resume(struct ufs_hba *hba)
{
	int ret = 0;
	ktime_t start = ktime_get();

	if (!hba)
		return -EINVAL;

	if (!hba->is_powered)
		goto out;
	else
		ret = ufshcd_resume(hba, UFS_RUNTIME_PM);
out:
	trace_ufshcd_runtime_resume(dev_name(hba->dev), ret,
		ktime_to_us(ktime_sub(ktime_get(), start)),
		hba->curr_dev_pwr_mode, hba->uic_link_state);
	return ret;
}
EXPORT_SYMBOL(ufshcd_runtime_resume);
int ufshcd_runtime_idle(struct ufs_hba *hba)
{
	return 0;
}
EXPORT_SYMBOL(ufshcd_runtime_idle);
static inline ssize_t ufshcd_pm_lvl_store(struct device *dev,
					  struct device_attribute *attr,
					  const char *buf, size_t count,
					  bool rpm)
{
	struct ufs_hba *hba = dev_get_drvdata(dev);
	unsigned long flags, value;

	if (kstrtoul(buf, 0, &value))
		return -EINVAL;

	if (value >= UFS_PM_LVL_MAX)
		return -EINVAL;

	spin_lock_irqsave(hba->host->host_lock, flags);
	if (rpm)
		hba->rpm_lvl = value;
	else
		hba->spm_lvl = value;
	spin_unlock_irqrestore(hba->host->host_lock, flags);
	return count;
}
static ssize_t ufshcd_rpm_lvl_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct ufs_hba *hba = dev_get_drvdata(dev);
	int curr_len;
	u8 lvl;

	curr_len = snprintf(buf, PAGE_SIZE,
			    "\nCurrent Runtime PM level [%d] => dev_state [%s] link_state [%s]\n",
			    hba->rpm_lvl,
			    ufschd_ufs_dev_pwr_mode_to_string(
				ufs_pm_lvl_states[hba->rpm_lvl].dev_state),
			    ufschd_uic_link_state_to_string(
				ufs_pm_lvl_states[hba->rpm_lvl].link_state));

	curr_len += snprintf((buf + curr_len), (PAGE_SIZE - curr_len),
			     "\nAll available Runtime PM levels info:\n");
	for (lvl = UFS_PM_LVL_0; lvl < UFS_PM_LVL_MAX; lvl++)
		curr_len += snprintf((buf + curr_len), (PAGE_SIZE - curr_len),
				     "\tRuntime PM level [%d] => dev_state [%s] link_state [%s]\n",
				     lvl,
				     ufschd_ufs_dev_pwr_mode_to_string(
					ufs_pm_lvl_states[lvl].dev_state),
				     ufschd_uic_link_state_to_string(
					ufs_pm_lvl_states[lvl].link_state));

	return curr_len;
}
static ssize_t ufshcd_rpm_lvl_store(struct device *dev,
		struct device_attribute *attr, const char *buf, size_t count)
{
	return ufshcd_pm_lvl_store(dev, attr, buf, count, true);
}

static void ufshcd_add_rpm_lvl_sysfs_nodes(struct ufs_hba *hba)
{
	hba->rpm_lvl_attr.show = ufshcd_rpm_lvl_show;
	hba->rpm_lvl_attr.store = ufshcd_rpm_lvl_store;
	sysfs_attr_init(&hba->rpm_lvl_attr.attr);
	hba->rpm_lvl_attr.attr.name = "rpm_lvl";
	hba->rpm_lvl_attr.attr.mode = 0644;
	if (device_create_file(hba->dev, &hba->rpm_lvl_attr))
		dev_err(hba->dev, "Failed to create sysfs for rpm_lvl\n");
}
static ssize_t ufshcd_spm_lvl_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct ufs_hba *hba = dev_get_drvdata(dev);
	int curr_len;
	u8 lvl;

	curr_len = snprintf(buf, PAGE_SIZE,
			    "\nCurrent System PM level [%d] => dev_state [%s] link_state [%s]\n",
			    hba->spm_lvl,
			    ufschd_ufs_dev_pwr_mode_to_string(
				ufs_pm_lvl_states[hba->spm_lvl].dev_state),
			    ufschd_uic_link_state_to_string(
				ufs_pm_lvl_states[hba->spm_lvl].link_state));

	curr_len += snprintf((buf + curr_len), (PAGE_SIZE - curr_len),
			     "\nAll available System PM levels info:\n");
	for (lvl = UFS_PM_LVL_0; lvl < UFS_PM_LVL_MAX; lvl++)
		curr_len += snprintf((buf + curr_len), (PAGE_SIZE - curr_len),
				     "\tSystem PM level [%d] => dev_state [%s] link_state [%s]\n",
				     lvl,
				     ufschd_ufs_dev_pwr_mode_to_string(
					ufs_pm_lvl_states[lvl].dev_state),
				     ufschd_uic_link_state_to_string(
					ufs_pm_lvl_states[lvl].link_state));

	return curr_len;
}
static ssize_t ufshcd_spm_lvl_store(struct device *dev,
		struct device_attribute *attr, const char *buf, size_t count)
{
	return ufshcd_pm_lvl_store(dev, attr, buf, count, false);
}

static void ufshcd_add_spm_lvl_sysfs_nodes(struct ufs_hba *hba)
{
	hba->spm_lvl_attr.show = ufshcd_spm_lvl_show;
	hba->spm_lvl_attr.store = ufshcd_spm_lvl_store;
	sysfs_attr_init(&hba->spm_lvl_attr.attr);
	hba->spm_lvl_attr.attr.name = "spm_lvl";
	hba->spm_lvl_attr.attr.mode = 0644;
	if (device_create_file(hba->dev, &hba->spm_lvl_attr))
		dev_err(hba->dev, "Failed to create sysfs for spm_lvl\n");
}
static inline void ufshcd_add_sysfs_nodes(struct ufs_hba *hba)
{
	ufshcd_add_rpm_lvl_sysfs_nodes(hba);
	ufshcd_add_spm_lvl_sysfs_nodes(hba);
}

static inline void ufshcd_remove_sysfs_nodes(struct ufs_hba *hba)
{
	device_remove_file(hba->dev, &hba->rpm_lvl_attr);
	device_remove_file(hba->dev, &hba->spm_lvl_attr);
}
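/*
 * Example usage of the nodes registered above (the sysfs path is
 * illustrative and depends on the platform device name):
 *
 *	# cat /sys/devices/platform/.../rpm_lvl
 *	# echo 3 > /sys/devices/platform/.../spm_lvl
 *
 * Writes are range-checked against UFS_PM_LVL_MAX in ufshcd_pm_lvl_store();
 * a new level only takes effect on subsequent suspend/resume cycles.
 */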
/**
 * ufshcd_shutdown - shutdown routine
 * @hba: per adapter instance
 *
 * This function would power off both UFS device and UFS link.
 *
 * Returns 0 always to allow force shutdown even in case of errors.
 */
int ufshcd_shutdown(struct ufs_hba *hba)
{
	int ret = 0;

	if (ufshcd_is_ufs_dev_poweroff(hba) && ufshcd_is_link_off(hba))
		goto out;

	if (pm_runtime_suspended(hba->dev)) {
		ret = ufshcd_runtime_resume(hba);
		if (ret)
			goto out;
	}

	ret = ufshcd_suspend(hba, UFS_SHUTDOWN_PM);
out:
	if (ret)
		dev_err(hba->dev, "%s failed, err %d\n", __func__, ret);
	/* allow force shutdown even in case of errors */
	return 0;
}
EXPORT_SYMBOL(ufshcd_shutdown);
/**
 * ufshcd_remove - de-allocate SCSI host and host memory space
 *		data structure memory
 * @hba - per adapter instance
 */
void ufshcd_remove(struct ufs_hba *hba)
{
	ufshcd_remove_sysfs_nodes(hba);
	scsi_remove_host(hba->host);
	/* disable interrupts */
	ufshcd_disable_intr(hba, hba->intr_mask);
	ufshcd_hba_stop(hba, true);

	ufshcd_exit_clk_gating(hba);
#if defined(CONFIG_PM_DEVFREQ)
	if (ufshcd_is_clkscaling_supported(hba))
		device_remove_file(hba->dev, &hba->clk_scaling.enable_attr);
#endif
	ufshcd_hba_exit(hba);
}
EXPORT_SYMBOL_GPL(ufshcd_remove);
/**
 * ufshcd_dealloc_host - deallocate Host Bus Adapter (HBA)
 * @hba: pointer to Host Bus Adapter (HBA)
 */
void ufshcd_dealloc_host(struct ufs_hba *hba)
{
	scsi_host_put(hba->host);
}
EXPORT_SYMBOL_GPL(ufshcd_dealloc_host);
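
/*
 * Illustrative sketch only: a bus-glue driver is expected to tear down in
 * reverse probe order, calling ufshcd_remove() first (SCSI host,
 * interrupts, clock gating) and then ufshcd_dealloc_host() to drop the
 * last reference on the Scsi_Host that backs the ufs_hba allocation. The
 * "example_" wrapper below is hypothetical.
 */
#if 0	/* example only, not compiled */
static int example_ufs_pltfrm_remove(struct platform_device *pdev)
{
	struct ufs_hba *hba = platform_get_drvdata(pdev);

	pm_runtime_get_sync(&pdev->dev);
	ufshcd_remove(hba);
	ufshcd_dealloc_host(hba);
	return 0;
}
#endif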
/**
 * ufshcd_set_dma_mask - Set dma mask based on the controller
 *			 addressing capability
 * @hba: per adapter instance
 *
 * Returns 0 for success, non-zero for failure
 */
static int ufshcd_set_dma_mask(struct ufs_hba *hba)
{
	/* Prefer 64-bit DMA when the controller advertises it; otherwise
	 * fall back to a 32-bit mask.
	 */
	if (hba->capabilities & MASK_64_ADDRESSING_SUPPORT) {
		if (!dma_set_mask_and_coherent(hba->dev, DMA_BIT_MASK(64)))
			return 0;
	}
	return dma_set_mask_and_coherent(hba->dev, DMA_BIT_MASK(32));
}
/**
 * ufshcd_alloc_host - allocate Host Bus Adapter (HBA)
 * @dev: pointer to device handle
 * @hba_handle: driver private handle
 * Returns 0 on success, non-zero value on failure
 */
int ufshcd_alloc_host(struct device *dev, struct ufs_hba **hba_handle)
{
	struct Scsi_Host *host;
	struct ufs_hba *hba;
	int err = 0;

	if (!dev) {
		dev_err(dev,
			"Invalid memory reference: dev is NULL\n");
		err = -ENODEV;
		goto out_error;
	}

	host = scsi_host_alloc(&ufshcd_driver_template,
			       sizeof(struct ufs_hba));
	if (!host) {
		dev_err(dev, "scsi_host_alloc failed\n");
		err = -ENOMEM;
		goto out_error;
	}
	hba = shost_priv(host);
	hba->host = host;
	hba->dev = dev;
	*hba_handle = hba;

	INIT_LIST_HEAD(&hba->clk_list_head);

out_error:
	return err;
}
EXPORT_SYMBOL(ufshcd_alloc_host);
/**
 * ufshcd_init - Driver initialization routine
 * @hba: per-adapter instance
 * @mmio_base: base register address
 * @irq: Interrupt line of device
 * Returns 0 on success, non-zero value on failure
 */
int ufshcd_init(struct ufs_hba *hba, void __iomem *mmio_base, unsigned int irq)
{
	int err;
	struct Scsi_Host *host = hba->host;
	struct device *dev = hba->dev;

	if (!mmio_base) {
		dev_err(hba->dev,
			"Invalid memory reference: mmio_base is NULL\n");
		err = -ENODEV;
		goto out_error;
	}

	hba->mmio_base = mmio_base;
	hba->irq = irq;

	/* Set descriptor lengths to specification defaults */
	ufshcd_def_desc_sizes(hba);

	err = ufshcd_hba_init(hba);
	if (err)
		goto out_error;

	/* Read capabilities registers */
	ufshcd_hba_capabilities(hba);

	/* Get UFS version supported by the controller */
	hba->ufs_version = ufshcd_get_ufs_version(hba);

	if ((hba->ufs_version != UFSHCI_VERSION_10) &&
	    (hba->ufs_version != UFSHCI_VERSION_11) &&
	    (hba->ufs_version != UFSHCI_VERSION_20) &&
	    (hba->ufs_version != UFSHCI_VERSION_21))
		dev_err(hba->dev, "invalid UFS version 0x%x\n",
			hba->ufs_version);

	/* Get Interrupt bit mask per version */
	hba->intr_mask = ufshcd_get_intr_mask(hba);

	err = ufshcd_set_dma_mask(hba);
	if (err) {
		dev_err(hba->dev, "set dma mask failed\n");
		goto out_disable;
	}

	/* Allocate memory for host memory space */
	err = ufshcd_memory_alloc(hba);
	if (err) {
		dev_err(hba->dev, "Memory allocation failed\n");
		goto out_disable;
	}

	/* Configure LRB */
	ufshcd_host_memory_configure(hba);

	host->can_queue = hba->nutrs;
	host->cmd_per_lun = hba->nutrs;
	host->max_id = UFSHCD_MAX_ID;
	host->max_lun = UFS_MAX_LUNS;
	host->max_channel = UFSHCD_MAX_CHANNEL;
	host->unique_id = host->host_no;
	host->max_cmd_len = MAX_CDB_SIZE;

	hba->max_pwr_info.is_valid = false;

	/* Initialize wait queue for task management */
	init_waitqueue_head(&hba->tm_wq);
	init_waitqueue_head(&hba->tm_tag_wq);

	/* Initialize work queues */
	INIT_WORK(&hba->eh_work, ufshcd_err_handler);
	INIT_WORK(&hba->eeh_work, ufshcd_exception_event_handler);

	/* Initialize UIC command mutex */
	mutex_init(&hba->uic_cmd_mutex);

	/* Initialize mutex for device management commands */
	mutex_init(&hba->dev_cmd.lock);

	init_rwsem(&hba->clk_scaling_lock);

	/* Initialize device management tag acquire wait queue */
	init_waitqueue_head(&hba->dev_cmd.tag_wq);

	/* Initialize monitor */
	ufshcd_init_monitor(hba);

	err = ufshcd_init_clk_gating(hba);
	if (err) {
		dev_err(hba->dev, "init clk_gating failed\n");
		goto out_disable;
	}

	/*
	 * In order to avoid any spurious interrupt immediately after
	 * registering UFS controller interrupt handler, clear any pending UFS
	 * interrupt status and disable all the UFS interrupts.
	 */
	ufshcd_writel(hba, ufshcd_readl(hba, REG_INTERRUPT_STATUS),
		      REG_INTERRUPT_STATUS);
	ufshcd_writel(hba, 0, REG_INTERRUPT_ENABLE);
	/*
	 * Make sure that UFS interrupts are disabled and any pending interrupt
	 * status is cleared before registering UFS interrupt handler.
	 */
	mb();

	/* IRQ registration */
	err = devm_request_irq(dev, irq, ufshcd_intr, IRQF_SHARED, UFSHCD, hba);
	if (err) {
		dev_err(hba->dev, "request irq failed\n");
		goto exit_gating;
	} else {
		hba->is_irq_enabled = true;
	}

	err = scsi_add_host(host, hba->dev);
	if (err) {
		dev_err(hba->dev, "scsi_add_host failed\n");
		goto exit_gating;
	}

#if defined(CONFIG_PM_DEVFREQ)
	if (ufshcd_is_clkscaling_supported(hba)) {
		char wq_name[sizeof("ufs_clkscaling_00")];

		INIT_WORK(&hba->clk_scaling.suspend_work,
			  ufshcd_clk_scaling_suspend_work);
		INIT_WORK(&hba->clk_scaling.resume_work,
			  ufshcd_clk_scaling_resume_work);

		snprintf(wq_name, sizeof(wq_name), "ufs_clkscaling_%d",
			 host->host_no);
		hba->clk_scaling.workq = create_singlethread_workqueue(wq_name);

		ufshcd_clkscaling_init_sysfs(hba);
	}
#endif

	/* Hold auto suspend until async scan completes */
	pm_runtime_get_sync(dev);

	/*
	 * The device initialization sequence hasn't been invoked yet.
	 * Set the device to the power-off state.
	 */
	ufshcd_set_ufs_dev_poweroff(hba);

	async_schedule(ufshcd_async_scan, hba);
	ufshcd_add_sysfs_nodes(hba);

	return 0;

exit_gating:
	ufshcd_exit_clk_gating(hba);
out_disable:
	hba->is_irq_enabled = false;
	ufshcd_hba_exit(hba);
out_error:
	return err;
}
EXPORT_SYMBOL_GPL(ufshcd_init);
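
/*
 * Illustrative sketch only: the expected probe sequence for a bus-glue
 * driver is ufshcd_alloc_host() followed by ufshcd_init() with the mapped
 * register base and the interrupt line. Everything prefixed "example_" is
 * hypothetical; error handling is trimmed to the essentials.
 */
#if 0	/* example only, not compiled */
static int example_ufs_pltfrm_probe(struct platform_device *pdev)
{
	struct ufs_hba *hba;
	void __iomem *mmio_base;
	struct resource *res;
	int irq, err;

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	mmio_base = devm_ioremap_resource(&pdev->dev, res);
	if (IS_ERR(mmio_base))
		return PTR_ERR(mmio_base);

	irq = platform_get_irq(pdev, 0);
	if (irq < 0)
		return irq;

	/* allocate the Scsi_Host and the ufs_hba private data */
	err = ufshcd_alloc_host(&pdev->dev, &hba);
	if (err)
		return err;

	/* common init: capabilities, host memory, IRQ, async scan */
	err = ufshcd_init(hba, mmio_base, irq);
	if (err) {
		ufshcd_dealloc_host(hba);
		return err;
	}

	platform_set_drvdata(pdev, hba);
	return 0;
}
#endif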
8850 MODULE_AUTHOR("Santosh Yaragnavi <santosh.sy@samsung.com>");
8851 MODULE_AUTHOR("Vinayak Holikatti <h.vinayak@samsung.com>");
8852 MODULE_DESCRIPTION("Generic UFS host controller driver Core");
8853 MODULE_LICENSE("GPL");
8854 MODULE_VERSION(UFSHCD_DRIVER_VERSION
);