*/
#include <linux/async.h>
+#if defined(CONFIG_PM_DEVFREQ)
#include <linux/devfreq.h>
+#endif
#include <linux/nls.h>
+#include <linux/smc.h>
+#include <scsi/ufs/ioctl.h>
#include <linux/of.h>
+#include <linux/blkdev.h>
+#include <linux/gpio.h>
+
#include "ufshcd.h"
#include "ufs_quirks.h"
#include "unipro.h"
#define QUERY_REQ_RETRIES 3
/* Query request timeout */
#define QUERY_REQ_TIMEOUT 1500 /* 1.5 seconds */
+/*
+ * Query request timeout for fDeviceInit flag
+ * The fDeviceInit query response time for some devices is so long that the
+ * default QUERY_REQ_TIMEOUT may not be enough for them.
+ */
+#define QUERY_FDEVICEINIT_REQ_TIMEOUT 600 /* msec */
/* Task management command timeout */
-#define TM_CMD_TIMEOUT 100 /* msecs */
+#define TM_CMD_TIMEOUT 300 /* msecs */
/* maximum number of retries for a general UIC command */
#define UFS_UIC_COMMAND_RETRIES 3
/* UFS link setup retries */
#define UFS_LINK_SETUP_RETRIES 5
+/* IOCTL opcode for command - ufs set device read only */
+#define UFS_IOCTL_BLKROSET BLKROSET
+
#define ufshcd_toggle_vreg(_dev, _vreg, _on) \
({ \
int _ret; \
_ret; \
})
+static int ufs_shutdown_state;
+
#define ufshcd_hex_dump(prefix_str, buf, len) \
print_hex_dump(KERN_ERR, prefix_str, DUMP_PREFIX_OFFSET, 16, 4, buf, len, false)
return ufs_pm_lvl_states[lvl].link_state;
}
-static inline enum ufs_pm_level
-ufs_get_desired_pm_lvl_for_dev_link_state(enum ufs_dev_pwr_mode dev_state,
- enum uic_link_state link_state)
-{
- enum ufs_pm_level lvl;
-
- for (lvl = UFS_PM_LVL_0; lvl < UFS_PM_LVL_MAX; lvl++) {
- if ((ufs_pm_lvl_states[lvl].dev_state == dev_state) &&
- (ufs_pm_lvl_states[lvl].link_state == link_state))
- return lvl;
- }
-
- /* if no match found, return the level 0 */
- return UFS_PM_LVL_0;
-}
-
static struct ufs_dev_fix ufs_fixups[] = {
/* UFS cards deviations table */
UFS_FIX(UFS_VENDOR_SAMSUNG, UFS_ANY_MODEL,
static int ufshcd_link_hibern8_ctrl(struct ufs_hba *hba, bool en);
static inline void ufshcd_add_delay_before_dme_cmd(struct ufs_hba *hba);
static int ufshcd_host_reset_and_restore(struct ufs_hba *hba);
+#if defined(CONFIG_PM_DEVFREQ)
static void ufshcd_resume_clkscaling(struct ufs_hba *hba);
static void ufshcd_suspend_clkscaling(struct ufs_hba *hba);
static void __ufshcd_suspend_clkscaling(struct ufs_hba *hba);
static int ufshcd_scale_clks(struct ufs_hba *hba, bool scale_up);
+#endif
static irqreturn_t ufshcd_intr(int irq, void *__hba);
static int ufshcd_change_power_mode(struct ufs_hba *hba,
struct ufs_pa_layer_attr *pwr_mode);
+static int ufshcd_set_dev_pwr_mode(struct ufs_hba *hba,
+ enum ufs_dev_pwr_mode pwr_mode);
+static int ufshcd_send_request_sense(struct ufs_hba *hba,
+ struct scsi_device *sdp);
+static void ufshcd_vreg_set_lpm(struct ufs_hba *hba);
+static int ufshcd_vreg_set_hpm(struct ufs_hba *hba);
static inline bool ufshcd_valid_tag(struct ufs_hba *hba, int tag)
{
return tag >= 0 && tag < hba->nutrs;
}
+static ssize_t ufshcd_monitor_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct ufs_hba *hba = dev_get_drvdata(dev);
+
+ return snprintf(buf, PAGE_SIZE, "%lu\n", hba->monitor.flag);
+}
+
+static ssize_t ufshcd_monitor_store(struct device *dev,
+ struct device_attribute *attr, const char *buf, size_t count)
+{
+ struct ufs_hba *hba = dev_get_drvdata(dev);
+ unsigned long value;
+
+ if (kstrtoul(buf, 0, &value))
+ return -EINVAL;
+
+ hba->monitor.flag = value;
+ return count;
+}
+
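+/*
+ * "monitor" sysfs node: a bitmask of UFSHCD_MONITOR_LEVEL* flags that
+ * enables extra logging (per-request issue/completion, hibern8 entry/exit).
+ * Usage sketch, path illustrative only:
+ *	echo 1 > /sys/devices/platform/.../ufs/monitor
+ */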
+static void ufshcd_init_monitor(struct ufs_hba *hba)
+{
+ hba->monitor.attrs.show = ufshcd_monitor_show;
+ hba->monitor.attrs.store = ufshcd_monitor_store;
+ sysfs_attr_init(&hba->monitor.attrs.attr);
+ hba->monitor.attrs.attr.name = "monitor";
+ hba->monitor.attrs.attr.mode = S_IRUGO | S_IWUSR;
+ if (device_create_file(hba->dev, &hba->monitor.attrs))
+ dev_err(hba->dev, "Failed to create sysfs for monitor\n");
+}
+
+
static inline int ufshcd_enable_irq(struct ufs_hba *hba)
{
int ret = 0;
ufshcd_hex_dump("UPIU RSP: ", lrbp->ucd_rsp_ptr,
sizeof(struct utp_upiu_rsp));
- prdt_length = le16_to_cpu(
- lrbp->utr_descriptor_ptr->prd_table_length);
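+	/*
+	 * With UFSHCD_QUIRK_PRDT_BYTE_GRAN the controller keeps the PRDT
+	 * length in bytes rather than in entries, so convert before printing.
+	 */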
+ if (hba->quirks & UFSHCD_QUIRK_PRDT_BYTE_GRAN)
+ prdt_length = le16_to_cpu(lrbp->utr_descriptor_ptr->prd_table_length)
+ / sizeof(struct ufshcd_sg_entry);
+ else
+ prdt_length = le16_to_cpu(lrbp->utr_descriptor_ptr->prd_table_length);
+
dev_err(hba->dev,
"UPIU[%d] - PRDT - %d entries phys@0x%llx\n",
tag, prdt_length,
(u64)lrbp->ucd_prdt_dma_addr);
-
if (pr_prdt)
ufshcd_hex_dump("UPIU PRDT: ", lrbp->ucd_prdt_ptr,
sizeof(struct ufshcd_sg_entry) * prdt_length);
*/
static inline int ufshcd_get_lists_status(u32 reg)
{
- return !((reg & UFSHCD_STATUS_READY) == UFSHCD_STATUS_READY);
+ /*
+ * The mask 0xFF is for the following HCS register bits
+ * Bit Description
+ * 0 Device Present
+ * 1 UTRLRDY
+ * 2 UTMRLRDY
+ * 3 UCRDY
+ * 4-7 reserved
+ */
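+	/* Non-zero iff any of UTRLRDY, UTMRLRDY or UCRDY is not yet set */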
+ return ((reg & 0xFF) >> 1) ^ 0x07;
}
/**
return false;
}
+#if defined(CONFIG_PM_DEVFREQ)
static int ufshcd_scale_clks(struct ufs_hba *hba, bool scale_up)
{
int ret = 0;
if (device_create_file(hba->dev, &hba->clk_scaling.enable_attr))
dev_err(hba->dev, "Failed to create sysfs for clkscale_enable\n");
}
+#endif
static void ufshcd_ungate_work(struct work_struct *work)
{
if (async)
hba->clk_gating.active_reqs--;
case CLKS_ON:
- /*
- * Wait for the ungate work to complete if in progress.
- * Though the clocks may be in ON state, the link could
- * still be in hibner8 state if hibern8 is allowed
- * during clock gating.
- * Make sure we exit hibern8 state also in addition to
- * clocks being ON.
- */
- if (ufshcd_can_hibern8_during_gating(hba) &&
- ufshcd_is_link_hibern8(hba)) {
- spin_unlock_irqrestore(hba->host->host_lock, flags);
- flush_work(&hba->clk_gating.ungate_work);
- spin_lock_irqsave(hba->host->host_lock, flags);
- goto start;
- }
break;
case REQ_CLKS_OFF:
if (cancel_delayed_work(&hba->clk_gating.gate_work)) {
hba->clk_gating.state = REQ_CLKS_ON;
trace_ufshcd_clk_gating(dev_name(hba->dev),
hba->clk_gating.state);
- schedule_work(&hba->clk_gating.ungate_work);
+ queue_work(hba->ufshcd_workq, &hba->clk_gating.ungate_work);
/*
* fall through to check if we should wait for this
* work to be done or not.
hba->clk_gating.state = CLKS_ON;
spin_unlock_irqrestore(hba->host->host_lock, flags);
hba->clk_gating.is_suspended = false;
+ scsi_unblock_requests(hba->host);
trace_ufshcd_clk_gating(dev_name(hba->dev),
hba->clk_gating.state);
goto out;
hba->clk_gating.state = REQ_CLKS_OFF;
trace_ufshcd_clk_gating(dev_name(hba->dev), hba->clk_gating.state);
- schedule_delayed_work(&hba->clk_gating.gate_work,
+ queue_delayed_work(hba->ufshcd_workq, &hba->clk_gating.gate_work,
msecs_to_jiffies(hba->clk_gating.delay_ms));
}
return count;
}
-static void ufshcd_init_clk_gating(struct ufs_hba *hba)
+static int ufshcd_init_clk_gating(struct ufs_hba *hba)
{
+ int ret = 0;
+
if (!ufshcd_is_clkgating_allowed(hba))
- return;
+ goto out;
+
+ hba->ufshcd_workq = alloc_workqueue("ufshcd_wq", WQ_HIGHPRI, 0);
+ if (!hba->ufshcd_workq) {
+ ret = -ENOMEM;
+ goto out;
+ }
hba->clk_gating.delay_ms = LINK_H8_DELAY;
INIT_DELAYED_WORK(&hba->clk_gating.gate_work, ufshcd_gate_work);
hba->clk_gating.enable_attr.attr.mode = 0644;
if (device_create_file(hba->dev, &hba->clk_gating.enable_attr))
dev_err(hba->dev, "Failed to create sysfs for clkgate_enable\n");
+
+out:
+ return ret;
}
static void ufshcd_exit_clk_gating(struct ufs_hba *hba)
{
if (!ufshcd_is_clkgating_allowed(hba))
return;
+ destroy_workqueue(hba->ufshcd_workq);
device_remove_file(hba->dev, &hba->clk_gating.delay_attr);
device_remove_file(hba->dev, &hba->clk_gating.enable_attr);
}
+#if defined(CONFIG_PM_DEVFREQ)
/* Must be called with host lock acquired */
static void ufshcd_clk_scaling_start_busy(struct ufs_hba *hba)
{
scaling->is_busy_started = false;
}
}
+#endif
+
/**
* ufshcd_send_command - Send SCSI or device management commands
* @hba: per adapter instance
void ufshcd_send_command(struct ufs_hba *hba, unsigned int task_tag)
{
hba->lrb[task_tag].issue_time_stamp = ktime_get();
+#if defined(CONFIG_PM_DEVFREQ)
ufshcd_clk_scaling_start_busy(hba);
+#endif
__set_bit(task_tag, &hba->outstanding_reqs);
ufshcd_writel(hba, 1 << task_tag, REG_UTP_TRANSFER_REQ_DOOR_BELL);
/* Make sure that doorbell is committed immediately */
* This function gets the UPMCRS field of HCS register
* Returns value of UPMCRS field
*/
-static inline u8 ufshcd_get_upmcrs(struct ufs_hba *hba)
+static inline u8 ufshcd_get_upmcrs(struct ufs_hba *hba, struct uic_command *cmd)
{
- return (ufshcd_readl(hba, REG_CONTROLLER_STATUS) >> 8) & 0x7;
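+	/*
+	 * Assumption: get_unipro() indexes 3/4/5 select the vendor-kept
+	 * result of the last power-mode change, hibern8 enter and hibern8
+	 * exit, used instead of the UPMCRS field when the quirk is set.
+	 */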
+ if (hba->quirks & UFSHCD_QUIRK_GET_GENERRCODE_DIRECT) {
+ if (cmd->command == UIC_CMD_DME_SET &&
+ cmd->argument1 == UIC_ARG_MIB(PA_PWRMODE))
+ return ufshcd_vops_get_unipro(hba, 3);
+ else if (cmd->command == UIC_CMD_DME_HIBER_ENTER)
+ return ufshcd_vops_get_unipro(hba, 4);
+ else if (cmd->command == UIC_CMD_DME_HIBER_EXIT)
+ return ufshcd_vops_get_unipro(hba, 5);
+ else
+ return (ufshcd_readl(hba, REG_CONTROLLER_STATUS) >> 8) & 0x7;
+ } else
+ return (ufshcd_readl(hba, REG_CONTROLLER_STATUS) >> 8) & 0x7;
}
/**
unsigned long flags;
if (wait_for_completion_timeout(&uic_cmd->done,
- msecs_to_jiffies(UIC_CMD_TIMEOUT)))
- ret = uic_cmd->argument2 & MASK_UIC_COMMAND_RESULT;
- else
+ msecs_to_jiffies(UIC_CMD_TIMEOUT))) {
+ switch (uic_cmd->command) {
+ case UIC_CMD_DME_LINK_STARTUP:
+ case UIC_CMD_DME_HIBER_ENTER:
+ case UIC_CMD_DME_HIBER_EXIT:
+ if (hba->quirks & UFSHCD_QUIRK_GET_GENERRCODE_DIRECT)
+ ret = ufshcd_vops_get_unipro(hba, uic_cmd->command - UIC_CMD_DME_LINK_STARTUP);
+ else
+ ret = uic_cmd->argument2 & MASK_UIC_COMMAND_RESULT;
+ break;
+ default:
+ ret = uic_cmd->argument2 & MASK_UIC_COMMAND_RESULT;
+ break;
+ }
+ } else
ret = -ETIMEDOUT;
spin_lock_irqsave(hba->host->host_lock, flags);
struct scatterlist *sg;
struct scsi_cmnd *cmd;
int sg_segments;
- int i;
+ int i, ret;
+ int sector_offset = 0;
+ int page_index = 0;
cmd = lrbp->cmd;
sg_segments = scsi_dma_map(cmd);
prd_table[i].reserved = 0;
hba->transferred_sector += prd_table[i].size;
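+			/* Vendor hook: program the inline crypto engine for this sg entry */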
+ ret = ufshcd_vops_crypto_engine_cfg(hba, lrbp, sg, i, sector_offset, page_index++);
+ if (ret) {
+ dev_err(hba->dev,
+ "%s: failed to configure crypto engine (%d)\n",
+ __func__, ret);
+ return ret;
+ }
+ sector_offset += UFSHCI_SECTOR_SIZE / MIN_SECTOR_SIZE;
}
} else {
lrbp->utr_descriptor_ptr->prd_table_length = 0;
ucd_req_ptr->header.dword_1 = UPIU_HEADER_DWORD(
0, query->request.query_func, 0, 0);
+ if (query->request.upiu_req.opcode == UPIU_QUERY_OPCODE_READ_DESC)
+ len = 0;
+
/* Data segment length only need for WRITE_DESC */
if (query->request.upiu_req.opcode == UPIU_QUERY_OPCODE_WRITE_DESC)
ucd_req_ptr->header.dword_2 =
u32 upiu_flags;
int ret = 0;
- if (hba->ufs_version == UFSHCI_VERSION_20)
- lrbp->command_type = UTP_CMD_TYPE_UFS_STORAGE;
- else
+ if ((hba->ufs_version == UFSHCI_VERSION_10) ||
+ (hba->ufs_version == UFSHCI_VERSION_11))
lrbp->command_type = UTP_CMD_TYPE_DEV_MANAGE;
+ else
+ lrbp->command_type = UTP_CMD_TYPE_UFS_STORAGE;
ufshcd_prepare_req_desc_hdr(lrbp, &upiu_flags, DMA_NONE);
if (hba->dev_cmd.type == DEV_CMD_TYPE_QUERY)
u32 upiu_flags;
int ret = 0;
- if (hba->ufs_version == UFSHCI_VERSION_20)
- lrbp->command_type = UTP_CMD_TYPE_UFS_STORAGE;
- else
+ if ((hba->ufs_version == UFSHCI_VERSION_10) ||
+ (hba->ufs_version == UFSHCI_VERSION_11))
lrbp->command_type = UTP_CMD_TYPE_SCSI;
+ else
+ lrbp->command_type = UTP_CMD_TYPE_UFS_STORAGE;
if (likely(lrbp->cmd)) {
ufshcd_prepare_req_desc_hdr(lrbp, &upiu_flags,
return scsi_lun & UFS_UPIU_MAX_UNIT_NUM_ID;
}
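+/*
+ * SECURITY PROTOCOL IN/OUT (RPMB traffic) is always routed to the RPMB
+ * well-known LUN, regardless of the LUN it was issued to.
+ */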
+static inline unsigned int ufshcd_get_scsi_lun(struct scsi_cmnd *cmd)
+{
+ if (cmd->cmnd[0] == SECURITY_PROTOCOL_IN ||
+ cmd->cmnd[0] == SECURITY_PROTOCOL_OUT)
+ return (SCSI_W_LUN_BASE |
+ (UFS_UPIU_RPMB_WLUN & UFS_UPIU_MAX_UNIT_NUM_ID));
+ else
+ return cmd->device->lun;
+}
+
/**
* ufshcd_upiu_wlun_to_scsi_wlun - maps UPIU W-LUN id to SCSI W-LUN ID
* @scsi_lun: UPIU W-LUN id
unsigned long flags;
int tag;
int err = 0;
+ unsigned int scsi_lun;
hba = shost_priv(host);
if (!down_read_trylock(&hba->clk_scaling_lock))
return SCSI_MLQUEUE_HOST_BUSY;
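+	/*
+	 * During shutdown, let the final START_STOP through but make sure it
+	 * does not race with a pending clock-ungate work item.
+	 */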
+ if ((ufs_shutdown_state == 1) && (cmd->cmnd[0] == START_STOP)) {
+ scsi_block_requests(hba->host);
+ cancel_work_sync(&hba->clk_gating.ungate_work);
+ }
+
spin_lock_irqsave(hba->host->host_lock, flags);
switch (hba->ufshcd_state) {
case UFSHCD_STATE_OPERATIONAL:
goto out_unlock;
case UFSHCD_STATE_ERROR:
set_host_byte(cmd, DID_ERROR);
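+		/*
+		 * Assumed intent: keep the unconditional scsi_dma_unmap() in
+		 * ufshcd_done() balanced for this early-completion path.
+		 */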
+ scsi_dma_map(cmd);
cmd->scsi_done(cmd);
goto out_unlock;
default:
lrbp->sense_bufflen = UFSHCD_REQ_SENSE_SIZE;
lrbp->sense_buffer = cmd->sense_buffer;
lrbp->task_tag = tag;
- lrbp->lun = ufshcd_scsi_to_upiu_lun(cmd->device->lun);
+
+ scsi_lun = ufshcd_get_scsi_lun(cmd);
+ lrbp->lun = ufshcd_scsi_to_upiu_lun(scsi_lun);
lrbp->intr_cmd = !ufshcd_is_intr_aggr_allowed(hba) ? true : false;
lrbp->req_abort_skip = false;
spin_lock_irqsave(hba->host->host_lock, flags);
if (hba->vops && hba->vops->set_nexus_t_xfer_req)
hba->vops->set_nexus_t_xfer_req(hba, tag, lrbp->cmd);
+#ifdef CONFIG_SCSI_UFS_CMD_LOGGING
+ exynos_ufs_cmd_log_start(hba, cmd);
+#endif
ufshcd_send_command(hba, tag);
+
+ if (hba->monitor.flag & UFSHCD_MONITOR_LEVEL1)
+ dev_info(hba->dev, "IO issued(%d)\n", tag);
out_unlock:
spin_unlock_irqrestore(hba->host->host_lock, flags);
out:
unsigned long flags;
if (!ufshcd_is_link_active(hba)) {
-
+ flush_work(&hba->clk_gating.ungate_work);
+ if (!ufshcd_is_link_active(hba))
return -EPERM;
}
+
down_read(&hba->clk_scaling_lock);
/*
goto out_unlock;
}
+ if (idn == QUERY_FLAG_IDN_FDEVICEINIT)
+ timeout = QUERY_FDEVICEINIT_REQ_TIMEOUT;
+
err = ufshcd_exec_dev_cmd(hba, DEV_CMD_TYPE_QUERY, timeout);
if (err) {
{
struct ufs_query_req *request = NULL;
struct ufs_query_res *response = NULL;
- int err;
+ int err = 0;
BUG_ON(!hba);
goto out;
}
- status = ufshcd_get_upmcrs(hba);
+ status = ufshcd_get_upmcrs(hba, cmd);
if (status != PWR_LOCAL) {
dev_err(hba->dev,
"pwr ctrl cmd 0x%0x failed, host upmcrs:0x%x\n",
ret = (status != PWR_OK) ? status : -1;
}
out:
+ /* Dump debugging information to system memory */
if (ret) {
+ ufshcd_vops_dbg_register_dump(hba);
+ exynos_ufs_show_uic_info(hba);
ufshcd_print_host_state(hba);
ufshcd_print_pwr_info(hba);
ufshcd_print_host_regs(hba);
struct uic_command uic_cmd = {0};
ktime_t start = ktime_get();
- ufshcd_vops_hibern8_notify(hba, UIC_CMD_DME_HIBER_ENTER, PRE_CHANGE);
-
uic_cmd.command = UIC_CMD_DME_HIBER_ENTER;
ret = ufshcd_uic_pwr_ctrl(hba, &uic_cmd);
trace_ufshcd_profile_hibern8(dev_name(hba->dev), "enter",
if (ret) {
dev_err(hba->dev, "%s: hibern8 enter failed. ret = %d\n",
__func__, ret);
-
+ ssleep(2);
/*
* If link recovery fails then return error so that caller
* don't retry the hibern8 enter again.
*/
if (ufshcd_link_recovery(hba))
ret = -ENOLINK;
- } else
- ufshcd_vops_hibern8_notify(hba, UIC_CMD_DME_HIBER_ENTER,
- POST_CHANGE);
+ }
return ret;
}
int ret;
ktime_t start = ktime_get();
- ufshcd_vops_hibern8_notify(hba, UIC_CMD_DME_HIBER_EXIT, PRE_CHANGE);
uic_cmd.command = UIC_CMD_DME_HIBER_EXIT;
ret = ufshcd_uic_pwr_ctrl(hba, &uic_cmd);
__func__, ret);
ret = ufshcd_link_recovery(hba);
} else {
- ufshcd_vops_hibern8_notify(hba, UIC_CMD_DME_HIBER_EXIT,
- POST_CHANGE);
+
hba->ufs_stats.last_hibern8_exit_tstamp = ktime_get();
hba->ufs_stats.hibern8_exit_cnt++;
}
ret = ufshcd_uic_hibern8_enter(hba);
else
ret = ufshcd_uic_hibern8_exit(hba);
+
if (ret)
goto out;
+
+ if (hba->monitor.flag & UFSHCD_MONITOR_LEVEL2) {
+ if (en)
+ dev_info(hba->dev, "H8+\n");
+ else
+ dev_info(hba->dev, "H8-\n");
}
+
if (hba->vops && hba->vops->hibern8_notify)
hba->vops->hibern8_notify(hba, en, POST_CHANGE);
+
out:
hba->tcx_replay_timer_expired_cnt = 0;
hba->fcx_protection_timer_expired_cnt = 0;
return -EINVAL;
}
+ hba->tcx_replay_timer_expired_cnt = 0;
+ hba->fcx_protection_timer_expired_cnt = 0;
+
+ /* Get the peer available lane count */
+ ufshcd_dme_peer_get(hba, UIC_ARG_MIB(PA_AVAILRXDATALANES),
+ &pwr_info->peer_available_lane_rx);
+ ufshcd_dme_peer_get(hba, UIC_ARG_MIB(PA_AVAILTXDATALANES),
+ &pwr_info->peer_available_lane_tx);
+
+ if (!pwr_info->peer_available_lane_rx || !pwr_info->peer_available_lane_tx) {
+ dev_err(hba->dev, "%s: invalid peer available lanes value. rx=%d, tx=%d\n",
+ __func__,
+ pwr_info->peer_available_lane_rx,
+ pwr_info->peer_available_lane_tx);
+ return -EINVAL;
+ }
+
/*
* First, get the maximum gears of HS speed.
* If a zero value, it means there is no HSGEAR capability.
sizeof(struct ufs_pa_layer_attr));
}
+out:
return ret;
}
err = ufshcd_query_flag_retry(hba, UPIU_QUERY_OPCODE_READ_FLAG,
QUERY_FLAG_IDN_FDEVICEINIT, &flag_res);
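+	/* flag still set: short delay before it is checked again (assumption) */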
+ if (!err && flag_res)
+ udelay(100);
+
if (err)
dev_err(hba->dev,
"%s reading fDeviceInit flag failed with error %d\n",
if (hba->vops && hba->vops->host_reset)
hba->vops->host_reset(hba);
+
if (hba->quirks & UFSHCD_QUIRK_USE_OF_HCE) {
ufshcd_set_link_off(hba);
+
/* enable UIC related interrupts */
ufshcd_enable_intr(hba, UFSHCD_UIC_MASK);
{
int ret;
int retries = DME_LINKSTARTUP_RETRIES;
- bool link_startup_again = false;
-
ufshcd_hold(hba, false);
-
- /*
- * If UFS device isn't active then we will have to issue link startup
- * 2 times to make sure the device state move to active.
- */
- if (!ufshcd_is_ufs_dev_active(hba))
- link_startup_again = true;
-link_startup:
do {
ufshcd_vops_link_startup_notify(hba, PRE_CHANGE);
* but we can't be sure if the link is up until link startup
* succeeds. So reset the local Uni-Pro and try again.
*/
- if (ret && ufshcd_hba_enable(hba))
+ if ((ret && !retries) || (ret && ufshcd_hba_enable(hba)))
goto out;
} while (ret && retries--);
/* failed to get the link up... retire */
goto out;
- if (link_startup_again) {
- link_startup_again = false;
- retries = DME_LINKSTARTUP_RETRIES;
- goto link_startup;
- }
-
/* Mark that link is up in PWM-G1, 1-lane, SLOW-AUTO mode */
ufshcd_init_pwr_info(hba);
ufshcd_print_pwr_info(hba);
}
}
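+/*
+ * Block-layer softirq completion: scsi_dma_unmap() is deferred to this
+ * context instead of running in __ufshcd_transfer_req_compl().
+ */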
+static void ufshcd_done(struct request *rq)
+{
+	struct scsi_cmnd *cmd = rq->special;
+
+	scsi_dma_unmap(cmd);
+ scsi_softirq_done(rq);
+}
+
/**
* ufshcd_slave_alloc - handle initial SCSI device configurations
* @sdev: pointer to SCSI device
/* REPORT SUPPORTED OPERATION CODES is not supported */
sdev->no_report_opcodes = 1;
+ /* WRITE_SAME command is not supported */
+ sdev->no_write_same = 1;
ufshcd_set_queue_depth(sdev);
ufshcd_get_lu_power_on_wp_status(hba, sdev);
+ blk_queue_softirq_done(sdev->request_queue, ufshcd_done);
+
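+	/*
+	 * Page-aligned buffers; assumed to be required by the vendor inline
+	 * crypto engine, which operates on page-granular units.
+	 */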
+ blk_queue_update_dma_alignment(sdev->request_queue, PAGE_SIZE - 1);
+
return 0;
}
ufshcd_is_exception_event(lrbp->ucd_rsp_ptr) &&
scsi_host_in_recovery(hba->host)) {
schedule_work(&hba->eeh_work);
+ dev_info(hba->dev, "execption event reported\n");
+ }
+
break;
case UPIU_TRANSACTION_REJECT_UPIU:
/* TODO: handle Reject UPIU Response */
cmd = lrbp->cmd;
if (cmd) {
ufshcd_add_command_trace(hba, index, "complete");
+ result = ufshcd_vops_crypto_engine_clear(hba, lrbp);
+ if (result) {
+ dev_err(hba->dev,
+ "%s: failed to clear crypto engine (%d)\n",
+ __func__, result);
+ }
result = ufshcd_transfer_rsp_status(hba, lrbp);
- scsi_dma_unmap(cmd);
cmd->result = result;
if (reason)
set_host_byte(cmd, reason);
clear_bit_unlock(index, &hba->lrb_in_use);
/* Do not touch lrbp after scsi done */
cmd->scsi_done(cmd);
+#ifdef CONFIG_SCSI_UFS_CMD_LOGGING
+ exynos_ufs_cmd_log_end(hba, index);
+#endif
__ufshcd_release(hba);
+
+ if (hba->monitor.flag & UFSHCD_MONITOR_LEVEL1)
+ dev_info(hba->dev, "Transfer Done(%d)\n",
+ index);
+
} else if (lrbp->command_type == UTP_CMD_TYPE_DEV_MANAGE ||
lrbp->command_type == UTP_CMD_TYPE_UFS_STORAGE) {
if (hba->dev_cmd.complete) {
/* clear corresponding bits of completed commands */
hba->outstanding_reqs ^= completed_reqs;
-
+#if defined(CONFIG_PM_DEVFREQ)
ufshcd_clk_scaling_update_busy(hba);
-
+#endif
/* we might have free'd some tags above */
wake_up(&hba->dev_cmd.tag_wq);
}
* ufshcd_transfer_req_compl - handle SCSI and query command completion
* @hba: per adapter instance
*/
-static void ufshcd_transfer_req_compl(struct ufs_hba *hba)
+static void ufshcd_transfer_req_compl(struct ufs_hba *hba, int reason)
{
unsigned long completed_reqs;
u32 tr_doorbell;
* false interrupt if device completes another request after resetting
* aggregation and before reading the DB.
*/
- if (ufshcd_is_intr_aggr_allowed(hba))
+ if (!ufshcd_can_reset_intr_aggr(hba) && ufshcd_is_intr_aggr_allowed(hba))
ufshcd_reset_intr_aggr(hba);
tr_doorbell = ufshcd_readl(hba, REG_UTP_TRANSFER_REQ_DOOR_BELL);
completed_reqs = tr_doorbell ^ hba->outstanding_reqs;
- __ufshcd_transfer_req_compl(hba, completed_reqs);
+ __ufshcd_transfer_req_compl(hba, reason, completed_reqs);
}
/**
goto out;
}
- if (curr_status >= status)
+ if (curr_status >= status) {
err = ufshcd_enable_auto_bkops(hba);
+ if (!err)
+ dev_info(hba->dev, "%s: auto_bkops enabled, status : %d\n",
+ __func__, curr_status);
+ }
else
err = ufshcd_disable_auto_bkops(hba);
out:
hba = container_of(work, struct ufs_hba, eeh_work);
pm_runtime_get_sync(hba->dev);
+ scsi_block_requests(hba->host);
err = ufshcd_get_ee_status(hba, &status);
if (err) {
dev_err(hba->dev, "%s: failed to get exception status %d\n",
ufshcd_bkops_exception_event_handler(hba);
out:
+ scsi_unblock_requests(hba->host);
pm_runtime_put_sync(hba->dev);
return;
}
/* Complete requests that have door-bell cleared */
static void ufshcd_complete_requests(struct ufs_hba *hba)
{
- ufshcd_transfer_req_compl(hba);
+ ufshcd_transfer_req_compl(hba, 0);
ufshcd_tmc_handler(hba);
}
static void ufshcd_err_handler(struct work_struct *work)
{
struct ufs_hba *hba;
+ struct ufs_vreg_info *info;
+ struct exynos_ufs *ufs;
unsigned long flags;
u32 err_xfer = 0;
u32 err_tm = 0;
bool needs_reset = false;
hba = container_of(work, struct ufs_hba, eh_work);
+ info = &hba->vreg_info;
pm_runtime_get_sync(hba->dev);
ufshcd_hold(hba, false);
+ ufs = to_exynos_ufs(hba);
+ if (hba->saved_err & UIC_ERROR) {
+ dev_err(hba->dev, ": CLKSTOP CTRL(0x%04x):\t\t\t\t0x%08x\n",
+ HCI_CLKSTOP_CTRL, hci_readl(ufs, HCI_CLKSTOP_CTRL));
+ dev_err(hba->dev, ": FORCE HCS(0x%04x):\t\t\t\t0x%08x\n",
+ HCI_FORCE_HCS, hci_readl(ufs, HCI_FORCE_HCS));
+ }
+
+ /* Dump debugging information to system memory */
+ ufshcd_vops_dbg_register_dump(hba);
+
+ /* Dump UFS power & reset_n GPIO status */
+ if (gpio_is_valid(info->ufs_power_gpio))
+ dev_info(hba->dev, "%s: UFS power pin: 0x%08x\n", __func__, gpio_get_value(info->ufs_power_gpio));
+ if (gpio_is_valid(info->ufs_reset_n_gpio))
+ dev_info(hba->dev, "%s: RESET_N: 0x%08x\n", __func__, gpio_get_value(info->ufs_reset_n_gpio));
+
spin_lock_irqsave(hba->host->host_lock, flags);
if (hba->ufshcd_state == UFSHCD_STATE_RESET)
goto out;
hba->ufshcd_state = UFSHCD_STATE_RESET;
ufshcd_set_eh_in_progress(hba);
+ exynos_ufs_show_uic_info(hba);
/* Complete requests that have door-bell cleared by h/w */
ufshcd_complete_requests(hba);
if ((hba->saved_err & INT_FATAL_ERRORS) ||
((hba->saved_err & UIC_ERROR) &&
(hba->saved_uic_err & (UFSHCD_UIC_DL_PA_INIT_ERROR |
+ UFSHCD_UIC_DL_ERROR |
UFSHCD_UIC_DL_NAC_RECEIVED_ERROR |
UFSHCD_UIC_DL_TCx_REPLAY_ERROR))))
needs_reset = true;
* slot forcefully.
*/
if (hba->outstanding_reqs == max_doorbells)
- __ufshcd_transfer_req_compl(hba,
+ __ufshcd_transfer_req_compl(hba, 0,
(1UL << (hba->nutrs - 1)));
spin_unlock_irqrestore(hba->host->host_lock, flags);
+
+ /* Fatal errors need reset */
+ if (err_xfer || err_tm || (hba->saved_err & INT_FATAL_ERRORS) ||
+ ((hba->saved_err & UIC_ERROR) &&
+ ((hba->saved_uic_err & UFSHCD_UIC_DL_PA_INIT_ERROR) ||
+ (hba->saved_uic_err & UFSHCD_UIC_DL_ERROR))))
+ dev_err(hba->dev,
+ "%s: saved_err:0x%x, saved_uic_err:0x%x\n",
+ __func__, hba->saved_err, hba->saved_uic_err);
+
err = ufshcd_reset_and_restore(hba);
spin_lock_irqsave(hba->host->host_lock, flags);
if (err) {
spin_lock_irqsave(hba->host->host_lock, flags);
hba->ufshcd_state = UFSHCD_STATE_ERROR;
spin_unlock_irqrestore(hba->host->host_lock, flags);
+
dev_err(hba->dev, "%s: reset and restore failed\n",
__func__);
- hba->ufshcd_state = UFSHCD_STATE_ERROR;
}
hba->saved_err = 0;
hba->saved_uic_err = 0;
if (reg & UIC_DATA_LINK_LAYER_ERROR_PA_INIT)
hba->uic_error |= UFSHCD_UIC_DL_PA_INIT_ERROR;
- else if (hba->dev_quirks &
+ else if (reg & UIC_DATA_LINK_LAYER_ERROR_PA_ERROR_IND_RECEIVED) {
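+		/*
+		 * Tolerate occasional PA error indications; escalate to a
+		 * fatal PA_INIT error (full recovery) only after a burst of
+		 * them. The counter is cleared on interrupt passes that
+		 * report no error.
+		 */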
+ if (hba->saved_uic_phy_err_cnt > 10) {
+ hba->uic_error |= UFSHCD_UIC_DL_PA_INIT_ERROR;
+ hba->saved_uic_phy_err_cnt = 0;
+ } else
+ hba->saved_uic_phy_err_cnt++;
+ } else if (hba->dev_quirks &
UFS_DEVICE_QUIRK_RECOVERY_FROM_DL_NAC_ERRORS) {
if (reg & UIC_DATA_LINK_LAYER_ERROR_NAC_RECEIVED)
hba->uic_error |=
hba->errors = UFSHCD_ERROR_MASK & intr_status;
if (hba->errors)
ufshcd_check_errors(hba);
+ else
+ hba->saved_uic_phy_err_cnt = 0;
if (intr_status & UFSHCD_UIC_MASK)
ufshcd_uic_cmd_compl(hba, intr_status);
ufshcd_tmc_handler(hba);
if (intr_status & UTP_TRANSFER_REQ_COMPL)
- ufshcd_transfer_req_compl(hba);
+ ufshcd_transfer_req_compl(hba, 0);
+
+ /* Interrupt disable for stop UIC interrupts storm */
+ if (hba->saved_uic_err && (hba->ufshcd_state != UFSHCD_STATE_RESET))
+ ufshcd_disable_intr(hba, UIC_ERROR);
}
/**
task_req_upiup->input_param1 = cpu_to_be32(lun_id);
task_req_upiup->input_param2 = cpu_to_be32(task_id);
- ufshcd_vops_setup_task_mgmt(hba, free_slot, tm_function);
-
/* send command to the controller */
if (hba->vops && hba->vops->set_nexus_t_task_mgmt)
hba->vops->set_nexus_t_task_mgmt(hba, free_slot, tm_function);
hba = shost_priv(host);
tag = cmd->request->tag;
+ /* secure log */
+#ifdef CONFIG_EXYNOS_SMC_LOGGING
+ exynos_smc(SMC_CMD_UFS_LOG, 1, 0, hba->secure_log.paddr);
+#endif
+
+ /* Dump debugging information to system memory */
+ ufshcd_vops_dbg_register_dump(hba);
+ exynos_ufs_show_uic_info(hba);
+
lrbp = &hba->lrb[tag];
err = ufshcd_issue_tm_cmd(hba, lrbp->lun, 0, UFS_LOGICAL_RESET, &resp);
if (err || resp != UPIU_TASK_MANAGEMENT_FUNC_COMPL) {
}
}
spin_lock_irqsave(host->host_lock, flags);
- ufshcd_transfer_req_compl(hba);
+ ufshcd_transfer_req_compl(hba, DID_RESET);
spin_unlock_irqrestore(host->host_lock, flags);
out:
hba->req_abort_count = 0;
if (!err) {
+ dev_info(hba->dev, "%s: LU reset succeeded\n", __func__);
err = SUCCESS;
} else {
dev_err(hba->dev, "%s: failed with err %d\n", __func__, err);
if (lrbp->lun == UFS_UPIU_UFS_DEVICE_WLUN)
return ufshcd_eh_host_reset_handler(cmd);
+ /* secure log */
+#ifdef CONFIG_EXYNOS_SMC_LOGGING
+ exynos_smc(SMC_CMD_UFS_LOG, 1, 0, hba->secure_log.paddr);
+#endif
+
+ if (cmd->cmnd[0] == READ_10 || cmd->cmnd[0] == WRITE_10) {
+ unsigned long lba = (unsigned long) ((cmd->cmnd[2] << 24) |
+ (cmd->cmnd[3] << 16) |
+ (cmd->cmnd[4] << 8) |
+ (cmd->cmnd[5] << 0));
+ unsigned int sct = (cmd->cmnd[7] << 8) |
+ (cmd->cmnd[8] << 0);
+
+ dev_err(hba->dev, "%s: tag:%d, cmd:0x%x, "
+ "lba:0x%08lx, sct:0x%04x, retries %d\n",
+ __func__, tag, cmd->cmnd[0], lba, sct, cmd->retries);
+ } else {
+ dev_err(hba->dev, "%s: tag:%d, cmd:0x%x, retries %d\n",
+ __func__, tag, cmd->cmnd[0], cmd->retries);
+ }
+
ufshcd_hold(hba, false);
+
+ /* Dump debugging information to system memory */
+ ufshcd_vops_dbg_register_dump(hba);
reg = ufshcd_readl(hba, REG_UTP_TRANSFER_REQ_DOOR_BELL);
/* If command is already aborted/completed, return SUCCESS */
if (!(test_bit(tag, &hba->outstanding_reqs))) {
dev_err(hba->dev,
"%s: cmd was completed, but without a notifying intr, tag = %d",
__func__, tag);
+ goto clean;
}
/* Print Transfer Request of aborted task */
__func__, tag, err);
if (!err)
err = resp; /* service response error */
+ dev_err(hba->dev,
+ "%s: query task failed with err %d\n",
+ __func__, err);
goto out;
}
}
if (!poll_cnt) {
err = -EBUSY;
+ dev_err(hba->dev,
+ "%s: cmd might be missed, not pending in device\n",
+ __func__);
goto out;
}
__func__, tag, err);
goto out;
}
-
+clean:
scsi_dma_unmap(cmd);
spin_lock_irqsave(host->host_lock, flags);
*/
static int ufshcd_host_reset_and_restore(struct ufs_hba *hba)
{
- int err;
+ int err = 0;
unsigned long flags;
/* Reset the host controller */
ufshcd_hba_stop(hba, false);
spin_unlock_irqrestore(hba->host->host_lock, flags);
+#if defined(CONFIG_PM_DEVFREQ)
/* scale up clocks to max frequency before full reinitialization */
ufshcd_scale_clks(hba, true);
+#endif
/* Establish the link again and restore the device */
- err = ufshcd_probe_hba(hba);
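+	/*
+	 * With CONFIG_SCSI_UFS_ASYNC_RELINK, re-probing after a reset in a
+	 * PM context is pushed to the async domain so resume is not blocked
+	 * on full link bring-up.
+	 */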
+#ifdef CONFIG_SCSI_UFS_ASYNC_RELINK
+ if (hba->pm_op_in_progress)
+ async_schedule(ufshcd_async_scan, hba);
+ else
+#endif
+ {
+ err = ufshcd_probe_hba(hba);
if (!err && (hba->ufshcd_state != UFSHCD_STATE_OPERATIONAL)) {
dev_err(hba->dev, "%s: failed\n", __func__);
err = -EIO;
}
+ }
+
spin_lock_irqsave(hba->host->host_lock, flags);
ufshcd_clear_eh_in_progress(hba);
spin_unlock_irqrestore(hba->host->host_lock, flags);
* outstanding requests in s/w here.
*/
spin_lock_irqsave(hba->host->host_lock, flags);
- ufshcd_transfer_req_compl(hba);
+ ufshcd_transfer_req_compl(hba, DID_RESET);
ufshcd_tmc_handler(hba);
spin_unlock_irqrestore(hba->host->host_lock, flags);
static int ufshcd_scsi_add_wlus(struct ufs_hba *hba)
{
int ret = 0;
- struct scsi_device *sdev_rpmb;
struct scsi_device *sdev_boot;
hba->sdev_ufs_device = __scsi_add_device(hba->host, 0, 0,
}
scsi_device_put(sdev_boot);
- sdev_rpmb = __scsi_add_device(hba->host, 0, 0,
+ hba->sdev_rpmb = __scsi_add_device(hba->host, 0, 0,
ufshcd_upiu_wlun_to_scsi_wlun(UFS_UPIU_RPMB_WLUN), NULL);
- if (IS_ERR(sdev_rpmb)) {
- ret = PTR_ERR(sdev_rpmb);
+ if (IS_ERR(hba->sdev_rpmb)) {
+ ret = PTR_ERR(hba->sdev_rpmb);
goto remove_sdev_boot;
}
- scsi_device_put(sdev_rpmb);
+ scsi_device_put(hba->sdev_rpmb);
goto out;
remove_sdev_boot:
return ret;
}
-/**
- * ufshcd_quirk_tune_host_pa_tactivate - Ensures that host PA_TACTIVATE is
- * less than device PA_TACTIVATE time.
- * @hba: per-adapter instance
- *
- * Some UFS devices require host PA_TACTIVATE to be lower than device
- * PA_TACTIVATE, we need to enable UFS_DEVICE_QUIRK_HOST_PA_TACTIVATE quirk
- * for such devices.
- *
- * Returns zero on success, non-zero error value on failure.
- */
-static int ufshcd_quirk_tune_host_pa_tactivate(struct ufs_hba *hba)
-{
- int ret = 0;
- u32 granularity, peer_granularity;
- u32 pa_tactivate, peer_pa_tactivate;
- u32 pa_tactivate_us, peer_pa_tactivate_us;
- u8 gran_to_us_table[] = {1, 4, 8, 16, 32, 100};
-
- ret = ufshcd_dme_get(hba, UIC_ARG_MIB(PA_GRANULARITY),
- &granularity);
- if (ret)
- goto out;
-
- ret = ufshcd_dme_peer_get(hba, UIC_ARG_MIB(PA_GRANULARITY),
- &peer_granularity);
- if (ret)
- goto out;
-
- if ((granularity < PA_GRANULARITY_MIN_VAL) ||
- (granularity > PA_GRANULARITY_MAX_VAL)) {
- dev_err(hba->dev, "%s: invalid host PA_GRANULARITY %d",
- __func__, granularity);
- return -EINVAL;
- }
-
- if ((peer_granularity < PA_GRANULARITY_MIN_VAL) ||
- (peer_granularity > PA_GRANULARITY_MAX_VAL)) {
- dev_err(hba->dev, "%s: invalid device PA_GRANULARITY %d",
- __func__, peer_granularity);
- return -EINVAL;
- }
-
- ret = ufshcd_dme_get(hba, UIC_ARG_MIB(PA_TACTIVATE), &pa_tactivate);
- if (ret)
- goto out;
-
- ret = ufshcd_dme_peer_get(hba, UIC_ARG_MIB(PA_TACTIVATE),
- &peer_pa_tactivate);
- if (ret)
- goto out;
-
- pa_tactivate_us = pa_tactivate * gran_to_us_table[granularity - 1];
- peer_pa_tactivate_us = peer_pa_tactivate *
- gran_to_us_table[peer_granularity - 1];
-
- if (pa_tactivate_us > peer_pa_tactivate_us) {
- u32 new_peer_pa_tactivate;
-
- new_peer_pa_tactivate = pa_tactivate_us /
- gran_to_us_table[peer_granularity - 1];
- new_peer_pa_tactivate++;
- ret = ufshcd_dme_peer_set(hba, UIC_ARG_MIB(PA_TACTIVATE),
- new_peer_pa_tactivate);
- }
-
-out:
- return ret;
-}
-
static void ufshcd_tune_unipro_params(struct ufs_hba *hba)
{
if (ufshcd_is_unipro_pa_params_tuning_req(hba)) {
/* set 1ms timeout for PA_TACTIVATE */
ufshcd_dme_set(hba, UIC_ARG_MIB(PA_TACTIVATE), 10);
- if (hba->dev_quirks & UFS_DEVICE_QUIRK_HOST_PA_TACTIVATE)
- ufshcd_quirk_tune_host_pa_tactivate(hba);
- ufshcd_vops_apply_dev_quirks(hba);
}
static void ufshcd_clear_dbg_ufs_stats(struct ufs_hba *hba)
static int ufshcd_probe_hba(struct ufs_hba *hba)
{
struct ufs_dev_desc card = {0};
+ struct ufs_pa_layer_attr *pwr_info = &hba->max_pwr_info.info;
+ struct ufs_vreg_info *info = &hba->vreg_info;
int re_cnt = 0;
- int ret;
+ int ret, link_startup_fail = 0, device_reset = 0;
ktime_t start = ktime_get();
unsigned long flags;
retry:
+	/* For device power control when link startup fails. */
+ if (link_startup_fail || device_reset) {
+ ufshcd_vreg_set_lpm(hba);
+ ret = ufshcd_vreg_set_hpm(hba);
+ device_reset = 0;
+
+ if (gpio_is_valid(info->ufs_power_gpio))
+ dev_info(hba->dev, "%s: UFS power pin: 0x%08x\n", __func__, gpio_get_value(info->ufs_power_gpio));
+ if (gpio_is_valid(info->ufs_reset_n_gpio))
+ dev_info(hba->dev, "%s: RESET_N: 0x%08x\n", __func__, gpio_get_value(info->ufs_reset_n_gpio));
+ if (ret)
+ goto out;
+ }
+
ret = ufshcd_hba_enable(hba);
if (ret)
goto out;
ret = ufshcd_link_startup(hba);
- if (ret)
+ if (ret) {
+ link_startup_fail = 1;
goto out;
+ }
+ link_startup_fail = 0;
+
+ dev_info(hba->dev, "UFS link established\n");
/* set the default level for urgent bkops */
hba->urgent_bkops_lvl = BKOPS_STATUS_PERF_IMPACT;
"%s: Failed getting max supported power mode\n",
__func__);
} else {
+ if ((pwr_info->lane_rx != pwr_info->peer_available_lane_rx)
+ || (pwr_info->lane_tx != pwr_info->peer_available_lane_tx)) {
+ dev_info(hba->dev,
+ "%s: availabele lanes, Host:Device Lane tx %d%d rx %d:%d\n",
+ __func__,
+ pwr_info->lane_tx, pwr_info->peer_available_lane_tx,
+ pwr_info->lane_rx, pwr_info->peer_available_lane_rx);
+ }
ret = ufshcd_config_pwr_mode(hba, &hba->max_pwr_info.info);
if (ret) {
dev_err(hba->dev, "%s: Failed setting power mode, err = %d\n",
__func__, ret);
goto out;
}
+
+ if (hba->max_pwr_info.info.pwr_rx == FAST_MODE ||
+ hba->max_pwr_info.info.pwr_tx == FAST_MODE ||
+ hba->max_pwr_info.info.pwr_rx == FASTAUTO_MODE ||
+ hba->max_pwr_info.info.pwr_tx == FASTAUTO_MODE)
+ dev_info(hba->dev, "HS mode configured\n");
}
/* set the state as operational after switching to desired gear */
+ spin_lock_irqsave(hba->host->host_lock, flags);
hba->ufshcd_state = UFSHCD_STATE_OPERATIONAL;
+ spin_unlock_irqrestore(hba->host->host_lock, flags);
/*
* If we are in error handling context or in power management callbacks
* context, no need to scan the host
*/
- if (!ufshcd_eh_in_progress(hba) && !hba->pm_op_in_progress) {
+ if (!ufshcd_eh_in_progress(hba) && !hba->pm_op_in_progress
+ && !hba->async_resume) {
bool flag;
/* clear any previous UFS device information */
memset(&hba->dev_info, 0, sizeof(hba->dev_info));
- if (!ufshcd_query_flag_retry(hba, UPIU_QUERY_OPCODE_READ_FLAG,
- QUERY_FLAG_IDN_PWR_ON_WPE, &flag))
+ ret = ufshcd_query_flag_retry(hba, UPIU_QUERY_OPCODE_READ_FLAG,
+ QUERY_FLAG_IDN_PWR_ON_WPE, &flag);
+ if (!ret)
hba->dev_info.f_power_on_wp_en = flag;
+ else {
+ device_reset = 1;
+ goto out;
+ }
+ device_reset = 0;
if (!hba->is_init_prefetch)
ufshcd_init_icc_levels(hba);
sizeof(struct ufs_pa_layer_attr));
hba->clk_scaling.saved_pwr_info.is_valid = true;
if (!hba->devfreq) {
+#if defined(CONFIG_PM_DEVFREQ)
hba->devfreq = devm_devfreq_add_device(hba->dev,
&ufs_devfreq_profile,
"simple_ondemand",
NULL);
+#endif
if (IS_ERR(hba->devfreq)) {
ret = PTR_ERR(hba->devfreq);
dev_err(hba->dev, "Unable to register with devfreq %d\n",
pm_runtime_put_sync(hba->dev);
}
+ hba->host->wlun_clr_uac = true;
if (!hba->is_init_prefetch)
hba->is_init_prefetch = true;
dev_err(hba->dev, "%s failed with err %d, retrying:%d\n",
__func__, ret, re_cnt);
goto retry;
+ } else if (ret && re_cnt >= UFS_LINK_SETUP_RETRIES) {
+ dev_err(hba->dev, "%s failed after retries with err %d\n",
+ __func__, ret);
+ exynos_ufs_dump_uic_info(hba);
spin_lock_irqsave(hba->host->host_lock, flags);
- hba->ufshcd_state = UFSHCD_STATE_OPERATIONAL;
+ hba->ufshcd_state = UFSHCD_STATE_ERROR;
spin_unlock_irqrestore(hba->host->host_lock, flags);
}
- }
+
/*
* If we failed to initialize the device or the device is not
* present, turn off the power/clocks etc.
trace_ufshcd_init(dev_name(hba->dev), ret,
ktime_to_us(ktime_sub(ktime_get(), start)),
hba->curr_dev_pwr_mode, hba->uic_link_state);
+
+ if (!ret) {
+ /*
+ * Inform scsi mid-layer that we did reset and allow to handle
+ * Unit Attention properly.
+ */
+ spin_lock_irqsave(hba->host->host_lock, flags);
+ scsi_report_bus_reset(hba->host, 0);
+ spin_unlock_irqrestore(hba->host->host_lock, flags);
+ }
+
+ hba->async_resume = false;
+
return ret;
}
static void ufshcd_async_scan(void *data, async_cookie_t cookie)
{
struct ufs_hba *hba = (struct ufs_hba *)data;
+ int err = 0;
- ufshcd_probe_hba(hba);
+ if (hba->async_resume) {
+ scsi_block_requests(hba->host);
+ err = ufshcd_probe_hba(hba);
+ if (err)
+ goto err;
+
+ if (!ufshcd_is_ufs_dev_active(hba)) {
+ scsi_unblock_requests(hba->host);
+ ufshcd_set_dev_pwr_mode(hba, UFS_ACTIVE_PWR_MODE);
+ scsi_block_requests(hba->host);
+ }
+
+ /*
+ * If BKOPs operations are urgently needed at this moment then
+ * keep auto-bkops enabled or else disable it.
+ */
+ ufshcd_urgent_bkops(hba);
+err:
+ scsi_unblock_requests(hba->host);
+ } else {
+ ufshcd_probe_hba(hba);
+ }
}
static enum blk_eh_timer_return ufshcd_eh_timed_out(struct scsi_cmnd *scmd)
return found ? BLK_EH_NOT_HANDLED : BLK_EH_RESET_TIMER;
}
+/**
+ * ufshcd_query_ioctl - perform user read queries
+ * @hba: per-adapter instance
+ * @lun: used for lun specific queries
+ * @buffer: user space buffer for reading and submitting query data and params
+ * @return: 0 for success, negative error code otherwise
+ *
+ * Expected/Submitted buffer structure is struct ufs_ioctl_query_data.
+ * It reads the opcode, idn and buf_size parameters, and puts the
+ * response in the buffer field while updating the used size in buf_size.
+ */
+static int ufshcd_query_ioctl(struct ufs_hba *hba, u8 lun, void __user *buffer)
+{
+ struct ufs_ioctl_query_data *ioctl_data;
+ int err = 0;
+ int length = 0;
+ void *data_ptr;
+ bool flag;
+ u32 att;
+ u8 index;
+ u8 *desc = NULL;
+
+ ioctl_data = kzalloc(sizeof(struct ufs_ioctl_query_data), GFP_KERNEL);
+ if (!ioctl_data) {
+ dev_err(hba->dev, "%s: Failed allocating %zu bytes\n", __func__,
+ sizeof(struct ufs_ioctl_query_data));
+ err = -ENOMEM;
+ goto out;
+ }
+
+ /* extract params from user buffer */
+ err = copy_from_user(ioctl_data, buffer,
+ sizeof(struct ufs_ioctl_query_data));
+ if (err) {
+ dev_err(hba->dev,
+ "%s: Failed copying buffer from user, err %d\n",
+ __func__, err);
+ goto out_release_mem;
+ }
+
+ /* verify legal parameters & send query */
+ switch (ioctl_data->opcode) {
+ case UPIU_QUERY_OPCODE_READ_DESC:
+ switch (ioctl_data->idn) {
+ case QUERY_DESC_IDN_DEVICE:
+ case QUERY_DESC_IDN_CONFIGURATION:
+ case QUERY_DESC_IDN_INTERCONNECT:
+ case QUERY_DESC_IDN_GEOMETRY:
+ case QUERY_DESC_IDN_POWER:
+ case QUERY_DESC_IDN_HEALTH:
+ index = 0;
+ break;
+ case QUERY_DESC_IDN_UNIT:
+ if (!ufs_is_valid_unit_desc_lun(lun)) {
+ dev_err(hba->dev,
+ "%s: No unit descriptor for lun 0x%x\n",
+ __func__, lun);
+ err = -EINVAL;
+ goto out_release_mem;
+ }
+ index = lun;
+ break;
+ default:
+ goto out_einval;
+ }
+ length = min_t(int, QUERY_DESC_MAX_SIZE,
+ ioctl_data->buf_size);
+ desc = kzalloc(length, GFP_KERNEL);
+ if (!desc) {
+ dev_err(hba->dev, "%s: Failed allocating %d bytes\n",
+ __func__, length);
+ err = -ENOMEM;
+ goto out_release_mem;
+ }
+ err = ufshcd_query_descriptor_retry(hba, ioctl_data->opcode,
+ ioctl_data->idn, index, 0, desc, &length);
+ break;
+ case UPIU_QUERY_OPCODE_READ_ATTR:
+ switch (ioctl_data->idn) {
+ case QUERY_ATTR_IDN_BOOT_LU_EN:
+ case QUERY_ATTR_IDN_POWER_MODE:
+ case QUERY_ATTR_IDN_ACTIVE_ICC_LVL:
+ case QUERY_ATTR_IDN_OOO_DATA_EN:
+ case QUERY_ATTR_IDN_BKOPS_STATUS:
+ case QUERY_ATTR_IDN_PURGE_STATUS:
+ case QUERY_ATTR_IDN_MAX_DATA_IN:
+ case QUERY_ATTR_IDN_MAX_DATA_OUT:
+ case QUERY_ATTR_IDN_REF_CLK_FREQ:
+ case QUERY_ATTR_IDN_CONF_DESC_LOCK:
+ case QUERY_ATTR_IDN_MAX_NUM_OF_RTT:
+ case QUERY_ATTR_IDN_EE_CONTROL:
+ case QUERY_ATTR_IDN_EE_STATUS:
+ case QUERY_ATTR_IDN_SECONDS_PASSED:
+ index = 0;
+ break;
+ case QUERY_ATTR_IDN_DYN_CAP_NEEDED:
+ case QUERY_ATTR_IDN_CORR_PRG_BLK_NUM:
+ index = lun;
+ break;
+ default:
+ goto out_einval;
+ }
+ err = ufshcd_query_attr_retry(hba, ioctl_data->opcode,
+ ioctl_data->idn, index, 0, &att);
+ break;
+ case UPIU_QUERY_OPCODE_READ_FLAG:
+ switch (ioctl_data->idn) {
+ case QUERY_FLAG_IDN_FDEVICEINIT:
+ case QUERY_FLAG_IDN_PERMANENT_WPE:
+ case QUERY_FLAG_IDN_PWR_ON_WPE:
+ case QUERY_FLAG_IDN_BKOPS_EN:
+ case QUERY_FLAG_IDN_PURGE_ENABLE:
+ case QUERY_FLAG_IDN_FPHYRESOURCEREMOVAL:
+ case QUERY_FLAG_IDN_BUSY_RTC:
+ break;
+ default:
+ goto out_einval;
+ }
+ err = ufshcd_query_flag_retry(hba, ioctl_data->opcode,
+ ioctl_data->idn, &flag);
+ break;
+ default:
+ goto out_einval;
+ }
+
+ if (err) {
+ dev_err(hba->dev, "%s: Query for idn %d failed\n", __func__,
+ ioctl_data->idn);
+ goto out_release_mem;
+ }
+
+ /*
+	 * Copy response data.
+	 * We might end up reading less data than what is specified in
+	 * "ioctl_data->buf_size", so update "ioctl_data->buf_size" to the
+	 * size actually read.
+ */
+ switch (ioctl_data->opcode) {
+ case UPIU_QUERY_OPCODE_READ_DESC:
+ ioctl_data->buf_size = min_t(int, ioctl_data->buf_size, length);
+ data_ptr = desc;
+ break;
+ case UPIU_QUERY_OPCODE_READ_ATTR:
+ ioctl_data->buf_size = sizeof(u32);
+ data_ptr = &att;
+ break;
+ case UPIU_QUERY_OPCODE_READ_FLAG:
+ ioctl_data->buf_size = 1;
+ data_ptr = &flag;
+ break;
+ default:
+ BUG_ON(true);
+ }
+
+ /* copy to user */
+ err = copy_to_user(buffer, ioctl_data,
+ sizeof(struct ufs_ioctl_query_data));
+ if (err)
+ dev_err(hba->dev, "%s: Failed copying back to user.\n",
+ __func__);
+ err = copy_to_user(buffer + sizeof(struct ufs_ioctl_query_data),
+ data_ptr, ioctl_data->buf_size);
+ if (err)
+ dev_err(hba->dev, "%s: err %d copying back to user.\n",
+ __func__, err);
+ goto out_release_mem;
+
+out_einval:
+ dev_err(hba->dev,
+ "%s: illegal ufs query ioctl data, opcode 0x%x, idn 0x%x\n",
+ __func__, ioctl_data->opcode, (unsigned int)ioctl_data->idn);
+ err = -EINVAL;
+out_release_mem:
+ kfree(ioctl_data);
+ kfree(desc);
+out:
+ return err;
+}
+
+/**
+ * ufshcd_ioctl - ufs ioctl callback registered in scsi_host
+ * @dev: scsi device required for per LUN queries
+ * @cmd: command opcode
+ * @buffer: user space buffer for transferring data
+ *
+ * Supported commands:
+ * UFS_IOCTL_QUERY
+ */
+static int ufshcd_ioctl(struct scsi_device *dev, int cmd, void __user *buffer)
+{
+ struct ufs_hba *hba = shost_priv(dev->host);
+ int err = 0;
+
+ BUG_ON(!hba);
+ if (!buffer) {
+ if (cmd != SCSI_UFS_REQUEST_SENSE) {
+ dev_err(hba->dev, "%s: User buffer is NULL!\n", __func__);
+ return -EINVAL;
+ }
+ }
+ switch (cmd) {
+ case SCSI_UFS_REQUEST_SENSE:
+ err = ufshcd_send_request_sense(hba, hba->sdev_rpmb);
+ if (err) {
+ dev_warn(hba->dev, "%s failed to clear uac on rpmb(w-lu) %d\n",
+ __func__, err);
+ }
+ hba->host->wlun_clr_uac = false;
+ break;
+ case UFS_IOCTL_QUERY:
+ //pm_runtime_get_sync(hba->dev);
+ err = ufshcd_query_ioctl(hba, ufshcd_scsi_to_upiu_lun(dev->lun),
+ buffer);
+ //pm_runtime_put_sync(hba->dev);
+ break;
+ case UFS_IOCTL_BLKROSET:
+ err = -ENOIOCTLCMD;
+ break;
+ default:
+ err = -EINVAL;
+ dev_err(hba->dev, "%s: Illegal ufs-IOCTL cmd %d\n", __func__,
+ cmd);
+ break;
+ }
+
+ return err;
+}
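+/*
+ * Userspace usage sketch for UFS_IOCTL_QUERY (device path and descriptor
+ * size are illustrative; the struct layout comes from scsi/ufs/ioctl.h):
+ *
+ *	struct ufs_ioctl_query_data *qd;
+ *
+ *	qd = calloc(1, sizeof(*qd) + QUERY_DESC_MAX_SIZE);
+ *	qd->opcode = UPIU_QUERY_OPCODE_READ_DESC;
+ *	qd->idn = QUERY_DESC_IDN_HEALTH;
+ *	qd->buf_size = QUERY_DESC_MAX_SIZE;
+ *	ioctl(open("/dev/block/sda", O_RDONLY), UFS_IOCTL_QUERY, qd);
+ *
+ * The response data follows the header in the same buffer, and
+ * qd->buf_size is updated to the size actually returned.
+ */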
static struct scsi_host_template ufshcd_driver_template = {
.module = THIS_MODULE,
.name = UFSHCD,
.eh_device_reset_handler = ufshcd_eh_device_reset_handler,
.eh_host_reset_handler = ufshcd_eh_host_reset_handler,
.eh_timed_out = ufshcd_eh_timed_out,
+ .ioctl = ufshcd_ioctl,
.this_id = -1,
.sg_tablesize = SG_ALL,
.cmd_per_lun = UFSHCD_CMD_PER_LUN,
if (list_empty(head))
goto out;
- ret = ufshcd_vops_setup_clocks(hba, on, PRE_CHANGE);
- if (ret)
- return ret;
+ ufshcd_vops_pre_setup_clocks(hba, on);
list_for_each_entry(clki, head, list) {
if (!IS_ERR_OR_NULL(clki->clk)) {
if (on && !clki->enabled) {
ret = clk_prepare_enable(clki->clk);
if (ret) {
+ hba->clk_gating.state = CLKS_DISABLE;
dev_err(hba->dev, "%s: %s prepare enable failed, %d\n",
__func__, clki->name, ret);
goto out;
}
}
- ret = ufshcd_vops_setup_clocks(hba, on, POST_CHANGE);
- if (ret)
- return ret;
+ ret = ufshcd_vops_setup_clocks(hba, on);
out:
if (ret) {
clki->max_freq, ret);
goto out;
}
+#if defined(CONFIG_PM_DEVFREQ)
clki->curr_freq = clki->max_freq;
+#endif
}
dev_dbg(dev, "%s: clk: %s, rate: %lu\n", __func__,
clki->name, clk_get_rate(clki->clk));
if (hba->is_powered) {
ufshcd_variant_hba_exit(hba);
ufshcd_setup_vreg(hba, false);
+#if defined(CONFIG_PM_DEVFREQ)
ufshcd_suspend_clkscaling(hba);
+#endif
if (ufshcd_is_clkscaling_supported(hba)) {
+#if defined(CONFIG_PM_DEVFREQ)
if (hba->devfreq)
ufshcd_suspend_clkscaling(hba);
+#endif
destroy_workqueue(hba->clk_scaling.workq);
}
ufshcd_setup_clocks(hba, false);
* callbacks hence set the RQF_PM flag so that it doesn't resume the
* already suspended childs.
*/
+ pr_info("%s %d\n", __func__, __LINE__);
ret = scsi_execute(sdp, cmd, DMA_NONE, NULL, 0, NULL, &sshdr,
- START_STOP_TIMEOUT, 0, 0, RQF_PM, NULL);
+ (23 * HZ), 0, 0, RQF_PM, NULL);
+ pr_info("%s %d\n", __func__, __LINE__);
if (ret) {
sdev_printk(KERN_WARNING, sdp,
"START_STOP failed for power mode: %d, result %x\n",
if (req_link_state == hba->uic_link_state)
return 0;
- if (req_link_state == UIC_LINK_HIBERN8_STATE) {
+ if (req_link_state == UIC_LINK_HIBERN8_STATE ||
+ req_link_state == UIC_LINK_OFF_STATE) {
ufshcd_set_link_trans_hibern8(hba);
ret = ufshcd_link_hibern8_ctrl(hba, true);
if (!ret)
hba->clk_gating.is_suspended = saved_is_suspended;
goto out;
- }
- /*
- * If autobkops is enabled, link can't be turned off because
- * turning off the link would also turn off the device.
- */
- else if ((req_link_state == UIC_LINK_OFF_STATE) &&
- (!check_for_bkops || (check_for_bkops &&
- !hba->auto_bkops_enabled))) {
- /*
- * Let's make sure that link is in low power mode, we are doing
- * this currently by putting the link in Hibern8. Otherway to
- * put the link in low power mode is to send the DME end point
- * to device and then send the DME reset command to local
- * unipro. But putting the link in hibern8 is much faster.
- */
- ret = ufshcd_uic_hibern8_enter(hba);
- if (ret)
- goto out;
+ }
+
+
/*
- * Change controller state to "reset state" which
- * should also put the link in off/reset state
+ * If autobkops is enabled, link can't be turned off because
+ * turning off the link would also turn off the device.
*/
+ if ((req_link_state == UIC_LINK_OFF_STATE) &&
+ (!check_for_bkops || (check_for_bkops &&
+ !hba->auto_bkops_enabled))) {
+ unsigned long flags;
+
+ /*
+ * Change controller state to "reset state" which
+ * should also put the link in off/reset state
+ */
+
spin_lock_irqsave(hba->host->host_lock, flags);
hba->ufshcd_state = UFSHCD_STATE_RESET;
ufshcd_hba_stop(hba, true);
spin_unlock_irqrestore(hba->host->host_lock, flags);
- /*
- * TODO: Check if we need any delay to make sure that
- * controller is reset
- */
- ufshcd_set_link_off(hba);
+ /*
+ * TODO: Check if we need any delay to make sure that
+ * controller is reset
+ */
+ ufshcd_set_link_off(hba);
+ }
}
out:
if (hba->clk_scaling.is_allowed) {
cancel_work_sync(&hba->clk_scaling.suspend_work);
cancel_work_sync(&hba->clk_scaling.resume_work);
+#if defined(CONFIG_PM_DEVFREQ)
ufshcd_suspend_clkscaling(hba);
+#endif
}
if (req_dev_pwr_mode == UFS_ACTIVE_PWR_MODE &&
}
}
+ if (ufshcd_is_shutdown_pm(pm_op))
+ ufs_shutdown_state = 1;
+
if ((req_dev_pwr_mode != hba->curr_dev_pwr_mode) &&
((ufshcd_is_runtime_pm(pm_op) && !hba->auto_bkops_enabled) ||
!ufshcd_is_runtime_pm(pm_op))) {
disable_clks:
+
/*
* Flush pending works before clock is disabled
*/
*/
ufshcd_disable_irq(hba);
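+	/* drop VCC/VCCQ to low-power mode; brief settle delay (assumed) */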
+ ufshcd_vreg_set_lpm(hba);
+ udelay(50);
if (gating_allowed) {
if (!ufshcd_is_link_active(hba))
goto out;
set_link_active:
+#if defined(CONFIG_PM_DEVFREQ)
if (hba->clk_scaling.is_allowed)
ufshcd_resume_clkscaling(hba);
+#endif
+
+ if (ufshcd_is_shutdown_pm(pm_op))
+ goto out;
+
+ ret = ufshcd_enable_irq(hba);
+ if (ret)
+ goto out;
+
if (ufshcd_is_link_hibern8(hba)) {
ufshcd_set_link_trans_active(hba);
if (!ufshcd_link_hibern8_ctrl(hba, false))
} else if (ufshcd_is_link_off(hba))
ufshcd_host_reset_and_restore(hba);
set_dev_active:
+ if (ufshcd_is_shutdown_pm(pm_op))
+ goto out;
+
if (!ufshcd_set_dev_pwr_mode(hba, UFS_ACTIVE_PWR_MODE))
ufshcd_disable_auto_bkops(hba);
enable_gating:
+#if defined(CONFIG_PM_DEVFREQ)
if (hba->clk_scaling.is_allowed)
ufshcd_resume_clkscaling(hba);
hba->clk_gating.is_suspended = false;
+#endif
ufshcd_release(hba);
out:
hba->pm_op_in_progress = 0;
+ dev_info(hba->dev, "UFS suspend done\n");
+
return ret;
}
ufshcd_hba_vreg_set_hpm(hba);
-
- /* enable the host irq as host controller would be active soon */
- ret = ufshcd_enable_irq(hba);
- if (ret)
- goto disable_irq_and_vops_clks;
-
ret = ufshcd_vreg_set_hpm(hba);
if (ret)
goto disable_irq_and_vops_clks;
if (ret)
goto disable_vreg;
}
+
+ /* enable the host irq as host controller would be active soon */
+ ret = ufshcd_enable_irq(hba);
+ if (ret)
+ goto disable_irq_and_vops_clks;
+
if (ufshcd_is_link_hibern8(hba)) {
ufshcd_set_link_trans_active(hba);
ret = ufshcd_link_hibern8_ctrl(hba, false);
goto vendor_suspend;
}
} else if (ufshcd_is_link_off(hba)) {
+#ifdef CONFIG_SCSI_UFS_ASYNC_RELINK
+ hba->async_resume = true;
+ ret = ufshcd_host_reset_and_restore(hba);
+ goto async_resume;
+#else
ret = ufshcd_host_reset_and_restore(hba);
+#endif
+
/*
* ufshcd_host_reset_and_restore() should have already
* set the link state as active
* keep auto-bkops enabled or else disable it.
*/
ufshcd_urgent_bkops(hba);
-
+#ifdef CONFIG_SCSI_UFS_ASYNC_RELINK
+async_resume:
+#endif
hba->clk_gating.is_suspended = false;
+#if defined(CONFIG_PM_DEVFREQ)
if (hba->clk_scaling.is_allowed)
ufshcd_resume_clkscaling(hba);
+#endif
/* Schedule clock gating in case of no access to UFS device yet */
ufshcd_release(hba);
ufshcd_link_state_transition(hba, old_link_state, 0);
vendor_suspend:
ufshcd_vops_suspend(hba, pm_op);
-disable_vreg:
- ufshcd_vreg_set_lpm(hba);
disable_irq_and_vops_clks:
ufshcd_disable_irq(hba);
+#if defined(CONFIG_PM_DEVFREQ)
if (hba->clk_scaling.is_allowed)
ufshcd_suspend_clkscaling(hba);
- ufshcd_setup_clocks(hba, false);
+#endif
+
+ if (gating_allowed)
+ ufshcd_setup_clocks(hba, false);
+disable_vreg:
+ ufshcd_vreg_set_lpm(hba);
out:
hba->pm_op_in_progress = 0;
+
+ if (hba->monitor.flag & UFSHCD_MONITOR_LEVEL1)
+ dev_info(hba->dev, "UFS resume done\n");
+
return ret;
}
trace_ufshcd_system_resume(dev_name(hba->dev), ret,
ktime_to_us(ktime_sub(ktime_get(), start)),
hba->curr_dev_pwr_mode, hba->uic_link_state);
+ if (!ret)
+ hba->is_sys_suspended = false;
return ret;
}
EXPORT_SYMBOL(ufshcd_system_resume);
ufshcd_hba_stop(hba, true);
ufshcd_exit_clk_gating(hba);
+#if defined(CONFIG_PM_DEVFREQ)
if (ufshcd_is_clkscaling_supported(hba))
device_remove_file(hba->dev, &hba->clk_scaling.enable_attr);
+#endif
ufshcd_hba_exit(hba);
}
EXPORT_SYMBOL_GPL(ufshcd_remove);
/* Initialize device management tag acquire wait queue */
init_waitqueue_head(&hba->dev_cmd.tag_wq);
- ufshcd_init_clk_gating(hba);
+ /* Initialize monitor */
+ ufshcd_init_monitor(hba);
+
+ err = ufshcd_init_clk_gating(hba);
+ if (err) {
+ dev_err(hba->dev, "init clk_gating failed\n");
+ goto out_disable;
+ }
/*
* In order to avoid any spurious interrupt immediately after
goto exit_gating;
}
+#if defined(CONFIG_PM_DEVFREQ)
if (ufshcd_is_clkscaling_supported(hba)) {
char wq_name[sizeof("ufs_clkscaling_00")];
ufshcd_clkscaling_init_sysfs(hba);
}
-
- /*
- * Set the default power management level for runtime and system PM.
- * Default power saving mode is to keep UFS link in Hibern8 state
- * and UFS device in sleep state.
- */
- hba->rpm_lvl = ufs_get_desired_pm_lvl_for_dev_link_state(
- UFS_SLEEP_PWR_MODE,
- UIC_LINK_HIBERN8_STATE);
- hba->spm_lvl = ufs_get_desired_pm_lvl_for_dev_link_state(
- UFS_SLEEP_PWR_MODE,
- UIC_LINK_HIBERN8_STATE);
+#endif
/* Hold auto suspend until async scan completes */
pm_runtime_get_sync(dev);
/*
- * We are assuming that device wasn't put in sleep/power-down
- * state exclusively during the boot stage before kernel.
- * This assumption helps avoid doing link startup twice during
- * ufshcd_probe_hba().
+ * The device-initialize-sequence hasn't been invoked yet.
+ * Set the device to power-off state
*/
- ufshcd_set_ufs_dev_active(hba);
+ ufshcd_set_ufs_dev_poweroff(hba);
async_schedule(ufshcd_async_scan, hba);
ufshcd_add_sysfs_nodes(hba);
return 0;
-out_remove_scsi_host:
- scsi_remove_host(hba->host);
exit_gating:
ufshcd_exit_clk_gating(hba);
out_disable: