--- /dev/null
+/*
+ * drivers/scsi/ufs/mphy.h
+ *
+ * Copyright (C) 2014 Samsung Electronics Co., Ltd.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#ifndef _MPHY_H_
+#define _MPHY_H_
+
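+/* MIPI M-PHY capability attribute IDs */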
+#define TX_HIBERN8TIME_CAP 0x0f
+#define TX_MIN_ACTIVATE_TIME 0x33
+
+#define RX_HS_G1_SYNC_LENGTH_CAP 0x8b
+#define RX_HS_G1_PREP_LENGTH_CAP 0x8c
+#define RX_HS_G2_SYNC_LENGTH_CAP 0x94
+#define RX_HS_G3_SYNC_LENGTH_CAP 0x95
+#define RX_HS_G2_PREP_LENGTH_CAP 0x96
+#define RX_HS_G3_PREP_LENGTH_CAP 0x97
+ #define SYNC_RANGE_FINE (0 << 6)
+ #define SYNC_RANGE_COARSE (1 << 6)
+ #define SYNC_LEN(x) ((x) & 0x3f)
+ #define PREP_LEN(x) ((x) & 0xf)
+#define RX_ADV_GRANULARITY_CAP 0x98
+ #define RX_ADV_FINE_GRAN_STEP(x) ((((x) & 0x3) << 1) | 0x1)
+#define TX_ADV_GRANULARITY_CAP 0x10
+ #define TX_ADV_FINE_GRAN_STEP(x) ((((x) & 0x3) << 1) | 0x1)
+#define RX_MIN_ACTIVATETIME_CAP 0x8f
+#define RX_HIBERN8TIME_CAP 0x92
+#define RX_ADV_HIBERN8TIME_CAP 0x99
+#define RX_ADV_MIN_ACTIVATETIME_CAP 0x9a
+
+#endif /* _MPHY_H_ */
#define MASK_QUERY_UPIU_FLAG_LOC 0xFF
/* Interrupt aggregation default timeout, unit: 40us */
-#define INT_AGGR_DEF_TO 0x02
+#define INT_AGGR_DEF_TO 0x01
+
+/* Link Hibernation delay, msecs */
+#define LINK_H8_DELAY 20
+
#define ufshcd_toggle_vreg(_dev, _vreg, _on) \
({ \
static void __ufshcd_suspend_clkscaling(struct ufs_hba *hba);
static int ufshcd_scale_clks(struct ufs_hba *hba, bool scale_up);
static irqreturn_t ufshcd_intr(int irq, void *__hba);
-static int ufshcd_config_pwr_mode(struct ufs_hba *hba,
- struct ufs_pa_layer_attr *desired_pwr_mode);
static int ufshcd_change_power_mode(struct ufs_hba *hba,
struct ufs_pa_layer_attr *pwr_mode);
static inline bool ufshcd_valid_tag(struct ufs_hba *hba, int tag)
usleep_range(min_sleep_time_us, min_sleep_time_us + 50);
}
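+
+/**
+ * ufshcd_dme_reset - UIC command for DME_RESET
+ * @hba: per adapter instance
+ *
+ * DME_RESET command is issued in order to reset the UniPro stack.
+ *
+ * Returns 0 on success, non-zero value on failure
+ */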
+static int ufshcd_dme_reset(struct ufs_hba *hba)
+{
+ struct uic_command uic_cmd = {0};
+ int ret;
+
+ uic_cmd.command = UIC_CMD_DME_RESET;
+ uic_cmd.argument1 = 0x1;
+
+ ret = ufshcd_send_uic_cmd(hba, &uic_cmd);
+ if (ret)
+ dev_err(hba->dev,
+ "dme-reset: error code %d\n", ret);
+
+ return ret;
+}
+
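+/**
+ * ufshcd_dme_enable - UIC command for DME_ENABLE
+ * @hba: per adapter instance
+ *
+ * DME_ENABLE command is issued in order to enable the UniPro stack.
+ *
+ * Returns 0 on success, non-zero value on failure
+ */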
+static int ufshcd_dme_enable(struct ufs_hba *hba)
+{
+ struct uic_command uic_cmd = {0};
+ int ret;
+
+ uic_cmd.command = UIC_CMD_DME_ENABLE;
+
+ ret = ufshcd_send_uic_cmd(hba, &uic_cmd);
+ if (ret)
+ dev_err(hba->dev,
+ "dme-enable: error code %d\n", ret);
+
+ return ret;
+}
+
/**
* ufshcd_dme_set_attr - UIC command for DME_SET, DME_PEER_SET
* @hba: per adapter instance
* @hba: per-adapter instance
* @desired_pwr_mode: desired power configuration
*/
-static int ufshcd_config_pwr_mode(struct ufs_hba *hba,
+int ufshcd_config_pwr_mode(struct ufs_hba *hba,
struct ufs_pa_layer_attr *desired_pwr_mode)
{
struct ufs_pa_layer_attr final_params = { 0 };
return ret;
}
+EXPORT_SYMBOL_GPL(ufshcd_config_pwr_mode);
/**
* ufshcd_complete_dev_init() - checks device readiness
}
/**
- * ufshcd_hba_enable - initialize the controller
+ * __ufshcd_hba_enable - initialize the controller
* @hba: per adapter instance
*
* The controller resets itself and controller firmware initialization
*
* Returns 0 on success, non-zero value on failure
*/
-static int ufshcd_hba_enable(struct ufs_hba *hba)
+static int __ufshcd_hba_enable(struct ufs_hba *hba)
{
int retry;
return ufshcd_disable_tx_lcc(hba, true);
}
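+
+/**
+ * ufshcd_hba_enable - enable the UFS host controller
+ * @hba: per adapter instance
+ *
+ * Performs an optional vendor specific host reset, then brings the
+ * controller up either through a DME_RESET/DME_ENABLE sequence (when
+ * the UFSHCD_QUIRK_USE_OF_HCE quirk is set) or through the standard
+ * HCE based sequence in __ufshcd_hba_enable().
+ *
+ * Returns 0 on success, non-zero value on failure
+ */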
+static int ufshcd_hba_enable(struct ufs_hba *hba)
+{
+ int ret;
+
+ if (hba->vops && hba->vops->host_reset)
+ hba->vops->host_reset(hba);
+
+ if (hba->quirks & UFSHCD_QUIRK_USE_OF_HCE) {
+ /* enable UIC related interrupts */
+ ufshcd_enable_intr(hba, UFSHCD_UIC_MASK);
+
+ ret = ufshcd_dme_reset(hba);
+ if (!ret)
+ ret = ufshcd_dme_enable(hba);
+ } else {
+ ret = __ufshcd_hba_enable(hba);
+ }
+ return ret;
+}
+
/**
* ufshcd_link_startup - Initialize unipro link startup
* @hba: per adapter instance
blk_queue_update_dma_pad(q, PRDT_DATA_BYTE_COUNT_PAD - 1);
blk_queue_max_segment_size(q, PRDT_DATA_BYTE_COUNT_MAX);
+ blk_queue_update_dma_alignment(q, PAGE_SIZE - 1);
return 0;
}
switch (ocs) {
case OCS_SUCCESS:
+ case OCS_FATAL_ERROR:
result = ufshcd_get_req_rsp(lrbp->ucd_rsp_ptr);
hba->ufs_stats.last_hibern8_exit_tstamp = ktime_set(0, 0);
switch (result) {
case OCS_MISMATCH_DATA_BUF_SIZE:
case OCS_MISMATCH_RESP_UPIU_SIZE:
case OCS_PEER_COMM_FAILURE:
- case OCS_FATAL_ERROR:
default:
result |= DID_ERROR << 16;
dev_err(hba->dev,
static int ufshcd_probe_hba(struct ufs_hba *hba)
{
struct ufs_dev_desc card = {0};
+ int re_cnt = 0;
int ret;
ktime_t start = ktime_get();
+retry:
ret = ufshcd_link_startup(hba);
if (ret)
goto out;
hba->is_init_prefetch = true;
out:
+ /* bound the retries so a persistent link failure cannot loop forever */
+ if (ret && re_cnt++ < 3) {
+ dev_err(hba->dev, "%s failed with err %d, retrying: %d\n",
+ __func__, ret, re_cnt);
+ goto retry;
+ }
/*
* If we failed to initialize the device or the device is not
* present, turn off the power/clocks etc.
struct ufs_pa_layer_attr *);
void (*setup_xfer_req)(struct ufs_hba *, int, bool);
void (*setup_task_mgmt)(struct ufs_hba *, int, u8);
- void (*hibern8_notify)(struct ufs_hba *, enum uic_cmd_dme,
- enum ufs_notify_change_status);
+ void (*hibern8_notify)(struct ufs_hba *, u8, bool);
int (*apply_dev_quirks)(struct ufs_hba *);
int (*suspend)(struct ufs_hba *, enum ufs_pm_op);
int (*resume)(struct ufs_hba *, enum ufs_pm_op);
int pm_op_in_progress;
struct ufshcd_lrb *lrb;
- unsigned long lrb_in_use;
+ volatile unsigned long lrb_in_use;
unsigned long outstanding_tasks;
unsigned long outstanding_reqs;
*/
#define UFSHCD_QUIRK_PRDT_BYTE_GRAN UFS_BIT(7)
+
+/*
+ * This quirk needs to be enabled if the host controller has to be
+ * initialized through a DME_RESET/DME_ENABLE sequence instead of the
+ * standard Host Controller Enable (HCE) based sequence.
+ */
+#define UFSHCD_QUIRK_USE_OF_HCE UFS_BIT(8)
unsigned int quirks; /* Deviations from standard UFSHCI spec. */
/* Device deviations from standard UFS device spec. */
u8 attr_set, u32 mib_val, u8 peer);
extern int ufshcd_dme_get_attr(struct ufs_hba *hba, u32 attr_sel,
u32 *mib_val, u8 peer);
+extern int ufshcd_config_pwr_mode(struct ufs_hba *hba,
+ struct ufs_pa_layer_attr *desired_pwr_mode);
/* UIC command interfaces for DME primitives */
#define DME_LOCAL 0
/* UECN - Host UIC Error Code Network Layer 40h */
#define UIC_NETWORK_LAYER_ERROR UFS_BIT(31)
#define UIC_NETWORK_LAYER_ERROR_CODE_MASK 0x7
+#define UIC_NETWORK_UNSUPPORTED_HEADER_TYPE BIT(0)
+#define UIC_NETWORK_BAD_DEVICEID_ENC BIT(1)
+#define UIC_NETWORK_LHDR_TRAP_PACKET_DROPPING BIT(2)
/* UECT - Host UIC Error Code Transport Layer 44h */
#define UIC_TRANSPORT_LAYER_ERROR UFS_BIT(31)
#define UIC_TRANSPORT_LAYER_ERROR_CODE_MASK 0x7F
+#define UIC_TRANSPORT_UNSUPPORTED_HEADER_TYPE BIT(0)
+#define UIC_TRANSPORT_UNKNOWN_CPORTID BIT(1)
+#define UIC_TRANSPORT_NO_CONNECTION_RX BIT(2)
+#define UIC_TRANSPORT_CONTROLLED_SEGMENT_DROPPING BIT(3)
+#define UIC_TRANSPORT_BAD_TC BIT(4)
+#define UIC_TRANSPORT_E2E_CREDIT_OVERFOW BIT(5)
+#define UIC_TRANSPORT_SAFETY_VALUE_DROPPING BIT(6)
/* UECDME - Host UIC Error Code DME 48h */
#define UIC_DME_ERROR UFS_BIT(31)