[COMMON] scsi: ufs: Add reset for PA_ERROR interrupt storming control.
[GitHub/LineageOS/android_kernel_motorola_exynos9610.git] drivers/scsi/ufs/ufshcd.c
1 /*
2 * Universal Flash Storage Host controller driver Core
3 *
4 * This code is based on drivers/scsi/ufs/ufshcd.c
5 * Copyright (C) 2011-2013 Samsung India Software Operations
6 * Copyright (c) 2013-2016, The Linux Foundation. All rights reserved.
7 *
8 * Authors:
9 * Santosh Yaraganavi <santosh.sy@samsung.com>
10 * Vinayak Holikatti <h.vinayak@samsung.com>
11 *
12 * This program is free software; you can redistribute it and/or
13 * modify it under the terms of the GNU General Public License
14 * as published by the Free Software Foundation; either version 2
15 * of the License, or (at your option) any later version.
16 * See the COPYING file in the top-level directory or visit
17 * <http://www.gnu.org/licenses/gpl-2.0.html>
18 *
19 * This program is distributed in the hope that it will be useful,
20 * but WITHOUT ANY WARRANTY; without even the implied warranty of
21 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
22 * GNU General Public License for more details.
23 *
24 * This program is provided "AS IS" and "WITH ALL FAULTS" and
25 * without warranty of any kind. You are solely responsible for
26 * determining the appropriateness of using and distributing
27 * the program and assume all risks associated with your exercise
28 * of rights with respect to the program, including but not limited
29 * to infringement of third party rights, the risks and costs of
30 * program errors, damage to or loss of data, programs or equipment,
31 * and unavailability or interruption of operations. Under no
32 * circumstances will the contributor of this Program be liable for
33 * any damages of any kind arising from your use or distribution of
34 * this program.
35 *
36 * The Linux Foundation chooses to take subject only to the GPLv2
37 * license terms, and distributes only under these terms.
38 */
39
40 #include <linux/async.h>
41 #if defined(CONFIG_PM_DEVFREQ)
42 #include <linux/devfreq.h>
43 #endif
44 #include <linux/nls.h>
45 #include <linux/smc.h>
46 #include <scsi/ufs/ioctl.h>
47 #include <linux/of.h>
48 #include <linux/blkdev.h>
49 #include <linux/gpio.h>
50
51 #include "ufshcd.h"
52 #include "ufs_quirks.h"
53 #include "unipro.h"
54 #include "ufs-exynos.h"
55 #include "ufs_quirks.h"
56
57 #define CREATE_TRACE_POINTS
58 #include <trace/events/ufs.h>
59
60 #define UFSHCD_REQ_SENSE_SIZE 18
61
62 #define UFSHCD_ENABLE_INTRS (UTP_TRANSFER_REQ_COMPL |\
63 UTP_TASK_REQ_COMPL |\
64 UFSHCD_ERROR_MASK)
65 /* UIC command timeout, unit: ms */
66 #define UIC_CMD_TIMEOUT 500
67
68 /* NOP OUT retries waiting for NOP IN response */
69 #define NOP_OUT_RETRIES 10
70 /* Timeout after 30 msecs if NOP OUT hangs without response */
71 #define NOP_OUT_TIMEOUT 30 /* msecs */
72
73 /* Query request retries */
74 #define QUERY_REQ_RETRIES 3
75 /* Query request timeout */
76 #define QUERY_REQ_TIMEOUT 1500 /* 1.5 seconds */
77 /*
78 * Query request timeout for fDeviceInit flag
79 * fDeviceInit query response time for some devices is so large that the
80 * default QUERY_REQ_TIMEOUT may not be enough for such devices.
81 */
82 #define QUERY_FDEVICEINIT_REQ_TIMEOUT 600 /* msec */
83
84 /* Task management command timeout */
85 #define TM_CMD_TIMEOUT 300 /* msecs */
86
87 /* maximum number of retries for a general UIC command */
88 #define UFS_UIC_COMMAND_RETRIES 3
89
90 /* maximum number of link-startup retries */
91 #define DME_LINKSTARTUP_RETRIES 3
92
93 /* Maximum retries for Hibern8 enter */
94 #define UIC_HIBERN8_ENTER_RETRIES 3
95
96 /* maximum number of reset retries before giving up */
97 #define MAX_HOST_RESET_RETRIES 5
98
99 /* Expose the flag value from utp_upiu_query.value */
100 #define MASK_QUERY_UPIU_FLAG_LOC 0xFF
101
102 /* Interrupt aggregation default timeout, unit: 40us */
103 #define INT_AGGR_DEF_TO 0x01
104
105 /* Link Hibernation delay, msecs */
106 #define LINK_H8_DELAY 20
107
108 /* UFS link setup retries */
109 #define UFS_LINK_SETUP_RETRIES 5
110
111 /* IOCTL opcode for command - ufs set device read only */
112 #define UFS_IOCTL_BLKROSET BLKROSET
113
114 #define ufshcd_toggle_vreg(_dev, _vreg, _on) \
115 ({ \
116 int _ret; \
117 if (_on) \
118 _ret = ufshcd_enable_vreg(_dev, _vreg); \
119 else \
120 _ret = ufshcd_disable_vreg(_dev, _vreg); \
121 _ret; \
122 })
123
124 static int ufs_shutdown_state = 0;
125
126 #define ufshcd_hex_dump(prefix_str, buf, len) \
127 print_hex_dump(KERN_ERR, prefix_str, DUMP_PREFIX_OFFSET, 16, 4, buf, len, false)
128
129 enum {
130 UFSHCD_MAX_CHANNEL = 0,
131 UFSHCD_MAX_ID = 1,
132 UFSHCD_CMD_PER_LUN = 32,
133 UFSHCD_CAN_QUEUE = 32,
134 };
135
136 /* UFSHCD states */
137 enum {
138 UFSHCD_STATE_RESET,
139 UFSHCD_STATE_ERROR,
140 UFSHCD_STATE_OPERATIONAL,
141 UFSHCD_STATE_EH_SCHEDULED,
142 };
143
144 /* UFSHCD error handling flags */
145 enum {
146 UFSHCD_EH_IN_PROGRESS = (1 << 0),
147 };
148
149 /* UFSHCD UIC layer error flags */
150 enum {
151 UFSHCD_UIC_DL_PA_INIT_ERROR = (1 << 0), /* Data link layer error */
152 UFSHCD_UIC_DL_NAC_RECEIVED_ERROR = (1 << 1), /* Data link layer error */
153 UFSHCD_UIC_DL_TCx_REPLAY_ERROR = (1 << 2), /* Data link layer error */
154 UFSHCD_UIC_NL_ERROR = (1 << 3), /* Network layer error */
155 UFSHCD_UIC_TL_ERROR = (1 << 4), /* Transport Layer error */
156 UFSHCD_UIC_DME_ERROR = (1 << 5), /* DME error */
157 UFSHCD_UIC_DL_ERROR = (1 << 6), /* Data link layer error */
158 };
159
160 #define ufshcd_set_eh_in_progress(h) \
161 ((h)->eh_flags |= UFSHCD_EH_IN_PROGRESS)
162 #define ufshcd_eh_in_progress(h) \
163 ((h)->eh_flags & UFSHCD_EH_IN_PROGRESS)
164 #define ufshcd_clear_eh_in_progress(h) \
165 ((h)->eh_flags &= ~UFSHCD_EH_IN_PROGRESS)
166
167 #define ufshcd_set_ufs_dev_active(h) \
168 ((h)->curr_dev_pwr_mode = UFS_ACTIVE_PWR_MODE)
169 #define ufshcd_set_ufs_dev_sleep(h) \
170 ((h)->curr_dev_pwr_mode = UFS_SLEEP_PWR_MODE)
171 #define ufshcd_set_ufs_dev_poweroff(h) \
172 ((h)->curr_dev_pwr_mode = UFS_POWERDOWN_PWR_MODE)
173 #define ufshcd_is_ufs_dev_active(h) \
174 ((h)->curr_dev_pwr_mode == UFS_ACTIVE_PWR_MODE)
175 #define ufshcd_is_ufs_dev_sleep(h) \
176 ((h)->curr_dev_pwr_mode == UFS_SLEEP_PWR_MODE)
177 #define ufshcd_is_ufs_dev_poweroff(h) \
178 ((h)->curr_dev_pwr_mode == UFS_POWERDOWN_PWR_MODE)
179
180 static struct ufs_pm_lvl_states ufs_pm_lvl_states[] = {
181 {UFS_ACTIVE_PWR_MODE, UIC_LINK_ACTIVE_STATE},
182 {UFS_ACTIVE_PWR_MODE, UIC_LINK_HIBERN8_STATE},
183 {UFS_SLEEP_PWR_MODE, UIC_LINK_ACTIVE_STATE},
184 {UFS_SLEEP_PWR_MODE, UIC_LINK_HIBERN8_STATE},
185 {UFS_POWERDOWN_PWR_MODE, UIC_LINK_HIBERN8_STATE},
186 {UFS_POWERDOWN_PWR_MODE, UIC_LINK_OFF_STATE},
187 };
188
189 static inline enum ufs_dev_pwr_mode
190 ufs_get_pm_lvl_to_dev_pwr_mode(enum ufs_pm_level lvl)
191 {
192 return ufs_pm_lvl_states[lvl].dev_state;
193 }
194
195 static inline enum uic_link_state
196 ufs_get_pm_lvl_to_link_pwr_state(enum ufs_pm_level lvl)
197 {
198 return ufs_pm_lvl_states[lvl].link_state;
199 }
200
201 static struct ufs_dev_fix ufs_fixups[] = {
202 /* UFS cards deviations table */
203 UFS_FIX(UFS_VENDOR_SAMSUNG, UFS_ANY_MODEL,
204 UFS_DEVICE_QUIRK_DELAY_BEFORE_LPM),
205 UFS_FIX(UFS_VENDOR_SAMSUNG, UFS_ANY_MODEL, UFS_DEVICE_NO_VCCQ),
206 UFS_FIX(UFS_VENDOR_SAMSUNG, UFS_ANY_MODEL,
207 UFS_DEVICE_QUIRK_RECOVERY_FROM_DL_NAC_ERRORS),
208 UFS_FIX(UFS_VENDOR_SAMSUNG, UFS_ANY_MODEL,
209 UFS_DEVICE_NO_FASTAUTO),
210 UFS_FIX(UFS_VENDOR_SAMSUNG, UFS_ANY_MODEL,
211 UFS_DEVICE_QUIRK_HOST_PA_TACTIVATE),
212 UFS_FIX(UFS_VENDOR_TOSHIBA, UFS_ANY_MODEL,
213 UFS_DEVICE_QUIRK_DELAY_BEFORE_LPM),
214 UFS_FIX(UFS_VENDOR_TOSHIBA, "THGLF2G9C8KBADG",
215 UFS_DEVICE_QUIRK_PA_TACTIVATE),
216 UFS_FIX(UFS_VENDOR_TOSHIBA, "THGLF2G9D8KBADG",
217 UFS_DEVICE_QUIRK_PA_TACTIVATE),
218 UFS_FIX(UFS_VENDOR_SKHYNIX, UFS_ANY_MODEL, UFS_DEVICE_NO_VCCQ),
219 UFS_FIX(UFS_VENDOR_SKHYNIX, UFS_ANY_MODEL,
220 UFS_DEVICE_QUIRK_HOST_PA_SAVECONFIGTIME),
221
222 END_FIX
223 };
224
225 static void ufshcd_tmc_handler(struct ufs_hba *hba);
226 static void ufshcd_async_scan(void *data, async_cookie_t cookie);
227 static int ufshcd_reset_and_restore(struct ufs_hba *hba);
228 static int ufshcd_eh_host_reset_handler(struct scsi_cmnd *cmd);
229 static int ufshcd_clear_tm_cmd(struct ufs_hba *hba, int tag);
230 static void ufshcd_hba_exit(struct ufs_hba *hba);
231 static int ufshcd_probe_hba(struct ufs_hba *hba);
232 static int __ufshcd_setup_clocks(struct ufs_hba *hba, bool on,
233 bool skip_ref_clk);
234 static int ufshcd_setup_clocks(struct ufs_hba *hba, bool on);
235 static int ufshcd_set_vccq_rail_unused(struct ufs_hba *hba, bool unused);
236 static int ufshcd_uic_hibern8_exit(struct ufs_hba *hba);
237 static int ufshcd_uic_hibern8_enter(struct ufs_hba *hba);
238 static int ufshcd_link_hibern8_ctrl(struct ufs_hba *hba, bool en);
239 static inline void ufshcd_add_delay_before_dme_cmd(struct ufs_hba *hba);
240 static int ufshcd_host_reset_and_restore(struct ufs_hba *hba);
241 #if defined(CONFIG_PM_DEVFREQ)
242 static void ufshcd_resume_clkscaling(struct ufs_hba *hba);
243 static void ufshcd_suspend_clkscaling(struct ufs_hba *hba);
244 static void __ufshcd_suspend_clkscaling(struct ufs_hba *hba);
245 static int ufshcd_scale_clks(struct ufs_hba *hba, bool scale_up);
246 #endif
247 static irqreturn_t ufshcd_intr(int irq, void *__hba);
248 static int ufshcd_change_power_mode(struct ufs_hba *hba,
249 struct ufs_pa_layer_attr *pwr_mode);
250 static int ufshcd_set_dev_pwr_mode(struct ufs_hba *hba,
251 enum ufs_dev_pwr_mode pwr_mode);
252 static int ufshcd_send_request_sense(struct ufs_hba *hba,
253 struct scsi_device *sdp);
254 static void ufshcd_vreg_set_lpm(struct ufs_hba *hba);
255 static int ufshcd_vreg_set_hpm(struct ufs_hba *hba);
256 static inline bool ufshcd_valid_tag(struct ufs_hba *hba, int tag)
257 {
258 return tag >= 0 && tag < hba->nutrs;
259 }
260
261 static ssize_t ufshcd_monitor_show(struct device *dev,
262 struct device_attribute *attr, char *buf)
263 {
264 struct ufs_hba *hba = dev_get_drvdata(dev);
265
266 return snprintf(buf, PAGE_SIZE, "%lu\n", hba->monitor.flag);
267 }
268
269 static ssize_t ufshcd_monitor_store(struct device *dev,
270 struct device_attribute *attr, const char *buf, size_t count)
271 {
272 struct ufs_hba *hba = dev_get_drvdata(dev);
273 unsigned long value;
274
275 if (kstrtoul(buf, 0, &value))
276 return -EINVAL;
277
278 hba->monitor.flag = value;
279 return count;
280 }
281
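/**
 * ufshcd_init_monitor - create the "monitor" sysfs attribute
 * @hba: per adapter instance
 */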
282 static void ufshcd_init_monitor(struct ufs_hba *hba)
283 {
284 hba->monitor.attrs.show = ufshcd_monitor_show;
285 hba->monitor.attrs.store = ufshcd_monitor_store;
286 sysfs_attr_init(&hba->monitor.attrs.attr);
287 hba->monitor.attrs.attr.name = "monitor";
288 hba->monitor.attrs.attr.mode = S_IRUGO | S_IWUSR;
289 if (device_create_file(hba->dev, &hba->monitor.attrs))
290 dev_err(hba->dev, "Failed to create sysfs for monitor\n");
291 }
292
293
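/**
 * ufshcd_enable_irq - request and enable the host controller interrupt
 * @hba: per adapter instance
 *
 * Requests the IRQ line if it has not been requested yet.
 * Returns 0 on success, the request_irq() error code otherwise.
 */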
294 static inline int ufshcd_enable_irq(struct ufs_hba *hba)
295 {
296 int ret = 0;
297
298 if (!hba->is_irq_enabled) {
299 ret = request_irq(hba->irq, ufshcd_intr, IRQF_SHARED, UFSHCD,
300 hba);
301 if (ret)
302 dev_err(hba->dev, "%s: request_irq failed, ret=%d\n",
303 __func__, ret);
304 hba->is_irq_enabled = true;
305 }
306
307 return ret;
308 }
309
310 static inline void ufshcd_disable_irq(struct ufs_hba *hba)
311 {
312 if (hba->is_irq_enabled) {
313 free_irq(hba->irq, hba);
314 hba->is_irq_enabled = false;
315 }
316 }
317
318 /* replace a non-printable or non-ASCII character with a space */
319 static inline void ufshcd_remove_non_printable(char *val)
320 {
321 if (!val)
322 return;
323
324 if (*val < 0x20 || *val > 0x7e)
325 *val = ' ';
326 }
327
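/**
 * ufshcd_add_command_trace - emit a ufshcd_command trace event for a request
 * @hba: per adapter instance
 * @tag: task tag of the request
 * @str: event label (e.g. "send")
 *
 * The LBA and transfer length are only decoded for READ(10)/WRITE(10)
 * commands.
 */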
328 static void ufshcd_add_command_trace(struct ufs_hba *hba,
329 unsigned int tag, const char *str)
330 {
331 sector_t lba = -1;
332 u8 opcode = 0;
333 u32 intr, doorbell;
334 struct ufshcd_lrb *lrbp;
335 int transfer_len = -1;
336
337 if (!trace_ufshcd_command_enabled())
338 return;
339
340 lrbp = &hba->lrb[tag];
341
342 if (lrbp->cmd) { /* data phase exists */
343 opcode = (u8)(*lrbp->cmd->cmnd);
344 if ((opcode == READ_10) || (opcode == WRITE_10)) {
345 /*
346 * Currently we only fully trace read(10) and write(10)
347 * commands
348 */
349 if (lrbp->cmd->request && lrbp->cmd->request->bio)
350 lba =
351 lrbp->cmd->request->bio->bi_iter.bi_sector;
352 transfer_len = be32_to_cpu(
353 lrbp->ucd_req_ptr->sc.exp_data_transfer_len);
354 }
355 }
356
357 intr = ufshcd_readl(hba, REG_INTERRUPT_STATUS);
358 doorbell = ufshcd_readl(hba, REG_UTP_TRANSFER_REQ_DOOR_BELL);
359 trace_ufshcd_command(dev_name(hba->dev), str, tag,
360 doorbell, transfer_len, intr, lba, opcode);
361 }
362
363 static void ufshcd_print_clk_freqs(struct ufs_hba *hba)
364 {
365 struct ufs_clk_info *clki;
366 struct list_head *head = &hba->clk_list_head;
367
368 if (list_empty(head))
369 return;
370
371 list_for_each_entry(clki, head, list) {
372 if (!IS_ERR_OR_NULL(clki->clk) && clki->min_freq &&
373 clki->max_freq)
374 dev_err(hba->dev, "clk: %s, rate: %u\n",
375 clki->name, clki->curr_freq);
376 }
377 }
378
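/**
 * ufshcd_print_uic_err_hist - dump a UIC error history ring buffer
 * @hba: per adapter instance
 * @err_hist: error history to print
 * @err_name: label used in the log output
 *
 * Only non-zero entries are printed, together with their timestamps.
 */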
379 static void ufshcd_print_uic_err_hist(struct ufs_hba *hba,
380 struct ufs_uic_err_reg_hist *err_hist, char *err_name)
381 {
382 int i;
383
384 for (i = 0; i < UIC_ERR_REG_HIST_LENGTH; i++) {
385 int p = (i + err_hist->pos - 1) % UIC_ERR_REG_HIST_LENGTH;
386
387 if (err_hist->reg[p] == 0)
388 continue;
389 dev_err(hba->dev, "%s[%d] = 0x%x at %lld us\n", err_name, i,
390 err_hist->reg[p], ktime_to_us(err_hist->tstamp[p]));
391 }
392 }
393
394 static void ufshcd_print_host_regs(struct ufs_hba *hba)
395 {
396 /*
397 * hex_dump reads its data without the readl macro. This might
398 * cause inconsistency issues on some platforms, as the printed
399 * values may come from a cache rather than the most recent value.
400 * To know whether you are looking at an un-cached version, verify
401 * that the IORESOURCE_MEM flag is set when xxx_get_resource() is
402 * invoked during the platform/PCI probe function.
403 */
404 ufshcd_hex_dump("host regs: ", hba->mmio_base, UFSHCI_REG_SPACE_SIZE);
405 dev_err(hba->dev, "hba->ufs_version = 0x%x, hba->capabilities = 0x%x\n",
406 hba->ufs_version, hba->capabilities);
407 dev_err(hba->dev,
408 "hba->outstanding_reqs = 0x%x, hba->outstanding_tasks = 0x%x\n",
409 (u32)hba->outstanding_reqs, (u32)hba->outstanding_tasks);
410 dev_err(hba->dev,
411 "last_hibern8_exit_tstamp at %lld us, hibern8_exit_cnt = %d\n",
412 ktime_to_us(hba->ufs_stats.last_hibern8_exit_tstamp),
413 hba->ufs_stats.hibern8_exit_cnt);
414
415 ufshcd_print_uic_err_hist(hba, &hba->ufs_stats.pa_err, "pa_err");
416 ufshcd_print_uic_err_hist(hba, &hba->ufs_stats.dl_err, "dl_err");
417 ufshcd_print_uic_err_hist(hba, &hba->ufs_stats.nl_err, "nl_err");
418 ufshcd_print_uic_err_hist(hba, &hba->ufs_stats.tl_err, "tl_err");
419 ufshcd_print_uic_err_hist(hba, &hba->ufs_stats.dme_err, "dme_err");
420
421 ufshcd_print_clk_freqs(hba);
422
423 if (hba->vops && hba->vops->dbg_register_dump)
424 hba->vops->dbg_register_dump(hba);
425 }
426
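/**
 * ufshcd_print_trs - dump UTP transfer request descriptors and UPIUs
 * @hba: per adapter instance
 * @bitmap: bitmap of the request tags to dump
 * @pr_prdt: true to also dump the PRDT entries
 */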
427 static
428 void ufshcd_print_trs(struct ufs_hba *hba, unsigned long bitmap, bool pr_prdt)
429 {
430 struct ufshcd_lrb *lrbp;
431 int prdt_length;
432 int tag;
433
434 for_each_set_bit(tag, &bitmap, hba->nutrs) {
435 lrbp = &hba->lrb[tag];
436
437 dev_err(hba->dev, "UPIU[%d] - issue time %lld us\n",
438 tag, ktime_to_us(lrbp->issue_time_stamp));
439 dev_err(hba->dev,
440 "UPIU[%d] - Transfer Request Descriptor phys@0x%llx\n",
441 tag, (u64)lrbp->utrd_dma_addr);
442
443 ufshcd_hex_dump("UPIU TRD: ", lrbp->utr_descriptor_ptr,
444 sizeof(struct utp_transfer_req_desc));
445 dev_err(hba->dev, "UPIU[%d] - Request UPIU phys@0x%llx\n", tag,
446 (u64)lrbp->ucd_req_dma_addr);
447 ufshcd_hex_dump("UPIU REQ: ", lrbp->ucd_req_ptr,
448 sizeof(struct utp_upiu_req));
449 dev_err(hba->dev, "UPIU[%d] - Response UPIU phys@0x%llx\n", tag,
450 (u64)lrbp->ucd_rsp_dma_addr);
451 ufshcd_hex_dump("UPIU RSP: ", lrbp->ucd_rsp_ptr,
452 sizeof(struct utp_upiu_rsp));
453
454 if (hba->quirks & UFSHCD_QUIRK_PRDT_BYTE_GRAN)
455 prdt_length = le16_to_cpu(lrbp->utr_descriptor_ptr->prd_table_length)
456 / sizeof(struct ufshcd_sg_entry);
457 else
458 prdt_length = le16_to_cpu(lrbp->utr_descriptor_ptr->prd_table_length);
459
460 dev_err(hba->dev,
461 "UPIU[%d] - PRDT - %d entries phys@0x%llx\n",
462 tag, prdt_length,
463 (u64)lrbp->ucd_prdt_dma_addr);
464 if (pr_prdt)
465 ufshcd_hex_dump("UPIU PRDT: ", lrbp->ucd_prdt_ptr,
466 sizeof(struct ufshcd_sg_entry) * prdt_length);
467 }
468 }
469
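/**
 * ufshcd_print_tmrs - dump UTP task management request descriptors and UPIUs
 * @hba: per adapter instance
 * @bitmap: bitmap of the task management tags to dump
 */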
470 static void ufshcd_print_tmrs(struct ufs_hba *hba, unsigned long bitmap)
471 {
472 struct utp_task_req_desc *tmrdp;
473 int tag;
474
475 for_each_set_bit(tag, &bitmap, hba->nutmrs) {
476 tmrdp = &hba->utmrdl_base_addr[tag];
477 dev_err(hba->dev, "TM[%d] - Task Management Header\n", tag);
478 ufshcd_hex_dump("TM TRD: ", &tmrdp->header,
479 sizeof(struct request_desc_header));
480 dev_err(hba->dev, "TM[%d] - Task Management Request UPIU\n",
481 tag);
482 ufshcd_hex_dump("TM REQ: ", tmrdp->task_req_upiu,
483 sizeof(struct utp_upiu_req));
484 dev_err(hba->dev, "TM[%d] - Task Management Response UPIU\n",
485 tag);
486 ufshcd_hex_dump("TM RSP: ", tmrdp->task_rsp_upiu,
487 sizeof(struct utp_task_req_desc));
488 }
489 }
490
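/**
 * ufshcd_print_host_state - dump a snapshot of the host controller state
 * @hba: per adapter instance
 */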
491 static void ufshcd_print_host_state(struct ufs_hba *hba)
492 {
493 dev_err(hba->dev, "UFS Host state=%d\n", hba->ufshcd_state);
494 dev_err(hba->dev, "lrb in use=0x%lx, outstanding reqs=0x%lx tasks=0x%lx\n",
495 hba->lrb_in_use, hba->outstanding_reqs, hba->outstanding_tasks);
496 dev_err(hba->dev, "saved_err=0x%x, saved_uic_err=0x%x\n",
497 hba->saved_err, hba->saved_uic_err);
498 dev_err(hba->dev, "Device power mode=%d, UIC link state=%d\n",
499 hba->curr_dev_pwr_mode, hba->uic_link_state);
500 dev_err(hba->dev, "PM in progress=%d, sys. suspended=%d\n",
501 hba->pm_op_in_progress, hba->is_sys_suspended);
502 dev_err(hba->dev, "Auto BKOPS=%d, Host self-block=%d\n",
503 hba->auto_bkops_enabled, hba->host->host_self_blocked);
504 dev_err(hba->dev, "Clk gate=%d\n", hba->clk_gating.state);
505 dev_err(hba->dev, "error handling flags=0x%x, req. abort count=%d\n",
506 hba->eh_flags, hba->req_abort_count);
507 dev_err(hba->dev, "Host capabilities=0x%x, caps=0x%x\n",
508 hba->capabilities, hba->caps);
509 dev_err(hba->dev, "quirks=0x%x, dev. quirks=0x%x\n", hba->quirks,
510 hba->dev_quirks);
511 }
512
513 /**
514 * ufshcd_print_pwr_info - print power params as saved in hba
515 * power info
516 * @hba: per-adapter instance
517 */
518 static void ufshcd_print_pwr_info(struct ufs_hba *hba)
519 {
520 static const char * const names[] = {
521 "INVALID MODE",
522 "FAST MODE",
523 "SLOW_MODE",
524 "INVALID MODE",
525 "FASTAUTO_MODE",
526 "SLOWAUTO_MODE",
527 "INVALID MODE",
528 };
529
530 dev_err(hba->dev, "%s:[RX, TX]: gear=[%d, %d], lane[%d, %d], pwr[%s, %s], rate = %d\n",
531 __func__,
532 hba->pwr_info.gear_rx, hba->pwr_info.gear_tx,
533 hba->pwr_info.lane_rx, hba->pwr_info.lane_tx,
534 names[hba->pwr_info.pwr_rx],
535 names[hba->pwr_info.pwr_tx],
536 hba->pwr_info.hs_rate);
537 }
538
539 /**
540 * ufshcd_wait_for_register - wait for register value to change
541 * @hba - per-adapter interface
542 * @reg - mmio register offset
543 * @mask - mask to apply to read register value
544 * @val - wait condition
545 * @interval_us - polling interval in microsecs
546 * @timeout_ms - timeout in millisecs
547 * @can_sleep - perform sleep or just spin
548 *
549 * Returns -ETIMEDOUT on error, zero on success
550 */
551 int ufshcd_wait_for_register(struct ufs_hba *hba, u32 reg, u32 mask,
552 u32 val, unsigned long interval_us,
553 unsigned long timeout_ms, bool can_sleep)
554 {
555 int err = 0;
556 unsigned long timeout = jiffies + msecs_to_jiffies(timeout_ms);
557
558 /* ignore bits that we don't intend to wait on */
559 val = val & mask;
560
561 while ((ufshcd_readl(hba, reg) & mask) != val) {
562 if (can_sleep)
563 usleep_range(interval_us, interval_us + 50);
564 else
565 udelay(interval_us);
566 if (time_after(jiffies, timeout)) {
567 if ((ufshcd_readl(hba, reg) & mask) != val)
568 err = -ETIMEDOUT;
569 break;
570 }
571 }
572
573 return err;
574 }
575
576 /**
577 * ufshcd_get_intr_mask - Get the interrupt bit mask
578 * @hba - Pointer to adapter instance
579 *
580 * Returns interrupt bit mask per version
581 */
582 static inline u32 ufshcd_get_intr_mask(struct ufs_hba *hba)
583 {
584 u32 intr_mask = 0;
585
586 switch (hba->ufs_version) {
587 case UFSHCI_VERSION_10:
588 intr_mask = INTERRUPT_MASK_ALL_VER_10;
589 break;
590 case UFSHCI_VERSION_11:
591 case UFSHCI_VERSION_20:
592 intr_mask = INTERRUPT_MASK_ALL_VER_11;
593 break;
594 case UFSHCI_VERSION_21:
595 default:
596 intr_mask = INTERRUPT_MASK_ALL_VER_21;
597 break;
598 }
599
600 return intr_mask;
601 }
602
603 /**
604 * ufshcd_get_ufs_version - Get the UFS version supported by the HBA
605 * @hba - Pointer to adapter instance
606 *
607 * Returns UFSHCI version supported by the controller
608 */
609 static inline u32 ufshcd_get_ufs_version(struct ufs_hba *hba)
610 {
611 if (hba->quirks & UFSHCD_QUIRK_BROKEN_UFS_HCI_VERSION)
612 return ufshcd_vops_get_ufs_hci_version(hba);
613
614 return ufshcd_readl(hba, REG_UFS_VERSION);
615 }
616
617 /**
618 * ufshcd_is_device_present - Check if any device connected to
619 * the host controller
620 * @hba: pointer to adapter instance
621 *
622 * Returns true if device present, false if no device detected
623 */
624 static inline bool ufshcd_is_device_present(struct ufs_hba *hba)
625 {
626 return (ufshcd_readl(hba, REG_CONTROLLER_STATUS) &
627 DEVICE_PRESENT) ? true : false;
628 }
629
630 /**
631 * ufshcd_get_tr_ocs - Get the UTRD Overall Command Status
632 * @lrb: pointer to local command reference block
633 *
634 * This function is used to get the OCS field from UTRD
635 * Returns the OCS field in the UTRD
636 */
637 static inline int ufshcd_get_tr_ocs(struct ufshcd_lrb *lrbp)
638 {
639 return le32_to_cpu(lrbp->utr_descriptor_ptr->header.dword_2) & MASK_OCS;
640 }
641
642 /**
643 * ufshcd_get_tmr_ocs - Get the UTMRD Overall Command Status
644 * @task_req_descp: pointer to utp_task_req_desc structure
645 *
646 * This function is used to get the OCS field from UTMRD
647 * Returns the OCS field in the UTMRD
648 */
649 static inline int
650 ufshcd_get_tmr_ocs(struct utp_task_req_desc *task_req_descp)
651 {
652 return le32_to_cpu(task_req_descp->header.dword_2) & MASK_OCS;
653 }
654
655 /**
656 * ufshcd_get_tm_free_slot - get a free slot for task management request
657 * @hba: per adapter instance
658 * @free_slot: pointer to variable with available slot value
659 *
660 * Get a free tag and lock it until ufshcd_put_tm_slot() is called.
661 * Returns false if a free slot is not available, else returns true with the
662 * tag value in @free_slot.
663 */
664 static bool ufshcd_get_tm_free_slot(struct ufs_hba *hba, int *free_slot)
665 {
666 int tag;
667 bool ret = false;
668
669 if (!free_slot)
670 goto out;
671
672 do {
673 tag = find_first_zero_bit(&hba->tm_slots_in_use, hba->nutmrs);
674 if (tag >= hba->nutmrs)
675 goto out;
676 } while (test_and_set_bit_lock(tag, &hba->tm_slots_in_use));
677
678 *free_slot = tag;
679 ret = true;
680 out:
681 return ret;
682 }
683
684 static inline void ufshcd_put_tm_slot(struct ufs_hba *hba, int slot)
685 {
686 clear_bit_unlock(slot, &hba->tm_slots_in_use);
687 }
688
689 /**
690 * ufshcd_utrl_clear - Clear a bit in UTRLCLR register
691 * @hba: per adapter instance
692 * @pos: position of the bit to be cleared
693 */
694 static inline void ufshcd_utrl_clear(struct ufs_hba *hba, u32 pos)
695 {
696 u32 clear;
697
698 if (hba->quirks & UFSHCD_QUIRK_BROKEN_REQ_LIST_CLR)
699 clear = (1 << pos);
700 else
701 clear = ~(1 << pos);
702
703 ufshcd_writel(hba, clear, REG_UTP_TRANSFER_REQ_LIST_CLEAR);
704 }
705
706 /**
707 * ufshcd_utmrl_clear - Clear a bit in UTMRLCLR register
708 * @hba: per adapter instance
709 * @pos: position of the bit to be cleared
710 */
711 static inline void ufshcd_utmrl_clear(struct ufs_hba *hba, u32 pos)
712 {
713 u32 clear;
714
715 if (hba->quirks & UFSHCD_QUIRK_BROKEN_REQ_LIST_CLR)
716 clear = (1 << pos);
717 else
718 clear = ~(1 << pos);
719
720 ufshcd_writel(hba, clear, REG_UTP_TASK_REQ_LIST_CLEAR);
721 }
722
723 /**
724 * ufshcd_outstanding_req_clear - Clear a bit in outstanding request field
725 * @hba: per adapter instance
726 * @tag: position of the bit to be cleared
727 */
728 static inline void ufshcd_outstanding_req_clear(struct ufs_hba *hba, int tag)
729 {
730 __clear_bit(tag, &hba->outstanding_reqs);
731 }
732
733 /**
734 * ufshcd_get_lists_status - Check UCRDY, UTRLRDY and UTMRLRDY
735 * @reg: Register value of host controller status
736 *
737 * Returns integer, 0 on success and a positive value on failure
738 */
739 static inline int ufshcd_get_lists_status(u32 reg)
740 {
741 /*
742 * The mask 0xFF is for the following HCS register bits
743 * Bit Description
744 * 0 Device Present
745 * 1 UTRLRDY
746 * 2 UTMRLRDY
747 * 3 UCRDY
748 * 4-7 reserved
749 */
750 return ((reg & 0xFF) >> 1) ^ 0x07;
751 }
752
753 /**
754 * ufshcd_get_uic_cmd_result - Get the UIC command result
755 * @hba: Pointer to adapter instance
756 *
757 * This function gets the result of UIC command completion
758 * Returns 0 on success, non-zero value on error
759 */
760 static inline int ufshcd_get_uic_cmd_result(struct ufs_hba *hba)
761 {
762 return ufshcd_readl(hba, REG_UIC_COMMAND_ARG_2) &
763 MASK_UIC_COMMAND_RESULT;
764 }
765
766 /**
767 * ufshcd_get_dme_attr_val - Get the value of attribute returned by UIC command
768 * @hba: Pointer to adapter instance
769 *
770 * This function reads UIC command argument 3
771 * Returns the attribute value returned by the UIC command
772 */
773 static inline u32 ufshcd_get_dme_attr_val(struct ufs_hba *hba)
774 {
775 return ufshcd_readl(hba, REG_UIC_COMMAND_ARG_3);
776 }
777
778 /**
779 * ufshcd_get_req_rsp - returns the TR response transaction type
780 * @ucd_rsp_ptr: pointer to response UPIU
781 */
782 static inline int
783 ufshcd_get_req_rsp(struct utp_upiu_rsp *ucd_rsp_ptr)
784 {
785 return be32_to_cpu(ucd_rsp_ptr->header.dword_0) >> 24;
786 }
787
788 /**
789 * ufshcd_get_rsp_upiu_result - Get the result from response UPIU
790 * @ucd_rsp_ptr: pointer to response UPIU
791 *
792 * This function gets the response status and scsi_status from response UPIU
793 * Returns the response result code.
794 */
795 static inline int
796 ufshcd_get_rsp_upiu_result(struct utp_upiu_rsp *ucd_rsp_ptr)
797 {
798 return be32_to_cpu(ucd_rsp_ptr->header.dword_1) & MASK_RSP_UPIU_RESULT;
799 }
800
801 /**
802 * ufshcd_get_rsp_upiu_data_seg_len - Get the data segment length
803 * from response UPIU
804 * @ucd_rsp_ptr: pointer to response UPIU
805 *
806 * Return the data segment length.
807 */
808 static inline unsigned int
809 ufshcd_get_rsp_upiu_data_seg_len(struct utp_upiu_rsp *ucd_rsp_ptr)
810 {
811 return be32_to_cpu(ucd_rsp_ptr->header.dword_2) &
812 MASK_RSP_UPIU_DATA_SEG_LEN;
813 }
814
815 /**
816 * ufshcd_is_exception_event - Check if the device raised an exception event
817 * @ucd_rsp_ptr: pointer to response UPIU
818 *
819 * The function checks if the device raised an exception event indicated in
820 * the Device Information field of response UPIU.
821 *
822 * Returns true if exception is raised, false otherwise.
823 */
824 static inline bool ufshcd_is_exception_event(struct utp_upiu_rsp *ucd_rsp_ptr)
825 {
826 return be32_to_cpu(ucd_rsp_ptr->header.dword_2) &
827 MASK_RSP_EXCEPTION_EVENT ? true : false;
828 }
829
830 /**
831 * ufshcd_reset_intr_aggr - Reset interrupt aggregation values.
832 * @hba: per adapter instance
833 */
834 static inline void
835 ufshcd_reset_intr_aggr(struct ufs_hba *hba)
836 {
837 ufshcd_writel(hba, INT_AGGR_ENABLE |
838 INT_AGGR_COUNTER_AND_TIMER_RESET,
839 REG_UTP_TRANSFER_REQ_INT_AGG_CONTROL);
840 }
841
842 /**
843 * ufshcd_config_intr_aggr - Configure interrupt aggregation values.
844 * @hba: per adapter instance
845 * @cnt: Interrupt aggregation counter threshold
846 * @tmout: Interrupt aggregation timeout value
847 */
848 static inline void
849 ufshcd_config_intr_aggr(struct ufs_hba *hba, u8 cnt, u8 tmout)
850 {
851 ufshcd_writel(hba, INT_AGGR_ENABLE | INT_AGGR_PARAM_WRITE |
852 INT_AGGR_COUNTER_THLD_VAL(cnt) |
853 INT_AGGR_TIMEOUT_VAL(tmout),
854 REG_UTP_TRANSFER_REQ_INT_AGG_CONTROL);
855 }
856
857 /**
858 * ufshcd_disable_intr_aggr - Disables interrupt aggregation.
859 * @hba: per adapter instance
860 */
861 static inline void ufshcd_disable_intr_aggr(struct ufs_hba *hba)
862 {
863 ufshcd_writel(hba, 0, REG_UTP_TRANSFER_REQ_INT_AGG_CONTROL);
864 }
865
866 /**
867 * ufshcd_enable_run_stop_reg - Enable run-stop registers.
868 * When the run-stop registers are set to 1, it indicates to the
869 * host controller that it can process requests
870 * @hba: per adapter instance
871 */
872 static void ufshcd_enable_run_stop_reg(struct ufs_hba *hba)
873 {
874 ufshcd_writel(hba, UTP_TASK_REQ_LIST_RUN_STOP_BIT,
875 REG_UTP_TASK_REQ_LIST_RUN_STOP);
876 ufshcd_writel(hba, UTP_TRANSFER_REQ_LIST_RUN_STOP_BIT,
877 REG_UTP_TRANSFER_REQ_LIST_RUN_STOP);
878 }
879
880 /**
881 * ufshcd_hba_start - Start controller initialization sequence
882 * @hba: per adapter instance
883 */
884 static inline void ufshcd_hba_start(struct ufs_hba *hba)
885 {
886 ufshcd_writel(hba, CONTROLLER_ENABLE, REG_CONTROLLER_ENABLE);
887 }
888
889 /**
890 * ufshcd_is_hba_active - Get controller state
891 * @hba: per adapter instance
892 *
893 * Returns false if controller is active, true otherwise
894 */
895 static inline bool ufshcd_is_hba_active(struct ufs_hba *hba)
896 {
897 return (ufshcd_readl(hba, REG_CONTROLLER_ENABLE) & CONTROLLER_ENABLE)
898 ? false : true;
899 }
900
901 static const char *ufschd_uic_link_state_to_string(
902 enum uic_link_state state)
903 {
904 switch (state) {
905 case UIC_LINK_OFF_STATE: return "OFF";
906 case UIC_LINK_ACTIVE_STATE: return "ACTIVE";
907 case UIC_LINK_HIBERN8_STATE: return "HIBERN8";
908 default: return "UNKNOWN";
909 }
910 }
911
912 static const char *ufschd_ufs_dev_pwr_mode_to_string(
913 enum ufs_dev_pwr_mode state)
914 {
915 switch (state) {
916 case UFS_ACTIVE_PWR_MODE: return "ACTIVE";
917 case UFS_SLEEP_PWR_MODE: return "SLEEP";
918 case UFS_POWERDOWN_PWR_MODE: return "POWERDOWN";
919 default: return "UNKNOWN";
920 }
921 }
922
923 u32 ufshcd_get_local_unipro_ver(struct ufs_hba *hba)
924 {
925 /* HCI version 1.0 and 1.1 supports UniPro 1.41 */
926 if ((hba->ufs_version == UFSHCI_VERSION_10) ||
927 (hba->ufs_version == UFSHCI_VERSION_11))
928 return UFS_UNIPRO_VER_1_41;
929 else
930 return UFS_UNIPRO_VER_1_6;
931 }
932 EXPORT_SYMBOL(ufshcd_get_local_unipro_ver);
933
934 static bool ufshcd_is_unipro_pa_params_tuning_req(struct ufs_hba *hba)
935 {
936 /*
937 * If both host and device support UniPro ver1.6 or later, PA layer
938 * parameters tuning happens during link startup itself.
939 *
940 * We can manually tune PA layer parameters if either host or device
941 * doesn't support UniPro ver 1.6 or later. But to keep manual tuning
942 * logic simple, we will only do manual tuning if local unipro version
943 * doesn't support ver1.6 or later.
944 */
945 if (ufshcd_get_local_unipro_ver(hba) < UFS_UNIPRO_VER_1_6)
946 return true;
947 else
948 return false;
949 }
950
951 #if defined(CONFIG_PM_DEVFREQ)
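/**
 * ufshcd_scale_clks - scale all controller clocks up or down
 * @hba: per adapter instance
 * @scale_up: true to set clocks to max_freq, false to set them to min_freq
 *
 * Notifies the variant driver before and after the change.
 * Returns 0 on success, non-zero on error.
 */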
952 static int ufshcd_scale_clks(struct ufs_hba *hba, bool scale_up)
953 {
954 int ret = 0;
955 struct ufs_clk_info *clki;
956 struct list_head *head = &hba->clk_list_head;
957 ktime_t start = ktime_get();
958 bool clk_state_changed = false;
959
960 if (list_empty(head))
961 goto out;
962
963 ret = ufshcd_vops_clk_scale_notify(hba, scale_up, PRE_CHANGE);
964 if (ret)
965 return ret;
966
967 list_for_each_entry(clki, head, list) {
968 if (!IS_ERR_OR_NULL(clki->clk)) {
969 if (scale_up && clki->max_freq) {
970 if (clki->curr_freq == clki->max_freq)
971 continue;
972
973 clk_state_changed = true;
974 ret = clk_set_rate(clki->clk, clki->max_freq);
975 if (ret) {
976 dev_err(hba->dev, "%s: %s clk set rate(%dHz) failed, %d\n",
977 __func__, clki->name,
978 clki->max_freq, ret);
979 break;
980 }
981 trace_ufshcd_clk_scaling(dev_name(hba->dev),
982 "scaled up", clki->name,
983 clki->curr_freq,
984 clki->max_freq);
985
986 clki->curr_freq = clki->max_freq;
987
988 } else if (!scale_up && clki->min_freq) {
989 if (clki->curr_freq == clki->min_freq)
990 continue;
991
992 clk_state_changed = true;
993 ret = clk_set_rate(clki->clk, clki->min_freq);
994 if (ret) {
995 dev_err(hba->dev, "%s: %s clk set rate(%dHz) failed, %d\n",
996 __func__, clki->name,
997 clki->min_freq, ret);
998 break;
999 }
1000 trace_ufshcd_clk_scaling(dev_name(hba->dev),
1001 "scaled down", clki->name,
1002 clki->curr_freq,
1003 clki->min_freq);
1004 clki->curr_freq = clki->min_freq;
1005 }
1006 }
1007 dev_dbg(hba->dev, "%s: clk: %s, rate: %lu\n", __func__,
1008 clki->name, clk_get_rate(clki->clk));
1009 }
1010
1011 ret = ufshcd_vops_clk_scale_notify(hba, scale_up, POST_CHANGE);
1012
1013 out:
1014 if (clk_state_changed)
1015 trace_ufshcd_profile_clk_scaling(dev_name(hba->dev),
1016 (scale_up ? "up" : "down"),
1017 ktime_to_us(ktime_sub(ktime_get(), start)), ret);
1018 return ret;
1019 }
1020
1021 /**
1022 * ufshcd_is_devfreq_scaling_required - check if scaling is required or not
1023 * @hba: per adapter instance
1024 * @scale_up: True if scaling up and false if scaling down
1025 *
1026 * Returns true if scaling is required, false otherwise.
1027 */
1028 static bool ufshcd_is_devfreq_scaling_required(struct ufs_hba *hba,
1029 bool scale_up)
1030 {
1031 struct ufs_clk_info *clki;
1032 struct list_head *head = &hba->clk_list_head;
1033
1034 if (list_empty(head))
1035 return false;
1036
1037 list_for_each_entry(clki, head, list) {
1038 if (!IS_ERR_OR_NULL(clki->clk)) {
1039 if (scale_up && clki->max_freq) {
1040 if (clki->curr_freq == clki->max_freq)
1041 continue;
1042 return true;
1043 } else if (!scale_up && clki->min_freq) {
1044 if (clki->curr_freq == clki->min_freq)
1045 continue;
1046 return true;
1047 }
1048 }
1049 }
1050
1051 return false;
1052 }
1053
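/**
 * ufshcd_wait_for_doorbell_clr - wait for task and transfer doorbells to clear
 * @hba: per adapter instance
 * @wait_timeout_us: timeout in microseconds
 *
 * Returns 0 once both doorbell registers read zero, -EBUSY on timeout or if
 * the host is not in the OPERATIONAL state.
 */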
1054 static int ufshcd_wait_for_doorbell_clr(struct ufs_hba *hba,
1055 u64 wait_timeout_us)
1056 {
1057 unsigned long flags;
1058 int ret = 0;
1059 u32 tm_doorbell;
1060 u32 tr_doorbell;
1061 bool timeout = false, do_last_check = false;
1062 ktime_t start;
1063
1064 ufshcd_hold(hba, false);
1065 spin_lock_irqsave(hba->host->host_lock, flags);
1066 /*
1067 * Wait for all the outstanding tasks/transfer requests.
1068 * Verify by checking the doorbell registers are clear.
1069 */
1070 start = ktime_get();
1071 do {
1072 if (hba->ufshcd_state != UFSHCD_STATE_OPERATIONAL) {
1073 ret = -EBUSY;
1074 goto out;
1075 }
1076
1077 tm_doorbell = ufshcd_readl(hba, REG_UTP_TASK_REQ_DOOR_BELL);
1078 tr_doorbell = ufshcd_readl(hba, REG_UTP_TRANSFER_REQ_DOOR_BELL);
1079 if (!tm_doorbell && !tr_doorbell) {
1080 timeout = false;
1081 break;
1082 } else if (do_last_check) {
1083 break;
1084 }
1085
1086 spin_unlock_irqrestore(hba->host->host_lock, flags);
1087 schedule();
1088 if (ktime_to_us(ktime_sub(ktime_get(), start)) >
1089 wait_timeout_us) {
1090 timeout = true;
1091 /*
1092 * We might have scheduled out for long time so make
1093 * sure to check if doorbells are cleared by this time
1094 * or not.
1095 */
1096 do_last_check = true;
1097 }
1098 spin_lock_irqsave(hba->host->host_lock, flags);
1099 } while (tm_doorbell || tr_doorbell);
1100
1101 if (timeout) {
1102 dev_err(hba->dev,
1103 "%s: timedout waiting for doorbell to clear (tm=0x%x, tr=0x%x)\n",
1104 __func__, tm_doorbell, tr_doorbell);
1105 ret = -EBUSY;
1106 }
1107 out:
1108 spin_unlock_irqrestore(hba->host->host_lock, flags);
1109 ufshcd_release(hba);
1110 return ret;
1111 }
1112
1113 /**
1114 * ufshcd_scale_gear - scale up/down UFS gear
1115 * @hba: per adapter instance
1116 * @scale_up: True for scaling up gear and false for scaling down
1117 *
1118 * Returns 0 for success,
1119 * Returns -EBUSY if scaling can't happen at this time
1120 * Returns non-zero for any other errors
1121 */
1122 static int ufshcd_scale_gear(struct ufs_hba *hba, bool scale_up)
1123 {
1124 #define UFS_MIN_GEAR_TO_SCALE_DOWN UFS_HS_G1
1125 int ret = 0;
1126 struct ufs_pa_layer_attr new_pwr_info;
1127
1128 if (scale_up) {
1129 memcpy(&new_pwr_info, &hba->clk_scaling.saved_pwr_info.info,
1130 sizeof(struct ufs_pa_layer_attr));
1131 } else {
1132 memcpy(&new_pwr_info, &hba->pwr_info,
1133 sizeof(struct ufs_pa_layer_attr));
1134
1135 if (hba->pwr_info.gear_tx > UFS_MIN_GEAR_TO_SCALE_DOWN
1136 || hba->pwr_info.gear_rx > UFS_MIN_GEAR_TO_SCALE_DOWN) {
1137 /* save the current power mode */
1138 memcpy(&hba->clk_scaling.saved_pwr_info.info,
1139 &hba->pwr_info,
1140 sizeof(struct ufs_pa_layer_attr));
1141
1142 /* scale down gear */
1143 new_pwr_info.gear_tx = UFS_MIN_GEAR_TO_SCALE_DOWN;
1144 new_pwr_info.gear_rx = UFS_MIN_GEAR_TO_SCALE_DOWN;
1145 }
1146 }
1147
1148 /* check if the power mode needs to be changed */
1149 ret = ufshcd_change_power_mode(hba, &new_pwr_info);
1150
1151 if (ret)
1152 dev_err(hba->dev, "%s: failed err %d, old gear: (tx %d rx %d), new gear: (tx %d rx %d)",
1153 __func__, ret,
1154 hba->pwr_info.gear_tx, hba->pwr_info.gear_rx,
1155 new_pwr_info.gear_tx, new_pwr_info.gear_rx);
1156
1157 return ret;
1158 }
1159
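/**
 * ufshcd_clock_scaling_prepare - quiesce the host before clock scaling
 * @hba: per adapter instance
 *
 * Blocks SCSI requests, takes clk_scaling_lock and waits for the doorbells
 * to clear. Returns 0 on success, -EBUSY if requests are still outstanding
 * after the timeout.
 */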
1160 static int ufshcd_clock_scaling_prepare(struct ufs_hba *hba)
1161 {
1162 #define DOORBELL_CLR_TOUT_US (1000 * 1000) /* 1 sec */
1163 int ret = 0;
1164 /*
1165 * make sure that there are no outstanding requests when
1166 * clock scaling is in progress
1167 */
1168 scsi_block_requests(hba->host);
1169 down_write(&hba->clk_scaling_lock);
1170 if (ufshcd_wait_for_doorbell_clr(hba, DOORBELL_CLR_TOUT_US)) {
1171 ret = -EBUSY;
1172 up_write(&hba->clk_scaling_lock);
1173 scsi_unblock_requests(hba->host);
1174 }
1175
1176 return ret;
1177 }
1178
1179 static void ufshcd_clock_scaling_unprepare(struct ufs_hba *hba)
1180 {
1181 up_write(&hba->clk_scaling_lock);
1182 scsi_unblock_requests(hba->host);
1183 }
1184
1185 /**
1186 * ufshcd_devfreq_scale - scale up/down UFS clocks and gear
1187 * @hba: per adapter instance
1188 * @scale_up: True for scaling up and false for scaling down
1189 *
1190 * Returns 0 for success,
1191 * Returns -EBUSY if scaling can't happen at this time
1192 * Returns non-zero for any other errors
1193 */
1194 static int ufshcd_devfreq_scale(struct ufs_hba *hba, bool scale_up)
1195 {
1196 int ret = 0;
1197
1198 /* let's not get into low power until clock scaling is completed */
1199 ufshcd_hold(hba, false);
1200
1201 ret = ufshcd_clock_scaling_prepare(hba);
1202 if (ret)
1203 return ret;
1204
1205 /* scale down the gear before scaling down clocks */
1206 if (!scale_up) {
1207 ret = ufshcd_scale_gear(hba, false);
1208 if (ret)
1209 goto out;
1210 }
1211
1212 ret = ufshcd_scale_clks(hba, scale_up);
1213 if (ret) {
1214 if (!scale_up)
1215 ufshcd_scale_gear(hba, true);
1216 goto out;
1217 }
1218
1219 /* scale up the gear after scaling up clocks */
1220 if (scale_up) {
1221 ret = ufshcd_scale_gear(hba, true);
1222 if (ret) {
1223 ufshcd_scale_clks(hba, false);
1224 goto out;
1225 }
1226 }
1227
1228 ret = ufshcd_vops_clk_scale_notify(hba, scale_up, POST_CHANGE);
1229
1230 out:
1231 ufshcd_clock_scaling_unprepare(hba);
1232 ufshcd_release(hba);
1233 return ret;
1234 }
1235
1236 static void ufshcd_clk_scaling_suspend_work(struct work_struct *work)
1237 {
1238 struct ufs_hba *hba = container_of(work, struct ufs_hba,
1239 clk_scaling.suspend_work);
1240 unsigned long irq_flags;
1241
1242 spin_lock_irqsave(hba->host->host_lock, irq_flags);
1243 if (hba->clk_scaling.active_reqs || hba->clk_scaling.is_suspended) {
1244 spin_unlock_irqrestore(hba->host->host_lock, irq_flags);
1245 return;
1246 }
1247 hba->clk_scaling.is_suspended = true;
1248 spin_unlock_irqrestore(hba->host->host_lock, irq_flags);
1249
1250 __ufshcd_suspend_clkscaling(hba);
1251 }
1252
1253 static void ufshcd_clk_scaling_resume_work(struct work_struct *work)
1254 {
1255 struct ufs_hba *hba = container_of(work, struct ufs_hba,
1256 clk_scaling.resume_work);
1257 unsigned long irq_flags;
1258
1259 spin_lock_irqsave(hba->host->host_lock, irq_flags);
1260 if (!hba->clk_scaling.is_suspended) {
1261 spin_unlock_irqrestore(hba->host->host_lock, irq_flags);
1262 return;
1263 }
1264 hba->clk_scaling.is_suspended = false;
1265 spin_unlock_irqrestore(hba->host->host_lock, irq_flags);
1266
1267 devfreq_resume_device(hba->devfreq);
1268 }
1269
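/**
 * ufshcd_devfreq_target - devfreq target callback for UFS clock scaling
 * @dev: device to which the devfreq instance is attached
 * @freq: requested frequency (only 0 and UINT_MAX are supported)
 * @flags: devfreq flags
 *
 * Scales clocks and gear up for UINT_MAX and down otherwise, and schedules
 * the suspend work when there are no active scaling requests.
 */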
1270 static int ufshcd_devfreq_target(struct device *dev,
1271 unsigned long *freq, u32 flags)
1272 {
1273 int ret = 0;
1274 struct ufs_hba *hba = dev_get_drvdata(dev);
1275 ktime_t start;
1276 bool scale_up, sched_clk_scaling_suspend_work = false;
1277 unsigned long irq_flags;
1278
1279 if (!ufshcd_is_clkscaling_supported(hba))
1280 return -EINVAL;
1281
1282 if ((*freq > 0) && (*freq < UINT_MAX)) {
1283 dev_err(hba->dev, "%s: invalid freq = %lu\n", __func__, *freq);
1284 return -EINVAL;
1285 }
1286
1287 spin_lock_irqsave(hba->host->host_lock, irq_flags);
1288 if (ufshcd_eh_in_progress(hba)) {
1289 spin_unlock_irqrestore(hba->host->host_lock, irq_flags);
1290 return 0;
1291 }
1292
1293 if (!hba->clk_scaling.active_reqs)
1294 sched_clk_scaling_suspend_work = true;
1295
1296 scale_up = (*freq == UINT_MAX) ? true : false;
1297 if (!ufshcd_is_devfreq_scaling_required(hba, scale_up)) {
1298 spin_unlock_irqrestore(hba->host->host_lock, irq_flags);
1299 ret = 0;
1300 goto out; /* no state change required */
1301 }
1302 spin_unlock_irqrestore(hba->host->host_lock, irq_flags);
1303
1304 start = ktime_get();
1305 ret = ufshcd_devfreq_scale(hba, scale_up);
1306
1307 trace_ufshcd_profile_clk_scaling(dev_name(hba->dev),
1308 (scale_up ? "up" : "down"),
1309 ktime_to_us(ktime_sub(ktime_get(), start)), ret);
1310
1311 out:
1312 if (sched_clk_scaling_suspend_work)
1313 queue_work(hba->clk_scaling.workq,
1314 &hba->clk_scaling.suspend_work);
1315
1316 return ret;
1317 }
1318
1319
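/**
 * ufshcd_devfreq_get_dev_status - devfreq get_dev_status callback
 * @dev: device to which the devfreq instance is attached
 * @stat: filled with the busy/total time of the current monitoring window
 *
 * Also restarts the monitoring window. Returns 0, or -EINVAL if clock
 * scaling is not supported.
 */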
1320 static int ufshcd_devfreq_get_dev_status(struct device *dev,
1321 struct devfreq_dev_status *stat)
1322 {
1323 struct ufs_hba *hba = dev_get_drvdata(dev);
1324 struct ufs_clk_scaling *scaling = &hba->clk_scaling;
1325 unsigned long flags;
1326
1327 if (!ufshcd_is_clkscaling_supported(hba))
1328 return -EINVAL;
1329
1330 memset(stat, 0, sizeof(*stat));
1331
1332 spin_lock_irqsave(hba->host->host_lock, flags);
1333 if (!scaling->window_start_t)
1334 goto start_window;
1335
1336 if (scaling->is_busy_started)
1337 scaling->tot_busy_t += ktime_to_us(ktime_sub(ktime_get(),
1338 scaling->busy_start_t));
1339
1340 stat->total_time = jiffies_to_usecs((long)jiffies -
1341 (long)scaling->window_start_t);
1342 stat->busy_time = scaling->tot_busy_t;
1343 start_window:
1344 scaling->window_start_t = jiffies;
1345 scaling->tot_busy_t = 0;
1346
1347 if (hba->outstanding_reqs) {
1348 scaling->busy_start_t = ktime_get();
1349 scaling->is_busy_started = true;
1350 } else {
1351 scaling->busy_start_t = 0;
1352 scaling->is_busy_started = false;
1353 }
1354 spin_unlock_irqrestore(hba->host->host_lock, flags);
1355 return 0;
1356 }
1357
1358 static struct devfreq_dev_profile ufs_devfreq_profile = {
1359 .polling_ms = 100,
1360 .target = ufshcd_devfreq_target,
1361 .get_dev_status = ufshcd_devfreq_get_dev_status,
1362 };
1363
1364 static void __ufshcd_suspend_clkscaling(struct ufs_hba *hba)
1365 {
1366 unsigned long flags;
1367
1368 devfreq_suspend_device(hba->devfreq);
1369 spin_lock_irqsave(hba->host->host_lock, flags);
1370 hba->clk_scaling.window_start_t = 0;
1371 spin_unlock_irqrestore(hba->host->host_lock, flags);
1372 }
1373
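/**
 * ufshcd_suspend_clkscaling - suspend devfreq clock scaling if it is active
 * @hba: per adapter instance
 */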
1374 static void ufshcd_suspend_clkscaling(struct ufs_hba *hba)
1375 {
1376 unsigned long flags;
1377 bool suspend = false;
1378
1379 if (!ufshcd_is_clkscaling_supported(hba))
1380 return;
1381
1382 spin_lock_irqsave(hba->host->host_lock, flags);
1383 if (!hba->clk_scaling.is_suspended) {
1384 suspend = true;
1385 hba->clk_scaling.is_suspended = true;
1386 }
1387 spin_unlock_irqrestore(hba->host->host_lock, flags);
1388
1389 if (suspend)
1390 __ufshcd_suspend_clkscaling(hba);
1391 }
1392
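/**
 * ufshcd_resume_clkscaling - resume devfreq clock scaling if it was suspended
 * @hba: per adapter instance
 */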
1393 static void ufshcd_resume_clkscaling(struct ufs_hba *hba)
1394 {
1395 unsigned long flags;
1396 bool resume = false;
1397
1398 if (!ufshcd_is_clkscaling_supported(hba))
1399 return;
1400
1401 spin_lock_irqsave(hba->host->host_lock, flags);
1402 if (hba->clk_scaling.is_suspended) {
1403 resume = true;
1404 hba->clk_scaling.is_suspended = false;
1405 }
1406 spin_unlock_irqrestore(hba->host->host_lock, flags);
1407
1408 if (resume)
1409 devfreq_resume_device(hba->devfreq);
1410 }
1411
1412 static ssize_t ufshcd_clkscale_enable_show(struct device *dev,
1413 struct device_attribute *attr, char *buf)
1414 {
1415 struct ufs_hba *hba = dev_get_drvdata(dev);
1416
1417 return snprintf(buf, PAGE_SIZE, "%d\n", hba->clk_scaling.is_allowed);
1418 }
1419
1420 static ssize_t ufshcd_clkscale_enable_store(struct device *dev,
1421 struct device_attribute *attr, const char *buf, size_t count)
1422 {
1423 struct ufs_hba *hba = dev_get_drvdata(dev);
1424 u32 value;
1425 int err;
1426
1427 if (kstrtou32(buf, 0, &value))
1428 return -EINVAL;
1429
1430 value = !!value;
1431 if (value == hba->clk_scaling.is_allowed)
1432 goto out;
1433
1434 pm_runtime_get_sync(hba->dev);
1435 ufshcd_hold(hba, false);
1436
1437 cancel_work_sync(&hba->clk_scaling.suspend_work);
1438 cancel_work_sync(&hba->clk_scaling.resume_work);
1439
1440 hba->clk_scaling.is_allowed = value;
1441
1442 if (value) {
1443 ufshcd_resume_clkscaling(hba);
1444 } else {
1445 ufshcd_suspend_clkscaling(hba);
1446 err = ufshcd_devfreq_scale(hba, true);
1447 if (err)
1448 dev_err(hba->dev, "%s: failed to scale clocks up %d\n",
1449 __func__, err);
1450 }
1451
1452 ufshcd_release(hba);
1453 pm_runtime_put_sync(hba->dev);
1454 out:
1455 return count;
1456 }
1457
1458 static void ufshcd_clkscaling_init_sysfs(struct ufs_hba *hba)
1459 {
1460 hba->clk_scaling.enable_attr.show = ufshcd_clkscale_enable_show;
1461 hba->clk_scaling.enable_attr.store = ufshcd_clkscale_enable_store;
1462 sysfs_attr_init(&hba->clk_scaling.enable_attr.attr);
1463 hba->clk_scaling.enable_attr.attr.name = "clkscale_enable";
1464 hba->clk_scaling.enable_attr.attr.mode = 0644;
1465 if (device_create_file(hba->dev, &hba->clk_scaling.enable_attr))
1466 dev_err(hba->dev, "Failed to create sysfs for clkscale_enable\n");
1467 }
1468 #endif
1469
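/**
 * ufshcd_ungate_work - turn the gated clocks back on and exit hibern8
 * @work: pointer to the clk_gating.ungate_work work item
 *
 * Cancels any pending gate work, re-enables the clocks (or simply marks the
 * state CLKS_ON when gating is faked), exits hibern8 if it was entered during
 * gating, and finally unblocks SCSI requests.
 */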
1470 static void ufshcd_ungate_work(struct work_struct *work)
1471 {
1472 int ret;
1473 unsigned long flags;
1474 struct ufs_hba *hba = container_of(work, struct ufs_hba,
1475 clk_gating.ungate_work);
1476 bool gating_allowed = !ufshcd_can_fake_clkgating(hba);
1477
1478 cancel_delayed_work_sync(&hba->clk_gating.gate_work);
1479
1480 spin_lock_irqsave(hba->host->host_lock, flags);
1481 if (hba->clk_gating.state == CLKS_ON && gating_allowed) {
1482 spin_unlock_irqrestore(hba->host->host_lock, flags);
1483 goto unblock_reqs;
1484 }
1485
1486 spin_unlock_irqrestore(hba->host->host_lock, flags);
1487 if (gating_allowed) {
1488 ufshcd_setup_clocks(hba, true);
1489 } else {
1490 spin_lock_irqsave(hba->host->host_lock, flags);
1491 hba->clk_gating.state = CLKS_ON;
1492 spin_unlock_irqrestore(hba->host->host_lock, flags);
1493 }
1494
1495 /* Exit from hibern8 */
1496 if (ufshcd_can_hibern8_during_gating(hba)) {
1497 /* Prevent gating in this path */
1498 hba->clk_gating.is_suspended = true;
1499 if (ufshcd_is_link_hibern8(hba)) {
1500 ufshcd_set_link_trans_active(hba);
1501 ret = ufshcd_link_hibern8_ctrl(hba, false);
1502 if (ret) {
1503 ufshcd_set_link_off(hba);
1504 dev_err(hba->dev, "%s: hibern8 exit failed %d\n",
1505 __func__, ret);
1506 } else {
1507 ufshcd_set_link_active(hba);
1508 }
1509 }
1510 hba->clk_gating.is_suspended = false;
1511 }
1512 unblock_reqs:
1513 scsi_unblock_requests(hba->host);
1514 }
1515
1516 /**
1517 * ufshcd_hold - Enable clocks that were gated earlier due to ufshcd_release.
1518 * Also, exit from hibern8 mode and set the link as active.
1519 * @hba: per adapter instance
1520 * @async: This indicates whether caller should ungate clocks asynchronously.
1521 */
1522 int ufshcd_hold(struct ufs_hba *hba, bool async)
1523 {
1524 int rc = 0;
1525 unsigned long flags;
1526
1527 if (!ufshcd_is_clkgating_allowed(hba))
1528 goto out;
1529 spin_lock_irqsave(hba->host->host_lock, flags);
1530 hba->clk_gating.active_reqs++;
1531
1532 if (ufshcd_eh_in_progress(hba)) {
1533 spin_unlock_irqrestore(hba->host->host_lock, flags);
1534 return 0;
1535 }
1536
1537 start:
1538 switch (hba->clk_gating.state) {
1539 case __CLKS_ON:
1540 rc = -EAGAIN;
1541 if (async)
1542 hba->clk_gating.active_reqs--;
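		/* fall through */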
1543 case CLKS_ON:
1544 break;
1545 case REQ_CLKS_OFF:
1546 if (cancel_delayed_work(&hba->clk_gating.gate_work)) {
1547 hba->clk_gating.state = CLKS_ON;
1548 trace_ufshcd_clk_gating(dev_name(hba->dev),
1549 hba->clk_gating.state);
1550 break;
1551 }
1552 /*
1553 * If we are here, it means gating work is either done or
1554 * currently running. Hence, fall through to cancel gating
1555 * work and to enable clocks.
1556 */
1557 case CLKS_OFF:
1558 scsi_block_requests(hba->host);
1559 hba->clk_gating.state = REQ_CLKS_ON;
1560 trace_ufshcd_clk_gating(dev_name(hba->dev),
1561 hba->clk_gating.state);
1562 queue_work(hba->ufshcd_workq, &hba->clk_gating.ungate_work);
1563 /*
1564 * fall through to check if we should wait for this
1565 * work to be done or not.
1566 */
1567 case REQ_CLKS_ON:
1568 if (async) {
1569 rc = -EAGAIN;
1570 hba->clk_gating.active_reqs--;
1571 break;
1572 }
1573
1574 spin_unlock_irqrestore(hba->host->host_lock, flags);
1575 flush_work(&hba->clk_gating.ungate_work);
1576 /* Make sure state is CLKS_ON before returning */
1577 spin_lock_irqsave(hba->host->host_lock, flags);
1578 goto start;
1579 default:
1580 dev_err(hba->dev, "%s: clk gating is in invalid state %d\n",
1581 __func__, hba->clk_gating.state);
1582 break;
1583 }
1584 spin_unlock_irqrestore(hba->host->host_lock, flags);
1585 out:
1586 return rc;
1587 }
1588 EXPORT_SYMBOL_GPL(ufshcd_hold);
1589
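/**
 * ufshcd_gate_work - put the link into hibern8 and gate the clocks when idle
 * @work: pointer to the clk_gating.gate_work work item
 *
 * Bails out if new requests arrived or gating was suspended in the meantime;
 * on hibern8 entry failure the host is reset and restored.
 */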
1590 static void ufshcd_gate_work(struct work_struct *work)
1591 {
1592 struct ufs_hba *hba = container_of(work, struct ufs_hba,
1593 clk_gating.gate_work.work);
1594 bool gating_allowed = !ufshcd_can_fake_clkgating(hba);
1595 unsigned long flags;
1596
1597 spin_lock_irqsave(hba->host->host_lock, flags);
1598 /*
1599 * In case you are here to cancel this work the gating state
1600 * would be marked as REQ_CLKS_ON. In this case save time by
1601 * skipping the gating work and exit after changing the clock
1602 * state to CLKS_ON.
1603 */
1604 if (hba->clk_gating.is_suspended ||
1605 (hba->clk_gating.state == REQ_CLKS_ON)) {
1606 hba->clk_gating.state = CLKS_ON;
1607 trace_ufshcd_clk_gating(dev_name(hba->dev),
1608 hba->clk_gating.state);
1609 goto rel_lock;
1610 }
1611
1612 if (hba->clk_gating.active_reqs
1613 || hba->ufshcd_state != UFSHCD_STATE_OPERATIONAL
1614 || hba->lrb_in_use || hba->outstanding_tasks
1615 || hba->active_uic_cmd || hba->uic_async_done
1616 || scsi_host_in_recovery(hba->host))
1617 goto rel_lock;
1618
1619 spin_unlock_irqrestore(hba->host->host_lock, flags);
1620
1621 /* put the link into hibern8 mode before turning off clocks */
1622 if (ufshcd_can_hibern8_during_gating(hba)) {
1623 ufshcd_set_link_trans_hibern8(hba);
1624 if (ufshcd_link_hibern8_ctrl(hba, true)) {
1625 spin_lock_irqsave(hba->host->host_lock, flags);
1626 hba->clk_gating.state = __CLKS_ON;
1627 spin_unlock_irqrestore(hba->host->host_lock, flags);
1628 hba->clk_gating.is_suspended = true;
1629 ufshcd_reset_and_restore(hba);
1630 spin_lock_irqsave(hba->host->host_lock, flags);
1631 hba->clk_gating.state = CLKS_ON;
1632 spin_unlock_irqrestore(hba->host->host_lock, flags);
1633 hba->clk_gating.is_suspended = false;
1634 scsi_unblock_requests(hba->host);
1635 trace_ufshcd_clk_gating(dev_name(hba->dev),
1636 hba->clk_gating.state);
1637 goto out;
1638 }
1639 ufshcd_set_link_hibern8(hba);
1640 }
1641
1642 if (gating_allowed) {
1643 if (!ufshcd_is_link_active(hba))
1644 ufshcd_setup_clocks(hba, false);
1645 else
1646 /* If link is active, device ref_clk can't be switched off */
1647 __ufshcd_setup_clocks(hba, false, true);
1648 }
1649
1650 /*
1651 * In case you are here to cancel this work the gating state
1652 * would be marked as REQ_CLKS_ON. In this case keep the state
1653 * as REQ_CLKS_ON which would anyway imply that clocks are off
1654 * and a request to turn them on is pending. By doing it this way,
1655 * we keep the state machine intact and this would ultimately
1656 * prevent us from doing the cancel work multiple times when there are
1657 * new requests arriving before the current cancel work is done.
1658 */
1659 spin_lock_irqsave(hba->host->host_lock, flags);
1660 if (hba->clk_gating.state == REQ_CLKS_OFF) {
1661 hba->clk_gating.state = CLKS_OFF;
1662 trace_ufshcd_clk_gating(dev_name(hba->dev),
1663 hba->clk_gating.state);
1664 }
1665 rel_lock:
1666 spin_unlock_irqrestore(hba->host->host_lock, flags);
1667 out:
1668 return;
1669 }
1670
1671 /* host lock must be held before calling this variant */
1672 static void __ufshcd_release(struct ufs_hba *hba)
1673 {
1674 if (!ufshcd_is_clkgating_allowed(hba))
1675 return;
1676
1677 hba->clk_gating.active_reqs--;
1678
1679 if (hba->clk_gating.active_reqs || hba->clk_gating.is_suspended
1680 || hba->ufshcd_state != UFSHCD_STATE_OPERATIONAL
1681 || hba->lrb_in_use || hba->outstanding_tasks
1682 || hba->active_uic_cmd || hba->uic_async_done
1683 || scsi_host_in_recovery(hba->host)
1684 || ufshcd_eh_in_progress(hba))
1685 return;
1686
1687 hba->clk_gating.state = REQ_CLKS_OFF;
1688 trace_ufshcd_clk_gating(dev_name(hba->dev), hba->clk_gating.state);
1689 queue_delayed_work(hba->ufshcd_workq, &hba->clk_gating.gate_work,
1690 msecs_to_jiffies(hba->clk_gating.delay_ms));
1691 }
1692
1693 void ufshcd_release(struct ufs_hba *hba)
1694 {
1695 unsigned long flags;
1696
1697 spin_lock_irqsave(hba->host->host_lock, flags);
1698 __ufshcd_release(hba);
1699 spin_unlock_irqrestore(hba->host->host_lock, flags);
1700 }
1701 EXPORT_SYMBOL_GPL(ufshcd_release);
1702
1703 static ssize_t ufshcd_clkgate_delay_show(struct device *dev,
1704 struct device_attribute *attr, char *buf)
1705 {
1706 struct ufs_hba *hba = dev_get_drvdata(dev);
1707
1708 return snprintf(buf, PAGE_SIZE, "%lu\n", hba->clk_gating.delay_ms);
1709 }
1710
1711 static ssize_t ufshcd_clkgate_delay_store(struct device *dev,
1712 struct device_attribute *attr, const char *buf, size_t count)
1713 {
1714 struct ufs_hba *hba = dev_get_drvdata(dev);
1715 unsigned long flags, value;
1716
1717 if (kstrtoul(buf, 0, &value))
1718 return -EINVAL;
1719
1720 spin_lock_irqsave(hba->host->host_lock, flags);
1721 hba->clk_gating.delay_ms = value;
1722 spin_unlock_irqrestore(hba->host->host_lock, flags);
1723 return count;
1724 }
1725
1726 static ssize_t ufshcd_clkgate_enable_show(struct device *dev,
1727 struct device_attribute *attr, char *buf)
1728 {
1729 struct ufs_hba *hba = dev_get_drvdata(dev);
1730
1731 return snprintf(buf, PAGE_SIZE, "%d\n", hba->clk_gating.is_enabled);
1732 }
1733
1734 static ssize_t ufshcd_clkgate_enable_store(struct device *dev,
1735 struct device_attribute *attr, const char *buf, size_t count)
1736 {
1737 struct ufs_hba *hba = dev_get_drvdata(dev);
1738 unsigned long flags;
1739 u32 value;
1740
1741 if (kstrtou32(buf, 0, &value))
1742 return -EINVAL;
1743
1744 value = !!value;
1745 if (value == hba->clk_gating.is_enabled)
1746 goto out;
1747
1748 if (value) {
1749 ufshcd_release(hba);
1750 } else {
1751 spin_lock_irqsave(hba->host->host_lock, flags);
1752 hba->clk_gating.active_reqs++;
1753 spin_unlock_irqrestore(hba->host->host_lock, flags);
1754 }
1755
1756 hba->clk_gating.is_enabled = value;
1757 out:
1758 return count;
1759 }
1760
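/**
 * ufshcd_init_clk_gating - set up the clock gating workqueue, work items and
 * sysfs attributes
 * @hba: per adapter instance
 *
 * Returns 0 on success, -ENOMEM if the workqueue cannot be allocated.
 */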
1761 static int ufshcd_init_clk_gating(struct ufs_hba *hba)
1762 {
1763 int ret = 0;
1764
1765 if (!ufshcd_is_clkgating_allowed(hba))
1766 goto out;
1767
1768 hba->ufshcd_workq = alloc_workqueue("ufshcd_wq", WQ_HIGHPRI, 0);
1769 if (!hba->ufshcd_workq) {
1770 ret = -ENOMEM;
1771 goto out;
1772 }
1773
1774 hba->clk_gating.delay_ms = LINK_H8_DELAY;
1775 INIT_DELAYED_WORK(&hba->clk_gating.gate_work, ufshcd_gate_work);
1776 INIT_WORK(&hba->clk_gating.ungate_work, ufshcd_ungate_work);
1777
1778 hba->clk_gating.is_enabled = true;
1779
1780 hba->clk_gating.delay_attr.show = ufshcd_clkgate_delay_show;
1781 hba->clk_gating.delay_attr.store = ufshcd_clkgate_delay_store;
1782 sysfs_attr_init(&hba->clk_gating.delay_attr.attr);
1783 hba->clk_gating.delay_attr.attr.name = "clkgate_delay_ms";
1784 hba->clk_gating.delay_attr.attr.mode = 0644;
1785 if (device_create_file(hba->dev, &hba->clk_gating.delay_attr))
1786 dev_err(hba->dev, "Failed to create sysfs for clkgate_delay\n");
1787
1788 hba->clk_gating.enable_attr.show = ufshcd_clkgate_enable_show;
1789 hba->clk_gating.enable_attr.store = ufshcd_clkgate_enable_store;
1790 sysfs_attr_init(&hba->clk_gating.enable_attr.attr);
1791 hba->clk_gating.enable_attr.attr.name = "clkgate_enable";
1792 hba->clk_gating.enable_attr.attr.mode = 0644;
1793 if (device_create_file(hba->dev, &hba->clk_gating.enable_attr))
1794 dev_err(hba->dev, "Failed to create sysfs for clkgate_enable\n");
1795
1796 out:
1797 return ret;
1798 }
1799
1800 static void ufshcd_exit_clk_gating(struct ufs_hba *hba)
1801 {
1802 if (!ufshcd_is_clkgating_allowed(hba))
1803 return;
1804 destroy_workqueue(hba->ufshcd_workq);
1805 device_remove_file(hba->dev, &hba->clk_gating.delay_attr);
1806 device_remove_file(hba->dev, &hba->clk_gating.enable_attr);
1807 }
1808
1809 #if defined(CONFIG_PM_DEVFREQ)
1810 /* Must be called with host lock acquired */
1811 static void ufshcd_clk_scaling_start_busy(struct ufs_hba *hba)
1812 {
1813 bool queue_resume_work = false;
1814
1815 if (!ufshcd_is_clkscaling_supported(hba))
1816 return;
1817
1818 if (!hba->clk_scaling.active_reqs++)
1819 queue_resume_work = true;
1820
1821 if (!hba->clk_scaling.is_allowed || hba->pm_op_in_progress)
1822 return;
1823
1824 if (queue_resume_work)
1825 queue_work(hba->clk_scaling.workq,
1826 &hba->clk_scaling.resume_work);
1827
1828 if (!hba->clk_scaling.window_start_t) {
1829 hba->clk_scaling.window_start_t = jiffies;
1830 hba->clk_scaling.tot_busy_t = 0;
1831 hba->clk_scaling.is_busy_started = false;
1832 }
1833
1834 if (!hba->clk_scaling.is_busy_started) {
1835 hba->clk_scaling.busy_start_t = ktime_get();
1836 hba->clk_scaling.is_busy_started = true;
1837 }
1838 }
1839
1840 static void ufshcd_clk_scaling_update_busy(struct ufs_hba *hba)
1841 {
1842 struct ufs_clk_scaling *scaling = &hba->clk_scaling;
1843
1844 if (!ufshcd_is_clkscaling_supported(hba))
1845 return;
1846
1847 if (!hba->outstanding_reqs && scaling->is_busy_started) {
1848 scaling->tot_busy_t += ktime_to_us(ktime_sub(ktime_get(),
1849 scaling->busy_start_t));
1850 scaling->busy_start_t = 0;
1851 scaling->is_busy_started = false;
1852 }
1853 }
1854 #endif
1855
1856 /**
1857 * ufshcd_send_command - Send SCSI or device management commands
1858 * @hba: per adapter instance
1859 * @task_tag: Task tag of the command
1860 */
1861 static inline
1862 void ufshcd_send_command(struct ufs_hba *hba, unsigned int task_tag)
1863 {
1864 hba->lrb[task_tag].issue_time_stamp = ktime_get();
1865 #if defined(CONFIG_PM_DEVFREQ)
1866 ufshcd_clk_scaling_start_busy(hba);
1867 #endif
1868 __set_bit(task_tag, &hba->outstanding_reqs);
1869 ufshcd_writel(hba, 1 << task_tag, REG_UTP_TRANSFER_REQ_DOOR_BELL);
1870 /* Make sure that doorbell is committed immediately */
1871 wmb();
1872 ufshcd_add_command_trace(hba, task_tag, "send");
1873 }
1874
1875 /**
1876 * ufshcd_copy_sense_data - Copy sense data in case of check condition
1877 * @lrb - pointer to local reference block
1878 */
1879 static inline void ufshcd_copy_sense_data(struct ufshcd_lrb *lrbp)
1880 {
1881 int len;
1882 if (lrbp->sense_buffer &&
1883 ufshcd_get_rsp_upiu_data_seg_len(lrbp->ucd_rsp_ptr)) {
1884 int len_to_copy;
1885
1886 len = be16_to_cpu(lrbp->ucd_rsp_ptr->sr.sense_data_len);
1887 len_to_copy = min_t(int, RESPONSE_UPIU_SENSE_DATA_LENGTH, len);
1888
1889 memcpy(lrbp->sense_buffer,
1890 lrbp->ucd_rsp_ptr->sr.sense_data,
1891 min_t(int, len_to_copy, UFSHCD_REQ_SENSE_SIZE));
1892 }
1893 }
1894
1895 /**
1896 * ufshcd_copy_query_response() - Copy the Query Response and the data
1897 * descriptor
1898 * @hba: per adapter instance
1899 * @lrb - pointer to local reference block
1900 */
1901 static
1902 int ufshcd_copy_query_response(struct ufs_hba *hba, struct ufshcd_lrb *lrbp)
1903 {
1904 struct ufs_query_res *query_res = &hba->dev_cmd.query.response;
1905
1906 memcpy(&query_res->upiu_res, &lrbp->ucd_rsp_ptr->qr, QUERY_OSF_SIZE);
1907
1908 /* Get the descriptor */
1909 if (lrbp->ucd_rsp_ptr->qr.opcode == UPIU_QUERY_OPCODE_READ_DESC) {
1910 u8 *descp = (u8 *)lrbp->ucd_rsp_ptr +
1911 GENERAL_UPIU_REQUEST_SIZE;
1912 u16 resp_len;
1913 u16 buf_len;
1914
1915 /* data segment length */
1916 resp_len = be32_to_cpu(lrbp->ucd_rsp_ptr->header.dword_2) &
1917 MASK_QUERY_DATA_SEG_LEN;
1918 buf_len = be16_to_cpu(
1919 hba->dev_cmd.query.request.upiu_req.length);
1920 if (likely(buf_len >= resp_len)) {
1921 memcpy(hba->dev_cmd.query.descriptor, descp, resp_len);
1922 } else {
1923 dev_warn(hba->dev,
1924 "%s: Response size is bigger than buffer",
1925 __func__);
1926 return -EINVAL;
1927 }
1928 }
1929
1930 return 0;
1931 }
1932
1933 /**
1934 * ufshcd_hba_capabilities - Read controller capabilities
1935 * @hba: per adapter instance
1936 */
1937 static inline void ufshcd_hba_capabilities(struct ufs_hba *hba)
1938 {
1939 hba->capabilities = ufshcd_readl(hba, REG_CONTROLLER_CAPABILITIES);
1940
1941 /* nutrs and nutmrs are 0 based values */
1942 hba->nutrs = (hba->capabilities & MASK_TRANSFER_REQUESTS_SLOTS) + 1;
1943 hba->nutmrs =
1944 ((hba->capabilities & MASK_TASK_MANAGEMENT_REQUEST_SLOTS) >> 16) + 1;
1945 }
1946
1947 /**
1948 * ufshcd_ready_for_uic_cmd - Check if controller is ready
1949 * to accept UIC commands
1950 * @hba: per adapter instance
1951 * Return true on success, else false
1952 */
1953 static inline bool ufshcd_ready_for_uic_cmd(struct ufs_hba *hba)
1954 {
1955 if (ufshcd_readl(hba, REG_CONTROLLER_STATUS) & UIC_COMMAND_READY)
1956 return true;
1957 else
1958 return false;
1959 }
1960
1961 /**
1962 * ufshcd_get_upmcrs - Get the power mode change request status
1963 * @hba: Pointer to adapter instance
1964 *
1965 * This function gets the UPMCRS field of HCS register
1966 * Returns value of UPMCRS field
1967 */
1968 static inline u8 ufshcd_get_upmcrs(struct ufs_hba *hba, struct uic_command *cmd)
1969 {
1970 if (hba->quirks & UFSHCD_QUIRK_GET_GENERRCODE_DIRECT) {
1971 if (cmd->command == UIC_CMD_DME_SET &&
1972 cmd->argument1 == UIC_ARG_MIB(PA_PWRMODE))
1973 return ufshcd_vops_get_unipro(hba, 3);
1974 else if (cmd->command == UIC_CMD_DME_HIBER_ENTER)
1975 return ufshcd_vops_get_unipro(hba, 4);
1976 else if (cmd->command == UIC_CMD_DME_HIBER_EXIT)
1977 return ufshcd_vops_get_unipro(hba, 5);
1978 else
1979 return (ufshcd_readl(hba, REG_CONTROLLER_STATUS) >> 8) & 0x7;
1980 } else
1981 return (ufshcd_readl(hba, REG_CONTROLLER_STATUS) >> 8) & 0x7;
1982 }
1983
1984 /**
1985 * ufshcd_dispatch_uic_cmd - Dispatch UIC commands to unipro layers
1986 * @hba: per adapter instance
1987 * @uic_cmd: UIC command
1988 *
1989 * Mutex must be held.
1990 */
1991 static inline void
1992 ufshcd_dispatch_uic_cmd(struct ufs_hba *hba, struct uic_command *uic_cmd)
1993 {
1994 WARN_ON(hba->active_uic_cmd);
1995
1996 hba->active_uic_cmd = uic_cmd;
1997
1998 /* Write Args */
1999 ufshcd_writel(hba, uic_cmd->argument1, REG_UIC_COMMAND_ARG_1);
2000 ufshcd_writel(hba, uic_cmd->argument2, REG_UIC_COMMAND_ARG_2);
2001 ufshcd_writel(hba, uic_cmd->argument3, REG_UIC_COMMAND_ARG_3);
2002
2003 /* Write UIC Cmd */
2004 ufshcd_writel(hba, uic_cmd->command & COMMAND_OPCODE_MASK,
2005 REG_UIC_COMMAND);
2006 }
2007
2008 /**
2009 * ufshcd_wait_for_uic_cmd - Wait for completion of a UIC command
2010 * @hba: per adapter instance
2011 * @uic_cmd: UIC command
2012 *
2013 * Must be called with mutex held.
2014 * Returns 0 only if success.
2015 */
2016 static int
2017 ufshcd_wait_for_uic_cmd(struct ufs_hba *hba, struct uic_command *uic_cmd)
2018 {
2019 int ret;
2020 unsigned long flags;
2021
2022 if (wait_for_completion_timeout(&uic_cmd->done,
2023 msecs_to_jiffies(UIC_CMD_TIMEOUT))) {
2024 switch (uic_cmd->command) {
2025 case UIC_CMD_DME_LINK_STARTUP:
2026 case UIC_CMD_DME_HIBER_ENTER:
2027 case UIC_CMD_DME_HIBER_EXIT:
2028 if (hba->quirks & UFSHCD_QUIRK_GET_GENERRCODE_DIRECT)
2029 ret = ufshcd_vops_get_unipro(hba, uic_cmd->command - UIC_CMD_DME_LINK_STARTUP);
2030 else
2031 ret = uic_cmd->argument2 & MASK_UIC_COMMAND_RESULT;
2032 break;
2033 default:
2034 ret = uic_cmd->argument2 & MASK_UIC_COMMAND_RESULT;
2035 break;
2036 }
2037 } else
2038 ret = -ETIMEDOUT;
2039
2040 spin_lock_irqsave(hba->host->host_lock, flags);
2041 hba->active_uic_cmd = NULL;
2042 spin_unlock_irqrestore(hba->host->host_lock, flags);
2043
2044 return ret;
2045 }
2046
2047 /**
2048 * __ufshcd_send_uic_cmd - Send UIC commands and retrieve the result
2049 * @hba: per adapter instance
2050 * @uic_cmd: UIC command
2051 * @completion: initialize the completion only if this is set to true
2052 *
2053 * Identical to ufshcd_send_uic_cmd() except for locking. Must be called
2054 * with the mutex held and the host_lock locked.
2055 * Returns 0 only if success.
2056 */
2057 static int
2058 __ufshcd_send_uic_cmd(struct ufs_hba *hba, struct uic_command *uic_cmd,
2059 bool completion)
2060 {
2061 if (!ufshcd_ready_for_uic_cmd(hba)) {
2062 dev_err(hba->dev,
2063 "Controller not ready to accept UIC commands\n");
2064 return -EIO;
2065 }
2066
2067 if (completion)
2068 init_completion(&uic_cmd->done);
2069
2070 ufshcd_dispatch_uic_cmd(hba, uic_cmd);
2071
2072 return 0;
2073 }
2074
2075 /**
2076 * ufshcd_send_uic_cmd - Send UIC commands and retrieve the result
2077 * @hba: per adapter instance
2078 * @uic_cmd: UIC command
2079 *
2080 * Returns 0 only if success.
2081 */
2082 static int
2083 ufshcd_send_uic_cmd(struct ufs_hba *hba, struct uic_command *uic_cmd)
2084 {
2085 int ret;
2086 unsigned long flags;
2087
2088 ufshcd_hold(hba, false);
2089 mutex_lock(&hba->uic_cmd_mutex);
2090 ufshcd_add_delay_before_dme_cmd(hba);
2091
2092 spin_lock_irqsave(hba->host->host_lock, flags);
2093 ret = __ufshcd_send_uic_cmd(hba, uic_cmd, true);
2094 spin_unlock_irqrestore(hba->host->host_lock, flags);
2095 if (!ret)
2096 ret = ufshcd_wait_for_uic_cmd(hba, uic_cmd);
2097
2098 mutex_unlock(&hba->uic_cmd_mutex);
2099
2100 ufshcd_release(hba);
2101 return ret;
2102 }
2103
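/*
 * Illustrative sketch, not part of the driver: issuing a bare DME_GET
 * through ufshcd_send_uic_cmd(). Reading the result back from
 * uic_cmd.argument3 is an assumption here; ufshcd_dme_get_attr() further
 * below is the proper wrapper (with retries and quirk handling).
 */
#if 0
static int example_raw_dme_get(struct ufs_hba *hba, u32 attr_sel, u32 *val)
{
	struct uic_command uic_cmd = {0};
	int ret;

	uic_cmd.command = UIC_CMD_DME_GET;
	uic_cmd.argument1 = attr_sel;		/* e.g. UIC_ARG_MIB(PA_PWRMODE) */

	ret = ufshcd_send_uic_cmd(hba, &uic_cmd);
	if (!ret && val)
		*val = uic_cmd.argument3;	/* assumed result location */

	return ret;
}
#endif
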
2104 /**
2105 * ufshcd_map_sg - Map scatter-gather list to prdt
2106 * @lrbp - pointer to local reference block
2107 *
2108 * Returns 0 in case of success, non-zero value in case of failure
2109 */
2110 static int ufshcd_map_sg(struct ufs_hba *hba, struct ufshcd_lrb *lrbp)
2111 {
2112 struct ufshcd_sg_entry *prd_table;
2113 struct scatterlist *sg;
2114 struct scsi_cmnd *cmd;
2115 int sg_segments;
2116 int i, ret;
2117 int sector_offset = 0;
2118 int page_index = 0;
2119
2120 cmd = lrbp->cmd;
2121 sg_segments = scsi_dma_map(cmd);
2122 if (sg_segments < 0)
2123 return sg_segments;
2124
2125 if (sg_segments) {
2126 if (hba->quirks & UFSHCD_QUIRK_PRDT_BYTE_GRAN)
2127 lrbp->utr_descriptor_ptr->prd_table_length =
2128 cpu_to_le16((u16)(sg_segments *
2129 sizeof(struct ufshcd_sg_entry)));
2130 else
2131 lrbp->utr_descriptor_ptr->prd_table_length =
2132 cpu_to_le16((u16) (sg_segments));
2133
2134 prd_table = (struct ufshcd_sg_entry *)lrbp->ucd_prdt_ptr;
2135
2136 scsi_for_each_sg(cmd, sg, sg_segments, i) {
2137 prd_table[i].size =
2138 cpu_to_le32(((u32) sg_dma_len(sg))-1);
2139 prd_table[i].base_addr =
2140 cpu_to_le32(lower_32_bits(sg->dma_address));
2141 prd_table[i].upper_addr =
2142 cpu_to_le32(upper_32_bits(sg->dma_address));
2143 prd_table[i].reserved = 0;
2144 hba->transferred_sector += prd_table[i].size;
2145
2146 ret = ufshcd_vops_crypto_engine_cfg(hba, lrbp, sg, i, sector_offset, page_index++);
2147 if (ret) {
2148 dev_err(hba->dev,
2149 "%s: failed to configure crypto engine (%d)\n",
2150 __func__, ret);
2151 return ret;
2152 }
2153 sector_offset += UFSHCI_SECTOR_SIZE / MIN_SECTOR_SIZE;
2154 }
2155 } else {
2156 lrbp->utr_descriptor_ptr->prd_table_length = 0;
2157 }
2158
2159 return 0;
2160 }
2161
2162 /**
2163 * ufshcd_enable_intr - enable interrupts
2164 * @hba: per adapter instance
2165 * @intrs: interrupt bits
2166 */
2167 static void ufshcd_enable_intr(struct ufs_hba *hba, u32 intrs)
2168 {
2169 u32 set = ufshcd_readl(hba, REG_INTERRUPT_ENABLE);
2170
2171 if (hba->ufs_version == UFSHCI_VERSION_10) {
2172 u32 rw;
2173 rw = set & INTERRUPT_MASK_RW_VER_10;
2174 set = rw | ((set ^ intrs) & intrs);
2175 } else {
2176 set |= intrs;
2177 }
2178
2179 ufshcd_writel(hba, set, REG_INTERRUPT_ENABLE);
2180 }
2181
2182 /**
2183 * ufshcd_disable_intr - disable interrupts
2184 * @hba: per adapter instance
2185 * @intrs: interrupt bits
2186 */
2187 static void ufshcd_disable_intr(struct ufs_hba *hba, u32 intrs)
2188 {
2189 u32 set = ufshcd_readl(hba, REG_INTERRUPT_ENABLE);
2190
2191 if (hba->ufs_version == UFSHCI_VERSION_10) {
2192 u32 rw;
2193 rw = (set & INTERRUPT_MASK_RW_VER_10) &
2194 ~(intrs & INTERRUPT_MASK_RW_VER_10);
2195 set = rw | ((set & intrs) & ~INTERRUPT_MASK_RW_VER_10);
2196
2197 } else {
2198 set &= ~intrs;
2199 }
2200
2201 ufshcd_writel(hba, set, REG_INTERRUPT_ENABLE);
2202 }
2203
2204 /**
2205 * ufshcd_prepare_req_desc_hdr() - Fill the request descriptor header
2206 * according to the request
2207 * @lrbp: pointer to local reference block
2208 * @upiu_flags: flags required in the header
2209 * @cmd_dir: request's data direction
2210 */
2211 static void ufshcd_prepare_req_desc_hdr(struct ufshcd_lrb *lrbp,
2212 u32 *upiu_flags, enum dma_data_direction cmd_dir)
2213 {
2214 struct utp_transfer_req_desc *req_desc = lrbp->utr_descriptor_ptr;
2215 u32 data_direction;
2216 u32 dword_0;
2217
2218 if (cmd_dir == DMA_FROM_DEVICE) {
2219 data_direction = UTP_DEVICE_TO_HOST;
2220 *upiu_flags = UPIU_CMD_FLAGS_READ;
2221 } else if (cmd_dir == DMA_TO_DEVICE) {
2222 data_direction = UTP_HOST_TO_DEVICE;
2223 *upiu_flags = UPIU_CMD_FLAGS_WRITE;
2224 } else {
2225 data_direction = UTP_NO_DATA_TRANSFER;
2226 *upiu_flags = UPIU_CMD_FLAGS_NONE;
2227 }
2228
2229 dword_0 = data_direction | (lrbp->command_type
2230 << UPIU_COMMAND_TYPE_OFFSET);
2231 if (lrbp->intr_cmd)
2232 dword_0 |= UTP_REQ_DESC_INT_CMD;
2233
2234 /* Transfer request descriptor header fields */
2235 req_desc->header.dword_0 = cpu_to_le32(dword_0);
2236 /* dword_1 is reserved, hence it is set to 0 */
2237 req_desc->header.dword_1 = 0;
2238 /*
2239 * Assign an invalid value for the command status; the controller
2240 * updates OCS with the actual status on command completion.
2242 */
2243 req_desc->header.dword_2 =
2244 cpu_to_le32(OCS_INVALID_COMMAND_STATUS);
2245 /* dword_3 is reserved, hence it is set to 0 */
2246 req_desc->header.dword_3 = 0;
2247
2248 req_desc->prd_table_length = 0;
2249 }
2250
2251 /**
2252 * ufshcd_prepare_utp_scsi_cmd_upiu() - fills the utp_transfer_req_desc,
2253 * for scsi commands
2254 * @lrbp - local reference block pointer
2255 * @upiu_flags - flags
2256 */
2257 static
2258 void ufshcd_prepare_utp_scsi_cmd_upiu(struct ufshcd_lrb *lrbp, u32 upiu_flags)
2259 {
2260 struct utp_upiu_req *ucd_req_ptr = lrbp->ucd_req_ptr;
2261 unsigned short cdb_len;
2262
2263 /* command descriptor fields */
2264 ucd_req_ptr->header.dword_0 = UPIU_HEADER_DWORD(
2265 UPIU_TRANSACTION_COMMAND, upiu_flags,
2266 lrbp->lun, lrbp->task_tag);
2267 ucd_req_ptr->header.dword_1 = UPIU_HEADER_DWORD(
2268 UPIU_COMMAND_SET_TYPE_SCSI, 0, 0, 0);
2269
2270 /* Total EHS length and Data segment length will be zero */
2271 ucd_req_ptr->header.dword_2 = 0;
2272
2273 ucd_req_ptr->sc.exp_data_transfer_len =
2274 cpu_to_be32(lrbp->cmd->sdb.length);
2275
2276 cdb_len = min_t(unsigned short, lrbp->cmd->cmd_len, MAX_CDB_SIZE);
2277 memset(ucd_req_ptr->sc.cdb, 0, MAX_CDB_SIZE);
2278 memcpy(ucd_req_ptr->sc.cdb, lrbp->cmd->cmnd, cdb_len);
2279
2280 memset(lrbp->ucd_rsp_ptr, 0, sizeof(struct utp_upiu_rsp));
2281 }
2282
2283 /**
2284 * ufshcd_prepare_utp_query_req_upiu() - fills the utp_transfer_req_desc,
2285 * for query requsts
2286 * @hba: UFS hba
2287 * @lrbp: local reference block pointer
2288 * @upiu_flags: flags
2289 */
2290 static void ufshcd_prepare_utp_query_req_upiu(struct ufs_hba *hba,
2291 struct ufshcd_lrb *lrbp, u32 upiu_flags)
2292 {
2293 struct utp_upiu_req *ucd_req_ptr = lrbp->ucd_req_ptr;
2294 struct ufs_query *query = &hba->dev_cmd.query;
2295 u16 len = be16_to_cpu(query->request.upiu_req.length);
2296 u8 *descp = (u8 *)lrbp->ucd_req_ptr + GENERAL_UPIU_REQUEST_SIZE;
2297
2298 /* Query request header */
2299 ucd_req_ptr->header.dword_0 = UPIU_HEADER_DWORD(
2300 UPIU_TRANSACTION_QUERY_REQ, upiu_flags,
2301 lrbp->lun, lrbp->task_tag);
2302 ucd_req_ptr->header.dword_1 = UPIU_HEADER_DWORD(
2303 0, query->request.query_func, 0, 0);
2304
2305 if (query->request.upiu_req.opcode == UPIU_QUERY_OPCODE_READ_DESC)
2306 len = 0;
2307
2308 /* Data segment length is only needed for WRITE_DESC */
2309 if (query->request.upiu_req.opcode == UPIU_QUERY_OPCODE_WRITE_DESC)
2310 ucd_req_ptr->header.dword_2 =
2311 UPIU_HEADER_DWORD(0, 0, (len >> 8), (u8)len);
2312 else
2313 ucd_req_ptr->header.dword_2 = 0;
2314
2315 /* Copy the Query Request buffer as is */
2316 memcpy(&ucd_req_ptr->qr, &query->request.upiu_req,
2317 QUERY_OSF_SIZE);
2318
2319 /* Copy the Descriptor */
2320 if (query->request.upiu_req.opcode == UPIU_QUERY_OPCODE_WRITE_DESC)
2321 memcpy(descp, query->descriptor, len);
2322
2323 memset(lrbp->ucd_rsp_ptr, 0, sizeof(struct utp_upiu_rsp));
2324 }
2325
2326 static inline void ufshcd_prepare_utp_nop_upiu(struct ufshcd_lrb *lrbp)
2327 {
2328 struct utp_upiu_req *ucd_req_ptr = lrbp->ucd_req_ptr;
2329
2330 memset(ucd_req_ptr, 0, sizeof(struct utp_upiu_req));
2331
2332 /* command descriptor fields */
2333 ucd_req_ptr->header.dword_0 =
2334 UPIU_HEADER_DWORD(
2335 UPIU_TRANSACTION_NOP_OUT, 0, 0, lrbp->task_tag);
2336 /* clear rest of the fields of basic header */
2337 ucd_req_ptr->header.dword_1 = 0;
2338 ucd_req_ptr->header.dword_2 = 0;
2339
2340 memset(lrbp->ucd_rsp_ptr, 0, sizeof(struct utp_upiu_rsp));
2341 }
2342
2343 /**
2344 * ufshcd_comp_devman_upiu - UFS Protocol Information Unit(UPIU)
2345 * for Device Management Purposes
2346 * @hba - per adapter instance
2347 * @lrb - pointer to local reference block
2348 */
2349 static int ufshcd_comp_devman_upiu(struct ufs_hba *hba, struct ufshcd_lrb *lrbp)
2350 {
2351 u32 upiu_flags;
2352 int ret = 0;
2353
2354 if ((hba->ufs_version == UFSHCI_VERSION_10) ||
2355 (hba->ufs_version == UFSHCI_VERSION_11))
2356 lrbp->command_type = UTP_CMD_TYPE_DEV_MANAGE;
2357 else
2358 lrbp->command_type = UTP_CMD_TYPE_UFS_STORAGE;
2359
2360 ufshcd_prepare_req_desc_hdr(lrbp, &upiu_flags, DMA_NONE);
2361 if (hba->dev_cmd.type == DEV_CMD_TYPE_QUERY)
2362 ufshcd_prepare_utp_query_req_upiu(hba, lrbp, upiu_flags);
2363 else if (hba->dev_cmd.type == DEV_CMD_TYPE_NOP)
2364 ufshcd_prepare_utp_nop_upiu(lrbp);
2365 else
2366 ret = -EINVAL;
2367
2368 return ret;
2369 }
2370
2371 /**
2372 * ufshcd_comp_scsi_upiu - UFS Protocol Information Unit(UPIU)
2373 * for SCSI Purposes
2374 * @hba - per adapter instance
2375 * @lrb - pointer to local reference block
2376 */
2377 static int ufshcd_comp_scsi_upiu(struct ufs_hba *hba, struct ufshcd_lrb *lrbp)
2378 {
2379 u32 upiu_flags;
2380 int ret = 0;
2381
2382 if ((hba->ufs_version == UFSHCI_VERSION_10) ||
2383 (hba->ufs_version == UFSHCI_VERSION_11))
2384 lrbp->command_type = UTP_CMD_TYPE_SCSI;
2385 else
2386 lrbp->command_type = UTP_CMD_TYPE_UFS_STORAGE;
2387
2388 if (likely(lrbp->cmd)) {
2389 ufshcd_prepare_req_desc_hdr(lrbp, &upiu_flags,
2390 lrbp->cmd->sc_data_direction);
2391 ufshcd_prepare_utp_scsi_cmd_upiu(lrbp, upiu_flags);
2392 } else {
2393 ret = -EINVAL;
2394 }
2395
2396 return ret;
2397 }
2398
2399 /*
2400 * ufshcd_scsi_to_upiu_lun - maps scsi LUN to UPIU LUN
2401 * @scsi_lun: scsi LUN id
2402 *
2403 * Returns UPIU LUN id
2404 */
2405 static inline u8 ufshcd_scsi_to_upiu_lun(unsigned int scsi_lun)
2406 {
2407 if (scsi_is_wlun(scsi_lun))
2408 return (scsi_lun & UFS_UPIU_MAX_UNIT_NUM_ID)
2409 | UFS_UPIU_WLUN_ID;
2410 else
2411 return scsi_lun & UFS_UPIU_MAX_UNIT_NUM_ID;
2412 }
2413
2414 static inline unsigned int ufshcd_get_scsi_lun(struct scsi_cmnd *cmd)
2415 {
2416 if (cmd->cmnd[0] == SECURITY_PROTOCOL_IN ||
2417 cmd->cmnd[0] == SECURITY_PROTOCOL_OUT)
2418 return (SCSI_W_LUN_BASE |
2419 (UFS_UPIU_RPMB_WLUN & UFS_UPIU_MAX_UNIT_NUM_ID));
2420 else
2421 return cmd->device->lun;
2422 }
2423
2424 /**
2425 * ufshcd_upiu_wlun_to_scsi_wlun - maps UPIU W-LUN id to SCSI W-LUN ID
2426 * @scsi_lun: UPIU W-LUN id
2427 *
2428 * Returns SCSI W-LUN id
2429 */
2430 static inline u16 ufshcd_upiu_wlun_to_scsi_wlun(u8 upiu_wlun_id)
2431 {
2432 return (upiu_wlun_id & ~UFS_UPIU_WLUN_ID) | SCSI_W_LUN_BASE;
2433 }
2434
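/*
 * Illustrative sketch, not part of the driver: the SCSI-to-UPIU LUN
 * conversion performed by ufshcd_queuecommand() below, shown for the
 * RPMB well known LU. The helper name is hypothetical.
 */
#if 0
static u8 example_rpmb_upiu_lun(void)
{
	unsigned int scsi_lun = SCSI_W_LUN_BASE |
			(UFS_UPIU_RPMB_WLUN & UFS_UPIU_MAX_UNIT_NUM_ID);

	/* scsi_is_wlun() is true here, so UFS_UPIU_WLUN_ID is set again */
	return ufshcd_scsi_to_upiu_lun(scsi_lun);
}
#endif
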
2435 /**
2436 * ufshcd_queuecommand - main entry point for SCSI requests
2437 * @host: SCSI host pointer
2438 * @cmd: command from SCSI Midlayer
2439 *
2440 * Returns 0 for success, non-zero in case of failure
2441 */
2442 static int ufshcd_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *cmd)
2443 {
2444 struct ufshcd_lrb *lrbp;
2445 struct ufs_hba *hba;
2446 unsigned long flags;
2447 int tag;
2448 int err = 0;
2449 unsigned int scsi_lun;
2450
2451 hba = shost_priv(host);
2452
2453 tag = cmd->request->tag;
2454 if (!ufshcd_valid_tag(hba, tag)) {
2455 dev_err(hba->dev,
2456 "%s: invalid command tag %d: cmd=0x%p, cmd->request=0x%p",
2457 __func__, tag, cmd, cmd->request);
2458 BUG();
2459 }
2460
2461 if (!down_read_trylock(&hba->clk_scaling_lock))
2462 return SCSI_MLQUEUE_HOST_BUSY;
2463
2464 if ((ufs_shutdown_state == 1) && (cmd->cmnd[0] == START_STOP)) {
2465 scsi_block_requests(hba->host);
2466 cancel_work_sync(&hba->clk_gating.ungate_work);
2467 }
2468
2469 spin_lock_irqsave(hba->host->host_lock, flags);
2470 switch (hba->ufshcd_state) {
2471 case UFSHCD_STATE_OPERATIONAL:
2472 break;
2473 case UFSHCD_STATE_EH_SCHEDULED:
2474 case UFSHCD_STATE_RESET:
2475 err = SCSI_MLQUEUE_HOST_BUSY;
2476 goto out_unlock;
2477 case UFSHCD_STATE_ERROR:
2478 set_host_byte(cmd, DID_ERROR);
2479 scsi_dma_map(cmd);
2480 cmd->scsi_done(cmd);
2481 goto out_unlock;
2482 default:
2483 dev_WARN_ONCE(hba->dev, 1, "%s: invalid state %d\n",
2484 __func__, hba->ufshcd_state);
2485 set_host_byte(cmd, DID_BAD_TARGET);
2486 cmd->scsi_done(cmd);
2487 goto out_unlock;
2488 }
2489
2490 /* if error handling is in progress, don't issue commands */
2491 if (ufshcd_eh_in_progress(hba)) {
2492 set_host_byte(cmd, DID_ERROR);
2493 cmd->scsi_done(cmd);
2494 goto out_unlock;
2495 }
2496 spin_unlock_irqrestore(hba->host->host_lock, flags);
2497
2498 hba->req_abort_count = 0;
2499
2500 /* acquire the tag to make sure device cmds don't use it */
2501 if (test_and_set_bit_lock(tag, &hba->lrb_in_use)) {
2502 /*
2503 * Dev manage command in progress, requeue the command.
2504 * Requeuing the command helps in cases where the request *may*
2505 * find a different tag instead of waiting for the dev manage command
2506 * completion.
2507 */
2508 err = SCSI_MLQUEUE_HOST_BUSY;
2509 goto out;
2510 }
2511
2512 err = ufshcd_hold(hba, true);
2513 if (err) {
2514 err = SCSI_MLQUEUE_HOST_BUSY;
2515 clear_bit_unlock(tag, &hba->lrb_in_use);
2516 goto out;
2517 }
2518 WARN_ON(hba->clk_gating.state != CLKS_ON);
2519
2520 lrbp = &hba->lrb[tag];
2521
2522 WARN_ON(lrbp->cmd);
2523 lrbp->cmd = cmd;
2524 lrbp->sense_bufflen = UFSHCD_REQ_SENSE_SIZE;
2525 lrbp->sense_buffer = cmd->sense_buffer;
2526 lrbp->task_tag = tag;
2527
2528 scsi_lun = ufshcd_get_scsi_lun(cmd);
2529 lrbp->lun = ufshcd_scsi_to_upiu_lun(scsi_lun);
2530 lrbp->intr_cmd = !ufshcd_is_intr_aggr_allowed(hba) ? true : false;
2531 lrbp->req_abort_skip = false;
2532
2533 ufshcd_comp_scsi_upiu(hba, lrbp);
2534
2535 err = ufshcd_map_sg(hba, lrbp);
2536 if (err) {
2537 lrbp->cmd = NULL;
2538 clear_bit_unlock(tag, &hba->lrb_in_use);
2539 goto out;
2540 }
2541 /* Make sure descriptors are ready before ringing the doorbell */
2542 wmb();
2543
2544 /* issue command to the controller */
2545 spin_lock_irqsave(hba->host->host_lock, flags);
2546 if (hba->vops && hba->vops->set_nexus_t_xfer_req)
2547 hba->vops->set_nexus_t_xfer_req(hba, tag, lrbp->cmd);
2548 #ifdef CONFIG_SCSI_UFS_CMD_LOGGING
2549 exynos_ufs_cmd_log_start(hba, cmd);
2550 #endif
2551 ufshcd_send_command(hba, tag);
2552
2553 if (hba->monitor.flag & UFSHCD_MONITOR_LEVEL1)
2554 dev_info(hba->dev, "IO issued(%d)\n", tag);
2555 out_unlock:
2556 spin_unlock_irqrestore(hba->host->host_lock, flags);
2557 out:
2558 up_read(&hba->clk_scaling_lock);
2559 return err;
2560 }
2561
2562 static int ufshcd_compose_dev_cmd(struct ufs_hba *hba,
2563 struct ufshcd_lrb *lrbp, enum dev_cmd_type cmd_type, int tag)
2564 {
2565 lrbp->cmd = NULL;
2566 lrbp->sense_bufflen = 0;
2567 lrbp->sense_buffer = NULL;
2568 lrbp->task_tag = tag;
2569 lrbp->lun = 0; /* device management cmd is not specific to any LUN */
2570 lrbp->intr_cmd = true; /* No interrupt aggregation */
2571 hba->dev_cmd.type = cmd_type;
2572
2573 return ufshcd_comp_devman_upiu(hba, lrbp);
2574 }
2575
2576 static int
2577 ufshcd_clear_cmd(struct ufs_hba *hba, int tag)
2578 {
2579 int err = 0;
2580 unsigned long flags;
2581 u32 mask = 1 << tag;
2582
2583 /* clear outstanding transaction before retry */
2584 spin_lock_irqsave(hba->host->host_lock, flags);
2585 ufshcd_utrl_clear(hba, tag);
2586 spin_unlock_irqrestore(hba->host->host_lock, flags);
2587
2588 /*
2589 * wait for h/w to clear the corresponding bit in the door-bell.
2590 * max. wait is 1 sec.
2591 */
2592 err = ufshcd_wait_for_register(hba,
2593 REG_UTP_TRANSFER_REQ_DOOR_BELL,
2594 mask, ~mask, 1000, 1000, true);
2595
2596 return err;
2597 }
2598
2599 static int
2600 ufshcd_check_query_response(struct ufs_hba *hba, struct ufshcd_lrb *lrbp)
2601 {
2602 struct ufs_query_res *query_res = &hba->dev_cmd.query.response;
2603
2604 /* Get the UPIU response */
2605 query_res->response = ufshcd_get_rsp_upiu_result(lrbp->ucd_rsp_ptr) >>
2606 UPIU_RSP_CODE_OFFSET;
2607 return query_res->response;
2608 }
2609
2610 /**
2611 * ufshcd_dev_cmd_completion() - handles device management command responses
2612 * @hba: per adapter instance
2613 * @lrbp: pointer to local reference block
2614 */
2615 static int
2616 ufshcd_dev_cmd_completion(struct ufs_hba *hba, struct ufshcd_lrb *lrbp)
2617 {
2618 int resp;
2619 int err = 0;
2620
2621 hba->ufs_stats.last_hibern8_exit_tstamp = ktime_set(0, 0);
2622 resp = ufshcd_get_req_rsp(lrbp->ucd_rsp_ptr);
2623
2624 switch (resp) {
2625 case UPIU_TRANSACTION_NOP_IN:
2626 if (hba->dev_cmd.type != DEV_CMD_TYPE_NOP) {
2627 err = -EINVAL;
2628 dev_err(hba->dev, "%s: unexpected response %x\n",
2629 __func__, resp);
2630 }
2631 break;
2632 case UPIU_TRANSACTION_QUERY_RSP:
2633 err = ufshcd_check_query_response(hba, lrbp);
2634 if (!err)
2635 err = ufshcd_copy_query_response(hba, lrbp);
2636 break;
2637 case UPIU_TRANSACTION_REJECT_UPIU:
2638 /* TODO: handle Reject UPIU Response */
2639 err = -EPERM;
2640 dev_err(hba->dev, "%s: Reject UPIU not fully implemented\n",
2641 __func__);
2642 break;
2643 default:
2644 err = -EINVAL;
2645 dev_err(hba->dev, "%s: Invalid device management cmd response: %x\n",
2646 __func__, resp);
2647 break;
2648 }
2649
2650 return err;
2651 }
2652
2653 static int ufshcd_wait_for_dev_cmd(struct ufs_hba *hba,
2654 struct ufshcd_lrb *lrbp, int max_timeout)
2655 {
2656 int err = 0;
2657 unsigned long time_left;
2658 unsigned long flags;
2659
2660 time_left = wait_for_completion_timeout(hba->dev_cmd.complete,
2661 msecs_to_jiffies(max_timeout));
2662
2663 /* Make sure descriptors are ready before ringing the doorbell */
2664 wmb();
2665 spin_lock_irqsave(hba->host->host_lock, flags);
2666 hba->dev_cmd.complete = NULL;
2667 if (likely(time_left)) {
2668 err = ufshcd_get_tr_ocs(lrbp);
2669 if (!err)
2670 err = ufshcd_dev_cmd_completion(hba, lrbp);
2671 }
2672 spin_unlock_irqrestore(hba->host->host_lock, flags);
2673
2674 if (!time_left) {
2675 err = -ETIMEDOUT;
2676 dev_dbg(hba->dev, "%s: dev_cmd request timedout, tag %d\n",
2677 __func__, lrbp->task_tag);
2678 if (!ufshcd_clear_cmd(hba, lrbp->task_tag))
2679 /* successfully cleared the command, retry if needed */
2680 err = -EAGAIN;
2681 /*
2682 * in case of an error, after clearing the doorbell,
2683 * we also need to clear the outstanding_request
2684 * field in hba
2685 */
2686 ufshcd_outstanding_req_clear(hba, lrbp->task_tag);
2687 }
2688
2689 return err;
2690 }
2691
2692 /**
2693 * ufshcd_get_dev_cmd_tag - Get device management command tag
2694 * @hba: per-adapter instance
2695 * @tag_out: pointer to variable with available slot value
2696 *
2697 * Get a free slot and lock it until device management command
2698 * completes.
2699 *
2700 * Returns false if a free slot is unavailable for locking, else
2701 * returns true with the tag value in @tag_out.
2702 */
2703 static bool ufshcd_get_dev_cmd_tag(struct ufs_hba *hba, int *tag_out)
2704 {
2705 int tag;
2706 bool ret = false;
2707 unsigned long tmp;
2708
2709 if (!tag_out)
2710 goto out;
2711
2712 do {
2713 tmp = ~hba->lrb_in_use;
2714 tag = find_last_bit(&tmp, hba->nutrs);
2715 if (tag >= hba->nutrs)
2716 goto out;
2717 } while (test_and_set_bit_lock(tag, &hba->lrb_in_use));
2718
2719 *tag_out = tag;
2720 ret = true;
2721 out:
2722 return ret;
2723 }
2724
2725 static inline void ufshcd_put_dev_cmd_tag(struct ufs_hba *hba, int tag)
2726 {
2727 clear_bit_unlock(tag, &hba->lrb_in_use);
2728 }
2729
2730 /**
2731 * ufshcd_exec_dev_cmd - API for sending device management requests
2732 * @hba - UFS hba
2733 * @cmd_type - specifies the type (NOP, Query...)
2734 * @timeout - timeout in milliseconds
2735 *
2736 * NOTE: Since there is only one available tag for device management commands,
2737 * it is expected you hold the hba->dev_cmd.lock mutex.
2738 */
2739 static int ufshcd_exec_dev_cmd(struct ufs_hba *hba,
2740 enum dev_cmd_type cmd_type, int timeout)
2741 {
2742 struct ufshcd_lrb *lrbp;
2743 int err;
2744 int tag;
2745 struct completion wait;
2746 unsigned long flags;
2747
2748 if (!ufshcd_is_link_active(hba)) {
2749 flush_work(&hba->clk_gating.ungate_work);
2750 if (!ufshcd_is_link_active(hba))
2751 return -EPERM;
2752 }
2753
2754 down_read(&hba->clk_scaling_lock);
2755
2756 /*
2757 * Get free slot, sleep if slots are unavailable.
2758 * Even though we use wait_event() which sleeps indefinitely,
2759 * the maximum wait time is bounded by SCSI request timeout.
2760 */
2761 wait_event(hba->dev_cmd.tag_wq, ufshcd_get_dev_cmd_tag(hba, &tag));
2762
2763 init_completion(&wait);
2764 lrbp = &hba->lrb[tag];
2765 WARN_ON(lrbp->cmd);
2766 err = ufshcd_compose_dev_cmd(hba, lrbp, cmd_type, tag);
2767 if (unlikely(err))
2768 goto out_put_tag;
2769
2770 hba->dev_cmd.complete = &wait;
2771
2772 /* Make sure descriptors are ready before ringing the doorbell */
2773 wmb();
2774 spin_lock_irqsave(hba->host->host_lock, flags);
2775 if (hba->vops && hba->vops->set_nexus_t_xfer_req)
2776 hba->vops->set_nexus_t_xfer_req(hba, tag, lrbp->cmd);
2777 ufshcd_send_command(hba, tag);
2778 spin_unlock_irqrestore(hba->host->host_lock, flags);
2779
2780 err = ufshcd_wait_for_dev_cmd(hba, lrbp, timeout);
2781
2782 out_put_tag:
2783 ufshcd_put_dev_cmd_tag(hba, tag);
2784 wake_up(&hba->dev_cmd.tag_wq);
2785 up_read(&hba->clk_scaling_lock);
2786 return err;
2787 }
2788
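/*
 * Illustrative sketch, not part of the driver: sending a NOP OUT as a
 * device management command with the helper above. Note the
 * hba->dev_cmd.lock requirement from the comment above; the 30 ms
 * timeout here is only an example value and the helper name is
 * hypothetical.
 */
#if 0
static int example_send_nop(struct ufs_hba *hba)
{
	int err;

	ufshcd_hold(hba, false);
	mutex_lock(&hba->dev_cmd.lock);
	err = ufshcd_exec_dev_cmd(hba, DEV_CMD_TYPE_NOP, 30 /* ms */);
	mutex_unlock(&hba->dev_cmd.lock);
	ufshcd_release(hba);

	return err;
}
#endif
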
2789 /**
2790 * ufshcd_init_query() - init the query response and request parameters
2791 * @hba: per-adapter instance
2792 * @request: address of the request pointer to be initialized
2793 * @response: address of the response pointer to be initialized
2794 * @opcode: operation to perform
2795 * @idn: flag idn to access
2796 * @index: LU number to access
2797 * @selector: query/flag/descriptor further identification
2798 */
2799 static inline void ufshcd_init_query(struct ufs_hba *hba,
2800 struct ufs_query_req **request, struct ufs_query_res **response,
2801 enum query_opcode opcode, u8 idn, u8 index, u8 selector)
2802 {
2803 *request = &hba->dev_cmd.query.request;
2804 *response = &hba->dev_cmd.query.response;
2805 memset(*request, 0, sizeof(struct ufs_query_req));
2806 memset(*response, 0, sizeof(struct ufs_query_res));
2807 (*request)->upiu_req.opcode = opcode;
2808 (*request)->upiu_req.idn = idn;
2809 (*request)->upiu_req.index = index;
2810 (*request)->upiu_req.selector = selector;
2811 }
2812
2813 static int ufshcd_query_flag_retry(struct ufs_hba *hba,
2814 enum query_opcode opcode, enum flag_idn idn, bool *flag_res)
2815 {
2816 int ret;
2817 int retries;
2818
2819 for (retries = 0; retries < QUERY_REQ_RETRIES; retries++) {
2820 ret = ufshcd_query_flag(hba, opcode, idn, flag_res);
2821 if (ret)
2822 dev_dbg(hba->dev,
2823 "%s: failed with error %d, retries %d\n",
2824 __func__, ret, retries);
2825 else
2826 break;
2827 }
2828
2829 if (ret)
2830 dev_err(hba->dev,
2831 "%s: query attribute, opcode %d, idn %d, failed with error %d after %d retires\n",
2832 __func__, opcode, idn, ret, retries);
2833 return ret;
2834 }
2835
2836 /**
2837 * ufshcd_query_flag() - API function for sending flag query requests
2838 * @hba: per-adapter instance
2839 * @opcode: flag query to perform
2840 * @idn: flag idn to access
2841 * @flag_res: the flag value after the query request completes
2842 *
2843 * Returns 0 for success, non-zero in case of failure
2844 */
2845 int ufshcd_query_flag(struct ufs_hba *hba, enum query_opcode opcode,
2846 enum flag_idn idn, bool *flag_res)
2847 {
2848 struct ufs_query_req *request = NULL;
2849 struct ufs_query_res *response = NULL;
2850 int err, index = 0, selector = 0;
2851 int timeout = QUERY_REQ_TIMEOUT;
2852
2853 BUG_ON(!hba);
2854
2855 ufshcd_hold(hba, false);
2856 mutex_lock(&hba->dev_cmd.lock);
2857 ufshcd_init_query(hba, &request, &response, opcode, idn, index,
2858 selector);
2859
2860 switch (opcode) {
2861 case UPIU_QUERY_OPCODE_SET_FLAG:
2862 case UPIU_QUERY_OPCODE_CLEAR_FLAG:
2863 case UPIU_QUERY_OPCODE_TOGGLE_FLAG:
2864 request->query_func = UPIU_QUERY_FUNC_STANDARD_WRITE_REQUEST;
2865 break;
2866 case UPIU_QUERY_OPCODE_READ_FLAG:
2867 request->query_func = UPIU_QUERY_FUNC_STANDARD_READ_REQUEST;
2868 if (!flag_res) {
2869 /* No dummy reads */
2870 dev_err(hba->dev, "%s: Invalid argument for read request\n",
2871 __func__);
2872 err = -EINVAL;
2873 goto out_unlock;
2874 }
2875 break;
2876 default:
2877 dev_err(hba->dev,
2878 "%s: Expected query flag opcode but got = %d\n",
2879 __func__, opcode);
2880 err = -EINVAL;
2881 goto out_unlock;
2882 }
2883
2884 if (idn == QUERY_FLAG_IDN_FDEVICEINIT)
2885 timeout = QUERY_FDEVICEINIT_REQ_TIMEOUT;
2886
2887 err = ufshcd_exec_dev_cmd(hba, DEV_CMD_TYPE_QUERY, timeout);
2888
2889 if (err) {
2890 dev_err(hba->dev,
2891 "%s: Sending flag query for idn %d failed, err = %d\n",
2892 __func__, idn, err);
2893 goto out_unlock;
2894 }
2895
2896 if (flag_res)
2897 *flag_res = (be32_to_cpu(response->upiu_res.value) &
2898 MASK_QUERY_UPIU_FLAG_LOC) & 0x1;
2899
2900 out_unlock:
2901 mutex_unlock(&hba->dev_cmd.lock);
2902 ufshcd_release(hba);
2903 return err;
2904 }
2905
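/*
 * Illustrative sketch, not part of the driver: setting and then polling
 * the fDeviceInit flag with the retry wrapper above, the way device
 * initialization code would. The retry budget is arbitrary and the
 * helper name is hypothetical.
 */
#if 0
static int example_complete_dev_init(struct ufs_hba *hba)
{
	int i, err;
	bool flag_res = true;

	err = ufshcd_query_flag_retry(hba, UPIU_QUERY_OPCODE_SET_FLAG,
			QUERY_FLAG_IDN_FDEVICEINIT, NULL);
	if (err)
		return err;

	/* Poll until the device clears the flag */
	for (i = 0; i < 1000 && !err && flag_res; i++)
		err = ufshcd_query_flag_retry(hba, UPIU_QUERY_OPCODE_READ_FLAG,
				QUERY_FLAG_IDN_FDEVICEINIT, &flag_res);

	if (!err && flag_res)
		err = -EBUSY;

	return err;
}
#endif
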
2906 /**
2907 * ufshcd_query_attr - API function for sending attribute requests
2908 * @hba: per-adapter instance
2909 * @opcode: attribute opcode
2910 * @idn: attribute idn to access
2911 * @index: index field
2912 * @selector: selector field
2913 * @attr_val: the attribute value after the query request completes
2914 *
2915 * Returns 0 for success, non-zero in case of failure
2916 */
2917 static int ufshcd_query_attr(struct ufs_hba *hba, enum query_opcode opcode,
2918 enum attr_idn idn, u8 index, u8 selector, u32 *attr_val)
2919 {
2920 struct ufs_query_req *request = NULL;
2921 struct ufs_query_res *response = NULL;
2922 int err;
2923
2924 BUG_ON(!hba);
2925
2926 ufshcd_hold(hba, false);
2927 if (!attr_val) {
2928 dev_err(hba->dev, "%s: attribute value required for opcode 0x%x\n",
2929 __func__, opcode);
2930 err = -EINVAL;
2931 goto out;
2932 }
2933
2934 mutex_lock(&hba->dev_cmd.lock);
2935 ufshcd_init_query(hba, &request, &response, opcode, idn, index,
2936 selector);
2937
2938 switch (opcode) {
2939 case UPIU_QUERY_OPCODE_WRITE_ATTR:
2940 request->query_func = UPIU_QUERY_FUNC_STANDARD_WRITE_REQUEST;
2941 request->upiu_req.value = cpu_to_be32(*attr_val);
2942 break;
2943 case UPIU_QUERY_OPCODE_READ_ATTR:
2944 request->query_func = UPIU_QUERY_FUNC_STANDARD_READ_REQUEST;
2945 break;
2946 default:
2947 dev_err(hba->dev, "%s: Expected query attr opcode but got = 0x%.2x\n",
2948 __func__, opcode);
2949 err = -EINVAL;
2950 goto out_unlock;
2951 }
2952
2953 err = ufshcd_exec_dev_cmd(hba, DEV_CMD_TYPE_QUERY, QUERY_REQ_TIMEOUT);
2954
2955 if (err) {
2956 dev_err(hba->dev, "%s: opcode 0x%.2x for idn %d failed, index %d, err = %d\n",
2957 __func__, opcode, idn, index, err);
2958 goto out_unlock;
2959 }
2960
2961 *attr_val = be32_to_cpu(response->upiu_res.value);
2962
2963 out_unlock:
2964 mutex_unlock(&hba->dev_cmd.lock);
2965 out:
2966 ufshcd_release(hba);
2967 return err;
2968 }
2969
2970 /**
2971 * ufshcd_query_attr_retry() - API function for sending query
2972 * attribute with retries
2973 * @hba: per-adapter instance
2974 * @opcode: attribute opcode
2975 * @idn: attribute idn to access
2976 * @index: index field
2977 * @selector: selector field
2978 * @attr_val: the attribute value after the query request
2979 * completes
2980 *
2981 * Returns 0 for success, non-zero in case of failure
2982 */
2983 static int ufshcd_query_attr_retry(struct ufs_hba *hba,
2984 enum query_opcode opcode, enum attr_idn idn, u8 index, u8 selector,
2985 u32 *attr_val)
2986 {
2987 int ret = 0;
2988 u32 retries;
2989
2990 for (retries = QUERY_REQ_RETRIES; retries > 0; retries--) {
2991 ret = ufshcd_query_attr(hba, opcode, idn, index,
2992 selector, attr_val);
2993 if (ret)
2994 dev_dbg(hba->dev, "%s: failed with error %d, retries %d\n",
2995 __func__, ret, retries);
2996 else
2997 break;
2998 }
2999
3000 if (ret)
3001 dev_err(hba->dev,
3002 "%s: query attribute, idn %d, failed with error %d after %d retires\n",
3003 __func__, idn, ret, QUERY_REQ_RETRIES);
3004 return ret;
3005 }
3006
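/*
 * Illustrative sketch, not part of the driver: a write/read round trip
 * of a u32 attribute through the retry wrapper above. The idn, index and
 * selector come from the caller; no particular attribute is assumed and
 * the helper name is hypothetical.
 */
#if 0
static int example_attr_round_trip(struct ufs_hba *hba, enum attr_idn idn,
		u8 index, u8 selector, u32 new_val, u32 *read_back)
{
	int err;

	err = ufshcd_query_attr_retry(hba, UPIU_QUERY_OPCODE_WRITE_ATTR,
			idn, index, selector, &new_val);
	if (err)
		return err;

	return ufshcd_query_attr_retry(hba, UPIU_QUERY_OPCODE_READ_ATTR,
			idn, index, selector, read_back);
}
#endif
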
3007 static int __ufshcd_query_descriptor(struct ufs_hba *hba,
3008 enum query_opcode opcode, enum desc_idn idn, u8 index,
3009 u8 selector, u8 *desc_buf, int *buf_len)
3010 {
3011 struct ufs_query_req *request = NULL;
3012 struct ufs_query_res *response = NULL;
3013 int err = 0;
3014
3015 BUG_ON(!hba);
3016
3017 ufshcd_hold(hba, false);
3018 if (!desc_buf) {
3019 dev_err(hba->dev, "%s: descriptor buffer required for opcode 0x%x\n",
3020 __func__, opcode);
3021 err = -EINVAL;
3022 goto out;
3023 }
3024
3025 if (*buf_len < QUERY_DESC_MIN_SIZE || *buf_len > QUERY_DESC_MAX_SIZE) {
3026 dev_err(hba->dev, "%s: descriptor buffer size (%d) is out of range\n",
3027 __func__, *buf_len);
3028 err = -EINVAL;
3029 goto out;
3030 }
3031
3032 mutex_lock(&hba->dev_cmd.lock);
3033 ufshcd_init_query(hba, &request, &response, opcode, idn, index,
3034 selector);
3035 hba->dev_cmd.query.descriptor = desc_buf;
3036 request->upiu_req.length = cpu_to_be16(*buf_len);
3037
3038 switch (opcode) {
3039 case UPIU_QUERY_OPCODE_WRITE_DESC:
3040 request->query_func = UPIU_QUERY_FUNC_STANDARD_WRITE_REQUEST;
3041 break;
3042 case UPIU_QUERY_OPCODE_READ_DESC:
3043 request->query_func = UPIU_QUERY_FUNC_STANDARD_READ_REQUEST;
3044 break;
3045 default:
3046 dev_err(hba->dev,
3047 "%s: Expected query descriptor opcode but got = 0x%.2x\n",
3048 __func__, opcode);
3049 err = -EINVAL;
3050 goto out_unlock;
3051 }
3052
3053 err = ufshcd_exec_dev_cmd(hba, DEV_CMD_TYPE_QUERY, QUERY_REQ_TIMEOUT);
3054
3055 if (err) {
3056 dev_err(hba->dev, "%s: opcode 0x%.2x for idn %d failed, index %d, err = %d\n",
3057 __func__, opcode, idn, index, err);
3058 goto out_unlock;
3059 }
3060
3061 hba->dev_cmd.query.descriptor = NULL;
3062 *buf_len = be16_to_cpu(response->upiu_res.length);
3063
3064 out_unlock:
3065 mutex_unlock(&hba->dev_cmd.lock);
3066 out:
3067 ufshcd_release(hba);
3068 return err;
3069 }
3070
3071 /**
3072 * ufshcd_query_descriptor_retry - API function for sending descriptor
3073 * requests
3074 * @hba: per-adapter instance
3075 * @opcode: attribute opcode
3076 * @idn: attribute idn to access
3077 * @index: index field
3078 * @selector: selector field
3079 * @desc_buf: the buffer that contains the descriptor
3080 * @buf_len: length parameter passed to the device
3081 *
3082 * Returns 0 for success, non-zero in case of failure.
3083 * The buf_len parameter will contain, on return, the length parameter
3084 * received on the response.
3085 */
3086 static int ufshcd_query_descriptor_retry(struct ufs_hba *hba,
3087 enum query_opcode opcode,
3088 enum desc_idn idn, u8 index,
3089 u8 selector,
3090 u8 *desc_buf, int *buf_len)
3091 {
3092 int err;
3093 int retries;
3094
3095 for (retries = QUERY_REQ_RETRIES; retries > 0; retries--) {
3096 err = __ufshcd_query_descriptor(hba, opcode, idn, index,
3097 selector, desc_buf, buf_len);
3098 if (!err || err == -EINVAL)
3099 break;
3100 }
3101
3102 return err;
3103 }
3104
3105 /**
3106 * ufshcd_read_desc_length - read the specified descriptor length from header
3107 * @hba: Pointer to adapter instance
3108 * @desc_id: descriptor idn value
3109 * @desc_index: descriptor index
3110 * @desc_length: pointer to variable to read the length of descriptor
3111 *
3112 * Return 0 in case of success, non-zero otherwise
3113 */
3114 static int ufshcd_read_desc_length(struct ufs_hba *hba,
3115 enum desc_idn desc_id,
3116 int desc_index,
3117 int *desc_length)
3118 {
3119 int ret;
3120 u8 header[QUERY_DESC_HDR_SIZE];
3121 int header_len = QUERY_DESC_HDR_SIZE;
3122
3123 if (desc_id >= QUERY_DESC_IDN_MAX)
3124 return -EINVAL;
3125
3126 ret = ufshcd_query_descriptor_retry(hba, UPIU_QUERY_OPCODE_READ_DESC,
3127 desc_id, desc_index, 0, header,
3128 &header_len);
3129
3130 if (ret) {
3131 dev_err(hba->dev, "%s: Failed to get descriptor header id %d",
3132 __func__, desc_id);
3133 return ret;
3134 } else if (desc_id != header[QUERY_DESC_DESC_TYPE_OFFSET]) {
3135 dev_warn(hba->dev, "%s: descriptor header id %d and desc_id %d mismatch",
3136 __func__, header[QUERY_DESC_DESC_TYPE_OFFSET],
3137 desc_id);
3138 ret = -EINVAL;
3139 }
3140
3141 *desc_length = header[QUERY_DESC_LENGTH_OFFSET];
3142 return ret;
3143
3144 }
3145
3146 /**
3147 * ufshcd_map_desc_id_to_length - map descriptor IDN to its length
3148 * @hba: Pointer to adapter instance
3149 * @desc_id: descriptor idn value
3150 * @desc_len: mapped desc length (out)
3151 *
3152 * Return 0 in case of success, non-zero otherwise
3153 */
3154 int ufshcd_map_desc_id_to_length(struct ufs_hba *hba,
3155 enum desc_idn desc_id, int *desc_len)
3156 {
3157 switch (desc_id) {
3158 case QUERY_DESC_IDN_DEVICE:
3159 *desc_len = hba->desc_size.dev_desc;
3160 break;
3161 case QUERY_DESC_IDN_POWER:
3162 *desc_len = hba->desc_size.pwr_desc;
3163 break;
3164 case QUERY_DESC_IDN_GEOMETRY:
3165 *desc_len = hba->desc_size.geom_desc;
3166 break;
3167 case QUERY_DESC_IDN_CONFIGURATION:
3168 *desc_len = hba->desc_size.conf_desc;
3169 break;
3170 case QUERY_DESC_IDN_UNIT:
3171 *desc_len = hba->desc_size.unit_desc;
3172 break;
3173 case QUERY_DESC_IDN_INTERCONNECT:
3174 *desc_len = hba->desc_size.interc_desc;
3175 break;
3176 case QUERY_DESC_IDN_STRING:
3177 *desc_len = QUERY_DESC_MAX_SIZE;
3178 break;
3179 case QUERY_DESC_IDN_RFU_0:
3180 case QUERY_DESC_IDN_RFU_1:
3181 *desc_len = 0;
3182 break;
3183 default:
3184 *desc_len = 0;
3185 return -EINVAL;
3186 }
3187 return 0;
3188 }
3189 EXPORT_SYMBOL(ufshcd_map_desc_id_to_length);
3190
3191 /**
3192 * ufshcd_read_desc_param - read the specified descriptor parameter
3193 * @hba: Pointer to adapter instance
3194 * @desc_id: descriptor idn value
3195 * @desc_index: descriptor index
3196 * @param_offset: offset of the parameter to read
3197 * @param_read_buf: pointer to buffer where parameter would be read
3198 * @param_size: sizeof(param_read_buf)
3199 *
3200 * Return 0 in case of success, non-zero otherwise
3201 */
3202 static int ufshcd_read_desc_param(struct ufs_hba *hba,
3203 enum desc_idn desc_id,
3204 int desc_index,
3205 u8 param_offset,
3206 u8 *param_read_buf,
3207 u8 param_size)
3208 {
3209 int ret;
3210 u8 *desc_buf;
3211 int buff_len;
3212 bool is_kmalloc = true;
3213
3214 /* Safety check */
3215 if (desc_id >= QUERY_DESC_IDN_MAX || !param_size)
3216 return -EINVAL;
3217
3218 /* Get the max length of descriptor from structure filled up at probe
3219 * time.
3220 */
3221 ret = ufshcd_map_desc_id_to_length(hba, desc_id, &buff_len);
3222
3223 /* Sanity checks */
3224 if (ret || !buff_len) {
3225 dev_err(hba->dev, "%s: Failed to get full descriptor length",
3226 __func__);
3227 return ret;
3228 }
3229
3230 /* Check whether we need temp memory */
3231 if (param_offset != 0 || param_size < buff_len) {
3232 desc_buf = kmalloc(buff_len, GFP_KERNEL);
3233 if (!desc_buf)
3234 return -ENOMEM;
3235 } else {
3236 desc_buf = param_read_buf;
3237 is_kmalloc = false;
3238 }
3239
3240 /* Request for full descriptor */
3241 ret = ufshcd_query_descriptor_retry(hba, UPIU_QUERY_OPCODE_READ_DESC,
3242 desc_id, desc_index, 0,
3243 desc_buf, &buff_len);
3244
3245 if (ret) {
3246 dev_err(hba->dev, "%s: Failed reading descriptor. desc_id %d, desc_index %d, param_offset %d, ret %d",
3247 __func__, desc_id, desc_index, param_offset, ret);
3248 goto out;
3249 }
3250
3251 /* Sanity check */
3252 if (desc_buf[QUERY_DESC_DESC_TYPE_OFFSET] != desc_id) {
3253 dev_err(hba->dev, "%s: invalid desc_id %d in descriptor header",
3254 __func__, desc_buf[QUERY_DESC_DESC_TYPE_OFFSET]);
3255 ret = -EINVAL;
3256 goto out;
3257 }
3258
3259 /*
3260 * While reading variable size descriptors (like string descriptor),
3261 * some UFS devices may report the "LENGTH" (field in "Transaction
3262 * Specific fields" of Query Response UPIU) same as what was requested
3263 * in Query Request UPIU instead of reporting the actual size of the
3264 * variable size descriptor.
3265 * It's safe to ignore the "LENGTH" field for variable size descriptors,
3266 * as we can always derive the length of the descriptor from the
3267 * descriptor header fields. Hence, impose the length match check only
3268 * for fixed size descriptors (for which we always request the correct
3269 * size as part of the Query Request UPIU).
3270 */
3271 if ((desc_id != QUERY_DESC_IDN_STRING) &&
3272 (buff_len != desc_buf[QUERY_DESC_LENGTH_OFFSET])) {
3273 dev_err(hba->dev, "%s: desc_buf length mismatch: buff_len %d, buff_len(desc_header) %d",
3274 __func__, buff_len, desc_buf[QUERY_DESC_LENGTH_OFFSET]);
3275 ret = -EINVAL;
3276 goto out;
3277 }
3278 /* Check that we do not copy more data than is available */
3279 if (is_kmalloc && param_size > buff_len)
3280 param_size = buff_len;
3281
3282 if (is_kmalloc)
3283 memcpy(param_read_buf, &desc_buf[param_offset], param_size);
3284 out:
3285 if (is_kmalloc)
3286 kfree(desc_buf);
3287 return ret;
3288 }
3289
3290 static inline int ufshcd_read_desc(struct ufs_hba *hba,
3291 enum desc_idn desc_id,
3292 int desc_index,
3293 u8 *buf,
3294 u32 size)
3295 {
3296 return ufshcd_read_desc_param(hba, desc_id, desc_index, 0, buf, size);
3297 }
3298
3299 static inline int ufshcd_read_power_desc(struct ufs_hba *hba,
3300 u8 *buf,
3301 u32 size)
3302 {
3303 return ufshcd_read_desc(hba, QUERY_DESC_IDN_POWER, 0, buf, size);
3304 }
3305
3306 static int ufshcd_read_device_desc(struct ufs_hba *hba, u8 *buf, u32 size)
3307 {
3308 return ufshcd_read_desc(hba, QUERY_DESC_IDN_DEVICE, 0, buf, size);
3309 }
3310
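/*
 * Illustrative sketch, not part of the driver: reading the whole device
 * descriptor into a temporary buffer sized from hba->desc_size (filled
 * at probe time), using the wrapper above. The helper name is
 * hypothetical.
 */
#if 0
static int example_dump_device_desc(struct ufs_hba *hba)
{
	int err;
	int len = hba->desc_size.dev_desc;
	u8 *desc_buf;

	desc_buf = kmalloc(len, GFP_KERNEL);
	if (!desc_buf)
		return -ENOMEM;

	err = ufshcd_read_device_desc(hba, desc_buf, len);
	if (!err)
		dev_dbg(hba->dev, "%s: bLength = %u\n", __func__,
			desc_buf[QUERY_DESC_LENGTH_OFFSET]);

	kfree(desc_buf);
	return err;
}
#endif
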
3311 /**
3312 * ufshcd_read_string_desc - read string descriptor
3313 * @hba: pointer to adapter instance
3314 * @desc_index: descriptor index
3315 * @buf: pointer to buffer where descriptor would be read
3316 * @size: size of buf
3317 * @ascii: if true convert from unicode to ascii characters
3318 *
3319 * Return 0 in case of success, non-zero otherwise
3320 */
3321 #define ASCII_STD true
3322 static int ufshcd_read_string_desc(struct ufs_hba *hba, int desc_index,
3323 u8 *buf, u32 size, bool ascii)
3324 {
3325 int err = 0;
3326
3327 err = ufshcd_read_desc(hba,
3328 QUERY_DESC_IDN_STRING, desc_index, buf, size);
3329
3330 if (err) {
3331 dev_err(hba->dev, "%s: reading String Desc failed after %d retries. err = %d\n",
3332 __func__, QUERY_REQ_RETRIES, err);
3333 goto out;
3334 }
3335
3336 if (ascii) {
3337 int desc_len;
3338 int ascii_len;
3339 int i;
3340 char *buff_ascii;
3341
3342 desc_len = buf[0];
3343 /* remove header and divide by 2 to move from UTF16 to UTF8 */
3344 ascii_len = (desc_len - QUERY_DESC_HDR_SIZE) / 2 + 1;
3345 if (size < ascii_len + QUERY_DESC_HDR_SIZE) {
3346 dev_err(hba->dev, "%s: buffer allocated size is too small\n",
3347 __func__);
3348 err = -ENOMEM;
3349 goto out;
3350 }
3351
3352 buff_ascii = kmalloc(ascii_len, GFP_KERNEL);
3353 if (!buff_ascii) {
3354 err = -ENOMEM;
3355 goto out;
3356 }
3357
3358 /*
3359 * the descriptor contains string in UTF16 format
3360 * we need to convert to utf-8 so it can be displayed
3361 */
3362 utf16s_to_utf8s((wchar_t *)&buf[QUERY_DESC_HDR_SIZE],
3363 desc_len - QUERY_DESC_HDR_SIZE,
3364 UTF16_BIG_ENDIAN, buff_ascii, ascii_len);
3365
3366 /* replace non-printable or non-ASCII characters with spaces */
3367 for (i = 0; i < ascii_len; i++)
3368 ufshcd_remove_non_printable(&buff_ascii[i]);
3369
3370 memset(buf + QUERY_DESC_HDR_SIZE, 0,
3371 size - QUERY_DESC_HDR_SIZE);
3372 memcpy(buf + QUERY_DESC_HDR_SIZE, buff_ascii, ascii_len);
3373 buf[QUERY_DESC_LENGTH_OFFSET] = ascii_len + QUERY_DESC_HDR_SIZE;
3374 kfree(buff_ascii);
3375 }
3376 out:
3377 return err;
3378 }
3379
3380 /**
3381 * ufshcd_read_unit_desc_param - read the specified unit descriptor parameter
3382 * @hba: Pointer to adapter instance
3383 * @lun: lun id
3384 * @param_offset: offset of the parameter to read
3385 * @param_read_buf: pointer to buffer where parameter would be read
3386 * @param_size: sizeof(param_read_buf)
3387 *
3388 * Return 0 in case of success, non-zero otherwise
3389 */
3390 static inline int ufshcd_read_unit_desc_param(struct ufs_hba *hba,
3391 int lun,
3392 enum unit_desc_param param_offset,
3393 u8 *param_read_buf,
3394 u32 param_size)
3395 {
3396 /*
3397 * Unit descriptors are only available for general purpose LUs (LUN id
3398 * from 0 to 7) and RPMB Well known LU.
3399 */
3400 if (lun != UFS_UPIU_RPMB_WLUN && (lun >= UFS_UPIU_MAX_GENERAL_LUN))
3401 return -EOPNOTSUPP;
3402
3403 return ufshcd_read_desc_param(hba, QUERY_DESC_IDN_UNIT, lun,
3404 param_offset, param_read_buf, param_size);
3405 }
3406
3407 int ufshcd_read_health_desc(struct ufs_hba *hba, u8 *buf, u32 size)
3408 {
3409 int err = 0;
3410
3411 err = ufshcd_read_desc(hba,
3412 QUERY_DESC_IDN_HEALTH, 0, buf, size);
3413
3414 if (err)
3415 dev_err(hba->dev, "%s: reading Device Health Desc failed. err = %d\n",
3416 __func__, err);
3417
3418 return err;
3419 }
3420
3421 /**
3422 * ufshcd_memory_alloc - allocate memory for host memory space data structures
3423 * @hba: per adapter instance
3424 *
3425 * 1. Allocate DMA memory for Command Descriptor array
3426 * Each command descriptor consists of Command UPIU, Response UPIU and PRDT
3427 * 2. Allocate DMA memory for UTP Transfer Request Descriptor List (UTRDL).
3428 * 3. Allocate DMA memory for UTP Task Management Request Descriptor List
3429 * (UTMRDL)
3430 * 4. Allocate memory for local reference block(lrb).
3431 *
3432 * Returns 0 for success, non-zero in case of failure
3433 */
3434 static int ufshcd_memory_alloc(struct ufs_hba *hba)
3435 {
3436 size_t utmrdl_size, utrdl_size, ucdl_size;
3437
3438 /* Allocate memory for UTP command descriptors */
3439 ucdl_size = (sizeof(struct utp_transfer_cmd_desc) * hba->nutrs);
3440 hba->ucdl_base_addr = dmam_alloc_coherent(hba->dev,
3441 ucdl_size,
3442 &hba->ucdl_dma_addr,
3443 GFP_KERNEL);
3444
3445 /*
3446 * UFSHCI requires the UTP command descriptor to be 128-byte aligned.
3447 * Make sure hba->ucdl_dma_addr is aligned to PAGE_SIZE; if it is,
3448 * then it will be aligned to 128 bytes as well.
3450 */
3451 if (!hba->ucdl_base_addr ||
3452 WARN_ON(hba->ucdl_dma_addr & (PAGE_SIZE - 1))) {
3453 dev_err(hba->dev,
3454 "Command Descriptor Memory allocation failed\n");
3455 goto out;
3456 }
3457
3458 /*
3459 * Allocate memory for UTP Transfer descriptors
3460 * UFSHCI requires 1024 byte alignment of UTRD
3461 */
3462 utrdl_size = (sizeof(struct utp_transfer_req_desc) * hba->nutrs);
3463 hba->utrdl_base_addr = dmam_alloc_coherent(hba->dev,
3464 utrdl_size,
3465 &hba->utrdl_dma_addr,
3466 GFP_KERNEL);
3467 if (!hba->utrdl_base_addr ||
3468 WARN_ON(hba->utrdl_dma_addr & (PAGE_SIZE - 1))) {
3469 dev_err(hba->dev,
3470 "Transfer Descriptor Memory allocation failed\n");
3471 goto out;
3472 }
3473
3474 /*
3475 * Allocate memory for UTP Task Management descriptors
3476 * UFSHCI requires 1024 byte alignment of UTMRD
3477 */
3478 utmrdl_size = sizeof(struct utp_task_req_desc) * hba->nutmrs;
3479 hba->utmrdl_base_addr = dmam_alloc_coherent(hba->dev,
3480 utmrdl_size,
3481 &hba->utmrdl_dma_addr,
3482 GFP_KERNEL);
3483 if (!hba->utmrdl_base_addr ||
3484 WARN_ON(hba->utmrdl_dma_addr & (PAGE_SIZE - 1))) {
3485 dev_err(hba->dev,
3486 "Task Management Descriptor Memory allocation failed\n");
3487 goto out;
3488 }
3489
3490 /* Allocate memory for local reference block */
3491 hba->lrb = devm_kzalloc(hba->dev,
3492 hba->nutrs * sizeof(struct ufshcd_lrb),
3493 GFP_KERNEL);
3494 if (!hba->lrb) {
3495 dev_err(hba->dev, "LRB Memory allocation failed\n");
3496 goto out;
3497 }
3498 return 0;
3499 out:
3500 return -ENOMEM;
3501 }
3502
3503 /**
3504 * ufshcd_host_memory_configure - configure local reference block with
3505 * memory offsets
3506 * @hba: per adapter instance
3507 *
3508 * Configure Host memory space
3509 * 1. Update Corresponding UTRD.UCDBA and UTRD.UCDBAU with UCD DMA
3510 * address.
3511 * 2. Update each UTRD with Response UPIU offset, Response UPIU length
3512 * and PRDT offset.
3513 * 3. Save the corresponding addresses of UTRD, UCD.CMD, UCD.RSP and UCD.PRDT
3514 * into local reference block.
3515 */
3516 static void ufshcd_host_memory_configure(struct ufs_hba *hba)
3517 {
3518 struct utp_transfer_cmd_desc *cmd_descp;
3519 struct utp_transfer_req_desc *utrdlp;
3520 dma_addr_t cmd_desc_dma_addr;
3521 dma_addr_t cmd_desc_element_addr;
3522 u16 response_offset;
3523 u16 prdt_offset;
3524 int cmd_desc_size;
3525 int i;
3526
3527 utrdlp = hba->utrdl_base_addr;
3528 cmd_descp = hba->ucdl_base_addr;
3529
3530 response_offset =
3531 offsetof(struct utp_transfer_cmd_desc, response_upiu);
3532 prdt_offset =
3533 offsetof(struct utp_transfer_cmd_desc, prd_table);
3534
3535 cmd_desc_size = sizeof(struct utp_transfer_cmd_desc);
3536 cmd_desc_dma_addr = hba->ucdl_dma_addr;
3537
3538 for (i = 0; i < hba->nutrs; i++) {
3539 /* Configure UTRD with command descriptor base address */
3540 cmd_desc_element_addr =
3541 (cmd_desc_dma_addr + (cmd_desc_size * i));
3542 utrdlp[i].command_desc_base_addr_lo =
3543 cpu_to_le32(lower_32_bits(cmd_desc_element_addr));
3544 utrdlp[i].command_desc_base_addr_hi =
3545 cpu_to_le32(upper_32_bits(cmd_desc_element_addr));
3546
3547 /* Response upiu and prdt offset should be in double words */
3548 if (hba->quirks & UFSHCD_QUIRK_PRDT_BYTE_GRAN) {
3549 utrdlp[i].response_upiu_offset =
3550 cpu_to_le16(response_offset);
3551 utrdlp[i].prd_table_offset =
3552 cpu_to_le16(prdt_offset);
3553 utrdlp[i].response_upiu_length =
3554 cpu_to_le16(ALIGNED_UPIU_SIZE);
3555 } else {
3556 utrdlp[i].response_upiu_offset =
3557 cpu_to_le16((response_offset >> 2));
3558 utrdlp[i].prd_table_offset =
3559 cpu_to_le16((prdt_offset >> 2));
3560 utrdlp[i].response_upiu_length =
3561 cpu_to_le16(ALIGNED_UPIU_SIZE >> 2);
3562 }
3563
3564 hba->lrb[i].utr_descriptor_ptr = (utrdlp + i);
3565 hba->lrb[i].utrd_dma_addr = hba->utrdl_dma_addr +
3566 (i * sizeof(struct utp_transfer_req_desc));
3567 hba->lrb[i].ucd_req_ptr =
3568 (struct utp_upiu_req *)(cmd_descp + i);
3569 hba->lrb[i].ucd_req_dma_addr = cmd_desc_element_addr;
3570 hba->lrb[i].ucd_rsp_ptr =
3571 (struct utp_upiu_rsp *)cmd_descp[i].response_upiu;
3572 hba->lrb[i].ucd_rsp_dma_addr = cmd_desc_element_addr +
3573 response_offset;
3574 hba->lrb[i].ucd_prdt_ptr =
3575 (struct ufshcd_sg_entry *)cmd_descp[i].prd_table;
3576 hba->lrb[i].ucd_prdt_dma_addr = cmd_desc_element_addr +
3577 prdt_offset;
3578 }
3579 }
3580
3581 /**
3582 * ufshcd_dme_link_startup - Notify Unipro to perform link startup
3583 * @hba: per adapter instance
3584 *
3585 * UIC_CMD_DME_LINK_STARTUP command must be issued to Unipro layer,
3586 * in order to initialize the Unipro link startup procedure.
3587 * Once the Unipro links are up, the device connected to the controller
3588 * is detected.
3589 *
3590 * Returns 0 on success, non-zero value on failure
3591 */
3592 static int ufshcd_dme_link_startup(struct ufs_hba *hba)
3593 {
3594 struct uic_command uic_cmd = {0};
3595 int ret;
3596
3597 uic_cmd.command = UIC_CMD_DME_LINK_STARTUP;
3598
3599 ret = ufshcd_send_uic_cmd(hba, &uic_cmd);
3600 if (ret)
3601 dev_dbg(hba->dev,
3602 "dme-link-startup: error code %d\n", ret);
3603 return ret;
3604 }
3605
3606 static inline void ufshcd_add_delay_before_dme_cmd(struct ufs_hba *hba)
3607 {
3608 #define MIN_DELAY_BEFORE_DME_CMDS_US 1000
3609 unsigned long min_sleep_time_us;
3610
3611 if (!(hba->quirks & UFSHCD_QUIRK_DELAY_BEFORE_DME_CMDS))
3612 return;
3613
3614 /*
3615 * last_dme_cmd_tstamp will be 0 only for 1st call to
3616 * this function
3617 */
3618 if (unlikely(!ktime_to_us(hba->last_dme_cmd_tstamp))) {
3619 min_sleep_time_us = MIN_DELAY_BEFORE_DME_CMDS_US;
3620 } else {
3621 unsigned long delta =
3622 (unsigned long) ktime_to_us(
3623 ktime_sub(ktime_get(),
3624 hba->last_dme_cmd_tstamp));
3625
3626 if (delta < MIN_DELAY_BEFORE_DME_CMDS_US)
3627 min_sleep_time_us =
3628 MIN_DELAY_BEFORE_DME_CMDS_US - delta;
3629 else
3630 return; /* no more delay required */
3631 }
3632
3633 /* allow sleep for extra 50us if needed */
3634 usleep_range(min_sleep_time_us, min_sleep_time_us + 50);
3635 }
3636
3637 static int ufshcd_dme_reset(struct ufs_hba *hba)
3638 {
3639 struct uic_command uic_cmd = {0};
3640 int ret;
3641
3642 uic_cmd.command = UIC_CMD_DME_RESET;
3643 uic_cmd.argument1 = 0x1;
3644
3645 ret = ufshcd_send_uic_cmd(hba, &uic_cmd);
3646 if (ret)
3647 dev_err(hba->dev,
3648 "dme-reset: error code %d\n", ret);
3649
3650 return ret;
3651 }
3652
3653 static int ufshcd_dme_enable(struct ufs_hba *hba)
3654 {
3655 struct uic_command uic_cmd = {0};
3656 int ret;
3657
3658 uic_cmd.command = UIC_CMD_DME_ENABLE;
3659
3660 ret = ufshcd_send_uic_cmd(hba, &uic_cmd);
3661 if (ret)
3662 dev_err(hba->dev,
3663 "dme-enable: error code %d\n", ret);
3664
3665 return ret;
3666 }
3667
3668 /**
3669 * ufshcd_dme_set_attr - UIC command for DME_SET, DME_PEER_SET
3670 * @hba: per adapter instance
3671 * @attr_sel: uic command argument1
3672 * @attr_set: attribute set type as uic command argument2
3673 * @mib_val: setting value as uic command argument3
3674 * @peer: indicate whether peer or local
3675 *
3676 * Returns 0 on success, non-zero value on failure
3677 */
3678 int ufshcd_dme_set_attr(struct ufs_hba *hba, u32 attr_sel,
3679 u8 attr_set, u32 mib_val, u8 peer)
3680 {
3681 struct uic_command uic_cmd = {0};
3682 static const char *const action[] = {
3683 "dme-set",
3684 "dme-peer-set"
3685 };
3686 const char *set = action[!!peer];
3687 int ret;
3688 int retries = UFS_UIC_COMMAND_RETRIES;
3689
3690 uic_cmd.command = peer ?
3691 UIC_CMD_DME_PEER_SET : UIC_CMD_DME_SET;
3692 uic_cmd.argument1 = attr_sel;
3693 uic_cmd.argument2 = UIC_ARG_ATTR_TYPE(attr_set);
3694 uic_cmd.argument3 = mib_val;
3695
3696 do {
3697 /* for peer attributes we retry upon failure */
3698 ret = ufshcd_send_uic_cmd(hba, &uic_cmd);
3699 if (ret)
3700 dev_dbg(hba->dev, "%s: attr-id 0x%x val 0x%x error code %d\n",
3701 set, UIC_GET_ATTR_ID(attr_sel), mib_val, ret);
3702 } while (ret && peer && --retries);
3703
3704 if (ret)
3705 dev_err(hba->dev, "%s: attr-id 0x%x val 0x%x failed %d retries\n",
3706 set, UIC_GET_ATTR_ID(attr_sel), mib_val,
3707 UFS_UIC_COMMAND_RETRIES - retries);
3708
3709 return ret;
3710 }
3711 EXPORT_SYMBOL_GPL(ufshcd_dme_set_attr);
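/*
 * Usage sketch (illustrative only): callers in this file normally reach
 * ufshcd_dme_set_attr() through the ufshcd_dme_set()/ufshcd_dme_peer_set()
 * convenience wrappers, e.g. to program the RX gear during a power mode
 * change, as done in ufshcd_change_power_mode() below:
 *
 *   ret = ufshcd_dme_set(hba, UIC_ARG_MIB(PA_RXGEAR), pwr_mode->gear_rx);
 *   if (ret)
 *           dev_err(hba->dev, "failed to set PA_RXGEAR: %d\n", ret);
 */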
3712
3713 /**
3714 * ufshcd_dme_get_attr - UIC command for DME_GET, DME_PEER_GET
3715 * @hba: per adapter instance
3716 * @attr_sel: uic command argument1
3717 * @mib_val: the value of the attribute as returned by the UIC command
3718 * @peer: indicate whether peer or local
3719 *
3720 * Returns 0 on success, non-zero value on failure
3721 */
3722 int ufshcd_dme_get_attr(struct ufs_hba *hba, u32 attr_sel,
3723 u32 *mib_val, u8 peer)
3724 {
3725 struct uic_command uic_cmd = {0};
3726 static const char *const action[] = {
3727 "dme-get",
3728 "dme-peer-get"
3729 };
3730 const char *get = action[!!peer];
3731 int ret;
3732 int retries = UFS_UIC_COMMAND_RETRIES;
3733 struct ufs_pa_layer_attr orig_pwr_info;
3734 struct ufs_pa_layer_attr temp_pwr_info;
3735 bool pwr_mode_change = false;
3736
3737 if (peer && (hba->quirks & UFSHCD_QUIRK_DME_PEER_ACCESS_AUTO_MODE)) {
3738 orig_pwr_info = hba->pwr_info;
3739 temp_pwr_info = orig_pwr_info;
3740
3741 if (orig_pwr_info.pwr_tx == FAST_MODE ||
3742 orig_pwr_info.pwr_rx == FAST_MODE) {
3743 temp_pwr_info.pwr_tx = FASTAUTO_MODE;
3744 temp_pwr_info.pwr_rx = FASTAUTO_MODE;
3745 pwr_mode_change = true;
3746 } else if (orig_pwr_info.pwr_tx == SLOW_MODE ||
3747 orig_pwr_info.pwr_rx == SLOW_MODE) {
3748 temp_pwr_info.pwr_tx = SLOWAUTO_MODE;
3749 temp_pwr_info.pwr_rx = SLOWAUTO_MODE;
3750 pwr_mode_change = true;
3751 }
3752 if (pwr_mode_change) {
3753 ret = ufshcd_change_power_mode(hba, &temp_pwr_info);
3754 if (ret)
3755 goto out;
3756 }
3757 }
3758
3759 uic_cmd.command = peer ?
3760 UIC_CMD_DME_PEER_GET : UIC_CMD_DME_GET;
3761 uic_cmd.argument1 = attr_sel;
3762
3763 do {
3764 /* for peer attributes we retry upon failure */
3765 ret = ufshcd_send_uic_cmd(hba, &uic_cmd);
3766 if (ret)
3767 dev_dbg(hba->dev, "%s: attr-id 0x%x error code %d\n",
3768 get, UIC_GET_ATTR_ID(attr_sel), ret);
3769 } while (ret && peer && --retries);
3770
3771 if (ret)
3772 dev_err(hba->dev, "%s: attr-id 0x%x failed %d retries\n",
3773 get, UIC_GET_ATTR_ID(attr_sel),
3774 UFS_UIC_COMMAND_RETRIES - retries);
3775
3776 if (mib_val && !ret)
3777 *mib_val = uic_cmd.argument3;
3778
3779 if (peer && (hba->quirks & UFSHCD_QUIRK_DME_PEER_ACCESS_AUTO_MODE)
3780 && pwr_mode_change)
3781 ufshcd_change_power_mode(hba, &orig_pwr_info);
3782 out:
3783 return ret;
3784 }
3785 EXPORT_SYMBOL_GPL(ufshcd_dme_get_attr);
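/*
 * Usage sketch (illustrative only): reads go through the ufshcd_dme_get()/
 * ufshcd_dme_peer_get() wrappers, e.g. fetching the connected lane count as
 * ufshcd_get_max_pwr_mode() does later in this file:
 *
 *   u32 lanes = 0;
 *
 *   ufshcd_dme_get(hba, UIC_ARG_MIB(PA_CONNECTEDRXDATALANES), &lanes);
 */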
3786
3787 /**
3788 * ufshcd_uic_pwr_ctrl - executes UIC commands (which affects the link power
3789 * state) and waits for it to take effect.
3790 *
3791 * @hba: per adapter instance
3792 * @cmd: UIC command to execute
3793 *
3794 * DME operations like DME_SET(PA_PWRMODE), DME_HIBERNATE_ENTER &
3795 * DME_HIBERNATE_EXIT commands take some time to take effect on both the host
3796 * and device UniPro links, and hence their final completion is indicated by
3797 * dedicated status bits in the Interrupt Status register (UPMS, UHES, UHXS) in
3798 * addition to the normal UIC command completion status (UCCS). This function
3799 * only returns after the relevant status bits indicate completion.
3800 *
3801 * Returns 0 on success, non-zero value on failure
3802 */
3803 static int ufshcd_uic_pwr_ctrl(struct ufs_hba *hba, struct uic_command *cmd)
3804 {
3805 struct completion uic_async_done;
3806 unsigned long flags;
3807 u8 status;
3808 int ret;
3809 bool reenable_intr = false;
3810
3811 mutex_lock(&hba->uic_cmd_mutex);
3812 init_completion(&uic_async_done);
3813 ufshcd_add_delay_before_dme_cmd(hba);
3814
3815 spin_lock_irqsave(hba->host->host_lock, flags);
3816 hba->uic_async_done = &uic_async_done;
3817 if (ufshcd_readl(hba, REG_INTERRUPT_ENABLE) & UIC_COMMAND_COMPL) {
3818 ufshcd_disable_intr(hba, UIC_COMMAND_COMPL);
3819 /*
3820 * Make sure UIC command completion interrupt is disabled before
3821 * issuing UIC command.
3822 */
3823 wmb();
3824 reenable_intr = true;
3825 }
3826 ret = __ufshcd_send_uic_cmd(hba, cmd, false);
3827 spin_unlock_irqrestore(hba->host->host_lock, flags);
3828 if (ret) {
3829 dev_err(hba->dev,
3830 "pwr ctrl cmd 0x%x with mode 0x%x uic error %d\n",
3831 cmd->command, cmd->argument3, ret);
3832 goto out;
3833 }
3834
3835 if (!wait_for_completion_timeout(hba->uic_async_done,
3836 msecs_to_jiffies(UIC_CMD_TIMEOUT))) {
3837 dev_err(hba->dev,
3838 "pwr ctrl cmd 0x%x with mode 0x%x completion timeout\n",
3839 cmd->command, cmd->argument3);
3840 ret = -ETIMEDOUT;
3841 goto out;
3842 }
3843
3844 status = ufshcd_get_upmcrs(hba, cmd);
3845 if (status != PWR_LOCAL) {
3846 dev_err(hba->dev,
3847 "pwr ctrl cmd 0x%0x failed, host upmcrs:0x%x\n",
3848 cmd->command, status);
3849 ret = (status != PWR_OK) ? status : -1;
3850 }
3851 out:
3852 /* Dump debugging information to system memory */
3853 if (ret) {
3854 ufshcd_vops_dbg_register_dump(hba);
3855 exynos_ufs_show_uic_info(hba);
3856 ufshcd_print_host_state(hba);
3857 ufshcd_print_pwr_info(hba);
3858 ufshcd_print_host_regs(hba);
3859 }
3860
3861 spin_lock_irqsave(hba->host->host_lock, flags);
3862 hba->active_uic_cmd = NULL;
3863 hba->uic_async_done = NULL;
3864 if (reenable_intr)
3865 ufshcd_enable_intr(hba, UIC_COMMAND_COMPL);
3866 spin_unlock_irqrestore(hba->host->host_lock, flags);
3867 mutex_unlock(&hba->uic_cmd_mutex);
3868
3869 return ret;
3870 }
3871
3872 /**
3873 * ufshcd_uic_change_pwr_mode - Perform the UIC power mode change
3874 * using DME_SET primitives.
3875 * @hba: per adapter instance
3876 * @mode: power mode value
3877 *
3878 * Returns 0 on success, non-zero value on failure
3879 */
3880 static int ufshcd_uic_change_pwr_mode(struct ufs_hba *hba, u8 mode)
3881 {
3882 struct uic_command uic_cmd = {0};
3883 int ret;
3884
3885 if (hba->quirks & UFSHCD_QUIRK_BROKEN_PA_RXHSUNTERMCAP) {
3886 ret = ufshcd_dme_set(hba,
3887 UIC_ARG_MIB_SEL(PA_RXHSUNTERMCAP, 0), 1);
3888 if (ret) {
3889 dev_err(hba->dev, "%s: failed to enable PA_RXHSUNTERMCAP ret %d\n",
3890 __func__, ret);
3891 goto out;
3892 }
3893 }
3894
3895 uic_cmd.command = UIC_CMD_DME_SET;
3896 uic_cmd.argument1 = UIC_ARG_MIB(PA_PWRMODE);
3897 uic_cmd.argument3 = mode;
3898 ufshcd_hold(hba, false);
3899 ret = ufshcd_uic_pwr_ctrl(hba, &uic_cmd);
3900 ufshcd_release(hba);
3901
3902 out:
3903 return ret;
3904 }
3905
3906 static int ufshcd_link_recovery(struct ufs_hba *hba)
3907 {
3908 int ret;
3909 unsigned long flags;
3910
3911 spin_lock_irqsave(hba->host->host_lock, flags);
3912 hba->ufshcd_state = UFSHCD_STATE_RESET;
3913 ufshcd_set_eh_in_progress(hba);
3914 spin_unlock_irqrestore(hba->host->host_lock, flags);
3915
3916 ret = ufshcd_host_reset_and_restore(hba);
3917
3918 spin_lock_irqsave(hba->host->host_lock, flags);
3919 if (ret)
3920 hba->ufshcd_state = UFSHCD_STATE_ERROR;
3921 ufshcd_clear_eh_in_progress(hba);
3922 spin_unlock_irqrestore(hba->host->host_lock, flags);
3923
3924 if (ret)
3925 dev_err(hba->dev, "%s: link recovery failed, err %d",
3926 __func__, ret);
3927
3928 return ret;
3929 }
3930
3931 static int __ufshcd_uic_hibern8_enter(struct ufs_hba *hba)
3932 {
3933 int ret;
3934 struct uic_command uic_cmd = {0};
3935 ktime_t start = ktime_get();
3936
3937 uic_cmd.command = UIC_CMD_DME_HIBER_ENTER;
3938 ret = ufshcd_uic_pwr_ctrl(hba, &uic_cmd);
3939 trace_ufshcd_profile_hibern8(dev_name(hba->dev), "enter",
3940 ktime_to_us(ktime_sub(ktime_get(), start)), ret);
3941
3942 if (ret) {
3943 dev_err(hba->dev, "%s: hibern8 enter failed. ret = %d\n",
3944 __func__, ret);
3945 ssleep(2);
3946 /*
3947 * If link recovery fails then return an error so that the
3948 * caller doesn't retry hibern8 enter again.
3949 */
3950 if (ufshcd_link_recovery(hba))
3951 ret = -ENOLINK;
3952 }
3953
3954 return ret;
3955 }
3956
3957 static int ufshcd_uic_hibern8_enter(struct ufs_hba *hba)
3958 {
3959 int ret = 0, retries;
3960
3961 for (retries = UIC_HIBERN8_ENTER_RETRIES; retries > 0; retries--) {
3962 ret = __ufshcd_uic_hibern8_enter(hba);
3963 if (!ret || ret == -ENOLINK)
3964 goto out;
3965 }
3966 out:
3967 return ret;
3968 }
3969
3970 static int ufshcd_uic_hibern8_exit(struct ufs_hba *hba)
3971 {
3972 struct uic_command uic_cmd = {0};
3973 int ret;
3974 ktime_t start = ktime_get();
3975
3976
3977 uic_cmd.command = UIC_CMD_DME_HIBER_EXIT;
3978 ret = ufshcd_uic_pwr_ctrl(hba, &uic_cmd);
3979 trace_ufshcd_profile_hibern8(dev_name(hba->dev), "exit",
3980 ktime_to_us(ktime_sub(ktime_get(), start)), ret);
3981
3982 if (ret) {
3983 dev_err(hba->dev, "%s: hibern8 exit failed. ret = %d\n",
3984 __func__, ret);
3985 ret = ufshcd_link_recovery(hba);
3986 } else {
3987
3988 hba->ufs_stats.last_hibern8_exit_tstamp = ktime_get();
3989 hba->ufs_stats.hibern8_exit_cnt++;
3990 }
3991
3992 return ret;
3993 }
3994
3995 /**
3996 * ufshcd_init_pwr_info - setting the POR (power on reset)
3997 * values in hba power info
3998 * @hba: per-adapter instance
3999 */
4000 static void ufshcd_init_pwr_info(struct ufs_hba *hba)
4001 {
4002 hba->pwr_info.gear_rx = UFS_PWM_G1;
4003 hba->pwr_info.gear_tx = UFS_PWM_G1;
4004 hba->pwr_info.lane_rx = 1;
4005 hba->pwr_info.lane_tx = 1;
4006 hba->pwr_info.pwr_rx = SLOWAUTO_MODE;
4007 hba->pwr_info.pwr_tx = SLOWAUTO_MODE;
4008 hba->pwr_info.hs_rate = 0;
4009 }
4010
4011 static int ufshcd_link_hibern8_ctrl(struct ufs_hba *hba, bool en)
4012 {
4013 int ret;
4014
4015 if (hba->vops && hba->vops->hibern8_notify)
4016 hba->vops->hibern8_notify(hba, en, PRE_CHANGE);
4017
4018 if (en)
4019 ret = ufshcd_uic_hibern8_enter(hba);
4020 else
4021 ret = ufshcd_uic_hibern8_exit(hba);
4022
4023 if (ret)
4024 goto out;
4025
4026 if (hba->monitor.flag & UFSHCD_MONITOR_LEVEL2) {
4027 if (en)
4028 dev_info(hba->dev, "H8+\n");
4029 else
4030 dev_info(hba->dev, "H8-\n");
4031 }
4032
4033 if (hba->vops && hba->vops->hibern8_notify)
4034 hba->vops->hibern8_notify(hba, en, POST_CHANGE);
4035
4036 out:
4037 hba->tcx_replay_timer_expired_cnt = 0;
4038 hba->fcx_protection_timer_expired_cnt = 0;
4039
4040 return ret;
4041 }
4042
4043 /**
4044 * ufshcd_get_max_pwr_mode - reads the max power mode negotiated with device
4045 * @hba: per-adapter instance
4046 */
4047 static int ufshcd_get_max_pwr_mode(struct ufs_hba *hba)
4048 {
4049 struct ufs_pa_layer_attr *pwr_info = &hba->max_pwr_info.info;
4050
4051 if (hba->max_pwr_info.is_valid)
4052 return 0;
4053
4054 pwr_info->pwr_tx = FAST_MODE;
4055 pwr_info->pwr_rx = FAST_MODE;
4056 pwr_info->hs_rate = PA_HS_MODE_B;
4057
4058 /* Get the connected lane count */
4059 ufshcd_dme_get(hba, UIC_ARG_MIB(PA_CONNECTEDRXDATALANES),
4060 &pwr_info->lane_rx);
4061 ufshcd_dme_get(hba, UIC_ARG_MIB(PA_CONNECTEDTXDATALANES),
4062 &pwr_info->lane_tx);
4063
4064 if (!pwr_info->lane_rx || !pwr_info->lane_tx) {
4065 dev_err(hba->dev, "%s: invalid connected lanes value. rx=%d, tx=%d\n",
4066 __func__,
4067 pwr_info->lane_rx,
4068 pwr_info->lane_tx);
4069 return -EINVAL;
4070 }
4071
4072 hba->tcx_replay_timer_expired_cnt = 0;
4073 hba->fcx_protection_timer_expired_cnt = 0;
4074
4075 /* Get the peer available lane count */
4076 ufshcd_dme_peer_get(hba, UIC_ARG_MIB(PA_AVAILRXDATALANES),
4077 &pwr_info->peer_available_lane_rx);
4078 ufshcd_dme_peer_get(hba, UIC_ARG_MIB(PA_AVAILTXDATALANES),
4079 &pwr_info->peer_available_lane_tx);
4080
4081 if (!pwr_info->peer_available_lane_rx || !pwr_info->peer_available_lane_tx) {
4082 dev_err(hba->dev, "%s: invalid peer available lanes value. rx=%d, tx=%d\n",
4083 __func__,
4084 pwr_info->peer_available_lane_rx,
4085 pwr_info->peer_available_lane_tx);
4086 return -EINVAL;
4087 }
4088
4089 /*
4090 * First, get the maximum gears of HS speed.
4091 * If a zero value, it means there is no HSGEAR capability.
4092 * Then, get the maximum gears of PWM speed.
4093 */
4094 ufshcd_dme_get(hba, UIC_ARG_MIB(PA_MAXRXHSGEAR), &pwr_info->gear_rx);
4095 if (!pwr_info->gear_rx) {
4096 ufshcd_dme_get(hba, UIC_ARG_MIB(PA_MAXRXPWMGEAR),
4097 &pwr_info->gear_rx);
4098 if (!pwr_info->gear_rx) {
4099 dev_err(hba->dev, "%s: invalid max pwm rx gear read = %d\n",
4100 __func__, pwr_info->gear_rx);
4101 return -EINVAL;
4102 }
4103 pwr_info->pwr_rx = SLOW_MODE;
4104 }
4105
4106 ufshcd_dme_peer_get(hba, UIC_ARG_MIB(PA_MAXRXHSGEAR),
4107 &pwr_info->gear_tx);
4108 if (!pwr_info->gear_tx) {
4109 ufshcd_dme_peer_get(hba, UIC_ARG_MIB(PA_MAXRXPWMGEAR),
4110 &pwr_info->gear_tx);
4111 if (!pwr_info->gear_tx) {
4112 dev_err(hba->dev, "%s: invalid max pwm tx gear read = %d\n",
4113 __func__, pwr_info->gear_tx);
4114 return -EINVAL;
4115 }
4116 pwr_info->pwr_tx = SLOW_MODE;
4117 }
4118
4119 hba->max_pwr_info.is_valid = true;
4120 return 0;
4121 }
4122
4123 static int ufshcd_change_power_mode(struct ufs_hba *hba,
4124 struct ufs_pa_layer_attr *pwr_mode)
4125 {
4126 int ret;
4127
4128 /* if already configured to the requested pwr_mode */
4129 if (pwr_mode->gear_rx == hba->pwr_info.gear_rx &&
4130 pwr_mode->gear_tx == hba->pwr_info.gear_tx &&
4131 pwr_mode->lane_rx == hba->pwr_info.lane_rx &&
4132 pwr_mode->lane_tx == hba->pwr_info.lane_tx &&
4133 pwr_mode->pwr_rx == hba->pwr_info.pwr_rx &&
4134 pwr_mode->pwr_tx == hba->pwr_info.pwr_tx &&
4135 pwr_mode->hs_rate == hba->pwr_info.hs_rate) {
4136 dev_dbg(hba->dev, "%s: power already configured\n", __func__);
4137 return 0;
4138 }
4139
4140 /*
4141 * Configure attributes for power mode change with below.
4142 * - PA_RXGEAR, PA_ACTIVERXDATALANES, PA_RXTERMINATION,
4143 * - PA_TXGEAR, PA_ACTIVETXDATALANES, PA_TXTERMINATION,
4144 * - PA_HSSERIES
4145 */
4146 ufshcd_dme_set(hba, UIC_ARG_MIB(PA_RXGEAR), pwr_mode->gear_rx);
4147 ufshcd_dme_set(hba, UIC_ARG_MIB(PA_ACTIVERXDATALANES),
4148 pwr_mode->lane_rx);
4149 if (pwr_mode->pwr_rx == FASTAUTO_MODE ||
4150 pwr_mode->pwr_rx == FAST_MODE)
4151 ufshcd_dme_set(hba, UIC_ARG_MIB(PA_RXTERMINATION), TRUE);
4152 else
4153 ufshcd_dme_set(hba, UIC_ARG_MIB(PA_RXTERMINATION), FALSE);
4154
4155 ufshcd_dme_set(hba, UIC_ARG_MIB(PA_TXGEAR), pwr_mode->gear_tx);
4156 ufshcd_dme_set(hba, UIC_ARG_MIB(PA_ACTIVETXDATALANES),
4157 pwr_mode->lane_tx);
4158 if (pwr_mode->pwr_tx == FASTAUTO_MODE ||
4159 pwr_mode->pwr_tx == FAST_MODE)
4160 ufshcd_dme_set(hba, UIC_ARG_MIB(PA_TXTERMINATION), TRUE);
4161 else
4162 ufshcd_dme_set(hba, UIC_ARG_MIB(PA_TXTERMINATION), FALSE);
4163
4164 if (pwr_mode->pwr_rx == FASTAUTO_MODE ||
4165 pwr_mode->pwr_tx == FASTAUTO_MODE ||
4166 pwr_mode->pwr_rx == FAST_MODE ||
4167 pwr_mode->pwr_tx == FAST_MODE)
4168 ufshcd_dme_set(hba, UIC_ARG_MIB(PA_HSSERIES),
4169 pwr_mode->hs_rate);
4170
4171 ret = ufshcd_uic_change_pwr_mode(hba, pwr_mode->pwr_rx << 4
4172 | pwr_mode->pwr_tx);
4173
4174 if (ret) {
4175 dev_err(hba->dev,
4176 "%s: power mode change failed %d\n", __func__, ret);
4177 } else {
4178 ufshcd_hold(hba, false);
4179 ret = ufshcd_vops_pwr_change_notify(hba, POST_CHANGE, NULL,
4180 pwr_mode);
4181 ufshcd_release(hba);
4182 if (ret)
4183 goto out;
4184
4185 memcpy(&hba->pwr_info, pwr_mode,
4186 sizeof(struct ufs_pa_layer_attr));
4187 }
4188
4189 out:
4190 return ret;
4191 }
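/*
 * Note on the PA_PWRMODE argument built above (illustrative, assuming the
 * unipro.h encoding where FAST_MODE == 1): the RX mode occupies bits [7:4]
 * and the TX mode bits [3:0], so requesting FAST_MODE in both directions
 * yields (FAST_MODE << 4) | FAST_MODE == 0x11 as argument3 of the
 * subsequent DME_SET(PA_PWRMODE) issued by ufshcd_uic_change_pwr_mode().
 */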
4192
4193 /**
4194 * ufshcd_config_pwr_mode - configure a new power mode
4195 * @hba: per-adapter instance
4196 * @desired_pwr_mode: desired power configuration
4197 */
4198 int ufshcd_config_pwr_mode(struct ufs_hba *hba,
4199 struct ufs_pa_layer_attr *desired_pwr_mode)
4200 {
4201 struct ufs_pa_layer_attr final_params = { 0 };
4202 int ret;
4203
4204 ufshcd_hold(hba, false);
4205 ret = ufshcd_vops_pwr_change_notify(hba, PRE_CHANGE,
4206 desired_pwr_mode, &final_params);
4207
4208 if (ret) {
4209 if (ret == -ENOTSUPP)
4210 memcpy(&final_params, desired_pwr_mode, sizeof(final_params));
4211 else
4212 goto out;
4213 }
4214
4215 ret = ufshcd_change_power_mode(hba, &final_params);
4216 if (!ret)
4217 ufshcd_print_pwr_info(hba);
4218 out:
4219 ufshcd_release(hba);
4220 return ret;
4221 }
4222 EXPORT_SYMBOL_GPL(ufshcd_config_pwr_mode);
4223
4224 /**
4225 * ufshcd_complete_dev_init() - checks device readiness
4226 * @hba: per-adapter instance
4227 *
4228 * Set fDeviceInit flag and poll until device toggles it.
4229 */
4230 static int ufshcd_complete_dev_init(struct ufs_hba *hba)
4231 {
4232 int i;
4233 int err;
4234 bool flag_res = 1;
4235
4236 err = ufshcd_query_flag_retry(hba, UPIU_QUERY_OPCODE_SET_FLAG,
4237 QUERY_FLAG_IDN_FDEVICEINIT, NULL);
4238 if (err) {
4239 dev_err(hba->dev,
4240 "%s setting fDeviceInit flag failed with error %d\n",
4241 __func__, err);
4242 goto out;
4243 }
4244
4245 /* poll for max. 1000 iterations for fDeviceInit flag to clear */
4246 for (i = 0; i < 1000 && !err && flag_res; i++)
4247 err = ufshcd_query_flag_retry(hba, UPIU_QUERY_OPCODE_READ_FLAG,
4248 QUERY_FLAG_IDN_FDEVICEINIT, &flag_res);
4249
4250 if (!err && flag_res)
4251 udelay(100);
4252
4253 if (err)
4254 dev_err(hba->dev,
4255 "%s reading fDeviceInit flag failed with error %d\n",
4256 __func__, err);
4257 else if (flag_res)
4258 dev_err(hba->dev,
4259 "%s fDeviceInit was not cleared by the device\n",
4260 __func__);
4261
4262 out:
4263 return err;
4264 }
4265
4266 /**
4267 * ufshcd_make_hba_operational - Make UFS controller operational
4268 * @hba: per adapter instance
4269 *
4270 * To bring UFS host controller to operational state,
4271 * 1. Enable required interrupts
4272 * 2. Configure interrupt aggregation
4273 * 3. Program UTRL and UTMRL base address
4274 * 4. Configure run-stop-registers
4275 *
4276 * Returns 0 on success, non-zero value on failure
4277 */
4278 static int ufshcd_make_hba_operational(struct ufs_hba *hba)
4279 {
4280 int err = 0;
4281 u32 reg;
4282
4283 /* Enable required interrupts */
4284 ufshcd_enable_intr(hba, UFSHCD_ENABLE_INTRS);
4285
4286 /* Configure interrupt aggregation */
4287 if (ufshcd_is_intr_aggr_allowed(hba))
4288 ufshcd_config_intr_aggr(hba, hba->nutrs - 1, INT_AGGR_DEF_TO);
4289 else
4290 ufshcd_disable_intr_aggr(hba);
4291
4292 /* Configure UTRL and UTMRL base address registers */
4293 ufshcd_writel(hba, lower_32_bits(hba->utrdl_dma_addr),
4294 REG_UTP_TRANSFER_REQ_LIST_BASE_L);
4295 ufshcd_writel(hba, upper_32_bits(hba->utrdl_dma_addr),
4296 REG_UTP_TRANSFER_REQ_LIST_BASE_H);
4297 ufshcd_writel(hba, lower_32_bits(hba->utmrdl_dma_addr),
4298 REG_UTP_TASK_REQ_LIST_BASE_L);
4299 ufshcd_writel(hba, upper_32_bits(hba->utmrdl_dma_addr),
4300 REG_UTP_TASK_REQ_LIST_BASE_H);
4301
4302 /*
4303 * Make sure base address and interrupt setup are updated before
4304 * enabling the run/stop registers below.
4305 */
4306 wmb();
4307
4308 /*
4309 * UCRDY, UTMRLDY and UTRLRDY bits must be 1
4310 */
4311 reg = ufshcd_readl(hba, REG_CONTROLLER_STATUS);
4312 if (!(ufshcd_get_lists_status(reg))) {
4313 ufshcd_enable_run_stop_reg(hba);
4314 } else {
4315 dev_err(hba->dev,
4316 "Host controller not ready to process requests");
4317 err = -EIO;
4318 goto out;
4319 }
4320
4321 out:
4322 return err;
4323 }
4324
4325 /**
4326 * ufshcd_hba_stop - Send controller to reset state
4327 * @hba: per adapter instance
4328 * @can_sleep: perform sleep or just spin
4329 */
4330 static inline void ufshcd_hba_stop(struct ufs_hba *hba, bool can_sleep)
4331 {
4332 int err;
4333
4334 ufshcd_writel(hba, CONTROLLER_DISABLE, REG_CONTROLLER_ENABLE);
4335 err = ufshcd_wait_for_register(hba, REG_CONTROLLER_ENABLE,
4336 CONTROLLER_ENABLE, CONTROLLER_DISABLE,
4337 10, 1, can_sleep);
4338 if (err)
4339 dev_err(hba->dev, "%s: Controller disable failed\n", __func__);
4340 }
4341
4342 /**
4343 * __ufshcd_hba_enable - initialize the controller
4344 * @hba: per adapter instance
4345 *
4346 * The controller resets itself and the controller firmware initialization
4347 * sequence kicks off. When the controller is ready it sets
4348 * the Host Controller Enable bit to 1.
4349 *
4350 * Returns 0 on success, non-zero value on failure
4351 */
4352 static int __ufshcd_hba_enable(struct ufs_hba *hba)
4353 {
4354 int retry;
4355
4356 /*
4357 * msleep of 1 and 5 used in this function might result in msleep(20),
4358 * but it was necessary to send the UFS FPGA to reset mode during
4359 * development and testing of this driver. msleep can be changed to
4360 * mdelay and retry count can be reduced based on the controller.
4361 */
4362 if (!ufshcd_is_hba_active(hba))
4363 /* change controller state to "reset state" */
4364 ufshcd_hba_stop(hba, true);
4365
4366 /* UniPro link is disabled at this point */
4367 ufshcd_set_link_off(hba);
4368
4369 ufshcd_vops_hce_enable_notify(hba, PRE_CHANGE);
4370
4371 /* start controller initialization sequence */
4372 ufshcd_hba_start(hba);
4373
4374 /*
4375 * To initialize a UFS host controller HCE bit must be set to 1.
4376 * During initialization the HCE bit value changes from 1->0->1.
4377 * When the host controller completes initialization sequence
4378 * it sets the value of HCE bit to 1. The same HCE bit is read back
4379 * to check if the controller has completed initialization sequence.
4380 * So without this delay the value HCE = 1, set in the previous
4381 * instruction might be read back.
4382 * This delay can be changed based on the controller.
4383 */
4384 msleep(1);
4385
4386 /* wait for the host controller to complete initialization */
4387 retry = 10;
4388 while (ufshcd_is_hba_active(hba)) {
4389 if (retry) {
4390 retry--;
4391 } else {
4392 dev_err(hba->dev,
4393 "Controller enable failed\n");
4394 return -EIO;
4395 }
4396 msleep(5);
4397 }
4398
4399 /* enable UIC related interrupts */
4400 ufshcd_enable_intr(hba, UFSHCD_UIC_MASK);
4401
4402 ufshcd_vops_hce_enable_notify(hba, POST_CHANGE);
4403
4404 return 0;
4405 }
4406
4407 static int ufshcd_disable_tx_lcc(struct ufs_hba *hba, bool peer)
4408 {
4409 int tx_lanes, i, err = 0;
4410
4411 if (!peer)
4412 ufshcd_dme_get(hba, UIC_ARG_MIB(PA_CONNECTEDTXDATALANES),
4413 &tx_lanes);
4414 else
4415 ufshcd_dme_peer_get(hba, UIC_ARG_MIB(PA_CONNECTEDTXDATALANES),
4416 &tx_lanes);
4417 for (i = 0; i < tx_lanes; i++) {
4418 if (!peer)
4419 err = ufshcd_dme_set(hba,
4420 UIC_ARG_MIB_SEL(TX_LCC_ENABLE,
4421 UIC_ARG_MPHY_TX_GEN_SEL_INDEX(i)),
4422 0);
4423 else
4424 err = ufshcd_dme_peer_set(hba,
4425 UIC_ARG_MIB_SEL(TX_LCC_ENABLE,
4426 UIC_ARG_MPHY_TX_GEN_SEL_INDEX(i)),
4427 0);
4428 if (err) {
4429 dev_err(hba->dev, "%s: TX LCC Disable failed, peer = %d, lane = %d, err = %d",
4430 __func__, peer, i, err);
4431 break;
4432 }
4433 }
4434
4435 return err;
4436 }
4437
4438 static inline int ufshcd_disable_device_tx_lcc(struct ufs_hba *hba)
4439 {
4440 return ufshcd_disable_tx_lcc(hba, true);
4441 }
4442
4443 static int ufshcd_hba_enable(struct ufs_hba *hba)
4444 {
4445 int ret;
4446 unsigned long flags;
4447
4448 ufshcd_hold(hba, false);
4449
4450 spin_lock_irqsave(hba->host->host_lock, flags);
4451 hba->ufshcd_state = UFSHCD_STATE_RESET;
4452 spin_unlock_irqrestore(hba->host->host_lock, flags);
4453
4454 if (hba->vops && hba->vops->host_reset)
4455 hba->vops->host_reset(hba);
4456
4457 if (hba->quirks & UFSHCD_QUIRK_USE_OF_HCE) {
4458 ufshcd_set_link_off(hba);
4459
4460 /* enable UIC related interrupts */
4461 ufshcd_enable_intr(hba, UFSHCD_UIC_MASK);
4462
4463 ret = ufshcd_dme_reset(hba);
4464 if (!ret)
4465 ret = ufshcd_dme_enable(hba);
4466 } else {
4467 ret = __ufshcd_hba_enable(hba);
4468 }
4469 ufshcd_release(hba);
4470
4471 if (ret)
4472 dev_err(hba->dev, "Host controller enable failed\n");
4473
4474 return ret;
4475 }
4476
4477 /**
4478 * ufshcd_link_startup - Initialize unipro link startup
4479 * @hba: per adapter instance
4480 *
4481 * Returns 0 for success, non-zero in case of failure
4482 */
4483 static int ufshcd_link_startup(struct ufs_hba *hba)
4484 {
4485 int ret;
4486 int retries = DME_LINKSTARTUP_RETRIES;
4487
4488 ufshcd_hold(hba, false);
4489
4490 do {
4491 ufshcd_vops_link_startup_notify(hba, PRE_CHANGE);
4492
4493 ret = ufshcd_dme_link_startup(hba);
4494
4495 /* check if device is detected by inter-connect layer */
4496 if (!ret && !ufshcd_is_device_present(hba)) {
4497 dev_err(hba->dev, "%s: Device not present\n", __func__);
4498 ret = -ENXIO;
4499 goto out;
4500 }
4501
4502 /*
4503 * DME link lost indication is only received when link is up,
4504 * but we can't be sure if the link is up until link startup
4505 * succeeds. So reset the local Uni-Pro and try again.
4506 */
4507 if ((ret && !retries) || (ret && ufshcd_hba_enable(hba)))
4508 goto out;
4509 } while (ret && retries--);
4510
4511 if (ret)
4512 /* failed to get the link up... retire */
4513 goto out;
4514
4515 /* Mark that link is up in PWM-G1, 1-lane, SLOW-AUTO mode */
4516 ufshcd_init_pwr_info(hba);
4517 ufshcd_print_pwr_info(hba);
4518
4519 if (hba->quirks & UFSHCD_QUIRK_BROKEN_LCC) {
4520 ret = ufshcd_disable_device_tx_lcc(hba);
4521 if (ret)
4522 goto out;
4523 }
4524
4525 /* Include any host controller configuration via UIC commands */
4526 ret = ufshcd_vops_link_startup_notify(hba, POST_CHANGE);
4527 if (ret)
4528 goto out;
4529
4530 ret = ufshcd_make_hba_operational(hba);
4531 out:
4532 ufshcd_release(hba);
4533
4534 if (ret) {
4535 dev_err(hba->dev, "link startup failed %d\n", ret);
4536 ufshcd_print_host_state(hba);
4537 ufshcd_print_pwr_info(hba);
4538 ufshcd_print_host_regs(hba);
4539 }
4540 return ret;
4541 }
4542
4543 /**
4544 * ufshcd_verify_dev_init() - Verify device initialization
4545 * @hba: per-adapter instance
4546 *
4547 * Send NOP OUT UPIU and wait for NOP IN response to check whether the
4548 * device Transport Protocol (UTP) layer is ready after a reset.
4549 * If the UTP layer at the device side is not initialized, it may
4550 * not respond with NOP IN UPIU within timeout of %NOP_OUT_TIMEOUT
4551 * and we retry sending NOP OUT for %NOP_OUT_RETRIES iterations.
4552 */
4553 static int ufshcd_verify_dev_init(struct ufs_hba *hba)
4554 {
4555 int err = 0;
4556 int retries;
4557
4558 ufshcd_hold(hba, false);
4559 mutex_lock(&hba->dev_cmd.lock);
4560 for (retries = NOP_OUT_RETRIES; retries > 0; retries--) {
4561 err = ufshcd_exec_dev_cmd(hba, DEV_CMD_TYPE_NOP,
4562 NOP_OUT_TIMEOUT);
4563
4564 if (!err || err == -ETIMEDOUT)
4565 break;
4566
4567 dev_dbg(hba->dev, "%s: error %d retrying\n", __func__, err);
4568 }
4569 mutex_unlock(&hba->dev_cmd.lock);
4570 ufshcd_release(hba);
4571
4572 if (err)
4573 dev_err(hba->dev, "%s: NOP OUT failed %d\n", __func__, err);
4574 return err;
4575 }
4576
4577 /**
4578 * ufshcd_set_queue_depth - set lun queue depth
4579 * @sdev: pointer to SCSI device
4580 *
4581 * Read bLUQueueDepth value and activate scsi tagged command
4582 * queueing. For WLUN, queue depth is set to 1. For best-effort
4583 * cases (bLUQueueDepth = 0) the queue depth is set to a maximum
4584 * value that host can queue.
4585 */
4586 static void ufshcd_set_queue_depth(struct scsi_device *sdev)
4587 {
4588 int ret = 0;
4589 u8 lun_qdepth;
4590 struct ufs_hba *hba;
4591
4592 hba = shost_priv(sdev->host);
4593
4594 lun_qdepth = hba->nutrs;
4595 ret = ufshcd_read_unit_desc_param(hba,
4596 ufshcd_scsi_to_upiu_lun(sdev->lun),
4597 UNIT_DESC_PARAM_LU_Q_DEPTH,
4598 &lun_qdepth,
4599 sizeof(lun_qdepth));
4600
4601 /* Some WLUN doesn't support unit descriptor */
4602 if (ret == -EOPNOTSUPP)
4603 lun_qdepth = 1;
4604 else if (!lun_qdepth)
4605 /* eventually, we can figure out the real queue depth */
4606 lun_qdepth = hba->nutrs;
4607 else
4608 lun_qdepth = min_t(int, lun_qdepth, hba->nutrs);
4609
4610 dev_dbg(hba->dev, "%s: activate tcq with queue depth %d\n",
4611 __func__, lun_qdepth);
4612 scsi_change_queue_depth(sdev, lun_qdepth);
4613 }
4614
4615 /*
4616 * ufshcd_get_lu_wp - returns the "b_lu_write_protect" from UNIT DESCRIPTOR
4617 * @hba: per-adapter instance
4618 * @lun: UFS device lun id
4619 * @b_lu_write_protect: pointer to buffer to hold the LU's write protect info
4620 *
4621 * Returns 0 in case of success and the b_lu_write_protect status is returned
4622 * in the @b_lu_write_protect parameter.
4623 * Returns -ENOTSUPP if reading b_lu_write_protect is not supported.
4624 * Returns -EINVAL in case of invalid parameters passed to this function.
4625 */
4626 static int ufshcd_get_lu_wp(struct ufs_hba *hba,
4627 u8 lun,
4628 u8 *b_lu_write_protect)
4629 {
4630 int ret;
4631
4632 if (!b_lu_write_protect)
4633 ret = -EINVAL;
4634 /*
4635 * According to UFS device spec, RPMB LU can't be write
4636 * protected so skip reading bLUWriteProtect parameter for
4637 * it. For other W-LUs, UNIT DESCRIPTOR is not available.
4638 */
4639 else if (lun >= UFS_UPIU_MAX_GENERAL_LUN)
4640 ret = -ENOTSUPP;
4641 else
4642 ret = ufshcd_read_unit_desc_param(hba,
4643 lun,
4644 UNIT_DESC_PARAM_LU_WR_PROTECT,
4645 b_lu_write_protect,
4646 sizeof(*b_lu_write_protect));
4647 return ret;
4648 }
4649
4650 /**
4651 * ufshcd_get_lu_power_on_wp_status - get LU's power on write protect
4652 * status
4653 * @hba: per-adapter instance
4654 * @sdev: pointer to SCSI device
4655 *
4656 */
4657 static inline void ufshcd_get_lu_power_on_wp_status(struct ufs_hba *hba,
4658 struct scsi_device *sdev)
4659 {
4660 if (hba->dev_info.f_power_on_wp_en &&
4661 !hba->dev_info.is_lu_power_on_wp) {
4662 u8 b_lu_write_protect;
4663
4664 if (!ufshcd_get_lu_wp(hba, ufshcd_scsi_to_upiu_lun(sdev->lun),
4665 &b_lu_write_protect) &&
4666 (b_lu_write_protect == UFS_LU_POWER_ON_WP))
4667 hba->dev_info.is_lu_power_on_wp = true;
4668 }
4669 }
4670
4671 static void ufshcd_done(struct request *rq)
4672 {
4673 struct scsi_cmnd *cmd = rq->special;
4674 scsi_dma_unmap(cmd);
4675 scsi_softirq_done(rq);
4676 }
4677
4678 /**
4679 * ufshcd_slave_alloc - handle initial SCSI device configurations
4680 * @sdev: pointer to SCSI device
4681 *
4682 * Returns success
4683 */
4684 static int ufshcd_slave_alloc(struct scsi_device *sdev)
4685 {
4686 struct ufs_hba *hba;
4687
4688 hba = shost_priv(sdev->host);
4689
4690 /* Mode sense(6) is not supported by UFS, so use Mode sense(10) */
4691 sdev->use_10_for_ms = 1;
4692
4693 /* allow SCSI layer to restart the device in case of errors */
4694 sdev->allow_restart = 1;
4695
4696 /* REPORT SUPPORTED OPERATION CODES is not supported */
4697 sdev->no_report_opcodes = 1;
4698
4699 /* WRITE_SAME command is not supported */
4700 sdev->no_write_same = 1;
4701
4702 ufshcd_set_queue_depth(sdev);
4703
4704 ufshcd_get_lu_power_on_wp_status(hba, sdev);
4705
4706 blk_queue_softirq_done(sdev->request_queue, ufshcd_done);
4707
4708 blk_queue_update_dma_alignment(sdev->request_queue, PAGE_SIZE - 1);
4709
4710 return 0;
4711 }
4712
4713 /**
4714 * ufshcd_change_queue_depth - change queue depth
4715 * @sdev: pointer to SCSI device
4716 * @depth: required depth to set
4717 *
4718 * Change queue depth and make sure the max. limits are not crossed.
4719 */
4720 static int ufshcd_change_queue_depth(struct scsi_device *sdev, int depth)
4721 {
4722 struct ufs_hba *hba = shost_priv(sdev->host);
4723
4724 if (depth > hba->nutrs)
4725 depth = hba->nutrs;
4726 return scsi_change_queue_depth(sdev, depth);
4727 }
4728
4729 /**
4730 * ufshcd_slave_configure - adjust SCSI device configurations
4731 * @sdev: pointer to SCSI device
4732 */
4733 static int ufshcd_slave_configure(struct scsi_device *sdev)
4734 {
4735 struct request_queue *q = sdev->request_queue;
4736
4737 blk_queue_update_dma_pad(q, PRDT_DATA_BYTE_COUNT_PAD - 1);
4738 blk_queue_max_segment_size(q, PRDT_DATA_BYTE_COUNT_MAX);
4739 blk_queue_update_dma_alignment(q, PAGE_SIZE - 1);
4740
4741 return 0;
4742 }
4743
4744 /**
4745 * ufshcd_slave_destroy - remove SCSI device configurations
4746 * @sdev: pointer to SCSI device
4747 */
4748 static void ufshcd_slave_destroy(struct scsi_device *sdev)
4749 {
4750 struct ufs_hba *hba;
4751
4752 hba = shost_priv(sdev->host);
4753 /* Drop the reference as it won't be needed anymore */
4754 if (ufshcd_scsi_to_upiu_lun(sdev->lun) == UFS_UPIU_UFS_DEVICE_WLUN) {
4755 unsigned long flags;
4756
4757 spin_lock_irqsave(hba->host->host_lock, flags);
4758 hba->sdev_ufs_device = NULL;
4759 spin_unlock_irqrestore(hba->host->host_lock, flags);
4760 }
4761 }
4762
4763 /**
4764 * ufshcd_task_req_compl - handle task management request completion
4765 * @hba: per adapter instance
4766 * @index: index of the completed request
4767 * @resp: task management service response
4768 *
4769 * Returns non-zero value on error, zero on success
4770 */
4771 static int ufshcd_task_req_compl(struct ufs_hba *hba, u32 index, u8 *resp)
4772 {
4773 struct utp_task_req_desc *task_req_descp;
4774 struct utp_upiu_task_rsp *task_rsp_upiup;
4775 unsigned long flags;
4776 int ocs_value;
4777 int task_result;
4778
4779 spin_lock_irqsave(hba->host->host_lock, flags);
4780
4781 task_req_descp = hba->utmrdl_base_addr;
4782 ocs_value = ufshcd_get_tmr_ocs(&task_req_descp[index]);
4783
4784 if (ocs_value == OCS_SUCCESS) {
4785 task_rsp_upiup = (struct utp_upiu_task_rsp *)
4786 task_req_descp[index].task_rsp_upiu;
4787 task_result = be32_to_cpu(task_rsp_upiup->output_param1);
4788 task_result = task_result & MASK_TM_SERVICE_RESP;
4789 if (resp)
4790 *resp = (u8)task_result;
4791 } else {
4792 dev_err(hba->dev, "%s: failed, ocs = 0x%x\n",
4793 __func__, ocs_value);
4794 }
4795 spin_unlock_irqrestore(hba->host->host_lock, flags);
4796
4797 return ocs_value;
4798 }
4799
4800 /**
4801 * ufshcd_scsi_cmd_status - Update SCSI command result based on SCSI status
4802 * @lrbp: pointer to local reference block of completed command
4803 * @scsi_status: SCSI command status
4804 *
4805 * Returns value based on SCSI command status
4806 */
4807 static inline int
4808 ufshcd_scsi_cmd_status(struct ufshcd_lrb *lrbp, int scsi_status)
4809 {
4810 int result = 0;
4811
4812 switch (scsi_status) {
4813 case SAM_STAT_CHECK_CONDITION:
4814 ufshcd_copy_sense_data(lrbp);
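/* intentional fall through: CHECK CONDITION also reports DID_OK, with the sense data copied above */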
4815 case SAM_STAT_GOOD:
4816 result |= DID_OK << 16 |
4817 COMMAND_COMPLETE << 8 |
4818 scsi_status;
4819 break;
4820 case SAM_STAT_TASK_SET_FULL:
4821 case SAM_STAT_BUSY:
4822 case SAM_STAT_TASK_ABORTED:
4823 ufshcd_copy_sense_data(lrbp);
4824 result |= scsi_status;
4825 break;
4826 default:
4827 result |= DID_ERROR << 16;
4828 break;
4829 } /* end of switch */
4830
4831 return result;
4832 }
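/*
 * Worked example of the result word built above (standard SCSI midlayer
 * encoding): for SAM_STAT_CHECK_CONDITION (0x02) the function returns
 * (DID_OK << 16) | (COMMAND_COMPLETE << 8) | 0x02 == 0x00000002, i.e. the
 * host and message bytes stay zero and the status byte carries the SCSI
 * status, with the sense data already copied by ufshcd_copy_sense_data().
 */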
4833
4834 /**
4835 * ufshcd_transfer_rsp_status - Get overall status of the response
4836 * @hba: per adapter instance
4837 * @lrbp: pointer to local reference block of completed command
4838 *
4839 * Returns result of the command to notify SCSI midlayer
4840 */
4841 static inline int
4842 ufshcd_transfer_rsp_status(struct ufs_hba *hba, struct ufshcd_lrb *lrbp)
4843 {
4844 int result = 0;
4845 int scsi_status;
4846 int ocs;
4847
4848 /* overall command status of utrd */
4849 ocs = ufshcd_get_tr_ocs(lrbp);
4850
4851 switch (ocs) {
4852 case OCS_SUCCESS:
4853 case OCS_FATAL_ERROR:
4854 result = ufshcd_get_req_rsp(lrbp->ucd_rsp_ptr);
4855 hba->ufs_stats.last_hibern8_exit_tstamp = ktime_set(0, 0);
4856 switch (result) {
4857 case UPIU_TRANSACTION_RESPONSE:
4858 /*
4859 * get the response UPIU result to extract
4860 * the SCSI command status
4861 */
4862 result = ufshcd_get_rsp_upiu_result(lrbp->ucd_rsp_ptr);
4863
4864 /*
4865 * get the result based on SCSI status response
4866 * to notify the SCSI midlayer of the command status
4867 */
4868 scsi_status = result & MASK_SCSI_STATUS;
4869 result = ufshcd_scsi_cmd_status(lrbp, scsi_status);
4870
4871 /*
4872 * Currently we are only supporting BKOPs exception
4873 * events hence we can ignore BKOPs exception event
4874 * during power management callbacks. BKOPs exception
4875 * event is not expected to be raised in runtime suspend
4876 * callback as it allows the urgent bkops.
4877 * During system suspend, we are anyway forcefully
4878 * disabling the bkops and if urgent bkops is needed
4879 * it will be enabled on system resume. Long term
4880 * solution could be to abort the system suspend if
4881 * UFS device needs urgent BKOPs.
4882 */
4883 if (!hba->pm_op_in_progress &&
4884 ufshcd_is_exception_event(lrbp->ucd_rsp_ptr) &&
4885 scsi_host_in_recovery(hba->host)) {
4886 schedule_work(&hba->eeh_work);
4887 dev_info(hba->dev, "exception event reported\n");
4888 }
4889
4890 break;
4891 case UPIU_TRANSACTION_REJECT_UPIU:
4892 /* TODO: handle Reject UPIU Response */
4893 result = DID_ERROR << 16;
4894 dev_err(hba->dev,
4895 "Reject UPIU not fully implemented\n");
4896 break;
4897 default:
4898 dev_err(hba->dev,
4899 "Unexpected request response code = %x\n",
4900 result);
4901 result = DID_ERROR << 16;
4902 break;
4903 }
4904 break;
4905 case OCS_ABORTED:
4906 result |= DID_ABORT << 16;
4907 break;
4908 case OCS_INVALID_COMMAND_STATUS:
4909 result |= DID_REQUEUE << 16;
4910 break;
4911 case OCS_INVALID_CMD_TABLE_ATTR:
4912 case OCS_INVALID_PRDT_ATTR:
4913 case OCS_MISMATCH_DATA_BUF_SIZE:
4914 case OCS_MISMATCH_RESP_UPIU_SIZE:
4915 case OCS_PEER_COMM_FAILURE:
4916 default:
4917 result |= DID_ERROR << 16;
4918 dev_err(hba->dev,
4919 "OCS error from controller = %x for tag %d\n",
4920 ocs, lrbp->task_tag);
4921 ufshcd_print_host_regs(hba);
4922 ufshcd_print_host_state(hba);
4923 break;
4924 } /* end of switch */
4925
4926 if (host_byte(result) != DID_OK)
4927 ufshcd_print_trs(hba, 1 << lrbp->task_tag, true);
4928 return result;
4929 }
4930
4931 /**
4932 * ufshcd_uic_cmd_compl - handle completion of uic command
4933 * @hba: per adapter instance
4934 * @intr_status: interrupt status generated by the controller
4935 */
4936 static void ufshcd_uic_cmd_compl(struct ufs_hba *hba, u32 intr_status)
4937 {
4938 if ((intr_status & UIC_COMMAND_COMPL) && hba->active_uic_cmd) {
4939 hba->active_uic_cmd->argument2 |=
4940 ufshcd_get_uic_cmd_result(hba);
4941 hba->active_uic_cmd->argument3 =
4942 ufshcd_get_dme_attr_val(hba);
4943 complete(&hba->active_uic_cmd->done);
4944 }
4945
4946 if ((intr_status & UFSHCD_UIC_PWR_MASK) && hba->uic_async_done)
4947 complete(hba->uic_async_done);
4948 }
4949
4950 /**
4951 * __ufshcd_transfer_req_compl - handle SCSI and query command completion
4952 * @hba: per adapter instance
4953 * @completed_reqs: requests to complete
4954 */
4955 static void __ufshcd_transfer_req_compl(struct ufs_hba *hba, int reason,
4956 unsigned long completed_reqs)
4957 {
4958 struct ufshcd_lrb *lrbp;
4959 struct scsi_cmnd *cmd;
4960 int result;
4961 int index;
4962
4963 for_each_set_bit(index, &completed_reqs, hba->nutrs) {
4964 lrbp = &hba->lrb[index];
4965 cmd = lrbp->cmd;
4966 if (cmd) {
4967 ufshcd_add_command_trace(hba, index, "complete");
4968 result = ufshcd_vops_crypto_engine_clear(hba, lrbp);
4969 if (result) {
4970 dev_err(hba->dev,
4971 "%s: failed to clear crypto engine (%d)\n",
4972 __func__, result);
4973 }
4974 result = ufshcd_transfer_rsp_status(hba, lrbp);
4975 cmd->result = result;
4976 if (reason)
4977 set_host_byte(cmd, reason);
4978 /* Mark completed command as NULL in LRB */
4979 lrbp->cmd = NULL;
4980 clear_bit_unlock(index, &hba->lrb_in_use);
4981 /* Do not touch lrbp after scsi done */
4982 cmd->scsi_done(cmd);
4983 #ifdef CONFIG_SCSI_UFS_CMD_LOGGING
4984 exynos_ufs_cmd_log_end(hba, index);
4985 #endif
4986 __ufshcd_release(hba);
4987
4988 if (hba->monitor.flag & UFSHCD_MONITOR_LEVEL1)
4989 dev_info(hba->dev, "Transfer Done(%d)\n",
4990 index);
4991
4992 } else if (lrbp->command_type == UTP_CMD_TYPE_DEV_MANAGE ||
4993 lrbp->command_type == UTP_CMD_TYPE_UFS_STORAGE) {
4994 if (hba->dev_cmd.complete) {
4995 ufshcd_add_command_trace(hba, index,
4996 "dev_complete");
4997 complete(hba->dev_cmd.complete);
4998 }
4999 }
5000 if (ufshcd_is_clkscaling_supported(hba))
5001 hba->clk_scaling.active_reqs--;
5002 }
5003
5004 /* clear corresponding bits of completed commands */
5005 hba->outstanding_reqs ^= completed_reqs;
5006 #if defined(CONFIG_PM_DEVFREQ)
5007 ufshcd_clk_scaling_update_busy(hba);
5008 #endif
5009 /* we might have free'd some tags above */
5010 wake_up(&hba->dev_cmd.tag_wq);
5011 }
5012
5013 /**
5014 * ufshcd_transfer_req_compl - handle SCSI and query command completion
5015 * @hba: per adapter instance
5016 */
5017 static void ufshcd_transfer_req_compl(struct ufs_hba *hba, int reason)
5018 {
5019 unsigned long completed_reqs;
5020 u32 tr_doorbell;
5021
5022 /* Resetting interrupt aggregation counters first and reading the
5023 * DOOR_BELL afterward allows us to handle all the completed requests.
5024 * In order to prevent other interrupts starvation the DB is read once
5025 * after reset. The down side of this solution is the possibility of
5026 * false interrupt if device completes another request after resetting
5027 * aggregation and before reading the DB.
5028 */
5029 if (!ufshcd_can_reset_intr_aggr(hba) && ufshcd_is_intr_aggr_allowed(hba))
5030 ufshcd_reset_intr_aggr(hba);
5031
5032 tr_doorbell = ufshcd_readl(hba, REG_UTP_TRANSFER_REQ_DOOR_BELL);
5033 completed_reqs = tr_doorbell ^ hba->outstanding_reqs;
5034
5035 __ufshcd_transfer_req_compl(hba, reason, completed_reqs);
5036 }
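/*
 * Illustrative example of the completion bitmask above (tag values
 * assumed): if hba->outstanding_reqs == 0b1011 (tags 0, 1 and 3 issued)
 * and the doorbell register now reads 0b0001 (only tag 0 still pending),
 * then completed_reqs = 0b0001 ^ 0b1011 = 0b1010, so tags 1 and 3 are
 * completed by __ufshcd_transfer_req_compl(), which clears those bits in
 * hba->outstanding_reqs with the same XOR.
 */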
5037
5038 /**
5039 * ufshcd_disable_ee - disable exception event
5040 * @hba: per-adapter instance
5041 * @mask: exception event to disable
5042 *
5043 * Disables exception event in the device so that the EVENT_ALERT
5044 * bit is not set.
5045 *
5046 * Returns zero on success, non-zero error value on failure.
5047 */
5048 static int ufshcd_disable_ee(struct ufs_hba *hba, u16 mask)
5049 {
5050 int err = 0;
5051 u32 val;
5052
5053 if (!(hba->ee_ctrl_mask & mask))
5054 goto out;
5055
5056 val = hba->ee_ctrl_mask & ~mask;
5057 val &= MASK_EE_STATUS;
5058 err = ufshcd_query_attr_retry(hba, UPIU_QUERY_OPCODE_WRITE_ATTR,
5059 QUERY_ATTR_IDN_EE_CONTROL, 0, 0, &val);
5060 if (!err)
5061 hba->ee_ctrl_mask &= ~mask;
5062 out:
5063 return err;
5064 }
5065
5066 /**
5067 * ufshcd_enable_ee - enable exception event
5068 * @hba: per-adapter instance
5069 * @mask: exception event to enable
5070 *
5071 * Enable corresponding exception event in the device to allow
5072 * device to alert host in critical scenarios.
5073 *
5074 * Returns zero on success, non-zero error value on failure.
5075 */
5076 static int ufshcd_enable_ee(struct ufs_hba *hba, u16 mask)
5077 {
5078 int err = 0;
5079 u32 val;
5080
5081 if (hba->ee_ctrl_mask & mask)
5082 goto out;
5083
5084 val = hba->ee_ctrl_mask | mask;
5085 val &= MASK_EE_STATUS;
5086 err = ufshcd_query_attr_retry(hba, UPIU_QUERY_OPCODE_WRITE_ATTR,
5087 QUERY_ATTR_IDN_EE_CONTROL, 0, 0, &val);
5088 if (!err)
5089 hba->ee_ctrl_mask |= mask;
5090 out:
5091 return err;
5092 }
5093
5094 /**
5095 * ufshcd_enable_auto_bkops - Allow device managed BKOPS
5096 * @hba: per-adapter instance
5097 *
5098 * Allow device to manage background operations on its own. Enabling
5099 * this might lead to inconsistent latencies during normal data transfers
5100 * as the device is allowed to manage its own way of handling background
5101 * operations.
5102 *
5103 * Returns zero on success, non-zero on failure.
5104 */
5105 static int ufshcd_enable_auto_bkops(struct ufs_hba *hba)
5106 {
5107 int err = 0;
5108
5109 if (hba->auto_bkops_enabled)
5110 goto out;
5111
5112 err = ufshcd_query_flag_retry(hba, UPIU_QUERY_OPCODE_SET_FLAG,
5113 QUERY_FLAG_IDN_BKOPS_EN, NULL);
5114 if (err) {
5115 dev_err(hba->dev, "%s: failed to enable bkops %d\n",
5116 __func__, err);
5117 goto out;
5118 }
5119
5120 hba->auto_bkops_enabled = true;
5121 trace_ufshcd_auto_bkops_state(dev_name(hba->dev), "Enabled");
5122
5123 /* No need of URGENT_BKOPS exception from the device */
5124 err = ufshcd_disable_ee(hba, MASK_EE_URGENT_BKOPS);
5125 if (err)
5126 dev_err(hba->dev, "%s: failed to disable exception event %d\n",
5127 __func__, err);
5128 out:
5129 return err;
5130 }
5131
5132 /**
5133 * ufshcd_disable_auto_bkops - block device in doing background operations
5134 * @hba: per-adapter instance
5135 *
5136 * Disabling background operations improves command response latency but
5137 * has the drawback of the device moving into a critical state where it is
5138 * not operable. Make sure to call ufshcd_enable_auto_bkops() whenever the
5139 * host is idle so that BKOPS are managed effectively without any negative
5140 * impacts.
5141 *
5142 * Returns zero on success, non-zero on failure.
5143 */
5144 static int ufshcd_disable_auto_bkops(struct ufs_hba *hba)
5145 {
5146 int err = 0;
5147
5148 if (!hba->auto_bkops_enabled)
5149 goto out;
5150
5151 /*
5152 * If host assisted BKOPs is to be enabled, make sure
5153 * urgent bkops exception is allowed.
5154 */
5155 err = ufshcd_enable_ee(hba, MASK_EE_URGENT_BKOPS);
5156 if (err) {
5157 dev_err(hba->dev, "%s: failed to enable exception event %d\n",
5158 __func__, err);
5159 goto out;
5160 }
5161
5162 err = ufshcd_query_flag_retry(hba, UPIU_QUERY_OPCODE_CLEAR_FLAG,
5163 QUERY_FLAG_IDN_BKOPS_EN, NULL);
5164 if (err) {
5165 dev_err(hba->dev, "%s: failed to disable bkops %d\n",
5166 __func__, err);
5167 ufshcd_disable_ee(hba, MASK_EE_URGENT_BKOPS);
5168 goto out;
5169 }
5170
5171 hba->auto_bkops_enabled = false;
5172 trace_ufshcd_auto_bkops_state(dev_name(hba->dev), "Disabled");
5173 out:
5174 return err;
5175 }
5176
5177 /**
5178 * ufshcd_force_reset_auto_bkops - force reset auto bkops state
5179 * @hba: per adapter instance
5180 *
5181 * After a device reset the device may toggle the BKOPS_EN flag
5182 * to default value. The s/w tracking variables should be updated
5183 * as well. This function would change the auto-bkops state based on
5184 * UFSHCD_CAP_KEEP_AUTO_BKOPS_ENABLED_EXCEPT_SUSPEND.
5185 */
5186 static void ufshcd_force_reset_auto_bkops(struct ufs_hba *hba)
5187 {
5188 if (ufshcd_keep_autobkops_enabled_except_suspend(hba)) {
5189 hba->auto_bkops_enabled = false;
5190 hba->ee_ctrl_mask |= MASK_EE_URGENT_BKOPS;
5191 ufshcd_enable_auto_bkops(hba);
5192 } else {
5193 hba->auto_bkops_enabled = true;
5194 hba->ee_ctrl_mask &= ~MASK_EE_URGENT_BKOPS;
5195 ufshcd_disable_auto_bkops(hba);
5196 }
5197 }
5198
5199 static inline int ufshcd_get_bkops_status(struct ufs_hba *hba, u32 *status)
5200 {
5201 return ufshcd_query_attr_retry(hba, UPIU_QUERY_OPCODE_READ_ATTR,
5202 QUERY_ATTR_IDN_BKOPS_STATUS, 0, 0, status);
5203 }
5204
5205 /**
5206 * ufshcd_bkops_ctrl - control the auto bkops based on current bkops status
5207 * @hba: per-adapter instance
5208 * @status: bkops_status value
5209 *
5210 * Read the bkops_status from the UFS device and enable the fBackgroundOpsEn
5211 * flag in the device to permit background operations if the device
5212 * bkops_status is greater than or equal to "status" argument passed to
5213 * this function, disable otherwise.
5214 *
5215 * Returns 0 for success, non-zero in case of failure.
5216 *
5217 * NOTE: Caller of this function can check the "hba->auto_bkops_enabled" flag
5218 * to know whether auto bkops is enabled or disabled after this function
5219 * returns control to it.
5220 */
5221 static int ufshcd_bkops_ctrl(struct ufs_hba *hba,
5222 enum bkops_status status)
5223 {
5224 int err;
5225 u32 curr_status = 0;
5226
5227 err = ufshcd_get_bkops_status(hba, &curr_status);
5228 if (err) {
5229 dev_err(hba->dev, "%s: failed to get BKOPS status %d\n",
5230 __func__, err);
5231 goto out;
5232 } else if (curr_status > BKOPS_STATUS_MAX) {
5233 dev_err(hba->dev, "%s: invalid BKOPS status %d\n",
5234 __func__, curr_status);
5235 err = -EINVAL;
5236 goto out;
5237 }
5238
5239 if (curr_status >= status) {
5240 err = ufshcd_enable_auto_bkops(hba);
5241 if (!err)
5242 dev_info(hba->dev, "%s: auto_bkops enabled, status : %d\n",
5243 __func__, curr_status);
5244 } else
5246 err = ufshcd_disable_auto_bkops(hba);
5247 out:
5248 return err;
5249 }
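/*
 * Illustrative example of the threshold check above (values assumed from
 * the bkops_status enum, where BKOPS_STATUS_NO_OP == 0 and
 * BKOPS_STATUS_PERF_IMPACT == 2): if the device reports
 * BKOPS_STATUS_PERF_IMPACT and the caller passed the same value as
 * "status", curr_status >= status holds and auto-bkops is enabled; a
 * report of BKOPS_STATUS_NO_OP against that threshold disables it instead.
 */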
5250
5251 /**
5252 * ufshcd_urgent_bkops - handle urgent bkops exception event
5253 * @hba: per-adapter instance
5254 *
5255 * Enable fBackgroundOpsEn flag in the device to permit background
5256 * operations.
5257 *
5258 * If BKOPs is enabled, this function returns 0; 1 if bkops is not enabled;
5259 * and a negative error value for any other failure.
5260 */
5261 static int ufshcd_urgent_bkops(struct ufs_hba *hba)
5262 {
5263 return ufshcd_bkops_ctrl(hba, hba->urgent_bkops_lvl);
5264 }
5265
5266 static inline int ufshcd_get_ee_status(struct ufs_hba *hba, u32 *status)
5267 {
5268 return ufshcd_query_attr_retry(hba, UPIU_QUERY_OPCODE_READ_ATTR,
5269 QUERY_ATTR_IDN_EE_STATUS, 0, 0, status);
5270 }
5271
5272 static void ufshcd_bkops_exception_event_handler(struct ufs_hba *hba)
5273 {
5274 int err;
5275 u32 curr_status = 0;
5276
5277 if (hba->is_urgent_bkops_lvl_checked)
5278 goto enable_auto_bkops;
5279
5280 err = ufshcd_get_bkops_status(hba, &curr_status);
5281 if (err) {
5282 dev_err(hba->dev, "%s: failed to get BKOPS status %d\n",
5283 __func__, err);
5284 goto out;
5285 }
5286
5287 /*
5288 * We are seeing that some devices are raising the urgent bkops
5289 * exception events even when BKOPS status doesn't indicate performance
5290 * impacted or critical. Handle such devices by determining their urgent
5291 * bkops status at runtime.
5292 */
5293 if (curr_status < BKOPS_STATUS_PERF_IMPACT) {
5294 dev_err(hba->dev, "%s: device raised urgent BKOPS exception for bkops status %d\n",
5295 __func__, curr_status);
5296 /* update the current status as the urgent bkops level */
5297 hba->urgent_bkops_lvl = curr_status;
5298 hba->is_urgent_bkops_lvl_checked = true;
5299 }
5300
5301 enable_auto_bkops:
5302 err = ufshcd_enable_auto_bkops(hba);
5303 out:
5304 if (err < 0)
5305 dev_err(hba->dev, "%s: failed to handle urgent bkops %d\n",
5306 __func__, err);
5307 }
5308
5309 /**
5310 * ufshcd_exception_event_handler - handle exceptions raised by device
5311 * @work: pointer to work data
5312 *
5313 * Read bExceptionEventStatus attribute from the device and handle the
5314 * exception event accordingly.
5315 */
5316 static void ufshcd_exception_event_handler(struct work_struct *work)
5317 {
5318 struct ufs_hba *hba;
5319 int err;
5320 u32 status = 0;
5321 hba = container_of(work, struct ufs_hba, eeh_work);
5322
5323 pm_runtime_get_sync(hba->dev);
5324 scsi_block_requests(hba->host);
5325 err = ufshcd_get_ee_status(hba, &status);
5326 if (err) {
5327 dev_err(hba->dev, "%s: failed to get exception status %d\n",
5328 __func__, err);
5329 goto out;
5330 }
5331
5332 status &= hba->ee_ctrl_mask;
5333
5334 if (status & MASK_EE_URGENT_BKOPS)
5335 ufshcd_bkops_exception_event_handler(hba);
5336
5337 out:
5338 scsi_unblock_requests(hba->host);
5339 pm_runtime_put_sync(hba->dev);
5340 return;
5341 }
5342
5343 /* Complete requests that have door-bell cleared */
5344 static void ufshcd_complete_requests(struct ufs_hba *hba)
5345 {
5346 ufshcd_transfer_req_compl(hba, 0);
5347 ufshcd_tmc_handler(hba);
5348 }
5349
5350 /**
5351 * ufshcd_quirk_dl_nac_errors - check whether error handling is required
5352 * to recover from the DL NAC errors.
5353 * @hba: per-adapter instance
5354 *
5355 * Returns true if error handling is required, false otherwise
5356 */
5357 static bool ufshcd_quirk_dl_nac_errors(struct ufs_hba *hba)
5358 {
5359 unsigned long flags;
5360 bool err_handling = true;
5361
5362 spin_lock_irqsave(hba->host->host_lock, flags);
5363 /*
5364 * UFS_DEVICE_QUIRK_RECOVERY_FROM_DL_NAC_ERRORS only works around the
5365 * device fatal error and/or DL NAC & REPLAY timeout errors.
5366 */
5367 if (hba->saved_err & (CONTROLLER_FATAL_ERROR | SYSTEM_BUS_FATAL_ERROR))
5368 goto out;
5369
5370 if ((hba->saved_err & DEVICE_FATAL_ERROR) ||
5371 ((hba->saved_err & UIC_ERROR) &&
5372 (hba->saved_uic_err & UFSHCD_UIC_DL_TCx_REPLAY_ERROR)))
5373 goto out;
5374
5375 if ((hba->saved_err & UIC_ERROR) &&
5376 (hba->saved_uic_err & UFSHCD_UIC_DL_NAC_RECEIVED_ERROR)) {
5377 int err;
5378 /*
5379 * wait for 50ms to see if we can get any other errors or not.
5380 */
5381 spin_unlock_irqrestore(hba->host->host_lock, flags);
5382 msleep(50);
5383 spin_lock_irqsave(hba->host->host_lock, flags);
5384
5385 /*
5386 * now check whether we have got any other severe errors besides
5387 * the DL NAC error.
5388 */
5389 if ((hba->saved_err & INT_FATAL_ERRORS) ||
5390 ((hba->saved_err & UIC_ERROR) &&
5391 (hba->saved_uic_err & ~UFSHCD_UIC_DL_NAC_RECEIVED_ERROR)))
5392 goto out;
5393
5394 /*
5395 * As DL NAC is the only error received so far, send out NOP
5396 * command to confirm if link is still active or not.
5397 * - If we don't get any response then do error recovery.
5398 * - If we get response then clear the DL NAC error bit.
5399 */
5400
5401 spin_unlock_irqrestore(hba->host->host_lock, flags);
5402 err = ufshcd_verify_dev_init(hba);
5403 spin_lock_irqsave(hba->host->host_lock, flags);
5404
5405 if (err)
5406 goto out;
5407
5408 /* Link seems to be alive hence ignore the DL NAC errors */
5409 if (hba->saved_uic_err == UFSHCD_UIC_DL_NAC_RECEIVED_ERROR)
5410 hba->saved_err &= ~UIC_ERROR;
5411 /* clear NAC error */
5412 hba->saved_uic_err &= ~UFSHCD_UIC_DL_NAC_RECEIVED_ERROR;
5413 if (!hba->saved_uic_err) {
5414 err_handling = false;
5415 goto out;
5416 }
5417 }
5418 out:
5419 spin_unlock_irqrestore(hba->host->host_lock, flags);
5420 return err_handling;
5421 }
5422
5423 /**
5424 * ufshcd_err_handler - handle UFS errors that require s/w attention
5425 * @work: pointer to work structure
5426 */
5427 static void ufshcd_err_handler(struct work_struct *work)
5428 {
5429 struct ufs_hba *hba;
5430 struct ufs_vreg_info *info;
5431 struct exynos_ufs *ufs;
5432 unsigned long flags;
5433 u32 err_xfer = 0;
5434 u32 err_tm = 0;
5435 int err = 0;
5436 int tag;
5437 bool needs_reset = false;
5438
5439 hba = container_of(work, struct ufs_hba, eh_work);
5440 info = &hba->vreg_info;
5441
5442 pm_runtime_get_sync(hba->dev);
5443 ufshcd_hold(hba, false);
5444
5445 ufs = to_exynos_ufs(hba);
5446 if (hba->saved_err & UIC_ERROR) {
5447 dev_err(hba->dev, ": CLKSTOP CTRL(0x%04x):\t\t\t\t0x%08x\n",
5448 HCI_CLKSTOP_CTRL, hci_readl(ufs, HCI_CLKSTOP_CTRL));
5449 dev_err(hba->dev, ": FORCE HCS(0x%04x):\t\t\t\t0x%08x\n",
5450 HCI_FORCE_HCS, hci_readl(ufs, HCI_FORCE_HCS));
5451 }
5452
5453 /* Dump debugging information to system memory */
5454 ufshcd_vops_dbg_register_dump(hba);
5455
5456 /* Dump UFS power & reset_n GPIO status */
5457 if (gpio_is_valid(info->ufs_power_gpio))
5458 dev_info(hba->dev, "%s: UFS power pin: 0x%08x\n", __func__, gpio_get_value(info->ufs_power_gpio));
5459 if (gpio_is_valid(info->ufs_reset_n_gpio))
5460 dev_info(hba->dev, "%s: RESET_N: 0x%08x\n", __func__, gpio_get_value(info->ufs_reset_n_gpio));
5461
5462 spin_lock_irqsave(hba->host->host_lock, flags);
5463 if (hba->ufshcd_state == UFSHCD_STATE_RESET)
5464 goto out;
5465
5466 hba->ufshcd_state = UFSHCD_STATE_RESET;
5467 ufshcd_set_eh_in_progress(hba);
5468 exynos_ufs_show_uic_info(hba);
5469
5470 /* Complete requests that have door-bell cleared by h/w */
5471 ufshcd_complete_requests(hba);
5472
5473 if (hba->dev_quirks & UFS_DEVICE_QUIRK_RECOVERY_FROM_DL_NAC_ERRORS) {
5474 bool ret;
5475
5476 spin_unlock_irqrestore(hba->host->host_lock, flags);
5477 /* release the lock as ufshcd_quirk_dl_nac_errors() may sleep */
5478 ret = ufshcd_quirk_dl_nac_errors(hba);
5479 spin_lock_irqsave(hba->host->host_lock, flags);
5480 if (!ret)
5481 goto skip_err_handling;
5482 }
5483 if ((hba->saved_err & INT_FATAL_ERRORS) ||
5484 ((hba->saved_err & UIC_ERROR) &&
5485 (hba->saved_uic_err & (UFSHCD_UIC_DL_PA_INIT_ERROR |
5486 UFSHCD_UIC_DL_ERROR |
5487 UFSHCD_UIC_DL_NAC_RECEIVED_ERROR |
5488 UFSHCD_UIC_DL_TCx_REPLAY_ERROR))))
5489 needs_reset = true;
5490
5491 /*
5492 * if host reset is required then skip clearing the pending
5493 * transfers forcefully because they will automatically get
5494 * cleared after link startup.
5495 */
5496 if (needs_reset)
5497 goto skip_pending_xfer_clear;
5498
5499 /* release lock as clear command might sleep */
5500 spin_unlock_irqrestore(hba->host->host_lock, flags);
5501 /* Clear pending transfer requests */
5502 for_each_set_bit(tag, &hba->outstanding_reqs, hba->nutrs) {
5503 if (ufshcd_clear_cmd(hba, tag)) {
5504 err_xfer = true;
5505 goto lock_skip_pending_xfer_clear;
5506 }
5507 }
5508
5509 /* Clear pending task management requests */
5510 for_each_set_bit(tag, &hba->outstanding_tasks, hba->nutmrs) {
5511 if (ufshcd_clear_tm_cmd(hba, tag)) {
5512 err_tm = true;
5513 goto lock_skip_pending_xfer_clear;
5514 }
5515 }
5516
5517 lock_skip_pending_xfer_clear:
5518 spin_lock_irqsave(hba->host->host_lock, flags);
5519
5520 /* Complete the requests that are cleared by s/w */
5521 ufshcd_complete_requests(hba);
5522
5523 if (err_xfer || err_tm)
5524 needs_reset = true;
5525
5526 skip_pending_xfer_clear:
5527 /* Fatal errors need reset */
5528 if (needs_reset) {
5529 unsigned long max_doorbells = (1UL << hba->nutrs) - 1;
5530
5531 /*
5532 * ufshcd_reset_and_restore() does the link reinitialization
5533 * which will need at least one empty doorbell slot to send the
5534 * device management commands (NOP and query commands).
5535 * If there is no slot empty at this moment then free up last
5536 * slot forcefully.
5537 */
5538 if (hba->outstanding_reqs == max_doorbells)
5539 __ufshcd_transfer_req_compl(hba, 0,
5540 (1UL << (hba->nutrs - 1)));
5541
5542 spin_unlock_irqrestore(hba->host->host_lock, flags);
5543
5544 /* Fatal errors need reset */
5545 if (err_xfer || err_tm || (hba->saved_err & INT_FATAL_ERRORS) ||
5546 ((hba->saved_err & UIC_ERROR) &&
5547 ((hba->saved_uic_err & UFSHCD_UIC_DL_PA_INIT_ERROR) ||
5548 (hba->saved_uic_err & UFSHCD_UIC_DL_ERROR))))
5549 dev_err(hba->dev,
5550 "%s: saved_err:0x%x, saved_uic_err:0x%x\n",
5551 __func__, hba->saved_err, hba->saved_uic_err);
5552
5553 err = ufshcd_reset_and_restore(hba);
5554 spin_lock_irqsave(hba->host->host_lock, flags);
5555 if (err) {
5557 hba->ufshcd_state = UFSHCD_STATE_ERROR;
5559
5560 dev_err(hba->dev, "%s: reset and restore failed\n",
5561 __func__);
5562 }
5563 hba->saved_err = 0;
5564 hba->saved_uic_err = 0;
5565 }
5566
5567 skip_err_handling:
5568 if (!needs_reset) {
5569 hba->ufshcd_state = UFSHCD_STATE_OPERATIONAL;
5570 if (hba->saved_err || hba->saved_uic_err)
5571 dev_err_ratelimited(hba->dev, "%s: exit: saved_err 0x%x saved_uic_err 0x%x\n",
5572 __func__, hba->saved_err, hba->saved_uic_err);
5573 }
5574
5575 ufshcd_clear_eh_in_progress(hba);
5576
5577 out:
5578 spin_unlock_irqrestore(hba->host->host_lock, flags);
5579 scsi_unblock_requests(hba->host);
5580 ufshcd_release(hba);
5581 pm_runtime_put_sync(hba->dev);
5582 }
5583
5584 static void ufshcd_update_uic_reg_hist(struct ufs_uic_err_reg_hist *reg_hist,
5585 u32 reg)
5586 {
5587 reg_hist->reg[reg_hist->pos] = reg;
5588 reg_hist->tstamp[reg_hist->pos] = ktime_get();
5589 reg_hist->pos = (reg_hist->pos + 1) % UIC_ERR_REG_HIST_LENGTH;
5590 }
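/*
 * Illustrative sketch (compiled out, not part of the driver): one way the
 * circular history filled in by ufshcd_update_uic_reg_hist() could be
 * walked oldest-first. The helper name and the print format are
 * hypothetical; the fields and UIC_ERR_REG_HIST_LENGTH come from the code
 * above.
 */
#if 0
static void example_print_uic_reg_hist(struct device *dev,
				       struct ufs_uic_err_reg_hist *h)
{
	int i;

	for (i = 0; i < UIC_ERR_REG_HIST_LENGTH; i++) {
		/* 'pos' is the next slot to overwrite, i.e. the oldest entry */
		int p = (h->pos + i) % UIC_ERR_REG_HIST_LENGTH;

		if (!h->reg[p])
			continue;
		dev_err(dev, "uic_err_hist[%d] = 0x%08x at %lld us\n",
			i, h->reg[p], ktime_to_us(h->tstamp[p]));
	}
}
#endif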
5591
5592 /**
5593 * ufshcd_update_uic_error - check and set fatal UIC error flags.
5594 * @hba: per-adapter instance
5595 */
5596 static void ufshcd_update_uic_error(struct ufs_hba *hba)
5597 {
5598 u32 reg;
5599
5600 /* PHY layer lane error */
5601 reg = ufshcd_readl(hba, REG_UIC_ERROR_CODE_PHY_ADAPTER_LAYER);
5602 /* Ignore LINERESET indication, as this is not an error */
5603 if ((reg & UIC_PHY_ADAPTER_LAYER_ERROR) &&
5604 (reg & UIC_PHY_ADAPTER_LAYER_LANE_ERR_MASK)) {
5605 /*
5606 * To know whether this error is fatal or not, DB timeout
5607 * must be checked but this error is handled separately.
5608 */
5609 dev_dbg(hba->dev, "%s: UIC Lane error reported\n", __func__);
5610 ufshcd_update_uic_reg_hist(&hba->ufs_stats.pa_err, reg);
5611 }
5612
5613 /* PA_INIT_ERROR is fatal and needs UIC reset */
5614 reg = ufshcd_readl(hba, REG_UIC_ERROR_CODE_DATA_LINK_LAYER);
5615 if (reg)
5616 ufshcd_update_uic_reg_hist(&hba->ufs_stats.dl_err, reg);
5617
5618 if (reg & UIC_DATA_LINK_LAYER_ERROR_PA_INIT)
5619 hba->uic_error |= UFSHCD_UIC_DL_PA_INIT_ERROR;
5620 else if (reg & UIC_DATA_LINK_LAYER_ERROR_PA_ERROR_IND_RECEIVED) {
5621 if (hba->saved_uic_phy_err_cnt > 10) {
5622 hba->uic_error |= UFSHCD_UIC_DL_PA_INIT_ERROR;
5623 hba->saved_uic_phy_err_cnt = 0;
5624 } else
5625 hba->saved_uic_phy_err_cnt++;
5626 } else if (hba->dev_quirks &
5627 UFS_DEVICE_QUIRK_RECOVERY_FROM_DL_NAC_ERRORS) {
5628 if (reg & UIC_DATA_LINK_LAYER_ERROR_NAC_RECEIVED)
5629 hba->uic_error |=
5630 UFSHCD_UIC_DL_NAC_RECEIVED_ERROR;
5631 else if (reg & UIC_DATA_LINK_LAYER_ERROR_TCx_REPLAY_TIMEOUT)
5632 hba->uic_error |= UFSHCD_UIC_DL_TCx_REPLAY_ERROR;
5633 }
5634
5635 if (reg & UIC_DATA_LINK_LAYER_ERROR_TCX_REP_TIMER_EXP)
5636 hba->tcx_replay_timer_expired_cnt++;
5637
5638 if (reg & UIC_DATA_LINK_LAYER_ERROR_FCX_PRO_TIMER_EXP)
5639 hba->fcx_protection_timer_expired_cnt++;
5640
5641 if (hba->tcx_replay_timer_expired_cnt >= 2 ||
5642 hba->fcx_protection_timer_expired_cnt >= 2)
5643 hba->uic_error |= UFSHCD_UIC_DL_ERROR;
5644
5645 /* UIC NL/TL/DME errors need software retry */
5646 reg = ufshcd_readl(hba, REG_UIC_ERROR_CODE_NETWORK_LAYER);
5647 if (reg) {
5648 ufshcd_update_uic_reg_hist(&hba->ufs_stats.nl_err, reg);
5649 hba->uic_error |= UFSHCD_UIC_NL_ERROR;
5650 }
5651
5652 reg = ufshcd_readl(hba, REG_UIC_ERROR_CODE_TRANSPORT_LAYER);
5653 if (reg) {
5654 ufshcd_update_uic_reg_hist(&hba->ufs_stats.tl_err, reg);
5655 hba->uic_error |= UFSHCD_UIC_TL_ERROR;
5656 }
5657
5658 reg = ufshcd_readl(hba, REG_UIC_ERROR_CODE_DME);
5659 if (reg) {
5660 ufshcd_update_uic_reg_hist(&hba->ufs_stats.dme_err, reg);
5661 hba->uic_error |= UFSHCD_UIC_DME_ERROR;
5662 }
5663
5664 dev_dbg(hba->dev, "%s: UIC error flags = 0x%08x\n",
5665 __func__, hba->uic_error);
5666 }
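/*
 * Minimal sketch (compiled out) of the PA_ERROR storm-control policy
 * implemented above: repeated PA_ERROR_IND_RECEIVED indications are only
 * counted until the count exceeds 10, at which point the error is reported
 * as UFSHCD_UIC_DL_PA_INIT_ERROR, which the error handler treats as fatal
 * and resets the link. The counter is cleared in ufshcd_sl_intr() whenever
 * an interrupt arrives with no error bits set. The helper name is
 * hypothetical.
 */
#if 0
static bool example_pa_err_storm_escalate(struct ufs_hba *hba)
{
	if (hba->saved_uic_phy_err_cnt > 10) {
		hba->saved_uic_phy_err_cnt = 0;
		return true;		/* escalate: treat as fatal */
	}
	hba->saved_uic_phy_err_cnt++;
	return false;			/* tolerate and keep counting */
}
#endif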
5667
5668 /**
5669 * ufshcd_check_errors - Check for errors that need s/w attention
5670 * @hba: per-adapter instance
5671 */
5672 static void ufshcd_check_errors(struct ufs_hba *hba)
5673 {
5674 bool queue_eh_work = false;
5675
5676 if (hba->errors & INT_FATAL_ERRORS)
5677 queue_eh_work = true;
5678
5679 if (hba->errors & UIC_ERROR) {
5680 hba->uic_error = 0;
5681 ufshcd_update_uic_error(hba);
5682 if (hba->uic_error)
5683 queue_eh_work = true;
5684 }
5685
5686 if (queue_eh_work) {
5687 /*
5688 * update the transfer error masks to sticky bits, let's do this
5689 * irrespective of current ufshcd_state.
5690 */
5691 hba->saved_err |= hba->errors;
5692 hba->saved_uic_err |= hba->uic_error;
5693
5694 /* handle fatal errors only when link is functional */
5695 if (hba->ufshcd_state == UFSHCD_STATE_OPERATIONAL) {
5696 /* block commands from scsi mid-layer */
5697 scsi_block_requests(hba->host);
5698
5699 hba->ufshcd_state = UFSHCD_STATE_EH_SCHEDULED;
5700
5701 /* dump controller state before resetting */
5702 if (hba->saved_err & (INT_FATAL_ERRORS | UIC_ERROR)) {
5703 bool pr_prdt = !!(hba->saved_err &
5704 SYSTEM_BUS_FATAL_ERROR);
5705
5706 dev_err(hba->dev, "%s: saved_err 0x%x saved_uic_err 0x%x\n",
5707 __func__, hba->saved_err,
5708 hba->saved_uic_err);
5709
5710 ufshcd_print_host_regs(hba);
5711 ufshcd_print_pwr_info(hba);
5712 ufshcd_print_tmrs(hba, hba->outstanding_tasks);
5713 ufshcd_print_trs(hba, hba->outstanding_reqs,
5714 pr_prdt);
5715 }
5716 schedule_work(&hba->eh_work);
5717 }
5718 }
5719 /*
5720 * if (!queue_eh_work) -
5721 * Other errors are either non-fatal where host recovers
5722 * itself without s/w intervention or errors that will be
5723 * handled by the SCSI core layer.
5724 */
5725 }
5726
5727 /**
5728 * ufshcd_tmc_handler - handle task management function completion
5729 * @hba: per adapter instance
5730 */
5731 static void ufshcd_tmc_handler(struct ufs_hba *hba)
5732 {
5733 u32 tm_doorbell;
5734
5735 tm_doorbell = ufshcd_readl(hba, REG_UTP_TASK_REQ_DOOR_BELL);
5736 hba->tm_condition = tm_doorbell ^ hba->outstanding_tasks;
5737 hba->outstanding_tasks ^= hba->tm_condition;
5738 wake_up(&hba->tm_wq);
5739 }
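/*
 * Worked example of the XOR bookkeeping above (added for illustration):
 * if tasks 0 and 2 were outstanding (outstanding_tasks == 0b101) and the
 * task doorbell now reads 0b100, then tm_condition = 0b100 ^ 0b101 = 0b001,
 * i.e. task 0 has completed, and the second XOR reduces outstanding_tasks
 * to 0b100.
 */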
5740
5741 /**
5742 * ufshcd_sl_intr - Interrupt service routine
5743 * @hba: per adapter instance
5744 * @intr_status: contains interrupts generated by the controller
5745 */
5746 static void ufshcd_sl_intr(struct ufs_hba *hba, u32 intr_status)
5747 {
5748 hba->errors = UFSHCD_ERROR_MASK & intr_status;
5749 if (hba->errors)
5750 ufshcd_check_errors(hba);
5751 else
5752 hba->saved_uic_phy_err_cnt = 0;
5753
5754 if (intr_status & UFSHCD_UIC_MASK)
5755 ufshcd_uic_cmd_compl(hba, intr_status);
5756
5757 if (intr_status & UTP_TASK_REQ_COMPL)
5758 ufshcd_tmc_handler(hba);
5759
5760 if (intr_status & UTP_TRANSFER_REQ_COMPL)
5761 ufshcd_transfer_req_compl(hba, 0);
5762
5763 /* Disable the UIC error interrupt to stop an interrupt storm */
5764 if (hba->saved_uic_err && (hba->ufshcd_state != UFSHCD_STATE_RESET))
5765 ufshcd_disable_intr(hba, UIC_ERROR);
5766 }
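/*
 * Note (added for clarity): once a UIC error has been latched into
 * saved_uic_err, the UIC_ERROR interrupt source is masked here so that a
 * storming PA_ERROR line cannot flood the CPU before ufshcd_err_handler()
 * runs; the source is presumably re-enabled when the interrupt mask is
 * reprogrammed during host re-initialisation.
 */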
5767
5768 /**
5769 * ufshcd_intr - Main interrupt service routine
5770 * @irq: irq number
5771 * @__hba: pointer to adapter instance
5772 *
5773 * Returns IRQ_HANDLED - If interrupt is valid
5774 * IRQ_NONE - If invalid interrupt
5775 */
5776 static irqreturn_t ufshcd_intr(int irq, void *__hba)
5777 {
5778 u32 intr_status, enabled_intr_status;
5779 irqreturn_t retval = IRQ_NONE;
5780 struct ufs_hba *hba = __hba;
5781
5782 spin_lock(hba->host->host_lock);
5783 intr_status = ufshcd_readl(hba, REG_INTERRUPT_STATUS);
5784 enabled_intr_status =
5785 intr_status & ufshcd_readl(hba, REG_INTERRUPT_ENABLE);
5786
5787 if (intr_status)
5788 ufshcd_writel(hba, intr_status, REG_INTERRUPT_STATUS);
5789
5790 if (enabled_intr_status) {
5791 ufshcd_sl_intr(hba, enabled_intr_status);
5792 retval = IRQ_HANDLED;
5793 }
5794 spin_unlock(hba->host->host_lock);
5795 return retval;
5796 }
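/*
 * Worked example (added for illustration): if REG_INTERRUPT_STATUS has
 * both UTP_TRANSFER_REQ_COMPL and UIC_ERROR set, but UIC_ERROR has been
 * masked out of REG_INTERRUPT_ENABLE by the storm-control disable in
 * ufshcd_sl_intr(), then enabled_intr_status keeps only the completion bit
 * while the status write-back above still clears both bits in hardware.
 */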
5797
5798 static int ufshcd_clear_tm_cmd(struct ufs_hba *hba, int tag)
5799 {
5800 int err = 0;
5801 u32 mask = 1 << tag;
5802 unsigned long flags;
5803
5804 spin_lock_irqsave(hba->host->host_lock, flags);
5805 ufshcd_utmrl_clear(hba, tag);
5806 spin_unlock_irqrestore(hba->host->host_lock, flags);
5807
5808 /* poll for max. 1 sec to clear door bell register by h/w */
5809 err = ufshcd_wait_for_register(hba,
5810 REG_UTP_TASK_REQ_DOOR_BELL,
5811 mask, 0, 1000, 1000, true);
5812 return err;
5813 }
5814
5815 /**
5816 * ufshcd_issue_tm_cmd - issues task management commands to controller
5817 * @hba: per adapter instance
5818 * @lun_id: LUN ID to which TM command is sent
5819 * @task_id: task ID to which the TM command is applicable
5820 * @tm_function: task management function opcode
5821 * @tm_response: task management service response return value
5822 *
5823 * Returns non-zero value on error, zero on success.
5824 */
5825 static int ufshcd_issue_tm_cmd(struct ufs_hba *hba, int lun_id, int task_id,
5826 u8 tm_function, u8 *tm_response)
5827 {
5828 struct utp_task_req_desc *task_req_descp;
5829 struct utp_upiu_task_req *task_req_upiup;
5830 struct Scsi_Host *host;
5831 unsigned long flags;
5832 int free_slot;
5833 int err;
5834 int task_tag;
5835
5836 host = hba->host;
5837
5838 /*
5839 * Get free slot, sleep if slots are unavailable.
5840 * Even though we use wait_event() which sleeps indefinitely,
5841 * the maximum wait time is bounded by %TM_CMD_TIMEOUT.
5842 */
5843 wait_event(hba->tm_tag_wq, ufshcd_get_tm_free_slot(hba, &free_slot));
5844 ufshcd_hold(hba, false);
5845
5846 spin_lock_irqsave(host->host_lock, flags);
5847 task_req_descp = hba->utmrdl_base_addr;
5848 task_req_descp += free_slot;
5849
5850 /* Configure task request descriptor */
5851 task_req_descp->header.dword_0 = cpu_to_le32(UTP_REQ_DESC_INT_CMD);
5852 task_req_descp->header.dword_2 =
5853 cpu_to_le32(OCS_INVALID_COMMAND_STATUS);
5854
5855 /* Configure task request UPIU */
5856 task_req_upiup =
5857 (struct utp_upiu_task_req *) task_req_descp->task_req_upiu;
5858 task_tag = hba->nutrs + free_slot;
5859 task_req_upiup->header.dword_0 =
5860 UPIU_HEADER_DWORD(UPIU_TRANSACTION_TASK_REQ, 0,
5861 lun_id, task_tag);
5862 task_req_upiup->header.dword_1 =
5863 UPIU_HEADER_DWORD(0, tm_function, 0, 0);
5864 /*
5865 * The host shall provide the same value for LUN field in the basic
5866 * header and for Input Parameter.
5867 */
5868 task_req_upiup->input_param1 = cpu_to_be32(lun_id);
5869 task_req_upiup->input_param2 = cpu_to_be32(task_id);
5870
5871 /* send command to the controller */
5872 if (hba->vops && hba->vops->set_nexus_t_task_mgmt)
5873 hba->vops->set_nexus_t_task_mgmt(hba, free_slot, tm_function);
5874 __set_bit(free_slot, &hba->outstanding_tasks);
5875
5876 /* Make sure descriptors are ready before ringing the task doorbell */
5877 wmb();
5878
5879 ufshcd_writel(hba, 1 << free_slot, REG_UTP_TASK_REQ_DOOR_BELL);
5880 /* Make sure that doorbell is committed immediately */
5881 wmb();
5882
5883 spin_unlock_irqrestore(host->host_lock, flags);
5884
5885 /* wait until the task management command is completed */
5886 err = wait_event_timeout(hba->tm_wq,
5887 test_bit(free_slot, &hba->tm_condition),
5888 msecs_to_jiffies(TM_CMD_TIMEOUT));
5889 if (!err) {
5890 dev_err(hba->dev, "%s: task management cmd 0x%.2x timed-out\n",
5891 __func__, tm_function);
5892 if (!ufshcd_clear_tm_cmd(hba, free_slot)) {
5893 spin_lock_irqsave(hba->host->host_lock, flags);
5894 __clear_bit(free_slot, &hba->outstanding_tasks);
5895 spin_unlock_irqrestore(hba->host->host_lock, flags);
5896 } else {
5897 dev_WARN(hba->dev, "%s: unable to clear tm cmd (slot %d) after timeout\n",
5898 __func__, free_slot);
5899 }
5900 err = -ETIMEDOUT;
5901 } else {
5902 err = ufshcd_task_req_compl(hba, free_slot, tm_response);
5903 }
5904
5905 clear_bit(free_slot, &hba->tm_condition);
5906 ufshcd_put_tm_slot(hba, free_slot);
5907 wake_up(&hba->tm_tag_wq);
5908
5909 ufshcd_release(hba);
5910 return err;
5911 }
5912
5913 /**
5914 * ufshcd_eh_device_reset_handler - device reset handler registered to
5915 * scsi layer.
5916 * @cmd: SCSI command pointer
5917 *
5918 * Returns SUCCESS/FAILED
5919 */
5920 static int ufshcd_eh_device_reset_handler(struct scsi_cmnd *cmd)
5921 {
5922 struct Scsi_Host *host;
5923 struct ufs_hba *hba;
5924 unsigned int tag;
5925 u32 pos;
5926 int err;
5927 u8 resp = 0xF;
5928 struct ufshcd_lrb *lrbp;
5929 unsigned long flags;
5930
5931 host = cmd->device->host;
5932 hba = shost_priv(host);
5933 tag = cmd->request->tag;
5934
5935 /* secure log */
5936 #ifdef CONFIG_EXYNOS_SMC_LOGGING
5937 exynos_smc(SMC_CMD_UFS_LOG, 1, 0, hba->secure_log.paddr);
5938 #endif
5939
5940 /* Dump debugging information to system memory */
5941 ufshcd_vops_dbg_register_dump(hba);
5942 exynos_ufs_show_uic_info(hba);
5943
5944 lrbp = &hba->lrb[tag];
5945 err = ufshcd_issue_tm_cmd(hba, lrbp->lun, 0, UFS_LOGICAL_RESET, &resp);
5946 if (err || resp != UPIU_TASK_MANAGEMENT_FUNC_COMPL) {
5947 if (!err)
5948 err = resp;
5949 goto out;
5950 }
5951
5952 /* clear the commands that were pending for corresponding LUN */
5953 for_each_set_bit(pos, &hba->outstanding_reqs, hba->nutrs) {
5954 if (hba->lrb[pos].lun == lrbp->lun) {
5955 err = ufshcd_clear_cmd(hba, pos);
5956 if (err)
5957 break;
5958 }
5959 }
5960 spin_lock_irqsave(host->host_lock, flags);
5961 ufshcd_transfer_req_compl(hba, DID_RESET);
5962 spin_unlock_irqrestore(host->host_lock, flags);
5963
5964 out:
5965 hba->req_abort_count = 0;
5966 if (!err) {
5967 dev_info(hba->dev, "%s: LU reset succeeded\n", __func__);
5968 err = SUCCESS;
5969 } else {
5970 dev_err(hba->dev, "%s: failed with err %d\n", __func__, err);
5971 err = FAILED;
5972 }
5973 return err;
5974 }
5975
5976 static void ufshcd_set_req_abort_skip(struct ufs_hba *hba, unsigned long bitmap)
5977 {
5978 struct ufshcd_lrb *lrbp;
5979 int tag;
5980
5981 for_each_set_bit(tag, &bitmap, hba->nutrs) {
5982 lrbp = &hba->lrb[tag];
5983 lrbp->req_abort_skip = true;
5984 }
5985 }
5986
5987 /**
5988 * ufshcd_abort - abort a specific command
5989 * @cmd: SCSI command pointer
5990 *
5991 * Abort the pending command in device by sending UFS_ABORT_TASK task management
5992 * command, and in host controller by clearing the door-bell register. There can
5993 * be race between controller sending the command to the device while abort is
5994 * issued. To avoid that, first issue UFS_QUERY_TASK to check if the command is
5995 * really issued and then try to abort it.
5996 *
5997 * Returns SUCCESS/FAILED
5998 */
5999 static int ufshcd_abort(struct scsi_cmnd *cmd)
6000 {
6001 struct Scsi_Host *host;
6002 struct ufs_hba *hba;
6003 unsigned long flags;
6004 unsigned int tag;
6005 int err = 0;
6006 int poll_cnt;
6007 u8 resp = 0xF;
6008 struct ufshcd_lrb *lrbp;
6009 u32 reg;
6010
6011 host = cmd->device->host;
6012 hba = shost_priv(host);
6013 tag = cmd->request->tag;
6014 lrbp = &hba->lrb[tag];
6015 if (!ufshcd_valid_tag(hba, tag)) {
6016 dev_err(hba->dev,
6017 "%s: invalid command tag %d: cmd=0x%p, cmd->request=0x%p",
6018 __func__, tag, cmd, cmd->request);
6019 BUG();
6020 }
6021
6022 /*
6023 * Task abort to the device W-LUN is illegal. When this command
6024 * fails due to the spec violation, the next SCSI error-handling step
6025 * would be an LU reset which, again, is a spec violation.
6026 * To avoid these unnecessary/illegal steps we skip to the last error
6027 * handling stage: reset and restore.
6028 */
6029 if (lrbp->lun == UFS_UPIU_UFS_DEVICE_WLUN)
6030 return ufshcd_eh_host_reset_handler(cmd);
6031
6032 /* secure log */
6033 #ifdef CONFIG_EXYNOS_SMC_LOGGING
6034 exynos_smc(SMC_CMD_UFS_LOG, 1, 0, hba->secure_log.paddr);
6035 #endif
6036
6037 if (cmd->cmnd[0] == READ_10 || cmd->cmnd[0] == WRITE_10) {
6038 unsigned long lba = (unsigned long) ((cmd->cmnd[2] << 24) |
6039 (cmd->cmnd[3] << 16) |
6040 (cmd->cmnd[4] << 8) |
6041 (cmd->cmnd[5] << 0));
6042 unsigned int sct = (cmd->cmnd[7] << 8) |
6043 (cmd->cmnd[8] << 0);
6044
6045 dev_err(hba->dev, "%s: tag:%d, cmd:0x%x, "
6046 "lba:0x%08lx, sct:0x%04x, retries %d\n",
6047 __func__, tag, cmd->cmnd[0], lba, sct, cmd->retries);
6048 } else {
6049 dev_err(hba->dev, "%s: tag:%d, cmd:0x%x, retries %d\n",
6050 __func__, tag, cmd->cmnd[0], cmd->retries);
6051 }
6052
6053 ufshcd_hold(hba, false);
6054
6055 /* Dump debugging information to system memory */
6056 ufshcd_vops_dbg_register_dump(hba);
6057 reg = ufshcd_readl(hba, REG_UTP_TRANSFER_REQ_DOOR_BELL);
6058 /* If command is already aborted/completed, return SUCCESS */
6059 if (!(test_bit(tag, &hba->outstanding_reqs))) {
6060 dev_err(hba->dev,
6061 "%s: cmd at tag %d already completed, outstanding=0x%lx, doorbell=0x%x\n",
6062 __func__, tag, hba->outstanding_reqs, reg);
6063 goto out;
6064 }
6065
6066 if (!(reg & (1 << tag))) {
6067 dev_err(hba->dev,
6068 "%s: cmd was completed, but without a notifying intr, tag = %d",
6069 __func__, tag);
6070 goto clean;
6071 }
6072
6073 /* Print Transfer Request of aborted task */
6074 dev_err(hba->dev, "%s: Device abort task at tag %d\n", __func__, tag);
6075
6076 /*
6077 * Print detailed info about aborted request.
6078 * As more than one request might get aborted at the same time,
6079 * print full information only for the first aborted request in order
6080 * to reduce repeated printouts. For other aborted requests only print
6081 * basic details.
6082 */
6083 scsi_print_command(hba->lrb[tag].cmd);
6084 if (!hba->req_abort_count) {
6085 ufshcd_print_host_regs(hba);
6086 ufshcd_print_host_state(hba);
6087 ufshcd_print_pwr_info(hba);
6088 ufshcd_print_trs(hba, 1 << tag, true);
6089 } else {
6090 ufshcd_print_trs(hba, 1 << tag, false);
6091 }
6092 hba->req_abort_count++;
6093
6094 /* Skip task abort in case previous aborts failed and report failure */
6095 if (lrbp->req_abort_skip) {
6096 err = -EIO;
6097 goto out;
6098 }
6099
6100 for (poll_cnt = 100; poll_cnt; poll_cnt--) {
6101 err = ufshcd_issue_tm_cmd(hba, lrbp->lun, lrbp->task_tag,
6102 UFS_QUERY_TASK, &resp);
6103 if (!err && resp == UPIU_TASK_MANAGEMENT_FUNC_SUCCEEDED) {
6104 /* cmd pending in the device */
6105 dev_err(hba->dev, "%s: cmd pending in the device. tag = %d\n",
6106 __func__, tag);
6107 break;
6108 } else if (!err && resp == UPIU_TASK_MANAGEMENT_FUNC_COMPL) {
6109 /*
6110 * cmd not pending in the device, check if it is
6111 * in transition.
6112 */
6113 dev_err(hba->dev, "%s: cmd at tag %d not pending in the device.\n",
6114 __func__, tag);
6115 reg = ufshcd_readl(hba, REG_UTP_TRANSFER_REQ_DOOR_BELL);
6116 if (reg & (1 << tag)) {
6117 /* sleep for max. 200us to stabilize */
6118 usleep_range(100, 200);
6119 continue;
6120 }
6121 /* command completed already */
6122 dev_err(hba->dev, "%s: cmd at tag %d successfully cleared from DB.\n",
6123 __func__, tag);
6124 goto out;
6125 } else {
6126 dev_err(hba->dev,
6127 "%s: no response from device. tag = %d, err %d\n",
6128 __func__, tag, err);
6129 if (!err)
6130 err = resp; /* service response error */
6131 dev_err(hba->dev,
6132 "%s: query task failed with err %d\n",
6133 __func__, err);
6134 goto out;
6135 }
6136 }
6137
6138 if (!poll_cnt) {
6139 err = -EBUSY;
6140 dev_err(hba->dev,
6141 "%s: cmd might be missed, not pending in device\n",
6142 __func__);
6143 goto out;
6144 }
6145
6146 err = ufshcd_issue_tm_cmd(hba, lrbp->lun, lrbp->task_tag,
6147 UFS_ABORT_TASK, &resp);
6148 if (err || resp != UPIU_TASK_MANAGEMENT_FUNC_COMPL) {
6149 if (!err) {
6150 err = resp; /* service response error */
6151 dev_err(hba->dev, "%s: issued. tag = %d, err %d\n",
6152 __func__, tag, err);
6153 }
6154 goto out;
6155 }
6156
6157 err = ufshcd_clear_cmd(hba, tag);
6158 if (err) {
6159 dev_err(hba->dev, "%s: Failed clearing cmd at tag %d, err %d\n",
6160 __func__, tag, err);
6161 goto out;
6162 }
6163 clean:
6164 scsi_dma_unmap(cmd);
6165
6166 spin_lock_irqsave(host->host_lock, flags);
6167 ufshcd_outstanding_req_clear(hba, tag);
6168 hba->lrb[tag].cmd = NULL;
6169 spin_unlock_irqrestore(host->host_lock, flags);
6170
6171 clear_bit_unlock(tag, &hba->lrb_in_use);
6172 wake_up(&hba->dev_cmd.tag_wq);
6173
6174 out:
6175 if (!err) {
6176 err = SUCCESS;
6177 } else {
6178 dev_err(hba->dev, "%s: failed with err %d\n", __func__, err);
6179 ufshcd_set_req_abort_skip(hba, hba->outstanding_reqs);
6180 err = FAILED;
6181 }
6182
6183 /*
6184 * This ufshcd_release() corresponds to the original scsi cmd that got
6185 * aborted here (as we won't get any IRQ for it).
6186 */
6187 ufshcd_release(hba);
6188 return err;
6189 }
6190
6191 /**
6192 * ufshcd_host_reset_and_restore - reset and restore host controller
6193 * @hba: per-adapter instance
6194 *
6195 * Note that host controller reset may issue DME_RESET to
6196 * local and remote (device) Uni-Pro stack and the attributes
6197 * are reset to default state.
6198 *
6199 * Returns zero on success, non-zero on failure
6200 */
6201 static int ufshcd_host_reset_and_restore(struct ufs_hba *hba)
6202 {
6203 int err = 0;
6204 unsigned long flags;
6205
6206 /* Reset the host controller */
6207 spin_lock_irqsave(hba->host->host_lock, flags);
6208 hba->ufshcd_state = UFSHCD_STATE_RESET;
6209 ufshcd_set_eh_in_progress(hba);
6210 ufshcd_hba_stop(hba, false);
6211 spin_unlock_irqrestore(hba->host->host_lock, flags);
6212
6213 #if defined(CONFIG_PM_DEVFREQ)
6214 /* scale up clocks to max frequency before full reinitialization */
6215 ufshcd_scale_clks(hba, true);
6216 #endif
6217
6218 /* Establish the link again and restore the device */
6219 #ifdef CONFIG_SCSI_UFS_ASYNC_RELINK
6220 if (hba->pm_op_in_progress)
6221 async_schedule(ufshcd_async_scan, hba);
6222 else
6223 #endif
6224 {
6225 err = ufshcd_probe_hba(hba);
6226
6227 if (!err && (hba->ufshcd_state != UFSHCD_STATE_OPERATIONAL)) {
6228 dev_err(hba->dev, "%s: failed\n", __func__);
6229 err = -EIO;
6230 }
6231 }
6232
6233 spin_lock_irqsave(hba->host->host_lock, flags);
6234 ufshcd_clear_eh_in_progress(hba);
6235 spin_unlock_irqrestore(hba->host->host_lock, flags);
6236
6237 return err;
6238 }
6239
6240 /**
6241 * ufshcd_reset_and_restore - reset and re-initialize host/device
6242 * @hba: per-adapter instance
6243 *
6244 * Reset and recover device, host and re-establish link. This
6245 * is helpful to recover the communication in fatal error conditions.
6246 *
6247 * Returns zero on success, non-zero on failure
6248 */
6249 static int ufshcd_reset_and_restore(struct ufs_hba *hba)
6250 {
6251 int err = 0;
6252 unsigned long flags;
6253 int retries = MAX_HOST_RESET_RETRIES;
6254
6255 int tag;
6256
6257 for_each_set_bit(tag, &hba->outstanding_reqs, hba->nutrs)
6258 ufshcd_clear_cmd(hba, tag);
6259
6260 spin_lock_irqsave(hba->host->host_lock, flags);
6261 ufshcd_transfer_req_compl(hba, DID_RESET);
6262 spin_unlock_irqrestore(hba->host->host_lock, flags);
6263
6264 ssleep(1);
6265
6266 do {
6267 err = ufshcd_host_reset_and_restore(hba);
6268 } while (err && --retries);
6269
6270 /*
6271 * After reset the door-bell might be cleared, complete
6272 * outstanding requests in s/w here.
6273 */
6274 spin_lock_irqsave(hba->host->host_lock, flags);
6275 ufshcd_transfer_req_compl(hba, DID_RESET);
6276 ufshcd_tmc_handler(hba);
6277 spin_unlock_irqrestore(hba->host->host_lock, flags);
6278
6279 return err;
6280 }
6281
6282 /**
6283 * ufshcd_eh_host_reset_handler - host reset handler registered to scsi layer
6284 * @cmd - SCSI command pointer
6285 *
6286 * Returns SUCCESS/FAILED
6287 */
6288 static int ufshcd_eh_host_reset_handler(struct scsi_cmnd *cmd)
6289 {
6290 int err;
6291 unsigned long flags;
6292 struct ufs_hba *hba;
6293
6294 hba = shost_priv(cmd->device->host);
6295
6296 ufshcd_hold(hba, false);
6297 /*
6298 * Check if there is any race with fatal error handling.
6299 * If so, wait for it to complete. Even though fatal error
6300 * handling does reset and restore in some cases, don't assume
6301 * anything out of it. We are just avoiding race here.
6302 */
6303 do {
6304 spin_lock_irqsave(hba->host->host_lock, flags);
6305 if (!(work_pending(&hba->eh_work) ||
6306 hba->ufshcd_state == UFSHCD_STATE_RESET ||
6307 hba->ufshcd_state == UFSHCD_STATE_EH_SCHEDULED))
6308 break;
6309 spin_unlock_irqrestore(hba->host->host_lock, flags);
6310 dev_dbg(hba->dev, "%s: reset in progress\n", __func__);
6311 flush_work(&hba->eh_work);
6312 } while (1);
6313
6314 hba->ufshcd_state = UFSHCD_STATE_RESET;
6315 ufshcd_set_eh_in_progress(hba);
6316 spin_unlock_irqrestore(hba->host->host_lock, flags);
6317
6318 err = ufshcd_reset_and_restore(hba);
6319
6320 spin_lock_irqsave(hba->host->host_lock, flags);
6321 if (!err) {
6322 err = SUCCESS;
6323 hba->ufshcd_state = UFSHCD_STATE_OPERATIONAL;
6324 } else {
6325 err = FAILED;
6326 hba->ufshcd_state = UFSHCD_STATE_ERROR;
6327 }
6328 ufshcd_clear_eh_in_progress(hba);
6329 spin_unlock_irqrestore(hba->host->host_lock, flags);
6330
6331 ufshcd_release(hba);
6332 return err;
6333 }
6334
6335 /**
6336 * ufshcd_get_max_icc_level - calculate the ICC level
6337 * @sup_curr_uA: max. current supported by the regulator
6338 * @start_scan: row at the desc table to start scan from
6339 * @buff: power descriptor buffer
6340 *
6341 * Returns calculated max ICC level for specific regulator
6342 */
6343 static u32 ufshcd_get_max_icc_level(int sup_curr_uA, u32 start_scan, char *buff)
6344 {
6345 int i;
6346 int curr_uA;
6347 u16 data;
6348 u16 unit;
6349
6350 for (i = start_scan; i >= 0; i--) {
6351 data = be16_to_cpup((__be16 *)&buff[2 * i]);
6352 unit = (data & ATTR_ICC_LVL_UNIT_MASK) >>
6353 ATTR_ICC_LVL_UNIT_OFFSET;
6354 curr_uA = data & ATTR_ICC_LVL_VALUE_MASK;
6355 switch (unit) {
6356 case UFSHCD_NANO_AMP:
6357 curr_uA = curr_uA / 1000;
6358 break;
6359 case UFSHCD_MILI_AMP:
6360 curr_uA = curr_uA * 1000;
6361 break;
6362 case UFSHCD_AMP:
6363 curr_uA = curr_uA * 1000 * 1000;
6364 break;
6365 case UFSHCD_MICRO_AMP:
6366 default:
6367 break;
6368 }
6369 if (sup_curr_uA >= curr_uA)
6370 break;
6371 }
6372 if (i < 0) {
6373 i = 0;
6374 pr_err("%s: Couldn't find valid icc_level = %d", __func__, i);
6375 }
6376
6377 return (u32)i;
6378 }
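/*
 * Minimal sketch (compiled out) of the unit normalisation performed in the
 * switch above: every 16-bit descriptor entry is converted to microamps
 * before being compared with the regulator's max_uA. The helper name is
 * hypothetical; the unit constants are the ones used in
 * ufshcd_get_max_icc_level().
 */
#if 0
static int example_icc_entry_to_uA(u16 unit, int value)
{
	switch (unit) {
	case UFSHCD_NANO_AMP:
		return value / 1000;
	case UFSHCD_MILI_AMP:
		return value * 1000;
	case UFSHCD_AMP:
		return value * 1000 * 1000;
	case UFSHCD_MICRO_AMP:
	default:
		return value;
	}
}
#endif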
6379
6380 /**
6381 * ufshcd_find_max_sup_active_icc_level - calculate the max ICC level
6382 * In case regulators are not initialized we'll return 0
6383 * @hba: per-adapter instance
6384 * @desc_buf: power descriptor buffer to extract ICC levels from.
6385 * @len: length of desc_buff
6386 *
6387 * Returns calculated ICC level
6388 */
6389 static u32 ufshcd_find_max_sup_active_icc_level(struct ufs_hba *hba,
6390 u8 *desc_buf, int len)
6391 {
6392 u32 icc_level = 0;
6393
6394 if (!hba->vreg_info.vcc || !hba->vreg_info.vccq ||
6395 !hba->vreg_info.vccq2) {
6396 dev_err(hba->dev,
6397 "%s: Regulator capability was not set, actvIccLevel=%d",
6398 __func__, icc_level);
6399 goto out;
6400 }
6401
6402 if (hba->vreg_info.vcc)
6403 icc_level = ufshcd_get_max_icc_level(
6404 hba->vreg_info.vcc->max_uA,
6405 POWER_DESC_MAX_ACTV_ICC_LVLS - 1,
6406 &desc_buf[PWR_DESC_ACTIVE_LVLS_VCC_0]);
6407
6408 if (hba->vreg_info.vccq)
6409 icc_level = ufshcd_get_max_icc_level(
6410 hba->vreg_info.vccq->max_uA,
6411 icc_level,
6412 &desc_buf[PWR_DESC_ACTIVE_LVLS_VCCQ_0]);
6413
6414 if (hba->vreg_info.vccq2)
6415 icc_level = ufshcd_get_max_icc_level(
6416 hba->vreg_info.vccq2->max_uA,
6417 icc_level,
6418 &desc_buf[PWR_DESC_ACTIVE_LVLS_VCCQ2_0]);
6419 out:
6420 return icc_level;
6421 }
6422
6423 static void ufshcd_init_icc_levels(struct ufs_hba *hba)
6424 {
6425 int ret;
6426 int buff_len = hba->desc_size.pwr_desc;
6427 u8 desc_buf[hba->desc_size.pwr_desc];
6428
6429 ret = ufshcd_read_power_desc(hba, desc_buf, buff_len);
6430 if (ret) {
6431 dev_err(hba->dev,
6432 "%s: Failed reading power descriptor.len = %d ret = %d",
6433 __func__, buff_len, ret);
6434 return;
6435 }
6436
6437 hba->init_prefetch_data.icc_level =
6438 ufshcd_find_max_sup_active_icc_level(hba,
6439 desc_buf, buff_len);
6440 dev_dbg(hba->dev, "%s: setting icc_level 0x%x",
6441 __func__, hba->init_prefetch_data.icc_level);
6442
6443 ret = ufshcd_query_attr_retry(hba, UPIU_QUERY_OPCODE_WRITE_ATTR,
6444 QUERY_ATTR_IDN_ACTIVE_ICC_LVL, 0, 0,
6445 &hba->init_prefetch_data.icc_level);
6446
6447 if (ret)
6448 dev_err(hba->dev,
6449 "%s: Failed configuring bActiveICCLevel = %d ret = %d",
6450 __func__, hba->init_prefetch_data.icc_level, ret);
6451
6452 }
6453
6454 /**
6455 * ufshcd_scsi_add_wlus - Adds required W-LUs
6456 * @hba: per-adapter instance
6457 *
6458 * UFS device specification requires the UFS devices to support 4 well known
6459 * logical units:
6460 * "REPORT_LUNS" (address: 01h)
6461 * "UFS Device" (address: 50h)
6462 * "RPMB" (address: 44h)
6463 * "BOOT" (address: 30h)
6464 * UFS device's power management needs to be controlled by "POWER CONDITION"
6465 * field of SSU (START STOP UNIT) command. But this "power condition" field
6466 * will take effect only when its sent to "UFS device" well known logical unit
6467 * hence we require the scsi_device instance to represent this logical unit in
6468 * order for the UFS host driver to send the SSU command for power management.
6469
6470 * We also require the scsi_device instance for "RPMB" (Replay Protected Memory
6471 * Block) LU so user space process can control this LU. User space may also
6472 * want to have access to BOOT LU.
6473
6474 * This function adds scsi device instances for each of all well known LUs
6475 * (except "REPORT LUNS" LU).
6476 *
6477 * Returns zero on success (all required W-LUs are added successfully),
6478 * non-zero error value on failure (if failed to add any of the required W-LU).
6479 */
6480 static int ufshcd_scsi_add_wlus(struct ufs_hba *hba)
6481 {
6482 int ret = 0;
6483 struct scsi_device *sdev_boot;
6484
6485 hba->sdev_ufs_device = __scsi_add_device(hba->host, 0, 0,
6486 ufshcd_upiu_wlun_to_scsi_wlun(UFS_UPIU_UFS_DEVICE_WLUN), NULL);
6487 if (IS_ERR(hba->sdev_ufs_device)) {
6488 ret = PTR_ERR(hba->sdev_ufs_device);
6489 hba->sdev_ufs_device = NULL;
6490 goto out;
6491 }
6492 scsi_device_put(hba->sdev_ufs_device);
6493
6494 sdev_boot = __scsi_add_device(hba->host, 0, 0,
6495 ufshcd_upiu_wlun_to_scsi_wlun(UFS_UPIU_BOOT_WLUN), NULL);
6496 if (IS_ERR(sdev_boot)) {
6497 ret = PTR_ERR(sdev_boot);
6498 goto remove_sdev_ufs_device;
6499 }
6500 scsi_device_put(sdev_boot);
6501
6502 hba->sdev_rpmb = __scsi_add_device(hba->host, 0, 0,
6503 ufshcd_upiu_wlun_to_scsi_wlun(UFS_UPIU_RPMB_WLUN), NULL);
6504 if (IS_ERR(hba->sdev_rpmb)) {
6505 ret = PTR_ERR(hba->sdev_rpmb);
6506 goto remove_sdev_boot;
6507 }
6508 scsi_device_put(hba->sdev_rpmb);
6509 goto out;
6510
6511 remove_sdev_boot:
6512 scsi_remove_device(sdev_boot);
6513 remove_sdev_ufs_device:
6514 scsi_remove_device(hba->sdev_ufs_device);
6515 out:
6516 return ret;
6517 }
6518
6519 static int ufs_get_device_desc(struct ufs_hba *hba,
6520 struct ufs_dev_desc *dev_desc)
6521 {
6522 int err;
6523 u8 model_index;
6524 u8 str_desc_buf[QUERY_DESC_MAX_SIZE + 1] = {0};
6525 u8 desc_buf[hba->desc_size.dev_desc];
6526
6527 err = ufshcd_read_device_desc(hba, desc_buf, hba->desc_size.dev_desc);
6528 if (err) {
6529 dev_err(hba->dev, "%s: Failed reading Device Desc. err = %d\n",
6530 __func__, err);
6531 goto out;
6532 }
6533
6534 /*
6535 * getting vendor (manufacturerID) and Bank Index in big endian
6536 * format
6537 */
6538 dev_desc->wmanufacturerid = desc_buf[DEVICE_DESC_PARAM_MANF_ID] << 8 |
6539 desc_buf[DEVICE_DESC_PARAM_MANF_ID + 1];
6540
6541 model_index = desc_buf[DEVICE_DESC_PARAM_PRDCT_NAME];
6542
6543 err = ufshcd_read_string_desc(hba, model_index, str_desc_buf,
6544 QUERY_DESC_MAX_SIZE, ASCII_STD);
6545 if (err) {
6546 dev_err(hba->dev, "%s: Failed reading Product Name. err = %d\n",
6547 __func__, err);
6548 goto out;
6549 }
6550
6551 str_desc_buf[QUERY_DESC_MAX_SIZE] = '\0';
6552 strlcpy(dev_desc->model, (str_desc_buf + QUERY_DESC_HDR_SIZE),
6553 min_t(u8, str_desc_buf[QUERY_DESC_LENGTH_OFFSET],
6554 MAX_MODEL_LEN));
6555
6556 /* Null terminate the model string */
6557 dev_desc->model[MAX_MODEL_LEN] = '\0';
6558
6559 out:
6560 return err;
6561 }
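/*
 * Worked example (added for illustration): the device descriptor stores
 * wManufacturerID big-endian, so desc_buf[DEVICE_DESC_PARAM_MANF_ID] == 0x01
 * and desc_buf[DEVICE_DESC_PARAM_MANF_ID + 1] == 0xCE yield
 * dev_desc->wmanufacturerid == 0x01CE after the shift-and-or above.
 */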
6562
6563 static void ufs_fixup_device_setup(struct ufs_hba *hba,
6564 struct ufs_dev_desc *dev_desc)
6565 {
6566 struct ufs_dev_fix *f;
6567
6568 for (f = ufs_fixups; f->quirk; f++) {
6569 if ((f->card.wmanufacturerid == dev_desc->wmanufacturerid ||
6570 f->card.wmanufacturerid == UFS_ANY_VENDOR) &&
6571 (STR_PRFX_EQUAL(f->card.model, dev_desc->model) ||
6572 !strcmp(f->card.model, UFS_ANY_MODEL)))
6573 hba->dev_quirks |= f->quirk;
6574 }
6575 }
6576
6577 /**
6578 * ufshcd_tune_pa_tactivate - Tunes PA_TActivate of local UniPro
6579 * @hba: per-adapter instance
6580 *
6581 * PA_TActivate parameter can be tuned manually if UniPro version is less than
6582 * 1.61. PA_TActivate needs to be greater than or equal to peerM-PHY's
6583 * RX_MIN_ACTIVATETIME_CAPABILITY attribute. This optimal value can help reduce
6584 * the hibern8 exit latency.
6585 *
6586 * Returns zero on success, non-zero error value on failure.
6587 */
6588 static int ufshcd_tune_pa_tactivate(struct ufs_hba *hba)
6589 {
6590 int ret = 0;
6591 u32 peer_rx_min_activatetime = 0, tuned_pa_tactivate;
6592
6593 ret = ufshcd_dme_peer_get(hba,
6594 UIC_ARG_MIB_SEL(
6595 RX_MIN_ACTIVATETIME_CAPABILITY,
6596 UIC_ARG_MPHY_RX_GEN_SEL_INDEX(0)),
6597 &peer_rx_min_activatetime);
6598 if (ret)
6599 goto out;
6600
6601 /* make sure proper unit conversion is applied */
6602 tuned_pa_tactivate =
6603 ((peer_rx_min_activatetime * RX_MIN_ACTIVATETIME_UNIT_US)
6604 / PA_TACTIVATE_TIME_UNIT_US);
6605 ret = ufshcd_dme_set(hba, UIC_ARG_MIB(PA_TACTIVATE),
6606 tuned_pa_tactivate);
6607
6608 out:
6609 return ret;
6610 }
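/*
 * Worked example (added for illustration, assuming the usual UniPro unit
 * definitions of RX_MIN_ACTIVATETIME_UNIT_US = 100 and
 * PA_TACTIVATE_TIME_UNIT_US = 10): a peer RX_MIN_ACTIVATETIME_CAPABILITY of
 * 2 corresponds to 200 us, so tuned_pa_tactivate becomes (2 * 100) / 10 = 20.
 */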
6611
6612 /**
6613 * ufshcd_tune_pa_hibern8time - Tunes PA_Hibern8Time of local UniPro
6614 * @hba: per-adapter instance
6615 *
6616 * PA_Hibern8Time parameter can be tuned manually if UniPro version is less than
6617 * 1.61. PA_Hibern8Time needs to be the maximum of the local M-PHY's
6618 * TX_HIBERN8TIME_CAPABILITY and the peer M-PHY's RX_HIBERN8TIME_CAPABILITY.
6619 * This optimal value can help reduce the hibern8 exit latency.
6620 *
6621 * Returns zero on success, non-zero error value on failure.
6622 */
6623 static int ufshcd_tune_pa_hibern8time(struct ufs_hba *hba)
6624 {
6625 int ret = 0;
6626 u32 local_tx_hibern8_time_cap = 0, peer_rx_hibern8_time_cap = 0;
6627 u32 max_hibern8_time, tuned_pa_hibern8time;
6628
6629 ret = ufshcd_dme_get(hba,
6630 UIC_ARG_MIB_SEL(TX_HIBERN8TIME_CAPABILITY,
6631 UIC_ARG_MPHY_TX_GEN_SEL_INDEX(0)),
6632 &local_tx_hibern8_time_cap);
6633 if (ret)
6634 goto out;
6635
6636 ret = ufshcd_dme_peer_get(hba,
6637 UIC_ARG_MIB_SEL(RX_HIBERN8TIME_CAPABILITY,
6638 UIC_ARG_MPHY_RX_GEN_SEL_INDEX(0)),
6639 &peer_rx_hibern8_time_cap);
6640 if (ret)
6641 goto out;
6642
6643 max_hibern8_time = max(local_tx_hibern8_time_cap,
6644 peer_rx_hibern8_time_cap);
6645 /* make sure proper unit conversion is applied */
6646 tuned_pa_hibern8time = ((max_hibern8_time * HIBERN8TIME_UNIT_US)
6647 / PA_HIBERN8_TIME_UNIT_US);
6648 ret = ufshcd_dme_set(hba, UIC_ARG_MIB(PA_HIBERN8TIME),
6649 tuned_pa_hibern8time);
6650 out:
6651 return ret;
6652 }
6653
6654 static void ufshcd_tune_unipro_params(struct ufs_hba *hba)
6655 {
6656 if (ufshcd_is_unipro_pa_params_tuning_req(hba)) {
6657 ufshcd_tune_pa_tactivate(hba);
6658 ufshcd_tune_pa_hibern8time(hba);
6659 }
6660
6661 if (hba->dev_quirks & UFS_DEVICE_QUIRK_PA_TACTIVATE)
6662 /* set 1ms timeout for PA_TACTIVATE */
6663 ufshcd_dme_set(hba, UIC_ARG_MIB(PA_TACTIVATE), 10);
6664 
6666 }
6667
6668 static void ufshcd_clear_dbg_ufs_stats(struct ufs_hba *hba)
6669 {
6670 int err_reg_hist_size = sizeof(struct ufs_uic_err_reg_hist);
6671
6672 hba->ufs_stats.hibern8_exit_cnt = 0;
6673 hba->ufs_stats.last_hibern8_exit_tstamp = ktime_set(0, 0);
6674
6675 memset(&hba->ufs_stats.pa_err, 0, err_reg_hist_size);
6676 memset(&hba->ufs_stats.dl_err, 0, err_reg_hist_size);
6677 memset(&hba->ufs_stats.nl_err, 0, err_reg_hist_size);
6678 memset(&hba->ufs_stats.tl_err, 0, err_reg_hist_size);
6679 memset(&hba->ufs_stats.dme_err, 0, err_reg_hist_size);
6680
6681 hba->req_abort_count = 0;
6682 }
6683
6684 static void ufshcd_init_desc_sizes(struct ufs_hba *hba)
6685 {
6686 int err;
6687
6688 err = ufshcd_read_desc_length(hba, QUERY_DESC_IDN_DEVICE, 0,
6689 &hba->desc_size.dev_desc);
6690 if (err)
6691 hba->desc_size.dev_desc = QUERY_DESC_DEVICE_DEF_SIZE;
6692
6693 err = ufshcd_read_desc_length(hba, QUERY_DESC_IDN_POWER, 0,
6694 &hba->desc_size.pwr_desc);
6695 if (err)
6696 hba->desc_size.pwr_desc = QUERY_DESC_POWER_DEF_SIZE;
6697
6698 err = ufshcd_read_desc_length(hba, QUERY_DESC_IDN_INTERCONNECT, 0,
6699 &hba->desc_size.interc_desc);
6700 if (err)
6701 hba->desc_size.interc_desc = QUERY_DESC_INTERCONNECT_DEF_SIZE;
6702
6703 err = ufshcd_read_desc_length(hba, QUERY_DESC_IDN_CONFIGURATION, 0,
6704 &hba->desc_size.conf_desc);
6705 if (err)
6706 hba->desc_size.conf_desc = QUERY_DESC_CONFIGURATION_DEF_SIZE;
6707
6708 err = ufshcd_read_desc_length(hba, QUERY_DESC_IDN_UNIT, 0,
6709 &hba->desc_size.unit_desc);
6710 if (err)
6711 hba->desc_size.unit_desc = QUERY_DESC_UNIT_DEF_SIZE;
6712
6713 err = ufshcd_read_desc_length(hba, QUERY_DESC_IDN_GEOMETRY, 0,
6714 &hba->desc_size.geom_desc);
6715 if (err)
6716 hba->desc_size.geom_desc = QUERY_DESC_GEOMETRY_DEF_SIZE;
6717 }
6718
6719 static void ufshcd_def_desc_sizes(struct ufs_hba *hba)
6720 {
6721 hba->desc_size.dev_desc = QUERY_DESC_DEVICE_DEF_SIZE;
6722 hba->desc_size.pwr_desc = QUERY_DESC_POWER_DEF_SIZE;
6723 hba->desc_size.interc_desc = QUERY_DESC_INTERCONNECT_DEF_SIZE;
6724 hba->desc_size.conf_desc = QUERY_DESC_CONFIGURATION_DEF_SIZE;
6725 hba->desc_size.unit_desc = QUERY_DESC_UNIT_DEF_SIZE;
6726 hba->desc_size.geom_desc = QUERY_DESC_GEOMETRY_DEF_SIZE;
6727 }
6728
6729 /**
6730 * ufshcd_probe_hba - probe hba to detect device and initialize
6731 * @hba: per-adapter instance
6732 *
6733 * Execute link-startup and verify device initialization
6734 */
6735 static int ufshcd_probe_hba(struct ufs_hba *hba)
6736 {
6737 struct ufs_dev_desc card = {0};
6738 struct ufs_pa_layer_attr *pwr_info = &hba->max_pwr_info.info;
6739 struct ufs_vreg_info *info = &hba->vreg_info;
6740 int re_cnt = 0;
6741 int ret, link_startup_fail = 0, device_reset = 0;
6742 ktime_t start = ktime_get();
6743 unsigned long flags;
6744
6745 retry:
6746 /* For device power control when link startup fails. */
6747 if (link_startup_fail || device_reset) {
6748 ufshcd_vreg_set_lpm(hba);
6749 ret = ufshcd_vreg_set_hpm(hba);
6750 device_reset = 0;
6751
6752 if (gpio_is_valid(info->ufs_power_gpio))
6753 dev_info(hba->dev, "%s: UFS power pin: 0x%08x\n", __func__, gpio_get_value(info->ufs_power_gpio));
6754 if (gpio_is_valid(info->ufs_reset_n_gpio))
6755 dev_info(hba->dev, "%s: RESET_N: 0x%08x\n", __func__, gpio_get_value(info->ufs_reset_n_gpio));
6756 if (ret)
6757 goto out;
6758 }
6759
6760 ret = ufshcd_hba_enable(hba);
6761 if (ret)
6762 goto out;
6763
6764 ret = ufshcd_link_startup(hba);
6765 if (ret) {
6766 link_startup_fail = 1;
6767 goto out;
6768 }
6769 link_startup_fail = 0;
6770
6771 dev_info(hba->dev, "UFS link established\n");
6772
6773 /* set the default level for urgent bkops */
6774 hba->urgent_bkops_lvl = BKOPS_STATUS_PERF_IMPACT;
6775 hba->is_urgent_bkops_lvl_checked = false;
6776
6777 /* Debug counters initialization */
6778 ufshcd_clear_dbg_ufs_stats(hba);
6779
6780 /* UniPro link is active now */
6781 ufshcd_set_link_active(hba);
6782
6783 ret = ufshcd_verify_dev_init(hba);
6784 if (ret)
6785 goto out;
6786
6787 ret = ufshcd_complete_dev_init(hba);
6788 if (ret)
6789 goto out;
6790
6791 /* Init check for device descriptor sizes */
6792 ufshcd_init_desc_sizes(hba);
6793
6794 ret = ufs_get_device_desc(hba, &card);
6795 if (ret) {
6796 dev_err(hba->dev, "%s: Failed getting device info. err = %d\n",
6797 __func__, ret);
6798 goto out;
6799 }
6800
6801 ufs_fixup_device_setup(hba, &card);
6802 ufshcd_tune_unipro_params(hba);
6803
6804 ret = ufshcd_set_vccq_rail_unused(hba,
6805 (hba->dev_quirks & UFS_DEVICE_NO_VCCQ) ? true : false);
6806 if (ret)
6807 goto out;
6808
6809 /* UFS device is also active now */
6810 ufshcd_set_ufs_dev_active(hba);
6811 ufshcd_force_reset_auto_bkops(hba);
6812 hba->wlun_dev_clr_ua = true;
6813
6814 if (ufshcd_get_max_pwr_mode(hba)) {
6815 dev_err(hba->dev,
6816 "%s: Failed getting max supported power mode\n",
6817 __func__);
6818 } else {
6819 if ((pwr_info->lane_rx != pwr_info->peer_available_lane_rx)
6820 || (pwr_info->lane_tx != pwr_info->peer_available_lane_tx)) {
6821 dev_info(hba->dev,
6822 "%s: availabele lanes, Host:Device Lane tx %d%d rx %d:%d\n",
6823 __func__,
6824 pwr_info->lane_tx, pwr_info->peer_available_lane_tx,
6825 pwr_info->lane_rx, pwr_info->peer_available_lane_rx);
6826 }
6827 ret = ufshcd_config_pwr_mode(hba, &hba->max_pwr_info.info);
6828 if (ret) {
6829 dev_err(hba->dev, "%s: Failed setting power mode, err = %d\n",
6830 __func__, ret);
6831 goto out;
6832 }
6833
6834 if (hba->max_pwr_info.info.pwr_rx == FAST_MODE ||
6835 hba->max_pwr_info.info.pwr_tx == FAST_MODE ||
6836 hba->max_pwr_info.info.pwr_rx == FASTAUTO_MODE ||
6837 hba->max_pwr_info.info.pwr_tx == FASTAUTO_MODE)
6838 dev_info(hba->dev, "HS mode configured\n");
6839 }
6840
6841 /* set the state as operational after switching to desired gear */
6842 spin_lock_irqsave(hba->host->host_lock, flags);
6843 hba->ufshcd_state = UFSHCD_STATE_OPERATIONAL;
6844 spin_unlock_irqrestore(hba->host->host_lock, flags);
6845
6846 /*
6847 * If we are in error handling context or in power management callbacks
6848 * context, no need to scan the host
6849 */
6850 if (!ufshcd_eh_in_progress(hba) && !hba->pm_op_in_progress
6851 && !hba->async_resume) {
6852 bool flag;
6853
6854 /* clear any previous UFS device information */
6855 memset(&hba->dev_info, 0, sizeof(hba->dev_info));
6856 ret = ufshcd_query_flag_retry(hba, UPIU_QUERY_OPCODE_READ_FLAG,
6857 QUERY_FLAG_IDN_PWR_ON_WPE, &flag);
6858 if (!ret)
6859 hba->dev_info.f_power_on_wp_en = flag;
6860 else {
6861 device_reset = 1;
6862 goto out;
6863 }
6864 device_reset = 0;
6865
6866 if (!hba->is_init_prefetch)
6867 ufshcd_init_icc_levels(hba);
6868
6869 scsi_scan_host(hba->host);
6870
6871 /* Add required well known logical units to scsi mid layer */
6872 ret = ufshcd_scsi_add_wlus(hba);
6873 if (ret) {
6874 dev_warn(hba->dev, "%s failed to add w-lus %d\n",
6875 __func__, ret);
6876 ret = 0;
6877 }
6878
6879 /* Initialize devfreq after UFS device is detected */
6880 if (ufshcd_is_clkscaling_supported(hba)) {
6881 memcpy(&hba->clk_scaling.saved_pwr_info.info,
6882 &hba->pwr_info,
6883 sizeof(struct ufs_pa_layer_attr));
6884 hba->clk_scaling.saved_pwr_info.is_valid = true;
6885 if (!hba->devfreq) {
6886 #if defined(CONFIG_PM_DEVFREQ)
6887 hba->devfreq = devm_devfreq_add_device(hba->dev,
6888 &ufs_devfreq_profile,
6889 "simple_ondemand",
6890 NULL);
6891 #endif
6892 if (IS_ERR(hba->devfreq)) {
6893 ret = PTR_ERR(hba->devfreq);
6894 dev_err(hba->dev, "Unable to register with devfreq %d\n",
6895 ret);
6896 goto out;
6897 }
6898 }
6899 hba->clk_scaling.is_allowed = true;
6900 }
6901
6902 pm_runtime_put_sync(hba->dev);
6903 }
6904
6905 hba->host->wlun_clr_uac = true;
6906 if (!hba->is_init_prefetch)
6907 hba->is_init_prefetch = true;
6908
6909 out:
6910 if (ret && re_cnt++ < UFS_LINK_SETUP_RETRIES) {
6911 dev_err(hba->dev, "%s failed with err %d, retrying:%d\n",
6912 __func__, ret, re_cnt);
6913 goto retry;
6914 } else if (ret && re_cnt >= UFS_LINK_SETUP_RETRIES) {
6915 dev_err(hba->dev, "%s failed after retries with err %d\n",
6916 __func__, ret);
6917 exynos_ufs_dump_uic_info(hba);
6918 spin_lock_irqsave(hba->host->host_lock, flags);
6919 hba->ufshcd_state = UFSHCD_STATE_ERROR;
6920 spin_unlock_irqrestore(hba->host->host_lock, flags);
6921 }
6922
6923 /*
6924 * If we failed to initialize the device or the device is not
6925 * present, turn off the power/clocks etc.
6926 */
6927 if (ret && !ufshcd_eh_in_progress(hba) && !hba->pm_op_in_progress) {
6928 pm_runtime_put_sync(hba->dev);
6929 ufshcd_hba_exit(hba);
6930 }
6931
6932 trace_ufshcd_init(dev_name(hba->dev), ret,
6933 ktime_to_us(ktime_sub(ktime_get(), start)),
6934 hba->curr_dev_pwr_mode, hba->uic_link_state);
6935
6936 if (!ret) {
6937 /*
6938 * Inform the scsi mid-layer that we did reset and allow it to handle
6939 * Unit Attention properly.
6940 */
6941 spin_lock_irqsave(hba->host->host_lock, flags);
6942 scsi_report_bus_reset(hba->host, 0);
6943 spin_unlock_irqrestore(hba->host->host_lock, flags);
6944 }
6945
6946 hba->async_resume = false;
6947
6948 return ret;
6949 }
6950
6951 /**
6952 * ufshcd_async_scan - asynchronous execution for probing hba
6953 * @data: data pointer to pass to this function
6954 * @cookie: cookie data
6955 */
6956 static void ufshcd_async_scan(void *data, async_cookie_t cookie)
6957 {
6958 struct ufs_hba *hba = (struct ufs_hba *)data;
6959 int err = 0;
6960
6961 if (hba->async_resume) {
6962 scsi_block_requests(hba->host);
6963 err = ufshcd_probe_hba(hba);
6964 if (err)
6965 goto err;
6966
6967 if (!ufshcd_is_ufs_dev_active(hba)) {
6968 scsi_unblock_requests(hba->host);
6969 ufshcd_set_dev_pwr_mode(hba, UFS_ACTIVE_PWR_MODE);
6970 scsi_block_requests(hba->host);
6971 }
6972
6973 /*
6974 * If BKOPs operations are urgently needed at this moment then
6975 * keep auto-bkops enabled or else disable it.
6976 */
6977 ufshcd_urgent_bkops(hba);
6978 err:
6979 scsi_unblock_requests(hba->host);
6980 } else {
6981 ufshcd_probe_hba(hba);
6982 }
6983 }
6984
6985 static enum blk_eh_timer_return ufshcd_eh_timed_out(struct scsi_cmnd *scmd)
6986 {
6987 unsigned long flags;
6988 struct Scsi_Host *host;
6989 struct ufs_hba *hba;
6990 int index;
6991 bool found = false;
6992
6993 if (!scmd || !scmd->device || !scmd->device->host)
6994 return BLK_EH_NOT_HANDLED;
6995
6996 host = scmd->device->host;
6997 hba = shost_priv(host);
6998 if (!hba)
6999 return BLK_EH_NOT_HANDLED;
7000
7001 spin_lock_irqsave(host->host_lock, flags);
7002
7003 for_each_set_bit(index, &hba->outstanding_reqs, hba->nutrs) {
7004 if (hba->lrb[index].cmd == scmd) {
7005 found = true;
7006 break;
7007 }
7008 }
7009
7010 spin_unlock_irqrestore(host->host_lock, flags);
7011
7012 /*
7013 * Bypass SCSI error handling and reset the block layer timer if this
7014 * SCSI command was not actually dispatched to UFS driver, otherwise
7015 * let SCSI layer handle the error as usual.
7016 */
7017 return found ? BLK_EH_NOT_HANDLED : BLK_EH_RESET_TIMER;
7018 }
7019
7020 /**
7021 * ufshcd_query_ioctl - perform user read queries
7022 * @hba: per-adapter instance
7023 * @lun: used for lun specific queries
7024 * @buffer: user space buffer for reading and submitting query data and params
7025 * @return: 0 for success negative error code otherwise
7026 *
7027 * Expected/Submitted buffer structure is struct ufs_ioctl_query_data.
7028 * It will read the opcode, idn and buf_size parameters, and put the
7029 * response in the buffer field while updating the used size in buf_size.
7030 */
7031 static int ufshcd_query_ioctl(struct ufs_hba *hba, u8 lun, void __user *buffer)
7032 {
7033 struct ufs_ioctl_query_data *ioctl_data;
7034 int err = 0;
7035 int length = 0;
7036 void *data_ptr;
7037 bool flag;
7038 u32 att;
7039 u8 index;
7040 u8 *desc = NULL;
7041
7042 ioctl_data = kzalloc(sizeof(struct ufs_ioctl_query_data), GFP_KERNEL);
7043 if (!ioctl_data) {
7044 dev_err(hba->dev, "%s: Failed allocating %zu bytes\n", __func__,
7045 sizeof(struct ufs_ioctl_query_data));
7046 err = -ENOMEM;
7047 goto out;
7048 }
7049
7050 /* extract params from user buffer */
7051 err = copy_from_user(ioctl_data, buffer,
7052 sizeof(struct ufs_ioctl_query_data));
7053 if (err) {
7054 dev_err(hba->dev,
7055 "%s: Failed copying buffer from user, err %d\n",
7056 __func__, err);
7057 goto out_release_mem;
7058 }
7059
7060 /* verify legal parameters & send query */
7061 switch (ioctl_data->opcode) {
7062 case UPIU_QUERY_OPCODE_READ_DESC:
7063 switch (ioctl_data->idn) {
7064 case QUERY_DESC_IDN_DEVICE:
7065 case QUERY_DESC_IDN_CONFIGURATION:
7066 case QUERY_DESC_IDN_INTERCONNECT:
7067 case QUERY_DESC_IDN_GEOMETRY:
7068 case QUERY_DESC_IDN_POWER:
7069 case QUERY_DESC_IDN_HEALTH:
7070 index = 0;
7071 break;
7072 case QUERY_DESC_IDN_UNIT:
7073 if (!ufs_is_valid_unit_desc_lun(lun)) {
7074 dev_err(hba->dev,
7075 "%s: No unit descriptor for lun 0x%x\n",
7076 __func__, lun);
7077 err = -EINVAL;
7078 goto out_release_mem;
7079 }
7080 index = lun;
7081 break;
7082 default:
7083 goto out_einval;
7084 }
7085 length = min_t(int, QUERY_DESC_MAX_SIZE,
7086 ioctl_data->buf_size);
7087 desc = kzalloc(length, GFP_KERNEL);
7088 if (!desc) {
7089 dev_err(hba->dev, "%s: Failed allocating %d bytes\n",
7090 __func__, length);
7091 err = -ENOMEM;
7092 goto out_release_mem;
7093 }
7094 err = ufshcd_query_descriptor_retry(hba, ioctl_data->opcode,
7095 ioctl_data->idn, index, 0, desc, &length);
7096 break;
7097 case UPIU_QUERY_OPCODE_READ_ATTR:
7098 switch (ioctl_data->idn) {
7099 case QUERY_ATTR_IDN_BOOT_LU_EN:
7100 case QUERY_ATTR_IDN_POWER_MODE:
7101 case QUERY_ATTR_IDN_ACTIVE_ICC_LVL:
7102 case QUERY_ATTR_IDN_OOO_DATA_EN:
7103 case QUERY_ATTR_IDN_BKOPS_STATUS:
7104 case QUERY_ATTR_IDN_PURGE_STATUS:
7105 case QUERY_ATTR_IDN_MAX_DATA_IN:
7106 case QUERY_ATTR_IDN_MAX_DATA_OUT:
7107 case QUERY_ATTR_IDN_REF_CLK_FREQ:
7108 case QUERY_ATTR_IDN_CONF_DESC_LOCK:
7109 case QUERY_ATTR_IDN_MAX_NUM_OF_RTT:
7110 case QUERY_ATTR_IDN_EE_CONTROL:
7111 case QUERY_ATTR_IDN_EE_STATUS:
7112 case QUERY_ATTR_IDN_SECONDS_PASSED:
7113 index = 0;
7114 break;
7115 case QUERY_ATTR_IDN_DYN_CAP_NEEDED:
7116 case QUERY_ATTR_IDN_CORR_PRG_BLK_NUM:
7117 index = lun;
7118 break;
7119 default:
7120 goto out_einval;
7121 }
7122 err = ufshcd_query_attr_retry(hba, ioctl_data->opcode,
7123 ioctl_data->idn, index, 0, &att);
7124 break;
7125 case UPIU_QUERY_OPCODE_READ_FLAG:
7126 switch (ioctl_data->idn) {
7127 case QUERY_FLAG_IDN_FDEVICEINIT:
7128 case QUERY_FLAG_IDN_PERMANENT_WPE:
7129 case QUERY_FLAG_IDN_PWR_ON_WPE:
7130 case QUERY_FLAG_IDN_BKOPS_EN:
7131 case QUERY_FLAG_IDN_PURGE_ENABLE:
7132 case QUERY_FLAG_IDN_FPHYRESOURCEREMOVAL:
7133 case QUERY_FLAG_IDN_BUSY_RTC:
7134 break;
7135 default:
7136 goto out_einval;
7137 }
7138 err = ufshcd_query_flag_retry(hba, ioctl_data->opcode,
7139 ioctl_data->idn, &flag);
7140 break;
7141 default:
7142 goto out_einval;
7143 }
7144
7145 if (err) {
7146 dev_err(hba->dev, "%s: Query for idn %d failed\n", __func__,
7147 ioctl_data->idn);
7148 goto out_release_mem;
7149 }
7150
7151 /*
7152 * copy response data
7153 * As we might end up reading less data than what is specified in
7154 * "ioctl_data->buf_size", we update "ioctl_data->buf_size" to the
7155 * number of bytes actually read.
7156 */
7157 switch (ioctl_data->opcode) {
7158 case UPIU_QUERY_OPCODE_READ_DESC:
7159 ioctl_data->buf_size = min_t(int, ioctl_data->buf_size, length);
7160 data_ptr = desc;
7161 break;
7162 case UPIU_QUERY_OPCODE_READ_ATTR:
7163 ioctl_data->buf_size = sizeof(u32);
7164 data_ptr = &att;
7165 break;
7166 case UPIU_QUERY_OPCODE_READ_FLAG:
7167 ioctl_data->buf_size = 1;
7168 data_ptr = &flag;
7169 break;
7170 default:
7171 BUG_ON(true);
7172 }
7173
7174 /* copy to user */
7175 err = copy_to_user(buffer, ioctl_data,
7176 sizeof(struct ufs_ioctl_query_data));
7177 if (err)
7178 dev_err(hba->dev, "%s: Failed copying back to user.\n",
7179 __func__);
7180 err = copy_to_user(buffer + sizeof(struct ufs_ioctl_query_data),
7181 data_ptr, ioctl_data->buf_size);
7182 if (err)
7183 dev_err(hba->dev, "%s: err %d copying back to user.\n",
7184 __func__, err);
7185 goto out_release_mem;
7186
7187 out_einval:
7188 dev_err(hba->dev,
7189 "%s: illegal ufs query ioctl data, opcode 0x%x, idn 0x%x\n",
7190 __func__, ioctl_data->opcode, (unsigned int)ioctl_data->idn);
7191 err = -EINVAL;
7192 out_release_mem:
7193 kfree(ioctl_data);
7194 kfree(desc);
7195 out:
7196 return err;
7197 }
7198
7199 /**
7200 * ufshcd_ioctl - ufs ioctl callback registered in scsi_host
7201 * @dev: scsi device required for per LUN queries
7202 * @cmd: command opcode
7203 * @buffer: user space buffer for transferring data
7204 *
7205 * Supported commands:
7206 * UFS_IOCTL_QUERY
7207 */
7208 static int ufshcd_ioctl(struct scsi_device *dev, int cmd, void __user *buffer)
7209 {
7210 struct ufs_hba *hba = shost_priv(dev->host);
7211 int err = 0;
7212
7213 BUG_ON(!hba);
7214 if (!buffer) {
7215 if (cmd != SCSI_UFS_REQUEST_SENSE) {
7216 dev_err(hba->dev, "%s: User buffer is NULL!\n", __func__);
7217 return -EINVAL;
7218 }
7219 }
7220 switch (cmd) {
7221 case SCSI_UFS_REQUEST_SENSE:
7222 err = ufshcd_send_request_sense(hba, hba->sdev_rpmb);
7223 if (err) {
7224 dev_warn(hba->dev, "%s failed to clear uac on rpmb(w-lu) %d\n",
7225 __func__, err);
7226 }
7227 hba->host->wlun_clr_uac = false;
7228 break;
7229 case UFS_IOCTL_QUERY:
7230 //pm_runtime_get_sync(hba->dev);
7231 err = ufshcd_query_ioctl(hba, ufshcd_scsi_to_upiu_lun(dev->lun),
7232 buffer);
7233 //pm_runtime_put_sync(hba->dev);
7234 break;
7235 case UFS_IOCTL_BLKROSET:
7236 err = -ENOIOCTLCMD;
7237 break;
7238 default:
7239 err = -EINVAL;
7240 dev_err(hba->dev, "%s: Illegal ufs-IOCTL cmd %d\n", __func__,
7241 cmd);
7242 break;
7243 }
7244
7245 return err;
7246 }
7247 static struct scsi_host_template ufshcd_driver_template = {
7248 .module = THIS_MODULE,
7249 .name = UFSHCD,
7250 .proc_name = UFSHCD,
7251 .queuecommand = ufshcd_queuecommand,
7252 .slave_alloc = ufshcd_slave_alloc,
7253 .slave_configure = ufshcd_slave_configure,
7254 .slave_destroy = ufshcd_slave_destroy,
7255 .change_queue_depth = ufshcd_change_queue_depth,
7256 .eh_abort_handler = ufshcd_abort,
7257 .eh_device_reset_handler = ufshcd_eh_device_reset_handler,
7258 .eh_host_reset_handler = ufshcd_eh_host_reset_handler,
7259 .eh_timed_out = ufshcd_eh_timed_out,
7260 .ioctl = ufshcd_ioctl,
7261 .this_id = -1,
7262 .sg_tablesize = SG_ALL,
7263 .cmd_per_lun = UFSHCD_CMD_PER_LUN,
7264 .can_queue = UFSHCD_CAN_QUEUE,
7265 .max_host_blocked = 1,
7266 .skip_settle_delay = 1,
7267 .track_queue_depth = 1,
7268 };
7269
7270 static int ufshcd_config_vreg_load(struct device *dev, struct ufs_vreg *vreg,
7271 int ua)
7272 {
7273 int ret;
7274
7275 if (!vreg)
7276 return 0;
7277
7278 ret = regulator_set_load(vreg->reg, ua);
7279 if (ret < 0) {
7280 dev_err(dev, "%s: %s set load (ua=%d) failed, err=%d\n",
7281 __func__, vreg->name, ua, ret);
7282 }
7283
7284 return ret;
7285 }
7286
7287 static inline int ufshcd_config_vreg_lpm(struct ufs_hba *hba,
7288 struct ufs_vreg *vreg)
7289 {
7290 if (!vreg)
7291 return 0;
7292 else if (vreg->unused)
7293 return 0;
7294 else
7295 return ufshcd_config_vreg_load(hba->dev, vreg,
7296 UFS_VREG_LPM_LOAD_UA);
7297 }
7298
7299 static inline int ufshcd_config_vreg_hpm(struct ufs_hba *hba,
7300 struct ufs_vreg *vreg)
7301 {
7302 if (!vreg)
7303 return 0;
7304 else if (vreg->unused)
7305 return 0;
7306 else
7307 return ufshcd_config_vreg_load(hba->dev, vreg, vreg->max_uA);
7308 }
7309
7310 static int ufshcd_config_vreg(struct device *dev,
7311 struct ufs_vreg *vreg, bool on)
7312 {
7313 int ret = 0;
7314 struct regulator *reg;
7315 const char *name;
7316 int min_uV, uA_load;
7317
7318 BUG_ON(!vreg);
7319
7320 reg = vreg->reg;
7321 name = vreg->name;
7322
7323 if (regulator_count_voltages(reg) > 0) {
7324 min_uV = on ? vreg->min_uV : 0;
7325 ret = regulator_set_voltage(reg, min_uV, vreg->max_uV);
7326 if (ret) {
7327 dev_err(dev, "%s: %s set voltage failed, err=%d\n",
7328 __func__, name, ret);
7329 goto out;
7330 }
7331
7332 uA_load = on ? vreg->max_uA : 0;
7333 ret = ufshcd_config_vreg_load(dev, vreg, uA_load);
7334 if (ret)
7335 goto out;
7336 }
7337 out:
7338 return ret;
7339 }
7340
7341 static int ufshcd_enable_vreg(struct device *dev, struct ufs_vreg *vreg)
7342 {
7343 int ret = 0;
7344
7345 if (!vreg)
7346 goto out;
7347 else if (vreg->enabled || vreg->unused)
7348 goto out;
7349
7350 ret = ufshcd_config_vreg(dev, vreg, true);
7351 if (!ret)
7352 ret = regulator_enable(vreg->reg);
7353
7354 if (!ret)
7355 vreg->enabled = true;
7356 else
7357 dev_err(dev, "%s: %s enable failed, err=%d\n",
7358 __func__, vreg->name, ret);
7359 out:
7360 return ret;
7361 }
7362
7363 static int ufshcd_disable_vreg(struct device *dev, struct ufs_vreg *vreg)
7364 {
7365 int ret = 0;
7366
7367 if (!vreg)
7368 goto out;
7369 else if (!vreg->enabled || vreg->unused)
7370 goto out;
7371
7372 ret = regulator_disable(vreg->reg);
7373
7374 if (!ret) {
7375 /* ignore errors on applying disable config */
7376 ufshcd_config_vreg(dev, vreg, false);
7377 vreg->enabled = false;
7378 } else {
7379 dev_err(dev, "%s: %s disable failed, err=%d\n",
7380 __func__, vreg->name, ret);
7381 }
7382 out:
7383 return ret;
7384 }
7385
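/**
 * ufshcd_setup_vreg - enable or disable the UFS device power rails
 * @hba: per adapter instance
 * @on: true to enable VCC, VCCQ and VCCQ2, false to disable them
 *
 * If enabling any of the rails fails, all three are switched back off.
 * Returns 0 on success, negative errno on failure.
 */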
7386 static int ufshcd_setup_vreg(struct ufs_hba *hba, bool on)
7387 {
7388 int ret = 0;
7389 struct device *dev = hba->dev;
7390 struct ufs_vreg_info *info = &hba->vreg_info;
7391
7392 if (!info)
7393 goto out;
7394
7395 ret = ufshcd_toggle_vreg(dev, info->vcc, on);
7396 if (ret)
7397 goto out;
7398
7399 ret = ufshcd_toggle_vreg(dev, info->vccq, on);
7400 if (ret)
7401 goto out;
7402
7403 ret = ufshcd_toggle_vreg(dev, info->vccq2, on);
7404 if (ret)
7405 goto out;
7406
7407 out:
7408 if (ret) {
7409 ufshcd_toggle_vreg(dev, info->vccq2, false);
7410 ufshcd_toggle_vreg(dev, info->vccq, false);
7411 ufshcd_toggle_vreg(dev, info->vcc, false);
7412 }
7413 return ret;
7414 }
7415
7416 static int ufshcd_setup_hba_vreg(struct ufs_hba *hba, bool on)
7417 {
7418 struct ufs_vreg_info *info = &hba->vreg_info;
7419
7420 if (info)
7421 return ufshcd_toggle_vreg(hba->dev, info->vdd_hba, on);
7422
7423 return 0;
7424 }
7425
7426 static int ufshcd_get_vreg(struct device *dev, struct ufs_vreg *vreg)
7427 {
7428 int ret = 0;
7429
7430 if (!vreg)
7431 goto out;
7432
7433 vreg->reg = devm_regulator_get(dev, vreg->name);
7434 if (IS_ERR(vreg->reg)) {
7435 ret = PTR_ERR(vreg->reg);
7436 dev_err(dev, "%s: %s get failed, err=%d\n",
7437 __func__, vreg->name, ret);
7438 }
7439 out:
7440 return ret;
7441 }
7442
7443 static int ufshcd_init_vreg(struct ufs_hba *hba)
7444 {
7445 int ret = 0;
7446 struct device *dev = hba->dev;
7447 struct ufs_vreg_info *info = &hba->vreg_info;
7448
7449 if (!info)
7450 goto out;
7451
7452 ret = ufshcd_get_vreg(dev, info->vcc);
7453 if (ret)
7454 goto out;
7455
7456 ret = ufshcd_get_vreg(dev, info->vccq);
7457 if (ret)
7458 goto out;
7459
7460 ret = ufshcd_get_vreg(dev, info->vccq2);
7461 out:
7462 return ret;
7463 }
7464
7465 static int ufshcd_init_hba_vreg(struct ufs_hba *hba)
7466 {
7467 struct ufs_vreg_info *info = &hba->vreg_info;
7468
7469 if (info)
7470 return ufshcd_get_vreg(hba->dev, info->vdd_hba);
7471
7472 return 0;
7473 }
7474
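/**
 * ufshcd_set_vccq_rail_unused - mark the VCCQ rail as unused or in use
 * @hba: per adapter instance
 * @unused: true to switch the rail off and flag it unused, false to clear
 *          the flag for a rail that is expected to be enabled already
 *
 * Returns 0 on success, negative errno on failure.
 */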
7475 static int ufshcd_set_vccq_rail_unused(struct ufs_hba *hba, bool unused)
7476 {
7477 int ret = 0;
7478 struct ufs_vreg_info *info = &hba->vreg_info;
7479
7480 if (!info)
7481 goto out;
7482 else if (!info->vccq)
7483 goto out;
7484
7485 if (unused) {
7486 /* shut off the rail here */
7487 ret = ufshcd_toggle_vreg(hba->dev, info->vccq, false);
7488 /*
7489 * Mark this rail as no longer used, so it doesn't get enabled
7490 * later by mistake
7491 */
7492 if (!ret)
7493 info->vccq->unused = true;
7494 } else {
7495 /*
7496 * rail should have been already enabled hence just make sure
7497 * that unused flag is cleared.
7498 */
7499 info->vccq->unused = false;
7500 }
7501 out:
7502 return ret;
7503 }
7504
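/**
 * __ufshcd_setup_clocks - enable or disable the host controller clocks
 * @hba: per adapter instance
 * @on: true to enable the clocks, false to disable them
 * @skip_ref_clk: if true, leave the device reference clock ("ref_clk") as is
 *
 * Walks hba->clk_list_head, toggles every clock, calls the vendor specific
 * clock setup hooks and updates the clock gating state and trace points.
 * Returns 0 on success, negative errno on failure.
 */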
7505 static int __ufshcd_setup_clocks(struct ufs_hba *hba, bool on,
7506 bool skip_ref_clk)
7507 {
7508 int ret = 0;
7509 struct ufs_clk_info *clki;
7510 struct list_head *head = &hba->clk_list_head;
7511 const char *ref_clk = "ref_clk";
7512 unsigned long flags;
7513 ktime_t start = ktime_get();
7514 bool clk_state_changed = false;
7515
7516 if (list_empty(head))
7517 goto out;
7518
7519 ufshcd_vops_pre_setup_clocks(hba, on);
7520
7521 list_for_each_entry(clki, head, list) {
7522 if (!IS_ERR_OR_NULL(clki->clk)) {
7523 if (skip_ref_clk &&
7524 !strncmp(clki->name, ref_clk, strlen(ref_clk)))
7525 continue;
7526
7527 clk_state_changed = on ^ clki->enabled;
7528 if (on && !clki->enabled) {
7529 ret = clk_prepare_enable(clki->clk);
7530 if (ret) {
7531 hba->clk_gating.state = CLKS_DISABLE;
7532 dev_err(hba->dev, "%s: %s prepare enable failed, %d\n",
7533 __func__, clki->name, ret);
7534 goto out;
7535 }
7536 } else if (!on && clki->enabled) {
7537 clk_disable_unprepare(clki->clk);
7538 }
7539 clki->enabled = on;
7540 dev_dbg(hba->dev, "%s: clk: %s %sabled\n", __func__,
7541 clki->name, on ? "en" : "dis");
7542 }
7543 }
7544
7545 ret = ufshcd_vops_setup_clocks(hba, on);
7546
7547 out:
7548 if (ret) {
7549 list_for_each_entry(clki, head, list) {
7550 if (!IS_ERR_OR_NULL(clki->clk) && clki->enabled)
7551 clk_disable_unprepare(clki->clk);
7552 }
7553 } else if (on) {
7554 spin_lock_irqsave(hba->host->host_lock, flags);
7555 hba->clk_gating.state = CLKS_ON;
7556 trace_ufshcd_clk_gating(dev_name(hba->dev),
7557 hba->clk_gating.state);
7558 spin_unlock_irqrestore(hba->host->host_lock, flags);
7559 }
7560
7561 if (clk_state_changed)
7562 trace_ufshcd_profile_clk_gating(dev_name(hba->dev),
7563 (on ? "on" : "off"),
7564 ktime_to_us(ktime_sub(ktime_get(), start)), ret);
7565 return ret;
7566 }
7567
7568 static int ufshcd_setup_clocks(struct ufs_hba *hba, bool on)
7569 {
7570 return __ufshcd_setup_clocks(hba, on, false);
7571 }
7572
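/**
 * ufshcd_init_clocks - look up and initialize the host controller clocks
 * @hba: per adapter instance
 *
 * Acquires every clock listed in hba->clk_list_head via devm_clk_get() and,
 * when a maximum frequency is specified, sets the clock to that rate.
 * Returns 0 on success, negative errno on failure.
 */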
7573 static int ufshcd_init_clocks(struct ufs_hba *hba)
7574 {
7575 int ret = 0;
7576 struct ufs_clk_info *clki;
7577 struct device *dev = hba->dev;
7578 struct list_head *head = &hba->clk_list_head;
7579
7580 if (list_empty(head))
7581 goto out;
7582
7583 list_for_each_entry(clki, head, list) {
7584 if (!clki->name)
7585 continue;
7586
7587 clki->clk = devm_clk_get(dev, clki->name);
7588 if (IS_ERR(clki->clk)) {
7589 ret = PTR_ERR(clki->clk);
7590 dev_err(dev, "%s: %s clk get failed, %d\n",
7591 __func__, clki->name, ret);
7592 goto out;
7593 }
7594
7595 if (clki->max_freq) {
7596 ret = clk_set_rate(clki->clk, clki->max_freq);
7597 if (ret) {
7598 dev_err(hba->dev, "%s: %s clk set rate(%dHz) failed, %d\n",
7599 __func__, clki->name,
7600 clki->max_freq, ret);
7601 goto out;
7602 }
7603 #if defined(CONFIG_PM_DEVFREQ)
7604 clki->curr_freq = clki->max_freq;
7605 #endif
7606 }
7607 dev_dbg(dev, "%s: clk: %s, rate: %lu\n", __func__,
7608 clki->name, clk_get_rate(clki->clk));
7609 }
7610 out:
7611 return ret;
7612 }
7613
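/**
 * ufshcd_variant_hba_init - vendor specific host controller initialization
 * @hba: per adapter instance
 *
 * Calls the variant init hook followed by its regulator setup; if the
 * regulator setup fails, the variant is torn down again.
 * Returns 0 on success, negative errno on failure.
 */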
7614 static int ufshcd_variant_hba_init(struct ufs_hba *hba)
7615 {
7616 int err = 0;
7617
7618 if (!hba->vops)
7619 goto out;
7620
7621 err = ufshcd_vops_init(hba);
7622 if (err)
7623 goto out;
7624
7625 err = ufshcd_vops_setup_regulators(hba, true);
7626 if (err)
7627 goto out_exit;
7628
7629 goto out;
7630
7631 out_exit:
7632 ufshcd_vops_exit(hba);
7633 out:
7634 if (err)
7635 dev_err(hba->dev, "%s: variant %s init failed err %d\n",
7636 __func__, ufshcd_get_var_name(hba), err);
7637 return err;
7638 }
7639
7640 static void ufshcd_variant_hba_exit(struct ufs_hba *hba)
7641 {
7642 if (!hba->vops)
7643 return;
7644
7645 ufshcd_vops_setup_regulators(hba, false);
7646
7647 ufshcd_vops_exit(hba);
7648 }
7649
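/**
 * ufshcd_hba_init - power up the host controller and UFS device resources
 * @hba: per adapter instance
 *
 * Initializes and enables the host controller rail, clocks, device power
 * rails and the vendor specific variant. On success hba->is_powered is set.
 * Returns 0 on success, negative errno on failure.
 */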
7650 static int ufshcd_hba_init(struct ufs_hba *hba)
7651 {
7652 int err;
7653
7654 /*
7655 * Handle host controller power separately from the UFS device power
7656 * rails, as that makes it easier to control host controller power
7657 * collapse, which is different from UFS device power collapse.
7658 * Also, enable the host controller power before we go ahead with the
7659 * rest of the initialization here.
7660 */
7661 err = ufshcd_init_hba_vreg(hba);
7662 if (err)
7663 goto out;
7664
7665 err = ufshcd_setup_hba_vreg(hba, true);
7666 if (err)
7667 goto out;
7668
7669 err = ufshcd_init_clocks(hba);
7670 if (err)
7671 goto out_disable_hba_vreg;
7672
7673 err = ufshcd_setup_clocks(hba, true);
7674 if (err)
7675 goto out_disable_hba_vreg;
7676
7677 err = ufshcd_init_vreg(hba);
7678 if (err)
7679 goto out_disable_clks;
7680
7681 err = ufshcd_setup_vreg(hba, true);
7682 if (err)
7683 goto out_disable_clks;
7684
7685 err = ufshcd_variant_hba_init(hba);
7686 if (err)
7687 goto out_disable_vreg;
7688
7689 hba->is_powered = true;
7690 goto out;
7691
7692 out_disable_vreg:
7693 ufshcd_setup_vreg(hba, false);
7694 out_disable_clks:
7695 ufshcd_setup_clocks(hba, false);
7696 out_disable_hba_vreg:
7697 ufshcd_setup_hba_vreg(hba, false);
7698 out:
7699 return err;
7700 }
7701
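/**
 * ufshcd_hba_exit - counterpart of ufshcd_hba_init()
 * @hba: per adapter instance
 *
 * Powers down the variant, device power rails, clock scaling resources,
 * clocks and the host controller rail if the HBA was powered.
 */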
7702 static void ufshcd_hba_exit(struct ufs_hba *hba)
7703 {
7704 if (hba->is_powered) {
7705 ufshcd_variant_hba_exit(hba);
7706 ufshcd_setup_vreg(hba, false);
7707 #if defined(CONFIG_PM_DEVFREQ)
7708 ufshcd_suspend_clkscaling(hba);
7709 #endif
7710 if (ufshcd_is_clkscaling_supported(hba)) {
7711 #if defined(CONFIG_PM_DEVFREQ)
7712 if (hba->devfreq)
7713 ufshcd_suspend_clkscaling(hba);
7714 #endif
7715 destroy_workqueue(hba->clk_scaling.workq);
7716 }
7717 ufshcd_setup_clocks(hba, false);
7718 ufshcd_setup_hba_vreg(hba, false);
7719 hba->is_powered = false;
7720 }
7721 }
7722
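/**
 * ufshcd_send_request_sense - issue a REQUEST SENSE command
 * @hba: per adapter instance
 * @sdp: SCSI device to send the command to
 *
 * Used to clear a pending unit attention condition on the given logical
 * unit. Returns 0 on success and non-zero on failure.
 */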
7723 static int
7724 ufshcd_send_request_sense(struct ufs_hba *hba, struct scsi_device *sdp)
7725 {
7726 unsigned char cmd[6] = {REQUEST_SENSE,
7727 0,
7728 0,
7729 0,
7730 UFSHCD_REQ_SENSE_SIZE,
7731 0};
7732 char *buffer;
7733 int ret;
7734
7735 buffer = kzalloc(UFSHCD_REQ_SENSE_SIZE, GFP_KERNEL);
7736 if (!buffer) {
7737 ret = -ENOMEM;
7738 goto out;
7739 }
7740
7741 ret = scsi_execute(sdp, cmd, DMA_FROM_DEVICE, buffer,
7742 UFSHCD_REQ_SENSE_SIZE, NULL, NULL,
7743 msecs_to_jiffies(1000), 3, 0, RQF_PM, NULL);
7744 if (ret)
7745 pr_err("%s: failed with err %d\n", __func__, ret);
7746
7747 kfree(buffer);
7748 out:
7749 return ret;
7750 }
7751
7752 /**
7753 * ufshcd_set_dev_pwr_mode - sends START STOP UNIT command to set device
7754 * power mode
7755 * @hba: per adapter instance
7756 * @pwr_mode: device power mode to set
7757 *
7758 * Returns 0 if requested power mode is set successfully
7759 * Returns non-zero if failed to set the requested power mode
7760 */
7761 static int ufshcd_set_dev_pwr_mode(struct ufs_hba *hba,
7762 enum ufs_dev_pwr_mode pwr_mode)
7763 {
7764 unsigned char cmd[6] = { START_STOP };
7765 struct scsi_sense_hdr sshdr;
7766 struct scsi_device *sdp;
7767 unsigned long flags;
7768 int ret;
7769
7770 spin_lock_irqsave(hba->host->host_lock, flags);
7771 sdp = hba->sdev_ufs_device;
7772 if (sdp) {
7773 ret = scsi_device_get(sdp);
7774 if (!ret && !scsi_device_online(sdp)) {
7775 ret = -ENODEV;
7776 scsi_device_put(sdp);
7777 }
7778 } else {
7779 ret = -ENODEV;
7780 }
7781 spin_unlock_irqrestore(hba->host->host_lock, flags);
7782
7783 if (ret)
7784 return ret;
7785
7786 /*
7787 * If scsi commands fail, the scsi mid-layer schedules scsi error-
7788 * handling, which would wait for host to be resumed. Since we know
7789 * we are functional while we are here, skip host resume in error
7790 * handling context.
7791 */
7792 hba->host->eh_noresume = 1;
7793 if (hba->wlun_dev_clr_ua) {
7794 ret = ufshcd_send_request_sense(hba, sdp);
7795 if (ret)
7796 goto out;
7797 /* Unit attention condition is cleared now */
7798 hba->wlun_dev_clr_ua = false;
7799 }
7800
7801 cmd[4] = pwr_mode << 4;
7802
7803 /*
7804 * This function is generally called from the power management
7805 * callbacks, hence set the RQF_PM flag so that it doesn't resume the
7806 * already suspended children.
7807 */
7808 pr_info("%s %d\n", __func__, __LINE__);
7809 ret = scsi_execute(sdp, cmd, DMA_NONE, NULL, 0, NULL, &sshdr,
7810 (23 * HZ), 0, 0, RQF_PM, NULL);
7811 pr_info("%s %d\n", __func__, __LINE__);
7812 if (ret) {
7813 sdev_printk(KERN_WARNING, sdp,
7814 "START_STOP failed for power mode: %d, result %x\n",
7815 pwr_mode, ret);
7816 if (driver_byte(ret) & DRIVER_SENSE)
7817 scsi_print_sense_hdr(sdp, NULL, &sshdr);
7818 }
7819
7820 if (!ret)
7821 hba->curr_dev_pwr_mode = pwr_mode;
7822 out:
7823 scsi_device_put(sdp);
7824 hba->host->eh_noresume = 0;
7825 return ret;
7826 }
7827
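/**
 * ufshcd_link_state_transition - move the UniPro link to a low power state
 * @hba: per adapter instance
 * @req_link_state: requested link state (Hibern8 or off)
 * @check_for_bkops: if set, don't turn the link off while auto-bkops is
 *                   enabled, as that would also power off the device
 *
 * On Hibern8 entry failure the host controller is reset and restored.
 * Returns 0 on success, non-zero on failure.
 */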
7828 static int ufshcd_link_state_transition(struct ufs_hba *hba,
7829 enum uic_link_state req_link_state,
7830 int check_for_bkops)
7831 {
7832 int ret = 0;
7833
7834 if (req_link_state == hba->uic_link_state)
7835 return 0;
7836
7837 if (req_link_state == UIC_LINK_HIBERN8_STATE ||
7838 req_link_state == UIC_LINK_OFF_STATE) {
7839 ufshcd_set_link_trans_hibern8(hba);
7840 ret = ufshcd_link_hibern8_ctrl(hba, true);
7841 if (!ret)
7842 ufshcd_set_link_hibern8(hba);
7843 else {
7844 unsigned long flags;
7845 bool saved_is_suspended = hba->clk_gating.is_suspended;
7846
7847 spin_lock_irqsave(hba->host->host_lock, flags);
7848 hba->clk_gating.state = __CLKS_ON;
7849 spin_unlock_irqrestore(hba->host->host_lock, flags);
7850
7851 hba->clk_gating.is_suspended = true;
7852 ufshcd_host_reset_and_restore(hba);
7853 spin_lock_irqsave(hba->host->host_lock, flags);
7854 hba->clk_gating.state = CLKS_ON;
7855 spin_unlock_irqrestore(hba->host->host_lock, flags);
7856 hba->clk_gating.is_suspended = saved_is_suspended;
7857
7858 goto out;
7859 }
7860
7862 /*
7863 * If autobkops is enabled, link can't be turned off because
7864 * turning off the link would also turn off the device.
7865 */
7866 if ((req_link_state == UIC_LINK_OFF_STATE) &&
7867 (!check_for_bkops || !hba->auto_bkops_enabled)) {
7869 unsigned long flags;
7870
7871 /*
7872 * Change controller state to "reset state" which
7873 * should also put the link in off/reset state
7874 */
7875
7876 spin_lock_irqsave(hba->host->host_lock, flags);
7877 hba->ufshcd_state = UFSHCD_STATE_RESET;
7878 ufshcd_hba_stop(hba, true);
7879 spin_unlock_irqrestore(hba->host->host_lock, flags);
7880 /*
7881 * TODO: Check if we need any delay to make sure that
7882 * controller is reset
7883 */
7884 ufshcd_set_link_off(hba);
7885 }
7886 }
7887
7888 out:
7889 return ret;
7890 }
7891
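/**
 * ufshcd_vreg_set_lpm - put the UFS device power rails into low power mode
 * @hba: per adapter instance
 *
 * Depending on the current device and link state this either switches all
 * rails off, turns off only VCC, or places VCCQ/VCCQ2 in low power mode.
 */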
7892 static void ufshcd_vreg_set_lpm(struct ufs_hba *hba)
7893 {
7894 /*
7895 * It seems some UFS devices may keep drawing more than sleep current
7896 * (at least for 500us) from the UFS rails (especially from the VCCQ rail).
7897 * To avoid this situation, add a 2ms delay before putting these UFS
7898 * rails into LPM mode.
7899 */
7900 if (!ufshcd_is_link_active(hba) &&
7901 hba->dev_quirks & UFS_DEVICE_QUIRK_DELAY_BEFORE_LPM)
7902 usleep_range(2000, 2100);
7903
7904 /*
7905 * If the UFS device is in UFS_Sleep state, turn off the VCC rail to
7906 * save some power.
7907 *
7908 * If the UFS device and link are in OFF state, all power supplies (VCC,
7909 * VCCQ, VCCQ2) can be turned off if power-on write protect is not
7910 * required. If the UFS link is inactive (Hibern8 or OFF state) and the
7911 * device is in sleep state, put the VCCQ & VCCQ2 rails in LPM mode.
7912 *
7913 * Ignore the error returned by ufshcd_toggle_vreg() as the device is
7914 * anyway in a low power state, which already saves some power.
7915 */
7916 if (ufshcd_is_ufs_dev_poweroff(hba) && ufshcd_is_link_off(hba) &&
7917 !hba->dev_info.is_lu_power_on_wp) {
7918 ufshcd_setup_vreg(hba, false);
7919 } else if (!ufshcd_is_ufs_dev_active(hba)) {
7920 ufshcd_toggle_vreg(hba->dev, hba->vreg_info.vcc, false);
7921 if (!ufshcd_is_link_active(hba)) {
7922 ufshcd_config_vreg_lpm(hba, hba->vreg_info.vccq);
7923 ufshcd_config_vreg_lpm(hba, hba->vreg_info.vccq2);
7924 }
7925 }
7926 }
7927
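/**
 * ufshcd_vreg_set_hpm - restore the UFS device power rails to active mode
 * @hba: per adapter instance
 *
 * Counterpart of ufshcd_vreg_set_lpm(). Returns 0 on success, negative
 * errno on failure.
 */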
7928 static int ufshcd_vreg_set_hpm(struct ufs_hba *hba)
7929 {
7930 int ret = 0;
7931
7932 if (ufshcd_is_ufs_dev_poweroff(hba) && ufshcd_is_link_off(hba) &&
7933 !hba->dev_info.is_lu_power_on_wp) {
7934 ret = ufshcd_setup_vreg(hba, true);
7935 } else if (!ufshcd_is_ufs_dev_active(hba)) {
7936 if (!ufshcd_is_link_active(hba)) {
7937 ret = ufshcd_config_vreg_hpm(hba, hba->vreg_info.vccq);
7938 if (ret)
7939 goto vcc_disable;
7940 ret = ufshcd_config_vreg_hpm(hba, hba->vreg_info.vccq2);
7941 if (ret)
7942 goto vccq_lpm;
7943 }
7944 ret = ufshcd_toggle_vreg(hba->dev, hba->vreg_info.vcc, true);
7945 }
7946 goto out;
7947
7948 vccq_lpm:
7949 ufshcd_config_vreg_lpm(hba, hba->vreg_info.vccq);
7950 vcc_disable:
7951 ufshcd_toggle_vreg(hba->dev, hba->vreg_info.vcc, false);
7952 out:
7953 return ret;
7954 }
7955
7956 static void ufshcd_hba_vreg_set_lpm(struct ufs_hba *hba)
7957 {
7958 if (ufshcd_is_link_off(hba))
7959 ufshcd_setup_hba_vreg(hba, false);
7960 }
7961
7962 static void ufshcd_hba_vreg_set_hpm(struct ufs_hba *hba)
7963 {
7964 if (ufshcd_is_link_off(hba))
7965 ufshcd_setup_hba_vreg(hba, true);
7966 }
7967
7968 /**
7969 * ufshcd_suspend - helper function for suspend operations
7970 * @hba: per adapter instance
7971 * @pm_op: desired low power operation type
7972 *
7973 * This function will try to put the UFS device and link into low power
7974 * mode based on the "rpm_lvl" (Runtime PM level) or "spm_lvl"
7975 * (System PM level).
7976 *
7977 * If this function is called during shutdown, it will make sure that
7978 * both UFS device and UFS link is powered off.
7979 *
7980 * NOTE: UFS device & link must be active before we enter in this function.
7981 *
7982 * Returns 0 for success and non-zero for failure
7983 */
7984 static int ufshcd_suspend(struct ufs_hba *hba, enum ufs_pm_op pm_op)
7985 {
7986 int ret = 0;
7987 enum ufs_pm_level pm_lvl;
7988 enum ufs_dev_pwr_mode req_dev_pwr_mode;
7989 enum uic_link_state req_link_state;
7990 bool gating_allowed = !ufshcd_can_fake_clkgating(hba);
7991
7992 hba->pm_op_in_progress = 1;
7993 if (!ufshcd_is_shutdown_pm(pm_op)) {
7994 pm_lvl = ufshcd_is_runtime_pm(pm_op) ?
7995 hba->rpm_lvl : hba->spm_lvl;
7996 req_dev_pwr_mode = ufs_get_pm_lvl_to_dev_pwr_mode(pm_lvl);
7997 req_link_state = ufs_get_pm_lvl_to_link_pwr_state(pm_lvl);
7998 } else {
7999 req_dev_pwr_mode = UFS_POWERDOWN_PWR_MODE;
8000 req_link_state = UIC_LINK_OFF_STATE;
8001 }
8002
8003 /*
8004 * If we can't transition into any of the low power modes
8005 * just gate the clocks.
8006 */
8007 ufshcd_hold(hba, false);
8008 hba->clk_gating.is_suspended = true;
8009
8010 if (hba->clk_scaling.is_allowed) {
8011 cancel_work_sync(&hba->clk_scaling.suspend_work);
8012 cancel_work_sync(&hba->clk_scaling.resume_work);
8013 #if defined(CONFIG_PM_DEVFREQ)
8014 ufshcd_suspend_clkscaling(hba);
8015 #endif
8016 }
8017
8018 if (req_dev_pwr_mode == UFS_ACTIVE_PWR_MODE &&
8019 req_link_state == UIC_LINK_ACTIVE_STATE) {
8020 goto disable_clks;
8021 }
8022
8023 if ((req_dev_pwr_mode == hba->curr_dev_pwr_mode) &&
8024 (req_link_state == hba->uic_link_state))
8025 goto enable_gating;
8026
8027 /* UFS device & link must be active before we enter in this function */
8028 if (!ufshcd_is_ufs_dev_active(hba) || !ufshcd_is_link_active(hba)) {
8029 ret = -EINVAL;
8030 goto enable_gating;
8031 }
8032
8033 if (ufshcd_is_runtime_pm(pm_op)) {
8034 if (ufshcd_can_autobkops_during_suspend(hba)) {
8035 /*
8036 * The device is idle with no requests in the queue,
8037 * allow background operations if bkops status shows
8038 * that performance might be impacted.
8039 */
8040 ret = ufshcd_urgent_bkops(hba);
8041 if (ret)
8042 goto enable_gating;
8043 } else {
8044 /* make sure that auto bkops is disabled */
8045 ufshcd_disable_auto_bkops(hba);
8046 }
8047 }
8048
8049 if (ufshcd_is_shutdown_pm(pm_op))
8050 ufs_shutdown_state = 1;
8051
8052 if ((req_dev_pwr_mode != hba->curr_dev_pwr_mode) &&
8053 ((ufshcd_is_runtime_pm(pm_op) && !hba->auto_bkops_enabled) ||
8054 !ufshcd_is_runtime_pm(pm_op))) {
8055 /* ensure that bkops is disabled */
8056 ufshcd_disable_auto_bkops(hba);
8057 ret = ufshcd_set_dev_pwr_mode(hba, req_dev_pwr_mode);
8058 if (ret)
8059 goto enable_gating;
8060 }
8061
8062 ret = ufshcd_link_state_transition(hba, req_link_state, 1);
8063 if (ret)
8064 goto set_dev_active;
8065
8066 disable_clks:
8067
8069 /*
8070 * Flush pending works before clock is disabled
8071 */
8072 cancel_work_sync(&hba->eh_work);
8073 cancel_work_sync(&hba->eeh_work);
8074
8075 /*
8076 * Disable the host irq, as there won't be any host controller
8077 * transaction expected till resume.
8078 */
8079 ufshcd_disable_irq(hba);
8080
8081 ufshcd_vreg_set_lpm(hba);
8082 udelay(50);
8083
8084 if (gating_allowed) {
8085 if (!ufshcd_is_link_active(hba))
8086 ufshcd_setup_clocks(hba, false);
8087 else
8088 /* If link is active, device ref_clk can't be switched off */
8089 __ufshcd_setup_clocks(hba, false, true);
8090 }
8091
8092 hba->clk_gating.state = CLKS_OFF;
8093 trace_ufshcd_clk_gating(dev_name(hba->dev), hba->clk_gating.state);
8094 /*
8095 * Call the vendor specific suspend callback; these callbacks may
8096 * access vendor specific host controller register space.
8098 */
8099 ret = ufshcd_vops_suspend(hba, pm_op);
8100 if (ret)
8101 goto set_link_active;
8102
8104 /* Put the host controller in low power mode if possible */
8105 ufshcd_hba_vreg_set_lpm(hba);
8106 goto out;
8107
8108 set_link_active:
8109 #if defined(CONFIG_PM_DEVFREQ)
8110 if (hba->clk_scaling.is_allowed)
8111 ufshcd_resume_clkscaling(hba);
8112 #endif
8113
8114 if (ufshcd_is_shutdown_pm(pm_op))
8115 goto out;
8116
8117 ret = ufshcd_enable_irq(hba);
8118 if (ret)
8119 goto out;
8120
8121 if (ufshcd_is_link_hibern8(hba)) {
8122 ufshcd_set_link_trans_active(hba);
8123 if (!ufshcd_link_hibern8_ctrl(hba, false))
8124 ufshcd_set_link_active(hba);
8125 else
8126 ufshcd_set_link_off(hba);
8127 } else if (ufshcd_is_link_off(hba))
8128 ufshcd_host_reset_and_restore(hba);
8129 set_dev_active:
8130 if (ufshcd_is_shutdown_pm(pm_op))
8131 goto out;
8132
8133 if (!ufshcd_set_dev_pwr_mode(hba, UFS_ACTIVE_PWR_MODE))
8134 ufshcd_disable_auto_bkops(hba);
8135 enable_gating:
8136 #if defined(CONFIG_PM_DEVFREQ)
8137 if (hba->clk_scaling.is_allowed)
8138 ufshcd_resume_clkscaling(hba);
8139 #endif
8140 hba->clk_gating.is_suspended = false;
8141 ufshcd_release(hba);
8142 out:
8143 hba->pm_op_in_progress = 0;
8144 dev_info(hba->dev, "UFS suspend done\n");
8145
8146 return ret;
8147 }
8148
8149 /**
8150 * ufshcd_resume - helper function for resume operations
8151 * @hba: per adapter instance
8152 * @pm_op: runtime PM or system PM
8153 *
8154 * This function basically brings the UFS device, UniPro link and controller
8155 * to active state.
8156 *
8157 * Returns 0 for success and non-zero for failure
8158 */
8159 static int ufshcd_resume(struct ufs_hba *hba, enum ufs_pm_op pm_op)
8160 {
8161 int ret;
8162 enum uic_link_state old_link_state;
8163 enum ufs_pm_level pm_lvl;
8164 bool gating_allowed = !ufshcd_can_fake_clkgating(hba);
8165
8166 hba->pm_op_in_progress = 1;
8167 if (ufshcd_is_system_pm(pm_op))
8168 pm_lvl = hba->spm_lvl;
8169 else
8170 pm_lvl = hba->rpm_lvl;
8171
8172 if (ufs_get_pm_lvl_to_link_pwr_state(pm_lvl) == UIC_LINK_OFF_STATE)
8173 hba->uic_link_state = UIC_LINK_OFF_STATE;
8174 old_link_state = hba->uic_link_state;
8175
8176 ufshcd_hba_vreg_set_hpm(hba);
8177
8178 ret = ufshcd_vreg_set_hpm(hba);
8179 if (ret)
8180 goto disable_irq_and_vops_clks;
8181
8182 /*
8183 * Call the vendor specific resume callback; these callbacks may
8184 * access vendor specific host controller register space.
8186 */
8187 ret = ufshcd_vops_resume(hba, pm_op);
8188 if (ret)
8189 goto disable_vreg;
8190
8191 if (gating_allowed) {
8192 /* Make sure clocks are enabled before accessing controller */
8193 ret = ufshcd_setup_clocks(hba, true);
8194 if (ret)
8195 goto disable_vreg;
8196 }
8197
8198 /* enable the host irq as host controller would be active soon */
8199 ret = ufshcd_enable_irq(hba);
8200 if (ret)
8201 goto disable_irq_and_vops_clks;
8202
8203 if (ufshcd_is_link_hibern8(hba)) {
8204 ufshcd_set_link_trans_active(hba);
8205 ret = ufshcd_link_hibern8_ctrl(hba, false);
8206 if (!ret)
8207 ufshcd_set_link_active(hba);
8208 else {
8209 ufshcd_set_link_off(hba);
8210 goto vendor_suspend;
8211 }
8212 } else if (ufshcd_is_link_off(hba)) {
8213 #ifdef CONFIG_SCSI_UFS_ASYNC_RELINK
8214 hba->async_resume = true;
8215 ret = ufshcd_host_reset_and_restore(hba);
8216 goto async_resume;
8217 #else
8218 ret = ufshcd_host_reset_and_restore(hba);
8219 #endif
8220
8221 /*
8222 * ufshcd_host_reset_and_restore() should have already
8223 * set the link state as active
8224 */
8225 if (ret || !ufshcd_is_link_active(hba))
8226 goto vendor_suspend;
8227 }
8228
8229 if (!ufshcd_is_ufs_dev_active(hba)) {
8230 ret = ufshcd_set_dev_pwr_mode(hba, UFS_ACTIVE_PWR_MODE);
8231 if (ret)
8232 goto set_old_link_state;
8233 }
8234
8235 if (ufshcd_keep_autobkops_enabled_except_suspend(hba))
8236 ufshcd_enable_auto_bkops(hba);
8237 else
8238 /*
8239 * If BKOPs operations are urgently needed at this moment then
8240 * keep auto-bkops enabled or else disable it.
8241 */
8242 ufshcd_urgent_bkops(hba);
8243 #ifdef CONFIG_SCSI_UFS_ASYNC_RELINK
8244 async_resume:
8245 #endif
8246 hba->clk_gating.is_suspended = false;
8247
8248 #if defined(CONFIG_PM_DEVFREQ)
8249 if (hba->clk_scaling.is_allowed)
8250 ufshcd_resume_clkscaling(hba);
8251 #endif
8252
8253 /* Schedule clock gating in case of no access to UFS device yet */
8254 ufshcd_release(hba);
8255 goto out;
8256
8257 set_old_link_state:
8258 ufshcd_link_state_transition(hba, old_link_state, 0);
8259 vendor_suspend:
8260 ufshcd_vops_suspend(hba, pm_op);
8261 disable_irq_and_vops_clks:
8262 ufshcd_disable_irq(hba);
8263 #if defined(CONFIG_PM_DEVFREQ)
8264 if (hba->clk_scaling.is_allowed)
8265 ufshcd_suspend_clkscaling(hba);
8266 #endif
8267
8268 if (gating_allowed)
8269 ufshcd_setup_clocks(hba, false);
8270 disable_vreg:
8271 ufshcd_vreg_set_lpm(hba);
8272 out:
8273 hba->pm_op_in_progress = 0;
8274
8275 if (hba->monitor.flag & UFSHCD_MONITOR_LEVEL1)
8276 dev_info(hba->dev, "UFS resume done\n");
8277
8278 return ret;
8279 }
8280
8281 /**
8282 * ufshcd_system_suspend - system suspend routine
8283 * @hba: per adapter instance
8285 *
8286 * Check the description of ufshcd_suspend() function for more details.
8287 *
8288 * Returns 0 for success and non-zero for failure
8289 */
8290 int ufshcd_system_suspend(struct ufs_hba *hba)
8291 {
8292 int ret = 0;
8293 ktime_t start = ktime_get();
8294
8295 if (!hba || !hba->is_powered)
8296 return 0;
8297
8298 if ((ufs_get_pm_lvl_to_dev_pwr_mode(hba->spm_lvl) ==
8299 hba->curr_dev_pwr_mode) &&
8300 (ufs_get_pm_lvl_to_link_pwr_state(hba->spm_lvl) ==
8301 hba->uic_link_state))
8302 goto out;
8303
8304 if (pm_runtime_suspended(hba->dev)) {
8305 /*
8306 * The UFS device and/or UFS link low power states during runtime
8307 * suspend may differ from what is expected during system suspend.
8308 * Hence runtime resume the device & link and let the system suspend
8309 * low power states take effect.
8310 * TODO: If resume takes too long, we might optimize it in the future
8311 * by not resuming everything if possible.
8312 */
8313 ret = ufshcd_runtime_resume(hba);
8314 if (ret)
8315 goto out;
8316 }
8317
8318 ret = ufshcd_suspend(hba, UFS_SYSTEM_PM);
8319 out:
8320 trace_ufshcd_system_suspend(dev_name(hba->dev), ret,
8321 ktime_to_us(ktime_sub(ktime_get(), start)),
8322 hba->curr_dev_pwr_mode, hba->uic_link_state);
8323 if (!ret)
8324 hba->is_sys_suspended = true;
8325 return ret;
8326 }
8327 EXPORT_SYMBOL(ufshcd_system_suspend);
8328
8329 /**
8330 * ufshcd_system_resume - system resume routine
8331 * @hba: per adapter instance
8332 *
8333 * Returns 0 for success and non-zero for failure
8334 */
8336 int ufshcd_system_resume(struct ufs_hba *hba)
8337 {
8338 int ret = 0;
8339 ktime_t start = ktime_get();
8340
8341 if (!hba)
8342 return -EINVAL;
8343
8344 if (!hba->is_powered || pm_runtime_suspended(hba->dev))
8345 /*
8346 * Let the runtime resume take care of resuming
8347 * if runtime suspended.
8348 */
8349 goto out;
8350 else
8351 ret = ufshcd_resume(hba, UFS_SYSTEM_PM);
8352 out:
8353 trace_ufshcd_system_resume(dev_name(hba->dev), ret,
8354 ktime_to_us(ktime_sub(ktime_get(), start)),
8355 hba->curr_dev_pwr_mode, hba->uic_link_state);
8356 if (!ret)
8357 hba->is_sys_suspended = false;
8358 return ret;
8359 }
8360 EXPORT_SYMBOL(ufshcd_system_resume);
8361
8362 /**
8363 * ufshcd_runtime_suspend - runtime suspend routine
8364 * @hba: per adapter instance
8365 *
8366 * Check the description of ufshcd_suspend() function for more details.
8367 *
8368 * Returns 0 for success and non-zero for failure
8369 */
8370 int ufshcd_runtime_suspend(struct ufs_hba *hba)
8371 {
8372 int ret = 0;
8373 ktime_t start = ktime_get();
8374
8375 if (!hba)
8376 return -EINVAL;
8377
8378 if (!hba->is_powered)
8379 goto out;
8380 else
8381 ret = ufshcd_suspend(hba, UFS_RUNTIME_PM);
8382 out:
8383 trace_ufshcd_runtime_suspend(dev_name(hba->dev), ret,
8384 ktime_to_us(ktime_sub(ktime_get(), start)),
8385 hba->curr_dev_pwr_mode, hba->uic_link_state);
8386 return ret;
8387 }
8388 EXPORT_SYMBOL(ufshcd_runtime_suspend);
8389
8390 /**
8391 * ufshcd_runtime_resume - runtime resume routine
8392 * @hba: per adapter instance
8393 *
8394 * This function basically brings the UFS device, UniPro link and controller
8395 * to active state. Following operations are done in this function:
8396 *
8397 * 1. Turn on all the controller related clocks
8398 * 2. Bring the UniPro link out of Hibernate state
8399 * 3. If UFS device is in sleep state, turn ON VCC rail and bring the UFS device
8400 * to active state.
8401 * 4. If auto-bkops is enabled on the device, disable it.
8402 *
8403 * So the following would be the possible power state after this function
8404 * returns successfully:
8405 * S1: UFS device in Active state with VCC rail ON
8406 * UniPro link in Active state
8407 * All the UFS/UniPro controller clocks are ON
8408 *
8409 * Returns 0 for success and non-zero for failure
8410 */
8411 int ufshcd_runtime_resume(struct ufs_hba *hba)
8412 {
8413 int ret = 0;
8414 ktime_t start = ktime_get();
8415
8416 if (!hba)
8417 return -EINVAL;
8418
8419 if (!hba->is_powered)
8420 goto out;
8421 else
8422 ret = ufshcd_resume(hba, UFS_RUNTIME_PM);
8423 out:
8424 trace_ufshcd_runtime_resume(dev_name(hba->dev), ret,
8425 ktime_to_us(ktime_sub(ktime_get(), start)),
8426 hba->curr_dev_pwr_mode, hba->uic_link_state);
8427 return ret;
8428 }
8429 EXPORT_SYMBOL(ufshcd_runtime_resume);
8430
8431 int ufshcd_runtime_idle(struct ufs_hba *hba)
8432 {
8433 return 0;
8434 }
8435 EXPORT_SYMBOL(ufshcd_runtime_idle);
8436
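/*
 * Common store handler for the rpm_lvl and spm_lvl sysfs nodes. The value
 * must be below UFS_PM_LVL_MAX and takes effect on the next runtime/system
 * PM transition, e.g. (the exact sysfs path depends on the platform device
 * and is shown here only as an illustration):
 *
 *   echo 3 > /sys/devices/platform/.../rpm_lvl
 */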
8437 static inline ssize_t ufshcd_pm_lvl_store(struct device *dev,
8438 struct device_attribute *attr,
8439 const char *buf, size_t count,
8440 bool rpm)
8441 {
8442 struct ufs_hba *hba = dev_get_drvdata(dev);
8443 unsigned long flags, value;
8444
8445 if (kstrtoul(buf, 0, &value))
8446 return -EINVAL;
8447
8448 if (value >= UFS_PM_LVL_MAX)
8449 return -EINVAL;
8450
8451 spin_lock_irqsave(hba->host->host_lock, flags);
8452 if (rpm)
8453 hba->rpm_lvl = value;
8454 else
8455 hba->spm_lvl = value;
8456 spin_unlock_irqrestore(hba->host->host_lock, flags);
8457 return count;
8458 }
8459
8460 static ssize_t ufshcd_rpm_lvl_show(struct device *dev,
8461 struct device_attribute *attr, char *buf)
8462 {
8463 struct ufs_hba *hba = dev_get_drvdata(dev);
8464 int curr_len;
8465 u8 lvl;
8466
8467 curr_len = snprintf(buf, PAGE_SIZE,
8468 "\nCurrent Runtime PM level [%d] => dev_state [%s] link_state [%s]\n",
8469 hba->rpm_lvl,
8470 ufschd_ufs_dev_pwr_mode_to_string(
8471 ufs_pm_lvl_states[hba->rpm_lvl].dev_state),
8472 ufschd_uic_link_state_to_string(
8473 ufs_pm_lvl_states[hba->rpm_lvl].link_state));
8474
8475 curr_len += snprintf((buf + curr_len), (PAGE_SIZE - curr_len),
8476 "\nAll available Runtime PM levels info:\n");
8477 for (lvl = UFS_PM_LVL_0; lvl < UFS_PM_LVL_MAX; lvl++)
8478 curr_len += snprintf((buf + curr_len), (PAGE_SIZE - curr_len),
8479 "\tRuntime PM level [%d] => dev_state [%s] link_state [%s]\n",
8480 lvl,
8481 ufschd_ufs_dev_pwr_mode_to_string(
8482 ufs_pm_lvl_states[lvl].dev_state),
8483 ufschd_uic_link_state_to_string(
8484 ufs_pm_lvl_states[lvl].link_state));
8485
8486 return curr_len;
8487 }
8488
8489 static ssize_t ufshcd_rpm_lvl_store(struct device *dev,
8490 struct device_attribute *attr, const char *buf, size_t count)
8491 {
8492 return ufshcd_pm_lvl_store(dev, attr, buf, count, true);
8493 }
8494
8495 static void ufshcd_add_rpm_lvl_sysfs_nodes(struct ufs_hba *hba)
8496 {
8497 hba->rpm_lvl_attr.show = ufshcd_rpm_lvl_show;
8498 hba->rpm_lvl_attr.store = ufshcd_rpm_lvl_store;
8499 sysfs_attr_init(&hba->rpm_lvl_attr.attr);
8500 hba->rpm_lvl_attr.attr.name = "rpm_lvl";
8501 hba->rpm_lvl_attr.attr.mode = 0644;
8502 if (device_create_file(hba->dev, &hba->rpm_lvl_attr))
8503 dev_err(hba->dev, "Failed to create sysfs for rpm_lvl\n");
8504 }
8505
8506 static ssize_t ufshcd_spm_lvl_show(struct device *dev,
8507 struct device_attribute *attr, char *buf)
8508 {
8509 struct ufs_hba *hba = dev_get_drvdata(dev);
8510 int curr_len;
8511 u8 lvl;
8512
8513 curr_len = snprintf(buf, PAGE_SIZE,
8514 "\nCurrent System PM level [%d] => dev_state [%s] link_state [%s]\n",
8515 hba->spm_lvl,
8516 ufschd_ufs_dev_pwr_mode_to_string(
8517 ufs_pm_lvl_states[hba->spm_lvl].dev_state),
8518 ufschd_uic_link_state_to_string(
8519 ufs_pm_lvl_states[hba->spm_lvl].link_state));
8520
8521 curr_len += snprintf((buf + curr_len), (PAGE_SIZE - curr_len),
8522 "\nAll available System PM levels info:\n");
8523 for (lvl = UFS_PM_LVL_0; lvl < UFS_PM_LVL_MAX; lvl++)
8524 curr_len += snprintf((buf + curr_len), (PAGE_SIZE - curr_len),
8525 "\tSystem PM level [%d] => dev_state [%s] link_state [%s]\n",
8526 lvl,
8527 ufschd_ufs_dev_pwr_mode_to_string(
8528 ufs_pm_lvl_states[lvl].dev_state),
8529 ufschd_uic_link_state_to_string(
8530 ufs_pm_lvl_states[lvl].link_state));
8531
8532 return curr_len;
8533 }
8534
8535 static ssize_t ufshcd_spm_lvl_store(struct device *dev,
8536 struct device_attribute *attr, const char *buf, size_t count)
8537 {
8538 return ufshcd_pm_lvl_store(dev, attr, buf, count, false);
8539 }
8540
8541 static void ufshcd_add_spm_lvl_sysfs_nodes(struct ufs_hba *hba)
8542 {
8543 hba->spm_lvl_attr.show = ufshcd_spm_lvl_show;
8544 hba->spm_lvl_attr.store = ufshcd_spm_lvl_store;
8545 sysfs_attr_init(&hba->spm_lvl_attr.attr);
8546 hba->spm_lvl_attr.attr.name = "spm_lvl";
8547 hba->spm_lvl_attr.attr.mode = 0644;
8548 if (device_create_file(hba->dev, &hba->spm_lvl_attr))
8549 dev_err(hba->dev, "Failed to create sysfs for spm_lvl\n");
8550 }
8551
8552 static inline void ufshcd_add_sysfs_nodes(struct ufs_hba *hba)
8553 {
8554 ufshcd_add_rpm_lvl_sysfs_nodes(hba);
8555 ufshcd_add_spm_lvl_sysfs_nodes(hba);
8556 }
8557
8558 static inline void ufshcd_remove_sysfs_nodes(struct ufs_hba *hba)
8559 {
8560 device_remove_file(hba->dev, &hba->rpm_lvl_attr);
8561 device_remove_file(hba->dev, &hba->spm_lvl_attr);
8562 }
8563
8564 /**
8565 * ufshcd_shutdown - shutdown routine
8566 * @hba: per adapter instance
8567 *
8568 * This function would power off both UFS device and UFS link.
8569 *
8570 * Returns 0 always to allow force shutdown even in case of errors.
8571 */
8572 int ufshcd_shutdown(struct ufs_hba *hba)
8573 {
8574 int ret = 0;
8575
8576 if (ufshcd_is_ufs_dev_poweroff(hba) && ufshcd_is_link_off(hba))
8577 goto out;
8578
8579 if (pm_runtime_suspended(hba->dev)) {
8580 ret = ufshcd_runtime_resume(hba);
8581 if (ret)
8582 goto out;
8583 }
8584
8585 ret = ufshcd_suspend(hba, UFS_SHUTDOWN_PM);
8586 out:
8587 if (ret)
8588 dev_err(hba->dev, "%s failed, err %d\n", __func__, ret);
8589 /* allow force shutdown even in case of errors */
8590 return 0;
8591 }
8592 EXPORT_SYMBOL(ufshcd_shutdown);
8593
8594 /**
8595 * ufshcd_remove - de-allocate SCSI host and host memory space
8596 * data structure memory
8597 * @hba: per adapter instance
8598 */
8599 void ufshcd_remove(struct ufs_hba *hba)
8600 {
8601 ufshcd_remove_sysfs_nodes(hba);
8602 scsi_remove_host(hba->host);
8603 /* disable interrupts */
8604 ufshcd_disable_intr(hba, hba->intr_mask);
8605 ufshcd_hba_stop(hba, true);
8606
8607 ufshcd_exit_clk_gating(hba);
8608 #if defined(CONFIG_PM_DEVFREQ)
8609 if (ufshcd_is_clkscaling_supported(hba))
8610 device_remove_file(hba->dev, &hba->clk_scaling.enable_attr);
8611 #endif
8612 ufshcd_hba_exit(hba);
8613 }
8614 EXPORT_SYMBOL_GPL(ufshcd_remove);
8615
8616 /**
8617 * ufshcd_dealloc_host - deallocate Host Bus Adapter (HBA)
8618 * @hba: pointer to Host Bus Adapter (HBA)
8619 */
8620 void ufshcd_dealloc_host(struct ufs_hba *hba)
8621 {
8622 scsi_host_put(hba->host);
8623 }
8624 EXPORT_SYMBOL_GPL(ufshcd_dealloc_host);
8625
8626 /**
8627 * ufshcd_set_dma_mask - Set dma mask based on the controller
8628 * addressing capability
8629 * @hba: per adapter instance
8630 *
8631 * Returns 0 for success, non-zero for failure
8632 */
8633 static int ufshcd_set_dma_mask(struct ufs_hba *hba)
8634 {
8635 if (hba->capabilities & MASK_64_ADDRESSING_SUPPORT) {
8636 if (!dma_set_mask_and_coherent(hba->dev, DMA_BIT_MASK(64)))
8637 return 0;
8638 }
8639 return dma_set_mask_and_coherent(hba->dev, DMA_BIT_MASK(32));
8640 }
8641
8642 /**
8643 * ufshcd_alloc_host - allocate Host Bus Adapter (HBA)
8644 * @dev: pointer to device handle
8645 * @hba_handle: driver private handle
8646 * Returns 0 on success, non-zero value on failure
8647 */
8648 int ufshcd_alloc_host(struct device *dev, struct ufs_hba **hba_handle)
8649 {
8650 struct Scsi_Host *host;
8651 struct ufs_hba *hba;
8652 int err = 0;
8653
8654 if (!dev) {
8655 dev_err(dev,
8656 "Invalid memory reference for dev is NULL\n");
8657 err = -ENODEV;
8658 goto out_error;
8659 }
8660
8661 host = scsi_host_alloc(&ufshcd_driver_template,
8662 sizeof(struct ufs_hba));
8663 if (!host) {
8664 dev_err(dev, "scsi_host_alloc failed\n");
8665 err = -ENOMEM;
8666 goto out_error;
8667 }
8668 hba = shost_priv(host);
8669 hba->host = host;
8670 hba->dev = dev;
8671 *hba_handle = hba;
8672
8673 INIT_LIST_HEAD(&hba->clk_list_head);
8674
8675 out_error:
8676 return err;
8677 }
8678 EXPORT_SYMBOL(ufshcd_alloc_host);
8679
8680 /**
8681 * ufshcd_init - Driver initialization routine
8682 * @hba: per-adapter instance
8683 * @mmio_base: base register address
8684 * @irq: Interrupt line of device
8685 * Returns 0 on success, non-zero value on failure
8686 */
8687 int ufshcd_init(struct ufs_hba *hba, void __iomem *mmio_base, unsigned int irq)
8688 {
8689 int err;
8690 struct Scsi_Host *host = hba->host;
8691 struct device *dev = hba->dev;
8692
8693 if (!mmio_base) {
8694 dev_err(hba->dev,
8695 "Invalid memory reference for mmio_base is NULL\n");
8696 err = -ENODEV;
8697 goto out_error;
8698 }
8699
8700 hba->mmio_base = mmio_base;
8701 hba->irq = irq;
8702
8703 /* Set descriptor lengths to specification defaults */
8704 ufshcd_def_desc_sizes(hba);
8705
8706 err = ufshcd_hba_init(hba);
8707 if (err)
8708 goto out_error;
8709
8710 /* Read capabilities registers */
8711 ufshcd_hba_capabilities(hba);
8712
8713 /* Get UFS version supported by the controller */
8714 hba->ufs_version = ufshcd_get_ufs_version(hba);
8715
8716 if ((hba->ufs_version != UFSHCI_VERSION_10) &&
8717 (hba->ufs_version != UFSHCI_VERSION_11) &&
8718 (hba->ufs_version != UFSHCI_VERSION_20) &&
8719 (hba->ufs_version != UFSHCI_VERSION_21))
8720 dev_err(hba->dev, "invalid UFS version 0x%x\n",
8721 hba->ufs_version);
8722
8723 /* Get Interrupt bit mask per version */
8724 hba->intr_mask = ufshcd_get_intr_mask(hba);
8725
8726 err = ufshcd_set_dma_mask(hba);
8727 if (err) {
8728 dev_err(hba->dev, "set dma mask failed\n");
8729 goto out_disable;
8730 }
8731
8732 /* Allocate memory for host memory space */
8733 err = ufshcd_memory_alloc(hba);
8734 if (err) {
8735 dev_err(hba->dev, "Memory allocation failed\n");
8736 goto out_disable;
8737 }
8738
8739 /* Configure LRB */
8740 ufshcd_host_memory_configure(hba);
8741
8742 host->can_queue = hba->nutrs;
8743 host->cmd_per_lun = hba->nutrs;
8744 host->max_id = UFSHCD_MAX_ID;
8745 host->max_lun = UFS_MAX_LUNS;
8746 host->max_channel = UFSHCD_MAX_CHANNEL;
8747 host->unique_id = host->host_no;
8748 host->max_cmd_len = MAX_CDB_SIZE;
8749
8750 hba->max_pwr_info.is_valid = false;
8751
8752 /* Initialize wait queue for task management */
8753 init_waitqueue_head(&hba->tm_wq);
8754 init_waitqueue_head(&hba->tm_tag_wq);
8755
8756 /* Initialize work queues */
8757 INIT_WORK(&hba->eh_work, ufshcd_err_handler);
8758 INIT_WORK(&hba->eeh_work, ufshcd_exception_event_handler);
8759
8760 /* Initialize UIC command mutex */
8761 mutex_init(&hba->uic_cmd_mutex);
8762
8763 /* Initialize mutex for device management commands */
8764 mutex_init(&hba->dev_cmd.lock);
8765
8766 init_rwsem(&hba->clk_scaling_lock);
8767
8768 /* Initialize device management tag acquire wait queue */
8769 init_waitqueue_head(&hba->dev_cmd.tag_wq);
8770
8771 /* Initialize monitor */
8772 ufshcd_init_monitor(hba);
8773
8774 err = ufshcd_init_clk_gating(hba);
8775 if (err) {
8776 dev_err(hba->dev, "init clk_gating failed\n");
8777 goto out_disable;
8778 }
8779
8780 /*
8781 * In order to avoid any spurious interrupt immediately after
8782 * registering UFS controller interrupt handler, clear any pending UFS
8783 * interrupt status and disable all the UFS interrupts.
8784 */
8785 ufshcd_writel(hba, ufshcd_readl(hba, REG_INTERRUPT_STATUS),
8786 REG_INTERRUPT_STATUS);
8787 ufshcd_writel(hba, 0, REG_INTERRUPT_ENABLE);
8788 /*
8789 * Make sure that UFS interrupts are disabled and any pending interrupt
8790 * status is cleared before registering UFS interrupt handler.
8791 */
8792 mb();
8793
8794 /* IRQ registration */
8795 err = devm_request_irq(dev, irq, ufshcd_intr, IRQF_SHARED, UFSHCD, hba);
8796 if (err) {
8797 dev_err(hba->dev, "request irq failed\n");
8798 goto exit_gating;
8799 } else {
8800 hba->is_irq_enabled = true;
8801 }
8802
8803 err = scsi_add_host(host, hba->dev);
8804 if (err) {
8805 dev_err(hba->dev, "scsi_add_host failed\n");
8806 goto exit_gating;
8807 }
8808
8809 #if defined(CONFIG_PM_DEVFREQ)
8810 if (ufshcd_is_clkscaling_supported(hba)) {
8811 char wq_name[sizeof("ufs_clkscaling_00")];
8812
8813 INIT_WORK(&hba->clk_scaling.suspend_work,
8814 ufshcd_clk_scaling_suspend_work);
8815 INIT_WORK(&hba->clk_scaling.resume_work,
8816 ufshcd_clk_scaling_resume_work);
8817
8818 snprintf(wq_name, sizeof(wq_name), "ufs_clkscaling_%d",
8819 host->host_no);
8820 hba->clk_scaling.workq = create_singlethread_workqueue(wq_name);
8821
8822 ufshcd_clkscaling_init_sysfs(hba);
8823 }
8824 #endif
8825
8826 /* Hold auto suspend until async scan completes */
8827 pm_runtime_get_sync(dev);
8828
8829 /*
8830 * The device-initialize-sequence hasn't been invoked yet.
8831 * Set the device to power-off state
8832 */
8833 ufshcd_set_ufs_dev_poweroff(hba);
8834
8835 async_schedule(ufshcd_async_scan, hba);
8836 ufshcd_add_sysfs_nodes(hba);
8837
8838 return 0;
8839
8840 exit_gating:
8841 ufshcd_exit_clk_gating(hba);
8842 out_disable:
8843 hba->is_irq_enabled = false;
8844 ufshcd_hba_exit(hba);
8845 out_error:
8846 return err;
8847 }
8848 EXPORT_SYMBOL_GPL(ufshcd_init);
8849
8850 MODULE_AUTHOR("Santosh Yaragnavi <santosh.sy@samsung.com>");
8851 MODULE_AUTHOR("Vinayak Holikatti <h.vinayak@samsung.com>");
8852 MODULE_DESCRIPTION("Generic UFS host controller driver Core");
8853 MODULE_LICENSE("GPL");
8854 MODULE_VERSION(UFSHCD_DRIVER_VERSION);