1 /*
2 * Universal Flash Storage Host controller driver Core
3 *
4 * This code is based on drivers/scsi/ufs/ufshcd.c
5 * Copyright (C) 2011-2013 Samsung India Software Operations
6 * Copyright (c) 2013-2016, The Linux Foundation. All rights reserved.
7 *
8 * Authors:
9 * Santosh Yaraganavi <santosh.sy@samsung.com>
10 * Vinayak Holikatti <h.vinayak@samsung.com>
11 *
12 * This program is free software; you can redistribute it and/or
13 * modify it under the terms of the GNU General Public License
14 * as published by the Free Software Foundation; either version 2
15 * of the License, or (at your option) any later version.
16 * See the COPYING file in the top-level directory or visit
17 * <http://www.gnu.org/licenses/gpl-2.0.html>
18 *
19 * This program is distributed in the hope that it will be useful,
20 * but WITHOUT ANY WARRANTY; without even the implied warranty of
21 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
22 * GNU General Public License for more details.
23 *
24 * This program is provided "AS IS" and "WITH ALL FAULTS" and
25 * without warranty of any kind. You are solely responsible for
26 * determining the appropriateness of using and distributing
27 * the program and assume all risks associated with your exercise
28 * of rights with respect to the program, including but not limited
29 * to infringement of third party rights, the risks and costs of
30 * program errors, damage to or loss of data, programs or equipment,
31 * and unavailability or interruption of operations. Under no
32 * circumstances will the contributor of this Program be liable for
33 * any damages of any kind arising from your use or distribution of
34 * this program.
35 *
36 * The Linux Foundation chooses to take subject only to the GPLv2
37 * license terms, and distributes only under these terms.
38 */
39
40 #include <linux/async.h>
41 #if defined(CONFIG_PM_DEVFREQ)
42 #include <linux/devfreq.h>
43 #endif
44 #include <linux/nls.h>
45 #include <linux/smc.h>
46 #include <scsi/ufs/ioctl.h>
47 #include <linux/of.h>
48 #include <linux/blkdev.h>
49 #include <linux/gpio.h>
50
51 #include "ufshcd.h"
52 #include "ufs_quirks.h"
53 #include "unipro.h"
54 #include "ufs-exynos.h"
56
57 #define CREATE_TRACE_POINTS
58 #include <trace/events/ufs.h>
59
60 #define UFSHCD_REQ_SENSE_SIZE 18
61
62 #define UFSHCD_ENABLE_INTRS (UTP_TRANSFER_REQ_COMPL |\
63 UTP_TASK_REQ_COMPL |\
64 UFSHCD_ERROR_MASK)
65 /* UIC command timeout, unit: ms */
66 #define UIC_CMD_TIMEOUT 500
67
68 /* NOP OUT retries waiting for NOP IN response */
69 #define NOP_OUT_RETRIES 10
70 /* Timeout after 30 msecs if NOP OUT hangs without response */
71 #define NOP_OUT_TIMEOUT 30 /* msecs */
72
73 /* Query request retries */
74 #define QUERY_REQ_RETRIES 3
75 /* Query request timeout */
76 #define QUERY_REQ_TIMEOUT 1500 /* 1.5 seconds */
77 /*
78 * Query request timeout for fDeviceInit flag
79 * The fDeviceInit query response time of some devices is so long that the
80 * default QUERY_REQ_TIMEOUT may not be enough for them.
81 */
82 #define QUERY_FDEVICEINIT_REQ_TIMEOUT 600 /* msec */
83
84 /* Task management command timeout */
85 #define TM_CMD_TIMEOUT 300 /* msecs */
86
87 /* maximum number of retries for a general UIC command */
88 #define UFS_UIC_COMMAND_RETRIES 3
89
90 /* maximum number of link-startup retries */
91 #define DME_LINKSTARTUP_RETRIES 3
92
93 /* Maximum retries for Hibern8 enter */
94 #define UIC_HIBERN8_ENTER_RETRIES 3
95
96 /* maximum number of reset retries before giving up */
97 #define MAX_HOST_RESET_RETRIES 5
98
99 /* Expose the flag value from utp_upiu_query.value */
100 #define MASK_QUERY_UPIU_FLAG_LOC 0xFF
101
102 /* Interrupt aggregation default timeout, unit: 40us */
103 #define INT_AGGR_DEF_TO 0x01
104
105 /* Link Hibernation delay, msecs */
106 #define LINK_H8_DELAY 20
107
108 /* UFS link setup retries */
109 #define UFS_LINK_SETUP_RETRIES 5
110
111 /* IOCTL opcode for command - ufs set device read only */
112 #define UFS_IOCTL_BLKROSET BLKROSET
113
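/*
 * Statement-expression helper: evaluates to the return value of
 * ufshcd_enable_vreg() or ufshcd_disable_vreg(), depending on @_on, so
 * callers can use the macro directly in assignments and error checks.
 */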
114 #define ufshcd_toggle_vreg(_dev, _vreg, _on) \
115 ({ \
116 int _ret; \
117 if (_on) \
118 _ret = ufshcd_enable_vreg(_dev, _vreg); \
119 else \
120 _ret = ufshcd_disable_vreg(_dev, _vreg); \
121 _ret; \
122 })
123
124 static int ufs_shutdown_state = 0;
125
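/*
 * Dump @len bytes of @buf at KERN_ERR level: 16 bytes per line, grouped
 * into 4-byte words, each line prefixed with @prefix_str and the offset.
 */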
126 #define ufshcd_hex_dump(prefix_str, buf, len) \
127 print_hex_dump(KERN_ERR, prefix_str, DUMP_PREFIX_OFFSET, 16, 4, buf, len, false)
128
129 enum {
130 UFSHCD_MAX_CHANNEL = 0,
131 UFSHCD_MAX_ID = 1,
132 UFSHCD_CMD_PER_LUN = 32,
133 UFSHCD_CAN_QUEUE = 32,
134 };
135
136 /* UFSHCD states */
137 enum {
138 UFSHCD_STATE_RESET,
139 UFSHCD_STATE_ERROR,
140 UFSHCD_STATE_OPERATIONAL,
141 UFSHCD_STATE_EH_SCHEDULED,
142 };
143
144 /* UFSHCD error handling flags */
145 enum {
146 UFSHCD_EH_IN_PROGRESS = (1 << 0),
147 };
148
149 /* UFSHCD UIC layer error flags */
150 enum {
151 UFSHCD_UIC_DL_PA_INIT_ERROR = (1 << 0), /* Data link layer error */
152 UFSHCD_UIC_DL_NAC_RECEIVED_ERROR = (1 << 1), /* Data link layer error */
153 UFSHCD_UIC_DL_TCx_REPLAY_ERROR = (1 << 2), /* Data link layer error */
154 UFSHCD_UIC_NL_ERROR = (1 << 3), /* Network layer error */
155 UFSHCD_UIC_TL_ERROR = (1 << 4), /* Transport Layer error */
156 UFSHCD_UIC_DME_ERROR = (1 << 5), /* DME error */
157 UFSHCD_UIC_DL_ERROR = (1 << 6), /* Data link layer error */
158 };
159
160 #define ufshcd_set_eh_in_progress(h) \
161 ((h)->eh_flags |= UFSHCD_EH_IN_PROGRESS)
162 #define ufshcd_eh_in_progress(h) \
163 ((h)->eh_flags & UFSHCD_EH_IN_PROGRESS)
164 #define ufshcd_clear_eh_in_progress(h) \
165 ((h)->eh_flags &= ~UFSHCD_EH_IN_PROGRESS)
166
167 #define ufshcd_set_ufs_dev_active(h) \
168 ((h)->curr_dev_pwr_mode = UFS_ACTIVE_PWR_MODE)
169 #define ufshcd_set_ufs_dev_sleep(h) \
170 ((h)->curr_dev_pwr_mode = UFS_SLEEP_PWR_MODE)
171 #define ufshcd_set_ufs_dev_poweroff(h) \
172 ((h)->curr_dev_pwr_mode = UFS_POWERDOWN_PWR_MODE)
173 #define ufshcd_is_ufs_dev_active(h) \
174 ((h)->curr_dev_pwr_mode == UFS_ACTIVE_PWR_MODE)
175 #define ufshcd_is_ufs_dev_sleep(h) \
176 ((h)->curr_dev_pwr_mode == UFS_SLEEP_PWR_MODE)
177 #define ufshcd_is_ufs_dev_poweroff(h) \
178 ((h)->curr_dev_pwr_mode == UFS_POWERDOWN_PWR_MODE)
179
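/*
 * Table mapping each UFS PM level to a (device power mode, UIC link
 * state) pair; ufs_pm_level is used as a direct index into it by the
 * helpers below.
 */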
180 static struct ufs_pm_lvl_states ufs_pm_lvl_states[] = {
181 {UFS_ACTIVE_PWR_MODE, UIC_LINK_ACTIVE_STATE},
182 {UFS_ACTIVE_PWR_MODE, UIC_LINK_HIBERN8_STATE},
183 {UFS_SLEEP_PWR_MODE, UIC_LINK_ACTIVE_STATE},
184 {UFS_SLEEP_PWR_MODE, UIC_LINK_HIBERN8_STATE},
185 {UFS_POWERDOWN_PWR_MODE, UIC_LINK_HIBERN8_STATE},
186 {UFS_POWERDOWN_PWR_MODE, UIC_LINK_OFF_STATE},
187 };
188
189 static inline enum ufs_dev_pwr_mode
190 ufs_get_pm_lvl_to_dev_pwr_mode(enum ufs_pm_level lvl)
191 {
192 return ufs_pm_lvl_states[lvl].dev_state;
193 }
194
195 static inline enum uic_link_state
196 ufs_get_pm_lvl_to_link_pwr_state(enum ufs_pm_level lvl)
197 {
198 return ufs_pm_lvl_states[lvl].link_state;
199 }
200
201 static struct ufs_dev_fix ufs_fixups[] = {
202 /* UFS cards deviations table */
203 UFS_FIX(UFS_VENDOR_SAMSUNG, UFS_ANY_MODEL,
204 UFS_DEVICE_QUIRK_DELAY_BEFORE_LPM),
205 UFS_FIX(UFS_VENDOR_SAMSUNG, UFS_ANY_MODEL, UFS_DEVICE_NO_VCCQ),
206 UFS_FIX(UFS_VENDOR_SAMSUNG, UFS_ANY_MODEL,
207 UFS_DEVICE_QUIRK_RECOVERY_FROM_DL_NAC_ERRORS),
208 UFS_FIX(UFS_VENDOR_SAMSUNG, UFS_ANY_MODEL,
209 UFS_DEVICE_NO_FASTAUTO),
210 UFS_FIX(UFS_VENDOR_SAMSUNG, UFS_ANY_MODEL,
211 UFS_DEVICE_QUIRK_HOST_PA_TACTIVATE),
212 UFS_FIX(UFS_VENDOR_TOSHIBA, UFS_ANY_MODEL,
213 UFS_DEVICE_QUIRK_DELAY_BEFORE_LPM),
214 UFS_FIX(UFS_VENDOR_TOSHIBA, "THGLF2G9C8KBADG",
215 UFS_DEVICE_QUIRK_PA_TACTIVATE),
216 UFS_FIX(UFS_VENDOR_TOSHIBA, "THGLF2G9D8KBADG",
217 UFS_DEVICE_QUIRK_PA_TACTIVATE),
218 UFS_FIX(UFS_VENDOR_SKHYNIX, UFS_ANY_MODEL, UFS_DEVICE_NO_VCCQ),
219 UFS_FIX(UFS_VENDOR_SKHYNIX, UFS_ANY_MODEL,
220 UFS_DEVICE_QUIRK_HOST_PA_SAVECONFIGTIME),
221
222 END_FIX
223 };
224
225 static void ufshcd_tmc_handler(struct ufs_hba *hba);
226 static void ufshcd_async_scan(void *data, async_cookie_t cookie);
227 static int ufshcd_reset_and_restore(struct ufs_hba *hba);
228 static int ufshcd_eh_host_reset_handler(struct scsi_cmnd *cmd);
229 static int ufshcd_clear_tm_cmd(struct ufs_hba *hba, int tag);
230 static void ufshcd_hba_exit(struct ufs_hba *hba);
231 static int ufshcd_probe_hba(struct ufs_hba *hba);
232 static int __ufshcd_setup_clocks(struct ufs_hba *hba, bool on,
233 bool skip_ref_clk);
234 static int ufshcd_setup_clocks(struct ufs_hba *hba, bool on);
235 static int ufshcd_set_vccq_rail_unused(struct ufs_hba *hba, bool unused);
236 static int ufshcd_uic_hibern8_exit(struct ufs_hba *hba);
237 static int ufshcd_uic_hibern8_enter(struct ufs_hba *hba);
238 static int ufshcd_link_hibern8_ctrl(struct ufs_hba *hba, bool en);
239 static inline void ufshcd_add_delay_before_dme_cmd(struct ufs_hba *hba);
240 static int ufshcd_host_reset_and_restore(struct ufs_hba *hba);
241 #if defined(CONFIG_PM_DEVFREQ)
242 static void ufshcd_resume_clkscaling(struct ufs_hba *hba);
243 static void ufshcd_suspend_clkscaling(struct ufs_hba *hba);
244 static void __ufshcd_suspend_clkscaling(struct ufs_hba *hba);
245 static int ufshcd_scale_clks(struct ufs_hba *hba, bool scale_up);
246 #endif
247 static irqreturn_t ufshcd_intr(int irq, void *__hba);
248 static int ufshcd_change_power_mode(struct ufs_hba *hba,
249 struct ufs_pa_layer_attr *pwr_mode);
250 static int ufshcd_set_dev_pwr_mode(struct ufs_hba *hba,
251 enum ufs_dev_pwr_mode pwr_mode);
252 static int ufshcd_send_request_sense(struct ufs_hba *hba,
253 struct scsi_device *sdp);
254 static void ufshcd_vreg_set_lpm(struct ufs_hba *hba);
255 static int ufshcd_vreg_set_hpm(struct ufs_hba *hba);
256 static inline bool ufshcd_valid_tag(struct ufs_hba *hba, int tag)
257 {
258 return tag >= 0 && tag < hba->nutrs;
259 }
260
261 static ssize_t ufshcd_monitor_show(struct device *dev,
262 struct device_attribute *attr, char *buf)
263 {
264 struct ufs_hba *hba = dev_get_drvdata(dev);
265
266 return snprintf(buf, PAGE_SIZE, "%lu\n", hba->monitor.flag);
267 }
268
269 static ssize_t ufshcd_monitor_store(struct device *dev,
270 struct device_attribute *attr, const char *buf, size_t count)
271 {
272 struct ufs_hba *hba = dev_get_drvdata(dev);
273 unsigned long value;
274
275 if (kstrtoul(buf, 0, &value))
276 return -EINVAL;
277
278 hba->monitor.flag = value;
279 return count;
280 }
281
282 static void ufshcd_init_monitor(struct ufs_hba *hba)
283 {
284 hba->monitor.attrs.show = ufshcd_monitor_show;
285 hba->monitor.attrs.store = ufshcd_monitor_store;
286 sysfs_attr_init(&hba->monitor.attrs.attr);
287 hba->monitor.attrs.attr.name = "monitor";
288 hba->monitor.attrs.attr.mode = S_IRUGO | S_IWUSR;
289 if (device_create_file(hba->dev, &hba->monitor.attrs))
290 dev_err(hba->dev, "Failed to create sysfs for monitor\n");
291 }
292
293
294 static inline int ufshcd_enable_irq(struct ufs_hba *hba)
295 {
296 int ret = 0;
297
298 if (!hba->is_irq_enabled) {
299 ret = request_irq(hba->irq, ufshcd_intr, IRQF_SHARED, UFSHCD,
300 hba);
301 if (ret)
302 dev_err(hba->dev, "%s: request_irq failed, ret=%d\n",
303 __func__, ret);
304 hba->is_irq_enabled = true;
305 }
306
307 return ret;
308 }
309
310 static inline void ufshcd_disable_irq(struct ufs_hba *hba)
311 {
312 if (hba->is_irq_enabled) {
313 free_irq(hba->irq, hba);
314 hba->is_irq_enabled = false;
315 }
316 }
317
318 /* replace non-printable or non-ASCII characters with spaces */
319 static inline void ufshcd_remove_non_printable(char *val)
320 {
321 if (!val)
322 return;
323
324 if (*val < 0x20 || *val > 0x7e)
325 *val = ' ';
326 }
327
328 static void ufshcd_add_command_trace(struct ufs_hba *hba,
329 unsigned int tag, const char *str)
330 {
331 sector_t lba = -1;
332 u8 opcode = 0;
333 u32 intr, doorbell;
334 struct ufshcd_lrb *lrbp;
335 int transfer_len = -1;
336
337 if (!trace_ufshcd_command_enabled())
338 return;
339
340 lrbp = &hba->lrb[tag];
341
342 if (lrbp->cmd) { /* data phase exists */
343 opcode = (u8)(*lrbp->cmd->cmnd);
344 if ((opcode == READ_10) || (opcode == WRITE_10)) {
345 /*
346 * Currently we only fully trace read(10) and write(10)
347 * commands
348 */
349 if (lrbp->cmd->request && lrbp->cmd->request->bio)
350 lba =
351 lrbp->cmd->request->bio->bi_iter.bi_sector;
352 transfer_len = be32_to_cpu(
353 lrbp->ucd_req_ptr->sc.exp_data_transfer_len);
354 }
355 }
356
357 intr = ufshcd_readl(hba, REG_INTERRUPT_STATUS);
358 doorbell = ufshcd_readl(hba, REG_UTP_TRANSFER_REQ_DOOR_BELL);
359 trace_ufshcd_command(dev_name(hba->dev), str, tag,
360 doorbell, transfer_len, intr, lba, opcode);
361 }
362
363 static void ufshcd_print_clk_freqs(struct ufs_hba *hba)
364 {
365 struct ufs_clk_info *clki;
366 struct list_head *head = &hba->clk_list_head;
367
368 if (list_empty(head))
369 return;
370
371 list_for_each_entry(clki, head, list) {
372 if (!IS_ERR_OR_NULL(clki->clk) && clki->min_freq &&
373 clki->max_freq)
374 dev_err(hba->dev, "clk: %s, rate: %u\n",
375 clki->name, clki->curr_freq);
376 }
377 }
378
379 static void ufshcd_print_uic_err_hist(struct ufs_hba *hba,
380 struct ufs_uic_err_reg_hist *err_hist, char *err_name)
381 {
382 int i;
383
384 for (i = 0; i < UIC_ERR_REG_HIST_LENGTH; i++) {
385 int p = (i + err_hist->pos - 1) % UIC_ERR_REG_HIST_LENGTH;
386
387 if (err_hist->reg[p] == 0)
388 continue;
389 dev_err(hba->dev, "%s[%d] = 0x%x at %lld us\n", err_name, i,
390 err_hist->reg[p], ktime_to_us(err_hist->tstamp[p]));
391 }
392 }
393
394 static void ufshcd_print_host_regs(struct ufs_hba *hba)
395 {
396 /*
397 * hex_dump reads its data without the readl macro. This might
398 * cause inconsistency issues on some platforms, as the printed
399 * values may come from a cache rather than the most recent value.
400 * To know whether you are looking at an un-cached version, verify
401 * that the IORESOURCE_MEM flag is set when xxx_get_resource() is
402 * invoked during the platform/pci probe function.
403 */
404 ufshcd_hex_dump("host regs: ", hba->mmio_base, UFSHCI_REG_SPACE_SIZE);
405 dev_err(hba->dev, "hba->ufs_version = 0x%x, hba->capabilities = 0x%x\n",
406 hba->ufs_version, hba->capabilities);
407 dev_err(hba->dev,
408 "hba->outstanding_reqs = 0x%x, hba->outstanding_tasks = 0x%x\n",
409 (u32)hba->outstanding_reqs, (u32)hba->outstanding_tasks);
410 dev_err(hba->dev,
411 "last_hibern8_exit_tstamp at %lld us, hibern8_exit_cnt = %d\n",
412 ktime_to_us(hba->ufs_stats.last_hibern8_exit_tstamp),
413 hba->ufs_stats.hibern8_exit_cnt);
414
415 ufshcd_print_uic_err_hist(hba, &hba->ufs_stats.pa_err, "pa_err");
416 ufshcd_print_uic_err_hist(hba, &hba->ufs_stats.dl_err, "dl_err");
417 ufshcd_print_uic_err_hist(hba, &hba->ufs_stats.nl_err, "nl_err");
418 ufshcd_print_uic_err_hist(hba, &hba->ufs_stats.tl_err, "tl_err");
419 ufshcd_print_uic_err_hist(hba, &hba->ufs_stats.dme_err, "dme_err");
420
421 ufshcd_print_clk_freqs(hba);
422
423 if (hba->vops && hba->vops->dbg_register_dump)
424 hba->vops->dbg_register_dump(hba);
425 }
426
427 static
428 void ufshcd_print_trs(struct ufs_hba *hba, unsigned long bitmap, bool pr_prdt)
429 {
430 struct ufshcd_lrb *lrbp;
431 int prdt_length;
432 int tag;
433
434 for_each_set_bit(tag, &bitmap, hba->nutrs) {
435 lrbp = &hba->lrb[tag];
436
437 dev_err(hba->dev, "UPIU[%d] - issue time %lld us\n",
438 tag, ktime_to_us(lrbp->issue_time_stamp));
439 dev_err(hba->dev,
440 "UPIU[%d] - Transfer Request Descriptor phys@0x%llx\n",
441 tag, (u64)lrbp->utrd_dma_addr);
442
443 ufshcd_hex_dump("UPIU TRD: ", lrbp->utr_descriptor_ptr,
444 sizeof(struct utp_transfer_req_desc));
445 dev_err(hba->dev, "UPIU[%d] - Request UPIU phys@0x%llx\n", tag,
446 (u64)lrbp->ucd_req_dma_addr);
447 ufshcd_hex_dump("UPIU REQ: ", lrbp->ucd_req_ptr,
448 sizeof(struct utp_upiu_req));
449 dev_err(hba->dev, "UPIU[%d] - Response UPIU phys@0x%llx\n", tag,
450 (u64)lrbp->ucd_rsp_dma_addr);
451 ufshcd_hex_dump("UPIU RSP: ", lrbp->ucd_rsp_ptr,
452 sizeof(struct utp_upiu_rsp));
453
454 if (hba->quirks & UFSHCD_QUIRK_PRDT_BYTE_GRAN)
455 prdt_length = le16_to_cpu(lrbp->utr_descriptor_ptr->prd_table_length)
456 / sizeof(struct ufshcd_sg_entry);
457 else
458 prdt_length = le16_to_cpu(lrbp->utr_descriptor_ptr->prd_table_length);
459
460 dev_err(hba->dev,
461 "UPIU[%d] - PRDT - %d entries phys@0x%llx\n",
462 tag, prdt_length,
463 (u64)lrbp->ucd_prdt_dma_addr);
464 if (pr_prdt)
465 ufshcd_hex_dump("UPIU PRDT: ", lrbp->ucd_prdt_ptr,
466 sizeof(struct ufshcd_sg_entry) * prdt_length);
467 }
468 }
469
470 static void ufshcd_print_tmrs(struct ufs_hba *hba, unsigned long bitmap)
471 {
472 struct utp_task_req_desc *tmrdp;
473 int tag;
474
475 for_each_set_bit(tag, &bitmap, hba->nutmrs) {
476 tmrdp = &hba->utmrdl_base_addr[tag];
477 dev_err(hba->dev, "TM[%d] - Task Management Header\n", tag);
478 ufshcd_hex_dump("TM TRD: ", &tmrdp->header,
479 sizeof(struct request_desc_header));
480 dev_err(hba->dev, "TM[%d] - Task Management Request UPIU\n",
481 tag);
482 ufshcd_hex_dump("TM REQ: ", tmrdp->task_req_upiu,
483 sizeof(struct utp_upiu_req));
484 dev_err(hba->dev, "TM[%d] - Task Management Response UPIU\n",
485 tag);
486 ufshcd_hex_dump("TM RSP: ", tmrdp->task_rsp_upiu,
487 sizeof(struct utp_task_req_desc));
488 }
489 }
490
491 static void ufshcd_print_host_state(struct ufs_hba *hba)
492 {
493 dev_err(hba->dev, "UFS Host state=%d\n", hba->ufshcd_state);
494 dev_err(hba->dev, "lrb in use=0x%lx, outstanding reqs=0x%lx tasks=0x%lx\n",
495 hba->lrb_in_use, hba->outstanding_reqs, hba->outstanding_tasks);
496 dev_err(hba->dev, "saved_err=0x%x, saved_uic_err=0x%x\n",
497 hba->saved_err, hba->saved_uic_err);
498 dev_err(hba->dev, "Device power mode=%d, UIC link state=%d\n",
499 hba->curr_dev_pwr_mode, hba->uic_link_state);
500 dev_err(hba->dev, "PM in progress=%d, sys. suspended=%d\n",
501 hba->pm_op_in_progress, hba->is_sys_suspended);
502 dev_err(hba->dev, "Auto BKOPS=%d, Host self-block=%d\n",
503 hba->auto_bkops_enabled, hba->host->host_self_blocked);
504 dev_err(hba->dev, "Clk gate=%d\n", hba->clk_gating.state);
505 dev_err(hba->dev, "error handling flags=0x%x, req. abort count=%d\n",
506 hba->eh_flags, hba->req_abort_count);
507 dev_err(hba->dev, "Host capabilities=0x%x, caps=0x%x\n",
508 hba->capabilities, hba->caps);
509 dev_err(hba->dev, "quirks=0x%x, dev. quirks=0x%x\n", hba->quirks,
510 hba->dev_quirks);
511 }
512
513 /**
514 * ufshcd_print_pwr_info - print power params as saved in hba
515 * power info
516 * @hba: per-adapter instance
517 */
518 static void ufshcd_print_pwr_info(struct ufs_hba *hba)
519 {
520 static const char * const names[] = {
521 "INVALID MODE",
522 "FAST MODE",
523 "SLOW_MODE",
524 "INVALID MODE",
525 "FASTAUTO_MODE",
526 "SLOWAUTO_MODE",
527 "INVALID MODE",
528 };
529
530 dev_err(hba->dev, "%s:[RX, TX]: gear=[%d, %d], lane[%d, %d], pwr[%s, %s], rate = %d\n",
531 __func__,
532 hba->pwr_info.gear_rx, hba->pwr_info.gear_tx,
533 hba->pwr_info.lane_rx, hba->pwr_info.lane_tx,
534 names[hba->pwr_info.pwr_rx],
535 names[hba->pwr_info.pwr_tx],
536 hba->pwr_info.hs_rate);
537 }
538
539 /*
540 * ufshcd_wait_for_register - wait for register value to change
541 * @hba - per-adapter instance
542 * @reg - mmio register offset
543 * @mask - mask to apply to read register value
544 * @val - wait condition
545 * @interval_us - polling interval in microsecs
546 * @timeout_ms - timeout in millisecs
547 * @can_sleep - perform sleep or just spin
548 *
549 * Returns -ETIMEDOUT on error, zero on success
550 */
551 int ufshcd_wait_for_register(struct ufs_hba *hba, u32 reg, u32 mask,
552 u32 val, unsigned long interval_us,
553 unsigned long timeout_ms, bool can_sleep)
554 {
555 int err = 0;
556 unsigned long timeout = jiffies + msecs_to_jiffies(timeout_ms);
557
558 /* ignore bits that we don't intend to wait on */
559 val = val & mask;
560
561 while ((ufshcd_readl(hba, reg) & mask) != val) {
562 if (can_sleep)
563 usleep_range(interval_us, interval_us + 50);
564 else
565 udelay(interval_us);
566 if (time_after(jiffies, timeout)) {
567 if ((ufshcd_readl(hba, reg) & mask) != val)
568 err = -ETIMEDOUT;
569 break;
570 }
571 }
572
573 return err;
574 }
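
/*
 * Illustrative usage: polling a transfer request doorbell bit until the
 * controller clears it, e.g.
 *
 *	u32 mask = 1U << tag;
 *	err = ufshcd_wait_for_register(hba,
 *			REG_UTP_TRANSFER_REQ_DOOR_BELL,
 *			mask, ~mask, 1000, 1000, true);
 *
 * waits up to 1000 ms, checking every 1000 us, for the tag's doorbell
 * bit to read back as zero.
 */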
575
576 /**
577 * ufshcd_get_intr_mask - Get the interrupt bit mask
578 * @hba - Pointer to adapter instance
579 *
580 * Returns interrupt bit mask per version
581 */
582 static inline u32 ufshcd_get_intr_mask(struct ufs_hba *hba)
583 {
584 u32 intr_mask = 0;
585
586 switch (hba->ufs_version) {
587 case UFSHCI_VERSION_10:
588 intr_mask = INTERRUPT_MASK_ALL_VER_10;
589 break;
590 case UFSHCI_VERSION_11:
591 case UFSHCI_VERSION_20:
592 intr_mask = INTERRUPT_MASK_ALL_VER_11;
593 break;
594 case UFSHCI_VERSION_21:
595 default:
596 intr_mask = INTERRUPT_MASK_ALL_VER_21;
597 break;
598 }
599
600 return intr_mask;
601 }
602
603 /**
604 * ufshcd_get_ufs_version - Get the UFS version supported by the HBA
605 * @hba - Pointer to adapter instance
606 *
607 * Returns UFSHCI version supported by the controller
608 */
609 static inline u32 ufshcd_get_ufs_version(struct ufs_hba *hba)
610 {
611 if (hba->quirks & UFSHCD_QUIRK_BROKEN_UFS_HCI_VERSION)
612 return ufshcd_vops_get_ufs_hci_version(hba);
613
614 return ufshcd_readl(hba, REG_UFS_VERSION);
615 }
616
617 /**
618 * ufshcd_is_device_present - Check if any device is connected to
619 * the host controller
620 * @hba: pointer to adapter instance
621 *
622 * Returns true if device present, false if no device detected
623 */
624 static inline bool ufshcd_is_device_present(struct ufs_hba *hba)
625 {
626 return (ufshcd_readl(hba, REG_CONTROLLER_STATUS) &
627 DEVICE_PRESENT) ? true : false;
628 }
629
630 /**
631 * ufshcd_get_tr_ocs - Get the UTRD Overall Command Status
632 * @lrb: pointer to local command reference block
633 *
634 * This function is used to get the OCS field from UTRD
635 * Returns the OCS field in the UTRD
636 */
637 static inline int ufshcd_get_tr_ocs(struct ufshcd_lrb *lrbp)
638 {
639 return le32_to_cpu(lrbp->utr_descriptor_ptr->header.dword_2) & MASK_OCS;
640 }
641
642 /**
643 * ufshcd_get_tmr_ocs - Get the UTMRD Overall Command Status
644 * @task_req_descp: pointer to utp_task_req_desc structure
645 *
646 * This function is used to get the OCS field from UTMRD
647 * Returns the OCS field in the UTMRD
648 */
649 static inline int
650 ufshcd_get_tmr_ocs(struct utp_task_req_desc *task_req_descp)
651 {
652 return le32_to_cpu(task_req_descp->header.dword_2) & MASK_OCS;
653 }
654
655 /**
656 * ufshcd_get_tm_free_slot - get a free slot for task management request
657 * @hba: per adapter instance
658 * @free_slot: pointer to variable with available slot value
659 *
660 * Get a free tag and lock it until ufshcd_put_tm_slot() is called.
661 * Returns false if a free slot is not available, else returns true with the
662 * tag value in @free_slot.
663 */
664 static bool ufshcd_get_tm_free_slot(struct ufs_hba *hba, int *free_slot)
665 {
666 int tag;
667 bool ret = false;
668
669 if (!free_slot)
670 goto out;
671
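/*
 * Find a clear bit and claim it atomically; if another context grabs
 * the same slot first, test_and_set_bit_lock() fails and the search
 * is retried.
 */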
672 do {
673 tag = find_first_zero_bit(&hba->tm_slots_in_use, hba->nutmrs);
674 if (tag >= hba->nutmrs)
675 goto out;
676 } while (test_and_set_bit_lock(tag, &hba->tm_slots_in_use));
677
678 *free_slot = tag;
679 ret = true;
680 out:
681 return ret;
682 }
683
684 static inline void ufshcd_put_tm_slot(struct ufs_hba *hba, int slot)
685 {
686 clear_bit_unlock(slot, &hba->tm_slots_in_use);
687 }
688
689 /**
690 * ufshcd_utrl_clear - Clear a bit in UTRLCLR register
691 * @hba: per adapter instance
692 * @pos: position of the bit to be cleared
693 */
694 static inline void ufshcd_utrl_clear(struct ufs_hba *hba, u32 pos)
695 {
696 u32 clear;
697
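/*
 * Per UFSHCI, a slot in UTRLCLR is cleared by writing '0' to its bit
 * position (hence the ~(1 << pos) mask); controllers with the
 * UFSHCD_QUIRK_BROKEN_REQ_LIST_CLR quirk expect a '1' instead.
 */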
698 if (hba->quirks & UFSHCD_QUIRK_BROKEN_REQ_LIST_CLR)
699 clear = (1 << pos);
700 else
701 clear = ~(1 << pos);
702
703 ufshcd_writel(hba, clear, REG_UTP_TRANSFER_REQ_LIST_CLEAR);
704 }
705
706 /**
707 * ufshcd_utmrl_clear - Clear a bit in UTMRLCLR register
708 * @hba: per adapter instance
709 * @pos: position of the bit to be cleared
710 */
711 static inline void ufshcd_utmrl_clear(struct ufs_hba *hba, u32 pos)
712 {
713 u32 clear;
714
715 if (hba->quirks & UFSHCD_QUIRK_BROKEN_REQ_LIST_CLR)
716 clear = (1 << pos);
717 else
718 clear = ~(1 << pos);
719
720 ufshcd_writel(hba, clear, REG_UTP_TASK_REQ_LIST_CLEAR);
721 }
722
723 /**
724 * ufshcd_outstanding_req_clear - Clear a bit in outstanding request field
725 * @hba: per adapter instance
726 * @tag: position of the bit to be cleared
727 */
728 static inline void ufshcd_outstanding_req_clear(struct ufs_hba *hba, int tag)
729 {
730 __clear_bit(tag, &hba->outstanding_reqs);
731 }
732
733 /**
734 * ufshcd_get_lists_status - Check UCRDY, UTRLRDY and UTMRLRDY
735 * @reg: Register value of host controller status
736 *
737 * Returns 0 on success and a positive value on failure
738 */
739 static inline int ufshcd_get_lists_status(u32 reg)
740 {
741 /*
742 * The mask 0xFF is for the following HCS register bits
743 * Bit Description
744 * 0 Device Present
745 * 1 UTRLRDY
746 * 2 UTMRLRDY
747 * 3 UCRDY
748 * 4-7 reserved
749 */
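/*
 * Shift out the Device Present bit and XOR the ready bits with 0x07:
 * the result is 0 only when UTRLRDY, UTMRLRDY and UCRDY are all set.
 */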
750 return ((reg & 0xFF) >> 1) ^ 0x07;
751 }
752
753 /**
754 * ufshcd_get_uic_cmd_result - Get the UIC command result
755 * @hba: Pointer to adapter instance
756 *
757 * This function gets the result of UIC command completion.
758 * Returns 0 on success, a non-zero value on error.
759 */
760 static inline int ufshcd_get_uic_cmd_result(struct ufs_hba *hba)
761 {
762 return ufshcd_readl(hba, REG_UIC_COMMAND_ARG_2) &
763 MASK_UIC_COMMAND_RESULT;
764 }
765
766 /**
767 * ufshcd_get_dme_attr_val - Get the value of attribute returned by UIC command
768 * @hba: Pointer to adapter instance
769 *
770 * This function reads UIC command argument 3.
771 * Returns the attribute value returned by the UIC command.
772 */
773 static inline u32 ufshcd_get_dme_attr_val(struct ufs_hba *hba)
774 {
775 return ufshcd_readl(hba, REG_UIC_COMMAND_ARG_3);
776 }
777
778 /**
779 * ufshcd_get_req_rsp - returns the TR response transaction type
780 * @ucd_rsp_ptr: pointer to response UPIU
781 */
782 static inline int
783 ufshcd_get_req_rsp(struct utp_upiu_rsp *ucd_rsp_ptr)
784 {
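/* The transaction type is in the most significant byte of dword_0 */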
785 return be32_to_cpu(ucd_rsp_ptr->header.dword_0) >> 24;
786 }
787
788 /**
789 * ufshcd_get_rsp_upiu_result - Get the result from response UPIU
790 * @ucd_rsp_ptr: pointer to response UPIU
791 *
792 * This function gets the response status and scsi_status from response UPIU
793 * Returns the response result code.
794 */
795 static inline int
796 ufshcd_get_rsp_upiu_result(struct utp_upiu_rsp *ucd_rsp_ptr)
797 {
798 return be32_to_cpu(ucd_rsp_ptr->header.dword_1) & MASK_RSP_UPIU_RESULT;
799 }
800
801 /*
802 * ufshcd_get_rsp_upiu_data_seg_len - Get the data segment length
803 * from response UPIU
804 * @ucd_rsp_ptr: pointer to response UPIU
805 *
806 * Return the data segment length.
807 */
808 static inline unsigned int
809 ufshcd_get_rsp_upiu_data_seg_len(struct utp_upiu_rsp *ucd_rsp_ptr)
810 {
811 return be32_to_cpu(ucd_rsp_ptr->header.dword_2) &
812 MASK_RSP_UPIU_DATA_SEG_LEN;
813 }
814
815 /**
816 * ufshcd_is_exception_event - Check if the device raised an exception event
817 * @ucd_rsp_ptr: pointer to response UPIU
818 *
819 * The function checks if the device raised an exception event indicated in
820 * the Device Information field of response UPIU.
821 *
822 * Returns true if exception is raised, false otherwise.
823 */
824 static inline bool ufshcd_is_exception_event(struct utp_upiu_rsp *ucd_rsp_ptr)
825 {
826 return be32_to_cpu(ucd_rsp_ptr->header.dword_2) &
827 MASK_RSP_EXCEPTION_EVENT ? true : false;
828 }
829
830 /**
831 * ufshcd_reset_intr_aggr - Reset interrupt aggregation values.
832 * @hba: per adapter instance
833 */
834 static inline void
835 ufshcd_reset_intr_aggr(struct ufs_hba *hba)
836 {
837 ufshcd_writel(hba, INT_AGGR_ENABLE |
838 INT_AGGR_COUNTER_AND_TIMER_RESET,
839 REG_UTP_TRANSFER_REQ_INT_AGG_CONTROL);
840 }
841
842 /**
843 * ufshcd_config_intr_aggr - Configure interrupt aggregation values.
844 * @hba: per adapter instance
845 * @cnt: Interrupt aggregation counter threshold
846 * @tmout: Interrupt aggregation timeout value
847 */
848 static inline void
849 ufshcd_config_intr_aggr(struct ufs_hba *hba, u8 cnt, u8 tmout)
850 {
851 ufshcd_writel(hba, INT_AGGR_ENABLE | INT_AGGR_PARAM_WRITE |
852 INT_AGGR_COUNTER_THLD_VAL(cnt) |
853 INT_AGGR_TIMEOUT_VAL(tmout),
854 REG_UTP_TRANSFER_REQ_INT_AGG_CONTROL);
855 }
856
857 /**
858 * ufshcd_disable_intr_aggr - Disables interrupt aggregation.
859 * @hba: per adapter instance
860 */
861 static inline void ufshcd_disable_intr_aggr(struct ufs_hba *hba)
862 {
863 ufshcd_writel(hba, 0, REG_UTP_TRANSFER_REQ_INT_AGG_CONTROL);
864 }
865
866 /**
867 * ufshcd_enable_run_stop_reg - Enable run-stop registers.
868 * When the run-stop registers are set to 1, it indicates to the
869 * host controller that it can process the requests.
870 * @hba: per adapter instance
871 */
872 static void ufshcd_enable_run_stop_reg(struct ufs_hba *hba)
873 {
874 ufshcd_writel(hba, UTP_TASK_REQ_LIST_RUN_STOP_BIT,
875 REG_UTP_TASK_REQ_LIST_RUN_STOP);
876 ufshcd_writel(hba, UTP_TRANSFER_REQ_LIST_RUN_STOP_BIT,
877 REG_UTP_TRANSFER_REQ_LIST_RUN_STOP);
878 }
879
880 /**
881 * ufshcd_hba_start - Start controller initialization sequence
882 * @hba: per adapter instance
883 */
884 static inline void ufshcd_hba_start(struct ufs_hba *hba)
885 {
886 ufshcd_writel(hba, CONTROLLER_ENABLE, REG_CONTROLLER_ENABLE);
887 }
888
889 /**
890 * ufshcd_is_hba_active - Get controller state
891 * @hba: per adapter instance
892 *
893 * Returns false if controller is active, true otherwise
894 */
895 static inline bool ufshcd_is_hba_active(struct ufs_hba *hba)
896 {
897 return (ufshcd_readl(hba, REG_CONTROLLER_ENABLE) & CONTROLLER_ENABLE)
898 ? false : true;
899 }
900
901 static const char *ufschd_uic_link_state_to_string(
902 enum uic_link_state state)
903 {
904 switch (state) {
905 case UIC_LINK_OFF_STATE: return "OFF";
906 case UIC_LINK_ACTIVE_STATE: return "ACTIVE";
907 case UIC_LINK_HIBERN8_STATE: return "HIBERN8";
908 default: return "UNKNOWN";
909 }
910 }
911
912 static const char *ufschd_ufs_dev_pwr_mode_to_string(
913 enum ufs_dev_pwr_mode state)
914 {
915 switch (state) {
916 case UFS_ACTIVE_PWR_MODE: return "ACTIVE";
917 case UFS_SLEEP_PWR_MODE: return "SLEEP";
918 case UFS_POWERDOWN_PWR_MODE: return "POWERDOWN";
919 default: return "UNKNOWN";
920 }
921 }
922
923 u32 ufshcd_get_local_unipro_ver(struct ufs_hba *hba)
924 {
925 /* HCI version 1.0 and 1.1 support UniPro 1.41 */
926 if ((hba->ufs_version == UFSHCI_VERSION_10) ||
927 (hba->ufs_version == UFSHCI_VERSION_11))
928 return UFS_UNIPRO_VER_1_41;
929 else
930 return UFS_UNIPRO_VER_1_6;
931 }
932 EXPORT_SYMBOL(ufshcd_get_local_unipro_ver);
933
934 static bool ufshcd_is_unipro_pa_params_tuning_req(struct ufs_hba *hba)
935 {
936 /*
937 * If both host and device support UniPro ver1.6 or later, PA layer
938 * parameters tuning happens during link startup itself.
939 *
940 * We can manually tune PA layer parameters if either host or device
941 * doesn't support UniPro ver 1.6 or later. But to keep manual tuning
942 * logic simple, we will only do manual tuning if local unipro version
943 * doesn't support ver1.6 or later.
944 */
945 if (ufshcd_get_local_unipro_ver(hba) < UFS_UNIPRO_VER_1_6)
946 return true;
947 else
948 return false;
949 }
950
951 #if defined(CONFIG_PM_DEVFREQ)
952 static int ufshcd_scale_clks(struct ufs_hba *hba, bool scale_up)
953 {
954 int ret = 0;
955 struct ufs_clk_info *clki;
956 struct list_head *head = &hba->clk_list_head;
957 ktime_t start = ktime_get();
958 bool clk_state_changed = false;
959
960 if (list_empty(head))
961 goto out;
962
963 ret = ufshcd_vops_clk_scale_notify(hba, scale_up, PRE_CHANGE);
964 if (ret)
965 return ret;
966
967 list_for_each_entry(clki, head, list) {
968 if (!IS_ERR_OR_NULL(clki->clk)) {
969 if (scale_up && clki->max_freq) {
970 if (clki->curr_freq == clki->max_freq)
971 continue;
972
973 clk_state_changed = true;
974 ret = clk_set_rate(clki->clk, clki->max_freq);
975 if (ret) {
976 dev_err(hba->dev, "%s: %s clk set rate(%dHz) failed, %d\n",
977 __func__, clki->name,
978 clki->max_freq, ret);
979 break;
980 }
981 trace_ufshcd_clk_scaling(dev_name(hba->dev),
982 "scaled up", clki->name,
983 clki->curr_freq,
984 clki->max_freq);
985
986 clki->curr_freq = clki->max_freq;
987
988 } else if (!scale_up && clki->min_freq) {
989 if (clki->curr_freq == clki->min_freq)
990 continue;
991
992 clk_state_changed = true;
993 ret = clk_set_rate(clki->clk, clki->min_freq);
994 if (ret) {
995 dev_err(hba->dev, "%s: %s clk set rate(%dHz) failed, %d\n",
996 __func__, clki->name,
997 clki->min_freq, ret);
998 break;
999 }
1000 trace_ufshcd_clk_scaling(dev_name(hba->dev),
1001 "scaled down", clki->name,
1002 clki->curr_freq,
1003 clki->min_freq);
1004 clki->curr_freq = clki->min_freq;
1005 }
1006 }
1007 dev_dbg(hba->dev, "%s: clk: %s, rate: %lu\n", __func__,
1008 clki->name, clk_get_rate(clki->clk));
1009 }
1010
1011 ret = ufshcd_vops_clk_scale_notify(hba, scale_up, POST_CHANGE);
1012
1013 out:
1014 if (clk_state_changed)
1015 trace_ufshcd_profile_clk_scaling(dev_name(hba->dev),
1016 (scale_up ? "up" : "down"),
1017 ktime_to_us(ktime_sub(ktime_get(), start)), ret);
1018 return ret;
1019 }
1020
1021 /**
1022 * ufshcd_is_devfreq_scaling_required - check if scaling is required or not
1023 * @hba: per adapter instance
1024 * @scale_up: True if scaling up and false if scaling down
1025 *
1026 * Returns true if scaling is required, false otherwise.
1027 */
1028 static bool ufshcd_is_devfreq_scaling_required(struct ufs_hba *hba,
1029 bool scale_up)
1030 {
1031 struct ufs_clk_info *clki;
1032 struct list_head *head = &hba->clk_list_head;
1033
1034 if (list_empty(head))
1035 return false;
1036
1037 list_for_each_entry(clki, head, list) {
1038 if (!IS_ERR_OR_NULL(clki->clk)) {
1039 if (scale_up && clki->max_freq) {
1040 if (clki->curr_freq == clki->max_freq)
1041 continue;
1042 return true;
1043 } else if (!scale_up && clki->min_freq) {
1044 if (clki->curr_freq == clki->min_freq)
1045 continue;
1046 return true;
1047 }
1048 }
1049 }
1050
1051 return false;
1052 }
1053
1054 static int ufshcd_wait_for_doorbell_clr(struct ufs_hba *hba,
1055 u64 wait_timeout_us)
1056 {
1057 unsigned long flags;
1058 int ret = 0;
1059 u32 tm_doorbell;
1060 u32 tr_doorbell;
1061 bool timeout = false, do_last_check = false;
1062 ktime_t start;
1063
1064 ufshcd_hold(hba, false);
1065 spin_lock_irqsave(hba->host->host_lock, flags);
1066 /*
1067 * Wait for all the outstanding tasks/transfer requests.
1068 * Verify by checking the doorbell registers are clear.
1069 */
1070 start = ktime_get();
1071 do {
1072 if (hba->ufshcd_state != UFSHCD_STATE_OPERATIONAL) {
1073 ret = -EBUSY;
1074 goto out;
1075 }
1076
1077 tm_doorbell = ufshcd_readl(hba, REG_UTP_TASK_REQ_DOOR_BELL);
1078 tr_doorbell = ufshcd_readl(hba, REG_UTP_TRANSFER_REQ_DOOR_BELL);
1079 if (!tm_doorbell && !tr_doorbell) {
1080 timeout = false;
1081 break;
1082 } else if (do_last_check) {
1083 break;
1084 }
1085
1086 spin_unlock_irqrestore(hba->host->host_lock, flags);
1087 schedule();
1088 if (ktime_to_us(ktime_sub(ktime_get(), start)) >
1089 wait_timeout_us) {
1090 timeout = true;
1091 /*
1092 * We might have been scheduled out for a long time, so
1093 * make sure to check whether the doorbells have cleared
1094 * by now.
1095 */
1096 do_last_check = true;
1097 }
1098 spin_lock_irqsave(hba->host->host_lock, flags);
1099 } while (tm_doorbell || tr_doorbell);
1100
1101 if (timeout) {
1102 dev_err(hba->dev,
1103 "%s: timedout waiting for doorbell to clear (tm=0x%x, tr=0x%x)\n",
1104 __func__, tm_doorbell, tr_doorbell);
1105 ret = -EBUSY;
1106 }
1107 out:
1108 spin_unlock_irqrestore(hba->host->host_lock, flags);
1109 ufshcd_release(hba);
1110 return ret;
1111 }
1112
1113 /**
1114 * ufshcd_scale_gear - scale up/down UFS gear
1115 * @hba: per adapter instance
1116 * @scale_up: True for scaling up gear and false for scaling down
1117 *
1118 * Returns 0 for success,
1119 * Returns -EBUSY if scaling can't happen at this time
1120 * Returns non-zero for any other errors
1121 */
1122 static int ufshcd_scale_gear(struct ufs_hba *hba, bool scale_up)
1123 {
1124 #define UFS_MIN_GEAR_TO_SCALE_DOWN UFS_HS_G1
1125 int ret = 0;
1126 struct ufs_pa_layer_attr new_pwr_info;
1127
1128 if (scale_up) {
1129 memcpy(&new_pwr_info, &hba->clk_scaling.saved_pwr_info.info,
1130 sizeof(struct ufs_pa_layer_attr));
1131 } else {
1132 memcpy(&new_pwr_info, &hba->pwr_info,
1133 sizeof(struct ufs_pa_layer_attr));
1134
1135 if (hba->pwr_info.gear_tx > UFS_MIN_GEAR_TO_SCALE_DOWN
1136 || hba->pwr_info.gear_rx > UFS_MIN_GEAR_TO_SCALE_DOWN) {
1137 /* save the current power mode */
1138 memcpy(&hba->clk_scaling.saved_pwr_info.info,
1139 &hba->pwr_info,
1140 sizeof(struct ufs_pa_layer_attr));
1141
1142 /* scale down gear */
1143 new_pwr_info.gear_tx = UFS_MIN_GEAR_TO_SCALE_DOWN;
1144 new_pwr_info.gear_rx = UFS_MIN_GEAR_TO_SCALE_DOWN;
1145 }
1146 }
1147
1148 /* check if the power mode needs to be changed */
1149 ret = ufshcd_change_power_mode(hba, &new_pwr_info);
1150
1151 if (ret)
1152 dev_err(hba->dev, "%s: failed err %d, old gear: (tx %d rx %d), new gear: (tx %d rx %d)",
1153 __func__, ret,
1154 hba->pwr_info.gear_tx, hba->pwr_info.gear_rx,
1155 new_pwr_info.gear_tx, new_pwr_info.gear_rx);
1156
1157 return ret;
1158 }
1159
1160 static int ufshcd_clock_scaling_prepare(struct ufs_hba *hba)
1161 {
1162 #define DOORBELL_CLR_TOUT_US (1000 * 1000) /* 1 sec */
1163 int ret = 0;
1164 /*
1165 * make sure that there are no outstanding requests when
1166 * clock scaling is in progress
1167 */
1168 scsi_block_requests(hba->host);
1169 down_write(&hba->clk_scaling_lock);
1170 if (ufshcd_wait_for_doorbell_clr(hba, DOORBELL_CLR_TOUT_US)) {
1171 ret = -EBUSY;
1172 up_write(&hba->clk_scaling_lock);
1173 scsi_unblock_requests(hba->host);
1174 }
1175
1176 return ret;
1177 }
1178
1179 static void ufshcd_clock_scaling_unprepare(struct ufs_hba *hba)
1180 {
1181 up_write(&hba->clk_scaling_lock);
1182 scsi_unblock_requests(hba->host);
1183 }
1184
1185 /**
1186 * ufshcd_devfreq_scale - scale up/down UFS clocks and gear
1187 * @hba: per adapter instance
1188 * @scale_up: True for scaling up and false for scaling down
1189 *
1190 * Returns 0 for success,
1191 * Returns -EBUSY if scaling can't happen at this time
1192 * Returns non-zero for any other errors
1193 */
1194 static int ufshcd_devfreq_scale(struct ufs_hba *hba, bool scale_up)
1195 {
1196 int ret = 0;
1197
1198 /* let's not get into low power until clock scaling is completed */
1199 ufshcd_hold(hba, false);
1200
1201 ret = ufshcd_clock_scaling_prepare(hba);
1202 if (ret)
1203 return ret;
1204
1205 /* scale down the gear before scaling down clocks */
1206 if (!scale_up) {
1207 ret = ufshcd_scale_gear(hba, false);
1208 if (ret)
1209 goto out;
1210 }
1211
1212 ret = ufshcd_scale_clks(hba, scale_up);
1213 if (ret) {
1214 if (!scale_up)
1215 ufshcd_scale_gear(hba, true);
1216 goto out;
1217 }
1218
1219 /* scale up the gear after scaling up clocks */
1220 if (scale_up) {
1221 ret = ufshcd_scale_gear(hba, true);
1222 if (ret) {
1223 ufshcd_scale_clks(hba, false);
1224 goto out;
1225 }
1226 }
1227
1228 ret = ufshcd_vops_clk_scale_notify(hba, scale_up, POST_CHANGE);
1229
1230 out:
1231 ufshcd_clock_scaling_unprepare(hba);
1232 ufshcd_release(hba);
1233 return ret;
1234 }
1235
1236 static void ufshcd_clk_scaling_suspend_work(struct work_struct *work)
1237 {
1238 struct ufs_hba *hba = container_of(work, struct ufs_hba,
1239 clk_scaling.suspend_work);
1240 unsigned long irq_flags;
1241
1242 spin_lock_irqsave(hba->host->host_lock, irq_flags);
1243 if (hba->clk_scaling.active_reqs || hba->clk_scaling.is_suspended) {
1244 spin_unlock_irqrestore(hba->host->host_lock, irq_flags);
1245 return;
1246 }
1247 hba->clk_scaling.is_suspended = true;
1248 spin_unlock_irqrestore(hba->host->host_lock, irq_flags);
1249
1250 __ufshcd_suspend_clkscaling(hba);
1251 }
1252
1253 static void ufshcd_clk_scaling_resume_work(struct work_struct *work)
1254 {
1255 struct ufs_hba *hba = container_of(work, struct ufs_hba,
1256 clk_scaling.resume_work);
1257 unsigned long irq_flags;
1258
1259 spin_lock_irqsave(hba->host->host_lock, irq_flags);
1260 if (!hba->clk_scaling.is_suspended) {
1261 spin_unlock_irqrestore(hba->host->host_lock, irq_flags);
1262 return;
1263 }
1264 hba->clk_scaling.is_suspended = false;
1265 spin_unlock_irqrestore(hba->host->host_lock, irq_flags);
1266
1267 devfreq_resume_device(hba->devfreq);
1268 }
1269
1270 static int ufshcd_devfreq_target(struct device *dev,
1271 unsigned long *freq, u32 flags)
1272 {
1273 int ret = 0;
1274 struct ufs_hba *hba = dev_get_drvdata(dev);
1275 ktime_t start;
1276 bool scale_up, sched_clk_scaling_suspend_work = false;
1277 unsigned long irq_flags;
1278
1279 if (!ufshcd_is_clkscaling_supported(hba))
1280 return -EINVAL;
1281
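/*
 * This driver only scales between two operating points, so devfreq is
 * expected to request either 0 (scale down) or UINT_MAX (scale up);
 * any intermediate frequency is rejected as invalid below.
 */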
1282 if ((*freq > 0) && (*freq < UINT_MAX)) {
1283 dev_err(hba->dev, "%s: invalid freq = %lu\n", __func__, *freq);
1284 return -EINVAL;
1285 }
1286
1287 spin_lock_irqsave(hba->host->host_lock, irq_flags);
1288 if (ufshcd_eh_in_progress(hba)) {
1289 spin_unlock_irqrestore(hba->host->host_lock, irq_flags);
1290 return 0;
1291 }
1292
1293 if (!hba->clk_scaling.active_reqs)
1294 sched_clk_scaling_suspend_work = true;
1295
1296 scale_up = (*freq == UINT_MAX) ? true : false;
1297 if (!ufshcd_is_devfreq_scaling_required(hba, scale_up)) {
1298 spin_unlock_irqrestore(hba->host->host_lock, irq_flags);
1299 ret = 0;
1300 goto out; /* no state change required */
1301 }
1302 spin_unlock_irqrestore(hba->host->host_lock, irq_flags);
1303
1304 start = ktime_get();
1305 ret = ufshcd_devfreq_scale(hba, scale_up);
1306
1307 trace_ufshcd_profile_clk_scaling(dev_name(hba->dev),
1308 (scale_up ? "up" : "down"),
1309 ktime_to_us(ktime_sub(ktime_get(), start)), ret);
1310
1311 out:
1312 if (sched_clk_scaling_suspend_work)
1313 queue_work(hba->clk_scaling.workq,
1314 &hba->clk_scaling.suspend_work);
1315
1316 return ret;
1317 }
1318
1319
1320 static int ufshcd_devfreq_get_dev_status(struct device *dev,
1321 struct devfreq_dev_status *stat)
1322 {
1323 struct ufs_hba *hba = dev_get_drvdata(dev);
1324 struct ufs_clk_scaling *scaling = &hba->clk_scaling;
1325 unsigned long flags;
1326
1327 if (!ufshcd_is_clkscaling_supported(hba))
1328 return -EINVAL;
1329
1330 memset(stat, 0, sizeof(*stat));
1331
1332 spin_lock_irqsave(hba->host->host_lock, flags);
1333 if (!scaling->window_start_t)
1334 goto start_window;
1335
1336 if (scaling->is_busy_started)
1337 scaling->tot_busy_t += ktime_to_us(ktime_sub(ktime_get(),
1338 scaling->busy_start_t));
1339
1340 stat->total_time = jiffies_to_usecs((long)jiffies -
1341 (long)scaling->window_start_t);
1342 stat->busy_time = scaling->tot_busy_t;
1343 start_window:
1344 scaling->window_start_t = jiffies;
1345 scaling->tot_busy_t = 0;
1346
1347 if (hba->outstanding_reqs) {
1348 scaling->busy_start_t = ktime_get();
1349 scaling->is_busy_started = true;
1350 } else {
1351 scaling->busy_start_t = 0;
1352 scaling->is_busy_started = false;
1353 }
1354 spin_unlock_irqrestore(hba->host->host_lock, flags);
1355 return 0;
1356 }
1357
1358 static struct devfreq_dev_profile ufs_devfreq_profile = {
1359 .polling_ms = 100,
1360 .target = ufshcd_devfreq_target,
1361 .get_dev_status = ufshcd_devfreq_get_dev_status,
1362 };
1363
1364 static void __ufshcd_suspend_clkscaling(struct ufs_hba *hba)
1365 {
1366 unsigned long flags;
1367
1368 devfreq_suspend_device(hba->devfreq);
1369 spin_lock_irqsave(hba->host->host_lock, flags);
1370 hba->clk_scaling.window_start_t = 0;
1371 spin_unlock_irqrestore(hba->host->host_lock, flags);
1372 }
1373
1374 static void ufshcd_suspend_clkscaling(struct ufs_hba *hba)
1375 {
1376 unsigned long flags;
1377 bool suspend = false;
1378
1379 if (!ufshcd_is_clkscaling_supported(hba))
1380 return;
1381
1382 spin_lock_irqsave(hba->host->host_lock, flags);
1383 if (!hba->clk_scaling.is_suspended) {
1384 suspend = true;
1385 hba->clk_scaling.is_suspended = true;
1386 }
1387 spin_unlock_irqrestore(hba->host->host_lock, flags);
1388
1389 if (suspend)
1390 __ufshcd_suspend_clkscaling(hba);
1391 }
1392
1393 static void ufshcd_resume_clkscaling(struct ufs_hba *hba)
1394 {
1395 unsigned long flags;
1396 bool resume = false;
1397
1398 if (!ufshcd_is_clkscaling_supported(hba))
1399 return;
1400
1401 spin_lock_irqsave(hba->host->host_lock, flags);
1402 if (hba->clk_scaling.is_suspended) {
1403 resume = true;
1404 hba->clk_scaling.is_suspended = false;
1405 }
1406 spin_unlock_irqrestore(hba->host->host_lock, flags);
1407
1408 if (resume)
1409 devfreq_resume_device(hba->devfreq);
1410 }
1411
1412 static ssize_t ufshcd_clkscale_enable_show(struct device *dev,
1413 struct device_attribute *attr, char *buf)
1414 {
1415 struct ufs_hba *hba = dev_get_drvdata(dev);
1416
1417 return snprintf(buf, PAGE_SIZE, "%d\n", hba->clk_scaling.is_allowed);
1418 }
1419
1420 static ssize_t ufshcd_clkscale_enable_store(struct device *dev,
1421 struct device_attribute *attr, const char *buf, size_t count)
1422 {
1423 struct ufs_hba *hba = dev_get_drvdata(dev);
1424 u32 value;
1425 int err;
1426
1427 if (kstrtou32(buf, 0, &value))
1428 return -EINVAL;
1429
1430 value = !!value;
1431 if (value == hba->clk_scaling.is_allowed)
1432 goto out;
1433
1434 pm_runtime_get_sync(hba->dev);
1435 ufshcd_hold(hba, false);
1436
1437 cancel_work_sync(&hba->clk_scaling.suspend_work);
1438 cancel_work_sync(&hba->clk_scaling.resume_work);
1439
1440 hba->clk_scaling.is_allowed = value;
1441
1442 if (value) {
1443 ufshcd_resume_clkscaling(hba);
1444 } else {
1445 ufshcd_suspend_clkscaling(hba);
1446 err = ufshcd_devfreq_scale(hba, true);
1447 if (err)
1448 dev_err(hba->dev, "%s: failed to scale clocks up %d\n",
1449 __func__, err);
1450 }
1451
1452 ufshcd_release(hba);
1453 pm_runtime_put_sync(hba->dev);
1454 out:
1455 return count;
1456 }
1457
1458 static void ufshcd_clkscaling_init_sysfs(struct ufs_hba *hba)
1459 {
1460 hba->clk_scaling.enable_attr.show = ufshcd_clkscale_enable_show;
1461 hba->clk_scaling.enable_attr.store = ufshcd_clkscale_enable_store;
1462 sysfs_attr_init(&hba->clk_scaling.enable_attr.attr);
1463 hba->clk_scaling.enable_attr.attr.name = "clkscale_enable";
1464 hba->clk_scaling.enable_attr.attr.mode = 0644;
1465 if (device_create_file(hba->dev, &hba->clk_scaling.enable_attr))
1466 dev_err(hba->dev, "Failed to create sysfs for clkscale_enable\n");
1467 }
1468 #endif
1469
1470 static void ufshcd_ungate_work(struct work_struct *work)
1471 {
1472 int ret;
1473 unsigned long flags;
1474 struct ufs_hba *hba = container_of(work, struct ufs_hba,
1475 clk_gating.ungate_work);
1476 bool gating_allowed = !ufshcd_can_fake_clkgating(hba);
1477
1478 cancel_delayed_work_sync(&hba->clk_gating.gate_work);
1479
1480 spin_lock_irqsave(hba->host->host_lock, flags);
1481 if (hba->clk_gating.state == CLKS_ON && gating_allowed) {
1482 spin_unlock_irqrestore(hba->host->host_lock, flags);
1483 goto unblock_reqs;
1484 }
1485
1486 spin_unlock_irqrestore(hba->host->host_lock, flags);
1487 if (gating_allowed) {
1488 ufshcd_setup_clocks(hba, true);
1489 } else {
1490 spin_lock_irqsave(hba->host->host_lock, flags);
1491 hba->clk_gating.state = CLKS_ON;
1492 spin_unlock_irqrestore(hba->host->host_lock, flags);
1493 }
1494
1495 /* Exit from hibern8 */
1496 if (ufshcd_can_hibern8_during_gating(hba)) {
1497 /* Prevent gating in this path */
1498 hba->clk_gating.is_suspended = true;
1499 if (ufshcd_is_link_hibern8(hba)) {
1500 ufshcd_set_link_trans_active(hba);
1501 ret = ufshcd_link_hibern8_ctrl(hba, false);
1502 if (ret) {
1503 ufshcd_set_link_off(hba);
1504 dev_err(hba->dev, "%s: hibern8 exit failed %d\n",
1505 __func__, ret);
1506 } else {
1507 ufshcd_set_link_active(hba);
1508 }
1509 }
1510 hba->clk_gating.is_suspended = false;
1511 }
1512 unblock_reqs:
1513 scsi_unblock_requests(hba->host);
1514 }
1515
1516 /**
1517 * ufshcd_hold - Enable clocks that were gated earlier due to ufshcd_release.
1518 * Also, exit from hibern8 mode and set the link as active.
1519 * @hba: per adapter instance
1520 * @async: This indicates whether caller should ungate clocks asynchronously.
1521 */
1522 int ufshcd_hold(struct ufs_hba *hba, bool async)
1523 {
1524 int rc = 0;
1525 unsigned long flags;
1526
1527 if (!ufshcd_is_clkgating_allowed(hba))
1528 goto out;
1529 spin_lock_irqsave(hba->host->host_lock, flags);
1530 hba->clk_gating.active_reqs++;
1531
1532 if (ufshcd_eh_in_progress(hba)) {
1533 spin_unlock_irqrestore(hba->host->host_lock, flags);
1534 return 0;
1535 }
1536
1537 start:
1538 switch (hba->clk_gating.state) {
1539 case __CLKS_ON:
1540 rc = -EAGAIN;
1541 if (async)
1542 hba->clk_gating.active_reqs--;
1543 case CLKS_ON:
1544 break;
1545 case REQ_CLKS_OFF:
1546 if (cancel_delayed_work(&hba->clk_gating.gate_work)) {
1547 hba->clk_gating.state = CLKS_ON;
1548 trace_ufshcd_clk_gating(dev_name(hba->dev),
1549 hba->clk_gating.state);
1550 break;
1551 }
1552 /*
1553 * If we are here, it means gating work is either done or
1554 * currently running. Hence, fall through to cancel gating
1555 * work and to enable clocks.
1556 */
1557 case CLKS_OFF:
1558 scsi_block_requests(hba->host);
1559 hba->clk_gating.state = REQ_CLKS_ON;
1560 trace_ufshcd_clk_gating(dev_name(hba->dev),
1561 hba->clk_gating.state);
1562 queue_work(hba->ufshcd_workq, &hba->clk_gating.ungate_work);
1563 /*
1564 * fall through to check if we should wait for this
1565 * work to be done or not.
1566 */
1567 case REQ_CLKS_ON:
1568 if (async) {
1569 rc = -EAGAIN;
1570 hba->clk_gating.active_reqs--;
1571 break;
1572 }
1573
1574 spin_unlock_irqrestore(hba->host->host_lock, flags);
1575 flush_work(&hba->clk_gating.ungate_work);
1576 /* Make sure state is CLKS_ON before returning */
1577 spin_lock_irqsave(hba->host->host_lock, flags);
1578 goto start;
1579 default:
1580 dev_err(hba->dev, "%s: clk gating is in invalid state %d\n",
1581 __func__, hba->clk_gating.state);
1582 break;
1583 }
1584 spin_unlock_irqrestore(hba->host->host_lock, flags);
1585 out:
1586 return rc;
1587 }
1588 EXPORT_SYMBOL_GPL(ufshcd_hold);
1589
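/*
 * Clock gating state machine, as implemented below (summary):
 *
 *   CLKS_ON      - clocks running, requests can be issued
 *   REQ_CLKS_OFF - last reference dropped, gate_work queued after
 *                  clk_gating.delay_ms
 *   CLKS_OFF     - link put into hibern8 (if allowed) and clocks gated
 *   REQ_CLKS_ON  - ungate_work queued to restore clocks and the link
 *   __CLKS_ON    - hibern8 entry failed, recovery in progress
 *
 * ufshcd_hold()/ufshcd_release() take and drop references; gating only
 * starts once active_reqs reaches zero and nothing else is outstanding.
 */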
1590 static void ufshcd_gate_work(struct work_struct *work)
1591 {
1592 struct ufs_hba *hba = container_of(work, struct ufs_hba,
1593 clk_gating.gate_work.work);
1594 bool gating_allowed = !ufshcd_can_fake_clkgating(hba);
1595 unsigned long flags;
1596
1597 spin_lock_irqsave(hba->host->host_lock, flags);
1598 /*
1599 * In case you are here to cancel this work, the gating state
1600 * would be marked as REQ_CLKS_ON. In this case save time by
1601 * skipping the gating work and exiting after changing the clock
1602 * state to CLKS_ON.
1603 */
1604 if (hba->clk_gating.is_suspended ||
1605 (hba->clk_gating.state == REQ_CLKS_ON)) {
1606 hba->clk_gating.state = CLKS_ON;
1607 trace_ufshcd_clk_gating(dev_name(hba->dev),
1608 hba->clk_gating.state);
1609 goto rel_lock;
1610 }
1611
1612 if (hba->clk_gating.active_reqs
1613 || hba->ufshcd_state != UFSHCD_STATE_OPERATIONAL
1614 || hba->lrb_in_use || hba->outstanding_tasks
1615 || hba->active_uic_cmd || hba->uic_async_done
1616 || scsi_host_in_recovery(hba->host))
1617 goto rel_lock;
1618
1619 spin_unlock_irqrestore(hba->host->host_lock, flags);
1620
1621 /* put the link into hibern8 mode before turning off clocks */
1622 if (ufshcd_can_hibern8_during_gating(hba)) {
1623 ufshcd_set_link_trans_hibern8(hba);
1624 if (ufshcd_link_hibern8_ctrl(hba, true)) {
1625 spin_lock_irqsave(hba->host->host_lock, flags);
1626 hba->clk_gating.state = __CLKS_ON;
1627 spin_unlock_irqrestore(hba->host->host_lock, flags);
1628 hba->clk_gating.is_suspended = true;
1629 ufshcd_reset_and_restore(hba);
1630 spin_lock_irqsave(hba->host->host_lock, flags);
1631 hba->clk_gating.state = CLKS_ON;
1632 spin_unlock_irqrestore(hba->host->host_lock, flags);
1633 hba->clk_gating.is_suspended = false;
1634 scsi_unblock_requests(hba->host);
1635 trace_ufshcd_clk_gating(dev_name(hba->dev),
1636 hba->clk_gating.state);
1637 goto out;
1638 }
1639 ufshcd_set_link_hibern8(hba);
1640 }
1641
1642 if (gating_allowed) {
1643 if (!ufshcd_is_link_active(hba))
1644 ufshcd_setup_clocks(hba, false);
1645 else
1646 /* If link is active, device ref_clk can't be switched off */
1647 __ufshcd_setup_clocks(hba, false, true);
1648 }
1649
1650 /*
1651 * In case you are here to cancel this work, the gating state
1652 * would be marked as REQ_CLKS_ON. In this case keep the state
1653 * as REQ_CLKS_ON, which anyway implies that the clocks are off
1654 * and a request to turn them on is pending. This way we keep
1655 * the state machine intact and ultimately avoid running the
1656 * cancel work multiple times when new requests arrive before
1657 * the current cancel work is done.
1658 */
1659 spin_lock_irqsave(hba->host->host_lock, flags);
1660 if (hba->clk_gating.state == REQ_CLKS_OFF) {
1661 hba->clk_gating.state = CLKS_OFF;
1662 trace_ufshcd_clk_gating(dev_name(hba->dev),
1663 hba->clk_gating.state);
1664 }
1665 rel_lock:
1666 spin_unlock_irqrestore(hba->host->host_lock, flags);
1667 out:
1668 return;
1669 }
1670
1671 /* host lock must be held before calling this variant */
1672 static void __ufshcd_release(struct ufs_hba *hba)
1673 {
1674 if (!ufshcd_is_clkgating_allowed(hba))
1675 return;
1676
1677 hba->clk_gating.active_reqs--;
1678
1679 if (hba->clk_gating.active_reqs || hba->clk_gating.is_suspended
1680 || hba->ufshcd_state != UFSHCD_STATE_OPERATIONAL
1681 || hba->lrb_in_use || hba->outstanding_tasks
1682 || hba->active_uic_cmd || hba->uic_async_done
1683 || scsi_host_in_recovery(hba->host)
1684 || ufshcd_eh_in_progress(hba))
1685 return;
1686
1687 hba->clk_gating.state = REQ_CLKS_OFF;
1688 trace_ufshcd_clk_gating(dev_name(hba->dev), hba->clk_gating.state);
1689 queue_delayed_work(hba->ufshcd_workq, &hba->clk_gating.gate_work,
1690 msecs_to_jiffies(hba->clk_gating.delay_ms));
1691 }
1692
1693 void ufshcd_release(struct ufs_hba *hba)
1694 {
1695 unsigned long flags;
1696
1697 spin_lock_irqsave(hba->host->host_lock, flags);
1698 __ufshcd_release(hba);
1699 spin_unlock_irqrestore(hba->host->host_lock, flags);
1700 }
1701 EXPORT_SYMBOL_GPL(ufshcd_release);
1702
1703 static ssize_t ufshcd_clkgate_delay_show(struct device *dev,
1704 struct device_attribute *attr, char *buf)
1705 {
1706 struct ufs_hba *hba = dev_get_drvdata(dev);
1707
1708 return snprintf(buf, PAGE_SIZE, "%lu\n", hba->clk_gating.delay_ms);
1709 }
1710
1711 static ssize_t ufshcd_clkgate_delay_store(struct device *dev,
1712 struct device_attribute *attr, const char *buf, size_t count)
1713 {
1714 struct ufs_hba *hba = dev_get_drvdata(dev);
1715 unsigned long flags, value;
1716
1717 if (kstrtoul(buf, 0, &value))
1718 return -EINVAL;
1719
1720 spin_lock_irqsave(hba->host->host_lock, flags);
1721 hba->clk_gating.delay_ms = value;
1722 spin_unlock_irqrestore(hba->host->host_lock, flags);
1723 return count;
1724 }
1725
1726 static ssize_t ufshcd_clkgate_enable_show(struct device *dev,
1727 struct device_attribute *attr, char *buf)
1728 {
1729 struct ufs_hba *hba = dev_get_drvdata(dev);
1730
1731 return snprintf(buf, PAGE_SIZE, "%d\n", hba->clk_gating.is_enabled);
1732 }
1733
1734 static ssize_t ufshcd_clkgate_enable_store(struct device *dev,
1735 struct device_attribute *attr, const char *buf, size_t count)
1736 {
1737 struct ufs_hba *hba = dev_get_drvdata(dev);
1738 unsigned long flags;
1739 u32 value;
1740
1741 if (kstrtou32(buf, 0, &value))
1742 return -EINVAL;
1743
1744 value = !!value;
1745 if (value == hba->clk_gating.is_enabled)
1746 goto out;
1747
1748 if (value) {
1749 ufshcd_release(hba);
1750 } else {
1751 spin_lock_irqsave(hba->host->host_lock, flags);
1752 hba->clk_gating.active_reqs++;
1753 spin_unlock_irqrestore(hba->host->host_lock, flags);
1754 }
1755
1756 hba->clk_gating.is_enabled = value;
1757 out:
1758 return count;
1759 }
1760
1761 static int ufshcd_init_clk_gating(struct ufs_hba *hba)
1762 {
1763 int ret = 0;
1764
1765 if (!ufshcd_is_clkgating_allowed(hba))
1766 goto out;
1767
1768 hba->ufshcd_workq = alloc_workqueue("ufshcd_wq", WQ_HIGHPRI, 0);
1769 if (!hba->ufshcd_workq) {
1770 ret = -ENOMEM;
1771 goto out;
1772 }
1773
1774 hba->clk_gating.delay_ms = LINK_H8_DELAY;
1775 INIT_DELAYED_WORK(&hba->clk_gating.gate_work, ufshcd_gate_work);
1776 INIT_WORK(&hba->clk_gating.ungate_work, ufshcd_ungate_work);
1777
1778 hba->clk_gating.is_enabled = true;
1779
1780 hba->clk_gating.delay_attr.show = ufshcd_clkgate_delay_show;
1781 hba->clk_gating.delay_attr.store = ufshcd_clkgate_delay_store;
1782 sysfs_attr_init(&hba->clk_gating.delay_attr.attr);
1783 hba->clk_gating.delay_attr.attr.name = "clkgate_delay_ms";
1784 hba->clk_gating.delay_attr.attr.mode = 0644;
1785 if (device_create_file(hba->dev, &hba->clk_gating.delay_attr))
1786 dev_err(hba->dev, "Failed to create sysfs for clkgate_delay\n");
1787
1788 hba->clk_gating.enable_attr.show = ufshcd_clkgate_enable_show;
1789 hba->clk_gating.enable_attr.store = ufshcd_clkgate_enable_store;
1790 sysfs_attr_init(&hba->clk_gating.enable_attr.attr);
1791 hba->clk_gating.enable_attr.attr.name = "clkgate_enable";
1792 hba->clk_gating.enable_attr.attr.mode = 0644;
1793 if (device_create_file(hba->dev, &hba->clk_gating.enable_attr))
1794 dev_err(hba->dev, "Failed to create sysfs for clkgate_enable\n");
1795
1796 out:
1797 return ret;
1798 }
1799
1800 static void ufshcd_exit_clk_gating(struct ufs_hba *hba)
1801 {
1802 if (!ufshcd_is_clkgating_allowed(hba))
1803 return;
1804 destroy_workqueue(hba->ufshcd_workq);
1805 device_remove_file(hba->dev, &hba->clk_gating.delay_attr);
1806 device_remove_file(hba->dev, &hba->clk_gating.enable_attr);
1807 }
1808
1809 #if defined(CONFIG_PM_DEVFREQ)
1810 /* Must be called with host lock acquired */
1811 static void ufshcd_clk_scaling_start_busy(struct ufs_hba *hba)
1812 {
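/*
 * Busy-time accounting for clock scaling: window_start_t/tot_busy_t track
 * how much of the current polling window was spent with requests
 * outstanding, which presumably feeds the devfreq governor's load estimate
 * when it decides whether to scale the gear up or down.
 */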
1813 bool queue_resume_work = false;
1814
1815 if (!ufshcd_is_clkscaling_supported(hba))
1816 return;
1817
1818 if (!hba->clk_scaling.active_reqs++)
1819 queue_resume_work = true;
1820
1821 if (!hba->clk_scaling.is_allowed || hba->pm_op_in_progress)
1822 return;
1823
1824 if (queue_resume_work)
1825 queue_work(hba->clk_scaling.workq,
1826 &hba->clk_scaling.resume_work);
1827
1828 if (!hba->clk_scaling.window_start_t) {
1829 hba->clk_scaling.window_start_t = jiffies;
1830 hba->clk_scaling.tot_busy_t = 0;
1831 hba->clk_scaling.is_busy_started = false;
1832 }
1833
1834 if (!hba->clk_scaling.is_busy_started) {
1835 hba->clk_scaling.busy_start_t = ktime_get();
1836 hba->clk_scaling.is_busy_started = true;
1837 }
1838 }
1839
1840 static void ufshcd_clk_scaling_update_busy(struct ufs_hba *hba)
1841 {
1842 struct ufs_clk_scaling *scaling = &hba->clk_scaling;
1843
1844 if (!ufshcd_is_clkscaling_supported(hba))
1845 return;
1846
1847 if (!hba->outstanding_reqs && scaling->is_busy_started) {
1848 scaling->tot_busy_t += ktime_to_us(ktime_sub(ktime_get(),
1849 scaling->busy_start_t));
1850 scaling->busy_start_t = 0;
1851 scaling->is_busy_started = false;
1852 }
1853 }
1854 #endif
1855
1856 /**
1857 * ufshcd_send_command - Send SCSI or device management commands
1858 * @hba: per adapter instance
1859 * @task_tag: Task tag of the command
1860 */
1861 static inline
1862 void ufshcd_send_command(struct ufs_hba *hba, unsigned int task_tag)
1863 {
1864 hba->lrb[task_tag].issue_time_stamp = ktime_get();
1865 #if defined(CONFIG_PM_DEVFREQ)
1866 ufshcd_clk_scaling_start_busy(hba);
1867 #endif
1868 __set_bit(task_tag, &hba->outstanding_reqs);
1869 ufshcd_writel(hba, 1 << task_tag, REG_UTP_TRANSFER_REQ_DOOR_BELL);
1870 /* Make sure that doorbell is committed immediately */
1871 wmb();
1872 ufshcd_add_command_trace(hba, task_tag, "send");
1873 }
1874
1875 /**
1876 * ufshcd_copy_sense_data - Copy sense data in case of check condition
1877 * @lrb - pointer to local reference block
1878 */
1879 static inline void ufshcd_copy_sense_data(struct ufshcd_lrb *lrbp)
1880 {
1881 int len;
1882 if (lrbp->sense_buffer &&
1883 ufshcd_get_rsp_upiu_data_seg_len(lrbp->ucd_rsp_ptr)) {
1884 int len_to_copy;
1885
1886 len = be16_to_cpu(lrbp->ucd_rsp_ptr->sr.sense_data_len);
1887 len_to_copy = min_t(int, RESPONSE_UPIU_SENSE_DATA_LENGTH, len);
1888
1889 memcpy(lrbp->sense_buffer,
1890 lrbp->ucd_rsp_ptr->sr.sense_data,
1891 min_t(int, len_to_copy, UFSHCD_REQ_SENSE_SIZE));
1892 }
1893 }
1894
1895 /**
1896 * ufshcd_copy_query_response() - Copy the Query Response and the data
1897 * descriptor
1898 * @hba: per adapter instance
1899 * @lrb - pointer to local reference block
1900 */
1901 static
1902 int ufshcd_copy_query_response(struct ufs_hba *hba, struct ufshcd_lrb *lrbp)
1903 {
1904 struct ufs_query_res *query_res = &hba->dev_cmd.query.response;
1905
1906 memcpy(&query_res->upiu_res, &lrbp->ucd_rsp_ptr->qr, QUERY_OSF_SIZE);
1907
1908 /* Get the descriptor */
1909 if (lrbp->ucd_rsp_ptr->qr.opcode == UPIU_QUERY_OPCODE_READ_DESC) {
1910 u8 *descp = (u8 *)lrbp->ucd_rsp_ptr +
1911 GENERAL_UPIU_REQUEST_SIZE;
1912 u16 resp_len;
1913 u16 buf_len;
1914
1915 /* data segment length */
1916 resp_len = be32_to_cpu(lrbp->ucd_rsp_ptr->header.dword_2) &
1917 MASK_QUERY_DATA_SEG_LEN;
1918 buf_len = be16_to_cpu(
1919 hba->dev_cmd.query.request.upiu_req.length);
1920 if (likely(buf_len >= resp_len)) {
1921 memcpy(hba->dev_cmd.query.descriptor, descp, resp_len);
1922 } else {
1923 dev_warn(hba->dev,
1924 "%s: Response size is bigger than buffer",
1925 __func__);
1926 return -EINVAL;
1927 }
1928 }
1929
1930 return 0;
1931 }
1932
1933 /**
1934 * ufshcd_hba_capabilities - Read controller capabilities
1935 * @hba: per adapter instance
1936 */
1937 static inline void ufshcd_hba_capabilities(struct ufs_hba *hba)
1938 {
1939 hba->capabilities = ufshcd_readl(hba, REG_CONTROLLER_CAPABILITIES);
1940
1941 /* nutrs and nutmrs are 0 based values */
1942 hba->nutrs = (hba->capabilities & MASK_TRANSFER_REQUESTS_SLOTS) + 1;
1943 hba->nutmrs =
1944 ((hba->capabilities & MASK_TASK_MANAGEMENT_REQUEST_SLOTS) >> 16) + 1;
1945 }
1946
1947 /**
1948 * ufshcd_ready_for_uic_cmd - Check if controller is ready
1949 * to accept UIC commands
1950 * @hba: per adapter instance
1951 * Return true on success, else false
1952 */
1953 static inline bool ufshcd_ready_for_uic_cmd(struct ufs_hba *hba)
1954 {
1955 if (ufshcd_readl(hba, REG_CONTROLLER_STATUS) & UIC_COMMAND_READY)
1956 return true;
1957 else
1958 return false;
1959 }
1960
1961 /**
1962 * ufshcd_get_upmcrs - Get the power mode change request status
1963 * @hba: Pointer to adapter instance
1964 *
1965 * This function gets the UPMCRS field of HCS register
1966 * Returns value of UPMCRS field
1967 */
1968 static inline u8 ufshcd_get_upmcrs(struct ufs_hba *hba, struct uic_command *cmd)
1969 {
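/*
 * UPMCRS is bits 10:8 of the host controller status register.  With the
 * GET_GENERRCODE_DIRECT quirk the result is instead fetched through the
 * vendor hook, selected by which UIC command (@cmd) triggered the power
 * mode change.
 */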
1970 if (hba->quirks & UFSHCD_QUIRK_GET_GENERRCODE_DIRECT) {
1971 if (cmd->command == UIC_CMD_DME_SET &&
1972 cmd->argument1 == UIC_ARG_MIB(PA_PWRMODE))
1973 return ufshcd_vops_get_unipro(hba, 3);
1974 else if (cmd->command == UIC_CMD_DME_HIBER_ENTER)
1975 return ufshcd_vops_get_unipro(hba, 4);
1976 else if (cmd->command == UIC_CMD_DME_HIBER_EXIT)
1977 return ufshcd_vops_get_unipro(hba, 5);
1978 else
1979 return (ufshcd_readl(hba, REG_CONTROLLER_STATUS) >> 8) & 0x7;
1980 } else
1981 return (ufshcd_readl(hba, REG_CONTROLLER_STATUS) >> 8) & 0x7;
1982 }
1983
1984 /**
1985 * ufshcd_dispatch_uic_cmd - Dispatch UIC commands to unipro layers
1986 * @hba: per adapter instance
1987 * @uic_cmd: UIC command
1988 *
1989 * Mutex must be held.
1990 */
1991 static inline void
1992 ufshcd_dispatch_uic_cmd(struct ufs_hba *hba, struct uic_command *uic_cmd)
1993 {
1994 WARN_ON(hba->active_uic_cmd);
1995
1996 hba->active_uic_cmd = uic_cmd;
1997
1998 /* Write Args */
1999 ufshcd_writel(hba, uic_cmd->argument1, REG_UIC_COMMAND_ARG_1);
2000 ufshcd_writel(hba, uic_cmd->argument2, REG_UIC_COMMAND_ARG_2);
2001 ufshcd_writel(hba, uic_cmd->argument3, REG_UIC_COMMAND_ARG_3);
2002
2003 /* Write UIC Cmd */
2004 ufshcd_writel(hba, uic_cmd->command & COMMAND_OPCODE_MASK,
2005 REG_UIC_COMMAND);
2006 }
2007
2008 /**
2009 * ufshcd_wait_for_uic_cmd - Wait for completion of a UIC command
2010 * @hba: per adapter instance
2011 * @uic_cmd: UIC command
2012 *
2013 * Must be called with mutex held.
2014 * Returns 0 only if success.
2015 */
2016 static int
2017 ufshcd_wait_for_uic_cmd(struct ufs_hba *hba, struct uic_command *uic_cmd)
2018 {
2019 int ret;
2020 unsigned long flags;
2021
2022 if (wait_for_completion_timeout(&uic_cmd->done,
2023 msecs_to_jiffies(UIC_CMD_TIMEOUT))) {
2024 switch (uic_cmd->command) {
2025 case UIC_CMD_DME_LINK_STARTUP:
2026 case UIC_CMD_DME_HIBER_ENTER:
2027 case UIC_CMD_DME_HIBER_EXIT:
2028 if (hba->quirks & UFSHCD_QUIRK_GET_GENERRCODE_DIRECT)
2029 ret = ufshcd_vops_get_unipro(hba, uic_cmd->command - UIC_CMD_DME_LINK_STARTUP);
2030 else
2031 ret = uic_cmd->argument2 & MASK_UIC_COMMAND_RESULT;
2032 break;
2033 default:
2034 ret = uic_cmd->argument2 & MASK_UIC_COMMAND_RESULT;
2035 break;
2036 }
2037 } else
2038 ret = -ETIMEDOUT;
2039
2040 spin_lock_irqsave(hba->host->host_lock, flags);
2041 hba->active_uic_cmd = NULL;
2042 spin_unlock_irqrestore(hba->host->host_lock, flags);
2043
2044 return ret;
2045 }
2046
2047 /**
2048 * __ufshcd_send_uic_cmd - Send UIC commands and retrieve the result
2049 * @hba: per adapter instance
2050 * @uic_cmd: UIC command
2051 * @completion: initialize the completion only if this is set to true
2052 *
2053 * Identical to ufshcd_send_uic_cmd() except for the mutex handling. Must be called
2054 * with mutex held and host_lock locked.
2055 * Returns 0 only if success.
2056 */
2057 static int
2058 __ufshcd_send_uic_cmd(struct ufs_hba *hba, struct uic_command *uic_cmd,
2059 bool completion)
2060 {
2061 if (!ufshcd_ready_for_uic_cmd(hba)) {
2062 dev_err(hba->dev,
2063 "Controller not ready to accept UIC commands\n");
2064 return -EIO;
2065 }
2066
2067 if (completion)
2068 init_completion(&uic_cmd->done);
2069
2070 ufshcd_dispatch_uic_cmd(hba, uic_cmd);
2071
2072 return 0;
2073 }
2074
2075 /**
2076 * ufshcd_send_uic_cmd - Send UIC commands and retrieve the result
2077 * @hba: per adapter instance
2078 * @uic_cmd: UIC command
2079 *
2080 * Returns 0 only if success.
2081 */
2082 static int
2083 ufshcd_send_uic_cmd(struct ufs_hba *hba, struct uic_command *uic_cmd)
2084 {
2085 int ret;
2086 unsigned long flags;
2087
2088 ufshcd_hold(hba, false);
2089 mutex_lock(&hba->uic_cmd_mutex);
2090 ufshcd_add_delay_before_dme_cmd(hba);
2091
2092 spin_lock_irqsave(hba->host->host_lock, flags);
2093 ret = __ufshcd_send_uic_cmd(hba, uic_cmd, true);
2094 spin_unlock_irqrestore(hba->host->host_lock, flags);
2095 if (!ret)
2096 ret = ufshcd_wait_for_uic_cmd(hba, uic_cmd);
2097
2098 mutex_unlock(&hba->uic_cmd_mutex);
2099
2100 ufshcd_release(hba);
2101 return ret;
2102 }
2103
2104 /**
2105 * ufshcd_map_sg - Map scatter-gather list to prdt
2106 * @lrbp - pointer to local reference block
2107 *
2108 * Returns 0 in case of success, non-zero value in case of failure
2109 */
2110 static int ufshcd_map_sg(struct ufs_hba *hba, struct ufshcd_lrb *lrbp)
2111 {
2112 struct ufshcd_sg_entry *prd_table;
2113 struct scatterlist *sg;
2114 struct scsi_cmnd *cmd;
2115 int sg_segments;
2116 int i, ret;
2117 int sector_offset = 0;
2118 int page_index = 0;
2119
2120 cmd = lrbp->cmd;
2121 sg_segments = scsi_dma_map(cmd);
2122 if (sg_segments < 0)
2123 return sg_segments;
2124
2125 if (sg_segments) {
2126 if (hba->quirks & UFSHCD_QUIRK_PRDT_BYTE_GRAN)
2127 lrbp->utr_descriptor_ptr->prd_table_length =
2128 cpu_to_le16((u16)(sg_segments *
2129 sizeof(struct ufshcd_sg_entry)));
2130 else
2131 lrbp->utr_descriptor_ptr->prd_table_length =
2132 cpu_to_le16((u16) (sg_segments));
2133
2134 prd_table = (struct ufshcd_sg_entry *)lrbp->ucd_prdt_ptr;
2135
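/*
 * Fill one PRDT entry per SG element.  The Data Byte Count field is
 * zero-based per UFSHCI, hence the "- 1" below; each DMA address is split
 * into its lower and upper 32-bit halves.
 */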
2136 scsi_for_each_sg(cmd, sg, sg_segments, i) {
2137 prd_table[i].size =
2138 cpu_to_le32(((u32) sg_dma_len(sg))-1);
2139 prd_table[i].base_addr =
2140 cpu_to_le32(lower_32_bits(sg->dma_address));
2141 prd_table[i].upper_addr =
2142 cpu_to_le32(upper_32_bits(sg->dma_address));
2143 prd_table[i].reserved = 0;
2144 hba->transferred_sector += prd_table[i].size;
2145
2146 ret = ufshcd_vops_crypto_engine_cfg(hba, lrbp, sg, i, sector_offset, page_index++);
2147 if (ret) {
2148 dev_err(hba->dev,
2149 "%s: failed to configure crypto engine (%d)\n",
2150 __func__, ret);
2151 return ret;
2152 }
2153 sector_offset += UFSHCI_SECTOR_SIZE / MIN_SECTOR_SIZE;
2154 }
2155 } else {
2156 lrbp->utr_descriptor_ptr->prd_table_length = 0;
2157 }
2158
2159 return 0;
2160 }
2161
2162 /**
2163 * ufshcd_enable_intr - enable interrupts
2164 * @hba: per adapter instance
2165 * @intrs: interrupt bits
2166 */
2167 static void ufshcd_enable_intr(struct ufs_hba *hba, u32 intrs)
2168 {
2169 u32 set = ufshcd_readl(hba, REG_INTERRUPT_ENABLE);
2170
2171 if (hba->ufs_version == UFSHCI_VERSION_10) {
2172 u32 rw;
2173 rw = set & INTERRUPT_MASK_RW_VER_10;
2174 set = rw | ((set ^ intrs) & intrs);
2175 } else {
2176 set |= intrs;
2177 }
2178
2179 ufshcd_writel(hba, set, REG_INTERRUPT_ENABLE);
2180 }
2181
2182 /**
2183 * ufshcd_disable_intr - disable interrupts
2184 * @hba: per adapter instance
2185 * @intrs: interrupt bits
2186 */
2187 static void ufshcd_disable_intr(struct ufs_hba *hba, u32 intrs)
2188 {
2189 u32 set = ufshcd_readl(hba, REG_INTERRUPT_ENABLE);
2190
2191 if (hba->ufs_version == UFSHCI_VERSION_10) {
2192 u32 rw;
2193 rw = (set & INTERRUPT_MASK_RW_VER_10) &
2194 ~(intrs & INTERRUPT_MASK_RW_VER_10);
2195 set = rw | ((set & intrs) & ~INTERRUPT_MASK_RW_VER_10);
2196
2197 } else {
2198 set &= ~intrs;
2199 }
2200
2201 ufshcd_writel(hba, set, REG_INTERRUPT_ENABLE);
2202 }
2203
2204 /**
2205 * ufshcd_prepare_req_desc_hdr() - Fill the request descriptor header
2206 * according to the request
2207 * @lrbp: pointer to local reference block
2208 * @upiu_flags: flags required in the header
2209 * @cmd_dir: the request's data direction
2210 */
2211 static void ufshcd_prepare_req_desc_hdr(struct ufshcd_lrb *lrbp,
2212 u32 *upiu_flags, enum dma_data_direction cmd_dir)
2213 {
2214 struct utp_transfer_req_desc *req_desc = lrbp->utr_descriptor_ptr;
2215 u32 data_direction;
2216 u32 dword_0;
2217
2218 if (cmd_dir == DMA_FROM_DEVICE) {
2219 data_direction = UTP_DEVICE_TO_HOST;
2220 *upiu_flags = UPIU_CMD_FLAGS_READ;
2221 } else if (cmd_dir == DMA_TO_DEVICE) {
2222 data_direction = UTP_HOST_TO_DEVICE;
2223 *upiu_flags = UPIU_CMD_FLAGS_WRITE;
2224 } else {
2225 data_direction = UTP_NO_DATA_TRANSFER;
2226 *upiu_flags = UPIU_CMD_FLAGS_NONE;
2227 }
2228
2229 dword_0 = data_direction | (lrbp->command_type
2230 << UPIU_COMMAND_TYPE_OFFSET);
2231 if (lrbp->intr_cmd)
2232 dword_0 |= UTP_REQ_DESC_INT_CMD;
2233
2234 /* Transfer request descriptor header fields */
2235 req_desc->header.dword_0 = cpu_to_le32(dword_0);
2236 /* dword_1 is reserved, hence it is set to 0 */
2237 req_desc->header.dword_1 = 0;
2238 /*
2239 * assigning invalid value for command status. Controller
2240 * updates OCS on command completion, with the command
2241 * status
2242 */
2243 req_desc->header.dword_2 =
2244 cpu_to_le32(OCS_INVALID_COMMAND_STATUS);
2245 /* dword_3 is reserved, hence it is set to 0 */
2246 req_desc->header.dword_3 = 0;
2247
2248 req_desc->prd_table_length = 0;
2249 }
2250
2251 /**
2252 * ufshcd_prepare_utp_scsi_cmd_upiu() - fills the utp_transfer_req_desc,
2253 * for scsi commands
2254 * @lrbp - local reference block pointer
2255 * @upiu_flags - flags
2256 */
2257 static
2258 void ufshcd_prepare_utp_scsi_cmd_upiu(struct ufshcd_lrb *lrbp, u32 upiu_flags)
2259 {
2260 struct utp_upiu_req *ucd_req_ptr = lrbp->ucd_req_ptr;
2261 unsigned short cdb_len;
2262
2263 /* command descriptor fields */
2264 ucd_req_ptr->header.dword_0 = UPIU_HEADER_DWORD(
2265 UPIU_TRANSACTION_COMMAND, upiu_flags,
2266 lrbp->lun, lrbp->task_tag);
2267 ucd_req_ptr->header.dword_1 = UPIU_HEADER_DWORD(
2268 UPIU_COMMAND_SET_TYPE_SCSI, 0, 0, 0);
2269
2270 /* Total EHS length and Data segment length will be zero */
2271 ucd_req_ptr->header.dword_2 = 0;
2272
2273 ucd_req_ptr->sc.exp_data_transfer_len =
2274 cpu_to_be32(lrbp->cmd->sdb.length);
2275
2276 cdb_len = min_t(unsigned short, lrbp->cmd->cmd_len, MAX_CDB_SIZE);
2277 memset(ucd_req_ptr->sc.cdb, 0, MAX_CDB_SIZE);
2278 memcpy(ucd_req_ptr->sc.cdb, lrbp->cmd->cmnd, cdb_len);
2279
2280 memset(lrbp->ucd_rsp_ptr, 0, sizeof(struct utp_upiu_rsp));
2281 }
2282
2283 /**
2284 * ufshcd_prepare_utp_query_req_upiu() - fills the utp_transfer_req_desc,
2285 * for query requests
2286 * @hba: UFS hba
2287 * @lrbp: local reference block pointer
2288 * @upiu_flags: flags
2289 */
2290 static void ufshcd_prepare_utp_query_req_upiu(struct ufs_hba *hba,
2291 struct ufshcd_lrb *lrbp, u32 upiu_flags)
2292 {
2293 struct utp_upiu_req *ucd_req_ptr = lrbp->ucd_req_ptr;
2294 struct ufs_query *query = &hba->dev_cmd.query;
2295 u16 len = be16_to_cpu(query->request.upiu_req.length);
2296 u8 *descp = (u8 *)lrbp->ucd_req_ptr + GENERAL_UPIU_REQUEST_SIZE;
2297
2298 /* Query request header */
2299 ucd_req_ptr->header.dword_0 = UPIU_HEADER_DWORD(
2300 UPIU_TRANSACTION_QUERY_REQ, upiu_flags,
2301 lrbp->lun, lrbp->task_tag);
2302 ucd_req_ptr->header.dword_1 = UPIU_HEADER_DWORD(
2303 0, query->request.query_func, 0, 0);
2304
2305 if (query->request.upiu_req.opcode == UPIU_QUERY_OPCODE_READ_DESC)
2306 len = 0;
2307
2308 /* Data segment length is only needed for WRITE_DESC */
2309 if (query->request.upiu_req.opcode == UPIU_QUERY_OPCODE_WRITE_DESC)
2310 ucd_req_ptr->header.dword_2 =
2311 UPIU_HEADER_DWORD(0, 0, (len >> 8), (u8)len);
2312 else
2313 ucd_req_ptr->header.dword_2 = 0;
2314
2315 /* Copy the Query Request buffer as is */
2316 memcpy(&ucd_req_ptr->qr, &query->request.upiu_req,
2317 QUERY_OSF_SIZE);
2318
2319 /* Copy the Descriptor */
2320 if (query->request.upiu_req.opcode == UPIU_QUERY_OPCODE_WRITE_DESC)
2321 memcpy(descp, query->descriptor, len);
2322
2323 memset(lrbp->ucd_rsp_ptr, 0, sizeof(struct utp_upiu_rsp));
2324 }
2325
2326 static inline void ufshcd_prepare_utp_nop_upiu(struct ufshcd_lrb *lrbp)
2327 {
2328 struct utp_upiu_req *ucd_req_ptr = lrbp->ucd_req_ptr;
2329
2330 memset(ucd_req_ptr, 0, sizeof(struct utp_upiu_req));
2331
2332 /* command descriptor fields */
2333 ucd_req_ptr->header.dword_0 =
2334 UPIU_HEADER_DWORD(
2335 UPIU_TRANSACTION_NOP_OUT, 0, 0, lrbp->task_tag);
2336 /* clear rest of the fields of basic header */
2337 ucd_req_ptr->header.dword_1 = 0;
2338 ucd_req_ptr->header.dword_2 = 0;
2339
2340 memset(lrbp->ucd_rsp_ptr, 0, sizeof(struct utp_upiu_rsp));
2341 }
2342
2343 /**
2344 * ufshcd_comp_devman_upiu - UFS Protocol Information Unit(UPIU)
2345 * for Device Management Purposes
2346 * @hba - per adapter instance
2347 * @lrb - pointer to local reference block
2348 */
2349 static int ufshcd_comp_devman_upiu(struct ufs_hba *hba, struct ufshcd_lrb *lrbp)
2350 {
2351 u32 upiu_flags;
2352 int ret = 0;
2353
2354 if ((hba->ufs_version == UFSHCI_VERSION_10) ||
2355 (hba->ufs_version == UFSHCI_VERSION_11))
2356 lrbp->command_type = UTP_CMD_TYPE_DEV_MANAGE;
2357 else
2358 lrbp->command_type = UTP_CMD_TYPE_UFS_STORAGE;
2359
2360 ufshcd_prepare_req_desc_hdr(lrbp, &upiu_flags, DMA_NONE);
2361 if (hba->dev_cmd.type == DEV_CMD_TYPE_QUERY)
2362 ufshcd_prepare_utp_query_req_upiu(hba, lrbp, upiu_flags);
2363 else if (hba->dev_cmd.type == DEV_CMD_TYPE_NOP)
2364 ufshcd_prepare_utp_nop_upiu(lrbp);
2365 else
2366 ret = -EINVAL;
2367
2368 return ret;
2369 }
2370
2371 /**
2372 * ufshcd_comp_scsi_upiu - UFS Protocol Information Unit(UPIU)
2373 * for SCSI Purposes
2374 * @hba - per adapter instance
2375 * @lrb - pointer to local reference block
2376 */
2377 static int ufshcd_comp_scsi_upiu(struct ufs_hba *hba, struct ufshcd_lrb *lrbp)
2378 {
2379 u32 upiu_flags;
2380 int ret = 0;
2381
2382 if ((hba->ufs_version == UFSHCI_VERSION_10) ||
2383 (hba->ufs_version == UFSHCI_VERSION_11))
2384 lrbp->command_type = UTP_CMD_TYPE_SCSI;
2385 else
2386 lrbp->command_type = UTP_CMD_TYPE_UFS_STORAGE;
2387
2388 if (likely(lrbp->cmd)) {
2389 ufshcd_prepare_req_desc_hdr(lrbp, &upiu_flags,
2390 lrbp->cmd->sc_data_direction);
2391 ufshcd_prepare_utp_scsi_cmd_upiu(lrbp, upiu_flags);
2392 } else {
2393 ret = -EINVAL;
2394 }
2395
2396 return ret;
2397 }
2398
2399 /*
2400 * ufshcd_scsi_to_upiu_lun - maps scsi LUN to UPIU LUN
2401 * @scsi_lun: scsi LUN id
2402 *
2403 * Returns UPIU LUN id
2404 */
2405 static inline u8 ufshcd_scsi_to_upiu_lun(unsigned int scsi_lun)
2406 {
2407 if (scsi_is_wlun(scsi_lun))
2408 return (scsi_lun & UFS_UPIU_MAX_UNIT_NUM_ID)
2409 | UFS_UPIU_WLUN_ID;
2410 else
2411 return scsi_lun & UFS_UPIU_MAX_UNIT_NUM_ID;
2412 }
2413
2414 static inline unsigned int ufshcd_get_scsi_lun(struct scsi_cmnd *cmd)
2415 {
2416 if (cmd->cmnd[0] == SECURITY_PROTOCOL_IN ||
2417 cmd->cmnd[0] == SECURITY_PROTOCOL_OUT)
2418 return (SCSI_W_LUN_BASE |
2419 (UFS_UPIU_RPMB_WLUN & UFS_UPIU_MAX_UNIT_NUM_ID));
2420 else
2421 return cmd->device->lun;
2422 }
2423
2424 /**
2425 * ufshcd_upiu_wlun_to_scsi_wlun - maps UPIU W-LUN id to SCSI W-LUN ID
2426 * @scsi_lun: UPIU W-LUN id
2427 *
2428 * Returns SCSI W-LUN id
2429 */
2430 static inline u16 ufshcd_upiu_wlun_to_scsi_wlun(u8 upiu_wlun_id)
2431 {
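/* e.g. the RPMB W-LUN 0xC4 maps to SCSI W-LUN 0xc144 (SCSI_W_LUN_BASE | 0x44) */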
2432 return (upiu_wlun_id & ~UFS_UPIU_WLUN_ID) | SCSI_W_LUN_BASE;
2433 }
2434
2435 /**
2436 * ufshcd_queuecommand - main entry point for SCSI requests
2437 * @host: SCSI host pointer
2438 * @cmd: command from SCSI Midlayer
2439 *
2440 * Returns 0 for success, non-zero in case of failure
2441 */
2442 static int ufshcd_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *cmd)
2443 {
2444 struct ufshcd_lrb *lrbp;
2445 struct ufs_hba *hba;
2446 unsigned long flags;
2447 int tag;
2448 int err = 0;
2449 unsigned int scsi_lun;
2450
2451 hba = shost_priv(host);
2452
2453 tag = cmd->request->tag;
2454 if (!ufshcd_valid_tag(hba, tag)) {
2455 dev_err(hba->dev,
2456 "%s: invalid command tag %d: cmd=0x%p, cmd->request=0x%p",
2457 __func__, tag, cmd, cmd->request);
2458 BUG();
2459 }
2460
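/*
 * clk_scaling_lock is held for write while clock/gear scaling reconfigures
 * the host; rather than blocking in the issue path, ask the midlayer to
 * requeue the request.
 */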
2461 if (!down_read_trylock(&hba->clk_scaling_lock))
2462 return SCSI_MLQUEUE_HOST_BUSY;
2463
2464 if ((ufs_shutdown_state == 1) && (cmd->cmnd[0] == START_STOP)) {
2465 scsi_block_requests(hba->host);
2466 cancel_work_sync(&hba->clk_gating.ungate_work);
2467 }
2468
2469 spin_lock_irqsave(hba->host->host_lock, flags);
2470 switch (hba->ufshcd_state) {
2471 case UFSHCD_STATE_OPERATIONAL:
2472 break;
2473 case UFSHCD_STATE_EH_SCHEDULED:
2474 case UFSHCD_STATE_RESET:
2475 err = SCSI_MLQUEUE_HOST_BUSY;
2476 goto out_unlock;
2477 case UFSHCD_STATE_ERROR:
2478 set_host_byte(cmd, DID_ERROR);
2479 scsi_dma_map(cmd);
2480 cmd->scsi_done(cmd);
2481 goto out_unlock;
2482 default:
2483 dev_WARN_ONCE(hba->dev, 1, "%s: invalid state %d\n",
2484 __func__, hba->ufshcd_state);
2485 set_host_byte(cmd, DID_BAD_TARGET);
2486 cmd->scsi_done(cmd);
2487 goto out_unlock;
2488 }
2489
2490 /* if error handling is in progress, don't issue commands */
2491 if (ufshcd_eh_in_progress(hba)) {
2492 set_host_byte(cmd, DID_ERROR);
2493 cmd->scsi_done(cmd);
2494 goto out_unlock;
2495 }
2496 spin_unlock_irqrestore(hba->host->host_lock, flags);
2497
2498 hba->req_abort_count = 0;
2499
2500 /* acquire the tag to make sure device cmds don't use it */
2501 if (test_and_set_bit_lock(tag, &hba->lrb_in_use)) {
2502 /*
2503 * Dev manage command in progress, requeue the command.
2504 * Requeuing the command helps in cases where the request *may*
2505 * find a different tag instead of waiting for the dev manage command
2506 * completion.
2507 */
2508 err = SCSI_MLQUEUE_HOST_BUSY;
2509 goto out;
2510 }
2511
2512 err = ufshcd_hold(hba, true);
2513 if (err) {
2514 err = SCSI_MLQUEUE_HOST_BUSY;
2515 clear_bit_unlock(tag, &hba->lrb_in_use);
2516 goto out;
2517 }
2518 WARN_ON(hba->clk_gating.state != CLKS_ON);
2519
2520 lrbp = &hba->lrb[tag];
2521
2522 WARN_ON(lrbp->cmd);
2523 lrbp->cmd = cmd;
2524 lrbp->sense_bufflen = UFSHCD_REQ_SENSE_SIZE;
2525 lrbp->sense_buffer = cmd->sense_buffer;
2526 lrbp->task_tag = tag;
2527
2528 scsi_lun = ufshcd_get_scsi_lun(cmd);
2529 lrbp->lun = ufshcd_scsi_to_upiu_lun(scsi_lun);
2530 lrbp->intr_cmd = !ufshcd_is_intr_aggr_allowed(hba) ? true : false;
2531 lrbp->req_abort_skip = false;
2532
2533 ufshcd_comp_scsi_upiu(hba, lrbp);
2534
2535 err = ufshcd_map_sg(hba, lrbp);
2536 if (err) {
2537 lrbp->cmd = NULL;
2538 clear_bit_unlock(tag, &hba->lrb_in_use);
2539 goto out;
2540 }
2541 /* Make sure descriptors are ready before ringing the doorbell */
2542 wmb();
2543
2544 /* issue command to the controller */
2545 spin_lock_irqsave(hba->host->host_lock, flags);
2546 if (hba->vops && hba->vops->set_nexus_t_xfer_req)
2547 hba->vops->set_nexus_t_xfer_req(hba, tag, lrbp->cmd);
2548 #ifdef CONFIG_SCSI_UFS_CMD_LOGGING
2549 exynos_ufs_cmd_log_start(hba, cmd);
2550 #endif
2551 ufshcd_send_command(hba, tag);
2552
2553 if (hba->monitor.flag & UFSHCD_MONITOR_LEVEL1)
2554 dev_info(hba->dev, "IO issued(%d)\n", tag);
2555 out_unlock:
2556 spin_unlock_irqrestore(hba->host->host_lock, flags);
2557 out:
2558 up_read(&hba->clk_scaling_lock);
2559 return err;
2560 }
2561
2562 static int ufshcd_compose_dev_cmd(struct ufs_hba *hba,
2563 struct ufshcd_lrb *lrbp, enum dev_cmd_type cmd_type, int tag)
2564 {
2565 lrbp->cmd = NULL;
2566 lrbp->sense_bufflen = 0;
2567 lrbp->sense_buffer = NULL;
2568 lrbp->task_tag = tag;
2569 lrbp->lun = 0; /* device management cmd is not specific to any LUN */
2570 lrbp->intr_cmd = true; /* No interrupt aggregation */
2571 hba->dev_cmd.type = cmd_type;
2572
2573 return ufshcd_comp_devman_upiu(hba, lrbp);
2574 }
2575
2576 static int
2577 ufshcd_clear_cmd(struct ufs_hba *hba, int tag)
2578 {
2579 int err = 0;
2580 unsigned long flags;
2581 u32 mask = 1 << tag;
2582
2583 /* clear outstanding transaction before retry */
2584 spin_lock_irqsave(hba->host->host_lock, flags);
2585 ufshcd_utrl_clear(hba, tag);
2586 spin_unlock_irqrestore(hba->host->host_lock, flags);
2587
2588 /*
2589 * wait for h/w to clear the corresponding bit in the door-bell.
2590 * max. wait is 1 sec.
2591 */
2592 err = ufshcd_wait_for_register(hba,
2593 REG_UTP_TRANSFER_REQ_DOOR_BELL,
2594 mask, ~mask, 1000, 1000, true);
2595
2596 return err;
2597 }
2598
2599 static int
2600 ufshcd_check_query_response(struct ufs_hba *hba, struct ufshcd_lrb *lrbp)
2601 {
2602 struct ufs_query_res *query_res = &hba->dev_cmd.query.response;
2603
2604 /* Get the UPIU response */
2605 query_res->response = ufshcd_get_rsp_upiu_result(lrbp->ucd_rsp_ptr) >>
2606 UPIU_RSP_CODE_OFFSET;
2607 return query_res->response;
2608 }
2609
2610 /**
2611 * ufshcd_dev_cmd_completion() - handles device management command responses
2612 * @hba: per adapter instance
2613 * @lrbp: pointer to local reference block
2614 */
2615 static int
2616 ufshcd_dev_cmd_completion(struct ufs_hba *hba, struct ufshcd_lrb *lrbp)
2617 {
2618 int resp;
2619 int err = 0;
2620
2621 hba->ufs_stats.last_hibern8_exit_tstamp = ktime_set(0, 0);
2622 resp = ufshcd_get_req_rsp(lrbp->ucd_rsp_ptr);
2623
2624 switch (resp) {
2625 case UPIU_TRANSACTION_NOP_IN:
2626 if (hba->dev_cmd.type != DEV_CMD_TYPE_NOP) {
2627 err = -EINVAL;
2628 dev_err(hba->dev, "%s: unexpected response %x\n",
2629 __func__, resp);
2630 }
2631 break;
2632 case UPIU_TRANSACTION_QUERY_RSP:
2633 err = ufshcd_check_query_response(hba, lrbp);
2634 if (!err)
2635 err = ufshcd_copy_query_response(hba, lrbp);
2636 break;
2637 case UPIU_TRANSACTION_REJECT_UPIU:
2638 /* TODO: handle Reject UPIU Response */
2639 err = -EPERM;
2640 dev_err(hba->dev, "%s: Reject UPIU not fully implemented\n",
2641 __func__);
2642 break;
2643 default:
2644 err = -EINVAL;
2645 dev_err(hba->dev, "%s: Invalid device management cmd response: %x\n",
2646 __func__, resp);
2647 break;
2648 }
2649
2650 return err;
2651 }
2652
2653 static int ufshcd_wait_for_dev_cmd(struct ufs_hba *hba,
2654 struct ufshcd_lrb *lrbp, int max_timeout)
2655 {
2656 int err = 0;
2657 unsigned long time_left;
2658 unsigned long flags;
2659
2660 time_left = wait_for_completion_timeout(hba->dev_cmd.complete,
2661 msecs_to_jiffies(max_timeout));
2662
2663 /* Make sure descriptors are ready before ringing the doorbell */
2664 wmb();
2665 spin_lock_irqsave(hba->host->host_lock, flags);
2666 hba->dev_cmd.complete = NULL;
2667 if (likely(time_left)) {
2668 err = ufshcd_get_tr_ocs(lrbp);
2669 if (!err)
2670 err = ufshcd_dev_cmd_completion(hba, lrbp);
2671 }
2672 spin_unlock_irqrestore(hba->host->host_lock, flags);
2673
2674 if (!time_left) {
2675 err = -ETIMEDOUT;
2676 dev_dbg(hba->dev, "%s: dev_cmd request timedout, tag %d\n",
2677 __func__, lrbp->task_tag);
2678 if (!ufshcd_clear_cmd(hba, lrbp->task_tag))
2679 /* successfully cleared the command, retry if needed */
2680 err = -EAGAIN;
2681 /*
2682 * in case of an error, after clearing the doorbell,
2683 * we also need to clear the outstanding_request
2684 * field in hba
2685 */
2686 ufshcd_outstanding_req_clear(hba, lrbp->task_tag);
2687 }
2688
2689 return err;
2690 }
2691
2692 /**
2693 * ufshcd_get_dev_cmd_tag - Get device management command tag
2694 * @hba: per-adapter instance
2695 * @tag_out: pointer to variable with available slot value
2696 *
2697 * Get a free slot and lock it until device management command
2698 * completes.
2699 *
2700 * Returns false if free slot is unavailable for locking, else
2701 * return true with tag value in @tag.
2702 */
2703 static bool ufshcd_get_dev_cmd_tag(struct ufs_hba *hba, int *tag_out)
2704 {
2705 int tag;
2706 bool ret = false;
2707 unsigned long tmp;
2708
2709 if (!tag_out)
2710 goto out;
2711
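/*
 * Scan for a free slot from the top of the tag space and claim it with
 * test_and_set_bit_lock(); loop again if another context raced us to the
 * same tag.
 */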
2712 do {
2713 tmp = ~hba->lrb_in_use;
2714 tag = find_last_bit(&tmp, hba->nutrs);
2715 if (tag >= hba->nutrs)
2716 goto out;
2717 } while (test_and_set_bit_lock(tag, &hba->lrb_in_use));
2718
2719 *tag_out = tag;
2720 ret = true;
2721 out:
2722 return ret;
2723 }
2724
2725 static inline void ufshcd_put_dev_cmd_tag(struct ufs_hba *hba, int tag)
2726 {
2727 clear_bit_unlock(tag, &hba->lrb_in_use);
2728 }
2729
2730 /**
2731 * ufshcd_exec_dev_cmd - API for sending device management requests
2732 * @hba - UFS hba
2733 * @cmd_type - specifies the type (NOP, Query...)
2734 * @timeout - timeout in milliseconds
2735 *
2736 * NOTE: Since there is only one available tag for device management commands,
2737 * it is expected you hold the hba->dev_cmd.lock mutex.
2738 */
2739 static int ufshcd_exec_dev_cmd(struct ufs_hba *hba,
2740 enum dev_cmd_type cmd_type, int timeout)
2741 {
2742 struct ufshcd_lrb *lrbp;
2743 int err;
2744 int tag;
2745 struct completion wait;
2746 unsigned long flags;
2747
2748 if (!ufshcd_is_link_active(hba)) {
2749 flush_work(&hba->clk_gating.ungate_work);
2750 if (!ufshcd_is_link_active(hba))
2751 return -EPERM;
2752 }
2753
2754 down_read(&hba->clk_scaling_lock);
2755
2756 /*
2757 * Get free slot, sleep if slots are unavailable.
2758 * Even though we use wait_event() which sleeps indefinitely,
2759 * the maximum wait time is bounded by SCSI request timeout.
2760 */
2761 wait_event(hba->dev_cmd.tag_wq, ufshcd_get_dev_cmd_tag(hba, &tag));
2762
2763 init_completion(&wait);
2764 lrbp = &hba->lrb[tag];
2765 WARN_ON(lrbp->cmd);
2766 err = ufshcd_compose_dev_cmd(hba, lrbp, cmd_type, tag);
2767 if (unlikely(err))
2768 goto out_put_tag;
2769
2770 hba->dev_cmd.complete = &wait;
2771
2772 /* Make sure descriptors are ready before ringing the doorbell */
2773 wmb();
2774 spin_lock_irqsave(hba->host->host_lock, flags);
2775 if (hba->vops && hba->vops->set_nexus_t_xfer_req)
2776 hba->vops->set_nexus_t_xfer_req(hba, tag, lrbp->cmd);
2777 ufshcd_send_command(hba, tag);
2778 spin_unlock_irqrestore(hba->host->host_lock, flags);
2779
2780 err = ufshcd_wait_for_dev_cmd(hba, lrbp, timeout);
2781
2782 out_put_tag:
2783 ufshcd_put_dev_cmd_tag(hba, tag);
2784 wake_up(&hba->dev_cmd.tag_wq);
2785 up_read(&hba->clk_scaling_lock);
2786 return err;
2787 }
2788
2789 /**
2790 * ufshcd_init_query() - init the query response and request parameters
2791 * @hba: per-adapter instance
2792 * @request: address of the request pointer to be initialized
2793 * @response: address of the response pointer to be initialized
2794 * @opcode: operation to perform
2795 * @idn: flag idn to access
2796 * @index: LU number to access
2797 * @selector: query/flag/descriptor further identification
2798 */
2799 static inline void ufshcd_init_query(struct ufs_hba *hba,
2800 struct ufs_query_req **request, struct ufs_query_res **response,
2801 enum query_opcode opcode, u8 idn, u8 index, u8 selector)
2802 {
2803 *request = &hba->dev_cmd.query.request;
2804 *response = &hba->dev_cmd.query.response;
2805 memset(*request, 0, sizeof(struct ufs_query_req));
2806 memset(*response, 0, sizeof(struct ufs_query_res));
2807 (*request)->upiu_req.opcode = opcode;
2808 (*request)->upiu_req.idn = idn;
2809 (*request)->upiu_req.index = index;
2810 (*request)->upiu_req.selector = selector;
2811 }
2812
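/*
 * Typical use, sketched after the way this driver polls fDeviceInit during
 * device initialization:
 *
 *	bool flag_res;
 *	int err = ufshcd_query_flag_retry(hba, UPIU_QUERY_OPCODE_READ_FLAG,
 *					  QUERY_FLAG_IDN_FDEVICEINIT, &flag_res);
 *
 * A zero return with flag_res == false means fDeviceInit has been cleared
 * by the device.
 */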
2813 static int ufshcd_query_flag_retry(struct ufs_hba *hba,
2814 enum query_opcode opcode, enum flag_idn idn, bool *flag_res)
2815 {
2816 int ret;
2817 int retries;
2818
2819 for (retries = 0; retries < QUERY_REQ_RETRIES; retries++) {
2820 ret = ufshcd_query_flag(hba, opcode, idn, flag_res);
2821 if (ret)
2822 dev_dbg(hba->dev,
2823 "%s: failed with error %d, retries %d\n",
2824 __func__, ret, retries);
2825 else
2826 break;
2827 }
2828
2829 if (ret)
2830 dev_err(hba->dev,
2831 "%s: query attribute, opcode %d, idn %d, failed with error %d after %d retires\n",
2832 __func__, opcode, idn, ret, retries);
2833 return ret;
2834 }
2835
2836 /**
2837 * ufshcd_query_flag() - API function for sending flag query requests
2838 * @hba: per-adapter instance
2839 * @opcode: flag query to perform
2840 * @idn: flag idn to access
2841 * @flag_res: the flag value after the query request completes
2842 *
2843 * Returns 0 for success, non-zero in case of failure
2844 */
2845 int ufshcd_query_flag(struct ufs_hba *hba, enum query_opcode opcode,
2846 enum flag_idn idn, bool *flag_res)
2847 {
2848 struct ufs_query_req *request = NULL;
2849 struct ufs_query_res *response = NULL;
2850 int err, index = 0, selector = 0;
2851 int timeout = QUERY_REQ_TIMEOUT;
2852
2853 BUG_ON(!hba);
2854
2855 ufshcd_hold(hba, false);
2856 mutex_lock(&hba->dev_cmd.lock);
2857 ufshcd_init_query(hba, &request, &response, opcode, idn, index,
2858 selector);
2859
2860 switch (opcode) {
2861 case UPIU_QUERY_OPCODE_SET_FLAG:
2862 case UPIU_QUERY_OPCODE_CLEAR_FLAG:
2863 case UPIU_QUERY_OPCODE_TOGGLE_FLAG:
2864 request->query_func = UPIU_QUERY_FUNC_STANDARD_WRITE_REQUEST;
2865 break;
2866 case UPIU_QUERY_OPCODE_READ_FLAG:
2867 request->query_func = UPIU_QUERY_FUNC_STANDARD_READ_REQUEST;
2868 if (!flag_res) {
2869 /* No dummy reads */
2870 dev_err(hba->dev, "%s: Invalid argument for read request\n",
2871 __func__);
2872 err = -EINVAL;
2873 goto out_unlock;
2874 }
2875 break;
2876 default:
2877 dev_err(hba->dev,
2878 "%s: Expected query flag opcode but got = %d\n",
2879 __func__, opcode);
2880 err = -EINVAL;
2881 goto out_unlock;
2882 }
2883
2884 if (idn == QUERY_FLAG_IDN_FDEVICEINIT)
2885 timeout = QUERY_FDEVICEINIT_REQ_TIMEOUT;
2886
2887 err = ufshcd_exec_dev_cmd(hba, DEV_CMD_TYPE_QUERY, timeout);
2888
2889 if (err) {
2890 dev_err(hba->dev,
2891 "%s: Sending flag query for idn %d failed, err = %d\n",
2892 __func__, idn, err);
2893 goto out_unlock;
2894 }
2895
2896 if (flag_res)
2897 *flag_res = (be32_to_cpu(response->upiu_res.value) &
2898 MASK_QUERY_UPIU_FLAG_LOC) & 0x1;
2899
2900 out_unlock:
2901 mutex_unlock(&hba->dev_cmd.lock);
2902 ufshcd_release(hba);
2903 return err;
2904 }
2905
2906 /**
2907 * ufshcd_query_attr - API function for sending attribute requests
2908 * @hba: per-adapter instance
2909 * @opcode: attribute opcode
2910 * @idn: attribute idn to access
2911 * @index: index field
2912 * @selector: selector field
2913 * @attr_val: the attribute value after the query request completes
2914 *
2915 * Returns 0 for success, non-zero in case of failure
2916 */
2917 static int ufshcd_query_attr(struct ufs_hba *hba, enum query_opcode opcode,
2918 enum attr_idn idn, u8 index, u8 selector, u32 *attr_val)
2919 {
2920 struct ufs_query_req *request = NULL;
2921 struct ufs_query_res *response = NULL;
2922 int err;
2923
2924 BUG_ON(!hba);
2925
2926 ufshcd_hold(hba, false);
2927 if (!attr_val) {
2928 dev_err(hba->dev, "%s: attribute value required for opcode 0x%x\n",
2929 __func__, opcode);
2930 err = -EINVAL;
2931 goto out;
2932 }
2933
2934 mutex_lock(&hba->dev_cmd.lock);
2935 ufshcd_init_query(hba, &request, &response, opcode, idn, index,
2936 selector);
2937
2938 switch (opcode) {
2939 case UPIU_QUERY_OPCODE_WRITE_ATTR:
2940 request->query_func = UPIU_QUERY_FUNC_STANDARD_WRITE_REQUEST;
2941 request->upiu_req.value = cpu_to_be32(*attr_val);
2942 break;
2943 case UPIU_QUERY_OPCODE_READ_ATTR:
2944 request->query_func = UPIU_QUERY_FUNC_STANDARD_READ_REQUEST;
2945 break;
2946 default:
2947 dev_err(hba->dev, "%s: Expected query attr opcode but got = 0x%.2x\n",
2948 __func__, opcode);
2949 err = -EINVAL;
2950 goto out_unlock;
2951 }
2952
2953 err = ufshcd_exec_dev_cmd(hba, DEV_CMD_TYPE_QUERY, QUERY_REQ_TIMEOUT);
2954
2955 if (err) {
2956 dev_err(hba->dev, "%s: opcode 0x%.2x for idn %d failed, index %d, err = %d\n",
2957 __func__, opcode, idn, index, err);
2958 goto out_unlock;
2959 }
2960
2961 *attr_val = be32_to_cpu(response->upiu_res.value);
2962
2963 out_unlock:
2964 mutex_unlock(&hba->dev_cmd.lock);
2965 out:
2966 ufshcd_release(hba);
2967 return err;
2968 }
2969
2970 /**
2971 * ufshcd_query_attr_retry() - API function for sending query
2972 * attribute with retries
2973 * @hba: per-adapter instance
2974 * @opcode: attribute opcode
2975 * @idn: attribute idn to access
2976 * @index: index field
2977 * @selector: selector field
2978 * @attr_val: the attribute value after the query request
2979 * completes
2980 *
2981 * Returns 0 for success, non-zero in case of failure
2982 */
2983 static int ufshcd_query_attr_retry(struct ufs_hba *hba,
2984 enum query_opcode opcode, enum attr_idn idn, u8 index, u8 selector,
2985 u32 *attr_val)
2986 {
2987 int ret = 0;
2988 u32 retries;
2989
2990 for (retries = QUERY_REQ_RETRIES; retries > 0; retries--) {
2991 ret = ufshcd_query_attr(hba, opcode, idn, index,
2992 selector, attr_val);
2993 if (ret)
2994 dev_dbg(hba->dev, "%s: failed with error %d, retries %d\n",
2995 __func__, ret, retries);
2996 else
2997 break;
2998 }
2999
3000 if (ret)
3001 dev_err(hba->dev,
3002 "%s: query attribute, idn %d, failed with error %d after %d retires\n",
3003 __func__, idn, ret, QUERY_REQ_RETRIES);
3004 return ret;
3005 }
3006
3007 static int __ufshcd_query_descriptor(struct ufs_hba *hba,
3008 enum query_opcode opcode, enum desc_idn idn, u8 index,
3009 u8 selector, u8 *desc_buf, int *buf_len)
3010 {
3011 struct ufs_query_req *request = NULL;
3012 struct ufs_query_res *response = NULL;
3013 int err = 0;
3014
3015 BUG_ON(!hba);
3016
3017 ufshcd_hold(hba, false);
3018 if (!desc_buf) {
3019 dev_err(hba->dev, "%s: descriptor buffer required for opcode 0x%x\n",
3020 __func__, opcode);
3021 err = -EINVAL;
3022 goto out;
3023 }
3024
3025 if (*buf_len < QUERY_DESC_MIN_SIZE || *buf_len > QUERY_DESC_MAX_SIZE) {
3026 dev_err(hba->dev, "%s: descriptor buffer size (%d) is out of range\n",
3027 __func__, *buf_len);
3028 err = -EINVAL;
3029 goto out;
3030 }
3031
3032 mutex_lock(&hba->dev_cmd.lock);
3033 ufshcd_init_query(hba, &request, &response, opcode, idn, index,
3034 selector);
3035 hba->dev_cmd.query.descriptor = desc_buf;
3036 request->upiu_req.length = cpu_to_be16(*buf_len);
3037
3038 switch (opcode) {
3039 case UPIU_QUERY_OPCODE_WRITE_DESC:
3040 request->query_func = UPIU_QUERY_FUNC_STANDARD_WRITE_REQUEST;
3041 break;
3042 case UPIU_QUERY_OPCODE_READ_DESC:
3043 request->query_func = UPIU_QUERY_FUNC_STANDARD_READ_REQUEST;
3044 break;
3045 default:
3046 dev_err(hba->dev,
3047 "%s: Expected query descriptor opcode but got = 0x%.2x\n",
3048 __func__, opcode);
3049 err = -EINVAL;
3050 goto out_unlock;
3051 }
3052
3053 err = ufshcd_exec_dev_cmd(hba, DEV_CMD_TYPE_QUERY, QUERY_REQ_TIMEOUT);
3054
3055 if (err) {
3056 dev_err(hba->dev, "%s: opcode 0x%.2x for idn %d failed, index %d, err = %d\n",
3057 __func__, opcode, idn, index, err);
3058 goto out_unlock;
3059 }
3060
3061 hba->dev_cmd.query.descriptor = NULL;
3062 *buf_len = be16_to_cpu(response->upiu_res.length);
3063
3064 out_unlock:
3065 mutex_unlock(&hba->dev_cmd.lock);
3066 out:
3067 ufshcd_release(hba);
3068 return err;
3069 }
3070
3071 /**
3072 * ufshcd_query_descriptor_retry - API function for sending descriptor
3073 * requests
3074 * @hba: per-adapter instance
3075 * @opcode: query opcode
3076 * @idn: descriptor idn to access
3077 * @index: index field
3078 * @selector: selector field
3079 * @desc_buf: the buffer that contains the descriptor
3080 * @buf_len: length parameter passed to the device
3081 *
3082 * Returns 0 for success, non-zero in case of failure.
3083 * The buf_len parameter will contain, on return, the length parameter
3084 * received on the response.
3085 */
3086 static int ufshcd_query_descriptor_retry(struct ufs_hba *hba,
3087 enum query_opcode opcode,
3088 enum desc_idn idn, u8 index,
3089 u8 selector,
3090 u8 *desc_buf, int *buf_len)
3091 {
3092 int err;
3093 int retries;
3094
3095 for (retries = QUERY_REQ_RETRIES; retries > 0; retries--) {
3096 err = __ufshcd_query_descriptor(hba, opcode, idn, index,
3097 selector, desc_buf, buf_len);
3098 if (!err || err == -EINVAL)
3099 break;
3100 }
3101
3102 return err;
3103 }
3104
3105 /**
3106 * ufshcd_read_desc_length - read the specified descriptor length from header
3107 * @hba: Pointer to adapter instance
3108 * @desc_id: descriptor idn value
3109 * @desc_index: descriptor index
3110 * @desc_length: pointer to variable to read the length of descriptor
3111 *
3112 * Return 0 in case of success, non-zero otherwise
3113 */
3114 static int ufshcd_read_desc_length(struct ufs_hba *hba,
3115 enum desc_idn desc_id,
3116 int desc_index,
3117 int *desc_length)
3118 {
3119 int ret;
3120 u8 header[QUERY_DESC_HDR_SIZE];
3121 int header_len = QUERY_DESC_HDR_SIZE;
3122
3123 if (desc_id >= QUERY_DESC_IDN_MAX)
3124 return -EINVAL;
3125
3126 ret = ufshcd_query_descriptor_retry(hba, UPIU_QUERY_OPCODE_READ_DESC,
3127 desc_id, desc_index, 0, header,
3128 &header_len);
3129
3130 if (ret) {
3131 dev_err(hba->dev, "%s: Failed to get descriptor header id %d",
3132 __func__, desc_id);
3133 return ret;
3134 } else if (desc_id != header[QUERY_DESC_DESC_TYPE_OFFSET]) {
3135 dev_warn(hba->dev, "%s: descriptor header id %d and desc_id %d mismatch",
3136 __func__, header[QUERY_DESC_DESC_TYPE_OFFSET],
3137 desc_id);
3138 ret = -EINVAL;
3139 }
3140
3141 *desc_length = header[QUERY_DESC_LENGTH_OFFSET];
3142 return ret;
3143
3144 }
3145
3146 /**
3147 * ufshcd_map_desc_id_to_length - map descriptor IDN to its length
3148 * @hba: Pointer to adapter instance
3149 * @desc_id: descriptor idn value
3150 * @desc_len: mapped desc length (out)
3151 *
3152 * Return 0 in case of success, non-zero otherwise
3153 */
3154 int ufshcd_map_desc_id_to_length(struct ufs_hba *hba,
3155 enum desc_idn desc_id, int *desc_len)
3156 {
3157 switch (desc_id) {
3158 case QUERY_DESC_IDN_DEVICE:
3159 *desc_len = hba->desc_size.dev_desc;
3160 break;
3161 case QUERY_DESC_IDN_POWER:
3162 *desc_len = hba->desc_size.pwr_desc;
3163 break;
3164 case QUERY_DESC_IDN_GEOMETRY:
3165 *desc_len = hba->desc_size.geom_desc;
3166 break;
3167 case QUERY_DESC_IDN_CONFIGURATION:
3168 *desc_len = hba->desc_size.conf_desc;
3169 break;
3170 case QUERY_DESC_IDN_UNIT:
3171 *desc_len = hba->desc_size.unit_desc;
3172 break;
3173 case QUERY_DESC_IDN_INTERCONNECT:
3174 *desc_len = hba->desc_size.interc_desc;
3175 break;
3176 case QUERY_DESC_IDN_STRING:
3177 *desc_len = QUERY_DESC_MAX_SIZE;
3178 break;
3179 case QUERY_DESC_IDN_RFU_0:
3180 case QUERY_DESC_IDN_RFU_1:
3181 *desc_len = 0;
3182 break;
3183 default:
3184 *desc_len = 0;
3185 return -EINVAL;
3186 }
3187 return 0;
3188 }
3189 EXPORT_SYMBOL(ufshcd_map_desc_id_to_length);
3190
3191 /**
3192 * ufshcd_read_desc_param - read the specified descriptor parameter
3193 * @hba: Pointer to adapter instance
3194 * @desc_id: descriptor idn value
3195 * @desc_index: descriptor index
3196 * @param_offset: offset of the parameter to read
3197 * @param_read_buf: pointer to buffer where parameter would be read
3198 * @param_size: sizeof(param_read_buf)
3199 *
3200 * Return 0 in case of success, non-zero otherwise
3201 */
3202 static int ufshcd_read_desc_param(struct ufs_hba *hba,
3203 enum desc_idn desc_id,
3204 int desc_index,
3205 u8 param_offset,
3206 u8 *param_read_buf,
3207 u8 param_size)
3208 {
3209 int ret;
3210 u8 *desc_buf;
3211 int buff_len;
3212 bool is_kmalloc = true;
3213
3214 /* Safety check */
3215 if (desc_id >= QUERY_DESC_IDN_MAX || !param_size)
3216 return -EINVAL;
3217
3218 /* Get the max length of descriptor from structure filled up at probe
3219 * time.
3220 */
3221 ret = ufshcd_map_desc_id_to_length(hba, desc_id, &buff_len);
3222
3223 /* Sanity checks */
3224 if (ret || !buff_len) {
3225 dev_err(hba->dev, "%s: Failed to get full descriptor length",
3226 __func__);
3227 return ret;
3228 }
3229
3230 /* Check whether we need temp memory */
3231 if (param_offset != 0 || param_size < buff_len) {
3232 desc_buf = kmalloc(buff_len, GFP_KERNEL);
3233 if (!desc_buf)
3234 return -ENOMEM;
3235 } else {
3236 desc_buf = param_read_buf;
3237 is_kmalloc = false;
3238 }
3239
3240 /* Request for full descriptor */
3241 ret = ufshcd_query_descriptor_retry(hba, UPIU_QUERY_OPCODE_READ_DESC,
3242 desc_id, desc_index, 0,
3243 desc_buf, &buff_len);
3244
3245 if (ret) {
3246 dev_err(hba->dev, "%s: Failed reading descriptor. desc_id %d, desc_index %d, param_offset %d, ret %d",
3247 __func__, desc_id, desc_index, param_offset, ret);
3248 goto out;
3249 }
3250
3251 /* Sanity check */
3252 if (desc_buf[QUERY_DESC_DESC_TYPE_OFFSET] != desc_id) {
3253 dev_err(hba->dev, "%s: invalid desc_id %d in descriptor header",
3254 __func__, desc_buf[QUERY_DESC_DESC_TYPE_OFFSET]);
3255 ret = -EINVAL;
3256 goto out;
3257 }
3258
3259 /*
3260 * While reading variable size descriptors (like string descriptor),
3261 * some UFS devices may report the "LENGTH" (field in "Transaction
3262 * Specific fields" of Query Response UPIU) same as what was requested
3263 * in Query Request UPIU instead of reporting the actual size of the
3264 * variable size descriptor.
3265 * It is safe to ignore the "LENGTH" field for variable size
3266 * descriptors because we can always derive the length of the descriptor
3267 * from the descriptor header fields. Hence the length match check below
3268 * is imposed only for fixed size descriptors (for which we always
3269 * request the correct size as part of the Query Request UPIU).
3270 */
3271 if ((desc_id != QUERY_DESC_IDN_STRING) &&
3272 (buff_len != desc_buf[QUERY_DESC_LENGTH_OFFSET])) {
3273 dev_err(hba->dev, "%s: desc_buf length mismatch: buff_len %d, buff_len(desc_header) %d",
3274 __func__, buff_len, desc_buf[QUERY_DESC_LENGTH_OFFSET]);
3275 ret = -EINVAL;
3276 goto out;
3277 }
3278 /* Make sure we do not copy more data than is available */
3279 if (is_kmalloc && param_size > buff_len)
3280 param_size = buff_len;
3281
3282 if (is_kmalloc)
3283 memcpy(param_read_buf, &desc_buf[param_offset], param_size);
3284 out:
3285 if (is_kmalloc)
3286 kfree(desc_buf);
3287 return ret;
3288 }
3289
3290 static inline int ufshcd_read_desc(struct ufs_hba *hba,
3291 enum desc_idn desc_id,
3292 int desc_index,
3293 u8 *buf,
3294 u32 size)
3295 {
3296 return ufshcd_read_desc_param(hba, desc_id, desc_index, 0, buf, size);
3297 }
3298
3299 static inline int ufshcd_read_power_desc(struct ufs_hba *hba,
3300 u8 *buf,
3301 u32 size)
3302 {
3303 return ufshcd_read_desc(hba, QUERY_DESC_IDN_POWER, 0, buf, size);
3304 }
3305
3306 static int ufshcd_read_device_desc(struct ufs_hba *hba, u8 *buf, u32 size)
3307 {
3308 return ufshcd_read_desc(hba, QUERY_DESC_IDN_DEVICE, 0, buf, size);
3309 }
3310
3311 /**
3312 * ufshcd_read_string_desc - read string descriptor
3313 * @hba: pointer to adapter instance
3314 * @desc_index: descriptor index
3315 * @buf: pointer to buffer where descriptor would be read
3316 * @size: size of buf
3317 * @ascii: if true convert from unicode to ascii characters
3318 *
3319 * Return 0 in case of success, non-zero otherwise
3320 */
3321 #define ASCII_STD true
3322 static int ufshcd_read_string_desc(struct ufs_hba *hba, int desc_index,
3323 u8 *buf, u32 size, bool ascii)
3324 {
3325 int err = 0;
3326
3327 err = ufshcd_read_desc(hba,
3328 QUERY_DESC_IDN_STRING, desc_index, buf, size);
3329
3330 if (err) {
3331 dev_err(hba->dev, "%s: reading String Desc failed after %d retries. err = %d\n",
3332 __func__, QUERY_REQ_RETRIES, err);
3333 goto out;
3334 }
3335
3336 if (ascii) {
3337 int desc_len;
3338 int ascii_len;
3339 int i;
3340 char *buff_ascii;
3341
3342 desc_len = buf[0];
3343 /* remove header and divide by 2 to move from UTF16 to UTF8 */
3344 ascii_len = (desc_len - QUERY_DESC_HDR_SIZE) / 2 + 1;
3345 if (size < ascii_len + QUERY_DESC_HDR_SIZE) {
3346 dev_err(hba->dev, "%s: buffer allocated size is too small\n",
3347 __func__);
3348 err = -ENOMEM;
3349 goto out;
3350 }
3351
3352 buff_ascii = kmalloc(ascii_len, GFP_KERNEL);
3353 if (!buff_ascii) {
3354 err = -ENOMEM;
3355 goto out;
3356 }
3357
3358 /*
3359 * the descriptor contains string in UTF16 format
3360 * we need to convert to utf-8 so it can be displayed
3361 */
3362 utf16s_to_utf8s((wchar_t *)&buf[QUERY_DESC_HDR_SIZE],
3363 desc_len - QUERY_DESC_HDR_SIZE,
3364 UTF16_BIG_ENDIAN, buff_ascii, ascii_len);
3365
3366 /* replace non-printable or non-ASCII characters with spaces */
3367 for (i = 0; i < ascii_len; i++)
3368 ufshcd_remove_non_printable(&buff_ascii[i]);
3369
3370 memset(buf + QUERY_DESC_HDR_SIZE, 0,
3371 size - QUERY_DESC_HDR_SIZE);
3372 memcpy(buf + QUERY_DESC_HDR_SIZE, buff_ascii, ascii_len);
3373 buf[QUERY_DESC_LENGTH_OFFSET] = ascii_len + QUERY_DESC_HDR_SIZE;
3374 kfree(buff_ascii);
3375 }
3376 out:
3377 return err;
3378 }
3379
3380 /**
3381 * ufshcd_read_unit_desc_param - read the specified unit descriptor parameter
3382 * @hba: Pointer to adapter instance
3383 * @lun: lun id
3384 * @param_offset: offset of the parameter to read
3385 * @param_read_buf: pointer to buffer where parameter would be read
3386 * @param_size: sizeof(param_read_buf)
3387 *
3388 * Return 0 in case of success, non-zero otherwise
3389 */
3390 static inline int ufshcd_read_unit_desc_param(struct ufs_hba *hba,
3391 int lun,
3392 enum unit_desc_param param_offset,
3393 u8 *param_read_buf,
3394 u32 param_size)
3395 {
3396 /*
3397 * Unit descriptors are only available for general purpose LUs (LUN id
3398 * from 0 to 7) and RPMB Well known LU.
3399 */
3400 if (lun != UFS_UPIU_RPMB_WLUN && (lun >= UFS_UPIU_MAX_GENERAL_LUN))
3401 return -EOPNOTSUPP;
3402
3403 return ufshcd_read_desc_param(hba, QUERY_DESC_IDN_UNIT, lun,
3404 param_offset, param_read_buf, param_size);
3405 }
3406
3407 int ufshcd_read_health_desc(struct ufs_hba *hba, u8 *buf, u32 size)
3408 {
3409 int err = 0;
3410
3411 err = ufshcd_read_desc(hba,
3412 QUERY_DESC_IDN_HEALTH, 0, buf, size);
3413
3414 if (err)
3415 dev_err(hba->dev, "%s: reading Device Health Desc failed. err = %d\n",
3416 __func__, err);
3417
3418 return err;
3419 }
3420
3421 /**
3422 * ufshcd_memory_alloc - allocate memory for host memory space data structures
3423 * @hba: per adapter instance
3424 *
3425 * 1. Allocate DMA memory for Command Descriptor array
3426 * Each command descriptor consists of Command UPIU, Response UPIU and PRDT
3427 * 2. Allocate DMA memory for UTP Transfer Request Descriptor List (UTRDL).
3428 * 3. Allocate DMA memory for UTP Task Management Request Descriptor List
3429 * (UTMRDL)
3430 * 4. Allocate memory for local reference block(lrb).
3431 *
3432 * Returns 0 for success, non-zero in case of failure
3433 */
3434 static int ufshcd_memory_alloc(struct ufs_hba *hba)
3435 {
3436 size_t utmrdl_size, utrdl_size, ucdl_size;
3437
3438 /* Allocate memory for UTP command descriptors */
3439 ucdl_size = (sizeof(struct utp_transfer_cmd_desc) * hba->nutrs);
3440 hba->ucdl_base_addr = dmam_alloc_coherent(hba->dev,
3441 ucdl_size,
3442 &hba->ucdl_dma_addr,
3443 GFP_KERNEL);
3444
3445 /*
3446 * UFSHCI requires UTP command descriptor to be 128 byte aligned.
3447 * make sure hba->ucdl_dma_addr is aligned to PAGE_SIZE
3448 * if hba->ucdl_dma_addr is aligned to PAGE_SIZE, then it will
3449 * be aligned to 128 bytes as well
3450 */
3451 if (!hba->ucdl_base_addr ||
3452 WARN_ON(hba->ucdl_dma_addr & (PAGE_SIZE - 1))) {
3453 dev_err(hba->dev,
3454 "Command Descriptor Memory allocation failed\n");
3455 goto out;
3456 }
3457
3458 /*
3459 * Allocate memory for UTP Transfer descriptors
3460 * UFSHCI requires 1024 byte alignment of UTRD
3461 */
3462 utrdl_size = (sizeof(struct utp_transfer_req_desc) * hba->nutrs);
3463 hba->utrdl_base_addr = dmam_alloc_coherent(hba->dev,
3464 utrdl_size,
3465 &hba->utrdl_dma_addr,
3466 GFP_KERNEL);
3467 if (!hba->utrdl_base_addr ||
3468 WARN_ON(hba->utrdl_dma_addr & (PAGE_SIZE - 1))) {
3469 dev_err(hba->dev,
3470 "Transfer Descriptor Memory allocation failed\n");
3471 goto out;
3472 }
3473
3474 /*
3475 * Allocate memory for UTP Task Management descriptors
3476 * UFSHCI requires 1024 byte alignment of UTMRD
3477 */
3478 utmrdl_size = sizeof(struct utp_task_req_desc) * hba->nutmrs;
3479 hba->utmrdl_base_addr = dmam_alloc_coherent(hba->dev,
3480 utmrdl_size,
3481 &hba->utmrdl_dma_addr,
3482 GFP_KERNEL);
3483 if (!hba->utmrdl_base_addr ||
3484 WARN_ON(hba->utmrdl_dma_addr & (PAGE_SIZE - 1))) {
3485 dev_err(hba->dev,
3486 "Task Management Descriptor Memory allocation failed\n");
3487 goto out;
3488 }
3489
3490 /* Allocate memory for local reference block */
3491 hba->lrb = devm_kzalloc(hba->dev,
3492 hba->nutrs * sizeof(struct ufshcd_lrb),
3493 GFP_KERNEL);
3494 if (!hba->lrb) {
3495 dev_err(hba->dev, "LRB Memory allocation failed\n");
3496 goto out;
3497 }
3498 return 0;
3499 out:
3500 return -ENOMEM;
3501 }
3502
3503 /**
3504 * ufshcd_host_memory_configure - configure local reference block with
3505 * memory offsets
3506 * @hba: per adapter instance
3507 *
3508 * Configure Host memory space
3509 * 1. Update Corresponding UTRD.UCDBA and UTRD.UCDBAU with UCD DMA
3510 * address.
3511 * 2. Update each UTRD with Response UPIU offset, Response UPIU length
3512 * and PRDT offset.
3513 * 3. Save the corresponding addresses of UTRD, UCD.CMD, UCD.RSP and UCD.PRDT
3514 * into local reference block.
3515 */
3516 static void ufshcd_host_memory_configure(struct ufs_hba *hba)
3517 {
3518 struct utp_transfer_cmd_desc *cmd_descp;
3519 struct utp_transfer_req_desc *utrdlp;
3520 dma_addr_t cmd_desc_dma_addr;
3521 dma_addr_t cmd_desc_element_addr;
3522 u16 response_offset;
3523 u16 prdt_offset;
3524 int cmd_desc_size;
3525 int i;
3526
3527 utrdlp = hba->utrdl_base_addr;
3528 cmd_descp = hba->ucdl_base_addr;
3529
3530 response_offset =
3531 offsetof(struct utp_transfer_cmd_desc, response_upiu);
3532 prdt_offset =
3533 offsetof(struct utp_transfer_cmd_desc, prd_table);
3534
3535 cmd_desc_size = sizeof(struct utp_transfer_cmd_desc);
3536 cmd_desc_dma_addr = hba->ucdl_dma_addr;
3537
3538 for (i = 0; i < hba->nutrs; i++) {
3539 /* Configure UTRD with command descriptor base address */
3540 cmd_desc_element_addr =
3541 (cmd_desc_dma_addr + (cmd_desc_size * i));
3542 utrdlp[i].command_desc_base_addr_lo =
3543 cpu_to_le32(lower_32_bits(cmd_desc_element_addr));
3544 utrdlp[i].command_desc_base_addr_hi =
3545 cpu_to_le32(upper_32_bits(cmd_desc_element_addr));
3546
3547 /* Response UPIU and PRDT offsets are in dwords, or in bytes with the PRDT_BYTE_GRAN quirk */
3548 if (hba->quirks & UFSHCD_QUIRK_PRDT_BYTE_GRAN) {
3549 utrdlp[i].response_upiu_offset =
3550 cpu_to_le16(response_offset);
3551 utrdlp[i].prd_table_offset =
3552 cpu_to_le16(prdt_offset);
3553 utrdlp[i].response_upiu_length =
3554 cpu_to_le16(ALIGNED_UPIU_SIZE);
3555 } else {
3556 utrdlp[i].response_upiu_offset =
3557 cpu_to_le16((response_offset >> 2));
3558 utrdlp[i].prd_table_offset =
3559 cpu_to_le16((prdt_offset >> 2));
3560 utrdlp[i].response_upiu_length =
3561 cpu_to_le16(ALIGNED_UPIU_SIZE >> 2);
3562 }
3563
3564 hba->lrb[i].utr_descriptor_ptr = (utrdlp + i);
3565 hba->lrb[i].utrd_dma_addr = hba->utrdl_dma_addr +
3566 (i * sizeof(struct utp_transfer_req_desc));
3567 hba->lrb[i].ucd_req_ptr =
3568 (struct utp_upiu_req *)(cmd_descp + i);
3569 hba->lrb[i].ucd_req_dma_addr = cmd_desc_element_addr;
3570 hba->lrb[i].ucd_rsp_ptr =
3571 (struct utp_upiu_rsp *)cmd_descp[i].response_upiu;
3572 hba->lrb[i].ucd_rsp_dma_addr = cmd_desc_element_addr +
3573 response_offset;
3574 hba->lrb[i].ucd_prdt_ptr =
3575 (struct ufshcd_sg_entry *)cmd_descp[i].prd_table;
3576 hba->lrb[i].ucd_prdt_dma_addr = cmd_desc_element_addr +
3577 prdt_offset;
3578 }
3579 }
3580
3581 /**
3582 * ufshcd_dme_link_startup - Notify Unipro to perform link startup
3583 * @hba: per adapter instance
3584 *
3585 * UIC_CMD_DME_LINK_STARTUP command must be issued to Unipro layer,
3586 * in order to initialize the Unipro link startup procedure.
3587 * Once the Unipro links are up, the device connected to the controller
3588 * is detected.
3589 *
3590 * Returns 0 on success, non-zero value on failure
3591 */
3592 static int ufshcd_dme_link_startup(struct ufs_hba *hba)
3593 {
3594 struct uic_command uic_cmd = {0};
3595 int ret;
3596
3597 uic_cmd.command = UIC_CMD_DME_LINK_STARTUP;
3598
3599 ret = ufshcd_send_uic_cmd(hba, &uic_cmd);
3600 if (ret)
3601 dev_dbg(hba->dev,
3602 "dme-link-startup: error code %d\n", ret);
3603 return ret;
3604 }
3605
3606 static inline void ufshcd_add_delay_before_dme_cmd(struct ufs_hba *hba)
3607 {
3608 #define MIN_DELAY_BEFORE_DME_CMDS_US 1000
3609 unsigned long min_sleep_time_us;
3610
3611 if (!(hba->quirks & UFSHCD_QUIRK_DELAY_BEFORE_DME_CMDS))
3612 return;
3613
3614 /*
3615 * last_dme_cmd_tstamp will be 0 only for 1st call to
3616 * this function
3617 */
3618 if (unlikely(!ktime_to_us(hba->last_dme_cmd_tstamp))) {
3619 min_sleep_time_us = MIN_DELAY_BEFORE_DME_CMDS_US;
3620 } else {
3621 unsigned long delta =
3622 (unsigned long) ktime_to_us(
3623 ktime_sub(ktime_get(),
3624 hba->last_dme_cmd_tstamp));
3625
3626 if (delta < MIN_DELAY_BEFORE_DME_CMDS_US)
3627 min_sleep_time_us =
3628 MIN_DELAY_BEFORE_DME_CMDS_US - delta;
3629 else
3630 return; /* no more delay required */
3631 }
3632
3633 /* allow sleep for extra 50us if needed */
3634 usleep_range(min_sleep_time_us, min_sleep_time_us + 50);
3635 }
3636
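/*
 * DME_RESET and DME_ENABLE below are used in place of the HCE-based host
 * controller enable sequence when UFSHCD_QUIRK_USE_OF_HCE is set; see
 * ufshcd_hba_enable().
 */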
3637 static int ufshcd_dme_reset(struct ufs_hba *hba)
3638 {
3639 struct uic_command uic_cmd = {0};
3640 int ret;
3641
3642 uic_cmd.command = UIC_CMD_DME_RESET;
3643 uic_cmd.argument1 = 0x1;
3644
3645 ret = ufshcd_send_uic_cmd(hba, &uic_cmd);
3646 if (ret)
3647 dev_err(hba->dev,
3648 "dme-reset: error code %d\n", ret);
3649
3650 return ret;
3651 }
3652
3653 static int ufshcd_dme_enable(struct ufs_hba *hba)
3654 {
3655 struct uic_command uic_cmd = {0};
3656 int ret;
3657
3658 uic_cmd.command = UIC_CMD_DME_ENABLE;
3659
3660 ret = ufshcd_send_uic_cmd(hba, &uic_cmd);
3661 if (ret)
3662 dev_err(hba->dev,
3663 "dme-enable: error code %d\n", ret);
3664
3665 return ret;
3666 }
3667
3668 /**
3669 * ufshcd_dme_set_attr - UIC command for DME_SET, DME_PEER_SET
3670 * @hba: per adapter instance
3671 * @attr_sel: uic command argument1
3672 * @attr_set: attribute set type as uic command argument2
3673 * @mib_val: setting value as uic command argument3
3674 * @peer: indicate whether peer or local
3675 *
3676 * Returns 0 on success, non-zero value on failure
3677 */
3678 int ufshcd_dme_set_attr(struct ufs_hba *hba, u32 attr_sel,
3679 u8 attr_set, u32 mib_val, u8 peer)
3680 {
3681 struct uic_command uic_cmd = {0};
3682 static const char *const action[] = {
3683 "dme-set",
3684 "dme-peer-set"
3685 };
3686 const char *set = action[!!peer];
3687 int ret;
3688 int retries = UFS_UIC_COMMAND_RETRIES;
3689
3690 uic_cmd.command = peer ?
3691 UIC_CMD_DME_PEER_SET : UIC_CMD_DME_SET;
3692 uic_cmd.argument1 = attr_sel;
3693 uic_cmd.argument2 = UIC_ARG_ATTR_TYPE(attr_set);
3694 uic_cmd.argument3 = mib_val;
3695
3696 do {
3697 /* for peer attributes we retry upon failure */
3698 ret = ufshcd_send_uic_cmd(hba, &uic_cmd);
3699 if (ret)
3700 dev_dbg(hba->dev, "%s: attr-id 0x%x val 0x%x error code %d\n",
3701 set, UIC_GET_ATTR_ID(attr_sel), mib_val, ret);
3702 } while (ret && peer && --retries);
3703
3704 if (ret)
3705 dev_err(hba->dev, "%s: attr-id 0x%x val 0x%x failed %d retries\n",
3706 set, UIC_GET_ATTR_ID(attr_sel), mib_val,
3707 UFS_UIC_COMMAND_RETRIES - retries);
3708
3709 return ret;
3710 }
3711 EXPORT_SYMBOL_GPL(ufshcd_dme_set_attr);
3712
3713 /**
3714 * ufshcd_dme_get_attr - UIC command for DME_GET, DME_PEER_GET
3715 * @hba: per adapter instance
3716 * @attr_sel: uic command argument1
3717 * @mib_val: the value of the attribute as returned by the UIC command
3718 * @peer: indicate whether peer or local
3719 *
3720 * Returns 0 on success, non-zero value on failure
3721 */
3722 int ufshcd_dme_get_attr(struct ufs_hba *hba, u32 attr_sel,
3723 u32 *mib_val, u8 peer)
3724 {
3725 struct uic_command uic_cmd = {0};
3726 static const char *const action[] = {
3727 "dme-get",
3728 "dme-peer-get"
3729 };
3730 const char *get = action[!!peer];
3731 int ret;
3732 int retries = UFS_UIC_COMMAND_RETRIES;
3733 struct ufs_pa_layer_attr orig_pwr_info;
3734 struct ufs_pa_layer_attr temp_pwr_info;
3735 bool pwr_mode_change = false;
3736
3737 if (peer && (hba->quirks & UFSHCD_QUIRK_DME_PEER_ACCESS_AUTO_MODE)) {
3738 orig_pwr_info = hba->pwr_info;
3739 temp_pwr_info = orig_pwr_info;
3740
3741 if (orig_pwr_info.pwr_tx == FAST_MODE ||
3742 orig_pwr_info.pwr_rx == FAST_MODE) {
3743 temp_pwr_info.pwr_tx = FASTAUTO_MODE;
3744 temp_pwr_info.pwr_rx = FASTAUTO_MODE;
3745 pwr_mode_change = true;
3746 } else if (orig_pwr_info.pwr_tx == SLOW_MODE ||
3747 orig_pwr_info.pwr_rx == SLOW_MODE) {
3748 temp_pwr_info.pwr_tx = SLOWAUTO_MODE;
3749 temp_pwr_info.pwr_rx = SLOWAUTO_MODE;
3750 pwr_mode_change = true;
3751 }
3752 if (pwr_mode_change) {
3753 ret = ufshcd_change_power_mode(hba, &temp_pwr_info);
3754 if (ret)
3755 goto out;
3756 }
3757 }
3758
3759 uic_cmd.command = peer ?
3760 UIC_CMD_DME_PEER_GET : UIC_CMD_DME_GET;
3761 uic_cmd.argument1 = attr_sel;
3762
3763 do {
3764 /* for peer attributes we retry upon failure */
3765 ret = ufshcd_send_uic_cmd(hba, &uic_cmd);
3766 if (ret)
3767 dev_dbg(hba->dev, "%s: attr-id 0x%x error code %d\n",
3768 get, UIC_GET_ATTR_ID(attr_sel), ret);
3769 } while (ret && peer && --retries);
3770
3771 if (ret)
3772 dev_err(hba->dev, "%s: attr-id 0x%x failed %d retries\n",
3773 get, UIC_GET_ATTR_ID(attr_sel),
3774 UFS_UIC_COMMAND_RETRIES - retries);
3775
3776 if (mib_val && !ret)
3777 *mib_val = uic_cmd.argument3;
3778
3779 if (peer && (hba->quirks & UFSHCD_QUIRK_DME_PEER_ACCESS_AUTO_MODE)
3780 && pwr_mode_change)
3781 ufshcd_change_power_mode(hba, &orig_pwr_info);
3782 out:
3783 return ret;
3784 }
3785 EXPORT_SYMBOL_GPL(ufshcd_dme_get_attr);
3786
3787 /**
3788 * ufshcd_uic_pwr_ctrl - execute a UIC command that affects the link power
3789 * state and wait for it to take effect.
3790 *
3791 * @hba: per adapter instance
3792 * @cmd: UIC command to execute
3793 *
3794 * DME operations like DME_SET(PA_PWRMODE), DME_HIBERNATE_ENTER and
3795 * DME_HIBERNATE_EXIT take some time to take effect on both the host and
3796 * device sides of the UniPro link, so their final completion is indicated
3797 * by dedicated status bits in the Interrupt Status register (UPMS, UHES,
3798 * UHXS) in addition to the normal UIC command completion status (UCCS).
3799 * This function only returns after the relevant status bits indicate completion.
3800 *
3801 * Returns 0 on success, non-zero value on failure
3802 */
3803 static int ufshcd_uic_pwr_ctrl(struct ufs_hba *hba, struct uic_command *cmd)
3804 {
3805 struct completion uic_async_done;
3806 unsigned long flags;
3807 u8 status;
3808 int ret;
3809 bool reenable_intr = false;
3810
3811 mutex_lock(&hba->uic_cmd_mutex);
3812 init_completion(&uic_async_done);
3813 ufshcd_add_delay_before_dme_cmd(hba);
3814
3815 spin_lock_irqsave(hba->host->host_lock, flags);
3816 hba->uic_async_done = &uic_async_done;
3817 if (ufshcd_readl(hba, REG_INTERRUPT_ENABLE) & UIC_COMMAND_COMPL) {
3818 ufshcd_disable_intr(hba, UIC_COMMAND_COMPL);
3819 /*
3820 * Make sure UIC command completion interrupt is disabled before
3821 * issuing UIC command.
3822 */
3823 wmb();
3824 reenable_intr = true;
3825 }
3826 ret = __ufshcd_send_uic_cmd(hba, cmd, false);
3827 spin_unlock_irqrestore(hba->host->host_lock, flags);
3828 if (ret) {
3829 dev_err(hba->dev,
3830 "pwr ctrl cmd 0x%x with mode 0x%x uic error %d\n",
3831 cmd->command, cmd->argument3, ret);
3832 goto out;
3833 }
3834
3835 if (!wait_for_completion_timeout(hba->uic_async_done,
3836 msecs_to_jiffies(UIC_CMD_TIMEOUT))) {
3837 dev_err(hba->dev,
3838 "pwr ctrl cmd 0x%x with mode 0x%x completion timeout\n",
3839 cmd->command, cmd->argument3);
3840 ret = -ETIMEDOUT;
3841 goto out;
3842 }
3843
3844 status = ufshcd_get_upmcrs(hba, cmd);
3845 if (status != PWR_LOCAL) {
3846 dev_err(hba->dev,
3847 "pwr ctrl cmd 0x%0x failed, host upmcrs:0x%x\n",
3848 cmd->command, status);
3849 ret = (status != PWR_OK) ? status : -1;
3850 }
3851 out:
3852 /* Dump debugging information to system memory */
3853 if (ret) {
3854 ufshcd_vops_dbg_register_dump(hba);
3855 exynos_ufs_show_uic_info(hba);
3856 ufshcd_print_host_state(hba);
3857 ufshcd_print_pwr_info(hba);
3858 ufshcd_print_host_regs(hba);
3859 }
3860
3861 spin_lock_irqsave(hba->host->host_lock, flags);
3862 hba->active_uic_cmd = NULL;
3863 hba->uic_async_done = NULL;
3864 if (reenable_intr)
3865 ufshcd_enable_intr(hba, UIC_COMMAND_COMPL);
3866 spin_unlock_irqrestore(hba->host->host_lock, flags);
3867 mutex_unlock(&hba->uic_cmd_mutex);
3868
3869 return ret;
3870 }
3871
3872 /**
3873 * ufshcd_uic_change_pwr_mode - Perform the UIC power mode change
3874 * using DME_SET primitives.
3875 * @hba: per adapter instance
3876 * @mode: power mode value
3877 *
3878 * Returns 0 on success, non-zero value on failure
3879 */
3880 static int ufshcd_uic_change_pwr_mode(struct ufs_hba *hba, u8 mode)
3881 {
3882 struct uic_command uic_cmd = {0};
3883 int ret;
3884
3885 if (hba->quirks & UFSHCD_QUIRK_BROKEN_PA_RXHSUNTERMCAP) {
3886 ret = ufshcd_dme_set(hba,
3887 UIC_ARG_MIB_SEL(PA_RXHSUNTERMCAP, 0), 1);
3888 if (ret) {
3889 dev_err(hba->dev, "%s: failed to enable PA_RXHSUNTERMCAP ret %d\n",
3890 __func__, ret);
3891 goto out;
3892 }
3893 }
3894
3895 uic_cmd.command = UIC_CMD_DME_SET;
3896 uic_cmd.argument1 = UIC_ARG_MIB(PA_PWRMODE);
3897 uic_cmd.argument3 = mode;
3898 ufshcd_hold(hba, false);
3899 ret = ufshcd_uic_pwr_ctrl(hba, &uic_cmd);
3900 ufshcd_release(hba);
3901
3902 out:
3903 return ret;
3904 }
3905
3906 static int ufshcd_link_recovery(struct ufs_hba *hba)
3907 {
3908 int ret;
3909 unsigned long flags;
3910
3911 spin_lock_irqsave(hba->host->host_lock, flags);
3912 hba->ufshcd_state = UFSHCD_STATE_RESET;
3913 ufshcd_set_eh_in_progress(hba);
3914 spin_unlock_irqrestore(hba->host->host_lock, flags);
3915
3916 ret = ufshcd_host_reset_and_restore(hba);
3917
3918 spin_lock_irqsave(hba->host->host_lock, flags);
3919 if (ret)
3920 hba->ufshcd_state = UFSHCD_STATE_ERROR;
3921 ufshcd_clear_eh_in_progress(hba);
3922 spin_unlock_irqrestore(hba->host->host_lock, flags);
3923
3924 if (ret)
3925 dev_err(hba->dev, "%s: link recovery failed, err %d",
3926 __func__, ret);
3927
3928 return ret;
3929 }
3930
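/*
 * __ufshcd_uic_hibern8_enter - issue DME_HIBERNATE_ENTER and, on failure,
 * attempt link recovery. Returns -ENOLINK when recovery also fails so that
 * ufshcd_uic_hibern8_enter() does not keep retrying on a dead link.
 */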
3931 static int __ufshcd_uic_hibern8_enter(struct ufs_hba *hba)
3932 {
3933 int ret;
3934 struct uic_command uic_cmd = {0};
3935 ktime_t start = ktime_get();
3936
3937 uic_cmd.command = UIC_CMD_DME_HIBER_ENTER;
3938 ret = ufshcd_uic_pwr_ctrl(hba, &uic_cmd);
3939 trace_ufshcd_profile_hibern8(dev_name(hba->dev), "enter",
3940 ktime_to_us(ktime_sub(ktime_get(), start)), ret);
3941
3942 if (ret) {
3943 dev_err(hba->dev, "%s: hibern8 enter failed. ret = %d\n",
3944 __func__, ret);
3945 ssleep(2);
3946 /*
3947 * If link recovery fails then return an error so that the caller
3948 * does not retry the hibern8 enter again.
3949 */
3950 if (ufshcd_link_recovery(hba))
3951 ret = -ENOLINK;
3952 }
3953
3954 return ret;
3955 }
3956
3957 static int ufshcd_uic_hibern8_enter(struct ufs_hba *hba)
3958 {
3959 int ret = 0, retries;
3960
3961 for (retries = UIC_HIBERN8_ENTER_RETRIES; retries > 0; retries--) {
3962 ret = __ufshcd_uic_hibern8_enter(hba);
3963 if (!ret || ret == -ENOLINK)
3964 goto out;
3965 }
3966 out:
3967 return ret;
3968 }
3969
3970 static int ufshcd_uic_hibern8_exit(struct ufs_hba *hba)
3971 {
3972 struct uic_command uic_cmd = {0};
3973 int ret;
3974 ktime_t start = ktime_get();
3975
3977 uic_cmd.command = UIC_CMD_DME_HIBER_EXIT;
3978 ret = ufshcd_uic_pwr_ctrl(hba, &uic_cmd);
3979 trace_ufshcd_profile_hibern8(dev_name(hba->dev), "exit",
3980 ktime_to_us(ktime_sub(ktime_get(), start)), ret);
3981
3982 if (ret) {
3983 dev_err(hba->dev, "%s: hibern8 exit failed. ret = %d\n",
3984 __func__, ret);
3985 ret = ufshcd_link_recovery(hba);
3986 } else {
3988 hba->ufs_stats.last_hibern8_exit_tstamp = ktime_get();
3989 hba->ufs_stats.hibern8_exit_cnt++;
3990 }
3991
3992 return ret;
3993 }
3994
3995 /**
3996 * ufshcd_init_pwr_info - setting the POR (power on reset)
3997 * values in hba power info
3998 * @hba: per-adapter instance
3999 */
4000 static void ufshcd_init_pwr_info(struct ufs_hba *hba)
4001 {
4002 hba->pwr_info.gear_rx = UFS_PWM_G1;
4003 hba->pwr_info.gear_tx = UFS_PWM_G1;
4004 hba->pwr_info.lane_rx = 1;
4005 hba->pwr_info.lane_tx = 1;
4006 hba->pwr_info.pwr_rx = SLOWAUTO_MODE;
4007 hba->pwr_info.pwr_tx = SLOWAUTO_MODE;
4008 hba->pwr_info.hs_rate = 0;
4009 }
4010
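/*
 * ufshcd_link_hibern8_ctrl - enter (en == true) or exit (en == false)
 * hibern8, wrapping the transition with the vendor hibern8_notify()
 * PRE_CHANGE/POST_CHANGE callbacks and clearing the TCx replay and FCx
 * protection timer-expiry counters afterwards.
 */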
4011 static int ufshcd_link_hibern8_ctrl(struct ufs_hba *hba, bool en)
4012 {
4013 int ret;
4014
4015 if (hba->vops && hba->vops->hibern8_notify)
4016 hba->vops->hibern8_notify(hba, en, PRE_CHANGE);
4017
4018 if (en)
4019 ret = ufshcd_uic_hibern8_enter(hba);
4020 else
4021 ret = ufshcd_uic_hibern8_exit(hba);
4022
4023 if (ret)
4024 goto out;
4025
4026 if (hba->monitor.flag & UFSHCD_MONITOR_LEVEL2) {
4027 if (en)
4028 dev_info(hba->dev, "H8+\n");
4029 else
4030 dev_info(hba->dev, "H8-\n");
4031 }
4032
4033 if (hba->vops && hba->vops->hibern8_notify)
4034 hba->vops->hibern8_notify(hba, en, POST_CHANGE);
4035
4036 out:
4037 hba->tcx_replay_timer_expired_cnt = 0;
4038 hba->fcx_protection_timer_expired_cnt = 0;
4039
4040 return ret;
4041 }
4042
4043 /**
4044 * ufshcd_get_max_pwr_mode - reads the max power mode negotiated with device
4045 * @hba: per-adapter instance
4046 */
4047 static int ufshcd_get_max_pwr_mode(struct ufs_hba *hba)
4048 {
4049 struct ufs_pa_layer_attr *pwr_info = &hba->max_pwr_info.info;
4050
4051 if (hba->max_pwr_info.is_valid)
4052 return 0;
4053
4054 pwr_info->pwr_tx = FAST_MODE;
4055 pwr_info->pwr_rx = FAST_MODE;
4056 pwr_info->hs_rate = PA_HS_MODE_B;
4057
4058 /* Get the connected lane count */
4059 ufshcd_dme_get(hba, UIC_ARG_MIB(PA_CONNECTEDRXDATALANES),
4060 &pwr_info->lane_rx);
4061 ufshcd_dme_get(hba, UIC_ARG_MIB(PA_CONNECTEDTXDATALANES),
4062 &pwr_info->lane_tx);
4063
4064 if (!pwr_info->lane_rx || !pwr_info->lane_tx) {
4065 dev_err(hba->dev, "%s: invalid connected lanes value. rx=%d, tx=%d\n",
4066 __func__,
4067 pwr_info->lane_rx,
4068 pwr_info->lane_tx);
4069 return -EINVAL;
4070 }
4071
4072 hba->tcx_replay_timer_expired_cnt = 0;
4073 hba->fcx_protection_timer_expired_cnt = 0;
4074
4075 /* Get the peer available lane count */
4076 ufshcd_dme_peer_get(hba, UIC_ARG_MIB(PA_AVAILRXDATALANES),
4077 &pwr_info->peer_available_lane_rx);
4078 ufshcd_dme_peer_get(hba, UIC_ARG_MIB(PA_AVAILTXDATALANES),
4079 &pwr_info->peer_available_lane_tx);
4080
4081 if (!pwr_info->peer_available_lane_rx || !pwr_info->peer_available_lane_tx) {
4082 dev_err(hba->dev, "%s: invalid peer available lanes value. rx=%d, tx=%d\n",
4083 __func__,
4084 pwr_info->peer_available_lane_rx,
4085 pwr_info->peer_available_lane_tx);
4086 return -EINVAL;
4087 }
4088
4089 /*
4090 * First, get the maximum gears of HS speed.
4091 * If a zero value, it means there is no HSGEAR capability.
4092 * Then, get the maximum gears of PWM speed.
4093 */
4094 ufshcd_dme_get(hba, UIC_ARG_MIB(PA_MAXRXHSGEAR), &pwr_info->gear_rx);
4095 if (!pwr_info->gear_rx) {
4096 ufshcd_dme_get(hba, UIC_ARG_MIB(PA_MAXRXPWMGEAR),
4097 &pwr_info->gear_rx);
4098 if (!pwr_info->gear_rx) {
4099 dev_err(hba->dev, "%s: invalid max pwm rx gear read = %d\n",
4100 __func__, pwr_info->gear_rx);
4101 return -EINVAL;
4102 }
4103 pwr_info->pwr_rx = SLOW_MODE;
4104 }
4105
4106 ufshcd_dme_peer_get(hba, UIC_ARG_MIB(PA_MAXRXHSGEAR),
4107 &pwr_info->gear_tx);
4108 if (!pwr_info->gear_tx) {
4109 ufshcd_dme_peer_get(hba, UIC_ARG_MIB(PA_MAXRXPWMGEAR),
4110 &pwr_info->gear_tx);
4111 if (!pwr_info->gear_tx) {
4112 dev_err(hba->dev, "%s: invalid max pwm tx gear read = %d\n",
4113 __func__, pwr_info->gear_tx);
4114 return -EINVAL;
4115 }
4116 pwr_info->pwr_tx = SLOW_MODE;
4117 }
4118
4119 hba->max_pwr_info.is_valid = true;
4120 return 0;
4121 }
4122
4123 static int ufshcd_change_power_mode(struct ufs_hba *hba,
4124 struct ufs_pa_layer_attr *pwr_mode)
4125 {
4126 int ret;
4127
4128 /* if already configured to the requested pwr_mode */
4129 if (pwr_mode->gear_rx == hba->pwr_info.gear_rx &&
4130 pwr_mode->gear_tx == hba->pwr_info.gear_tx &&
4131 pwr_mode->lane_rx == hba->pwr_info.lane_rx &&
4132 pwr_mode->lane_tx == hba->pwr_info.lane_tx &&
4133 pwr_mode->pwr_rx == hba->pwr_info.pwr_rx &&
4134 pwr_mode->pwr_tx == hba->pwr_info.pwr_tx &&
4135 pwr_mode->hs_rate == hba->pwr_info.hs_rate) {
4136 dev_dbg(hba->dev, "%s: power already configured\n", __func__);
4137 return 0;
4138 }
4139
4140 /*
4141 * Configure attributes for power mode change with below.
4142 * - PA_RXGEAR, PA_ACTIVERXDATALANES, PA_RXTERMINATION,
4143 * - PA_TXGEAR, PA_ACTIVETXDATALANES, PA_TXTERMINATION,
4144 * - PA_HSSERIES
4145 */
4146 ufshcd_dme_set(hba, UIC_ARG_MIB(PA_RXGEAR), pwr_mode->gear_rx);
4147 ufshcd_dme_set(hba, UIC_ARG_MIB(PA_ACTIVERXDATALANES),
4148 pwr_mode->lane_rx);
4149 if (pwr_mode->pwr_rx == FASTAUTO_MODE ||
4150 pwr_mode->pwr_rx == FAST_MODE)
4151 ufshcd_dme_set(hba, UIC_ARG_MIB(PA_RXTERMINATION), TRUE);
4152 else
4153 ufshcd_dme_set(hba, UIC_ARG_MIB(PA_RXTERMINATION), FALSE);
4154
4155 ufshcd_dme_set(hba, UIC_ARG_MIB(PA_TXGEAR), pwr_mode->gear_tx);
4156 ufshcd_dme_set(hba, UIC_ARG_MIB(PA_ACTIVETXDATALANES),
4157 pwr_mode->lane_tx);
4158 if (pwr_mode->pwr_tx == FASTAUTO_MODE ||
4159 pwr_mode->pwr_tx == FAST_MODE)
4160 ufshcd_dme_set(hba, UIC_ARG_MIB(PA_TXTERMINATION), TRUE);
4161 else
4162 ufshcd_dme_set(hba, UIC_ARG_MIB(PA_TXTERMINATION), FALSE);
4163
4164 if (pwr_mode->pwr_rx == FASTAUTO_MODE ||
4165 pwr_mode->pwr_tx == FASTAUTO_MODE ||
4166 pwr_mode->pwr_rx == FAST_MODE ||
4167 pwr_mode->pwr_tx == FAST_MODE)
4168 ufshcd_dme_set(hba, UIC_ARG_MIB(PA_HSSERIES),
4169 pwr_mode->hs_rate);
4170
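/*
 * PA_PWRMODE packs the RX mode into the upper nibble and the TX mode into
 * the lower nibble, e.g. FAST_MODE (0x1) on both directions yields 0x11.
 */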
4171 ret = ufshcd_uic_change_pwr_mode(hba, pwr_mode->pwr_rx << 4
4172 | pwr_mode->pwr_tx);
4173
4174 if (ret) {
4175 dev_err(hba->dev,
4176 "%s: power mode change failed %d\n", __func__, ret);
4177 } else {
4178 ufshcd_hold(hba, false);
4179 ret = ufshcd_vops_pwr_change_notify(hba, POST_CHANGE, NULL,
4180 pwr_mode);
4181 ufshcd_release(hba);
4182 if (ret)
4183 goto out;
4184
4185 memcpy(&hba->pwr_info, pwr_mode,
4186 sizeof(struct ufs_pa_layer_attr));
4187 }
4188
4189 out:
4190 return ret;
4191 }
4192
4193 /**
4194 * ufshcd_config_pwr_mode - configure a new power mode
4195 * @hba: per-adapter instance
4196 * @desired_pwr_mode: desired power configuration
4197 */
4198 int ufshcd_config_pwr_mode(struct ufs_hba *hba,
4199 struct ufs_pa_layer_attr *desired_pwr_mode)
4200 {
4201 struct ufs_pa_layer_attr final_params = { 0 };
4202 int ret;
4203
4204 ufshcd_hold(hba, false);
4205 ret = ufshcd_vops_pwr_change_notify(hba, PRE_CHANGE,
4206 desired_pwr_mode, &final_params);
4207
4208 if (ret) {
4209 if (ret == -ENOTSUPP)
4210 memcpy(&final_params, desired_pwr_mode, sizeof(final_params));
4211 else
4212 goto out;
4213 }
4214
4215 ret = ufshcd_change_power_mode(hba, &final_params);
4216 if (!ret)
4217 ufshcd_print_pwr_info(hba);
4218 out:
4219 ufshcd_release(hba);
4220 return ret;
4221 }
4222 EXPORT_SYMBOL_GPL(ufshcd_config_pwr_mode);
4223
4224 /**
4225 * ufshcd_complete_dev_init() - checks device readiness
4226 * @hba: per-adapter instance
4227 *
4228 * Set fDeviceInit flag and poll until device toggles it.
4229 */
4230 static int ufshcd_complete_dev_init(struct ufs_hba *hba)
4231 {
4232 int i;
4233 int err;
4234 bool flag_res = 1;
4235
4236 err = ufshcd_query_flag_retry(hba, UPIU_QUERY_OPCODE_SET_FLAG,
4237 QUERY_FLAG_IDN_FDEVICEINIT, NULL);
4238 if (err) {
4239 dev_err(hba->dev,
4240 "%s setting fDeviceInit flag failed with error %d\n",
4241 __func__, err);
4242 goto out;
4243 }
4244
4245 /* poll for max. 1000 iterations for fDeviceInit flag to clear */
4246 for (i = 0; i < 1000 && !err && flag_res; i++)
4247 err = ufshcd_query_flag_retry(hba, UPIU_QUERY_OPCODE_READ_FLAG,
4248 QUERY_FLAG_IDN_FDEVICEINIT, &flag_res);
4249
4250 if (!err && flag_res)
4251 udelay(100);
4252
4253 if (err)
4254 dev_err(hba->dev,
4255 "%s reading fDeviceInit flag failed with error %d\n",
4256 __func__, err);
4257 else if (flag_res)
4258 dev_err(hba->dev,
4259 "%s fDeviceInit was not cleared by the device\n",
4260 __func__);
4261
4262 out:
4263 return err;
4264 }
4265
4266 /**
4267 * ufshcd_make_hba_operational - Make UFS controller operational
4268 * @hba: per adapter instance
4269 *
4270 * To bring UFS host controller to operational state,
4271 * 1. Enable required interrupts
4272 * 2. Configure interrupt aggregation
4273 * 3. Program UTRL and UTMRL base address
4274 * 4. Configure run-stop-registers
4275 *
4276 * Returns 0 on success, non-zero value on failure
4277 */
4278 static int ufshcd_make_hba_operational(struct ufs_hba *hba)
4279 {
4280 int err = 0;
4281 u32 reg;
4282
4283 /* Enable required interrupts */
4284 ufshcd_enable_intr(hba, UFSHCD_ENABLE_INTRS);
4285
4286 /* Configure interrupt aggregation */
4287 if (ufshcd_is_intr_aggr_allowed(hba))
4288 ufshcd_config_intr_aggr(hba, hba->nutrs - 1, INT_AGGR_DEF_TO);
4289 else
4290 ufshcd_disable_intr_aggr(hba);
4291
4292 /* Configure UTRL and UTMRL base address registers */
4293 ufshcd_writel(hba, lower_32_bits(hba->utrdl_dma_addr),
4294 REG_UTP_TRANSFER_REQ_LIST_BASE_L);
4295 ufshcd_writel(hba, upper_32_bits(hba->utrdl_dma_addr),
4296 REG_UTP_TRANSFER_REQ_LIST_BASE_H);
4297 ufshcd_writel(hba, lower_32_bits(hba->utmrdl_dma_addr),
4298 REG_UTP_TASK_REQ_LIST_BASE_L);
4299 ufshcd_writel(hba, upper_32_bits(hba->utmrdl_dma_addr),
4300 REG_UTP_TASK_REQ_LIST_BASE_H);
4301
4302 /*
4303 * Make sure base address and interrupt setup are updated before
4304 * enabling the run/stop registers below.
4305 */
4306 wmb();
4307
4308 /*
4309 * UCRDY, UTMRLDY and UTRLRDY bits must be 1
4310 */
4311 reg = ufshcd_readl(hba, REG_CONTROLLER_STATUS);
4312 if (!(ufshcd_get_lists_status(reg))) {
4313 ufshcd_enable_run_stop_reg(hba);
4314 } else {
4315 dev_err(hba->dev,
4316 "Host controller not ready to process requests");
4317 err = -EIO;
4318 goto out;
4319 }
4320
4321 out:
4322 return err;
4323 }
4324
4325 /**
4326 * ufshcd_hba_stop - Send controller to reset state
4327 * @hba: per adapter instance
4328 * @can_sleep: perform sleep or just spin
4329 */
4330 static inline void ufshcd_hba_stop(struct ufs_hba *hba, bool can_sleep)
4331 {
4332 int err;
4333
4334 ufshcd_writel(hba, CONTROLLER_DISABLE, REG_CONTROLLER_ENABLE);
4335 err = ufshcd_wait_for_register(hba, REG_CONTROLLER_ENABLE,
4336 CONTROLLER_ENABLE, CONTROLLER_DISABLE,
4337 10, 1, can_sleep);
4338 if (err)
4339 dev_err(hba->dev, "%s: Controller disable failed\n", __func__);
4340 }
4341
4342 /**
4343 * __ufshcd_hba_enable - initialize the controller
4344 * @hba: per adapter instance
4345 *
4346 * The controller resets itself and controller firmware initialization
4347 * sequence kicks off. When controller is ready it will set
4348 * the Host Controller Enable bit to 1.
4349 *
4350 * Returns 0 on success, non-zero value on failure
4351 */
4352 static int __ufshcd_hba_enable(struct ufs_hba *hba)
4353 {
4354 int retry;
4355
4356 /*
4357 * msleep of 1 and 5 used in this function might result in msleep(20),
4358 * but it was necessary to send the UFS FPGA to reset mode during
4359 * development and testing of this driver. msleep can be changed to
4360 * mdelay and retry count can be reduced based on the controller.
4361 */
4362 if (!ufshcd_is_hba_active(hba))
4363 /* change controller state to "reset state" */
4364 ufshcd_hba_stop(hba, true);
4365
4366 /* UniPro link is disabled at this point */
4367 ufshcd_set_link_off(hba);
4368
4369 ufshcd_vops_hce_enable_notify(hba, PRE_CHANGE);
4370
4371 /* start controller initialization sequence */
4372 ufshcd_hba_start(hba);
4373
4374 /*
4375 * To initialize a UFS host controller HCE bit must be set to 1.
4376 * During initialization the HCE bit value changes from 1->0->1.
4377 * When the host controller completes initialization sequence
4378 * it sets the value of HCE bit to 1. The same HCE bit is read back
4379 * to check if the controller has completed initialization sequence.
4380 * Without this delay, the HCE = 1 value written by the previous
4381 * instruction might be read back before the controller has restarted.
4382 * This delay can be changed based on the controller.
4383 */
4384 msleep(1);
4385
4386 /* wait for the host controller to complete initialization */
4387 retry = 10;
4388 while (ufshcd_is_hba_active(hba)) {
4389 if (retry) {
4390 retry--;
4391 } else {
4392 dev_err(hba->dev,
4393 "Controller enable failed\n");
4394 return -EIO;
4395 }
4396 msleep(5);
4397 }
4398
4399 /* enable UIC related interrupts */
4400 ufshcd_enable_intr(hba, UFSHCD_UIC_MASK);
4401
4402 ufshcd_vops_hce_enable_notify(hba, POST_CHANGE);
4403
4404 return 0;
4405 }
4406
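/*
 * ufshcd_disable_tx_lcc - clear TX_LCC_ENABLE on every connected TX lane,
 * either locally or on the peer (device) side depending on @peer. Used for
 * hosts with UFSHCD_QUIRK_BROKEN_LCC, see ufshcd_link_startup().
 */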
4407 static int ufshcd_disable_tx_lcc(struct ufs_hba *hba, bool peer)
4408 {
4409 int tx_lanes, i, err = 0;
4410
4411 if (!peer)
4412 ufshcd_dme_get(hba, UIC_ARG_MIB(PA_CONNECTEDTXDATALANES),
4413 &tx_lanes);
4414 else
4415 ufshcd_dme_peer_get(hba, UIC_ARG_MIB(PA_CONNECTEDTXDATALANES),
4416 &tx_lanes);
4417 for (i = 0; i < tx_lanes; i++) {
4418 if (!peer)
4419 err = ufshcd_dme_set(hba,
4420 UIC_ARG_MIB_SEL(TX_LCC_ENABLE,
4421 UIC_ARG_MPHY_TX_GEN_SEL_INDEX(i)),
4422 0);
4423 else
4424 err = ufshcd_dme_peer_set(hba,
4425 UIC_ARG_MIB_SEL(TX_LCC_ENABLE,
4426 UIC_ARG_MPHY_TX_GEN_SEL_INDEX(i)),
4427 0);
4428 if (err) {
4429 dev_err(hba->dev, "%s: TX LCC Disable failed, peer = %d, lane = %d, err = %d",
4430 __func__, peer, i, err);
4431 break;
4432 }
4433 }
4434
4435 return err;
4436 }
4437
4438 static inline int ufshcd_disable_device_tx_lcc(struct ufs_hba *hba)
4439 {
4440 return ufshcd_disable_tx_lcc(hba, true);
4441 }
4442
4443 static int ufshcd_hba_enable(struct ufs_hba *hba)
4444 {
4445 int ret;
4446 unsigned long flags;
4447
4448 ufshcd_hold(hba, false);
4449
4450 spin_lock_irqsave(hba->host->host_lock, flags);
4451 hba->ufshcd_state = UFSHCD_STATE_RESET;
4452 spin_unlock_irqrestore(hba->host->host_lock, flags);
4453
4454 if (hba->vops && hba->vops->host_reset)
4455 hba->vops->host_reset(hba);
4456
4457 if (hba->quirks & UFSHCD_QUIRK_USE_OF_HCE) {
4458 ufshcd_set_link_off(hba);
4459
4460 /* enable UIC related interrupts */
4461 ufshcd_enable_intr(hba, UFSHCD_UIC_MASK);
4462
4463 ret = ufshcd_dme_reset(hba);
4464 if (!ret)
4465 ret = ufshcd_dme_enable(hba);
4466 } else {
4467 ret = __ufshcd_hba_enable(hba);
4468 }
4469 ufshcd_release(hba);
4470
4471 if (ret)
4472 dev_err(hba->dev, "Host controller enable failed\n");
4473
4474 return ret;
4475 }
4476
4477 /**
4478 * ufshcd_link_startup - Initialize unipro link startup
4479 * @hba: per adapter instance
4480 *
4481 * Returns 0 for success, non-zero in case of failure
4482 */
4483 static int ufshcd_link_startup(struct ufs_hba *hba)
4484 {
4485 int ret;
4486 int retries = DME_LINKSTARTUP_RETRIES;
4487
4488 ufshcd_hold(hba, false);
4489
4490 do {
4491 ufshcd_vops_link_startup_notify(hba, PRE_CHANGE);
4492
4493 ret = ufshcd_dme_link_startup(hba);
4494
4495 /* check if device is detected by inter-connect layer */
4496 if (!ret && !ufshcd_is_device_present(hba)) {
4497 dev_err(hba->dev, "%s: Device not present\n", __func__);
4498 ret = -ENXIO;
4499 goto out;
4500 }
4501
4502 /*
4503 * DME link lost indication is only received when link is up,
4504 * but we can't be sure if the link is up until link startup
4505 * succeeds. So reset the local Uni-Pro and try again.
4506 */
4507 if ((ret && !retries) || (ret && ufshcd_hba_enable(hba)))
4508 goto out;
4509 } while (ret && retries--);
4510
4511 if (ret)
4512 /* failed to get the link up... retire */
4513 goto out;
4514
4515 /* Mark that link is up in PWM-G1, 1-lane, SLOW-AUTO mode */
4516 ufshcd_init_pwr_info(hba);
4517 ufshcd_print_pwr_info(hba);
4518
4519 if (hba->quirks & UFSHCD_QUIRK_BROKEN_LCC) {
4520 ret = ufshcd_disable_device_tx_lcc(hba);
4521 if (ret)
4522 goto out;
4523 }
4524
4525 /* Include any host controller configuration via UIC commands */
4526 ret = ufshcd_vops_link_startup_notify(hba, POST_CHANGE);
4527 if (ret)
4528 goto out;
4529
4530 ret = ufshcd_make_hba_operational(hba);
4531 out:
4532 ufshcd_release(hba);
4533
4534 if (ret) {
4535 dev_err(hba->dev, "link startup failed %d\n", ret);
4536 ufshcd_print_host_state(hba);
4537 ufshcd_print_pwr_info(hba);
4538 ufshcd_print_host_regs(hba);
4539 }
4540 return ret;
4541 }
4542
4543 /**
4544 * ufshcd_verify_dev_init() - Verify device initialization
4545 * @hba: per-adapter instance
4546 *
4547 * Send NOP OUT UPIU and wait for NOP IN response to check whether the
4548 * device Transport Protocol (UTP) layer is ready after a reset.
4549 * If the UTP layer at the device side is not initialized, it may
4550 * not respond with NOP IN UPIU within timeout of %NOP_OUT_TIMEOUT
4551 * and we retry sending NOP OUT for %NOP_OUT_RETRIES iterations.
4552 */
4553 static int ufshcd_verify_dev_init(struct ufs_hba *hba)
4554 {
4555 int err = 0;
4556 int retries;
4557
4558 ufshcd_hold(hba, false);
4559 mutex_lock(&hba->dev_cmd.lock);
4560 for (retries = NOP_OUT_RETRIES; retries > 0; retries--) {
4561 err = ufshcd_exec_dev_cmd(hba, DEV_CMD_TYPE_NOP,
4562 NOP_OUT_TIMEOUT);
4563
4564 if (!err || err == -ETIMEDOUT)
4565 break;
4566
4567 dev_dbg(hba->dev, "%s: error %d retrying\n", __func__, err);
4568 }
4569 mutex_unlock(&hba->dev_cmd.lock);
4570 ufshcd_release(hba);
4571
4572 if (err)
4573 dev_err(hba->dev, "%s: NOP OUT failed %d\n", __func__, err);
4574 return err;
4575 }
4576
4577 /**
4578 * ufshcd_set_queue_depth - set lun queue depth
4579 * @sdev: pointer to SCSI device
4580 *
4581 * Read bLUQueueDepth value and activate scsi tagged command
4582 * queueing. For WLUN, queue depth is set to 1. For best-effort
4583 * cases (bLUQueueDepth = 0) the queue depth is set to a maximum
4584 * value that host can queue.
4585 */
4586 static void ufshcd_set_queue_depth(struct scsi_device *sdev)
4587 {
4588 int ret = 0;
4589 u8 lun_qdepth;
4590 struct ufs_hba *hba;
4591
4592 hba = shost_priv(sdev->host);
4593
4594 lun_qdepth = hba->nutrs;
4595 ret = ufshcd_read_unit_desc_param(hba,
4596 ufshcd_scsi_to_upiu_lun(sdev->lun),
4597 UNIT_DESC_PARAM_LU_Q_DEPTH,
4598 &lun_qdepth,
4599 sizeof(lun_qdepth));
4600
4601 /* Some WLUN doesn't support unit descriptor */
4602 if (ret == -EOPNOTSUPP)
4603 lun_qdepth = 1;
4604 else if (!lun_qdepth)
4605 /* eventually, we can figure out the real queue depth */
4606 lun_qdepth = hba->nutrs;
4607 else
4608 lun_qdepth = min_t(int, lun_qdepth, hba->nutrs);
4609
4610 dev_dbg(hba->dev, "%s: activate tcq with queue depth %d\n",
4611 __func__, lun_qdepth);
4612 scsi_change_queue_depth(sdev, lun_qdepth);
4613 }
4614
4615 /**
4616 * ufshcd_get_lu_wp - returns the "b_lu_write_protect" from UNIT DESCRIPTOR
4617 * @hba: per-adapter instance
4618 * @lun: UFS device lun id
4619 * @b_lu_write_protect: pointer to buffer to hold the LU's write protect info
4620 *
4621 * Returns 0 in case of success and the b_lu_write_protect status is returned
4622 * in the @b_lu_write_protect parameter.
4623 * Returns -ENOTSUPP if reading b_lu_write_protect is not supported.
4624 * Returns -EINVAL in case of invalid parameters passed to this function.
4625 */
4626 static int ufshcd_get_lu_wp(struct ufs_hba *hba,
4627 u8 lun,
4628 u8 *b_lu_write_protect)
4629 {
4630 int ret;
4631
4632 if (!b_lu_write_protect)
4633 ret = -EINVAL;
4634 /*
4635 * According to UFS device spec, RPMB LU can't be write
4636 * protected so skip reading bLUWriteProtect parameter for
4637 * it. For other W-LUs, UNIT DESCRIPTOR is not available.
4638 */
4639 else if (lun >= UFS_UPIU_MAX_GENERAL_LUN)
4640 ret = -ENOTSUPP;
4641 else
4642 ret = ufshcd_read_unit_desc_param(hba,
4643 lun,
4644 UNIT_DESC_PARAM_LU_WR_PROTECT,
4645 b_lu_write_protect,
4646 sizeof(*b_lu_write_protect));
4647 return ret;
4648 }
4649
4650 /**
4651 * ufshcd_get_lu_power_on_wp_status - get LU's power on write protect
4652 * status
4653 * @hba: per-adapter instance
4654 * @sdev: pointer to SCSI device
4655 *
4656 */
4657 static inline void ufshcd_get_lu_power_on_wp_status(struct ufs_hba *hba,
4658 struct scsi_device *sdev)
4659 {
4660 if (hba->dev_info.f_power_on_wp_en &&
4661 !hba->dev_info.is_lu_power_on_wp) {
4662 u8 b_lu_write_protect;
4663
4664 if (!ufshcd_get_lu_wp(hba, ufshcd_scsi_to_upiu_lun(sdev->lun),
4665 &b_lu_write_protect) &&
4666 (b_lu_write_protect == UFS_LU_POWER_ON_WP))
4667 hba->dev_info.is_lu_power_on_wp = true;
4668 }
4669 }
4670
4671 static void ufshcd_done(struct request *rq)
4672 {
4673 struct scsi_cmnd *cmd = rq->special;
4674 scsi_dma_unmap(cmd);
4675 scsi_softirq_done(rq);
4676 }
4677
4678 /**
4679 * ufshcd_slave_alloc - handle initial SCSI device configurations
4680 * @sdev: pointer to SCSI device
4681 *
4682 * Returns success
4683 */
4684 static int ufshcd_slave_alloc(struct scsi_device *sdev)
4685 {
4686 struct ufs_hba *hba;
4687
4688 hba = shost_priv(sdev->host);
4689
4690 /* Mode sense(6) is not supported by UFS, so use Mode sense(10) */
4691 sdev->use_10_for_ms = 1;
4692
4693 /* allow SCSI layer to restart the device in case of errors */
4694 sdev->allow_restart = 1;
4695
4696 /* REPORT SUPPORTED OPERATION CODES is not supported */
4697 sdev->no_report_opcodes = 1;
4698
4699 /* WRITE_SAME command is not supported */
4700 sdev->no_write_same = 1;
4701
4702 ufshcd_set_queue_depth(sdev);
4703
4704 ufshcd_get_lu_power_on_wp_status(hba, sdev);
4705
4706 blk_queue_softirq_done(sdev->request_queue, ufshcd_done);
4707
4708 blk_queue_update_dma_alignment(sdev->request_queue, PAGE_SIZE - 1);
4709
4710 return 0;
4711 }
4712
4713 /**
4714 * ufshcd_change_queue_depth - change queue depth
4715 * @sdev: pointer to SCSI device
4716 * @depth: required depth to set
4717 *
4718 * Change queue depth and make sure the max. limits are not crossed.
4719 */
4720 static int ufshcd_change_queue_depth(struct scsi_device *sdev, int depth)
4721 {
4722 struct ufs_hba *hba = shost_priv(sdev->host);
4723
4724 if (depth > hba->nutrs)
4725 depth = hba->nutrs;
4726 return scsi_change_queue_depth(sdev, depth);
4727 }
4728
4729 /**
4730 * ufshcd_slave_configure - adjust SCSI device configurations
4731 * @sdev: pointer to SCSI device
4732 */
4733 static int ufshcd_slave_configure(struct scsi_device *sdev)
4734 {
4735 struct request_queue *q = sdev->request_queue;
4736
4737 blk_queue_update_dma_pad(q, PRDT_DATA_BYTE_COUNT_PAD - 1);
4738 blk_queue_max_segment_size(q, PRDT_DATA_BYTE_COUNT_MAX);
4739 blk_queue_update_dma_alignment(q, PAGE_SIZE - 1);
4740
4741 return 0;
4742 }
4743
4744 /**
4745 * ufshcd_slave_destroy - remove SCSI device configurations
4746 * @sdev: pointer to SCSI device
4747 */
4748 static void ufshcd_slave_destroy(struct scsi_device *sdev)
4749 {
4750 struct ufs_hba *hba;
4751
4752 hba = shost_priv(sdev->host);
4753 /* Drop the reference as it won't be needed anymore */
4754 if (ufshcd_scsi_to_upiu_lun(sdev->lun) == UFS_UPIU_UFS_DEVICE_WLUN) {
4755 unsigned long flags;
4756
4757 spin_lock_irqsave(hba->host->host_lock, flags);
4758 hba->sdev_ufs_device = NULL;
4759 spin_unlock_irqrestore(hba->host->host_lock, flags);
4760 }
4761 }
4762
4763 /**
4764 * ufshcd_task_req_compl - handle task management request completion
4765 * @hba: per adapter instance
4766 * @index: index of the completed request
4767 * @resp: task management service response
4768 *
4769 * Returns non-zero value on error, zero on success
4770 */
4771 static int ufshcd_task_req_compl(struct ufs_hba *hba, u32 index, u8 *resp)
4772 {
4773 struct utp_task_req_desc *task_req_descp;
4774 struct utp_upiu_task_rsp *task_rsp_upiup;
4775 unsigned long flags;
4776 int ocs_value;
4777 int task_result;
4778
4779 spin_lock_irqsave(hba->host->host_lock, flags);
4780
4781 task_req_descp = hba->utmrdl_base_addr;
4782 ocs_value = ufshcd_get_tmr_ocs(&task_req_descp[index]);
4783
4784 if (ocs_value == OCS_SUCCESS) {
4785 task_rsp_upiup = (struct utp_upiu_task_rsp *)
4786 task_req_descp[index].task_rsp_upiu;
4787 task_result = be32_to_cpu(task_rsp_upiup->output_param1);
4788 task_result = task_result & MASK_TM_SERVICE_RESP;
4789 if (resp)
4790 *resp = (u8)task_result;
4791 } else {
4792 dev_err(hba->dev, "%s: failed, ocs = 0x%x\n",
4793 __func__, ocs_value);
4794 }
4795 spin_unlock_irqrestore(hba->host->host_lock, flags);
4796
4797 return ocs_value;
4798 }
4799
4800 /**
4801 * ufshcd_scsi_cmd_status - Update SCSI command result based on SCSI status
4802 * @lrbp: pointer to local reference block of completed command
4803 * @scsi_status: SCSI command status
4804 *
4805 * Returns value based on SCSI command status
4806 */
4807 static inline int
4808 ufshcd_scsi_cmd_status(struct ufshcd_lrb *lrbp, int scsi_status)
4809 {
4810 int result = 0;
4811
4812 switch (scsi_status) {
4813 case SAM_STAT_CHECK_CONDITION:
4814 ufshcd_copy_sense_data(lrbp);
4815 case SAM_STAT_GOOD:
4816 result |= DID_OK << 16 |
4817 COMMAND_COMPLETE << 8 |
4818 scsi_status;
4819 break;
4820 case SAM_STAT_TASK_SET_FULL:
4821 case SAM_STAT_BUSY:
4822 case SAM_STAT_TASK_ABORTED:
4823 ufshcd_copy_sense_data(lrbp);
4824 result |= scsi_status;
4825 break;
4826 default:
4827 result |= DID_ERROR << 16;
4828 break;
4829 } /* end of switch */
4830
4831 return result;
4832 }
4833
4834 /**
4835 * ufshcd_transfer_rsp_status - Get overall status of the response
4836 * @hba: per adapter instance
4837 * @lrbp: pointer to local reference block of completed command
4838 *
4839 * Returns result of the command to notify SCSI midlayer
4840 */
4841 static inline int
4842 ufshcd_transfer_rsp_status(struct ufs_hba *hba, struct ufshcd_lrb *lrbp)
4843 {
4844 int result = 0;
4845 int scsi_status;
4846 int ocs;
4847
4848 /* overall command status of utrd */
4849 ocs = ufshcd_get_tr_ocs(lrbp);
4850
4851 switch (ocs) {
4852 case OCS_SUCCESS:
4853 case OCS_FATAL_ERROR:
4854 result = ufshcd_get_req_rsp(lrbp->ucd_rsp_ptr);
4855 hba->ufs_stats.last_hibern8_exit_tstamp = ktime_set(0, 0);
4856 switch (result) {
4857 case UPIU_TRANSACTION_RESPONSE:
4858 /*
4859 * get the response UPIU result to extract
4860 * the SCSI command status
4861 */
4862 result = ufshcd_get_rsp_upiu_result(lrbp->ucd_rsp_ptr);
4863
4864 /*
4865 * get the result based on SCSI status response
4866 * to notify the SCSI midlayer of the command status
4867 */
4868 scsi_status = result & MASK_SCSI_STATUS;
4869 result = ufshcd_scsi_cmd_status(lrbp, scsi_status);
4870
4871 /*
4872 * Currently we are only supporting BKOPs exception
4873 * events hence we can ignore BKOPs exception event
4874 * during power management callbacks. BKOPs exception
4875 * event is not expected to be raised in runtime suspend
4876 * callback as it allows the urgent bkops.
4877 * During system suspend, we are anyway forcefully
4878 * disabling the bkops and if urgent bkops is needed
4879 * it will be enabled on system resume. Long term
4880 * solution could be to abort the system suspend if
4881 * UFS device needs urgent BKOPs.
4882 */
4883 if (!hba->pm_op_in_progress &&
4884 ufshcd_is_exception_event(lrbp->ucd_rsp_ptr) &&
4885 scsi_host_in_recovery(hba->host)) {
4886 schedule_work(&hba->eeh_work);
4887 dev_info(hba->dev, "exception event reported\n");
4888 }
4889
4890 break;
4891 case UPIU_TRANSACTION_REJECT_UPIU:
4892 /* TODO: handle Reject UPIU Response */
4893 result = DID_ERROR << 16;
4894 dev_err(hba->dev,
4895 "Reject UPIU not fully implemented\n");
4896 break;
4897 default:
4898 dev_err(hba->dev,
4899 "Unexpected request response code = %x\n",
4900 result);
4901 result = DID_ERROR << 16;
4902 break;
4903 }
4904 break;
4905 case OCS_ABORTED:
4906 result |= DID_ABORT << 16;
4907 break;
4908 case OCS_INVALID_COMMAND_STATUS:
4909 result |= DID_REQUEUE << 16;
4910 break;
4911 case OCS_INVALID_CMD_TABLE_ATTR:
4912 case OCS_INVALID_PRDT_ATTR:
4913 case OCS_MISMATCH_DATA_BUF_SIZE:
4914 case OCS_MISMATCH_RESP_UPIU_SIZE:
4915 case OCS_PEER_COMM_FAILURE:
4916 default:
4917 result |= DID_ERROR << 16;
4918 dev_err(hba->dev,
4919 "OCS error from controller = %x for tag %d\n",
4920 ocs, lrbp->task_tag);
4921 ufshcd_print_host_regs(hba);
4922 ufshcd_print_host_state(hba);
4923 break;
4924 } /* end of switch */
4925
4926 if (host_byte(result) != DID_OK)
4927 ufshcd_print_trs(hba, 1 << lrbp->task_tag, true);
4928 return result;
4929 }
4930
4931 /**
4932 * ufshcd_uic_cmd_compl - handle completion of uic command
4933 * @hba: per adapter instance
4934 * @intr_status: interrupt status generated by the controller
4935 */
4936 static void ufshcd_uic_cmd_compl(struct ufs_hba *hba, u32 intr_status)
4937 {
4938 if ((intr_status & UIC_COMMAND_COMPL) && hba->active_uic_cmd) {
4939 hba->active_uic_cmd->argument2 |=
4940 ufshcd_get_uic_cmd_result(hba);
4941 hba->active_uic_cmd->argument3 =
4942 ufshcd_get_dme_attr_val(hba);
4943 complete(&hba->active_uic_cmd->done);
4944 }
4945
4946 if ((intr_status & UFSHCD_UIC_PWR_MASK) && hba->uic_async_done)
4947 complete(hba->uic_async_done);
4948 }
4949
4950 /**
4951 * __ufshcd_transfer_req_compl - handle SCSI and query command completion
4952 * @hba: per adapter instance
4953 * @completed_reqs: requests to complete
4954 */
4955 static void __ufshcd_transfer_req_compl(struct ufs_hba *hba, int reason,
4956 unsigned long completed_reqs)
4957 {
4958 struct ufshcd_lrb *lrbp;
4959 struct scsi_cmnd *cmd;
4960 int result;
4961 int index;
4962
4963 for_each_set_bit(index, &completed_reqs, hba->nutrs) {
4964 lrbp = &hba->lrb[index];
4965 cmd = lrbp->cmd;
4966 if (cmd) {
4967 ufshcd_add_command_trace(hba, index, "complete");
4968 result = ufshcd_vops_crypto_engine_clear(hba, lrbp);
4969 if (result) {
4970 dev_err(hba->dev,
4971 "%s: failed to clear crypto engine (%d)\n",
4972 __func__, result);
4973 }
4974 result = ufshcd_transfer_rsp_status(hba, lrbp);
4975 cmd->result = result;
4976 if (reason)
4977 set_host_byte(cmd, reason);
4978 /* Mark completed command as NULL in LRB */
4979 lrbp->cmd = NULL;
4980 clear_bit_unlock(index, &hba->lrb_in_use);
4981 /* Do not touch lrbp after scsi done */
4982 cmd->scsi_done(cmd);
4983 #ifdef CONFIG_SCSI_UFS_CMD_LOGGING
4984 exynos_ufs_cmd_log_end(hba, index);
4985 #endif
4986 __ufshcd_release(hba);
4987
4988 if (hba->monitor.flag & UFSHCD_MONITOR_LEVEL1)
4989 dev_info(hba->dev, "Transfer Done(%d)\n",
4990 index);
4991
4992 } else if (lrbp->command_type == UTP_CMD_TYPE_DEV_MANAGE ||
4993 lrbp->command_type == UTP_CMD_TYPE_UFS_STORAGE) {
4994 if (hba->dev_cmd.complete) {
4995 ufshcd_add_command_trace(hba, index,
4996 "dev_complete");
4997 complete(hba->dev_cmd.complete);
4998 }
4999 }
5000 if (ufshcd_is_clkscaling_supported(hba))
5001 hba->clk_scaling.active_reqs--;
5002 }
5003
5004 /* clear corresponding bits of completed commands */
5005 hba->outstanding_reqs ^= completed_reqs;
5006 #if defined(CONFIG_PM_DEVFREQ)
5007 ufshcd_clk_scaling_update_busy(hba);
5008 #endif
5009 /* we might have free'd some tags above */
5010 wake_up(&hba->dev_cmd.tag_wq);
5011 }
5012
5013 /**
5014 * ufshcd_transfer_req_compl - handle SCSI and query command completion
5015 * @hba: per adapter instance
5016 */
5017 static void ufshcd_transfer_req_compl(struct ufs_hba *hba, int reason)
5018 {
5019 unsigned long completed_reqs;
5020 u32 tr_doorbell;
5021
5022 /* Resetting interrupt aggregation counters first and reading the
5023 * DOOR_BELL afterward allows us to handle all the completed requests.
5024 * In order to prevent other interrupts starvation the DB is read once
5025 * after reset. The down side of this solution is the possibility of
5026 * false interrupt if device completes another request after resetting
5027 * aggregation and before reading the DB.
5028 */
5029 if (!ufshcd_can_reset_intr_aggr(hba) && ufshcd_is_intr_aggr_allowed(hba))
5030 ufshcd_reset_intr_aggr(hba);
5031
5032 tr_doorbell = ufshcd_readl(hba, REG_UTP_TRANSFER_REQ_DOOR_BELL);
5033 completed_reqs = tr_doorbell ^ hba->outstanding_reqs;
5034
5035 __ufshcd_transfer_req_compl(hba, reason, completed_reqs);
5036 }
5037
5038 /**
5039 * ufshcd_disable_ee - disable exception event
5040 * @hba: per-adapter instance
5041 * @mask: exception event to disable
5042 *
5043 * Disables exception event in the device so that the EVENT_ALERT
5044 * bit is not set.
5045 *
5046 * Returns zero on success, non-zero error value on failure.
5047 */
5048 static int ufshcd_disable_ee(struct ufs_hba *hba, u16 mask)
5049 {
5050 int err = 0;
5051 u32 val;
5052
5053 if (!(hba->ee_ctrl_mask & mask))
5054 goto out;
5055
5056 val = hba->ee_ctrl_mask & ~mask;
5057 val &= MASK_EE_STATUS;
5058 err = ufshcd_query_attr_retry(hba, UPIU_QUERY_OPCODE_WRITE_ATTR,
5059 QUERY_ATTR_IDN_EE_CONTROL, 0, 0, &val);
5060 if (!err)
5061 hba->ee_ctrl_mask &= ~mask;
5062 out:
5063 return err;
5064 }
5065
5066 /**
5067 * ufshcd_enable_ee - enable exception event
5068 * @hba: per-adapter instance
5069 * @mask: exception event to enable
5070 *
5071 * Enable corresponding exception event in the device to allow
5072 * device to alert host in critical scenarios.
5073 *
5074 * Returns zero on success, non-zero error value on failure.
5075 */
5076 static int ufshcd_enable_ee(struct ufs_hba *hba, u16 mask)
5077 {
5078 int err = 0;
5079 u32 val;
5080
5081 if (hba->ee_ctrl_mask & mask)
5082 goto out;
5083
5084 val = hba->ee_ctrl_mask | mask;
5085 val &= MASK_EE_STATUS;
5086 err = ufshcd_query_attr_retry(hba, UPIU_QUERY_OPCODE_WRITE_ATTR,
5087 QUERY_ATTR_IDN_EE_CONTROL, 0, 0, &val);
5088 if (!err)
5089 hba->ee_ctrl_mask |= mask;
5090 out:
5091 return err;
5092 }
5093
5094 /**
5095 * ufshcd_enable_auto_bkops - Allow device managed BKOPS
5096 * @hba: per-adapter instance
5097 *
5098 * Allow device to manage background operations on its own. Enabling
5099 * this might lead to inconsistent latencies during normal data transfers
5100 * as the device is allowed to manage its own way of handling background
5101 * operations.
5102 *
5103 * Returns zero on success, non-zero on failure.
5104 */
5105 static int ufshcd_enable_auto_bkops(struct ufs_hba *hba)
5106 {
5107 int err = 0;
5108
5109 if (hba->auto_bkops_enabled)
5110 goto out;
5111
5112 err = ufshcd_query_flag_retry(hba, UPIU_QUERY_OPCODE_SET_FLAG,
5113 QUERY_FLAG_IDN_BKOPS_EN, NULL);
5114 if (err) {
5115 dev_err(hba->dev, "%s: failed to enable bkops %d\n",
5116 __func__, err);
5117 goto out;
5118 }
5119
5120 hba->auto_bkops_enabled = true;
5121 trace_ufshcd_auto_bkops_state(dev_name(hba->dev), "Enabled");
5122
5123 /* No need of URGENT_BKOPS exception from the device */
5124 err = ufshcd_disable_ee(hba, MASK_EE_URGENT_BKOPS);
5125 if (err)
5126 dev_err(hba->dev, "%s: failed to disable exception event %d\n",
5127 __func__, err);
5128 out:
5129 return err;
5130 }
5131
5132 /**
5133 * ufshcd_disable_auto_bkops - prevent the device from doing background operations
5134 * @hba: per-adapter instance
5135 *
5136 * Disabling background operations improves command response latency but
5137 * has drawback of device moving into critical state where the device is
5138 * not-operable. Make sure to call ufshcd_enable_auto_bkops() whenever the
5139 * host is idle so that BKOPS are managed effectively without any negative
5140 * impacts.
5141 *
5142 * Returns zero on success, non-zero on failure.
5143 */
5144 static int ufshcd_disable_auto_bkops(struct ufs_hba *hba)
5145 {
5146 int err = 0;
5147
5148 if (!hba->auto_bkops_enabled)
5149 goto out;
5150
5151 /*
5152 * If host assisted BKOPs is to be enabled, make sure
5153 * urgent bkops exception is allowed.
5154 */
5155 err = ufshcd_enable_ee(hba, MASK_EE_URGENT_BKOPS);
5156 if (err) {
5157 dev_err(hba->dev, "%s: failed to enable exception event %d\n",
5158 __func__, err);
5159 goto out;
5160 }
5161
5162 err = ufshcd_query_flag_retry(hba, UPIU_QUERY_OPCODE_CLEAR_FLAG,
5163 QUERY_FLAG_IDN_BKOPS_EN, NULL);
5164 if (err) {
5165 dev_err(hba->dev, "%s: failed to disable bkops %d\n",
5166 __func__, err);
5167 ufshcd_disable_ee(hba, MASK_EE_URGENT_BKOPS);
5168 goto out;
5169 }
5170
5171 hba->auto_bkops_enabled = false;
5172 trace_ufshcd_auto_bkops_state(dev_name(hba->dev), "Disabled");
5173 out:
5174 return err;
5175 }
5176
5177 /**
5178 * ufshcd_force_reset_auto_bkops - force reset auto bkops state
5179 * @hba: per adapter instance
5180 *
5181 * After a device reset the device may toggle the BKOPS_EN flag
5182 * to default value. The s/w tracking variables should be updated
5183 * as well. This function would change the auto-bkops state based on
5184 * UFSHCD_CAP_KEEP_AUTO_BKOPS_ENABLED_EXCEPT_SUSPEND.
5185 */
5186 static void ufshcd_force_reset_auto_bkops(struct ufs_hba *hba)
5187 {
5188 if (ufshcd_keep_autobkops_enabled_except_suspend(hba)) {
5189 hba->auto_bkops_enabled = false;
5190 hba->ee_ctrl_mask |= MASK_EE_URGENT_BKOPS;
5191 ufshcd_enable_auto_bkops(hba);
5192 } else {
5193 hba->auto_bkops_enabled = true;
5194 hba->ee_ctrl_mask &= ~MASK_EE_URGENT_BKOPS;
5195 ufshcd_disable_auto_bkops(hba);
5196 }
5197 }
5198
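/* Read the device bBackgroundOpStatus attribute (QUERY_ATTR_IDN_BKOPS_STATUS). */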
5199 static inline int ufshcd_get_bkops_status(struct ufs_hba *hba, u32 *status)
5200 {
5201 return ufshcd_query_attr_retry(hba, UPIU_QUERY_OPCODE_READ_ATTR,
5202 QUERY_ATTR_IDN_BKOPS_STATUS, 0, 0, status);
5203 }
5204
5205 /**
5206 * ufshcd_bkops_ctrl - control the auto bkops based on current bkops status
5207 * @hba: per-adapter instance
5208 * @status: bkops_status value
5209 *
5210 * Read the bkops_status from the UFS device and Enable fBackgroundOpsEn
5211 * flag in the device to permit background operations if the device
5212 * bkops_status is greater than or equal to "status" argument passed to
5213 * this function, disable otherwise.
5214 *
5215 * Returns 0 for success, non-zero in case of failure.
5216 *
5217 * NOTE: Caller of this function can check the "hba->auto_bkops_enabled" flag
5218 * to know whether auto bkops is enabled or disabled after this function
5219 * returns control to it.
5220 */
5221 static int ufshcd_bkops_ctrl(struct ufs_hba *hba,
5222 enum bkops_status status)
5223 {
5224 int err;
5225 u32 curr_status = 0;
5226
5227 err = ufshcd_get_bkops_status(hba, &curr_status);
5228 if (err) {
5229 dev_err(hba->dev, "%s: failed to get BKOPS status %d\n",
5230 __func__, err);
5231 goto out;
5232 } else if (curr_status > BKOPS_STATUS_MAX) {
5233 dev_err(hba->dev, "%s: invalid BKOPS status %d\n",
5234 __func__, curr_status);
5235 err = -EINVAL;
5236 goto out;
5237 }
5238
5239 if (curr_status >= status) {
5240 err = ufshcd_enable_auto_bkops(hba);
5241 if (!err)
5242 dev_info(hba->dev, "%s: auto_bkops enabled, status : %d\n",
5243 __func__, curr_status);
5244 }
5245 else
5246 err = ufshcd_disable_auto_bkops(hba);
5247 out:
5248 return err;
5249 }
5250
5251 /**
5252 * ufshcd_urgent_bkops - handle urgent bkops exception event
5253 * @hba: per-adapter instance
5254 *
5255 * Enable fBackgroundOpsEn flag in the device to permit background
5256 * operations.
5257 *
5258 * If BKOPs is enabled, this function returns 0, 1 if the bkops is not enabled
5259 * and negative error value for any other failure.
5260 */
5261 static int ufshcd_urgent_bkops(struct ufs_hba *hba)
5262 {
5263 return ufshcd_bkops_ctrl(hba, hba->urgent_bkops_lvl);
5264 }
5265
5266 static inline int ufshcd_get_ee_status(struct ufs_hba *hba, u32 *status)
5267 {
5268 return ufshcd_query_attr_retry(hba, UPIU_QUERY_OPCODE_READ_ATTR,
5269 QUERY_ATTR_IDN_EE_STATUS, 0, 0, status);
5270 }
5271
5272 static void ufshcd_bkops_exception_event_handler(struct ufs_hba *hba)
5273 {
5274 int err;
5275 u32 curr_status = 0;
5276
5277 if (hba->is_urgent_bkops_lvl_checked)
5278 goto enable_auto_bkops;
5279
5280 err = ufshcd_get_bkops_status(hba, &curr_status);
5281 if (err) {
5282 dev_err(hba->dev, "%s: failed to get BKOPS status %d\n",
5283 __func__, err);
5284 goto out;
5285 }
5286
5287 /*
5288 * We are seeing that some devices are raising the urgent bkops
5289 * exception events even when the BKOPS status doesn't indicate performance
5290 * impacted or critical. Handle such devices by determining their urgent
5291 * bkops status at runtime.
5292 */
5293 if (curr_status < BKOPS_STATUS_PERF_IMPACT) {
5294 dev_err(hba->dev, "%s: device raised urgent BKOPS exception for bkops status %d\n",
5295 __func__, curr_status);
5296 /* update the current status as the urgent bkops level */
5297 hba->urgent_bkops_lvl = curr_status;
5298 hba->is_urgent_bkops_lvl_checked = true;
5299 }
5300
5301 enable_auto_bkops:
5302 err = ufshcd_enable_auto_bkops(hba);
5303 out:
5304 if (err < 0)
5305 dev_err(hba->dev, "%s: failed to handle urgent bkops %d\n",
5306 __func__, err);
5307 }
5308
5309 /**
5310 * ufshcd_exception_event_handler - handle exceptions raised by device
5311 * @work: pointer to work data
5312 *
5313 * Read bExceptionEventStatus attribute from the device and handle the
5314 * exception event accordingly.
5315 */
5316 static void ufshcd_exception_event_handler(struct work_struct *work)
5317 {
5318 struct ufs_hba *hba;
5319 int err;
5320 u32 status = 0;
5321 hba = container_of(work, struct ufs_hba, eeh_work);
5322
5323 pm_runtime_get_sync(hba->dev);
5324 scsi_block_requests(hba->host);
5325 err = ufshcd_get_ee_status(hba, &status);
5326 if (err) {
5327 dev_err(hba->dev, "%s: failed to get exception status %d\n",
5328 __func__, err);
5329 goto out;
5330 }
5331
5332 status &= hba->ee_ctrl_mask;
5333
5334 if (status & MASK_EE_URGENT_BKOPS)
5335 ufshcd_bkops_exception_event_handler(hba);
5336
5337 out:
5338 scsi_unblock_requests(hba->host);
5339 pm_runtime_put_sync(hba->dev);
5340 return;
5341 }
5342
5343 /* Complete requests that have door-bell cleared */
5344 static void ufshcd_complete_requests(struct ufs_hba *hba)
5345 {
5346 ufshcd_transfer_req_compl(hba, 0);
5347 ufshcd_tmc_handler(hba);
5348 }
5349
5350 /**
5351 * ufshcd_quirk_dl_nac_errors - check whether error handling is required
5352 * to recover from DL NAC errors.
5353 * @hba: per-adapter instance
5354 *
5355 * Returns true if error handling is required, false otherwise
5356 */
5357 static bool ufshcd_quirk_dl_nac_errors(struct ufs_hba *hba)
5358 {
5359 unsigned long flags;
5360 bool err_handling = true;
5361
5362 spin_lock_irqsave(hba->host->host_lock, flags);
5363 /*
5364 * UFS_DEVICE_QUIRK_RECOVERY_FROM_DL_NAC_ERRORS only works around the
5365 * device fatal error and/or DL NAC & REPLAY timeout errors.
5366 */
5367 if (hba->saved_err & (CONTROLLER_FATAL_ERROR | SYSTEM_BUS_FATAL_ERROR))
5368 goto out;
5369
5370 if ((hba->saved_err & DEVICE_FATAL_ERROR) ||
5371 ((hba->saved_err & UIC_ERROR) &&
5372 (hba->saved_uic_err & UFSHCD_UIC_DL_TCx_REPLAY_ERROR)))
5373 goto out;
5374
5375 if ((hba->saved_err & UIC_ERROR) &&
5376 (hba->saved_uic_err & UFSHCD_UIC_DL_NAC_RECEIVED_ERROR)) {
5377 int err;
5378 /*
5379 * wait for 50ms to see if we can get any other errors or not.
5380 */
5381 spin_unlock_irqrestore(hba->host->host_lock, flags);
5382 msleep(50);
5383 spin_lock_irqsave(hba->host->host_lock, flags);
5384
5385 /*
5386 * Now check whether we have received any other severe errors besides
5387 * the DL NAC error.
5388 */
5389 if ((hba->saved_err & INT_FATAL_ERRORS) ||
5390 ((hba->saved_err & UIC_ERROR) &&
5391 (hba->saved_uic_err & ~UFSHCD_UIC_DL_NAC_RECEIVED_ERROR)))
5392 goto out;
5393
5394 /*
5395 * As DL NAC is the only error received so far, send out NOP
5396 * command to confirm if link is still active or not.
5397 * - If we don't get any response then do error recovery.
5398 * - If we get response then clear the DL NAC error bit.
5399 */
5400
5401 spin_unlock_irqrestore(hba->host->host_lock, flags);
5402 err = ufshcd_verify_dev_init(hba);
5403 spin_lock_irqsave(hba->host->host_lock, flags);
5404
5405 if (err)
5406 goto out;
5407
5408 /* Link seems to be alive hence ignore the DL NAC errors */
5409 if (hba->saved_uic_err == UFSHCD_UIC_DL_NAC_RECEIVED_ERROR)
5410 hba->saved_err &= ~UIC_ERROR;
5411 /* clear NAC error */
5412 hba->saved_uic_err &= ~UFSHCD_UIC_DL_NAC_RECEIVED_ERROR;
5413 if (!hba->saved_uic_err) {
5414 err_handling = false;
5415 goto out;
5416 }
5417 }
5418 out:
5419 spin_unlock_irqrestore(hba->host->host_lock, flags);
5420 return err_handling;
5421 }
5422
5423 /**
5424 * ufshcd_err_handler - handle UFS errors that require s/w attention
5425 * @work: pointer to work structure
5426 */
5427 static void ufshcd_err_handler(struct work_struct *work)
5428 {
5429 struct ufs_hba *hba;
5430 struct ufs_vreg_info *info;
5431 struct exynos_ufs *ufs;
5432 unsigned long flags;
5433 u32 err_xfer = 0;
5434 u32 err_tm = 0;
5435 int err = 0;
5436 int tag;
5437 bool needs_reset = false;
5438
5439 hba = container_of(work, struct ufs_hba, eh_work);
5440 info = &hba->vreg_info;
5441
5442 pm_runtime_get_sync(hba->dev);
5443 ufshcd_hold(hba, false);
5444
5445 ufs = to_exynos_ufs(hba);
5446 if (hba->saved_err & UIC_ERROR) {
5447 dev_err(hba->dev, ": CLKSTOP CTRL(0x%04x):\t\t\t\t0x%08x\n",
5448 HCI_CLKSTOP_CTRL, hci_readl(ufs, HCI_CLKSTOP_CTRL));
5449 dev_err(hba->dev, ": FORCE HCS(0x%04x):\t\t\t\t0x%08x\n",
5450 HCI_FORCE_HCS, hci_readl(ufs, HCI_FORCE_HCS));
5451 }
5452
5453 /* Dump debugging information to system memory */
5454 ufshcd_vops_dbg_register_dump(hba);
5455
5456 /* Dump UFS power & reset_n GPIO status */
5457 if (gpio_is_valid(info->ufs_power_gpio))
5458 dev_info(hba->dev, "%s: UFS power pin: 0x%08x\n", __func__, gpio_get_value(info->ufs_power_gpio));
5459 if (gpio_is_valid(info->ufs_reset_n_gpio))
5460 dev_info(hba->dev, "%s: RESET_N: 0x%08x\n", __func__, gpio_get_value(info->ufs_reset_n_gpio));
5461
5462 /* dump controller state before resetting */
5463 if (hba->saved_err & (INT_FATAL_ERRORS | UIC_ERROR)) {
5464 bool pr_prdt = !!(hba->saved_err &
5465 SYSTEM_BUS_FATAL_ERROR);
5466
5467 dev_err(hba->dev, "%s: saved_err 0x%x saved_uic_err 0x%x\n",
5468 __func__, hba->saved_err,
5469 hba->saved_uic_err);
5470
5471 ufshcd_print_host_regs(hba);
5472 ufshcd_print_pwr_info(hba);
5473 ufshcd_print_tmrs(hba, hba->outstanding_tasks);
5474 ufshcd_print_trs(hba, hba->outstanding_reqs, pr_prdt);
5475 }
5476
5477 spin_lock_irqsave(hba->host->host_lock, flags);
5478 if (hba->ufshcd_state == UFSHCD_STATE_RESET)
5479 goto out;
5480
5481 hba->ufshcd_state = UFSHCD_STATE_RESET;
5482 ufshcd_set_eh_in_progress(hba);
5483 exynos_ufs_show_uic_info(hba);
5484
5485 /* Complete requests that have door-bell cleared by h/w */
5486 ufshcd_complete_requests(hba);
5487
5488 if (hba->dev_quirks & UFS_DEVICE_QUIRK_RECOVERY_FROM_DL_NAC_ERRORS) {
5489 bool ret;
5490
5491 spin_unlock_irqrestore(hba->host->host_lock, flags);
5492 /* release the lock as ufshcd_quirk_dl_nac_errors() may sleep */
5493 ret = ufshcd_quirk_dl_nac_errors(hba);
5494 spin_lock_irqsave(hba->host->host_lock, flags);
5495 if (!ret)
5496 goto skip_err_handling;
5497 }
5498 if ((hba->saved_err & INT_FATAL_ERRORS) ||
5499 ((hba->saved_err & UIC_ERROR) &&
5500 (hba->saved_uic_err & (UFSHCD_UIC_DL_PA_INIT_ERROR |
5501 UFSHCD_UIC_DL_ERROR |
5502 UFSHCD_UIC_DL_NAC_RECEIVED_ERROR |
5503 UFSHCD_UIC_DL_TCx_REPLAY_ERROR))))
5504 needs_reset = true;
5505
5506 /*
5507 * if host reset is required then skip clearing the pending
5508 * transfers forcefully because they will automatically get
5509 * cleared after link startup.
5510 */
5511 if (needs_reset)
5512 goto skip_pending_xfer_clear;
5513
5514 /* release lock as clear command might sleep */
5515 spin_unlock_irqrestore(hba->host->host_lock, flags);
5516 /* Clear pending transfer requests */
5517 for_each_set_bit(tag, &hba->outstanding_reqs, hba->nutrs) {
5518 if (ufshcd_clear_cmd(hba, tag)) {
5519 err_xfer = true;
5520 goto lock_skip_pending_xfer_clear;
5521 }
5522 }
5523
5524 /* Clear pending task management requests */
5525 for_each_set_bit(tag, &hba->outstanding_tasks, hba->nutmrs) {
5526 if (ufshcd_clear_tm_cmd(hba, tag)) {
5527 err_tm = true;
5528 goto lock_skip_pending_xfer_clear;
5529 }
5530 }
5531
5532 lock_skip_pending_xfer_clear:
5533 spin_lock_irqsave(hba->host->host_lock, flags);
5534
5535 /* Complete the requests that are cleared by s/w */
5536 ufshcd_complete_requests(hba);
5537
5538 if (err_xfer || err_tm)
5539 needs_reset = true;
5540
5541 skip_pending_xfer_clear:
5542 /* Fatal errors need reset */
5543 if (needs_reset) {
5544 unsigned long max_doorbells = (1UL << hba->nutrs) - 1;
5545
5546 /*
5547 * ufshcd_reset_and_restore() does the link reinitialization
5548 * which needs at least one empty doorbell slot to send the
5549 * device management commands (NOP and query commands).
5550 * If there is no empty slot at this moment then forcefully free up
5551 * the last slot.
5552 */
5553 if (hba->outstanding_reqs == max_doorbells)
5554 __ufshcd_transfer_req_compl(hba, 0,
5555 (1UL << (hba->nutrs - 1)));
5556
5557 spin_unlock_irqrestore(hba->host->host_lock, flags);
5558
5559 /* Log the saved error state before resetting */
5560 if (err_xfer || err_tm || (hba->saved_err & INT_FATAL_ERRORS) ||
5561 ((hba->saved_err & UIC_ERROR) &&
5562 ((hba->saved_uic_err & UFSHCD_UIC_DL_PA_INIT_ERROR) ||
5563 (hba->saved_uic_err & UFSHCD_UIC_DL_ERROR))))
5564 dev_err(hba->dev,
5565 "%s: saved_err:0x%x, saved_uic_err:0x%x\n",
5566 __func__, hba->saved_err, hba->saved_uic_err);
5567
5568 err = ufshcd_reset_and_restore(hba);
5569 spin_lock_irqsave(hba->host->host_lock, flags);
5570 if (err) {
5572 hba->ufshcd_state = UFSHCD_STATE_ERROR;
5575 dev_err(hba->dev, "%s: reset and restore failed\n",
5576 __func__);
5577 }
5578 hba->saved_err = 0;
5579 hba->saved_uic_err = 0;
5580 }
5581
5582 skip_err_handling:
5583 if (!needs_reset) {
5584 hba->ufshcd_state = UFSHCD_STATE_OPERATIONAL;
5585 if (hba->saved_err || hba->saved_uic_err)
5586 dev_err_ratelimited(hba->dev, "%s: exit: saved_err 0x%x saved_uic_err 0x%x\n",
5587 __func__, hba->saved_err, hba->saved_uic_err);
5588 }
5589
5590 ufshcd_clear_eh_in_progress(hba);
5591
5592 out:
5593 spin_unlock_irqrestore(hba->host->host_lock, flags);
5594 scsi_unblock_requests(hba->host);
5595 ufshcd_release(hba);
5596 pm_runtime_put_sync(hba->dev);
5597 }
5598
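/*
* ufshcd_update_uic_reg_hist - record a UIC error value in the history ring
* @reg_hist: error history to update
* @reg: raw UIC error register value to record
*
* The history is a circular buffer of UIC_ERR_REG_HIST_LENGTH entries; each
* entry keeps the raw register value and a timestamp, and the oldest entry
* is overwritten once the buffer wraps around.
*/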
5599 static void ufshcd_update_uic_reg_hist(struct ufs_uic_err_reg_hist *reg_hist,
5600 u32 reg)
5601 {
5602 reg_hist->reg[reg_hist->pos] = reg;
5603 reg_hist->tstamp[reg_hist->pos] = ktime_get();
5604 reg_hist->pos = (reg_hist->pos + 1) % UIC_ERR_REG_HIST_LENGTH;
5605 }
5606
5607 /**
5608 * ufshcd_update_uic_error - check and set fatal UIC error flags.
5609 * @hba: per-adapter instance
5610 */
5611 static void ufshcd_update_uic_error(struct ufs_hba *hba)
5612 {
5613 u32 reg;
5614
5615 /* PHY layer lane error */
5616 reg = ufshcd_readl(hba, REG_UIC_ERROR_CODE_PHY_ADAPTER_LAYER);
5617 /* Ignore LINERESET indication, as this is not an error */
5618 if ((reg & UIC_PHY_ADAPTER_LAYER_ERROR) &&
5619 (reg & UIC_PHY_ADAPTER_LAYER_LANE_ERR_MASK)) {
5620 /*
5621 * To know whether this error is fatal or not, the DB timeout
5622 * must be checked, but that case is handled separately.
5623 */
5624 dev_dbg(hba->dev, "%s: UIC Lane error reported\n", __func__);
5625 ufshcd_update_uic_reg_hist(&hba->ufs_stats.pa_err, reg);
5626 }
5627
5628 /* PA_INIT_ERROR is fatal and needs UIC reset */
5629 reg = ufshcd_readl(hba, REG_UIC_ERROR_CODE_DATA_LINK_LAYER);
5630 if (reg)
5631 ufshcd_update_uic_reg_hist(&hba->ufs_stats.dl_err, reg);
5632
5633 if (reg & UIC_DATA_LINK_LAYER_ERROR_PA_INIT)
5634 hba->uic_error |= UFSHCD_UIC_DL_PA_INIT_ERROR;
5635 else if (hba->dev_quirks &
5636 UFS_DEVICE_QUIRK_RECOVERY_FROM_DL_NAC_ERRORS) {
5637 if (reg & UIC_DATA_LINK_LAYER_ERROR_NAC_RECEIVED)
5638 hba->uic_error |=
5639 UFSHCD_UIC_DL_NAC_RECEIVED_ERROR;
5640 else if (reg & UIC_DATA_LINK_LAYER_ERROR_TCx_REPLAY_TIMEOUT)
5641 hba->uic_error |= UFSHCD_UIC_DL_TCx_REPLAY_ERROR;
5642 }
5643
5644 if (reg & UIC_DATA_LINK_LAYER_ERROR_TCX_REP_TIMER_EXP)
5645 hba->tcx_replay_timer_expired_cnt++;
5646
5647 if (reg & UIC_DATA_LINK_LAYER_ERROR_FCX_PRO_TIMER_EXP)
5648 hba->fcx_protection_timer_expired_cnt++;
5649
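/*
* Two or more TCx replay or FCx protection timer expirations are escalated
* to UFSHCD_UIC_DL_ERROR, which the error handler treats as fatal and
* recovers from with a full host reset.
*/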
5650 if (hba->tcx_replay_timer_expired_cnt >= 2 ||
5651 hba->fcx_protection_timer_expired_cnt >= 2)
5652 hba->uic_error |= UFSHCD_UIC_DL_ERROR;
5653
5654 /* UIC NL/TL/DME errors need software retry */
5655 reg = ufshcd_readl(hba, REG_UIC_ERROR_CODE_NETWORK_LAYER);
5656 if (reg) {
5657 ufshcd_update_uic_reg_hist(&hba->ufs_stats.nl_err, reg);
5658 hba->uic_error |= UFSHCD_UIC_NL_ERROR;
5659 }
5660
5661 reg = ufshcd_readl(hba, REG_UIC_ERROR_CODE_TRANSPORT_LAYER);
5662 if (reg) {
5663 ufshcd_update_uic_reg_hist(&hba->ufs_stats.tl_err, reg);
5664 hba->uic_error |= UFSHCD_UIC_TL_ERROR;
5665 }
5666
5667 reg = ufshcd_readl(hba, REG_UIC_ERROR_CODE_DME);
5668 if (reg) {
5669 ufshcd_update_uic_reg_hist(&hba->ufs_stats.dme_err, reg);
5670 hba->uic_error |= UFSHCD_UIC_DME_ERROR;
5671 }
5672
5673 dev_dbg(hba->dev, "%s: UIC error flags = 0x%08x\n",
5674 __func__, hba->uic_error);
5675 }
5676
5677 /**
5678 * ufshcd_check_errors - Check for errors that need s/w attention
5679 * @hba: per-adapter instance
5680 */
5681 static void ufshcd_check_errors(struct ufs_hba *hba)
5682 {
5683 bool queue_eh_work = false;
5684
5685 if (hba->errors & INT_FATAL_ERRORS)
5686 queue_eh_work = true;
5687
5688 if (hba->errors & UIC_ERROR) {
5689 hba->uic_error = 0;
5690 ufshcd_update_uic_error(hba);
5691 if (hba->uic_error)
5692 queue_eh_work = true;
5693 }
5694
5695 if (queue_eh_work) {
5696 /*
5697 * update the transfer error masks to sticky bits, let's do this
5698 * irrespective of current ufshcd_state.
5699 */
5700 hba->saved_err |= hba->errors;
5701 hba->saved_uic_err |= hba->uic_error;
5702
5703 /* handle fatal errors only when link is functional */
5704 if (hba->ufshcd_state == UFSHCD_STATE_OPERATIONAL) {
5705 /* block commands from scsi mid-layer */
5706 scsi_block_requests(hba->host);
5707
5708 hba->ufshcd_state = UFSHCD_STATE_EH_SCHEDULED;
5709
5710 schedule_work(&hba->eh_work);
5711 }
5712 }
5713 /*
5714 * if (!queue_eh_work) -
5715 * Other errors are either non-fatal where host recovers
5716 * itself without s/w intervention or errors that will be
5717 * handled by the SCSI core layer.
5718 */
5719 }
5720
5721 /**
5722 * ufshcd_tmc_handler - handle task management function completion
5723 * @hba: per adapter instance
5724 */
5725 static void ufshcd_tmc_handler(struct ufs_hba *hba)
5726 {
5727 u32 tm_doorbell;
5728
5729 tm_doorbell = ufshcd_readl(hba, REG_UTP_TASK_REQ_DOOR_BELL);
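/*
* Bits that are set in outstanding_tasks but already cleared in the TM
* doorbell belong to completed task management requests: the first XOR
* isolates those bits into tm_condition and the second XOR clears the
* completed slots from outstanding_tasks.
*/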
5730 hba->tm_condition = tm_doorbell ^ hba->outstanding_tasks;
5731 hba->outstanding_tasks ^= hba->tm_condition;
5732 wake_up(&hba->tm_wq);
5733 }
5734
5735 /**
5736 * ufshcd_sl_intr - Interrupt service routine
5737 * @hba: per adapter instance
5738 * @intr_status: contains interrupts generated by the controller
5739 */
5740 static void ufshcd_sl_intr(struct ufs_hba *hba, u32 intr_status)
5741 {
5742 hba->errors = UFSHCD_ERROR_MASK & intr_status;
5743 if (hba->errors)
5744 ufshcd_check_errors(hba);
5745
5746 if (intr_status & UFSHCD_UIC_MASK)
5747 ufshcd_uic_cmd_compl(hba, intr_status);
5748
5749 if (intr_status & UTP_TASK_REQ_COMPL)
5750 ufshcd_tmc_handler(hba);
5751
5752 if (intr_status & UTP_TRANSFER_REQ_COMPL)
5753 ufshcd_transfer_req_compl(hba, 0);
5754
5755 /* Disable the UIC ERROR interrupt to stop a UIC interrupt storm */
5756 if (hba->saved_uic_err && (hba->ufshcd_state != UFSHCD_STATE_RESET))
5757 ufshcd_disable_intr(hba, UIC_ERROR);
5758 }
5759
5760 /**
5761 * ufshcd_intr - Main interrupt service routine
5762 * @irq: irq number
5763 * @__hba: pointer to adapter instance
5764 *
5765 * Returns IRQ_HANDLED - If interrupt is valid
5766 * IRQ_NONE - If invalid interrupt
5767 */
5768 static irqreturn_t ufshcd_intr(int irq, void *__hba)
5769 {
5770 u32 intr_status, enabled_intr_status;
5771 irqreturn_t retval = IRQ_NONE;
5772 struct ufs_hba *hba = __hba;
5773
5774 spin_lock(hba->host->host_lock);
5775 intr_status = ufshcd_readl(hba, REG_INTERRUPT_STATUS);
5776 enabled_intr_status =
5777 intr_status & ufshcd_readl(hba, REG_INTERRUPT_ENABLE);
5778
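/*
* Acknowledge every raised status bit, but only service the sources that
* are also enabled, so masked interrupts cannot trigger spurious handling.
*/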
5779 if (intr_status)
5780 ufshcd_writel(hba, intr_status, REG_INTERRUPT_STATUS);
5781
5782 if (enabled_intr_status) {
5783 ufshcd_sl_intr(hba, enabled_intr_status);
5784 retval = IRQ_HANDLED;
5785 }
5786 spin_unlock(hba->host->host_lock);
5787 return retval;
5788 }
5789
5790 static int ufshcd_clear_tm_cmd(struct ufs_hba *hba, int tag)
5791 {
5792 int err = 0;
5793 u32 mask = 1 << tag;
5794 unsigned long flags;
5795
5796 spin_lock_irqsave(hba->host->host_lock, flags);
5797 ufshcd_utmrl_clear(hba, tag);
5798 spin_unlock_irqrestore(hba->host->host_lock, flags);
5799
5800 /* poll for max. 1 sec for the doorbell register to be cleared by h/w */
5801 err = ufshcd_wait_for_register(hba,
5802 REG_UTP_TASK_REQ_DOOR_BELL,
5803 mask, 0, 1000, 1000, true);
5804 return err;
5805 }
5806
5807 /**
5808 * ufshcd_issue_tm_cmd - issues task management commands to controller
5809 * @hba: per adapter instance
5810 * @lun_id: LUN ID to which TM command is sent
5811 * @task_id: task ID to which the TM command is applicable
5812 * @tm_function: task management function opcode
5813 * @tm_response: task management service response return value
5814 *
5815 * Returns non-zero value on error, zero on success.
5816 */
5817 static int ufshcd_issue_tm_cmd(struct ufs_hba *hba, int lun_id, int task_id,
5818 u8 tm_function, u8 *tm_response)
5819 {
5820 struct utp_task_req_desc *task_req_descp;
5821 struct utp_upiu_task_req *task_req_upiup;
5822 struct Scsi_Host *host;
5823 unsigned long flags;
5824 int free_slot;
5825 int err;
5826 int task_tag;
5827
5828 host = hba->host;
5829
5830 /*
5831 * Get free slot, sleep if slots are unavailable.
5832 * Even though we use wait_event() which sleeps indefinitely,
5833 * the maximum wait time is bounded by %TM_CMD_TIMEOUT.
5834 */
5835 wait_event(hba->tm_tag_wq, ufshcd_get_tm_free_slot(hba, &free_slot));
5836 ufshcd_hold(hba, false);
5837
5838 spin_lock_irqsave(host->host_lock, flags);
5839 task_req_descp = hba->utmrdl_base_addr;
5840 task_req_descp += free_slot;
5841
5842 /* Configure task request descriptor */
5843 task_req_descp->header.dword_0 = cpu_to_le32(UTP_REQ_DESC_INT_CMD);
5844 task_req_descp->header.dword_2 =
5845 cpu_to_le32(OCS_INVALID_COMMAND_STATUS);
5846
5847 /* Configure task request UPIU */
5848 task_req_upiup =
5849 (struct utp_upiu_task_req *) task_req_descp->task_req_upiu;
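/*
* TM request tags are allocated above the transfer request tag range
* (0..nutrs-1) so they can never collide with SCSI command tags.
*/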
5850 task_tag = hba->nutrs + free_slot;
5851 task_req_upiup->header.dword_0 =
5852 UPIU_HEADER_DWORD(UPIU_TRANSACTION_TASK_REQ, 0,
5853 lun_id, task_tag);
5854 task_req_upiup->header.dword_1 =
5855 UPIU_HEADER_DWORD(0, tm_function, 0, 0);
5856 /*
5857 * The host shall provide the same value for LUN field in the basic
5858 * header and for Input Parameter.
5859 */
5860 task_req_upiup->input_param1 = cpu_to_be32(lun_id);
5861 task_req_upiup->input_param2 = cpu_to_be32(task_id);
5862
5863 /* send command to the controller */
5864 if (hba->vops && hba->vops->set_nexus_t_task_mgmt)
5865 hba->vops->set_nexus_t_task_mgmt(hba, free_slot, tm_function);
5866 __set_bit(free_slot, &hba->outstanding_tasks);
5867
5868 /* Make sure descriptors are ready before ringing the task doorbell */
5869 wmb();
5870
5871 ufshcd_writel(hba, 1 << free_slot, REG_UTP_TASK_REQ_DOOR_BELL);
5872 /* Make sure that doorbell is committed immediately */
5873 wmb();
5874
5875 spin_unlock_irqrestore(host->host_lock, flags);
5876
5877 /* wait until the task management command is completed */
5878 err = wait_event_timeout(hba->tm_wq,
5879 test_bit(free_slot, &hba->tm_condition),
5880 msecs_to_jiffies(TM_CMD_TIMEOUT));
5881 if (!err) {
5882 dev_err(hba->dev, "%s: task management cmd 0x%.2x timed-out\n",
5883 __func__, tm_function);
5884 if (!ufshcd_clear_tm_cmd(hba, free_slot)) {
5885 spin_lock_irqsave(hba->host->host_lock, flags);
5886 __clear_bit(free_slot, &hba->outstanding_tasks);
5887 spin_unlock_irqrestore(hba->host->host_lock, flags);
5888 } else {
5889 dev_WARN(hba->dev, "%s: unable to clear tm cmd (slot %d) after timeout\n",
5890 __func__, free_slot);
5891 }
5892 err = -ETIMEDOUT;
5893 } else {
5894 err = ufshcd_task_req_compl(hba, free_slot, tm_response);
5895 }
5896
5897 clear_bit(free_slot, &hba->tm_condition);
5898 ufshcd_put_tm_slot(hba, free_slot);
5899 wake_up(&hba->tm_tag_wq);
5900
5901 ufshcd_release(hba);
5902 return err;
5903 }
5904
5905 /**
5906 * ufshcd_eh_device_reset_handler - device reset handler registered to
5907 * scsi layer.
5908 * @cmd: SCSI command pointer
5909 *
5910 * Returns SUCCESS/FAILED
5911 */
5912 static int ufshcd_eh_device_reset_handler(struct scsi_cmnd *cmd)
5913 {
5914 struct Scsi_Host *host;
5915 struct ufs_hba *hba;
5916 unsigned int tag;
5917 u32 pos;
5918 int err;
5919 u8 resp = 0xF;
5920 struct ufshcd_lrb *lrbp;
5921 unsigned long flags;
5922
5923 host = cmd->device->host;
5924 hba = shost_priv(host);
5925 tag = cmd->request->tag;
5926
5927 /* secure log */
5928 #ifdef CONFIG_EXYNOS_SMC_LOGGING
5929 exynos_smc(SMC_CMD_UFS_LOG, 1, 0, hba->secure_log.paddr);
5930 #endif
5931
5932 /* Dump debugging information to system memory */
5933 ufshcd_vops_dbg_register_dump(hba);
5934 exynos_ufs_show_uic_info(hba);
5935
5936 lrbp = &hba->lrb[tag];
5937 err = ufshcd_issue_tm_cmd(hba, lrbp->lun, 0, UFS_LOGICAL_RESET, &resp);
5938 if (err || resp != UPIU_TASK_MANAGEMENT_FUNC_COMPL) {
5939 if (!err)
5940 err = resp;
5941 goto out;
5942 }
5943
5944 /* clear the commands that were pending for corresponding LUN */
5945 for_each_set_bit(pos, &hba->outstanding_reqs, hba->nutrs) {
5946 if (hba->lrb[pos].lun == lrbp->lun) {
5947 err = ufshcd_clear_cmd(hba, pos);
5948 if (err)
5949 break;
5950 }
5951 }
5952 spin_lock_irqsave(host->host_lock, flags);
5953 ufshcd_transfer_req_compl(hba, DID_RESET);
5954 spin_unlock_irqrestore(host->host_lock, flags);
5955
5956 out:
5957 hba->req_abort_count = 0;
5958 if (!err) {
5959 dev_info(hba->dev, "%s: LU reset succeeded\n", __func__);
5960 err = SUCCESS;
5961 } else {
5962 dev_err(hba->dev, "%s: failed with err %d\n", __func__, err);
5963 err = FAILED;
5964 }
5965 return err;
5966 }
5967
5968 static void ufshcd_set_req_abort_skip(struct ufs_hba *hba, unsigned long bitmap)
5969 {
5970 struct ufshcd_lrb *lrbp;
5971 int tag;
5972
5973 for_each_set_bit(tag, &bitmap, hba->nutrs) {
5974 lrbp = &hba->lrb[tag];
5975 lrbp->req_abort_skip = true;
5976 }
5977 }
5978
5979 /**
5980 * ufshcd_abort - abort a specific command
5981 * @cmd: SCSI command pointer
5982 *
5983 * Abort the pending command in device by sending UFS_ABORT_TASK task management
5984 * command, and in host controller by clearing the door-bell register. There can
5985 * be race between controller sending the command to the device while abort is
5986 * issued. To avoid that, first issue UFS_QUERY_TASK to check if the command is
5987 * really issued and then try to abort it.
5988 *
5989 * Returns SUCCESS/FAILED
5990 */
5991 static int ufshcd_abort(struct scsi_cmnd *cmd)
5992 {
5993 struct Scsi_Host *host;
5994 struct ufs_hba *hba;
5995 unsigned long flags;
5996 unsigned int tag;
5997 int err = 0;
5998 int poll_cnt;
5999 u8 resp = 0xF;
6000 struct ufshcd_lrb *lrbp;
6001 u32 reg;
6002
6003 host = cmd->device->host;
6004 hba = shost_priv(host);
6005 tag = cmd->request->tag;
6006 lrbp = &hba->lrb[tag];
6007 if (!ufshcd_valid_tag(hba, tag)) {
6008 dev_err(hba->dev,
6009 "%s: invalid command tag %d: cmd=0x%p, cmd->request=0x%p",
6010 __func__, tag, cmd, cmd->request);
6011 BUG();
6012 }
6013
6014 /*
6015 * Task abort to the device W-LUN is illegal. When this command
6016 * fails, due to the spec violation, the next SCSI error handling step
6017 * will be to send a LU reset which, again, is a spec violation.
6018 * To avoid these unnecessary/illegal steps we skip to the last error
6019 * handling stage: reset and restore.
6020 */
6021 if (lrbp->lun == UFS_UPIU_UFS_DEVICE_WLUN)
6022 return ufshcd_eh_host_reset_handler(cmd);
6023
6024 /* secure log */
6025 #ifdef CONFIG_EXYNOS_SMC_LOGGING
6026 exynos_smc(SMC_CMD_UFS_LOG, 1, 0, hba->secure_log.paddr);
6027 #endif
6028
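/*
* For READ(10)/WRITE(10) also log the big-endian LBA (CDB bytes 2-5) and
* the transfer length in sectors (CDB bytes 7-8) to help correlate the
* aborted request with block layer traces.
*/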
6029 if (cmd->cmnd[0] == READ_10 || cmd->cmnd[0] == WRITE_10) {
6030 unsigned long lba = (unsigned long) ((cmd->cmnd[2] << 24) |
6031 (cmd->cmnd[3] << 16) |
6032 (cmd->cmnd[4] << 8) |
6033 (cmd->cmnd[5] << 0));
6034 unsigned int sct = (cmd->cmnd[7] << 8) |
6035 (cmd->cmnd[8] << 0);
6036
6037 dev_err(hba->dev, "%s: tag:%d, cmd:0x%x, "
6038 "lba:0x%08lx, sct:0x%04x, retries %d\n",
6039 __func__, tag, cmd->cmnd[0], lba, sct, cmd->retries);
6040 } else {
6041 dev_err(hba->dev, "%s: tag:%d, cmd:0x%x, retries %d\n",
6042 __func__, tag, cmd->cmnd[0], cmd->retries);
6043 }
6044
6045 ufshcd_hold(hba, false);
6046
6047 /* Dump debugging information to system memory */
6048 ufshcd_vops_dbg_register_dump(hba);
6049 reg = ufshcd_readl(hba, REG_UTP_TRANSFER_REQ_DOOR_BELL);
6050 /* If command is already aborted/completed, return SUCCESS */
6051 if (!(test_bit(tag, &hba->outstanding_reqs))) {
6052 dev_err(hba->dev,
6053 "%s: cmd at tag %d already completed, outstanding=0x%lx, doorbell=0x%x\n",
6054 __func__, tag, hba->outstanding_reqs, reg);
6055 goto out;
6056 }
6057
6058 if (!(reg & (1 << tag))) {
6059 dev_err(hba->dev,
6060 "%s: cmd was completed, but without a notifying intr, tag = %d",
6061 __func__, tag);
6062 goto clean;
6063 }
6064
6065 /* Print Transfer Request of aborted task */
6066 dev_err(hba->dev, "%s: Device abort task at tag %d\n", __func__, tag);
6067
6068 /*
6069 * Print detailed info about aborted request.
6070 * As more than one request might get aborted at the same time,
6071 * print full information only for the first aborted request in order
6072 * to reduce repeated printouts. For other aborted requests only print
6073 * basic details.
6074 */
6075 scsi_print_command(hba->lrb[tag].cmd);
6076 if (!hba->req_abort_count) {
6077 ufshcd_print_host_regs(hba);
6078 ufshcd_print_host_state(hba);
6079 ufshcd_print_pwr_info(hba);
6080 ufshcd_print_trs(hba, 1 << tag, true);
6081 } else {
6082 ufshcd_print_trs(hba, 1 << tag, false);
6083 }
6084 hba->req_abort_count++;
6085
6086 /* Skip task abort in case previous aborts failed and report failure */
6087 if (lrbp->req_abort_skip) {
6088 err = -EIO;
6089 goto out;
6090 }
6091
6092 for (poll_cnt = 100; poll_cnt; poll_cnt--) {
6093 err = ufshcd_issue_tm_cmd(hba, lrbp->lun, lrbp->task_tag,
6094 UFS_QUERY_TASK, &resp);
6095 if (!err && resp == UPIU_TASK_MANAGEMENT_FUNC_SUCCEEDED) {
6096 /* cmd pending in the device */
6097 dev_err(hba->dev, "%s: cmd pending in the device. tag = %d\n",
6098 __func__, tag);
6099 break;
6100 } else if (!err && resp == UPIU_TASK_MANAGEMENT_FUNC_COMPL) {
6101 /*
6102 * cmd not pending in the device, check if it is
6103 * in transition.
6104 */
6105 dev_err(hba->dev, "%s: cmd at tag %d not pending in the device.\n",
6106 __func__, tag);
6107 reg = ufshcd_readl(hba, REG_UTP_TRANSFER_REQ_DOOR_BELL);
6108 if (reg & (1 << tag)) {
6109 /* sleep for max. 200us to stabilize */
6110 usleep_range(100, 200);
6111 continue;
6112 }
6113 /* command completed already */
6114 dev_err(hba->dev, "%s: cmd at tag %d successfully cleared from DB.\n",
6115 __func__, tag);
6116 goto out;
6117 } else {
6118 dev_err(hba->dev,
6119 "%s: no response from device. tag = %d, err %d\n",
6120 __func__, tag, err);
6121 if (!err)
6122 err = resp; /* service response error */
6123 dev_err(hba->dev,
6124 "%s: query task failed with err %d\n",
6125 __func__, err);
6126 goto out;
6127 }
6128 }
6129
6130 if (!poll_cnt) {
6131 err = -EBUSY;
6132 dev_err(hba->dev,
6133 "%s: cmd might be missed, not pending in device\n",
6134 __func__);
6135 goto out;
6136 }
6137
6138 err = ufshcd_issue_tm_cmd(hba, lrbp->lun, lrbp->task_tag,
6139 UFS_ABORT_TASK, &resp);
6140 if (err || resp != UPIU_TASK_MANAGEMENT_FUNC_COMPL) {
6141 if (!err) {
6142 err = resp; /* service response error */
6143 dev_err(hba->dev, "%s: issued. tag = %d, err %d\n",
6144 __func__, tag, err);
6145 }
6146 goto out;
6147 }
6148
6149 err = ufshcd_clear_cmd(hba, tag);
6150 if (err) {
6151 dev_err(hba->dev, "%s: Failed clearing cmd at tag %d, err %d\n",
6152 __func__, tag, err);
6153 goto out;
6154 }
6155 clean:
6156 scsi_dma_unmap(cmd);
6157
6158 spin_lock_irqsave(host->host_lock, flags);
6159 ufshcd_outstanding_req_clear(hba, tag);
6160 hba->lrb[tag].cmd = NULL;
6161 spin_unlock_irqrestore(host->host_lock, flags);
6162
6163 clear_bit_unlock(tag, &hba->lrb_in_use);
6164 wake_up(&hba->dev_cmd.tag_wq);
6165
6166 out:
6167 if (!err) {
6168 err = SUCCESS;
6169 } else {
6170 dev_err(hba->dev, "%s: failed with err %d\n", __func__, err);
6171 ufshcd_set_req_abort_skip(hba, hba->outstanding_reqs);
6172 err = FAILED;
6173 }
6174
6175 /*
6176 * This ufshcd_release() corresponds to the original scsi cmd that got
6177 * aborted here (as we won't get any IRQ for it).
6178 */
6179 ufshcd_release(hba);
6180 return err;
6181 }
6182
6183 /**
6184 * ufshcd_host_reset_and_restore - reset and restore host controller
6185 * @hba: per-adapter instance
6186 *
6187 * Note that host controller reset may issue DME_RESET to
6188 * local and remote (device) Uni-Pro stack and the attributes
6189 * are reset to default state.
6190 *
6191 * Returns zero on success, non-zero on failure
6192 */
6193 static int ufshcd_host_reset_and_restore(struct ufs_hba *hba)
6194 {
6195 int err = 0;
6196 unsigned long flags;
6197
6198 /* Reset the host controller */
6199 spin_lock_irqsave(hba->host->host_lock, flags);
6200 hba->ufshcd_state = UFSHCD_STATE_RESET;
6201 ufshcd_set_eh_in_progress(hba);
6202 ufshcd_hba_stop(hba, false);
6203 spin_unlock_irqrestore(hba->host->host_lock, flags);
6204
6205 #if defined(CONFIG_PM_DEVFREQ)
6206 /* scale up clocks to max frequency before full reinitialization */
6207 ufshcd_scale_clks(hba, true);
6208 #endif
6209
6210 /* Establish the link again and restore the device */
6211 #ifdef CONFIG_SCSI_UFS_ASYNC_RELINK
6212 if (hba->pm_op_in_progress)
6213 async_schedule(ufshcd_async_scan, hba);
6214 else
6215 #endif
6216 {
6217 err = ufshcd_probe_hba(hba);
6218
6219 if (!err && (hba->ufshcd_state != UFSHCD_STATE_OPERATIONAL)) {
6220 dev_err(hba->dev, "%s: failed\n", __func__);
6221 err = -EIO;
6222 }
6223 }
6224
6225 spin_lock_irqsave(hba->host->host_lock, flags);
6226 ufshcd_clear_eh_in_progress(hba);
6227 spin_unlock_irqrestore(hba->host->host_lock, flags);
6228
6229 return err;
6230 }
6231
6232 /**
6233 * ufshcd_reset_and_restore - reset and re-initialize host/device
6234 * @hba: per-adapter instance
6235 *
6236 * Reset and recover device, host and re-establish link. This
6237 * is helpful to recover the communication in fatal error conditions.
6238 *
6239 * Returns zero on success, non-zero on failure
6240 */
6241 static int ufshcd_reset_and_restore(struct ufs_hba *hba)
6242 {
6243 int err = 0;
6244 unsigned long flags;
6245 int retries = MAX_HOST_RESET_RETRIES;
6246
6247 int tag;
6248
6249 for_each_set_bit(tag, &hba->outstanding_reqs, hba->nutrs)
6250 ufshcd_clear_cmd(hba, tag);
6251
6252 spin_lock_irqsave(hba->host->host_lock, flags);
6253 ufshcd_transfer_req_compl(hba, DID_RESET);
6254 spin_unlock_irqrestore(hba->host->host_lock, flags);
6255
6256 ssleep(1);
6257
6258 do {
6259 err = ufshcd_host_reset_and_restore(hba);
6260 } while (err && --retries);
6261
6262 /*
6263 * After reset the door-bell might be cleared, complete
6264 * outstanding requests in s/w here.
6265 */
6266 spin_lock_irqsave(hba->host->host_lock, flags);
6267 ufshcd_transfer_req_compl(hba, DID_RESET);
6268 ufshcd_tmc_handler(hba);
6269 spin_unlock_irqrestore(hba->host->host_lock, flags);
6270
6271 return err;
6272 }
6273
6274 /**
6275 * ufshcd_eh_host_reset_handler - host reset handler registered to scsi layer
6276 * @cmd: SCSI command pointer
6277 *
6278 * Returns SUCCESS/FAILED
6279 */
6280 static int ufshcd_eh_host_reset_handler(struct scsi_cmnd *cmd)
6281 {
6282 int err;
6283 unsigned long flags;
6284 struct ufs_hba *hba;
6285
6286 hba = shost_priv(cmd->device->host);
6287
6288 ufshcd_hold(hba, false);
6289 /*
6290 * Check if there is any race with fatal error handling.
6291 * If so, wait for it to complete. Even though fatal error
6292 * handling does reset and restore in some cases, don't assume
6293 * anything out of it. We are just avoiding race here.
6294 */
6295 do {
6296 spin_lock_irqsave(hba->host->host_lock, flags);
6297 if (!(work_pending(&hba->eh_work) ||
6298 hba->ufshcd_state == UFSHCD_STATE_RESET ||
6299 hba->ufshcd_state == UFSHCD_STATE_EH_SCHEDULED))
6300 break;
6301 spin_unlock_irqrestore(hba->host->host_lock, flags);
6302 dev_dbg(hba->dev, "%s: reset in progress\n", __func__);
6303 flush_work(&hba->eh_work);
6304 } while (1);
6305
6306 hba->ufshcd_state = UFSHCD_STATE_RESET;
6307 ufshcd_set_eh_in_progress(hba);
6308 spin_unlock_irqrestore(hba->host->host_lock, flags);
6309
6310 err = ufshcd_reset_and_restore(hba);
6311
6312 spin_lock_irqsave(hba->host->host_lock, flags);
6313 if (!err) {
6314 err = SUCCESS;
6315 hba->ufshcd_state = UFSHCD_STATE_OPERATIONAL;
6316 } else {
6317 err = FAILED;
6318 hba->ufshcd_state = UFSHCD_STATE_ERROR;
6319 }
6320 ufshcd_clear_eh_in_progress(hba);
6321 spin_unlock_irqrestore(hba->host->host_lock, flags);
6322
6323 ufshcd_release(hba);
6324 return err;
6325 }
6326
6327 /**
6328 * ufshcd_get_max_icc_level - calculate the ICC level
6329 * @sup_curr_uA: max. current supported by the regulator
6330 * @start_scan: row at the desc table to start scan from
6331 * @buff: power descriptor buffer
6332 *
6333 * Returns calculated max ICC level for specific regulator
6334 */
6335 static u32 ufshcd_get_max_icc_level(int sup_curr_uA, u32 start_scan, char *buff)
6336 {
6337 int i;
6338 int curr_uA;
6339 u16 data;
6340 u16 unit;
6341
6342 for (i = start_scan; i >= 0; i--) {
6343 data = be16_to_cpup((__be16 *)&buff[2 * i]);
6344 unit = (data & ATTR_ICC_LVL_UNIT_MASK) >>
6345 ATTR_ICC_LVL_UNIT_OFFSET;
6346 curr_uA = data & ATTR_ICC_LVL_VALUE_MASK;
6347 switch (unit) {
6348 case UFSHCD_NANO_AMP:
6349 curr_uA = curr_uA / 1000;
6350 break;
6351 case UFSHCD_MILI_AMP:
6352 curr_uA = curr_uA * 1000;
6353 break;
6354 case UFSHCD_AMP:
6355 curr_uA = curr_uA * 1000 * 1000;
6356 break;
6357 case UFSHCD_MICRO_AMP:
6358 default:
6359 break;
6360 }
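/*
* Illustrative example (assuming the ATTR_ICC_LVL_* layout with the unit
* field in the upper bits and the value in the lower bits): an entry whose
* unit decodes to UFSHCD_MILI_AMP with a value of 100 means 100 mA, which
* is normalized to 100000 uA before the comparison below.
*/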
6361 if (sup_curr_uA >= curr_uA)
6362 break;
6363 }
6364 if (i < 0) {
6365 i = 0;
6366 pr_err("%s: Couldn't find valid icc_level = %d", __func__, i);
6367 }
6368
6369 return (u32)i;
6370 }
6371
6372 /**
6373 * ufshcd_find_max_sup_active_icc_level - calculate the max supported active ICC level
6374 * In case the regulators are not initialized we'll return 0.
6375 * @hba: per-adapter instance
6376 * @desc_buf: power descriptor buffer to extract ICC levels from.
6377 * @len: length of desc_buf
6378 *
6379 * Returns calculated ICC level
6380 */
6381 static u32 ufshcd_find_max_sup_active_icc_level(struct ufs_hba *hba,
6382 u8 *desc_buf, int len)
6383 {
6384 u32 icc_level = 0;
6385
6386 if (!hba->vreg_info.vcc || !hba->vreg_info.vccq ||
6387 !hba->vreg_info.vccq2) {
6388 dev_err(hba->dev,
6389 "%s: Regulator capability was not set, actvIccLevel=%d",
6390 __func__, icc_level);
6391 goto out;
6392 }
6393
6394 if (hba->vreg_info.vcc)
6395 icc_level = ufshcd_get_max_icc_level(
6396 hba->vreg_info.vcc->max_uA,
6397 POWER_DESC_MAX_ACTV_ICC_LVLS - 1,
6398 &desc_buf[PWR_DESC_ACTIVE_LVLS_VCC_0]);
6399
6400 if (hba->vreg_info.vccq)
6401 icc_level = ufshcd_get_max_icc_level(
6402 hba->vreg_info.vccq->max_uA,
6403 icc_level,
6404 &desc_buf[PWR_DESC_ACTIVE_LVLS_VCCQ_0]);
6405
6406 if (hba->vreg_info.vccq2)
6407 icc_level = ufshcd_get_max_icc_level(
6408 hba->vreg_info.vccq2->max_uA,
6409 icc_level,
6410 &desc_buf[PWR_DESC_ACTIVE_LVLS_VCCQ2_0]);
6411 out:
6412 return icc_level;
6413 }
6414
6415 static void ufshcd_init_icc_levels(struct ufs_hba *hba)
6416 {
6417 int ret;
6418 int buff_len = hba->desc_size.pwr_desc;
6419 u8 desc_buf[hba->desc_size.pwr_desc];
6420
6421 ret = ufshcd_read_power_desc(hba, desc_buf, buff_len);
6422 if (ret) {
6423 dev_err(hba->dev,
6424 "%s: Failed reading power descriptor.len = %d ret = %d",
6425 __func__, buff_len, ret);
6426 return;
6427 }
6428
6429 hba->init_prefetch_data.icc_level =
6430 ufshcd_find_max_sup_active_icc_level(hba,
6431 desc_buf, buff_len);
6432 dev_dbg(hba->dev, "%s: setting icc_level 0x%x",
6433 __func__, hba->init_prefetch_data.icc_level);
6434
6435 ret = ufshcd_query_attr_retry(hba, UPIU_QUERY_OPCODE_WRITE_ATTR,
6436 QUERY_ATTR_IDN_ACTIVE_ICC_LVL, 0, 0,
6437 &hba->init_prefetch_data.icc_level);
6438
6439 if (ret)
6440 dev_err(hba->dev,
6441 "%s: Failed configuring bActiveICCLevel = %d ret = %d",
6442 __func__, hba->init_prefetch_data.icc_level , ret);
6443
6444 }
6445
6446 /**
6447 * ufshcd_scsi_add_wlus - Adds required W-LUs
6448 * @hba: per-adapter instance
6449 *
6450 * UFS device specification requires the UFS devices to support 4 well known
6451 * logical units:
6452 * "REPORT_LUNS" (address: 01h)
6453 * "UFS Device" (address: 50h)
6454 * "RPMB" (address: 44h)
6455 * "BOOT" (address: 30h)
6456 * UFS device's power management needs to be controlled by "POWER CONDITION"
6457 * field of SSU (START STOP UNIT) command. But this "power condition" field
6458 * will take effect only when it is sent to the "UFS Device" well known logical unit,
6459 * hence we require the scsi_device instance to represent this logical unit in
6460 * order for the UFS host driver to send the SSU command for power management.
6461 *
6462 * We also require the scsi_device instance for "RPMB" (Replay Protected Memory
6463 * Block) LU so a user space process can control this LU. User space may also
6464 * want to have access to the BOOT LU.
6465 *
6466 * This function adds scsi device instances for each of the above well known LUs
6467 * (except "REPORT LUNS" LU).
6468 *
6469 * Returns zero on success (all required W-LUs are added successfully),
6470 * non-zero error value on failure (if failed to add any of the required W-LU).
6471 */
6472 static int ufshcd_scsi_add_wlus(struct ufs_hba *hba)
6473 {
6474 int ret = 0;
6475 struct scsi_device *sdev_boot;
6476
6477 hba->sdev_ufs_device = __scsi_add_device(hba->host, 0, 0,
6478 ufshcd_upiu_wlun_to_scsi_wlun(UFS_UPIU_UFS_DEVICE_WLUN), NULL);
6479 if (IS_ERR(hba->sdev_ufs_device)) {
6480 ret = PTR_ERR(hba->sdev_ufs_device);
6481 hba->sdev_ufs_device = NULL;
6482 goto out;
6483 }
6484 scsi_device_put(hba->sdev_ufs_device);
6485
6486 sdev_boot = __scsi_add_device(hba->host, 0, 0,
6487 ufshcd_upiu_wlun_to_scsi_wlun(UFS_UPIU_BOOT_WLUN), NULL);
6488 if (IS_ERR(sdev_boot)) {
6489 ret = PTR_ERR(sdev_boot);
6490 goto remove_sdev_ufs_device;
6491 }
6492 scsi_device_put(sdev_boot);
6493
6494 hba->sdev_rpmb = __scsi_add_device(hba->host, 0, 0,
6495 ufshcd_upiu_wlun_to_scsi_wlun(UFS_UPIU_RPMB_WLUN), NULL);
6496 if (IS_ERR(hba->sdev_rpmb)) {
6497 ret = PTR_ERR(hba->sdev_rpmb);
6498 goto remove_sdev_boot;
6499 }
6500 scsi_device_put(hba->sdev_rpmb);
6501 goto out;
6502
6503 remove_sdev_boot:
6504 scsi_remove_device(sdev_boot);
6505 remove_sdev_ufs_device:
6506 scsi_remove_device(hba->sdev_ufs_device);
6507 out:
6508 return ret;
6509 }
6510
6511 static int ufs_get_device_desc(struct ufs_hba *hba,
6512 struct ufs_dev_desc *dev_desc)
6513 {
6514 int err;
6515 u8 model_index;
6516 u8 str_desc_buf[QUERY_DESC_MAX_SIZE + 1] = {0};
6517 u8 desc_buf[hba->desc_size.dev_desc];
6518
6519 err = ufshcd_read_device_desc(hba, desc_buf, hba->desc_size.dev_desc);
6520 if (err) {
6521 dev_err(hba->dev, "%s: Failed reading Device Desc. err = %d\n",
6522 __func__, err);
6523 goto out;
6524 }
6525
6526 /*
6527 * getting vendor (manufacturerID) and Bank Index in big endian
6528 * format
6529 */
6530 dev_desc->wmanufacturerid = desc_buf[DEVICE_DESC_PARAM_MANF_ID] << 8 |
6531 desc_buf[DEVICE_DESC_PARAM_MANF_ID + 1];
6532
6533 model_index = desc_buf[DEVICE_DESC_PARAM_PRDCT_NAME];
6534
6535 err = ufshcd_read_string_desc(hba, model_index, str_desc_buf,
6536 QUERY_DESC_MAX_SIZE, ASCII_STD);
6537 if (err) {
6538 dev_err(hba->dev, "%s: Failed reading Product Name. err = %d\n",
6539 __func__, err);
6540 goto out;
6541 }
6542
6543 str_desc_buf[QUERY_DESC_MAX_SIZE] = '\0';
6544 strlcpy(dev_desc->model, (str_desc_buf + QUERY_DESC_HDR_SIZE),
6545 min_t(u8, str_desc_buf[QUERY_DESC_LENGTH_OFFSET],
6546 MAX_MODEL_LEN));
6547
6548 /* Null terminate the model string */
6549 dev_desc->model[MAX_MODEL_LEN] = '\0';
6550
6551 out:
6552 return err;
6553 }
6554
6555 static void ufs_fixup_device_setup(struct ufs_hba *hba,
6556 struct ufs_dev_desc *dev_desc)
6557 {
6558 struct ufs_dev_fix *f;
6559
6560 for (f = ufs_fixups; f->quirk; f++) {
6561 if ((f->card.wmanufacturerid == dev_desc->wmanufacturerid ||
6562 f->card.wmanufacturerid == UFS_ANY_VENDOR) &&
6563 (STR_PRFX_EQUAL(f->card.model, dev_desc->model) ||
6564 !strcmp(f->card.model, UFS_ANY_MODEL)))
6565 hba->dev_quirks |= f->quirk;
6566 }
6567 }
6568
6569 /**
6570 * ufshcd_tune_pa_tactivate - Tunes PA_TActivate of local UniPro
6571 * @hba: per-adapter instance
6572 *
6573 * PA_TActivate parameter can be tuned manually if UniPro version is less than
6574 * 1.61. PA_TActivate needs to be greater than or equal to the peer M-PHY's
6575 * RX_MIN_ACTIVATETIME_CAPABILITY attribute. This optimal value can help reduce
6576 * the hibern8 exit latency.
6577 *
6578 * Returns zero on success, non-zero error value on failure.
6579 */
6580 static int ufshcd_tune_pa_tactivate(struct ufs_hba *hba)
6581 {
6582 int ret = 0;
6583 u32 peer_rx_min_activatetime = 0, tuned_pa_tactivate;
6584
6585 ret = ufshcd_dme_peer_get(hba,
6586 UIC_ARG_MIB_SEL(
6587 RX_MIN_ACTIVATETIME_CAPABILITY,
6588 UIC_ARG_MPHY_RX_GEN_SEL_INDEX(0)),
6589 &peer_rx_min_activatetime);
6590 if (ret)
6591 goto out;
6592
6593 /* make sure proper unit conversion is applied */
6594 tuned_pa_tactivate =
6595 ((peer_rx_min_activatetime * RX_MIN_ACTIVATETIME_UNIT_US)
6596 / PA_TACTIVATE_TIME_UNIT_US);
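/*
* Illustrative example (assuming RX_MIN_ACTIVATETIME_UNIT_US = 100 and
* PA_TACTIVATE_TIME_UNIT_US = 10, as in the mainline headers): a peer
* value of 5 corresponds to 500 us and yields tuned_pa_tactivate = 50.
*/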
6597 ret = ufshcd_dme_set(hba, UIC_ARG_MIB(PA_TACTIVATE),
6598 tuned_pa_tactivate);
6599
6600 out:
6601 return ret;
6602 }
6603
6604 /**
6605 * ufshcd_tune_pa_hibern8time - Tunes PA_Hibern8Time of local UniPro
6606 * @hba: per-adapter instance
6607 *
6608 * PA_Hibern8Time parameter can be tuned manually if UniPro version is less than
6609 * 1.61. PA_Hibern8Time needs to be the maximum of the local M-PHY's
6610 * TX_HIBERN8TIME_CAPABILITY & peer M-PHY's RX_HIBERN8TIME_CAPABILITY.
6611 * This optimal value can help reduce the hibern8 exit latency.
6612 *
6613 * Returns zero on success, non-zero error value on failure.
6614 */
6615 static int ufshcd_tune_pa_hibern8time(struct ufs_hba *hba)
6616 {
6617 int ret = 0;
6618 u32 local_tx_hibern8_time_cap = 0, peer_rx_hibern8_time_cap = 0;
6619 u32 max_hibern8_time, tuned_pa_hibern8time;
6620
6621 ret = ufshcd_dme_get(hba,
6622 UIC_ARG_MIB_SEL(TX_HIBERN8TIME_CAPABILITY,
6623 UIC_ARG_MPHY_TX_GEN_SEL_INDEX(0)),
6624 &local_tx_hibern8_time_cap);
6625 if (ret)
6626 goto out;
6627
6628 ret = ufshcd_dme_peer_get(hba,
6629 UIC_ARG_MIB_SEL(RX_HIBERN8TIME_CAPABILITY,
6630 UIC_ARG_MPHY_RX_GEN_SEL_INDEX(0)),
6631 &peer_rx_hibern8_time_cap);
6632 if (ret)
6633 goto out;
6634
6635 max_hibern8_time = max(local_tx_hibern8_time_cap,
6636 peer_rx_hibern8_time_cap);
6637 /* make sure proper unit conversion is applied */
6638 tuned_pa_hibern8time = ((max_hibern8_time * HIBERN8TIME_UNIT_US)
6639 / PA_HIBERN8_TIME_UNIT_US);
6640 ret = ufshcd_dme_set(hba, UIC_ARG_MIB(PA_HIBERN8TIME),
6641 tuned_pa_hibern8time);
6642 out:
6643 return ret;
6644 }
6645
6646 static void ufshcd_tune_unipro_params(struct ufs_hba *hba)
6647 {
6648 if (ufshcd_is_unipro_pa_params_tuning_req(hba)) {
6649 ufshcd_tune_pa_tactivate(hba);
6650 ufshcd_tune_pa_hibern8time(hba);
6651 }
6652
6653 if (hba->dev_quirks & UFS_DEVICE_QUIRK_PA_TACTIVATE)
6654 /* set 1ms timeout for PA_TACTIVATE */
6655 ufshcd_dme_set(hba, UIC_ARG_MIB(PA_TACTIVATE), 10);
6656
6658 }
6659
6660 static void ufshcd_clear_dbg_ufs_stats(struct ufs_hba *hba)
6661 {
6662 int err_reg_hist_size = sizeof(struct ufs_uic_err_reg_hist);
6663
6664 hba->ufs_stats.hibern8_exit_cnt = 0;
6665 hba->ufs_stats.last_hibern8_exit_tstamp = ktime_set(0, 0);
6666
6667 memset(&hba->ufs_stats.pa_err, 0, err_reg_hist_size);
6668 memset(&hba->ufs_stats.dl_err, 0, err_reg_hist_size);
6669 memset(&hba->ufs_stats.nl_err, 0, err_reg_hist_size);
6670 memset(&hba->ufs_stats.tl_err, 0, err_reg_hist_size);
6671 memset(&hba->ufs_stats.dme_err, 0, err_reg_hist_size);
6672
6673 hba->req_abort_count = 0;
6674 }
6675
6676 static void ufshcd_init_desc_sizes(struct ufs_hba *hba)
6677 {
6678 int err;
6679
6680 err = ufshcd_read_desc_length(hba, QUERY_DESC_IDN_DEVICE, 0,
6681 &hba->desc_size.dev_desc);
6682 if (err)
6683 hba->desc_size.dev_desc = QUERY_DESC_DEVICE_DEF_SIZE;
6684
6685 err = ufshcd_read_desc_length(hba, QUERY_DESC_IDN_POWER, 0,
6686 &hba->desc_size.pwr_desc);
6687 if (err)
6688 hba->desc_size.pwr_desc = QUERY_DESC_POWER_DEF_SIZE;
6689
6690 err = ufshcd_read_desc_length(hba, QUERY_DESC_IDN_INTERCONNECT, 0,
6691 &hba->desc_size.interc_desc);
6692 if (err)
6693 hba->desc_size.interc_desc = QUERY_DESC_INTERCONNECT_DEF_SIZE;
6694
6695 err = ufshcd_read_desc_length(hba, QUERY_DESC_IDN_CONFIGURATION, 0,
6696 &hba->desc_size.conf_desc);
6697 if (err)
6698 hba->desc_size.conf_desc = QUERY_DESC_CONFIGURATION_DEF_SIZE;
6699
6700 err = ufshcd_read_desc_length(hba, QUERY_DESC_IDN_UNIT, 0,
6701 &hba->desc_size.unit_desc);
6702 if (err)
6703 hba->desc_size.unit_desc = QUERY_DESC_UNIT_DEF_SIZE;
6704
6705 err = ufshcd_read_desc_length(hba, QUERY_DESC_IDN_GEOMETRY, 0,
6706 &hba->desc_size.geom_desc);
6707 if (err)
6708 hba->desc_size.geom_desc = QUERY_DESC_GEOMETRY_DEF_SIZE;
6709 }
6710
6711 static void ufshcd_def_desc_sizes(struct ufs_hba *hba)
6712 {
6713 hba->desc_size.dev_desc = QUERY_DESC_DEVICE_DEF_SIZE;
6714 hba->desc_size.pwr_desc = QUERY_DESC_POWER_DEF_SIZE;
6715 hba->desc_size.interc_desc = QUERY_DESC_INTERCONNECT_DEF_SIZE;
6716 hba->desc_size.conf_desc = QUERY_DESC_CONFIGURATION_DEF_SIZE;
6717 hba->desc_size.unit_desc = QUERY_DESC_UNIT_DEF_SIZE;
6718 hba->desc_size.geom_desc = QUERY_DESC_GEOMETRY_DEF_SIZE;
6719 }
6720
6721 /**
6722 * ufshcd_probe_hba - probe hba to detect device and initialize
6723 * @hba: per-adapter instance
6724 *
6725 * Execute link-startup and verify device initialization
6726 */
6727 static int ufshcd_probe_hba(struct ufs_hba *hba)
6728 {
6729 struct ufs_dev_desc card = {0};
6730 struct ufs_pa_layer_attr *pwr_info = &hba->max_pwr_info.info;
6731 struct ufs_vreg_info *info = &hba->vreg_info;
6732 int re_cnt = 0;
6733 int ret, link_startup_fail = 0;
6734 ktime_t start = ktime_get();
6735 unsigned long flags;
6736
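/*
* The probe sequence below is retried up to UFS_LINK_SETUP_RETRIES times
* (see the out: label). When a previous attempt failed at link startup,
* the device supplies are first cycled through LPM/HPM so the next attempt
* starts from a clean device power state.
*/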
6737 retry:
6738 /* For device power control when link startup fails. */
6739 if (link_startup_fail) {
6740 ufshcd_vreg_set_lpm(hba);
6741 ret = ufshcd_vreg_set_hpm(hba);
6742
6743 if (gpio_is_valid(info->ufs_power_gpio))
6744 dev_info(hba->dev, "%s: UFS power pin: 0x%08x\n", __func__, gpio_get_value(info->ufs_power_gpio));
6745 if (gpio_is_valid(info->ufs_reset_n_gpio))
6746 dev_info(hba->dev, "%s: RESET_N: 0x%08x\n", __func__, gpio_get_value(info->ufs_reset_n_gpio));
6747 if (ret)
6748 goto out;
6749 }
6750
6751 ret = ufshcd_hba_enable(hba);
6752 if (ret)
6753 goto out;
6754
6755 ret = ufshcd_link_startup(hba);
6756 if (ret) {
6757 link_startup_fail = 1;
6758 goto out;
6759 }
6760 link_startup_fail = 0;
6761
6762 dev_info(hba->dev, "UFS link established\n");
6763
6764 /* set the default level for urgent bkops */
6765 hba->urgent_bkops_lvl = BKOPS_STATUS_PERF_IMPACT;
6766 hba->is_urgent_bkops_lvl_checked = false;
6767
6768 /* Debug counters initialization */
6769 ufshcd_clear_dbg_ufs_stats(hba);
6770
6771 /* UniPro link is active now */
6772 ufshcd_set_link_active(hba);
6773
6774 ret = ufshcd_verify_dev_init(hba);
6775 if (ret)
6776 goto out;
6777
6778 ret = ufshcd_complete_dev_init(hba);
6779 if (ret)
6780 goto out;
6781
6782 /* Init check for device descriptor sizes */
6783 ufshcd_init_desc_sizes(hba);
6784
6785 ret = ufs_get_device_desc(hba, &card);
6786 if (ret) {
6787 dev_err(hba->dev, "%s: Failed getting device info. err = %d\n",
6788 __func__, ret);
6789 goto out;
6790 }
6791
6792 ufs_fixup_device_setup(hba, &card);
6793 ufshcd_tune_unipro_params(hba);
6794
6795 ret = ufshcd_set_vccq_rail_unused(hba,
6796 (hba->dev_quirks & UFS_DEVICE_NO_VCCQ) ? true : false);
6797 if (ret)
6798 goto out;
6799
6800 /* UFS device is also active now */
6801 ufshcd_set_ufs_dev_active(hba);
6802 ufshcd_force_reset_auto_bkops(hba);
6803 hba->wlun_dev_clr_ua = true;
6804
6805 if (ufshcd_get_max_pwr_mode(hba)) {
6806 dev_err(hba->dev,
6807 "%s: Failed getting max supported power mode\n",
6808 __func__);
6809 } else {
6810 if ((pwr_info->lane_rx != pwr_info->peer_available_lane_rx)
6811 || (pwr_info->lane_tx != pwr_info->peer_available_lane_tx)) {
6812 dev_info(hba->dev,
6813 "%s: availabele lanes, Host:Device Lane tx %d%d rx %d:%d\n",
6814 __func__,
6815 pwr_info->lane_tx, pwr_info->peer_available_lane_tx,
6816 pwr_info->lane_rx, pwr_info->peer_available_lane_rx);
6817 }
6818 ret = ufshcd_config_pwr_mode(hba, &hba->max_pwr_info.info);
6819 if (ret) {
6820 dev_err(hba->dev, "%s: Failed setting power mode, err = %d\n",
6821 __func__, ret);
6822 goto out;
6823 }
6824
6825 if (hba->max_pwr_info.info.pwr_rx == FAST_MODE ||
6826 hba->max_pwr_info.info.pwr_tx == FAST_MODE ||
6827 hba->max_pwr_info.info.pwr_rx == FASTAUTO_MODE ||
6828 hba->max_pwr_info.info.pwr_tx == FASTAUTO_MODE)
6829 dev_info(hba->dev, "HS mode configured\n");
6830 }
6831
6832 /* set the state as operational after switching to desired gear */
6833 spin_lock_irqsave(hba->host->host_lock, flags);
6834 hba->ufshcd_state = UFSHCD_STATE_OPERATIONAL;
6835 spin_unlock_irqrestore(hba->host->host_lock, flags);
6836
6837 /*
6838 * If we are in error handling context or in power management callbacks
6839 * context, no need to scan the host
6840 */
6841 if (!ufshcd_eh_in_progress(hba) && !hba->pm_op_in_progress
6842 && !hba->async_resume) {
6843 bool flag;
6844
6845 /* clear any previous UFS device information */
6846 memset(&hba->dev_info, 0, sizeof(hba->dev_info));
6847 if (!ufshcd_query_flag_retry(hba, UPIU_QUERY_OPCODE_READ_FLAG,
6848 QUERY_FLAG_IDN_PWR_ON_WPE, &flag))
6849 hba->dev_info.f_power_on_wp_en = flag;
6850
6851 if (!hba->is_init_prefetch)
6852 ufshcd_init_icc_levels(hba);
6853
6854 scsi_scan_host(hba->host);
6855
6856 /* Add required well known logical units to scsi mid layer */
6857 ret = ufshcd_scsi_add_wlus(hba);
6858 if (ret) {
6859 dev_warn(hba->dev, "%s failed to add w-lus %d\n",
6860 __func__, ret);
6861 ret = 0;
6862 }
6863
6864 /* Initialize devfreq after UFS device is detected */
6865 if (ufshcd_is_clkscaling_supported(hba)) {
6866 memcpy(&hba->clk_scaling.saved_pwr_info.info,
6867 &hba->pwr_info,
6868 sizeof(struct ufs_pa_layer_attr));
6869 hba->clk_scaling.saved_pwr_info.is_valid = true;
6870 if (!hba->devfreq) {
6871 #if defined(CONFIG_PM_DEVFREQ)
6872 hba->devfreq = devm_devfreq_add_device(hba->dev,
6873 &ufs_devfreq_profile,
6874 "simple_ondemand",
6875 NULL);
6876 #endif
6877 if (IS_ERR(hba->devfreq)) {
6878 ret = PTR_ERR(hba->devfreq);
6879 dev_err(hba->dev, "Unable to register with devfreq %d\n",
6880 ret);
6881 goto out;
6882 }
6883 }
6884 hba->clk_scaling.is_allowed = true;
6885 }
6886
6887 pm_runtime_put_sync(hba->dev);
6888 }
6889
6890 hba->host->wlun_clr_uac = true;
6891 if (!hba->is_init_prefetch)
6892 hba->is_init_prefetch = true;
6893
6894 out:
6895 if (ret && re_cnt++ < UFS_LINK_SETUP_RETRIES) {
6896 dev_err(hba->dev, "%s failed with err %d, retrying:%d\n",
6897 __func__, ret, re_cnt);
6898 goto retry;
6899 } else if (ret && re_cnt >= UFS_LINK_SETUP_RETRIES) {
6900 dev_err(hba->dev, "%s failed after retries with err %d\n",
6901 __func__, ret);
6902 exynos_ufs_dump_uic_info(hba);
6903 spin_lock_irqsave(hba->host->host_lock, flags);
6904 hba->ufshcd_state = UFSHCD_STATE_ERROR;
6905 spin_unlock_irqrestore(hba->host->host_lock, flags);
6906 }
6907
6908 /*
6909 * If we failed to initialize the device or the device is not
6910 * present, turn off the power/clocks etc.
6911 */
6912 if (ret && !ufshcd_eh_in_progress(hba) && !hba->pm_op_in_progress) {
6913 pm_runtime_put_sync(hba->dev);
6914 ufshcd_hba_exit(hba);
6915 }
6916
6917 trace_ufshcd_init(dev_name(hba->dev), ret,
6918 ktime_to_us(ktime_sub(ktime_get(), start)),
6919 hba->curr_dev_pwr_mode, hba->uic_link_state);
6920
6921 if (!ret) {
6922 /*
6923 * Inform scsi mid-layer that we did reset and allow to handle
6924 * Unit Attention properly.
6925 */
6926 spin_lock_irqsave(hba->host->host_lock, flags);
6927 scsi_report_bus_reset(hba->host, 0);
6928 spin_unlock_irqrestore(hba->host->host_lock, flags);
6929 }
6930
6931 hba->async_resume = false;
6932
6933 return ret;
6934 }
6935
6936 /**
6937 * ufshcd_async_scan - asynchronous execution for probing hba
6938 * @data: data pointer to pass to this function
6939 * @cookie: cookie data
6940 */
6941 static void ufshcd_async_scan(void *data, async_cookie_t cookie)
6942 {
6943 struct ufs_hba *hba = (struct ufs_hba *)data;
6944 int err = 0;
6945
6946 if (hba->async_resume) {
6947 scsi_block_requests(hba->host);
6948 err = ufshcd_probe_hba(hba);
6949 if (err)
6950 goto err;
6951
6952 if (!ufshcd_is_ufs_dev_active(hba)) {
6953 scsi_unblock_requests(hba->host);
6954 ufshcd_set_dev_pwr_mode(hba, UFS_ACTIVE_PWR_MODE);
6955 scsi_block_requests(hba->host);
6956 }
6957
6958 /*
6959 * If BKOPs operations are urgently needed at this moment then
6960 * keep auto-bkops enabled or else disable it.
6961 */
6962 ufshcd_urgent_bkops(hba);
6963 err:
6964 scsi_unblock_requests(hba->host);
6965 } else {
6966 ufshcd_probe_hba(hba);
6967 }
6968 }
6969
6970 static enum blk_eh_timer_return ufshcd_eh_timed_out(struct scsi_cmnd *scmd)
6971 {
6972 unsigned long flags;
6973 struct Scsi_Host *host;
6974 struct ufs_hba *hba;
6975 int index;
6976 bool found = false;
6977
6978 if (!scmd || !scmd->device || !scmd->device->host)
6979 return BLK_EH_NOT_HANDLED;
6980
6981 host = scmd->device->host;
6982 hba = shost_priv(host);
6983 if (!hba)
6984 return BLK_EH_NOT_HANDLED;
6985
6986 spin_lock_irqsave(host->host_lock, flags);
6987
6988 for_each_set_bit(index, &hba->outstanding_reqs, hba->nutrs) {
6989 if (hba->lrb[index].cmd == scmd) {
6990 found = true;
6991 break;
6992 }
6993 }
6994
6995 spin_unlock_irqrestore(host->host_lock, flags);
6996
6997 /*
6998 * Bypass SCSI error handling and reset the block layer timer if this
6999 * SCSI command was not actually dispatched to UFS driver, otherwise
7000 * let SCSI layer handle the error as usual.
7001 */
7002 return found ? BLK_EH_NOT_HANDLED : BLK_EH_RESET_TIMER;
7003 }
7004
7005 /**
7006 * ufshcd_query_ioctl - perform user read queries
7007 * @hba: per-adapter instance
7008 * @lun: used for lun specific queries
7009 * @buffer: user space buffer for reading and submitting query data and params
7010 * @return: 0 for success negative error code otherwise
7011 *
7012 * Expected/Submitted buffer structure is struct ufs_ioctl_query_data.
7013 * It will read the opcode, idn and buf_length parameters, and put the
7014 * response in the buffer field while updating the used size in buf_length.
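*
* For example (illustrative): a device descriptor read passes
* opcode = UPIU_QUERY_OPCODE_READ_DESC, idn = QUERY_DESC_IDN_DEVICE and a
* sufficiently large buf_size; on return, buf_size holds the number of
* bytes actually copied back after the ufs_ioctl_query_data header.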
7015 */
7016 static int ufshcd_query_ioctl(struct ufs_hba *hba, u8 lun, void __user *buffer)
7017 {
7018 struct ufs_ioctl_query_data *ioctl_data;
7019 int err = 0;
7020 int length = 0;
7021 void *data_ptr;
7022 bool flag;
7023 u32 att;
7024 u8 index;
7025 u8 *desc = NULL;
7026
7027 ioctl_data = kzalloc(sizeof(struct ufs_ioctl_query_data), GFP_KERNEL);
7028 if (!ioctl_data) {
7029 dev_err(hba->dev, "%s: Failed allocating %zu bytes\n", __func__,
7030 sizeof(struct ufs_ioctl_query_data));
7031 err = -ENOMEM;
7032 goto out;
7033 }
7034
7035 /* extract params from user buffer */
7036 err = copy_from_user(ioctl_data, buffer,
7037 sizeof(struct ufs_ioctl_query_data));
7038 if (err) {
7039 dev_err(hba->dev,
7040 "%s: Failed copying buffer from user, err %d\n",
7041 __func__, err);
7042 goto out_release_mem;
7043 }
7044
7045 /* verify legal parameters & send query */
7046 switch (ioctl_data->opcode) {
7047 case UPIU_QUERY_OPCODE_READ_DESC:
7048 switch (ioctl_data->idn) {
7049 case QUERY_DESC_IDN_DEVICE:
7050 case QUERY_DESC_IDN_CONFIGURATION:
7051 case QUERY_DESC_IDN_INTERCONNECT:
7052 case QUERY_DESC_IDN_GEOMETRY:
7053 case QUERY_DESC_IDN_POWER:
7054 case QUERY_DESC_IDN_HEALTH:
7055 index = 0;
7056 break;
7057 case QUERY_DESC_IDN_UNIT:
7058 if (!ufs_is_valid_unit_desc_lun(lun)) {
7059 dev_err(hba->dev,
7060 "%s: No unit descriptor for lun 0x%x\n",
7061 __func__, lun);
7062 err = -EINVAL;
7063 goto out_release_mem;
7064 }
7065 index = lun;
7066 break;
7067 default:
7068 goto out_einval;
7069 }
7070 length = min_t(int, QUERY_DESC_MAX_SIZE,
7071 ioctl_data->buf_size);
7072 desc = kzalloc(length, GFP_KERNEL);
7073 if (!desc) {
7074 dev_err(hba->dev, "%s: Failed allocating %d bytes\n",
7075 __func__, length);
7076 err = -ENOMEM;
7077 goto out_release_mem;
7078 }
7079 err = ufshcd_query_descriptor_retry(hba, ioctl_data->opcode,
7080 ioctl_data->idn, index, 0, desc, &length);
7081 break;
7082 case UPIU_QUERY_OPCODE_READ_ATTR:
7083 switch (ioctl_data->idn) {
7084 case QUERY_ATTR_IDN_BOOT_LU_EN:
7085 case QUERY_ATTR_IDN_POWER_MODE:
7086 case QUERY_ATTR_IDN_ACTIVE_ICC_LVL:
7087 case QUERY_ATTR_IDN_OOO_DATA_EN:
7088 case QUERY_ATTR_IDN_BKOPS_STATUS:
7089 case QUERY_ATTR_IDN_PURGE_STATUS:
7090 case QUERY_ATTR_IDN_MAX_DATA_IN:
7091 case QUERY_ATTR_IDN_MAX_DATA_OUT:
7092 case QUERY_ATTR_IDN_REF_CLK_FREQ:
7093 case QUERY_ATTR_IDN_CONF_DESC_LOCK:
7094 case QUERY_ATTR_IDN_MAX_NUM_OF_RTT:
7095 case QUERY_ATTR_IDN_EE_CONTROL:
7096 case QUERY_ATTR_IDN_EE_STATUS:
7097 case QUERY_ATTR_IDN_SECONDS_PASSED:
7098 index = 0;
7099 break;
7100 case QUERY_ATTR_IDN_DYN_CAP_NEEDED:
7101 case QUERY_ATTR_IDN_CORR_PRG_BLK_NUM:
7102 index = lun;
7103 break;
7104 default:
7105 goto out_einval;
7106 }
7107 err = ufshcd_query_attr_retry(hba, ioctl_data->opcode,
7108 ioctl_data->idn, index, 0, &att);
7109 break;
7110 case UPIU_QUERY_OPCODE_READ_FLAG:
7111 switch (ioctl_data->idn) {
7112 case QUERY_FLAG_IDN_FDEVICEINIT:
7113 case QUERY_FLAG_IDN_PERMANENT_WPE:
7114 case QUERY_FLAG_IDN_PWR_ON_WPE:
7115 case QUERY_FLAG_IDN_BKOPS_EN:
7116 case QUERY_FLAG_IDN_PURGE_ENABLE:
7117 case QUERY_FLAG_IDN_FPHYRESOURCEREMOVAL:
7118 case QUERY_FLAG_IDN_BUSY_RTC:
7119 break;
7120 default:
7121 goto out_einval;
7122 }
7123 err = ufshcd_query_flag_retry(hba, ioctl_data->opcode,
7124 ioctl_data->idn, &flag);
7125 break;
7126 default:
7127 goto out_einval;
7128 }
7129
7130 if (err) {
7131 dev_err(hba->dev, "%s: Query for idn %d failed\n", __func__,
7132 ioctl_data->idn);
7133 goto out_release_mem;
7134 }
7135
7136 /*
7137 * Copy response data.
7138 * As we might end up reading less data than what is specified in
7139 * "ioctl_data->buf_size", update "ioctl_data->buf_size" to reflect
7140 * how much data was actually read.
7141 */
7142 switch (ioctl_data->opcode) {
7143 case UPIU_QUERY_OPCODE_READ_DESC:
7144 ioctl_data->buf_size = min_t(int, ioctl_data->buf_size, length);
7145 data_ptr = desc;
7146 break;
7147 case UPIU_QUERY_OPCODE_READ_ATTR:
7148 ioctl_data->buf_size = sizeof(u32);
7149 data_ptr = &att;
7150 break;
7151 case UPIU_QUERY_OPCODE_READ_FLAG:
7152 ioctl_data->buf_size = 1;
7153 data_ptr = &flag;
7154 break;
7155 default:
7156 BUG_ON(true);
7157 }
7158
7159 /* copy to user */
7160 err = copy_to_user(buffer, ioctl_data,
7161 sizeof(struct ufs_ioctl_query_data));
7162 if (err)
7163 dev_err(hba->dev, "%s: Failed copying back to user.\n",
7164 __func__);
7165 err = copy_to_user(buffer + sizeof(struct ufs_ioctl_query_data),
7166 data_ptr, ioctl_data->buf_size);
7167 if (err)
7168 dev_err(hba->dev, "%s: err %d copying back to user.\n",
7169 __func__, err);
7170 goto out_release_mem;
7171
7172 out_einval:
7173 dev_err(hba->dev,
7174 "%s: illegal ufs query ioctl data, opcode 0x%x, idn 0x%x\n",
7175 __func__, ioctl_data->opcode, (unsigned int)ioctl_data->idn);
7176 err = -EINVAL;
7177 out_release_mem:
7178 kfree(ioctl_data);
7179 kfree(desc);
7180 out:
7181 return err;
7182 }
7183
7184 /**
7185 * ufshcd_ioctl - ufs ioctl callback registered in scsi_host
7186 * @dev: scsi device required for per LUN queries
7187 * @cmd: command opcode
7188 * @buffer: user space buffer for transferring data
7189 *
7190 * Supported commands:
7191 * UFS_IOCTL_QUERY
7192 */
7193 static int ufshcd_ioctl(struct scsi_device *dev, int cmd, void __user *buffer)
7194 {
7195 struct ufs_hba *hba = shost_priv(dev->host);
7196 int err = 0;
7197
7198 BUG_ON(!hba);
7199 if (!buffer) {
7200 if (cmd != SCSI_UFS_REQUEST_SENSE) {
7201 dev_err(hba->dev, "%s: User buffer is NULL!\n", __func__);
7202 return -EINVAL;
7203 }
7204 }
7205 switch (cmd) {
7206 case SCSI_UFS_REQUEST_SENSE:
7207 err = ufshcd_send_request_sense(hba, hba->sdev_rpmb);
7208 if (err) {
7209 dev_warn(hba->dev, "%s failed to clear uac on rpmb(w-lu) %d\n",
7210 __func__, err);
7211 }
7212 hba->host->wlun_clr_uac = false;
7213 break;
7214 case UFS_IOCTL_QUERY:
7215 //pm_runtime_get_sync(hba->dev);
7216 err = ufshcd_query_ioctl(hba, ufshcd_scsi_to_upiu_lun(dev->lun),
7217 buffer);
7218 //pm_runtime_put_sync(hba->dev);
7219 break;
7220 case UFS_IOCTL_BLKROSET:
7221 err = -ENOIOCTLCMD;
7222 break;
7223 default:
7224 err = -EINVAL;
7225 dev_err(hba->dev, "%s: Illegal ufs-IOCTL cmd %d\n", __func__,
7226 cmd);
7227 break;
7228 }
7229
7230 return err;
7231 }
7232 static struct scsi_host_template ufshcd_driver_template = {
7233 .module = THIS_MODULE,
7234 .name = UFSHCD,
7235 .proc_name = UFSHCD,
7236 .queuecommand = ufshcd_queuecommand,
7237 .slave_alloc = ufshcd_slave_alloc,
7238 .slave_configure = ufshcd_slave_configure,
7239 .slave_destroy = ufshcd_slave_destroy,
7240 .change_queue_depth = ufshcd_change_queue_depth,
7241 .eh_abort_handler = ufshcd_abort,
7242 .eh_device_reset_handler = ufshcd_eh_device_reset_handler,
7243 .eh_host_reset_handler = ufshcd_eh_host_reset_handler,
7244 .eh_timed_out = ufshcd_eh_timed_out,
7245 .ioctl = ufshcd_ioctl,
7246 .this_id = -1,
7247 .sg_tablesize = SG_ALL,
7248 .cmd_per_lun = UFSHCD_CMD_PER_LUN,
7249 .can_queue = UFSHCD_CAN_QUEUE,
7250 .max_host_blocked = 1,
7251 .skip_settle_delay = 1,
7252 .track_queue_depth = 1,
7253 };
7254
7255 static int ufshcd_config_vreg_load(struct device *dev, struct ufs_vreg *vreg,
7256 int ua)
7257 {
7258 int ret;
7259
7260 if (!vreg)
7261 return 0;
7262
7263 ret = regulator_set_load(vreg->reg, ua);
7264 if (ret < 0) {
7265 dev_err(dev, "%s: %s set load (ua=%d) failed, err=%d\n",
7266 __func__, vreg->name, ua, ret);
7267 }
7268
7269 return ret;
7270 }
7271
7272 static inline int ufshcd_config_vreg_lpm(struct ufs_hba *hba,
7273 struct ufs_vreg *vreg)
7274 {
7275 if (!vreg)
7276 return 0;
7277 else if (vreg->unused)
7278 return 0;
7279 else
7280 return ufshcd_config_vreg_load(hba->dev, vreg,
7281 UFS_VREG_LPM_LOAD_UA);
7282 }
7283
7284 static inline int ufshcd_config_vreg_hpm(struct ufs_hba *hba,
7285 struct ufs_vreg *vreg)
7286 {
7287 if (!vreg)
7288 return 0;
7289 else if (vreg->unused)
7290 return 0;
7291 else
7292 return ufshcd_config_vreg_load(hba->dev, vreg, vreg->max_uA);
7293 }
7294
7295 static int ufshcd_config_vreg(struct device *dev,
7296 struct ufs_vreg *vreg, bool on)
7297 {
7298 int ret = 0;
7299 struct regulator *reg;
7300 const char *name;
7301 int min_uV, uA_load;
7302
7303 BUG_ON(!vreg);
7304
7305 reg = vreg->reg;
7306 name = vreg->name;
7307
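/*
* When enabling, program the full min_uV..max_uV window and the maximum
* expected load; when disabling, drop both the voltage floor and the load
* to 0 so the regulator framework can pick its lowest-power state.
*/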
7308 if (regulator_count_voltages(reg) > 0) {
7309 min_uV = on ? vreg->min_uV : 0;
7310 ret = regulator_set_voltage(reg, min_uV, vreg->max_uV);
7311 if (ret) {
7312 dev_err(dev, "%s: %s set voltage failed, err=%d\n",
7313 __func__, name, ret);
7314 goto out;
7315 }
7316
7317 uA_load = on ? vreg->max_uA : 0;
7318 ret = ufshcd_config_vreg_load(dev, vreg, uA_load);
7319 if (ret)
7320 goto out;
7321 }
7322 out:
7323 return ret;
7324 }
7325
7326 static int ufshcd_enable_vreg(struct device *dev, struct ufs_vreg *vreg)
7327 {
7328 int ret = 0;
7329
7330 if (!vreg)
7331 goto out;
7332 else if (vreg->enabled || vreg->unused)
7333 goto out;
7334
7335 ret = ufshcd_config_vreg(dev, vreg, true);
7336 if (!ret)
7337 ret = regulator_enable(vreg->reg);
7338
7339 if (!ret)
7340 vreg->enabled = true;
7341 else
7342 dev_err(dev, "%s: %s enable failed, err=%d\n",
7343 __func__, vreg->name, ret);
7344 out:
7345 return ret;
7346 }
7347
7348 static int ufshcd_disable_vreg(struct device *dev, struct ufs_vreg *vreg)
7349 {
7350 int ret = 0;
7351
7352 if (!vreg)
7353 goto out;
7354 else if (!vreg->enabled || vreg->unused)
7355 goto out;
7356
7357 ret = regulator_disable(vreg->reg);
7358
7359 if (!ret) {
7360 /* ignore errors on applying disable config */
7361 ufshcd_config_vreg(dev, vreg, false);
7362 vreg->enabled = false;
7363 } else {
7364 dev_err(dev, "%s: %s disable failed, err=%d\n",
7365 __func__, vreg->name, ret);
7366 }
7367 out:
7368 return ret;
7369 }
7370
7371 static int ufshcd_setup_vreg(struct ufs_hba *hba, bool on)
7372 {
7373 int ret = 0;
7374 struct device *dev = hba->dev;
7375 struct ufs_vreg_info *info = &hba->vreg_info;
7376
7377 if (!info)
7378 goto out;
7379
7380 ret = ufshcd_toggle_vreg(dev, info->vcc, on);
7381 if (ret)
7382 goto out;
7383
7384 ret = ufshcd_toggle_vreg(dev, info->vccq, on);
7385 if (ret)
7386 goto out;
7387
7388 ret = ufshcd_toggle_vreg(dev, info->vccq2, on);
7389 if (ret)
7390 goto out;
7391
7392 out:
7393 if (ret) {
7394 ufshcd_toggle_vreg(dev, info->vccq2, false);
7395 ufshcd_toggle_vreg(dev, info->vccq, false);
7396 ufshcd_toggle_vreg(dev, info->vcc, false);
7397 }
7398 return ret;
7399 }
7400
7401 static int ufshcd_setup_hba_vreg(struct ufs_hba *hba, bool on)
7402 {
7403 struct ufs_vreg_info *info = &hba->vreg_info;
7404
7405 if (info)
7406 return ufshcd_toggle_vreg(hba->dev, info->vdd_hba, on);
7407
7408 return 0;
7409 }
7410
7411 static int ufshcd_get_vreg(struct device *dev, struct ufs_vreg *vreg)
7412 {
7413 int ret = 0;
7414
7415 if (!vreg)
7416 goto out;
7417
7418 vreg->reg = devm_regulator_get(dev, vreg->name);
7419 if (IS_ERR(vreg->reg)) {
7420 ret = PTR_ERR(vreg->reg);
7421 dev_err(dev, "%s: %s get failed, err=%d\n",
7422 __func__, vreg->name, ret);
7423 }
7424 out:
7425 return ret;
7426 }
7427
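/*
 * ufshcd_init_vreg - acquire the vcc, vccq and vccq2 regulators for the UFS
 * device via devm_regulator_get(); the host controller rail (vdd_hba) is
 * handled separately by ufshcd_init_hba_vreg().
 */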
7428 static int ufshcd_init_vreg(struct ufs_hba *hba)
7429 {
7430 int ret = 0;
7431 struct device *dev = hba->dev;
7432 struct ufs_vreg_info *info = &hba->vreg_info;
7433
7434 if (!info)
7435 goto out;
7436
7437 ret = ufshcd_get_vreg(dev, info->vcc);
7438 if (ret)
7439 goto out;
7440
7441 ret = ufshcd_get_vreg(dev, info->vccq);
7442 if (ret)
7443 goto out;
7444
7445 ret = ufshcd_get_vreg(dev, info->vccq2);
7446 out:
7447 return ret;
7448 }
7449
7450 static int ufshcd_init_hba_vreg(struct ufs_hba *hba)
7451 {
7452 struct ufs_vreg_info *info = &hba->vreg_info;
7453
7454 if (info)
7455 return ufshcd_get_vreg(hba->dev, info->vdd_hba);
7456
7457 return 0;
7458 }
7459
7460 static int ufshcd_set_vccq_rail_unused(struct ufs_hba *hba, bool unused)
7461 {
7462 int ret = 0;
7463 struct ufs_vreg_info *info = &hba->vreg_info;
7464
7465 if (!info)
7466 goto out;
7467 else if (!info->vccq)
7468 goto out;
7469
7470 if (unused) {
7471 /* shut off the rail here */
7472 ret = ufshcd_toggle_vreg(hba->dev, info->vccq, false);
7473 /*
7474 * Mark this rail as no longer used, so it doesn't get enabled
7475 * later by mistake
7476 */
7477 if (!ret)
7478 info->vccq->unused = true;
7479 } else {
7480 /*
7481 * rail should have been already enabled hence just make sure
7482 * that unused flag is cleared.
7483 */
7484 info->vccq->unused = false;
7485 }
7486 out:
7487 return ret;
7488 }
7489
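/*
 * __ufshcd_setup_clocks - enable or disable every clock on hba->clk_list_head.
 * When @skip_ref_clk is true the device reference clock ("ref_clk") is left
 * untouched, which is required while the link is still active. Clocks that
 * were enabled are rolled back if any later clock fails to enable.
 */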
7490 static int __ufshcd_setup_clocks(struct ufs_hba *hba, bool on,
7491 bool skip_ref_clk)
7492 {
7493 int ret = 0;
7494 struct ufs_clk_info *clki;
7495 struct list_head *head = &hba->clk_list_head;
7496 const char *ref_clk = "ref_clk";
7497 unsigned long flags;
7498 ktime_t start = ktime_get();
7499 bool clk_state_changed = false;
7500
7501 if (list_empty(head))
7502 goto out;
7503
7504 /*
7505 * Vendor specific setup_clocks ops may depend on the clocks managed by
7506 * this standard driver, hence call the vendor specific pre-setup hook
7507 * before changing the clocks managed here.
7508 */
7509 ufshcd_vops_pre_setup_clocks(hba, on);
7518
7519 list_for_each_entry(clki, head, list) {
7520 if (!IS_ERR_OR_NULL(clki->clk)) {
7521 if (skip_ref_clk &&
7522 !strncmp(clki->name, ref_clk, strlen(ref_clk)))
7523 continue;
7524
7525 clk_state_changed = on ^ clki->enabled;
7526 if (on && !clki->enabled) {
7527 ret = clk_prepare_enable(clki->clk);
7528 if (ret) {
7529 hba->clk_gating.state = CLKS_DISABLE;
7530 dev_err(hba->dev, "%s: %s prepare enable failed, %d\n",
7531 __func__, clki->name, ret);
7532 goto out;
7533 }
7534 } else if (!on && clki->enabled) {
7535 clk_disable_unprepare(clki->clk);
7536 }
7537 clki->enabled = on;
7538 dev_dbg(hba->dev, "%s: clk: %s %sabled\n", __func__,
7539 clki->name, on ? "en" : "dis");
7540 }
7541 }
7542
7544 ret = ufshcd_vops_setup_clocks(hba, on);
7557
7558 out:
7559 if (ret) {
7560 list_for_each_entry(clki, head, list) {
7561 if (!IS_ERR_OR_NULL(clki->clk) && clki->enabled)
7562 clk_disable_unprepare(clki->clk);
7563 }
7564 } else if (!ret && on) {
7565 spin_lock_irqsave(hba->host->host_lock, flags);
7566 hba->clk_gating.state = CLKS_ON;
7567 trace_ufshcd_clk_gating(dev_name(hba->dev),
7568 hba->clk_gating.state);
7569 spin_unlock_irqrestore(hba->host->host_lock, flags);
7570 }
7571
7572 if (clk_state_changed)
7573 trace_ufshcd_profile_clk_gating(dev_name(hba->dev),
7574 (on ? "on" : "off"),
7575 ktime_to_us(ktime_sub(ktime_get(), start)), ret);
7576 return ret;
7577 }
7578
7579 static int ufshcd_setup_clocks(struct ufs_hba *hba, bool on)
7580 {
7581 return __ufshcd_setup_clocks(hba, on, false);
7582 }
7583
7584 static int ufshcd_init_clocks(struct ufs_hba *hba)
7585 {
7586 int ret = 0;
7587 struct ufs_clk_info *clki;
7588 struct device *dev = hba->dev;
7589 struct list_head *head = &hba->clk_list_head;
7590
7591 if (list_empty(head))
7592 goto out;
7593
7594 list_for_each_entry(clki, head, list) {
7595 if (!clki->name)
7596 continue;
7597
7598 clki->clk = devm_clk_get(dev, clki->name);
7599 if (IS_ERR(clki->clk)) {
7600 ret = PTR_ERR(clki->clk);
7601 dev_err(dev, "%s: %s clk get failed, %d\n",
7602 __func__, clki->name, ret);
7603 goto out;
7604 }
7605
7606 if (clki->max_freq) {
7607 ret = clk_set_rate(clki->clk, clki->max_freq);
7608 if (ret) {
7609 dev_err(hba->dev, "%s: %s clk set rate(%dHz) failed, %d\n",
7610 __func__, clki->name,
7611 clki->max_freq, ret);
7612 goto out;
7613 }
7614 #if defined(CONFIG_PM_DEVFREQ)
7615 clki->curr_freq = clki->max_freq;
7616 #endif
7617 }
7618 dev_dbg(dev, "%s: clk: %s, rate: %lu\n", __func__,
7619 clki->name, clk_get_rate(clki->clk));
7620 }
7621 out:
7622 return ret;
7623 }
7624
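/*
 * ufshcd_variant_hba_init - initialize the vendor specific (variant) driver
 * and its regulators through the variant ops, if a variant is registered.
 */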
7625 static int ufshcd_variant_hba_init(struct ufs_hba *hba)
7626 {
7627 int err = 0;
7628
7629 if (!hba->vops)
7630 goto out;
7631
7632 err = ufshcd_vops_init(hba);
7633 if (err)
7634 goto out;
7635
7636 err = ufshcd_vops_setup_regulators(hba, true);
7637 if (err)
7638 goto out_exit;
7639
7640 goto out;
7641
7642 out_exit:
7643 ufshcd_vops_exit(hba);
7644 out:
7645 if (err)
7646 dev_err(hba->dev, "%s: variant %s init failed err %d\n",
7647 __func__, ufshcd_get_var_name(hba), err);
7648 return err;
7649 }
7650
7651 static void ufshcd_variant_hba_exit(struct ufs_hba *hba)
7652 {
7653 if (!hba->vops)
7654 return;
7655
7656 ufshcd_vops_setup_regulators(hba, false);
7657
7658 ufshcd_vops_exit(hba);
7659 }
7660
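/*
 * ufshcd_hba_init - power up the host: host controller rail first, then
 * clocks, UFS device rails and the variant driver, undoing each step on
 * failure. Sets hba->is_powered on success.
 */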
7661 static int ufshcd_hba_init(struct ufs_hba *hba)
7662 {
7663 int err;
7664
7665 /*
7666 * Handle host controller power separately from the UFS device power
7667 * rails, as this helps control UFS host controller power collapse
7668 * independently, which is different from UFS device power collapse.
7669 * Also, enable the host controller power before going ahead with the
7670 * rest of the initialization here.
7671 */
7672 err = ufshcd_init_hba_vreg(hba);
7673 if (err)
7674 goto out;
7675
7676 err = ufshcd_setup_hba_vreg(hba, true);
7677 if (err)
7678 goto out;
7679
7680 err = ufshcd_init_clocks(hba);
7681 if (err)
7682 goto out_disable_hba_vreg;
7683
7684 err = ufshcd_setup_clocks(hba, true);
7685 if (err)
7686 goto out_disable_hba_vreg;
7687
7688 err = ufshcd_init_vreg(hba);
7689 if (err)
7690 goto out_disable_clks;
7691
7692 err = ufshcd_setup_vreg(hba, true);
7693 if (err)
7694 goto out_disable_clks;
7695
7696 err = ufshcd_variant_hba_init(hba);
7697 if (err)
7698 goto out_disable_vreg;
7699
7700 hba->is_powered = true;
7701 goto out;
7702
7703 out_disable_vreg:
7704 ufshcd_setup_vreg(hba, false);
7705 out_disable_clks:
7706 ufshcd_setup_clocks(hba, false);
7707 out_disable_hba_vreg:
7708 ufshcd_setup_hba_vreg(hba, false);
7709 out:
7710 return err;
7711 }
7712
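/*
 * ufshcd_hba_exit - reverse of ufshcd_hba_init(): tear down the variant
 * driver, device rails, clock scaling resources, clocks and the host
 * controller rail, then clear hba->is_powered.
 */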
7713 static void ufshcd_hba_exit(struct ufs_hba *hba)
7714 {
7715 if (hba->is_powered) {
7716 ufshcd_variant_hba_exit(hba);
7717 ufshcd_setup_vreg(hba, false);
7718 #if defined(CONFIG_PM_DEVFREQ)
7719 ufshcd_suspend_clkscaling(hba);
7720 #endif
7721 if (ufshcd_is_clkscaling_supported(hba)) {
7722 #if defined(CONFIG_PM_DEVFREQ)
7723 if (hba->devfreq)
7724 ufshcd_suspend_clkscaling(hba);
7725 #endif
7726 destroy_workqueue(hba->clk_scaling.workq);
7727 }
7728 ufshcd_setup_clocks(hba, false);
7729 ufshcd_setup_hba_vreg(hba, false);
7730 hba->is_powered = false;
7731 }
7732 }
7733
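/*
 * ufshcd_send_request_sense - issue a REQUEST SENSE command to @sdp; used to
 * clear a pending UNIT ATTENTION condition on the UFS well-known LUs.
 */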
7734 static int
7735 ufshcd_send_request_sense(struct ufs_hba *hba, struct scsi_device *sdp)
7736 {
7737 unsigned char cmd[6] = {REQUEST_SENSE,
7738 0,
7739 0,
7740 0,
7741 UFSHCD_REQ_SENSE_SIZE,
7742 0};
7743 char *buffer;
7744 int ret;
7745
7746 buffer = kzalloc(UFSHCD_REQ_SENSE_SIZE, GFP_KERNEL);
7747 if (!buffer) {
7748 ret = -ENOMEM;
7749 goto out;
7750 }
7751
7752 ret = scsi_execute(sdp, cmd, DMA_FROM_DEVICE, buffer,
7753 UFSHCD_REQ_SENSE_SIZE, NULL, NULL,
7754 msecs_to_jiffies(1000), 3, 0, RQF_PM, NULL);
7755 if (ret)
7756 pr_err("%s: failed with err %d\n", __func__, ret);
7757
7758 kfree(buffer);
7759 out:
7760 return ret;
7761 }
7762
7763 /**
7764 * ufshcd_set_dev_pwr_mode - sends START STOP UNIT command to set device
7765 * power mode
7766 * @hba: per adapter instance
7767 * @pwr_mode: device power mode to set
7768 *
7769 * Returns 0 if requested power mode is set successfully
7770 * Returns non-zero if failed to set the requested power mode
7771 */
7772 static int ufshcd_set_dev_pwr_mode(struct ufs_hba *hba,
7773 enum ufs_dev_pwr_mode pwr_mode)
7774 {
7775 unsigned char cmd[6] = { START_STOP };
7776 struct scsi_sense_hdr sshdr;
7777 struct scsi_device *sdp;
7778 unsigned long flags;
7779 int ret;
7780
7781 spin_lock_irqsave(hba->host->host_lock, flags);
7782 sdp = hba->sdev_ufs_device;
7783 if (sdp) {
7784 ret = scsi_device_get(sdp);
7785 if (!ret && !scsi_device_online(sdp)) {
7786 ret = -ENODEV;
7787 scsi_device_put(sdp);
7788 }
7789 } else {
7790 ret = -ENODEV;
7791 }
7792 spin_unlock_irqrestore(hba->host->host_lock, flags);
7793
7794 if (ret)
7795 return ret;
7796
7797 /*
7798 * If scsi commands fail, the scsi mid-layer schedules scsi error-
7799 * handling, which would wait for host to be resumed. Since we know
7800 * we are functional while we are here, skip host resume in error
7801 * handling context.
7802 */
7803 hba->host->eh_noresume = 1;
7804 if (hba->wlun_dev_clr_ua) {
7805 ret = ufshcd_send_request_sense(hba, sdp);
7806 if (ret)
7807 goto out;
7808 /* Unit attention condition is cleared now */
7809 hba->wlun_dev_clr_ua = false;
7810 }
7811
7812 cmd[4] = pwr_mode << 4;
7813
7814 /*
7815 * This function is generally called from the power management
7816 * callbacks, hence set the RQF_PM flag so that it doesn't resume
7817 * already suspended children.
7818 */
7819 ret = scsi_execute(sdp, cmd, DMA_NONE, NULL, 0, NULL, &sshdr,
7820 START_STOP_TIMEOUT, 0, 0, RQF_PM, NULL);
7821 if (ret) {
7822 sdev_printk(KERN_WARNING, sdp,
7823 "START_STOP failed for power mode: %d, result %x\n",
7824 pwr_mode, ret);
7825 if (driver_byte(ret) & DRIVER_SENSE)
7826 scsi_print_sense_hdr(sdp, NULL, &sshdr);
7827 }
7828
7829 if (!ret)
7830 hba->curr_dev_pwr_mode = pwr_mode;
7831 out:
7832 scsi_device_put(sdp);
7833 hba->host->eh_noresume = 0;
7834 return ret;
7835 }
7836
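/*
 * ufshcd_link_state_transition - move the UniPro link towards
 * @req_link_state (Hibern8 or OFF). A failed Hibern8 entry triggers a host
 * reset and restore; the link is only powered off when auto-bkops does not
 * require the device to stay reachable.
 */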
7837 static int ufshcd_link_state_transition(struct ufs_hba *hba,
7838 enum uic_link_state req_link_state,
7839 int check_for_bkops)
7840 {
7841 int ret = 0;
7842
7843 if (req_link_state == hba->uic_link_state)
7844 return 0;
7845
7846 if (req_link_state == UIC_LINK_HIBERN8_STATE ||
7847 req_link_state == UIC_LINK_OFF_STATE) {
7848 ufshcd_set_link_trans_hibern8(hba);
7849 ret = ufshcd_link_hibern8_ctrl(hba, true);
7850 if (!ret)
7851 ufshcd_set_link_hibern8(hba);
7852 else {
7853 unsigned long flags;
7854 bool saved_is_suspended = hba->clk_gating.is_suspended;
7855
7856 spin_lock_irqsave(hba->host->host_lock, flags);
7857 hba->clk_gating.state = __CLKS_ON;
7858 spin_unlock_irqrestore(hba->host->host_lock, flags);
7859
7860 hba->clk_gating.is_suspended = true;
7861 ufshcd_host_reset_and_restore(hba);
7862 spin_lock_irqsave(hba->host->host_lock, flags);
7863 hba->clk_gating.state = CLKS_ON;
7864 spin_unlock_irqrestore(hba->host->host_lock, flags);
7865 hba->clk_gating.is_suspended = saved_is_suspended;
7866
7867 goto out;
7868 }
7869
7871 /*
7872 * If autobkops is enabled, link can't be turned off because
7873 * turning off the link would also turn off the device.
7874 */
7875 if ((req_link_state == UIC_LINK_OFF_STATE) &&
7876 (!check_for_bkops || (check_for_bkops &&
7877 !hba->auto_bkops_enabled))) {
7878 unsigned long flags;
7879
7880 /*
7881 * Change controller state to "reset state" which
7882 * should also put the link in off/reset state
7883 */
7884
7885 spin_lock_irqsave(hba->host->host_lock, flags);
7886 hba->ufshcd_state = UFSHCD_STATE_RESET;
7887 ufshcd_hba_stop(hba, true);
7888 spin_unlock_irqrestore(hba->host->host_lock, flags);
7889 /*
7890 * TODO: Check if we need any delay to make sure that
7891 * controller is reset
7892 */
7893 ufshcd_set_link_off(hba);
7894 }
7895 }
7896
7897 out:
7898 return ret;
7899 }
7900
7901 static void ufshcd_vreg_set_lpm(struct ufs_hba *hba)
7902 {
7903 /*
7904 * It seems some UFS devices may keep drawing more than sleep current
7905 * (at least for 500us) from UFS rails (especially from the VCCQ rail).
7906 * To avoid this situation, add 2ms delay before putting these UFS
7907 * rails in LPM mode.
7908 */
7909 if (!ufshcd_is_link_active(hba) &&
7910 hba->dev_quirks & UFS_DEVICE_QUIRK_DELAY_BEFORE_LPM)
7911 usleep_range(2000, 2100);
7912
7913 /*
7914 * If the UFS device is in UFS_Sleep state, turn off the VCC rail to
7915 * save some power.
7916 *
7917 * If UFS device and link is in OFF state, all power supplies (VCC,
7918 * VCCQ, VCCQ2) can be turned off if power on write protect is not
7919 * required. If UFS link is inactive (Hibern8 or OFF state) and device
7920 * is in sleep state, put VCCQ & VCCQ2 rails in LPM mode.
7921 *
7922 * Ignore the error returned by ufshcd_toggle_vreg() as device is anyway
7923 * in low power state which would save some power.
7924 */
7925 if (ufshcd_is_ufs_dev_poweroff(hba) && ufshcd_is_link_off(hba) &&
7926 !hba->dev_info.is_lu_power_on_wp) {
7927 ufshcd_setup_vreg(hba, false);
7928 } else if (!ufshcd_is_ufs_dev_active(hba)) {
7929 ufshcd_toggle_vreg(hba->dev, hba->vreg_info.vcc, false);
7930 if (!ufshcd_is_link_active(hba)) {
7931 ufshcd_config_vreg_lpm(hba, hba->vreg_info.vccq);
7932 ufshcd_config_vreg_lpm(hba, hba->vreg_info.vccq2);
7933 }
7934 }
7935 }
7936
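/*
 * ufshcd_vreg_set_hpm - counterpart of ufshcd_vreg_set_lpm(): bring the UFS
 * device power rails back to high power mode according to the current
 * device and link state.
 */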
7937 static int ufshcd_vreg_set_hpm(struct ufs_hba *hba)
7938 {
7939 int ret = 0;
7940
7941 if (ufshcd_is_ufs_dev_poweroff(hba) && ufshcd_is_link_off(hba) &&
7942 !hba->dev_info.is_lu_power_on_wp) {
7943 ret = ufshcd_setup_vreg(hba, true);
7944 } else if (!ufshcd_is_ufs_dev_active(hba)) {
7945 if (!ret && !ufshcd_is_link_active(hba)) {
7946 ret = ufshcd_config_vreg_hpm(hba, hba->vreg_info.vccq);
7947 if (ret)
7948 goto vcc_disable;
7949 ret = ufshcd_config_vreg_hpm(hba, hba->vreg_info.vccq2);
7950 if (ret)
7951 goto vccq_lpm;
7952 }
7953 ret = ufshcd_toggle_vreg(hba->dev, hba->vreg_info.vcc, true);
7954 }
7955 goto out;
7956
7957 vccq_lpm:
7958 ufshcd_config_vreg_lpm(hba, hba->vreg_info.vccq);
7959 vcc_disable:
7960 ufshcd_toggle_vreg(hba->dev, hba->vreg_info.vcc, false);
7961 out:
7962 return ret;
7963 }
7964
7965 static void ufshcd_hba_vreg_set_lpm(struct ufs_hba *hba)
7966 {
7967 if (ufshcd_is_link_off(hba))
7968 ufshcd_setup_hba_vreg(hba, false);
7969 }
7970
7971 static void ufshcd_hba_vreg_set_hpm(struct ufs_hba *hba)
7972 {
7973 if (ufshcd_is_link_off(hba))
7974 ufshcd_setup_hba_vreg(hba, true);
7975 }
7976
7977 /**
7978 * ufshcd_suspend - helper function for suspend operations
7979 * @hba: per adapter instance
7980 * @pm_op: desired low power operation type
7981 *
7982 * This function will try to put the UFS device and link into low power
7983 * mode based on the "rpm_lvl" (Runtime PM level) or "spm_lvl"
7984 * (System PM level).
7985 *
7986 * If this function is called during shutdown, it will make sure that
7987 * both UFS device and UFS link is powered off.
7988 *
7989 * NOTE: UFS device & link must be active before we enter in this function.
7990 *
7991 * Returns 0 for success and non-zero for failure
7992 */
7993 static int ufshcd_suspend(struct ufs_hba *hba, enum ufs_pm_op pm_op)
7994 {
7995 int ret = 0;
7996 enum ufs_pm_level pm_lvl;
7997 enum ufs_dev_pwr_mode req_dev_pwr_mode;
7998 enum uic_link_state req_link_state;
7999 bool gating_allowed = !ufshcd_can_fake_clkgating(hba);
8000
8001 hba->pm_op_in_progress = 1;
8002 if (!ufshcd_is_shutdown_pm(pm_op)) {
8003 pm_lvl = ufshcd_is_runtime_pm(pm_op) ?
8004 hba->rpm_lvl : hba->spm_lvl;
8005 req_dev_pwr_mode = ufs_get_pm_lvl_to_dev_pwr_mode(pm_lvl);
8006 req_link_state = ufs_get_pm_lvl_to_link_pwr_state(pm_lvl);
8007 } else {
8008 req_dev_pwr_mode = UFS_POWERDOWN_PWR_MODE;
8009 req_link_state = UIC_LINK_OFF_STATE;
8010 }
8011
8012 /*
8013 * If we can't transition into any of the low power modes
8014 * just gate the clocks.
8015 */
8016 ufshcd_hold(hba, false);
8017 hba->clk_gating.is_suspended = true;
8018
8019 if (hba->clk_scaling.is_allowed) {
8020 cancel_work_sync(&hba->clk_scaling.suspend_work);
8021 cancel_work_sync(&hba->clk_scaling.resume_work);
8022 #if defined(CONFIG_PM_DEVFREQ)
8023 ufshcd_suspend_clkscaling(hba);
8024 #endif
8025 }
8026
8027 if (req_dev_pwr_mode == UFS_ACTIVE_PWR_MODE &&
8028 req_link_state == UIC_LINK_ACTIVE_STATE) {
8029 goto disable_clks;
8030 }
8031
8032 if ((req_dev_pwr_mode == hba->curr_dev_pwr_mode) &&
8033 (req_link_state == hba->uic_link_state))
8034 goto enable_gating;
8035
8036 /* UFS device & link must be active before we enter in this function */
8037 if (!ufshcd_is_ufs_dev_active(hba) || !ufshcd_is_link_active(hba)) {
8038 ret = -EINVAL;
8039 goto enable_gating;
8040 }
8041
8042 if (ufshcd_is_runtime_pm(pm_op)) {
8043 if (ufshcd_can_autobkops_during_suspend(hba)) {
8044 /*
8045 * The device is idle with no requests in the queue,
8046 * allow background operations if bkops status shows
8047 * that performance might be impacted.
8048 */
8049 ret = ufshcd_urgent_bkops(hba);
8050 if (ret)
8051 goto enable_gating;
8052 } else {
8053 /* make sure that auto bkops is disabled */
8054 ufshcd_disable_auto_bkops(hba);
8055 }
8056 }
8057
8058 if (ufshcd_is_shutdown_pm(pm_op))
8059 ufs_shutdown_state = 1;
8060
8061 if ((req_dev_pwr_mode != hba->curr_dev_pwr_mode) &&
8062 ((ufshcd_is_runtime_pm(pm_op) && !hba->auto_bkops_enabled) ||
8063 !ufshcd_is_runtime_pm(pm_op))) {
8064 /* ensure that bkops is disabled */
8065 ufshcd_disable_auto_bkops(hba);
8066 ret = ufshcd_set_dev_pwr_mode(hba, req_dev_pwr_mode);
8067 if (ret)
8068 goto enable_gating;
8069 }
8070
8071 ret = ufshcd_link_state_transition(hba, req_link_state, 1);
8072 if (ret)
8073 goto set_dev_active;
8074
8075 disable_clks:
8076
8078 /*
8079 * Flush pending work items before the clocks are disabled
8080 */
8081 cancel_work_sync(&hba->eh_work);
8082 cancel_work_sync(&hba->eeh_work);
8083
8084 /*
8085 * Disable the host irq as there won't be any host controller
8086 * transaction expected till resume.
8087 */
8088 ufshcd_disable_irq(hba);
8089
8090 ufshcd_vreg_set_lpm(hba);
8091 udelay(50);
8092
8093 if (gating_allowed) {
8094 if (!ufshcd_is_link_active(hba))
8095 ufshcd_setup_clocks(hba, false);
8096 else
8097 /* If link is active, device ref_clk can't be switched off */
8098 __ufshcd_setup_clocks(hba, false, true);
8099 }
8100
8101 hba->clk_gating.state = CLKS_OFF;
8102 trace_ufshcd_clk_gating(dev_name(hba->dev), hba->clk_gating.state);
8103 /*
8104 * Call vendor specific suspend callback. As these callbacks may access
8105 * vendor specific host controller register space call them before the
8106 * host clocks are ON.
8107 */
8108 ret = ufshcd_vops_suspend(hba, pm_op);
8109 if (ret)
8110 goto set_link_active;
8111
8113 /* Put the host controller in low power mode if possible */
8114 ufshcd_hba_vreg_set_lpm(hba);
8115 goto out;
8116
8117 set_link_active:
8118 #if defined(CONFIG_PM_DEVFREQ)
8119 if (hba->clk_scaling.is_allowed)
8120 ufshcd_resume_clkscaling(hba);
8121 #endif
8122
8123 if (ufshcd_is_shutdown_pm(pm_op))
8124 goto out;
8125
8126 ret = ufshcd_enable_irq(hba);
8127 if (ret)
8128 goto out;
8129
8130 if (ufshcd_is_link_hibern8(hba)) {
8131 ufshcd_set_link_trans_active(hba);
8132 if (!ufshcd_link_hibern8_ctrl(hba, false))
8133 ufshcd_set_link_active(hba);
8134 else
8135 ufshcd_set_link_off(hba);
8136 } else if (ufshcd_is_link_off(hba))
8137 ufshcd_host_reset_and_restore(hba);
8138 set_dev_active:
8139 if (ufshcd_is_shutdown_pm(pm_op))
8140 goto out;
8141
8142 if (!ufshcd_set_dev_pwr_mode(hba, UFS_ACTIVE_PWR_MODE))
8143 ufshcd_disable_auto_bkops(hba);
8144 enable_gating:
8145 #if defined(CONFIG_PM_DEVFREQ)
8146 if (hba->clk_scaling.is_allowed)
8147 ufshcd_resume_clkscaling(hba);
8148 #endif
8149 hba->clk_gating.is_suspended = false;
8150 ufshcd_release(hba);
8151 out:
8152 hba->pm_op_in_progress = 0;
8153
8154 if (hba->monitor.flag & UFSHCD_MONITOR_LEVEL1)
8155 dev_info(hba->dev, "UFS suspend done\n");
8156
8157 return ret;
8158 }
8159
8160 /**
8161 * ufshcd_resume - helper function for resume operations
8162 * @hba: per adapter instance
8163 * @pm_op: runtime PM or system PM
8164 *
8165 * This function basically brings the UFS device, UniPro link and controller
8166 * to active state.
8167 *
8168 * Returns 0 for success and non-zero for failure
8169 */
8170 static int ufshcd_resume(struct ufs_hba *hba, enum ufs_pm_op pm_op)
8171 {
8172 int ret;
8173 enum uic_link_state old_link_state;
8174 enum ufs_pm_level pm_lvl;
8175 bool gating_allowed = !ufshcd_can_fake_clkgating(hba);
8176
8177 hba->pm_op_in_progress = 1;
8178 if (ufshcd_is_system_pm(pm_op))
8179 pm_lvl = hba->spm_lvl;
8180 else
8181 pm_lvl = hba->rpm_lvl;
8182
8183 if (ufs_get_pm_lvl_to_link_pwr_state(pm_lvl) == UIC_LINK_OFF_STATE)
8184 hba->uic_link_state = UIC_LINK_OFF_STATE;
8185 old_link_state = hba->uic_link_state;
8186
8187 ufshcd_hba_vreg_set_hpm(hba);
8188
8189 ret = ufshcd_vreg_set_hpm(hba);
8190 if (ret)
8191 goto disable_irq_and_vops_clks;
8192
8193 /*
8194 * Call vendor specific resume callback. As these callbacks may access
8195 * vendor specific host controller register space call them when the
8196 * host clocks are ON.
8197 */
8198 ret = ufshcd_vops_resume(hba, pm_op);
8199 if (ret)
8200 goto disable_vreg;
8201
8202 if (gating_allowed) {
8203 /* Make sure clocks are enabled before accessing controller */
8204 ret = ufshcd_setup_clocks(hba, true);
8205 if (ret)
8206 goto disable_vreg;
8207 }
8208
8209 /* enable the host irq as host controller would be active soon */
8210 ret = ufshcd_enable_irq(hba);
8211 if (ret)
8212 goto disable_irq_and_vops_clks;
8213
8214 if (ufshcd_is_link_hibern8(hba)) {
8215 ufshcd_set_link_trans_active(hba);
8216 ret = ufshcd_link_hibern8_ctrl(hba, false);
8217 if (!ret)
8218 ufshcd_set_link_active(hba);
8219 else {
8220 ufshcd_set_link_off(hba);
8221 goto vendor_suspend;
8222 }
8223 } else if (ufshcd_is_link_off(hba)) {
8224 #ifdef CONFIG_SCSI_UFS_ASYNC_RELINK
8225 hba->async_resume = true;
8226 ret = ufshcd_host_reset_and_restore(hba);
8227 goto async_resume;
8228 #else
8229 ret = ufshcd_host_reset_and_restore(hba);
8230 #endif
8231
8232 /*
8233 * ufshcd_host_reset_and_restore() should have already
8234 * set the link state as active
8235 */
8236 if (ret || !ufshcd_is_link_active(hba))
8237 goto vendor_suspend;
8238 }
8239
8240 if (!ufshcd_is_ufs_dev_active(hba)) {
8241 ret = ufshcd_set_dev_pwr_mode(hba, UFS_ACTIVE_PWR_MODE);
8242 if (ret)
8243 goto set_old_link_state;
8244 }
8245
8246 if (ufshcd_keep_autobkops_enabled_except_suspend(hba))
8247 ufshcd_enable_auto_bkops(hba);
8248 else
8249 /*
8250 * If BKOPs operations are urgently needed at this moment then
8251 * keep auto-bkops enabled or else disable it.
8252 */
8253 ufshcd_urgent_bkops(hba);
8254 #ifdef CONFIG_SCSI_UFS_ASYNC_RELINK
8255 async_resume:
8256 #endif
8257 hba->clk_gating.is_suspended = false;
8258
8259 #if defined(CONFIG_PM_DEVFREQ)
8260 if (hba->clk_scaling.is_allowed)
8261 ufshcd_resume_clkscaling(hba);
8262 #endif
8263
8264 /* Schedule clock gating in case of no access to UFS device yet */
8265 ufshcd_release(hba);
8266 goto out;
8267
8268 set_old_link_state:
8269 ufshcd_link_state_transition(hba, old_link_state, 0);
8270 vendor_suspend:
8271 ufshcd_vops_suspend(hba, pm_op);
8272 disable_irq_and_vops_clks:
8273 ufshcd_disable_irq(hba);
8274 #if defined(CONFIG_PM_DEVFREQ)
8275 if (hba->clk_scaling.is_allowed)
8276 ufshcd_suspend_clkscaling(hba);
8277 #endif
8278
8279 if (gating_allowed)
8280 ufshcd_setup_clocks(hba, false);
8281 disable_vreg:
8282 ufshcd_vreg_set_lpm(hba);
8283 out:
8284 hba->pm_op_in_progress = 0;
8285
8286 if (hba->monitor.flag & UFSHCD_MONITOR_LEVEL1)
8287 dev_info(hba->dev, "UFS resume done\n");
8288
8289 return ret;
8290 }
8291
8292 /**
8293 * ufshcd_system_suspend - system suspend routine
8294 * @hba: per adapter instance
8296 *
8297 * Check the description of ufshcd_suspend() function for more details.
8298 *
8299 * Returns 0 for success and non-zero for failure
8300 */
8301 int ufshcd_system_suspend(struct ufs_hba *hba)
8302 {
8303 int ret = 0;
8304 ktime_t start = ktime_get();
8305
8306 if (!hba || !hba->is_powered)
8307 return 0;
8308
8309 if ((ufs_get_pm_lvl_to_dev_pwr_mode(hba->spm_lvl) ==
8310 hba->curr_dev_pwr_mode) &&
8311 (ufs_get_pm_lvl_to_link_pwr_state(hba->spm_lvl) ==
8312 hba->uic_link_state))
8313 goto out;
8314
8315 if (pm_runtime_suspended(hba->dev)) {
8316 /*
8317 * UFS device and/or UFS link low power states during runtime
8318 * suspend may differ from what is expected during system
8319 * suspend. Hence runtime resume the device & link and let the
8320 * system suspend low power states take effect.
8321 * TODO: If resume takes a long time, we might optimize it in the
8322 * future by not resuming everything if possible.
8323 */
8324 ret = ufshcd_runtime_resume(hba);
8325 if (ret)
8326 goto out;
8327 }
8328
8329 ret = ufshcd_suspend(hba, UFS_SYSTEM_PM);
8330 out:
8331 trace_ufshcd_system_suspend(dev_name(hba->dev), ret,
8332 ktime_to_us(ktime_sub(ktime_get(), start)),
8333 hba->curr_dev_pwr_mode, hba->uic_link_state);
8334 if (!ret)
8335 hba->is_sys_suspended = true;
8336 return ret;
8337 }
8338 EXPORT_SYMBOL(ufshcd_system_suspend);
8339
8340 /**
8341 * ufshcd_system_resume - system resume routine
8342 * @hba: per adapter instance
8343 *
8344 * Returns 0 for success and non-zero for failure
8345 */
8347 int ufshcd_system_resume(struct ufs_hba *hba)
8348 {
8349 int ret = 0;
8350 ktime_t start = ktime_get();
8351
8352 if (!hba)
8353 return -EINVAL;
8354
8355 if (!hba->is_powered || pm_runtime_suspended(hba->dev))
8356 /*
8357 * Let the runtime resume take care of resuming
8358 * if runtime suspended.
8359 */
8360 goto out;
8361 else
8362 ret = ufshcd_resume(hba, UFS_SYSTEM_PM);
8363 out:
8364 trace_ufshcd_system_resume(dev_name(hba->dev), ret,
8365 ktime_to_us(ktime_sub(ktime_get(), start)),
8366 hba->curr_dev_pwr_mode, hba->uic_link_state);
8367 if (!ret)
8368 hba->is_sys_suspended = false;
8369 return ret;
8370 }
8371 EXPORT_SYMBOL(ufshcd_system_resume);
8372
8373 /**
8374 * ufshcd_runtime_suspend - runtime suspend routine
8375 * @hba: per adapter instance
8376 *
8377 * Check the description of ufshcd_suspend() function for more details.
8378 *
8379 * Returns 0 for success and non-zero for failure
8380 */
8381 int ufshcd_runtime_suspend(struct ufs_hba *hba)
8382 {
8383 int ret = 0;
8384 ktime_t start = ktime_get();
8385
8386 if (!hba)
8387 return -EINVAL;
8388
8389 if (!hba->is_powered)
8390 goto out;
8391 else
8392 ret = ufshcd_suspend(hba, UFS_RUNTIME_PM);
8393 out:
8394 trace_ufshcd_runtime_suspend(dev_name(hba->dev), ret,
8395 ktime_to_us(ktime_sub(ktime_get(), start)),
8396 hba->curr_dev_pwr_mode, hba->uic_link_state);
8397 return ret;
8398 }
8399 EXPORT_SYMBOL(ufshcd_runtime_suspend);
8400
8401 /**
8402 * ufshcd_runtime_resume - runtime resume routine
8403 * @hba: per adapter instance
8404 *
8405 * This function basically brings the UFS device, UniPro link and controller
8406 * to active state. Following operations are done in this function:
8407 *
8408 * 1. Turn on all the controller related clocks
8409 * 2. Bring the UniPro link out of Hibernate state
8410 * 3. If UFS device is in sleep state, turn ON VCC rail and bring the UFS device
8411 * to active state.
8412 * 4. If auto-bkops is enabled on the device, disable it.
8413 *
8414 * So following would be the possible power state after this function return
8415 * successfully:
8416 * S1: UFS device in Active state with VCC rail ON
8417 * UniPro link in Active state
8418 * All the UFS/UniPro controller clocks are ON
8419 *
8420 * Returns 0 for success and non-zero for failure
8421 */
8422 int ufshcd_runtime_resume(struct ufs_hba *hba)
8423 {
8424 int ret = 0;
8425 ktime_t start = ktime_get();
8426
8427 if (!hba)
8428 return -EINVAL;
8429
8430 if (!hba->is_powered)
8431 goto out;
8432 else
8433 ret = ufshcd_resume(hba, UFS_RUNTIME_PM);
8434 out:
8435 trace_ufshcd_runtime_resume(dev_name(hba->dev), ret,
8436 ktime_to_us(ktime_sub(ktime_get(), start)),
8437 hba->curr_dev_pwr_mode, hba->uic_link_state);
8438 return ret;
8439 }
8440 EXPORT_SYMBOL(ufshcd_runtime_resume);
8441
8442 int ufshcd_runtime_idle(struct ufs_hba *hba)
8443 {
8444 return 0;
8445 }
8446 EXPORT_SYMBOL(ufshcd_runtime_idle);
8447
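/*
 * ufshcd_pm_lvl_store - common sysfs store handler for the rpm_lvl and
 * spm_lvl attributes; validates the requested PM level and updates
 * hba->rpm_lvl or hba->spm_lvl under the host lock.
 */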
8448 static inline ssize_t ufshcd_pm_lvl_store(struct device *dev,
8449 struct device_attribute *attr,
8450 const char *buf, size_t count,
8451 bool rpm)
8452 {
8453 struct ufs_hba *hba = dev_get_drvdata(dev);
8454 unsigned long flags, value;
8455
8456 if (kstrtoul(buf, 0, &value))
8457 return -EINVAL;
8458
8459 if (value >= UFS_PM_LVL_MAX)
8460 return -EINVAL;
8461
8462 spin_lock_irqsave(hba->host->host_lock, flags);
8463 if (rpm)
8464 hba->rpm_lvl = value;
8465 else
8466 hba->spm_lvl = value;
8467 spin_unlock_irqrestore(hba->host->host_lock, flags);
8468 return count;
8469 }
8470
8471 static ssize_t ufshcd_rpm_lvl_show(struct device *dev,
8472 struct device_attribute *attr, char *buf)
8473 {
8474 struct ufs_hba *hba = dev_get_drvdata(dev);
8475 int curr_len;
8476 u8 lvl;
8477
8478 curr_len = snprintf(buf, PAGE_SIZE,
8479 "\nCurrent Runtime PM level [%d] => dev_state [%s] link_state [%s]\n",
8480 hba->rpm_lvl,
8481 ufschd_ufs_dev_pwr_mode_to_string(
8482 ufs_pm_lvl_states[hba->rpm_lvl].dev_state),
8483 ufschd_uic_link_state_to_string(
8484 ufs_pm_lvl_states[hba->rpm_lvl].link_state));
8485
8486 curr_len += snprintf((buf + curr_len), (PAGE_SIZE - curr_len),
8487 "\nAll available Runtime PM levels info:\n");
8488 for (lvl = UFS_PM_LVL_0; lvl < UFS_PM_LVL_MAX; lvl++)
8489 curr_len += snprintf((buf + curr_len), (PAGE_SIZE - curr_len),
8490 "\tRuntime PM level [%d] => dev_state [%s] link_state [%s]\n",
8491 lvl,
8492 ufschd_ufs_dev_pwr_mode_to_string(
8493 ufs_pm_lvl_states[lvl].dev_state),
8494 ufschd_uic_link_state_to_string(
8495 ufs_pm_lvl_states[lvl].link_state));
8496
8497 return curr_len;
8498 }
8499
8500 static ssize_t ufshcd_rpm_lvl_store(struct device *dev,
8501 struct device_attribute *attr, const char *buf, size_t count)
8502 {
8503 return ufshcd_pm_lvl_store(dev, attr, buf, count, true);
8504 }
8505
8506 static void ufshcd_add_rpm_lvl_sysfs_nodes(struct ufs_hba *hba)
8507 {
8508 hba->rpm_lvl_attr.show = ufshcd_rpm_lvl_show;
8509 hba->rpm_lvl_attr.store = ufshcd_rpm_lvl_store;
8510 sysfs_attr_init(&hba->rpm_lvl_attr.attr);
8511 hba->rpm_lvl_attr.attr.name = "rpm_lvl";
8512 hba->rpm_lvl_attr.attr.mode = 0644;
8513 if (device_create_file(hba->dev, &hba->rpm_lvl_attr))
8514 dev_err(hba->dev, "Failed to create sysfs for rpm_lvl\n");
8515 }
8516
8517 static ssize_t ufshcd_spm_lvl_show(struct device *dev,
8518 struct device_attribute *attr, char *buf)
8519 {
8520 struct ufs_hba *hba = dev_get_drvdata(dev);
8521 int curr_len;
8522 u8 lvl;
8523
8524 curr_len = snprintf(buf, PAGE_SIZE,
8525 "\nCurrent System PM level [%d] => dev_state [%s] link_state [%s]\n",
8526 hba->spm_lvl,
8527 ufschd_ufs_dev_pwr_mode_to_string(
8528 ufs_pm_lvl_states[hba->spm_lvl].dev_state),
8529 ufschd_uic_link_state_to_string(
8530 ufs_pm_lvl_states[hba->spm_lvl].link_state));
8531
8532 curr_len += snprintf((buf + curr_len), (PAGE_SIZE - curr_len),
8533 "\nAll available System PM levels info:\n");
8534 for (lvl = UFS_PM_LVL_0; lvl < UFS_PM_LVL_MAX; lvl++)
8535 curr_len += snprintf((buf + curr_len), (PAGE_SIZE - curr_len),
8536 "\tSystem PM level [%d] => dev_state [%s] link_state [%s]\n",
8537 lvl,
8538 ufschd_ufs_dev_pwr_mode_to_string(
8539 ufs_pm_lvl_states[lvl].dev_state),
8540 ufschd_uic_link_state_to_string(
8541 ufs_pm_lvl_states[lvl].link_state));
8542
8543 return curr_len;
8544 }
8545
8546 static ssize_t ufshcd_spm_lvl_store(struct device *dev,
8547 struct device_attribute *attr, const char *buf, size_t count)
8548 {
8549 return ufshcd_pm_lvl_store(dev, attr, buf, count, false);
8550 }
8551
8552 static void ufshcd_add_spm_lvl_sysfs_nodes(struct ufs_hba *hba)
8553 {
8554 hba->spm_lvl_attr.show = ufshcd_spm_lvl_show;
8555 hba->spm_lvl_attr.store = ufshcd_spm_lvl_store;
8556 sysfs_attr_init(&hba->spm_lvl_attr.attr);
8557 hba->spm_lvl_attr.attr.name = "spm_lvl";
8558 hba->spm_lvl_attr.attr.mode = 0644;
8559 if (device_create_file(hba->dev, &hba->spm_lvl_attr))
8560 dev_err(hba->dev, "Failed to create sysfs for spm_lvl\n");
8561 }
8562
8563 static inline void ufshcd_add_sysfs_nodes(struct ufs_hba *hba)
8564 {
8565 ufshcd_add_rpm_lvl_sysfs_nodes(hba);
8566 ufshcd_add_spm_lvl_sysfs_nodes(hba);
8567 }
8568
8569 static inline void ufshcd_remove_sysfs_nodes(struct ufs_hba *hba)
8570 {
8571 device_remove_file(hba->dev, &hba->rpm_lvl_attr);
8572 device_remove_file(hba->dev, &hba->spm_lvl_attr);
8573 }
8574
8575 /**
8576 * ufshcd_shutdown - shutdown routine
8577 * @hba: per adapter instance
8578 *
8579 * This function would power off both UFS device and UFS link.
8580 *
8581 * Returns 0 always to allow force shutdown even in case of errors.
8582 */
8583 int ufshcd_shutdown(struct ufs_hba *hba)
8584 {
8585 int ret = 0;
8586
8587 if (ufshcd_is_ufs_dev_poweroff(hba) && ufshcd_is_link_off(hba))
8588 goto out;
8589
8590 if (pm_runtime_suspended(hba->dev)) {
8591 ret = ufshcd_runtime_resume(hba);
8592 if (ret)
8593 goto out;
8594 }
8595
8596 ret = ufshcd_suspend(hba, UFS_SHUTDOWN_PM);
8597 out:
8598 if (ret)
8599 dev_err(hba->dev, "%s failed, err %d\n", __func__, ret);
8600 /* allow force shutdown even in case of errors */
8601 return 0;
8602 }
8603 EXPORT_SYMBOL(ufshcd_shutdown);
8604
8605 /**
8606 * ufshcd_remove - de-allocate SCSI host and host memory space
8607 * data structure memory
8608 * @hba: per adapter instance
8609 */
8610 void ufshcd_remove(struct ufs_hba *hba)
8611 {
8612 ufshcd_remove_sysfs_nodes(hba);
8613 scsi_remove_host(hba->host);
8614 /* disable interrupts */
8615 ufshcd_disable_intr(hba, hba->intr_mask);
8616 ufshcd_hba_stop(hba, true);
8617
8618 ufshcd_exit_clk_gating(hba);
8619 #if defined(CONFIG_PM_DEVFREQ)
8620 if (ufshcd_is_clkscaling_supported(hba))
8621 device_remove_file(hba->dev, &hba->clk_scaling.enable_attr);
8622 #endif
8623 ufshcd_hba_exit(hba);
8624 }
8625 EXPORT_SYMBOL_GPL(ufshcd_remove);
8626
8627 /**
8628 * ufshcd_dealloc_host - deallocate Host Bus Adapter (HBA)
8629 * @hba: pointer to Host Bus Adapter (HBA)
8630 */
8631 void ufshcd_dealloc_host(struct ufs_hba *hba)
8632 {
8633 scsi_host_put(hba->host);
8634 }
8635 EXPORT_SYMBOL_GPL(ufshcd_dealloc_host);
8636
8637 /**
8638 * ufshcd_set_dma_mask - Set dma mask based on the controller
8639 * addressing capability
8640 * @hba: per adapter instance
8641 *
8642 * Returns 0 for success, non-zero for failure
8643 */
8644 static int ufshcd_set_dma_mask(struct ufs_hba *hba)
8645 {
8646 if (hba->capabilities & MASK_64_ADDRESSING_SUPPORT) {
8647 if (!dma_set_mask_and_coherent(hba->dev, DMA_BIT_MASK(64)))
8648 return 0;
8649 }
8650 return dma_set_mask_and_coherent(hba->dev, DMA_BIT_MASK(32));
8651 }
8652
8653 /**
8654 * ufshcd_alloc_host - allocate Host Bus Adapter (HBA)
8655 * @dev: pointer to device handle
8656 * @hba_handle: driver private handle
8657 * Returns 0 on success, non-zero value on failure
8658 */
8659 int ufshcd_alloc_host(struct device *dev, struct ufs_hba **hba_handle)
8660 {
8661 struct Scsi_Host *host;
8662 struct ufs_hba *hba;
8663 int err = 0;
8664
8665 if (!dev) {
8666 dev_err(dev,
8667 "Invalid memory reference for dev is NULL\n");
8668 err = -ENODEV;
8669 goto out_error;
8670 }
8671
8672 host = scsi_host_alloc(&ufshcd_driver_template,
8673 sizeof(struct ufs_hba));
8674 if (!host) {
8675 dev_err(dev, "scsi_host_alloc failed\n");
8676 err = -ENOMEM;
8677 goto out_error;
8678 }
8679 hba = shost_priv(host);
8680 hba->host = host;
8681 hba->dev = dev;
8682 *hba_handle = hba;
8683
8684 INIT_LIST_HEAD(&hba->clk_list_head);
8685
8686 out_error:
8687 return err;
8688 }
8689 EXPORT_SYMBOL(ufshcd_alloc_host);
8690
8691 /**
8692 * ufshcd_init - Driver initialization routine
8693 * @hba: per-adapter instance
8694 * @mmio_base: base register address
8695 * @irq: Interrupt line of device
8696 * Returns 0 on success, non-zero value on failure
8697 */
8698 int ufshcd_init(struct ufs_hba *hba, void __iomem *mmio_base, unsigned int irq)
8699 {
8700 int err;
8701 struct Scsi_Host *host = hba->host;
8702 struct device *dev = hba->dev;
8703
8704 if (!mmio_base) {
8705 dev_err(hba->dev,
8706 "Invalid memory reference for mmio_base is NULL\n");
8707 err = -ENODEV;
8708 goto out_error;
8709 }
8710
8711 hba->mmio_base = mmio_base;
8712 hba->irq = irq;
8713
8714 /* Set descriptor lengths to specification defaults */
8715 ufshcd_def_desc_sizes(hba);
8716
8717 err = ufshcd_hba_init(hba);
8718 if (err)
8719 goto out_error;
8720
8721 /* Read capabilities registers */
8722 ufshcd_hba_capabilities(hba);
8723
8724 /* Get UFS version supported by the controller */
8725 hba->ufs_version = ufshcd_get_ufs_version(hba);
8726
8727 if ((hba->ufs_version != UFSHCI_VERSION_10) &&
8728 (hba->ufs_version != UFSHCI_VERSION_11) &&
8729 (hba->ufs_version != UFSHCI_VERSION_20) &&
8730 (hba->ufs_version != UFSHCI_VERSION_21))
8731 dev_err(hba->dev, "invalid UFS version 0x%x\n",
8732 hba->ufs_version);
8733
8734 /* Get Interrupt bit mask per version */
8735 hba->intr_mask = ufshcd_get_intr_mask(hba);
8736
8737 err = ufshcd_set_dma_mask(hba);
8738 if (err) {
8739 dev_err(hba->dev, "set dma mask failed\n");
8740 goto out_disable;
8741 }
8742
8743 /* Allocate memory for host memory space */
8744 err = ufshcd_memory_alloc(hba);
8745 if (err) {
8746 dev_err(hba->dev, "Memory allocation failed\n");
8747 goto out_disable;
8748 }
8749
8750 /* Configure LRB */
8751 ufshcd_host_memory_configure(hba);
8752
8753 host->can_queue = hba->nutrs;
8754 host->cmd_per_lun = hba->nutrs;
8755 host->max_id = UFSHCD_MAX_ID;
8756 host->max_lun = UFS_MAX_LUNS;
8757 host->max_channel = UFSHCD_MAX_CHANNEL;
8758 host->unique_id = host->host_no;
8759 host->max_cmd_len = MAX_CDB_SIZE;
8760
8761 hba->max_pwr_info.is_valid = false;
8762
8763 /* Initialize wait queue for task management */
8764 init_waitqueue_head(&hba->tm_wq);
8765 init_waitqueue_head(&hba->tm_tag_wq);
8766
8767 /* Initialize work queues */
8768 INIT_WORK(&hba->eh_work, ufshcd_err_handler);
8769 INIT_WORK(&hba->eeh_work, ufshcd_exception_event_handler);
8770
8771 /* Initialize UIC command mutex */
8772 mutex_init(&hba->uic_cmd_mutex);
8773
8774 /* Initialize mutex for device management commands */
8775 mutex_init(&hba->dev_cmd.lock);
8776
8777 init_rwsem(&hba->clk_scaling_lock);
8778
8779 /* Initialize device management tag acquire wait queue */
8780 init_waitqueue_head(&hba->dev_cmd.tag_wq);
8781
8782 /* Initialize monitor */
8783 ufshcd_init_monitor(hba);
8784
8785 err = ufshcd_init_clk_gating(hba);
8786 if (err) {
8787 dev_err(hba->dev, "init clk_gating failed\n");
8788 goto out_disable;
8789 }
8790
8791 /*
8792 * In order to avoid any spurious interrupt immediately after
8793 * registering UFS controller interrupt handler, clear any pending UFS
8794 * interrupt status and disable all the UFS interrupts.
8795 */
8796 ufshcd_writel(hba, ufshcd_readl(hba, REG_INTERRUPT_STATUS),
8797 REG_INTERRUPT_STATUS);
8798 ufshcd_writel(hba, 0, REG_INTERRUPT_ENABLE);
8799 /*
8800 * Make sure that UFS interrupts are disabled and any pending interrupt
8801 * status is cleared before registering UFS interrupt handler.
8802 */
8803 mb();
8804
8805 /* IRQ registration */
8806 err = devm_request_irq(dev, irq, ufshcd_intr, IRQF_SHARED, UFSHCD, hba);
8807 if (err) {
8808 dev_err(hba->dev, "request irq failed\n");
8809 goto exit_gating;
8810 } else {
8811 hba->is_irq_enabled = true;
8812 }
8813
8814 err = scsi_add_host(host, hba->dev);
8815 if (err) {
8816 dev_err(hba->dev, "scsi_add_host failed\n");
8817 goto exit_gating;
8818 }
8819
8820 #if defined(CONFIG_PM_DEVFREQ)
8821 if (ufshcd_is_clkscaling_supported(hba)) {
8822 char wq_name[sizeof("ufs_clkscaling_00")];
8823
8824 INIT_WORK(&hba->clk_scaling.suspend_work,
8825 ufshcd_clk_scaling_suspend_work);
8826 INIT_WORK(&hba->clk_scaling.resume_work,
8827 ufshcd_clk_scaling_resume_work);
8828
8829 snprintf(wq_name, sizeof(wq_name), "ufs_clkscaling_%d",
8830 host->host_no);
8831 hba->clk_scaling.workq = create_singlethread_workqueue(wq_name);
8832
8833 ufshcd_clkscaling_init_sysfs(hba);
8834 }
8835 #endif
8836
8837 /* Hold auto suspend until async scan completes */
8838 pm_runtime_get_sync(dev);
8839
8840 /*
8841 * The device-initialize-sequence hasn't been invoked yet.
8842 * Set the device to power-off state
8843 */
8844 ufshcd_set_ufs_dev_poweroff(hba);
8845
8846 async_schedule(ufshcd_async_scan, hba);
8847 ufshcd_add_sysfs_nodes(hba);
8848
8849 return 0;
8850
8851 exit_gating:
8852 ufshcd_exit_clk_gating(hba);
8853 out_disable:
8854 hba->is_irq_enabled = false;
8855 ufshcd_hba_exit(hba);
8856 out_error:
8857 return err;
8858 }
8859 EXPORT_SYMBOL_GPL(ufshcd_init);
8860
8861 MODULE_AUTHOR("Santosh Yaragnavi <santosh.sy@samsung.com>");
8862 MODULE_AUTHOR("Vinayak Holikatti <h.vinayak@samsung.com>");
8863 MODULE_DESCRIPTION("Generic UFS host controller driver Core");
8864 MODULE_LICENSE("GPL");
8865 MODULE_VERSION(UFSHCD_DRIVER_VERSION);