/*
 * Universal Flash Storage Host controller driver Core
 *
 * This code is based on drivers/scsi/ufs/ufshcd.c
 * Copyright (C) 2011-2013 Samsung India Software Operations
 * Copyright (c) 2013-2014, The Linux Foundation. All rights reserved.
 *
 * Authors:
 *	Santosh Yaraganavi <santosh.sy@samsung.com>
 *	Vinayak Holikatti <h.vinayak@samsung.com>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version 2
 * of the License, or (at your option) any later version.
 * See the COPYING file in the top-level directory or visit
 * <http://www.gnu.org/licenses/gpl-2.0.html>
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * This program is provided "AS IS" and "WITH ALL FAULTS" and
 * without warranty of any kind. You are solely responsible for
 * determining the appropriateness of using and distributing
 * the program and assume all risks associated with your exercise
 * of rights with respect to the program, including but not limited
 * to infringement of third party rights, the risks and costs of
 * program errors, damage to or loss of data, programs or equipment,
 * and unavailability or interruption of operations. Under no
 * circumstances will the contributor of this Program be liable for
 * any damages of any kind arising from your use or distribution of
 * this program.
 *
 * The Linux Foundation chooses to take subject only to the GPLv2
 * license terms, and distributes only under these terms.
 */

#include <linux/async.h>
#include <linux/devfreq.h>

#include "ufshcd.h"
#include "unipro.h"

#define UFSHCD_ENABLE_INTRS	(UTP_TRANSFER_REQ_COMPL |\
				 UTP_TASK_REQ_COMPL |\
				 UFSHCD_ERROR_MASK)
/* UIC command timeout, unit: ms */
#define UIC_CMD_TIMEOUT	500

/* NOP OUT retries waiting for NOP IN response */
#define NOP_OUT_RETRIES	10
/* Timeout after 30 msecs if NOP OUT hangs without response */
#define NOP_OUT_TIMEOUT	30 /* msecs */

/* Query request retries */
#define QUERY_REQ_RETRIES	10
/* Query request timeout */
#define QUERY_REQ_TIMEOUT	30 /* msec */

/* Task management command timeout */
#define TM_CMD_TIMEOUT	100 /* msecs */

/* maximum number of link-startup retries */
#define DME_LINKSTARTUP_RETRIES	3

/* maximum number of reset retries before giving up */
#define MAX_HOST_RESET_RETRIES	5

/* Expose the flag value from utp_upiu_query.value */
#define MASK_QUERY_UPIU_FLAG_LOC	0xFF

/* Interrupt aggregation default timeout, unit: 40us */
#define INT_AGGR_DEF_TO	0x02

#define ufshcd_toggle_vreg(_dev, _vreg, _on)			\
	({							\
		int _ret;					\
		if (_on)					\
			_ret = ufshcd_enable_vreg(_dev, _vreg);	\
		else						\
			_ret = ufshcd_disable_vreg(_dev, _vreg);\
		_ret;						\
	})

static u32 ufs_query_desc_max_size[] = {
	QUERY_DESC_DEVICE_MAX_SIZE,
	QUERY_DESC_CONFIGURAION_MAX_SIZE,
	QUERY_DESC_UNIT_MAX_SIZE,
	QUERY_DESC_RFU_MAX_SIZE,
	QUERY_DESC_INTERCONNECT_MAX_SIZE,
	QUERY_DESC_STRING_MAX_SIZE,
	QUERY_DESC_RFU_MAX_SIZE,
	QUERY_DESC_GEOMETRY_MAZ_SIZE,
	QUERY_DESC_POWER_MAX_SIZE,
	QUERY_DESC_RFU_MAX_SIZE,
};

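/*
 * Aside (illustrative, not part of the driver): ufshcd_toggle_vreg() above
 * relies on the GNU C statement-expression extension, where a ({ ... })
 * block evaluates to its last expression. A caller can therefore both run
 * the helper and capture its return code in one step, e.g.:
 *
 *	ret = ufshcd_toggle_vreg(dev, vreg, true);
 *	if (ret)
 *		goto out;
 *
 * The local "_ret" inside the macro carries the result out of the block;
 * the underscore prefix reduces the chance of shadowing a caller variable.
 */
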
enum {
	UFSHCD_MAX_CHANNEL = 0,
	UFSHCD_MAX_ID = 1,
	UFSHCD_CMD_PER_LUN = 32,
	UFSHCD_CAN_QUEUE = 32,
};

/* UFSHCD states */
enum {
	UFSHCD_STATE_RESET,
	UFSHCD_STATE_ERROR,
	UFSHCD_STATE_OPERATIONAL,
};

/* UFSHCD error handling flags */
enum {
	UFSHCD_EH_IN_PROGRESS = (1 << 0),
};

/* UFSHCD UIC layer error flags */
enum {
	UFSHCD_UIC_DL_PA_INIT_ERROR = (1 << 0), /* Data link layer error */
	UFSHCD_UIC_NL_ERROR = (1 << 1), /* Network layer error */
	UFSHCD_UIC_TL_ERROR = (1 << 2), /* Transport Layer error */
	UFSHCD_UIC_DME_ERROR = (1 << 3), /* DME error */
};

/* Interrupt configuration options */
enum {
	UFSHCD_INT_DISABLE,
	UFSHCD_INT_ENABLE,
	UFSHCD_INT_CLEAR,
};

#define ufshcd_set_eh_in_progress(h) \
	(h->eh_flags |= UFSHCD_EH_IN_PROGRESS)
#define ufshcd_eh_in_progress(h) \
	(h->eh_flags & UFSHCD_EH_IN_PROGRESS)
#define ufshcd_clear_eh_in_progress(h) \
	(h->eh_flags &= ~UFSHCD_EH_IN_PROGRESS)

#define ufshcd_set_ufs_dev_active(h) \
	((h)->curr_dev_pwr_mode = UFS_ACTIVE_PWR_MODE)
#define ufshcd_set_ufs_dev_sleep(h) \
	((h)->curr_dev_pwr_mode = UFS_SLEEP_PWR_MODE)
#define ufshcd_set_ufs_dev_poweroff(h) \
	((h)->curr_dev_pwr_mode = UFS_POWERDOWN_PWR_MODE)
#define ufshcd_is_ufs_dev_active(h) \
	((h)->curr_dev_pwr_mode == UFS_ACTIVE_PWR_MODE)
#define ufshcd_is_ufs_dev_sleep(h) \
	((h)->curr_dev_pwr_mode == UFS_SLEEP_PWR_MODE)
#define ufshcd_is_ufs_dev_poweroff(h) \
	((h)->curr_dev_pwr_mode == UFS_POWERDOWN_PWR_MODE)

static struct ufs_pm_lvl_states ufs_pm_lvl_states[] = {
	{UFS_ACTIVE_PWR_MODE, UIC_LINK_ACTIVE_STATE},
	{UFS_ACTIVE_PWR_MODE, UIC_LINK_HIBERN8_STATE},
	{UFS_SLEEP_PWR_MODE, UIC_LINK_ACTIVE_STATE},
	{UFS_SLEEP_PWR_MODE, UIC_LINK_HIBERN8_STATE},
	{UFS_POWERDOWN_PWR_MODE, UIC_LINK_HIBERN8_STATE},
	{UFS_POWERDOWN_PWR_MODE, UIC_LINK_OFF_STATE},
};

static inline enum ufs_dev_pwr_mode
ufs_get_pm_lvl_to_dev_pwr_mode(enum ufs_pm_level lvl)
{
	return ufs_pm_lvl_states[lvl].dev_state;
}

static inline enum uic_link_state
ufs_get_pm_lvl_to_link_pwr_state(enum ufs_pm_level lvl)
{
	return ufs_pm_lvl_states[lvl].link_state;
}

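/*
 * Worked example (illustrative): a UFS PM level is simply an index into
 * ufs_pm_lvl_states[] above, so e.g. level 3 resolves to the pair
 * <UFS_SLEEP_PWR_MODE, UIC_LINK_HIBERN8_STATE>:
 *
 *	dev_mode   = ufs_get_pm_lvl_to_dev_pwr_mode(3);
 *	link_state = ufs_get_pm_lvl_to_link_pwr_state(3);
 *
 * Both helpers index the same row, which keeps the device power mode and
 * link state selections consistent for any given level.
 */
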
static void ufshcd_tmc_handler(struct ufs_hba *hba);
static void ufshcd_async_scan(void *data, async_cookie_t cookie);
static int ufshcd_reset_and_restore(struct ufs_hba *hba);
static int ufshcd_clear_tm_cmd(struct ufs_hba *hba, int tag);
static void ufshcd_hba_exit(struct ufs_hba *hba);
static int ufshcd_probe_hba(struct ufs_hba *hba);
static int __ufshcd_setup_clocks(struct ufs_hba *hba, bool on,
				 bool skip_ref_clk);
static int ufshcd_setup_clocks(struct ufs_hba *hba, bool on);
static int ufshcd_uic_hibern8_exit(struct ufs_hba *hba);
static int ufshcd_uic_hibern8_enter(struct ufs_hba *hba);
static inline void ufshcd_add_delay_before_dme_cmd(struct ufs_hba *hba);
static int ufshcd_host_reset_and_restore(struct ufs_hba *hba);
static irqreturn_t ufshcd_intr(int irq, void *__hba);
static int ufshcd_config_pwr_mode(struct ufs_hba *hba,
		struct ufs_pa_layer_attr *desired_pwr_mode);
static int ufshcd_change_power_mode(struct ufs_hba *hba,
		struct ufs_pa_layer_attr *pwr_mode);

static inline int ufshcd_enable_irq(struct ufs_hba *hba)
{
	int ret = 0;

	if (!hba->is_irq_enabled) {
		ret = request_irq(hba->irq, ufshcd_intr, IRQF_SHARED, UFSHCD,
				hba);
		if (ret)
			dev_err(hba->dev, "%s: request_irq failed, ret=%d\n",
				__func__, ret);
		hba->is_irq_enabled = true;
	}

	return ret;
}

static inline void ufshcd_disable_irq(struct ufs_hba *hba)
{
	if (hba->is_irq_enabled) {
		free_irq(hba->irq, hba);
		hba->is_irq_enabled = false;
	}
}

/*
 * ufshcd_wait_for_register - wait for register value to change
 * @hba - per-adapter interface
 * @reg - mmio register offset
 * @mask - mask to apply to read register value
 * @val - wait condition
 * @interval_us - polling interval in microsecs
 * @timeout_ms - timeout in millisecs
 *
 * Returns -ETIMEDOUT on error, zero on success
 */
static int ufshcd_wait_for_register(struct ufs_hba *hba, u32 reg, u32 mask,
		u32 val, unsigned long interval_us, unsigned long timeout_ms)
{
	int err = 0;
	unsigned long timeout = jiffies + msecs_to_jiffies(timeout_ms);

	/* ignore bits that we don't intend to wait on */
	val = val & mask;

	while ((ufshcd_readl(hba, reg) & mask) != val) {
		/* wakeup within 50us of expiry */
		usleep_range(interval_us, interval_us + 50);

		if (time_after(jiffies, timeout)) {
			if ((ufshcd_readl(hba, reg) & mask) != val)
				err = -ETIMEDOUT;
			break;
		}
	}

	return err;
}

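/*
 * Usage sketch (illustrative): callers poll a register until the selected
 * bits reach an expected value. For instance, ufshcd_clear_cmd() below
 * waits for the hardware to drop a doorbell bit:
 *
 *	u32 mask = 1 << tag;
 *	err = ufshcd_wait_for_register(hba, REG_UTP_TRANSFER_REQ_DOOR_BELL,
 *				       mask, ~mask, 1000, 1000);
 *
 * i.e. poll every 1000us, for up to 1000ms, until (reg & mask) == 0 — the
 * "~mask" wait condition is masked down to zero internally.
 */
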
/**
 * ufshcd_get_intr_mask - Get the interrupt bit mask
 * @hba - Pointer to adapter instance
 *
 * Returns interrupt bit mask per version
 */
static inline u32 ufshcd_get_intr_mask(struct ufs_hba *hba)
{
	if (hba->ufs_version == UFSHCI_VERSION_10)
		return INTERRUPT_MASK_ALL_VER_10;
	else
		return INTERRUPT_MASK_ALL_VER_11;
}

/**
 * ufshcd_get_ufs_version - Get the UFS version supported by the HBA
 * @hba - Pointer to adapter instance
 *
 * Returns UFSHCI version supported by the controller
 */
static inline u32 ufshcd_get_ufs_version(struct ufs_hba *hba)
{
	return ufshcd_readl(hba, REG_UFS_VERSION);
}

/**
 * ufshcd_is_device_present - Check if any device connected to
 *			      the host controller
 * @hba: pointer to adapter instance
 *
 * Returns 1 if device present, 0 if no device detected
 */
static inline int ufshcd_is_device_present(struct ufs_hba *hba)
{
	return (ufshcd_readl(hba, REG_CONTROLLER_STATUS) &
						DEVICE_PRESENT) ? 1 : 0;
}

/**
 * ufshcd_get_tr_ocs - Get the UTRD Overall Command Status
 * @lrbp: pointer to local command reference block
 *
 * This function is used to get the OCS field from UTRD
 * Returns the OCS field in the UTRD
 */
static inline int ufshcd_get_tr_ocs(struct ufshcd_lrb *lrbp)
{
	return le32_to_cpu(lrbp->utr_descriptor_ptr->header.dword_2) & MASK_OCS;
}

/**
 * ufshcd_get_tmr_ocs - Get the UTMRD Overall Command Status
 * @task_req_descp: pointer to utp_task_req_desc structure
 *
 * This function is used to get the OCS field from UTMRD
 * Returns the OCS field in the UTMRD
 */
static inline int
ufshcd_get_tmr_ocs(struct utp_task_req_desc *task_req_descp)
{
	return le32_to_cpu(task_req_descp->header.dword_2) & MASK_OCS;
}

/**
 * ufshcd_get_tm_free_slot - get a free slot for task management request
 * @hba: per adapter instance
 * @free_slot: pointer to variable with available slot value
 *
 * Get a free tag and lock it until ufshcd_put_tm_slot() is called.
 * Returns false if a free slot is not available, else returns true
 * with the tag value in @free_slot.
 */
static bool ufshcd_get_tm_free_slot(struct ufs_hba *hba, int *free_slot)
{
	int tag;
	bool ret = false;

	if (!free_slot)
		goto out;

	do {
		tag = find_first_zero_bit(&hba->tm_slots_in_use, hba->nutmrs);
		if (tag >= hba->nutmrs)
			goto out;
	} while (test_and_set_bit_lock(tag, &hba->tm_slots_in_use));

	*free_slot = tag;
	ret = true;
out:
	return ret;
}

static inline void ufshcd_put_tm_slot(struct ufs_hba *hba, int slot)
{
	clear_bit_unlock(slot, &hba->tm_slots_in_use);
}

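/*
 * Illustrative model (an assumption for exposition, not driver code): the
 * tm_slots_in_use bitmap above acts as a tiny lock-free slot allocator.
 * find_first_zero_bit() proposes a slot and test_and_set_bit_lock()
 * atomically claims it; losing a race simply retries the scan:
 */
static inline int ufshcd_example_claim_slot(unsigned long *map, int nbits)
{
	int bit;

	do {
		bit = find_first_zero_bit(map, nbits);
		if (bit >= nbits)
			return -1;	/* every slot is busy */
	} while (test_and_set_bit_lock(bit, map));

	return bit;	/* owned until clear_bit_unlock(bit, map) */
}
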
/**
 * ufshcd_utrl_clear - Clear a bit in UTRLCLR register
 * @hba: per adapter instance
 * @pos: position of the bit to be cleared
 */
static inline void ufshcd_utrl_clear(struct ufs_hba *hba, u32 pos)
{
	ufshcd_writel(hba, ~(1 << pos), REG_UTP_TRANSFER_REQ_LIST_CLEAR);
}

/**
 * ufshcd_get_lists_status - Check UCRDY, UTRLRDY and UTMRLRDY
 * @reg: Register value of host controller status
 *
 * Returns integer, 0 on Success and positive value if failed
 */
static inline int ufshcd_get_lists_status(u32 reg)
{
	/*
	 * The mask 0xFF is for the following HCS register bits
	 * Bit		Description
	 *  0		Device Present
	 *  1		UTRLRDY
	 *  2		UTMRLRDY
	 *  3		UCRDY
	 *  4		HEI
	 *  5		DEI
	 * 6-7		reserved
	 */
	return (((reg) & (0xFF)) >> 1) ^ (0x07);
}

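/*
 * Worked example (illustrative): with the bit layout documented above, a
 * controller reporting "device present" plus all three ready bits has
 * reg = 0x0F, and
 *
 *	((0x0F & 0xFF) >> 1) ^ 0x07  ==  0x07 ^ 0x07  ==  0
 *
 * so ufshcd_get_lists_status() returns 0 only when UTRLRDY, UTMRLRDY and
 * UCRDY are all set and neither error indicator (HEI/DEI) is raised; any
 * other combination leaves a nonzero result.
 */
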
/**
 * ufshcd_get_uic_cmd_result - Get the UIC command result
 * @hba: Pointer to adapter instance
 *
 * This function gets the result of UIC command completion
 * Returns 0 on success, non zero value on error
 */
static inline int ufshcd_get_uic_cmd_result(struct ufs_hba *hba)
{
	return ufshcd_readl(hba, REG_UIC_COMMAND_ARG_2) &
	       MASK_UIC_COMMAND_RESULT;
}

/**
 * ufshcd_get_dme_attr_val - Get the value of attribute returned by UIC command
 * @hba: Pointer to adapter instance
 *
 * This function gets UIC command argument3
 * Returns the value of UIC command argument3
 */
static inline u32 ufshcd_get_dme_attr_val(struct ufs_hba *hba)
{
	return ufshcd_readl(hba, REG_UIC_COMMAND_ARG_3);
}

/**
 * ufshcd_get_req_rsp - returns the TR response transaction type
 * @ucd_rsp_ptr: pointer to response UPIU
 */
static inline int
ufshcd_get_req_rsp(struct utp_upiu_rsp *ucd_rsp_ptr)
{
	return be32_to_cpu(ucd_rsp_ptr->header.dword_0) >> 24;
}

/**
 * ufshcd_get_rsp_upiu_result - Get the result from response UPIU
 * @ucd_rsp_ptr: pointer to response UPIU
 *
 * This function gets the response status and scsi_status from response UPIU
 * Returns the response result code.
 */
static inline int
ufshcd_get_rsp_upiu_result(struct utp_upiu_rsp *ucd_rsp_ptr)
{
	return be32_to_cpu(ucd_rsp_ptr->header.dword_1) & MASK_RSP_UPIU_RESULT;
}

/*
 * ufshcd_get_rsp_upiu_data_seg_len - Get the data segment length
 *				from response UPIU
 * @ucd_rsp_ptr: pointer to response UPIU
 *
 * Return the data segment length.
 */
static inline unsigned int
ufshcd_get_rsp_upiu_data_seg_len(struct utp_upiu_rsp *ucd_rsp_ptr)
{
	return be32_to_cpu(ucd_rsp_ptr->header.dword_2) &
		MASK_RSP_UPIU_DATA_SEG_LEN;
}

/**
 * ufshcd_is_exception_event - Check if the device raised an exception event
 * @ucd_rsp_ptr: pointer to response UPIU
 *
 * The function checks if the device raised an exception event indicated in
 * the Device Information field of response UPIU.
 *
 * Returns true if exception is raised, false otherwise.
 */
static inline bool ufshcd_is_exception_event(struct utp_upiu_rsp *ucd_rsp_ptr)
{
	return be32_to_cpu(ucd_rsp_ptr->header.dword_2) &
			MASK_RSP_EXCEPTION_EVENT ? true : false;
}

/**
 * ufshcd_reset_intr_aggr - Reset interrupt aggregation values.
 * @hba: per adapter instance
 */
static inline void
ufshcd_reset_intr_aggr(struct ufs_hba *hba)
{
	ufshcd_writel(hba, INT_AGGR_ENABLE |
		      INT_AGGR_COUNTER_AND_TIMER_RESET,
		      REG_UTP_TRANSFER_REQ_INT_AGG_CONTROL);
}

/**
 * ufshcd_config_intr_aggr - Configure interrupt aggregation values.
 * @hba: per adapter instance
 * @cnt: Interrupt aggregation counter threshold
 * @tmout: Interrupt aggregation timeout value
 */
static inline void
ufshcd_config_intr_aggr(struct ufs_hba *hba, u8 cnt, u8 tmout)
{
	ufshcd_writel(hba, INT_AGGR_ENABLE | INT_AGGR_PARAM_WRITE |
		      INT_AGGR_COUNTER_THLD_VAL(cnt) |
		      INT_AGGR_TIMEOUT_VAL(tmout),
		      REG_UTP_TRANSFER_REQ_INT_AGG_CONTROL);
}

/**
 * ufshcd_disable_intr_aggr - Disables interrupt aggregation.
 * @hba: per adapter instance
 */
static inline void ufshcd_disable_intr_aggr(struct ufs_hba *hba)
{
	ufshcd_writel(hba, 0, REG_UTP_TRANSFER_REQ_INT_AGG_CONTROL);
}

/**
 * ufshcd_enable_run_stop_reg - Enable run-stop registers.
 *			When the run-stop registers are set to 1, it indicates
 *			to the host controller that it can process requests.
 * @hba: per adapter instance
 */
static void ufshcd_enable_run_stop_reg(struct ufs_hba *hba)
{
	ufshcd_writel(hba, UTP_TASK_REQ_LIST_RUN_STOP_BIT,
		      REG_UTP_TASK_REQ_LIST_RUN_STOP);
	ufshcd_writel(hba, UTP_TRANSFER_REQ_LIST_RUN_STOP_BIT,
		      REG_UTP_TRANSFER_REQ_LIST_RUN_STOP);
}

/**
 * ufshcd_hba_start - Start controller initialization sequence
 * @hba: per adapter instance
 */
static inline void ufshcd_hba_start(struct ufs_hba *hba)
{
	ufshcd_writel(hba, CONTROLLER_ENABLE, REG_CONTROLLER_ENABLE);
}

/**
 * ufshcd_is_hba_active - Get controller state
 * @hba: per adapter instance
 *
 * Returns zero if controller is active, 1 otherwise
 */
static inline int ufshcd_is_hba_active(struct ufs_hba *hba)
{
	return (ufshcd_readl(hba, REG_CONTROLLER_ENABLE) & 0x1) ? 0 : 1;
}

static void ufshcd_ungate_work(struct work_struct *work)
{
	int ret;
	unsigned long flags;
	struct ufs_hba *hba = container_of(work, struct ufs_hba,
			clk_gating.ungate_work);

	cancel_delayed_work_sync(&hba->clk_gating.gate_work);

	spin_lock_irqsave(hba->host->host_lock, flags);
	if (hba->clk_gating.state == CLKS_ON) {
		spin_unlock_irqrestore(hba->host->host_lock, flags);
		goto unblock_reqs;
	}

	spin_unlock_irqrestore(hba->host->host_lock, flags);
	ufshcd_setup_clocks(hba, true);

	/* Exit from hibern8 */
	if (ufshcd_can_hibern8_during_gating(hba)) {
		/* Prevent gating in this path */
		hba->clk_gating.is_suspended = true;
		if (ufshcd_is_link_hibern8(hba)) {
			ret = ufshcd_uic_hibern8_exit(hba);
			if (ret)
				dev_err(hba->dev, "%s: hibern8 exit failed %d\n",
					__func__, ret);
			else
				ufshcd_set_link_active(hba);
		}
		hba->clk_gating.is_suspended = false;
	}
unblock_reqs:
	if (ufshcd_is_clkscaling_enabled(hba))
		devfreq_resume_device(hba->devfreq);
	scsi_unblock_requests(hba->host);
}

/**
 * ufshcd_hold - Enable clocks that were gated earlier due to ufshcd_release.
 * Also, exit from hibern8 mode and set the link as active.
 * @hba: per adapter instance
 * @async: This indicates whether caller should ungate clocks asynchronously.
 */
int ufshcd_hold(struct ufs_hba *hba, bool async)
{
	int rc = 0;
	unsigned long flags;

	if (!ufshcd_is_clkgating_allowed(hba))
		goto out;
	spin_lock_irqsave(hba->host->host_lock, flags);
	hba->clk_gating.active_reqs++;

start:
	switch (hba->clk_gating.state) {
	case CLKS_ON:
		break;
	case REQ_CLKS_OFF:
		if (cancel_delayed_work(&hba->clk_gating.gate_work)) {
			hba->clk_gating.state = CLKS_ON;
			break;
		}
		/*
		 * If we are here, it means gating work is either done or
		 * currently running. Hence, fall through to cancel gating
		 * work and to enable clocks.
		 */
	case CLKS_OFF:
		scsi_block_requests(hba->host);
		hba->clk_gating.state = REQ_CLKS_ON;
		schedule_work(&hba->clk_gating.ungate_work);
		/*
		 * fall through to check if we should wait for this
		 * work to be done or not.
		 */
	case REQ_CLKS_ON:
		if (async) {
			rc = -EAGAIN;
			hba->clk_gating.active_reqs--;
			break;
		}

		spin_unlock_irqrestore(hba->host->host_lock, flags);
		flush_work(&hba->clk_gating.ungate_work);
		/* Make sure state is CLKS_ON before returning */
		spin_lock_irqsave(hba->host->host_lock, flags);
		goto start;
	default:
		dev_err(hba->dev, "%s: clk gating is in invalid state %d\n",
			__func__, hba->clk_gating.state);
		break;
	}
	spin_unlock_irqrestore(hba->host->host_lock, flags);
out:
	return rc;
}

static void ufshcd_gate_work(struct work_struct *work)
{
	struct ufs_hba *hba = container_of(work, struct ufs_hba,
			clk_gating.gate_work.work);
	unsigned long flags;

	spin_lock_irqsave(hba->host->host_lock, flags);
	if (hba->clk_gating.is_suspended) {
		hba->clk_gating.state = CLKS_ON;
		goto rel_lock;
	}

	if (hba->clk_gating.active_reqs
		|| hba->ufshcd_state != UFSHCD_STATE_OPERATIONAL
		|| hba->lrb_in_use || hba->outstanding_tasks
		|| hba->active_uic_cmd || hba->uic_async_done)
		goto rel_lock;

	spin_unlock_irqrestore(hba->host->host_lock, flags);

	/* put the link into hibern8 mode before turning off clocks */
	if (ufshcd_can_hibern8_during_gating(hba)) {
		if (ufshcd_uic_hibern8_enter(hba)) {
			hba->clk_gating.state = CLKS_ON;
			goto out;
		}
		ufshcd_set_link_hibern8(hba);
	}

	if (ufshcd_is_clkscaling_enabled(hba)) {
		devfreq_suspend_device(hba->devfreq);
		hba->clk_scaling.window_start_t = 0;
	}

	if (!ufshcd_is_link_active(hba))
		ufshcd_setup_clocks(hba, false);
	else
		/* If link is active, device ref_clk can't be switched off */
		__ufshcd_setup_clocks(hba, false, true);

	/*
	 * In case you are here to cancel this work, the gating state
	 * would be marked as REQ_CLKS_ON. In this case keep the state
	 * as REQ_CLKS_ON, which anyway implies that clocks are off
	 * and a request to turn them on is pending. This keeps the
	 * state machine intact and ultimately prevents the cancel work
	 * from running multiple times when new requests arrive before
	 * the current cancel work is done.
	 */
	spin_lock_irqsave(hba->host->host_lock, flags);
	if (hba->clk_gating.state == REQ_CLKS_OFF)
		hba->clk_gating.state = CLKS_OFF;

rel_lock:
	spin_unlock_irqrestore(hba->host->host_lock, flags);
out:
	return;
}

/* host lock must be held before calling this variant */
static void __ufshcd_release(struct ufs_hba *hba)
{
	if (!ufshcd_is_clkgating_allowed(hba))
		return;

	hba->clk_gating.active_reqs--;

	if (hba->clk_gating.active_reqs || hba->clk_gating.is_suspended
		|| hba->ufshcd_state != UFSHCD_STATE_OPERATIONAL
		|| hba->lrb_in_use || hba->outstanding_tasks
		|| hba->active_uic_cmd || hba->uic_async_done)
		return;

	hba->clk_gating.state = REQ_CLKS_OFF;
	schedule_delayed_work(&hba->clk_gating.gate_work,
			msecs_to_jiffies(hba->clk_gating.delay_ms));
}

void ufshcd_release(struct ufs_hba *hba)
{
	unsigned long flags;

	spin_lock_irqsave(hba->host->host_lock, flags);
	__ufshcd_release(hba);
	spin_unlock_irqrestore(hba->host->host_lock, flags);
}

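/*
 * Usage sketch (illustrative): every path that touches the host brackets
 * its hardware access with a hold/release pair, so gate_work above only
 * runs once the last requester has dropped its reference and the delay_ms
 * idle timer has expired:
 *
 *	ufshcd_hold(hba, false);	// ungate clocks; may block
 *	// ... access registers / issue the request ...
 *	ufshcd_release(hba);		// arm gate_work after delay_ms
 *
 * ufshcd_hold(hba, true) is the non-blocking variant used in the I/O
 * path: it returns -EAGAIN while ungating is still in flight, which
 * ufshcd_queuecommand() below turns into SCSI_MLQUEUE_HOST_BUSY.
 */
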
static ssize_t ufshcd_clkgate_delay_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct ufs_hba *hba = dev_get_drvdata(dev);

	return snprintf(buf, PAGE_SIZE, "%lu\n", hba->clk_gating.delay_ms);
}

static ssize_t ufshcd_clkgate_delay_store(struct device *dev,
		struct device_attribute *attr, const char *buf, size_t count)
{
	struct ufs_hba *hba = dev_get_drvdata(dev);
	unsigned long flags, value;

	if (kstrtoul(buf, 0, &value))
		return -EINVAL;

	spin_lock_irqsave(hba->host->host_lock, flags);
	hba->clk_gating.delay_ms = value;
	spin_unlock_irqrestore(hba->host->host_lock, flags);
	return count;
}

static void ufshcd_init_clk_gating(struct ufs_hba *hba)
{
	if (!ufshcd_is_clkgating_allowed(hba))
		return;

	hba->clk_gating.delay_ms = 150;
	INIT_DELAYED_WORK(&hba->clk_gating.gate_work, ufshcd_gate_work);
	INIT_WORK(&hba->clk_gating.ungate_work, ufshcd_ungate_work);

	hba->clk_gating.delay_attr.show = ufshcd_clkgate_delay_show;
	hba->clk_gating.delay_attr.store = ufshcd_clkgate_delay_store;
	sysfs_attr_init(&hba->clk_gating.delay_attr.attr);
	hba->clk_gating.delay_attr.attr.name = "clkgate_delay_ms";
	hba->clk_gating.delay_attr.attr.mode = S_IRUGO | S_IWUSR;
	if (device_create_file(hba->dev, &hba->clk_gating.delay_attr))
		dev_err(hba->dev, "Failed to create sysfs for clkgate_delay\n");
}

static void ufshcd_exit_clk_gating(struct ufs_hba *hba)
{
	if (!ufshcd_is_clkgating_allowed(hba))
		return;
	device_remove_file(hba->dev, &hba->clk_gating.delay_attr);
	cancel_work_sync(&hba->clk_gating.ungate_work);
	cancel_delayed_work_sync(&hba->clk_gating.gate_work);
}

/* Must be called with host lock acquired */
static void ufshcd_clk_scaling_start_busy(struct ufs_hba *hba)
{
	if (!ufshcd_is_clkscaling_enabled(hba))
		return;

	if (!hba->clk_scaling.is_busy_started) {
		hba->clk_scaling.busy_start_t = ktime_get();
		hba->clk_scaling.is_busy_started = true;
	}
}

static void ufshcd_clk_scaling_update_busy(struct ufs_hba *hba)
{
	struct ufs_clk_scaling *scaling = &hba->clk_scaling;

	if (!ufshcd_is_clkscaling_enabled(hba))
		return;

	if (!hba->outstanding_reqs && scaling->is_busy_started) {
		scaling->tot_busy_t += ktime_to_us(ktime_sub(ktime_get(),
					scaling->busy_start_t));
		scaling->busy_start_t = ktime_set(0, 0);
		scaling->is_busy_started = false;
	}
}
/**
 * ufshcd_send_command - Send SCSI or device management commands
 * @hba: per adapter instance
 * @task_tag: Task tag of the command
 */
static inline
void ufshcd_send_command(struct ufs_hba *hba, unsigned int task_tag)
{
	ufshcd_clk_scaling_start_busy(hba);
	__set_bit(task_tag, &hba->outstanding_reqs);
	ufshcd_writel(hba, 1 << task_tag, REG_UTP_TRANSFER_REQ_DOOR_BELL);
}

/**
 * ufshcd_copy_sense_data - Copy sense data in case of check condition
 * @lrbp: pointer to local reference block
 */
static inline void ufshcd_copy_sense_data(struct ufshcd_lrb *lrbp)
{
	int len;
	if (lrbp->sense_buffer &&
	    ufshcd_get_rsp_upiu_data_seg_len(lrbp->ucd_rsp_ptr)) {
		len = be16_to_cpu(lrbp->ucd_rsp_ptr->sr.sense_data_len);
		memcpy(lrbp->sense_buffer,
			lrbp->ucd_rsp_ptr->sr.sense_data,
			min_t(int, len, SCSI_SENSE_BUFFERSIZE));
	}
}

/**
 * ufshcd_copy_query_response() - Copy the Query Response and the data
 * descriptor
 * @hba: per adapter instance
 * @lrbp: pointer to local reference block
 */
static
int ufshcd_copy_query_response(struct ufs_hba *hba, struct ufshcd_lrb *lrbp)
{
	struct ufs_query_res *query_res = &hba->dev_cmd.query.response;

	memcpy(&query_res->upiu_res, &lrbp->ucd_rsp_ptr->qr, QUERY_OSF_SIZE);

	/* Get the descriptor */
	if (lrbp->ucd_rsp_ptr->qr.opcode == UPIU_QUERY_OPCODE_READ_DESC) {
		u8 *descp = (u8 *)lrbp->ucd_rsp_ptr +
				GENERAL_UPIU_REQUEST_SIZE;
		u16 resp_len;
		u16 buf_len;

		/* data segment length */
		resp_len = be32_to_cpu(lrbp->ucd_rsp_ptr->header.dword_2) &
						MASK_QUERY_DATA_SEG_LEN;
		buf_len = be16_to_cpu(
				hba->dev_cmd.query.request.upiu_req.length);
		if (likely(buf_len >= resp_len)) {
			memcpy(hba->dev_cmd.query.descriptor, descp, resp_len);
		} else {
			dev_warn(hba->dev,
				"%s: Response size is bigger than buffer",
				__func__);
			return -EINVAL;
		}
	}

	return 0;
}

/**
 * ufshcd_hba_capabilities - Read controller capabilities
 * @hba: per adapter instance
 */
static inline void ufshcd_hba_capabilities(struct ufs_hba *hba)
{
	hba->capabilities = ufshcd_readl(hba, REG_CONTROLLER_CAPABILITIES);

	/* nutrs and nutmrs are 0 based values */
	hba->nutrs = (hba->capabilities & MASK_TRANSFER_REQUESTS_SLOTS) + 1;
	hba->nutmrs =
	((hba->capabilities & MASK_TASK_MANAGEMENT_REQUEST_SLOTS) >> 16) + 1;
}

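/*
 * Worked example (illustrative; it assumes the UFSHCI layout where NUTRS
 * occupies bits 4:0 and NUTMRS bits 18:16 of the capabilities register):
 * a capabilities value of 0x0707001F decodes as
 *
 *	nutrs  = (0x0707001F & 0x1F) + 1        = 32 transfer req slots
 *	nutmrs = ((0x0707001F >> 16) & 0x7) + 1 =  8 task mgmt slots
 *
 * matching the "0 based values" note above.
 */
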
/**
 * ufshcd_ready_for_uic_cmd - Check if controller is ready
 *                            to accept UIC commands
 * @hba: per adapter instance
 * Return true on success, else false
 */
static inline bool ufshcd_ready_for_uic_cmd(struct ufs_hba *hba)
{
	if (ufshcd_readl(hba, REG_CONTROLLER_STATUS) & UIC_COMMAND_READY)
		return true;
	else
		return false;
}

/**
 * ufshcd_get_upmcrs - Get the power mode change request status
 * @hba: Pointer to adapter instance
 *
 * This function gets the UPMCRS field of HCS register
 * Returns value of UPMCRS field
 */
static inline u8 ufshcd_get_upmcrs(struct ufs_hba *hba)
{
	return (ufshcd_readl(hba, REG_CONTROLLER_STATUS) >> 8) & 0x7;
}

/**
 * ufshcd_dispatch_uic_cmd - Dispatch UIC commands to unipro layers
 * @hba: per adapter instance
 * @uic_cmd: UIC command
 *
 * Mutex must be held.
 */
static inline void
ufshcd_dispatch_uic_cmd(struct ufs_hba *hba, struct uic_command *uic_cmd)
{
	WARN_ON(hba->active_uic_cmd);

	hba->active_uic_cmd = uic_cmd;

	/* Write Args */
	ufshcd_writel(hba, uic_cmd->argument1, REG_UIC_COMMAND_ARG_1);
	ufshcd_writel(hba, uic_cmd->argument2, REG_UIC_COMMAND_ARG_2);
	ufshcd_writel(hba, uic_cmd->argument3, REG_UIC_COMMAND_ARG_3);

	/* Write UIC Cmd */
	ufshcd_writel(hba, uic_cmd->command & COMMAND_OPCODE_MASK,
		      REG_UIC_COMMAND);
}

/**
 * ufshcd_wait_for_uic_cmd - Wait for completion of a UIC command
 * @hba: per adapter instance
 * @uic_cmd: UIC command
 *
 * Must be called with mutex held.
 * Returns 0 only if success.
 */
static int
ufshcd_wait_for_uic_cmd(struct ufs_hba *hba, struct uic_command *uic_cmd)
{
	int ret;
	unsigned long flags;

	if (wait_for_completion_timeout(&uic_cmd->done,
					msecs_to_jiffies(UIC_CMD_TIMEOUT)))
		ret = uic_cmd->argument2 & MASK_UIC_COMMAND_RESULT;
	else
		ret = -ETIMEDOUT;

	spin_lock_irqsave(hba->host->host_lock, flags);
	hba->active_uic_cmd = NULL;
	spin_unlock_irqrestore(hba->host->host_lock, flags);

	return ret;
}

/**
 * __ufshcd_send_uic_cmd - Send UIC commands and retrieve the result
 * @hba: per adapter instance
 * @uic_cmd: UIC command
 *
 * Identical to ufshcd_send_uic_cmd() except that the caller must hold
 * the mutex and the host_lock.
 * Returns 0 only if success.
 */
static int
__ufshcd_send_uic_cmd(struct ufs_hba *hba, struct uic_command *uic_cmd)
{
	if (!ufshcd_ready_for_uic_cmd(hba)) {
		dev_err(hba->dev,
			"Controller not ready to accept UIC commands\n");
		return -EIO;
	}

	init_completion(&uic_cmd->done);

	ufshcd_dispatch_uic_cmd(hba, uic_cmd);

	return 0;
}

/**
 * ufshcd_send_uic_cmd - Send UIC commands and retrieve the result
 * @hba: per adapter instance
 * @uic_cmd: UIC command
 *
 * Returns 0 only if success.
 */
static int
ufshcd_send_uic_cmd(struct ufs_hba *hba, struct uic_command *uic_cmd)
{
	int ret;
	unsigned long flags;

	ufshcd_hold(hba, false);
	mutex_lock(&hba->uic_cmd_mutex);
	ufshcd_add_delay_before_dme_cmd(hba);

	spin_lock_irqsave(hba->host->host_lock, flags);
	ret = __ufshcd_send_uic_cmd(hba, uic_cmd);
	spin_unlock_irqrestore(hba->host->host_lock, flags);
	if (!ret)
		ret = ufshcd_wait_for_uic_cmd(hba, uic_cmd);

	mutex_unlock(&hba->uic_cmd_mutex);

	ufshcd_release(hba);
	return ret;
}

/**
 * ufshcd_map_sg - Map scatter-gather list to prdt
 * @lrbp - pointer to local reference block
 *
 * Returns 0 in case of success, non-zero value in case of failure
 */
static int ufshcd_map_sg(struct ufshcd_lrb *lrbp)
{
	struct ufshcd_sg_entry *prd_table;
	struct scatterlist *sg;
	struct scsi_cmnd *cmd;
	int sg_segments;
	int i;

	cmd = lrbp->cmd;
	sg_segments = scsi_dma_map(cmd);
	if (sg_segments < 0)
		return sg_segments;

	if (sg_segments) {
		lrbp->utr_descriptor_ptr->prd_table_length =
					cpu_to_le16((u16) (sg_segments));

		prd_table = (struct ufshcd_sg_entry *)lrbp->ucd_prdt_ptr;

		scsi_for_each_sg(cmd, sg, sg_segments, i) {
			prd_table[i].size =
				cpu_to_le32(((u32) sg_dma_len(sg))-1);
			prd_table[i].base_addr =
				cpu_to_le32(lower_32_bits(sg->dma_address));
			prd_table[i].upper_addr =
				cpu_to_le32(upper_32_bits(sg->dma_address));
		}
	} else {
		lrbp->utr_descriptor_ptr->prd_table_length = 0;
	}

	return 0;
}

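/*
 * Worked example (illustrative): each PRDT entry stores a 0-based byte
 * count, hence the "- 1" above. A 4 KiB segment at DMA address
 * 0x1_0000_1000 would be encoded as
 *
 *	size       = cpu_to_le32(4096 - 1)	== 0xFFF
 *	base_addr  = cpu_to_le32(0x00001000)	(low 32 bits)
 *	upper_addr = cpu_to_le32(0x00000001)	(high 32 bits)
 *
 * A zero-length segment cannot be expressed in this encoding, which is
 * consistent with scatter-gather entries always describing at least one
 * byte.
 */
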
/**
 * ufshcd_enable_intr - enable interrupts
 * @hba: per adapter instance
 * @intrs: interrupt bits
 */
static void ufshcd_enable_intr(struct ufs_hba *hba, u32 intrs)
{
	u32 set = ufshcd_readl(hba, REG_INTERRUPT_ENABLE);

	if (hba->ufs_version == UFSHCI_VERSION_10) {
		u32 rw;
		rw = set & INTERRUPT_MASK_RW_VER_10;
		set = rw | ((set ^ intrs) & intrs);
	} else {
		set |= intrs;
	}

	ufshcd_writel(hba, set, REG_INTERRUPT_ENABLE);
}

/**
 * ufshcd_disable_intr - disable interrupts
 * @hba: per adapter instance
 * @intrs: interrupt bits
 */
static void ufshcd_disable_intr(struct ufs_hba *hba, u32 intrs)
{
	u32 set = ufshcd_readl(hba, REG_INTERRUPT_ENABLE);

	if (hba->ufs_version == UFSHCI_VERSION_10) {
		u32 rw;
		rw = (set & INTERRUPT_MASK_RW_VER_10) &
			~(intrs & INTERRUPT_MASK_RW_VER_10);
		set = rw | ((set & intrs) & ~INTERRUPT_MASK_RW_VER_10);

	} else {
		set &= ~intrs;
	}

	ufshcd_writel(hba, set, REG_INTERRUPT_ENABLE);
}

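/*
 * Worked example (illustrative, with made-up values): on UFSHCI 1.0 only
 * the bits in INTERRUPT_MASK_RW_VER_10 are treated as read/write, so the
 * enable path rebuilds the register instead of doing a plain OR. Suppose
 * set = 0x011, intrs = 0x100, and the RW mask covers 0x0FF:
 *
 *	rw  = set & INTERRUPT_MASK_RW_VER_10	= 0x011
 *	set = rw | ((set ^ intrs) & intrs)	= 0x011 | 0x100 = 0x111
 *
 * On 1.1+ controllers the whole register is read/write and the else
 * branch's "set |= intrs" suffices.
 */
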
/**
 * ufshcd_prepare_req_desc_hdr() - Fills the request's header
 * descriptor according to the request
 * @lrbp: pointer to local reference block
 * @upiu_flags: flags required in the header
 * @cmd_dir: request's data direction
 */
static void ufshcd_prepare_req_desc_hdr(struct ufshcd_lrb *lrbp,
		u32 *upiu_flags, enum dma_data_direction cmd_dir)
{
	struct utp_transfer_req_desc *req_desc = lrbp->utr_descriptor_ptr;
	u32 data_direction;
	u32 dword_0;

	if (cmd_dir == DMA_FROM_DEVICE) {
		data_direction = UTP_DEVICE_TO_HOST;
		*upiu_flags = UPIU_CMD_FLAGS_READ;
	} else if (cmd_dir == DMA_TO_DEVICE) {
		data_direction = UTP_HOST_TO_DEVICE;
		*upiu_flags = UPIU_CMD_FLAGS_WRITE;
	} else {
		data_direction = UTP_NO_DATA_TRANSFER;
		*upiu_flags = UPIU_CMD_FLAGS_NONE;
	}

	dword_0 = data_direction | (lrbp->command_type
				<< UPIU_COMMAND_TYPE_OFFSET);
	if (lrbp->intr_cmd)
		dword_0 |= UTP_REQ_DESC_INT_CMD;

	/* Transfer request descriptor header fields */
	req_desc->header.dword_0 = cpu_to_le32(dword_0);

	/*
	 * assigning invalid value for command status. Controller
	 * updates OCS on command completion, with the command
	 * status
	 */
	req_desc->header.dword_2 =
		cpu_to_le32(OCS_INVALID_COMMAND_STATUS);
}

/**
 * ufshcd_prepare_utp_scsi_cmd_upiu() - fills the utp_transfer_req_desc,
 * for scsi commands
 * @lrbp - local reference block pointer
 * @upiu_flags - flags
 */
static
void ufshcd_prepare_utp_scsi_cmd_upiu(struct ufshcd_lrb *lrbp, u32 upiu_flags)
{
	struct utp_upiu_req *ucd_req_ptr = lrbp->ucd_req_ptr;

	/* command descriptor fields */
	ucd_req_ptr->header.dword_0 = UPIU_HEADER_DWORD(
				UPIU_TRANSACTION_COMMAND, upiu_flags,
				lrbp->lun, lrbp->task_tag);
	ucd_req_ptr->header.dword_1 = UPIU_HEADER_DWORD(
				UPIU_COMMAND_SET_TYPE_SCSI, 0, 0, 0);

	/* Total EHS length and Data segment length will be zero */
	ucd_req_ptr->header.dword_2 = 0;

	ucd_req_ptr->sc.exp_data_transfer_len =
		cpu_to_be32(lrbp->cmd->sdb.length);

	memcpy(ucd_req_ptr->sc.cdb, lrbp->cmd->cmnd,
		(min_t(unsigned short, lrbp->cmd->cmd_len, MAX_CDB_SIZE)));
}

/**
 * ufshcd_prepare_utp_query_req_upiu() - fills the utp_transfer_req_desc,
 * for query requests
 * @hba: UFS hba
 * @lrbp: local reference block pointer
 * @upiu_flags: flags
 */
static void ufshcd_prepare_utp_query_req_upiu(struct ufs_hba *hba,
				struct ufshcd_lrb *lrbp, u32 upiu_flags)
{
	struct utp_upiu_req *ucd_req_ptr = lrbp->ucd_req_ptr;
	struct ufs_query *query = &hba->dev_cmd.query;
	u16 len = be16_to_cpu(query->request.upiu_req.length);
	u8 *descp = (u8 *)lrbp->ucd_req_ptr + GENERAL_UPIU_REQUEST_SIZE;

	/* Query request header */
	ucd_req_ptr->header.dword_0 = UPIU_HEADER_DWORD(
			UPIU_TRANSACTION_QUERY_REQ, upiu_flags,
			lrbp->lun, lrbp->task_tag);
	ucd_req_ptr->header.dword_1 = UPIU_HEADER_DWORD(
			0, query->request.query_func, 0, 0);

	/* Data segment length */
	ucd_req_ptr->header.dword_2 = UPIU_HEADER_DWORD(
			0, 0, len >> 8, (u8)len);

	/* Copy the Query Request buffer as is */
	memcpy(&ucd_req_ptr->qr, &query->request.upiu_req,
			QUERY_OSF_SIZE);

	/* Copy the Descriptor */
	if (query->request.upiu_req.opcode == UPIU_QUERY_OPCODE_WRITE_DESC)
		memcpy(descp, query->descriptor, len);
}

static inline void ufshcd_prepare_utp_nop_upiu(struct ufshcd_lrb *lrbp)
{
	struct utp_upiu_req *ucd_req_ptr = lrbp->ucd_req_ptr;

	memset(ucd_req_ptr, 0, sizeof(struct utp_upiu_req));

	/* command descriptor fields */
	ucd_req_ptr->header.dword_0 =
		UPIU_HEADER_DWORD(
			UPIU_TRANSACTION_NOP_OUT, 0, 0, lrbp->task_tag);
}

/**
 * ufshcd_compose_upiu - form UFS Protocol Information Unit(UPIU)
 * @hba - per adapter instance
 * @lrbp - pointer to local reference block
 */
static int ufshcd_compose_upiu(struct ufs_hba *hba, struct ufshcd_lrb *lrbp)
{
	u32 upiu_flags;
	int ret = 0;

	switch (lrbp->command_type) {
	case UTP_CMD_TYPE_SCSI:
		if (likely(lrbp->cmd)) {
			ufshcd_prepare_req_desc_hdr(lrbp, &upiu_flags,
					lrbp->cmd->sc_data_direction);
			ufshcd_prepare_utp_scsi_cmd_upiu(lrbp, upiu_flags);
		} else {
			ret = -EINVAL;
		}
		break;
	case UTP_CMD_TYPE_DEV_MANAGE:
		ufshcd_prepare_req_desc_hdr(lrbp, &upiu_flags, DMA_NONE);
		if (hba->dev_cmd.type == DEV_CMD_TYPE_QUERY)
			ufshcd_prepare_utp_query_req_upiu(
					hba, lrbp, upiu_flags);
		else if (hba->dev_cmd.type == DEV_CMD_TYPE_NOP)
			ufshcd_prepare_utp_nop_upiu(lrbp);
		else
			ret = -EINVAL;
		break;
	case UTP_CMD_TYPE_UFS:
		/* For UFS native command implementation */
		ret = -ENOTSUPP;
		dev_err(hba->dev, "%s: UFS native commands are not supported\n",
			__func__);
		break;
	default:
		ret = -ENOTSUPP;
		dev_err(hba->dev, "%s: unknown command type: 0x%x\n",
				__func__, lrbp->command_type);
		break;
	} /* end of switch */

	return ret;
}

/*
 * ufshcd_scsi_to_upiu_lun - maps scsi LUN to UPIU LUN
 * @scsi_lun: scsi LUN id
 *
 * Returns UPIU LUN id
 */
static inline u8 ufshcd_scsi_to_upiu_lun(unsigned int scsi_lun)
{
	if (scsi_is_wlun(scsi_lun))
		return (scsi_lun & UFS_UPIU_MAX_UNIT_NUM_ID)
			| UFS_UPIU_WLUN_ID;
	else
		return scsi_lun & UFS_UPIU_MAX_UNIT_NUM_ID;
}

/**
 * ufshcd_upiu_wlun_to_scsi_wlun - maps UPIU W-LUN id to SCSI W-LUN ID
 * @upiu_wlun_id: UPIU W-LUN id
 *
 * Returns SCSI W-LUN id
 */
static inline u16 ufshcd_upiu_wlun_to_scsi_wlun(u8 upiu_wlun_id)
{
	return (upiu_wlun_id & ~UFS_UPIU_WLUN_ID) | SCSI_W_LUN_BASE;
}

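/*
 * Worked example (illustrative; it assumes UFS_UPIU_MAX_UNIT_NUM_ID = 0x7F,
 * UFS_UPIU_WLUN_ID = BIT(7) and SCSI_W_LUN_BASE = 0xc100): the SCSI
 * REPORT LUNS well-known LUN 0xc101 round-trips through the two helpers:
 *
 *	ufshcd_scsi_to_upiu_lun(0xc101)     = 0x01 | 0x80   = 0x81
 *	ufshcd_upiu_wlun_to_scsi_wlun(0x81) = 0x01 | 0xc100 = 0xc101
 *
 * while an ordinary LUN (e.g. 2) passes through ufshcd_scsi_to_upiu_lun()
 * unchanged.
 */
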
/**
 * ufshcd_queuecommand - main entry point for SCSI requests
 * @host: SCSI host pointer
 * @cmd: command from SCSI Midlayer
 *
 * Returns 0 for success, non-zero in case of failure
 */
static int ufshcd_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *cmd)
{
	struct ufshcd_lrb *lrbp;
	struct ufs_hba *hba;
	unsigned long flags;
	int tag;
	int err = 0;

	hba = shost_priv(host);

	tag = cmd->request->tag;

	spin_lock_irqsave(hba->host->host_lock, flags);
	switch (hba->ufshcd_state) {
	case UFSHCD_STATE_OPERATIONAL:
		break;
	case UFSHCD_STATE_RESET:
		err = SCSI_MLQUEUE_HOST_BUSY;
		goto out_unlock;
	case UFSHCD_STATE_ERROR:
		set_host_byte(cmd, DID_ERROR);
		cmd->scsi_done(cmd);
		goto out_unlock;
	default:
		dev_WARN_ONCE(hba->dev, 1, "%s: invalid state %d\n",
				__func__, hba->ufshcd_state);
		set_host_byte(cmd, DID_BAD_TARGET);
		cmd->scsi_done(cmd);
		goto out_unlock;
	}
	spin_unlock_irqrestore(hba->host->host_lock, flags);

	/* acquire the tag to make sure device cmds don't use it */
	if (test_and_set_bit_lock(tag, &hba->lrb_in_use)) {
		/*
		 * Dev manage command in progress, requeue the command.
		 * Requeuing the command helps in cases where the request *may*
		 * find different tag instead of waiting for dev manage command
		 * completion.
		 */
		err = SCSI_MLQUEUE_HOST_BUSY;
		goto out;
	}

	err = ufshcd_hold(hba, true);
	if (err) {
		err = SCSI_MLQUEUE_HOST_BUSY;
		clear_bit_unlock(tag, &hba->lrb_in_use);
		goto out;
	}
	WARN_ON(hba->clk_gating.state != CLKS_ON);

	lrbp = &hba->lrb[tag];

	WARN_ON(lrbp->cmd);
	lrbp->cmd = cmd;
	lrbp->sense_bufflen = SCSI_SENSE_BUFFERSIZE;
	lrbp->sense_buffer = cmd->sense_buffer;
	lrbp->task_tag = tag;
	lrbp->lun = ufshcd_scsi_to_upiu_lun(cmd->device->lun);
	lrbp->intr_cmd = !ufshcd_is_intr_aggr_allowed(hba) ? true : false;
	lrbp->command_type = UTP_CMD_TYPE_SCSI;

	/* form UPIU before issuing the command */
	ufshcd_compose_upiu(hba, lrbp);
	err = ufshcd_map_sg(lrbp);
	if (err) {
		lrbp->cmd = NULL;
		clear_bit_unlock(tag, &hba->lrb_in_use);
		goto out;
	}

	/* issue command to the controller */
	spin_lock_irqsave(hba->host->host_lock, flags);
	ufshcd_send_command(hba, tag);
out_unlock:
	spin_unlock_irqrestore(hba->host->host_lock, flags);
out:
	return err;
}

static int ufshcd_compose_dev_cmd(struct ufs_hba *hba,
		struct ufshcd_lrb *lrbp, enum dev_cmd_type cmd_type, int tag)
{
	lrbp->cmd = NULL;
	lrbp->sense_bufflen = 0;
	lrbp->sense_buffer = NULL;
	lrbp->task_tag = tag;
	lrbp->lun = 0; /* device management cmd is not specific to any LUN */
	lrbp->command_type = UTP_CMD_TYPE_DEV_MANAGE;
	lrbp->intr_cmd = true; /* No interrupt aggregation */
	hba->dev_cmd.type = cmd_type;

	return ufshcd_compose_upiu(hba, lrbp);
}

static int
ufshcd_clear_cmd(struct ufs_hba *hba, int tag)
{
	int err = 0;
	unsigned long flags;
	u32 mask = 1 << tag;

	/* clear outstanding transaction before retry */
	spin_lock_irqsave(hba->host->host_lock, flags);
	ufshcd_utrl_clear(hba, tag);
	spin_unlock_irqrestore(hba->host->host_lock, flags);

	/*
	 * wait for h/w to clear corresponding bit in door-bell.
	 * max. wait is 1 sec.
	 */
	err = ufshcd_wait_for_register(hba,
			REG_UTP_TRANSFER_REQ_DOOR_BELL,
			mask, ~mask, 1000, 1000);

	return err;
}

static int
ufshcd_check_query_response(struct ufs_hba *hba, struct ufshcd_lrb *lrbp)
{
	struct ufs_query_res *query_res = &hba->dev_cmd.query.response;

	/* Get the UPIU response */
	query_res->response = ufshcd_get_rsp_upiu_result(lrbp->ucd_rsp_ptr) >>
				UPIU_RSP_CODE_OFFSET;
	return query_res->response;
}

/**
 * ufshcd_dev_cmd_completion() - handles device management command responses
 * @hba: per adapter instance
 * @lrbp: pointer to local reference block
 */
static int
ufshcd_dev_cmd_completion(struct ufs_hba *hba, struct ufshcd_lrb *lrbp)
{
	int resp;
	int err = 0;

	resp = ufshcd_get_req_rsp(lrbp->ucd_rsp_ptr);

	switch (resp) {
	case UPIU_TRANSACTION_NOP_IN:
		if (hba->dev_cmd.type != DEV_CMD_TYPE_NOP) {
			err = -EINVAL;
			dev_err(hba->dev, "%s: unexpected response %x\n",
					__func__, resp);
		}
		break;
	case UPIU_TRANSACTION_QUERY_RSP:
		err = ufshcd_check_query_response(hba, lrbp);
		if (!err)
			err = ufshcd_copy_query_response(hba, lrbp);
		break;
	case UPIU_TRANSACTION_REJECT_UPIU:
		/* TODO: handle Reject UPIU Response */
		err = -EPERM;
		dev_err(hba->dev, "%s: Reject UPIU not fully implemented\n",
				__func__);
		break;
	default:
		err = -EINVAL;
		dev_err(hba->dev, "%s: Invalid device management cmd response: %x\n",
				__func__, resp);
		break;
	}

	return err;
}

static int ufshcd_wait_for_dev_cmd(struct ufs_hba *hba,
		struct ufshcd_lrb *lrbp, int max_timeout)
{
	int err = 0;
	unsigned long time_left;
	unsigned long flags;

	time_left = wait_for_completion_timeout(hba->dev_cmd.complete,
			msecs_to_jiffies(max_timeout));

	spin_lock_irqsave(hba->host->host_lock, flags);
	hba->dev_cmd.complete = NULL;
	if (likely(time_left)) {
		err = ufshcd_get_tr_ocs(lrbp);
		if (!err)
			err = ufshcd_dev_cmd_completion(hba, lrbp);
	}
	spin_unlock_irqrestore(hba->host->host_lock, flags);

	if (!time_left) {
		err = -ETIMEDOUT;
		if (!ufshcd_clear_cmd(hba, lrbp->task_tag))
			/* successfully cleared the command, retry if needed */
			err = -EAGAIN;
	}

	return err;
}

1481 | /** | |
1482 | * ufshcd_get_dev_cmd_tag - Get device management command tag | |
1483 | * @hba: per-adapter instance | |
1484 | * @tag: pointer to variable with available slot value | |
1485 | * | |
1486 | * Get a free slot and lock it until device management command | |
1487 | * completes. | |
1488 | * | |
1489 | * Returns false if a free slot is unavailable for locking, else | |
1490 | * returns true with the tag value in @tag_out. | |
1491 | */ | |
1492 | static bool ufshcd_get_dev_cmd_tag(struct ufs_hba *hba, int *tag_out) | |
1493 | { | |
1494 | int tag; | |
1495 | bool ret = false; | |
1496 | unsigned long tmp; | |
1497 | ||
1498 | if (!tag_out) | |
1499 | goto out; | |
1500 | ||
1501 | do { | |
1502 | tmp = ~hba->lrb_in_use; | |
1503 | tag = find_last_bit(&tmp, hba->nutrs); | |
1504 | if (tag >= hba->nutrs) | |
1505 | goto out; | |
1506 | } while (test_and_set_bit_lock(tag, &hba->lrb_in_use)); | |
1507 | ||
1508 | *tag_out = tag; | |
1509 | ret = true; | |
1510 | out: | |
1511 | return ret; | |
1512 | } | |
1513 | ||
1514 | static inline void ufshcd_put_dev_cmd_tag(struct ufs_hba *hba, int tag) | |
1515 | { | |
1516 | clear_bit_unlock(tag, &hba->lrb_in_use); | |
1517 | } | |
1518 | ||
1519 | /** | |
1520 | * ufshcd_exec_dev_cmd - API for sending device management requests | |
1521 | * @hba: UFS hba | |
1522 | * @cmd_type: specifies the type (NOP, Query...) | |
1523 | * @timeout: timeout in milliseconds | |
1524 | * | |
68078d5c DR |
1525 | * NOTE: Since there is only one available tag for device management commands, |
1526 | * it is expected you hold the hba->dev_cmd.lock mutex. | |
5a0b0cb9 SRT |
1527 | */ |
1528 | static int ufshcd_exec_dev_cmd(struct ufs_hba *hba, | |
1529 | enum dev_cmd_type cmd_type, int timeout) | |
1530 | { | |
1531 | struct ufshcd_lrb *lrbp; | |
1532 | int err; | |
1533 | int tag; | |
1534 | struct completion wait; | |
1535 | unsigned long flags; | |
1536 | ||
1537 | /* | |
1538 | * Get free slot, sleep if slots are unavailable. | |
1539 | * Even though we use wait_event() which sleeps indefinitely, | |
1540 | * the maximum wait time is bounded by SCSI request timeout. | |
1541 | */ | |
1542 | wait_event(hba->dev_cmd.tag_wq, ufshcd_get_dev_cmd_tag(hba, &tag)); | |
1543 | ||
1544 | init_completion(&wait); | |
1545 | lrbp = &hba->lrb[tag]; | |
1546 | WARN_ON(lrbp->cmd); | |
1547 | err = ufshcd_compose_dev_cmd(hba, lrbp, cmd_type, tag); | |
1548 | if (unlikely(err)) | |
1549 | goto out_put_tag; | |
1550 | ||
1551 | hba->dev_cmd.complete = &wait; | |
1552 | ||
1553 | spin_lock_irqsave(hba->host->host_lock, flags); | |
1554 | ufshcd_send_command(hba, tag); | |
1555 | spin_unlock_irqrestore(hba->host->host_lock, flags); | |
1556 | ||
1557 | err = ufshcd_wait_for_dev_cmd(hba, lrbp, timeout); | |
1558 | ||
1559 | out_put_tag: | |
1560 | ufshcd_put_dev_cmd_tag(hba, tag); | |
1561 | wake_up(&hba->dev_cmd.tag_wq); | |
1562 | return err; | |
1563 | } | |
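/*
 * Illustrative sketch, not part of the driver (example_send_nop is a
 * hypothetical name): the expected call pattern for ufshcd_exec_dev_cmd().
 * Since there is only one device management tag, callers serialize on
 * hba->dev_cmd.lock around the call, exactly as ufshcd_verify_dev_init()
 * does further below.
 */
static int example_send_nop(struct ufs_hba *hba)
{
	int err;

	mutex_lock(&hba->dev_cmd.lock);
	err = ufshcd_exec_dev_cmd(hba, DEV_CMD_TYPE_NOP, NOP_OUT_TIMEOUT);
	mutex_unlock(&hba->dev_cmd.lock);

	return err;
}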
1564 | ||
d44a5f98 DR |
1565 | /** |
1566 | * ufshcd_init_query() - init the query response and request parameters | |
1567 | * @hba: per-adapter instance | |
1568 | * @request: address of the request pointer to be initialized | |
1569 | * @response: address of the response pointer to be initialized | |
1570 | * @opcode: operation to perform | |
1571 | * @idn: flag idn to access | |
1572 | * @index: LU number to access | |
1573 | * @selector: query/flag/descriptor further identification | |
1574 | */ | |
1575 | static inline void ufshcd_init_query(struct ufs_hba *hba, | |
1576 | struct ufs_query_req **request, struct ufs_query_res **response, | |
1577 | enum query_opcode opcode, u8 idn, u8 index, u8 selector) | |
1578 | { | |
1579 | *request = &hba->dev_cmd.query.request; | |
1580 | *response = &hba->dev_cmd.query.response; | |
1581 | memset(*request, 0, sizeof(struct ufs_query_req)); | |
1582 | memset(*response, 0, sizeof(struct ufs_query_res)); | |
1583 | (*request)->upiu_req.opcode = opcode; | |
1584 | (*request)->upiu_req.idn = idn; | |
1585 | (*request)->upiu_req.index = index; | |
1586 | (*request)->upiu_req.selector = selector; | |
1587 | } | |
1588 | ||
68078d5c DR |
1589 | /** |
1590 | * ufshcd_query_flag() - API function for sending flag query requests | |
1591 | * @hba: per-adapter instance | |
1592 | * @opcode: flag query to perform | |
1593 | * @idn: flag idn to access | |
1594 | * @flag_res: the flag value after the query request completes | |
1595 | * | |
1596 | * Returns 0 for success, non-zero in case of failure | |
1597 | */ | |
1598 | static int ufshcd_query_flag(struct ufs_hba *hba, enum query_opcode opcode, | |
1599 | enum flag_idn idn, bool *flag_res) | |
1600 | { | |
d44a5f98 DR |
1601 | struct ufs_query_req *request = NULL; |
1602 | struct ufs_query_res *response = NULL; | |
1603 | int err, index = 0, selector = 0; | |
68078d5c DR |
1604 | |
1605 | BUG_ON(!hba); | |
1606 | ||
1ab27c9c | 1607 | ufshcd_hold(hba, false); |
68078d5c | 1608 | mutex_lock(&hba->dev_cmd.lock); |
d44a5f98 DR |
1609 | ufshcd_init_query(hba, &request, &response, opcode, idn, index, |
1610 | selector); | |
68078d5c DR |
1611 | |
1612 | switch (opcode) { | |
1613 | case UPIU_QUERY_OPCODE_SET_FLAG: | |
1614 | case UPIU_QUERY_OPCODE_CLEAR_FLAG: | |
1615 | case UPIU_QUERY_OPCODE_TOGGLE_FLAG: | |
1616 | request->query_func = UPIU_QUERY_FUNC_STANDARD_WRITE_REQUEST; | |
1617 | break; | |
1618 | case UPIU_QUERY_OPCODE_READ_FLAG: | |
1619 | request->query_func = UPIU_QUERY_FUNC_STANDARD_READ_REQUEST; | |
1620 | if (!flag_res) { | |
1621 | /* No dummy reads */ | |
1622 | dev_err(hba->dev, "%s: Invalid argument for read request\n", | |
1623 | __func__); | |
1624 | err = -EINVAL; | |
1625 | goto out_unlock; | |
1626 | } | |
1627 | break; | |
1628 | default: | |
1629 | dev_err(hba->dev, | |
1630 | "%s: Expected query flag opcode but got = %d\n", | |
1631 | __func__, opcode); | |
1632 | err = -EINVAL; | |
1633 | goto out_unlock; | |
1634 | } | |
68078d5c | 1635 | |
d44a5f98 | 1636 | err = ufshcd_exec_dev_cmd(hba, DEV_CMD_TYPE_QUERY, QUERY_REQ_TIMEOUT); |
68078d5c DR |
1637 | |
1638 | if (err) { | |
1639 | dev_err(hba->dev, | |
1640 | "%s: Sending flag query for idn %d failed, err = %d\n", | |
1641 | __func__, idn, err); | |
1642 | goto out_unlock; | |
1643 | } | |
1644 | ||
1645 | if (flag_res) | |
e8c8e82a | 1646 | *flag_res = (be32_to_cpu(response->upiu_res.value) & |
68078d5c DR |
1647 | MASK_QUERY_UPIU_FLAG_LOC) & 0x1; |
1648 | ||
1649 | out_unlock: | |
1650 | mutex_unlock(&hba->dev_cmd.lock); | |
1ab27c9c | 1651 | ufshcd_release(hba); |
68078d5c DR |
1652 | return err; |
1653 | } | |
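/*
 * Illustrative sketch (hypothetical helper): reading a device flag with
 * ufshcd_query_flag(). QUERY_FLAG_IDN_FDEVICEINIT is the same idn that
 * ufshcd_complete_dev_init() polls further below; note that a NULL
 * flag_res is rejected for READ_FLAG but accepted for the
 * SET/CLEAR/TOGGLE opcodes.
 */
static int example_read_fdeviceinit(struct ufs_hba *hba, bool *flag_res)
{
	return ufshcd_query_flag(hba, UPIU_QUERY_OPCODE_READ_FLAG,
			QUERY_FLAG_IDN_FDEVICEINIT, flag_res);
}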
1654 | ||
66ec6d59 SRT |
1655 | /** |
1656 | * ufshcd_query_attr - API function for sending attribute requests | |
1657 | * @hba: per-adapter instance | |
1658 | * @opcode: attribute opcode | |
1659 | * @idn: attribute idn to access | |
1660 | * @index: index field | |
1661 | * @selector: selector field | |
1662 | * @attr_val: the attribute value after the query request completes | |
1663 | * | |
1664 | * Returns 0 for success, non-zero in case of failure | |
1665 | */ | |
bdbe5d2f | 1666 | static int ufshcd_query_attr(struct ufs_hba *hba, enum query_opcode opcode, |
66ec6d59 SRT |
1667 | enum attr_idn idn, u8 index, u8 selector, u32 *attr_val) |
1668 | { | |
d44a5f98 DR |
1669 | struct ufs_query_req *request = NULL; |
1670 | struct ufs_query_res *response = NULL; | |
66ec6d59 SRT |
1671 | int err; |
1672 | ||
1673 | BUG_ON(!hba); | |
1674 | ||
1ab27c9c | 1675 | ufshcd_hold(hba, false); |
66ec6d59 SRT |
1676 | if (!attr_val) { |
1677 | dev_err(hba->dev, "%s: attribute value required for opcode 0x%x\n", | |
1678 | __func__, opcode); | |
1679 | err = -EINVAL; | |
1680 | goto out; | |
1681 | } | |
1682 | ||
1683 | mutex_lock(&hba->dev_cmd.lock); | |
d44a5f98 DR |
1684 | ufshcd_init_query(hba, &request, &response, opcode, idn, index, |
1685 | selector); | |
66ec6d59 SRT |
1686 | |
1687 | switch (opcode) { | |
1688 | case UPIU_QUERY_OPCODE_WRITE_ATTR: | |
1689 | request->query_func = UPIU_QUERY_FUNC_STANDARD_WRITE_REQUEST; | |
e8c8e82a | 1690 | request->upiu_req.value = cpu_to_be32(*attr_val); |
66ec6d59 SRT |
1691 | break; |
1692 | case UPIU_QUERY_OPCODE_READ_ATTR: | |
1693 | request->query_func = UPIU_QUERY_FUNC_STANDARD_READ_REQUEST; | |
1694 | break; | |
1695 | default: | |
1696 | dev_err(hba->dev, "%s: Expected query attr opcode but got = 0x%.2x\n", | |
1697 | __func__, opcode); | |
1698 | err = -EINVAL; | |
1699 | goto out_unlock; | |
1700 | } | |
1701 | ||
d44a5f98 | 1702 | err = ufshcd_exec_dev_cmd(hba, DEV_CMD_TYPE_QUERY, QUERY_REQ_TIMEOUT); |
66ec6d59 SRT |
1703 | |
1704 | if (err) { | |
1705 | dev_err(hba->dev, "%s: opcode 0x%.2x for idn %d failed, err = %d\n", | |
1706 | __func__, opcode, idn, err); | |
1707 | goto out_unlock; | |
1708 | } | |
1709 | ||
e8c8e82a | 1710 | *attr_val = be32_to_cpu(response->upiu_res.value); |
66ec6d59 SRT |
1711 | |
1712 | out_unlock: | |
1713 | mutex_unlock(&hba->dev_cmd.lock); | |
1714 | out: | |
1ab27c9c | 1715 | ufshcd_release(hba); |
66ec6d59 SRT |
1716 | return err; |
1717 | } | |
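/*
 * Illustrative sketch (hypothetical helper): reading a u32 attribute with
 * index and selector both zero, the common case for device-wide attributes.
 * The big-endian conversion is done inside ufshcd_query_attr(), so the
 * caller receives a ready-to-use CPU-order value.
 */
static int example_read_attr(struct ufs_hba *hba, enum attr_idn idn,
		u32 *attr_val)
{
	return ufshcd_query_attr(hba, UPIU_QUERY_OPCODE_READ_ATTR,
			idn, 0, 0, attr_val);
}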
1718 | ||
d44a5f98 DR |
1719 | /** |
1720 | * ufshcd_query_descriptor - API function for sending descriptor requests | |
1721 | * @hba: per-adapter instance | |
1722 | * @opcode: descriptor opcode | |
1723 | * @idn: descriptor idn to access | |
1724 | * @index: index field | |
1725 | * @selector: selector field | |
1726 | * @desc_buf: the buffer that contains the descriptor | |
1727 | * @buf_len: length parameter passed to the device | |
1728 | * | |
1729 | * Returns 0 for success, non-zero in case of failure. | |
1730 | * The buf_len parameter will contain, on return, the length parameter | |
1731 | * received on the response. | |
1732 | */ | |
7289f983 | 1733 | static int ufshcd_query_descriptor(struct ufs_hba *hba, |
d44a5f98 DR |
1734 | enum query_opcode opcode, enum desc_idn idn, u8 index, |
1735 | u8 selector, u8 *desc_buf, int *buf_len) | |
1736 | { | |
1737 | struct ufs_query_req *request = NULL; | |
1738 | struct ufs_query_res *response = NULL; | |
1739 | int err; | |
1740 | ||
1741 | BUG_ON(!hba); | |
1742 | ||
1ab27c9c | 1743 | ufshcd_hold(hba, false); |
d44a5f98 DR |
1744 | if (!desc_buf) { |
1745 | dev_err(hba->dev, "%s: descriptor buffer required for opcode 0x%x\n", | |
1746 | __func__, opcode); | |
1747 | err = -EINVAL; | |
1748 | goto out; | |
1749 | } | |
1750 | ||
1751 | if (*buf_len <= QUERY_DESC_MIN_SIZE || *buf_len > QUERY_DESC_MAX_SIZE) { | |
1752 | dev_err(hba->dev, "%s: descriptor buffer size (%d) is out of range\n", | |
1753 | __func__, *buf_len); | |
1754 | err = -EINVAL; | |
1755 | goto out; | |
1756 | } | |
1757 | ||
1758 | mutex_lock(&hba->dev_cmd.lock); | |
1759 | ufshcd_init_query(hba, &request, &response, opcode, idn, index, | |
1760 | selector); | |
1761 | hba->dev_cmd.query.descriptor = desc_buf; | |
ea2aab24 | 1762 | request->upiu_req.length = cpu_to_be16(*buf_len); |
d44a5f98 DR |
1763 | |
1764 | switch (opcode) { | |
1765 | case UPIU_QUERY_OPCODE_WRITE_DESC: | |
1766 | request->query_func = UPIU_QUERY_FUNC_STANDARD_WRITE_REQUEST; | |
1767 | break; | |
1768 | case UPIU_QUERY_OPCODE_READ_DESC: | |
1769 | request->query_func = UPIU_QUERY_FUNC_STANDARD_READ_REQUEST; | |
1770 | break; | |
1771 | default: | |
1772 | dev_err(hba->dev, | |
1773 | "%s: Expected query descriptor opcode but got = 0x%.2x\n", | |
1774 | __func__, opcode); | |
1775 | err = -EINVAL; | |
1776 | goto out_unlock; | |
1777 | } | |
1778 | ||
1779 | err = ufshcd_exec_dev_cmd(hba, DEV_CMD_TYPE_QUERY, QUERY_REQ_TIMEOUT); | |
1780 | ||
1781 | if (err) { | |
1782 | dev_err(hba->dev, "%s: opcode 0x%.2x for idn %d failed, err = %d\n", | |
1783 | __func__, opcode, idn, err); | |
1784 | goto out_unlock; | |
1785 | } | |
1786 | ||
1787 | hba->dev_cmd.query.descriptor = NULL; | |
ea2aab24 | 1788 | *buf_len = be16_to_cpu(response->upiu_res.length); |
d44a5f98 DR |
1789 | |
1790 | out_unlock: | |
1791 | mutex_unlock(&hba->dev_cmd.lock); | |
1792 | out: | |
1ab27c9c | 1793 | ufshcd_release(hba); |
d44a5f98 DR |
1794 | return err; |
1795 | } | |
1796 | ||
da461cec SJ |
1797 | /** |
1798 | * ufshcd_read_desc_param - read the specified descriptor parameter | |
1799 | * @hba: Pointer to adapter instance | |
1800 | * @desc_id: descriptor idn value | |
1801 | * @desc_index: descriptor index | |
1802 | * @param_offset: offset of the parameter to read | |
1803 | * @param_read_buf: pointer to buffer where parameter would be read | |
1804 | * @param_size: sizeof(param_read_buf) | |
1805 | * | |
1806 | * Return 0 in case of success, non-zero otherwise | |
1807 | */ | |
1808 | static int ufshcd_read_desc_param(struct ufs_hba *hba, | |
1809 | enum desc_idn desc_id, | |
1810 | int desc_index, | |
1811 | u32 param_offset, | |
1812 | u8 *param_read_buf, | |
1813 | u32 param_size) | |
1814 | { | |
1815 | int ret; | |
1816 | u8 *desc_buf; | |
1817 | u32 buff_len; | |
1818 | bool is_kmalloc = true; | |
1819 | ||
1820 | /* safety checks */ | |
1821 | if (desc_id >= QUERY_DESC_IDN_MAX) | |
1822 | return -EINVAL; | |
1823 | ||
1824 | buff_len = ufs_query_desc_max_size[desc_id]; | |
1825 | if ((param_offset + param_size) > buff_len) | |
1826 | return -EINVAL; | |
1827 | ||
1828 | if (!param_offset && (param_size == buff_len)) { | |
1829 | /* memory space already available to hold full descriptor */ | |
1830 | desc_buf = param_read_buf; | |
1831 | is_kmalloc = false; | |
1832 | } else { | |
1833 | /* allocate memory to hold full descriptor */ | |
1834 | desc_buf = kmalloc(buff_len, GFP_KERNEL); | |
1835 | if (!desc_buf) | |
1836 | return -ENOMEM; | |
1837 | } | |
1838 | ||
1839 | ret = ufshcd_query_descriptor(hba, UPIU_QUERY_OPCODE_READ_DESC, | |
1840 | desc_id, desc_index, 0, desc_buf, | |
1841 | &buff_len); | |
1842 | ||
1843 | if (ret || (buff_len < ufs_query_desc_max_size[desc_id]) || | |
1844 | (desc_buf[QUERY_DESC_LENGTH_OFFSET] != | |
1845 | ufs_query_desc_max_size[desc_id]) | |
1846 | || (desc_buf[QUERY_DESC_DESC_TYPE_OFFSET] != desc_id)) { | |
1847 | dev_err(hba->dev, "%s: Failed reading descriptor. desc_id %d param_offset %d buff_len %d ret %d\n", | |
1848 | __func__, desc_id, param_offset, buff_len, ret); | |
1849 | if (!ret) | |
1850 | ret = -EINVAL; | |
1851 | ||
1852 | goto out; | |
1853 | } | |
1854 | ||
1855 | if (is_kmalloc) | |
1856 | memcpy(param_read_buf, &desc_buf[param_offset], param_size); | |
1857 | out: | |
1858 | if (is_kmalloc) | |
1859 | kfree(desc_buf); | |
1860 | return ret; | |
1861 | } | |
1862 | ||
1863 | static inline int ufshcd_read_desc(struct ufs_hba *hba, | |
1864 | enum desc_idn desc_id, | |
1865 | int desc_index, | |
1866 | u8 *buf, | |
1867 | u32 size) | |
1868 | { | |
1869 | return ufshcd_read_desc_param(hba, desc_id, desc_index, 0, buf, size); | |
1870 | } | |
1871 | ||
1872 | static inline int ufshcd_read_power_desc(struct ufs_hba *hba, | |
1873 | u8 *buf, | |
1874 | u32 size) | |
1875 | { | |
1876 | return ufshcd_read_desc(hba, QUERY_DESC_IDN_POWER, 0, buf, size); | |
1877 | } | |
1878 | ||
1879 | /** | |
1880 | * ufshcd_read_unit_desc_param - read the specified unit descriptor parameter | |
1881 | * @hba: Pointer to adapter instance | |
1882 | * @lun: lun id | |
1883 | * @param_offset: offset of the parameter to read | |
1884 | * @param_read_buf: pointer to buffer where parameter would be read | |
1885 | * @param_size: sizeof(param_read_buf) | |
1886 | * | |
1887 | * Return 0 in case of success, non-zero otherwise | |
1888 | */ | |
1889 | static inline int ufshcd_read_unit_desc_param(struct ufs_hba *hba, | |
1890 | int lun, | |
1891 | enum unit_desc_param param_offset, | |
1892 | u8 *param_read_buf, | |
1893 | u32 param_size) | |
1894 | { | |
1895 | /* | |
1896 | * Unit descriptors are only available for general purpose LUs (LUN id | |
1897 | * from 0 to 7) and RPMB Well known LU. | |
1898 | */ | |
0ce147d4 | 1899 | if (lun != UFS_UPIU_RPMB_WLUN && (lun >= UFS_UPIU_MAX_GENERAL_LUN)) |
da461cec SJ |
1900 | return -EOPNOTSUPP; |
1901 | ||
1902 | return ufshcd_read_desc_param(hba, QUERY_DESC_IDN_UNIT, lun, | |
1903 | param_offset, param_read_buf, param_size); | |
1904 | } | |
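/*
 * Illustrative sketch (hypothetical helper): pulling a single byte out of a
 * unit descriptor. This mirrors how ufshcd_set_queue_depth() further below
 * reads UNIT_DESC_PARAM_LU_Q_DEPTH; note that callers holding a SCSI lun
 * first map it with ufshcd_scsi_to_upiu_lun().
 */
static int example_read_lu_queue_depth(struct ufs_hba *hba, int lun,
		u8 *lun_qdepth)
{
	return ufshcd_read_unit_desc_param(hba, lun,
			UNIT_DESC_PARAM_LU_Q_DEPTH,
			lun_qdepth, sizeof(*lun_qdepth));
}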
1905 | ||
7a3e97b0 SY |
1906 | /** |
1907 | * ufshcd_memory_alloc - allocate memory for host memory space data structures | |
1908 | * @hba: per adapter instance | |
1909 | * | |
1910 | * 1. Allocate DMA memory for Command Descriptor array | |
1911 | * Each command descriptor consists of Command UPIU, Response UPIU and PRDT | |
1912 | * 2. Allocate DMA memory for UTP Transfer Request Descriptor List (UTRDL). | |
1913 | * 3. Allocate DMA memory for UTP Task Management Request Descriptor List | |
1914 | * (UTMRDL) | |
1915 | * 4. Allocate memory for local reference block(lrb). | |
1916 | * | |
1917 | * Returns 0 for success, non-zero in case of failure | |
1918 | */ | |
1919 | static int ufshcd_memory_alloc(struct ufs_hba *hba) | |
1920 | { | |
1921 | size_t utmrdl_size, utrdl_size, ucdl_size; | |
1922 | ||
1923 | /* Allocate memory for UTP command descriptors */ | |
1924 | ucdl_size = (sizeof(struct utp_transfer_cmd_desc) * hba->nutrs); | |
2953f850 SJ |
1925 | hba->ucdl_base_addr = dmam_alloc_coherent(hba->dev, |
1926 | ucdl_size, | |
1927 | &hba->ucdl_dma_addr, | |
1928 | GFP_KERNEL); | |
7a3e97b0 SY |
1929 | |
1930 | /* | |
1931 | * UFSHCI requires UTP command descriptors to be 128-byte aligned. | |
1932 | * Make sure hba->ucdl_dma_addr is aligned to PAGE_SIZE; | |
1933 | * if hba->ucdl_dma_addr is aligned to PAGE_SIZE, then it will | |
1934 | * be aligned to 128 bytes as well. | |
1935 | */ | |
1936 | if (!hba->ucdl_base_addr || | |
1937 | WARN_ON(hba->ucdl_dma_addr & (PAGE_SIZE - 1))) { | |
3b1d0580 | 1938 | dev_err(hba->dev, |
7a3e97b0 SY |
1939 | "Command Descriptor Memory allocation failed\n"); |
1940 | goto out; | |
1941 | } | |
1942 | ||
1943 | /* | |
1944 | * Allocate memory for UTP Transfer descriptors | |
1945 | * UFSHCI requires 1024 byte alignment of UTRD | |
1946 | */ | |
1947 | utrdl_size = (sizeof(struct utp_transfer_req_desc) * hba->nutrs); | |
2953f850 SJ |
1948 | hba->utrdl_base_addr = dmam_alloc_coherent(hba->dev, |
1949 | utrdl_size, | |
1950 | &hba->utrdl_dma_addr, | |
1951 | GFP_KERNEL); | |
7a3e97b0 SY |
1952 | if (!hba->utrdl_base_addr || |
1953 | WARN_ON(hba->utrdl_dma_addr & (PAGE_SIZE - 1))) { | |
3b1d0580 | 1954 | dev_err(hba->dev, |
7a3e97b0 SY |
1955 | "Transfer Descriptor Memory allocation failed\n"); |
1956 | goto out; | |
1957 | } | |
1958 | ||
1959 | /* | |
1960 | * Allocate memory for UTP Task Management descriptors | |
1961 | * UFSHCI requires 1024 byte alignment of UTMRD | |
1962 | */ | |
1963 | utmrdl_size = sizeof(struct utp_task_req_desc) * hba->nutmrs; | |
2953f850 SJ |
1964 | hba->utmrdl_base_addr = dmam_alloc_coherent(hba->dev, |
1965 | utmrdl_size, | |
1966 | &hba->utmrdl_dma_addr, | |
1967 | GFP_KERNEL); | |
7a3e97b0 SY |
1968 | if (!hba->utmrdl_base_addr || |
1969 | WARN_ON(hba->utmrdl_dma_addr & (PAGE_SIZE - 1))) { | |
3b1d0580 | 1970 | dev_err(hba->dev, |
7a3e97b0 SY |
1971 | "Task Management Descriptor Memory allocation failed\n"); |
1972 | goto out; | |
1973 | } | |
1974 | ||
1975 | /* Allocate memory for local reference block */ | |
2953f850 SJ |
1976 | hba->lrb = devm_kzalloc(hba->dev, |
1977 | hba->nutrs * sizeof(struct ufshcd_lrb), | |
1978 | GFP_KERNEL); | |
7a3e97b0 | 1979 | if (!hba->lrb) { |
3b1d0580 | 1980 | dev_err(hba->dev, "LRB Memory allocation failed\n"); |
7a3e97b0 SY |
1981 | goto out; |
1982 | } | |
1983 | return 0; | |
1984 | out: | |
7a3e97b0 SY |
1985 | return -ENOMEM; |
1986 | } | |
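/*
 * Note: dmam_alloc_coherent() and devm_kzalloc() above are managed
 * allocations tied to the lifetime of hba->dev, which is why the error
 * path frees nothing explicitly; the buffers are released automatically
 * when the device is detached from the driver.
 */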
1987 | ||
1988 | /** | |
1989 | * ufshcd_host_memory_configure - configure local reference block with | |
1990 | * memory offsets | |
1991 | * @hba: per adapter instance | |
1992 | * | |
1993 | * Configure Host memory space | |
1994 | * 1. Update Corresponding UTRD.UCDBA and UTRD.UCDBAU with UCD DMA | |
1995 | * address. | |
1996 | * 2. Update each UTRD with Response UPIU offset, Response UPIU length | |
1997 | * and PRDT offset. | |
1998 | * 3. Save the corresponding addresses of UTRD, UCD.CMD, UCD.RSP and UCD.PRDT | |
1999 | * into local reference block. | |
2000 | */ | |
2001 | static void ufshcd_host_memory_configure(struct ufs_hba *hba) | |
2002 | { | |
2003 | struct utp_transfer_cmd_desc *cmd_descp; | |
2004 | struct utp_transfer_req_desc *utrdlp; | |
2005 | dma_addr_t cmd_desc_dma_addr; | |
2006 | dma_addr_t cmd_desc_element_addr; | |
2007 | u16 response_offset; | |
2008 | u16 prdt_offset; | |
2009 | int cmd_desc_size; | |
2010 | int i; | |
2011 | ||
2012 | utrdlp = hba->utrdl_base_addr; | |
2013 | cmd_descp = hba->ucdl_base_addr; | |
2014 | ||
2015 | response_offset = | |
2016 | offsetof(struct utp_transfer_cmd_desc, response_upiu); | |
2017 | prdt_offset = | |
2018 | offsetof(struct utp_transfer_cmd_desc, prd_table); | |
2019 | ||
2020 | cmd_desc_size = sizeof(struct utp_transfer_cmd_desc); | |
2021 | cmd_desc_dma_addr = hba->ucdl_dma_addr; | |
2022 | ||
2023 | for (i = 0; i < hba->nutrs; i++) { | |
2024 | /* Configure UTRD with command descriptor base address */ | |
2025 | cmd_desc_element_addr = | |
2026 | (cmd_desc_dma_addr + (cmd_desc_size * i)); | |
2027 | utrdlp[i].command_desc_base_addr_lo = | |
2028 | cpu_to_le32(lower_32_bits(cmd_desc_element_addr)); | |
2029 | utrdlp[i].command_desc_base_addr_hi = | |
2030 | cpu_to_le32(upper_32_bits(cmd_desc_element_addr)); | |
2031 | ||
2032 | /* Response UPIU and PRDT offsets should be in double words */ | |
2033 | utrdlp[i].response_upiu_offset = | |
2034 | cpu_to_le16((response_offset >> 2)); | |
2035 | utrdlp[i].prd_table_offset = | |
2036 | cpu_to_le16((prdt_offset >> 2)); | |
2037 | utrdlp[i].response_upiu_length = | |
3ca316c5 | 2038 | cpu_to_le16(ALIGNED_UPIU_SIZE >> 2); |
7a3e97b0 SY |
2039 | |
2040 | hba->lrb[i].utr_descriptor_ptr = (utrdlp + i); | |
5a0b0cb9 SRT |
2041 | hba->lrb[i].ucd_req_ptr = |
2042 | (struct utp_upiu_req *)(cmd_descp + i); | |
7a3e97b0 SY |
2043 | hba->lrb[i].ucd_rsp_ptr = |
2044 | (struct utp_upiu_rsp *)cmd_descp[i].response_upiu; | |
2045 | hba->lrb[i].ucd_prdt_ptr = | |
2046 | (struct ufshcd_sg_entry *)cmd_descp[i].prd_table; | |
2047 | } | |
2048 | } | |
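/*
 * Worked example for the ">> 2" conversions above: the UTRD stores the
 * response UPIU and PRDT offsets in double-word (4-byte) units, so a byte
 * offset of 0x200 within the command descriptor is programmed as
 * 0x200 >> 2 = 0x80. (0x200 is illustrative; the real byte offsets come
 * from the offsetof() computations on struct utp_transfer_cmd_desc.)
 */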
2049 | ||
2050 | /** | |
2051 | * ufshcd_dme_link_startup - Notify Unipro to perform link startup | |
2052 | * @hba: per adapter instance | |
2053 | * | |
2054 | * The UIC_CMD_DME_LINK_STARTUP command must be issued to the Unipro | |
2055 | * layer in order to initialize the Unipro link startup procedure. | |
2056 | * Once the Unipro links are up, the device connected to the controller | |
2057 | * is detected. | |
2058 | * | |
2059 | * Returns 0 on success, non-zero value on failure | |
2060 | */ | |
2061 | static int ufshcd_dme_link_startup(struct ufs_hba *hba) | |
2062 | { | |
6ccf44fe SJ |
2063 | struct uic_command uic_cmd = {0}; |
2064 | int ret; | |
7a3e97b0 | 2065 | |
6ccf44fe | 2066 | uic_cmd.command = UIC_CMD_DME_LINK_STARTUP; |
7a3e97b0 | 2067 | |
6ccf44fe SJ |
2068 | ret = ufshcd_send_uic_cmd(hba, &uic_cmd); |
2069 | if (ret) | |
2070 | dev_err(hba->dev, | |
2071 | "dme-link-startup: error code %d\n", ret); | |
2072 | return ret; | |
7a3e97b0 SY |
2073 | } |
2074 | ||
cad2e03d YG |
2075 | static inline void ufshcd_add_delay_before_dme_cmd(struct ufs_hba *hba) |
2076 | { | |
2077 | #define MIN_DELAY_BEFORE_DME_CMDS_US 1000 | |
2078 | unsigned long min_sleep_time_us; | |
2079 | ||
2080 | if (!(hba->quirks & UFSHCD_QUIRK_DELAY_BEFORE_DME_CMDS)) | |
2081 | return; | |
2082 | ||
2083 | /* | |
2084 | * last_dme_cmd_tstamp will be 0 only for 1st call to | |
2085 | * this function | |
2086 | */ | |
2087 | if (unlikely(!ktime_to_us(hba->last_dme_cmd_tstamp))) { | |
2088 | min_sleep_time_us = MIN_DELAY_BEFORE_DME_CMDS_US; | |
2089 | } else { | |
2090 | unsigned long delta = | |
2091 | (unsigned long) ktime_to_us( | |
2092 | ktime_sub(ktime_get(), | |
2093 | hba->last_dme_cmd_tstamp)); | |
2094 | ||
2095 | if (delta < MIN_DELAY_BEFORE_DME_CMDS_US) | |
2096 | min_sleep_time_us = | |
2097 | MIN_DELAY_BEFORE_DME_CMDS_US - delta; | |
2098 | else | |
2099 | return; /* no more delay required */ | |
2100 | } | |
2101 | ||
2102 | /* allow sleep for extra 50us if needed */ | |
2103 | usleep_range(min_sleep_time_us, min_sleep_time_us + 50); | |
2104 | } | |
2105 | ||
12b4fdb4 SJ |
2106 | /** |
2107 | * ufshcd_dme_set_attr - UIC command for DME_SET, DME_PEER_SET | |
2108 | * @hba: per adapter instance | |
2109 | * @attr_sel: uic command argument1 | |
2110 | * @attr_set: attribute set type as uic command argument2 | |
2111 | * @mib_val: setting value as uic command argument3 | |
2112 | * @peer: indicate whether peer or local | |
2113 | * | |
2114 | * Returns 0 on success, non-zero value on failure | |
2115 | */ | |
2116 | int ufshcd_dme_set_attr(struct ufs_hba *hba, u32 attr_sel, | |
2117 | u8 attr_set, u32 mib_val, u8 peer) | |
2118 | { | |
2119 | struct uic_command uic_cmd = {0}; | |
2120 | static const char *const action[] = { | |
2121 | "dme-set", | |
2122 | "dme-peer-set" | |
2123 | }; | |
2124 | const char *set = action[!!peer]; | |
2125 | int ret; | |
2126 | ||
2127 | uic_cmd.command = peer ? | |
2128 | UIC_CMD_DME_PEER_SET : UIC_CMD_DME_SET; | |
2129 | uic_cmd.argument1 = attr_sel; | |
2130 | uic_cmd.argument2 = UIC_ARG_ATTR_TYPE(attr_set); | |
2131 | uic_cmd.argument3 = mib_val; | |
2132 | ||
2133 | ret = ufshcd_send_uic_cmd(hba, &uic_cmd); | |
2134 | if (ret) | |
2135 | dev_err(hba->dev, "%s: attr-id 0x%x val 0x%x error code %d\n", | |
2136 | set, UIC_GET_ATTR_ID(attr_sel), mib_val, ret); | |
2137 | ||
2138 | return ret; | |
2139 | } | |
2140 | EXPORT_SYMBOL_GPL(ufshcd_dme_set_attr); | |
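/*
 * Illustrative sketch (hypothetical helper): callers normally reach this
 * function through the ufshcd_dme_set()/ufshcd_dme_peer_set() wrappers
 * declared in ufshcd.h, e.g. to program a local PA-layer attribute the
 * way ufshcd_change_power_mode() does further below.
 */
static int example_set_local_tx_gear(struct ufs_hba *hba, u32 gear)
{
	return ufshcd_dme_set(hba, UIC_ARG_MIB(PA_TXGEAR), gear);
}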
2141 | ||
2142 | /** | |
2143 | * ufshcd_dme_get_attr - UIC command for DME_GET, DME_PEER_GET | |
2144 | * @hba: per adapter instance | |
2145 | * @attr_sel: uic command argument1 | |
2146 | * @mib_val: the value of the attribute as returned by the UIC command | |
2147 | * @peer: indicate whether peer or local | |
2148 | * | |
2149 | * Returns 0 on success, non-zero value on failure | |
2150 | */ | |
2151 | int ufshcd_dme_get_attr(struct ufs_hba *hba, u32 attr_sel, | |
2152 | u32 *mib_val, u8 peer) | |
2153 | { | |
2154 | struct uic_command uic_cmd = {0}; | |
2155 | static const char *const action[] = { | |
2156 | "dme-get", | |
2157 | "dme-peer-get" | |
2158 | }; | |
2159 | const char *get = action[!!peer]; | |
2160 | int ret; | |
874237f7 YG |
2161 | struct ufs_pa_layer_attr orig_pwr_info; |
2162 | struct ufs_pa_layer_attr temp_pwr_info; | |
2163 | bool pwr_mode_change = false; | |
2164 | ||
2165 | if (peer && (hba->quirks & UFSHCD_QUIRK_DME_PEER_ACCESS_AUTO_MODE)) { | |
2166 | orig_pwr_info = hba->pwr_info; | |
2167 | temp_pwr_info = orig_pwr_info; | |
2168 | ||
2169 | if (orig_pwr_info.pwr_tx == FAST_MODE || | |
2170 | orig_pwr_info.pwr_rx == FAST_MODE) { | |
2171 | temp_pwr_info.pwr_tx = FASTAUTO_MODE; | |
2172 | temp_pwr_info.pwr_rx = FASTAUTO_MODE; | |
2173 | pwr_mode_change = true; | |
2174 | } else if (orig_pwr_info.pwr_tx == SLOW_MODE || | |
2175 | orig_pwr_info.pwr_rx == SLOW_MODE) { | |
2176 | temp_pwr_info.pwr_tx = SLOWAUTO_MODE; | |
2177 | temp_pwr_info.pwr_rx = SLOWAUTO_MODE; | |
2178 | pwr_mode_change = true; | |
2179 | } | |
2180 | if (pwr_mode_change) { | |
2181 | ret = ufshcd_change_power_mode(hba, &temp_pwr_info); | |
2182 | if (ret) | |
2183 | goto out; | |
2184 | } | |
2185 | } | |
12b4fdb4 SJ |
2186 | |
2187 | uic_cmd.command = peer ? | |
2188 | UIC_CMD_DME_PEER_GET : UIC_CMD_DME_GET; | |
2189 | uic_cmd.argument1 = attr_sel; | |
2190 | ||
2191 | ret = ufshcd_send_uic_cmd(hba, &uic_cmd); | |
2192 | if (ret) { | |
2193 | dev_err(hba->dev, "%s: attr-id 0x%x error code %d\n", | |
2194 | get, UIC_GET_ATTR_ID(attr_sel), ret); | |
2195 | goto out; | |
2196 | } | |
2197 | ||
2198 | if (mib_val) | |
2199 | *mib_val = uic_cmd.argument3; | |
874237f7 YG |
2200 | |
2201 | if (peer && (hba->quirks & UFSHCD_QUIRK_DME_PEER_ACCESS_AUTO_MODE) | |
2202 | && pwr_mode_change) | |
2203 | ufshcd_change_power_mode(hba, &orig_pwr_info); | |
12b4fdb4 SJ |
2204 | out: |
2205 | return ret; | |
2206 | } | |
2207 | EXPORT_SYMBOL_GPL(ufshcd_dme_get_attr); | |
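/*
 * Illustrative sketch (hypothetical helper): the peer variant queries the
 * device side of the link, which is how ufshcd_get_max_pwr_mode() further
 * below discovers the device's maximum gear.
 */
static int example_peer_max_rx_hs_gear(struct ufs_hba *hba, u32 *gear)
{
	return ufshcd_dme_peer_get(hba, UIC_ARG_MIB(PA_MAXRXHSGEAR), gear);
}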
2208 | ||
53b3d9c3 | 2209 | /** |
57d104c1 SJ |
2210 | * ufshcd_uic_pwr_ctrl - executes UIC commands that affect the link power |
2211 | * state and waits for the change to take effect. | |
2212 | * | |
53b3d9c3 | 2213 | * @hba: per adapter instance |
57d104c1 SJ |
2214 | * @cmd: UIC command to execute |
2215 | * | |
2216 | * DME operations like DME_SET(PA_PWRMODE), DME_HIBERNATE_ENTER & | |
2217 | * DME_HIBERNATE_EXIT commands take some time to take effect on both the host | |
2218 | * and device UniPro link, and hence their final completion is indicated by | |
2219 | * dedicated status bits in Interrupt Status register (UPMS, UHES, UHXS) in | |
2220 | * addition to normal UIC command completion Status (UCCS). This function only | |
2221 | * returns after the relevant status bits indicate the completion. | |
53b3d9c3 SJ |
2222 | * |
2223 | * Returns 0 on success, non-zero value on failure | |
2224 | */ | |
57d104c1 | 2225 | static int ufshcd_uic_pwr_ctrl(struct ufs_hba *hba, struct uic_command *cmd) |
53b3d9c3 | 2226 | { |
57d104c1 | 2227 | struct completion uic_async_done; |
53b3d9c3 SJ |
2228 | unsigned long flags; |
2229 | u8 status; | |
2230 | int ret; | |
2231 | ||
53b3d9c3 | 2232 | mutex_lock(&hba->uic_cmd_mutex); |
57d104c1 | 2233 | init_completion(&uic_async_done); |
cad2e03d | 2234 | ufshcd_add_delay_before_dme_cmd(hba); |
53b3d9c3 SJ |
2235 | |
2236 | spin_lock_irqsave(hba->host->host_lock, flags); | |
57d104c1 SJ |
2237 | hba->uic_async_done = &uic_async_done; |
2238 | ret = __ufshcd_send_uic_cmd(hba, cmd); | |
53b3d9c3 | 2239 | spin_unlock_irqrestore(hba->host->host_lock, flags); |
53b3d9c3 SJ |
2240 | if (ret) { |
2241 | dev_err(hba->dev, | |
57d104c1 SJ |
2242 | "pwr ctrl cmd 0x%x with mode 0x%x uic error %d\n", |
2243 | cmd->command, cmd->argument3, ret); | |
2244 | goto out; | |
2245 | } | |
2246 | ret = ufshcd_wait_for_uic_cmd(hba, cmd); | |
2247 | if (ret) { | |
2248 | dev_err(hba->dev, | |
2249 | "pwr ctrl cmd 0x%x with mode 0x%x uic error %d\n", | |
2250 | cmd->command, cmd->argument3, ret); | |
53b3d9c3 SJ |
2251 | goto out; |
2252 | } | |
2253 | ||
57d104c1 | 2254 | if (!wait_for_completion_timeout(hba->uic_async_done, |
53b3d9c3 SJ |
2255 | msecs_to_jiffies(UIC_CMD_TIMEOUT))) { |
2256 | dev_err(hba->dev, | |
57d104c1 SJ |
2257 | "pwr ctrl cmd 0x%x with mode 0x%x completion timeout\n", |
2258 | cmd->command, cmd->argument3); | |
53b3d9c3 SJ |
2259 | ret = -ETIMEDOUT; |
2260 | goto out; | |
2261 | } | |
2262 | ||
2263 | status = ufshcd_get_upmcrs(hba); | |
2264 | if (status != PWR_LOCAL) { | |
2265 | dev_err(hba->dev, | |
57d104c1 SJ |
2266 | "pwr ctrl cmd 0x%0x failed, host umpcrs:0x%x\n", |
2267 | cmd->command, status); | |
53b3d9c3 SJ |
2268 | ret = (status != PWR_OK) ? status : -1; |
2269 | } | |
2270 | out: | |
2271 | spin_lock_irqsave(hba->host->host_lock, flags); | |
57d104c1 | 2272 | hba->uic_async_done = NULL; |
53b3d9c3 SJ |
2273 | spin_unlock_irqrestore(hba->host->host_lock, flags); |
2274 | mutex_unlock(&hba->uic_cmd_mutex); | |
1ab27c9c | 2275 | |
53b3d9c3 SJ |
2276 | return ret; |
2277 | } | |
2278 | ||
57d104c1 SJ |
2279 | /** |
2280 | * ufshcd_uic_change_pwr_mode - Perform the UIC power mode change | |
2281 | * using DME_SET primitives. | |
2282 | * @hba: per adapter instance | |
2283 | * @mode: power mode value | |
2284 | * | |
2285 | * Returns 0 on success, non-zero value on failure | |
2286 | */ | |
2287 | static int ufshcd_uic_change_pwr_mode(struct ufs_hba *hba, u8 mode) | |
2288 | { | |
2289 | struct uic_command uic_cmd = {0}; | |
1ab27c9c | 2290 | int ret; |
57d104c1 | 2291 | |
c3a2f9ee YG |
2292 | if (hba->quirks & UFSHCD_QUIRK_BROKEN_PA_RXHSUNTERMCAP) { |
2293 | ret = ufshcd_dme_set(hba, | |
2294 | UIC_ARG_MIB_SEL(PA_RXHSUNTERMCAP, 0), 1); | |
2295 | if (ret) { | |
2296 | dev_err(hba->dev, "%s: failed to enable PA_RXHSUNTERMCAP ret %d\n", | |
2297 | __func__, ret); | |
2298 | goto out; | |
2299 | } | |
2300 | } | |
2301 | ||
57d104c1 SJ |
2302 | uic_cmd.command = UIC_CMD_DME_SET; |
2303 | uic_cmd.argument1 = UIC_ARG_MIB(PA_PWRMODE); | |
2304 | uic_cmd.argument3 = mode; | |
1ab27c9c ST |
2305 | ufshcd_hold(hba, false); |
2306 | ret = ufshcd_uic_pwr_ctrl(hba, &uic_cmd); | |
2307 | ufshcd_release(hba); | |
57d104c1 | 2308 | |
c3a2f9ee | 2309 | out: |
1ab27c9c | 2310 | return ret; |
57d104c1 SJ |
2311 | } |
2312 | ||
2313 | static int ufshcd_uic_hibern8_enter(struct ufs_hba *hba) | |
2314 | { | |
2315 | struct uic_command uic_cmd = {0}; | |
2316 | ||
2317 | uic_cmd.command = UIC_CMD_DME_HIBER_ENTER; | |
2318 | ||
2319 | return ufshcd_uic_pwr_ctrl(hba, &uic_cmd); | |
2320 | } | |
2321 | ||
2322 | static int ufshcd_uic_hibern8_exit(struct ufs_hba *hba) | |
2323 | { | |
2324 | struct uic_command uic_cmd = {0}; | |
2325 | int ret; | |
2326 | ||
2327 | uic_cmd.command = UIC_CMD_DME_HIBER_EXIT; | |
2328 | ret = ufshcd_uic_pwr_ctrl(hba, &uic_cmd); | |
2329 | if (ret) { | |
2330 | ufshcd_set_link_off(hba); | |
2331 | ret = ufshcd_host_reset_and_restore(hba); | |
2332 | } | |
2333 | ||
2334 | return ret; | |
2335 | } | |
2336 | ||
5064636c YG |
2337 | /** |
2338 | * ufshcd_init_pwr_info - setting the POR (power on reset) | |
2339 | * values in hba power info | |
2340 | * @hba: per-adapter instance | |
2341 | */ | |
2342 | static void ufshcd_init_pwr_info(struct ufs_hba *hba) | |
2343 | { | |
2344 | hba->pwr_info.gear_rx = UFS_PWM_G1; | |
2345 | hba->pwr_info.gear_tx = UFS_PWM_G1; | |
2346 | hba->pwr_info.lane_rx = 1; | |
2347 | hba->pwr_info.lane_tx = 1; | |
2348 | hba->pwr_info.pwr_rx = SLOWAUTO_MODE; | |
2349 | hba->pwr_info.pwr_tx = SLOWAUTO_MODE; | |
2350 | hba->pwr_info.hs_rate = 0; | |
2351 | } | |
2352 | ||
d3e89bac | 2353 | /** |
7eb584db DR |
2354 | * ufshcd_get_max_pwr_mode - reads the max power mode negotiated with device |
2355 | * @hba: per-adapter instance | |
d3e89bac | 2356 | */ |
7eb584db | 2357 | static int ufshcd_get_max_pwr_mode(struct ufs_hba *hba) |
d3e89bac | 2358 | { |
7eb584db DR |
2359 | struct ufs_pa_layer_attr *pwr_info = &hba->max_pwr_info.info; |
2360 | ||
2361 | if (hba->max_pwr_info.is_valid) | |
2362 | return 0; | |
2363 | ||
2364 | pwr_info->pwr_tx = FASTAUTO_MODE; | |
2365 | pwr_info->pwr_rx = FASTAUTO_MODE; | |
2366 | pwr_info->hs_rate = PA_HS_MODE_B; | |
d3e89bac SJ |
2367 | |
2368 | /* Get the connected lane count */ | |
7eb584db DR |
2369 | ufshcd_dme_get(hba, UIC_ARG_MIB(PA_CONNECTEDRXDATALANES), |
2370 | &pwr_info->lane_rx); | |
2371 | ufshcd_dme_get(hba, UIC_ARG_MIB(PA_CONNECTEDTXDATALANES), | |
2372 | &pwr_info->lane_tx); | |
2373 | ||
2374 | if (!pwr_info->lane_rx || !pwr_info->lane_tx) { | |
2375 | dev_err(hba->dev, "%s: invalid connected lanes value. rx=%d, tx=%d\n", | |
2376 | __func__, | |
2377 | pwr_info->lane_rx, | |
2378 | pwr_info->lane_tx); | |
2379 | return -EINVAL; | |
2380 | } | |
d3e89bac SJ |
2381 | |
2382 | /* | |
2383 | * First, get the maximum gears of HS speed. | |
2384 | * If it is zero, there is no HSGEAR capability. | |
2385 | * Then, get the maximum gears of PWM speed. | |
2386 | */ | |
7eb584db DR |
2387 | ufshcd_dme_get(hba, UIC_ARG_MIB(PA_MAXRXHSGEAR), &pwr_info->gear_rx); |
2388 | if (!pwr_info->gear_rx) { | |
2389 | ufshcd_dme_get(hba, UIC_ARG_MIB(PA_MAXRXPWMGEAR), | |
2390 | &pwr_info->gear_rx); | |
2391 | if (!pwr_info->gear_rx) { | |
2392 | dev_err(hba->dev, "%s: invalid max pwm rx gear read = %d\n", | |
2393 | __func__, pwr_info->gear_rx); | |
2394 | return -EINVAL; | |
2395 | } | |
2396 | pwr_info->pwr_rx = SLOWAUTO_MODE; | |
d3e89bac SJ |
2397 | } |
2398 | ||
7eb584db DR |
2399 | ufshcd_dme_peer_get(hba, UIC_ARG_MIB(PA_MAXRXHSGEAR), |
2400 | &pwr_info->gear_tx); | |
2401 | if (!pwr_info->gear_tx) { | |
d3e89bac | 2402 | ufshcd_dme_peer_get(hba, UIC_ARG_MIB(PA_MAXRXPWMGEAR), |
7eb584db DR |
2403 | &pwr_info->gear_tx); |
2404 | if (!pwr_info->gear_tx) { | |
2405 | dev_err(hba->dev, "%s: invalid max pwm tx gear read = %d\n", | |
2406 | __func__, pwr_info->gear_tx); | |
2407 | return -EINVAL; | |
2408 | } | |
2409 | pwr_info->pwr_tx = SLOWAUTO_MODE; | |
2410 | } | |
2411 | ||
2412 | hba->max_pwr_info.is_valid = true; | |
2413 | return 0; | |
2414 | } | |
2415 | ||
2416 | static int ufshcd_change_power_mode(struct ufs_hba *hba, | |
2417 | struct ufs_pa_layer_attr *pwr_mode) | |
2418 | { | |
2419 | int ret; | |
2420 | ||
2421 | /* if already configured to the requested pwr_mode */ | |
2422 | if (pwr_mode->gear_rx == hba->pwr_info.gear_rx && | |
2423 | pwr_mode->gear_tx == hba->pwr_info.gear_tx && | |
2424 | pwr_mode->lane_rx == hba->pwr_info.lane_rx && | |
2425 | pwr_mode->lane_tx == hba->pwr_info.lane_tx && | |
2426 | pwr_mode->pwr_rx == hba->pwr_info.pwr_rx && | |
2427 | pwr_mode->pwr_tx == hba->pwr_info.pwr_tx && | |
2428 | pwr_mode->hs_rate == hba->pwr_info.hs_rate) { | |
2429 | dev_dbg(hba->dev, "%s: power already configured\n", __func__); | |
2430 | return 0; | |
d3e89bac SJ |
2431 | } |
2432 | ||
2433 | /* | |
2434 | * Configure attributes for power mode change with below. | |
2435 | * - PA_RXGEAR, PA_ACTIVERXDATALANES, PA_RXTERMINATION, | |
2436 | * - PA_TXGEAR, PA_ACTIVETXDATALANES, PA_TXTERMINATION, | |
2437 | * - PA_HSSERIES | |
2438 | */ | |
7eb584db DR |
2439 | ufshcd_dme_set(hba, UIC_ARG_MIB(PA_RXGEAR), pwr_mode->gear_rx); |
2440 | ufshcd_dme_set(hba, UIC_ARG_MIB(PA_ACTIVERXDATALANES), | |
2441 | pwr_mode->lane_rx); | |
2442 | if (pwr_mode->pwr_rx == FASTAUTO_MODE || | |
2443 | pwr_mode->pwr_rx == FAST_MODE) | |
d3e89bac | 2444 | ufshcd_dme_set(hba, UIC_ARG_MIB(PA_RXTERMINATION), TRUE); |
7eb584db DR |
2445 | else |
2446 | ufshcd_dme_set(hba, UIC_ARG_MIB(PA_RXTERMINATION), FALSE); | |
d3e89bac | 2447 | |
7eb584db DR |
2448 | ufshcd_dme_set(hba, UIC_ARG_MIB(PA_TXGEAR), pwr_mode->gear_tx); |
2449 | ufshcd_dme_set(hba, UIC_ARG_MIB(PA_ACTIVETXDATALANES), | |
2450 | pwr_mode->lane_tx); | |
2451 | if (pwr_mode->pwr_tx == FASTAUTO_MODE || | |
2452 | pwr_mode->pwr_tx == FAST_MODE) | |
d3e89bac | 2453 | ufshcd_dme_set(hba, UIC_ARG_MIB(PA_TXTERMINATION), TRUE); |
7eb584db DR |
2454 | else |
2455 | ufshcd_dme_set(hba, UIC_ARG_MIB(PA_TXTERMINATION), FALSE); | |
d3e89bac | 2456 | |
7eb584db DR |
2457 | if (pwr_mode->pwr_rx == FASTAUTO_MODE || |
2458 | pwr_mode->pwr_tx == FASTAUTO_MODE || | |
2459 | pwr_mode->pwr_rx == FAST_MODE || | |
2460 | pwr_mode->pwr_tx == FAST_MODE) | |
2461 | ufshcd_dme_set(hba, UIC_ARG_MIB(PA_HSSERIES), | |
2462 | pwr_mode->hs_rate); | |
d3e89bac | 2463 | |
7eb584db DR |
2464 | ret = ufshcd_uic_change_pwr_mode(hba, pwr_mode->pwr_rx << 4 |
2465 | | pwr_mode->pwr_tx); | |
2466 | ||
2467 | if (ret) { | |
d3e89bac | 2468 | dev_err(hba->dev, |
7eb584db DR |
2469 | "%s: power mode change failed %d\n", __func__, ret); |
2470 | } else { | |
2471 | if (hba->vops && hba->vops->pwr_change_notify) | |
2472 | hba->vops->pwr_change_notify(hba, | |
2473 | POST_CHANGE, NULL, pwr_mode); | |
2474 | ||
2475 | memcpy(&hba->pwr_info, pwr_mode, | |
2476 | sizeof(struct ufs_pa_layer_attr)); | |
2477 | } | |
2478 | ||
2479 | return ret; | |
2480 | } | |
2481 | ||
2482 | /** | |
2483 | * ufshcd_config_pwr_mode - configure a new power mode | |
2484 | * @hba: per-adapter instance | |
2485 | * @desired_pwr_mode: desired power configuration | |
2486 | */ | |
2487 | static int ufshcd_config_pwr_mode(struct ufs_hba *hba, | |
2488 | struct ufs_pa_layer_attr *desired_pwr_mode) | |
2489 | { | |
2490 | struct ufs_pa_layer_attr final_params = { 0 }; | |
2491 | int ret; | |
2492 | ||
2493 | if (hba->vops && hba->vops->pwr_change_notify) | |
2494 | hba->vops->pwr_change_notify(hba, | |
2495 | PRE_CHANGE, desired_pwr_mode, &final_params); | |
2496 | else | |
2497 | memcpy(&final_params, desired_pwr_mode, sizeof(final_params)); | |
2498 | ||
2499 | ret = ufshcd_change_power_mode(hba, &final_params); | |
d3e89bac SJ |
2500 | |
2501 | return ret; | |
2502 | } | |
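/*
 * Illustrative sketch (hypothetical helper, assuming the probe path wires
 * these two functions together this way): negotiate the fastest mode both
 * sides support by feeding the ufshcd_get_max_pwr_mode() result into
 * ufshcd_config_pwr_mode().
 */
static int example_scale_to_max_pwr_mode(struct ufs_hba *hba)
{
	int ret;

	ret = ufshcd_get_max_pwr_mode(hba);
	if (ret)
		return ret;

	return ufshcd_config_pwr_mode(hba, &hba->max_pwr_info.info);
}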
2503 | ||
68078d5c DR |
2504 | /** |
2505 | * ufshcd_complete_dev_init() - checks device readiness | |
2506 | * @hba: per-adapter instance | |
2507 | * | |
2508 | * Set fDeviceInit flag and poll until device toggles it. | |
2509 | */ | |
2510 | static int ufshcd_complete_dev_init(struct ufs_hba *hba) | |
2511 | { | |
2512 | int i, retries, err = 0; | |
2513 | bool flag_res = 1; | |
2514 | ||
2515 | for (retries = QUERY_REQ_RETRIES; retries > 0; retries--) { | |
2516 | /* Set the fDeviceInit flag */ | |
2517 | err = ufshcd_query_flag(hba, UPIU_QUERY_OPCODE_SET_FLAG, | |
2518 | QUERY_FLAG_IDN_FDEVICEINIT, NULL); | |
2519 | if (!err || err == -ETIMEDOUT) | |
2520 | break; | |
2521 | dev_dbg(hba->dev, "%s: error %d retrying\n", __func__, err); | |
2522 | } | |
2523 | if (err) { | |
2524 | dev_err(hba->dev, | |
2525 | "%s setting fDeviceInit flag failed with error %d\n", | |
2526 | __func__, err); | |
2527 | goto out; | |
2528 | } | |
2529 | ||
2530 | /* poll for max. 100 iterations for fDeviceInit flag to clear */ | |
2531 | for (i = 0; i < 100 && !err && flag_res; i++) { | |
2532 | for (retries = QUERY_REQ_RETRIES; retries > 0; retries--) { | |
2533 | err = ufshcd_query_flag(hba, | |
2534 | UPIU_QUERY_OPCODE_READ_FLAG, | |
2535 | QUERY_FLAG_IDN_FDEVICEINIT, &flag_res); | |
2536 | if (!err || err == -ETIMEDOUT) | |
2537 | break; | |
2538 | dev_dbg(hba->dev, "%s: error %d retrying\n", __func__, | |
2539 | err); | |
2540 | } | |
2541 | } | |
2542 | if (err) | |
2543 | dev_err(hba->dev, | |
2544 | "%s reading fDeviceInit flag failed with error %d\n", | |
2545 | __func__, err); | |
2546 | else if (flag_res) | |
2547 | dev_err(hba->dev, | |
2548 | "%s fDeviceInit was not cleared by the device\n", | |
2549 | __func__); | |
2550 | ||
2551 | out: | |
2552 | return err; | |
2553 | } | |
2554 | ||
7a3e97b0 SY |
2555 | /** |
2556 | * ufshcd_make_hba_operational - Make UFS controller operational | |
2557 | * @hba: per adapter instance | |
2558 | * | |
2559 | * To bring UFS host controller to operational state, | |
5c0c28a8 SRT |
2560 | * 1. Enable required interrupts |
2561 | * 2. Configure interrupt aggregation | |
2562 | * 3. Program UTRL and UTMRL base address registers | |
2563 | * 4. Configure run-stop-registers | |
7a3e97b0 SY |
2564 | * |
2565 | * Returns 0 on success, non-zero value on failure | |
2566 | */ | |
2567 | static int ufshcd_make_hba_operational(struct ufs_hba *hba) | |
2568 | { | |
2569 | int err = 0; | |
2570 | u32 reg; | |
2571 | ||
6ccf44fe SJ |
2572 | /* Enable required interrupts */ |
2573 | ufshcd_enable_intr(hba, UFSHCD_ENABLE_INTRS); | |
2574 | ||
2575 | /* Configure interrupt aggregation */ | |
b852190e YG |
2576 | if (ufshcd_is_intr_aggr_allowed(hba)) |
2577 | ufshcd_config_intr_aggr(hba, hba->nutrs - 1, INT_AGGR_DEF_TO); | |
2578 | else | |
2579 | ufshcd_disable_intr_aggr(hba); | |
6ccf44fe SJ |
2580 | |
2581 | /* Configure UTRL and UTMRL base address registers */ | |
2582 | ufshcd_writel(hba, lower_32_bits(hba->utrdl_dma_addr), | |
2583 | REG_UTP_TRANSFER_REQ_LIST_BASE_L); | |
2584 | ufshcd_writel(hba, upper_32_bits(hba->utrdl_dma_addr), | |
2585 | REG_UTP_TRANSFER_REQ_LIST_BASE_H); | |
2586 | ufshcd_writel(hba, lower_32_bits(hba->utmrdl_dma_addr), | |
2587 | REG_UTP_TASK_REQ_LIST_BASE_L); | |
2588 | ufshcd_writel(hba, upper_32_bits(hba->utmrdl_dma_addr), | |
2589 | REG_UTP_TASK_REQ_LIST_BASE_H); | |
2590 | ||
7a3e97b0 SY |
2591 | /* |
2592 | * UCRDY, UTMRLDY and UTRLRDY bits must be 1 | |
2593 | * DEI, HEI bits must be 0 | |
2594 | */ | |
5c0c28a8 | 2595 | reg = ufshcd_readl(hba, REG_CONTROLLER_STATUS); |
7a3e97b0 SY |
2596 | if (!(ufshcd_get_lists_status(reg))) { |
2597 | ufshcd_enable_run_stop_reg(hba); | |
2598 | } else { | |
3b1d0580 | 2599 | dev_err(hba->dev, |
7a3e97b0 SY |
2600 | "Host controller not ready to process requests"); |
2601 | err = -EIO; | |
2602 | goto out; | |
2603 | } | |
2604 | ||
7a3e97b0 SY |
2605 | out: |
2606 | return err; | |
2607 | } | |
2608 | ||
2609 | /** | |
2610 | * ufshcd_hba_enable - initialize the controller | |
2611 | * @hba: per adapter instance | |
2612 | * | |
2613 | * The controller resets itself and controller firmware initialization | |
2614 | * sequence kicks off. When controller is ready it will set | |
2615 | * the Host Controller Enable bit to 1. | |
2616 | * | |
2617 | * Returns 0 on success, non-zero value on failure | |
2618 | */ | |
2619 | static int ufshcd_hba_enable(struct ufs_hba *hba) | |
2620 | { | |
2621 | int retry; | |
2622 | ||
2623 | /* | |
2624 | * msleep of 1 and 5 used in this function might result in msleep(20), | |
2625 | * but it was necessary to send the UFS FPGA to reset mode during | |
2626 | * development and testing of this driver. msleep can be changed to | |
2627 | * mdelay and retry count can be reduced based on the controller. | |
2628 | */ | |
2629 | if (!ufshcd_is_hba_active(hba)) { | |
2630 | ||
2631 | /* change controller state to "reset state" */ | |
2632 | ufshcd_hba_stop(hba); | |
2633 | ||
2634 | /* | |
2635 | * This delay is based on the testing done with UFS host | |
2636 | * controller FPGA. The delay can be changed based on the | |
2637 | * host controller used. | |
2638 | */ | |
2639 | msleep(5); | |
2640 | } | |
2641 | ||
57d104c1 SJ |
2642 | /* UniPro link is disabled at this point */ |
2643 | ufshcd_set_link_off(hba); | |
2644 | ||
5c0c28a8 SRT |
2645 | if (hba->vops && hba->vops->hce_enable_notify) |
2646 | hba->vops->hce_enable_notify(hba, PRE_CHANGE); | |
2647 | ||
7a3e97b0 SY |
2648 | /* start controller initialization sequence */ |
2649 | ufshcd_hba_start(hba); | |
2650 | ||
2651 | /* | |
2652 | * To initialize a UFS host controller HCE bit must be set to 1. | |
2653 | * During initialization the HCE bit value changes from 1->0->1. | |
2654 | * When the host controller completes initialization sequence | |
2655 | * it sets the value of HCE bit to 1. The same HCE bit is read back | |
2656 | * to check if the controller has completed initialization sequence. | |
2657 | * So without this delay the value HCE = 1, set in the previous | |
2658 | * instruction might be read back. | |
2659 | * This delay can be changed based on the controller. | |
2660 | */ | |
2661 | msleep(1); | |
2662 | ||
2663 | /* wait for the host controller to complete initialization */ | |
2664 | retry = 10; | |
2665 | while (ufshcd_is_hba_active(hba)) { | |
2666 | if (retry) { | |
2667 | retry--; | |
2668 | } else { | |
3b1d0580 | 2669 | dev_err(hba->dev, |
7a3e97b0 SY |
2670 | "Controller enable failed\n"); |
2671 | return -EIO; | |
2672 | } | |
2673 | msleep(5); | |
2674 | } | |
5c0c28a8 | 2675 | |
1d337ec2 | 2676 | /* enable UIC related interrupts */ |
57d104c1 | 2677 | ufshcd_enable_intr(hba, UFSHCD_UIC_MASK); |
1d337ec2 | 2678 | |
5c0c28a8 SRT |
2679 | if (hba->vops && hba->vops->hce_enable_notify) |
2680 | hba->vops->hce_enable_notify(hba, POST_CHANGE); | |
2681 | ||
7a3e97b0 SY |
2682 | return 0; |
2683 | } | |
2684 | ||
7ca38cf3 YG |
2685 | static int ufshcd_disable_tx_lcc(struct ufs_hba *hba, bool peer) |
2686 | { | |
2687 | int tx_lanes, i, err = 0; | |
2688 | ||
2689 | if (!peer) | |
2690 | ufshcd_dme_get(hba, UIC_ARG_MIB(PA_CONNECTEDTXDATALANES), | |
2691 | &tx_lanes); | |
2692 | else | |
2693 | ufshcd_dme_peer_get(hba, UIC_ARG_MIB(PA_CONNECTEDTXDATALANES), | |
2694 | &tx_lanes); | |
2695 | for (i = 0; i < tx_lanes; i++) { | |
2696 | if (!peer) | |
2697 | err = ufshcd_dme_set(hba, | |
2698 | UIC_ARG_MIB_SEL(TX_LCC_ENABLE, | |
2699 | UIC_ARG_MPHY_TX_GEN_SEL_INDEX(i)), | |
2700 | 0); | |
2701 | else | |
2702 | err = ufshcd_dme_peer_set(hba, | |
2703 | UIC_ARG_MIB_SEL(TX_LCC_ENABLE, | |
2704 | UIC_ARG_MPHY_TX_GEN_SEL_INDEX(i)), | |
2705 | 0); | |
2706 | if (err) { | |
2707 | dev_err(hba->dev, "%s: TX LCC Disable failed, peer = %d, lane = %d, err = %d", | |
2708 | __func__, peer, i, err); | |
2709 | break; | |
2710 | } | |
2711 | } | |
2712 | ||
2713 | return err; | |
2714 | } | |
2715 | ||
2716 | static inline int ufshcd_disable_device_tx_lcc(struct ufs_hba *hba) | |
2717 | { | |
2718 | return ufshcd_disable_tx_lcc(hba, true); | |
2719 | } | |
2720 | ||
7a3e97b0 | 2721 | /** |
6ccf44fe | 2722 | * ufshcd_link_startup - Initialize unipro link startup |
7a3e97b0 SY |
2723 | * @hba: per adapter instance |
2724 | * | |
6ccf44fe | 2725 | * Returns 0 for success, non-zero in case of failure |
7a3e97b0 | 2726 | */ |
6ccf44fe | 2727 | static int ufshcd_link_startup(struct ufs_hba *hba) |
7a3e97b0 | 2728 | { |
6ccf44fe | 2729 | int ret; |
1d337ec2 | 2730 | int retries = DME_LINKSTARTUP_RETRIES; |
7a3e97b0 | 2731 | |
1d337ec2 SRT |
2732 | do { |
2733 | if (hba->vops && hba->vops->link_startup_notify) | |
2734 | hba->vops->link_startup_notify(hba, PRE_CHANGE); | |
6ccf44fe | 2735 | |
1d337ec2 | 2736 | ret = ufshcd_dme_link_startup(hba); |
5c0c28a8 | 2737 | |
1d337ec2 SRT |
2738 | /* check if device is detected by inter-connect layer */ |
2739 | if (!ret && !ufshcd_is_device_present(hba)) { | |
2740 | dev_err(hba->dev, "%s: Device not present\n", __func__); | |
2741 | ret = -ENXIO; | |
2742 | goto out; | |
2743 | } | |
6ccf44fe | 2744 | |
1d337ec2 SRT |
2745 | /* |
2746 | * DME link lost indication is only received when link is up, | |
2747 | * but we can't be sure if the link is up until link startup | |
2748 | * succeeds. So reset the local Uni-Pro and try again. | |
2749 | */ | |
2750 | if (ret && ufshcd_hba_enable(hba)) | |
2751 | goto out; | |
2752 | } while (ret && retries--); | |
2753 | ||
2754 | if (ret) | |
2755 | /* failed to get the link up... give up */ | |
5c0c28a8 | 2756 | goto out; |
5c0c28a8 | 2757 | |
7ca38cf3 YG |
2758 | if (hba->quirks & UFSHCD_QUIRK_BROKEN_LCC) { |
2759 | ret = ufshcd_disable_device_tx_lcc(hba); | |
2760 | if (ret) | |
2761 | goto out; | |
2762 | } | |
2763 | ||
5c0c28a8 SRT |
2764 | /* Include any host controller configuration via UIC commands */ |
2765 | if (hba->vops && hba->vops->link_startup_notify) { | |
2766 | ret = hba->vops->link_startup_notify(hba, POST_CHANGE); | |
2767 | if (ret) | |
2768 | goto out; | |
2769 | } | |
7a3e97b0 | 2770 | |
5c0c28a8 | 2771 | ret = ufshcd_make_hba_operational(hba); |
6ccf44fe SJ |
2772 | out: |
2773 | if (ret) | |
2774 | dev_err(hba->dev, "link startup failed %d\n", ret); | |
2775 | return ret; | |
7a3e97b0 SY |
2776 | } |
2777 | ||
5a0b0cb9 SRT |
2778 | /** |
2779 | * ufshcd_verify_dev_init() - Verify device initialization | |
2780 | * @hba: per-adapter instance | |
2781 | * | |
2782 | * Send NOP OUT UPIU and wait for NOP IN response to check whether the | |
2783 | * device Transport Protocol (UTP) layer is ready after a reset. | |
2784 | * If the UTP layer at the device side is not initialized, it may | |
2785 | * not respond with NOP IN UPIU within timeout of %NOP_OUT_TIMEOUT | |
2786 | * and we retry sending NOP OUT for %NOP_OUT_RETRIES iterations. | |
2787 | */ | |
2788 | static int ufshcd_verify_dev_init(struct ufs_hba *hba) | |
2789 | { | |
2790 | int err = 0; | |
2791 | int retries; | |
2792 | ||
1ab27c9c | 2793 | ufshcd_hold(hba, false); |
5a0b0cb9 SRT |
2794 | mutex_lock(&hba->dev_cmd.lock); |
2795 | for (retries = NOP_OUT_RETRIES; retries > 0; retries--) { | |
2796 | err = ufshcd_exec_dev_cmd(hba, DEV_CMD_TYPE_NOP, | |
2797 | NOP_OUT_TIMEOUT); | |
2798 | ||
2799 | if (!err || err == -ETIMEDOUT) | |
2800 | break; | |
2801 | ||
2802 | dev_dbg(hba->dev, "%s: error %d retrying\n", __func__, err); | |
2803 | } | |
2804 | mutex_unlock(&hba->dev_cmd.lock); | |
1ab27c9c | 2805 | ufshcd_release(hba); |
5a0b0cb9 SRT |
2806 | |
2807 | if (err) | |
2808 | dev_err(hba->dev, "%s: NOP OUT failed %d\n", __func__, err); | |
2809 | return err; | |
2810 | } | |
2811 | ||
0ce147d4 SJ |
2812 | /** |
2813 | * ufshcd_set_queue_depth - set lun queue depth | |
2814 | * @sdev: pointer to SCSI device | |
2815 | * | |
2816 | * Read bLUQueueDepth value and activate scsi tagged command | |
2817 | * queueing. For WLUN, queue depth is set to 1. For best-effort | |
2818 | * cases (bLUQueueDepth = 0) the queue depth is set to the maximum | |
2819 | * value that the host can queue. | |
2820 | */ | |
2821 | static void ufshcd_set_queue_depth(struct scsi_device *sdev) | |
2822 | { | |
2823 | int ret = 0; | |
2824 | u8 lun_qdepth; | |
2825 | struct ufs_hba *hba; | |
2826 | ||
2827 | hba = shost_priv(sdev->host); | |
2828 | ||
2829 | lun_qdepth = hba->nutrs; | |
2830 | ret = ufshcd_read_unit_desc_param(hba, | |
2831 | ufshcd_scsi_to_upiu_lun(sdev->lun), | |
2832 | UNIT_DESC_PARAM_LU_Q_DEPTH, | |
2833 | &lun_qdepth, | |
2834 | sizeof(lun_qdepth)); | |
2835 | ||
2836 | /* Some WLUNs don't support unit descriptors */ | |
2837 | if (ret == -EOPNOTSUPP) | |
2838 | lun_qdepth = 1; | |
2839 | else if (!lun_qdepth) | |
2840 | /* eventually, we can figure out the real queue depth */ | |
2841 | lun_qdepth = hba->nutrs; | |
2842 | else | |
2843 | lun_qdepth = min_t(int, lun_qdepth, hba->nutrs); | |
2844 | ||
2845 | dev_dbg(hba->dev, "%s: activate tcq with queue depth %d\n", | |
2846 | __func__, lun_qdepth); | |
db5ed4df | 2847 | scsi_change_queue_depth(sdev, lun_qdepth); |
0ce147d4 SJ |
2848 | } |
2849 | ||
57d104c1 SJ |
2850 | /* |
2851 | * ufshcd_get_lu_wp - returns the "b_lu_write_protect" from UNIT DESCRIPTOR | |
2852 | * @hba: per-adapter instance | |
2853 | * @lun: UFS device lun id | |
2854 | * @b_lu_write_protect: pointer to buffer to hold the LU's write protect info | |
2855 | * | |
2856 | * Returns 0 in case of success; the b_lu_write_protect status is returned | |
2857 | * in the @b_lu_write_protect parameter. | |
2858 | * Returns -ENOTSUPP if reading b_lu_write_protect is not supported. | |
2859 | * Returns -EINVAL in case of invalid parameters passed to this function. | |
2860 | */ | |
2861 | static int ufshcd_get_lu_wp(struct ufs_hba *hba, | |
2862 | u8 lun, | |
2863 | u8 *b_lu_write_protect) | |
2864 | { | |
2865 | int ret; | |
2866 | ||
2867 | if (!b_lu_write_protect) | |
2868 | ret = -EINVAL; | |
2869 | /* | |
2870 | * According to UFS device spec, RPMB LU can't be write | |
2871 | * protected so skip reading bLUWriteProtect parameter for | |
2872 | * it. For other W-LUs, UNIT DESCRIPTOR is not available. | |
2873 | */ | |
2874 | else if (lun >= UFS_UPIU_MAX_GENERAL_LUN) | |
2875 | ret = -ENOTSUPP; | |
2876 | else | |
2877 | ret = ufshcd_read_unit_desc_param(hba, | |
2878 | lun, | |
2879 | UNIT_DESC_PARAM_LU_WR_PROTECT, | |
2880 | b_lu_write_protect, | |
2881 | sizeof(*b_lu_write_protect)); | |
2882 | return ret; | |
2883 | } | |
2884 | ||
2885 | /** | |
2886 | * ufshcd_get_lu_power_on_wp_status - get LU's power on write protect | |
2887 | * status | |
2888 | * @hba: per-adapter instance | |
2889 | * @sdev: pointer to SCSI device | |
2890 | * | |
2891 | */ | |
2892 | static inline void ufshcd_get_lu_power_on_wp_status(struct ufs_hba *hba, | |
2893 | struct scsi_device *sdev) | |
2894 | { | |
2895 | if (hba->dev_info.f_power_on_wp_en && | |
2896 | !hba->dev_info.is_lu_power_on_wp) { | |
2897 | u8 b_lu_write_protect; | |
2898 | ||
2899 | if (!ufshcd_get_lu_wp(hba, ufshcd_scsi_to_upiu_lun(sdev->lun), | |
2900 | &b_lu_write_protect) && | |
2901 | (b_lu_write_protect == UFS_LU_POWER_ON_WP)) | |
2902 | hba->dev_info.is_lu_power_on_wp = true; | |
2903 | } | |
2904 | } | |
2905 | ||
7a3e97b0 SY |
2906 | /** |
2907 | * ufshcd_slave_alloc - handle initial SCSI device configurations | |
2908 | * @sdev: pointer to SCSI device | |
2909 | * | |
2910 | * Returns 0 (success) | |
2911 | */ | |
2912 | static int ufshcd_slave_alloc(struct scsi_device *sdev) | |
2913 | { | |
2914 | struct ufs_hba *hba; | |
2915 | ||
2916 | hba = shost_priv(sdev->host); | |
7a3e97b0 SY |
2917 | |
2918 | /* Mode sense(6) is not supported by UFS, so use Mode sense(10) */ | |
2919 | sdev->use_10_for_ms = 1; | |
7a3e97b0 | 2920 | |
e8e7f271 SRT |
2921 | /* allow SCSI layer to restart the device in case of errors */ |
2922 | sdev->allow_restart = 1; | |
4264fd61 | 2923 | |
b2a6c522 SRT |
2924 | /* REPORT SUPPORTED OPERATION CODES is not supported */ |
2925 | sdev->no_report_opcodes = 1; | |
2926 | ||
e8e7f271 | 2927 | |
0ce147d4 | 2928 | ufshcd_set_queue_depth(sdev); |
4264fd61 | 2929 | |
57d104c1 SJ |
2930 | ufshcd_get_lu_power_on_wp_status(hba, sdev); |
2931 | ||
7a3e97b0 SY |
2932 | return 0; |
2933 | } | |
2934 | ||
4264fd61 SRT |
2935 | /** |
2936 | * ufshcd_change_queue_depth - change queue depth | |
2937 | * @sdev: pointer to SCSI device | |
2938 | * @depth: required depth to set | |
4264fd61 | 2939 | * |
db5ed4df | 2940 | * Change queue depth and make sure the max. limits are not crossed. |
4264fd61 | 2941 | */ |
db5ed4df | 2942 | static int ufshcd_change_queue_depth(struct scsi_device *sdev, int depth) |
4264fd61 SRT |
2943 | { |
2944 | struct ufs_hba *hba = shost_priv(sdev->host); | |
2945 | ||
2946 | if (depth > hba->nutrs) | |
2947 | depth = hba->nutrs; | |
db5ed4df | 2948 | return scsi_change_queue_depth(sdev, depth); |
4264fd61 SRT |
2949 | } |
2950 | ||
eeda4749 AM |
2951 | /** |
2952 | * ufshcd_slave_configure - adjust SCSI device configurations | |
2953 | * @sdev: pointer to SCSI device | |
2954 | */ | |
2955 | static int ufshcd_slave_configure(struct scsi_device *sdev) | |
2956 | { | |
2957 | struct request_queue *q = sdev->request_queue; | |
2958 | ||
2959 | blk_queue_update_dma_pad(q, PRDT_DATA_BYTE_COUNT_PAD - 1); | |
2960 | blk_queue_max_segment_size(q, PRDT_DATA_BYTE_COUNT_MAX); | |
2961 | ||
2962 | return 0; | |
2963 | } | |
2964 | ||
7a3e97b0 SY |
2965 | /** |
2966 | * ufshcd_slave_destroy - remove SCSI device configurations | |
2967 | * @sdev: pointer to SCSI device | |
2968 | */ | |
2969 | static void ufshcd_slave_destroy(struct scsi_device *sdev) | |
2970 | { | |
2971 | struct ufs_hba *hba; | |
2972 | ||
2973 | hba = shost_priv(sdev->host); | |
0ce147d4 | 2974 | /* Drop the reference as it won't be needed anymore */ |
7c48bfd0 AM |
2975 | if (ufshcd_scsi_to_upiu_lun(sdev->lun) == UFS_UPIU_UFS_DEVICE_WLUN) { |
2976 | unsigned long flags; | |
2977 | ||
2978 | spin_lock_irqsave(hba->host->host_lock, flags); | |
0ce147d4 | 2979 | hba->sdev_ufs_device = NULL; |
7c48bfd0 AM |
2980 | spin_unlock_irqrestore(hba->host->host_lock, flags); |
2981 | } | |
7a3e97b0 SY |
2982 | } |
2983 | ||
2984 | /** | |
2985 | * ufshcd_task_req_compl - handle task management request completion | |
2986 | * @hba: per adapter instance | |
2987 | * @index: index of the completed request | |
e2933132 | 2988 | * @resp: task management service response |
7a3e97b0 | 2989 | * |
e2933132 | 2990 | * Returns non-zero value on error, zero on success |
7a3e97b0 | 2991 | */ |
e2933132 | 2992 | static int ufshcd_task_req_compl(struct ufs_hba *hba, u32 index, u8 *resp) |
7a3e97b0 SY |
2993 | { |
2994 | struct utp_task_req_desc *task_req_descp; | |
2995 | struct utp_upiu_task_rsp *task_rsp_upiup; | |
2996 | unsigned long flags; | |
2997 | int ocs_value; | |
2998 | int task_result; | |
2999 | ||
3000 | spin_lock_irqsave(hba->host->host_lock, flags); | |
3001 | ||
3002 | /* Clear completed tasks from outstanding_tasks */ | |
3003 | __clear_bit(index, &hba->outstanding_tasks); | |
3004 | ||
3005 | task_req_descp = hba->utmrdl_base_addr; | |
3006 | ocs_value = ufshcd_get_tmr_ocs(&task_req_descp[index]); | |
3007 | ||
3008 | if (ocs_value == OCS_SUCCESS) { | |
3009 | task_rsp_upiup = (struct utp_upiu_task_rsp *) | |
3010 | task_req_descp[index].task_rsp_upiu; | |
3011 | task_result = be32_to_cpu(task_rsp_upiup->header.dword_1); | |
3012 | task_result = ((task_result & MASK_TASK_RESPONSE) >> 8); | |
e2933132 SRT |
3013 | if (resp) |
3014 | *resp = (u8)task_result; | |
7a3e97b0 | 3015 | } else { |
e2933132 SRT |
3016 | dev_err(hba->dev, "%s: failed, ocs = 0x%x\n", |
3017 | __func__, ocs_value); | |
7a3e97b0 SY |
3018 | } |
3019 | spin_unlock_irqrestore(hba->host->host_lock, flags); | |
e2933132 SRT |
3020 | |
3021 | return ocs_value; | |
7a3e97b0 SY |
3022 | } |
3023 | ||
7a3e97b0 SY |
3024 | /** |
3025 | * ufshcd_scsi_cmd_status - Update SCSI command result based on SCSI status | |
3026 | * @lrbp: pointer to local reference block of completed command | |
3027 | * @scsi_status: SCSI command status | |
3028 | * | |
3029 | * Returns value based on SCSI command status | |
3030 | */ | |
3031 | static inline int | |
3032 | ufshcd_scsi_cmd_status(struct ufshcd_lrb *lrbp, int scsi_status) | |
3033 | { | |
3034 | int result = 0; | |
3035 | ||
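	/*
	 * The result word handed to the SCSI midlayer packs, from high byte
	 * to low: driver byte, host byte (DID_*), message byte and the SCSI
	 * status byte -- hence the "DID_OK << 16 | COMMAND_COMPLETE << 8 |
	 * scsi_status" construction below.
	 */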
3036 | switch (scsi_status) { | |
7a3e97b0 | 3037 | case SAM_STAT_CHECK_CONDITION: |
1c2623c5 SJ |
3038 | ufshcd_copy_sense_data(lrbp); |
3039 | case SAM_STAT_GOOD: | |
7a3e97b0 SY |
3040 | result |= DID_OK << 16 | |
3041 | COMMAND_COMPLETE << 8 | | |
1c2623c5 | 3042 | scsi_status; |
7a3e97b0 SY |
3043 | break; |
3044 | case SAM_STAT_TASK_SET_FULL: | |
1c2623c5 | 3045 | case SAM_STAT_BUSY: |
7a3e97b0 | 3046 | case SAM_STAT_TASK_ABORTED: |
1c2623c5 SJ |
3047 | ufshcd_copy_sense_data(lrbp); |
3048 | result |= scsi_status; | |
7a3e97b0 SY |
3049 | break; |
3050 | default: | |
3051 | result |= DID_ERROR << 16; | |
3052 | break; | |
3053 | } /* end of switch */ | |
3054 | ||
3055 | return result; | |
3056 | } | |
3057 | ||
3058 | /** | |
3059 | * ufshcd_transfer_rsp_status - Get overall status of the response | |
3060 | * @hba: per adapter instance | |
3061 | * @lrbp: pointer to local reference block of completed command | |
3062 | * | |
3063 | * Returns result of the command to notify SCSI midlayer | |
3064 | */ | |
3065 | static inline int | |
3066 | ufshcd_transfer_rsp_status(struct ufs_hba *hba, struct ufshcd_lrb *lrbp) | |
3067 | { | |
3068 | int result = 0; | |
3069 | int scsi_status; | |
3070 | int ocs; | |
3071 | ||
3072 | /* overall command status of utrd */ | |
3073 | ocs = ufshcd_get_tr_ocs(lrbp); | |
3074 | ||
3075 | switch (ocs) { | |
3076 | case OCS_SUCCESS: | |
5a0b0cb9 | 3077 | result = ufshcd_get_req_rsp(lrbp->ucd_rsp_ptr); |
7a3e97b0 | 3078 | |
5a0b0cb9 SRT |
3079 | switch (result) { |
3080 | case UPIU_TRANSACTION_RESPONSE: | |
3081 | /* | |
3082 | * get the response UPIU result to extract | |
3083 | * the SCSI command status | |
3084 | */ | |
3085 | result = ufshcd_get_rsp_upiu_result(lrbp->ucd_rsp_ptr); | |
3086 | ||
3087 | /* | |
3088 | * get the result based on SCSI status response | |
3089 | * to notify the SCSI midlayer of the command status | |
3090 | */ | |
3091 | scsi_status = result & MASK_SCSI_STATUS; | |
3092 | result = ufshcd_scsi_cmd_status(lrbp, scsi_status); | |
66ec6d59 SRT |
3093 | |
3094 | if (ufshcd_is_exception_event(lrbp->ucd_rsp_ptr)) | |
3095 | schedule_work(&hba->eeh_work); | |
5a0b0cb9 SRT |
3096 | break; |
3097 | case UPIU_TRANSACTION_REJECT_UPIU: | |
3098 | /* TODO: handle Reject UPIU Response */ | |
3099 | result = DID_ERROR << 16; | |
3b1d0580 | 3100 | dev_err(hba->dev, |
5a0b0cb9 SRT |
3101 | "Reject UPIU not fully implemented\n"); |
3102 | break; | |
3103 | default: | |
3104 | dev_err(hba->dev, | |
3105 | "Unexpected request response code = %x\n", | |
3106 | result); | |
3107 | result = DID_ERROR << 16; | |
7a3e97b0 SY |
3108 | break; |
3109 | } | |
7a3e97b0 SY |
3110 | break; |
3111 | case OCS_ABORTED: | |
3112 | result |= DID_ABORT << 16; | |
3113 | break; | |
e8e7f271 SRT |
3114 | case OCS_INVALID_COMMAND_STATUS: |
3115 | result |= DID_REQUEUE << 16; | |
3116 | break; | |
7a3e97b0 SY |
3117 | case OCS_INVALID_CMD_TABLE_ATTR: |
3118 | case OCS_INVALID_PRDT_ATTR: | |
3119 | case OCS_MISMATCH_DATA_BUF_SIZE: | |
3120 | case OCS_MISMATCH_RESP_UPIU_SIZE: | |
3121 | case OCS_PEER_COMM_FAILURE: | |
3122 | case OCS_FATAL_ERROR: | |
3123 | default: | |
3124 | result |= DID_ERROR << 16; | |
3b1d0580 | 3125 | dev_err(hba->dev, |
7a3e97b0 SY |
3126 | "OCS error from controller = %x\n", ocs); |
3127 | break; | |
3128 | } /* end of switch */ | |
3129 | ||
3130 | return result; | |
3131 | } | |
3132 | ||
6ccf44fe SJ |
3133 | /** |
3134 | * ufshcd_uic_cmd_compl - handle completion of uic command | |
3135 | * @hba: per adapter instance | |
53b3d9c3 | 3136 | * @intr_status: interrupt status generated by the controller |
6ccf44fe | 3137 | */ |
53b3d9c3 | 3138 | static void ufshcd_uic_cmd_compl(struct ufs_hba *hba, u32 intr_status) |
6ccf44fe | 3139 | { |
53b3d9c3 | 3140 | if ((intr_status & UIC_COMMAND_COMPL) && hba->active_uic_cmd) { |
6ccf44fe SJ |
3141 | hba->active_uic_cmd->argument2 |= |
3142 | ufshcd_get_uic_cmd_result(hba); | |
12b4fdb4 SJ |
3143 | hba->active_uic_cmd->argument3 = |
3144 | ufshcd_get_dme_attr_val(hba); | |
6ccf44fe SJ |
3145 | complete(&hba->active_uic_cmd->done); |
3146 | } | |
53b3d9c3 | 3147 | |
57d104c1 SJ |
3148 | if ((intr_status & UFSHCD_UIC_PWR_MASK) && hba->uic_async_done) |
3149 | complete(hba->uic_async_done); | |
6ccf44fe SJ |
3150 | } |
3151 | ||
7a3e97b0 SY |
3152 | /** |
3153 | * ufshcd_transfer_req_compl - handle SCSI and query command completion | |
3154 | * @hba: per adapter instance | |
3155 | */ | |
3156 | static void ufshcd_transfer_req_compl(struct ufs_hba *hba) | |
3157 | { | |
5a0b0cb9 SRT |
3158 | struct ufshcd_lrb *lrbp; |
3159 | struct scsi_cmnd *cmd; | |
7a3e97b0 SY |
3160 | unsigned long completed_reqs; |
3161 | u32 tr_doorbell; | |
3162 | int result; | |
3163 | int index; | |
e9d501b1 DR |
3164 | |
3165 | /* | |
3166 | * Reset the interrupt aggregation counters first and read the doorbell | |
3167 | * afterward so that all completed requests are handled. To prevent | |
3168 | * starvation of other interrupts, the DB is read only once after reset. | |
3169 | * The downside is a possible false interrupt if the device completes | |
3170 | * another request after the aggregation reset and before the DB read. | |
3171 | */ | |
b852190e YG |
3172 | if (ufshcd_is_intr_aggr_allowed(hba)) |
3173 | ufshcd_reset_intr_aggr(hba); | |
7a3e97b0 | 3174 | |
b873a275 | 3175 | tr_doorbell = ufshcd_readl(hba, REG_UTP_TRANSFER_REQ_DOOR_BELL); |
7a3e97b0 SY |
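	/*
	 * Bits set in outstanding_reqs but already cleared in the doorbell
	 * are the completions: e.g. outstanding_reqs == 0b1011 with the
	 * doorbell reading 0b0001 yields 0b1010, i.e. tags 1 and 3 are done.
	 */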
3176 | completed_reqs = tr_doorbell ^ hba->outstanding_reqs; |
3177 | ||
e9d501b1 DR |
3178 | for_each_set_bit(index, &completed_reqs, hba->nutrs) { |
3179 | lrbp = &hba->lrb[index]; | |
3180 | cmd = lrbp->cmd; | |
3181 | if (cmd) { | |
3182 | result = ufshcd_transfer_rsp_status(hba, lrbp); | |
3183 | scsi_dma_unmap(cmd); | |
3184 | cmd->result = result; | |
3185 | /* Mark completed command as NULL in LRB */ | |
3186 | lrbp->cmd = NULL; | |
3187 | clear_bit_unlock(index, &hba->lrb_in_use); | |
3188 | /* Do not touch lrbp after scsi done */ | |
3189 | cmd->scsi_done(cmd); | |
1ab27c9c | 3190 | __ufshcd_release(hba); |
e9d501b1 DR |
3191 | } else if (lrbp->command_type == UTP_CMD_TYPE_DEV_MANAGE) { |
3192 | if (hba->dev_cmd.complete) | |
3193 | complete(hba->dev_cmd.complete); | |
3194 | } | |
3195 | } | |
7a3e97b0 SY |
3196 | |
3197 | /* clear corresponding bits of completed commands */ | |
3198 | hba->outstanding_reqs ^= completed_reqs; | |
3199 | ||
856b3483 ST |
3200 | ufshcd_clk_scaling_update_busy(hba); |
3201 | ||
5a0b0cb9 SRT |
3202 | /* we might have free'd some tags above */ |
3203 | wake_up(&hba->dev_cmd.tag_wq); | |
7a3e97b0 SY |
3204 | } |
3205 | ||
66ec6d59 SRT |
3206 | /** |
3207 | * ufshcd_disable_ee - disable exception event | |
3208 | * @hba: per-adapter instance | |
3209 | * @mask: exception event to disable | |
3210 | * | |
3211 | * Disables exception event in the device so that the EVENT_ALERT | |
3212 | * bit is not set. | |
3213 | * | |
3214 | * Returns zero on success, non-zero error value on failure. | |
3215 | */ | |
3216 | static int ufshcd_disable_ee(struct ufs_hba *hba, u16 mask) | |
3217 | { | |
3218 | int err = 0; | |
3219 | u32 val; | |
3220 | ||
3221 | if (!(hba->ee_ctrl_mask & mask)) | |
3222 | goto out; | |
3223 | ||
3224 | val = hba->ee_ctrl_mask & ~mask; | |
3225 | val &= 0xFFFF; /* 2 bytes */ | |
3226 | err = ufshcd_query_attr(hba, UPIU_QUERY_OPCODE_WRITE_ATTR, | |
3227 | QUERY_ATTR_IDN_EE_CONTROL, 0, 0, &val); | |
3228 | if (!err) | |
3229 | hba->ee_ctrl_mask &= ~mask; | |
3230 | out: | |
3231 | return err; | |
3232 | } | |
3233 | ||
3234 | /** | |
3235 | * ufshcd_enable_ee - enable exception event | |
3236 | * @hba: per-adapter instance | |
3237 | * @mask: exception event to enable | |
3238 | * | |
3239 | * Enable corresponding exception event in the device to allow | |
3240 | * device to alert host in critical scenarios. | |
3241 | * | |
3242 | * Returns zero on success, non-zero error value on failure. | |
3243 | */ | |
3244 | static int ufshcd_enable_ee(struct ufs_hba *hba, u16 mask) | |
3245 | { | |
3246 | int err = 0; | |
3247 | u32 val; | |
3248 | ||
3249 | if (hba->ee_ctrl_mask & mask) | |
3250 | goto out; | |
3251 | ||
3252 | val = hba->ee_ctrl_mask | mask; | |
3253 | val &= 0xFFFF; /* 2 bytes */ | |
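	/*
	 * Example: with ee_ctrl_mask == 0 and mask == MASK_EE_URGENT_BKOPS
	 * (bit 2), wExceptionEventControl is written as 0x0004, arming only
	 * the urgent-BKOPS exception.
	 */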
3254 | err = ufshcd_query_attr(hba, UPIU_QUERY_OPCODE_WRITE_ATTR, | |
3255 | QUERY_ATTR_IDN_EE_CONTROL, 0, 0, &val); | |
3256 | if (!err) | |
3257 | hba->ee_ctrl_mask |= mask; | |
3258 | out: | |
3259 | return err; | |
3260 | } | |
3261 | ||
3262 | /** | |
3263 | * ufshcd_enable_auto_bkops - Allow device managed BKOPS | |
3264 | * @hba: per-adapter instance | |
3265 | * | |
3266 | * Allow device to manage background operations on its own. Enabling | |
3267 | * this might lead to inconsistent latencies during normal data transfers | |
3268 | * as the device is allowed to manage its own way of handling background | |
3269 | * operations. | |
3270 | * | |
3271 | * Returns zero on success, non-zero on failure. | |
3272 | */ | |
3273 | static int ufshcd_enable_auto_bkops(struct ufs_hba *hba) | |
3274 | { | |
3275 | int err = 0; | |
3276 | ||
3277 | if (hba->auto_bkops_enabled) | |
3278 | goto out; | |
3279 | ||
3280 | err = ufshcd_query_flag(hba, UPIU_QUERY_OPCODE_SET_FLAG, | |
3281 | QUERY_FLAG_IDN_BKOPS_EN, NULL); | |
3282 | if (err) { | |
3283 | dev_err(hba->dev, "%s: failed to enable bkops %d\n", | |
3284 | __func__, err); | |
3285 | goto out; | |
3286 | } | |
3287 | ||
3288 | hba->auto_bkops_enabled = true; | |
3289 | ||
3290 | /* No need of URGENT_BKOPS exception from the device */ | |
3291 | err = ufshcd_disable_ee(hba, MASK_EE_URGENT_BKOPS); | |
3292 | if (err) | |
3293 | dev_err(hba->dev, "%s: failed to disable exception event %d\n", | |
3294 | __func__, err); | |
3295 | out: | |
3296 | return err; | |
3297 | } | |
3298 | ||
3299 | /** | |
3300 | * ufshcd_disable_auto_bkops - block device from doing background operations | |
3301 | * @hba: per-adapter instance | |
3302 | * | |
3303 | * Disabling background operations improves command response latency but | |
3304 | * has the drawback of the device moving into a critical state where it is | |
3305 | * not operable. Make sure to call ufshcd_enable_auto_bkops() whenever the | |
3306 | * host is idle so that BKOPS are managed effectively without any negative | |
3307 | * impacts. | |
3308 | * | |
3309 | * Returns zero on success, non-zero on failure. | |
3310 | */ | |
3311 | static int ufshcd_disable_auto_bkops(struct ufs_hba *hba) | |
3312 | { | |
3313 | int err = 0; | |
3314 | ||
3315 | if (!hba->auto_bkops_enabled) | |
3316 | goto out; | |
3317 | ||
3318 | /* | |
3319 | * If host assisted BKOPs is to be enabled, make sure | |
3320 | * urgent bkops exception is allowed. | |
3321 | */ | |
3322 | err = ufshcd_enable_ee(hba, MASK_EE_URGENT_BKOPS); | |
3323 | if (err) { | |
3324 | dev_err(hba->dev, "%s: failed to enable exception event %d\n", | |
3325 | __func__, err); | |
3326 | goto out; | |
3327 | } | |
3328 | ||
3329 | err = ufshcd_query_flag(hba, UPIU_QUERY_OPCODE_CLEAR_FLAG, | |
3330 | QUERY_FLAG_IDN_BKOPS_EN, NULL); | |
3331 | if (err) { | |
3332 | dev_err(hba->dev, "%s: failed to disable bkops %d\n", | |
3333 | __func__, err); | |
3334 | ufshcd_disable_ee(hba, MASK_EE_URGENT_BKOPS); | |
3335 | goto out; | |
3336 | } | |
3337 | ||
3338 | hba->auto_bkops_enabled = false; | |
3339 | out: | |
3340 | return err; | |
3341 | } | |
3342 | ||
3343 | /** | |
3344 | * ufshcd_force_reset_auto_bkops - force enable of auto bkops | |
3345 | * @hba: per adapter instance | |
3346 | * | |
3347 | * After a device reset the device may toggle the BKOPS_EN flag | |
3348 | * to default value. The s/w tracking variables should be updated | |
3349 | * as well. Do this by forcing enable of auto bkops. | |
3350 | */ | |
3351 | static void ufshcd_force_reset_auto_bkops(struct ufs_hba *hba) | |
3352 | { | |
3353 | hba->auto_bkops_enabled = false; | |
3354 | hba->ee_ctrl_mask |= MASK_EE_URGENT_BKOPS; | |
3355 | ufshcd_enable_auto_bkops(hba); | |
3356 | } | |
3357 | ||
3358 | static inline int ufshcd_get_bkops_status(struct ufs_hba *hba, u32 *status) | |
3359 | { | |
3360 | return ufshcd_query_attr(hba, UPIU_QUERY_OPCODE_READ_ATTR, | |
3361 | QUERY_ATTR_IDN_BKOPS_STATUS, 0, 0, status); | |
3362 | } | |
3363 | ||
3364 | /** | |
57d104c1 | 3365 | * ufshcd_bkops_ctrl - control the auto bkops based on current bkops status |
66ec6d59 | 3366 | * @hba: per-adapter instance |
57d104c1 | 3367 | * @status: bkops_status value |
66ec6d59 | 3368 | * |
57d104c1 SJ |
3369 | * Read the bkops_status from the UFS device and enable the fBackgroundOpsEn | |
3370 | * flag in the device to permit background operations if the device | |
3371 | * bkops_status is greater than or equal to the "status" argument passed to | |
3372 | * this function, disable otherwise. | |
3373 | * | |
3374 | * Returns 0 for success, non-zero in case of failure. | |
3375 | * | |
3376 | * NOTE: Caller of this function can check the "hba->auto_bkops_enabled" flag | |
3377 | * to know whether auto bkops is enabled or disabled after this function | |
3378 | * returns control to it. | |
66ec6d59 | 3379 | */ |
57d104c1 SJ |
3380 | static int ufshcd_bkops_ctrl(struct ufs_hba *hba, |
3381 | enum bkops_status status) | |
66ec6d59 SRT |
3382 | { |
3383 | int err; | |
57d104c1 | 3384 | u32 curr_status = 0; |
66ec6d59 | 3385 | |
57d104c1 | 3386 | err = ufshcd_get_bkops_status(hba, &curr_status); |
66ec6d59 SRT |
3387 | if (err) { |
3388 | dev_err(hba->dev, "%s: failed to get BKOPS status %d\n", | |
3389 | __func__, err); | |
3390 | goto out; | |
57d104c1 SJ |
3391 | } else if (curr_status > BKOPS_STATUS_MAX) { |
3392 | dev_err(hba->dev, "%s: invalid BKOPS status %d\n", | |
3393 | __func__, curr_status); | |
3394 | err = -EINVAL; | |
3395 | goto out; | |
66ec6d59 SRT |
3396 | } |
3397 | ||
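	/*
	 * Example: called with status == BKOPS_STATUS_PERF_IMPACT, a device
	 * reporting its bkops status at or above "performance impact" keeps
	 * auto-bkops enabled, while anything below that threshold disables it.
	 */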
57d104c1 | 3398 | if (curr_status >= status) |
66ec6d59 | 3399 | err = ufshcd_enable_auto_bkops(hba); |
57d104c1 SJ |
3400 | else |
3401 | err = ufshcd_disable_auto_bkops(hba); | |
66ec6d59 SRT |
3402 | out: |
3403 | return err; | |
3404 | } | |
3405 | ||
57d104c1 SJ |
3406 | /** |
3407 | * ufshcd_urgent_bkops - handle urgent bkops exception event | |
3408 | * @hba: per-adapter instance | |
3409 | * | |
3410 | * Enable fBackgroundOpsEn flag in the device to permit background | |
3411 | * operations. | |
3412 | * | |
3413 | * Returns 0 if BKOPS is enabled, 1 if BKOPS is not enabled, and a negative | |
3414 | * error value for any other failure. | |
3415 | */ | |
3416 | static int ufshcd_urgent_bkops(struct ufs_hba *hba) | |
3417 | { | |
3418 | return ufshcd_bkops_ctrl(hba, BKOPS_STATUS_PERF_IMPACT); | |
3419 | } | |
3420 | ||
66ec6d59 SRT |
3421 | static inline int ufshcd_get_ee_status(struct ufs_hba *hba, u32 *status) |
3422 | { | |
3423 | return ufshcd_query_attr(hba, UPIU_QUERY_OPCODE_READ_ATTR, | |
3424 | QUERY_ATTR_IDN_EE_STATUS, 0, 0, status); | |
3425 | } | |
3426 | ||
3427 | /** | |
3428 | * ufshcd_exception_event_handler - handle exceptions raised by device | |
3429 | * @work: pointer to work data | |
3430 | * | |
3431 | * Read bExceptionEventStatus attribute from the device and handle the | |
3432 | * exception event accordingly. | |
3433 | */ | |
3434 | static void ufshcd_exception_event_handler(struct work_struct *work) | |
3435 | { | |
3436 | struct ufs_hba *hba; | |
3437 | int err; | |
3438 | u32 status = 0; | |
3439 | hba = container_of(work, struct ufs_hba, eeh_work); | |
3440 | ||
62694735 | 3441 | pm_runtime_get_sync(hba->dev); |
66ec6d59 SRT |
3442 | err = ufshcd_get_ee_status(hba, &status); |
3443 | if (err) { | |
3444 | dev_err(hba->dev, "%s: failed to get exception status %d\n", | |
3445 | __func__, err); | |
3446 | goto out; | |
3447 | } | |
3448 | ||
3449 | status &= hba->ee_ctrl_mask; | |
3450 | if (status & MASK_EE_URGENT_BKOPS) { | |
3451 | err = ufshcd_urgent_bkops(hba); | |
57d104c1 | 3452 | if (err < 0) |
66ec6d59 SRT |
3453 | dev_err(hba->dev, "%s: failed to handle urgent bkops %d\n", |
3454 | __func__, err); | |
3455 | } | |
3456 | out: | |
62694735 | 3457 | pm_runtime_put_sync(hba->dev); |
66ec6d59 SRT |
3458 | return; |
3459 | } | |
3460 | ||
7a3e97b0 | 3461 | /** |
e8e7f271 SRT |
3462 | * ufshcd_err_handler - handle UFS errors that require s/w attention |
3463 | * @work: pointer to work structure | |
7a3e97b0 | 3464 | */ |
e8e7f271 | 3465 | static void ufshcd_err_handler(struct work_struct *work) |
7a3e97b0 SY |
3466 | { |
3467 | struct ufs_hba *hba; | |
e8e7f271 SRT |
3468 | unsigned long flags; |
3469 | u32 err_xfer = 0; | |
3470 | u32 err_tm = 0; | |
3471 | int err = 0; | |
3472 | int tag; | |
3473 | ||
3474 | hba = container_of(work, struct ufs_hba, eh_work); | |
7a3e97b0 | 3475 | |
62694735 | 3476 | pm_runtime_get_sync(hba->dev); |
1ab27c9c | 3477 | ufshcd_hold(hba, false); |
e8e7f271 SRT |
3478 | |
3479 | spin_lock_irqsave(hba->host->host_lock, flags); | |
3480 | if (hba->ufshcd_state == UFSHCD_STATE_RESET) { | |
3481 | spin_unlock_irqrestore(hba->host->host_lock, flags); | |
3482 | goto out; | |
3483 | } | |
3484 | ||
3485 | hba->ufshcd_state = UFSHCD_STATE_RESET; | |
3486 | ufshcd_set_eh_in_progress(hba); | |
3487 | ||
3488 | /* Complete requests that have door-bell cleared by h/w */ | |
3489 | ufshcd_transfer_req_compl(hba); | |
3490 | ufshcd_tmc_handler(hba); | |
3491 | spin_unlock_irqrestore(hba->host->host_lock, flags); | |
3492 | ||
3493 | /* Clear pending transfer requests */ | |
3494 | for_each_set_bit(tag, &hba->outstanding_reqs, hba->nutrs) | |
3495 | if (ufshcd_clear_cmd(hba, tag)) | |
3496 | err_xfer |= 1 << tag; | |
3497 | ||
3498 | /* Clear pending task management requests */ | |
3499 | for_each_set_bit(tag, &hba->outstanding_tasks, hba->nutmrs) | |
3500 | if (ufshcd_clear_tm_cmd(hba, tag)) | |
3501 | err_tm |= 1 << tag; | |
3502 | ||
3503 | /* Complete the requests that are cleared by s/w */ | |
3504 | spin_lock_irqsave(hba->host->host_lock, flags); | |
3505 | ufshcd_transfer_req_compl(hba); | |
3506 | ufshcd_tmc_handler(hba); | |
3507 | spin_unlock_irqrestore(hba->host->host_lock, flags); | |
3508 | ||
3509 | /* Fatal errors need reset */ | |
3510 | if (err_xfer || err_tm || (hba->saved_err & INT_FATAL_ERRORS) || | |
3511 | ((hba->saved_err & UIC_ERROR) && | |
3512 | (hba->saved_uic_err & UFSHCD_UIC_DL_PA_INIT_ERROR))) { | |
3513 | err = ufshcd_reset_and_restore(hba); | |
3514 | if (err) { | |
3515 | dev_err(hba->dev, "%s: reset and restore failed\n", | |
3516 | __func__); | |
3517 | hba->ufshcd_state = UFSHCD_STATE_ERROR; | |
3518 | } | |
3519 | /* | |
3520 | * Inform scsi mid-layer that we did reset and allow to handle | |
3521 | * Unit Attention properly. | |
3522 | */ | |
3523 | scsi_report_bus_reset(hba->host, 0); | |
3524 | hba->saved_err = 0; | |
3525 | hba->saved_uic_err = 0; | |
3526 | } | |
3527 | ufshcd_clear_eh_in_progress(hba); | |
3528 | ||
3529 | out: | |
3530 | scsi_unblock_requests(hba->host); | |
1ab27c9c | 3531 | ufshcd_release(hba); |
62694735 | 3532 | pm_runtime_put_sync(hba->dev); |
7a3e97b0 SY |
3533 | } |
3534 | ||
3535 | /** | |
e8e7f271 SRT |
3536 | * ufshcd_update_uic_error - check and set fatal UIC error flags. |
3537 | * @hba: per-adapter instance | |
7a3e97b0 | 3538 | */ |
e8e7f271 | 3539 | static void ufshcd_update_uic_error(struct ufs_hba *hba) |
7a3e97b0 SY |
3540 | { |
3541 | u32 reg; | |
3542 | ||
e8e7f271 SRT |
3543 | /* PA_INIT_ERROR is fatal and needs UIC reset */ |
3544 | reg = ufshcd_readl(hba, REG_UIC_ERROR_CODE_DATA_LINK_LAYER); | |
3545 | if (reg & UIC_DATA_LINK_LAYER_ERROR_PA_INIT) | |
3546 | hba->uic_error |= UFSHCD_UIC_DL_PA_INIT_ERROR; | |
3547 | ||
3548 | /* UIC NL/TL/DME errors need a software retry */ | |
3549 | reg = ufshcd_readl(hba, REG_UIC_ERROR_CODE_NETWORK_LAYER); | |
3550 | if (reg) | |
3551 | hba->uic_error |= UFSHCD_UIC_NL_ERROR; | |
3552 | ||
3553 | reg = ufshcd_readl(hba, REG_UIC_ERROR_CODE_TRANSPORT_LAYER); | |
3554 | if (reg) | |
3555 | hba->uic_error |= UFSHCD_UIC_TL_ERROR; | |
3556 | ||
3557 | reg = ufshcd_readl(hba, REG_UIC_ERROR_CODE_DME); | |
3558 | if (reg) | |
3559 | hba->uic_error |= UFSHCD_UIC_DME_ERROR; | |
3560 | ||
3561 | dev_dbg(hba->dev, "%s: UIC error flags = 0x%08x\n", | |
3562 | __func__, hba->uic_error); | |
3563 | } | |
3564 | ||
3565 | /** | |
3566 | * ufshcd_check_errors - Check for errors that need s/w attention | |
3567 | * @hba: per-adapter instance | |
3568 | */ | |
3569 | static void ufshcd_check_errors(struct ufs_hba *hba) | |
3570 | { | |
3571 | bool queue_eh_work = false; | |
3572 | ||
7a3e97b0 | 3573 | if (hba->errors & INT_FATAL_ERRORS) |
e8e7f271 | 3574 | queue_eh_work = true; |
7a3e97b0 SY |
3575 | |
3576 | if (hba->errors & UIC_ERROR) { | |
e8e7f271 SRT |
3577 | hba->uic_error = 0; |
3578 | ufshcd_update_uic_error(hba); | |
3579 | if (hba->uic_error) | |
3580 | queue_eh_work = true; | |
7a3e97b0 | 3581 | } |
e8e7f271 SRT |
3582 | |
3583 | if (queue_eh_work) { | |
3584 | /* handle fatal errors only when link is functional */ | |
3585 | if (hba->ufshcd_state == UFSHCD_STATE_OPERATIONAL) { | |
3586 | /* block commands from scsi mid-layer */ | |
3587 | scsi_block_requests(hba->host); | |
3588 | ||
3589 | /* transfer error masks to sticky bits */ | |
3590 | hba->saved_err |= hba->errors; | |
3591 | hba->saved_uic_err |= hba->uic_error; | |
3592 | ||
3593 | hba->ufshcd_state = UFSHCD_STATE_ERROR; | |
3594 | schedule_work(&hba->eh_work); | |
3595 | } | |
3441da7d | 3596 | } |
e8e7f271 SRT |
3597 | /* |
3598 | * if (!queue_eh_work) - | |
3599 | * Other errors are either non-fatal, where the host recovers | |
3600 | * by itself without s/w intervention, or errors that will be | |
3601 | * handled by the SCSI core layer. | |
3602 | */ | |
7a3e97b0 SY |
3603 | } |
3604 | ||
3605 | /** | |
3606 | * ufshcd_tmc_handler - handle task management function completion | |
3607 | * @hba: per adapter instance | |
3608 | */ | |
3609 | static void ufshcd_tmc_handler(struct ufs_hba *hba) | |
3610 | { | |
3611 | u32 tm_doorbell; | |
3612 | ||
b873a275 | 3613 | tm_doorbell = ufshcd_readl(hba, REG_UTP_TASK_REQ_DOOR_BELL); |
7a3e97b0 | 3614 | hba->tm_condition = tm_doorbell ^ hba->outstanding_tasks; |
e2933132 | 3615 | wake_up(&hba->tm_wq); |
7a3e97b0 SY |
3616 | } |
3617 | ||
3618 | /** | |
3619 | * ufshcd_sl_intr - Interrupt service routine | |
3620 | * @hba: per adapter instance | |
3621 | * @intr_status: contains interrupts generated by the controller | |
3622 | */ | |
3623 | static void ufshcd_sl_intr(struct ufs_hba *hba, u32 intr_status) | |
3624 | { | |
3625 | hba->errors = UFSHCD_ERROR_MASK & intr_status; | |
3626 | if (hba->errors) | |
e8e7f271 | 3627 | ufshcd_check_errors(hba); |
7a3e97b0 | 3628 | |
53b3d9c3 SJ |
3629 | if (intr_status & UFSHCD_UIC_MASK) |
3630 | ufshcd_uic_cmd_compl(hba, intr_status); | |
7a3e97b0 SY |
3631 | |
3632 | if (intr_status & UTP_TASK_REQ_COMPL) | |
3633 | ufshcd_tmc_handler(hba); | |
3634 | ||
3635 | if (intr_status & UTP_TRANSFER_REQ_COMPL) | |
3636 | ufshcd_transfer_req_compl(hba); | |
3637 | } | |
3638 | ||
3639 | /** | |
3640 | * ufshcd_intr - Main interrupt service routine | |
3641 | * @irq: irq number | |
3642 | * @__hba: pointer to adapter instance | |
3643 | * | |
3644 | * Returns IRQ_HANDLED - If interrupt is valid | |
3645 | * IRQ_NONE - If invalid interrupt | |
3646 | */ | |
3647 | static irqreturn_t ufshcd_intr(int irq, void *__hba) | |
3648 | { | |
3649 | u32 intr_status; | |
3650 | irqreturn_t retval = IRQ_NONE; | |
3651 | struct ufs_hba *hba = __hba; | |
3652 | ||
3653 | spin_lock(hba->host->host_lock); | |
b873a275 | 3654 | intr_status = ufshcd_readl(hba, REG_INTERRUPT_STATUS); |
7a3e97b0 SY |
3655 | |
3656 | if (intr_status) { | |
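		/*
		 * The IS register is write-one-to-clear: acknowledge the bits
		 * just sampled before servicing them, so that an event which
		 * arrives while these bits are handled re-raises the interrupt.
		 */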
261ea452 | 3657 | ufshcd_writel(hba, intr_status, REG_INTERRUPT_STATUS); |
7a3e97b0 | 3658 | ufshcd_sl_intr(hba, intr_status); |
7a3e97b0 SY |
3659 | retval = IRQ_HANDLED; |
3660 | } | |
3661 | spin_unlock(hba->host->host_lock); | |
3662 | return retval; | |
3663 | } | |
3664 | ||
e2933132 SRT |
3665 | static int ufshcd_clear_tm_cmd(struct ufs_hba *hba, int tag) |
3666 | { | |
3667 | int err = 0; | |
3668 | u32 mask = 1 << tag; | |
3669 | unsigned long flags; | |
3670 | ||
3671 | if (!test_bit(tag, &hba->outstanding_tasks)) | |
3672 | goto out; | |
3673 | ||
3674 | spin_lock_irqsave(hba->host->host_lock, flags); | |
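	/* per UFSHCI, a UTMRLCLR slot is cleared by writing 0, hence ~(1 << tag) */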
3675 | ufshcd_writel(hba, ~(1 << tag), REG_UTP_TASK_REQ_LIST_CLEAR); | |
3676 | spin_unlock_irqrestore(hba->host->host_lock, flags); | |
3677 | ||
3678 | /* poll for max. 1 sec to clear door bell register by h/w */ | |
3679 | err = ufshcd_wait_for_register(hba, | |
3680 | REG_UTP_TASK_REQ_DOOR_BELL, | |
3681 | mask, 0, 1000, 1000); | |
3682 | out: | |
3683 | return err; | |
3684 | } | |
3685 | ||
7a3e97b0 SY |
3686 | /** |
3687 | * ufshcd_issue_tm_cmd - issues task management commands to controller | |
3688 | * @hba: per adapter instance | |
e2933132 SRT |
3689 | * @lun_id: LUN ID to which TM command is sent |
3690 | * @task_id: task ID to which the TM command is applicable | |
3691 | * @tm_function: task management function opcode | |
3692 | * @tm_response: task management service response return value | |
7a3e97b0 | 3693 | * |
e2933132 | 3694 | * Returns non-zero value on error, zero on success. |
7a3e97b0 | 3695 | */ |
e2933132 SRT |
3696 | static int ufshcd_issue_tm_cmd(struct ufs_hba *hba, int lun_id, int task_id, |
3697 | u8 tm_function, u8 *tm_response) | |
7a3e97b0 SY |
3698 | { |
3699 | struct utp_task_req_desc *task_req_descp; | |
3700 | struct utp_upiu_task_req *task_req_upiup; | |
3701 | struct Scsi_Host *host; | |
3702 | unsigned long flags; | |
e2933132 | 3703 | int free_slot; |
7a3e97b0 | 3704 | int err; |
e2933132 | 3705 | int task_tag; |
7a3e97b0 SY |
3706 | |
3707 | host = hba->host; | |
3708 | ||
e2933132 SRT |
3709 | /* |
3710 | * Get free slot, sleep if slots are unavailable. | |
3711 | * Even though we use wait_event() which sleeps indefinitely, | |
3712 | * the maximum wait time is bounded by %TM_CMD_TIMEOUT. | |
3713 | */ | |
3714 | wait_event(hba->tm_tag_wq, ufshcd_get_tm_free_slot(hba, &free_slot)); | |
1ab27c9c | 3715 | ufshcd_hold(hba, false); |
7a3e97b0 | 3716 | |
e2933132 | 3717 | spin_lock_irqsave(host->host_lock, flags); |
7a3e97b0 SY |
3718 | task_req_descp = hba->utmrdl_base_addr; |
3719 | task_req_descp += free_slot; | |
3720 | ||
3721 | /* Configure task request descriptor */ | |
3722 | task_req_descp->header.dword_0 = cpu_to_le32(UTP_REQ_DESC_INT_CMD); | |
3723 | task_req_descp->header.dword_2 = | |
3724 | cpu_to_le32(OCS_INVALID_COMMAND_STATUS); | |
3725 | ||
3726 | /* Configure task request UPIU */ | |
3727 | task_req_upiup = | |
3728 | (struct utp_upiu_task_req *) task_req_descp->task_req_upiu; | |
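	/*
	 * Transfer requests own tags 0..nutrs-1, so TM requests are tagged
	 * just past that range: e.g. with nutrs == 32, TM slot 2 gets tag 34.
	 */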
e2933132 | 3729 | task_tag = hba->nutrs + free_slot; |
7a3e97b0 | 3730 | task_req_upiup->header.dword_0 = |
5a0b0cb9 | 3731 | UPIU_HEADER_DWORD(UPIU_TRANSACTION_TASK_REQ, 0, |
e2933132 | 3732 | lun_id, task_tag); |
7a3e97b0 | 3733 | task_req_upiup->header.dword_1 = |
5a0b0cb9 | 3734 | UPIU_HEADER_DWORD(0, tm_function, 0, 0); |
0ce147d4 SJ |
3735 | /* |
3736 | * The host shall provide the same value for LUN field in the basic | |
3737 | * header and for Input Parameter. | |
3738 | */ | |
e2933132 SRT |
3739 | task_req_upiup->input_param1 = cpu_to_be32(lun_id); |
3740 | task_req_upiup->input_param2 = cpu_to_be32(task_id); | |
7a3e97b0 SY |
3741 | |
3742 | /* send command to the controller */ | |
3743 | __set_bit(free_slot, &hba->outstanding_tasks); | |
b873a275 | 3744 | ufshcd_writel(hba, 1 << free_slot, REG_UTP_TASK_REQ_DOOR_BELL); |
7a3e97b0 SY |
3745 | |
3746 | spin_unlock_irqrestore(host->host_lock, flags); | |
3747 | ||
3748 | /* wait until the task management command is completed */ | |
e2933132 SRT |
3749 | err = wait_event_timeout(hba->tm_wq, |
3750 | test_bit(free_slot, &hba->tm_condition), | |
3751 | msecs_to_jiffies(TM_CMD_TIMEOUT)); | |
7a3e97b0 | 3752 | if (!err) { |
e2933132 SRT |
3753 | dev_err(hba->dev, "%s: task management cmd 0x%.2x timed-out\n", |
3754 | __func__, tm_function); | |
3755 | if (ufshcd_clear_tm_cmd(hba, free_slot)) | |
3756 | dev_WARN(hba->dev, "%s: unable to clear tm cmd (slot %d) after timeout\n", | |
3757 | __func__, free_slot); | |
3758 | err = -ETIMEDOUT; | |
3759 | } else { | |
3760 | err = ufshcd_task_req_compl(hba, free_slot, tm_response); | |
7a3e97b0 | 3761 | } |
e2933132 | 3762 | |
7a3e97b0 | 3763 | clear_bit(free_slot, &hba->tm_condition); |
e2933132 SRT |
3764 | ufshcd_put_tm_slot(hba, free_slot); |
3765 | wake_up(&hba->tm_tag_wq); | |
3766 | ||
1ab27c9c | 3767 | ufshcd_release(hba); |
7a3e97b0 SY |
3768 | return err; |
3769 | } | |
3770 | ||
3771 | /** | |
3441da7d SRT |
3772 | * ufshcd_eh_device_reset_handler - device reset handler registered to |
3773 | * scsi layer. | |
7a3e97b0 SY |
3774 | * @cmd: SCSI command pointer |
3775 | * | |
3776 | * Returns SUCCESS/FAILED | |
3777 | */ | |
3441da7d | 3778 | static int ufshcd_eh_device_reset_handler(struct scsi_cmnd *cmd) |
7a3e97b0 SY |
3779 | { |
3780 | struct Scsi_Host *host; | |
3781 | struct ufs_hba *hba; | |
3782 | unsigned int tag; | |
3783 | u32 pos; | |
3784 | int err; | |
e2933132 SRT |
3785 | u8 resp = 0xF; |
3786 | struct ufshcd_lrb *lrbp; | |
3441da7d | 3787 | unsigned long flags; |
7a3e97b0 SY |
3788 | |
3789 | host = cmd->device->host; | |
3790 | hba = shost_priv(host); | |
3791 | tag = cmd->request->tag; | |
3792 | ||
e2933132 SRT |
3793 | lrbp = &hba->lrb[tag]; |
3794 | err = ufshcd_issue_tm_cmd(hba, lrbp->lun, 0, UFS_LOGICAL_RESET, &resp); | |
3795 | if (err || resp != UPIU_TASK_MANAGEMENT_FUNC_COMPL) { | |
3441da7d SRT |
3796 | if (!err) |
3797 | err = resp; | |
7a3e97b0 | 3798 | goto out; |
e2933132 | 3799 | } |
7a3e97b0 | 3800 | |
3441da7d SRT |
3801 | /* clear the commands that were pending for the corresponding LUN */ | |
3802 | for_each_set_bit(pos, &hba->outstanding_reqs, hba->nutrs) { | |
3803 | if (hba->lrb[pos].lun == lrbp->lun) { | |
3804 | err = ufshcd_clear_cmd(hba, pos); | |
3805 | if (err) | |
3806 | break; | |
7a3e97b0 | 3807 | } |
3441da7d SRT |
3808 | } |
3809 | spin_lock_irqsave(host->host_lock, flags); | |
3810 | ufshcd_transfer_req_compl(hba); | |
3811 | spin_unlock_irqrestore(host->host_lock, flags); | |
7a3e97b0 | 3812 | out: |
3441da7d SRT |
3813 | if (!err) { |
3814 | err = SUCCESS; | |
3815 | } else { | |
3816 | dev_err(hba->dev, "%s: failed with err %d\n", __func__, err); | |
3817 | err = FAILED; | |
3818 | } | |
7a3e97b0 SY |
3819 | return err; |
3820 | } | |
3821 | ||
7a3e97b0 SY |
3822 | /** |
3823 | * ufshcd_abort - abort a specific command | |
3824 | * @cmd: SCSI command pointer | |
3825 | * | |
f20810d8 SRT |
3826 | * Abort the pending command in device by sending UFS_ABORT_TASK task management |
3827 | * command, and in host controller by clearing the door-bell register. There can | |
3828 | * be a race where the controller sends the command to the device while the | |
3829 | * abort is being issued. To avoid that, first issue UFS_QUERY_TASK to check | |
3830 | * whether the command was really issued and only then try to abort it. | |
3831 | * | |
7a3e97b0 SY |
3832 | * Returns SUCCESS/FAILED |
3833 | */ | |
3834 | static int ufshcd_abort(struct scsi_cmnd *cmd) | |
3835 | { | |
3836 | struct Scsi_Host *host; | |
3837 | struct ufs_hba *hba; | |
3838 | unsigned long flags; | |
3839 | unsigned int tag; | |
f20810d8 SRT |
3840 | int err = 0; |
3841 | int poll_cnt; | |
e2933132 SRT |
3842 | u8 resp = 0xF; |
3843 | struct ufshcd_lrb *lrbp; | |
e9d501b1 | 3844 | u32 reg; |
7a3e97b0 SY |
3845 | |
3846 | host = cmd->device->host; | |
3847 | hba = shost_priv(host); | |
3848 | tag = cmd->request->tag; | |
3849 | ||
1ab27c9c | 3850 | ufshcd_hold(hba, false); |
f20810d8 SRT |
3851 | /* If command is already aborted/completed, return SUCCESS */ |
3852 | if (!(test_bit(tag, &hba->outstanding_reqs))) | |
3853 | goto out; | |
7a3e97b0 | 3854 | |
e9d501b1 DR |
3855 | reg = ufshcd_readl(hba, REG_UTP_TRANSFER_REQ_DOOR_BELL); |
3856 | if (!(reg & (1 << tag))) { | |
3857 | dev_err(hba->dev, | |
3858 | "%s: cmd was completed, but without a notifying intr, tag = %d", | |
3859 | __func__, tag); | |
3860 | } | |
3861 | ||
f20810d8 SRT |
3862 | lrbp = &hba->lrb[tag]; |
3863 | for (poll_cnt = 100; poll_cnt; poll_cnt--) { | |
3864 | err = ufshcd_issue_tm_cmd(hba, lrbp->lun, lrbp->task_tag, | |
3865 | UFS_QUERY_TASK, &resp); | |
3866 | if (!err && resp == UPIU_TASK_MANAGEMENT_FUNC_SUCCEEDED) { | |
3867 | /* cmd pending in the device */ | |
3868 | break; | |
3869 | } else if (!err && resp == UPIU_TASK_MANAGEMENT_FUNC_COMPL) { | |
f20810d8 SRT |
3870 | /* |
3871 | * cmd not pending in the device, check if it is | |
3872 | * in transition. | |
3873 | */ | |
3874 | reg = ufshcd_readl(hba, REG_UTP_TRANSFER_REQ_DOOR_BELL); | |
3875 | if (reg & (1 << tag)) { | |
3876 | /* sleep for max. 200us to stabilize */ | |
3877 | usleep_range(100, 200); | |
3878 | continue; | |
3879 | } | |
3880 | /* command completed already */ | |
3881 | goto out; | |
3882 | } else { | |
3883 | if (!err) | |
3884 | err = resp; /* service response error */ | |
3885 | goto out; | |
3886 | } | |
3887 | } | |
3888 | ||
3889 | if (!poll_cnt) { | |
3890 | err = -EBUSY; | |
7a3e97b0 SY |
3891 | goto out; |
3892 | } | |
7a3e97b0 | 3893 | |
e2933132 SRT |
3894 | err = ufshcd_issue_tm_cmd(hba, lrbp->lun, lrbp->task_tag, |
3895 | UFS_ABORT_TASK, &resp); | |
3896 | if (err || resp != UPIU_TASK_MANAGEMENT_FUNC_COMPL) { | |
f20810d8 SRT |
3897 | if (!err) |
3898 | err = resp; /* service response error */ | |
7a3e97b0 | 3899 | goto out; |
e2933132 | 3900 | } |
7a3e97b0 | 3901 | |
f20810d8 SRT |
3902 | err = ufshcd_clear_cmd(hba, tag); |
3903 | if (err) | |
3904 | goto out; | |
3905 | ||
7a3e97b0 SY |
3906 | scsi_dma_unmap(cmd); |
3907 | ||
3908 | spin_lock_irqsave(host->host_lock, flags); | |
7a3e97b0 SY |
3909 | __clear_bit(tag, &hba->outstanding_reqs); |
3910 | hba->lrb[tag].cmd = NULL; | |
3911 | spin_unlock_irqrestore(host->host_lock, flags); | |
5a0b0cb9 SRT |
3912 | |
3913 | clear_bit_unlock(tag, &hba->lrb_in_use); | |
3914 | wake_up(&hba->dev_cmd.tag_wq); | |
1ab27c9c | 3915 | |
7a3e97b0 | 3916 | out: |
f20810d8 SRT |
3917 | if (!err) { |
3918 | err = SUCCESS; | |
3919 | } else { | |
3920 | dev_err(hba->dev, "%s: failed with err %d\n", __func__, err); | |
3921 | err = FAILED; | |
3922 | } | |
3923 | ||
1ab27c9c ST |
3924 | /* |
3925 | * This ufshcd_release() corresponds to the original scsi cmd that got | |
3926 | * aborted here (as we won't get any IRQ for it). | |
3927 | */ | |
3928 | ufshcd_release(hba); | |
7a3e97b0 SY |
3929 | return err; |
3930 | } | |
3931 | ||
3441da7d SRT |
3932 | /** |
3933 | * ufshcd_host_reset_and_restore - reset and restore host controller | |
3934 | * @hba: per-adapter instance | |
3935 | * | |
3936 | * Note that host controller reset may issue DME_RESET to | |
3937 | * local and remote (device) Uni-Pro stack and the attributes | |
3938 | * are reset to default state. | |
3939 | * | |
3940 | * Returns zero on success, non-zero on failure | |
3941 | */ | |
3942 | static int ufshcd_host_reset_and_restore(struct ufs_hba *hba) | |
3943 | { | |
3944 | int err; | |
3441da7d SRT |
3945 | unsigned long flags; |
3946 | ||
3947 | /* Reset the host controller */ | |
3948 | spin_lock_irqsave(hba->host->host_lock, flags); | |
3949 | ufshcd_hba_stop(hba); | |
3950 | spin_unlock_irqrestore(hba->host->host_lock, flags); | |
3951 | ||
3952 | err = ufshcd_hba_enable(hba); | |
3953 | if (err) | |
3954 | goto out; | |
3955 | ||
3956 | /* Establish the link again and restore the device */ | |
1d337ec2 SRT |
3957 | err = ufshcd_probe_hba(hba); |
3958 | ||
3959 | if (!err && (hba->ufshcd_state != UFSHCD_STATE_OPERATIONAL)) | |
3441da7d SRT |
3960 | err = -EIO; |
3961 | out: | |
3962 | if (err) | |
3963 | dev_err(hba->dev, "%s: Host init failed %d\n", __func__, err); | |
3964 | ||
3965 | return err; | |
3966 | } | |
3967 | ||
3968 | /** | |
3969 | * ufshcd_reset_and_restore - reset and re-initialize host/device | |
3970 | * @hba: per-adapter instance | |
3971 | * | |
3972 | * Reset and recover device, host and re-establish link. This | |
3973 | * is helpful to recover the communication in fatal error conditions. | |
3974 | * | |
3975 | * Returns zero on success, non-zero on failure | |
3976 | */ | |
3977 | static int ufshcd_reset_and_restore(struct ufs_hba *hba) | |
3978 | { | |
3979 | int err = 0; | |
3980 | unsigned long flags; | |
1d337ec2 | 3981 | int retries = MAX_HOST_RESET_RETRIES; |
3441da7d | 3982 | |
1d337ec2 SRT |
3983 | do { |
3984 | err = ufshcd_host_reset_and_restore(hba); | |
3985 | } while (err && --retries); | |
3441da7d SRT |
3986 | |
3987 | /* | |
3988 | * After reset the door-bell might be cleared, complete | |
3989 | * outstanding requests in s/w here. | |
3990 | */ | |
3991 | spin_lock_irqsave(hba->host->host_lock, flags); | |
3992 | ufshcd_transfer_req_compl(hba); | |
3993 | ufshcd_tmc_handler(hba); | |
3994 | spin_unlock_irqrestore(hba->host->host_lock, flags); | |
3995 | ||
3996 | return err; | |
3997 | } | |
3998 | ||
3999 | /** | |
4000 | * ufshcd_eh_host_reset_handler - host reset handler registered to scsi layer | |
4001 | * @cmd: SCSI command pointer | |
4002 | * | |
4003 | * Returns SUCCESS/FAILED | |
4004 | */ | |
4005 | static int ufshcd_eh_host_reset_handler(struct scsi_cmnd *cmd) | |
4006 | { | |
4007 | int err; | |
4008 | unsigned long flags; | |
4009 | struct ufs_hba *hba; | |
4010 | ||
4011 | hba = shost_priv(cmd->device->host); | |
4012 | ||
1ab27c9c | 4013 | ufshcd_hold(hba, false); |
3441da7d SRT |
4014 | /* |
4015 | * Check if there is any race with fatal error handling. | |
4016 | * If so, wait for it to complete. Even though fatal error | |
4017 | * handling does reset and restore in some cases, don't assume | |
4018 | * anything out of it. We are just avoiding race here. | |
4019 | */ | |
4020 | do { | |
4021 | spin_lock_irqsave(hba->host->host_lock, flags); | |
e8e7f271 | 4022 | if (!(work_pending(&hba->eh_work) || |
3441da7d SRT |
4023 | hba->ufshcd_state == UFSHCD_STATE_RESET)) |
4024 | break; | |
4025 | spin_unlock_irqrestore(hba->host->host_lock, flags); | |
4026 | dev_dbg(hba->dev, "%s: reset in progress\n", __func__); | |
e8e7f271 | 4027 | flush_work(&hba->eh_work); |
3441da7d SRT |
4028 | } while (1); |
4029 | ||
4030 | hba->ufshcd_state = UFSHCD_STATE_RESET; | |
4031 | ufshcd_set_eh_in_progress(hba); | |
4032 | spin_unlock_irqrestore(hba->host->host_lock, flags); | |
4033 | ||
4034 | err = ufshcd_reset_and_restore(hba); | |
4035 | ||
4036 | spin_lock_irqsave(hba->host->host_lock, flags); | |
4037 | if (!err) { | |
4038 | err = SUCCESS; | |
4039 | hba->ufshcd_state = UFSHCD_STATE_OPERATIONAL; | |
4040 | } else { | |
4041 | err = FAILED; | |
4042 | hba->ufshcd_state = UFSHCD_STATE_ERROR; | |
4043 | } | |
4044 | ufshcd_clear_eh_in_progress(hba); | |
4045 | spin_unlock_irqrestore(hba->host->host_lock, flags); | |
4046 | ||
1ab27c9c | 4047 | ufshcd_release(hba); |
3441da7d SRT |
4048 | return err; |
4049 | } | |
4050 | ||
3a4bf06d YG |
4051 | /** |
4052 | * ufshcd_get_max_icc_level - calculate the ICC level | |
4053 | * @sup_curr_uA: max. current supported by the regulator | |
4054 | * @start_scan: row at the desc table to start scan from | |
4055 | * @buff: power descriptor buffer | |
4056 | * | |
4057 | * Returns calculated max ICC level for specific regulator | |
4058 | */ | |
4059 | static u32 ufshcd_get_max_icc_level(int sup_curr_uA, u32 start_scan, char *buff) | |
4060 | { | |
4061 | int i; | |
4062 | int curr_uA; | |
4063 | u16 data; | |
4064 | u16 unit; | |
4065 | ||
4066 | for (i = start_scan; i >= 0; i--) { | |
4067 | data = be16_to_cpu(*((u16 *)(buff + 2*i))); | |
4068 | unit = (data & ATTR_ICC_LVL_UNIT_MASK) >> | |
4069 | ATTR_ICC_LVL_UNIT_OFFSET; | |
4070 | curr_uA = data & ATTR_ICC_LVL_VALUE_MASK; | |
4071 | switch (unit) { | |
4072 | case UFSHCD_NANO_AMP: | |
4073 | curr_uA = curr_uA / 1000; | |
4074 | break; | |
4075 | case UFSHCD_MILI_AMP: | |
4076 | curr_uA = curr_uA * 1000; | |
4077 | break; | |
4078 | case UFSHCD_AMP: | |
4079 | curr_uA = curr_uA * 1000 * 1000; | |
4080 | break; | |
4081 | case UFSHCD_MICRO_AMP: | |
4082 | default: | |
4083 | break; | |
4084 | } | |
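		/*
		 * curr_uA is now normalized to microamps: e.g. a MILI_AMP
		 * entry with value 100 becomes 100 * 1000 = 100000 uA.
		 */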
4085 | if (sup_curr_uA >= curr_uA) | |
4086 | break; | |
4087 | } | |
4088 | if (i < 0) { | |
4089 | i = 0;
4090 | pr_err("%s: Couldn't find a valid icc_level, defaulting to %d", __func__, i); | |
4091 | } | |
4092 | ||
4093 | return (u32)i; | |
4094 | } | |
4095 | ||
4096 | /** | |
4097 | * ufshcd_find_max_sup_active_icc_level - calculate the max ICC level | |
4098 | * In case regulators are not initialized we'll return 0 | |
4099 | * @hba: per-adapter instance | |
4100 | * @desc_buf: power descriptor buffer to extract ICC levels from. | |
4101 | * @len: length of desc_buf | |
4102 | * | |
4103 | * Returns calculated ICC level | |
4104 | */ | |
4105 | static u32 ufshcd_find_max_sup_active_icc_level(struct ufs_hba *hba, | |
4106 | u8 *desc_buf, int len) | |
4107 | { | |
4108 | u32 icc_level = 0; | |
4109 | ||
4110 | if (!hba->vreg_info.vcc || !hba->vreg_info.vccq || | |
4111 | !hba->vreg_info.vccq2) { | |
4112 | dev_err(hba->dev, | |
4113 | "%s: Regulator capability was not set, actvIccLevel=%d", | |
4114 | __func__, icc_level); | |
4115 | goto out; | |
4116 | } | |
4117 | ||
4118 | if (hba->vreg_info.vcc) | |
4119 | icc_level = ufshcd_get_max_icc_level( | |
4120 | hba->vreg_info.vcc->max_uA, | |
4121 | POWER_DESC_MAX_ACTV_ICC_LVLS - 1, | |
4122 | &desc_buf[PWR_DESC_ACTIVE_LVLS_VCC_0]); | |
4123 | ||
4124 | if (hba->vreg_info.vccq) | |
4125 | icc_level = ufshcd_get_max_icc_level( | |
4126 | hba->vreg_info.vccq->max_uA, | |
4127 | icc_level, | |
4128 | &desc_buf[PWR_DESC_ACTIVE_LVLS_VCCQ_0]); | |
4129 | ||
4130 | if (hba->vreg_info.vccq2) | |
4131 | icc_level = ufshcd_get_max_icc_level( | |
4132 | hba->vreg_info.vccq2->max_uA, | |
4133 | icc_level, | |
4134 | &desc_buf[PWR_DESC_ACTIVE_LVLS_VCCQ2_0]); | |
4135 | out: | |
4136 | return icc_level; | |
4137 | } | |
4138 | ||
4139 | static void ufshcd_init_icc_levels(struct ufs_hba *hba) | |
4140 | { | |
4141 | int ret; | |
4142 | int buff_len = QUERY_DESC_POWER_MAX_SIZE; | |
4143 | u8 desc_buf[QUERY_DESC_POWER_MAX_SIZE]; | |
4144 | ||
4145 | ret = ufshcd_read_power_desc(hba, desc_buf, buff_len); | |
4146 | if (ret) { | |
4147 | dev_err(hba->dev, | |
4148 | "%s: Failed reading power descriptor.len = %d ret = %d", | |
4149 | __func__, buff_len, ret); | |
4150 | return; | |
4151 | } | |
4152 | ||
4153 | hba->init_prefetch_data.icc_level = | |
4154 | ufshcd_find_max_sup_active_icc_level(hba, | |
4155 | desc_buf, buff_len); | |
4156 | dev_dbg(hba->dev, "%s: setting icc_level 0x%x", | |
4157 | __func__, hba->init_prefetch_data.icc_level); | |
4158 | ||
4159 | ret = ufshcd_query_attr(hba, UPIU_QUERY_OPCODE_WRITE_ATTR, | |
4160 | QUERY_ATTR_IDN_ACTIVE_ICC_LVL, 0, 0, | |
4161 | &hba->init_prefetch_data.icc_level); | |
4162 | ||
4163 | if (ret) | |
4164 | dev_err(hba->dev, | |
4165 | "%s: Failed configuring bActiveICCLevel = %d ret = %d", | |
4166 | __func__, hba->init_prefetch_data.icc_level, ret); | |
4167 | ||
4168 | } | |
4169 | ||
2a8fa600 SJ |
4170 | /** |
4171 | * ufshcd_scsi_add_wlus - Adds required W-LUs | |
4172 | * @hba: per-adapter instance | |
4173 | * | |
4174 | * UFS device specification requires the UFS devices to support 4 well known | |
4175 | * logical units: | |
4176 | * "REPORT_LUNS" (address: 01h) | |
4177 | * "UFS Device" (address: 50h) | |
4178 | * "RPMB" (address: 44h) | |
4179 | * "BOOT" (address: 30h) | |
4180 | * UFS device's power management needs to be controlled by "POWER CONDITION" | |
4181 | * field of SSU (START STOP UNIT) command. But this "power condition" field | |
4182 | * will take effect only when it's sent to the "UFS device" well known logical | |
4183 | * hence we require the scsi_device instance to represent this logical unit in | |
4184 | * order for the UFS host driver to send the SSU command for power management. | |
4185 | * | |
4186 | * We also require the scsi_device instance for "RPMB" (Replay Protected Memory | |
4187 | * Block) LU so that a user space process can control this LU. User space may | |
4188 | * also want to have access to the BOOT LU. | |
4189 | * | |
4190 | * This function adds scsi device instances for each of the well known LUs | |
4191 | * (except "REPORT LUNS" LU). | |
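 *
 * Note: ufshcd_upiu_wlun_to_scsi_wlun() folds each UPIU W-LUN address into
 * the SCSI well-known LUN range, so e.g. the "UFS Device" W-LU at UPIU
 * address 0x50 is reported to the SCSI layer as (SCSI_W_LUN_BASE | 0x50).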
4192 | * | |
4193 | * Returns zero on success (all required W-LUs are added successfully), | |
4194 | * non-zero error value on failure (if failed to add any of the required W-LU). | |
4195 | */ | |
4196 | static int ufshcd_scsi_add_wlus(struct ufs_hba *hba) | |
4197 | { | |
4198 | int ret = 0; | |
7c48bfd0 AM |
4199 | struct scsi_device *sdev_rpmb; |
4200 | struct scsi_device *sdev_boot; | |
2a8fa600 SJ |
4201 | |
4202 | hba->sdev_ufs_device = __scsi_add_device(hba->host, 0, 0, | |
4203 | ufshcd_upiu_wlun_to_scsi_wlun(UFS_UPIU_UFS_DEVICE_WLUN), NULL); | |
4204 | if (IS_ERR(hba->sdev_ufs_device)) { | |
4205 | ret = PTR_ERR(hba->sdev_ufs_device); | |
4206 | hba->sdev_ufs_device = NULL; | |
4207 | goto out; | |
4208 | } | |
7c48bfd0 | 4209 | scsi_device_put(hba->sdev_ufs_device); |
2a8fa600 | 4210 | |
7c48bfd0 | 4211 | sdev_boot = __scsi_add_device(hba->host, 0, 0, |
2a8fa600 | 4212 | ufshcd_upiu_wlun_to_scsi_wlun(UFS_UPIU_BOOT_WLUN), NULL); |
7c48bfd0 AM |
4213 | if (IS_ERR(sdev_boot)) { |
4214 | ret = PTR_ERR(sdev_boot); | |
2a8fa600 SJ |
4215 | goto remove_sdev_ufs_device; |
4216 | } | |
7c48bfd0 | 4217 | scsi_device_put(sdev_boot); |
2a8fa600 | 4218 | |
7c48bfd0 | 4219 | sdev_rpmb = __scsi_add_device(hba->host, 0, 0, |
2a8fa600 | 4220 | ufshcd_upiu_wlun_to_scsi_wlun(UFS_UPIU_RPMB_WLUN), NULL); |
7c48bfd0 AM |
4221 | if (IS_ERR(sdev_rpmb)) { |
4222 | ret = PTR_ERR(sdev_rpmb); | |
2a8fa600 SJ |
4223 | goto remove_sdev_boot; |
4224 | } | |
7c48bfd0 | 4225 | scsi_device_put(sdev_rpmb); |
2a8fa600 SJ |
4226 | goto out; |
4227 | ||
4228 | remove_sdev_boot: | |
7c48bfd0 | 4229 | scsi_remove_device(sdev_boot); |
2a8fa600 SJ |
4230 | remove_sdev_ufs_device: |
4231 | scsi_remove_device(hba->sdev_ufs_device); | |
4232 | out: | |
4233 | return ret; | |
4234 | } | |
4235 | ||
6ccf44fe | 4236 | /** |
1d337ec2 SRT |
4237 | * ufshcd_probe_hba - probe hba to detect device and initialize |
4238 | * @hba: per-adapter instance | |
4239 | * | |
4240 | * Execute link-startup and verify device initialization | |
6ccf44fe | 4241 | */ |
1d337ec2 | 4242 | static int ufshcd_probe_hba(struct ufs_hba *hba) |
6ccf44fe | 4243 | { |
6ccf44fe SJ |
4244 | int ret; |
4245 | ||
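	/*
	 * Bring-up order: start the UniPro link, verify the device answers a
	 * NOP OUT, complete device init (fDeviceInit), then negotiate the
	 * fastest supported power mode before exposing LUs to the SCSI layer.
	 */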
4246 | ret = ufshcd_link_startup(hba); | |
5a0b0cb9 SRT |
4247 | if (ret) |
4248 | goto out; | |
4249 | ||
5064636c YG |
4250 | ufshcd_init_pwr_info(hba); |
4251 | ||
57d104c1 SJ |
4252 | /* UniPro link is active now */ |
4253 | ufshcd_set_link_active(hba); | |
d3e89bac | 4254 | |
5a0b0cb9 SRT |
4255 | ret = ufshcd_verify_dev_init(hba); |
4256 | if (ret) | |
4257 | goto out; | |
68078d5c DR |
4258 | |
4259 | ret = ufshcd_complete_dev_init(hba); | |
4260 | if (ret) | |
4261 | goto out; | |
5a0b0cb9 | 4262 | |
57d104c1 SJ |
4263 | /* UFS device is also active now */ |
4264 | ufshcd_set_ufs_dev_active(hba); | |
66ec6d59 | 4265 | ufshcd_force_reset_auto_bkops(hba); |
3441da7d | 4266 | hba->ufshcd_state = UFSHCD_STATE_OPERATIONAL; |
57d104c1 SJ |
4267 | hba->wlun_dev_clr_ua = true; |
4268 | ||
7eb584db DR |
4269 | if (ufshcd_get_max_pwr_mode(hba)) { |
4270 | dev_err(hba->dev, | |
4271 | "%s: Failed getting max supported power mode\n", | |
4272 | __func__); | |
4273 | } else { | |
4274 | ret = ufshcd_config_pwr_mode(hba, &hba->max_pwr_info.info); | |
4275 | if (ret) | |
4276 | dev_err(hba->dev, "%s: Failed setting power mode, err = %d\n", | |
4277 | __func__, ret); | |
4278 | } | |
57d104c1 SJ |
4279 | |
4280 | /* | |
4281 | * If we are in error handling context or in power management callbacks | |
4282 | * context, no need to scan the host | |
4283 | */ | |
4284 | if (!ufshcd_eh_in_progress(hba) && !hba->pm_op_in_progress) { | |
4285 | bool flag; | |
4286 | ||
4287 | /* clear any previous UFS device information */ | |
4288 | memset(&hba->dev_info, 0, sizeof(hba->dev_info)); | |
4289 | if (!ufshcd_query_flag(hba, UPIU_QUERY_OPCODE_READ_FLAG, | |
4290 | QUERY_FLAG_IDN_PWR_ON_WPE, &flag)) | |
4291 | hba->dev_info.f_power_on_wp_en = flag; | |
3441da7d | 4292 | |
3a4bf06d YG |
4293 | if (!hba->is_init_prefetch) |
4294 | ufshcd_init_icc_levels(hba); | |
4295 | ||
2a8fa600 SJ |
4296 | /* Add required well known logical units to scsi mid layer */ |
4297 | if (ufshcd_scsi_add_wlus(hba)) | |
4298 | goto out; | |
4299 | ||
3441da7d SRT |
4300 | scsi_scan_host(hba->host); |
4301 | pm_runtime_put_sync(hba->dev); | |
4302 | } | |
3a4bf06d YG |
4303 | |
4304 | if (!hba->is_init_prefetch) | |
4305 | hba->is_init_prefetch = true; | |
4306 | ||
856b3483 ST |
4307 | /* Resume devfreq after UFS device is detected */ |
4308 | if (ufshcd_is_clkscaling_enabled(hba)) | |
4309 | devfreq_resume_device(hba->devfreq); | |
4310 | ||
5a0b0cb9 | 4311 | out: |
1d337ec2 SRT |
4312 | /* |
4313 | * If we failed to initialize the device or the device is not | |
4314 | * present, turn off the power/clocks etc. | |
4315 | */ | |
57d104c1 SJ |
4316 | if (ret && !ufshcd_eh_in_progress(hba) && !hba->pm_op_in_progress) { |
4317 | pm_runtime_put_sync(hba->dev); | |
1d337ec2 | 4318 | ufshcd_hba_exit(hba); |
57d104c1 | 4319 | } |
1d337ec2 SRT |
4320 | |
4321 | return ret; | |
4322 | } | |
4323 | ||
4324 | /** | |
4325 | * ufshcd_async_scan - asynchronous execution for probing hba | |
4326 | * @data: data pointer to pass to this function | |
4327 | * @cookie: cookie data | |
4328 | */ | |
4329 | static void ufshcd_async_scan(void *data, async_cookie_t cookie) | |
4330 | { | |
4331 | struct ufs_hba *hba = (struct ufs_hba *)data; | |
4332 | ||
4333 | ufshcd_probe_hba(hba); | |
6ccf44fe SJ |
4334 | } |
4335 | ||
7a3e97b0 SY |
4336 | static struct scsi_host_template ufshcd_driver_template = { |
4337 | .module = THIS_MODULE, | |
4338 | .name = UFSHCD, | |
4339 | .proc_name = UFSHCD, | |
4340 | .queuecommand = ufshcd_queuecommand, | |
4341 | .slave_alloc = ufshcd_slave_alloc, | |
eeda4749 | 4342 | .slave_configure = ufshcd_slave_configure, |
7a3e97b0 | 4343 | .slave_destroy = ufshcd_slave_destroy, |
4264fd61 | 4344 | .change_queue_depth = ufshcd_change_queue_depth, |
7a3e97b0 | 4345 | .eh_abort_handler = ufshcd_abort, |
3441da7d SRT |
4346 | .eh_device_reset_handler = ufshcd_eh_device_reset_handler, |
4347 | .eh_host_reset_handler = ufshcd_eh_host_reset_handler, | |
7a3e97b0 SY |
4348 | .this_id = -1, |
4349 | .sg_tablesize = SG_ALL, | |
4350 | .cmd_per_lun = UFSHCD_CMD_PER_LUN, | |
4351 | .can_queue = UFSHCD_CAN_QUEUE, | |
1ab27c9c | 4352 | .max_host_blocked = 1, |
2ecb204d | 4353 | .use_blk_tags = 1, |
c40ecc12 | 4354 | .track_queue_depth = 1, |
7a3e97b0 SY |
4355 | }; |
4356 | ||
57d104c1 SJ |
4357 | static int ufshcd_config_vreg_load(struct device *dev, struct ufs_vreg *vreg, |
4358 | int ua) | |
4359 | { | |
7b16a07c | 4360 | int ret; |
57d104c1 | 4361 | |
7b16a07c BA |
4362 | if (!vreg) |
4363 | return 0; | |
57d104c1 | 4364 | |
7b16a07c BA |
4365 | ret = regulator_set_load(vreg->reg, ua); |
4366 | if (ret < 0) { | |
4367 | dev_err(dev, "%s: %s set load (ua=%d) failed, err=%d\n", | |
4368 | __func__, vreg->name, ua, ret); | |
57d104c1 SJ |
4369 | } |
4370 | ||
4371 | return ret; | |
4372 | } | |
4373 | ||
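/*
 * Two load points are used below: LPM parks a rail at the minimal
 * UFS_VREG_LPM_LOAD_UA current budget while the link sleeps, and HPM
 * restores the rail's own max_uA budget before traffic resumes.
 */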
4374 | static inline int ufshcd_config_vreg_lpm(struct ufs_hba *hba, | |
4375 | struct ufs_vreg *vreg) | |
4376 | { | |
4377 | return ufshcd_config_vreg_load(hba->dev, vreg, UFS_VREG_LPM_LOAD_UA); | |
4378 | } | |
4379 | ||
4380 | static inline int ufshcd_config_vreg_hpm(struct ufs_hba *hba, | |
4381 | struct ufs_vreg *vreg) | |
4382 | { | |
4383 | return ufshcd_config_vreg_load(hba->dev, vreg, vreg->max_uA); | |
4384 | } | |
4385 | ||
aa497613 SRT |
4386 | static int ufshcd_config_vreg(struct device *dev, |
4387 | struct ufs_vreg *vreg, bool on) | |
4388 | { | |
4389 | int ret = 0;
4390 | struct regulator *reg;
4391 | const char *name;
4392 | int min_uV, uA_load;
4393 |
4394 | BUG_ON(!vreg);
4395 | reg = vreg->reg;
4396 | name = vreg->name;
4395 | ||
4396 | if (regulator_count_voltages(reg) > 0) { | |
4397 | min_uV = on ? vreg->min_uV : 0; | |
4398 | ret = regulator_set_voltage(reg, min_uV, vreg->max_uV); | |
4399 | if (ret) { | |
4400 | dev_err(dev, "%s: %s set voltage failed, err=%d\n", | |
4401 | __func__, name, ret); | |
4402 | goto out; | |
4403 | } | |
4404 | ||
4405 | uA_load = on ? vreg->max_uA : 0; | |
57d104c1 SJ |
4406 | ret = ufshcd_config_vreg_load(dev, vreg, uA_load); |
4407 | if (ret) | |
aa497613 | 4408 | goto out; |
aa497613 SRT |
4409 | } |
4410 | out: | |
4411 | return ret; | |
4412 | } | |
4413 | ||
4414 | static int ufshcd_enable_vreg(struct device *dev, struct ufs_vreg *vreg) | |
4415 | { | |
4416 | int ret = 0; | |
4417 | ||
4418 | if (!vreg || vreg->enabled) | |
4419 | goto out; | |
4420 | ||
4421 | ret = ufshcd_config_vreg(dev, vreg, true); | |
4422 | if (!ret) | |
4423 | ret = regulator_enable(vreg->reg); | |
4424 | ||
4425 | if (!ret) | |
4426 | vreg->enabled = true; | |
4427 | else | |
4428 | dev_err(dev, "%s: %s enable failed, err=%d\n", | |
4429 | __func__, vreg->name, ret); | |
4430 | out: | |
4431 | return ret; | |
4432 | } | |
4433 | ||
4434 | static int ufshcd_disable_vreg(struct device *dev, struct ufs_vreg *vreg) | |
4435 | { | |
4436 | int ret = 0; | |
4437 | ||
4438 | if (!vreg || !vreg->enabled) | |
4439 | goto out; | |
4440 | ||
4441 | ret = regulator_disable(vreg->reg); | |
4442 | ||
4443 | if (!ret) { | |
4444 | /* ignore errors on applying disable config */ | |
4445 | ufshcd_config_vreg(dev, vreg, false); | |
4446 | vreg->enabled = false; | |
4447 | } else { | |
4448 | dev_err(dev, "%s: %s disable failed, err=%d\n", | |
4449 | __func__, vreg->name, ret); | |
4450 | } | |
4451 | out: | |
4452 | return ret; | |
4453 | } | |
4454 | ||
4455 | static int ufshcd_setup_vreg(struct ufs_hba *hba, bool on) | |
4456 | { | |
4457 | int ret = 0; | |
4458 | struct device *dev = hba->dev; | |
4459 | struct ufs_vreg_info *info = &hba->vreg_info; | |
4460 | ||
4461 | if (!info) | |
4462 | goto out; | |
4463 | ||
4464 | ret = ufshcd_toggle_vreg(dev, info->vcc, on); | |
4465 | if (ret) | |
4466 | goto out; | |
4467 | ||
4468 | ret = ufshcd_toggle_vreg(dev, info->vccq, on); | |
4469 | if (ret) | |
4470 | goto out; | |
4471 | ||
4472 | ret = ufshcd_toggle_vreg(dev, info->vccq2, on); | |
4473 | if (ret) | |
4474 | goto out; | |
4475 | ||
4476 | out: | |
4477 | if (ret) { | |
4478 | ufshcd_toggle_vreg(dev, info->vccq2, false); | |
4479 | ufshcd_toggle_vreg(dev, info->vccq, false); | |
4480 | ufshcd_toggle_vreg(dev, info->vcc, false); | |
4481 | } | |
4482 | return ret; | |
4483 | } | |
4484 | ||
6a771a65 RS |
4485 | static int ufshcd_setup_hba_vreg(struct ufs_hba *hba, bool on) |
4486 | { | |
4487 | struct ufs_vreg_info *info = &hba->vreg_info; | |
4488 | ||
4489 | if (info) | |
4490 | return ufshcd_toggle_vreg(hba->dev, info->vdd_hba, on); | |
4491 | ||
4492 | return 0; | |
4493 | } | |
4494 | ||
aa497613 SRT |
4495 | static int ufshcd_get_vreg(struct device *dev, struct ufs_vreg *vreg) |
4496 | { | |
4497 | int ret = 0; | |
4498 | ||
4499 | if (!vreg) | |
4500 | goto out; | |
4501 | ||
4502 | vreg->reg = devm_regulator_get(dev, vreg->name); | |
4503 | if (IS_ERR(vreg->reg)) { | |
4504 | ret = PTR_ERR(vreg->reg); | |
4505 | dev_err(dev, "%s: %s get failed, err=%d\n", | |
4506 | __func__, vreg->name, ret); | |
4507 | } | |
4508 | out: | |
4509 | return ret; | |
4510 | } | |
4511 | ||
4512 | static int ufshcd_init_vreg(struct ufs_hba *hba) | |
4513 | { | |
4514 | int ret = 0; | |
4515 | struct device *dev = hba->dev; | |
4516 | struct ufs_vreg_info *info = &hba->vreg_info; | |
4517 | ||
4518 | if (!info) | |
4519 | goto out; | |
4520 | ||
4521 | ret = ufshcd_get_vreg(dev, info->vcc); | |
4522 | if (ret) | |
4523 | goto out; | |
4524 | ||
4525 | ret = ufshcd_get_vreg(dev, info->vccq); | |
4526 | if (ret) | |
4527 | goto out; | |
4528 | ||
4529 | ret = ufshcd_get_vreg(dev, info->vccq2); | |
4530 | out: | |
4531 | return ret; | |
4532 | } | |
4533 | ||
6a771a65 RS |
4534 | static int ufshcd_init_hba_vreg(struct ufs_hba *hba) |
4535 | { | |
4536 | struct ufs_vreg_info *info = &hba->vreg_info; | |
4537 | ||
4538 | if (info) | |
4539 | return ufshcd_get_vreg(hba->dev, info->vdd_hba); | |
4540 | ||
4541 | return 0; | |
4542 | } | |
4543 | ||
57d104c1 SJ |
4544 | static int __ufshcd_setup_clocks(struct ufs_hba *hba, bool on, |
4545 | bool skip_ref_clk) | |
c6e79dac SRT |
4546 | { |
4547 | int ret = 0; | |
4548 | struct ufs_clk_info *clki; | |
4549 | struct list_head *head = &hba->clk_list_head; | |
1ab27c9c | 4550 | unsigned long flags; |
c6e79dac SRT |
4551 | |
4552 | if (!head || list_empty(head)) | |
4553 | goto out; | |
4554 | ||
4555 | list_for_each_entry(clki, head, list) { | |
4556 | if (!IS_ERR_OR_NULL(clki->clk)) { | |
57d104c1 SJ |
4557 | if (skip_ref_clk && !strcmp(clki->name, "ref_clk")) |
4558 | continue; | |
4559 | ||
c6e79dac SRT |
4560 | if (on && !clki->enabled) { |
4561 | ret = clk_prepare_enable(clki->clk); | |
4562 | if (ret) { | |
4563 | dev_err(hba->dev, "%s: %s prepare enable failed, %d\n", | |
4564 | __func__, clki->name, ret); | |
4565 | goto out; | |
4566 | } | |
4567 | } else if (!on && clki->enabled) { | |
4568 | clk_disable_unprepare(clki->clk); | |
4569 | } | |
4570 | clki->enabled = on; | |
4571 | dev_dbg(hba->dev, "%s: clk: %s %sabled\n", __func__, | |
4572 | clki->name, on ? "en" : "dis"); | |
4573 | } | |
4574 | } | |
1ab27c9c ST |
4575 | |
4576 | if (hba->vops && hba->vops->setup_clocks) | |
4577 | ret = hba->vops->setup_clocks(hba, on); | |
c6e79dac SRT |
4578 | out: |
4579 | if (ret) { | |
4580 | list_for_each_entry(clki, head, list) { | |
4581 | if (!IS_ERR_OR_NULL(clki->clk) && clki->enabled) | |
4582 | clk_disable_unprepare(clki->clk); | |
4583 | } | |
eda910e4 | 4584 | } else if (on) { |
1ab27c9c ST |
4585 | spin_lock_irqsave(hba->host->host_lock, flags); |
4586 | hba->clk_gating.state = CLKS_ON; | |
4587 | spin_unlock_irqrestore(hba->host->host_lock, flags); | |
c6e79dac SRT |
4588 | } |
4589 | return ret; | |
4590 | } | |
4591 | ||
57d104c1 SJ |
4592 | static int ufshcd_setup_clocks(struct ufs_hba *hba, bool on) |
4593 | { | |
4594 | return __ufshcd_setup_clocks(hba, on, false); | |
4595 | } | |
4596 | ||
c6e79dac SRT |
4597 | static int ufshcd_init_clocks(struct ufs_hba *hba) |
4598 | { | |
4599 | int ret = 0; | |
4600 | struct ufs_clk_info *clki; | |
4601 | struct device *dev = hba->dev; | |
4602 | struct list_head *head = &hba->clk_list_head; | |
4603 | ||
4604 | if (!head || list_empty(head)) | |
4605 | goto out; | |
4606 | ||
4607 | list_for_each_entry(clki, head, list) { | |
4608 | if (!clki->name) | |
4609 | continue; | |
4610 | ||
4611 | clki->clk = devm_clk_get(dev, clki->name); | |
4612 | if (IS_ERR(clki->clk)) { | |
4613 | ret = PTR_ERR(clki->clk); | |
4614 | dev_err(dev, "%s: %s clk get failed, %d\n", | |
4615 | __func__, clki->name, ret); | |
4616 | goto out; | |
4617 | } | |
4618 | ||
4619 | if (clki->max_freq) { | |
4620 | ret = clk_set_rate(clki->clk, clki->max_freq); | |
4621 | if (ret) { | |
4622 | dev_err(hba->dev, "%s: %s clk set rate(%dHz) failed, %d\n", | |
4623 | __func__, clki->name, | |
4624 | clki->max_freq, ret); | |
4625 | goto out; | |
4626 | } | |
856b3483 | 4627 | clki->curr_freq = clki->max_freq; |
c6e79dac SRT |
4628 | } |
4629 | dev_dbg(dev, "%s: clk: %s, rate: %lu\n", __func__, | |
4630 | clki->name, clk_get_rate(clki->clk)); | |
4631 | } | |
4632 | out: | |
4633 | return ret; | |
4634 | } | |
4635 | ||
5c0c28a8 SRT |
4636 | static int ufshcd_variant_hba_init(struct ufs_hba *hba) |
4637 | { | |
4638 | int err = 0; | |
4639 | ||
4640 | if (!hba->vops) | |
4641 | goto out; | |
4642 | ||
4643 | if (hba->vops->init) { | |
4644 | err = hba->vops->init(hba); | |
4645 | if (err) | |
4646 | goto out; | |
4647 | } | |
4648 | ||
5c0c28a8 SRT |
4649 | if (hba->vops->setup_regulators) { |
4650 | err = hba->vops->setup_regulators(hba, true); | |
4651 | if (err) | |
1ab27c9c | 4652 | goto out_exit; |
5c0c28a8 SRT |
4653 | } |
4654 | ||
4655 | goto out; | |
4656 | ||
5c0c28a8 SRT |
4657 | out_exit: |
4658 | if (hba->vops->exit) | |
4659 | hba->vops->exit(hba); | |
4660 | out: | |
4661 | if (err) | |
4662 | dev_err(hba->dev, "%s: variant %s init failed err %d\n", | |
4663 | __func__, hba->vops ? hba->vops->name : "", err); | |
4664 | return err; | |
4665 | } | |
4666 | ||
4667 | static void ufshcd_variant_hba_exit(struct ufs_hba *hba) | |
4668 | { | |
4669 | if (!hba->vops) | |
4670 | return; | |
4671 | ||
4672 | if (hba->vops->setup_clocks) | |
4673 | hba->vops->setup_clocks(hba, false); | |
4674 | ||
4675 | if (hba->vops->setup_regulators) | |
4676 | hba->vops->setup_regulators(hba, false); | |
4677 | ||
4678 | if (hba->vops->exit) | |
4679 | hba->vops->exit(hba); | |
4680 | } | |
4681 | ||
aa497613 SRT |
4682 | static int ufshcd_hba_init(struct ufs_hba *hba) |
4683 | { | |
4684 | int err; | |
4685 | ||
6a771a65 RS |
4686 | /* |
4687 | * Handle host controller power separately from the UFS device power | |
4688 | * rails, as this makes it easier to control UFS host controller power | |
4689 | * collapse, which is different from UFS device power collapse. | |
4690 | * Also, enable the host controller power before going ahead with the | |
4691 | * rest of the initialization here. | |
4692 | */ | |
4693 | err = ufshcd_init_hba_vreg(hba); | |
aa497613 SRT |
4694 | if (err) |
4695 | goto out; | |
4696 | ||
6a771a65 | 4697 | err = ufshcd_setup_hba_vreg(hba, true); |
aa497613 SRT |
4698 | if (err) |
4699 | goto out; | |
4700 | ||
6a771a65 RS |
4701 | err = ufshcd_init_clocks(hba); |
4702 | if (err) | |
4703 | goto out_disable_hba_vreg; | |
4704 | ||
4705 | err = ufshcd_setup_clocks(hba, true); | |
4706 | if (err) | |
4707 | goto out_disable_hba_vreg; | |
4708 | ||
c6e79dac SRT |
4709 | err = ufshcd_init_vreg(hba); |
4710 | if (err) | |
4711 | goto out_disable_clks; | |
4712 | ||
4713 | err = ufshcd_setup_vreg(hba, true); | |
4714 | if (err) | |
4715 | goto out_disable_clks; | |
4716 | ||
aa497613 SRT |
4717 | err = ufshcd_variant_hba_init(hba); |
4718 | if (err) | |
4719 | goto out_disable_vreg; | |
4720 | ||
1d337ec2 | 4721 | hba->is_powered = true; |
aa497613 SRT |
4722 | goto out; |
4723 | ||
4724 | out_disable_vreg: | |
4725 | ufshcd_setup_vreg(hba, false); | |
c6e79dac SRT |
4726 | out_disable_clks: |
4727 | ufshcd_setup_clocks(hba, false); | |
6a771a65 RS |
4728 | out_disable_hba_vreg: |
4729 | ufshcd_setup_hba_vreg(hba, false); | |
aa497613 SRT |
4730 | out: |
4731 | return err; | |
4732 | } | |
4733 | ||
4734 | static void ufshcd_hba_exit(struct ufs_hba *hba) | |
4735 | { | |
1d337ec2 SRT |
4736 | if (hba->is_powered) { |
4737 | ufshcd_variant_hba_exit(hba); | |
4738 | ufshcd_setup_vreg(hba, false); | |
4739 | ufshcd_setup_clocks(hba, false); | |
4740 | ufshcd_setup_hba_vreg(hba, false); | |
4741 | hba->is_powered = false; | |
4742 | } | |
aa497613 SRT |
4743 | } |
4744 | ||
57d104c1 SJ |
4745 | static int |
4746 | ufshcd_send_request_sense(struct ufs_hba *hba, struct scsi_device *sdp) | |
4747 | { | |
4748 | unsigned char cmd[6] = {REQUEST_SENSE, | |
4749 | 0, | |
4750 | 0, | |
4751 | 0, | |
4752 | SCSI_SENSE_BUFFERSIZE, | |
4753 | 0}; | |
4754 | char *buffer; | |
4755 | int ret; | |
4756 | ||
4757 | buffer = kzalloc(SCSI_SENSE_BUFFERSIZE, GFP_KERNEL); | |
4758 | if (!buffer) { | |
4759 | ret = -ENOMEM; | |
4760 | goto out; | |
4761 | } | |
4762 | ||
4763 | ret = scsi_execute_req_flags(sdp, cmd, DMA_FROM_DEVICE, buffer, | |
4764 | SCSI_SENSE_BUFFERSIZE, NULL, | |
4765 | msecs_to_jiffies(1000), 3, NULL, REQ_PM); | |
4766 | if (ret) | |
4767 | pr_err("%s: failed with err %d\n", __func__, ret); | |
4768 | ||
4769 | kfree(buffer); | |
4770 | out: | |
4771 | return ret; | |
4772 | } | |
4773 | ||
4774 | /** | |
4775 | * ufshcd_set_dev_pwr_mode - sends START STOP UNIT command to set device | |
4776 | * power mode | |
4777 | * @hba: per adapter instance | |
4778 | * @pwr_mode: device power mode to set | |
4779 | * | |
4780 | * Returns 0 if requested power mode is set successfully | |
4781 | * Returns non-zero if failed to set the requested power mode | |
4782 | */ | |
4783 | static int ufshcd_set_dev_pwr_mode(struct ufs_hba *hba, | |
4784 | enum ufs_dev_pwr_mode pwr_mode) | |
4785 | { | |
4786 | unsigned char cmd[6] = { START_STOP }; | |
4787 | struct scsi_sense_hdr sshdr; | |
7c48bfd0 AM |
4788 | struct scsi_device *sdp; |
4789 | unsigned long flags; | |
57d104c1 SJ |
4790 | int ret; |
4791 | ||
7c48bfd0 AM |
4792 | spin_lock_irqsave(hba->host->host_lock, flags); |
4793 | sdp = hba->sdev_ufs_device; | |
4794 | if (sdp) { | |
4795 | ret = scsi_device_get(sdp); | |
4796 | if (!ret && !scsi_device_online(sdp)) { | |
4797 | ret = -ENODEV; | |
4798 | scsi_device_put(sdp); | |
4799 | } | |
4800 | } else { | |
4801 | ret = -ENODEV; | |
4802 | } | |
4803 | spin_unlock_irqrestore(hba->host->host_lock, flags); | |
4804 | ||
4805 | if (ret) | |
4806 | return ret; | |
57d104c1 SJ |
4807 | |
4808 | /* | |
4809 | * If scsi commands fail, the scsi mid-layer schedules scsi error- | |
4810 | * handling, which would wait for the host to be resumed. Since we know | |
4811 | * we are functional while we are here, skip host resume in error | |
4812 | * handling context. | |
4813 | */ | |
4814 | hba->host->eh_noresume = 1; | |
4815 | if (hba->wlun_dev_clr_ua) { | |
4816 | ret = ufshcd_send_request_sense(hba, sdp); | |
4817 | if (ret) | |
4818 | goto out; | |
4819 | /* Unit attention condition is cleared now */ | |
4820 | hba->wlun_dev_clr_ua = false; | |
4821 | } | |
4822 | ||
4823 | cmd[4] = pwr_mode << 4; | |
4824 | ||
4825 | /* | |
4826 | * This function is generally called from the power management | |
4827 | * callbacks, hence set the REQ_PM flag so that it doesn't resume the | |
4828 | * already suspended children. | |
4829 | */ | |
4830 | ret = scsi_execute_req_flags(sdp, cmd, DMA_NONE, NULL, 0, &sshdr, | |
4831 | START_STOP_TIMEOUT, 0, NULL, REQ_PM); | |
4832 | if (ret) { | |
4833 | sdev_printk(KERN_WARNING, sdp, | |
ef61329d HR |
4834 | "START_STOP failed for power mode: %d, result %x\n", |
4835 | pwr_mode, ret); | |
21045519 HR |
4836 | if (driver_byte(ret) & DRIVER_SENSE) |
4837 | scsi_print_sense_hdr(sdp, NULL, &sshdr); | |
57d104c1 SJ |
4838 | } |
4839 | ||
4840 | if (!ret) | |
4841 | hba->curr_dev_pwr_mode = pwr_mode; | |
4842 | out: | |
7c48bfd0 | 4843 | scsi_device_put(sdp); |
57d104c1 SJ |
4844 | hba->host->eh_noresume = 0; |
4845 | return ret; | |
4846 | } | |
4847 | ||
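/*
 * Sketch of the CDB built by ufshcd_set_dev_pwr_mode() above (a reading
 * aid, not additional driver code): the requested UFS device power mode
 * travels in the POWER CONDITION field, bits 7:4 of byte 4 of the
 * START STOP UNIT CDB, hence cmd[4] = pwr_mode << 4. Assuming the
 * ufs_dev_pwr_mode values from ufs.h (active = 1, sleep = 2,
 * powerdown = 3), the resulting CDBs look like:
 *
 *	{ START_STOP, 0, 0, 0, 0x20, 0 }	UFS_Sleep
 *	{ START_STOP, 0, 0, 0, 0x30, 0 }	UFS_PowerDown
 */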
4848 | static int ufshcd_link_state_transition(struct ufs_hba *hba, | |
4849 | enum uic_link_state req_link_state, | |
4850 | int check_for_bkops) | |
4851 | { | |
4852 | int ret = 0; | |
4853 | ||
4854 | if (req_link_state == hba->uic_link_state) | |
4855 | return 0; | |
4856 | ||
4857 | if (req_link_state == UIC_LINK_HIBERN8_STATE) { | |
4858 | ret = ufshcd_uic_hibern8_enter(hba); | |
4859 | if (!ret) | |
4860 | ufshcd_set_link_hibern8(hba); | |
4861 | else | |
4862 | goto out; | |
4863 | } | |
4864 | /* | |
4865 | * If autobkops is enabled, link can't be turned off because | |
4866 | * turning off the link would also turn off the device. | |
4867 | */ | |
4868 | else if ((req_link_state == UIC_LINK_OFF_STATE) && | |
4869 | (!check_for_bkops || | |
4870 | !hba->auto_bkops_enabled)) { | |
4871 | /* | |
4872 | * Change controller state to "reset state" which | |
4873 | * should also put the link in off/reset state | |
4874 | */ | |
4875 | ufshcd_hba_stop(hba); | |
4876 | /* | |
4877 | * TODO: Check if we need any delay to make sure that | |
4878 | * controller is reset | |
4879 | */ | |
4880 | ufshcd_set_link_off(hba); | |
4881 | } | |
4882 | ||
4883 | out: | |
4884 | return ret; | |
4885 | } | |
4886 | ||
4887 | static void ufshcd_vreg_set_lpm(struct ufs_hba *hba) | |
4888 | { | |
4889 | /* | |
4890 | * If the UFS device is in UFS_Sleep state, turn off the VCC rail to | |
4891 | * save some power. | |
4892 | * | |
4893 | * If the UFS device and link are in OFF state, all power supplies (VCC, | |
4894 | * VCCQ, VCCQ2) can be turned off if power on write protect is not | |
4895 | * required. If the UFS link is inactive (Hibern8 or OFF state) and the | |
4896 | * device is in sleep state, put the VCCQ & VCCQ2 rails in LPM mode. | |
4897 | * | |
4898 | * Ignore the error returned by ufshcd_toggle_vreg() as device is anyway | |
4899 | * in low power state which would save some power. | |
4900 | */ | |
4901 | if (ufshcd_is_ufs_dev_poweroff(hba) && ufshcd_is_link_off(hba) && | |
4902 | !hba->dev_info.is_lu_power_on_wp) { | |
4903 | ufshcd_setup_vreg(hba, false); | |
4904 | } else if (!ufshcd_is_ufs_dev_active(hba)) { | |
4905 | ufshcd_toggle_vreg(hba->dev, hba->vreg_info.vcc, false); | |
4906 | if (!ufshcd_is_link_active(hba)) { | |
4907 | ufshcd_config_vreg_lpm(hba, hba->vreg_info.vccq); | |
4908 | ufshcd_config_vreg_lpm(hba, hba->vreg_info.vccq2); | |
4909 | } | |
4910 | } | |
4911 | } | |
4912 | ||
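/*
 * Quick-reference summary of the rail policy implemented by
 * ufshcd_vreg_set_lpm() above (a sketch derived from the code and its
 * comments):
 *
 *	device state	link state	VCC	VCCQ/VCCQ2
 *	PowerOff	OFF (no WP LU)	off	off
 *	Sleep/PowerOff	Active		off	unchanged
 *	Sleep/PowerOff	Hibern8/OFF	off	LPM load
 *	Active		any		unchanged
 */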
4913 | static int ufshcd_vreg_set_hpm(struct ufs_hba *hba) | |
4914 | { | |
4915 | int ret = 0; | |
4916 | ||
4917 | if (ufshcd_is_ufs_dev_poweroff(hba) && ufshcd_is_link_off(hba) && | |
4918 | !hba->dev_info.is_lu_power_on_wp) { | |
4919 | ret = ufshcd_setup_vreg(hba, true); | |
4920 | } else if (!ufshcd_is_ufs_dev_active(hba)) { | |
4921 | ret = ufshcd_toggle_vreg(hba->dev, hba->vreg_info.vcc, true); | |
4922 | if (!ret && !ufshcd_is_link_active(hba)) { | |
4923 | ret = ufshcd_config_vreg_hpm(hba, hba->vreg_info.vccq); | |
4924 | if (ret) | |
4925 | goto vcc_disable; | |
4926 | ret = ufshcd_config_vreg_hpm(hba, hba->vreg_info.vccq2); | |
4927 | if (ret) | |
4928 | goto vccq_lpm; | |
4929 | } | |
4930 | } | |
4931 | goto out; | |
4932 | ||
4933 | vccq_lpm: | |
4934 | ufshcd_config_vreg_lpm(hba, hba->vreg_info.vccq); | |
4935 | vcc_disable: | |
4936 | ufshcd_toggle_vreg(hba->dev, hba->vreg_info.vcc, false); | |
4937 | out: | |
4938 | return ret; | |
4939 | } | |
4940 | ||
4941 | static void ufshcd_hba_vreg_set_lpm(struct ufs_hba *hba) | |
4942 | { | |
4943 | if (ufshcd_is_link_off(hba)) | |
4944 | ufshcd_setup_hba_vreg(hba, false); | |
4945 | } | |
4946 | ||
4947 | static void ufshcd_hba_vreg_set_hpm(struct ufs_hba *hba) | |
4948 | { | |
4949 | if (ufshcd_is_link_off(hba)) | |
4950 | ufshcd_setup_hba_vreg(hba, true); | |
4951 | } | |
4952 | ||
7a3e97b0 | 4953 | /** |
57d104c1 | 4954 | * ufshcd_suspend - helper function for suspend operations |
3b1d0580 | 4955 | * @hba: per adapter instance |
57d104c1 SJ |
4956 | * @pm_op: desired low power operation type |
4957 | * | |
4958 | * This function will try to put the UFS device and link into low power | |
4959 | * mode based on the "rpm_lvl" (Runtime PM level) or "spm_lvl" | |
4960 | * (System PM level). | |
4961 | * | |
4962 | * If this function is called during shutdown, it will make sure that | |
4963 | * both the UFS device and UFS link are powered off. | |
7a3e97b0 | 4964 | * |
57d104c1 SJ |
4965 | * NOTE: UFS device & link must be active before entering this function. | |
4966 | * | |
4967 | * Returns 0 for success and non-zero for failure | |
7a3e97b0 | 4968 | */ |
57d104c1 | 4969 | static int ufshcd_suspend(struct ufs_hba *hba, enum ufs_pm_op pm_op) |
7a3e97b0 | 4970 | { |
57d104c1 SJ |
4971 | int ret = 0; |
4972 | enum ufs_pm_level pm_lvl; | |
4973 | enum ufs_dev_pwr_mode req_dev_pwr_mode; | |
4974 | enum uic_link_state req_link_state; | |
4975 | ||
4976 | hba->pm_op_in_progress = 1; | |
4977 | if (!ufshcd_is_shutdown_pm(pm_op)) { | |
4978 | pm_lvl = ufshcd_is_runtime_pm(pm_op) ? | |
4979 | hba->rpm_lvl : hba->spm_lvl; | |
4980 | req_dev_pwr_mode = ufs_get_pm_lvl_to_dev_pwr_mode(pm_lvl); | |
4981 | req_link_state = ufs_get_pm_lvl_to_link_pwr_state(pm_lvl); | |
4982 | } else { | |
4983 | req_dev_pwr_mode = UFS_POWERDOWN_PWR_MODE; | |
4984 | req_link_state = UIC_LINK_OFF_STATE; | |
4985 | } | |
4986 | ||
7a3e97b0 | 4987 | /* |
57d104c1 SJ |
4988 | * If we can't transition into any of the low power modes |
4989 | * just gate the clocks. | |
7a3e97b0 | 4990 | */ |
1ab27c9c ST |
4991 | ufshcd_hold(hba, false); |
4992 | hba->clk_gating.is_suspended = true; | |
4993 | ||
57d104c1 SJ |
4994 | if (req_dev_pwr_mode == UFS_ACTIVE_PWR_MODE && |
4995 | req_link_state == UIC_LINK_ACTIVE_STATE) { | |
4996 | goto disable_clks; | |
4997 | } | |
7a3e97b0 | 4998 | |
57d104c1 SJ |
4999 | if ((req_dev_pwr_mode == hba->curr_dev_pwr_mode) && |
5000 | (req_link_state == hba->uic_link_state)) | |
5001 | goto out; | |
5002 | ||
5003 | /* UFS device & link must be active before we enter in this function */ | |
5004 | if (!ufshcd_is_ufs_dev_active(hba) || !ufshcd_is_link_active(hba)) { | |
5005 | ret = -EINVAL; | |
5006 | goto out; | |
5007 | } | |
5008 | ||
5009 | if (ufshcd_is_runtime_pm(pm_op)) { | |
374a246e SJ |
5010 | if (ufshcd_can_autobkops_during_suspend(hba)) { |
5011 | /* | |
5012 | * The device is idle with no requests in the queue, | |
5013 | * allow background operations if bkops status shows | |
5014 | * that performance might be impacted. | |
5015 | */ | |
5016 | ret = ufshcd_urgent_bkops(hba); | |
5017 | if (ret) | |
5018 | goto enable_gating; | |
5019 | } else { | |
5020 | /* make sure that auto bkops is disabled */ | |
5021 | ufshcd_disable_auto_bkops(hba); | |
5022 | } | |
57d104c1 SJ |
5023 | } |
5024 | ||
5025 | if ((req_dev_pwr_mode != hba->curr_dev_pwr_mode) && | |
5026 | (!ufshcd_is_runtime_pm(pm_op) || | |
5027 | !hba->auto_bkops_enabled)) { | |
5028 | /* ensure that bkops is disabled */ | |
5029 | ufshcd_disable_auto_bkops(hba); | |
5030 | ret = ufshcd_set_dev_pwr_mode(hba, req_dev_pwr_mode); | |
5031 | if (ret) | |
1ab27c9c | 5032 | goto enable_gating; |
57d104c1 SJ |
5033 | } |
5034 | ||
5035 | ret = ufshcd_link_state_transition(hba, req_link_state, 1); | |
5036 | if (ret) | |
5037 | goto set_dev_active; | |
5038 | ||
5039 | ufshcd_vreg_set_lpm(hba); | |
5040 | ||
5041 | disable_clks: | |
856b3483 ST |
5042 | /* |
5043 | * The clock scaling needs access to controller registers. Hence, wait | |
5044 | * for pending clock scaling work to be done before clocks are | |
5045 | * turned off. | |
5046 | */ | |
5047 | if (ufshcd_is_clkscaling_enabled(hba)) { | |
5048 | devfreq_suspend_device(hba->devfreq); | |
5049 | hba->clk_scaling.window_start_t = 0; | |
5050 | } | |
57d104c1 SJ |
5051 | /* |
5052 | * Call vendor specific suspend callback. As these callbacks may access | |
5053 | * vendor specific host controller register space, call them while the | |
5054 | * host clocks are still ON. | |
5055 | */ | |
5056 | if (hba->vops && hba->vops->suspend) { | |
5057 | ret = hba->vops->suspend(hba, pm_op); | |
5058 | if (ret) | |
5059 | goto set_link_active; | |
5060 | } | |
5061 | ||
5062 | if (hba->vops && hba->vops->setup_clocks) { | |
5063 | ret = hba->vops->setup_clocks(hba, false); | |
5064 | if (ret) | |
5065 | goto vops_resume; | |
5066 | } | |
5067 | ||
5068 | if (!ufshcd_is_link_active(hba)) | |
5069 | ufshcd_setup_clocks(hba, false); | |
5070 | else | |
5071 | /* If link is active, device ref_clk can't be switched off */ | |
5072 | __ufshcd_setup_clocks(hba, false, true); | |
5073 | ||
1ab27c9c | 5074 | hba->clk_gating.state = CLKS_OFF; |
57d104c1 SJ |
5075 | /* |
5076 | * Disable the host irq as there won't be any host | |
5077 | * controller transactions expected till resume. | |
5078 | */ | |
5079 | ufshcd_disable_irq(hba); | |
5080 | /* Put the host controller in low power mode if possible */ | |
5081 | ufshcd_hba_vreg_set_lpm(hba); | |
5082 | goto out; | |
5083 | ||
5084 | vops_resume: | |
5085 | if (hba->vops && hba->vops->resume) | |
5086 | hba->vops->resume(hba, pm_op); | |
5087 | set_link_active: | |
5088 | ufshcd_vreg_set_hpm(hba); | |
5089 | if (ufshcd_is_link_hibern8(hba) && !ufshcd_uic_hibern8_exit(hba)) | |
5090 | ufshcd_set_link_active(hba); | |
5091 | else if (ufshcd_is_link_off(hba)) | |
5092 | ufshcd_host_reset_and_restore(hba); | |
5093 | set_dev_active: | |
5094 | if (!ufshcd_set_dev_pwr_mode(hba, UFS_ACTIVE_PWR_MODE)) | |
5095 | ufshcd_disable_auto_bkops(hba); | |
1ab27c9c ST |
5096 | enable_gating: |
5097 | hba->clk_gating.is_suspended = false; | |
5098 | ufshcd_release(hba); | |
57d104c1 SJ |
5099 | out: |
5100 | hba->pm_op_in_progress = 0; | |
5101 | return ret; | |
7a3e97b0 SY |
5102 | } |
5103 | ||
5104 | /** | |
57d104c1 | 5105 | * ufshcd_resume - helper function for resume operations |
3b1d0580 | 5106 | * @hba: per adapter instance |
57d104c1 | 5107 | * @pm_op: runtime PM or system PM |
7a3e97b0 | 5108 | * |
57d104c1 SJ |
5109 | * This function basically brings the UFS device, UniPro link and controller |
5110 | * to active state. | |
5111 | * | |
5112 | * Returns 0 for success and non-zero for failure | |
7a3e97b0 | 5113 | */ |
57d104c1 | 5114 | static int ufshcd_resume(struct ufs_hba *hba, enum ufs_pm_op pm_op) |
7a3e97b0 | 5115 | { |
57d104c1 SJ |
5116 | int ret; |
5117 | enum uic_link_state old_link_state; | |
5118 | ||
5119 | hba->pm_op_in_progress = 1; | |
5120 | old_link_state = hba->uic_link_state; | |
5121 | ||
5122 | ufshcd_hba_vreg_set_hpm(hba); | |
5123 | /* Make sure clocks are enabled before accessing controller */ | |
5124 | ret = ufshcd_setup_clocks(hba, true); | |
5125 | if (ret) | |
5126 | goto out; | |
5127 | ||
57d104c1 SJ |
5128 | /* enable the host irq as the host controller will be active soon */ | |
5129 | ret = ufshcd_enable_irq(hba); | |
5130 | if (ret) | |
5131 | goto disable_irq_and_vops_clks; | |
5132 | ||
5133 | ret = ufshcd_vreg_set_hpm(hba); | |
5134 | if (ret) | |
5135 | goto disable_irq_and_vops_clks; | |
5136 | ||
7a3e97b0 | 5137 | /* |
57d104c1 SJ |
5138 | * Call vendor specific resume callback. As these callbacks may access |
5139 | * vendor specific host controller register space call them when the | |
5140 | * host clocks are ON. | |
7a3e97b0 | 5141 | */ |
57d104c1 SJ |
5142 | if (hba->vops && hba->vops->resume) { |
5143 | ret = hba->vops->resume(hba, pm_op); | |
5144 | if (ret) | |
5145 | goto disable_vreg; | |
5146 | } | |
5147 | ||
5148 | if (ufshcd_is_link_hibern8(hba)) { | |
5149 | ret = ufshcd_uic_hibern8_exit(hba); | |
5150 | if (!ret) | |
5151 | ufshcd_set_link_active(hba); | |
5152 | else | |
5153 | goto vendor_suspend; | |
5154 | } else if (ufshcd_is_link_off(hba)) { | |
5155 | ret = ufshcd_host_reset_and_restore(hba); | |
5156 | /* | |
5157 | * ufshcd_host_reset_and_restore() should have already | |
5158 | * set the link state as active | |
5159 | */ | |
5160 | if (ret || !ufshcd_is_link_active(hba)) | |
5161 | goto vendor_suspend; | |
5162 | } | |
5163 | ||
5164 | if (!ufshcd_is_ufs_dev_active(hba)) { | |
5165 | ret = ufshcd_set_dev_pwr_mode(hba, UFS_ACTIVE_PWR_MODE); | |
5166 | if (ret) | |
5167 | goto set_old_link_state; | |
5168 | } | |
5169 | ||
374a246e SJ |
5170 | /* |
5171 | * If BKOPs operations are urgently needed at this moment then | |
5172 | * keep auto-bkops enabled or else disable it. | |
5173 | */ | |
5174 | ufshcd_urgent_bkops(hba); | |
1ab27c9c ST |
5175 | hba->clk_gating.is_suspended = false; |
5176 | ||
856b3483 ST |
5177 | if (ufshcd_is_clkscaling_enabled(hba)) |
5178 | devfreq_resume_device(hba->devfreq); | |
5179 | ||
1ab27c9c ST |
5180 | /* Schedule clock gating in case of no access to UFS device yet */ |
5181 | ufshcd_release(hba); | |
57d104c1 SJ |
5182 | goto out; |
5183 | ||
5184 | set_old_link_state: | |
5185 | ufshcd_link_state_transition(hba, old_link_state, 0); | |
5186 | vendor_suspend: | |
5187 | if (hba->vops && hba->vops->suspend) | |
5188 | hba->vops->suspend(hba, pm_op); | |
5189 | disable_vreg: | |
5190 | ufshcd_vreg_set_lpm(hba); | |
5191 | disable_irq_and_vops_clks: | |
5192 | ufshcd_disable_irq(hba); | |
57d104c1 SJ |
5193 | ufshcd_setup_clocks(hba, false); |
5194 | out: | |
5195 | hba->pm_op_in_progress = 0; | |
5196 | return ret; | |
5197 | } | |
5198 | ||
5199 | /** | |
5200 | * ufshcd_system_suspend - system suspend routine | |
5201 | * @hba: per adapter instance | |
5202 | * | |
5203 | * | |
5204 | * Check the description of ufshcd_suspend() function for more details. | |
5205 | * | |
5206 | * Returns 0 for success and non-zero for failure | |
5207 | */ | |
5208 | int ufshcd_system_suspend(struct ufs_hba *hba) | |
5209 | { | |
5210 | int ret = 0; | |
5211 | ||
5212 | if (!hba || !hba->is_powered) | |
233b594b | 5213 | return 0; |
57d104c1 SJ |
5214 | |
5215 | if (pm_runtime_suspended(hba->dev)) { | |
5216 | if (hba->rpm_lvl == hba->spm_lvl) | |
5217 | /* | |
5218 | * There is a possibility that the device may still be | |
5219 | * in active state during the runtime suspend. | |
5220 | */ | |
5221 | if ((ufs_get_pm_lvl_to_dev_pwr_mode(hba->spm_lvl) == | |
5222 | hba->curr_dev_pwr_mode) && !hba->auto_bkops_enabled) | |
5223 | goto out; | |
5224 | ||
5225 | /* | |
5226 | * UFS device and/or UFS link low power states during runtime | |
5227 | * suspend seem to be different from what is expected during | |
5228 | * system suspend. Hence runtime resume the device & link and | |
5229 | * let the system suspend low power states take effect. | |
5230 | * TODO: If resume takes a long time, we might optimize it in | |
5231 | * the future by not resuming everything if possible. | |
5232 | */ | |
5233 | ret = ufshcd_runtime_resume(hba); | |
5234 | if (ret) | |
5235 | goto out; | |
5236 | } | |
5237 | ||
5238 | ret = ufshcd_suspend(hba, UFS_SYSTEM_PM); | |
5239 | out: | |
e785060e DR |
5240 | if (!ret) |
5241 | hba->is_sys_suspended = true; | |
57d104c1 SJ |
5242 | return ret; |
5243 | } | |
5244 | EXPORT_SYMBOL(ufshcd_system_suspend); | |
5245 | ||
5246 | /** | |
5247 | * ufshcd_system_resume - system resume routine | |
5248 | * @hba: per adapter instance | |
5249 | * | |
5250 | * Returns 0 for success and non-zero for failure | |
5251 | */ | |
7a3e97b0 | 5252 | |
57d104c1 SJ |
5253 | int ufshcd_system_resume(struct ufs_hba *hba) |
5254 | { | |
5255 | if (!hba || !hba->is_powered || pm_runtime_suspended(hba->dev)) | |
5256 | /* | |
5257 | * Let the runtime resume take care of resuming | |
5258 | * if runtime suspended. | |
5259 | */ | |
5260 | return 0; | |
5261 | ||
5262 | return ufshcd_resume(hba, UFS_SYSTEM_PM); | |
7a3e97b0 | 5263 | } |
57d104c1 | 5264 | EXPORT_SYMBOL(ufshcd_system_resume); |
3b1d0580 | 5265 | |
57d104c1 SJ |
5266 | /** |
5267 | * ufshcd_runtime_suspend - runtime suspend routine | |
5268 | * @hba: per adapter instance | |
5269 | * | |
5270 | * Check the description of ufshcd_suspend() function for more details. | |
5271 | * | |
5272 | * Returns 0 for success and non-zero for failure | |
5273 | */ | |
66ec6d59 SRT |
5274 | int ufshcd_runtime_suspend(struct ufs_hba *hba) |
5275 | { | |
57d104c1 | 5276 | if (!hba || !hba->is_powered) |
66ec6d59 SRT |
5277 | return 0; |
5278 | ||
57d104c1 | 5279 | return ufshcd_suspend(hba, UFS_RUNTIME_PM); |
66ec6d59 SRT |
5280 | } |
5281 | EXPORT_SYMBOL(ufshcd_runtime_suspend); | |
5282 | ||
57d104c1 SJ |
5283 | /** |
5284 | * ufshcd_runtime_resume - runtime resume routine | |
5285 | * @hba: per adapter instance | |
5286 | * | |
5287 | * This function basically brings the UFS device, UniPro link and controller | |
5288 | * to active state. Following operations are done in this function: | |
5289 | * | |
5290 | * 1. Turn on all the controller related clocks | |
5291 | * 2. Bring the UniPro link out of Hibernate state | |
5292 | * 3. If UFS device is in sleep state, turn ON VCC rail and bring the UFS device | |
5293 | * to active state. | |
5294 | * 4. If auto-bkops is enabled on the device, disable it. | |
5295 | * | |
5296 | * The following are the possible power states after this function | |
5297 | * returns successfully: | |
5298 | * S1: UFS device in Active state with VCC rail ON | |
5299 | * UniPro link in Active state | |
5300 | * All the UFS/UniPro controller clocks are ON | |
5301 | * | |
5302 | * Returns 0 for success and non-zero for failure | |
5303 | */ | |
66ec6d59 SRT |
5304 | int ufshcd_runtime_resume(struct ufs_hba *hba) |
5305 | { | |
57d104c1 | 5306 | if (!hba || !hba->is_powered) |
66ec6d59 | 5307 | return 0; |
57d104c1 SJ |
5308 | ||
5309 | return ufshcd_resume(hba, UFS_RUNTIME_PM); | |
66ec6d59 SRT |
5310 | } |
5311 | EXPORT_SYMBOL(ufshcd_runtime_resume); | |
5312 | ||
5313 | int ufshcd_runtime_idle(struct ufs_hba *hba) | |
5314 | { | |
5315 | return 0; | |
5316 | } | |
5317 | EXPORT_SYMBOL(ufshcd_runtime_idle); | |
5318 | ||
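/*
 * Glue sketch (illustrative; this wiring lives in a bus glue driver such
 * as ufshcd-pltfrm.c, not in this file): the system and runtime PM
 * helpers exported above are meant to be forwarded from dev_pm_ops
 * callbacks. The ufs_glue_* names below are hypothetical.
 */
static int ufs_glue_suspend(struct device *dev)
{
	return ufshcd_system_suspend(dev_get_drvdata(dev));
}

static int ufs_glue_resume(struct device *dev)
{
	return ufshcd_system_resume(dev_get_drvdata(dev));
}

static int ufs_glue_runtime_suspend(struct device *dev)
{
	return ufshcd_runtime_suspend(dev_get_drvdata(dev));
}

static int ufs_glue_runtime_resume(struct device *dev)
{
	return ufshcd_runtime_resume(dev_get_drvdata(dev));
}

static int ufs_glue_runtime_idle(struct device *dev)
{
	return ufshcd_runtime_idle(dev_get_drvdata(dev));
}

static const struct dev_pm_ops ufs_glue_pm_ops = {
	.suspend	 = ufs_glue_suspend,
	.resume		 = ufs_glue_resume,
	.runtime_suspend = ufs_glue_runtime_suspend,
	.runtime_resume	 = ufs_glue_runtime_resume,
	.runtime_idle	 = ufs_glue_runtime_idle,
};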
57d104c1 SJ |
5319 | /** |
5320 | * ufshcd_shutdown - shutdown routine | |
5321 | * @hba: per adapter instance | |
5322 | * | |
5323 | * This function powers off both the UFS device and the UFS link. | |
5324 | * | |
5325 | * Returns 0 always to allow force shutdown even in case of errors. | |
5326 | */ | |
5327 | int ufshcd_shutdown(struct ufs_hba *hba) | |
5328 | { | |
5329 | int ret = 0; | |
5330 | ||
5331 | if (ufshcd_is_ufs_dev_poweroff(hba) && ufshcd_is_link_off(hba)) | |
5332 | goto out; | |
5333 | ||
5334 | if (pm_runtime_suspended(hba->dev)) { | |
5335 | ret = ufshcd_runtime_resume(hba); | |
5336 | if (ret) | |
5337 | goto out; | |
5338 | } | |
5339 | ||
5340 | ret = ufshcd_suspend(hba, UFS_SHUTDOWN_PM); | |
5341 | out: | |
5342 | if (ret) | |
5343 | dev_err(hba->dev, "%s failed, err %d\n", __func__, ret); | |
5344 | /* allow force shutdown even in case of errors */ | |
5345 | return 0; | |
5346 | } | |
5347 | EXPORT_SYMBOL(ufshcd_shutdown); | |
5348 | ||
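/*
 * Shutdown glue sketch (hypothetical, shown in comment form): a platform
 * glue driver is expected to hook ufshcd_shutdown() up roughly as:
 *
 *	static void ufs_glue_shutdown(struct platform_device *pdev)
 *	{
 *		ufshcd_shutdown((struct ufs_hba *)platform_get_drvdata(pdev));
 *	}
 */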
7a3e97b0 | 5349 | /** |
3b1d0580 | 5350 | * ufshcd_remove - de-allocate the SCSI host and host memory space |
7a3e97b0 | 5351 | * data structures |
3b1d0580 | 5352 | * @hba: per adapter instance |
7a3e97b0 | 5353 | */ |
3b1d0580 | 5354 | void ufshcd_remove(struct ufs_hba *hba) |
7a3e97b0 | 5355 | { |
cfdf9c91 | 5356 | scsi_remove_host(hba->host); |
7a3e97b0 | 5357 | /* disable interrupts */ |
2fbd009b | 5358 | ufshcd_disable_intr(hba, hba->intr_mask); |
7a3e97b0 | 5359 | ufshcd_hba_stop(hba); |
7a3e97b0 | 5360 | |
7a3e97b0 | 5361 | scsi_host_put(hba->host); |
5c0c28a8 | 5362 | |
1ab27c9c | 5363 | ufshcd_exit_clk_gating(hba); |
856b3483 ST |
5364 | if (ufshcd_is_clkscaling_enabled(hba)) |
5365 | devfreq_remove_device(hba->devfreq); | |
aa497613 | 5366 | ufshcd_hba_exit(hba); |
3b1d0580 VH |
5367 | } |
5368 | EXPORT_SYMBOL_GPL(ufshcd_remove); | |
5369 | ||
ca3d7bf9 AM |
5370 | /** |
5371 | * ufshcd_set_dma_mask - Set dma mask based on the controller | |
5372 | * addressing capability | |
5373 | * @hba: per adapter instance | |
5374 | * | |
5375 | * Returns 0 for success, non-zero for failure | |
5376 | */ | |
5377 | static int ufshcd_set_dma_mask(struct ufs_hba *hba) | |
5378 | { | |
5379 | if (hba->capabilities & MASK_64_ADDRESSING_SUPPORT) { | |
5380 | if (!dma_set_mask_and_coherent(hba->dev, DMA_BIT_MASK(64))) | |
5381 | return 0; | |
5382 | } | |
5383 | return dma_set_mask_and_coherent(hba->dev, DMA_BIT_MASK(32)); | |
5384 | } | |
5385 | ||
7a3e97b0 | 5386 | /** |
5c0c28a8 | 5387 | * ufshcd_alloc_host - allocate Host Bus Adapter (HBA) |
3b1d0580 VH |
5388 | * @dev: pointer to device handle |
5389 | * @hba_handle: driver private handle | |
7a3e97b0 SY |
5390 | * Returns 0 on success, non-zero value on failure |
5391 | */ | |
5c0c28a8 | 5392 | int ufshcd_alloc_host(struct device *dev, struct ufs_hba **hba_handle) |
7a3e97b0 SY |
5393 | { |
5394 | struct Scsi_Host *host; | |
5395 | struct ufs_hba *hba; | |
5c0c28a8 | 5396 | int err = 0; |
7a3e97b0 | 5397 | |
3b1d0580 VH |
5398 | if (!dev) { |
5399 | dev_err(dev, | |
5400 | "Invalid memory reference for dev is NULL\n"); | |
5401 | err = -ENODEV; | |
7a3e97b0 SY |
5402 | goto out_error; |
5403 | } | |
5404 | ||
7a3e97b0 SY |
5405 | host = scsi_host_alloc(&ufshcd_driver_template, |
5406 | sizeof(struct ufs_hba)); | |
5407 | if (!host) { | |
3b1d0580 | 5408 | dev_err(dev, "scsi_host_alloc failed\n"); |
7a3e97b0 | 5409 | err = -ENOMEM; |
3b1d0580 | 5410 | goto out_error; |
7a3e97b0 SY |
5411 | } |
5412 | hba = shost_priv(host); | |
7a3e97b0 | 5413 | hba->host = host; |
3b1d0580 | 5414 | hba->dev = dev; |
5c0c28a8 SRT |
5415 | *hba_handle = hba; |
5416 | ||
5417 | out_error: | |
5418 | return err; | |
5419 | } | |
5420 | EXPORT_SYMBOL(ufshcd_alloc_host); | |
5421 | ||
856b3483 ST |
5422 | static int ufshcd_scale_clks(struct ufs_hba *hba, bool scale_up) |
5423 | { | |
5424 | int ret = 0; | |
5425 | struct ufs_clk_info *clki; | |
5426 | struct list_head *head = &hba->clk_list_head; | |
5427 | ||
5428 | if (!head || list_empty(head)) | |
5429 | goto out; | |
5430 | ||
5431 | list_for_each_entry(clki, head, list) { | |
5432 | if (!IS_ERR_OR_NULL(clki->clk)) { | |
5433 | if (scale_up && clki->max_freq) { | |
5434 | if (clki->curr_freq == clki->max_freq) | |
5435 | continue; | |
5436 | ret = clk_set_rate(clki->clk, clki->max_freq); | |
5437 | if (ret) { | |
5438 | dev_err(hba->dev, "%s: %s clk set rate(%dHz) failed, %d\n", | |
5439 | __func__, clki->name, | |
5440 | clki->max_freq, ret); | |
5441 | break; | |
5442 | } | |
5443 | clki->curr_freq = clki->max_freq; | |
5444 | ||
5445 | } else if (!scale_up && clki->min_freq) { | |
5446 | if (clki->curr_freq == clki->min_freq) | |
5447 | continue; | |
5448 | ret = clk_set_rate(clki->clk, clki->min_freq); | |
5449 | if (ret) { | |
5450 | dev_err(hba->dev, "%s: %s clk set rate(%dHz) failed, %d\n", | |
5451 | __func__, clki->name, | |
5452 | clki->min_freq, ret); | |
5453 | break; | |
5454 | } | |
5455 | clki->curr_freq = clki->min_freq; | |
5456 | } | |
5457 | } | |
5458 | dev_dbg(hba->dev, "%s: clk: %s, rate: %lu\n", __func__, | |
5459 | clki->name, clk_get_rate(clki->clk)); | |
5460 | } | |
5461 | if (hba->vops && hba->vops->clk_scale_notify) | |
5462 | hba->vops->clk_scale_notify(hba); | |
5463 | out: | |
5464 | return ret; | |
5465 | } | |
5466 | ||
5467 | static int ufshcd_devfreq_target(struct device *dev, | |
5468 | unsigned long *freq, u32 flags) | |
5469 | { | |
5470 | int err = 0; | |
5471 | struct ufs_hba *hba = dev_get_drvdata(dev); | |
5472 | ||
5473 | if (!ufshcd_is_clkscaling_enabled(hba)) | |
5474 | return -EINVAL; | |
5475 | ||
5476 | if (*freq == UINT_MAX) | |
5477 | err = ufshcd_scale_clks(hba, true); | |
5478 | else if (*freq == 0) | |
5479 | err = ufshcd_scale_clks(hba, false); | |
5480 | ||
5481 | return err; | |
5482 | } | |
5483 | ||
5484 | static int ufshcd_devfreq_get_dev_status(struct device *dev, | |
5485 | struct devfreq_dev_status *stat) | |
5486 | { | |
5487 | struct ufs_hba *hba = dev_get_drvdata(dev); | |
5488 | struct ufs_clk_scaling *scaling = &hba->clk_scaling; | |
5489 | unsigned long flags; | |
5490 | ||
5491 | if (!ufshcd_is_clkscaling_enabled(hba)) | |
5492 | return -EINVAL; | |
5493 | ||
5494 | memset(stat, 0, sizeof(*stat)); | |
5495 | ||
5496 | spin_lock_irqsave(hba->host->host_lock, flags); | |
5497 | if (!scaling->window_start_t) | |
5498 | goto start_window; | |
5499 | ||
5500 | if (scaling->is_busy_started) | |
5501 | scaling->tot_busy_t += ktime_to_us(ktime_sub(ktime_get(), | |
5502 | scaling->busy_start_t)); | |
5503 | ||
5504 | stat->total_time = jiffies_to_usecs((long)jiffies - | |
5505 | (long)scaling->window_start_t); | |
5506 | stat->busy_time = scaling->tot_busy_t; | |
5507 | start_window: | |
5508 | scaling->window_start_t = jiffies; | |
5509 | scaling->tot_busy_t = 0; | |
5510 | ||
5511 | if (hba->outstanding_reqs) { | |
5512 | scaling->busy_start_t = ktime_get(); | |
5513 | scaling->is_busy_started = true; | |
5514 | } else { | |
5515 | scaling->busy_start_t = ktime_set(0, 0); | |
5516 | scaling->is_busy_started = false; | |
5517 | } | |
5518 | spin_unlock_irqrestore(hba->host->host_lock, flags); | |
5519 | return 0; | |
5520 | } | |
5521 | ||
5522 | static struct devfreq_dev_profile ufs_devfreq_profile = { | |
5523 | .polling_ms = 100, | |
5524 | .target = ufshcd_devfreq_target, | |
5525 | .get_dev_status = ufshcd_devfreq_get_dev_status, | |
5526 | }; | |
5527 | ||
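/*
 * Note on the devfreq wiring above (a sketch of the contract, not new
 * logic): devfreq polls ufshcd_devfreq_get_dev_status() every polling_ms
 * to sample busy vs. total time, then calls ufshcd_devfreq_target() with
 * the frequency the governor picked. Since no frequency table is
 * registered, this driver only honours the two extremes: *freq ==
 * UINT_MAX scales every clock up to its max_freq, *freq == 0 scales it
 * down to its min_freq, and anything in between is ignored.
 */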
5c0c28a8 SRT |
5528 | /** |
5529 | * ufshcd_init - Driver initialization routine | |
5530 | * @hba: per-adapter instance | |
5531 | * @mmio_base: base register address | |
5532 | * @irq: Interrupt line of device | |
5533 | * Returns 0 on success, non-zero value on failure | |
5534 | */ | |
5535 | int ufshcd_init(struct ufs_hba *hba, void __iomem *mmio_base, unsigned int irq) | |
5536 | { | |
5537 | int err; | |
5538 | struct Scsi_Host *host = hba->host; | |
5539 | struct device *dev = hba->dev; | |
5540 | ||
5541 | if (!mmio_base) { | |
5542 | dev_err(hba->dev, | |
5543 | "Invalid memory reference: mmio_base is NULL\n"); | |
5544 | err = -ENODEV; | |
5545 | goto out_error; | |
5546 | } | |
5547 | ||
3b1d0580 VH |
5548 | hba->mmio_base = mmio_base; |
5549 | hba->irq = irq; | |
7a3e97b0 | 5550 | |
aa497613 | 5551 | err = ufshcd_hba_init(hba); |
5c0c28a8 SRT |
5552 | if (err) |
5553 | goto out_error; | |
5554 | ||
7a3e97b0 SY |
5555 | /* Read capabilities registers */ |
5556 | ufshcd_hba_capabilities(hba); | |
5557 | ||
5558 | /* Get UFS version supported by the controller */ | |
5559 | hba->ufs_version = ufshcd_get_ufs_version(hba); | |
5560 | ||
2fbd009b SJ |
5561 | /* Get Interrupt bit mask per version */ |
5562 | hba->intr_mask = ufshcd_get_intr_mask(hba); | |
5563 | ||
ca3d7bf9 AM |
5564 | err = ufshcd_set_dma_mask(hba); |
5565 | if (err) { | |
5566 | dev_err(hba->dev, "set dma mask failed\n"); | |
5567 | goto out_disable; | |
5568 | } | |
5569 | ||
7a3e97b0 SY |
5570 | /* Allocate memory for host memory space */ |
5571 | err = ufshcd_memory_alloc(hba); | |
5572 | if (err) { | |
3b1d0580 VH |
5573 | dev_err(hba->dev, "Memory allocation failed\n"); |
5574 | goto out_disable; | |
7a3e97b0 SY |
5575 | } |
5576 | ||
5577 | /* Configure LRB */ | |
5578 | ufshcd_host_memory_configure(hba); | |
5579 | ||
5580 | host->can_queue = hba->nutrs; | |
5581 | host->cmd_per_lun = hba->nutrs; | |
5582 | host->max_id = UFSHCD_MAX_ID; | |
0ce147d4 | 5583 | host->max_lun = UFS_MAX_LUNS; |
7a3e97b0 SY |
5584 | host->max_channel = UFSHCD_MAX_CHANNEL; |
5585 | host->unique_id = host->host_no; | |
5586 | host->max_cmd_len = MAX_CDB_SIZE; | |
5587 | ||
7eb584db DR |
5588 | hba->max_pwr_info.is_valid = false; |
5589 | ||
7a3e97b0 | 5590 | /* Initialize wait queues for task management */ |
e2933132 SRT |
5591 | init_waitqueue_head(&hba->tm_wq); |
5592 | init_waitqueue_head(&hba->tm_tag_wq); | |
7a3e97b0 SY |
5593 | |
5594 | /* Initialize work queues */ | |
e8e7f271 | 5595 | INIT_WORK(&hba->eh_work, ufshcd_err_handler); |
66ec6d59 | 5596 | INIT_WORK(&hba->eeh_work, ufshcd_exception_event_handler); |
7a3e97b0 | 5597 | |
6ccf44fe SJ |
5598 | /* Initialize UIC command mutex */ |
5599 | mutex_init(&hba->uic_cmd_mutex); | |
5600 | ||
5a0b0cb9 SRT |
5601 | /* Initialize mutex for device management commands */ |
5602 | mutex_init(&hba->dev_cmd.lock); | |
5603 | ||
5604 | /* Initialize device management tag acquire wait queue */ | |
5605 | init_waitqueue_head(&hba->dev_cmd.tag_wq); | |
5606 | ||
1ab27c9c | 5607 | ufshcd_init_clk_gating(hba); |
7a3e97b0 | 5608 | /* IRQ registration */ |
2953f850 | 5609 | err = devm_request_irq(dev, irq, ufshcd_intr, IRQF_SHARED, UFSHCD, hba); |
7a3e97b0 | 5610 | if (err) { |
3b1d0580 | 5611 | dev_err(hba->dev, "request irq failed\n"); |
1ab27c9c | 5612 | goto exit_gating; |
57d104c1 SJ |
5613 | } else { |
5614 | hba->is_irq_enabled = true; | |
7a3e97b0 SY |
5615 | } |
5616 | ||
5617 | /* Enable SCSI tag mapping */ | |
5618 | err = scsi_init_shared_tag_map(host, host->can_queue); | |
5619 | if (err) { | |
3b1d0580 | 5620 | dev_err(hba->dev, "init shared queue failed\n"); |
1ab27c9c | 5621 | goto exit_gating; |
7a3e97b0 SY |
5622 | } |
5623 | ||
3b1d0580 | 5624 | err = scsi_add_host(host, hba->dev); |
7a3e97b0 | 5625 | if (err) { |
3b1d0580 | 5626 | dev_err(hba->dev, "scsi_add_host failed\n"); |
1ab27c9c | 5627 | goto exit_gating; |
7a3e97b0 SY |
5628 | } |
5629 | ||
6ccf44fe SJ |
5630 | /* Host controller enable */ |
5631 | err = ufshcd_hba_enable(hba); | |
7a3e97b0 | 5632 | if (err) { |
6ccf44fe | 5633 | dev_err(hba->dev, "Host controller enable failed\n"); |
3b1d0580 | 5634 | goto out_remove_scsi_host; |
7a3e97b0 | 5635 | } |
6ccf44fe | 5636 | |
856b3483 ST |
5637 | if (ufshcd_is_clkscaling_enabled(hba)) { |
5638 | hba->devfreq = devfreq_add_device(dev, &ufs_devfreq_profile, | |
5639 | "simple_ondemand", NULL); | |
5640 | if (IS_ERR(hba->devfreq)) { | |
5641 | dev_err(hba->dev, "Unable to register with devfreq %ld\n", | |
5642 | PTR_ERR(hba->devfreq)); | |
5643 | goto out_remove_scsi_host; | |
5644 | } | |
5645 | /* Suspend devfreq until the UFS device is detected */ | |
5646 | devfreq_suspend_device(hba->devfreq); | |
5647 | hba->clk_scaling.window_start_t = 0; | |
5648 | } | |
5649 | ||
62694735 SRT |
5650 | /* Hold auto suspend until async scan completes */ |
5651 | pm_runtime_get_sync(dev); | |
5652 | ||
57d104c1 SJ |
5653 | /* |
5654 | * The device-initialize-sequence hasn't been invoked yet. | |
5655 | * Set the device to power-off state | |
5656 | */ | |
5657 | ufshcd_set_ufs_dev_poweroff(hba); | |
5658 | ||
6ccf44fe SJ |
5659 | async_schedule(ufshcd_async_scan, hba); |
5660 | ||
7a3e97b0 SY |
5661 | return 0; |
5662 | ||
3b1d0580 VH |
5663 | out_remove_scsi_host: |
5664 | scsi_remove_host(hba->host); | |
1ab27c9c ST |
5665 | exit_gating: |
5666 | ufshcd_exit_clk_gating(hba); | |
3b1d0580 | 5667 | out_disable: |
57d104c1 | 5668 | hba->is_irq_enabled = false; |
3b1d0580 | 5669 | scsi_host_put(host); |
aa497613 | 5670 | ufshcd_hba_exit(hba); |
3b1d0580 VH |
5671 | out_error: |
5672 | return err; | |
5673 | } | |
5674 | EXPORT_SYMBOL_GPL(ufshcd_init); | |
5675 | ||
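/*
 * Probe sketch (illustrative; this belongs in a bus glue driver such as
 * ufshcd-pltfrm.c, not in this file): ufshcd_alloc_host() and
 * ufshcd_init() are intended to be called back to back from the glue
 * driver's probe routine. ufs_glue_probe is a hypothetical name and the
 * usual platform_device helpers are assumed.
 */
static int ufs_glue_probe(struct platform_device *pdev)
{
	struct resource *mem;
	void __iomem *mmio_base;
	struct ufs_hba *hba;
	int irq, err;

	mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	mmio_base = devm_ioremap_resource(&pdev->dev, mem);
	if (IS_ERR(mmio_base))
		return PTR_ERR(mmio_base);

	irq = platform_get_irq(pdev, 0);
	if (irq < 0)
		return irq;

	err = ufshcd_alloc_host(&pdev->dev, &hba);
	if (err)
		return err;

	platform_set_drvdata(pdev, hba);
	return ufshcd_init(hba, mmio_base, irq);
}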
3b1d0580 VH |
5676 | MODULE_AUTHOR("Santosh Yaraganavi <santosh.sy@samsung.com>"); | |
5677 | MODULE_AUTHOR("Vinayak Holikatti <h.vinayak@samsung.com>"); | |
e0eca63e | 5678 | MODULE_DESCRIPTION("Generic UFS host controller driver Core"); |
7a3e97b0 SY |
5679 | MODULE_LICENSE("GPL"); |
5680 | MODULE_VERSION(UFSHCD_DRIVER_VERSION); |