scsi: ufs: Fix hardware race conditions while aborting a command
[GitHub/moto-9609/android_kernel_motorola_exynos9610.git] drivers/scsi/ufs/ufshcd.c
/*
 * Universal Flash Storage Host controller driver Core
 *
 * This code is based on drivers/scsi/ufs/ufshcd.c
 * Copyright (C) 2011-2013 Samsung India Software Operations
 *
 * Authors:
 *	Santosh Yaraganavi <santosh.sy@samsung.com>
 *	Vinayak Holikatti <h.vinayak@samsung.com>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version 2
 * of the License, or (at your option) any later version.
 * See the COPYING file in the top-level directory or visit
 * <http://www.gnu.org/licenses/gpl-2.0.html>
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * This program is provided "AS IS" and "WITH ALL FAULTS" and
 * without warranty of any kind. You are solely responsible for
 * determining the appropriateness of using and distributing
 * the program and assume all risks associated with your exercise
 * of rights with respect to the program, including but not limited
 * to infringement of third party rights, the risks and costs of
 * program errors, damage to or loss of data, programs or equipment,
 * and unavailability or interruption of operations. Under no
 * circumstances will the contributor of this Program be liable for
 * any damages of any kind arising from your use or distribution of
 * this program.
 */

#include <linux/async.h>

#include "ufshcd.h"
#include "unipro.h"

#define UFSHCD_ENABLE_INTRS	(UTP_TRANSFER_REQ_COMPL |\
				 UTP_TASK_REQ_COMPL |\
				 UIC_POWER_MODE |\
				 UFSHCD_ERROR_MASK)
/* UIC command timeout, unit: ms */
#define UIC_CMD_TIMEOUT	500

/* NOP OUT retries waiting for NOP IN response */
#define NOP_OUT_RETRIES    10
/* Timeout after 30 msecs if NOP OUT hangs without response */
#define NOP_OUT_TIMEOUT    30 /* msecs */

/* Query request retries */
#define QUERY_REQ_RETRIES 10
/* Query request timeout */
#define QUERY_REQ_TIMEOUT 30 /* msec */

/* Task management command timeout */
#define TM_CMD_TIMEOUT	100 /* msecs */

/* Expose the flag value from utp_upiu_query.value */
#define MASK_QUERY_UPIU_FLAG_LOC 0xFF

/* Interrupt aggregation default timeout, unit: 40us */
#define INT_AGGR_DEF_TO	0x02

enum {
	UFSHCD_MAX_CHANNEL	= 0,
	UFSHCD_MAX_ID		= 1,
	UFSHCD_MAX_LUNS		= 8,
	UFSHCD_CMD_PER_LUN	= 32,
	UFSHCD_CAN_QUEUE	= 32,
};

/* UFSHCD states */
enum {
	UFSHCD_STATE_OPERATIONAL,
	UFSHCD_STATE_RESET,
	UFSHCD_STATE_ERROR,
};

/* Interrupt configuration options */
enum {
	UFSHCD_INT_DISABLE,
	UFSHCD_INT_ENABLE,
	UFSHCD_INT_CLEAR,
};

/*
 * ufshcd_wait_for_register - wait for register value to change
 * @hba - per-adapter interface
 * @reg - mmio register offset
 * @mask - mask to apply to read register value
 * @val - wait condition
 * @interval_us - polling interval in microsecs
 * @timeout_ms - timeout in millisecs
 *
 * Returns -ETIMEDOUT on error, zero on success
 */
static int ufshcd_wait_for_register(struct ufs_hba *hba, u32 reg, u32 mask,
		u32 val, unsigned long interval_us, unsigned long timeout_ms)
{
	int err = 0;
	unsigned long timeout = jiffies + msecs_to_jiffies(timeout_ms);

	/* ignore bits that we don't intend to wait on */
	val = val & mask;

	while ((ufshcd_readl(hba, reg) & mask) != val) {
		/* wakeup within 50us of expiry */
		usleep_range(interval_us, interval_us + 50);

		if (time_after(jiffies, timeout)) {
			if ((ufshcd_readl(hba, reg) & mask) != val)
				err = -ETIMEDOUT;
			break;
		}
	}

	return err;
}

/**
 * ufshcd_get_intr_mask - Get the interrupt bit mask
 * @hba - Pointer to adapter instance
 *
 * Returns interrupt bit mask per version
 */
static inline u32 ufshcd_get_intr_mask(struct ufs_hba *hba)
{
	if (hba->ufs_version == UFSHCI_VERSION_10)
		return INTERRUPT_MASK_ALL_VER_10;
	else
		return INTERRUPT_MASK_ALL_VER_11;
}

/**
 * ufshcd_get_ufs_version - Get the UFS version supported by the HBA
 * @hba - Pointer to adapter instance
 *
 * Returns UFSHCI version supported by the controller
 */
static inline u32 ufshcd_get_ufs_version(struct ufs_hba *hba)
{
	return ufshcd_readl(hba, REG_UFS_VERSION);
}

/**
 * ufshcd_is_device_present - Check if any device is connected to
 *			      the host controller
 * @reg_hcs - host controller status register value
 *
 * Returns 1 if a device is present, 0 if no device is detected
 */
static inline int ufshcd_is_device_present(u32 reg_hcs)
{
	return (DEVICE_PRESENT & reg_hcs) ? 1 : 0;
}

/**
 * ufshcd_get_tr_ocs - Get the UTRD Overall Command Status
 * @lrb: pointer to local command reference block
 *
 * This function is used to get the OCS field from UTRD
 * Returns the OCS field in the UTRD
 */
static inline int ufshcd_get_tr_ocs(struct ufshcd_lrb *lrbp)
{
	return le32_to_cpu(lrbp->utr_descriptor_ptr->header.dword_2) & MASK_OCS;
}

/**
 * ufshcd_get_tmr_ocs - Get the UTMRD Overall Command Status
 * @task_req_descp: pointer to utp_task_req_desc structure
 *
 * This function is used to get the OCS field from UTMRD
 * Returns the OCS field in the UTMRD
 */
static inline int
ufshcd_get_tmr_ocs(struct utp_task_req_desc *task_req_descp)
{
	return le32_to_cpu(task_req_descp->header.dword_2) & MASK_OCS;
}

/**
 * ufshcd_get_tm_free_slot - get a free slot for task management request
 * @hba: per adapter instance
 * @free_slot: pointer to variable with available slot value
 *
 * Get a free tag and lock it until ufshcd_put_tm_slot() is called.
 * Returns false if a free slot is not available, else returns true
 * with the tag value in @free_slot.
 */
static bool ufshcd_get_tm_free_slot(struct ufs_hba *hba, int *free_slot)
{
	int tag;
	bool ret = false;

	if (!free_slot)
		goto out;

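	/*
	 * The search and the lock attempt are not atomic with respect to
	 * each other: another context can grab the slot between
	 * find_first_zero_bit() and test_and_set_bit_lock(), in which
	 * case we simply search again.
	 */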
	do {
		tag = find_first_zero_bit(&hba->tm_slots_in_use, hba->nutmrs);
		if (tag >= hba->nutmrs)
			goto out;
	} while (test_and_set_bit_lock(tag, &hba->tm_slots_in_use));

	*free_slot = tag;
	ret = true;
out:
	return ret;
}

static inline void ufshcd_put_tm_slot(struct ufs_hba *hba, int slot)
{
	clear_bit_unlock(slot, &hba->tm_slots_in_use);
}

/**
 * ufshcd_utrl_clear - Clear a bit in UTRLCLR register
 * @hba: per adapter instance
 * @pos: position of the bit to be cleared
 */
static inline void ufshcd_utrl_clear(struct ufs_hba *hba, u32 pos)
{
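	/*
	 * UTRLCLR uses inverted semantics: a slot is cleared by writing
	 * '0' to its bit position while all other bits stay '1', hence
	 * the negated mask below.
	 */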
	ufshcd_writel(hba, ~(1 << pos), REG_UTP_TRANSFER_REQ_LIST_CLEAR);
}

/**
 * ufshcd_get_lists_status - Check UCRDY, UTRLRDY and UTMRLRDY
 * @reg: Register value of host controller status
 *
 * Returns integer, 0 on Success and positive value if failed
 */
static inline int ufshcd_get_lists_status(u32 reg)
{
	/*
	 * The mask 0xFF is for the following HCS register bits
	 * Bit		Description
	 *  0		Device Present
	 *  1		UTRLRDY
	 *  2		UTMRLRDY
	 *  3		UCRDY
	 *  4		HEI
	 *  5		DEI
	 * 6-7		reserved
	 */
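	/*
	 * Shifting out the Device Present bit and XORing with 0x07 yields
	 * zero only when UTRLRDY, UTMRLRDY and UCRDY are all set while
	 * HEI, DEI and the reserved bits are clear.
	 */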
	return (((reg) & (0xFF)) >> 1) ^ (0x07);
}

/**
 * ufshcd_get_uic_cmd_result - Get the UIC command result
 * @hba: Pointer to adapter instance
 *
 * This function gets the result of UIC command completion
 * Returns 0 on success, non zero value on error
 */
static inline int ufshcd_get_uic_cmd_result(struct ufs_hba *hba)
{
	return ufshcd_readl(hba, REG_UIC_COMMAND_ARG_2) &
	       MASK_UIC_COMMAND_RESULT;
}

/**
 * ufshcd_get_dme_attr_val - Get the value of attribute returned by UIC command
 * @hba: Pointer to adapter instance
 *
 * This function reads UIC command argument3
 * Returns the attribute value read from argument3
 */
static inline u32 ufshcd_get_dme_attr_val(struct ufs_hba *hba)
{
	return ufshcd_readl(hba, REG_UIC_COMMAND_ARG_3);
}

/**
 * ufshcd_get_req_rsp - returns the TR response transaction type
 * @ucd_rsp_ptr: pointer to response UPIU
 */
static inline int
ufshcd_get_req_rsp(struct utp_upiu_rsp *ucd_rsp_ptr)
{
	return be32_to_cpu(ucd_rsp_ptr->header.dword_0) >> 24;
}

/**
 * ufshcd_get_rsp_upiu_result - Get the result from response UPIU
 * @ucd_rsp_ptr: pointer to response UPIU
 *
 * This function gets the response status and scsi_status from response UPIU
 * Returns the response result code.
 */
static inline int
ufshcd_get_rsp_upiu_result(struct utp_upiu_rsp *ucd_rsp_ptr)
{
	return be32_to_cpu(ucd_rsp_ptr->header.dword_1) & MASK_RSP_UPIU_RESULT;
}

/*
 * ufshcd_get_rsp_upiu_data_seg_len - Get the data segment length
 *				from response UPIU
 * @ucd_rsp_ptr: pointer to response UPIU
 *
 * Return the data segment length.
 */
static inline unsigned int
ufshcd_get_rsp_upiu_data_seg_len(struct utp_upiu_rsp *ucd_rsp_ptr)
{
	return be32_to_cpu(ucd_rsp_ptr->header.dword_2) &
		MASK_RSP_UPIU_DATA_SEG_LEN;
}

/**
 * ufshcd_is_exception_event - Check if the device raised an exception event
 * @ucd_rsp_ptr: pointer to response UPIU
 *
 * The function checks if the device raised an exception event indicated in
 * the Device Information field of response UPIU.
 *
 * Returns true if exception is raised, false otherwise.
 */
static inline bool ufshcd_is_exception_event(struct utp_upiu_rsp *ucd_rsp_ptr)
{
	return be32_to_cpu(ucd_rsp_ptr->header.dword_2) &
			MASK_RSP_EXCEPTION_EVENT ? true : false;
}

/**
 * ufshcd_reset_intr_aggr - Reset interrupt aggregation values.
 * @hba: per adapter instance
 */
static inline void
ufshcd_reset_intr_aggr(struct ufs_hba *hba)
{
	ufshcd_writel(hba, INT_AGGR_ENABLE |
		      INT_AGGR_COUNTER_AND_TIMER_RESET,
		      REG_UTP_TRANSFER_REQ_INT_AGG_CONTROL);
}

/**
 * ufshcd_config_intr_aggr - Configure interrupt aggregation values.
 * @hba: per adapter instance
 * @cnt: Interrupt aggregation counter threshold
 * @tmout: Interrupt aggregation timeout value
 */
static inline void
ufshcd_config_intr_aggr(struct ufs_hba *hba, u8 cnt, u8 tmout)
{
	ufshcd_writel(hba, INT_AGGR_ENABLE | INT_AGGR_PARAM_WRITE |
		      INT_AGGR_COUNTER_THLD_VAL(cnt) |
		      INT_AGGR_TIMEOUT_VAL(tmout),
		      REG_UTP_TRANSFER_REQ_INT_AGG_CONTROL);
}

/**
 * ufshcd_enable_run_stop_reg - Enable run-stop registers.
 * Setting the run-stop registers to 1 indicates to the host
 * controller that it can start processing requests.
 * @hba: per adapter instance
 */
static void ufshcd_enable_run_stop_reg(struct ufs_hba *hba)
{
	ufshcd_writel(hba, UTP_TASK_REQ_LIST_RUN_STOP_BIT,
		      REG_UTP_TASK_REQ_LIST_RUN_STOP);
	ufshcd_writel(hba, UTP_TRANSFER_REQ_LIST_RUN_STOP_BIT,
		      REG_UTP_TRANSFER_REQ_LIST_RUN_STOP);
}

/**
 * ufshcd_hba_start - Start controller initialization sequence
 * @hba: per adapter instance
 */
static inline void ufshcd_hba_start(struct ufs_hba *hba)
{
	ufshcd_writel(hba, CONTROLLER_ENABLE, REG_CONTROLLER_ENABLE);
}

/**
 * ufshcd_is_hba_active - Get controller state
 * @hba: per adapter instance
 *
 * Returns zero if controller is active, 1 otherwise
 */
static inline int ufshcd_is_hba_active(struct ufs_hba *hba)
{
	return (ufshcd_readl(hba, REG_CONTROLLER_ENABLE) & 0x1) ? 0 : 1;
}

/**
 * ufshcd_send_command - Send SCSI or device management commands
 * @hba: per adapter instance
 * @task_tag: Task tag of the command
 */
static inline
void ufshcd_send_command(struct ufs_hba *hba, unsigned int task_tag)
{
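	/*
	 * Called with the host lock held: mark the slot outstanding first,
	 * then ring the doorbell so the controller starts fetching and
	 * executing the descriptor.
	 */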
	__set_bit(task_tag, &hba->outstanding_reqs);
	ufshcd_writel(hba, 1 << task_tag, REG_UTP_TRANSFER_REQ_DOOR_BELL);
}

/**
 * ufshcd_copy_sense_data - Copy sense data in case of check condition
 * @lrb - pointer to local reference block
 */
static inline void ufshcd_copy_sense_data(struct ufshcd_lrb *lrbp)
{
	int len;
	if (lrbp->sense_buffer &&
	    ufshcd_get_rsp_upiu_data_seg_len(lrbp->ucd_rsp_ptr)) {
		len = be16_to_cpu(lrbp->ucd_rsp_ptr->sr.sense_data_len);
		memcpy(lrbp->sense_buffer,
			lrbp->ucd_rsp_ptr->sr.sense_data,
			min_t(int, len, SCSI_SENSE_BUFFERSIZE));
	}
}

/**
 * ufshcd_copy_query_response() - Copy the Query Response and the data
 * descriptor
 * @hba: per adapter instance
 * @lrb - pointer to local reference block
 */
static
void ufshcd_copy_query_response(struct ufs_hba *hba, struct ufshcd_lrb *lrbp)
{
	struct ufs_query_res *query_res = &hba->dev_cmd.query.response;

	/* Get the UPIU response */
	query_res->response = ufshcd_get_rsp_upiu_result(lrbp->ucd_rsp_ptr) >>
			UPIU_RSP_CODE_OFFSET;

	memcpy(&query_res->upiu_res, &lrbp->ucd_rsp_ptr->qr, QUERY_OSF_SIZE);

	/* Get the descriptor */
	if (lrbp->ucd_rsp_ptr->qr.opcode == UPIU_QUERY_OPCODE_READ_DESC) {
		u8 *descp = (u8 *)lrbp->ucd_rsp_ptr +
				GENERAL_UPIU_REQUEST_SIZE;
		u16 len;

		/* data segment length */
		len = be32_to_cpu(lrbp->ucd_rsp_ptr->header.dword_2) &
				MASK_QUERY_DATA_SEG_LEN;

		memcpy(hba->dev_cmd.query.descriptor, descp,
			min_t(u16, len, QUERY_DESC_MAX_SIZE));
	}
}

/**
 * ufshcd_hba_capabilities - Read controller capabilities
 * @hba: per adapter instance
 */
static inline void ufshcd_hba_capabilities(struct ufs_hba *hba)
{
	hba->capabilities = ufshcd_readl(hba, REG_CONTROLLER_CAPABILITIES);

	/* nutrs and nutmrs are 0 based values */
	hba->nutrs = (hba->capabilities & MASK_TRANSFER_REQUESTS_SLOTS) + 1;
	hba->nutmrs =
	((hba->capabilities & MASK_TASK_MANAGEMENT_REQUEST_SLOTS) >> 16) + 1;
}

/**
 * ufshcd_ready_for_uic_cmd - Check if controller is ready
 *			      to accept UIC commands
 * @hba: per adapter instance
 * Return true on success, else false
 */
static inline bool ufshcd_ready_for_uic_cmd(struct ufs_hba *hba)
{
	if (ufshcd_readl(hba, REG_CONTROLLER_STATUS) & UIC_COMMAND_READY)
		return true;
	else
		return false;
}

/**
 * ufshcd_get_upmcrs - Get the power mode change request status
 * @hba: Pointer to adapter instance
 *
 * This function gets the UPMCRS field of HCS register
 * Returns value of UPMCRS field
 */
static inline u8 ufshcd_get_upmcrs(struct ufs_hba *hba)
{
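	/* UPMCRS occupies bits 10:8 of the host controller status register */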
	return (ufshcd_readl(hba, REG_CONTROLLER_STATUS) >> 8) & 0x7;
}

/**
 * ufshcd_dispatch_uic_cmd - Dispatch UIC commands to unipro layers
 * @hba: per adapter instance
 * @uic_cmd: UIC command
 *
 * Mutex must be held.
 */
static inline void
ufshcd_dispatch_uic_cmd(struct ufs_hba *hba, struct uic_command *uic_cmd)
{
	WARN_ON(hba->active_uic_cmd);

	hba->active_uic_cmd = uic_cmd;

	/* Write Args */
	ufshcd_writel(hba, uic_cmd->argument1, REG_UIC_COMMAND_ARG_1);
	ufshcd_writel(hba, uic_cmd->argument2, REG_UIC_COMMAND_ARG_2);
	ufshcd_writel(hba, uic_cmd->argument3, REG_UIC_COMMAND_ARG_3);

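	/*
	 * Writing the command register kicks off execution, so the
	 * argument registers above must be programmed before this point.
	 */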
	/* Write UIC Cmd */
	ufshcd_writel(hba, uic_cmd->command & COMMAND_OPCODE_MASK,
		      REG_UIC_COMMAND);
}

/**
 * ufshcd_wait_for_uic_cmd - Wait for completion of a UIC command
 * @hba: per adapter instance
 * @uic_command: UIC command
 *
 * Must be called with mutex held.
 * Returns 0 only if success.
 */
static int
ufshcd_wait_for_uic_cmd(struct ufs_hba *hba, struct uic_command *uic_cmd)
{
	int ret;
	unsigned long flags;

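	/*
	 * The completion path stores the UIC result in uic_cmd->argument2
	 * before signalling done, so the result can be read safely here
	 * once the wait succeeds.
	 */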
	if (wait_for_completion_timeout(&uic_cmd->done,
					msecs_to_jiffies(UIC_CMD_TIMEOUT)))
		ret = uic_cmd->argument2 & MASK_UIC_COMMAND_RESULT;
	else
		ret = -ETIMEDOUT;

	spin_lock_irqsave(hba->host->host_lock, flags);
	hba->active_uic_cmd = NULL;
	spin_unlock_irqrestore(hba->host->host_lock, flags);

	return ret;
}

/**
 * __ufshcd_send_uic_cmd - Send UIC commands and retrieve the result
 * @hba: per adapter instance
 * @uic_cmd: UIC command
 *
 * Identical to ufshcd_send_uic_cmd() except that it does not take the
 * mutex itself. Must be called with the mutex held.
 * Returns 0 only if success.
 */
static int
__ufshcd_send_uic_cmd(struct ufs_hba *hba, struct uic_command *uic_cmd)
{
	int ret;
	unsigned long flags;

	if (!ufshcd_ready_for_uic_cmd(hba)) {
		dev_err(hba->dev,
			"Controller not ready to accept UIC commands\n");
		return -EIO;
	}

	init_completion(&uic_cmd->done);

	spin_lock_irqsave(hba->host->host_lock, flags);
	ufshcd_dispatch_uic_cmd(hba, uic_cmd);
	spin_unlock_irqrestore(hba->host->host_lock, flags);

	ret = ufshcd_wait_for_uic_cmd(hba, uic_cmd);

	return ret;
}

/**
 * ufshcd_send_uic_cmd - Send UIC commands and retrieve the result
 * @hba: per adapter instance
 * @uic_cmd: UIC command
 *
 * Returns 0 only if success.
 */
static int
ufshcd_send_uic_cmd(struct ufs_hba *hba, struct uic_command *uic_cmd)
{
	int ret;

	mutex_lock(&hba->uic_cmd_mutex);
	ret = __ufshcd_send_uic_cmd(hba, uic_cmd);
	mutex_unlock(&hba->uic_cmd_mutex);

	return ret;
}

/**
 * ufshcd_map_sg - Map scatter-gather list to prdt
 * @lrbp - pointer to local reference block
 *
 * Returns 0 in case of success, non-zero value in case of failure
 */
static int ufshcd_map_sg(struct ufshcd_lrb *lrbp)
{
	struct ufshcd_sg_entry *prd_table;
	struct scatterlist *sg;
	struct scsi_cmnd *cmd;
	int sg_segments;
	int i;

	cmd = lrbp->cmd;
	sg_segments = scsi_dma_map(cmd);
	if (sg_segments < 0)
		return sg_segments;

	if (sg_segments) {
		lrbp->utr_descriptor_ptr->prd_table_length =
			cpu_to_le16((u16) (sg_segments));

		prd_table = (struct ufshcd_sg_entry *)lrbp->ucd_prdt_ptr;

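		/*
		 * The PRDT "size" field holds the byte count minus one,
		 * hence the -1 applied to each segment length below.
		 */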
		scsi_for_each_sg(cmd, sg, sg_segments, i) {
			prd_table[i].size =
				cpu_to_le32(((u32) sg_dma_len(sg))-1);
			prd_table[i].base_addr =
				cpu_to_le32(lower_32_bits(sg->dma_address));
			prd_table[i].upper_addr =
				cpu_to_le32(upper_32_bits(sg->dma_address));
		}
	} else {
		lrbp->utr_descriptor_ptr->prd_table_length = 0;
	}

	return 0;
}

/**
 * ufshcd_enable_intr - enable interrupts
 * @hba: per adapter instance
 * @intrs: interrupt bits
 */
static void ufshcd_enable_intr(struct ufs_hba *hba, u32 intrs)
{
	u32 set = ufshcd_readl(hba, REG_INTERRUPT_ENABLE);

	if (hba->ufs_version == UFSHCI_VERSION_10) {
		u32 rw;
		rw = set & INTERRUPT_MASK_RW_VER_10;
		set = rw | ((set ^ intrs) & intrs);
	} else {
		set |= intrs;
	}

	ufshcd_writel(hba, set, REG_INTERRUPT_ENABLE);
}

/**
 * ufshcd_disable_intr - disable interrupts
 * @hba: per adapter instance
 * @intrs: interrupt bits
 */
static void ufshcd_disable_intr(struct ufs_hba *hba, u32 intrs)
{
	u32 set = ufshcd_readl(hba, REG_INTERRUPT_ENABLE);

	if (hba->ufs_version == UFSHCI_VERSION_10) {
		u32 rw;
		rw = (set & INTERRUPT_MASK_RW_VER_10) &
			~(intrs & INTERRUPT_MASK_RW_VER_10);
		set = rw | ((set & intrs) & ~INTERRUPT_MASK_RW_VER_10);
	} else {
		set &= ~intrs;
	}

	ufshcd_writel(hba, set, REG_INTERRUPT_ENABLE);
}

/**
 * ufshcd_prepare_req_desc_hdr() - Fill the UTP transfer request descriptor
 * header according to the request
 * @lrbp: pointer to local reference block
 * @upiu_flags: flags required in the header
 * @cmd_dir: the request's data direction
 */
static void ufshcd_prepare_req_desc_hdr(struct ufshcd_lrb *lrbp,
		u32 *upiu_flags, enum dma_data_direction cmd_dir)
{
	struct utp_transfer_req_desc *req_desc = lrbp->utr_descriptor_ptr;
	u32 data_direction;
	u32 dword_0;

	if (cmd_dir == DMA_FROM_DEVICE) {
		data_direction = UTP_DEVICE_TO_HOST;
		*upiu_flags = UPIU_CMD_FLAGS_READ;
	} else if (cmd_dir == DMA_TO_DEVICE) {
		data_direction = UTP_HOST_TO_DEVICE;
		*upiu_flags = UPIU_CMD_FLAGS_WRITE;
	} else {
		data_direction = UTP_NO_DATA_TRANSFER;
		*upiu_flags = UPIU_CMD_FLAGS_NONE;
	}

	dword_0 = data_direction | (lrbp->command_type
				<< UPIU_COMMAND_TYPE_OFFSET);
	if (lrbp->intr_cmd)
		dword_0 |= UTP_REQ_DESC_INT_CMD;

	/* Transfer request descriptor header fields */
	req_desc->header.dword_0 = cpu_to_le32(dword_0);

	/*
	 * Assign an invalid value for the command status. The controller
	 * updates OCS on command completion with the actual status.
	 */
	req_desc->header.dword_2 =
		cpu_to_le32(OCS_INVALID_COMMAND_STATUS);
}

/**
 * ufshcd_prepare_utp_scsi_cmd_upiu() - fills the utp_transfer_req_desc
 * for scsi commands
 * @lrbp - local reference block pointer
 * @upiu_flags - flags
 */
static
void ufshcd_prepare_utp_scsi_cmd_upiu(struct ufshcd_lrb *lrbp, u32 upiu_flags)
{
	struct utp_upiu_req *ucd_req_ptr = lrbp->ucd_req_ptr;

	/* command descriptor fields */
	ucd_req_ptr->header.dword_0 = UPIU_HEADER_DWORD(
				UPIU_TRANSACTION_COMMAND, upiu_flags,
				lrbp->lun, lrbp->task_tag);
	ucd_req_ptr->header.dword_1 = UPIU_HEADER_DWORD(
				UPIU_COMMAND_SET_TYPE_SCSI, 0, 0, 0);

	/* Total EHS length and Data segment length will be zero */
	ucd_req_ptr->header.dword_2 = 0;

	ucd_req_ptr->sc.exp_data_transfer_len =
		cpu_to_be32(lrbp->cmd->sdb.length);

	memcpy(ucd_req_ptr->sc.cdb, lrbp->cmd->cmnd,
		(min_t(unsigned short, lrbp->cmd->cmd_len, MAX_CDB_SIZE)));
}

/**
 * ufshcd_prepare_utp_query_req_upiu() - fills the utp_transfer_req_desc
 * for query requests
 * @hba: UFS hba
 * @lrbp: local reference block pointer
 * @upiu_flags: flags
 */
static void ufshcd_prepare_utp_query_req_upiu(struct ufs_hba *hba,
				struct ufshcd_lrb *lrbp, u32 upiu_flags)
{
	struct utp_upiu_req *ucd_req_ptr = lrbp->ucd_req_ptr;
	struct ufs_query *query = &hba->dev_cmd.query;
	u16 len = be16_to_cpu(query->request.upiu_req.length);
	u8 *descp = (u8 *)lrbp->ucd_req_ptr + GENERAL_UPIU_REQUEST_SIZE;

	/* Query request header */
	ucd_req_ptr->header.dword_0 = UPIU_HEADER_DWORD(
			UPIU_TRANSACTION_QUERY_REQ, upiu_flags,
			lrbp->lun, lrbp->task_tag);
	ucd_req_ptr->header.dword_1 = UPIU_HEADER_DWORD(
			0, query->request.query_func, 0, 0);

	/* Data segment length */
	ucd_req_ptr->header.dword_2 = UPIU_HEADER_DWORD(
			0, 0, len >> 8, (u8)len);

	/* Copy the Query Request buffer as is */
	memcpy(&ucd_req_ptr->qr, &query->request.upiu_req,
			QUERY_OSF_SIZE);

	/* Copy the Descriptor */
	if ((len > 0) && (query->request.upiu_req.opcode ==
			UPIU_QUERY_OPCODE_WRITE_DESC)) {
		memcpy(descp, query->descriptor,
			min_t(u16, len, QUERY_DESC_MAX_SIZE));
	}
}

static inline void ufshcd_prepare_utp_nop_upiu(struct ufshcd_lrb *lrbp)
{
	struct utp_upiu_req *ucd_req_ptr = lrbp->ucd_req_ptr;

	memset(ucd_req_ptr, 0, sizeof(struct utp_upiu_req));

	/* command descriptor fields */
	ucd_req_ptr->header.dword_0 =
		UPIU_HEADER_DWORD(
			UPIU_TRANSACTION_NOP_OUT, 0, 0, lrbp->task_tag);
}

/**
 * ufshcd_compose_upiu - form UFS Protocol Information Unit(UPIU)
 * @hba - per adapter instance
 * @lrb - pointer to local reference block
 */
static int ufshcd_compose_upiu(struct ufs_hba *hba, struct ufshcd_lrb *lrbp)
{
	u32 upiu_flags;
	int ret = 0;

	switch (lrbp->command_type) {
	case UTP_CMD_TYPE_SCSI:
		if (likely(lrbp->cmd)) {
			ufshcd_prepare_req_desc_hdr(lrbp, &upiu_flags,
					lrbp->cmd->sc_data_direction);
			ufshcd_prepare_utp_scsi_cmd_upiu(lrbp, upiu_flags);
		} else {
			ret = -EINVAL;
		}
		break;
	case UTP_CMD_TYPE_DEV_MANAGE:
		ufshcd_prepare_req_desc_hdr(lrbp, &upiu_flags, DMA_NONE);
		if (hba->dev_cmd.type == DEV_CMD_TYPE_QUERY)
			ufshcd_prepare_utp_query_req_upiu(
					hba, lrbp, upiu_flags);
		else if (hba->dev_cmd.type == DEV_CMD_TYPE_NOP)
			ufshcd_prepare_utp_nop_upiu(lrbp);
		else
			ret = -EINVAL;
		break;
	case UTP_CMD_TYPE_UFS:
		/* For UFS native command implementation */
		ret = -ENOTSUPP;
		dev_err(hba->dev, "%s: UFS native commands are not supported\n",
			__func__);
		break;
	default:
		ret = -ENOTSUPP;
		dev_err(hba->dev, "%s: unknown command type: 0x%x\n",
			__func__, lrbp->command_type);
		break;
	} /* end of switch */

	return ret;
}

/**
 * ufshcd_queuecommand - main entry point for SCSI requests
 * @host: SCSI host pointer
 * @cmd: command from SCSI Midlayer
 *
 * Returns 0 for success, non-zero in case of failure
 */
static int ufshcd_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *cmd)
{
	struct ufshcd_lrb *lrbp;
	struct ufs_hba *hba;
	unsigned long flags;
	int tag;
	int err = 0;

	hba = shost_priv(host);

	tag = cmd->request->tag;

	if (hba->ufshcd_state != UFSHCD_STATE_OPERATIONAL) {
		err = SCSI_MLQUEUE_HOST_BUSY;
		goto out;
	}

	/* acquire the tag to make sure device cmds don't use it */
	if (test_and_set_bit_lock(tag, &hba->lrb_in_use)) {
		/*
		 * Dev manage command in progress, requeue the command.
		 * Requeuing the command helps in cases where the request *may*
		 * find a different tag instead of waiting for dev manage
		 * command completion.
		 */
		err = SCSI_MLQUEUE_HOST_BUSY;
		goto out;
	}

	lrbp = &hba->lrb[tag];

	WARN_ON(lrbp->cmd);
	lrbp->cmd = cmd;
	lrbp->sense_bufflen = SCSI_SENSE_BUFFERSIZE;
	lrbp->sense_buffer = cmd->sense_buffer;
	lrbp->task_tag = tag;
	lrbp->lun = cmd->device->lun;
	lrbp->intr_cmd = false;
	lrbp->command_type = UTP_CMD_TYPE_SCSI;

	/* form UPIU before issuing the command */
	ufshcd_compose_upiu(hba, lrbp);
	err = ufshcd_map_sg(lrbp);
	if (err) {
		lrbp->cmd = NULL;
		clear_bit_unlock(tag, &hba->lrb_in_use);
		goto out;
	}

	/* issue command to the controller */
	spin_lock_irqsave(hba->host->host_lock, flags);
	ufshcd_send_command(hba, tag);
	spin_unlock_irqrestore(hba->host->host_lock, flags);
out:
	return err;
}

static int ufshcd_compose_dev_cmd(struct ufs_hba *hba,
		struct ufshcd_lrb *lrbp, enum dev_cmd_type cmd_type, int tag)
{
	lrbp->cmd = NULL;
	lrbp->sense_bufflen = 0;
	lrbp->sense_buffer = NULL;
	lrbp->task_tag = tag;
	lrbp->lun = 0; /* device management cmd is not specific to any LUN */
	lrbp->command_type = UTP_CMD_TYPE_DEV_MANAGE;
	lrbp->intr_cmd = true; /* No interrupt aggregation */
	hba->dev_cmd.type = cmd_type;

	return ufshcd_compose_upiu(hba, lrbp);
}

static int
ufshcd_clear_cmd(struct ufs_hba *hba, int tag)
{
	int err = 0;
	unsigned long flags;
	u32 mask = 1 << tag;

	/* clear outstanding transaction before retry */
	spin_lock_irqsave(hba->host->host_lock, flags);
	ufshcd_utrl_clear(hba, tag);
	spin_unlock_irqrestore(hba->host->host_lock, flags);

	/*
	 * wait for h/w to clear corresponding bit in door-bell.
	 * max. wait is 1 sec.
	 */
	err = ufshcd_wait_for_register(hba,
			REG_UTP_TRANSFER_REQ_DOOR_BELL,
			mask, ~mask, 1000, 1000);

	return err;
}

/**
 * ufshcd_dev_cmd_completion() - handles device management command responses
 * @hba: per adapter instance
 * @lrbp: pointer to local reference block
 */
static int
ufshcd_dev_cmd_completion(struct ufs_hba *hba, struct ufshcd_lrb *lrbp)
{
	int resp;
	int err = 0;

	resp = ufshcd_get_req_rsp(lrbp->ucd_rsp_ptr);

	switch (resp) {
	case UPIU_TRANSACTION_NOP_IN:
		if (hba->dev_cmd.type != DEV_CMD_TYPE_NOP) {
			err = -EINVAL;
			dev_err(hba->dev, "%s: unexpected response %x\n",
					__func__, resp);
		}
		break;
	case UPIU_TRANSACTION_QUERY_RSP:
		ufshcd_copy_query_response(hba, lrbp);
		break;
	case UPIU_TRANSACTION_REJECT_UPIU:
		/* TODO: handle Reject UPIU Response */
		err = -EPERM;
		dev_err(hba->dev, "%s: Reject UPIU not fully implemented\n",
				__func__);
		break;
	default:
		err = -EINVAL;
		dev_err(hba->dev, "%s: Invalid device management cmd response: %x\n",
				__func__, resp);
		break;
	}

	return err;
}

static int ufshcd_wait_for_dev_cmd(struct ufs_hba *hba,
		struct ufshcd_lrb *lrbp, int max_timeout)
{
	int err = 0;
	unsigned long time_left;
	unsigned long flags;

	time_left = wait_for_completion_timeout(hba->dev_cmd.complete,
			msecs_to_jiffies(max_timeout));

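	/*
	 * Detach the completion under the host lock: the completion is
	 * signalled under the same lock, so a late arrival cannot touch
	 * the on-stack completion object after this function returns.
	 */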
	spin_lock_irqsave(hba->host->host_lock, flags);
	hba->dev_cmd.complete = NULL;
	if (likely(time_left)) {
		err = ufshcd_get_tr_ocs(lrbp);
		if (!err)
			err = ufshcd_dev_cmd_completion(hba, lrbp);
	}
	spin_unlock_irqrestore(hba->host->host_lock, flags);

	if (!time_left) {
		err = -ETIMEDOUT;
		if (!ufshcd_clear_cmd(hba, lrbp->task_tag))
			/* successfully cleared the command, retry if needed */
			err = -EAGAIN;
	}

	return err;
}

/**
 * ufshcd_get_dev_cmd_tag - Get device management command tag
 * @hba: per-adapter instance
 * @tag: pointer to variable with available slot value
 *
 * Get a free slot and lock it until device management command
 * completes.
 *
 * Returns false if free slot is unavailable for locking, else
 * return true with tag value in @tag.
 */
static bool ufshcd_get_dev_cmd_tag(struct ufs_hba *hba, int *tag_out)
{
	int tag;
	bool ret = false;
	unsigned long tmp;

	if (!tag_out)
		goto out;

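	/*
	 * find_last_bit() takes the highest-numbered free slot. The block
	 * layer hands out tags from the low end, so allocating device
	 * management tags from the top makes collisions less likely.
	 */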
	do {
		tmp = ~hba->lrb_in_use;
		tag = find_last_bit(&tmp, hba->nutrs);
		if (tag >= hba->nutrs)
			goto out;
	} while (test_and_set_bit_lock(tag, &hba->lrb_in_use));

	*tag_out = tag;
	ret = true;
out:
	return ret;
}

static inline void ufshcd_put_dev_cmd_tag(struct ufs_hba *hba, int tag)
{
	clear_bit_unlock(tag, &hba->lrb_in_use);
}

/**
 * ufshcd_exec_dev_cmd - API for sending device management requests
 * @hba - UFS hba
 * @cmd_type - specifies the type (NOP, Query...)
 * @timeout - timeout in milliseconds
 *
 * NOTE: Since there is only one available tag for device management commands,
 * it is expected you hold the hba->dev_cmd.lock mutex.
 */
static int ufshcd_exec_dev_cmd(struct ufs_hba *hba,
		enum dev_cmd_type cmd_type, int timeout)
{
	struct ufshcd_lrb *lrbp;
	int err;
	int tag;
	struct completion wait;
	unsigned long flags;

	/*
	 * Get free slot, sleep if slots are unavailable.
	 * Even though we use wait_event() which sleeps indefinitely,
	 * the maximum wait time is bounded by SCSI request timeout.
	 */
	wait_event(hba->dev_cmd.tag_wq, ufshcd_get_dev_cmd_tag(hba, &tag));

	init_completion(&wait);
	lrbp = &hba->lrb[tag];
	WARN_ON(lrbp->cmd);
	err = ufshcd_compose_dev_cmd(hba, lrbp, cmd_type, tag);
	if (unlikely(err))
		goto out_put_tag;

	hba->dev_cmd.complete = &wait;

	spin_lock_irqsave(hba->host->host_lock, flags);
	ufshcd_send_command(hba, tag);
	spin_unlock_irqrestore(hba->host->host_lock, flags);

	err = ufshcd_wait_for_dev_cmd(hba, lrbp, timeout);

out_put_tag:
	ufshcd_put_dev_cmd_tag(hba, tag);
	wake_up(&hba->dev_cmd.tag_wq);
	return err;
}

/**
 * ufshcd_query_flag() - API function for sending flag query requests
 * @hba: per-adapter instance
 * @opcode: flag query to perform
 * @idn: flag idn to access
 * @flag_res: the flag value after the query request completes
 *
 * Returns 0 for success, non-zero in case of failure
 */
static int ufshcd_query_flag(struct ufs_hba *hba, enum query_opcode opcode,
			enum flag_idn idn, bool *flag_res)
{
	struct ufs_query_req *request;
	struct ufs_query_res *response;
	int err;

	BUG_ON(!hba);

	mutex_lock(&hba->dev_cmd.lock);
	request = &hba->dev_cmd.query.request;
	response = &hba->dev_cmd.query.response;
	memset(request, 0, sizeof(struct ufs_query_req));
	memset(response, 0, sizeof(struct ufs_query_res));

	switch (opcode) {
	case UPIU_QUERY_OPCODE_SET_FLAG:
	case UPIU_QUERY_OPCODE_CLEAR_FLAG:
	case UPIU_QUERY_OPCODE_TOGGLE_FLAG:
		request->query_func = UPIU_QUERY_FUNC_STANDARD_WRITE_REQUEST;
		break;
	case UPIU_QUERY_OPCODE_READ_FLAG:
		request->query_func = UPIU_QUERY_FUNC_STANDARD_READ_REQUEST;
		if (!flag_res) {
			/* No dummy reads */
			dev_err(hba->dev, "%s: Invalid argument for read request\n",
					__func__);
			err = -EINVAL;
			goto out_unlock;
		}
		break;
	default:
		dev_err(hba->dev,
			"%s: Expected query flag opcode but got = %d\n",
			__func__, opcode);
		err = -EINVAL;
		goto out_unlock;
	}
	request->upiu_req.opcode = opcode;
	request->upiu_req.idn = idn;

	/* Send query request */
	err = ufshcd_exec_dev_cmd(hba, DEV_CMD_TYPE_QUERY,
			QUERY_REQ_TIMEOUT);

	if (err) {
		dev_err(hba->dev,
			"%s: Sending flag query for idn %d failed, err = %d\n",
			__func__, idn, err);
		goto out_unlock;
	}

	if (flag_res)
		*flag_res = (be32_to_cpu(response->upiu_res.value) &
				MASK_QUERY_UPIU_FLAG_LOC) & 0x1;

out_unlock:
	mutex_unlock(&hba->dev_cmd.lock);
	return err;
}

/**
 * ufshcd_query_attr - API function for sending attribute requests
 * @hba: per-adapter instance
 * @opcode: attribute opcode
 * @idn: attribute idn to access
 * @index: index field
 * @selector: selector field
 * @attr_val: the attribute value after the query request completes
 *
 * Returns 0 for success, non-zero in case of failure
 */
static int ufshcd_query_attr(struct ufs_hba *hba, enum query_opcode opcode,
			enum attr_idn idn, u8 index, u8 selector, u32 *attr_val)
{
	struct ufs_query_req *request;
	struct ufs_query_res *response;
	int err;

	BUG_ON(!hba);

	if (!attr_val) {
		dev_err(hba->dev, "%s: attribute value required for opcode 0x%x\n",
				__func__, opcode);
		err = -EINVAL;
		goto out;
	}

	mutex_lock(&hba->dev_cmd.lock);
	request = &hba->dev_cmd.query.request;
	response = &hba->dev_cmd.query.response;
	memset(request, 0, sizeof(struct ufs_query_req));
	memset(response, 0, sizeof(struct ufs_query_res));

	switch (opcode) {
	case UPIU_QUERY_OPCODE_WRITE_ATTR:
		request->query_func = UPIU_QUERY_FUNC_STANDARD_WRITE_REQUEST;
		request->upiu_req.value = cpu_to_be32(*attr_val);
		break;
	case UPIU_QUERY_OPCODE_READ_ATTR:
		request->query_func = UPIU_QUERY_FUNC_STANDARD_READ_REQUEST;
		break;
	default:
		dev_err(hba->dev, "%s: Expected query attr opcode but got = 0x%.2x\n",
				__func__, opcode);
		err = -EINVAL;
		goto out_unlock;
	}

	request->upiu_req.opcode = opcode;
	request->upiu_req.idn = idn;
	request->upiu_req.index = index;
	request->upiu_req.selector = selector;

	/* Send query request */
	err = ufshcd_exec_dev_cmd(hba, DEV_CMD_TYPE_QUERY,
			QUERY_REQ_TIMEOUT);

	if (err) {
		dev_err(hba->dev, "%s: opcode 0x%.2x for idn %d failed, err = %d\n",
				__func__, opcode, idn, err);
		goto out_unlock;
	}

	*attr_val = be32_to_cpu(response->upiu_res.value);

out_unlock:
	mutex_unlock(&hba->dev_cmd.lock);
out:
	return err;
}

/**
 * ufshcd_memory_alloc - allocate memory for host memory space data structures
 * @hba: per adapter instance
 *
 * 1. Allocate DMA memory for Command Descriptor array
 *	Each command descriptor consists of Command UPIU, Response UPIU and PRDT
 * 2. Allocate DMA memory for UTP Transfer Request Descriptor List (UTRDL).
 * 3. Allocate DMA memory for UTP Task Management Request Descriptor List
 *	(UTMRDL)
 * 4. Allocate memory for local reference block(lrb).
 *
 * Returns 0 for success, non-zero in case of failure
 */
static int ufshcd_memory_alloc(struct ufs_hba *hba)
{
	size_t utmrdl_size, utrdl_size, ucdl_size;

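	/*
	 * All allocations below use device-managed (dmam_/devm_) variants,
	 * so there is no explicit free path: the memory is released
	 * automatically when the device is detached.
	 */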
	/* Allocate memory for UTP command descriptors */
	ucdl_size = (sizeof(struct utp_transfer_cmd_desc) * hba->nutrs);
	hba->ucdl_base_addr = dmam_alloc_coherent(hba->dev,
						  ucdl_size,
						  &hba->ucdl_dma_addr,
						  GFP_KERNEL);

	/*
	 * UFSHCI requires UTP command descriptor to be 128 byte aligned.
	 * make sure hba->ucdl_dma_addr is aligned to PAGE_SIZE
	 * if hba->ucdl_dma_addr is aligned to PAGE_SIZE, then it will
	 * be aligned to 128 bytes as well
	 */
	if (!hba->ucdl_base_addr ||
	    WARN_ON(hba->ucdl_dma_addr & (PAGE_SIZE - 1))) {
		dev_err(hba->dev,
			"Command Descriptor Memory allocation failed\n");
		goto out;
	}

	/*
	 * Allocate memory for UTP Transfer descriptors
	 * UFSHCI requires 1024 byte alignment of UTRD
	 */
	utrdl_size = (sizeof(struct utp_transfer_req_desc) * hba->nutrs);
	hba->utrdl_base_addr = dmam_alloc_coherent(hba->dev,
						   utrdl_size,
						   &hba->utrdl_dma_addr,
						   GFP_KERNEL);
	if (!hba->utrdl_base_addr ||
	    WARN_ON(hba->utrdl_dma_addr & (PAGE_SIZE - 1))) {
		dev_err(hba->dev,
			"Transfer Descriptor Memory allocation failed\n");
		goto out;
	}

	/*
	 * Allocate memory for UTP Task Management descriptors
	 * UFSHCI requires 1024 byte alignment of UTMRD
	 */
	utmrdl_size = sizeof(struct utp_task_req_desc) * hba->nutmrs;
	hba->utmrdl_base_addr = dmam_alloc_coherent(hba->dev,
						    utmrdl_size,
						    &hba->utmrdl_dma_addr,
						    GFP_KERNEL);
	if (!hba->utmrdl_base_addr ||
	    WARN_ON(hba->utmrdl_dma_addr & (PAGE_SIZE - 1))) {
		dev_err(hba->dev,
			"Task Management Descriptor Memory allocation failed\n");
		goto out;
	}

	/* Allocate memory for local reference block */
	hba->lrb = devm_kzalloc(hba->dev,
				hba->nutrs * sizeof(struct ufshcd_lrb),
				GFP_KERNEL);
	if (!hba->lrb) {
		dev_err(hba->dev, "LRB Memory allocation failed\n");
		goto out;
	}
	return 0;
out:
	return -ENOMEM;
}

/**
 * ufshcd_host_memory_configure - configure local reference block with
 *				memory offsets
 * @hba: per adapter instance
 *
 * Configure Host memory space
 * 1. Update Corresponding UTRD.UCDBA and UTRD.UCDBAU with UCD DMA
 * address.
 * 2. Update each UTRD with Response UPIU offset, Response UPIU length
 * and PRDT offset.
 * 3. Save the corresponding addresses of UTRD, UCD.CMD, UCD.RSP and UCD.PRDT
 * into local reference block.
 */
static void ufshcd_host_memory_configure(struct ufs_hba *hba)
{
	struct utp_transfer_cmd_desc *cmd_descp;
	struct utp_transfer_req_desc *utrdlp;
	dma_addr_t cmd_desc_dma_addr;
	dma_addr_t cmd_desc_element_addr;
	u16 response_offset;
	u16 prdt_offset;
	int cmd_desc_size;
	int i;

	utrdlp = hba->utrdl_base_addr;
	cmd_descp = hba->ucdl_base_addr;

	response_offset =
		offsetof(struct utp_transfer_cmd_desc, response_upiu);
	prdt_offset =
		offsetof(struct utp_transfer_cmd_desc, prd_table);

	cmd_desc_size = sizeof(struct utp_transfer_cmd_desc);
	cmd_desc_dma_addr = hba->ucdl_dma_addr;

	for (i = 0; i < hba->nutrs; i++) {
		/* Configure UTRD with command descriptor base address */
		cmd_desc_element_addr =
				(cmd_desc_dma_addr + (cmd_desc_size * i));
		utrdlp[i].command_desc_base_addr_lo =
				cpu_to_le32(lower_32_bits(cmd_desc_element_addr));
		utrdlp[i].command_desc_base_addr_hi =
				cpu_to_le32(upper_32_bits(cmd_desc_element_addr));

		/* Response upiu and prdt offset should be in double words */
		utrdlp[i].response_upiu_offset =
				cpu_to_le16((response_offset >> 2));
		utrdlp[i].prd_table_offset =
				cpu_to_le16((prdt_offset >> 2));
		utrdlp[i].response_upiu_length =
				cpu_to_le16(ALIGNED_UPIU_SIZE >> 2);

		hba->lrb[i].utr_descriptor_ptr = (utrdlp + i);
		hba->lrb[i].ucd_req_ptr =
			(struct utp_upiu_req *)(cmd_descp + i);
		hba->lrb[i].ucd_rsp_ptr =
			(struct utp_upiu_rsp *)cmd_descp[i].response_upiu;
		hba->lrb[i].ucd_prdt_ptr =
			(struct ufshcd_sg_entry *)cmd_descp[i].prd_table;
	}
}

/**
 * ufshcd_dme_link_startup - Notify Unipro to perform link startup
 * @hba: per adapter instance
 *
 * UIC_CMD_DME_LINK_STARTUP command must be issued to Unipro layer,
 * in order to initialize the Unipro link startup procedure.
 * Once the Unipro links are up, the device connected to the controller
 * is detected.
 *
 * Returns 0 on success, non-zero value on failure
 */
static int ufshcd_dme_link_startup(struct ufs_hba *hba)
{
	struct uic_command uic_cmd = {0};
	int ret;

	uic_cmd.command = UIC_CMD_DME_LINK_STARTUP;

	ret = ufshcd_send_uic_cmd(hba, &uic_cmd);
	if (ret)
		dev_err(hba->dev,
			"dme-link-startup: error code %d\n", ret);
	return ret;
}

/**
 * ufshcd_dme_set_attr - UIC command for DME_SET, DME_PEER_SET
 * @hba: per adapter instance
 * @attr_sel: uic command argument1
 * @attr_set: attribute set type as uic command argument2
 * @mib_val: setting value as uic command argument3
 * @peer: indicate whether peer or local
 *
 * Returns 0 on success, non-zero value on failure
 */
int ufshcd_dme_set_attr(struct ufs_hba *hba, u32 attr_sel,
			u8 attr_set, u32 mib_val, u8 peer)
{
	struct uic_command uic_cmd = {0};
	static const char *const action[] = {
		"dme-set",
		"dme-peer-set"
	};
	const char *set = action[!!peer];
	int ret;

	uic_cmd.command = peer ?
		UIC_CMD_DME_PEER_SET : UIC_CMD_DME_SET;
	uic_cmd.argument1 = attr_sel;
	uic_cmd.argument2 = UIC_ARG_ATTR_TYPE(attr_set);
	uic_cmd.argument3 = mib_val;

	ret = ufshcd_send_uic_cmd(hba, &uic_cmd);
	if (ret)
		dev_err(hba->dev, "%s: attr-id 0x%x val 0x%x error code %d\n",
			set, UIC_GET_ATTR_ID(attr_sel), mib_val, ret);

	return ret;
}
EXPORT_SYMBOL_GPL(ufshcd_dme_set_attr);

/**
 * ufshcd_dme_get_attr - UIC command for DME_GET, DME_PEER_GET
 * @hba: per adapter instance
 * @attr_sel: uic command argument1
 * @mib_val: the value of the attribute as returned by the UIC command
 * @peer: indicate whether peer or local
 *
 * Returns 0 on success, non-zero value on failure
 */
int ufshcd_dme_get_attr(struct ufs_hba *hba, u32 attr_sel,
			u32 *mib_val, u8 peer)
{
	struct uic_command uic_cmd = {0};
	static const char *const action[] = {
		"dme-get",
		"dme-peer-get"
	};
	const char *get = action[!!peer];
	int ret;

	uic_cmd.command = peer ?
		UIC_CMD_DME_PEER_GET : UIC_CMD_DME_GET;
	uic_cmd.argument1 = attr_sel;

	ret = ufshcd_send_uic_cmd(hba, &uic_cmd);
	if (ret) {
		dev_err(hba->dev, "%s: attr-id 0x%x error code %d\n",
			get, UIC_GET_ATTR_ID(attr_sel), ret);
		goto out;
	}

	if (mib_val)
		*mib_val = uic_cmd.argument3;
out:
	return ret;
}
EXPORT_SYMBOL_GPL(ufshcd_dme_get_attr);

/**
 * ufshcd_uic_change_pwr_mode - Perform the UIC power mode change
 * using DME_SET primitives.
 * @hba: per adapter instance
 * @mode: power mode value
 *
 * Returns 0 on success, non-zero value on failure
 */
static int ufshcd_uic_change_pwr_mode(struct ufs_hba *hba, u8 mode)
{
	struct uic_command uic_cmd = {0};
	struct completion pwr_done;
	unsigned long flags;
	u8 status;
	int ret;

	uic_cmd.command = UIC_CMD_DME_SET;
	uic_cmd.argument1 = UIC_ARG_MIB(PA_PWRMODE);
	uic_cmd.argument3 = mode;
	init_completion(&pwr_done);

	mutex_lock(&hba->uic_cmd_mutex);

	spin_lock_irqsave(hba->host->host_lock, flags);
	hba->pwr_done = &pwr_done;
	spin_unlock_irqrestore(hba->host->host_lock, flags);
	ret = __ufshcd_send_uic_cmd(hba, &uic_cmd);
	if (ret) {
		dev_err(hba->dev,
			"pwr mode change with mode 0x%x uic error %d\n",
			mode, ret);
		goto out;
	}

	if (!wait_for_completion_timeout(hba->pwr_done,
					 msecs_to_jiffies(UIC_CMD_TIMEOUT))) {
		dev_err(hba->dev,
			"pwr mode change with mode 0x%x completion timeout\n",
			mode);
		ret = -ETIMEDOUT;
		goto out;
	}

	status = ufshcd_get_upmcrs(hba);
	if (status != PWR_LOCAL) {
		dev_err(hba->dev,
			"pwr mode change failed, host upmcrs:0x%x\n",
			status);
		ret = (status != PWR_OK) ? status : -1;
	}
out:
	spin_lock_irqsave(hba->host->host_lock, flags);
	hba->pwr_done = NULL;
	spin_unlock_irqrestore(hba->host->host_lock, flags);
	mutex_unlock(&hba->uic_cmd_mutex);
	return ret;
}

/**
 * ufshcd_config_max_pwr_mode - Set & Change power mode with
 *	maximum capability attribute information.
 * @hba: per adapter instance
 *
 * Returns 0 on success, non-zero value on failure
 */
static int ufshcd_config_max_pwr_mode(struct ufs_hba *hba)
{
	enum {RX = 0, TX = 1};
	u32 lanes[] = {1, 1};
	u32 gear[] = {1, 1};
	u8 pwr[] = {FASTAUTO_MODE, FASTAUTO_MODE};
	int ret;

	/* Get the connected lane count */
	ufshcd_dme_get(hba, UIC_ARG_MIB(PA_CONNECTEDRXDATALANES), &lanes[RX]);
	ufshcd_dme_get(hba, UIC_ARG_MIB(PA_CONNECTEDTXDATALANES), &lanes[TX]);

	/*
	 * First, get the maximum gears of HS speed.
	 * If a zero value, it means there is no HSGEAR capability.
	 * Then, get the maximum gears of PWM speed.
	 */
	ufshcd_dme_get(hba, UIC_ARG_MIB(PA_MAXRXHSGEAR), &gear[RX]);
	if (!gear[RX]) {
		ufshcd_dme_get(hba, UIC_ARG_MIB(PA_MAXRXPWMGEAR), &gear[RX]);
		pwr[RX] = SLOWAUTO_MODE;
	}

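	/*
	 * The TX side is bounded by what the link partner can receive,
	 * so the TX gear is read from the peer's PA_MAXRXHSGEAR /
	 * PA_MAXRXPWMGEAR via DME_PEER_GET.
	 */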
	ufshcd_dme_peer_get(hba, UIC_ARG_MIB(PA_MAXRXHSGEAR), &gear[TX]);
	if (!gear[TX]) {
		ufshcd_dme_peer_get(hba, UIC_ARG_MIB(PA_MAXRXPWMGEAR),
				    &gear[TX]);
		pwr[TX] = SLOWAUTO_MODE;
	}

	/*
	 * Configure attributes for power mode change with below.
	 * - PA_RXGEAR, PA_ACTIVERXDATALANES, PA_RXTERMINATION,
	 * - PA_TXGEAR, PA_ACTIVETXDATALANES, PA_TXTERMINATION,
	 * - PA_HSSERIES
	 */
	ufshcd_dme_set(hba, UIC_ARG_MIB(PA_RXGEAR), gear[RX]);
	ufshcd_dme_set(hba, UIC_ARG_MIB(PA_ACTIVERXDATALANES), lanes[RX]);
	if (pwr[RX] == FASTAUTO_MODE)
		ufshcd_dme_set(hba, UIC_ARG_MIB(PA_RXTERMINATION), TRUE);

	ufshcd_dme_set(hba, UIC_ARG_MIB(PA_TXGEAR), gear[TX]);
	ufshcd_dme_set(hba, UIC_ARG_MIB(PA_ACTIVETXDATALANES), lanes[TX]);
	if (pwr[TX] == FASTAUTO_MODE)
		ufshcd_dme_set(hba, UIC_ARG_MIB(PA_TXTERMINATION), TRUE);

	if (pwr[RX] == FASTAUTO_MODE || pwr[TX] == FASTAUTO_MODE)
		ufshcd_dme_set(hba, UIC_ARG_MIB(PA_HSSERIES), PA_HS_MODE_B);

	ret = ufshcd_uic_change_pwr_mode(hba, pwr[RX] << 4 | pwr[TX]);
	if (ret)
		dev_err(hba->dev,
			"pwr_mode: power mode change failed %d\n", ret);

	return ret;
}

/**
 * ufshcd_complete_dev_init() - checks device readiness
 * @hba: per-adapter instance
 *
 * Set the fDeviceInit flag and poll until the device toggles it.
 */
static int ufshcd_complete_dev_init(struct ufs_hba *hba)
{
	int i, retries, err = 0;
	bool flag_res = 1;

	for (retries = QUERY_REQ_RETRIES; retries > 0; retries--) {
		/* Set the fDeviceInit flag */
		err = ufshcd_query_flag(hba, UPIU_QUERY_OPCODE_SET_FLAG,
					QUERY_FLAG_IDN_FDEVICEINIT, NULL);
		if (!err || err == -ETIMEDOUT)
			break;
		dev_dbg(hba->dev, "%s: error %d retrying\n", __func__, err);
	}
	if (err) {
		dev_err(hba->dev,
			"%s setting fDeviceInit flag failed with error %d\n",
			__func__, err);
		goto out;
	}

	/* poll for max. 100 iterations for fDeviceInit flag to clear */
	for (i = 0; i < 100 && !err && flag_res; i++) {
		for (retries = QUERY_REQ_RETRIES; retries > 0; retries--) {
			err = ufshcd_query_flag(hba,
					UPIU_QUERY_OPCODE_READ_FLAG,
					QUERY_FLAG_IDN_FDEVICEINIT, &flag_res);
			if (!err || err == -ETIMEDOUT)
				break;
			dev_dbg(hba->dev, "%s: error %d retrying\n", __func__,
					err);
		}
	}
	if (err)
		dev_err(hba->dev,
			"%s reading fDeviceInit flag failed with error %d\n",
			__func__, err);
	else if (flag_res)
		dev_err(hba->dev,
			"%s fDeviceInit was not cleared by the device\n",
			__func__);

out:
	return err;
}

/**
 * ufshcd_make_hba_operational - Make UFS controller operational
 * @hba: per adapter instance
 *
 * To bring UFS host controller to operational state,
 * 1. Check if device is present
 * 2. Enable required interrupts
 * 3. Configure interrupt aggregation
 * 4. Program UTRL and UTMRL base addresses
 * 5. Configure run-stop-registers
 *
 * Returns 0 on success, non-zero value on failure
 */
static int ufshcd_make_hba_operational(struct ufs_hba *hba)
{
	int err = 0;
	u32 reg;

	/* check if device present */
	reg = ufshcd_readl(hba, REG_CONTROLLER_STATUS);
	if (!ufshcd_is_device_present(reg)) {
		dev_err(hba->dev, "cc: Device not present\n");
		err = -ENXIO;
		goto out;
	}

	/* Enable required interrupts */
	ufshcd_enable_intr(hba, UFSHCD_ENABLE_INTRS);

	/* Configure interrupt aggregation */
	ufshcd_config_intr_aggr(hba, hba->nutrs - 1, INT_AGGR_DEF_TO);

	/* Configure UTRL and UTMRL base address registers */
	ufshcd_writel(hba, lower_32_bits(hba->utrdl_dma_addr),
		      REG_UTP_TRANSFER_REQ_LIST_BASE_L);
	ufshcd_writel(hba, upper_32_bits(hba->utrdl_dma_addr),
		      REG_UTP_TRANSFER_REQ_LIST_BASE_H);
	ufshcd_writel(hba, lower_32_bits(hba->utmrdl_dma_addr),
		      REG_UTP_TASK_REQ_LIST_BASE_L);
	ufshcd_writel(hba, upper_32_bits(hba->utmrdl_dma_addr),
		      REG_UTP_TASK_REQ_LIST_BASE_H);

	/*
	 * UCRDY, UTMRLDY and UTRLRDY bits must be 1
	 * DEI, HEI bits must be 0
	 */
	if (!(ufshcd_get_lists_status(reg))) {
		ufshcd_enable_run_stop_reg(hba);
	} else {
		dev_err(hba->dev,
			"Host controller not ready to process requests");
		err = -EIO;
		goto out;
	}

	if (hba->ufshcd_state == UFSHCD_STATE_RESET)
		scsi_unblock_requests(hba->host);

	hba->ufshcd_state = UFSHCD_STATE_OPERATIONAL;

out:
	return err;
}

/**
 * ufshcd_hba_enable - initialize the controller
 * @hba: per adapter instance
 *
 * The controller resets itself and controller firmware initialization
 * sequence kicks off. When controller is ready it will set
 * the Host Controller Enable bit to 1.
 *
 * Returns 0 on success, non-zero value on failure
 */
static int ufshcd_hba_enable(struct ufs_hba *hba)
{
	int retry;

	/*
	 * msleep of 1 and 5 used in this function might result in msleep(20),
	 * but it was necessary to send the UFS FPGA to reset mode during
	 * development and testing of this driver. msleep can be changed to
	 * mdelay and retry count can be reduced based on the controller.
	 */
	if (!ufshcd_is_hba_active(hba)) {

		/* change controller state to "reset state" */
		ufshcd_hba_stop(hba);

		/*
		 * This delay is based on the testing done with UFS host
		 * controller FPGA. The delay can be changed based on the
		 * host controller used.
		 */
		msleep(5);
	}

	/* start controller initialization sequence */
	ufshcd_hba_start(hba);

	/*
	 * To initialize a UFS host controller HCE bit must be set to 1.
	 * During initialization the HCE bit value changes from 1->0->1.
	 * When the host controller completes initialization sequence
	 * it sets the value of HCE bit to 1. The same HCE bit is read back
	 * to check if the controller has completed initialization sequence.
	 * So without this delay the value HCE = 1, set in the previous
	 * instruction might be read back.
	 * This delay can be changed based on the controller.
	 */
	msleep(1);

	/* wait for the host controller to complete initialization */
	retry = 10;
	while (ufshcd_is_hba_active(hba)) {
		if (retry) {
			retry--;
		} else {
			dev_err(hba->dev,
				"Controller enable failed\n");
			return -EIO;
		}
		msleep(5);
	}
	return 0;
}

/**
 * ufshcd_link_startup - Initialize unipro link startup
 * @hba: per adapter instance
 *
 * Returns 0 for success, non-zero in case of failure
 */
static int ufshcd_link_startup(struct ufs_hba *hba)
{
	int ret;

	/* enable UIC related interrupts */
	ufshcd_enable_intr(hba, UIC_COMMAND_COMPL);

	ret = ufshcd_dme_link_startup(hba);
	if (ret)
		goto out;

	ret = ufshcd_make_hba_operational(hba);

out:
	if (ret)
		dev_err(hba->dev, "link startup failed %d\n", ret);
	return ret;
}

5a0b0cb9
SRT
1804/**
1805 * ufshcd_verify_dev_init() - Verify device initialization
1806 * @hba: per-adapter instance
1807 *
1808 * Send NOP OUT UPIU and wait for NOP IN response to check whether the
1809 * device Transport Protocol (UTP) layer is ready after a reset.
1810 * If the UTP layer at the device side is not initialized, it may
1811 * not respond with NOP IN UPIU within timeout of %NOP_OUT_TIMEOUT
1812 * and we retry sending NOP OUT for %NOP_OUT_RETRIES iterations.
1813 */
1814static int ufshcd_verify_dev_init(struct ufs_hba *hba)
1815{
1816 int err = 0;
1817 int retries;
1818
1819 mutex_lock(&hba->dev_cmd.lock);
1820 for (retries = NOP_OUT_RETRIES; retries > 0; retries--) {
1821 err = ufshcd_exec_dev_cmd(hba, DEV_CMD_TYPE_NOP,
1822 NOP_OUT_TIMEOUT);
1823
1824 if (!err || err == -ETIMEDOUT)
1825 break;
1826
1827 dev_dbg(hba->dev, "%s: error %d retrying\n", __func__, err);
1828 }
1829 mutex_unlock(&hba->dev_cmd.lock);
1830
1831 if (err)
1832 dev_err(hba->dev, "%s: NOP OUT failed %d\n", __func__, err);
1833 return err;
1834}

/**
 * ufshcd_do_reset - reset the host controller
 * @hba: per adapter instance
 *
 * Returns SUCCESS/FAILED
 */
static int ufshcd_do_reset(struct ufs_hba *hba)
{
	struct ufshcd_lrb *lrbp;
	unsigned long flags;
	int tag;

	/* block commands from midlayer */
	scsi_block_requests(hba->host);

	spin_lock_irqsave(hba->host->host_lock, flags);
	hba->ufshcd_state = UFSHCD_STATE_RESET;

	/* send controller to reset state */
	ufshcd_hba_stop(hba);
	spin_unlock_irqrestore(hba->host->host_lock, flags);

	/* abort outstanding commands */
	for (tag = 0; tag < hba->nutrs; tag++) {
		if (test_bit(tag, &hba->outstanding_reqs)) {
			lrbp = &hba->lrb[tag];
			if (lrbp->cmd) {
				scsi_dma_unmap(lrbp->cmd);
				lrbp->cmd->result = DID_RESET << 16;
				lrbp->cmd->scsi_done(lrbp->cmd);
				lrbp->cmd = NULL;
				clear_bit_unlock(tag, &hba->lrb_in_use);
			}
		}
	}

	/* complete device management command */
	if (hba->dev_cmd.complete)
		complete(hba->dev_cmd.complete);

	/* clear outstanding request/task bit maps */
	hba->outstanding_reqs = 0;
	hba->outstanding_tasks = 0;

	/* Host controller enable */
	if (ufshcd_hba_enable(hba)) {
		dev_err(hba->dev,
			"Reset: Controller initialization failed\n");
		return FAILED;
	}

	if (ufshcd_link_startup(hba)) {
		dev_err(hba->dev,
			"Reset: Link start-up failed\n");
		return FAILED;
	}

	return SUCCESS;
}

/**
 * ufshcd_slave_alloc - handle initial SCSI device configurations
 * @sdev: pointer to SCSI device
 *
 * Returns 0 (success)
 */
static int ufshcd_slave_alloc(struct scsi_device *sdev)
{
	struct ufs_hba *hba;

	hba = shost_priv(sdev->host);
	sdev->tagged_supported = 1;

	/* Mode sense(6) is not supported by UFS, so use Mode sense(10) */
	sdev->use_10_for_ms = 1;
	scsi_set_tag_type(sdev, MSG_SIMPLE_TAG);

	/*
	 * Inform SCSI Midlayer that the LUN queue depth is same as the
	 * controller queue depth. If a LUN queue depth is less than the
	 * controller queue depth and if the LUN reports
	 * SAM_STAT_TASK_SET_FULL, the LUN queue depth will be adjusted
	 * with scsi_adjust_queue_depth.
	 */
	scsi_activate_tcq(sdev, hba->nutrs);
	return 0;
}

/**
 * ufshcd_slave_destroy - remove SCSI device configurations
 * @sdev: pointer to SCSI device
 */
static void ufshcd_slave_destroy(struct scsi_device *sdev)
{
	struct ufs_hba *hba;

	hba = shost_priv(sdev->host);
	scsi_deactivate_tcq(sdev, hba->nutrs);
}

/**
 * ufshcd_task_req_compl - handle task management request completion
 * @hba: per adapter instance
 * @index: index of the completed request
 * @resp: task management service response
 *
 * Returns non-zero value on error, zero on success
 */
static int ufshcd_task_req_compl(struct ufs_hba *hba, u32 index, u8 *resp)
{
	struct utp_task_req_desc *task_req_descp;
	struct utp_upiu_task_rsp *task_rsp_upiup;
	unsigned long flags;
	int ocs_value;
	int task_result;

	spin_lock_irqsave(hba->host->host_lock, flags);

	/* Clear completed tasks from outstanding_tasks */
	__clear_bit(index, &hba->outstanding_tasks);

	task_req_descp = hba->utmrdl_base_addr;
	ocs_value = ufshcd_get_tmr_ocs(&task_req_descp[index]);

	if (ocs_value == OCS_SUCCESS) {
		task_rsp_upiup = (struct utp_upiu_task_rsp *)
				task_req_descp[index].task_rsp_upiu;
		task_result = be32_to_cpu(task_rsp_upiup->header.dword_1);
		task_result = ((task_result & MASK_TASK_RESPONSE) >> 8);
		if (resp)
			*resp = (u8)task_result;
	} else {
		dev_err(hba->dev, "%s: failed, ocs = 0x%x\n",
				__func__, ocs_value);
	}
	spin_unlock_irqrestore(hba->host->host_lock, flags);

	return ocs_value;
}

/**
 * ufshcd_adjust_lun_qdepth - Update LUN queue depth if device responds with
 * SAM_STAT_TASK_SET_FULL SCSI command status.
 * @cmd: pointer to SCSI command
 */
static void ufshcd_adjust_lun_qdepth(struct scsi_cmnd *cmd)
{
	struct ufs_hba *hba;
	int i;
	int lun_qdepth = 0;

	hba = shost_priv(cmd->device->host);

	/*
	 * LUN queue depth can be obtained by counting outstanding commands
	 * on the LUN.
	 */
	for (i = 0; i < hba->nutrs; i++) {
		if (test_bit(i, &hba->outstanding_reqs)) {

			/*
			 * Check if the outstanding command belongs
			 * to the LUN which reported SAM_STAT_TASK_SET_FULL.
			 */
			if (cmd->device->lun == hba->lrb[i].lun)
				lun_qdepth++;
		}
	}

	/*
	 * LUN queue depth will be total outstanding commands, except the
	 * command for which the LUN reported SAM_STAT_TASK_SET_FULL.
	 */
	scsi_adjust_queue_depth(cmd->device, MSG_SIMPLE_TAG, lun_qdepth - 1);
}

/**
 * ufshcd_scsi_cmd_status - Update SCSI command result based on SCSI status
 * @lrbp: pointer to local reference block of completed command
 * @scsi_status: SCSI command status
 *
 * Returns value based on SCSI command status
 */
static inline int
ufshcd_scsi_cmd_status(struct ufshcd_lrb *lrbp, int scsi_status)
{
	int result = 0;

	switch (scsi_status) {
	case SAM_STAT_CHECK_CONDITION:
		ufshcd_copy_sense_data(lrbp);
		/* fallthrough */
	case SAM_STAT_GOOD:
		result |= DID_OK << 16 |
			  COMMAND_COMPLETE << 8 |
			  scsi_status;
		break;
	case SAM_STAT_TASK_SET_FULL:
		/*
		 * If a LUN reports SAM_STAT_TASK_SET_FULL, then the LUN queue
		 * depth needs to be adjusted to the exact number of
		 * outstanding commands the LUN can handle at any given time.
		 */
		ufshcd_adjust_lun_qdepth(lrbp->cmd);
		/* fallthrough */
	case SAM_STAT_BUSY:
	case SAM_STAT_TASK_ABORTED:
		ufshcd_copy_sense_data(lrbp);
		result |= scsi_status;
		break;
	default:
		result |= DID_ERROR << 16;
		break;
	} /* end of switch */

	return result;
}
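
/*
 * Illustrative sketch only: how the 32-bit result word composed above is
 * laid out. The byte positions follow the SCSI midlayer convention
 * (status, message and host bytes); example_decode_result() is a
 * hypothetical helper, not part of this driver.
 */
static inline void __maybe_unused example_decode_result(u32 result)
{
	u8 scsi_status = result & 0xff;		/* e.g. SAM_STAT_GOOD */
	u8 msg = (result >> 8) & 0xff;		/* e.g. COMMAND_COMPLETE */
	u8 host = (result >> 16) & 0xff;	/* e.g. DID_OK, DID_ERROR */

	pr_debug("status=0x%x msg=0x%x host=0x%x\n", scsi_status, msg, host);
}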

/**
 * ufshcd_transfer_rsp_status - Get overall status of the response
 * @hba: per adapter instance
 * @lrbp: pointer to local reference block of completed command
 *
 * Returns result of the command to notify SCSI midlayer
 */
static inline int
ufshcd_transfer_rsp_status(struct ufs_hba *hba, struct ufshcd_lrb *lrbp)
{
	int result = 0;
	int scsi_status;
	int ocs;

	/* overall command status of utrd */
	ocs = ufshcd_get_tr_ocs(lrbp);

	switch (ocs) {
	case OCS_SUCCESS:
		result = ufshcd_get_req_rsp(lrbp->ucd_rsp_ptr);

		switch (result) {
		case UPIU_TRANSACTION_RESPONSE:
			/*
			 * get the response UPIU result to extract
			 * the SCSI command status
			 */
			result = ufshcd_get_rsp_upiu_result(lrbp->ucd_rsp_ptr);

			/*
			 * get the result based on SCSI status response
			 * to notify the SCSI midlayer of the command status
			 */
			scsi_status = result & MASK_SCSI_STATUS;
			result = ufshcd_scsi_cmd_status(lrbp, scsi_status);

			if (ufshcd_is_exception_event(lrbp->ucd_rsp_ptr))
				schedule_work(&hba->eeh_work);
			break;
		case UPIU_TRANSACTION_REJECT_UPIU:
			/* TODO: handle Reject UPIU Response */
			result = DID_ERROR << 16;
			dev_err(hba->dev,
				"Reject UPIU not fully implemented\n");
			break;
		default:
			/* log the response code before overwriting it */
			dev_err(hba->dev,
				"Unexpected request response code = %x\n",
				result);
			result = DID_ERROR << 16;
			break;
		}
		break;
	case OCS_ABORTED:
		result |= DID_ABORT << 16;
		break;
	case OCS_INVALID_CMD_TABLE_ATTR:
	case OCS_INVALID_PRDT_ATTR:
	case OCS_MISMATCH_DATA_BUF_SIZE:
	case OCS_MISMATCH_RESP_UPIU_SIZE:
	case OCS_PEER_COMM_FAILURE:
	case OCS_FATAL_ERROR:
	default:
		result |= DID_ERROR << 16;
		dev_err(hba->dev,
			"OCS error from controller = %x\n", ocs);
		break;
	} /* end of switch */

	return result;
}

/**
 * ufshcd_uic_cmd_compl - handle completion of uic command
 * @hba: per adapter instance
 * @intr_status: interrupt status generated by the controller
 */
static void ufshcd_uic_cmd_compl(struct ufs_hba *hba, u32 intr_status)
{
	if ((intr_status & UIC_COMMAND_COMPL) && hba->active_uic_cmd) {
		hba->active_uic_cmd->argument2 |=
			ufshcd_get_uic_cmd_result(hba);
		hba->active_uic_cmd->argument3 =
			ufshcd_get_dme_attr_val(hba);
		complete(&hba->active_uic_cmd->done);
	}

	if ((intr_status & UIC_POWER_MODE) && hba->pwr_done)
		complete(hba->pwr_done);
}
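
/*
 * Illustrative sketch only: the waiting side that pairs with the
 * complete() calls above lives earlier in this file. Conceptually it is
 * a bounded wait on uic_cmd->done as sketched below; example_* is a
 * hypothetical name and this is not a drop-in replacement.
 */
static int __maybe_unused example_wait_uic_cmd(struct uic_command *uic_cmd)
{
	if (!wait_for_completion_timeout(&uic_cmd->done,
					 msecs_to_jiffies(UIC_CMD_TIMEOUT)))
		return -ETIMEDOUT;
	return 0;
}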

/**
 * ufshcd_transfer_req_compl - handle SCSI and query command completion
 * @hba: per adapter instance
 */
static void ufshcd_transfer_req_compl(struct ufs_hba *hba)
{
	struct ufshcd_lrb *lrbp;
	struct scsi_cmnd *cmd;
	unsigned long completed_reqs;
	u32 tr_doorbell;
	int result;
	int index;
	bool int_aggr_reset = false;

	tr_doorbell = ufshcd_readl(hba, REG_UTP_TRANSFER_REQ_DOOR_BELL);
	completed_reqs = tr_doorbell ^ hba->outstanding_reqs;

	for (index = 0; index < hba->nutrs; index++) {
		if (test_bit(index, &completed_reqs)) {
			lrbp = &hba->lrb[index];
			cmd = lrbp->cmd;
			/*
			 * Don't skip resetting interrupt aggregation counters
			 * if a regular command is present.
			 */
			int_aggr_reset |= !lrbp->intr_cmd;

			if (cmd) {
				result = ufshcd_transfer_rsp_status(hba, lrbp);
				scsi_dma_unmap(cmd);
				cmd->result = result;
				/* Mark completed command as NULL in LRB */
				lrbp->cmd = NULL;
				clear_bit_unlock(index, &hba->lrb_in_use);
				/* Do not touch lrbp after scsi done */
				cmd->scsi_done(cmd);
			} else if (lrbp->command_type ==
					UTP_CMD_TYPE_DEV_MANAGE) {
				if (hba->dev_cmd.complete)
					complete(hba->dev_cmd.complete);
			}
		} /* end of if */
	} /* end of for */

	/* clear corresponding bits of completed commands */
	hba->outstanding_reqs ^= completed_reqs;

	/* we might have free'd some tags above */
	wake_up(&hba->dev_cmd.tag_wq);

	/* Reset interrupt aggregation counters */
	if (int_aggr_reset)
		ufshcd_reset_intr_aggr(hba);
}
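
/*
 * Illustrative sketch only: the completion detection above relies on the
 * controller clearing a doorbell bit when the corresponding request
 * finishes, so XOR-ing the doorbell with the software copy yields exactly
 * the completed tags. example_* is a hypothetical name.
 */
static inline unsigned long __maybe_unused
example_completed_tags(unsigned long tr_doorbell, unsigned long outstanding)
{
	/* bits still set in 'outstanding' but already cleared in doorbell */
	return tr_doorbell ^ outstanding;
}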

/**
 * ufshcd_disable_ee - disable exception event
 * @hba: per-adapter instance
 * @mask: exception event to disable
 *
 * Disables exception event in the device so that the EVENT_ALERT
 * bit is not set.
 *
 * Returns zero on success, non-zero error value on failure.
 */
static int ufshcd_disable_ee(struct ufs_hba *hba, u16 mask)
{
	int err = 0;
	u32 val;

	if (!(hba->ee_ctrl_mask & mask))
		goto out;

	val = hba->ee_ctrl_mask & ~mask;
	val &= 0xFFFF; /* 2 bytes */
	err = ufshcd_query_attr(hba, UPIU_QUERY_OPCODE_WRITE_ATTR,
			QUERY_ATTR_IDN_EE_CONTROL, 0, 0, &val);
	if (!err)
		hba->ee_ctrl_mask &= ~mask;
out:
	return err;
}

/**
 * ufshcd_enable_ee - enable exception event
 * @hba: per-adapter instance
 * @mask: exception event to enable
 *
 * Enable corresponding exception event in the device to allow
 * device to alert host in critical scenarios.
 *
 * Returns zero on success, non-zero error value on failure.
 */
static int ufshcd_enable_ee(struct ufs_hba *hba, u16 mask)
{
	int err = 0;
	u32 val;

	if (hba->ee_ctrl_mask & mask)
		goto out;

	val = hba->ee_ctrl_mask | mask;
	val &= 0xFFFF; /* 2 bytes */
	err = ufshcd_query_attr(hba, UPIU_QUERY_OPCODE_WRITE_ATTR,
			QUERY_ATTR_IDN_EE_CONTROL, 0, 0, &val);
	if (!err)
		hba->ee_ctrl_mask |= mask;
out:
	return err;
}

/**
 * ufshcd_enable_auto_bkops - Allow device managed BKOPS
 * @hba: per-adapter instance
 *
 * Allow device to manage background operations on its own. Enabling
 * this might lead to inconsistent latencies during normal data transfers
 * as the device is allowed to manage its own way of handling background
 * operations.
 *
 * Returns zero on success, non-zero on failure.
 */
static int ufshcd_enable_auto_bkops(struct ufs_hba *hba)
{
	int err = 0;

	if (hba->auto_bkops_enabled)
		goto out;

	err = ufshcd_query_flag(hba, UPIU_QUERY_OPCODE_SET_FLAG,
			QUERY_FLAG_IDN_BKOPS_EN, NULL);
	if (err) {
		dev_err(hba->dev, "%s: failed to enable bkops %d\n",
				__func__, err);
		goto out;
	}

	hba->auto_bkops_enabled = true;

	/* No need of URGENT_BKOPS exception from the device */
	err = ufshcd_disable_ee(hba, MASK_EE_URGENT_BKOPS);
	if (err)
		dev_err(hba->dev, "%s: failed to disable exception event %d\n",
				__func__, err);
out:
	return err;
}

/**
 * ufshcd_disable_auto_bkops - block device in doing background operations
 * @hba: per-adapter instance
 *
 * Disabling background operations improves command response latency but
 * has the drawback of the device moving into a critical state where it
 * becomes non-operable. Make sure to call ufshcd_enable_auto_bkops()
 * whenever the host is idle so that BKOPS are managed effectively
 * without any negative impact.
 *
 * Returns zero on success, non-zero on failure.
 */
static int ufshcd_disable_auto_bkops(struct ufs_hba *hba)
{
	int err = 0;

	if (!hba->auto_bkops_enabled)
		goto out;

	/*
	 * If host assisted BKOPs is to be enabled, make sure
	 * urgent bkops exception is allowed.
	 */
	err = ufshcd_enable_ee(hba, MASK_EE_URGENT_BKOPS);
	if (err) {
		dev_err(hba->dev, "%s: failed to enable exception event %d\n",
				__func__, err);
		goto out;
	}

	err = ufshcd_query_flag(hba, UPIU_QUERY_OPCODE_CLEAR_FLAG,
			QUERY_FLAG_IDN_BKOPS_EN, NULL);
	if (err) {
		dev_err(hba->dev, "%s: failed to disable bkops %d\n",
				__func__, err);
		ufshcd_disable_ee(hba, MASK_EE_URGENT_BKOPS);
		goto out;
	}

	hba->auto_bkops_enabled = false;
out:
	return err;
}

/**
 * ufshcd_force_reset_auto_bkops - force enable of auto bkops
 * @hba: per adapter instance
 *
 * After a device reset the device may toggle the BKOPS_EN flag
 * to its default value. The s/w tracking variables should be updated
 * as well. Do this by forcing enable of auto bkops.
 */
static void ufshcd_force_reset_auto_bkops(struct ufs_hba *hba)
{
	hba->auto_bkops_enabled = false;
	hba->ee_ctrl_mask |= MASK_EE_URGENT_BKOPS;
	ufshcd_enable_auto_bkops(hba);
}

static inline int ufshcd_get_bkops_status(struct ufs_hba *hba, u32 *status)
{
	return ufshcd_query_attr(hba, UPIU_QUERY_OPCODE_READ_ATTR,
			QUERY_ATTR_IDN_BKOPS_STATUS, 0, 0, status);
}

/**
 * ufshcd_urgent_bkops - handle urgent bkops exception event
 * @hba: per-adapter instance
 *
 * Enable fBackgroundOpsEn flag in the device to permit background
 * operations.
 *
 * Returns zero on success, non-zero on failure.
 */
static int ufshcd_urgent_bkops(struct ufs_hba *hba)
{
	int err;
	u32 status = 0;

	err = ufshcd_get_bkops_status(hba, &status);
	if (err) {
		dev_err(hba->dev, "%s: failed to get BKOPS status %d\n",
				__func__, err);
		goto out;
	}

	status = status & 0xF;

	/* handle only if status indicates performance impact or critical */
	if (status >= BKOPS_STATUS_PERF_IMPACT)
		err = ufshcd_enable_auto_bkops(hba);
out:
	return err;
}
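
/*
 * Illustrative sketch only: the threshold check above assumes the
 * bBackgroundOpStatus scale from the UFS spec (0 = not required,
 * 1 = non-critical, 2 = performance impact, 3 = critical). Those exact
 * values are an assumption here; example_* is a hypothetical name.
 */
static inline bool __maybe_unused example_bkops_needed(u32 status)
{
	return (status & 0xF) >= BKOPS_STATUS_PERF_IMPACT;
}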

static inline int ufshcd_get_ee_status(struct ufs_hba *hba, u32 *status)
{
	return ufshcd_query_attr(hba, UPIU_QUERY_OPCODE_READ_ATTR,
			QUERY_ATTR_IDN_EE_STATUS, 0, 0, status);
}

/**
 * ufshcd_exception_event_handler - handle exceptions raised by device
 * @work: pointer to work data
 *
 * Read bExceptionEventStatus attribute from the device and handle the
 * exception event accordingly.
 */
static void ufshcd_exception_event_handler(struct work_struct *work)
{
	struct ufs_hba *hba;
	int err;
	u32 status = 0;

	hba = container_of(work, struct ufs_hba, eeh_work);

	pm_runtime_get_sync(hba->dev);
	err = ufshcd_get_ee_status(hba, &status);
	if (err) {
		dev_err(hba->dev, "%s: failed to get exception status %d\n",
				__func__, err);
		goto out;
	}

	status &= hba->ee_ctrl_mask;
	if (status & MASK_EE_URGENT_BKOPS) {
		err = ufshcd_urgent_bkops(hba);
		if (err)
			dev_err(hba->dev, "%s: failed to handle urgent bkops %d\n",
					__func__, err);
	}
out:
	pm_runtime_put_sync(hba->dev);
	return;
}

/**
 * ufshcd_fatal_err_handler - handle fatal errors
 * @work: pointer to work structure embedded in the per adapter instance
 */
static void ufshcd_fatal_err_handler(struct work_struct *work)
{
	struct ufs_hba *hba;

	hba = container_of(work, struct ufs_hba, feh_workq);

	pm_runtime_get_sync(hba->dev);
	/* check if reset is already in progress */
	if (hba->ufshcd_state != UFSHCD_STATE_RESET)
		ufshcd_do_reset(hba);
	pm_runtime_put_sync(hba->dev);
}

/**
 * ufshcd_err_handler - Check for fatal errors
 * @hba: per adapter instance
 */
static void ufshcd_err_handler(struct ufs_hba *hba)
{
	u32 reg;

	if (hba->errors & INT_FATAL_ERRORS)
		goto fatal_eh;

	if (hba->errors & UIC_ERROR) {
		reg = ufshcd_readl(hba, REG_UIC_ERROR_CODE_DATA_LINK_LAYER);
		if (reg & UIC_DATA_LINK_LAYER_ERROR_PA_INIT)
			goto fatal_eh;
	}
	return;
fatal_eh:
	hba->ufshcd_state = UFSHCD_STATE_ERROR;
	schedule_work(&hba->feh_workq);
}

/**
 * ufshcd_tmc_handler - handle task management function completion
 * @hba: per adapter instance
 */
static void ufshcd_tmc_handler(struct ufs_hba *hba)
{
	u32 tm_doorbell;

	tm_doorbell = ufshcd_readl(hba, REG_UTP_TASK_REQ_DOOR_BELL);
	hba->tm_condition = tm_doorbell ^ hba->outstanding_tasks;
	wake_up(&hba->tm_wq);
}

/**
 * ufshcd_sl_intr - Interrupt service routine
 * @hba: per adapter instance
 * @intr_status: contains interrupts generated by the controller
 */
static void ufshcd_sl_intr(struct ufs_hba *hba, u32 intr_status)
{
	hba->errors = UFSHCD_ERROR_MASK & intr_status;
	if (hba->errors)
		ufshcd_err_handler(hba);

	if (intr_status & UFSHCD_UIC_MASK)
		ufshcd_uic_cmd_compl(hba, intr_status);

	if (intr_status & UTP_TASK_REQ_COMPL)
		ufshcd_tmc_handler(hba);

	if (intr_status & UTP_TRANSFER_REQ_COMPL)
		ufshcd_transfer_req_compl(hba);
}

/**
 * ufshcd_intr - Main interrupt service routine
 * @irq: irq number
 * @__hba: pointer to adapter instance
 *
 * Returns IRQ_HANDLED - If interrupt is valid
 *		IRQ_NONE - If invalid interrupt
 */
static irqreturn_t ufshcd_intr(int irq, void *__hba)
{
	u32 intr_status;
	irqreturn_t retval = IRQ_NONE;
	struct ufs_hba *hba = __hba;

	spin_lock(hba->host->host_lock);
	intr_status = ufshcd_readl(hba, REG_INTERRUPT_STATUS);

	if (intr_status) {
		ufshcd_writel(hba, intr_status, REG_INTERRUPT_STATUS);
		ufshcd_sl_intr(hba, intr_status);
		retval = IRQ_HANDLED;
	}
	spin_unlock(hba->host->host_lock);
	return retval;
}

static int ufshcd_clear_tm_cmd(struct ufs_hba *hba, int tag)
{
	int err = 0;
	u32 mask = 1 << tag;
	unsigned long flags;

	if (!test_bit(tag, &hba->outstanding_tasks))
		goto out;

	spin_lock_irqsave(hba->host->host_lock, flags);
	ufshcd_writel(hba, ~(1 << tag), REG_UTP_TASK_REQ_LIST_CLEAR);
	spin_unlock_irqrestore(hba->host->host_lock, flags);

	/* poll for max. 1 sec to clear door bell register by h/w */
	err = ufshcd_wait_for_register(hba,
			REG_UTP_TASK_REQ_DOOR_BELL,
			mask, 0, 1000, 1000);
out:
	return err;
}

/**
 * ufshcd_issue_tm_cmd - issues task management commands to controller
 * @hba: per adapter instance
 * @lun_id: LUN ID to which TM command is sent
 * @task_id: task ID to which the TM command is applicable
 * @tm_function: task management function opcode
 * @tm_response: task management service response return value
 *
 * Returns non-zero value on error, zero on success.
 */
static int ufshcd_issue_tm_cmd(struct ufs_hba *hba, int lun_id, int task_id,
		u8 tm_function, u8 *tm_response)
{
	struct utp_task_req_desc *task_req_descp;
	struct utp_upiu_task_req *task_req_upiup;
	struct Scsi_Host *host;
	unsigned long flags;
	int free_slot;
	int err;
	int task_tag;

	host = hba->host;

	/*
	 * Get a free slot, sleep if slots are unavailable.
	 * Even though we use wait_event() which sleeps indefinitely,
	 * the maximum wait time is bounded by %TM_CMD_TIMEOUT.
	 */
	wait_event(hba->tm_tag_wq, ufshcd_get_tm_free_slot(hba, &free_slot));

	spin_lock_irqsave(host->host_lock, flags);
	task_req_descp = hba->utmrdl_base_addr;
	task_req_descp += free_slot;

	/* Configure task request descriptor */
	task_req_descp->header.dword_0 = cpu_to_le32(UTP_REQ_DESC_INT_CMD);
	task_req_descp->header.dword_2 =
			cpu_to_le32(OCS_INVALID_COMMAND_STATUS);

	/* Configure task request UPIU */
	task_req_upiup =
		(struct utp_upiu_task_req *) task_req_descp->task_req_upiu;
	task_tag = hba->nutrs + free_slot;
	task_req_upiup->header.dword_0 =
		UPIU_HEADER_DWORD(UPIU_TRANSACTION_TASK_REQ, 0,
				  lun_id, task_tag);
	task_req_upiup->header.dword_1 =
		UPIU_HEADER_DWORD(0, tm_function, 0, 0);

	task_req_upiup->input_param1 = cpu_to_be32(lun_id);
	task_req_upiup->input_param2 = cpu_to_be32(task_id);

	/* send command to the controller */
	__set_bit(free_slot, &hba->outstanding_tasks);
	ufshcd_writel(hba, 1 << free_slot, REG_UTP_TASK_REQ_DOOR_BELL);

	spin_unlock_irqrestore(host->host_lock, flags);

	/* wait until the task management command is completed */
	err = wait_event_timeout(hba->tm_wq,
			test_bit(free_slot, &hba->tm_condition),
			msecs_to_jiffies(TM_CMD_TIMEOUT));
	if (!err) {
		dev_err(hba->dev, "%s: task management cmd 0x%.2x timed-out\n",
				__func__, tm_function);
		if (ufshcd_clear_tm_cmd(hba, free_slot))
			dev_WARN(hba->dev, "%s: unable to clear tm cmd (slot %d) after timeout\n",
					__func__, free_slot);
		err = -ETIMEDOUT;
	} else {
		err = ufshcd_task_req_compl(hba, free_slot, tm_response);
	}

	clear_bit(free_slot, &hba->tm_condition);
	ufshcd_put_tm_slot(hba, free_slot);
	wake_up(&hba->tm_tag_wq);

	return err;
}

/**
 * ufshcd_device_reset - reset device and abort all the pending commands
 * @cmd: SCSI command pointer
 *
 * Returns SUCCESS/FAILED
 */
static int ufshcd_device_reset(struct scsi_cmnd *cmd)
{
	struct Scsi_Host *host;
	struct ufs_hba *hba;
	unsigned int tag;
	u32 pos;
	int err;
	u8 resp = 0xF;
	struct ufshcd_lrb *lrbp;

	host = cmd->device->host;
	hba = shost_priv(host);
	tag = cmd->request->tag;

	lrbp = &hba->lrb[tag];
	err = ufshcd_issue_tm_cmd(hba, lrbp->lun, 0, UFS_LOGICAL_RESET, &resp);
	if (err || resp != UPIU_TASK_MANAGEMENT_FUNC_COMPL) {
		err = FAILED;
		goto out;
	} else {
		err = SUCCESS;
	}

	for (pos = 0; pos < hba->nutrs; pos++) {
		if (test_bit(pos, &hba->outstanding_reqs) &&
		    (hba->lrb[tag].lun == hba->lrb[pos].lun)) {

			/* clear the respective UTRLCLR register bit */
			ufshcd_utrl_clear(hba, pos);

			clear_bit(pos, &hba->outstanding_reqs);

			if (hba->lrb[pos].cmd) {
				scsi_dma_unmap(hba->lrb[pos].cmd);
				hba->lrb[pos].cmd->result =
						DID_ABORT << 16;
				/* complete the aborted command, not @cmd */
				hba->lrb[pos].cmd->scsi_done(hba->lrb[pos].cmd);
				hba->lrb[pos].cmd = NULL;
				clear_bit_unlock(pos, &hba->lrb_in_use);
				wake_up(&hba->dev_cmd.tag_wq);
			}
		}
	} /* end of for */
out:
	return err;
}

/**
 * ufshcd_host_reset - Main reset function registered with scsi layer
 * @cmd: SCSI command pointer
 *
 * Returns SUCCESS/FAILED
 */
static int ufshcd_host_reset(struct scsi_cmnd *cmd)
{
	struct ufs_hba *hba;

	hba = shost_priv(cmd->device->host);

	if (hba->ufshcd_state == UFSHCD_STATE_RESET)
		return SUCCESS;

	return ufshcd_do_reset(hba);
}

/**
 * ufshcd_abort - abort a specific command
 * @cmd: SCSI command pointer
 *
 * Abort the pending command in device by sending UFS_ABORT_TASK task management
 * command, and in host controller by clearing the door-bell register. There can
 * be a race between the controller sending the command to the device and the
 * abort being issued. To avoid that, first issue UFS_QUERY_TASK to check if the
 * command is really issued and then try to abort it.
 *
 * Returns SUCCESS/FAILED
 */
static int ufshcd_abort(struct scsi_cmnd *cmd)
{
	struct Scsi_Host *host;
	struct ufs_hba *hba;
	unsigned long flags;
	unsigned int tag;
	int err = 0;
	int poll_cnt;
	u8 resp = 0xF;
	struct ufshcd_lrb *lrbp;

	host = cmd->device->host;
	hba = shost_priv(host);
	tag = cmd->request->tag;

	/* If command is already aborted/completed, return SUCCESS */
	if (!(test_bit(tag, &hba->outstanding_reqs)))
		goto out;

	lrbp = &hba->lrb[tag];
	for (poll_cnt = 100; poll_cnt; poll_cnt--) {
		err = ufshcd_issue_tm_cmd(hba, lrbp->lun, lrbp->task_tag,
				UFS_QUERY_TASK, &resp);
		if (!err && resp == UPIU_TASK_MANAGEMENT_FUNC_SUCCEEDED) {
			/* cmd pending in the device */
			break;
		} else if (!err && resp == UPIU_TASK_MANAGEMENT_FUNC_COMPL) {
			u32 reg;

			/*
			 * cmd not pending in the device, check if it is
			 * in transition.
			 */
			reg = ufshcd_readl(hba, REG_UTP_TRANSFER_REQ_DOOR_BELL);
			if (reg & (1 << tag)) {
				/* sleep for max. 200us to stabilize */
				usleep_range(100, 200);
				continue;
			}
			/* command completed already */
			goto out;
		} else {
			if (!err)
				err = resp; /* service response error */
			goto out;
		}
	}

	if (!poll_cnt) {
		err = -EBUSY;
		goto out;
	}

	err = ufshcd_issue_tm_cmd(hba, lrbp->lun, lrbp->task_tag,
			UFS_ABORT_TASK, &resp);
	if (err || resp != UPIU_TASK_MANAGEMENT_FUNC_COMPL) {
		if (!err)
			err = resp; /* service response error */
		goto out;
	}

	err = ufshcd_clear_cmd(hba, tag);
	if (err)
		goto out;

	scsi_dma_unmap(cmd);

	spin_lock_irqsave(host->host_lock, flags);
	__clear_bit(tag, &hba->outstanding_reqs);
	hba->lrb[tag].cmd = NULL;
	spin_unlock_irqrestore(host->host_lock, flags);

	clear_bit_unlock(tag, &hba->lrb_in_use);
	wake_up(&hba->dev_cmd.tag_wq);
out:
	if (!err) {
		err = SUCCESS;
	} else {
		dev_err(hba->dev, "%s: failed with err %d\n", __func__, err);
		err = FAILED;
	}

	return err;
}

/**
 * ufshcd_async_scan - asynchronous execution for link startup
 * @data: data pointer to pass to this function
 * @cookie: cookie data
 */
static void ufshcd_async_scan(void *data, async_cookie_t cookie)
{
	struct ufs_hba *hba = (struct ufs_hba *)data;
	int ret;

	ret = ufshcd_link_startup(hba);
	if (ret)
		goto out;

	ufshcd_config_max_pwr_mode(hba);

	ret = ufshcd_verify_dev_init(hba);
	if (ret)
		goto out;

	ret = ufshcd_complete_dev_init(hba);
	if (ret)
		goto out;

	ufshcd_force_reset_auto_bkops(hba);
	scsi_scan_host(hba->host);
	pm_runtime_put_sync(hba->dev);
out:
	return;
}

static struct scsi_host_template ufshcd_driver_template = {
	.module			= THIS_MODULE,
	.name			= UFSHCD,
	.proc_name		= UFSHCD,
	.queuecommand		= ufshcd_queuecommand,
	.slave_alloc		= ufshcd_slave_alloc,
	.slave_destroy		= ufshcd_slave_destroy,
	.eh_abort_handler	= ufshcd_abort,
	.eh_device_reset_handler = ufshcd_device_reset,
	.eh_host_reset_handler	= ufshcd_host_reset,
	.this_id		= -1,
	.sg_tablesize		= SG_ALL,
	.cmd_per_lun		= UFSHCD_CMD_PER_LUN,
	.can_queue		= UFSHCD_CAN_QUEUE,
};

/**
 * ufshcd_suspend - suspend power management function
 * @hba: per adapter instance
 * @state: power state
 *
 * Returns -ENOSYS
 */
int ufshcd_suspend(struct ufs_hba *hba, pm_message_t state)
{
	/*
	 * TODO:
	 * 1. Block SCSI requests from SCSI midlayer
	 * 2. Change the internal driver state to non operational
	 * 3. Set UTRLRSR and UTMRLRSR bits to zero
	 * 4. Wait until outstanding commands are completed
	 * 5. Set HCE to zero to send the UFS host controller to reset state
	 */

	return -ENOSYS;
}
EXPORT_SYMBOL_GPL(ufshcd_suspend);

/**
 * ufshcd_resume - resume power management function
 * @hba: per adapter instance
 *
 * Returns -ENOSYS
 */
int ufshcd_resume(struct ufs_hba *hba)
{
	/*
	 * TODO:
	 * 1. Set HCE to 1, to start the UFS host controller
	 *    initialization process
	 * 2. Set UTRLRSR and UTMRLRSR bits to 1
	 * 3. Change the internal driver state to operational
	 * 4. Unblock SCSI requests from SCSI midlayer
	 */

	return -ENOSYS;
}
EXPORT_SYMBOL_GPL(ufshcd_resume);

int ufshcd_runtime_suspend(struct ufs_hba *hba)
{
	if (!hba)
		return 0;

	/*
	 * The device is idle with no requests in the queue,
	 * allow background operations.
	 */
	return ufshcd_enable_auto_bkops(hba);
}
EXPORT_SYMBOL(ufshcd_runtime_suspend);

int ufshcd_runtime_resume(struct ufs_hba *hba)
{
	if (!hba)
		return 0;

	return ufshcd_disable_auto_bkops(hba);
}
EXPORT_SYMBOL(ufshcd_runtime_resume);

int ufshcd_runtime_idle(struct ufs_hba *hba)
{
	return 0;
}
EXPORT_SYMBOL(ufshcd_runtime_idle);
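
/*
 * Illustrative sketch only, not part of this file: a bus glue driver
 * (PCI or platform variant) would typically forward its runtime PM
 * callbacks to the helpers exported above. The example_* names and the
 * use of dev_get_drvdata() to recover the hba are assumptions for
 * illustration.
 */
static int example_glue_runtime_suspend(struct device *dev)
{
	return ufshcd_runtime_suspend(dev_get_drvdata(dev));
}

static int example_glue_runtime_resume(struct device *dev)
{
	return ufshcd_runtime_resume(dev_get_drvdata(dev));
}

static const struct dev_pm_ops example_glue_pm_ops = {
	SET_RUNTIME_PM_OPS(example_glue_runtime_suspend,
			   example_glue_runtime_resume,
			   NULL)
};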

/**
 * ufshcd_remove - de-allocate SCSI host and host memory space
 * data structures
 * @hba: per adapter instance
 */
void ufshcd_remove(struct ufs_hba *hba)
{
	scsi_remove_host(hba->host);
	/* disable interrupts */
	ufshcd_disable_intr(hba, hba->intr_mask);
	ufshcd_hba_stop(hba);

	scsi_host_put(hba->host);
}
EXPORT_SYMBOL_GPL(ufshcd_remove);

/**
 * ufshcd_init - Driver initialization routine
 * @dev: pointer to device handle
 * @hba_handle: driver private handle
 * @mmio_base: base register address
 * @irq: Interrupt line of device
 *
 * Returns 0 on success, non-zero value on failure
 */
int ufshcd_init(struct device *dev, struct ufs_hba **hba_handle,
		 void __iomem *mmio_base, unsigned int irq)
{
	struct Scsi_Host *host;
	struct ufs_hba *hba;
	int err;

	if (!dev) {
		/* dev is NULL here, so dev_err() cannot be used */
		pr_err("Invalid memory reference for dev is NULL\n");
		err = -ENODEV;
		goto out_error;
	}

	if (!mmio_base) {
		dev_err(dev,
		"Invalid memory reference for mmio_base is NULL\n");
		err = -ENODEV;
		goto out_error;
	}

	host = scsi_host_alloc(&ufshcd_driver_template,
				sizeof(struct ufs_hba));
	if (!host) {
		dev_err(dev, "scsi_host_alloc failed\n");
		err = -ENOMEM;
		goto out_error;
	}
	hba = shost_priv(host);
	hba->host = host;
	hba->dev = dev;
	hba->mmio_base = mmio_base;
	hba->irq = irq;

	/* Read capabilities registers */
	ufshcd_hba_capabilities(hba);

	/* Get UFS version supported by the controller */
	hba->ufs_version = ufshcd_get_ufs_version(hba);

	/* Get Interrupt bit mask per version */
	hba->intr_mask = ufshcd_get_intr_mask(hba);

	/* Allocate memory for host memory space */
	err = ufshcd_memory_alloc(hba);
	if (err) {
		dev_err(hba->dev, "Memory allocation failed\n");
		goto out_disable;
	}

	/* Configure LRB */
	ufshcd_host_memory_configure(hba);

	host->can_queue = hba->nutrs;
	host->cmd_per_lun = hba->nutrs;
	host->max_id = UFSHCD_MAX_ID;
	host->max_lun = UFSHCD_MAX_LUNS;
	host->max_channel = UFSHCD_MAX_CHANNEL;
	host->unique_id = host->host_no;
	host->max_cmd_len = MAX_CDB_SIZE;

	/* Initialize wait queues for task management */
	init_waitqueue_head(&hba->tm_wq);
	init_waitqueue_head(&hba->tm_tag_wq);

	/* Initialize work queues */
	INIT_WORK(&hba->feh_workq, ufshcd_fatal_err_handler);
	INIT_WORK(&hba->eeh_work, ufshcd_exception_event_handler);

	/* Initialize UIC command mutex */
	mutex_init(&hba->uic_cmd_mutex);

	/* Initialize mutex for device management commands */
	mutex_init(&hba->dev_cmd.lock);

	/* Initialize device management tag acquire wait queue */
	init_waitqueue_head(&hba->dev_cmd.tag_wq);

	/* IRQ registration */
	err = devm_request_irq(dev, irq, ufshcd_intr, IRQF_SHARED, UFSHCD, hba);
	if (err) {
		dev_err(hba->dev, "request irq failed\n");
		goto out_disable;
	}

	/* Enable SCSI tag mapping */
	err = scsi_init_shared_tag_map(host, host->can_queue);
	if (err) {
		dev_err(hba->dev, "init shared queue failed\n");
		goto out_disable;
	}

	err = scsi_add_host(host, hba->dev);
	if (err) {
		dev_err(hba->dev, "scsi_add_host failed\n");
		goto out_disable;
	}

	/* Host controller enable */
	err = ufshcd_hba_enable(hba);
	if (err) {
		dev_err(hba->dev, "Host controller enable failed\n");
		goto out_remove_scsi_host;
	}

	*hba_handle = hba;

	/* Hold auto suspend until async scan completes */
	pm_runtime_get_sync(dev);

	async_schedule(ufshcd_async_scan, hba);

	return 0;

out_remove_scsi_host:
	scsi_remove_host(hba->host);
out_disable:
	scsi_host_put(host);
out_error:
	return err;
}
EXPORT_SYMBOL_GPL(ufshcd_init);
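
/*
 * Illustrative sketch only: how a glue driver is expected to pair
 * ufshcd_init() with ufshcd_remove(). Resource acquisition (ioremap,
 * IRQ lookup) is elided and the example_* name is hypothetical.
 */
static int __maybe_unused example_glue_probe(struct device *dev,
		void __iomem *mmio_base, unsigned int irq)
{
	struct ufs_hba *hba;
	int err;

	err = ufshcd_init(dev, &hba, mmio_base, irq);
	if (err)
		return err;

	/* stash the hba so the remove path can call ufshcd_remove(hba) */
	dev_set_drvdata(dev, hba);
	return 0;
}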

MODULE_AUTHOR("Santosh Yaraganavi <santosh.sy@samsung.com>");
MODULE_AUTHOR("Vinayak Holikatti <h.vinayak@samsung.com>");
MODULE_DESCRIPTION("Generic UFS host controller driver Core");
MODULE_LICENSE("GPL");
MODULE_VERSION(UFSHCD_DRIVER_VERSION);