drivers/scsi/ufs/ufshcd.c
/*
 * Universal Flash Storage Host controller driver Core
 *
 * This code is based on drivers/scsi/ufs/ufshcd.c
 * Copyright (C) 2011-2013 Samsung India Software Operations
 *
 * Authors:
 *	Santosh Yaraganavi <santosh.sy@samsung.com>
 *	Vinayak Holikatti <h.vinayak@samsung.com>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version 2
 * of the License, or (at your option) any later version.
 * See the COPYING file in the top-level directory or visit
 * <http://www.gnu.org/licenses/gpl-2.0.html>
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * This program is provided "AS IS" and "WITH ALL FAULTS" and
 * without warranty of any kind. You are solely responsible for
 * determining the appropriateness of using and distributing
 * the program and assume all risks associated with your exercise
 * of rights with respect to the program, including but not limited
 * to infringement of third party rights, the risks and costs of
 * program errors, damage to or loss of data, programs or equipment,
 * and unavailability or interruption of operations. Under no
 * circumstances will the contributor of this Program be liable for
 * any damages of any kind arising from your use or distribution of
 * this program.
 */
35
6ccf44fe
SJ
36#include <linux/async.h>
37
e0eca63e 38#include "ufshcd.h"
7a3e97b0 39
2fbd009b
SJ
40#define UFSHCD_ENABLE_INTRS (UTP_TRANSFER_REQ_COMPL |\
41 UTP_TASK_REQ_COMPL |\
42 UFSHCD_ERROR_MASK)
6ccf44fe
SJ
43/* UIC command timeout, unit: ms */
44#define UIC_CMD_TIMEOUT 500
2fbd009b 45
7a3e97b0
SY
46enum {
47 UFSHCD_MAX_CHANNEL = 0,
48 UFSHCD_MAX_ID = 1,
49 UFSHCD_MAX_LUNS = 8,
50 UFSHCD_CMD_PER_LUN = 32,
51 UFSHCD_CAN_QUEUE = 32,
52};
53
54/* UFSHCD states */
55enum {
56 UFSHCD_STATE_OPERATIONAL,
57 UFSHCD_STATE_RESET,
58 UFSHCD_STATE_ERROR,
59};
60
61/* Interrupt configuration options */
62enum {
63 UFSHCD_INT_DISABLE,
64 UFSHCD_INT_ENABLE,
65 UFSHCD_INT_CLEAR,
66};
67
68/* Interrupt aggregation options */
69enum {
70 INT_AGGR_RESET,
71 INT_AGGR_CONFIG,
72};
73
2fbd009b
SJ
74/**
75 * ufshcd_get_intr_mask - Get the interrupt bit mask
76 * @hba - Pointer to adapter instance
77 *
78 * Returns interrupt bit mask per version
79 */
80static inline u32 ufshcd_get_intr_mask(struct ufs_hba *hba)
81{
82 if (hba->ufs_version == UFSHCI_VERSION_10)
83 return INTERRUPT_MASK_ALL_VER_10;
84 else
85 return INTERRUPT_MASK_ALL_VER_11;
86}
87
7a3e97b0
SY
88/**
89 * ufshcd_get_ufs_version - Get the UFS version supported by the HBA
90 * @hba - Pointer to adapter instance
91 *
92 * Returns UFSHCI version supported by the controller
93 */
94static inline u32 ufshcd_get_ufs_version(struct ufs_hba *hba)
95{
b873a275 96 return ufshcd_readl(hba, REG_UFS_VERSION);
7a3e97b0
SY
97}
98
99/**
100 * ufshcd_is_device_present - Check if any device connected to
101 * the host controller
102 * @reg_hcs - host controller status register value
103 *
73ec513a 104 * Returns 1 if device present, 0 if no device detected
7a3e97b0
SY
105 */
106static inline int ufshcd_is_device_present(u32 reg_hcs)
107{
73ec513a 108 return (DEVICE_PRESENT & reg_hcs) ? 1 : 0;
7a3e97b0
SY
109}
110
111/**
112 * ufshcd_get_tr_ocs - Get the UTRD Overall Command Status
113 * @lrb: pointer to local command reference block
114 *
115 * This function is used to get the OCS field from UTRD
116 * Returns the OCS field in the UTRD
117 */
118static inline int ufshcd_get_tr_ocs(struct ufshcd_lrb *lrbp)
119{
120 return lrbp->utr_descriptor_ptr->header.dword_2 & MASK_OCS;
121}
122
123/**
124 * ufshcd_get_tmr_ocs - Get the UTMRD Overall Command Status
125 * @task_req_descp: pointer to utp_task_req_desc structure
126 *
127 * This function is used to get the OCS field from UTMRD
128 * Returns the OCS field in the UTMRD
129 */
130static inline int
131ufshcd_get_tmr_ocs(struct utp_task_req_desc *task_req_descp)
132{
133 return task_req_descp->header.dword_2 & MASK_OCS;
134}
135
136/**
137 * ufshcd_get_tm_free_slot - get a free slot for task management request
138 * @hba: per adapter instance
139 *
140 * Returns maximum number of task management request slots in case of
141 * task management queue full or returns the free slot number
142 */
143static inline int ufshcd_get_tm_free_slot(struct ufs_hba *hba)
144{
145 return find_first_zero_bit(&hba->outstanding_tasks, hba->nutmrs);
146}
147
148/**
149 * ufshcd_utrl_clear - Clear a bit in UTRLCLR register
150 * @hba: per adapter instance
151 * @pos: position of the bit to be cleared
152 */
153static inline void ufshcd_utrl_clear(struct ufs_hba *hba, u32 pos)
154{
b873a275 155 ufshcd_writel(hba, ~(1 << pos), REG_UTP_TRANSFER_REQ_LIST_CLEAR);
7a3e97b0
SY
156}
157
158/**
159 * ufshcd_get_lists_status - Check UCRDY, UTRLRDY and UTMRLRDY
160 * @reg: Register value of host controller status
161 *
 * Returns 0 on success and a positive value on failure
163 */
164static inline int ufshcd_get_lists_status(u32 reg)
165{
166 /*
167 * The mask 0xFF is for the following HCS register bits
168 * Bit Description
169 * 0 Device Present
170 * 1 UTRLRDY
171 * 2 UTMRLRDY
172 * 3 UCRDY
173 * 4 HEI
174 * 5 DEI
175 * 6-7 reserved
176 */
177 return (((reg) & (0xFF)) >> 1) ^ (0x07);
178}
179
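/*
 * Worked example for the check above (illustrative, not from the original
 * source): with DP, UTRLRDY, UTMRLRDY and UCRDY all set and HEI/DEI clear,
 * HCS reads 0x0F, so (0x0F >> 1) ^ 0x07 == 0x07 ^ 0x07 == 0, i.e. ready.
 * If UCRDY is still 0, HCS reads 0x07 and the expression yields
 * (0x07 >> 1) ^ 0x07 == 0x03 ^ 0x07 == 0x04, i.e. "not ready".
 */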
180/**
181 * ufshcd_get_uic_cmd_result - Get the UIC command result
182 * @hba: Pointer to adapter instance
183 *
184 * This function gets the result of UIC command completion
185 * Returns 0 on success, non zero value on error
186 */
187static inline int ufshcd_get_uic_cmd_result(struct ufs_hba *hba)
188{
	return ufshcd_readl(hba, REG_UIC_COMMAND_ARG_2) &
			MASK_UIC_COMMAND_RESULT;
}
192
7a3e97b0
SY
193/**
194 * ufshcd_is_valid_req_rsp - checks if controller TR response is valid
195 * @ucd_rsp_ptr: pointer to response UPIU
196 *
197 * This function checks the response UPIU for valid transaction type in
198 * response field
199 * Returns 0 on success, non-zero on failure
200 */
201static inline int
202ufshcd_is_valid_req_rsp(struct utp_upiu_rsp *ucd_rsp_ptr)
203{
204 return ((be32_to_cpu(ucd_rsp_ptr->header.dword_0) >> 24) ==
205 UPIU_TRANSACTION_RESPONSE) ? 0 : DID_ERROR << 16;
206}
207
208/**
209 * ufshcd_get_rsp_upiu_result - Get the result from response UPIU
210 * @ucd_rsp_ptr: pointer to response UPIU
211 *
212 * This function gets the response status and scsi_status from response UPIU
213 * Returns the response result code.
214 */
215static inline int
216ufshcd_get_rsp_upiu_result(struct utp_upiu_rsp *ucd_rsp_ptr)
217{
218 return be32_to_cpu(ucd_rsp_ptr->header.dword_1) & MASK_RSP_UPIU_RESULT;
219}
220
221/**
222 * ufshcd_config_int_aggr - Configure interrupt aggregation values.
223 * Currently there is no use case where we want to configure
224 * interrupt aggregation dynamically. So to configure interrupt
225 * aggregation, #define INT_AGGR_COUNTER_THRESHOLD_VALUE and
226 * INT_AGGR_TIMEOUT_VALUE are used.
227 * @hba: per adapter instance
228 * @option: Interrupt aggregation option
229 */
230static inline void
231ufshcd_config_int_aggr(struct ufs_hba *hba, int option)
232{
233 switch (option) {
234 case INT_AGGR_RESET:
b873a275
SJ
235 ufshcd_writel(hba, INT_AGGR_ENABLE |
236 INT_AGGR_COUNTER_AND_TIMER_RESET,
237 REG_UTP_TRANSFER_REQ_INT_AGG_CONTROL);
7a3e97b0
SY
238 break;
239 case INT_AGGR_CONFIG:
b873a275
SJ
240 ufshcd_writel(hba, INT_AGGR_ENABLE | INT_AGGR_PARAM_WRITE |
241 INT_AGGR_COUNTER_THRESHOLD_VALUE |
242 INT_AGGR_TIMEOUT_VALUE,
243 REG_UTP_TRANSFER_REQ_INT_AGG_CONTROL);
7a3e97b0
SY
244 break;
245 }
246}
247
/**
 * ufshcd_enable_run_stop_reg - Enable run-stop registers
 * @hba: per adapter instance
 *
 * Setting the run-stop registers to 1 indicates to the host controller
 * that it can start processing requests.
 */
254static void ufshcd_enable_run_stop_reg(struct ufs_hba *hba)
255{
b873a275
SJ
256 ufshcd_writel(hba, UTP_TASK_REQ_LIST_RUN_STOP_BIT,
257 REG_UTP_TASK_REQ_LIST_RUN_STOP);
258 ufshcd_writel(hba, UTP_TRANSFER_REQ_LIST_RUN_STOP_BIT,
259 REG_UTP_TRANSFER_REQ_LIST_RUN_STOP);
7a3e97b0
SY
260}
261
7a3e97b0
SY
262/**
263 * ufshcd_hba_start - Start controller initialization sequence
264 * @hba: per adapter instance
265 */
266static inline void ufshcd_hba_start(struct ufs_hba *hba)
267{
b873a275 268 ufshcd_writel(hba, CONTROLLER_ENABLE, REG_CONTROLLER_ENABLE);
7a3e97b0
SY
269}
270
271/**
272 * ufshcd_is_hba_active - Get controller state
273 * @hba: per adapter instance
274 *
275 * Returns zero if controller is active, 1 otherwise
276 */
277static inline int ufshcd_is_hba_active(struct ufs_hba *hba)
278{
b873a275 279 return (ufshcd_readl(hba, REG_CONTROLLER_ENABLE) & 0x1) ? 0 : 1;
7a3e97b0
SY
280}
281
282/**
283 * ufshcd_send_command - Send SCSI or device management commands
284 * @hba: per adapter instance
285 * @task_tag: Task tag of the command
286 */
287static inline
288void ufshcd_send_command(struct ufs_hba *hba, unsigned int task_tag)
289{
290 __set_bit(task_tag, &hba->outstanding_reqs);
b873a275 291 ufshcd_writel(hba, 1 << task_tag, REG_UTP_TRANSFER_REQ_DOOR_BELL);
7a3e97b0
SY
292}
293
294/**
295 * ufshcd_copy_sense_data - Copy sense data in case of check condition
296 * @lrb - pointer to local reference block
297 */
298static inline void ufshcd_copy_sense_data(struct ufshcd_lrb *lrbp)
299{
300 int len;
301 if (lrbp->sense_buffer) {
302 len = be16_to_cpu(lrbp->ucd_rsp_ptr->sense_data_len);
303 memcpy(lrbp->sense_buffer,
304 lrbp->ucd_rsp_ptr->sense_data,
305 min_t(int, len, SCSI_SENSE_BUFFERSIZE));
306 }
307}
308
309/**
310 * ufshcd_hba_capabilities - Read controller capabilities
311 * @hba: per adapter instance
312 */
313static inline void ufshcd_hba_capabilities(struct ufs_hba *hba)
314{
b873a275 315 hba->capabilities = ufshcd_readl(hba, REG_CONTROLLER_CAPABILITIES);
7a3e97b0
SY
316
317 /* nutrs and nutmrs are 0 based values */
318 hba->nutrs = (hba->capabilities & MASK_TRANSFER_REQUESTS_SLOTS) + 1;
319 hba->nutmrs =
320 ((hba->capabilities & MASK_TASK_MANAGEMENT_REQUEST_SLOTS) >> 16) + 1;
321}
322
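/*
 * Illustrative decode (not part of the original source): assuming the
 * usual UFSHCI layout where bits 4:0 of the capabilities register hold
 * NUTRS-1 and bits 18:16 hold NUTMRS-1, a controller reporting 31 in the
 * transfer-slot field and 7 in the task-management field ends up with
 * hba->nutrs == 32 and hba->nutmrs == 8 after the "+ 1" adjustments above.
 */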
323/**
6ccf44fe
SJ
324 * ufshcd_ready_for_uic_cmd - Check if controller is ready
325 * to accept UIC commands
7a3e97b0 326 * @hba: per adapter instance
6ccf44fe
SJ
327 * Return true on success, else false
328 */
329static inline bool ufshcd_ready_for_uic_cmd(struct ufs_hba *hba)
330{
331 if (ufshcd_readl(hba, REG_CONTROLLER_STATUS) & UIC_COMMAND_READY)
332 return true;
333 else
334 return false;
335}
336
337/**
338 * ufshcd_dispatch_uic_cmd - Dispatch UIC commands to unipro layers
339 * @hba: per adapter instance
340 * @uic_cmd: UIC command
341 *
342 * Mutex must be held.
7a3e97b0
SY
343 */
344static inline void
6ccf44fe 345ufshcd_dispatch_uic_cmd(struct ufs_hba *hba, struct uic_command *uic_cmd)
7a3e97b0 346{
6ccf44fe
SJ
347 WARN_ON(hba->active_uic_cmd);
348
349 hba->active_uic_cmd = uic_cmd;
350
7a3e97b0 351 /* Write Args */
6ccf44fe
SJ
352 ufshcd_writel(hba, uic_cmd->argument1, REG_UIC_COMMAND_ARG_1);
353 ufshcd_writel(hba, uic_cmd->argument2, REG_UIC_COMMAND_ARG_2);
354 ufshcd_writel(hba, uic_cmd->argument3, REG_UIC_COMMAND_ARG_3);
7a3e97b0
SY
355
356 /* Write UIC Cmd */
6ccf44fe 357 ufshcd_writel(hba, uic_cmd->command & COMMAND_OPCODE_MASK,
b873a275 358 REG_UIC_COMMAND);
7a3e97b0
SY
359}
360
/**
 * ufshcd_wait_for_uic_cmd - Wait for completion of a UIC command
 * @hba: per adapter instance
 * @uic_cmd: UIC command
 *
 * Must be called with mutex held.
 * Returns 0 only if success.
 */
369static int
370ufshcd_wait_for_uic_cmd(struct ufs_hba *hba, struct uic_command *uic_cmd)
371{
372 int ret;
373 unsigned long flags;
374
375 if (wait_for_completion_timeout(&uic_cmd->done,
376 msecs_to_jiffies(UIC_CMD_TIMEOUT)))
377 ret = uic_cmd->argument2 & MASK_UIC_COMMAND_RESULT;
378 else
379 ret = -ETIMEDOUT;
380
381 spin_lock_irqsave(hba->host->host_lock, flags);
382 hba->active_uic_cmd = NULL;
383 spin_unlock_irqrestore(hba->host->host_lock, flags);
384
385 return ret;
386}
387
/**
 * __ufshcd_send_uic_cmd - Send UIC commands and retrieve the result
 * @hba: per adapter instance
 * @uic_cmd: UIC command
 *
 * Identical to ufshcd_send_uic_cmd() except that it does not take the
 * mutex itself. Must be called with the UIC command mutex held.
 * Returns 0 only if success.
 */
397static int
398__ufshcd_send_uic_cmd(struct ufs_hba *hba, struct uic_command *uic_cmd)
399{
400 int ret;
401 unsigned long flags;
402
403 if (!ufshcd_ready_for_uic_cmd(hba)) {
404 dev_err(hba->dev,
405 "Controller not ready to accept UIC commands\n");
406 return -EIO;
407 }
408
409 init_completion(&uic_cmd->done);
410
411 spin_lock_irqsave(hba->host->host_lock, flags);
412 ufshcd_dispatch_uic_cmd(hba, uic_cmd);
413 spin_unlock_irqrestore(hba->host->host_lock, flags);
414
415 ret = ufshcd_wait_for_uic_cmd(hba, uic_cmd);
416
417 return ret;
418}
419
420/**
421 * ufshcd_send_uic_cmd - Send UIC commands and retrieve the result
422 * @hba: per adapter instance
423 * @uic_cmd: UIC command
424 *
425 * Returns 0 only if success.
426 */
427static int
428ufshcd_send_uic_cmd(struct ufs_hba *hba, struct uic_command *uic_cmd)
429{
430 int ret;
431
432 mutex_lock(&hba->uic_cmd_mutex);
433 ret = __ufshcd_send_uic_cmd(hba, uic_cmd);
434 mutex_unlock(&hba->uic_cmd_mutex);
435
436 return ret;
437}
438
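/*
 * Usage sketch (illustrative only; see ufshcd_dme_link_startup() below for
 * the real in-tree caller): a UIC command is described by a zero-initialised
 * struct uic_command with .command and the argument registers filled in:
 *
 *	struct uic_command uic_cmd = {0};
 *
 *	uic_cmd.command = UIC_CMD_DME_LINK_STARTUP;
 *	if (ufshcd_send_uic_cmd(hba, &uic_cmd))
 *		dev_err(hba->dev, "UIC command failed\n");
 *
 * ufshcd_send_uic_cmd() serialises callers with uic_cmd_mutex and sleeps
 * waiting for the completion, so it must not be called from atomic context.
 */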
7a3e97b0
SY
439/**
440 * ufshcd_map_sg - Map scatter-gather list to prdt
441 * @lrbp - pointer to local reference block
442 *
443 * Returns 0 in case of success, non-zero value in case of failure
444 */
445static int ufshcd_map_sg(struct ufshcd_lrb *lrbp)
446{
447 struct ufshcd_sg_entry *prd_table;
448 struct scatterlist *sg;
449 struct scsi_cmnd *cmd;
450 int sg_segments;
451 int i;
452
453 cmd = lrbp->cmd;
454 sg_segments = scsi_dma_map(cmd);
455 if (sg_segments < 0)
456 return sg_segments;
457
458 if (sg_segments) {
459 lrbp->utr_descriptor_ptr->prd_table_length =
460 cpu_to_le16((u16) (sg_segments));
461
462 prd_table = (struct ufshcd_sg_entry *)lrbp->ucd_prdt_ptr;
463
464 scsi_for_each_sg(cmd, sg, sg_segments, i) {
465 prd_table[i].size =
466 cpu_to_le32(((u32) sg_dma_len(sg))-1);
467 prd_table[i].base_addr =
468 cpu_to_le32(lower_32_bits(sg->dma_address));
469 prd_table[i].upper_addr =
470 cpu_to_le32(upper_32_bits(sg->dma_address));
471 }
472 } else {
473 lrbp->utr_descriptor_ptr->prd_table_length = 0;
474 }
475
476 return 0;
477}
478
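/*
 * Note on the PRD entry layout above (illustrative example, not from the
 * original source): the hardware expects the byte count minus one in the
 * size field, so a 4096-byte scatterlist segment is recorded as 0xFFF,
 * while base_addr/upper_addr carry the low and high 32 bits of the
 * segment's DMA address.
 */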
479/**
2fbd009b 480 * ufshcd_enable_intr - enable interrupts
7a3e97b0 481 * @hba: per adapter instance
2fbd009b 482 * @intrs: interrupt bits
7a3e97b0 483 */
2fbd009b 484static void ufshcd_enable_intr(struct ufs_hba *hba, u32 intrs)
7a3e97b0 485{
2fbd009b
SJ
486 u32 set = ufshcd_readl(hba, REG_INTERRUPT_ENABLE);
487
488 if (hba->ufs_version == UFSHCI_VERSION_10) {
489 u32 rw;
490 rw = set & INTERRUPT_MASK_RW_VER_10;
491 set = rw | ((set ^ intrs) & intrs);
492 } else {
493 set |= intrs;
494 }
495
496 ufshcd_writel(hba, set, REG_INTERRUPT_ENABLE);
497}
498
499/**
500 * ufshcd_disable_intr - disable interrupts
501 * @hba: per adapter instance
502 * @intrs: interrupt bits
503 */
504static void ufshcd_disable_intr(struct ufs_hba *hba, u32 intrs)
505{
506 u32 set = ufshcd_readl(hba, REG_INTERRUPT_ENABLE);
507
508 if (hba->ufs_version == UFSHCI_VERSION_10) {
509 u32 rw;
510 rw = (set & INTERRUPT_MASK_RW_VER_10) &
511 ~(intrs & INTERRUPT_MASK_RW_VER_10);
512 set = rw | ((set & intrs) & ~INTERRUPT_MASK_RW_VER_10);
513
514 } else {
515 set &= ~intrs;
7a3e97b0 516 }
2fbd009b
SJ
517
518 ufshcd_writel(hba, set, REG_INTERRUPT_ENABLE);
7a3e97b0
SY
519}
520
521/**
522 * ufshcd_compose_upiu - form UFS Protocol Information Unit(UPIU)
523 * @lrb - pointer to local reference block
524 */
525static void ufshcd_compose_upiu(struct ufshcd_lrb *lrbp)
526{
527 struct utp_transfer_req_desc *req_desc;
528 struct utp_upiu_cmd *ucd_cmd_ptr;
529 u32 data_direction;
530 u32 upiu_flags;
531
532 ucd_cmd_ptr = lrbp->ucd_cmd_ptr;
533 req_desc = lrbp->utr_descriptor_ptr;
534
535 switch (lrbp->command_type) {
536 case UTP_CMD_TYPE_SCSI:
537 if (lrbp->cmd->sc_data_direction == DMA_FROM_DEVICE) {
538 data_direction = UTP_DEVICE_TO_HOST;
539 upiu_flags = UPIU_CMD_FLAGS_READ;
540 } else if (lrbp->cmd->sc_data_direction == DMA_TO_DEVICE) {
541 data_direction = UTP_HOST_TO_DEVICE;
542 upiu_flags = UPIU_CMD_FLAGS_WRITE;
543 } else {
544 data_direction = UTP_NO_DATA_TRANSFER;
545 upiu_flags = UPIU_CMD_FLAGS_NONE;
546 }
547
548 /* Transfer request descriptor header fields */
549 req_desc->header.dword_0 =
550 cpu_to_le32(data_direction | UTP_SCSI_COMMAND);
551
552 /*
553 * assigning invalid value for command status. Controller
554 * updates OCS on command completion, with the command
555 * status
556 */
557 req_desc->header.dword_2 =
558 cpu_to_le32(OCS_INVALID_COMMAND_STATUS);
559
560 /* command descriptor fields */
561 ucd_cmd_ptr->header.dword_0 =
562 cpu_to_be32(UPIU_HEADER_DWORD(UPIU_TRANSACTION_COMMAND,
563 upiu_flags,
564 lrbp->lun,
565 lrbp->task_tag));
566 ucd_cmd_ptr->header.dword_1 =
567 cpu_to_be32(
568 UPIU_HEADER_DWORD(UPIU_COMMAND_SET_TYPE_SCSI,
569 0,
570 0,
571 0));
572
573 /* Total EHS length and Data segment length will be zero */
574 ucd_cmd_ptr->header.dword_2 = 0;
575
576 ucd_cmd_ptr->exp_data_transfer_len =
98b8e179 577 cpu_to_be32(lrbp->cmd->sdb.length);
7a3e97b0
SY
578
579 memcpy(ucd_cmd_ptr->cdb,
580 lrbp->cmd->cmnd,
581 (min_t(unsigned short,
582 lrbp->cmd->cmd_len,
583 MAX_CDB_SIZE)));
584 break;
585 case UTP_CMD_TYPE_DEV_MANAGE:
586 /* For query function implementation */
587 break;
588 case UTP_CMD_TYPE_UFS:
589 /* For UFS native command implementation */
590 break;
591 } /* end of switch */
592}
593
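/*
 * Worked example (illustrative, not from the original source): for a READ
 * command transferring 4096 bytes to the host, the code above selects
 * data_direction = UTP_DEVICE_TO_HOST and upiu_flags = UPIU_CMD_FLAGS_READ,
 * stores OCS_INVALID_COMMAND_STATUS so a stale OCS can never be mistaken
 * for a completion, sets exp_data_transfer_len to cpu_to_be32(4096) and
 * copies at most MAX_CDB_SIZE bytes of the CDB into the command UPIU.
 */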
/**
 * ufshcd_queuecommand - main entry point for SCSI requests
 * @host: SCSI host pointer
 * @cmd: command from SCSI Midlayer
 *
 * Returns 0 for success, non-zero in case of failure
 */
601static int ufshcd_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *cmd)
602{
603 struct ufshcd_lrb *lrbp;
604 struct ufs_hba *hba;
605 unsigned long flags;
606 int tag;
607 int err = 0;
608
609 hba = shost_priv(host);
610
611 tag = cmd->request->tag;
612
613 if (hba->ufshcd_state != UFSHCD_STATE_OPERATIONAL) {
614 err = SCSI_MLQUEUE_HOST_BUSY;
615 goto out;
616 }
617
618 lrbp = &hba->lrb[tag];
619
620 lrbp->cmd = cmd;
621 lrbp->sense_bufflen = SCSI_SENSE_BUFFERSIZE;
622 lrbp->sense_buffer = cmd->sense_buffer;
623 lrbp->task_tag = tag;
624 lrbp->lun = cmd->device->lun;
625
626 lrbp->command_type = UTP_CMD_TYPE_SCSI;
627
628 /* form UPIU before issuing the command */
629 ufshcd_compose_upiu(lrbp);
630 err = ufshcd_map_sg(lrbp);
631 if (err)
632 goto out;
633
634 /* issue command to the controller */
635 spin_lock_irqsave(hba->host->host_lock, flags);
636 ufshcd_send_command(hba, tag);
637 spin_unlock_irqrestore(hba->host->host_lock, flags);
638out:
639 return err;
640}
641
/**
 * ufshcd_memory_alloc - allocate memory for host memory space data structures
 * @hba: per adapter instance
 *
 * 1. Allocate DMA memory for Command Descriptor array
 *	Each command descriptor consists of Command UPIU, Response UPIU and PRDT
 * 2. Allocate DMA memory for UTP Transfer Request Descriptor List (UTRDL).
 * 3. Allocate DMA memory for UTP Task Management Request Descriptor List
 *	(UTMRDL)
 * 4. Allocate memory for local reference block(lrb).
 *
 * Returns 0 for success, non-zero in case of failure
 */
static int ufshcd_memory_alloc(struct ufs_hba *hba)
{
	size_t utmrdl_size, utrdl_size, ucdl_size;

	/* Allocate memory for UTP command descriptors */
	ucdl_size = (sizeof(struct utp_transfer_cmd_desc) * hba->nutrs);
	hba->ucdl_base_addr = dmam_alloc_coherent(hba->dev,
						  ucdl_size,
						  &hba->ucdl_dma_addr,
						  GFP_KERNEL);

	/*
	 * UFSHCI requires UTP command descriptor to be 128 byte aligned.
	 * make sure hba->ucdl_dma_addr is aligned to PAGE_SIZE
	 * if hba->ucdl_dma_addr is aligned to PAGE_SIZE, then it will
	 * be aligned to 128 bytes as well
	 */
	if (!hba->ucdl_base_addr ||
	    WARN_ON(hba->ucdl_dma_addr & (PAGE_SIZE - 1))) {
		dev_err(hba->dev,
			"Command Descriptor Memory allocation failed\n");
		goto out;
	}

	/*
	 * Allocate memory for UTP Transfer descriptors
	 * UFSHCI requires 1024 byte alignment of UTRD
	 */
	utrdl_size = (sizeof(struct utp_transfer_req_desc) * hba->nutrs);
	hba->utrdl_base_addr = dmam_alloc_coherent(hba->dev,
						   utrdl_size,
						   &hba->utrdl_dma_addr,
						   GFP_KERNEL);
	if (!hba->utrdl_base_addr ||
	    WARN_ON(hba->utrdl_dma_addr & (PAGE_SIZE - 1))) {
		dev_err(hba->dev,
			"Transfer Descriptor Memory allocation failed\n");
		goto out;
	}

	/*
	 * Allocate memory for UTP Task Management descriptors
	 * UFSHCI requires 1024 byte alignment of UTMRD
	 */
	utmrdl_size = sizeof(struct utp_task_req_desc) * hba->nutmrs;
	hba->utmrdl_base_addr = dmam_alloc_coherent(hba->dev,
						    utmrdl_size,
						    &hba->utmrdl_dma_addr,
						    GFP_KERNEL);
	if (!hba->utmrdl_base_addr ||
	    WARN_ON(hba->utmrdl_dma_addr & (PAGE_SIZE - 1))) {
		dev_err(hba->dev,
			"Task Management Descriptor Memory allocation failed\n");
		goto out;
	}

	/* Allocate memory for local reference block */
	hba->lrb = devm_kzalloc(hba->dev,
				hba->nutrs * sizeof(struct ufshcd_lrb),
				GFP_KERNEL);
	if (!hba->lrb) {
		dev_err(hba->dev, "LRB Memory allocation failed\n");
		goto out;
	}
	return 0;
out:
	return -ENOMEM;
}
723
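/*
 * Sizing sketch (illustrative, not from the original source): with the
 * typical hba->nutrs == 32 and hba->nutmrs == 8, the three DMA regions
 * above hold 32 command descriptors, 32 UTRDs and 8 UTMRDs respectively.
 * All of them are managed allocations (dmam_*/devm_*), so they are released
 * automatically when the device is detached and no explicit free path is
 * needed here.
 */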
724/**
725 * ufshcd_host_memory_configure - configure local reference block with
726 * memory offsets
727 * @hba: per adapter instance
728 *
729 * Configure Host memory space
730 * 1. Update Corresponding UTRD.UCDBA and UTRD.UCDBAU with UCD DMA
731 * address.
732 * 2. Update each UTRD with Response UPIU offset, Response UPIU length
733 * and PRDT offset.
734 * 3. Save the corresponding addresses of UTRD, UCD.CMD, UCD.RSP and UCD.PRDT
735 * into local reference block.
736 */
737static void ufshcd_host_memory_configure(struct ufs_hba *hba)
738{
739 struct utp_transfer_cmd_desc *cmd_descp;
740 struct utp_transfer_req_desc *utrdlp;
741 dma_addr_t cmd_desc_dma_addr;
742 dma_addr_t cmd_desc_element_addr;
743 u16 response_offset;
744 u16 prdt_offset;
745 int cmd_desc_size;
746 int i;
747
748 utrdlp = hba->utrdl_base_addr;
749 cmd_descp = hba->ucdl_base_addr;
750
751 response_offset =
752 offsetof(struct utp_transfer_cmd_desc, response_upiu);
753 prdt_offset =
754 offsetof(struct utp_transfer_cmd_desc, prd_table);
755
756 cmd_desc_size = sizeof(struct utp_transfer_cmd_desc);
757 cmd_desc_dma_addr = hba->ucdl_dma_addr;
758
759 for (i = 0; i < hba->nutrs; i++) {
760 /* Configure UTRD with command descriptor base address */
761 cmd_desc_element_addr =
762 (cmd_desc_dma_addr + (cmd_desc_size * i));
763 utrdlp[i].command_desc_base_addr_lo =
764 cpu_to_le32(lower_32_bits(cmd_desc_element_addr));
765 utrdlp[i].command_desc_base_addr_hi =
766 cpu_to_le32(upper_32_bits(cmd_desc_element_addr));
767
768 /* Response upiu and prdt offset should be in double words */
769 utrdlp[i].response_upiu_offset =
770 cpu_to_le16((response_offset >> 2));
771 utrdlp[i].prd_table_offset =
772 cpu_to_le16((prdt_offset >> 2));
773 utrdlp[i].response_upiu_length =
3ca316c5 774 cpu_to_le16(ALIGNED_UPIU_SIZE >> 2);
7a3e97b0
SY
775
776 hba->lrb[i].utr_descriptor_ptr = (utrdlp + i);
777 hba->lrb[i].ucd_cmd_ptr =
778 (struct utp_upiu_cmd *)(cmd_descp + i);
779 hba->lrb[i].ucd_rsp_ptr =
780 (struct utp_upiu_rsp *)cmd_descp[i].response_upiu;
781 hba->lrb[i].ucd_prdt_ptr =
782 (struct ufshcd_sg_entry *)cmd_descp[i].prd_table;
783 }
784}
785
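/*
 * Worked example for the ">> 2" conversions above (illustrative, not from
 * the original source): offsets and lengths in the UTRD are expressed in
 * 32-bit double words, so if response_upiu starts 256 bytes into the
 * command descriptor, response_upiu_offset is written as 64, and
 * ALIGNED_UPIU_SIZE is likewise stored divided by four.
 */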
786/**
787 * ufshcd_dme_link_startup - Notify Unipro to perform link startup
788 * @hba: per adapter instance
789 *
790 * UIC_CMD_DME_LINK_STARTUP command must be issued to Unipro layer,
791 * in order to initialize the Unipro link startup procedure.
792 * Once the Unipro links are up, the device connected to the controller
793 * is detected.
794 *
795 * Returns 0 on success, non-zero value on failure
796 */
797static int ufshcd_dme_link_startup(struct ufs_hba *hba)
798{
6ccf44fe
SJ
799 struct uic_command uic_cmd = {0};
800 int ret;
7a3e97b0 801
6ccf44fe 802 uic_cmd.command = UIC_CMD_DME_LINK_STARTUP;
7a3e97b0 803
6ccf44fe
SJ
804 ret = ufshcd_send_uic_cmd(hba, &uic_cmd);
805 if (ret)
806 dev_err(hba->dev,
807 "dme-link-startup: error code %d\n", ret);
808 return ret;
7a3e97b0
SY
809}
810
/**
 * ufshcd_make_hba_operational - Make UFS controller operational
 * @hba: per adapter instance
 *
 * To bring UFS host controller to operational state,
 * 1. Check if device is present
 * 2. Enable required interrupts
 * 3. Configure interrupt aggregation
 * 4. Program UTRL and UTMRL base addresses
 * 5. Configure run-stop-registers
 *
 * Returns 0 on success, non-zero value on failure
 */
824static int ufshcd_make_hba_operational(struct ufs_hba *hba)
825{
826 int err = 0;
827 u32 reg;
828
829 /* check if device present */
b873a275 830 reg = ufshcd_readl(hba, REG_CONTROLLER_STATUS);
73ec513a 831 if (!ufshcd_is_device_present(reg)) {
3b1d0580 832 dev_err(hba->dev, "cc: Device not present\n");
7a3e97b0
SY
833 err = -ENXIO;
834 goto out;
835 }
836
6ccf44fe
SJ
837 /* Enable required interrupts */
838 ufshcd_enable_intr(hba, UFSHCD_ENABLE_INTRS);
839
840 /* Configure interrupt aggregation */
841 ufshcd_config_int_aggr(hba, INT_AGGR_CONFIG);
842
843 /* Configure UTRL and UTMRL base address registers */
844 ufshcd_writel(hba, lower_32_bits(hba->utrdl_dma_addr),
845 REG_UTP_TRANSFER_REQ_LIST_BASE_L);
846 ufshcd_writel(hba, upper_32_bits(hba->utrdl_dma_addr),
847 REG_UTP_TRANSFER_REQ_LIST_BASE_H);
848 ufshcd_writel(hba, lower_32_bits(hba->utmrdl_dma_addr),
849 REG_UTP_TASK_REQ_LIST_BASE_L);
850 ufshcd_writel(hba, upper_32_bits(hba->utmrdl_dma_addr),
851 REG_UTP_TASK_REQ_LIST_BASE_H);
852
7a3e97b0
SY
853 /*
854 * UCRDY, UTMRLDY and UTRLRDY bits must be 1
855 * DEI, HEI bits must be 0
856 */
857 if (!(ufshcd_get_lists_status(reg))) {
858 ufshcd_enable_run_stop_reg(hba);
859 } else {
		dev_err(hba->dev,
			"Host controller not ready to process requests");
862 err = -EIO;
863 goto out;
864 }
865
7a3e97b0
SY
866 if (hba->ufshcd_state == UFSHCD_STATE_RESET)
867 scsi_unblock_requests(hba->host);
868
869 hba->ufshcd_state = UFSHCD_STATE_OPERATIONAL;
6ccf44fe 870
7a3e97b0
SY
871out:
872 return err;
873}
874
875/**
876 * ufshcd_hba_enable - initialize the controller
877 * @hba: per adapter instance
878 *
879 * The controller resets itself and controller firmware initialization
880 * sequence kicks off. When controller is ready it will set
881 * the Host Controller Enable bit to 1.
882 *
883 * Returns 0 on success, non-zero value on failure
884 */
885static int ufshcd_hba_enable(struct ufs_hba *hba)
886{
887 int retry;
888
889 /*
890 * msleep of 1 and 5 used in this function might result in msleep(20),
891 * but it was necessary to send the UFS FPGA to reset mode during
892 * development and testing of this driver. msleep can be changed to
893 * mdelay and retry count can be reduced based on the controller.
894 */
895 if (!ufshcd_is_hba_active(hba)) {
896
897 /* change controller state to "reset state" */
898 ufshcd_hba_stop(hba);
899
900 /*
901 * This delay is based on the testing done with UFS host
902 * controller FPGA. The delay can be changed based on the
903 * host controller used.
904 */
905 msleep(5);
906 }
907
908 /* start controller initialization sequence */
909 ufshcd_hba_start(hba);
910
911 /*
912 * To initialize a UFS host controller HCE bit must be set to 1.
913 * During initialization the HCE bit value changes from 1->0->1.
914 * When the host controller completes initialization sequence
915 * it sets the value of HCE bit to 1. The same HCE bit is read back
916 * to check if the controller has completed initialization sequence.
917 * So without this delay the value HCE = 1, set in the previous
918 * instruction might be read back.
919 * This delay can be changed based on the controller.
920 */
921 msleep(1);
922
923 /* wait for the host controller to complete initialization */
924 retry = 10;
925 while (ufshcd_is_hba_active(hba)) {
926 if (retry) {
927 retry--;
928 } else {
			dev_err(hba->dev,
				"Controller enable failed\n");
931 return -EIO;
932 }
933 msleep(5);
934 }
935 return 0;
936}
937
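/*
 * Timing sketch (illustrative, not from the original source): the loop
 * above polls HCE up to 10 times with msleep(5) between reads, so the
 * controller gets roughly 50 ms (plus the initial 1 ms settle delay) to
 * report itself active before -EIO is returned.
 */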
938/**
6ccf44fe 939 * ufshcd_link_startup - Initialize unipro link startup
7a3e97b0
SY
940 * @hba: per adapter instance
941 *
6ccf44fe 942 * Returns 0 for success, non-zero in case of failure
7a3e97b0 943 */
6ccf44fe 944static int ufshcd_link_startup(struct ufs_hba *hba)
7a3e97b0 945{
6ccf44fe 946 int ret;
7a3e97b0 947
6ccf44fe
SJ
948 /* enable UIC related interrupts */
949 ufshcd_enable_intr(hba, UIC_COMMAND_COMPL);
950
951 ret = ufshcd_dme_link_startup(hba);
952 if (ret)
953 goto out;
954
955 ret = ufshcd_make_hba_operational(hba);
7a3e97b0 956
6ccf44fe
SJ
957out:
958 if (ret)
959 dev_err(hba->dev, "link startup failed %d\n", ret);
960 return ret;
7a3e97b0
SY
961}
962
963/**
964 * ufshcd_do_reset - reset the host controller
965 * @hba: per adapter instance
966 *
967 * Returns SUCCESS/FAILED
968 */
969static int ufshcd_do_reset(struct ufs_hba *hba)
970{
971 struct ufshcd_lrb *lrbp;
972 unsigned long flags;
973 int tag;
974
975 /* block commands from midlayer */
976 scsi_block_requests(hba->host);
977
978 spin_lock_irqsave(hba->host->host_lock, flags);
979 hba->ufshcd_state = UFSHCD_STATE_RESET;
980
981 /* send controller to reset state */
982 ufshcd_hba_stop(hba);
983 spin_unlock_irqrestore(hba->host->host_lock, flags);
984
985 /* abort outstanding commands */
986 for (tag = 0; tag < hba->nutrs; tag++) {
987 if (test_bit(tag, &hba->outstanding_reqs)) {
988 lrbp = &hba->lrb[tag];
989 scsi_dma_unmap(lrbp->cmd);
990 lrbp->cmd->result = DID_RESET << 16;
991 lrbp->cmd->scsi_done(lrbp->cmd);
992 lrbp->cmd = NULL;
993 }
994 }
995
996 /* clear outstanding request/task bit maps */
997 hba->outstanding_reqs = 0;
998 hba->outstanding_tasks = 0;
999
6ccf44fe
SJ
1000 /* Host controller enable */
1001 if (ufshcd_hba_enable(hba)) {
		dev_err(hba->dev,
			"Reset: Controller initialization failed\n");
1004 return FAILED;
1005 }
6ccf44fe
SJ
1006
1007 if (ufshcd_link_startup(hba)) {
1008 dev_err(hba->dev,
1009 "Reset: Link start-up failed\n");
1010 return FAILED;
1011 }
1012
7a3e97b0
SY
1013 return SUCCESS;
1014}
1015
1016/**
1017 * ufshcd_slave_alloc - handle initial SCSI device configurations
1018 * @sdev: pointer to SCSI device
1019 *
1020 * Returns success
1021 */
1022static int ufshcd_slave_alloc(struct scsi_device *sdev)
1023{
1024 struct ufs_hba *hba;
1025
1026 hba = shost_priv(sdev->host);
1027 sdev->tagged_supported = 1;
1028
1029 /* Mode sense(6) is not supported by UFS, so use Mode sense(10) */
1030 sdev->use_10_for_ms = 1;
1031 scsi_set_tag_type(sdev, MSG_SIMPLE_TAG);
1032
1033 /*
1034 * Inform SCSI Midlayer that the LUN queue depth is same as the
1035 * controller queue depth. If a LUN queue depth is less than the
1036 * controller queue depth and if the LUN reports
1037 * SAM_STAT_TASK_SET_FULL, the LUN queue depth will be adjusted
1038 * with scsi_adjust_queue_depth.
1039 */
1040 scsi_activate_tcq(sdev, hba->nutrs);
1041 return 0;
1042}
1043
1044/**
1045 * ufshcd_slave_destroy - remove SCSI device configurations
1046 * @sdev: pointer to SCSI device
1047 */
1048static void ufshcd_slave_destroy(struct scsi_device *sdev)
1049{
1050 struct ufs_hba *hba;
1051
1052 hba = shost_priv(sdev->host);
1053 scsi_deactivate_tcq(sdev, hba->nutrs);
1054}
1055
1056/**
1057 * ufshcd_task_req_compl - handle task management request completion
1058 * @hba: per adapter instance
1059 * @index: index of the completed request
1060 *
1061 * Returns SUCCESS/FAILED
1062 */
1063static int ufshcd_task_req_compl(struct ufs_hba *hba, u32 index)
1064{
1065 struct utp_task_req_desc *task_req_descp;
1066 struct utp_upiu_task_rsp *task_rsp_upiup;
1067 unsigned long flags;
1068 int ocs_value;
1069 int task_result;
1070
1071 spin_lock_irqsave(hba->host->host_lock, flags);
1072
1073 /* Clear completed tasks from outstanding_tasks */
1074 __clear_bit(index, &hba->outstanding_tasks);
1075
1076 task_req_descp = hba->utmrdl_base_addr;
1077 ocs_value = ufshcd_get_tmr_ocs(&task_req_descp[index]);
1078
1079 if (ocs_value == OCS_SUCCESS) {
1080 task_rsp_upiup = (struct utp_upiu_task_rsp *)
1081 task_req_descp[index].task_rsp_upiu;
1082 task_result = be32_to_cpu(task_rsp_upiup->header.dword_1);
1083 task_result = ((task_result & MASK_TASK_RESPONSE) >> 8);
1084
		if (task_result != UPIU_TASK_MANAGEMENT_FUNC_COMPL &&
		    task_result != UPIU_TASK_MANAGEMENT_FUNC_SUCCEEDED)
			task_result = FAILED;
		else
			task_result = SUCCESS;
	} else {
		task_result = FAILED;
		dev_err(hba->dev,
			"trc: Invalid ocs = %x\n", ocs_value);
1094 }
1095 spin_unlock_irqrestore(hba->host->host_lock, flags);
1096 return task_result;
1097}
1098
1099/**
1100 * ufshcd_adjust_lun_qdepth - Update LUN queue depth if device responds with
1101 * SAM_STAT_TASK_SET_FULL SCSI command status.
1102 * @cmd: pointer to SCSI command
1103 */
1104static void ufshcd_adjust_lun_qdepth(struct scsi_cmnd *cmd)
1105{
1106 struct ufs_hba *hba;
1107 int i;
1108 int lun_qdepth = 0;
1109
1110 hba = shost_priv(cmd->device->host);
1111
1112 /*
1113 * LUN queue depth can be obtained by counting outstanding commands
1114 * on the LUN.
1115 */
1116 for (i = 0; i < hba->nutrs; i++) {
1117 if (test_bit(i, &hba->outstanding_reqs)) {
1118
1119 /*
1120 * Check if the outstanding command belongs
1121 * to the LUN which reported SAM_STAT_TASK_SET_FULL.
1122 */
1123 if (cmd->device->lun == hba->lrb[i].lun)
1124 lun_qdepth++;
1125 }
1126 }
1127
1128 /*
1129 * LUN queue depth will be total outstanding commands, except the
1130 * command for which the LUN reported SAM_STAT_TASK_SET_FULL.
1131 */
1132 scsi_adjust_queue_depth(cmd->device, MSG_SIMPLE_TAG, lun_qdepth - 1);
1133}
1134
1135/**
1136 * ufshcd_scsi_cmd_status - Update SCSI command result based on SCSI status
1137 * @lrb: pointer to local reference block of completed command
1138 * @scsi_status: SCSI command status
1139 *
 * Returns value based on SCSI command status
1141 */
1142static inline int
1143ufshcd_scsi_cmd_status(struct ufshcd_lrb *lrbp, int scsi_status)
1144{
1145 int result = 0;
1146
1147 switch (scsi_status) {
1148 case SAM_STAT_GOOD:
1149 result |= DID_OK << 16 |
1150 COMMAND_COMPLETE << 8 |
1151 SAM_STAT_GOOD;
1152 break;
1153 case SAM_STAT_CHECK_CONDITION:
1154 result |= DID_OK << 16 |
1155 COMMAND_COMPLETE << 8 |
1156 SAM_STAT_CHECK_CONDITION;
1157 ufshcd_copy_sense_data(lrbp);
1158 break;
1159 case SAM_STAT_BUSY:
1160 result |= SAM_STAT_BUSY;
1161 break;
1162 case SAM_STAT_TASK_SET_FULL:
1163
1164 /*
1165 * If a LUN reports SAM_STAT_TASK_SET_FULL, then the LUN queue
1166 * depth needs to be adjusted to the exact number of
1167 * outstanding commands the LUN can handle at any given time.
1168 */
1169 ufshcd_adjust_lun_qdepth(lrbp->cmd);
1170 result |= SAM_STAT_TASK_SET_FULL;
1171 break;
1172 case SAM_STAT_TASK_ABORTED:
1173 result |= SAM_STAT_TASK_ABORTED;
1174 break;
1175 default:
1176 result |= DID_ERROR << 16;
1177 break;
1178 } /* end of switch */
1179
1180 return result;
1181}
1182
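/*
 * Layout reminder (illustrative, not from the original source): the value
 * returned above follows the SCSI midlayer convention of host byte << 16,
 * message byte << 8 and SCSI status in bits 7:0; e.g. a CHECK CONDITION
 * completion is reported as DID_OK << 16 | COMMAND_COMPLETE << 8 |
 * SAM_STAT_CHECK_CONDITION, with the sense data copied alongside it.
 */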
1183/**
1184 * ufshcd_transfer_rsp_status - Get overall status of the response
1185 * @hba: per adapter instance
1186 * @lrb: pointer to local reference block of completed command
1187 *
1188 * Returns result of the command to notify SCSI midlayer
1189 */
1190static inline int
1191ufshcd_transfer_rsp_status(struct ufs_hba *hba, struct ufshcd_lrb *lrbp)
1192{
1193 int result = 0;
1194 int scsi_status;
1195 int ocs;
1196
1197 /* overall command status of utrd */
1198 ocs = ufshcd_get_tr_ocs(lrbp);
1199
1200 switch (ocs) {
1201 case OCS_SUCCESS:
1202
1203 /* check if the returned transfer response is valid */
1204 result = ufshcd_is_valid_req_rsp(lrbp->ucd_rsp_ptr);
1205 if (result) {
			dev_err(hba->dev,
				"Invalid response = %x\n", result);
1208 break;
1209 }
1210
1211 /*
1212 * get the response UPIU result to extract
1213 * the SCSI command status
1214 */
1215 result = ufshcd_get_rsp_upiu_result(lrbp->ucd_rsp_ptr);
1216
1217 /*
1218 * get the result based on SCSI status response
1219 * to notify the SCSI midlayer of the command status
1220 */
1221 scsi_status = result & MASK_SCSI_STATUS;
1222 result = ufshcd_scsi_cmd_status(lrbp, scsi_status);
1223 break;
1224 case OCS_ABORTED:
1225 result |= DID_ABORT << 16;
1226 break;
1227 case OCS_INVALID_CMD_TABLE_ATTR:
1228 case OCS_INVALID_PRDT_ATTR:
1229 case OCS_MISMATCH_DATA_BUF_SIZE:
1230 case OCS_MISMATCH_RESP_UPIU_SIZE:
1231 case OCS_PEER_COMM_FAILURE:
1232 case OCS_FATAL_ERROR:
1233 default:
1234 result |= DID_ERROR << 16;
		dev_err(hba->dev,
			"OCS error from controller = %x\n", ocs);
1237 break;
1238 } /* end of switch */
1239
1240 return result;
1241}
1242
6ccf44fe
SJ
1243/**
1244 * ufshcd_uic_cmd_compl - handle completion of uic command
1245 * @hba: per adapter instance
1246 */
1247static void ufshcd_uic_cmd_compl(struct ufs_hba *hba)
1248{
1249 if (hba->active_uic_cmd) {
1250 hba->active_uic_cmd->argument2 |=
1251 ufshcd_get_uic_cmd_result(hba);
1252 complete(&hba->active_uic_cmd->done);
1253 }
1254}
1255
7a3e97b0
SY
1256/**
1257 * ufshcd_transfer_req_compl - handle SCSI and query command completion
1258 * @hba: per adapter instance
1259 */
1260static void ufshcd_transfer_req_compl(struct ufs_hba *hba)
1261{
1262 struct ufshcd_lrb *lrb;
1263 unsigned long completed_reqs;
1264 u32 tr_doorbell;
1265 int result;
1266 int index;
1267
1268 lrb = hba->lrb;
b873a275 1269 tr_doorbell = ufshcd_readl(hba, REG_UTP_TRANSFER_REQ_DOOR_BELL);
7a3e97b0
SY
1270 completed_reqs = tr_doorbell ^ hba->outstanding_reqs;
1271
1272 for (index = 0; index < hba->nutrs; index++) {
1273 if (test_bit(index, &completed_reqs)) {
1274
1275 result = ufshcd_transfer_rsp_status(hba, &lrb[index]);
1276
1277 if (lrb[index].cmd) {
1278 scsi_dma_unmap(lrb[index].cmd);
1279 lrb[index].cmd->result = result;
1280 lrb[index].cmd->scsi_done(lrb[index].cmd);
1281
1282 /* Mark completed command as NULL in LRB */
1283 lrb[index].cmd = NULL;
1284 }
1285 } /* end of if */
1286 } /* end of for */
1287
1288 /* clear corresponding bits of completed commands */
1289 hba->outstanding_reqs ^= completed_reqs;
1290
1291 /* Reset interrupt aggregation counters */
1292 ufshcd_config_int_aggr(hba, INT_AGGR_RESET);
1293}
1294
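/*
 * Worked example for the doorbell arithmetic above (illustrative, not from
 * the original source): if outstanding_reqs is 0b1011 and the doorbell now
 * reads 0b1001, then completed_reqs = 0b1001 ^ 0b1011 = 0b0010, i.e. only
 * the request in slot 1 has completed; the final XOR with outstanding_reqs
 * clears exactly that bit.
 */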
7a3e97b0
SY
1295/**
 * ufshcd_fatal_err_handler - handle fatal errors
 * @work: pointer to work structure
1298 */
1299static void ufshcd_fatal_err_handler(struct work_struct *work)
1300{
1301 struct ufs_hba *hba;
1302 hba = container_of(work, struct ufs_hba, feh_workq);
1303
1304 /* check if reset is already in progress */
1305 if (hba->ufshcd_state != UFSHCD_STATE_RESET)
1306 ufshcd_do_reset(hba);
1307}
1308
1309/**
 * ufshcd_err_handler - Check for fatal errors
 * @hba: per adapter instance
1312 */
1313static void ufshcd_err_handler(struct ufs_hba *hba)
1314{
1315 u32 reg;
1316
1317 if (hba->errors & INT_FATAL_ERRORS)
1318 goto fatal_eh;
1319
1320 if (hba->errors & UIC_ERROR) {
cf9f4b59 1321 reg = ufshcd_readl(hba, REG_UIC_ERROR_CODE_DATA_LINK_LAYER);
7a3e97b0
SY
1322 if (reg & UIC_DATA_LINK_LAYER_ERROR_PA_INIT)
1323 goto fatal_eh;
1324 }
1325 return;
1326fatal_eh:
1327 hba->ufshcd_state = UFSHCD_STATE_ERROR;
1328 schedule_work(&hba->feh_workq);
1329}
1330
1331/**
1332 * ufshcd_tmc_handler - handle task management function completion
1333 * @hba: per adapter instance
1334 */
1335static void ufshcd_tmc_handler(struct ufs_hba *hba)
1336{
1337 u32 tm_doorbell;
1338
b873a275 1339 tm_doorbell = ufshcd_readl(hba, REG_UTP_TASK_REQ_DOOR_BELL);
7a3e97b0
SY
1340 hba->tm_condition = tm_doorbell ^ hba->outstanding_tasks;
1341 wake_up_interruptible(&hba->ufshcd_tm_wait_queue);
1342}
1343
1344/**
1345 * ufshcd_sl_intr - Interrupt service routine
1346 * @hba: per adapter instance
1347 * @intr_status: contains interrupts generated by the controller
1348 */
1349static void ufshcd_sl_intr(struct ufs_hba *hba, u32 intr_status)
1350{
1351 hba->errors = UFSHCD_ERROR_MASK & intr_status;
1352 if (hba->errors)
1353 ufshcd_err_handler(hba);
1354
1355 if (intr_status & UIC_COMMAND_COMPL)
6ccf44fe 1356 ufshcd_uic_cmd_compl(hba);
7a3e97b0
SY
1357
1358 if (intr_status & UTP_TASK_REQ_COMPL)
1359 ufshcd_tmc_handler(hba);
1360
1361 if (intr_status & UTP_TRANSFER_REQ_COMPL)
1362 ufshcd_transfer_req_compl(hba);
1363}
1364
1365/**
1366 * ufshcd_intr - Main interrupt service routine
1367 * @irq: irq number
1368 * @__hba: pointer to adapter instance
1369 *
1370 * Returns IRQ_HANDLED - If interrupt is valid
1371 * IRQ_NONE - If invalid interrupt
1372 */
1373static irqreturn_t ufshcd_intr(int irq, void *__hba)
1374{
1375 u32 intr_status;
1376 irqreturn_t retval = IRQ_NONE;
1377 struct ufs_hba *hba = __hba;
1378
1379 spin_lock(hba->host->host_lock);
b873a275 1380 intr_status = ufshcd_readl(hba, REG_INTERRUPT_STATUS);
7a3e97b0
SY
1381
1382 if (intr_status) {
261ea452 1383 ufshcd_writel(hba, intr_status, REG_INTERRUPT_STATUS);
7a3e97b0 1384 ufshcd_sl_intr(hba, intr_status);
7a3e97b0
SY
1385 retval = IRQ_HANDLED;
1386 }
1387 spin_unlock(hba->host->host_lock);
1388 return retval;
1389}
1390
1391/**
1392 * ufshcd_issue_tm_cmd - issues task management commands to controller
1393 * @hba: per adapter instance
 * @lrbp: pointer to local reference block
 * @tm_function: task management function opcode
1395 *
1396 * Returns SUCCESS/FAILED
1397 */
1398static int
1399ufshcd_issue_tm_cmd(struct ufs_hba *hba,
1400 struct ufshcd_lrb *lrbp,
1401 u8 tm_function)
1402{
1403 struct utp_task_req_desc *task_req_descp;
1404 struct utp_upiu_task_req *task_req_upiup;
1405 struct Scsi_Host *host;
1406 unsigned long flags;
1407 int free_slot = 0;
1408 int err;
1409
1410 host = hba->host;
1411
1412 spin_lock_irqsave(host->host_lock, flags);
1413
1414 /* If task management queue is full */
1415 free_slot = ufshcd_get_tm_free_slot(hba);
1416 if (free_slot >= hba->nutmrs) {
1417 spin_unlock_irqrestore(host->host_lock, flags);
3b1d0580 1418 dev_err(hba->dev, "Task management queue full\n");
7a3e97b0
SY
1419 err = FAILED;
1420 goto out;
1421 }
1422
1423 task_req_descp = hba->utmrdl_base_addr;
1424 task_req_descp += free_slot;
1425
1426 /* Configure task request descriptor */
1427 task_req_descp->header.dword_0 = cpu_to_le32(UTP_REQ_DESC_INT_CMD);
1428 task_req_descp->header.dword_2 =
1429 cpu_to_le32(OCS_INVALID_COMMAND_STATUS);
1430
1431 /* Configure task request UPIU */
1432 task_req_upiup =
1433 (struct utp_upiu_task_req *) task_req_descp->task_req_upiu;
1434 task_req_upiup->header.dword_0 =
1435 cpu_to_be32(UPIU_HEADER_DWORD(UPIU_TRANSACTION_TASK_REQ, 0,
1436 lrbp->lun, lrbp->task_tag));
1437 task_req_upiup->header.dword_1 =
1438 cpu_to_be32(UPIU_HEADER_DWORD(0, tm_function, 0, 0));
1439
1440 task_req_upiup->input_param1 = lrbp->lun;
1441 task_req_upiup->input_param1 =
1442 cpu_to_be32(task_req_upiup->input_param1);
1443 task_req_upiup->input_param2 = lrbp->task_tag;
1444 task_req_upiup->input_param2 =
1445 cpu_to_be32(task_req_upiup->input_param2);
1446
1447 /* send command to the controller */
1448 __set_bit(free_slot, &hba->outstanding_tasks);
b873a275 1449 ufshcd_writel(hba, 1 << free_slot, REG_UTP_TASK_REQ_DOOR_BELL);
7a3e97b0
SY
1450
1451 spin_unlock_irqrestore(host->host_lock, flags);
1452
1453 /* wait until the task management command is completed */
1454 err =
1455 wait_event_interruptible_timeout(hba->ufshcd_tm_wait_queue,
1456 (test_bit(free_slot,
1457 &hba->tm_condition) != 0),
1458 60 * HZ);
1459 if (!err) {
		dev_err(hba->dev,
			"Task management command timed-out\n");
1462 err = FAILED;
1463 goto out;
1464 }
1465 clear_bit(free_slot, &hba->tm_condition);
94c122ab 1466 err = ufshcd_task_req_compl(hba, free_slot);
7a3e97b0
SY
1467out:
1468 return err;
1469}
1470
1471/**
1472 * ufshcd_device_reset - reset device and abort all the pending commands
1473 * @cmd: SCSI command pointer
1474 *
1475 * Returns SUCCESS/FAILED
1476 */
1477static int ufshcd_device_reset(struct scsi_cmnd *cmd)
1478{
1479 struct Scsi_Host *host;
1480 struct ufs_hba *hba;
1481 unsigned int tag;
1482 u32 pos;
1483 int err;
1484
1485 host = cmd->device->host;
1486 hba = shost_priv(host);
1487 tag = cmd->request->tag;
1488
1489 err = ufshcd_issue_tm_cmd(hba, &hba->lrb[tag], UFS_LOGICAL_RESET);
94c122ab 1490 if (err == FAILED)
7a3e97b0
SY
1491 goto out;
1492
1493 for (pos = 0; pos < hba->nutrs; pos++) {
1494 if (test_bit(pos, &hba->outstanding_reqs) &&
1495 (hba->lrb[tag].lun == hba->lrb[pos].lun)) {
1496
1497 /* clear the respective UTRLCLR register bit */
1498 ufshcd_utrl_clear(hba, pos);
1499
1500 clear_bit(pos, &hba->outstanding_reqs);
1501
1502 if (hba->lrb[pos].cmd) {
1503 scsi_dma_unmap(hba->lrb[pos].cmd);
1504 hba->lrb[pos].cmd->result =
1505 DID_ABORT << 16;
1506 hba->lrb[pos].cmd->scsi_done(cmd);
1507 hba->lrb[pos].cmd = NULL;
1508 }
1509 }
1510 } /* end of for */
1511out:
1512 return err;
1513}
1514
1515/**
1516 * ufshcd_host_reset - Main reset function registered with scsi layer
1517 * @cmd: SCSI command pointer
1518 *
1519 * Returns SUCCESS/FAILED
1520 */
1521static int ufshcd_host_reset(struct scsi_cmnd *cmd)
1522{
1523 struct ufs_hba *hba;
1524
1525 hba = shost_priv(cmd->device->host);
1526
1527 if (hba->ufshcd_state == UFSHCD_STATE_RESET)
1528 return SUCCESS;
1529
94c122ab 1530 return ufshcd_do_reset(hba);
7a3e97b0
SY
1531}
1532
1533/**
1534 * ufshcd_abort - abort a specific command
1535 * @cmd: SCSI command pointer
1536 *
1537 * Returns SUCCESS/FAILED
1538 */
1539static int ufshcd_abort(struct scsi_cmnd *cmd)
1540{
1541 struct Scsi_Host *host;
1542 struct ufs_hba *hba;
1543 unsigned long flags;
1544 unsigned int tag;
1545 int err;
1546
1547 host = cmd->device->host;
1548 hba = shost_priv(host);
1549 tag = cmd->request->tag;
1550
1551 spin_lock_irqsave(host->host_lock, flags);
1552
1553 /* check if command is still pending */
1554 if (!(test_bit(tag, &hba->outstanding_reqs))) {
1555 err = FAILED;
1556 spin_unlock_irqrestore(host->host_lock, flags);
1557 goto out;
1558 }
1559 spin_unlock_irqrestore(host->host_lock, flags);
1560
1561 err = ufshcd_issue_tm_cmd(hba, &hba->lrb[tag], UFS_ABORT_TASK);
94c122ab 1562 if (err == FAILED)
7a3e97b0
SY
1563 goto out;
1564
1565 scsi_dma_unmap(cmd);
1566
1567 spin_lock_irqsave(host->host_lock, flags);
1568
1569 /* clear the respective UTRLCLR register bit */
1570 ufshcd_utrl_clear(hba, tag);
1571
1572 __clear_bit(tag, &hba->outstanding_reqs);
1573 hba->lrb[tag].cmd = NULL;
1574 spin_unlock_irqrestore(host->host_lock, flags);
1575out:
1576 return err;
1577}
1578
6ccf44fe
SJ
1579/**
1580 * ufshcd_async_scan - asynchronous execution for link startup
1581 * @data: data pointer to pass to this function
1582 * @cookie: cookie data
1583 */
1584static void ufshcd_async_scan(void *data, async_cookie_t cookie)
1585{
1586 struct ufs_hba *hba = (struct ufs_hba *)data;
1587 int ret;
1588
1589 ret = ufshcd_link_startup(hba);
1590 if (!ret)
1591 scsi_scan_host(hba->host);
1592}
1593
7a3e97b0
SY
1594static struct scsi_host_template ufshcd_driver_template = {
1595 .module = THIS_MODULE,
1596 .name = UFSHCD,
1597 .proc_name = UFSHCD,
1598 .queuecommand = ufshcd_queuecommand,
1599 .slave_alloc = ufshcd_slave_alloc,
1600 .slave_destroy = ufshcd_slave_destroy,
1601 .eh_abort_handler = ufshcd_abort,
1602 .eh_device_reset_handler = ufshcd_device_reset,
1603 .eh_host_reset_handler = ufshcd_host_reset,
1604 .this_id = -1,
1605 .sg_tablesize = SG_ALL,
1606 .cmd_per_lun = UFSHCD_CMD_PER_LUN,
1607 .can_queue = UFSHCD_CAN_QUEUE,
1608};
1609
7a3e97b0
SY
1610/**
1611 * ufshcd_suspend - suspend power management function
3b1d0580 1612 * @hba: per adapter instance
7a3e97b0
SY
1613 * @state: power state
1614 *
1615 * Returns -ENOSYS
1616 */
3b1d0580 1617int ufshcd_suspend(struct ufs_hba *hba, pm_message_t state)
7a3e97b0
SY
1618{
1619 /*
1620 * TODO:
1621 * 1. Block SCSI requests from SCSI midlayer
1622 * 2. Change the internal driver state to non operational
1623 * 3. Set UTRLRSR and UTMRLRSR bits to zero
1624 * 4. Wait until outstanding commands are completed
1625 * 5. Set HCE to zero to send the UFS host controller to reset state
1626 */
1627
1628 return -ENOSYS;
1629}
3b1d0580 1630EXPORT_SYMBOL_GPL(ufshcd_suspend);
7a3e97b0
SY
1631
1632/**
1633 * ufshcd_resume - resume power management function
3b1d0580 1634 * @hba: per adapter instance
7a3e97b0
SY
1635 *
1636 * Returns -ENOSYS
1637 */
3b1d0580 1638int ufshcd_resume(struct ufs_hba *hba)
7a3e97b0
SY
1639{
1640 /*
1641 * TODO:
1642 * 1. Set HCE to 1, to start the UFS host controller
1643 * initialization process
1644 * 2. Set UTRLRSR and UTMRLRSR bits to 1
1645 * 3. Change the internal driver state to operational
1646 * 4. Unblock SCSI requests from SCSI midlayer
1647 */
1648
1649 return -ENOSYS;
1650}
3b1d0580
VH
1651EXPORT_SYMBOL_GPL(ufshcd_resume);
1652
/**
 * ufshcd_remove - de-allocate SCSI host and host memory space
 *		data structure memory
 * @hba - per adapter instance
 */
void ufshcd_remove(struct ufs_hba *hba)
{
	/* disable interrupts */
	ufshcd_disable_intr(hba, hba->intr_mask);
	ufshcd_hba_stop(hba);

	scsi_remove_host(hba->host);
	scsi_host_put(hba->host);
}
EXPORT_SYMBOL_GPL(ufshcd_remove);
1668
/**
 * ufshcd_init - Driver initialization routine
 * @dev: pointer to device handle
 * @hba_handle: driver private handle
 * @mmio_base: base register address
 * @irq: Interrupt line of device
 *
 * Returns 0 on success, non-zero value on failure
 */
int ufshcd_init(struct device *dev, struct ufs_hba **hba_handle,
		void __iomem *mmio_base, unsigned int irq)
7a3e97b0
SY
1679{
1680 struct Scsi_Host *host;
1681 struct ufs_hba *hba;
1682 int err;
1683
3b1d0580
VH
1684 if (!dev) {
1685 dev_err(dev,
1686 "Invalid memory reference for dev is NULL\n");
1687 err = -ENODEV;
7a3e97b0
SY
1688 goto out_error;
1689 }
1690
3b1d0580
VH
1691 if (!mmio_base) {
1692 dev_err(dev,
1693 "Invalid memory reference for mmio_base is NULL\n");
1694 err = -ENODEV;
1695 goto out_error;
1696 }
7a3e97b0
SY
1697
1698 host = scsi_host_alloc(&ufshcd_driver_template,
1699 sizeof(struct ufs_hba));
1700 if (!host) {
3b1d0580 1701 dev_err(dev, "scsi_host_alloc failed\n");
7a3e97b0 1702 err = -ENOMEM;
3b1d0580 1703 goto out_error;
7a3e97b0
SY
1704 }
1705 hba = shost_priv(host);
7a3e97b0 1706 hba->host = host;
3b1d0580
VH
1707 hba->dev = dev;
1708 hba->mmio_base = mmio_base;
1709 hba->irq = irq;
7a3e97b0
SY
1710
1711 /* Read capabilities registers */
1712 ufshcd_hba_capabilities(hba);
1713
1714 /* Get UFS version supported by the controller */
1715 hba->ufs_version = ufshcd_get_ufs_version(hba);
1716
2fbd009b
SJ
1717 /* Get Interrupt bit mask per version */
1718 hba->intr_mask = ufshcd_get_intr_mask(hba);
1719
7a3e97b0
SY
1720 /* Allocate memory for host memory space */
1721 err = ufshcd_memory_alloc(hba);
1722 if (err) {
3b1d0580
VH
1723 dev_err(hba->dev, "Memory allocation failed\n");
1724 goto out_disable;
7a3e97b0
SY
1725 }
1726
1727 /* Configure LRB */
1728 ufshcd_host_memory_configure(hba);
1729
1730 host->can_queue = hba->nutrs;
1731 host->cmd_per_lun = hba->nutrs;
1732 host->max_id = UFSHCD_MAX_ID;
1733 host->max_lun = UFSHCD_MAX_LUNS;
1734 host->max_channel = UFSHCD_MAX_CHANNEL;
1735 host->unique_id = host->host_no;
1736 host->max_cmd_len = MAX_CDB_SIZE;
1737
	/* Initialize wait queue for task management */
1739 init_waitqueue_head(&hba->ufshcd_tm_wait_queue);
1740
1741 /* Initialize work queues */
7a3e97b0
SY
1742 INIT_WORK(&hba->feh_workq, ufshcd_fatal_err_handler);
1743
6ccf44fe
SJ
1744 /* Initialize UIC command mutex */
1745 mutex_init(&hba->uic_cmd_mutex);
1746
7a3e97b0 1747 /* IRQ registration */
2953f850 1748 err = devm_request_irq(dev, irq, ufshcd_intr, IRQF_SHARED, UFSHCD, hba);
7a3e97b0 1749 if (err) {
3b1d0580 1750 dev_err(hba->dev, "request irq failed\n");
2953f850 1751 goto out_disable;
7a3e97b0
SY
1752 }
1753
1754 /* Enable SCSI tag mapping */
1755 err = scsi_init_shared_tag_map(host, host->can_queue);
1756 if (err) {
3b1d0580 1757 dev_err(hba->dev, "init shared queue failed\n");
2953f850 1758 goto out_disable;
7a3e97b0
SY
1759 }
1760
3b1d0580 1761 err = scsi_add_host(host, hba->dev);
7a3e97b0 1762 if (err) {
3b1d0580 1763 dev_err(hba->dev, "scsi_add_host failed\n");
2953f850 1764 goto out_disable;
7a3e97b0
SY
1765 }
1766
6ccf44fe
SJ
1767 /* Host controller enable */
1768 err = ufshcd_hba_enable(hba);
7a3e97b0 1769 if (err) {
6ccf44fe 1770 dev_err(hba->dev, "Host controller enable failed\n");
3b1d0580 1771 goto out_remove_scsi_host;
7a3e97b0 1772 }
6ccf44fe 1773
3b1d0580 1774 *hba_handle = hba;
7a3e97b0 1775
6ccf44fe
SJ
1776 async_schedule(ufshcd_async_scan, hba);
1777
7a3e97b0
SY
1778 return 0;
1779
3b1d0580
VH
1780out_remove_scsi_host:
1781 scsi_remove_host(hba->host);
3b1d0580
VH
1782out_disable:
1783 scsi_host_put(host);
1784out_error:
1785 return err;
1786}
1787EXPORT_SYMBOL_GPL(ufshcd_init);
1788
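/*
 * Usage sketch (illustrative only; the in-tree bus glue drivers, e.g. the
 * PCI glue, are the authoritative callers): a glue driver maps the UFSHCI
 * register space and hands it to this core, roughly:
 *
 *	struct ufs_hba *hba;
 *	void __iomem *mmio = ioremap(reg_start, reg_len);
 *	int err = ufshcd_init(dev, &hba, mmio, irq);
 *
 * and calls ufshcd_remove(hba) on teardown. Here dev, reg_start, reg_len
 * and irq stand for the bus-specific device handle and resources and are
 * named purely for illustration.
 */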
MODULE_AUTHOR("Santosh Yaraganavi <santosh.sy@samsung.com>");
MODULE_AUTHOR("Vinayak Holikatti <h.vinayak@samsung.com>");
MODULE_DESCRIPTION("Generic UFS host controller driver Core");
MODULE_LICENSE("GPL");
MODULE_VERSION(UFSHCD_DRIVER_VERSION);