drivers/scsi/ufs/ufshcd.c
1/*
2 * Universal Flash Storage Host controller driver Core
3 *
4 * This code is based on drivers/scsi/ufs/ufshcd.c
5 * Copyright (C) 2011-2013 Samsung India Software Operations
6 *
7 * Authors:
8 * Santosh Yaraganavi <santosh.sy@samsung.com>
9 * Vinayak Holikatti <h.vinayak@samsung.com>
10 *
11 * This program is free software; you can redistribute it and/or
12 * modify it under the terms of the GNU General Public License
13 * as published by the Free Software Foundation; either version 2
14 * of the License, or (at your option) any later version.
15 * See the COPYING file in the top-level directory or visit
16 * <http://www.gnu.org/licenses/gpl-2.0.html>
17 *
18 * This program is distributed in the hope that it will be useful,
19 * but WITHOUT ANY WARRANTY; without even the implied warranty of
20 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
21 * GNU General Public License for more details.
22 *
23 * This program is provided "AS IS" and "WITH ALL FAULTS" and
24 * without warranty of any kind. You are solely responsible for
25 * determining the appropriateness of using and distributing
26 * the program and assume all risks associated with your exercise
27 * of rights with respect to the program, including but not limited
28 * to infringement of third party rights, the risks and costs of
29 * program errors, damage to or loss of data, programs or equipment,
30 * and unavailability or interruption of operations. Under no
31 * circumstances will the contributor of this Program be liable for
32 * any damages of any kind arising from your use or distribution of
33 * this program.
34 */
35
36#include "ufshcd.h"
37
38enum {
39 UFSHCD_MAX_CHANNEL = 0,
40 UFSHCD_MAX_ID = 1,
41 UFSHCD_MAX_LUNS = 8,
42 UFSHCD_CMD_PER_LUN = 32,
43 UFSHCD_CAN_QUEUE = 32,
44};
45
46/* UFSHCD states */
47enum {
48 UFSHCD_STATE_OPERATIONAL,
49 UFSHCD_STATE_RESET,
50 UFSHCD_STATE_ERROR,
51};
52
53/* Interrupt configuration options */
54enum {
55 UFSHCD_INT_DISABLE,
56 UFSHCD_INT_ENABLE,
57 UFSHCD_INT_CLEAR,
58};
59
60/* Interrupt aggregation options */
61enum {
62 INT_AGGR_RESET,
63 INT_AGGR_CONFIG,
64};
65
66/**
67 * ufshcd_get_ufs_version - Get the UFS version supported by the HBA
68 * @hba - Pointer to adapter instance
69 *
70 * Returns UFSHCI version supported by the controller
71 */
72static inline u32 ufshcd_get_ufs_version(struct ufs_hba *hba)
73{
74 return readl(hba->mmio_base + REG_UFS_VERSION);
75}
76
77/**
78 * ufshcd_is_device_present - Check if any device is connected to
79 * the host controller
80 * @reg_hcs - host controller status register value
81 *
82 * Returns 1 if device present, 0 if no device detected
83 */
84static inline int ufshcd_is_device_present(u32 reg_hcs)
85{
86 return (DEVICE_PRESENT & reg_hcs) ? 1 : 0;
87}
88
89/**
90 * ufshcd_get_tr_ocs - Get the UTRD Overall Command Status
91 * @lrb: pointer to local command reference block
92 *
93 * This function is used to get the OCS field from UTRD
94 * Returns the OCS field in the UTRD
95 */
96static inline int ufshcd_get_tr_ocs(struct ufshcd_lrb *lrbp)
97{
98 return lrbp->utr_descriptor_ptr->header.dword_2 & MASK_OCS;
99}
100
101/**
102 * ufshcd_get_tmr_ocs - Get the UTMRD Overall Command Status
103 * @task_req_descp: pointer to utp_task_req_desc structure
104 *
105 * This function is used to get the OCS field from UTMRD
106 * Returns the OCS field in the UTMRD
107 */
108static inline int
109ufshcd_get_tmr_ocs(struct utp_task_req_desc *task_req_descp)
110{
111 return task_req_descp->header.dword_2 & MASK_OCS;
112}
113
114/**
115 * ufshcd_get_tm_free_slot - get a free slot for task management request
116 * @hba: per adapter instance
117 *
118 * Returns the free slot number, or the maximum number of task management
119 * request slots if the task management queue is full
120 */
121static inline int ufshcd_get_tm_free_slot(struct ufs_hba *hba)
122{
123 return find_first_zero_bit(&hba->outstanding_tasks, hba->nutmrs);
124}
125
126/**
127 * ufshcd_utrl_clear - Clear a bit in UTRLCLR register
128 * @hba: per adapter instance
129 * @pos: position of the bit to be cleared
130 */
131static inline void ufshcd_utrl_clear(struct ufs_hba *hba, u32 pos)
132{
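 /*
  * Per UFSHCI, a transfer request slot is cleared by writing '0' to its
  * bit in UTRLCLR, hence the inverted single-bit mask below.
  */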
133 writel(~(1 << pos),
134 (hba->mmio_base + REG_UTP_TRANSFER_REQ_LIST_CLEAR));
135}
136
137/**
138 * ufshcd_get_lists_status - Check UCRDY, UTRLRDY and UTMRLRDY
139 * @reg: Register value of host controller status
140 *
141 * Returns 0 on success and a positive value on failure
142 */
143static inline int ufshcd_get_lists_status(u32 reg)
144{
145 /*
146 * The mask 0xFF is for the following HCS register bits
147 * Bit Description
148 * 0 Device Present
149 * 1 UTRLRDY
150 * 2 UTMRLRDY
151 * 3 UCRDY
152 * 4 HEI
153 * 5 DEI
154 * 6-7 reserved
155 */
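 /*
  * Shift out the Device Present bit and XOR with 0x07: the result is
  * zero only when UTRLRDY, UTMRLRDY and UCRDY are all set while HEI
  * and DEI remain clear.
  */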
156 return (((reg) & (0xFF)) >> 1) ^ (0x07);
157}
158
159/**
160 * ufshcd_get_uic_cmd_result - Get the UIC command result
161 * @hba: Pointer to adapter instance
162 *
163 * This function gets the result of UIC command completion
164 * Returns 0 on success, non zero value on error
165 */
166static inline int ufshcd_get_uic_cmd_result(struct ufs_hba *hba)
167{
168 return readl(hba->mmio_base + REG_UIC_COMMAND_ARG_2) &
169 MASK_UIC_COMMAND_RESULT;
170}
171
172/**
173 * ufshcd_free_hba_memory - Free allocated memory for LRB, request
174 * and task lists
175 * @hba: Pointer to adapter instance
176 */
177static inline void ufshcd_free_hba_memory(struct ufs_hba *hba)
178{
179 size_t utmrdl_size, utrdl_size, ucdl_size;
180
181 kfree(hba->lrb);
182
183 if (hba->utmrdl_base_addr) {
184 utmrdl_size = sizeof(struct utp_task_req_desc) * hba->nutmrs;
185 dma_free_coherent(hba->dev, utmrdl_size,
186 hba->utmrdl_base_addr, hba->utmrdl_dma_addr);
187 }
188
189 if (hba->utrdl_base_addr) {
190 utrdl_size =
191 (sizeof(struct utp_transfer_req_desc) * hba->nutrs);
192 dma_free_coherent(hba->dev, utrdl_size,
193 hba->utrdl_base_addr, hba->utrdl_dma_addr);
194 }
195
196 if (hba->ucdl_base_addr) {
197 ucdl_size =
198 (sizeof(struct utp_transfer_cmd_desc) * hba->nutrs);
199 dma_free_coherent(hba->dev, ucdl_size,
200 hba->ucdl_base_addr, hba->ucdl_dma_addr);
201 }
202}
203
204/**
205 * ufshcd_is_valid_req_rsp - checks if controller TR response is valid
206 * @ucd_rsp_ptr: pointer to response UPIU
207 *
208 * This function checks the response UPIU for valid transaction type in
209 * response field
210 * Returns 0 on success, non-zero on failure
211 */
212static inline int
213ufshcd_is_valid_req_rsp(struct utp_upiu_rsp *ucd_rsp_ptr)
214{
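 /*
  * The transaction type is carried in the most significant byte of the
  * response UPIU header dword_0, hence the shift by 24.
  */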
215 return ((be32_to_cpu(ucd_rsp_ptr->header.dword_0) >> 24) ==
216 UPIU_TRANSACTION_RESPONSE) ? 0 : DID_ERROR << 16;
217}
218
219/**
220 * ufshcd_get_rsp_upiu_result - Get the result from response UPIU
221 * @ucd_rsp_ptr: pointer to response UPIU
222 *
223 * This function gets the response status and scsi_status from response UPIU
224 * Returns the response result code.
225 */
226static inline int
227ufshcd_get_rsp_upiu_result(struct utp_upiu_rsp *ucd_rsp_ptr)
228{
229 return be32_to_cpu(ucd_rsp_ptr->header.dword_1) & MASK_RSP_UPIU_RESULT;
230}
231
232/**
233 * ufshcd_config_int_aggr - Configure interrupt aggregation values.
234 * Currently there is no use case where we want to configure
235 * interrupt aggregation dynamically. So to configure interrupt
236 * aggregation, #define INT_AGGR_COUNTER_THRESHOLD_VALUE and
237 * INT_AGGR_TIMEOUT_VALUE are used.
238 * @hba: per adapter instance
239 * @option: Interrupt aggregation option
240 */
241static inline void
242ufshcd_config_int_aggr(struct ufs_hba *hba, int option)
243{
244 switch (option) {
245 case INT_AGGR_RESET:
246 writel((INT_AGGR_ENABLE |
247 INT_AGGR_COUNTER_AND_TIMER_RESET),
248 (hba->mmio_base +
249 REG_UTP_TRANSFER_REQ_INT_AGG_CONTROL));
250 break;
251 case INT_AGGR_CONFIG:
252 writel((INT_AGGR_ENABLE |
253 INT_AGGR_PARAM_WRITE |
254 INT_AGGR_COUNTER_THRESHOLD_VALUE |
255 INT_AGGR_TIMEOUT_VALUE),
256 (hba->mmio_base +
257 REG_UTP_TRANSFER_REQ_INT_AGG_CONTROL));
258 break;
259 }
260}
261
262/**
263 * ufshcd_enable_run_stop_reg - Enable run-stop registers.
264 * Setting the run-stop registers to 1 indicates to the
265 * host controller that it can process requests
266 * @hba: per adapter instance
267 */
268static void ufshcd_enable_run_stop_reg(struct ufs_hba *hba)
269{
270 writel(UTP_TASK_REQ_LIST_RUN_STOP_BIT,
271 (hba->mmio_base +
272 REG_UTP_TASK_REQ_LIST_RUN_STOP));
273 writel(UTP_TRANSFER_REQ_LIST_RUN_STOP_BIT,
274 (hba->mmio_base +
275 REG_UTP_TRANSFER_REQ_LIST_RUN_STOP));
276}
277
278/**
279 * ufshcd_hba_start - Start controller initialization sequence
280 * @hba: per adapter instance
281 */
282static inline void ufshcd_hba_start(struct ufs_hba *hba)
283{
284 writel(CONTROLLER_ENABLE , (hba->mmio_base + REG_CONTROLLER_ENABLE));
285}
286
287/**
288 * ufshcd_is_hba_active - Get controller state
289 * @hba: per adapter instance
290 *
291 * Returns zero if controller is active, 1 otherwise
292 */
293static inline int ufshcd_is_hba_active(struct ufs_hba *hba)
294{
295 return (readl(hba->mmio_base + REG_CONTROLLER_ENABLE) & 0x1) ? 0 : 1;
296}
297
298/**
299 * ufshcd_send_command - Send SCSI or device management commands
300 * @hba: per adapter instance
301 * @task_tag: Task tag of the command
302 */
303static inline
304void ufshcd_send_command(struct ufs_hba *hba, unsigned int task_tag)
305{
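 /*
  * Mark the slot as outstanding before ringing the doorbell so that the
  * completion path, which works off outstanding_reqs, sees it.
  */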
306 __set_bit(task_tag, &hba->outstanding_reqs);
307 writel((1 << task_tag),
308 (hba->mmio_base + REG_UTP_TRANSFER_REQ_DOOR_BELL));
309}
310
311/**
312 * ufshcd_copy_sense_data - Copy sense data in case of check condition
313 * @lrb - pointer to local reference block
314 */
315static inline void ufshcd_copy_sense_data(struct ufshcd_lrb *lrbp)
316{
317 int len;
318 if (lrbp->sense_buffer) {
319 len = be16_to_cpu(lrbp->ucd_rsp_ptr->sense_data_len);
320 memcpy(lrbp->sense_buffer,
321 lrbp->ucd_rsp_ptr->sense_data,
322 min_t(int, len, SCSI_SENSE_BUFFERSIZE));
323 }
324}
325
326/**
327 * ufshcd_hba_capabilities - Read controller capabilities
328 * @hba: per adapter instance
329 */
330static inline void ufshcd_hba_capabilities(struct ufs_hba *hba)
331{
332 hba->capabilities =
333 readl(hba->mmio_base + REG_CONTROLLER_CAPABILITIES);
334
335 /* nutrs and nutmrs are 0 based values */
336 hba->nutrs = (hba->capabilities & MASK_TRANSFER_REQUESTS_SLOTS) + 1;
337 hba->nutmrs =
338 ((hba->capabilities & MASK_TASK_MANAGEMENT_REQUEST_SLOTS) >> 16) + 1;
339}
340
341/**
342 * ufshcd_send_uic_command - Send UIC commands to unipro layers
343 * @hba: per adapter instance
344 * @uic_command: UIC command
345 */
346static inline void
347ufshcd_send_uic_command(struct ufs_hba *hba, struct uic_command *uic_cmnd)
348{
349 /* Write Args */
350 writel(uic_cmnd->argument1,
351 (hba->mmio_base + REG_UIC_COMMAND_ARG_1));
352 writel(uic_cmnd->argument2,
353 (hba->mmio_base + REG_UIC_COMMAND_ARG_2));
354 writel(uic_cmnd->argument3,
355 (hba->mmio_base + REG_UIC_COMMAND_ARG_3));
356
357 /* Write UIC Cmd */
358 writel((uic_cmnd->command & COMMAND_OPCODE_MASK),
359 (hba->mmio_base + REG_UIC_COMMAND));
360}
361
362/**
363 * ufshcd_map_sg - Map scatter-gather list to prdt
364 * @lrbp - pointer to local reference block
365 *
366 * Returns 0 in case of success, non-zero value in case of failure
367 */
368static int ufshcd_map_sg(struct ufshcd_lrb *lrbp)
369{
370 struct ufshcd_sg_entry *prd_table;
371 struct scatterlist *sg;
372 struct scsi_cmnd *cmd;
373 int sg_segments;
374 int i;
375
376 cmd = lrbp->cmd;
377 sg_segments = scsi_dma_map(cmd);
378 if (sg_segments < 0)
379 return sg_segments;
380
381 if (sg_segments) {
382 lrbp->utr_descriptor_ptr->prd_table_length =
383 cpu_to_le16((u16) (sg_segments));
384
385 prd_table = (struct ufshcd_sg_entry *)lrbp->ucd_prdt_ptr;
386
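  /*
   * Fill one PRDT entry per scatter-gather element; the size field
   * is programmed as (byte count - 1).
   */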
387 scsi_for_each_sg(cmd, sg, sg_segments, i) {
388 prd_table[i].size =
389 cpu_to_le32(((u32) sg_dma_len(sg))-1);
390 prd_table[i].base_addr =
391 cpu_to_le32(lower_32_bits(sg->dma_address));
392 prd_table[i].upper_addr =
393 cpu_to_le32(upper_32_bits(sg->dma_address));
394 }
395 } else {
396 lrbp->utr_descriptor_ptr->prd_table_length = 0;
397 }
398
399 return 0;
400}
401
402/**
403 * ufshcd_int_config - enable/disable interrupts
404 * @hba: per adapter instance
405 * @option: interrupt option
406 */
407static void ufshcd_int_config(struct ufs_hba *hba, u32 option)
408{
409 switch (option) {
410 case UFSHCD_INT_ENABLE:
411 writel(hba->int_enable_mask,
412 (hba->mmio_base + REG_INTERRUPT_ENABLE));
413 break;
414 case UFSHCD_INT_DISABLE:
415 if (hba->ufs_version == UFSHCI_VERSION_10)
416 writel(INTERRUPT_DISABLE_MASK_10,
417 (hba->mmio_base + REG_INTERRUPT_ENABLE));
418 else
419 writel(INTERRUPT_DISABLE_MASK_11,
420 (hba->mmio_base + REG_INTERRUPT_ENABLE));
421 break;
422 }
423}
424
425/**
426 * ufshcd_compose_upiu - form UFS Protocol Information Unit(UPIU)
427 * @lrb - pointer to local reference block
428 */
429static void ufshcd_compose_upiu(struct ufshcd_lrb *lrbp)
430{
431 struct utp_transfer_req_desc *req_desc;
432 struct utp_upiu_cmd *ucd_cmd_ptr;
433 u32 data_direction;
434 u32 upiu_flags;
435
436 ucd_cmd_ptr = lrbp->ucd_cmd_ptr;
437 req_desc = lrbp->utr_descriptor_ptr;
438
439 switch (lrbp->command_type) {
440 case UTP_CMD_TYPE_SCSI:
441 if (lrbp->cmd->sc_data_direction == DMA_FROM_DEVICE) {
442 data_direction = UTP_DEVICE_TO_HOST;
443 upiu_flags = UPIU_CMD_FLAGS_READ;
444 } else if (lrbp->cmd->sc_data_direction == DMA_TO_DEVICE) {
445 data_direction = UTP_HOST_TO_DEVICE;
446 upiu_flags = UPIU_CMD_FLAGS_WRITE;
447 } else {
448 data_direction = UTP_NO_DATA_TRANSFER;
449 upiu_flags = UPIU_CMD_FLAGS_NONE;
450 }
451
452 /* Transfer request descriptor header fields */
453 req_desc->header.dword_0 =
454 cpu_to_le32(data_direction | UTP_SCSI_COMMAND);
455
456 /*
457 * assigning invalid value for command status. Controller
458 * updates OCS on command completion, with the command
459 * status
460 */
461 req_desc->header.dword_2 =
462 cpu_to_le32(OCS_INVALID_COMMAND_STATUS);
463
464 /* command descriptor fields */
465 ucd_cmd_ptr->header.dword_0 =
466 cpu_to_be32(UPIU_HEADER_DWORD(UPIU_TRANSACTION_COMMAND,
467 upiu_flags,
468 lrbp->lun,
469 lrbp->task_tag));
470 ucd_cmd_ptr->header.dword_1 =
471 cpu_to_be32(
472 UPIU_HEADER_DWORD(UPIU_COMMAND_SET_TYPE_SCSI,
473 0,
474 0,
475 0));
476
477 /* Total EHS length and Data segment length will be zero */
478 ucd_cmd_ptr->header.dword_2 = 0;
479
480 ucd_cmd_ptr->exp_data_transfer_len =
481 cpu_to_be32(lrbp->cmd->sdb.length);
482
483 memcpy(ucd_cmd_ptr->cdb,
484 lrbp->cmd->cmnd,
485 (min_t(unsigned short,
486 lrbp->cmd->cmd_len,
487 MAX_CDB_SIZE)));
488 break;
489 case UTP_CMD_TYPE_DEV_MANAGE:
490 /* For query function implementation */
491 break;
492 case UTP_CMD_TYPE_UFS:
493 /* For UFS native command implementation */
494 break;
495 } /* end of switch */
496}
497
498/**
499 * ufshcd_queuecommand - main entry point for SCSI requests
500 * @host: SCSI host pointer
501 * @cmd: command from SCSI Midlayer
502 *
503 * Returns 0 for success, non-zero in case of failure
504 */
505static int ufshcd_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *cmd)
506{
507 struct ufshcd_lrb *lrbp;
508 struct ufs_hba *hba;
509 unsigned long flags;
510 int tag;
511 int err = 0;
512
513 hba = shost_priv(host);
514
515 tag = cmd->request->tag;
516
517 if (hba->ufshcd_state != UFSHCD_STATE_OPERATIONAL) {
518 err = SCSI_MLQUEUE_HOST_BUSY;
519 goto out;
520 }
521
522 lrbp = &hba->lrb[tag];
523
524 lrbp->cmd = cmd;
525 lrbp->sense_bufflen = SCSI_SENSE_BUFFERSIZE;
526 lrbp->sense_buffer = cmd->sense_buffer;
527 lrbp->task_tag = tag;
528 lrbp->lun = cmd->device->lun;
529
530 lrbp->command_type = UTP_CMD_TYPE_SCSI;
531
532 /* form UPIU before issuing the command */
533 ufshcd_compose_upiu(lrbp);
534 err = ufshcd_map_sg(lrbp);
535 if (err)
536 goto out;
537
538 /* issue command to the controller */
539 spin_lock_irqsave(hba->host->host_lock, flags);
540 ufshcd_send_command(hba, tag);
541 spin_unlock_irqrestore(hba->host->host_lock, flags);
542out:
543 return err;
544}
545
546/**
547 * ufshcd_memory_alloc - allocate memory for host memory space data structures
548 * @hba: per adapter instance
549 *
550 * 1. Allocate DMA memory for Command Descriptor array
551 * Each command descriptor consist of Command UPIU, Response UPIU and PRDT
552 * 2. Allocate DMA memory for UTP Transfer Request Descriptor List (UTRDL).
553 * 3. Allocate DMA memory for UTP Task Management Request Descriptor List
554 * (UTMRDL)
555 * 4. Allocate memory for local reference block(lrb).
556 *
557 * Returns 0 for success, non-zero in case of failure
558 */
559static int ufshcd_memory_alloc(struct ufs_hba *hba)
560{
561 size_t utmrdl_size, utrdl_size, ucdl_size;
562
563 /* Allocate memory for UTP command descriptors */
564 ucdl_size = (sizeof(struct utp_transfer_cmd_desc) * hba->nutrs);
565 hba->ucdl_base_addr = dma_alloc_coherent(hba->dev,
566 ucdl_size,
567 &hba->ucdl_dma_addr,
568 GFP_KERNEL);
569
570 /*
571 * UFSHCI requires UTP command descriptor to be 128 byte aligned.
572 * make sure hba->ucdl_dma_addr is aligned to PAGE_SIZE
573 * if hba->ucdl_dma_addr is aligned to PAGE_SIZE, then it will
574 * be aligned to 128 bytes as well
575 */
576 if (!hba->ucdl_base_addr ||
577 WARN_ON(hba->ucdl_dma_addr & (PAGE_SIZE - 1))) {
578 dev_err(hba->dev,
579 "Command Descriptor Memory allocation failed\n");
580 goto out;
581 }
582
583 /*
584 * Allocate memory for UTP Transfer descriptors
585 * UFSHCI requires 1024 byte alignment of UTRD
586 */
587 utrdl_size = (sizeof(struct utp_transfer_req_desc) * hba->nutrs);
588 hba->utrdl_base_addr = dma_alloc_coherent(hba->dev,
589 utrdl_size,
590 &hba->utrdl_dma_addr,
591 GFP_KERNEL);
592 if (!hba->utrdl_base_addr ||
593 WARN_ON(hba->utrdl_dma_addr & (PAGE_SIZE - 1))) {
594 dev_err(hba->dev,
595 "Transfer Descriptor Memory allocation failed\n");
596 goto out;
597 }
598
599 /*
600 * Allocate memory for UTP Task Management descriptors
601 * UFSHCI requires 1024 byte alignment of UTMRD
602 */
603 utmrdl_size = sizeof(struct utp_task_req_desc) * hba->nutmrs;
604 hba->utmrdl_base_addr = dma_alloc_coherent(hba->dev,
605 utmrdl_size,
606 &hba->utmrdl_dma_addr,
607 GFP_KERNEL);
608 if (!hba->utmrdl_base_addr ||
609 WARN_ON(hba->utmrdl_dma_addr & (PAGE_SIZE - 1))) {
610 dev_err(hba->dev,
611 "Task Management Descriptor Memory allocation failed\n");
612 goto out;
613 }
614
615 /* Allocate memory for local reference block */
616 hba->lrb = kcalloc(hba->nutrs, sizeof(struct ufshcd_lrb), GFP_KERNEL);
617 if (!hba->lrb) {
618 dev_err(hba->dev, "LRB Memory allocation failed\n");
619 goto out;
620 }
621 return 0;
622out:
623 ufshcd_free_hba_memory(hba);
624 return -ENOMEM;
625}
626
627/**
628 * ufshcd_host_memory_configure - configure local reference block with
629 * memory offsets
630 * @hba: per adapter instance
631 *
632 * Configure Host memory space
633 * 1. Update Corresponding UTRD.UCDBA and UTRD.UCDBAU with UCD DMA
634 * address.
635 * 2. Update each UTRD with Response UPIU offset, Response UPIU length
636 * and PRDT offset.
637 * 3. Save the corresponding addresses of UTRD, UCD.CMD, UCD.RSP and UCD.PRDT
638 * into local reference block.
639 */
640static void ufshcd_host_memory_configure(struct ufs_hba *hba)
641{
642 struct utp_transfer_cmd_desc *cmd_descp;
643 struct utp_transfer_req_desc *utrdlp;
644 dma_addr_t cmd_desc_dma_addr;
645 dma_addr_t cmd_desc_element_addr;
646 u16 response_offset;
647 u16 prdt_offset;
648 int cmd_desc_size;
649 int i;
650
651 utrdlp = hba->utrdl_base_addr;
652 cmd_descp = hba->ucdl_base_addr;
653
654 response_offset =
655 offsetof(struct utp_transfer_cmd_desc, response_upiu);
656 prdt_offset =
657 offsetof(struct utp_transfer_cmd_desc, prd_table);
658
659 cmd_desc_size = sizeof(struct utp_transfer_cmd_desc);
660 cmd_desc_dma_addr = hba->ucdl_dma_addr;
661
662 for (i = 0; i < hba->nutrs; i++) {
663 /* Configure UTRD with command descriptor base address */
664 cmd_desc_element_addr =
665 (cmd_desc_dma_addr + (cmd_desc_size * i));
666 utrdlp[i].command_desc_base_addr_lo =
667 cpu_to_le32(lower_32_bits(cmd_desc_element_addr));
668 utrdlp[i].command_desc_base_addr_hi =
669 cpu_to_le32(upper_32_bits(cmd_desc_element_addr));
670
671 /* Response upiu and prdt offset should be in double words */
672 utrdlp[i].response_upiu_offset =
673 cpu_to_le16((response_offset >> 2));
674 utrdlp[i].prd_table_offset =
675 cpu_to_le16((prdt_offset >> 2));
676 utrdlp[i].response_upiu_length =
677 cpu_to_le16(ALIGNED_UPIU_SIZE);
678
679 hba->lrb[i].utr_descriptor_ptr = (utrdlp + i);
680 hba->lrb[i].ucd_cmd_ptr =
681 (struct utp_upiu_cmd *)(cmd_descp + i);
682 hba->lrb[i].ucd_rsp_ptr =
683 (struct utp_upiu_rsp *)cmd_descp[i].response_upiu;
684 hba->lrb[i].ucd_prdt_ptr =
685 (struct ufshcd_sg_entry *)cmd_descp[i].prd_table;
686 }
687}
688
689/**
690 * ufshcd_dme_link_startup - Notify Unipro to perform link startup
691 * @hba: per adapter instance
692 *
693 * UIC_CMD_DME_LINK_STARTUP command must be issued to Unipro layer,
694 * in order to initialize the Unipro link startup procedure.
695 * Once the Unipro links are up, the device connected to the controller
696 * is detected.
697 *
698 * Returns 0 on success, non-zero value on failure
699 */
700static int ufshcd_dme_link_startup(struct ufs_hba *hba)
701{
702 struct uic_command *uic_cmd;
703 unsigned long flags;
704
705 /* check if controller is ready to accept UIC commands */
706 if (((readl(hba->mmio_base + REG_CONTROLLER_STATUS)) &
707 UIC_COMMAND_READY) == 0x0) {
708 dev_err(hba->dev,
709 "Controller not ready"
710 " to accept UIC commands\n");
711 return -EIO;
712 }
713
714 spin_lock_irqsave(hba->host->host_lock, flags);
715
716 /* form UIC command */
717 uic_cmd = &hba->active_uic_cmd;
718 uic_cmd->command = UIC_CMD_DME_LINK_STARTUP;
719 uic_cmd->argument1 = 0;
720 uic_cmd->argument2 = 0;
721 uic_cmd->argument3 = 0;
722
723 /* enable UIC related interrupts */
724 hba->int_enable_mask |= UIC_COMMAND_COMPL;
725 ufshcd_int_config(hba, UFSHCD_INT_ENABLE);
726
727 /* sending UIC commands to controller */
728 ufshcd_send_uic_command(hba, uic_cmd);
729 spin_unlock_irqrestore(hba->host->host_lock, flags);
730 return 0;
731}
732
733/**
734 * ufshcd_make_hba_operational - Make UFS controller operational
735 * @hba: per adapter instance
736 *
737 * To bring UFS host controller to operational state,
738 * 1. Check if device is present
739 * 2. Configure run-stop-registers
740 * 3. Enable required interrupts
741 * 4. Configure interrupt aggregation
742 *
743 * Returns 0 on success, non-zero value on failure
744 */
745static int ufshcd_make_hba_operational(struct ufs_hba *hba)
746{
747 int err = 0;
748 u32 reg;
749
750 /* check if device present */
751 reg = readl((hba->mmio_base + REG_CONTROLLER_STATUS));
752 if (!ufshcd_is_device_present(reg)) {
753 dev_err(hba->dev, "cc: Device not present\n");
754 err = -ENXIO;
755 goto out;
756 }
757
758 /*
759 * UCRDY, UTMRLDY and UTRLRDY bits must be 1
760 * DEI, HEI bits must be 0
761 */
762 if (!(ufshcd_get_lists_status(reg))) {
763 ufshcd_enable_run_stop_reg(hba);
764 } else {
765 dev_err(hba->dev,
766 "Host controller not ready to process requests");
767 err = -EIO;
768 goto out;
769 }
770
771 /* Enable required interrupts */
772 hba->int_enable_mask |= (UTP_TRANSFER_REQ_COMPL |
773 UIC_ERROR |
774 UTP_TASK_REQ_COMPL |
775 DEVICE_FATAL_ERROR |
776 CONTROLLER_FATAL_ERROR |
777 SYSTEM_BUS_FATAL_ERROR);
778 ufshcd_int_config(hba, UFSHCD_INT_ENABLE);
779
780 /* Configure interrupt aggregation */
781 ufshcd_config_int_aggr(hba, INT_AGGR_CONFIG);
782
783 if (hba->ufshcd_state == UFSHCD_STATE_RESET)
784 scsi_unblock_requests(hba->host);
785
786 hba->ufshcd_state = UFSHCD_STATE_OPERATIONAL;
787 scsi_scan_host(hba->host);
788out:
789 return err;
790}
791
792/**
793 * ufshcd_hba_enable - initialize the controller
794 * @hba: per adapter instance
795 *
796 * The controller resets itself and the controller firmware initialization
797 * sequence kicks off. When the controller is ready, it sets
798 * the Host Controller Enable bit to 1.
799 *
800 * Returns 0 on success, non-zero value on failure
801 */
802static int ufshcd_hba_enable(struct ufs_hba *hba)
803{
804 int retry;
805
806 /*
807 * msleep of 1 and 5 used in this function might result in msleep(20),
808 * but it was necessary to send the UFS FPGA to reset mode during
809 * development and testing of this driver. msleep can be changed to
810 * mdelay and retry count can be reduced based on the controller.
811 */
812 if (!ufshcd_is_hba_active(hba)) {
813
814 /* change controller state to "reset state" */
815 ufshcd_hba_stop(hba);
816
817 /*
818 * This delay is based on the testing done with UFS host
819 * controller FPGA. The delay can be changed based on the
820 * host controller used.
821 */
822 msleep(5);
823 }
824
825 /* start controller initialization sequence */
826 ufshcd_hba_start(hba);
827
828 /*
829 * To initialize a UFS host controller HCE bit must be set to 1.
830 * During initialization the HCE bit value changes from 1->0->1.
831 * When the host controller completes initialization sequence
832 * it sets the value of HCE bit to 1. The same HCE bit is read back
833 * to check if the controller has completed initialization sequence.
834 * So without this delay the value HCE = 1, set in the previous
835 * instruction might be read back.
836 * This delay can be changed based on the controller.
837 */
838 msleep(1);
839
840 /* wait for the host controller to complete initialization */
841 retry = 10;
842 while (ufshcd_is_hba_active(hba)) {
843 if (retry) {
844 retry--;
845 } else {
846 dev_err(hba->dev,
847 "Controller enable failed\n");
848 return -EIO;
849 }
850 msleep(5);
851 }
852 return 0;
853}
854
855/**
856 * ufshcd_initialize_hba - start the initialization process
857 * @hba: per adapter instance
858 *
859 * 1. Enable the controller via ufshcd_hba_enable.
860 * 2. Program the Transfer Request List Address with the starting address of
861 * UTRDL.
862 * 3. Program the Task Management Request List Address with starting address
863 * of UTMRDL.
864 *
865 * Returns 0 on success, non-zero value on failure.
866 */
867static int ufshcd_initialize_hba(struct ufs_hba *hba)
868{
869 if (ufshcd_hba_enable(hba))
870 return -EIO;
871
872 /* Configure UTRL and UTMRL base address registers */
873 writel(lower_32_bits(hba->utrdl_dma_addr),
874 (hba->mmio_base + REG_UTP_TRANSFER_REQ_LIST_BASE_L));
875 writel(upper_32_bits(hba->utrdl_dma_addr),
876 (hba->mmio_base + REG_UTP_TRANSFER_REQ_LIST_BASE_H));
877 writel(lower_32_bits(hba->utmrdl_dma_addr),
878 (hba->mmio_base + REG_UTP_TASK_REQ_LIST_BASE_L));
879 writel(upper_32_bits(hba->utmrdl_dma_addr),
880 (hba->mmio_base + REG_UTP_TASK_REQ_LIST_BASE_H));
881
882 /* Initialize unipro link startup procedure */
883 return ufshcd_dme_link_startup(hba);
884}
885
886/**
887 * ufshcd_do_reset - reset the host controller
888 * @hba: per adapter instance
889 *
890 * Returns SUCCESS/FAILED
891 */
892static int ufshcd_do_reset(struct ufs_hba *hba)
893{
894 struct ufshcd_lrb *lrbp;
895 unsigned long flags;
896 int tag;
897
898 /* block commands from midlayer */
899 scsi_block_requests(hba->host);
900
901 spin_lock_irqsave(hba->host->host_lock, flags);
902 hba->ufshcd_state = UFSHCD_STATE_RESET;
903
904 /* send controller to reset state */
905 ufshcd_hba_stop(hba);
906 spin_unlock_irqrestore(hba->host->host_lock, flags);
907
908 /* abort outstanding commands */
909 for (tag = 0; tag < hba->nutrs; tag++) {
910 if (test_bit(tag, &hba->outstanding_reqs)) {
911 lrbp = &hba->lrb[tag];
912 scsi_dma_unmap(lrbp->cmd);
913 lrbp->cmd->result = DID_RESET << 16;
914 lrbp->cmd->scsi_done(lrbp->cmd);
915 lrbp->cmd = NULL;
916 }
917 }
918
919 /* clear outstanding request/task bit maps */
920 hba->outstanding_reqs = 0;
921 hba->outstanding_tasks = 0;
922
923 /* start the initialization process */
924 if (ufshcd_initialize_hba(hba)) {
925 dev_err(hba->dev,
926 "Reset: Controller initialization failed\n");
927 return FAILED;
928 }
929 return SUCCESS;
930}
931
932/**
933 * ufshcd_slave_alloc - handle initial SCSI device configurations
934 * @sdev: pointer to SCSI device
935 *
936 * Returns success
937 */
938static int ufshcd_slave_alloc(struct scsi_device *sdev)
939{
940 struct ufs_hba *hba;
941
942 hba = shost_priv(sdev->host);
943 sdev->tagged_supported = 1;
944
945 /* Mode sense(6) is not supported by UFS, so use Mode sense(10) */
946 sdev->use_10_for_ms = 1;
947 scsi_set_tag_type(sdev, MSG_SIMPLE_TAG);
948
949 /*
950 * Inform SCSI Midlayer that the LUN queue depth is same as the
951 * controller queue depth. If a LUN queue depth is less than the
952 * controller queue depth and if the LUN reports
953 * SAM_STAT_TASK_SET_FULL, the LUN queue depth will be adjusted
954 * with scsi_adjust_queue_depth.
955 */
956 scsi_activate_tcq(sdev, hba->nutrs);
957 return 0;
958}
959
960/**
961 * ufshcd_slave_destroy - remove SCSI device configurations
962 * @sdev: pointer to SCSI device
963 */
964static void ufshcd_slave_destroy(struct scsi_device *sdev)
965{
966 struct ufs_hba *hba;
967
968 hba = shost_priv(sdev->host);
969 scsi_deactivate_tcq(sdev, hba->nutrs);
970}
971
972/**
973 * ufshcd_task_req_compl - handle task management request completion
974 * @hba: per adapter instance
975 * @index: index of the completed request
976 *
977 * Returns SUCCESS/FAILED
978 */
979static int ufshcd_task_req_compl(struct ufs_hba *hba, u32 index)
980{
981 struct utp_task_req_desc *task_req_descp;
982 struct utp_upiu_task_rsp *task_rsp_upiup;
983 unsigned long flags;
984 int ocs_value;
985 int task_result;
986
987 spin_lock_irqsave(hba->host->host_lock, flags);
988
989 /* Clear completed tasks from outstanding_tasks */
990 __clear_bit(index, &hba->outstanding_tasks);
991
992 task_req_descp = hba->utmrdl_base_addr;
993 ocs_value = ufshcd_get_tmr_ocs(&task_req_descp[index]);
994
995 if (ocs_value == OCS_SUCCESS) {
996 task_rsp_upiup = (struct utp_upiu_task_rsp *)
997 task_req_descp[index].task_rsp_upiu;
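   /*
    * The task management service response is carried in dword_1
    * of the response UPIU header; mask and shift it out below.
    */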
998 task_result = be32_to_cpu(task_rsp_upiup->header.dword_1);
999 task_result = ((task_result & MASK_TASK_RESPONSE) >> 8);
1000
1001 if (task_result != UPIU_TASK_MANAGEMENT_FUNC_COMPL &&
1002 task_result != UPIU_TASK_MANAGEMENT_FUNC_SUCCEEDED)
1003 task_result = FAILED;
1004 else
1005 task_result = SUCCESS;
1006 } else {
1007 task_result = FAILED;
1008 dev_err(hba->dev,
1009 "trc: Invalid ocs = %x\n", ocs_value);
1010 }
1011 spin_unlock_irqrestore(hba->host->host_lock, flags);
1012 return task_result;
1013}
1014
1015/**
1016 * ufshcd_adjust_lun_qdepth - Update LUN queue depth if device responds with
1017 * SAM_STAT_TASK_SET_FULL SCSI command status.
1018 * @cmd: pointer to SCSI command
1019 */
1020static void ufshcd_adjust_lun_qdepth(struct scsi_cmnd *cmd)
1021{
1022 struct ufs_hba *hba;
1023 int i;
1024 int lun_qdepth = 0;
1025
1026 hba = shost_priv(cmd->device->host);
1027
1028 /*
1029 * LUN queue depth can be obtained by counting outstanding commands
1030 * on the LUN.
1031 */
1032 for (i = 0; i < hba->nutrs; i++) {
1033 if (test_bit(i, &hba->outstanding_reqs)) {
1034
1035 /*
1036 * Check if the outstanding command belongs
1037 * to the LUN which reported SAM_STAT_TASK_SET_FULL.
1038 */
1039 if (cmd->device->lun == hba->lrb[i].lun)
1040 lun_qdepth++;
1041 }
1042 }
1043
1044 /*
1045 * LUN queue depth will be total outstanding commands, except the
1046 * command for which the LUN reported SAM_STAT_TASK_SET_FULL.
1047 */
1048 scsi_adjust_queue_depth(cmd->device, MSG_SIMPLE_TAG, lun_qdepth - 1);
1049}
1050
1051/**
1052 * ufshcd_scsi_cmd_status - Update SCSI command result based on SCSI status
1053 * @lrb: pointer to local reference block of completed command
1054 * @scsi_status: SCSI command status
1055 *
1056 * Returns value based on SCSI command status
1057 */
1058static inline int
1059ufshcd_scsi_cmd_status(struct ufshcd_lrb *lrbp, int scsi_status)
1060{
1061 int result = 0;
1062
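 /*
  * The SCSI result word packs the host byte (bits 16-23), message byte
  * (bits 8-15) and SCSI status byte (bits 0-7).
  */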
1063 switch (scsi_status) {
1064 case SAM_STAT_GOOD:
1065 result |= DID_OK << 16 |
1066 COMMAND_COMPLETE << 8 |
1067 SAM_STAT_GOOD;
1068 break;
1069 case SAM_STAT_CHECK_CONDITION:
1070 result |= DID_OK << 16 |
1071 COMMAND_COMPLETE << 8 |
1072 SAM_STAT_CHECK_CONDITION;
1073 ufshcd_copy_sense_data(lrbp);
1074 break;
1075 case SAM_STAT_BUSY:
1076 result |= SAM_STAT_BUSY;
1077 break;
1078 case SAM_STAT_TASK_SET_FULL:
1079
1080 /*
1081 * If a LUN reports SAM_STAT_TASK_SET_FULL, then the LUN queue
1082 * depth needs to be adjusted to the exact number of
1083 * outstanding commands the LUN can handle at any given time.
1084 */
1085 ufshcd_adjust_lun_qdepth(lrbp->cmd);
1086 result |= SAM_STAT_TASK_SET_FULL;
1087 break;
1088 case SAM_STAT_TASK_ABORTED:
1089 result |= SAM_STAT_TASK_ABORTED;
1090 break;
1091 default:
1092 result |= DID_ERROR << 16;
1093 break;
1094 } /* end of switch */
1095
1096 return result;
1097}
1098
1099/**
1100 * ufshcd_transfer_rsp_status - Get overall status of the response
1101 * @hba: per adapter instance
1102 * @lrb: pointer to local reference block of completed command
1103 *
1104 * Returns result of the command to notify SCSI midlayer
1105 */
1106static inline int
1107ufshcd_transfer_rsp_status(struct ufs_hba *hba, struct ufshcd_lrb *lrbp)
1108{
1109 int result = 0;
1110 int scsi_status;
1111 int ocs;
1112
1113 /* overall command status of utrd */
1114 ocs = ufshcd_get_tr_ocs(lrbp);
1115
1116 switch (ocs) {
1117 case OCS_SUCCESS:
1118
1119 /* check if the returned transfer response is valid */
1120 result = ufshcd_is_valid_req_rsp(lrbp->ucd_rsp_ptr);
1121 if (result) {
1122 dev_err(hba->dev,
1123 "Invalid response = %x\n", result);
1124 break;
1125 }
1126
1127 /*
1128 * get the response UPIU result to extract
1129 * the SCSI command status
1130 */
1131 result = ufshcd_get_rsp_upiu_result(lrbp->ucd_rsp_ptr);
1132
1133 /*
1134 * get the result based on SCSI status response
1135 * to notify the SCSI midlayer of the command status
1136 */
1137 scsi_status = result & MASK_SCSI_STATUS;
1138 result = ufshcd_scsi_cmd_status(lrbp, scsi_status);
1139 break;
1140 case OCS_ABORTED:
1141 result |= DID_ABORT << 16;
1142 break;
1143 case OCS_INVALID_CMD_TABLE_ATTR:
1144 case OCS_INVALID_PRDT_ATTR:
1145 case OCS_MISMATCH_DATA_BUF_SIZE:
1146 case OCS_MISMATCH_RESP_UPIU_SIZE:
1147 case OCS_PEER_COMM_FAILURE:
1148 case OCS_FATAL_ERROR:
1149 default:
1150 result |= DID_ERROR << 16;
1151 dev_err(hba->dev,
1152 "OCS error from controller = %x\n", ocs);
1153 break;
1154 } /* end of switch */
1155
1156 return result;
1157}
1158
1159/**
1160 * ufshcd_transfer_req_compl - handle SCSI and query command completion
1161 * @hba: per adapter instance
1162 */
1163static void ufshcd_transfer_req_compl(struct ufs_hba *hba)
1164{
1165 struct ufshcd_lrb *lrb;
1166 unsigned long completed_reqs;
1167 u32 tr_doorbell;
1168 int result;
1169 int index;
1170
1171 lrb = hba->lrb;
1172 tr_doorbell =
1173 readl(hba->mmio_base + REG_UTP_TRANSFER_REQ_DOOR_BELL);
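 /*
  * The controller clears a doorbell bit when the corresponding request
  * completes, so XOR-ing with outstanding_reqs yields the set of
  * completed slots.
  */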
1174 completed_reqs = tr_doorbell ^ hba->outstanding_reqs;
1175
1176 for (index = 0; index < hba->nutrs; index++) {
1177 if (test_bit(index, &completed_reqs)) {
1178
1179 result = ufshcd_transfer_rsp_status(hba, &lrb[index]);
1180
1181 if (lrb[index].cmd) {
1182 scsi_dma_unmap(lrb[index].cmd);
1183 lrb[index].cmd->result = result;
1184 lrb[index].cmd->scsi_done(lrb[index].cmd);
1185
1186 /* Mark completed command as NULL in LRB */
1187 lrb[index].cmd = NULL;
1188 }
1189 } /* end of if */
1190 } /* end of for */
1191
1192 /* clear corresponding bits of completed commands */
1193 hba->outstanding_reqs ^= completed_reqs;
1194
1195 /* Reset interrupt aggregation counters */
1196 ufshcd_config_int_aggr(hba, INT_AGGR_RESET);
1197}
1198
1199/**
1200 * ufshcd_uic_cc_handler - handle UIC command completion
1201 * @work: pointer to a work queue structure
1202 *
1203 * Brings the host controller to operational state after a successful link startup
1204 */
1205static void ufshcd_uic_cc_handler(struct work_struct *work)
1206{
1207 struct ufs_hba *hba;
1208
1209 hba = container_of(work, struct ufs_hba, uic_workq);
1210
1211 if ((hba->active_uic_cmd.command == UIC_CMD_DME_LINK_STARTUP) &&
1212 !(ufshcd_get_uic_cmd_result(hba))) {
1213
1214 if (ufshcd_make_hba_operational(hba))
1215 dev_err(hba->dev,
1216 "cc: hba not operational state\n");
1217 return;
1218 }
1219}
1220
1221/**
1222 * ufshcd_fatal_err_handler - handle fatal errors
1223 * @work: pointer to a work queue structure
1224 */
1225static void ufshcd_fatal_err_handler(struct work_struct *work)
1226{
1227 struct ufs_hba *hba;
1228 hba = container_of(work, struct ufs_hba, feh_workq);
1229
1230 /* check if reset is already in progress */
1231 if (hba->ufshcd_state != UFSHCD_STATE_RESET)
1232 ufshcd_do_reset(hba);
1233}
1234
1235/**
1236 * ufshcd_err_handler - Check for fatal errors
1237 * @hba: per adapter instance
1238 */
1239static void ufshcd_err_handler(struct ufs_hba *hba)
1240{
1241 u32 reg;
1242
1243 if (hba->errors & INT_FATAL_ERRORS)
1244 goto fatal_eh;
1245
1246 if (hba->errors & UIC_ERROR) {
1247
1248 reg = readl(hba->mmio_base +
1249 REG_UIC_ERROR_CODE_PHY_ADAPTER_LAYER);
1250 if (reg & UIC_DATA_LINK_LAYER_ERROR_PA_INIT)
1251 goto fatal_eh;
1252 }
1253 return;
1254fatal_eh:
1255 hba->ufshcd_state = UFSHCD_STATE_ERROR;
1256 schedule_work(&hba->feh_workq);
1257}
1258
1259/**
1260 * ufshcd_tmc_handler - handle task management function completion
1261 * @hba: per adapter instance
1262 */
1263static void ufshcd_tmc_handler(struct ufs_hba *hba)
1264{
1265 u32 tm_doorbell;
1266
1267 tm_doorbell = readl(hba->mmio_base + REG_UTP_TASK_REQ_DOOR_BELL);
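 /*
  * Completed task slots are those whose doorbell bits have been cleared
  * by the controller.
  */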
1268 hba->tm_condition = tm_doorbell ^ hba->outstanding_tasks;
1269 wake_up_interruptible(&hba->ufshcd_tm_wait_queue);
1270}
1271
1272/**
1273 * ufshcd_sl_intr - Interrupt service routine
1274 * @hba: per adapter instance
1275 * @intr_status: contains interrupts generated by the controller
1276 */
1277static void ufshcd_sl_intr(struct ufs_hba *hba, u32 intr_status)
1278{
1279 hba->errors = UFSHCD_ERROR_MASK & intr_status;
1280 if (hba->errors)
1281 ufshcd_err_handler(hba);
1282
1283 if (intr_status & UIC_COMMAND_COMPL)
1284 schedule_work(&hba->uic_workq);
1285
1286 if (intr_status & UTP_TASK_REQ_COMPL)
1287 ufshcd_tmc_handler(hba);
1288
1289 if (intr_status & UTP_TRANSFER_REQ_COMPL)
1290 ufshcd_transfer_req_compl(hba);
1291}
1292
1293/**
1294 * ufshcd_intr - Main interrupt service routine
1295 * @irq: irq number
1296 * @__hba: pointer to adapter instance
1297 *
1298 * Returns IRQ_HANDLED - If interrupt is valid
1299 * IRQ_NONE - If invalid interrupt
1300 */
1301static irqreturn_t ufshcd_intr(int irq, void *__hba)
1302{
1303 u32 intr_status;
1304 irqreturn_t retval = IRQ_NONE;
1305 struct ufs_hba *hba = __hba;
1306
1307 spin_lock(hba->host->host_lock);
1308 intr_status = readl(hba->mmio_base + REG_INTERRUPT_STATUS);
1309
1310 if (intr_status) {
1311 ufshcd_sl_intr(hba, intr_status);
1312
1313 /* If UFSHCI 1.0 then clear interrupt status register */
1314 if (hba->ufs_version == UFSHCI_VERSION_10)
1315 writel(intr_status,
1316 (hba->mmio_base + REG_INTERRUPT_STATUS));
1317 retval = IRQ_HANDLED;
1318 }
1319 spin_unlock(hba->host->host_lock);
1320 return retval;
1321}
1322
1323/**
1324 * ufshcd_issue_tm_cmd - issues task management commands to controller
1325 * @hba: per adapter instance
1326 * @lrbp: pointer to local reference block
1327 *
1328 * Returns SUCCESS/FAILED
1329 */
1330static int
1331ufshcd_issue_tm_cmd(struct ufs_hba *hba,
1332 struct ufshcd_lrb *lrbp,
1333 u8 tm_function)
1334{
1335 struct utp_task_req_desc *task_req_descp;
1336 struct utp_upiu_task_req *task_req_upiup;
1337 struct Scsi_Host *host;
1338 unsigned long flags;
1339 int free_slot = 0;
1340 int err;
1341
1342 host = hba->host;
1343
1344 spin_lock_irqsave(host->host_lock, flags);
1345
1346 /* If task management queue is full */
1347 free_slot = ufshcd_get_tm_free_slot(hba);
1348 if (free_slot >= hba->nutmrs) {
1349 spin_unlock_irqrestore(host->host_lock, flags);
1350 dev_err(hba->dev, "Task management queue full\n");
1351 err = FAILED;
1352 goto out;
1353 }
1354
1355 task_req_descp = hba->utmrdl_base_addr;
1356 task_req_descp += free_slot;
1357
1358 /* Configure task request descriptor */
1359 task_req_descp->header.dword_0 = cpu_to_le32(UTP_REQ_DESC_INT_CMD);
1360 task_req_descp->header.dword_2 =
1361 cpu_to_le32(OCS_INVALID_COMMAND_STATUS);
1362
1363 /* Configure task request UPIU */
1364 task_req_upiup =
1365 (struct utp_upiu_task_req *) task_req_descp->task_req_upiu;
1366 task_req_upiup->header.dword_0 =
1367 cpu_to_be32(UPIU_HEADER_DWORD(UPIU_TRANSACTION_TASK_REQ, 0,
1368 lrbp->lun, lrbp->task_tag));
1369 task_req_upiup->header.dword_1 =
1370 cpu_to_be32(UPIU_HEADER_DWORD(0, tm_function, 0, 0));
1371
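 /*
  * input_param1/input_param2 carry the LUN and the task tag of the
  * command being managed, converted to big-endian in place.
  */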
1372 task_req_upiup->input_param1 = lrbp->lun;
1373 task_req_upiup->input_param1 =
1374 cpu_to_be32(task_req_upiup->input_param1);
1375 task_req_upiup->input_param2 = lrbp->task_tag;
1376 task_req_upiup->input_param2 =
1377 cpu_to_be32(task_req_upiup->input_param2);
1378
1379 /* send command to the controller */
1380 __set_bit(free_slot, &hba->outstanding_tasks);
1381 writel((1 << free_slot),
1382 (hba->mmio_base + REG_UTP_TASK_REQ_DOOR_BELL));
1383
1384 spin_unlock_irqrestore(host->host_lock, flags);
1385
1386 /* wait until the task management command is completed */
1387 err =
1388 wait_event_interruptible_timeout(hba->ufshcd_tm_wait_queue,
1389 (test_bit(free_slot,
1390 &hba->tm_condition) != 0),
1391 60 * HZ);
1392 if (!err) {
1393 dev_err(hba->dev,
1394 "Task management command timed-out\n");
1395 err = FAILED;
1396 goto out;
1397 }
1398 clear_bit(free_slot, &hba->tm_condition);
1399 err = ufshcd_task_req_compl(hba, free_slot);
1400out:
1401 return err;
1402}
1403
1404/**
1405 * ufshcd_device_reset - reset device and abort all the pending commands
1406 * @cmd: SCSI command pointer
1407 *
1408 * Returns SUCCESS/FAILED
1409 */
1410static int ufshcd_device_reset(struct scsi_cmnd *cmd)
1411{
1412 struct Scsi_Host *host;
1413 struct ufs_hba *hba;
1414 unsigned int tag;
1415 u32 pos;
1416 int err;
1417
1418 host = cmd->device->host;
1419 hba = shost_priv(host);
1420 tag = cmd->request->tag;
1421
1422 err = ufshcd_issue_tm_cmd(hba, &hba->lrb[tag], UFS_LOGICAL_RESET);
1423 if (err == FAILED)
1424 goto out;
1425
1426 for (pos = 0; pos < hba->nutrs; pos++) {
1427 if (test_bit(pos, &hba->outstanding_reqs) &&
1428 (hba->lrb[tag].lun == hba->lrb[pos].lun)) {
1429
1430 /* clear the respective UTRLCLR register bit */
1431 ufshcd_utrl_clear(hba, pos);
1432
1433 clear_bit(pos, &hba->outstanding_reqs);
1434
1435 if (hba->lrb[pos].cmd) {
1436 scsi_dma_unmap(hba->lrb[pos].cmd);
1437 hba->lrb[pos].cmd->result =
1438 DID_ABORT << 16;
1439 hba->lrb[pos].cmd->scsi_done(cmd);
1440 hba->lrb[pos].cmd = NULL;
1441 }
1442 }
1443 } /* end of for */
1444out:
1445 return err;
1446}
1447
1448/**
1449 * ufshcd_host_reset - Main reset function registered with scsi layer
1450 * @cmd: SCSI command pointer
1451 *
1452 * Returns SUCCESS/FAILED
1453 */
1454static int ufshcd_host_reset(struct scsi_cmnd *cmd)
1455{
1456 struct ufs_hba *hba;
1457
1458 hba = shost_priv(cmd->device->host);
1459
1460 if (hba->ufshcd_state == UFSHCD_STATE_RESET)
1461 return SUCCESS;
1462
1463 return ufshcd_do_reset(hba);
1464}
1465
1466/**
1467 * ufshcd_abort - abort a specific command
1468 * @cmd: SCSI command pointer
1469 *
1470 * Returns SUCCESS/FAILED
1471 */
1472static int ufshcd_abort(struct scsi_cmnd *cmd)
1473{
1474 struct Scsi_Host *host;
1475 struct ufs_hba *hba;
1476 unsigned long flags;
1477 unsigned int tag;
1478 int err;
1479
1480 host = cmd->device->host;
1481 hba = shost_priv(host);
1482 tag = cmd->request->tag;
1483
1484 spin_lock_irqsave(host->host_lock, flags);
1485
1486 /* check if command is still pending */
1487 if (!(test_bit(tag, &hba->outstanding_reqs))) {
1488 err = FAILED;
1489 spin_unlock_irqrestore(host->host_lock, flags);
1490 goto out;
1491 }
1492 spin_unlock_irqrestore(host->host_lock, flags);
1493
1494 err = ufshcd_issue_tm_cmd(hba, &hba->lrb[tag], UFS_ABORT_TASK);
1495 if (err == FAILED)
1496 goto out;
1497
1498 scsi_dma_unmap(cmd);
1499
1500 spin_lock_irqsave(host->host_lock, flags);
1501
1502 /* clear the respective UTRLCLR register bit */
1503 ufshcd_utrl_clear(hba, tag);
1504
1505 __clear_bit(tag, &hba->outstanding_reqs);
1506 hba->lrb[tag].cmd = NULL;
1507 spin_unlock_irqrestore(host->host_lock, flags);
1508out:
1509 return err;
1510}
1511
1512static struct scsi_host_template ufshcd_driver_template = {
1513 .module = THIS_MODULE,
1514 .name = UFSHCD,
1515 .proc_name = UFSHCD,
1516 .queuecommand = ufshcd_queuecommand,
1517 .slave_alloc = ufshcd_slave_alloc,
1518 .slave_destroy = ufshcd_slave_destroy,
1519 .eh_abort_handler = ufshcd_abort,
1520 .eh_device_reset_handler = ufshcd_device_reset,
1521 .eh_host_reset_handler = ufshcd_host_reset,
1522 .this_id = -1,
1523 .sg_tablesize = SG_ALL,
1524 .cmd_per_lun = UFSHCD_CMD_PER_LUN,
1525 .can_queue = UFSHCD_CAN_QUEUE,
1526};
1527
1528/**
1529 * ufshcd_suspend - suspend power management function
1530 * @hba: per adapter instance
1531 * @state: power state
1532 *
1533 * Returns -ENOSYS
1534 */
1535 int ufshcd_suspend(struct ufs_hba *hba, pm_message_t state)
1536{
1537 /*
1538 * TODO:
1539 * 1. Block SCSI requests from SCSI midlayer
1540 * 2. Change the internal driver state to non operational
1541 * 3. Set UTRLRSR and UTMRLRSR bits to zero
1542 * 4. Wait until outstanding commands are completed
1543 * 5. Set HCE to zero to send the UFS host controller to reset state
1544 */
1545
1546 return -ENOSYS;
1547}
1548EXPORT_SYMBOL_GPL(ufshcd_suspend);
1549
1550/**
1551 * ufshcd_resume - resume power management function
1552 * @hba: per adapter instance
1553 *
1554 * Returns -ENOSYS
1555 */
1556int ufshcd_resume(struct ufs_hba *hba)
1557{
1558 /*
1559 * TODO:
1560 * 1. Set HCE to 1, to start the UFS host controller
1561 * initialization process
1562 * 2. Set UTRLRSR and UTMRLRSR bits to 1
1563 * 3. Change the internal driver state to operational
1564 * 4. Unblock SCSI requests from SCSI midlayer
1565 */
1566
1567 return -ENOSYS;
1568}
1569EXPORT_SYMBOL_GPL(ufshcd_resume);
1570
1571/**
1572 * ufshcd_hba_free - free allocated memory for
1573 * host memory space data structures
1574 * @hba: per adapter instance
1575 */
1576static void ufshcd_hba_free(struct ufs_hba *hba)
1577{
1578 iounmap(hba->mmio_base);
1579 ufshcd_free_hba_memory(hba);
1580}
1581
1582/**
1583 * ufshcd_remove - de-allocate SCSI host and host memory space
1584 * data structure memory
1585 * @hba - per adapter instance
1586 */
1587void ufshcd_remove(struct ufs_hba *hba)
1588{
1589 /* disable interrupts */
1590 ufshcd_int_config(hba, UFSHCD_INT_DISABLE);
1591
1592 ufshcd_hba_stop(hba);
1593 ufshcd_hba_free(hba);
1594
1595 scsi_remove_host(hba->host);
1596 scsi_host_put(hba->host);
1597}
1598EXPORT_SYMBOL_GPL(ufshcd_remove);
1599
1600/**
1601 * ufshcd_init - Driver initialization routine
1602 * @dev: pointer to device handle
1603 * @hba_handle: driver private handle
1604 * @mmio_base: base register address
1605 * @irq: Interrupt line of device
1606 * Returns 0 on success, non-zero value on failure
1607 */
1608int ufshcd_init(struct device *dev, struct ufs_hba **hba_handle,
1609 void __iomem *mmio_base, unsigned int irq)
1610{
1611 struct Scsi_Host *host;
1612 struct ufs_hba *hba;
1613 int err;
1614
1615 if (!dev) {
1616 dev_err(dev,
1617 "Invalid memory reference for dev is NULL\n");
1618 err = -ENODEV;
1619 goto out_error;
1620 }
1621
1622 if (!mmio_base) {
1623 dev_err(dev,
1624 "Invalid memory reference for mmio_base is NULL\n");
1625 err = -ENODEV;
1626 goto out_error;
1627 }
1628
1629 host = scsi_host_alloc(&ufshcd_driver_template,
1630 sizeof(struct ufs_hba));
1631 if (!host) {
1632 dev_err(dev, "scsi_host_alloc failed\n");
1633 err = -ENOMEM;
1634 goto out_error;
1635 }
1636 hba = shost_priv(host);
1637 hba->host = host;
1638 hba->dev = dev;
1639 hba->mmio_base = mmio_base;
1640 hba->irq = irq;
1641
1642 /* Read capabilities registers */
1643 ufshcd_hba_capabilities(hba);
1644
1645 /* Get UFS version supported by the controller */
1646 hba->ufs_version = ufshcd_get_ufs_version(hba);
1647
1648 /* Allocate memory for host memory space */
1649 err = ufshcd_memory_alloc(hba);
1650 if (err) {
1651 dev_err(hba->dev, "Memory allocation failed\n");
1652 goto out_disable;
1653 }
1654
1655 /* Configure LRB */
1656 ufshcd_host_memory_configure(hba);
1657
1658 host->can_queue = hba->nutrs;
1659 host->cmd_per_lun = hba->nutrs;
1660 host->max_id = UFSHCD_MAX_ID;
1661 host->max_lun = UFSHCD_MAX_LUNS;
1662 host->max_channel = UFSHCD_MAX_CHANNEL;
1663 host->unique_id = host->host_no;
1664 host->max_cmd_len = MAX_CDB_SIZE;
1665
1666 /* Initialize wait queue for task management */
1667 init_waitqueue_head(&hba->ufshcd_tm_wait_queue);
1668
1669 /* Initialize work queues */
1670 INIT_WORK(&hba->uic_workq, ufshcd_uic_cc_handler);
1671 INIT_WORK(&hba->feh_workq, ufshcd_fatal_err_handler);
1672
1673 /* IRQ registration */
1674 err = request_irq(irq, ufshcd_intr, IRQF_SHARED, UFSHCD, hba);
1675 if (err) {
1676 dev_err(hba->dev, "request irq failed\n");
1677 goto out_lrb_free;
1678 }
1679
1680 /* Enable SCSI tag mapping */
1681 err = scsi_init_shared_tag_map(host, host->can_queue);
1682 if (err) {
1683 dev_err(hba->dev, "init shared queue failed\n");
1684 goto out_free_irq;
1685 }
1686
1687 err = scsi_add_host(host, hba->dev);
1688 if (err) {
1689 dev_err(hba->dev, "scsi_add_host failed\n");
1690 goto out_free_irq;
1691 }
1692
1693 /* Initialization routine */
1694 err = ufshcd_initialize_hba(hba);
1695 if (err) {
1696 dev_err(hba->dev, "Initialization failed\n");
1697 goto out_remove_scsi_host;
1698 }
1699 *hba_handle = hba;
1700
1701 return 0;
1702
1703out_remove_scsi_host:
1704 scsi_remove_host(hba->host);
1705out_free_irq:
1706 free_irq(irq, hba);
1707out_lrb_free:
1708 ufshcd_free_hba_memory(hba);
1709out_disable:
1710 scsi_host_put(host);
1711out_error:
1712 return err;
1713}
1714EXPORT_SYMBOL_GPL(ufshcd_init);
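/*
 * Usage sketch (illustrative, not part of this file): a bus glue driver is
 * expected to map the controller registers, then call ufshcd_init() and pair
 * it with ufshcd_remove() on teardown, e.g.
 *
 *	struct ufs_hba *hba;
 *	int err = ufshcd_init(dev, &hba, mmio_base, irq);
 *	if (err)
 *		return err;
 *	...
 *	ufshcd_remove(hba);
 *
 * where dev, mmio_base and irq are the glue driver's device handle, mapped
 * register base and interrupt line.
 */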
1715
1716MODULE_AUTHOR("Santosh Yaraganavi <santosh.sy@samsung.com>");
1717MODULE_AUTHOR("Vinayak Holikatti <h.vinayak@samsung.com>");
1718MODULE_DESCRIPTION("Generic UFS host controller driver Core");
1719MODULE_LICENSE("GPL");
1720MODULE_VERSION(UFSHCD_DRIVER_VERSION);