Merge ../linus
[GitHub/mt8127/android_kernel_alcatel_ttab.git] / drivers / scsi / lpfc / lpfc_scsi.c
1 /*******************************************************************
2 * This file is part of the Emulex Linux Device Driver for *
3 * Fibre Channel Host Bus Adapters. *
4 * Copyright (C) 2004-2006 Emulex. All rights reserved. *
5 * EMULEX and SLI are trademarks of Emulex. *
6 * www.emulex.com *
7 * Portions Copyright (C) 2004-2005 Christoph Hellwig *
8 * *
9 * This program is free software; you can redistribute it and/or *
10 * modify it under the terms of version 2 of the GNU General *
11 * Public License as published by the Free Software Foundation. *
12 * This program is distributed in the hope that it will be useful. *
13 * ALL EXPRESS OR IMPLIED CONDITIONS, REPRESENTATIONS AND *
14 * WARRANTIES, INCLUDING ANY IMPLIED WARRANTY OF MERCHANTABILITY, *
15 * FITNESS FOR A PARTICULAR PURPOSE, OR NON-INFRINGEMENT, ARE *
16 * DISCLAIMED, EXCEPT TO THE EXTENT THAT SUCH DISCLAIMERS ARE HELD *
17 * TO BE LEGALLY INVALID. See the GNU General Public License for *
18 * more details, a copy of which can be found in the file COPYING *
19 * included with this package. *
20 *******************************************************************/
21
22 #include <linux/pci.h>
23 #include <linux/interrupt.h>
24 #include <linux/delay.h>
25
26 #include <scsi/scsi.h>
27 #include <scsi/scsi_device.h>
28 #include <scsi/scsi_host.h>
29 #include <scsi/scsi_tcq.h>
30 #include <scsi/scsi_transport_fc.h>
31
32 #include "lpfc_version.h"
33 #include "lpfc_hw.h"
34 #include "lpfc_sli.h"
35 #include "lpfc_disc.h"
36 #include "lpfc_scsi.h"
37 #include "lpfc.h"
38 #include "lpfc_logmsg.h"
39 #include "lpfc_crtn.h"
40
41 #define LPFC_RESET_WAIT 2
42 #define LPFC_ABORT_WAIT 2
43
44
45 /*
46 * This routine allocates a scsi buffer, which contains all the necessary
47 * information needed to initiate a SCSI I/O. The non-DMAable buffer region
48 * contains information to build the IOCB. The DMAable region contains
49 * memory for the FCP CMND, FCP RSP, and the inital BPL. In addition to
50 * allocating memeory, the FCP CMND and FCP RSP BDEs are setup in the BPL
51 * and the BPL BDE is setup in the IOCB.
52 */
53 static struct lpfc_scsi_buf *
54 lpfc_new_scsi_buf(struct lpfc_hba * phba)
55 {
56 struct lpfc_scsi_buf *psb;
57 struct ulp_bde64 *bpl;
58 IOCB_t *iocb;
59 dma_addr_t pdma_phys;
60 uint16_t iotag;
61
62 psb = kmalloc(sizeof(struct lpfc_scsi_buf), GFP_KERNEL);
63 if (!psb)
64 return NULL;
65 memset(psb, 0, sizeof (struct lpfc_scsi_buf));
66 psb->scsi_hba = phba;
67
68 /*
69 * Get memory from the pci pool to map the virt space to pci bus space
70 * for an I/O. The DMA buffer includes space for the struct fcp_cmnd,
71 * struct fcp_rsp and the number of bde's necessary to support the
72 * sg_tablesize.
73 */
74 psb->data = pci_pool_alloc(phba->lpfc_scsi_dma_buf_pool, GFP_KERNEL,
75 &psb->dma_handle);
76 if (!psb->data) {
77 kfree(psb);
78 return NULL;
79 }
80
81 /* Initialize virtual ptrs to dma_buf region. */
82 memset(psb->data, 0, phba->cfg_sg_dma_buf_size);
83
84 /* Allocate iotag for psb->cur_iocbq. */
85 iotag = lpfc_sli_next_iotag(phba, &psb->cur_iocbq);
86 if (iotag == 0) {
87 pci_pool_free(phba->lpfc_scsi_dma_buf_pool,
88 psb->data, psb->dma_handle);
89 kfree (psb);
90 return NULL;
91 }
92 psb->cur_iocbq.iocb_flag |= LPFC_IO_FCP;
93
94 psb->fcp_cmnd = psb->data;
95 psb->fcp_rsp = psb->data + sizeof(struct fcp_cmnd);
96 psb->fcp_bpl = psb->data + sizeof(struct fcp_cmnd) +
97 sizeof(struct fcp_rsp);
98
99 /* Initialize local short-hand pointers. */
100 bpl = psb->fcp_bpl;
101 pdma_phys = psb->dma_handle;
102
103 /*
104 * The first two bdes are the FCP_CMD and FCP_RSP. The balance are sg
105 * list bdes. Initialize the first two and leave the rest for
106 * queuecommand.
107 */
108 bpl->addrHigh = le32_to_cpu(putPaddrHigh(pdma_phys));
109 bpl->addrLow = le32_to_cpu(putPaddrLow(pdma_phys));
110 bpl->tus.f.bdeSize = sizeof (struct fcp_cmnd);
111 bpl->tus.f.bdeFlags = BUFF_USE_CMND;
112 bpl->tus.w = le32_to_cpu(bpl->tus.w);
113 bpl++;
114
115 /* Setup the physical region for the FCP RSP */
116 pdma_phys += sizeof (struct fcp_cmnd);
117 bpl->addrHigh = le32_to_cpu(putPaddrHigh(pdma_phys));
118 bpl->addrLow = le32_to_cpu(putPaddrLow(pdma_phys));
119 bpl->tus.f.bdeSize = sizeof (struct fcp_rsp);
120 bpl->tus.f.bdeFlags = (BUFF_USE_CMND | BUFF_USE_RCV);
121 bpl->tus.w = le32_to_cpu(bpl->tus.w);
122
123 /*
124 * Since the IOCB for the FCP I/O is built into this lpfc_scsi_buf,
125 * initialize it with all known data now.
126 */
127 pdma_phys += (sizeof (struct fcp_rsp));
128 iocb = &psb->cur_iocbq.iocb;
129 iocb->un.fcpi64.bdl.ulpIoTag32 = 0;
130 iocb->un.fcpi64.bdl.addrHigh = putPaddrHigh(pdma_phys);
131 iocb->un.fcpi64.bdl.addrLow = putPaddrLow(pdma_phys);
132 iocb->un.fcpi64.bdl.bdeSize = (2 * sizeof (struct ulp_bde64));
133 iocb->un.fcpi64.bdl.bdeFlags = BUFF_TYPE_BDL;
134 iocb->ulpBdeCount = 1;
135 iocb->ulpClass = CLASS3;
136
137 return psb;
138 }
139
140 static struct lpfc_scsi_buf*
141 lpfc_get_scsi_buf(struct lpfc_hba * phba)
142 {
143 struct lpfc_scsi_buf * lpfc_cmd = NULL;
144 struct list_head *scsi_buf_list = &phba->lpfc_scsi_buf_list;
145 unsigned long iflag = 0;
146
147 spin_lock_irqsave(&phba->scsi_buf_list_lock, iflag);
148 list_remove_head(scsi_buf_list, lpfc_cmd, struct lpfc_scsi_buf, list);
149 spin_unlock_irqrestore(&phba->scsi_buf_list_lock, iflag);
150 return lpfc_cmd;
151 }
152
153 static void
154 lpfc_release_scsi_buf(struct lpfc_hba * phba, struct lpfc_scsi_buf * psb)
155 {
156 unsigned long iflag = 0;
157
158 spin_lock_irqsave(&phba->scsi_buf_list_lock, iflag);
159 psb->pCmd = NULL;
160 list_add_tail(&psb->list, &phba->lpfc_scsi_buf_list);
161 spin_unlock_irqrestore(&phba->scsi_buf_list_lock, iflag);
162 }
163
164 static int
165 lpfc_scsi_prep_dma_buf(struct lpfc_hba * phba, struct lpfc_scsi_buf * lpfc_cmd)
166 {
167 struct scsi_cmnd *scsi_cmnd = lpfc_cmd->pCmd;
168 struct scatterlist *sgel = NULL;
169 struct fcp_cmnd *fcp_cmnd = lpfc_cmd->fcp_cmnd;
170 struct ulp_bde64 *bpl = lpfc_cmd->fcp_bpl;
171 IOCB_t *iocb_cmd = &lpfc_cmd->cur_iocbq.iocb;
172 dma_addr_t physaddr;
173 uint32_t i, num_bde = 0;
174 int datadir = scsi_cmnd->sc_data_direction;
175 int dma_error;
176
177 /*
178 * There are three possibilities here - use scatter-gather segment, use
179 * the single mapping, or neither. Start the lpfc command prep by
180 * bumping the bpl beyond the fcp_cmnd and fcp_rsp regions to the first
181 * data bde entry.
182 */
183 bpl += 2;
184 if (scsi_cmnd->use_sg) {
185 /*
186 * The driver stores the segment count returned from pci_map_sg
187 * because this a count of dma-mappings used to map the use_sg
188 * pages. They are not guaranteed to be the same for those
189 * architectures that implement an IOMMU.
190 */
191 sgel = (struct scatterlist *)scsi_cmnd->request_buffer;
192 lpfc_cmd->seg_cnt = dma_map_sg(&phba->pcidev->dev, sgel,
193 scsi_cmnd->use_sg, datadir);
194 if (lpfc_cmd->seg_cnt == 0)
195 return 1;
196
197 if (lpfc_cmd->seg_cnt > phba->cfg_sg_seg_cnt) {
198 printk(KERN_ERR "%s: Too many sg segments from "
199 "dma_map_sg. Config %d, seg_cnt %d",
200 __FUNCTION__, phba->cfg_sg_seg_cnt,
201 lpfc_cmd->seg_cnt);
202 dma_unmap_sg(&phba->pcidev->dev, sgel,
203 lpfc_cmd->seg_cnt, datadir);
204 return 1;
205 }
206
207 /*
208 * The driver established a maximum scatter-gather segment count
209 * during probe that limits the number of sg elements in any
210 * single scsi command. Just run through the seg_cnt and format
211 * the bde's.
212 */
213 for (i = 0; i < lpfc_cmd->seg_cnt; i++) {
214 physaddr = sg_dma_address(sgel);
215 bpl->addrLow = le32_to_cpu(putPaddrLow(physaddr));
216 bpl->addrHigh = le32_to_cpu(putPaddrHigh(physaddr));
217 bpl->tus.f.bdeSize = sg_dma_len(sgel);
218 if (datadir == DMA_TO_DEVICE)
219 bpl->tus.f.bdeFlags = 0;
220 else
221 bpl->tus.f.bdeFlags = BUFF_USE_RCV;
222 bpl->tus.w = le32_to_cpu(bpl->tus.w);
223 bpl++;
224 sgel++;
225 num_bde++;
226 }
227 } else if (scsi_cmnd->request_buffer && scsi_cmnd->request_bufflen) {
228 physaddr = dma_map_single(&phba->pcidev->dev,
229 scsi_cmnd->request_buffer,
230 scsi_cmnd->request_bufflen,
231 datadir);
232 dma_error = dma_mapping_error(physaddr);
233 if (dma_error) {
234 lpfc_printf_log(phba, KERN_ERR, LOG_FCP,
235 "%d:0718 Unable to dma_map_single "
236 "request_buffer: x%x\n",
237 phba->brd_no, dma_error);
238 return 1;
239 }
240
241 lpfc_cmd->nonsg_phys = physaddr;
242 bpl->addrLow = le32_to_cpu(putPaddrLow(physaddr));
243 bpl->addrHigh = le32_to_cpu(putPaddrHigh(physaddr));
244 bpl->tus.f.bdeSize = scsi_cmnd->request_bufflen;
245 if (datadir == DMA_TO_DEVICE)
246 bpl->tus.f.bdeFlags = 0;
247 else
248 bpl->tus.f.bdeFlags = BUFF_USE_RCV;
249 bpl->tus.w = le32_to_cpu(bpl->tus.w);
250 num_bde = 1;
251 bpl++;
252 }
253
254 /*
255 * Finish initializing those IOCB fields that are dependent on the
256 * scsi_cmnd request_buffer. Note that the bdeSize is explicitly
257 * reinitialized since all iocb memory resources are used many times
258 * for transmit, receive, and continuation bpl's.
259 */
260 iocb_cmd->un.fcpi64.bdl.bdeSize = (2 * sizeof (struct ulp_bde64));
261 iocb_cmd->un.fcpi64.bdl.bdeSize +=
262 (num_bde * sizeof (struct ulp_bde64));
263 iocb_cmd->ulpBdeCount = 1;
264 iocb_cmd->ulpLe = 1;
265 fcp_cmnd->fcpDl = be32_to_cpu(scsi_cmnd->request_bufflen);
266 return 0;
267 }
268
269 static void
270 lpfc_scsi_unprep_dma_buf(struct lpfc_hba * phba, struct lpfc_scsi_buf * psb)
271 {
272 /*
273 * There are only two special cases to consider. (1) the scsi command
274 * requested scatter-gather usage or (2) the scsi command allocated
275 * a request buffer, but did not request use_sg. There is a third
276 * case, but it does not require resource deallocation.
277 */
278 if ((psb->seg_cnt > 0) && (psb->pCmd->use_sg)) {
279 dma_unmap_sg(&phba->pcidev->dev, psb->pCmd->request_buffer,
280 psb->seg_cnt, psb->pCmd->sc_data_direction);
281 } else {
282 if ((psb->nonsg_phys) && (psb->pCmd->request_bufflen)) {
283 dma_unmap_single(&phba->pcidev->dev, psb->nonsg_phys,
284 psb->pCmd->request_bufflen,
285 psb->pCmd->sc_data_direction);
286 }
287 }
288 }
289
290 static void
291 lpfc_handle_fcp_err(struct lpfc_scsi_buf *lpfc_cmd)
292 {
293 struct scsi_cmnd *cmnd = lpfc_cmd->pCmd;
294 struct fcp_cmnd *fcpcmd = lpfc_cmd->fcp_cmnd;
295 struct fcp_rsp *fcprsp = lpfc_cmd->fcp_rsp;
296 struct lpfc_hba *phba = lpfc_cmd->scsi_hba;
297 uint32_t fcpi_parm = lpfc_cmd->cur_iocbq.iocb.un.fcpi.fcpi_parm;
298 uint32_t resp_info = fcprsp->rspStatus2;
299 uint32_t scsi_status = fcprsp->rspStatus3;
300 uint32_t *lp;
301 uint32_t host_status = DID_OK;
302 uint32_t rsplen = 0;
303 uint32_t logit = LOG_FCP | LOG_FCP_ERROR;
304
305 /*
306 * If this is a task management command, there is no
307 * scsi packet associated with this lpfc_cmd. The driver
308 * consumes it.
309 */
310 if (fcpcmd->fcpCntl2) {
311 scsi_status = 0;
312 goto out;
313 }
314
315 if ((resp_info & SNS_LEN_VALID) && fcprsp->rspSnsLen) {
316 uint32_t snslen = be32_to_cpu(fcprsp->rspSnsLen);
317 if (snslen > SCSI_SENSE_BUFFERSIZE)
318 snslen = SCSI_SENSE_BUFFERSIZE;
319
320 if (resp_info & RSP_LEN_VALID)
321 rsplen = be32_to_cpu(fcprsp->rspRspLen);
322 memcpy(cmnd->sense_buffer, &fcprsp->rspInfo0 + rsplen, snslen);
323 }
324 lp = (uint32_t *)cmnd->sense_buffer;
325
326 if (!scsi_status && (resp_info & RESID_UNDER))
327 logit = LOG_FCP;
328
329 lpfc_printf_log(phba, KERN_WARNING, logit,
330 "%d:0730 FCP command x%x failed: x%x SNS x%x x%x "
331 "Data: x%x x%x x%x x%x x%x\n",
332 phba->brd_no, cmnd->cmnd[0], scsi_status,
333 be32_to_cpu(*lp), be32_to_cpu(*(lp + 3)), resp_info,
334 be32_to_cpu(fcprsp->rspResId),
335 be32_to_cpu(fcprsp->rspSnsLen),
336 be32_to_cpu(fcprsp->rspRspLen),
337 fcprsp->rspInfo3);
338
339 if (resp_info & RSP_LEN_VALID) {
340 rsplen = be32_to_cpu(fcprsp->rspRspLen);
341 if ((rsplen != 0 && rsplen != 4 && rsplen != 8) ||
342 (fcprsp->rspInfo3 != RSP_NO_FAILURE)) {
343 host_status = DID_ERROR;
344 goto out;
345 }
346 }
347
348 cmnd->resid = 0;
349 if (resp_info & RESID_UNDER) {
350 cmnd->resid = be32_to_cpu(fcprsp->rspResId);
351
352 lpfc_printf_log(phba, KERN_INFO, LOG_FCP,
353 "%d:0716 FCP Read Underrun, expected %d, "
354 "residual %d Data: x%x x%x x%x\n", phba->brd_no,
355 be32_to_cpu(fcpcmd->fcpDl), cmnd->resid,
356 fcpi_parm, cmnd->cmnd[0], cmnd->underflow);
357
358 /*
359 * The cmnd->underflow is the minimum number of bytes that must
360 * be transfered for this command. Provided a sense condition
361 * is not present, make sure the actual amount transferred is at
362 * least the underflow value or fail.
363 */
364 if (!(resp_info & SNS_LEN_VALID) &&
365 (scsi_status == SAM_STAT_GOOD) &&
366 (cmnd->request_bufflen - cmnd->resid) < cmnd->underflow) {
367 lpfc_printf_log(phba, KERN_INFO, LOG_FCP,
368 "%d:0717 FCP command x%x residual "
369 "underrun converted to error "
370 "Data: x%x x%x x%x\n", phba->brd_no,
371 cmnd->cmnd[0], cmnd->request_bufflen,
372 cmnd->resid, cmnd->underflow);
373
374 host_status = DID_ERROR;
375 }
376 } else if (resp_info & RESID_OVER) {
377 lpfc_printf_log(phba, KERN_WARNING, LOG_FCP,
378 "%d:0720 FCP command x%x residual "
379 "overrun error. Data: x%x x%x \n",
380 phba->brd_no, cmnd->cmnd[0],
381 cmnd->request_bufflen, cmnd->resid);
382 host_status = DID_ERROR;
383
384 /*
385 * Check SLI validation that all the transfer was actually done
386 * (fcpi_parm should be zero). Apply check only to reads.
387 */
388 } else if ((scsi_status == SAM_STAT_GOOD) && fcpi_parm &&
389 (cmnd->sc_data_direction == DMA_FROM_DEVICE)) {
390 lpfc_printf_log(phba, KERN_WARNING, LOG_FCP | LOG_FCP_ERROR,
391 "%d:0734 FCP Read Check Error Data: "
392 "x%x x%x x%x x%x\n", phba->brd_no,
393 be32_to_cpu(fcpcmd->fcpDl),
394 be32_to_cpu(fcprsp->rspResId),
395 fcpi_parm, cmnd->cmnd[0]);
396 host_status = DID_ERROR;
397 cmnd->resid = cmnd->request_bufflen;
398 }
399
400 out:
401 cmnd->result = ScsiResult(host_status, scsi_status);
402 }
403
404 static void
405 lpfc_scsi_cmd_iocb_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *pIocbIn,
406 struct lpfc_iocbq *pIocbOut)
407 {
408 struct lpfc_scsi_buf *lpfc_cmd =
409 (struct lpfc_scsi_buf *) pIocbIn->context1;
410 struct lpfc_rport_data *rdata = lpfc_cmd->rdata;
411 struct lpfc_nodelist *pnode = rdata->pnode;
412 struct scsi_cmnd *cmd = lpfc_cmd->pCmd;
413 int result;
414 struct scsi_device *sdev, *tmp_sdev;
415 int depth = 0;
416
417 lpfc_cmd->result = pIocbOut->iocb.un.ulpWord[4];
418 lpfc_cmd->status = pIocbOut->iocb.ulpStatus;
419
420 if (lpfc_cmd->status) {
421 if (lpfc_cmd->status == IOSTAT_LOCAL_REJECT &&
422 (lpfc_cmd->result & IOERR_DRVR_MASK))
423 lpfc_cmd->status = IOSTAT_DRIVER_REJECT;
424 else if (lpfc_cmd->status >= IOSTAT_CNT)
425 lpfc_cmd->status = IOSTAT_DEFAULT;
426
427 lpfc_printf_log(phba, KERN_WARNING, LOG_FCP,
428 "%d:0729 FCP cmd x%x failed <%d/%d> status: "
429 "x%x result: x%x Data: x%x x%x\n",
430 phba->brd_no, cmd->cmnd[0], cmd->device->id,
431 cmd->device->lun, lpfc_cmd->status,
432 lpfc_cmd->result, pIocbOut->iocb.ulpContext,
433 lpfc_cmd->cur_iocbq.iocb.ulpIoTag);
434
435 switch (lpfc_cmd->status) {
436 case IOSTAT_FCP_RSP_ERROR:
437 /* Call FCP RSP handler to determine result */
438 lpfc_handle_fcp_err(lpfc_cmd);
439 break;
440 case IOSTAT_NPORT_BSY:
441 case IOSTAT_FABRIC_BSY:
442 cmd->result = ScsiResult(DID_BUS_BUSY, 0);
443 break;
444 default:
445 cmd->result = ScsiResult(DID_ERROR, 0);
446 break;
447 }
448
449 if ((pnode == NULL )
450 || (pnode->nlp_state != NLP_STE_MAPPED_NODE))
451 cmd->result = ScsiResult(DID_BUS_BUSY, SAM_STAT_BUSY);
452 } else {
453 cmd->result = ScsiResult(DID_OK, 0);
454 }
455
456 if (cmd->result || lpfc_cmd->fcp_rsp->rspSnsLen) {
457 uint32_t *lp = (uint32_t *)cmd->sense_buffer;
458
459 lpfc_printf_log(phba, KERN_INFO, LOG_FCP,
460 "%d:0710 Iodone <%d/%d> cmd %p, error x%x "
461 "SNS x%x x%x Data: x%x x%x\n",
462 phba->brd_no, cmd->device->id,
463 cmd->device->lun, cmd, cmd->result,
464 *lp, *(lp + 3), cmd->retries, cmd->resid);
465 }
466
467 result = cmd->result;
468 sdev = cmd->device;
469 cmd->scsi_done(cmd);
470
471 if (phba->cfg_poll & ENABLE_FCP_RING_POLLING) {
472 lpfc_scsi_unprep_dma_buf(phba, lpfc_cmd);
473 lpfc_release_scsi_buf(phba, lpfc_cmd);
474 return;
475 }
476
477 if (!result && pnode != NULL &&
478 ((jiffies - pnode->last_ramp_up_time) >
479 LPFC_Q_RAMP_UP_INTERVAL * HZ) &&
480 ((jiffies - pnode->last_q_full_time) >
481 LPFC_Q_RAMP_UP_INTERVAL * HZ) &&
482 (phba->cfg_lun_queue_depth > sdev->queue_depth)) {
483 shost_for_each_device(tmp_sdev, sdev->host) {
484 if (phba->cfg_lun_queue_depth > tmp_sdev->queue_depth) {
485 if (tmp_sdev->id != sdev->id)
486 continue;
487 if (tmp_sdev->ordered_tags)
488 scsi_adjust_queue_depth(tmp_sdev,
489 MSG_ORDERED_TAG,
490 tmp_sdev->queue_depth+1);
491 else
492 scsi_adjust_queue_depth(tmp_sdev,
493 MSG_SIMPLE_TAG,
494 tmp_sdev->queue_depth+1);
495
496 pnode->last_ramp_up_time = jiffies;
497 }
498 }
499 }
500
501 /*
502 * Check for queue full. If the lun is reporting queue full, then
503 * back off the lun queue depth to prevent target overloads.
504 */
505 if (result == SAM_STAT_TASK_SET_FULL && pnode != NULL) {
506 pnode->last_q_full_time = jiffies;
507
508 shost_for_each_device(tmp_sdev, sdev->host) {
509 if (tmp_sdev->id != sdev->id)
510 continue;
511 depth = scsi_track_queue_full(tmp_sdev,
512 tmp_sdev->queue_depth - 1);
513 }
514 /*
515 * The queue depth cannot be lowered any more.
516 * Modify the returned error code to store
517 * the final depth value set by
518 * scsi_track_queue_full.
519 */
520 if (depth == -1)
521 depth = sdev->host->cmd_per_lun;
522
523 if (depth) {
524 lpfc_printf_log(phba, KERN_WARNING, LOG_FCP,
525 "%d:0711 detected queue full - lun queue depth "
526 " adjusted to %d.\n", phba->brd_no, depth);
527 }
528 }
529
530 lpfc_scsi_unprep_dma_buf(phba, lpfc_cmd);
531 lpfc_release_scsi_buf(phba, lpfc_cmd);
532 }
533
534 static void
535 lpfc_scsi_prep_cmnd(struct lpfc_hba * phba, struct lpfc_scsi_buf * lpfc_cmd,
536 struct lpfc_nodelist *pnode)
537 {
538 struct scsi_cmnd *scsi_cmnd = lpfc_cmd->pCmd;
539 struct fcp_cmnd *fcp_cmnd = lpfc_cmd->fcp_cmnd;
540 IOCB_t *iocb_cmd = &lpfc_cmd->cur_iocbq.iocb;
541 struct lpfc_iocbq *piocbq = &(lpfc_cmd->cur_iocbq);
542 int datadir = scsi_cmnd->sc_data_direction;
543
544 lpfc_cmd->fcp_rsp->rspSnsLen = 0;
545 /* clear task management bits */
546 lpfc_cmd->fcp_cmnd->fcpCntl2 = 0;
547
548 int_to_scsilun(lpfc_cmd->pCmd->device->lun,
549 &lpfc_cmd->fcp_cmnd->fcp_lun);
550
551 memcpy(&fcp_cmnd->fcpCdb[0], scsi_cmnd->cmnd, 16);
552
553 if (scsi_cmnd->device->tagged_supported) {
554 switch (scsi_cmnd->tag) {
555 case HEAD_OF_QUEUE_TAG:
556 fcp_cmnd->fcpCntl1 = HEAD_OF_Q;
557 break;
558 case ORDERED_QUEUE_TAG:
559 fcp_cmnd->fcpCntl1 = ORDERED_Q;
560 break;
561 default:
562 fcp_cmnd->fcpCntl1 = SIMPLE_Q;
563 break;
564 }
565 } else
566 fcp_cmnd->fcpCntl1 = 0;
567
568 /*
569 * There are three possibilities here - use scatter-gather segment, use
570 * the single mapping, or neither. Start the lpfc command prep by
571 * bumping the bpl beyond the fcp_cmnd and fcp_rsp regions to the first
572 * data bde entry.
573 */
574 if (scsi_cmnd->use_sg) {
575 if (datadir == DMA_TO_DEVICE) {
576 iocb_cmd->ulpCommand = CMD_FCP_IWRITE64_CR;
577 iocb_cmd->un.fcpi.fcpi_parm = 0;
578 iocb_cmd->ulpPU = 0;
579 fcp_cmnd->fcpCntl3 = WRITE_DATA;
580 phba->fc4OutputRequests++;
581 } else {
582 iocb_cmd->ulpCommand = CMD_FCP_IREAD64_CR;
583 iocb_cmd->ulpPU = PARM_READ_CHECK;
584 iocb_cmd->un.fcpi.fcpi_parm =
585 scsi_cmnd->request_bufflen;
586 fcp_cmnd->fcpCntl3 = READ_DATA;
587 phba->fc4InputRequests++;
588 }
589 } else if (scsi_cmnd->request_buffer && scsi_cmnd->request_bufflen) {
590 if (datadir == DMA_TO_DEVICE) {
591 iocb_cmd->ulpCommand = CMD_FCP_IWRITE64_CR;
592 iocb_cmd->un.fcpi.fcpi_parm = 0;
593 iocb_cmd->ulpPU = 0;
594 fcp_cmnd->fcpCntl3 = WRITE_DATA;
595 phba->fc4OutputRequests++;
596 } else {
597 iocb_cmd->ulpCommand = CMD_FCP_IREAD64_CR;
598 iocb_cmd->ulpPU = PARM_READ_CHECK;
599 iocb_cmd->un.fcpi.fcpi_parm =
600 scsi_cmnd->request_bufflen;
601 fcp_cmnd->fcpCntl3 = READ_DATA;
602 phba->fc4InputRequests++;
603 }
604 } else {
605 iocb_cmd->ulpCommand = CMD_FCP_ICMND64_CR;
606 iocb_cmd->un.fcpi.fcpi_parm = 0;
607 iocb_cmd->ulpPU = 0;
608 fcp_cmnd->fcpCntl3 = 0;
609 phba->fc4ControlRequests++;
610 }
611
612 /*
613 * Finish initializing those IOCB fields that are independent
614 * of the scsi_cmnd request_buffer
615 */
616 piocbq->iocb.ulpContext = pnode->nlp_rpi;
617 if (pnode->nlp_fcp_info & NLP_FCP_2_DEVICE)
618 piocbq->iocb.ulpFCP2Rcvy = 1;
619
620 piocbq->iocb.ulpClass = (pnode->nlp_fcp_info & 0x0f);
621 piocbq->context1 = lpfc_cmd;
622 piocbq->iocb_cmpl = lpfc_scsi_cmd_iocb_cmpl;
623 piocbq->iocb.ulpTimeout = lpfc_cmd->timeout;
624 }
625
626 static int
627 lpfc_scsi_prep_task_mgmt_cmd(struct lpfc_hba *phba,
628 struct lpfc_scsi_buf *lpfc_cmd,
629 unsigned int lun,
630 uint8_t task_mgmt_cmd)
631 {
632 struct lpfc_sli *psli;
633 struct lpfc_iocbq *piocbq;
634 IOCB_t *piocb;
635 struct fcp_cmnd *fcp_cmnd;
636 struct lpfc_rport_data *rdata = lpfc_cmd->rdata;
637 struct lpfc_nodelist *ndlp = rdata->pnode;
638
639 if ((ndlp == NULL) || (ndlp->nlp_state != NLP_STE_MAPPED_NODE)) {
640 return 0;
641 }
642
643 psli = &phba->sli;
644 piocbq = &(lpfc_cmd->cur_iocbq);
645 piocb = &piocbq->iocb;
646
647 fcp_cmnd = lpfc_cmd->fcp_cmnd;
648 int_to_scsilun(lun, &lpfc_cmd->fcp_cmnd->fcp_lun);
649 fcp_cmnd->fcpCntl2 = task_mgmt_cmd;
650
651 piocb->ulpCommand = CMD_FCP_ICMND64_CR;
652
653 piocb->ulpContext = ndlp->nlp_rpi;
654 if (ndlp->nlp_fcp_info & NLP_FCP_2_DEVICE) {
655 piocb->ulpFCP2Rcvy = 1;
656 }
657 piocb->ulpClass = (ndlp->nlp_fcp_info & 0x0f);
658
659 /* ulpTimeout is only one byte */
660 if (lpfc_cmd->timeout > 0xff) {
661 /*
662 * Do not timeout the command at the firmware level.
663 * The driver will provide the timeout mechanism.
664 */
665 piocb->ulpTimeout = 0;
666 } else {
667 piocb->ulpTimeout = lpfc_cmd->timeout;
668 }
669
670 return (1);
671 }
672
673 static int
674 lpfc_scsi_tgt_reset(struct lpfc_scsi_buf * lpfc_cmd, struct lpfc_hba * phba,
675 unsigned tgt_id, unsigned int lun,
676 struct lpfc_rport_data *rdata)
677 {
678 struct lpfc_iocbq *iocbq;
679 struct lpfc_iocbq *iocbqrsp;
680 int ret;
681
682 if (!rdata->pnode)
683 return FAILED;
684
685 lpfc_cmd->rdata = rdata;
686 ret = lpfc_scsi_prep_task_mgmt_cmd(phba, lpfc_cmd, lun,
687 FCP_TARGET_RESET);
688 if (!ret)
689 return FAILED;
690
691 lpfc_cmd->scsi_hba = phba;
692 iocbq = &lpfc_cmd->cur_iocbq;
693 iocbqrsp = lpfc_sli_get_iocbq(phba);
694
695 if (!iocbqrsp)
696 return FAILED;
697
698 /* Issue Target Reset to TGT <num> */
699 lpfc_printf_log(phba, KERN_INFO, LOG_FCP,
700 "%d:0702 Issue Target Reset to TGT %d "
701 "Data: x%x x%x\n",
702 phba->brd_no, tgt_id, rdata->pnode->nlp_rpi,
703 rdata->pnode->nlp_flag);
704
705 ret = lpfc_sli_issue_iocb_wait(phba,
706 &phba->sli.ring[phba->sli.fcp_ring],
707 iocbq, iocbqrsp, lpfc_cmd->timeout);
708 if (ret != IOCB_SUCCESS) {
709 lpfc_cmd->status = IOSTAT_DRIVER_REJECT;
710 ret = FAILED;
711 } else {
712 ret = SUCCESS;
713 lpfc_cmd->result = iocbqrsp->iocb.un.ulpWord[4];
714 lpfc_cmd->status = iocbqrsp->iocb.ulpStatus;
715 if (lpfc_cmd->status == IOSTAT_LOCAL_REJECT &&
716 (lpfc_cmd->result & IOERR_DRVR_MASK))
717 lpfc_cmd->status = IOSTAT_DRIVER_REJECT;
718 }
719
720 lpfc_sli_release_iocbq(phba, iocbqrsp);
721 return ret;
722 }
723
724 const char *
725 lpfc_info(struct Scsi_Host *host)
726 {
727 struct lpfc_hba *phba = (struct lpfc_hba *) host->hostdata;
728 int len;
729 static char lpfcinfobuf[384];
730
731 memset(lpfcinfobuf,0,384);
732 if (phba && phba->pcidev){
733 strncpy(lpfcinfobuf, phba->ModelDesc, 256);
734 len = strlen(lpfcinfobuf);
735 snprintf(lpfcinfobuf + len,
736 384-len,
737 " on PCI bus %02x device %02x irq %d",
738 phba->pcidev->bus->number,
739 phba->pcidev->devfn,
740 phba->pcidev->irq);
741 len = strlen(lpfcinfobuf);
742 if (phba->Port[0]) {
743 snprintf(lpfcinfobuf + len,
744 384-len,
745 " port %s",
746 phba->Port);
747 }
748 }
749 return lpfcinfobuf;
750 }
751
752 static __inline__ void lpfc_poll_rearm_timer(struct lpfc_hba * phba)
753 {
754 unsigned long poll_tmo_expires =
755 (jiffies + msecs_to_jiffies(phba->cfg_poll_tmo));
756
757 if (phba->sli.ring[LPFC_FCP_RING].txcmplq_cnt)
758 mod_timer(&phba->fcp_poll_timer,
759 poll_tmo_expires);
760 }
761
762 void lpfc_poll_start_timer(struct lpfc_hba * phba)
763 {
764 lpfc_poll_rearm_timer(phba);
765 }
766
767 void lpfc_poll_timeout(unsigned long ptr)
768 {
769 struct lpfc_hba *phba = (struct lpfc_hba *)ptr;
770 unsigned long iflag;
771
772 spin_lock_irqsave(phba->host->host_lock, iflag);
773
774 if (phba->cfg_poll & ENABLE_FCP_RING_POLLING) {
775 lpfc_sli_poll_fcp_ring (phba);
776 if (phba->cfg_poll & DISABLE_FCP_RING_INT)
777 lpfc_poll_rearm_timer(phba);
778 }
779
780 spin_unlock_irqrestore(phba->host->host_lock, iflag);
781 }
782
783 static int
784 lpfc_queuecommand(struct scsi_cmnd *cmnd, void (*done) (struct scsi_cmnd *))
785 {
786 struct lpfc_hba *phba =
787 (struct lpfc_hba *) cmnd->device->host->hostdata;
788 struct lpfc_sli *psli = &phba->sli;
789 struct lpfc_rport_data *rdata = cmnd->device->hostdata;
790 struct lpfc_nodelist *ndlp = rdata->pnode;
791 struct lpfc_scsi_buf *lpfc_cmd;
792 struct fc_rport *rport = starget_to_rport(scsi_target(cmnd->device));
793 int err;
794
795 err = fc_remote_port_chkready(rport);
796 if (err) {
797 cmnd->result = err;
798 goto out_fail_command;
799 }
800
801 /*
802 * Catch race where our node has transitioned, but the
803 * transport is still transitioning.
804 */
805 if (!ndlp) {
806 cmnd->result = ScsiResult(DID_BUS_BUSY, 0);
807 goto out_fail_command;
808 }
809 lpfc_cmd = lpfc_get_scsi_buf (phba);
810 if (lpfc_cmd == NULL) {
811 lpfc_printf_log(phba, KERN_INFO, LOG_FCP,
812 "%d:0707 driver's buffer pool is empty, "
813 "IO busied\n", phba->brd_no);
814 goto out_host_busy;
815 }
816
817 /*
818 * Store the midlayer's command structure for the completion phase
819 * and complete the command initialization.
820 */
821 lpfc_cmd->pCmd = cmnd;
822 lpfc_cmd->rdata = rdata;
823 lpfc_cmd->timeout = 0;
824 cmnd->host_scribble = (unsigned char *)lpfc_cmd;
825 cmnd->scsi_done = done;
826
827 err = lpfc_scsi_prep_dma_buf(phba, lpfc_cmd);
828 if (err)
829 goto out_host_busy_free_buf;
830
831 lpfc_scsi_prep_cmnd(phba, lpfc_cmd, ndlp);
832
833 err = lpfc_sli_issue_iocb(phba, &phba->sli.ring[psli->fcp_ring],
834 &lpfc_cmd->cur_iocbq, SLI_IOCB_RET_IOCB);
835 if (err)
836 goto out_host_busy_free_buf;
837
838 if (phba->cfg_poll & ENABLE_FCP_RING_POLLING) {
839 lpfc_sli_poll_fcp_ring(phba);
840 if (phba->cfg_poll & DISABLE_FCP_RING_INT)
841 lpfc_poll_rearm_timer(phba);
842 }
843
844 return 0;
845
846 out_host_busy_free_buf:
847 lpfc_scsi_unprep_dma_buf(phba, lpfc_cmd);
848 lpfc_release_scsi_buf(phba, lpfc_cmd);
849 out_host_busy:
850 return SCSI_MLQUEUE_HOST_BUSY;
851
852 out_fail_command:
853 done(cmnd);
854 return 0;
855 }
856
857 static void
858 lpfc_block_error_handler(struct scsi_cmnd *cmnd)
859 {
860 struct Scsi_Host *shost = cmnd->device->host;
861 struct fc_rport *rport = starget_to_rport(scsi_target(cmnd->device));
862
863 spin_lock_irq(shost->host_lock);
864 while (rport->port_state == FC_PORTSTATE_BLOCKED) {
865 spin_unlock_irq(shost->host_lock);
866 msleep(1000);
867 spin_lock_irq(shost->host_lock);
868 }
869 spin_unlock_irq(shost->host_lock);
870 return;
871 }
872
873 static int
874 lpfc_abort_handler(struct scsi_cmnd *cmnd)
875 {
876 struct Scsi_Host *shost = cmnd->device->host;
877 struct lpfc_hba *phba = (struct lpfc_hba *)shost->hostdata;
878 struct lpfc_sli_ring *pring = &phba->sli.ring[phba->sli.fcp_ring];
879 struct lpfc_iocbq *iocb;
880 struct lpfc_iocbq *abtsiocb;
881 struct lpfc_scsi_buf *lpfc_cmd;
882 IOCB_t *cmd, *icmd;
883 unsigned int loop_count = 0;
884 int ret = SUCCESS;
885
886 lpfc_block_error_handler(cmnd);
887 spin_lock_irq(shost->host_lock);
888
889 lpfc_cmd = (struct lpfc_scsi_buf *)cmnd->host_scribble;
890 BUG_ON(!lpfc_cmd);
891
892 /*
893 * If pCmd field of the corresponding lpfc_scsi_buf structure
894 * points to a different SCSI command, then the driver has
895 * already completed this command, but the midlayer did not
896 * see the completion before the eh fired. Just return
897 * SUCCESS.
898 */
899 iocb = &lpfc_cmd->cur_iocbq;
900 if (lpfc_cmd->pCmd != cmnd)
901 goto out;
902
903 BUG_ON(iocb->context1 != lpfc_cmd);
904
905 abtsiocb = lpfc_sli_get_iocbq(phba);
906 if (abtsiocb == NULL) {
907 ret = FAILED;
908 goto out;
909 }
910
911 /*
912 * The scsi command can not be in txq and it is in flight because the
913 * pCmd is still pointig at the SCSI command we have to abort. There
914 * is no need to search the txcmplq. Just send an abort to the FW.
915 */
916
917 cmd = &iocb->iocb;
918 icmd = &abtsiocb->iocb;
919 icmd->un.acxri.abortType = ABORT_TYPE_ABTS;
920 icmd->un.acxri.abortContextTag = cmd->ulpContext;
921 icmd->un.acxri.abortIoTag = cmd->ulpIoTag;
922
923 icmd->ulpLe = 1;
924 icmd->ulpClass = cmd->ulpClass;
925 if (phba->hba_state >= LPFC_LINK_UP)
926 icmd->ulpCommand = CMD_ABORT_XRI_CN;
927 else
928 icmd->ulpCommand = CMD_CLOSE_XRI_CN;
929
930 abtsiocb->iocb_cmpl = lpfc_sli_abort_fcp_cmpl;
931 if (lpfc_sli_issue_iocb(phba, pring, abtsiocb, 0) == IOCB_ERROR) {
932 lpfc_sli_release_iocbq(phba, abtsiocb);
933 ret = FAILED;
934 goto out;
935 }
936
937 if (phba->cfg_poll & DISABLE_FCP_RING_INT)
938 lpfc_sli_poll_fcp_ring (phba);
939
940 /* Wait for abort to complete */
941 while (lpfc_cmd->pCmd == cmnd)
942 {
943 if (phba->cfg_poll & DISABLE_FCP_RING_INT)
944 lpfc_sli_poll_fcp_ring (phba);
945
946 spin_unlock_irq(phba->host->host_lock);
947 schedule_timeout_uninterruptible(LPFC_ABORT_WAIT*HZ);
948 spin_lock_irq(phba->host->host_lock);
949 if (++loop_count
950 > (2 * phba->cfg_devloss_tmo)/LPFC_ABORT_WAIT)
951 break;
952 }
953
954 if (lpfc_cmd->pCmd == cmnd) {
955 ret = FAILED;
956 lpfc_printf_log(phba, KERN_ERR, LOG_FCP,
957 "%d:0748 abort handler timed out waiting for "
958 "abort to complete: ret %#x, ID %d, LUN %d, "
959 "snum %#lx\n",
960 phba->brd_no, ret, cmnd->device->id,
961 cmnd->device->lun, cmnd->serial_number);
962 }
963
964 out:
965 lpfc_printf_log(phba, KERN_WARNING, LOG_FCP,
966 "%d:0749 SCSI Layer I/O Abort Request "
967 "Status x%x ID %d LUN %d snum %#lx\n",
968 phba->brd_no, ret, cmnd->device->id,
969 cmnd->device->lun, cmnd->serial_number);
970
971 spin_unlock_irq(shost->host_lock);
972
973 return ret;
974 }
975
976 static int
977 lpfc_reset_lun_handler(struct scsi_cmnd *cmnd)
978 {
979 struct Scsi_Host *shost = cmnd->device->host;
980 struct lpfc_hba *phba = (struct lpfc_hba *)shost->hostdata;
981 struct lpfc_scsi_buf *lpfc_cmd;
982 struct lpfc_iocbq *iocbq, *iocbqrsp;
983 struct lpfc_rport_data *rdata = cmnd->device->hostdata;
984 struct lpfc_nodelist *pnode = rdata->pnode;
985 uint32_t cmd_result = 0, cmd_status = 0;
986 int ret = FAILED;
987 int cnt, loopcnt;
988
989 lpfc_block_error_handler(cmnd);
990 spin_lock_irq(shost->host_lock);
991 loopcnt = 0;
992 /*
993 * If target is not in a MAPPED state, delay the reset until
994 * target is rediscovered or devloss timeout expires.
995 */
996 while ( 1 ) {
997 if (!pnode)
998 return FAILED;
999
1000 if (pnode->nlp_state != NLP_STE_MAPPED_NODE) {
1001 spin_unlock_irq(phba->host->host_lock);
1002 schedule_timeout_uninterruptible(msecs_to_jiffies(500));
1003 spin_lock_irq(phba->host->host_lock);
1004 loopcnt++;
1005 rdata = cmnd->device->hostdata;
1006 if (!rdata ||
1007 (loopcnt > ((phba->cfg_devloss_tmo * 2) + 1))) {
1008 lpfc_printf_log(phba, KERN_ERR, LOG_FCP,
1009 "%d:0721 LUN Reset rport failure:"
1010 " cnt x%x rdata x%p\n",
1011 phba->brd_no, loopcnt, rdata);
1012 goto out;
1013 }
1014 pnode = rdata->pnode;
1015 if (!pnode)
1016 return FAILED;
1017 }
1018 if (pnode->nlp_state == NLP_STE_MAPPED_NODE)
1019 break;
1020 }
1021
1022 lpfc_cmd = lpfc_get_scsi_buf (phba);
1023 if (lpfc_cmd == NULL)
1024 goto out;
1025
1026 lpfc_cmd->timeout = 60;
1027 lpfc_cmd->scsi_hba = phba;
1028 lpfc_cmd->rdata = rdata;
1029
1030 ret = lpfc_scsi_prep_task_mgmt_cmd(phba, lpfc_cmd, cmnd->device->lun,
1031 FCP_LUN_RESET);
1032 if (!ret)
1033 goto out_free_scsi_buf;
1034
1035 iocbq = &lpfc_cmd->cur_iocbq;
1036
1037 /* get a buffer for this IOCB command response */
1038 iocbqrsp = lpfc_sli_get_iocbq(phba);
1039 if (iocbqrsp == NULL)
1040 goto out_free_scsi_buf;
1041
1042 lpfc_printf_log(phba, KERN_INFO, LOG_FCP,
1043 "%d:0703 Issue LUN Reset to TGT %d LUN %d "
1044 "Data: x%x x%x\n", phba->brd_no, cmnd->device->id,
1045 cmnd->device->lun, pnode->nlp_rpi, pnode->nlp_flag);
1046
1047 ret = lpfc_sli_issue_iocb_wait(phba,
1048 &phba->sli.ring[phba->sli.fcp_ring],
1049 iocbq, iocbqrsp, lpfc_cmd->timeout);
1050 if (ret == IOCB_SUCCESS)
1051 ret = SUCCESS;
1052
1053
1054 cmd_result = iocbqrsp->iocb.un.ulpWord[4];
1055 cmd_status = iocbqrsp->iocb.ulpStatus;
1056
1057 lpfc_sli_release_iocbq(phba, iocbqrsp);
1058
1059 /*
1060 * All outstanding txcmplq I/Os should have been aborted by the device.
1061 * Unfortunately, some targets do not abide by this forcing the driver
1062 * to double check.
1063 */
1064 cnt = lpfc_sli_sum_iocb(phba, &phba->sli.ring[phba->sli.fcp_ring],
1065 cmnd->device->id, cmnd->device->lun,
1066 LPFC_CTX_LUN);
1067 if (cnt)
1068 lpfc_sli_abort_iocb(phba,
1069 &phba->sli.ring[phba->sli.fcp_ring],
1070 cmnd->device->id, cmnd->device->lun,
1071 0, LPFC_CTX_LUN);
1072 loopcnt = 0;
1073 while(cnt) {
1074 spin_unlock_irq(phba->host->host_lock);
1075 schedule_timeout_uninterruptible(LPFC_RESET_WAIT*HZ);
1076 spin_lock_irq(phba->host->host_lock);
1077
1078 if (++loopcnt
1079 > (2 * phba->cfg_devloss_tmo)/LPFC_RESET_WAIT)
1080 break;
1081
1082 cnt = lpfc_sli_sum_iocb(phba,
1083 &phba->sli.ring[phba->sli.fcp_ring],
1084 cmnd->device->id, cmnd->device->lun,
1085 LPFC_CTX_LUN);
1086 }
1087
1088 if (cnt) {
1089 lpfc_printf_log(phba, KERN_ERR, LOG_FCP,
1090 "%d:0719 LUN Reset I/O flush failure: cnt x%x\n",
1091 phba->brd_no, cnt);
1092 ret = FAILED;
1093 }
1094
1095 out_free_scsi_buf:
1096 lpfc_release_scsi_buf(phba, lpfc_cmd);
1097
1098 lpfc_printf_log(phba, KERN_ERR, LOG_FCP,
1099 "%d:0713 SCSI layer issued LUN reset (%d, %d) "
1100 "Data: x%x x%x x%x\n",
1101 phba->brd_no, cmnd->device->id,cmnd->device->lun,
1102 ret, cmd_status, cmd_result);
1103
1104 out:
1105 spin_unlock_irq(shost->host_lock);
1106 return ret;
1107 }
1108
1109 static int
1110 lpfc_reset_bus_handler(struct scsi_cmnd *cmnd)
1111 {
1112 struct Scsi_Host *shost = cmnd->device->host;
1113 struct lpfc_hba *phba = (struct lpfc_hba *)shost->hostdata;
1114 struct lpfc_nodelist *ndlp = NULL;
1115 int match;
1116 int ret = FAILED, i, err_count = 0;
1117 int cnt, loopcnt;
1118 struct lpfc_scsi_buf * lpfc_cmd;
1119
1120 lpfc_block_error_handler(cmnd);
1121 spin_lock_irq(shost->host_lock);
1122
1123 lpfc_cmd = lpfc_get_scsi_buf(phba);
1124 if (lpfc_cmd == NULL)
1125 goto out;
1126
1127 /* The lpfc_cmd storage is reused. Set all loop invariants. */
1128 lpfc_cmd->timeout = 60;
1129 lpfc_cmd->scsi_hba = phba;
1130
1131 /*
1132 * Since the driver manages a single bus device, reset all
1133 * targets known to the driver. Should any target reset
1134 * fail, this routine returns failure to the midlayer.
1135 */
1136 for (i = 0; i < LPFC_MAX_TARGET; i++) {
1137 /* Search the mapped list for this target ID */
1138 match = 0;
1139 list_for_each_entry(ndlp, &phba->fc_nlpmap_list, nlp_listp) {
1140 if ((i == ndlp->nlp_sid) && ndlp->rport) {
1141 match = 1;
1142 break;
1143 }
1144 }
1145 if (!match)
1146 continue;
1147
1148 ret = lpfc_scsi_tgt_reset(lpfc_cmd, phba, i, cmnd->device->lun,
1149 ndlp->rport->dd_data);
1150 if (ret != SUCCESS) {
1151 lpfc_printf_log(phba, KERN_ERR, LOG_FCP,
1152 "%d:0700 Bus Reset on target %d failed\n",
1153 phba->brd_no, i);
1154 err_count++;
1155 }
1156 }
1157
1158 if (err_count == 0)
1159 ret = SUCCESS;
1160
1161 lpfc_release_scsi_buf(phba, lpfc_cmd);
1162
1163 /*
1164 * All outstanding txcmplq I/Os should have been aborted by
1165 * the targets. Unfortunately, some targets do not abide by
1166 * this forcing the driver to double check.
1167 */
1168 cnt = lpfc_sli_sum_iocb(phba, &phba->sli.ring[phba->sli.fcp_ring],
1169 0, 0, LPFC_CTX_HOST);
1170 if (cnt)
1171 lpfc_sli_abort_iocb(phba, &phba->sli.ring[phba->sli.fcp_ring],
1172 0, 0, 0, LPFC_CTX_HOST);
1173 loopcnt = 0;
1174 while(cnt) {
1175 spin_unlock_irq(phba->host->host_lock);
1176 schedule_timeout_uninterruptible(LPFC_RESET_WAIT*HZ);
1177 spin_lock_irq(phba->host->host_lock);
1178
1179 if (++loopcnt
1180 > (2 * phba->cfg_devloss_tmo)/LPFC_RESET_WAIT)
1181 break;
1182
1183 cnt = lpfc_sli_sum_iocb(phba,
1184 &phba->sli.ring[phba->sli.fcp_ring],
1185 0, 0, LPFC_CTX_HOST);
1186 }
1187
1188 if (cnt) {
1189 lpfc_printf_log(phba, KERN_ERR, LOG_FCP,
1190 "%d:0715 Bus Reset I/O flush failure: cnt x%x left x%x\n",
1191 phba->brd_no, cnt, i);
1192 ret = FAILED;
1193 }
1194
1195 lpfc_printf_log(phba,
1196 KERN_ERR,
1197 LOG_FCP,
1198 "%d:0714 SCSI layer issued Bus Reset Data: x%x\n",
1199 phba->brd_no, ret);
1200 out:
1201 spin_unlock_irq(shost->host_lock);
1202 return ret;
1203 }
1204
1205 static int
1206 lpfc_slave_alloc(struct scsi_device *sdev)
1207 {
1208 struct lpfc_hba *phba = (struct lpfc_hba *)sdev->host->hostdata;
1209 struct lpfc_scsi_buf *scsi_buf = NULL;
1210 struct fc_rport *rport = starget_to_rport(scsi_target(sdev));
1211 uint32_t total = 0, i;
1212 uint32_t num_to_alloc = 0;
1213 unsigned long flags;
1214
1215 if (!rport || fc_remote_port_chkready(rport))
1216 return -ENXIO;
1217
1218 sdev->hostdata = rport->dd_data;
1219
1220 /*
1221 * Populate the cmds_per_lun count scsi_bufs into this host's globally
1222 * available list of scsi buffers. Don't allocate more than the
1223 * HBA limit conveyed to the midlayer via the host structure. The
1224 * formula accounts for the lun_queue_depth + error handlers + 1
1225 * extra. This list of scsi bufs exists for the lifetime of the driver.
1226 */
1227 total = phba->total_scsi_bufs;
1228 num_to_alloc = phba->cfg_lun_queue_depth + 2;
1229 if (total >= phba->cfg_hba_queue_depth) {
1230 lpfc_printf_log(phba, KERN_WARNING, LOG_FCP,
1231 "%d:0704 At limitation of %d preallocated "
1232 "command buffers\n", phba->brd_no, total);
1233 return 0;
1234 } else if (total + num_to_alloc > phba->cfg_hba_queue_depth) {
1235 lpfc_printf_log(phba, KERN_WARNING, LOG_FCP,
1236 "%d:0705 Allocation request of %d command "
1237 "buffers will exceed max of %d. Reducing "
1238 "allocation request to %d.\n", phba->brd_no,
1239 num_to_alloc, phba->cfg_hba_queue_depth,
1240 (phba->cfg_hba_queue_depth - total));
1241 num_to_alloc = phba->cfg_hba_queue_depth - total;
1242 }
1243
1244 for (i = 0; i < num_to_alloc; i++) {
1245 scsi_buf = lpfc_new_scsi_buf(phba);
1246 if (!scsi_buf) {
1247 lpfc_printf_log(phba, KERN_ERR, LOG_FCP,
1248 "%d:0706 Failed to allocate command "
1249 "buffer\n", phba->brd_no);
1250 break;
1251 }
1252
1253 spin_lock_irqsave(&phba->scsi_buf_list_lock, flags);
1254 phba->total_scsi_bufs++;
1255 list_add_tail(&scsi_buf->list, &phba->lpfc_scsi_buf_list);
1256 spin_unlock_irqrestore(&phba->scsi_buf_list_lock, flags);
1257 }
1258 return 0;
1259 }
1260
1261 static int
1262 lpfc_slave_configure(struct scsi_device *sdev)
1263 {
1264 struct lpfc_hba *phba = (struct lpfc_hba *) sdev->host->hostdata;
1265 struct fc_rport *rport = starget_to_rport(sdev->sdev_target);
1266
1267 if (sdev->tagged_supported)
1268 scsi_activate_tcq(sdev, phba->cfg_lun_queue_depth);
1269 else
1270 scsi_deactivate_tcq(sdev, phba->cfg_lun_queue_depth);
1271
1272 /*
1273 * Initialize the fc transport attributes for the target
1274 * containing this scsi device. Also note that the driver's
1275 * target pointer is stored in the starget_data for the
1276 * driver's sysfs entry point functions.
1277 */
1278 rport->dev_loss_tmo = phba->cfg_devloss_tmo;
1279
1280 if (phba->cfg_poll & ENABLE_FCP_RING_POLLING) {
1281 lpfc_sli_poll_fcp_ring(phba);
1282 if (phba->cfg_poll & DISABLE_FCP_RING_INT)
1283 lpfc_poll_rearm_timer(phba);
1284 }
1285
1286 return 0;
1287 }
1288
1289 static void
1290 lpfc_slave_destroy(struct scsi_device *sdev)
1291 {
1292 sdev->hostdata = NULL;
1293 return;
1294 }
1295
1296 struct scsi_host_template lpfc_template = {
1297 .module = THIS_MODULE,
1298 .name = LPFC_DRIVER_NAME,
1299 .info = lpfc_info,
1300 .queuecommand = lpfc_queuecommand,
1301 .eh_abort_handler = lpfc_abort_handler,
1302 .eh_device_reset_handler= lpfc_reset_lun_handler,
1303 .eh_bus_reset_handler = lpfc_reset_bus_handler,
1304 .slave_alloc = lpfc_slave_alloc,
1305 .slave_configure = lpfc_slave_configure,
1306 .slave_destroy = lpfc_slave_destroy,
1307 .this_id = -1,
1308 .sg_tablesize = LPFC_SG_SEG_CNT,
1309 .cmd_per_lun = LPFC_CMD_PER_LUN,
1310 .use_clustering = ENABLE_CLUSTERING,
1311 .shost_attrs = lpfc_host_attrs,
1312 .max_sectors = 0xFFFF,
1313 };