[SCSI] qla2xxx: Fix to ensure driver works in single queue mode if multiqueue fails
drivers/scsi/qla2xxx/qla_iocb.c

/*
 * QLogic Fibre Channel HBA Driver
 * Copyright (c) 2003-2008 QLogic Corporation
 *
 * See LICENSE.qla2xxx for copyright and licensing details.
 */
#include "qla_def.h"

#include <linux/blkdev.h>
#include <linux/delay.h>

#include <scsi/scsi_tcq.h>

static request_t *qla2x00_req_pkt(struct scsi_qla_host *, struct req_que *,
	struct rsp_que *rsp);
static void qla2x00_isp_cmd(struct scsi_qla_host *, struct req_que *);

static void qla25xx_set_que(srb_t *, struct rsp_que **);

/**
 * qla2x00_get_cmd_direction() - Determine control_flag data direction.
 * @sp: SCSI command SRB
 *
 * Returns the proper CF_* direction based on the data direction of the
 * SCSI command.
 */
static inline uint16_t
qla2x00_get_cmd_direction(srb_t *sp)
{
	uint16_t cflags;

	cflags = 0;

	/* Set transfer direction */
	if (sp->cmd->sc_data_direction == DMA_TO_DEVICE) {
		cflags = CF_WRITE;
		sp->fcport->vha->hw->qla_stats.output_bytes +=
		    scsi_bufflen(sp->cmd);
	} else if (sp->cmd->sc_data_direction == DMA_FROM_DEVICE) {
		cflags = CF_READ;
		sp->fcport->vha->hw->qla_stats.input_bytes +=
		    scsi_bufflen(sp->cmd);
	}
	return (cflags);
}

/**
 * qla2x00_calc_iocbs_32() - Determine number of Command Type 2 and
 * Continuation Type 0 IOCBs to allocate.
 *
 * @dsds: number of data segment descriptors needed
 *
 * Returns the number of IOCB entries needed to store @dsds.
 */
uint16_t
qla2x00_calc_iocbs_32(uint16_t dsds)
{
	uint16_t iocbs;

	iocbs = 1;
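	/*
	 * The first (command) IOCB holds 3 DSDs; each Continuation Type 0
	 * IOCB holds 7 more, so iocbs = 1 + ceil((dsds - 3) / 7).
	 * For example, dsds == 12 needs 1 + ceil(9 / 7) = 3 entries.
	 */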
	if (dsds > 3) {
		iocbs += (dsds - 3) / 7;
		if ((dsds - 3) % 7)
			iocbs++;
	}
	return (iocbs);
}

/**
 * qla2x00_calc_iocbs_64() - Determine number of Command Type 3 and
 * Continuation Type 1 IOCBs to allocate.
 *
 * @dsds: number of data segment descriptors needed
 *
 * Returns the number of IOCB entries needed to store @dsds.
 */
uint16_t
qla2x00_calc_iocbs_64(uint16_t dsds)
{
	uint16_t iocbs;

	iocbs = 1;
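	/*
	 * The first (command) IOCB holds 2 DSDs; each Continuation Type 1
	 * IOCB holds 5 more, so iocbs = 1 + ceil((dsds - 2) / 5).
	 * For example, dsds == 12 needs 1 + ceil(10 / 5) = 3 entries.
	 */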
	if (dsds > 2) {
		iocbs += (dsds - 2) / 5;
		if ((dsds - 2) % 5)
			iocbs++;
	}
	return (iocbs);
}

/**
 * qla2x00_prep_cont_type0_iocb() - Initialize a Continuation Type 0 IOCB.
 * @vha: HA context
 *
 * Returns a pointer to the Continuation Type 0 IOCB packet.
 */
static inline cont_entry_t *
qla2x00_prep_cont_type0_iocb(struct scsi_qla_host *vha)
{
	cont_entry_t *cont_pkt;
	struct req_que *req = vha->req;

	/* Adjust ring index. */
	req->ring_index++;
	if (req->ring_index == req->length) {
		req->ring_index = 0;
		req->ring_ptr = req->ring;
	} else {
		req->ring_ptr++;
	}

	cont_pkt = (cont_entry_t *)req->ring_ptr;

	/* Load packet defaults. */
	*((uint32_t *)(&cont_pkt->entry_type)) =
	    __constant_cpu_to_le32(CONTINUE_TYPE);

	return (cont_pkt);
}

/**
 * qla2x00_prep_cont_type1_iocb() - Initialize a Continuation Type 1 IOCB.
 * @vha: HA context
 *
 * Returns a pointer to the continuation type 1 IOCB packet.
 */
static inline cont_a64_entry_t *
qla2x00_prep_cont_type1_iocb(scsi_qla_host_t *vha)
{
	cont_a64_entry_t *cont_pkt;
	struct req_que *req = vha->req;

	/* Adjust ring index. */
	req->ring_index++;
	if (req->ring_index == req->length) {
		req->ring_index = 0;
		req->ring_ptr = req->ring;
	} else {
		req->ring_ptr++;
	}

	cont_pkt = (cont_a64_entry_t *)req->ring_ptr;

	/* Load packet defaults. */
	*((uint32_t *)(&cont_pkt->entry_type)) =
	    __constant_cpu_to_le32(CONTINUE_A64_TYPE);

	return (cont_pkt);
}

/**
 * qla2x00_build_scsi_iocbs_32() - Build IOCB command utilizing 32bit
 * capable IOCB types.
 *
 * @sp: SRB command to process
 * @cmd_pkt: Command type 2 IOCB
 * @tot_dsds: Total number of segments to transfer
 */
void qla2x00_build_scsi_iocbs_32(srb_t *sp, cmd_entry_t *cmd_pkt,
    uint16_t tot_dsds)
{
	uint16_t avail_dsds;
	uint32_t *cur_dsd;
	scsi_qla_host_t *vha;
	struct scsi_cmnd *cmd;
	struct scatterlist *sg;
	int i;

	cmd = sp->cmd;

	/* Update entry type to indicate Command Type 2 IOCB */
	*((uint32_t *)(&cmd_pkt->entry_type)) =
	    __constant_cpu_to_le32(COMMAND_TYPE);

	/* No data transfer */
	if (!scsi_bufflen(cmd) || cmd->sc_data_direction == DMA_NONE) {
		cmd_pkt->byte_count = __constant_cpu_to_le32(0);
		return;
	}

	vha = sp->fcport->vha;
	cmd_pkt->control_flags |= cpu_to_le16(qla2x00_get_cmd_direction(sp));

	/* Three DSDs are available in the Command Type 2 IOCB */
	avail_dsds = 3;
	cur_dsd = (uint32_t *)&cmd_pkt->dseg_0_address;

	/* Load data segments */
	scsi_for_each_sg(cmd, sg, tot_dsds, i) {
		cont_entry_t *cont_pkt;

		/* Allocate additional continuation packets? */
		if (avail_dsds == 0) {
			/*
			 * Seven DSDs are available in the Continuation
			 * Type 0 IOCB.
			 */
			cont_pkt = qla2x00_prep_cont_type0_iocb(vha);
			cur_dsd = (uint32_t *)&cont_pkt->dseg_0_address;
			avail_dsds = 7;
		}

		*cur_dsd++ = cpu_to_le32(sg_dma_address(sg));
		*cur_dsd++ = cpu_to_le32(sg_dma_len(sg));
		avail_dsds--;
	}
}

/**
 * qla2x00_build_scsi_iocbs_64() - Build IOCB command utilizing 64bit
 * capable IOCB types.
 *
 * @sp: SRB command to process
 * @cmd_pkt: Command type 3 IOCB
 * @tot_dsds: Total number of segments to transfer
 */
void qla2x00_build_scsi_iocbs_64(srb_t *sp, cmd_entry_t *cmd_pkt,
    uint16_t tot_dsds)
{
	uint16_t avail_dsds;
	uint32_t *cur_dsd;
	scsi_qla_host_t *vha;
	struct scsi_cmnd *cmd;
	struct scatterlist *sg;
	int i;

	cmd = sp->cmd;

	/* Update entry type to indicate Command Type 3 IOCB */
	*((uint32_t *)(&cmd_pkt->entry_type)) =
	    __constant_cpu_to_le32(COMMAND_A64_TYPE);

	/* No data transfer */
	if (!scsi_bufflen(cmd) || cmd->sc_data_direction == DMA_NONE) {
		cmd_pkt->byte_count = __constant_cpu_to_le32(0);
		return;
	}

	vha = sp->fcport->vha;
	cmd_pkt->control_flags |= cpu_to_le16(qla2x00_get_cmd_direction(sp));

	/* Two DSDs are available in the Command Type 3 IOCB */
	avail_dsds = 2;
	cur_dsd = (uint32_t *)&cmd_pkt->dseg_0_address;

	/* Load data segments */
	scsi_for_each_sg(cmd, sg, tot_dsds, i) {
		dma_addr_t sle_dma;
		cont_a64_entry_t *cont_pkt;

		/* Allocate additional continuation packets? */
		if (avail_dsds == 0) {
			/*
			 * Five DSDs are available in the Continuation
			 * Type 1 IOCB.
			 */
			cont_pkt = qla2x00_prep_cont_type1_iocb(vha);
			cur_dsd = (uint32_t *)cont_pkt->dseg_0_address;
			avail_dsds = 5;
		}

		sle_dma = sg_dma_address(sg);
		*cur_dsd++ = cpu_to_le32(LSD(sle_dma));
		*cur_dsd++ = cpu_to_le32(MSD(sle_dma));
		*cur_dsd++ = cpu_to_le32(sg_dma_len(sg));
		avail_dsds--;
	}
}

/**
 * qla2x00_start_scsi() - Send a SCSI command to the ISP
 * @sp: command to send to the ISP
 *
 * Returns non-zero if a failure occurred, else zero.
 */
int
qla2x00_start_scsi(srb_t *sp)
{
	int ret, nseg;
	unsigned long flags;
	scsi_qla_host_t *vha;
	struct scsi_cmnd *cmd;
	uint32_t *clr_ptr;
	uint32_t index;
	uint32_t handle;
	cmd_entry_t *cmd_pkt;
	uint16_t cnt;
	uint16_t req_cnt;
	uint16_t tot_dsds;
	struct device_reg_2xxx __iomem *reg;
	struct qla_hw_data *ha;
	struct req_que *req;
	struct rsp_que *rsp;

	/* Setup device pointers. */
	ret = 0;
	vha = sp->fcport->vha;
	ha = vha->hw;
	reg = &ha->iobase->isp;
	cmd = sp->cmd;
	req = ha->req_q_map[0];
	rsp = ha->rsp_q_map[0];
	/* So we know we haven't pci_map'ed anything yet */
	tot_dsds = 0;

	/* Send marker if required */
	if (vha->marker_needed != 0) {
		if (qla2x00_marker(vha, req, rsp, 0, 0, MK_SYNC_ALL)
		    != QLA_SUCCESS)
			return (QLA_FUNCTION_FAILED);
		vha->marker_needed = 0;
	}

	/* Acquire ring specific lock */
	spin_lock_irqsave(&ha->hardware_lock, flags);

	/* Check for room in outstanding command list. */
	handle = req->current_outstanding_cmd;
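	/*
	 * Handle 0 is reserved, so the scan below walks the table
	 * circularly from the last issued handle, wrapping back to 1;
	 * a full pass without a free slot means the queue is saturated.
	 */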
	for (index = 1; index < MAX_OUTSTANDING_COMMANDS; index++) {
		handle++;
		if (handle == MAX_OUTSTANDING_COMMANDS)
			handle = 1;
		if (!req->outstanding_cmds[handle])
			break;
	}
	if (index == MAX_OUTSTANDING_COMMANDS)
		goto queuing_error;

	/* Map the sg table so we have an accurate count of sg entries needed */
	if (scsi_sg_count(cmd)) {
		nseg = dma_map_sg(&ha->pdev->dev, scsi_sglist(cmd),
		    scsi_sg_count(cmd), cmd->sc_data_direction);
		if (unlikely(!nseg))
			goto queuing_error;
	} else
		nseg = 0;

	tot_dsds = nseg;

	/* Calculate the number of request entries needed. */
	req_cnt = ha->isp_ops->calc_req_entries(tot_dsds);
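	/*
	 * If the cached free count looks too small, refresh it from the
	 * firmware's out pointer: free entries are the ring-distance from
	 * ring_index to the out pointer.  The "+ 2" keeps a small cushion
	 * so the in pointer never catches up with the out pointer, which
	 * would make a full ring indistinguishable from an empty one.
	 */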
	if (req->cnt < (req_cnt + 2)) {
		cnt = RD_REG_WORD_RELAXED(ISP_REQ_Q_OUT(ha, reg));
		if (req->ring_index < cnt)
			req->cnt = cnt - req->ring_index;
		else
			req->cnt = req->length -
			    (req->ring_index - cnt);
	}
	if (req->cnt < (req_cnt + 2))
		goto queuing_error;

	/* Build command packet */
	req->current_outstanding_cmd = handle;
	req->outstanding_cmds[handle] = sp;
	sp->cmd->host_scribble = (unsigned char *)(unsigned long)handle;
	req->cnt -= req_cnt;

	cmd_pkt = (cmd_entry_t *)req->ring_ptr;
	cmd_pkt->handle = handle;
	/* Zero out remaining portion of packet. */
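	/* Skip the first 8 bytes: the entry header and the handle just set. */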
	clr_ptr = (uint32_t *)cmd_pkt + 2;
	memset(clr_ptr, 0, REQUEST_ENTRY_SIZE - 8);
	cmd_pkt->dseg_count = cpu_to_le16(tot_dsds);

	/* Set target ID and LUN number */
	SET_TARGET_ID(ha, cmd_pkt->target, sp->fcport->loop_id);
	cmd_pkt->lun = cpu_to_le16(sp->cmd->device->lun);

	/* Update tagged queuing modifier */
	cmd_pkt->control_flags = __constant_cpu_to_le16(CF_SIMPLE_TAG);

	/* Load SCSI command packet. */
	memcpy(cmd_pkt->scsi_cdb, cmd->cmnd, cmd->cmd_len);
	cmd_pkt->byte_count = cpu_to_le32((uint32_t)scsi_bufflen(cmd));

	/* Build IOCB segments */
	ha->isp_ops->build_iocbs(sp, cmd_pkt, tot_dsds);

	/* Set total data segment count. */
	cmd_pkt->entry_count = (uint8_t)req_cnt;
	wmb();

	/* Adjust ring index. */
	req->ring_index++;
	if (req->ring_index == req->length) {
		req->ring_index = 0;
		req->ring_ptr = req->ring;
	} else
		req->ring_ptr++;

	sp->flags |= SRB_DMA_VALID;

	/* Set chip new ring index. */
	WRT_REG_WORD(ISP_REQ_Q_IN(ha, reg), req->ring_index);
	RD_REG_WORD_RELAXED(ISP_REQ_Q_IN(ha, reg));	/* PCI Posting. */

	/* Manage unprocessed RIO/ZIO commands in response queue. */
	if (vha->flags.process_response_queue &&
	    rsp->ring_ptr->signature != RESPONSE_PROCESSED)
		qla2x00_process_response_queue(rsp);

	spin_unlock_irqrestore(&ha->hardware_lock, flags);
	return (QLA_SUCCESS);

queuing_error:
	if (tot_dsds)
		scsi_dma_unmap(cmd);

	spin_unlock_irqrestore(&ha->hardware_lock, flags);

	return (QLA_FUNCTION_FAILED);
}

/**
 * __qla2x00_marker() - Send a marker IOCB to the firmware.
 * @vha: HA context
 * @req: request queue to place the marker on
 * @rsp: response queue to poll while waiting for ring space
 * @loop_id: loop ID
 * @lun: LUN
 * @type: marker modifier
 *
 * Can be called from both normal and interrupt context.
 *
 * Returns non-zero if a failure occurred, else zero.
 */
int
__qla2x00_marker(struct scsi_qla_host *vha, struct req_que *req,
    struct rsp_que *rsp, uint16_t loop_id,
    uint16_t lun, uint8_t type)
{
	mrk_entry_t *mrk;
	struct mrk_entry_24xx *mrk24;
	struct qla_hw_data *ha = vha->hw;
	scsi_qla_host_t *base_vha = pci_get_drvdata(ha->pdev);

	mrk24 = NULL;
	mrk = (mrk_entry_t *)qla2x00_req_pkt(vha, req, rsp);
	if (mrk == NULL) {
		DEBUG2_3(printk("%s(%ld): failed to allocate Marker IOCB.\n",
		    __func__, base_vha->host_no));

		return (QLA_FUNCTION_FAILED);
	}

	mrk->entry_type = MARKER_TYPE;
	mrk->modifier = type;
	if (type != MK_SYNC_ALL) {
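		/*
		 * MK_SYNC_ALL synchronizes every target/LUN, so no
		 * addressing is needed; the other modifiers carry a
		 * target (and, for LUN-scoped markers, a LUN) in the
		 * format the firmware generation expects.
		 */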
		if (IS_FWI2_CAPABLE(ha)) {
			mrk24 = (struct mrk_entry_24xx *) mrk;
			mrk24->nport_handle = cpu_to_le16(loop_id);
			mrk24->lun[1] = LSB(lun);
			mrk24->lun[2] = MSB(lun);
			host_to_fcp_swap(mrk24->lun, sizeof(mrk24->lun));
			mrk24->vp_index = vha->vp_idx;
			mrk24->handle = MAKE_HANDLE(req->id, mrk24->handle);
		} else {
			SET_TARGET_ID(ha, mrk->target, loop_id);
			mrk->lun = cpu_to_le16(lun);
		}
	}
	wmb();

	qla2x00_isp_cmd(vha, req);

	return (QLA_SUCCESS);
}

int
qla2x00_marker(struct scsi_qla_host *vha, struct req_que *req,
    struct rsp_que *rsp, uint16_t loop_id, uint16_t lun,
    uint8_t type)
{
	int ret;
	unsigned long flags = 0;

	spin_lock_irqsave(&vha->hw->hardware_lock, flags);
	ret = __qla2x00_marker(vha, req, rsp, loop_id, lun, type);
	spin_unlock_irqrestore(&vha->hw->hardware_lock, flags);

	return (ret);
}

/**
 * qla2x00_req_pkt() - Retrieve a request packet from the request ring.
 * @vha: HA context
 * @req: request queue to take the packet from
 * @rsp: response queue to poll while waiting for ring space
 *
 * Note: The caller must hold the hardware lock before calling this routine.
 *
 * Returns NULL if function failed, else, a pointer to the request packet.
 */
static request_t *
qla2x00_req_pkt(struct scsi_qla_host *vha, struct req_que *req,
    struct rsp_que *rsp)
{
	struct qla_hw_data *ha = vha->hw;
	device_reg_t __iomem *reg = ISP_QUE_REG(ha, req->id);
	request_t *pkt = NULL;
	uint16_t cnt;
	uint32_t *dword_ptr;
	uint32_t timer;
	uint16_t req_cnt = 1;

	/* Wait 1 second for slot. */
	for (timer = HZ; timer; timer--) {
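		/*
		 * When the cached count looks insufficient, refresh it
		 * from the firmware's out pointer; if there is still no
		 * room, the hardware lock is dropped below so response
		 * completions can be polled (where safe) before retrying.
		 */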
		if ((req_cnt + 2) >= req->cnt) {
			/* Calculate number of free request entries. */
			if (ha->mqenable)
				cnt = (uint16_t)
				    RD_REG_DWORD(&reg->isp25mq.req_q_out);
			else {
				if (IS_FWI2_CAPABLE(ha))
					cnt = (uint16_t)RD_REG_DWORD(
					    &reg->isp24.req_q_out);
				else
					cnt = qla2x00_debounce_register(
					    ISP_REQ_Q_OUT(ha, &reg->isp));
			}
			if (req->ring_index < cnt)
				req->cnt = cnt - req->ring_index;
			else
				req->cnt = req->length -
				    (req->ring_index - cnt);
		}
		/* If room for request in request ring. */
		if ((req_cnt + 2) < req->cnt) {
			req->cnt--;
			pkt = req->ring_ptr;

			/* Zero out packet. */
			dword_ptr = (uint32_t *)pkt;
			for (cnt = 0; cnt < REQUEST_ENTRY_SIZE / 4; cnt++)
				*dword_ptr++ = 0;

			/* Set entry count. */
			pkt->entry_count = 1;

			break;
		}

		/* Release ring specific lock */
		spin_unlock_irq(&ha->hardware_lock);

		udelay(2);	/* 2 us */

		/* Check for pending interrupts. */
		/* During init we issue marker directly */
		if (!vha->marker_needed && !vha->flags.init_done)
			qla2x00_poll(rsp);
		spin_lock_irq(&ha->hardware_lock);
	}
	if (!pkt) {
		DEBUG2_3(printk("%s(): **** FAILED ****\n", __func__));
	}

	return (pkt);
}

/**
 * qla2x00_isp_cmd() - Modify the request ring pointer.
 * @vha: HA context
 * @req: request queue whose in pointer is advanced
 *
 * Note: The caller must hold the hardware lock before calling this routine.
 */
static void
qla2x00_isp_cmd(struct scsi_qla_host *vha, struct req_que *req)
{
	struct qla_hw_data *ha = vha->hw;
	device_reg_t __iomem *reg = ISP_QUE_REG(ha, req->id);
	struct device_reg_2xxx __iomem *ioreg = &ha->iobase->isp;

	DEBUG5(printk("%s(): IOCB data:\n", __func__));
	DEBUG5(qla2x00_dump_buffer(
	    (uint8_t *)req->ring_ptr, REQUEST_ENTRY_SIZE));

	/* Adjust ring index. */
	req->ring_index++;
	if (req->ring_index == req->length) {
		req->ring_index = 0;
		req->ring_ptr = req->ring;
	} else
		req->ring_ptr++;

	/* Set chip new ring index. */
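	/*
	 * Each doorbell write below is followed by a read so the posted
	 * PCI write is flushed and the firmware sees the new index promptly.
	 */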
	if (ha->mqenable) {
		WRT_REG_DWORD(&reg->isp25mq.req_q_in, req->ring_index);
		RD_REG_DWORD(&ioreg->hccr);
	} else {
		if (IS_FWI2_CAPABLE(ha)) {
			WRT_REG_DWORD(&reg->isp24.req_q_in, req->ring_index);
			RD_REG_DWORD_RELAXED(&reg->isp24.req_q_in);
		} else {
			WRT_REG_WORD(ISP_REQ_Q_IN(ha, &reg->isp),
			    req->ring_index);
			RD_REG_WORD_RELAXED(ISP_REQ_Q_IN(ha, &reg->isp));
		}
	}
}

/**
 * qla24xx_calc_iocbs() - Determine number of Command Type 7 and
 * Continuation Type 1 IOCBs to allocate.
 *
 * @dsds: number of data segment descriptors needed
 *
 * Returns the number of IOCB entries needed to store @dsds.
 */
static inline uint16_t
qla24xx_calc_iocbs(uint16_t dsds)
{
	uint16_t iocbs;

	iocbs = 1;
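	/*
	 * The Command Type 7 IOCB holds a single DSD; each Continuation
	 * Type 1 IOCB holds 5 more, so iocbs = 1 + ceil((dsds - 1) / 5).
	 * For example, dsds == 11 needs 1 + ceil(10 / 5) = 3 entries.
	 */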
	if (dsds > 1) {
		iocbs += (dsds - 1) / 5;
		if ((dsds - 1) % 5)
			iocbs++;
	}
	return iocbs;
}

/**
 * qla24xx_build_scsi_iocbs() - Build IOCB command utilizing Command Type 7
 * IOCB types.
 *
 * @sp: SRB command to process
 * @cmd_pkt: Command type 7 IOCB
 * @tot_dsds: Total number of segments to transfer
 */
static inline void
qla24xx_build_scsi_iocbs(srb_t *sp, struct cmd_type_7 *cmd_pkt,
    uint16_t tot_dsds)
{
	uint16_t avail_dsds;
	uint32_t *cur_dsd;
	scsi_qla_host_t *vha;
	struct scsi_cmnd *cmd;
	struct scatterlist *sg;
	int i;
	struct req_que *req;

	cmd = sp->cmd;

	/* Update entry type to indicate Command Type 7 IOCB */
	*((uint32_t *)(&cmd_pkt->entry_type)) =
	    __constant_cpu_to_le32(COMMAND_TYPE_7);

	/* No data transfer */
	if (!scsi_bufflen(cmd) || cmd->sc_data_direction == DMA_NONE) {
		cmd_pkt->byte_count = __constant_cpu_to_le32(0);
		return;
	}

	vha = sp->fcport->vha;
	req = vha->req;

	/* Set transfer direction */
	if (cmd->sc_data_direction == DMA_TO_DEVICE) {
		cmd_pkt->task_mgmt_flags =
		    __constant_cpu_to_le16(TMF_WRITE_DATA);
		sp->fcport->vha->hw->qla_stats.output_bytes +=
		    scsi_bufflen(sp->cmd);
	} else if (cmd->sc_data_direction == DMA_FROM_DEVICE) {
		cmd_pkt->task_mgmt_flags =
		    __constant_cpu_to_le16(TMF_READ_DATA);
		sp->fcport->vha->hw->qla_stats.input_bytes +=
		    scsi_bufflen(sp->cmd);
	}

	/* One DSD is available in the Command Type 7 IOCB */
	avail_dsds = 1;
	cur_dsd = (uint32_t *)&cmd_pkt->dseg_0_address;

	/* Load data segments */
	scsi_for_each_sg(cmd, sg, tot_dsds, i) {
		dma_addr_t sle_dma;
		cont_a64_entry_t *cont_pkt;

		/* Allocate additional continuation packets? */
		if (avail_dsds == 0) {
			/*
			 * Five DSDs are available in the Continuation
			 * Type 1 IOCB.
			 */
			cont_pkt = qla2x00_prep_cont_type1_iocb(vha);
			cur_dsd = (uint32_t *)cont_pkt->dseg_0_address;
			avail_dsds = 5;
		}

		sle_dma = sg_dma_address(sg);
		*cur_dsd++ = cpu_to_le32(LSD(sle_dma));
		*cur_dsd++ = cpu_to_le32(MSD(sle_dma));
		*cur_dsd++ = cpu_to_le32(sg_dma_len(sg));
		avail_dsds--;
	}
}

/**
 * qla24xx_start_scsi() - Send a SCSI command to the ISP
 * @sp: command to send to the ISP
 *
 * Returns non-zero if a failure occurred, else zero.
 */
int
qla24xx_start_scsi(srb_t *sp)
{
	int ret, nseg;
	unsigned long flags;
	uint32_t *clr_ptr;
	uint32_t index;
	uint32_t handle;
	struct cmd_type_7 *cmd_pkt;
	uint16_t cnt;
	uint16_t req_cnt;
	uint16_t tot_dsds;
	struct req_que *req = NULL;
	struct rsp_que *rsp = NULL;
	struct scsi_cmnd *cmd = sp->cmd;
	struct scsi_qla_host *vha = sp->fcport->vha;
	struct qla_hw_data *ha = vha->hw;

	/* Setup device pointers. */
	ret = 0;

	qla25xx_set_que(sp, &rsp);
	req = vha->req;

	/* So we know we haven't pci_map'ed anything yet */
	tot_dsds = 0;

	/* Send marker if required */
	if (vha->marker_needed != 0) {
		if (qla2x00_marker(vha, req, rsp, 0, 0, MK_SYNC_ALL)
		    != QLA_SUCCESS)
			return QLA_FUNCTION_FAILED;
		vha->marker_needed = 0;
	}

	/* Acquire ring specific lock */
	spin_lock_irqsave(&ha->hardware_lock, flags);

	/* Check for room in outstanding command list. */
	handle = req->current_outstanding_cmd;
	for (index = 1; index < MAX_OUTSTANDING_COMMANDS; index++) {
		handle++;
		if (handle == MAX_OUTSTANDING_COMMANDS)
			handle = 1;
		if (!req->outstanding_cmds[handle])
			break;
	}
	if (index == MAX_OUTSTANDING_COMMANDS)
		goto queuing_error;

	/* Map the sg table so we have an accurate count of sg entries needed */
	if (scsi_sg_count(cmd)) {
		nseg = dma_map_sg(&ha->pdev->dev, scsi_sglist(cmd),
		    scsi_sg_count(cmd), cmd->sc_data_direction);
		if (unlikely(!nseg))
			goto queuing_error;
	} else
		nseg = 0;

	tot_dsds = nseg;

	req_cnt = qla24xx_calc_iocbs(tot_dsds);
	if (req->cnt < (req_cnt + 2)) {
		cnt = RD_REG_DWORD_RELAXED(req->req_q_out);

		if (req->ring_index < cnt)
			req->cnt = cnt - req->ring_index;
		else
			req->cnt = req->length -
			    (req->ring_index - cnt);
	}
	if (req->cnt < (req_cnt + 2))
		goto queuing_error;

	/* Build command packet. */
	req->current_outstanding_cmd = handle;
	req->outstanding_cmds[handle] = sp;
	sp->cmd->host_scribble = (unsigned char *)(unsigned long)handle;
	req->cnt -= req_cnt;

	cmd_pkt = (struct cmd_type_7 *)req->ring_ptr;
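	/*
	 * The queue id is packed into the upper word of the handle so
	 * the completion path can route the response back to the right
	 * request queue when multiple queues are in use.
	 */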
	cmd_pkt->handle = MAKE_HANDLE(req->id, handle);

	/* Zero out remaining portion of packet. */
	/* tagged queuing modifier -- default is TSK_SIMPLE (0). */
	clr_ptr = (uint32_t *)cmd_pkt + 2;
	memset(clr_ptr, 0, REQUEST_ENTRY_SIZE - 8);
	cmd_pkt->dseg_count = cpu_to_le16(tot_dsds);

	/* Set NPORT-ID and LUN number */
	cmd_pkt->nport_handle = cpu_to_le16(sp->fcport->loop_id);
	cmd_pkt->port_id[0] = sp->fcport->d_id.b.al_pa;
	cmd_pkt->port_id[1] = sp->fcport->d_id.b.area;
	cmd_pkt->port_id[2] = sp->fcport->d_id.b.domain;
	cmd_pkt->vp_index = sp->fcport->vp_idx;

	int_to_scsilun(sp->cmd->device->lun, &cmd_pkt->lun);
	host_to_fcp_swap((uint8_t *)&cmd_pkt->lun, sizeof(cmd_pkt->lun));

	/* Load SCSI command packet. */
	memcpy(cmd_pkt->fcp_cdb, cmd->cmnd, cmd->cmd_len);
	host_to_fcp_swap(cmd_pkt->fcp_cdb, sizeof(cmd_pkt->fcp_cdb));

	cmd_pkt->byte_count = cpu_to_le32((uint32_t)scsi_bufflen(cmd));

	/* Build IOCB segments */
	qla24xx_build_scsi_iocbs(sp, cmd_pkt, tot_dsds);

	/* Set total data segment count. */
	cmd_pkt->entry_count = (uint8_t)req_cnt;
	/* Specify response queue number where completion should happen */
	cmd_pkt->entry_status = (uint8_t) rsp->id;
	wmb();

	/* Adjust ring index. */
	req->ring_index++;
	if (req->ring_index == req->length) {
		req->ring_index = 0;
		req->ring_ptr = req->ring;
	} else
		req->ring_ptr++;

	sp->flags |= SRB_DMA_VALID;

	/* Set chip new ring index. */
	WRT_REG_DWORD(req->req_q_in, req->ring_index);
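	/* Read back to flush the posted PCI doorbell write. */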
	RD_REG_DWORD_RELAXED(&ha->iobase->isp24.hccr);

	/* Manage unprocessed RIO/ZIO commands in response queue. */
	if (vha->flags.process_response_queue &&
	    rsp->ring_ptr->signature != RESPONSE_PROCESSED)
		qla24xx_process_response_queue(vha, rsp);

	spin_unlock_irqrestore(&ha->hardware_lock, flags);
	return QLA_SUCCESS;

queuing_error:
	if (tot_dsds)
		scsi_dma_unmap(cmd);

	spin_unlock_irqrestore(&ha->hardware_lock, flags);

	return QLA_FUNCTION_FAILED;
}

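/*
 * Pick the response queue for this command.  When CPU affinity is
 * enabled, the issuing CPU is mapped onto one of the extra response
 * queues (1..max_rsp_queues - 1); otherwise, or when the CPU has no
 * matching queue, fall back to the base queue 0 so the driver still
 * works in single queue mode if multiqueue setup fails.
 */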
static void qla25xx_set_que(srb_t *sp, struct rsp_que **rsp)
{
	struct scsi_cmnd *cmd = sp->cmd;
	struct qla_hw_data *ha = sp->fcport->vha->hw;
	int affinity = cmd->request->cpu;

	if (ha->flags.cpu_affinity_enabled && affinity >= 0 &&
	    affinity < ha->max_rsp_queues - 1)
		*rsp = ha->rsp_q_map[affinity + 1];
	else
		*rsp = ha->rsp_q_map[0];
}