[SCSI] small whitespace cleanup for qlogic driver
drivers/scsi/qla2xxx/qla_iocb.c
/*
 * QLogic Fibre Channel HBA Driver
 * Copyright (c) 2003-2005 QLogic Corporation
 *
 * See LICENSE.qla2xxx for copyright and licensing details.
 */
#include "qla_def.h"

#include <linux/blkdev.h>
#include <linux/delay.h>

#include <scsi/scsi_tcq.h>

static inline uint16_t qla2x00_get_cmd_direction(struct scsi_cmnd *cmd);
static inline cont_entry_t *qla2x00_prep_cont_type0_iocb(scsi_qla_host_t *);
static inline cont_a64_entry_t *qla2x00_prep_cont_type1_iocb(scsi_qla_host_t *);
static request_t *qla2x00_req_pkt(scsi_qla_host_t *ha);

/**
 * qla2x00_get_cmd_direction() - Determine control_flag data direction.
 * @cmd: SCSI command
 *
 * Returns the proper CF_* direction based on CDB.
 */
static inline uint16_t
qla2x00_get_cmd_direction(struct scsi_cmnd *cmd)
{
	uint16_t cflags;

	cflags = 0;

	/* Set transfer direction */
	if (cmd->sc_data_direction == DMA_TO_DEVICE)
		cflags = CF_WRITE;
	else if (cmd->sc_data_direction == DMA_FROM_DEVICE)
		cflags = CF_READ;
	return (cflags);
}
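
/*
 * Example (illustrative, not part of the driver): a WRITE(10) arrives
 * with sc_data_direction set to DMA_TO_DEVICE and yields CF_WRITE; a
 * READ(10) with DMA_FROM_DEVICE yields CF_READ; a TEST UNIT READY
 * moves no data (DMA_NONE) and falls through to 0.
 */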

/**
 * qla2x00_calc_iocbs_32() - Determine number of Command Type 2 and
 * Continuation Type 0 IOCBs to allocate.
 *
 * @dsds: number of data segment descriptors needed
 *
 * Returns the number of IOCB entries needed to store @dsds.
 */
uint16_t
qla2x00_calc_iocbs_32(uint16_t dsds)
{
	uint16_t iocbs;

	iocbs = 1;
	if (dsds > 3) {
		iocbs += (dsds - 3) / 7;
		if ((dsds - 3) % 7)
			iocbs++;
	}
	return (iocbs);
}
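
/*
 * Worked example (illustrative): a Command Type 2 IOCB carries 3 DSDs
 * and each Continuation Type 0 IOCB carries 7 more, so:
 *
 *	qla2x00_calc_iocbs_32(3)  == 1              (fits in the command IOCB)
 *	qla2x00_calc_iocbs_32(24) == 1 + 21/7 == 4
 *	qla2x00_calc_iocbs_32(25) == 1 + 3 + 1 == 5 (22 % 7 != 0)
 */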

/**
 * qla2x00_calc_iocbs_64() - Determine number of Command Type 3 and
 * Continuation Type 1 IOCBs to allocate.
 *
 * @dsds: number of data segment descriptors needed
 *
 * Returns the number of IOCB entries needed to store @dsds.
 */
uint16_t
qla2x00_calc_iocbs_64(uint16_t dsds)
{
	uint16_t iocbs;

	iocbs = 1;
	if (dsds > 2) {
		iocbs += (dsds - 2) / 5;
		if ((dsds - 2) % 5)
			iocbs++;
	}
	return (iocbs);
}
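
/*
 * Worked example (illustrative): with 2 DSDs in the Command Type 3
 * IOCB and 5 per Continuation Type 1 IOCB:
 *
 *	qla2x00_calc_iocbs_64(2)  == 1
 *	qla2x00_calc_iocbs_64(12) == 1 + 10/5 == 3
 *	qla2x00_calc_iocbs_64(13) == 1 + 2 + 1 == 4 (11 % 5 != 0)
 */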

/**
 * qla2x00_prep_cont_type0_iocb() - Initialize a Continuation Type 0 IOCB.
 * @ha: HA context
 *
 * Returns a pointer to the Continuation Type 0 IOCB packet.
 */
static inline cont_entry_t *
qla2x00_prep_cont_type0_iocb(scsi_qla_host_t *ha)
{
	cont_entry_t *cont_pkt;

	/* Adjust ring index. */
	ha->req_ring_index++;
	if (ha->req_ring_index == ha->request_q_length) {
		ha->req_ring_index = 0;
		ha->request_ring_ptr = ha->request_ring;
	} else {
		ha->request_ring_ptr++;
	}

	cont_pkt = (cont_entry_t *)ha->request_ring_ptr;

	/* Load packet defaults. */
	*((uint32_t *)(&cont_pkt->entry_type)) =
	    __constant_cpu_to_le32(CONTINUE_TYPE);

	return (cont_pkt);
}

/**
 * qla2x00_prep_cont_type1_iocb() - Initialize a Continuation Type 1 IOCB.
 * @ha: HA context
 *
 * Returns a pointer to the Continuation Type 1 IOCB packet.
 */
static inline cont_a64_entry_t *
qla2x00_prep_cont_type1_iocb(scsi_qla_host_t *ha)
{
	cont_a64_entry_t *cont_pkt;

	/* Adjust ring index. */
	ha->req_ring_index++;
	if (ha->req_ring_index == ha->request_q_length) {
		ha->req_ring_index = 0;
		ha->request_ring_ptr = ha->request_ring;
	} else {
		ha->request_ring_ptr++;
	}

	cont_pkt = (cont_a64_entry_t *)ha->request_ring_ptr;

	/* Load packet defaults. */
	*((uint32_t *)(&cont_pkt->entry_type)) =
	    __constant_cpu_to_le32(CONTINUE_A64_TYPE);

	return (cont_pkt);
}
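
/*
 * Both prep routines above advance the request ring by one entry with
 * wrap-around.  Illustrative sketch of the shared pattern, assuming a
 * request_q_length of 128:
 *
 *	index 126 -> 127	(request_ring_ptr++)
 *	index 127 -> 0		(pointer rewinds to ha->request_ring)
 */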

/**
 * qla2x00_build_scsi_iocbs_32() - Build IOCB command utilizing 32bit
 * capable IOCB types.
 *
 * @sp: SRB command to process
 * @cmd_pkt: Command type 2 IOCB
 * @tot_dsds: Total number of segments to transfer
 */
void qla2x00_build_scsi_iocbs_32(srb_t *sp, cmd_entry_t *cmd_pkt,
    uint16_t tot_dsds)
{
	uint16_t avail_dsds;
	uint32_t *cur_dsd;
	scsi_qla_host_t *ha;
	struct scsi_cmnd *cmd;

	cmd = sp->cmd;

	/* Update entry type to indicate Command Type 2 IOCB */
	*((uint32_t *)(&cmd_pkt->entry_type)) =
	    __constant_cpu_to_le32(COMMAND_TYPE);

	/* No data transfer */
	if (cmd->request_bufflen == 0 || cmd->sc_data_direction == DMA_NONE) {
		cmd_pkt->byte_count = __constant_cpu_to_le32(0);
		return;
	}

	ha = sp->ha;

	cmd_pkt->control_flags |= cpu_to_le16(qla2x00_get_cmd_direction(cmd));

	/* Three DSDs are available in the Command Type 2 IOCB */
	avail_dsds = 3;
	cur_dsd = (uint32_t *)&cmd_pkt->dseg_0_address;

	/* Load data segments */
	if (cmd->use_sg != 0) {
		struct scatterlist *cur_seg;
		struct scatterlist *end_seg;

		cur_seg = (struct scatterlist *)cmd->request_buffer;
		end_seg = cur_seg + tot_dsds;
		while (cur_seg < end_seg) {
			cont_entry_t *cont_pkt;

			/* Allocate additional continuation packets? */
			if (avail_dsds == 0) {
				/*
				 * Seven DSDs are available in the Continuation
				 * Type 0 IOCB.
				 */
				cont_pkt = qla2x00_prep_cont_type0_iocb(ha);
				cur_dsd = (uint32_t *)&cont_pkt->dseg_0_address;
				avail_dsds = 7;
			}

			*cur_dsd++ = cpu_to_le32(sg_dma_address(cur_seg));
			*cur_dsd++ = cpu_to_le32(sg_dma_len(cur_seg));
			avail_dsds--;

			cur_seg++;
		}
	} else {
		*cur_dsd++ = cpu_to_le32(sp->dma_handle);
		*cur_dsd++ = cpu_to_le32(cmd->request_bufflen);
	}
}
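
/*
 * Illustrative DSD layout for the routine above: a 10-element S/G list
 * puts 3 address/length pairs in the Command Type 2 IOCB and the
 * remaining 7 in one Continuation Type 0 IOCB, matching
 * qla2x00_calc_iocbs_32(10) == 2 entries on the ring.
 */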

/**
 * qla2x00_build_scsi_iocbs_64() - Build IOCB command utilizing 64bit
 * capable IOCB types.
 *
 * @sp: SRB command to process
 * @cmd_pkt: Command type 3 IOCB
 * @tot_dsds: Total number of segments to transfer
 */
void qla2x00_build_scsi_iocbs_64(srb_t *sp, cmd_entry_t *cmd_pkt,
    uint16_t tot_dsds)
{
	uint16_t avail_dsds;
	uint32_t *cur_dsd;
	scsi_qla_host_t *ha;
	struct scsi_cmnd *cmd;

	cmd = sp->cmd;

	/* Update entry type to indicate Command Type 3 IOCB */
	*((uint32_t *)(&cmd_pkt->entry_type)) =
	    __constant_cpu_to_le32(COMMAND_A64_TYPE);

	/* No data transfer */
	if (cmd->request_bufflen == 0 || cmd->sc_data_direction == DMA_NONE) {
		cmd_pkt->byte_count = __constant_cpu_to_le32(0);
		return;
	}

	ha = sp->ha;

	cmd_pkt->control_flags |= cpu_to_le16(qla2x00_get_cmd_direction(cmd));

	/* Two DSDs are available in the Command Type 3 IOCB */
	avail_dsds = 2;
	cur_dsd = (uint32_t *)&cmd_pkt->dseg_0_address;

	/* Load data segments */
	if (cmd->use_sg != 0) {
		struct scatterlist *cur_seg;
		struct scatterlist *end_seg;

		cur_seg = (struct scatterlist *)cmd->request_buffer;
		end_seg = cur_seg + tot_dsds;
		while (cur_seg < end_seg) {
			dma_addr_t sle_dma;
			cont_a64_entry_t *cont_pkt;

			/* Allocate additional continuation packets? */
			if (avail_dsds == 0) {
				/*
				 * Five DSDs are available in the Continuation
				 * Type 1 IOCB.
				 */
				cont_pkt = qla2x00_prep_cont_type1_iocb(ha);
				cur_dsd = (uint32_t *)cont_pkt->dseg_0_address;
				avail_dsds = 5;
			}

			sle_dma = sg_dma_address(cur_seg);
			*cur_dsd++ = cpu_to_le32(LSD(sle_dma));
			*cur_dsd++ = cpu_to_le32(MSD(sle_dma));
			*cur_dsd++ = cpu_to_le32(sg_dma_len(cur_seg));
			avail_dsds--;

			cur_seg++;
		}
	} else {
		*cur_dsd++ = cpu_to_le32(LSD(sp->dma_handle));
		*cur_dsd++ = cpu_to_le32(MSD(sp->dma_handle));
		*cur_dsd++ = cpu_to_le32(cmd->request_bufflen);
	}
}

/**
 * qla2x00_start_scsi() - Send a SCSI command to the ISP
 * @sp: command to send to the ISP
 *
 * Returns non-zero if a failure occurred, else zero.
 */
int
qla2x00_start_scsi(srb_t *sp)
{
	int ret;
	unsigned long flags;
	scsi_qla_host_t *ha;
	struct scsi_cmnd *cmd;
	uint32_t *clr_ptr;
	uint32_t index;
	uint32_t handle;
	cmd_entry_t *cmd_pkt;
	struct scatterlist *sg;
	uint16_t cnt;
	uint16_t req_cnt;
	uint16_t tot_dsds;
	struct device_reg_2xxx __iomem *reg;

	/* Setup device pointers. */
	ret = 0;
	ha = sp->ha;
	reg = &ha->iobase->isp;
	cmd = sp->cmd;
	/* So we know we haven't pci_map'ed anything yet */
	tot_dsds = 0;

	/* Send marker if required */
	if (ha->marker_needed != 0) {
		if (qla2x00_marker(ha, 0, 0, MK_SYNC_ALL) != QLA_SUCCESS) {
			return (QLA_FUNCTION_FAILED);
		}
		ha->marker_needed = 0;
	}

	/* Acquire ring specific lock */
	spin_lock_irqsave(&ha->hardware_lock, flags);

	/* Check for room in outstanding command list. */
	handle = ha->current_outstanding_cmd;
	for (index = 1; index < MAX_OUTSTANDING_COMMANDS; index++) {
		handle++;
		if (handle == MAX_OUTSTANDING_COMMANDS)
			handle = 1;
		if (ha->outstanding_cmds[handle] == 0)
			break;
	}
	if (index == MAX_OUTSTANDING_COMMANDS)
		goto queuing_error;
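
	/*
	 * Illustrative: the loop above scans the handle space circularly,
	 * starting just past the most recently issued handle.  With
	 * current_outstanding_cmd == MAX_OUTSTANDING_COMMANDS - 1, the
	 * first slot probed is handle 1; the scan never yields handle 0,
	 * which travels in host_scribble and indexes outstanding_cmds[].
	 */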

	/* Map the sg table so we have an accurate count of sg entries needed */
	if (cmd->use_sg) {
		sg = (struct scatterlist *) cmd->request_buffer;
		tot_dsds = pci_map_sg(ha->pdev, sg, cmd->use_sg,
		    cmd->sc_data_direction);
		if (tot_dsds == 0)
			goto queuing_error;
	} else if (cmd->request_bufflen) {
		dma_addr_t req_dma;

		req_dma = pci_map_single(ha->pdev, cmd->request_buffer,
		    cmd->request_bufflen, cmd->sc_data_direction);
		if (dma_mapping_error(req_dma))
			goto queuing_error;

		sp->dma_handle = req_dma;
		tot_dsds = 1;
	}

	/* Calculate the number of request entries needed. */
	req_cnt = ha->isp_ops.calc_req_entries(tot_dsds);
	if (ha->req_q_cnt < (req_cnt + 2)) {
		cnt = RD_REG_WORD_RELAXED(ISP_REQ_Q_OUT(ha, reg));
		if (ha->req_ring_index < cnt)
			ha->req_q_cnt = cnt - ha->req_ring_index;
		else
			ha->req_q_cnt = ha->request_q_length -
			    (ha->req_ring_index - cnt);
	}
	if (ha->req_q_cnt < (req_cnt + 2))
		goto queuing_error;
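
	/*
	 * Worked example (illustrative) of the free-entry math above,
	 * assuming request_q_length == 128: with req_ring_index == 100
	 * and a hardware out-pointer of cnt == 90, free entries are
	 * 128 - (100 - 90) = 118; with cnt == 110 they are
	 * 110 - 100 = 10, so a command needing req_cnt > 8 entries
	 * (the check keeps 2 entries of slack) is bounced to
	 * queuing_error.
	 */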

	/* Build command packet */
	ha->current_outstanding_cmd = handle;
	ha->outstanding_cmds[handle] = sp;
	sp->ha = ha;
	sp->cmd->host_scribble = (unsigned char *)(unsigned long)handle;
	ha->req_q_cnt -= req_cnt;

	cmd_pkt = (cmd_entry_t *)ha->request_ring_ptr;
	cmd_pkt->handle = handle;
	/* Zero out remaining portion of packet. */
	clr_ptr = (uint32_t *)cmd_pkt + 2;
	memset(clr_ptr, 0, REQUEST_ENTRY_SIZE - 8);
	cmd_pkt->dseg_count = cpu_to_le16(tot_dsds);

	/* Set target ID and LUN number */
	SET_TARGET_ID(ha, cmd_pkt->target, sp->fcport->loop_id);
	cmd_pkt->lun = cpu_to_le16(sp->cmd->device->lun);

	/* Update tagged queuing modifier */
	cmd_pkt->control_flags = __constant_cpu_to_le16(CF_SIMPLE_TAG);

	/* Load SCSI command packet. */
	memcpy(cmd_pkt->scsi_cdb, cmd->cmnd, cmd->cmd_len);
	cmd_pkt->byte_count = cpu_to_le32((uint32_t)cmd->request_bufflen);

	/* Build IOCB segments */
	ha->isp_ops.build_iocbs(sp, cmd_pkt, tot_dsds);

	/* Set total data segment count. */
	cmd_pkt->entry_count = (uint8_t)req_cnt;
	wmb();

	/* Adjust ring index. */
	ha->req_ring_index++;
	if (ha->req_ring_index == ha->request_q_length) {
		ha->req_ring_index = 0;
		ha->request_ring_ptr = ha->request_ring;
	} else
		ha->request_ring_ptr++;

	sp->flags |= SRB_DMA_VALID;

	/* Set chip new ring index. */
	WRT_REG_WORD(ISP_REQ_Q_IN(ha, reg), ha->req_ring_index);
	RD_REG_WORD_RELAXED(ISP_REQ_Q_IN(ha, reg));	/* PCI Posting. */

	/* Manage unprocessed RIO/ZIO commands in response queue. */
	if (ha->flags.process_response_queue &&
	    ha->response_ring_ptr->signature != RESPONSE_PROCESSED)
		qla2x00_process_response_queue(ha);

	spin_unlock_irqrestore(&ha->hardware_lock, flags);
	return (QLA_SUCCESS);

queuing_error:
	if (cmd->use_sg && tot_dsds) {
		sg = (struct scatterlist *) cmd->request_buffer;
		pci_unmap_sg(ha->pdev, sg, cmd->use_sg,
		    cmd->sc_data_direction);
	} else if (tot_dsds) {
		pci_unmap_single(ha->pdev, sp->dma_handle,
		    cmd->request_bufflen, cmd->sc_data_direction);
	}
	spin_unlock_irqrestore(&ha->hardware_lock, flags);

	return (QLA_FUNCTION_FAILED);
}

/**
 * qla2x00_marker() - Send a marker IOCB to the firmware.
 * @ha: HA context
 * @loop_id: loop ID
 * @lun: LUN
 * @type: marker modifier
 *
 * Can be called from both normal and interrupt context.
 *
 * Returns non-zero if a failure occurred, else zero.
 */
int
__qla2x00_marker(scsi_qla_host_t *ha, uint16_t loop_id, uint16_t lun,
    uint8_t type)
{
	mrk_entry_t *mrk;
	struct mrk_entry_24xx *mrk24;

	mrk24 = NULL;
	mrk = (mrk_entry_t *)qla2x00_req_pkt(ha);
	if (mrk == NULL) {
		DEBUG2_3(printk("%s(%ld): failed to allocate Marker IOCB.\n",
		    __func__, ha->host_no));

		return (QLA_FUNCTION_FAILED);
	}

	mrk->entry_type = MARKER_TYPE;
	mrk->modifier = type;
	if (type != MK_SYNC_ALL) {
		if (IS_QLA24XX(ha) || IS_QLA54XX(ha)) {
			mrk24 = (struct mrk_entry_24xx *) mrk;
			mrk24->nport_handle = cpu_to_le16(loop_id);
			mrk24->lun[1] = LSB(lun);
			mrk24->lun[2] = MSB(lun);
		} else {
			SET_TARGET_ID(ha, mrk->target, loop_id);
			mrk->lun = cpu_to_le16(lun);
		}
	}
	wmb();

	qla2x00_isp_cmd(ha);

	return (QLA_SUCCESS);
}

int
qla2x00_marker(scsi_qla_host_t *ha, uint16_t loop_id, uint16_t lun,
    uint8_t type)
{
	int ret;
	unsigned long flags = 0;

	spin_lock_irqsave(&ha->hardware_lock, flags);
	ret = __qla2x00_marker(ha, loop_id, lun, type);
	spin_unlock_irqrestore(&ha->hardware_lock, flags);

	return (ret);
}
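
/*
 * The pair above is the usual locked/unlocked split: a caller already
 * holding ha->hardware_lock uses __qla2x00_marker() directly, while
 * qla2x00_marker() takes the lock for everyone else.  Hedged usage
 * sketch from process context (MK_SYNC_ID_LUN is assumed here, from
 * the marker modifiers in qla_def.h):
 *
 *	if (qla2x00_marker(ha, loop_id, lun, MK_SYNC_ID_LUN) != QLA_SUCCESS)
 *		return QLA_FUNCTION_FAILED;
 */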

/**
 * qla2x00_req_pkt() - Retrieve a request packet from the request ring.
 * @ha: HA context
 *
 * Note: The caller must hold the hardware lock before calling this routine.
 *
 * Returns NULL if the function failed, else a pointer to the request packet.
 */
static request_t *
qla2x00_req_pkt(scsi_qla_host_t *ha)
{
	device_reg_t __iomem *reg = ha->iobase;
	request_t *pkt = NULL;
	uint16_t cnt;
	uint32_t *dword_ptr;
	uint32_t timer;
	uint16_t req_cnt = 1;

	/* Wait 1 second for slot. */
	for (timer = HZ; timer; timer--) {
		if ((req_cnt + 2) >= ha->req_q_cnt) {
			/* Calculate number of free request entries. */
			if (IS_QLA24XX(ha) || IS_QLA54XX(ha))
				cnt = (uint16_t)RD_REG_DWORD(
				    &reg->isp24.req_q_out);
			else
				cnt = qla2x00_debounce_register(
				    ISP_REQ_Q_OUT(ha, &reg->isp));
			if (ha->req_ring_index < cnt)
				ha->req_q_cnt = cnt - ha->req_ring_index;
			else
				ha->req_q_cnt = ha->request_q_length -
				    (ha->req_ring_index - cnt);
		}
		/* If room for request in request ring. */
		if ((req_cnt + 2) < ha->req_q_cnt) {
			ha->req_q_cnt--;
			pkt = ha->request_ring_ptr;

			/* Zero out packet. */
			dword_ptr = (uint32_t *)pkt;
			for (cnt = 0; cnt < REQUEST_ENTRY_SIZE / 4; cnt++)
				*dword_ptr++ = 0;

			/* Set system defined field. */
			pkt->sys_define = (uint8_t)ha->req_ring_index;

			/* Set entry count. */
			pkt->entry_count = 1;

			break;
		}

		/* Release ring specific lock */
		spin_unlock(&ha->hardware_lock);

		udelay(2);	/* 2 us */

		/* Check for pending interrupts. */
		/* During init we issue marker directly */
		if (!ha->marker_needed)
			qla2x00_poll(ha);

		spin_lock_irq(&ha->hardware_lock);
	}
	if (!pkt) {
		DEBUG2_3(printk("%s(): **** FAILED ****\n", __func__));
	}

	return (pkt);
}

/**
 * qla2x00_isp_cmd() - Modify the request ring pointer.
 * @ha: HA context
 *
 * Note: The caller must hold the hardware lock before calling this routine.
 */
void
qla2x00_isp_cmd(scsi_qla_host_t *ha)
{
	device_reg_t __iomem *reg = ha->iobase;

	DEBUG5(printk("%s(): IOCB data:\n", __func__));
	DEBUG5(qla2x00_dump_buffer(
	    (uint8_t *)ha->request_ring_ptr, REQUEST_ENTRY_SIZE));

	/* Adjust ring index. */
	ha->req_ring_index++;
	if (ha->req_ring_index == ha->request_q_length) {
		ha->req_ring_index = 0;
		ha->request_ring_ptr = ha->request_ring;
	} else
		ha->request_ring_ptr++;

	/* Set chip new ring index. */
	if (IS_QLA24XX(ha) || IS_QLA54XX(ha)) {
		WRT_REG_DWORD(&reg->isp24.req_q_in, ha->req_ring_index);
		RD_REG_DWORD_RELAXED(&reg->isp24.req_q_in);
	} else {
		WRT_REG_WORD(ISP_REQ_Q_IN(ha, &reg->isp), ha->req_ring_index);
		RD_REG_WORD_RELAXED(ISP_REQ_Q_IN(ha, &reg->isp));
	}
}

/**
 * qla24xx_calc_iocbs() - Determine number of Command Type 3 and
 * Continuation Type 1 IOCBs to allocate.
 *
 * @dsds: number of data segment descriptors needed
 *
 * Returns the number of IOCB entries needed to store @dsds.
 */
static inline uint16_t
qla24xx_calc_iocbs(uint16_t dsds)
{
	uint16_t iocbs;

	iocbs = 1;
	if (dsds > 1) {
		iocbs += (dsds - 1) / 5;
		if ((dsds - 1) % 5)
			iocbs++;
	}
	return iocbs;
}
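
/*
 * Worked example (illustrative): the Command Type 7 IOCB carries a
 * single DSD and each Continuation Type 1 IOCB adds 5, so:
 *
 *	qla24xx_calc_iocbs(1)  == 1
 *	qla24xx_calc_iocbs(11) == 1 + 10/5 == 3
 *	qla24xx_calc_iocbs(12) == 1 + 2 + 1 == 4 (11 % 5 != 0)
 */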

/**
 * qla24xx_build_scsi_iocbs() - Build IOCB command utilizing Command Type 7
 * IOCB types.
 *
 * @sp: SRB command to process
 * @cmd_pkt: Command type 3 IOCB
 * @tot_dsds: Total number of segments to transfer
 */
static inline void
qla24xx_build_scsi_iocbs(srb_t *sp, struct cmd_type_7 *cmd_pkt,
    uint16_t tot_dsds)
{
	uint16_t avail_dsds;
	uint32_t *cur_dsd;
	scsi_qla_host_t *ha;
	struct scsi_cmnd *cmd;

	cmd = sp->cmd;

	/* Update entry type to indicate Command Type 3 IOCB */
	*((uint32_t *)(&cmd_pkt->entry_type)) =
	    __constant_cpu_to_le32(COMMAND_TYPE_7);

	/* No data transfer */
	if (cmd->request_bufflen == 0 || cmd->sc_data_direction == DMA_NONE) {
		cmd_pkt->byte_count = __constant_cpu_to_le32(0);
		return;
	}

	ha = sp->ha;

	/* Set transfer direction */
	if (cmd->sc_data_direction == DMA_TO_DEVICE)
		cmd_pkt->task_mgmt_flags =
		    __constant_cpu_to_le16(TMF_WRITE_DATA);
	else if (cmd->sc_data_direction == DMA_FROM_DEVICE)
		cmd_pkt->task_mgmt_flags =
		    __constant_cpu_to_le16(TMF_READ_DATA);

	/* One DSD is available in the Command Type 3 IOCB */
	avail_dsds = 1;
	cur_dsd = (uint32_t *)&cmd_pkt->dseg_0_address;

	/* Load data segments */
	if (cmd->use_sg != 0) {
		struct scatterlist *cur_seg;
		struct scatterlist *end_seg;

		cur_seg = (struct scatterlist *)cmd->request_buffer;
		end_seg = cur_seg + tot_dsds;
		while (cur_seg < end_seg) {
			dma_addr_t sle_dma;
			cont_a64_entry_t *cont_pkt;

			/* Allocate additional continuation packets? */
			if (avail_dsds == 0) {
				/*
				 * Five DSDs are available in the Continuation
				 * Type 1 IOCB.
				 */
				cont_pkt = qla2x00_prep_cont_type1_iocb(ha);
				cur_dsd = (uint32_t *)cont_pkt->dseg_0_address;
				avail_dsds = 5;
			}

			sle_dma = sg_dma_address(cur_seg);
			*cur_dsd++ = cpu_to_le32(LSD(sle_dma));
			*cur_dsd++ = cpu_to_le32(MSD(sle_dma));
			*cur_dsd++ = cpu_to_le32(sg_dma_len(cur_seg));
			avail_dsds--;

			cur_seg++;
		}
	} else {
		*cur_dsd++ = cpu_to_le32(LSD(sp->dma_handle));
		*cur_dsd++ = cpu_to_le32(MSD(sp->dma_handle));
		*cur_dsd++ = cpu_to_le32(cmd->request_bufflen);
	}
}

/**
 * qla24xx_start_scsi() - Send a SCSI command to the ISP
 * @sp: command to send to the ISP
 *
 * Returns non-zero if a failure occurred, else zero.
 */
int
qla24xx_start_scsi(srb_t *sp)
{
	int ret;
	unsigned long flags;
	scsi_qla_host_t *ha;
	struct scsi_cmnd *cmd;
	uint32_t *clr_ptr;
	uint32_t index;
	uint32_t handle;
	struct cmd_type_7 *cmd_pkt;
	struct scatterlist *sg;
	uint16_t cnt;
	uint16_t req_cnt;
	uint16_t tot_dsds;
	struct device_reg_24xx __iomem *reg;

	/* Setup device pointers. */
	ret = 0;
	ha = sp->ha;
	reg = &ha->iobase->isp24;
	cmd = sp->cmd;
	/* So we know we haven't pci_map'ed anything yet */
	tot_dsds = 0;

	/* Send marker if required */
	if (ha->marker_needed != 0) {
		if (qla2x00_marker(ha, 0, 0, MK_SYNC_ALL) != QLA_SUCCESS) {
			return QLA_FUNCTION_FAILED;
		}
		ha->marker_needed = 0;
	}

	/* Acquire ring specific lock */
	spin_lock_irqsave(&ha->hardware_lock, flags);

	/* Check for room in outstanding command list. */
	handle = ha->current_outstanding_cmd;
	for (index = 1; index < MAX_OUTSTANDING_COMMANDS; index++) {
		handle++;
		if (handle == MAX_OUTSTANDING_COMMANDS)
			handle = 1;
		if (ha->outstanding_cmds[handle] == 0)
			break;
	}
	if (index == MAX_OUTSTANDING_COMMANDS)
		goto queuing_error;

	/* Map the sg table so we have an accurate count of sg entries needed */
	if (cmd->use_sg) {
		sg = (struct scatterlist *) cmd->request_buffer;
		tot_dsds = pci_map_sg(ha->pdev, sg, cmd->use_sg,
		    cmd->sc_data_direction);
		if (tot_dsds == 0)
			goto queuing_error;
	} else if (cmd->request_bufflen) {
		dma_addr_t req_dma;

		req_dma = pci_map_single(ha->pdev, cmd->request_buffer,
		    cmd->request_bufflen, cmd->sc_data_direction);
		if (dma_mapping_error(req_dma))
			goto queuing_error;

		sp->dma_handle = req_dma;
		tot_dsds = 1;
	}

	req_cnt = qla24xx_calc_iocbs(tot_dsds);
	if (ha->req_q_cnt < (req_cnt + 2)) {
		cnt = (uint16_t)RD_REG_DWORD_RELAXED(&reg->req_q_out);
		if (ha->req_ring_index < cnt)
			ha->req_q_cnt = cnt - ha->req_ring_index;
		else
			ha->req_q_cnt = ha->request_q_length -
			    (ha->req_ring_index - cnt);
	}
	if (ha->req_q_cnt < (req_cnt + 2))
		goto queuing_error;

	/* Build command packet. */
	ha->current_outstanding_cmd = handle;
	ha->outstanding_cmds[handle] = sp;
	sp->ha = ha;
	sp->cmd->host_scribble = (unsigned char *)(unsigned long)handle;
	ha->req_q_cnt -= req_cnt;

	cmd_pkt = (struct cmd_type_7 *)ha->request_ring_ptr;
	cmd_pkt->handle = handle;

	/* Zero out remaining portion of packet. */
	/* tagged queuing modifier -- default is TSK_SIMPLE (0). */
	clr_ptr = (uint32_t *)cmd_pkt + 2;
	memset(clr_ptr, 0, REQUEST_ENTRY_SIZE - 8);
	cmd_pkt->dseg_count = cpu_to_le16(tot_dsds);

	/* Set NPORT-ID and LUN number */
	cmd_pkt->nport_handle = cpu_to_le16(sp->fcport->loop_id);
	cmd_pkt->port_id[0] = sp->fcport->d_id.b.al_pa;
	cmd_pkt->port_id[1] = sp->fcport->d_id.b.area;
	cmd_pkt->port_id[2] = sp->fcport->d_id.b.domain;

	int_to_scsilun(sp->cmd->device->lun, &cmd_pkt->lun);
	host_to_fcp_swap((uint8_t *)&cmd_pkt->lun, sizeof(cmd_pkt->lun));
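
	/*
	 * Illustrative note: int_to_scsilun() packs the Linux LUN into the
	 * 8-byte SCSI LUN structure and host_to_fcp_swap() byte-swaps it
	 * into the big-endian form the FCP_CMND payload expects, the same
	 * treatment the CDB receives just below.
	 */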

	/* Load SCSI command packet. */
	memcpy(cmd_pkt->fcp_cdb, cmd->cmnd, cmd->cmd_len);
	host_to_fcp_swap(cmd_pkt->fcp_cdb, sizeof(cmd_pkt->fcp_cdb));

	cmd_pkt->byte_count = cpu_to_le32((uint32_t)cmd->request_bufflen);

	/* Build IOCB segments */
	qla24xx_build_scsi_iocbs(sp, cmd_pkt, tot_dsds);

	/* Set total data segment count. */
	cmd_pkt->entry_count = (uint8_t)req_cnt;
	wmb();

	/* Adjust ring index. */
	ha->req_ring_index++;
	if (ha->req_ring_index == ha->request_q_length) {
		ha->req_ring_index = 0;
		ha->request_ring_ptr = ha->request_ring;
	} else
		ha->request_ring_ptr++;

	sp->flags |= SRB_DMA_VALID;

	/* Set chip new ring index. */
	WRT_REG_DWORD(&reg->req_q_in, ha->req_ring_index);
	RD_REG_DWORD_RELAXED(&reg->req_q_in);	/* PCI Posting. */

	/* Manage unprocessed RIO/ZIO commands in response queue. */
	if (ha->flags.process_response_queue &&
	    ha->response_ring_ptr->signature != RESPONSE_PROCESSED)
		qla24xx_process_response_queue(ha);

	spin_unlock_irqrestore(&ha->hardware_lock, flags);
	return QLA_SUCCESS;

queuing_error:
	if (cmd->use_sg && tot_dsds) {
		sg = (struct scatterlist *) cmd->request_buffer;
		pci_unmap_sg(ha->pdev, sg, cmd->use_sg,
		    cmd->sc_data_direction);
	} else if (tot_dsds) {
		pci_unmap_single(ha->pdev, sp->dma_handle,
		    cmd->request_bufflen, cmd->sc_data_direction);
	}
	spin_unlock_irqrestore(&ha->hardware_lock, flags);

	return QLA_FUNCTION_FAILED;
}