/*
 * qla_target.c SCSI LLD infrastructure for QLogic 22xx/23xx/24xx/25xx
 *
 * based on qla2x00t.c code:
 *
 * Copyright (C) 2004 - 2010 Vladislav Bolkhovitin <vst@vlnb.net>
 * Copyright (C) 2004 - 2005 Leonid Stoljar
 * Copyright (C) 2006 Nathaniel Clark <nate@misrule.us>
 * Copyright (C) 2006 - 2010 ID7 Ltd.
 *
 * Forward port and refactoring to modern qla2xxx and target/configfs
 *
 * Copyright (C) 2010-2013 Nicholas A. Bellinger <nab@kernel.org>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation, version 2
 * of the License.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 */
#include <linux/module.h>
#include <linux/init.h>
#include <linux/types.h>
#include <linux/blkdev.h>
#include <linux/interrupt.h>
#include <linux/pci.h>
#include <linux/delay.h>
#include <linux/list.h>
#include <linux/workqueue.h>
#include <asm/unaligned.h>
#include <scsi/scsi.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_tcq.h>
#include <target/target_core_base.h>
#include <target/target_core_fabric.h>

#include "qla_def.h"
#include "qla_target.h"
static int ql2xtgt_tape_enable;
module_param(ql2xtgt_tape_enable, int, S_IRUGO|S_IWUSR);
MODULE_PARM_DESC(ql2xtgt_tape_enable,
        "Enables Sequence level error recovery (aka FC Tape). Default is 0 - no SLER. 1 - Enable SLER.");

static char *qlini_mode = QLA2XXX_INI_MODE_STR_ENABLED;
module_param(qlini_mode, charp, S_IRUGO);
MODULE_PARM_DESC(qlini_mode,
        "Determines when initiator mode will be enabled. Possible values: "
        "\"exclusive\" - initiator mode will be enabled on load, "
        "disabled on enabling target mode and then on disabling target mode "
        "enabled back; "
        "\"disabled\" - initiator mode will never be enabled; "
        "\"enabled\" (default) - initiator mode will always stay enabled.");

int ql2x_ini_mode = QLA2XXX_INI_MODE_EXCLUSIVE;

static int temp_sam_status = SAM_STAT_BUSY;
/*
 * From scsi/fc/fc_fcp.h
 */
enum fcp_resp_rsp_codes {
        FCP_TMF_CMPL = 0,
        FCP_DATA_LEN_INVALID = 1,
        FCP_CMND_FIELDS_INVALID = 2,
        FCP_DATA_PARAM_MISMATCH = 3,
        FCP_TMF_REJECTED = 4,
        FCP_TMF_FAILED = 5,
        FCP_TMF_INVALID_LUN = 9,
};

/*
 * fc_pri_ta from scsi/fc/fc_fcp.h
 */
#define FCP_PTA_SIMPLE      0   /* simple task attribute */
#define FCP_PTA_HEADQ       1   /* head of queue task attribute */
#define FCP_PTA_ORDERED     2   /* ordered task attribute */
#define FCP_PTA_ACA         4   /* auto. contingent allegiance */
#define FCP_PTA_MASK        7   /* mask for task attribute field */
#define FCP_PRI_SHIFT       3   /* priority field starts in bit 3 */
#define FCP_PRI_RESVD_MASK  0x80 /* reserved bits in priority field */
/*
 * This driver calls qla2x00_alloc_iocbs() and qla2x00_issue_marker(), which
 * must be called under HW lock and could unlock/lock it inside.
 * It isn't an issue, since in the current implementation at the time when
 * those functions are called:
 *
 * - Either context is IRQ and only IRQ handler can modify HW data,
 *   including rings related fields,
 *
 * - Or access to target mode variables from struct qla_tgt doesn't
 *   cross those functions' boundaries, except tgt_stop, which is
 *   additionally protected by irq_cmd_count.
 */
/* Predefs for callbacks handed to qla2xxx LLD */
static void qlt_24xx_atio_pkt(struct scsi_qla_host *ha,
        struct atio_from_isp *pkt);
static void qlt_response_pkt(struct scsi_qla_host *ha, response_t *pkt);
static int qlt_issue_task_mgmt(struct qla_tgt_sess *sess, uint32_t lun,
        int fn, void *iocb, int flags);
static void qlt_send_term_exchange(struct scsi_qla_host *ha, struct qla_tgt_cmd
        *cmd, struct atio_from_isp *atio, int ha_locked);
static void qlt_reject_free_srr_imm(struct scsi_qla_host *ha,
        struct qla_tgt_srr_imm *imm, int ha_lock);
static void qlt_abort_cmd_on_host_reset(struct scsi_qla_host *vha,
        struct qla_tgt_cmd *cmd);
static void qlt_alloc_qfull_cmd(struct scsi_qla_host *vha,
        struct atio_from_isp *atio, uint16_t status, int qfull);
static void qlt_disable_vha(struct scsi_qla_host *vha);

/*
 * Global Variables
 */
static struct kmem_cache *qla_tgt_mgmt_cmd_cachep;
static mempool_t *qla_tgt_mgmt_cmd_mempool;
static struct workqueue_struct *qla_tgt_wq;
static DEFINE_MUTEX(qla_tgt_mutex);
static LIST_HEAD(qla_tgt_glist);
/* ha->hardware_lock supposed to be held on entry (to protect tgt->sess_list) */
static struct qla_tgt_sess *qlt_find_sess_by_port_name(
        struct qla_tgt *tgt,
        const uint8_t *port_name)
{
        struct qla_tgt_sess *sess;

        list_for_each_entry(sess, &tgt->sess_list, sess_list_entry) {
                if (!memcmp(sess->port_name, port_name, WWN_SIZE))
                        return sess;
        }

        return NULL;
}
/* Might release hw lock, then reacquire!! */
static inline int qlt_issue_marker(struct scsi_qla_host *vha, int vha_locked)
{
        /* Send marker if required */
        if (unlikely(vha->marker_needed != 0)) {
                int rc = qla2x00_issue_marker(vha, vha_locked);
                if (rc != QLA_SUCCESS) {
                        ql_dbg(ql_dbg_tgt, vha, 0xe03d,
                            "qla_target(%d): issue_marker() failed\n",
                            vha->vp_idx);
                }
                return rc;
        }
        return QLA_SUCCESS;
}
static struct scsi_qla_host *qlt_find_host_by_d_id(struct scsi_qla_host *vha,
        uint8_t *d_id)
{
        struct qla_hw_data *ha = vha->hw;
        uint8_t vp_idx;

        if ((vha->d_id.b.area != d_id[1]) || (vha->d_id.b.domain != d_id[0]))
                return NULL;

        if (vha->d_id.b.al_pa == d_id[2])
                return vha;

        BUG_ON(ha->tgt.tgt_vp_map == NULL);
        vp_idx = ha->tgt.tgt_vp_map[d_id[2]].idx;
        if (likely(test_bit(vp_idx, ha->vp_idx_map)))
                return ha->tgt.tgt_vp_map[vp_idx].vha;

        return NULL;
}
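
/*
 * Note: the d_id lookup above resolves the 24-bit FC address in two steps:
 * the domain/area bytes must match this physical port, and the al_pa byte
 * indexes ha->tgt.tgt_vp_map[] to find the vp_idx of the NPIV host that
 * owns the address. The vp_idx is then validated against ha->vp_idx_map
 * before the mapped vha is returned.
 */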
static struct scsi_qla_host *qlt_find_host_by_vp_idx(struct scsi_qla_host *vha,
        uint16_t vp_idx)
{
        struct qla_hw_data *ha = vha->hw;

        if (vha->vp_idx == vp_idx)
                return vha;

        BUG_ON(ha->tgt.tgt_vp_map == NULL);
        if (likely(test_bit(vp_idx, ha->vp_idx_map)))
                return ha->tgt.tgt_vp_map[vp_idx].vha;

        return NULL;
}
static inline void qlt_incr_num_pend_cmds(struct scsi_qla_host *vha)
{
        unsigned long flags;

        spin_lock_irqsave(&vha->hw->tgt.q_full_lock, flags);

        vha->hw->tgt.num_pend_cmds++;
        if (vha->hw->tgt.num_pend_cmds > vha->hw->qla_stats.stat_max_pend_cmds)
                vha->hw->qla_stats.stat_max_pend_cmds =
                    vha->hw->tgt.num_pend_cmds;
        spin_unlock_irqrestore(&vha->hw->tgt.q_full_lock, flags);
}

static inline void qlt_decr_num_pend_cmds(struct scsi_qla_host *vha)
{
        unsigned long flags;

        spin_lock_irqsave(&vha->hw->tgt.q_full_lock, flags);
        vha->hw->tgt.num_pend_cmds--;
        spin_unlock_irqrestore(&vha->hw->tgt.q_full_lock, flags);
}
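
/*
 * num_pend_cmds counts target commands currently outstanding on this HBA;
 * stat_max_pend_cmds records the high-water mark. Both are guarded by
 * tgt.q_full_lock rather than the hardware lock.
 */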
static void qlt_24xx_atio_pkt_all_vps(struct scsi_qla_host *vha,
        struct atio_from_isp *atio)
{
        ql_dbg(ql_dbg_tgt, vha, 0xe072,
            "%s: qla_target(%d): type %x ox_id %04x\n",
            __func__, vha->vp_idx, atio->u.raw.entry_type,
            be16_to_cpu(atio->u.isp24.fcp_hdr.ox_id));

        switch (atio->u.raw.entry_type) {
        case ATIO_TYPE7:
        {
                struct scsi_qla_host *host = qlt_find_host_by_d_id(vha,
                    atio->u.isp24.fcp_hdr.d_id);
                if (unlikely(NULL == host)) {
                        ql_dbg(ql_dbg_tgt, vha, 0xe03e,
                            "qla_target(%d): Received ATIO_TYPE7 "
                            "with unknown d_id %x:%x:%x\n", vha->vp_idx,
                            atio->u.isp24.fcp_hdr.d_id[0],
                            atio->u.isp24.fcp_hdr.d_id[1],
                            atio->u.isp24.fcp_hdr.d_id[2]);
                        break;
                }
                qlt_24xx_atio_pkt(host, atio);
                break;
        }

        case IMMED_NOTIFY_TYPE:
        {
                struct scsi_qla_host *host = vha;
                struct imm_ntfy_from_isp *entry =
                    (struct imm_ntfy_from_isp *)atio;

                if ((entry->u.isp24.vp_index != 0xFF) &&
                    (entry->u.isp24.nport_handle != 0xFFFF)) {
                        host = qlt_find_host_by_vp_idx(vha,
                            entry->u.isp24.vp_index);
                        if (unlikely(!host)) {
                                ql_dbg(ql_dbg_tgt, vha, 0xe03f,
                                    "qla_target(%d): Received "
                                    "ATIO (IMMED_NOTIFY_TYPE) "
                                    "with unknown vp_index %d\n",
                                    vha->vp_idx, entry->u.isp24.vp_index);
                                break;
                        }
                }
                qlt_24xx_atio_pkt(host, atio);
                break;
        }

        default:
                ql_dbg(ql_dbg_tgt, vha, 0xe040,
                    "qla_target(%d): Received unknown ATIO atio "
                    "type %x\n", vha->vp_idx, atio->u.raw.entry_type);
                break;
        }

        return;
}
void qlt_response_pkt_all_vps(struct scsi_qla_host *vha, response_t *pkt)
{
        switch (pkt->entry_type) {
        case CTIO_CRC2:
                ql_dbg(ql_dbg_tgt, vha, 0xe073,
                    "qla_target(%d):%s: CRC2 Response pkt\n",
                    vha->vp_idx, __func__);
        case CTIO_TYPE7:
        {
                struct ctio7_from_24xx *entry = (struct ctio7_from_24xx *)pkt;
                struct scsi_qla_host *host = qlt_find_host_by_vp_idx(vha,
                    entry->vp_index);
                if (unlikely(!host)) {
                        ql_dbg(ql_dbg_tgt, vha, 0xe041,
                            "qla_target(%d): Response pkt (CTIO_TYPE7) "
                            "received, with unknown vp_index %d\n",
                            vha->vp_idx, entry->vp_index);
                        break;
                }
                qlt_response_pkt(host, pkt);
                break;
        }

        case IMMED_NOTIFY_TYPE:
        {
                struct scsi_qla_host *host = vha;
                struct imm_ntfy_from_isp *entry =
                    (struct imm_ntfy_from_isp *)pkt;

                host = qlt_find_host_by_vp_idx(vha, entry->u.isp24.vp_index);
                if (unlikely(!host)) {
                        ql_dbg(ql_dbg_tgt, vha, 0xe042,
                            "qla_target(%d): Response pkt (IMMED_NOTIFY_TYPE) "
                            "received, with unknown vp_index %d\n",
                            vha->vp_idx, entry->u.isp24.vp_index);
                        break;
                }
                qlt_response_pkt(host, pkt);
                break;
        }

        case NOTIFY_ACK_TYPE:
        {
                struct scsi_qla_host *host = vha;
                struct nack_to_isp *entry = (struct nack_to_isp *)pkt;

                if (0xFF != entry->u.isp24.vp_index) {
                        host = qlt_find_host_by_vp_idx(vha,
                            entry->u.isp24.vp_index);
                        if (unlikely(!host)) {
                                ql_dbg(ql_dbg_tgt, vha, 0xe043,
                                    "qla_target(%d): Response "
                                    "pkt (NOTIFY_ACK_TYPE) "
                                    "received, with unknown "
                                    "vp_index %d\n", vha->vp_idx,
                                    entry->u.isp24.vp_index);
                                break;
                        }
                }
                qlt_response_pkt(host, pkt);
                break;
        }

        case ABTS_RECV_24XX:
        {
                struct abts_recv_from_24xx *entry =
                    (struct abts_recv_from_24xx *)pkt;
                struct scsi_qla_host *host = qlt_find_host_by_vp_idx(vha,
                    entry->vp_index);
                if (unlikely(!host)) {
                        ql_dbg(ql_dbg_tgt, vha, 0xe044,
                            "qla_target(%d): Response pkt "
                            "(ABTS_RECV_24XX) received, with unknown "
                            "vp_index %d\n", vha->vp_idx, entry->vp_index);
                        break;
                }
                qlt_response_pkt(host, pkt);
                break;
        }

        case ABTS_RESP_24XX:
        {
                struct abts_resp_to_24xx *entry =
                    (struct abts_resp_to_24xx *)pkt;
                struct scsi_qla_host *host = qlt_find_host_by_vp_idx(vha,
                    entry->vp_index);
                if (unlikely(!host)) {
                        ql_dbg(ql_dbg_tgt, vha, 0xe045,
                            "qla_target(%d): Response pkt "
                            "(ABTS_RESP_24XX) received, with unknown "
                            "vp_index %d\n", vha->vp_idx, entry->vp_index);
                        break;
                }
                qlt_response_pkt(host, pkt);
                break;
        }

        default:
                qlt_response_pkt(vha, pkt);
                break;
        }
}
static void qlt_free_session_done(struct work_struct *work)
{
        struct qla_tgt_sess *sess = container_of(work, struct qla_tgt_sess,
            free_work);
        struct qla_tgt *tgt = sess->tgt;
        struct scsi_qla_host *vha = sess->vha;
        struct qla_hw_data *ha = vha->hw;

        BUG_ON(!tgt);
        /*
         * Release the target session for FC Nexus from fabric module code.
         */
        if (sess->se_sess != NULL)
                ha->tgt.tgt_ops->free_session(sess);

        ql_dbg(ql_dbg_tgt_mgt, vha, 0xf001,
            "Unregistration of sess %p finished\n", sess);

        kfree(sess);
        /*
         * We need to protect against race, when tgt is freed before or
         * inside wake_up()
         */
        tgt->sess_count--;
        if (tgt->sess_count == 0)
                wake_up_all(&tgt->waitQ);
}
/* ha->hardware_lock supposed to be held on entry */
void qlt_unreg_sess(struct qla_tgt_sess *sess)
{
        struct scsi_qla_host *vha = sess->vha;

        vha->hw->tgt.tgt_ops->clear_nacl_from_fcport_map(sess);

        list_del(&sess->sess_list_entry);
        if (sess->deleted)
                list_del(&sess->del_list_entry);

        INIT_WORK(&sess->free_work, qlt_free_session_done);
        schedule_work(&sess->free_work);
}
EXPORT_SYMBOL(qlt_unreg_sess);
/* ha->hardware_lock supposed to be held on entry */
static int qlt_reset(struct scsi_qla_host *vha, void *iocb, int mcmd)
{
        struct qla_hw_data *ha = vha->hw;
        struct qla_tgt_sess *sess = NULL;
        uint32_t unpacked_lun, lun = 0;
        uint16_t loop_id;
        struct imm_ntfy_from_isp *n = (struct imm_ntfy_from_isp *)iocb;
        struct atio_from_isp *a = (struct atio_from_isp *)iocb;

        loop_id = le16_to_cpu(n->u.isp24.nport_handle);
        if (loop_id == 0xFFFF) {
#if 0   /* FIXME: Re-enable Global event handling.. */
                /* Global event */
                atomic_inc(&ha->tgt.qla_tgt->tgt_global_resets_count);
                qlt_clear_tgt_db(ha->tgt.qla_tgt);
                if (!list_empty(&ha->tgt.qla_tgt->sess_list)) {
                        sess = list_entry(ha->tgt.qla_tgt->sess_list.next,
                            typeof(*sess), sess_list_entry);
                        switch (mcmd) {
                        case QLA_TGT_NEXUS_LOSS_SESS:
                                mcmd = QLA_TGT_NEXUS_LOSS;
                                break;
                        case QLA_TGT_ABORT_ALL_SESS:
                                mcmd = QLA_TGT_ABORT_ALL;
                                break;
                        case QLA_TGT_NEXUS_LOSS:
                        case QLA_TGT_ABORT_ALL:
                                break;
                        default:
                                ql_dbg(ql_dbg_tgt, vha, 0xe046,
                                    "qla_target(%d): Not allowed "
                                    "command %x in %s", vha->vp_idx,
                                    mcmd, __func__);
                                sess = NULL;
                                break;
                        }
                } else
                        sess = NULL;
#endif
        } else {
                sess = ha->tgt.tgt_ops->find_sess_by_loop_id(vha, loop_id);
        }

        ql_dbg(ql_dbg_tgt, vha, 0xe000,
            "Using sess for qla_tgt_reset: %p\n", sess);
        if (!sess)
                return -ESRCH;

        ql_dbg(ql_dbg_tgt, vha, 0xe047,
            "scsi(%ld): resetting (session %p from port %8phC mcmd %x, "
            "loop_id %d)\n", vha->host_no, sess, sess->port_name,
            mcmd, loop_id);

        lun = a->u.isp24.fcp_cmnd.lun;
        unpacked_lun = scsilun_to_int((struct scsi_lun *)&lun);

        return qlt_issue_task_mgmt(sess, unpacked_lun, mcmd,
            iocb, QLA24XX_MGMT_SEND_NACK);
}
/* ha->hardware_lock supposed to be held on entry */
static void qlt_schedule_sess_for_deletion(struct qla_tgt_sess *sess,
        bool immediate)
{
        struct qla_tgt *tgt = sess->tgt;
        uint32_t dev_loss_tmo = tgt->ha->port_down_retry_count + 5;

        if (sess->deleted)
                return;

        ql_dbg(ql_dbg_tgt, sess->vha, 0xe001,
            "Scheduling sess %p for deletion\n", sess);
        list_add_tail(&sess->del_list_entry, &tgt->del_sess_list);
        sess->deleted = 1;

        if (immediate)
                dev_loss_tmo = 0;

        sess->expires = jiffies + dev_loss_tmo * HZ;

        ql_dbg(ql_dbg_tgt, sess->vha, 0xe048,
            "qla_target(%d): session for port %8phC (loop ID %d) scheduled for "
            "deletion in %u secs (expires: %lu) immed: %d\n",
            sess->vha->vp_idx, sess->port_name, sess->loop_id, dev_loss_tmo,
            sess->expires, immediate);

        if (immediate)
                schedule_delayed_work(&tgt->sess_del_work, 0);
        else
                schedule_delayed_work(&tgt->sess_del_work,
                    sess->expires - jiffies);
}
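
/*
 * A session scheduled for deletion stays on tgt->del_sess_list for
 * dev_loss_tmo seconds (port_down_retry_count + 5), giving the initiator a
 * chance to relogin and be "undeleted". Passing immediate=true collapses
 * the timeout to zero so sess_del_work runs right away.
 */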
/* ha->hardware_lock supposed to be held on entry */
static void qlt_clear_tgt_db(struct qla_tgt *tgt)
{
        struct qla_tgt_sess *sess;

        list_for_each_entry(sess, &tgt->sess_list, sess_list_entry)
                qlt_schedule_sess_for_deletion(sess, true);

        /* At this point tgt could be already dead */
}
static int qla24xx_get_loop_id(struct scsi_qla_host *vha, const uint8_t *s_id,
        uint16_t *loop_id)
{
        struct qla_hw_data *ha = vha->hw;
        dma_addr_t gid_list_dma;
        struct gid_list_info *gid_list;
        char *id_iter;
        int res, rc, i;
        uint16_t entries;

        gid_list = dma_alloc_coherent(&ha->pdev->dev, qla2x00_gid_list_size(ha),
            &gid_list_dma, GFP_KERNEL);
        if (!gid_list) {
                ql_dbg(ql_dbg_tgt_mgt, vha, 0xf044,
                    "qla_target(%d): DMA Alloc failed of %u\n",
                    vha->vp_idx, qla2x00_gid_list_size(ha));
                return -ENOMEM;
        }

        /* Get list of logged in devices */
        rc = qla2x00_get_id_list(vha, gid_list, gid_list_dma, &entries);
        if (rc != QLA_SUCCESS) {
                ql_dbg(ql_dbg_tgt_mgt, vha, 0xf045,
                    "qla_target(%d): get_id_list() failed: %x\n",
                    vha->vp_idx, rc);
                res = -1;
                goto out_free_id_list;
        }

        id_iter = (char *)gid_list;
        res = -1;
        for (i = 0; i < entries; i++) {
                struct gid_list_info *gid = (struct gid_list_info *)id_iter;
                if ((gid->al_pa == s_id[2]) &&
                    (gid->area == s_id[1]) &&
                    (gid->domain == s_id[0])) {
                        *loop_id = le16_to_cpu(gid->loop_id);
                        res = 0;
                        break;
                }
                id_iter += ha->gid_list_info_size;
        }

out_free_id_list:
        dma_free_coherent(&ha->pdev->dev, qla2x00_gid_list_size(ha),
            gid_list, gid_list_dma);
        return res;
}
/* ha->hardware_lock supposed to be held on entry */
static void qlt_undelete_sess(struct qla_tgt_sess *sess)
{
        BUG_ON(!sess->deleted);

        list_del(&sess->del_list_entry);
        sess->deleted = 0;
}
static void qlt_del_sess_work_fn(struct delayed_work *work)
{
        struct qla_tgt *tgt = container_of(work, struct qla_tgt,
            sess_del_work);
        struct scsi_qla_host *vha = tgt->vha;
        struct qla_hw_data *ha = vha->hw;
        struct qla_tgt_sess *sess;
        unsigned long flags, elapsed;

        spin_lock_irqsave(&ha->hardware_lock, flags);
        while (!list_empty(&tgt->del_sess_list)) {
                sess = list_entry(tgt->del_sess_list.next, typeof(*sess),
                    del_list_entry);
                elapsed = jiffies;
                if (time_after_eq(elapsed, sess->expires)) {
                        qlt_undelete_sess(sess);

                        ql_dbg(ql_dbg_tgt_mgt, vha, 0xf004,
                            "Timeout: sess %p about to be deleted\n",
                            sess);
                        ha->tgt.tgt_ops->shutdown_sess(sess);
                        ha->tgt.tgt_ops->put_sess(sess);
                } else {
                        schedule_delayed_work(&tgt->sess_del_work,
                            sess->expires - elapsed);
                        break;
                }
        }
        spin_unlock_irqrestore(&ha->hardware_lock, flags);
}
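
/*
 * del_sess_list is effectively ordered by expiry (sessions are appended
 * with monotonically increasing expires values), so the scan above can
 * stop at the first unexpired entry and simply re-arm sess_del_work for
 * that entry's deadline.
 */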
/*
 * Adds an extra ref to allow to drop hw lock after adding sess to the list.
 * Caller must put it.
 */
static struct qla_tgt_sess *qlt_create_sess(
        struct scsi_qla_host *vha,
        fc_port_t *fcport,
        bool local)
{
        struct qla_hw_data *ha = vha->hw;
        struct qla_tgt_sess *sess;
        unsigned long flags;
        unsigned char be_sid[3];

        /* Check to avoid double sessions */
        spin_lock_irqsave(&ha->hardware_lock, flags);
        list_for_each_entry(sess, &vha->vha_tgt.qla_tgt->sess_list,
            sess_list_entry) {
                if (!memcmp(sess->port_name, fcport->port_name, WWN_SIZE)) {
                        ql_dbg(ql_dbg_tgt_mgt, vha, 0xf005,
                            "Double sess %p found (s_id %x:%x:%x, "
                            "loop_id %d), updating to d_id %x:%x:%x, "
                            "loop_id %d", sess, sess->s_id.b.domain,
                            sess->s_id.b.al_pa, sess->s_id.b.area,
                            sess->loop_id, fcport->d_id.b.domain,
                            fcport->d_id.b.al_pa, fcport->d_id.b.area,
                            fcport->loop_id);

                        if (sess->deleted)
                                qlt_undelete_sess(sess);

                        kref_get(&sess->se_sess->sess_kref);
                        ha->tgt.tgt_ops->update_sess(sess, fcport->d_id, fcport->loop_id,
                            (fcport->flags & FCF_CONF_COMP_SUPPORTED));

                        if (sess->local && !local)
                                sess->local = 0;
                        spin_unlock_irqrestore(&ha->hardware_lock, flags);

                        return sess;
                }
        }
        spin_unlock_irqrestore(&ha->hardware_lock, flags);

        sess = kzalloc(sizeof(*sess), GFP_KERNEL);
        if (!sess) {
                ql_dbg(ql_dbg_tgt_mgt, vha, 0xf04a,
                    "qla_target(%u): session allocation failed, all commands "
                    "from port %8phC will be refused", vha->vp_idx,
                    fcport->port_name);

                return NULL;
        }
        sess->tgt = vha->vha_tgt.qla_tgt;
        sess->vha = vha;
        sess->s_id = fcport->d_id;
        sess->loop_id = fcport->loop_id;
        sess->local = local;

        ql_dbg(ql_dbg_tgt_mgt, vha, 0xf006,
            "Adding sess %p to tgt %p via ->check_initiator_node_acl()\n",
            sess, vha->vha_tgt.qla_tgt);

        be_sid[0] = sess->s_id.b.domain;
        be_sid[1] = sess->s_id.b.area;
        be_sid[2] = sess->s_id.b.al_pa;
        /*
         * Determine if this fc_port->port_name is allowed to access
         * target mode using explicit NodeACLs+MappedLUNs, or using
         * TPG demo mode. If this is successful a target mode FC nexus
         * is created.
         */
        if (ha->tgt.tgt_ops->check_initiator_node_acl(vha,
            &fcport->port_name[0], sess, &be_sid[0], fcport->loop_id) < 0) {
                kfree(sess);
                return NULL;
        }
        /*
         * Take an extra reference to ->sess_kref here to handle qla_tgt_sess
         * access across ->hardware_lock reacquire.
         */
        kref_get(&sess->se_sess->sess_kref);

        sess->conf_compl_supported = (fcport->flags & FCF_CONF_COMP_SUPPORTED);
        BUILD_BUG_ON(sizeof(sess->port_name) != sizeof(fcport->port_name));
        memcpy(sess->port_name, fcport->port_name, sizeof(sess->port_name));

        spin_lock_irqsave(&ha->hardware_lock, flags);
        list_add_tail(&sess->sess_list_entry, &vha->vha_tgt.qla_tgt->sess_list);
        vha->vha_tgt.qla_tgt->sess_count++;
        spin_unlock_irqrestore(&ha->hardware_lock, flags);

        ql_dbg(ql_dbg_tgt_mgt, vha, 0xf04b,
            "qla_target(%d): %ssession for wwn %8phC (loop_id %d, "
            "s_id %x:%x:%x, confirmed completion %ssupported) added\n",
            vha->vp_idx, local ? "local " : "", fcport->port_name,
            fcport->loop_id, sess->s_id.b.domain, sess->s_id.b.area,
            sess->s_id.b.al_pa, sess->conf_compl_supported ? "" : "not ");

        return sess;
}
/*
 * Called from drivers/scsi/qla2xxx/qla_init.c:qla2x00_reg_remote_port()
 */
void qlt_fc_port_added(struct scsi_qla_host *vha, fc_port_t *fcport)
{
        struct qla_hw_data *ha = vha->hw;
        struct qla_tgt *tgt = vha->vha_tgt.qla_tgt;
        struct qla_tgt_sess *sess;
        unsigned long flags;

        if (!vha->hw->tgt.tgt_ops)
                return;

        if (!tgt || (fcport->port_type != FCT_INITIATOR))
                return;

        if (qla_ini_mode_enabled(vha))
                return;

        spin_lock_irqsave(&ha->hardware_lock, flags);
        if (tgt->tgt_stop) {
                spin_unlock_irqrestore(&ha->hardware_lock, flags);
                return;
        }
        sess = qlt_find_sess_by_port_name(tgt, fcport->port_name);
        if (!sess) {
                spin_unlock_irqrestore(&ha->hardware_lock, flags);

                mutex_lock(&vha->vha_tgt.tgt_mutex);
                sess = qlt_create_sess(vha, fcport, false);
                mutex_unlock(&vha->vha_tgt.tgt_mutex);

                spin_lock_irqsave(&ha->hardware_lock, flags);
        } else {
                kref_get(&sess->se_sess->sess_kref);

                if (sess->deleted) {
                        qlt_undelete_sess(sess);

                        ql_dbg(ql_dbg_tgt_mgt, vha, 0xf04c,
                            "qla_target(%u): %ssession for port %8phC "
                            "(loop ID %d) reappeared\n", vha->vp_idx,
                            sess->local ? "local " : "", sess->port_name,
                            sess->loop_id);

                        ql_dbg(ql_dbg_tgt_mgt, vha, 0xf007,
                            "Reappeared sess %p\n", sess);
                }
                ha->tgt.tgt_ops->update_sess(sess, fcport->d_id, fcport->loop_id,
                    (fcport->flags & FCF_CONF_COMP_SUPPORTED));
        }

        if (sess && sess->local) {
                ql_dbg(ql_dbg_tgt_mgt, vha, 0xf04d,
                    "qla_target(%u): local session for "
                    "port %8phC (loop ID %d) became global\n", vha->vp_idx,
                    fcport->port_name, sess->loop_id);
                sess->local = 0;
        }
        ha->tgt.tgt_ops->put_sess(sess);
        spin_unlock_irqrestore(&ha->hardware_lock, flags);
}
void qlt_fc_port_deleted(struct scsi_qla_host *vha, fc_port_t *fcport)
{
        struct qla_hw_data *ha = vha->hw;
        struct qla_tgt *tgt = vha->vha_tgt.qla_tgt;
        struct qla_tgt_sess *sess;
        unsigned long flags;

        if (!vha->hw->tgt.tgt_ops)
                return;

        if (!tgt || (fcport->port_type != FCT_INITIATOR))
                return;

        spin_lock_irqsave(&ha->hardware_lock, flags);
        if (tgt->tgt_stop) {
                spin_unlock_irqrestore(&ha->hardware_lock, flags);
                return;
        }
        sess = qlt_find_sess_by_port_name(tgt, fcport->port_name);
        if (!sess) {
                spin_unlock_irqrestore(&ha->hardware_lock, flags);
                return;
        }

        ql_dbg(ql_dbg_tgt_mgt, vha, 0xf008, "qla_tgt_fc_port_deleted %p", sess);

        sess->local = 1;
        qlt_schedule_sess_for_deletion(sess, false);
        spin_unlock_irqrestore(&ha->hardware_lock, flags);
}
static inline int test_tgt_sess_count(struct qla_tgt *tgt)
{
        struct qla_hw_data *ha = tgt->ha;
        unsigned long flags;
        int res;
        /*
         * We need to protect against race, when tgt is freed before or
         * inside wake_up()
         */
        spin_lock_irqsave(&ha->hardware_lock, flags);
        ql_dbg(ql_dbg_tgt, tgt->vha, 0xe002,
            "tgt %p, empty(sess_list)=%d sess_count=%d\n",
            tgt, list_empty(&tgt->sess_list), tgt->sess_count);
        res = (tgt->sess_count == 0);
        spin_unlock_irqrestore(&ha->hardware_lock, flags);

        return res;
}
/* Called by tcm_qla2xxx configfs code */
int qlt_stop_phase1(struct qla_tgt *tgt)
{
        struct scsi_qla_host *vha = tgt->vha;
        struct qla_hw_data *ha = tgt->ha;
        unsigned long flags;
        int npiv_vports = 0;

        mutex_lock(&qla_tgt_mutex);
        if (!vha->fc_vport) {
                struct Scsi_Host *sh = vha->host;
                struct fc_host_attrs *fc_host = shost_to_fc_host(sh);

                spin_lock_irqsave(sh->host_lock, flags);
                npiv_vports = (fc_host->npiv_vports_inuse);
                spin_unlock_irqrestore(sh->host_lock, flags);

                if (npiv_vports) {
                        mutex_unlock(&qla_tgt_mutex);
                        return -EPERM;
                }
        }
        if (tgt->tgt_stop || tgt->tgt_stopped) {
                ql_dbg(ql_dbg_tgt_mgt, vha, 0xf04e,
                    "Already in tgt->tgt_stop or tgt_stopped state\n");
                mutex_unlock(&qla_tgt_mutex);
                return -EPERM;
        }

        ql_dbg(ql_dbg_tgt, vha, 0xe003, "Stopping target for host %ld(%p)\n",
            vha->host_no, vha);
        /*
         * Mutex needed to sync with qla_tgt_fc_port_[added,deleted].
         * Lock is needed, because we still can get an incoming packet.
         */
        mutex_lock(&vha->vha_tgt.tgt_mutex);
        spin_lock_irqsave(&ha->hardware_lock, flags);
        tgt->tgt_stop = 1;
        qlt_clear_tgt_db(tgt);
        spin_unlock_irqrestore(&ha->hardware_lock, flags);
        mutex_unlock(&vha->vha_tgt.tgt_mutex);
        mutex_unlock(&qla_tgt_mutex);

        flush_delayed_work(&tgt->sess_del_work);

        ql_dbg(ql_dbg_tgt_mgt, vha, 0xf009,
            "Waiting for sess works (tgt %p)", tgt);
        spin_lock_irqsave(&tgt->sess_work_lock, flags);
        while (!list_empty(&tgt->sess_works_list)) {
                spin_unlock_irqrestore(&tgt->sess_work_lock, flags);
                flush_scheduled_work();
                spin_lock_irqsave(&tgt->sess_work_lock, flags);
        }
        spin_unlock_irqrestore(&tgt->sess_work_lock, flags);

        ql_dbg(ql_dbg_tgt_mgt, vha, 0xf00a,
            "Waiting for tgt %p: list_empty(sess_list)=%d "
            "sess_count=%d\n", tgt, list_empty(&tgt->sess_list),
            tgt->sess_count);

        wait_event(tgt->waitQ, test_tgt_sess_count(tgt));

        /* Big hammer */
        if (!ha->flags.host_shutting_down && qla_tgt_mode_enabled(vha))
                qlt_disable_vha(vha);

        /* Wait for sessions to clear out (just in case) */
        wait_event(tgt->waitQ, test_tgt_sess_count(tgt));
        return 0;
}
EXPORT_SYMBOL(qlt_stop_phase1);
/* Called by tcm_qla2xxx configfs code */
void qlt_stop_phase2(struct qla_tgt *tgt)
{
        struct qla_hw_data *ha = tgt->ha;
        scsi_qla_host_t *vha = pci_get_drvdata(ha->pdev);
        unsigned long flags;

        if (tgt->tgt_stopped) {
                ql_dbg(ql_dbg_tgt_mgt, vha, 0xf04f,
                    "Already in tgt->tgt_stopped state\n");
                return;
        }

        ql_dbg(ql_dbg_tgt_mgt, vha, 0xf00b,
            "Waiting for %d IRQ commands to complete (tgt %p)",
            tgt->irq_cmd_count, tgt);

        mutex_lock(&vha->vha_tgt.tgt_mutex);
        spin_lock_irqsave(&ha->hardware_lock, flags);
        while (tgt->irq_cmd_count != 0) {
                spin_unlock_irqrestore(&ha->hardware_lock, flags);
                udelay(2);
                spin_lock_irqsave(&ha->hardware_lock, flags);
        }
        tgt->tgt_stop = 0;
        tgt->tgt_stopped = 1;
        spin_unlock_irqrestore(&ha->hardware_lock, flags);
        mutex_unlock(&vha->vha_tgt.tgt_mutex);

        ql_dbg(ql_dbg_tgt_mgt, vha, 0xf00c, "Stop of tgt %p finished",
            tgt);
}
EXPORT_SYMBOL(qlt_stop_phase2);
/* Called from qlt_remove_target() -> qla2x00_remove_one() */
static void qlt_release(struct qla_tgt *tgt)
{
        scsi_qla_host_t *vha = tgt->vha;

        if ((vha->vha_tgt.qla_tgt != NULL) && !tgt->tgt_stopped)
                qlt_stop_phase2(tgt);

        vha->vha_tgt.qla_tgt = NULL;

        ql_dbg(ql_dbg_tgt_mgt, vha, 0xf00d,
            "Release of tgt %p finished\n", tgt);

        kfree(tgt);
}
/* ha->hardware_lock supposed to be held on entry */
static int qlt_sched_sess_work(struct qla_tgt *tgt, int type,
        const void *param, unsigned int param_size)
{
        struct qla_tgt_sess_work_param *prm;
        unsigned long flags;

        prm = kzalloc(sizeof(*prm), GFP_ATOMIC);
        if (!prm) {
                ql_dbg(ql_dbg_tgt_mgt, tgt->vha, 0xf050,
                    "qla_target(%d): Unable to create session "
                    "work, command will be refused", 0);
                return -ENOMEM;
        }

        ql_dbg(ql_dbg_tgt_mgt, tgt->vha, 0xf00e,
            "Scheduling work (type %d, prm %p)"
            " to find session for param %p (size %d, tgt %p)\n",
            type, prm, param, param_size, tgt);

        prm->type = type;
        memcpy(&prm->tm_iocb, param, param_size);

        spin_lock_irqsave(&tgt->sess_work_lock, flags);
        list_add_tail(&prm->sess_works_list_entry, &tgt->sess_works_list);
        spin_unlock_irqrestore(&tgt->sess_work_lock, flags);

        schedule_work(&tgt->sess_work);

        return 0;
}
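
/*
 * qlt_sched_sess_work() is the deferred path used when an incoming IOCB
 * (for example an ABTS for an unknown s_id) cannot be matched to a session
 * under the hardware lock: the raw IOCB is copied into the work param
 * (GFP_ATOMIC, since this runs with ha->hardware_lock held) and resolved
 * later from process context via tgt->sess_work.
 */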
/*
 * ha->hardware_lock supposed to be held on entry. Might drop it, then reacquire
 */
static void qlt_send_notify_ack(struct scsi_qla_host *vha,
        struct imm_ntfy_from_isp *ntfy,
        uint32_t add_flags, uint16_t resp_code, int resp_code_valid,
        uint16_t srr_flags, uint16_t srr_reject_code, uint8_t srr_explan)
{
        struct qla_hw_data *ha = vha->hw;
        request_t *pkt;
        struct nack_to_isp *nack;

        ql_dbg(ql_dbg_tgt, vha, 0xe004, "Sending NOTIFY_ACK (ha=%p)\n", ha);

        /* Send marker if required */
        if (qlt_issue_marker(vha, 1) != QLA_SUCCESS)
                return;

        pkt = (request_t *)qla2x00_alloc_iocbs(vha, NULL);
        if (!pkt) {
                ql_dbg(ql_dbg_tgt, vha, 0xe049,
                    "qla_target(%d): %s failed: unable to allocate "
                    "request packet\n", vha->vp_idx, __func__);
                return;
        }

        if (vha->vha_tgt.qla_tgt != NULL)
                vha->vha_tgt.qla_tgt->notify_ack_expected++;

        pkt->entry_type = NOTIFY_ACK_TYPE;
        pkt->entry_count = 1;

        nack = (struct nack_to_isp *)pkt;
        nack->ox_id = ntfy->ox_id;

        nack->u.isp24.nport_handle = ntfy->u.isp24.nport_handle;
        if (le16_to_cpu(ntfy->u.isp24.status) == IMM_NTFY_ELS) {
                nack->u.isp24.flags = ntfy->u.isp24.flags &
                    cpu_to_le32(NOTIFY24XX_FLAGS_PUREX_IOCB);
        }
        nack->u.isp24.srr_rx_id = ntfy->u.isp24.srr_rx_id;
        nack->u.isp24.status = ntfy->u.isp24.status;
        nack->u.isp24.status_subcode = ntfy->u.isp24.status_subcode;
        nack->u.isp24.fw_handle = ntfy->u.isp24.fw_handle;
        nack->u.isp24.exchange_address = ntfy->u.isp24.exchange_address;
        nack->u.isp24.srr_rel_offs = ntfy->u.isp24.srr_rel_offs;
        nack->u.isp24.srr_ui = ntfy->u.isp24.srr_ui;
        nack->u.isp24.srr_flags = cpu_to_le16(srr_flags);
        nack->u.isp24.srr_reject_code = srr_reject_code;
        nack->u.isp24.srr_reject_code_expl = srr_explan;
        nack->u.isp24.vp_index = ntfy->u.isp24.vp_index;

        ql_dbg(ql_dbg_tgt, vha, 0xe005,
            "qla_target(%d): Sending 24xx Notify Ack %d\n",
            vha->vp_idx, nack->u.isp24.status);

        /* Memory Barrier */
        wmb();
        qla2x00_start_iocbs(vha, vha->req);
}
/*
 * ha->hardware_lock supposed to be held on entry. Might drop it, then reacquire
 */
static void qlt_24xx_send_abts_resp(struct scsi_qla_host *vha,
        struct abts_recv_from_24xx *abts, uint32_t status,
        bool ids_reversed)
{
        struct qla_hw_data *ha = vha->hw;
        struct abts_resp_to_24xx *resp;
        uint32_t f_ctl;
        uint8_t *p;

        ql_dbg(ql_dbg_tgt, vha, 0xe006,
            "Sending task mgmt ABTS response (ha=%p, atio=%p, status=%x\n",
            ha, abts, status);

        /* Send marker if required */
        if (qlt_issue_marker(vha, 1) != QLA_SUCCESS)
                return;

        resp = (struct abts_resp_to_24xx *)qla2x00_alloc_iocbs_ready(vha, NULL);
        if (!resp) {
                ql_dbg(ql_dbg_tgt, vha, 0xe04a,
                    "qla_target(%d): %s failed: unable to allocate "
                    "request packet", vha->vp_idx, __func__);
                return;
        }

        resp->entry_type = ABTS_RESP_24XX;
        resp->entry_count = 1;
        resp->nport_handle = abts->nport_handle;
        resp->vp_index = vha->vp_idx;
        resp->sof_type = abts->sof_type;
        resp->exchange_address = abts->exchange_address;
        resp->fcp_hdr_le = abts->fcp_hdr_le;
        f_ctl = cpu_to_le32(F_CTL_EXCH_CONTEXT_RESP |
            F_CTL_LAST_SEQ | F_CTL_END_SEQ |
            F_CTL_SEQ_INITIATIVE);
        p = (uint8_t *)&f_ctl;
        resp->fcp_hdr_le.f_ctl[0] = *p++;
        resp->fcp_hdr_le.f_ctl[1] = *p++;
        resp->fcp_hdr_le.f_ctl[2] = *p;
        if (ids_reversed) {
                resp->fcp_hdr_le.d_id[0] = abts->fcp_hdr_le.d_id[0];
                resp->fcp_hdr_le.d_id[1] = abts->fcp_hdr_le.d_id[1];
                resp->fcp_hdr_le.d_id[2] = abts->fcp_hdr_le.d_id[2];
                resp->fcp_hdr_le.s_id[0] = abts->fcp_hdr_le.s_id[0];
                resp->fcp_hdr_le.s_id[1] = abts->fcp_hdr_le.s_id[1];
                resp->fcp_hdr_le.s_id[2] = abts->fcp_hdr_le.s_id[2];
        } else {
                resp->fcp_hdr_le.d_id[0] = abts->fcp_hdr_le.s_id[0];
                resp->fcp_hdr_le.d_id[1] = abts->fcp_hdr_le.s_id[1];
                resp->fcp_hdr_le.d_id[2] = abts->fcp_hdr_le.s_id[2];
                resp->fcp_hdr_le.s_id[0] = abts->fcp_hdr_le.d_id[0];
                resp->fcp_hdr_le.s_id[1] = abts->fcp_hdr_le.d_id[1];
                resp->fcp_hdr_le.s_id[2] = abts->fcp_hdr_le.d_id[2];
        }
        resp->exchange_addr_to_abort = abts->exchange_addr_to_abort;
        if (status == FCP_TMF_CMPL) {
                resp->fcp_hdr_le.r_ctl = R_CTL_BASIC_LINK_SERV | R_CTL_B_ACC;
                resp->payload.ba_acct.seq_id_valid = SEQ_ID_INVALID;
                resp->payload.ba_acct.low_seq_cnt = 0x0000;
                resp->payload.ba_acct.high_seq_cnt = 0xFFFF;
                resp->payload.ba_acct.ox_id = abts->fcp_hdr_le.ox_id;
                resp->payload.ba_acct.rx_id = abts->fcp_hdr_le.rx_id;
        } else {
                resp->fcp_hdr_le.r_ctl = R_CTL_BASIC_LINK_SERV | R_CTL_B_RJT;
                resp->payload.ba_rjt.reason_code =
                    BA_RJT_REASON_CODE_UNABLE_TO_PERFORM;
                /* Other bytes are zero */
        }

        vha->vha_tgt.qla_tgt->abts_resp_expected++;

        /* Memory Barrier */
        wmb();
        qla2x00_start_iocbs(vha, vha->req);
}
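
/*
 * ids_reversed above distinguishes the two callers: when answering an ABTS
 * received from an initiator, the FCP header's s_id/d_id must be swapped to
 * address the reply, while qlt_24xx_retry_term_exchange() below responds to
 * a firmware copy of our own earlier response, whose IDs are already
 * reversed and can be copied through unchanged.
 */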
/*
 * ha->hardware_lock supposed to be held on entry. Might drop it, then reacquire
 */
static void qlt_24xx_retry_term_exchange(struct scsi_qla_host *vha,
        struct abts_resp_from_24xx_fw *entry)
{
        struct ctio7_to_24xx *ctio;

        ql_dbg(ql_dbg_tgt, vha, 0xe007,
            "Sending retry TERM EXCH CTIO7 (ha=%p)\n", vha->hw);
        /* Send marker if required */
        if (qlt_issue_marker(vha, 1) != QLA_SUCCESS)
                return;

        ctio = (struct ctio7_to_24xx *)qla2x00_alloc_iocbs_ready(vha, NULL);
        if (ctio == NULL) {
                ql_dbg(ql_dbg_tgt, vha, 0xe04b,
                    "qla_target(%d): %s failed: unable to allocate "
                    "request packet\n", vha->vp_idx, __func__);
                return;
        }

        /*
         * On entry we have the firmware's response to the ABTS response
         * we generated earlier, so its ID fields are already reversed.
         */
        ctio->entry_type = CTIO_TYPE7;
        ctio->entry_count = 1;
        ctio->nport_handle = entry->nport_handle;
        ctio->handle = QLA_TGT_SKIP_HANDLE | CTIO_COMPLETION_HANDLE_MARK;
        ctio->timeout = cpu_to_le16(QLA_TGT_TIMEOUT);
        ctio->vp_index = vha->vp_idx;
        ctio->initiator_id[0] = entry->fcp_hdr_le.d_id[0];
        ctio->initiator_id[1] = entry->fcp_hdr_le.d_id[1];
        ctio->initiator_id[2] = entry->fcp_hdr_le.d_id[2];
        ctio->exchange_addr = entry->exchange_addr_to_abort;
        ctio->u.status1.flags = cpu_to_le16(CTIO7_FLAGS_STATUS_MODE_1 |
            CTIO7_FLAGS_TERMINATE);
        ctio->u.status1.ox_id = cpu_to_le16(entry->fcp_hdr_le.ox_id);

        /* Memory Barrier */
        wmb();
        qla2x00_start_iocbs(vha, vha->req);

        qlt_24xx_send_abts_resp(vha, (struct abts_recv_from_24xx *)entry,
            FCP_TMF_CMPL, true);
}
/* ha->hardware_lock supposed to be held on entry */
static int __qlt_24xx_handle_abts(struct scsi_qla_host *vha,
        struct abts_recv_from_24xx *abts, struct qla_tgt_sess *sess)
{
        struct qla_hw_data *ha = vha->hw;
        struct se_session *se_sess = sess->se_sess;
        struct qla_tgt_mgmt_cmd *mcmd;
        struct se_cmd *se_cmd;
        uint32_t lun = 0;
        int rc;
        bool found_lun = false;

        spin_lock(&se_sess->sess_cmd_lock);
        list_for_each_entry(se_cmd, &se_sess->sess_cmd_list, se_cmd_list) {
                struct qla_tgt_cmd *cmd =
                    container_of(se_cmd, struct qla_tgt_cmd, se_cmd);
                if (se_cmd->tag == abts->exchange_addr_to_abort) {
                        lun = cmd->unpacked_lun;
                        found_lun = true;
                        break;
                }
        }
        spin_unlock(&se_sess->sess_cmd_lock);

        if (!found_lun)
                return -ENOENT;

        ql_dbg(ql_dbg_tgt_mgt, vha, 0xf00f,
            "qla_target(%d): task abort (tag=%d)\n",
            vha->vp_idx, abts->exchange_addr_to_abort);

        mcmd = mempool_alloc(qla_tgt_mgmt_cmd_mempool, GFP_ATOMIC);
        if (mcmd == NULL) {
                ql_dbg(ql_dbg_tgt_mgt, vha, 0xf051,
                    "qla_target(%d): %s: Allocation of ABORT cmd failed",
                    vha->vp_idx, __func__);
                return -ENOMEM;
        }
        memset(mcmd, 0, sizeof(*mcmd));

        mcmd->sess = sess;
        memcpy(&mcmd->orig_iocb.abts, abts, sizeof(mcmd->orig_iocb.abts));
        mcmd->reset_count = vha->hw->chip_reset;

        rc = ha->tgt.tgt_ops->handle_tmr(mcmd, lun, TMR_ABORT_TASK,
            abts->exchange_addr_to_abort);
        if (rc != 0) {
                ql_dbg(ql_dbg_tgt_mgt, vha, 0xf052,
                    "qla_target(%d): tgt_ops->handle_tmr()"
                    " failed: %d", vha->vp_idx, rc);
                mempool_free(mcmd, qla_tgt_mgmt_cmd_mempool);
                return -EFAULT;
        }

        return 0;
}
/*
 * ha->hardware_lock supposed to be held on entry. Might drop it, then reacquire
 */
static void qlt_24xx_handle_abts(struct scsi_qla_host *vha,
        struct abts_recv_from_24xx *abts)
{
        struct qla_hw_data *ha = vha->hw;
        struct qla_tgt_sess *sess;
        uint32_t tag = abts->exchange_addr_to_abort;
        uint8_t s_id[3];
        int rc;

        if (le32_to_cpu(abts->fcp_hdr_le.parameter) & ABTS_PARAM_ABORT_SEQ) {
                ql_dbg(ql_dbg_tgt_mgt, vha, 0xf053,
                    "qla_target(%d): ABTS: Abort Sequence not "
                    "supported\n", vha->vp_idx);
                qlt_24xx_send_abts_resp(vha, abts, FCP_TMF_REJECTED, false);
                return;
        }

        if (tag == ATIO_EXCHANGE_ADDRESS_UNKNOWN) {
                ql_dbg(ql_dbg_tgt_mgt, vha, 0xf010,
                    "qla_target(%d): ABTS: Unknown Exchange "
                    "Address received\n", vha->vp_idx);
                qlt_24xx_send_abts_resp(vha, abts, FCP_TMF_REJECTED, false);
                return;
        }

        ql_dbg(ql_dbg_tgt_mgt, vha, 0xf011,
            "qla_target(%d): task abort (s_id=%x:%x:%x, "
            "tag=%d, param=%x)\n", vha->vp_idx, abts->fcp_hdr_le.s_id[2],
            abts->fcp_hdr_le.s_id[1], abts->fcp_hdr_le.s_id[0], tag,
            le32_to_cpu(abts->fcp_hdr_le.parameter));

        s_id[0] = abts->fcp_hdr_le.s_id[2];
        s_id[1] = abts->fcp_hdr_le.s_id[1];
        s_id[2] = abts->fcp_hdr_le.s_id[0];

        sess = ha->tgt.tgt_ops->find_sess_by_s_id(vha, s_id);
        if (!sess) {
                ql_dbg(ql_dbg_tgt_mgt, vha, 0xf012,
                    "qla_target(%d): task abort for non-existent session\n",
                    vha->vp_idx);
                rc = qlt_sched_sess_work(vha->vha_tgt.qla_tgt,
                    QLA_TGT_SESS_WORK_ABORT, abts, sizeof(*abts));
                if (rc != 0) {
                        qlt_24xx_send_abts_resp(vha, abts, FCP_TMF_REJECTED,
                            false);
                }
                return;
        }

        rc = __qlt_24xx_handle_abts(vha, abts, sess);
        if (rc != 0) {
                ql_dbg(ql_dbg_tgt_mgt, vha, 0xf054,
                    "qla_target(%d): __qlt_24xx_handle_abts() failed: %d\n",
                    vha->vp_idx, rc);
                qlt_24xx_send_abts_resp(vha, abts, FCP_TMF_REJECTED, false);
                return;
        }
}
/*
 * ha->hardware_lock supposed to be held on entry. Might drop it, then reacquire
 */
static void qlt_24xx_send_task_mgmt_ctio(struct scsi_qla_host *ha,
        struct qla_tgt_mgmt_cmd *mcmd, uint32_t resp_code)
{
        struct atio_from_isp *atio = &mcmd->orig_iocb.atio;
        struct ctio7_to_24xx *ctio;
        uint16_t temp;

        ql_dbg(ql_dbg_tgt, ha, 0xe008,
            "Sending task mgmt CTIO7 (ha=%p, atio=%p, resp_code=%x\n",
            ha, atio, resp_code);

        /* Send marker if required */
        if (qlt_issue_marker(ha, 1) != QLA_SUCCESS)
                return;

        ctio = (struct ctio7_to_24xx *)qla2x00_alloc_iocbs(ha, NULL);
        if (ctio == NULL) {
                ql_dbg(ql_dbg_tgt, ha, 0xe04c,
                    "qla_target(%d): %s failed: unable to allocate "
                    "request packet\n", ha->vp_idx, __func__);
                return;
        }

        ctio->entry_type = CTIO_TYPE7;
        ctio->entry_count = 1;
        ctio->handle = QLA_TGT_SKIP_HANDLE | CTIO_COMPLETION_HANDLE_MARK;
        ctio->nport_handle = mcmd->sess->loop_id;
        ctio->timeout = cpu_to_le16(QLA_TGT_TIMEOUT);
        ctio->vp_index = ha->vp_idx;
        ctio->initiator_id[0] = atio->u.isp24.fcp_hdr.s_id[2];
        ctio->initiator_id[1] = atio->u.isp24.fcp_hdr.s_id[1];
        ctio->initiator_id[2] = atio->u.isp24.fcp_hdr.s_id[0];
        ctio->exchange_addr = atio->u.isp24.exchange_addr;
        ctio->u.status1.flags = (atio->u.isp24.attr << 9) |
            cpu_to_le16(CTIO7_FLAGS_STATUS_MODE_1 | CTIO7_FLAGS_SEND_STATUS);
        temp = be16_to_cpu(atio->u.isp24.fcp_hdr.ox_id);
        ctio->u.status1.ox_id = cpu_to_le16(temp);
        ctio->u.status1.scsi_status =
            cpu_to_le16(SS_RESPONSE_INFO_LEN_VALID);
        ctio->u.status1.response_len = cpu_to_le16(8);
        ctio->u.status1.sense_data[0] = resp_code;

        /* Memory Barrier */
        wmb();
        qla2x00_start_iocbs(ha, ha->req);
}
void qlt_free_mcmd(struct qla_tgt_mgmt_cmd *mcmd)
{
        mempool_free(mcmd, qla_tgt_mgmt_cmd_mempool);
}
EXPORT_SYMBOL(qlt_free_mcmd);
/* callback from target fabric module code */
void qlt_xmit_tm_rsp(struct qla_tgt_mgmt_cmd *mcmd)
{
        struct scsi_qla_host *vha = mcmd->sess->vha;
        struct qla_hw_data *ha = vha->hw;
        unsigned long flags;

        ql_dbg(ql_dbg_tgt_mgt, vha, 0xf013,
            "TM response mcmd (%p) status %#x state %#x",
            mcmd, mcmd->fc_tm_rsp, mcmd->flags);

        spin_lock_irqsave(&ha->hardware_lock, flags);

        if (qla2x00_reset_active(vha) || mcmd->reset_count != ha->chip_reset) {
                /*
                 * Either a chip reset is active or this request was from
                 * previous life, just abort the processing.
                 */
                ql_dbg(ql_dbg_async, vha, 0xe100,
                    "RESET-TMR active/old-count/new-count = %d/%d/%d.\n",
                    qla2x00_reset_active(vha), mcmd->reset_count,
                    ha->chip_reset);
                ha->tgt.tgt_ops->free_mcmd(mcmd);
                spin_unlock_irqrestore(&ha->hardware_lock, flags);
                return;
        }

        if (mcmd->flags == QLA24XX_MGMT_SEND_NACK)
                qlt_send_notify_ack(vha, &mcmd->orig_iocb.imm_ntfy,
                    0, 0, 0, 0, 0, 0);
        else {
                if (mcmd->se_cmd.se_tmr_req->function == TMR_ABORT_TASK)
                        qlt_24xx_send_abts_resp(vha, &mcmd->orig_iocb.abts,
                            mcmd->fc_tm_rsp, false);
                else
                        qlt_24xx_send_task_mgmt_ctio(vha, mcmd,
                            mcmd->fc_tm_rsp);
        }
        /*
         * Make the callback for ->free_mcmd() to queue_work() and invoke
         * target_put_sess_cmd() to drop cmd_kref to 1. The final
         * target_put_sess_cmd() call will be made from TFO->check_stop_free()
         * -> tcm_qla2xxx_check_stop_free() to release the TMR associated se_cmd
         * descriptor after TFO->queue_tm_rsp() -> tcm_qla2xxx_queue_tm_rsp() ->
         * qlt_xmit_tm_rsp() returns here..
         */
        ha->tgt.tgt_ops->free_mcmd(mcmd);
        spin_unlock_irqrestore(&ha->hardware_lock, flags);
}
EXPORT_SYMBOL(qlt_xmit_tm_rsp);
);
1404 static int qlt_pci_map_calc_cnt(struct qla_tgt_prm
*prm
)
1406 struct qla_tgt_cmd
*cmd
= prm
->cmd
;
1408 BUG_ON(cmd
->sg_cnt
== 0);
1410 prm
->sg
= (struct scatterlist
*)cmd
->sg
;
1411 prm
->seg_cnt
= pci_map_sg(prm
->tgt
->ha
->pdev
, cmd
->sg
,
1412 cmd
->sg_cnt
, cmd
->dma_data_direction
);
1413 if (unlikely(prm
->seg_cnt
== 0))
1416 prm
->cmd
->sg_mapped
= 1;
1418 if (cmd
->se_cmd
.prot_op
== TARGET_PROT_NORMAL
) {
1420 * If greater than four sg entries then we need to allocate
1421 * the continuation entries
1423 if (prm
->seg_cnt
> prm
->tgt
->datasegs_per_cmd
)
1424 prm
->req_cnt
+= DIV_ROUND_UP(prm
->seg_cnt
-
1425 prm
->tgt
->datasegs_per_cmd
,
1426 prm
->tgt
->datasegs_per_cont
);
1429 if ((cmd
->se_cmd
.prot_op
== TARGET_PROT_DIN_INSERT
) ||
1430 (cmd
->se_cmd
.prot_op
== TARGET_PROT_DOUT_STRIP
)) {
1431 prm
->seg_cnt
= DIV_ROUND_UP(cmd
->bufflen
, cmd
->blk_sz
);
1432 prm
->tot_dsds
= prm
->seg_cnt
;
1434 prm
->tot_dsds
= prm
->seg_cnt
;
1436 if (cmd
->prot_sg_cnt
) {
1437 prm
->prot_sg
= cmd
->prot_sg
;
1438 prm
->prot_seg_cnt
= pci_map_sg(prm
->tgt
->ha
->pdev
,
1439 cmd
->prot_sg
, cmd
->prot_sg_cnt
,
1440 cmd
->dma_data_direction
);
1441 if (unlikely(prm
->prot_seg_cnt
== 0))
1444 if ((cmd
->se_cmd
.prot_op
== TARGET_PROT_DIN_INSERT
) ||
1445 (cmd
->se_cmd
.prot_op
== TARGET_PROT_DOUT_STRIP
)) {
1446 /* Dif Bundling not support here */
1447 prm
->prot_seg_cnt
= DIV_ROUND_UP(cmd
->bufflen
,
1449 prm
->tot_dsds
+= prm
->prot_seg_cnt
;
1451 prm
->tot_dsds
+= prm
->prot_seg_cnt
;
1458 ql_dbg(ql_dbg_tgt
, prm
->cmd
->vha
, 0xe04d,
1459 "qla_target(%d): PCI mapping failed: sg_cnt=%d",
1460 0, prm
->cmd
->sg_cnt
);
static void qlt_unmap_sg(struct scsi_qla_host *vha, struct qla_tgt_cmd *cmd)
{
        struct qla_hw_data *ha = vha->hw;

        if (!cmd->sg_mapped)
                return;

        pci_unmap_sg(ha->pdev, cmd->sg, cmd->sg_cnt, cmd->dma_data_direction);
        cmd->sg_mapped = 0;

        if (cmd->prot_sg_cnt)
                pci_unmap_sg(ha->pdev, cmd->prot_sg, cmd->prot_sg_cnt,
                    cmd->dma_data_direction);

        if (cmd->ctx_dsd_alloced)
                qla2x00_clean_dsd_pool(ha, NULL, cmd);

        if (cmd->ctx)
                dma_pool_free(ha->dl_dma_pool, cmd->ctx, cmd->ctx->crc_ctx_dma);
}
static int qlt_check_reserve_free_req(struct scsi_qla_host *vha,
        uint32_t req_cnt)
{
        uint32_t cnt, cnt_in;

        if (vha->req->cnt < (req_cnt + 2)) {
                cnt = (uint16_t)RD_REG_DWORD(vha->req->req_q_out);
                cnt_in = (uint16_t)RD_REG_DWORD(vha->req->req_q_in);

                if (vha->req->ring_index < cnt)
                        vha->req->cnt = cnt - vha->req->ring_index;
                else
                        vha->req->cnt = vha->req->length -
                            (vha->req->ring_index - cnt);
        }

        if (unlikely(vha->req->cnt < (req_cnt + 2))) {
                ql_dbg(ql_dbg_io, vha, 0x305a,
                    "qla_target(%d): There is no room in the request ring: vha->req->ring_index=%d, vha->req->cnt=%d, req_cnt=%d Req-out=%d Req-in=%d Req-Length=%d\n",
                    vha->vp_idx, vha->req->ring_index,
                    vha->req->cnt, req_cnt, cnt, cnt_in, vha->req->length);
                return -EAGAIN;
        }
        vha->req->cnt -= req_cnt;

        return 0;
}
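
/*
 * Free request-ring space is recomputed lazily: vha->req->cnt is only
 * refreshed from the hardware out-pointer when it looks too small, and two
 * entries are always held back (the usual full-vs-empty guard for circular
 * rings) before the reservation is charged against the count.
 */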
/*
 * ha->hardware_lock supposed to be held on entry. Might drop it, then reacquire
 */
static inline void *qlt_get_req_pkt(struct scsi_qla_host *vha)
{
        /* Adjust ring index. */
        vha->req->ring_index++;
        if (vha->req->ring_index == vha->req->length) {
                vha->req->ring_index = 0;
                vha->req->ring_ptr = vha->req->ring;
        } else {
                vha->req->ring_ptr++;
        }
        return (cont_entry_t *)vha->req->ring_ptr;
}
/* ha->hardware_lock supposed to be held on entry */
static inline uint32_t qlt_make_handle(struct scsi_qla_host *vha)
{
        struct qla_hw_data *ha = vha->hw;
        uint32_t h;

        h = ha->tgt.current_handle;
        /* always increment cmd handle */
        do {
                ++h;
                if (h > DEFAULT_OUTSTANDING_COMMANDS)
                        h = 1; /* 0 is QLA_TGT_NULL_HANDLE */
                if (h == ha->tgt.current_handle) {
                        ql_dbg(ql_dbg_io, vha, 0x305b,
                            "qla_target(%d): Ran out of "
                            "empty cmd slots in ha %p\n", vha->vp_idx, ha);
                        h = QLA_TGT_NULL_HANDLE;
                        break;
                }
        } while ((h == QLA_TGT_NULL_HANDLE) ||
            (h == QLA_TGT_SKIP_HANDLE) ||
            (ha->tgt.cmds[h-1] != NULL));

        if (h != QLA_TGT_NULL_HANDLE)
                ha->tgt.current_handle = h;

        return h;
}
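
/*
 * Command handles are allocated from a circular scan over
 * 1..DEFAULT_OUTSTANDING_COMMANDS, skipping the reserved NULL/SKIP values
 * and any slot still owned by an outstanding command (ha->tgt.cmds[h-1]).
 * Coming back around to current_handle means every slot is busy, and
 * QLA_TGT_NULL_HANDLE is returned so the caller can back off.
 */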
/* ha->hardware_lock supposed to be held on entry */
static int qlt_24xx_build_ctio_pkt(struct qla_tgt_prm *prm,
        struct scsi_qla_host *vha)
{
        uint32_t h;
        struct ctio7_to_24xx *pkt;
        struct qla_hw_data *ha = vha->hw;
        struct atio_from_isp *atio = &prm->cmd->atio;
        uint16_t temp;

        pkt = (struct ctio7_to_24xx *)vha->req->ring_ptr;
        prm->pkt = pkt;
        memset(pkt, 0, sizeof(*pkt));

        pkt->entry_type = CTIO_TYPE7;
        pkt->entry_count = (uint8_t)prm->req_cnt;
        pkt->vp_index = vha->vp_idx;

        h = qlt_make_handle(vha);
        if (unlikely(h == QLA_TGT_NULL_HANDLE)) {
                /*
                 * CTIO type 7 from the firmware doesn't provide a way to
                 * know the initiator's LOOP ID, hence we can't find
                 * the session and, so, the command.
                 */
                return -EAGAIN;
        } else
                ha->tgt.cmds[h-1] = prm->cmd;

        pkt->handle = h | CTIO_COMPLETION_HANDLE_MARK;
        pkt->nport_handle = prm->cmd->loop_id;
        pkt->timeout = cpu_to_le16(QLA_TGT_TIMEOUT);
        pkt->initiator_id[0] = atio->u.isp24.fcp_hdr.s_id[2];
        pkt->initiator_id[1] = atio->u.isp24.fcp_hdr.s_id[1];
        pkt->initiator_id[2] = atio->u.isp24.fcp_hdr.s_id[0];
        pkt->exchange_addr = atio->u.isp24.exchange_addr;
        pkt->u.status0.flags |= (atio->u.isp24.attr << 9);
        temp = be16_to_cpu(atio->u.isp24.fcp_hdr.ox_id);
        pkt->u.status0.ox_id = cpu_to_le16(temp);
        pkt->u.status0.relative_offset = cpu_to_le32(prm->cmd->offset);

        return 0;
}
/*
 * ha->hardware_lock supposed to be held on entry. We have already made sure
 * that there is sufficient amount of request entries to not drop it.
 */
static void qlt_load_cont_data_segments(struct qla_tgt_prm *prm,
        struct scsi_qla_host *vha)
{
        int cnt;
        uint32_t *dword_ptr;
        int enable_64bit_addressing = prm->tgt->tgt_enable_64bit_addr;

        /* Build continuation packets */
        while (prm->seg_cnt > 0) {
                cont_a64_entry_t *cont_pkt64 =
                    (cont_a64_entry_t *)qlt_get_req_pkt(vha);

                /*
                 * Make sure that from cont_pkt64 none of the
                 * 64-bit specific fields are used for 32-bit
                 * addressing. Cast to (cont_entry_t *) for
                 * that.
                 */

                memset(cont_pkt64, 0, sizeof(*cont_pkt64));

                cont_pkt64->entry_count = 1;
                cont_pkt64->sys_define = 0;

                if (enable_64bit_addressing) {
                        cont_pkt64->entry_type = CONTINUE_A64_TYPE;
                        dword_ptr =
                            (uint32_t *)&cont_pkt64->dseg_0_address;
                } else {
                        cont_pkt64->entry_type = CONTINUE_TYPE;
                        dword_ptr =
                            (uint32_t *)&((cont_entry_t *)
                                cont_pkt64)->dseg_0_address;
                }

                /* Load continuation entry data segments */
                for (cnt = 0;
                    cnt < prm->tgt->datasegs_per_cont && prm->seg_cnt;
                    cnt++, prm->seg_cnt--) {
                        *dword_ptr++ =
                            cpu_to_le32(pci_dma_lo32
                                (sg_dma_address(prm->sg)));
                        if (enable_64bit_addressing) {
                                *dword_ptr++ =
                                    cpu_to_le32(pci_dma_hi32
                                        (sg_dma_address(prm->sg)));
                        }
                        *dword_ptr++ = cpu_to_le32(sg_dma_len(prm->sg));

                        prm->sg = sg_next(prm->sg);
                }
        }
}
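
/*
 * Continuation IOCBs come in two layouts: CONTINUE_A64_TYPE carries 64-bit
 * address/length triplets, while CONTINUE_TYPE packs 32-bit address/length
 * pairs. Casting cont_pkt64 to cont_entry_t above selects the 32-bit field
 * offsets when 64-bit addressing is disabled.
 */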
/*
 * ha->hardware_lock supposed to be held on entry. We have already made sure
 * that there is sufficient amount of request entries to not drop it.
 */
static void qlt_load_data_segments(struct qla_tgt_prm *prm,
        struct scsi_qla_host *vha)
{
        int cnt;
        uint32_t *dword_ptr;
        int enable_64bit_addressing = prm->tgt->tgt_enable_64bit_addr;
        struct ctio7_to_24xx *pkt24 = (struct ctio7_to_24xx *)prm->pkt;

        pkt24->u.status0.transfer_length = cpu_to_le32(prm->cmd->bufflen);

        /* Setup packet address segment pointer */
        dword_ptr = pkt24->u.status0.dseg_0_address;

        /* Set total data segment count */
        if (prm->seg_cnt)
                pkt24->dseg_count = cpu_to_le16(prm->seg_cnt);

        if (prm->seg_cnt == 0) {
                /* No data transfer */
                *dword_ptr++ = 0;
                *dword_ptr = 0;
                return;
        }

        /* If scatter gather */

        /* Load command entry data segments */
        for (cnt = 0;
            (cnt < prm->tgt->datasegs_per_cmd) && prm->seg_cnt;
            cnt++, prm->seg_cnt--) {
                *dword_ptr++ =
                    cpu_to_le32(pci_dma_lo32(sg_dma_address(prm->sg)));
                if (enable_64bit_addressing) {
                        *dword_ptr++ =
                            cpu_to_le32(pci_dma_hi32(
                                sg_dma_address(prm->sg)));
                }
                *dword_ptr++ = cpu_to_le32(sg_dma_len(prm->sg));

                prm->sg = sg_next(prm->sg);
        }

        qlt_load_cont_data_segments(prm, vha);
}
static inline int qlt_has_data(struct qla_tgt_cmd *cmd)
{
        return cmd->bufflen > 0;
}
/*
 * Called without ha->hardware_lock held
 */
static int qlt_pre_xmit_response(struct qla_tgt_cmd *cmd,
        struct qla_tgt_prm *prm, int xmit_type, uint8_t scsi_status,
        uint32_t *full_req_cnt)
{
        struct qla_tgt *tgt = cmd->tgt;
        struct scsi_qla_host *vha = tgt->vha;
        struct qla_hw_data *ha = vha->hw;
        struct se_cmd *se_cmd = &cmd->se_cmd;

        if (unlikely(cmd->aborted)) {
                ql_dbg(ql_dbg_tgt_mgt, vha, 0xf014,
                    "qla_target(%d): terminating exchange for aborted cmd=%p (se_cmd=%p, tag=%lld)",
                    vha->vp_idx, cmd, se_cmd, se_cmd->tag);

                cmd->state = QLA_TGT_STATE_ABORTED;
                cmd->cmd_flags |= BIT_6;

                qlt_send_term_exchange(vha, cmd, &cmd->atio, 0);

                /* !! At this point cmd could be already freed !! */
                return QLA_TGT_PRE_XMIT_RESP_CMD_ABORTED;
        }

        prm->cmd = cmd;
        prm->tgt = tgt;
        prm->rq_result = scsi_status;
        prm->sense_buffer = &cmd->sense_buffer[0];
        prm->sense_buffer_len = TRANSPORT_SENSE_BUFFER;
        prm->sg = NULL;
        prm->seg_cnt = -1;
        prm->req_cnt = 1;
        prm->add_status_pkt = 0;

        /* Send marker if required */
        if (qlt_issue_marker(vha, 0) != QLA_SUCCESS)
                return -EFAULT;

        if ((xmit_type & QLA_TGT_XMIT_DATA) && qlt_has_data(cmd)) {
                if (qlt_pci_map_calc_cnt(prm) != 0)
                        return -EAGAIN;
        }

        *full_req_cnt = prm->req_cnt;

        if (se_cmd->se_cmd_flags & SCF_UNDERFLOW_BIT) {
                prm->residual = se_cmd->residual_count;
                ql_dbg(ql_dbg_io + ql_dbg_verbose, vha, 0x305c,
                    "Residual underflow: %d (tag %lld, op %x, bufflen %d, rq_result %x)\n",
                    prm->residual, se_cmd->tag,
                    se_cmd->t_task_cdb ? se_cmd->t_task_cdb[0] : 0,
                    cmd->bufflen, prm->rq_result);
                prm->rq_result |= SS_RESIDUAL_UNDER;
        } else if (se_cmd->se_cmd_flags & SCF_OVERFLOW_BIT) {
                prm->residual = se_cmd->residual_count;
                ql_dbg(ql_dbg_io, vha, 0x305d,
                    "Residual overflow: %d (tag %lld, op %x, bufflen %d, rq_result %x)\n",
                    prm->residual, se_cmd->tag, se_cmd->t_task_cdb ?
                    se_cmd->t_task_cdb[0] : 0, cmd->bufflen, prm->rq_result);
                prm->rq_result |= SS_RESIDUAL_OVER;
        }

        if (xmit_type & QLA_TGT_XMIT_STATUS) {
                /*
                 * If QLA_TGT_XMIT_DATA is not set, add_status_pkt will be
                 * ignored in *xmit_response() below
                 */
                if (qlt_has_data(cmd)) {
                        if (QLA_TGT_SENSE_VALID(prm->sense_buffer) ||
                            (IS_FWI2_CAPABLE(ha) &&
                            (prm->rq_result != 0))) {
                                prm->add_status_pkt = 1;
                                (*full_req_cnt)++;
                        }
                }
        }

        return 0;
}
static inline int qlt_need_explicit_conf(struct qla_hw_data *ha,
        struct qla_tgt_cmd *cmd, int sending_sense)
{
        if (ha->tgt.enable_class_2)
                return 0;

        if (sending_sense)
                return cmd->conf_compl_supported;
        else
                return ha->tgt.enable_explicit_conf &&
                    cmd->conf_compl_supported;
}
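
/*
 * Explicit confirmation is never requested for Class 2 service (the class
 * provides its own acknowledgement); otherwise it depends on the initiator
 * having advertised confirmed-completion support, and for non-sense
 * responses additionally on the enable_explicit_conf tunable.
 */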
#ifdef CONFIG_QLA_TGT_DEBUG_SRR
/*
 *  Original taken from the XFS code
 */
static unsigned long qlt_srr_random(void)
{
        static int Inited;
        static unsigned long RandomValue;
        static DEFINE_SPINLOCK(lock);
        /* cycles pseudo-randomly through all values between 1 and 2^31 - 2 */
        register unsigned long rv;
        register unsigned long lo;
        register unsigned long hi;
        unsigned long flags;

        spin_lock_irqsave(&lock, flags);
        if (!Inited) {
                RandomValue = jiffies;
                Inited = 1;
        }
        rv = RandomValue;
        hi = rv / 127773;
        lo = rv % 127773;
        rv = 16807 * lo - 2836 * hi;
        if (rv <= 0)
                rv += 2147483647;
        RandomValue = rv;
        spin_unlock_irqrestore(&lock, flags);
        return rv;
}

static void qlt_check_srr_debug(struct qla_tgt_cmd *cmd, int *xmit_type)
{
        struct se_cmd *se_cmd = &cmd->se_cmd;
#if 0 /* This is not a real status packets lost, so it won't lead to SRR */
        if ((*xmit_type & QLA_TGT_XMIT_STATUS) && (qlt_srr_random() % 200)
            == 50) {
                *xmit_type &= ~QLA_TGT_XMIT_STATUS;
                ql_dbg(ql_dbg_tgt_mgt, cmd->vha, 0xf015,
                    "Dropping cmd %p (tag %d) status", cmd, se_cmd->tag);
        }
#endif
        /*
         * It's currently not possible to simulate SRRs for FCP_WRITE without
         * a physical link layer failure, so don't even try here..
         */
        if (cmd->dma_data_direction != DMA_FROM_DEVICE)
                return;

        if (qlt_has_data(cmd) && (cmd->sg_cnt > 1) &&
            ((qlt_srr_random() % 100) == 20)) {
                int i, leave;
                unsigned int tot_len = 0;

                leave = qlt_srr_random() % cmd->sg_cnt;

                for (i = 0; i < leave; i++)
                        tot_len += cmd->sg[i].length;

                ql_dbg(ql_dbg_tgt_mgt, cmd->vha, 0xf016,
                    "Cutting cmd %p (tag %d) buffer"
                    " tail to len %d, sg_cnt %d (cmd->bufflen %d,"
                    " cmd->sg_cnt %d)", cmd, se_cmd->tag, tot_len, leave,
                    cmd->bufflen, cmd->sg_cnt);

                cmd->bufflen = tot_len;
                cmd->sg_cnt = leave;
        }

        if (qlt_has_data(cmd) && ((qlt_srr_random() % 100) == 70)) {
                unsigned int offset = qlt_srr_random() % cmd->bufflen;

                ql_dbg(ql_dbg_tgt_mgt, cmd->vha, 0xf017,
                    "Cutting cmd %p (tag %d) buffer head "
                    "to offset %d (cmd->bufflen %d)", cmd, se_cmd->tag, offset,
                    cmd->bufflen);
                if (offset == 0)
                        *xmit_type &= ~QLA_TGT_XMIT_DATA;
                else if (qlt_set_data_offset(cmd, offset)) {
                        ql_dbg(ql_dbg_tgt_mgt, cmd->vha, 0xf018,
                            "qlt_set_data_offset() failed (tag %d)", se_cmd->tag);
                }
        }
}
#else
static inline void qlt_check_srr_debug(struct qla_tgt_cmd *cmd, int *xmit_type)
{}
#endif
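
/*
 * qlt_srr_random() appears to be the classic Park-Miller "minimal standard"
 * generator (x' = 16807 * x mod 2^31 - 1) computed with Schrage's
 * decomposition (127773 = quotient, 2836 = remainder) to avoid overflow;
 * it is only used to randomly drop or truncate buffers when SRR debugging
 * is compiled in.
 */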
static void qlt_24xx_init_ctio_to_isp(struct ctio7_to_24xx *ctio,
        struct qla_tgt_prm *prm)
{
        prm->sense_buffer_len = min_t(uint32_t, prm->sense_buffer_len,
            (uint32_t)sizeof(ctio->u.status1.sense_data));
        ctio->u.status0.flags |= cpu_to_le16(CTIO7_FLAGS_SEND_STATUS);
        if (qlt_need_explicit_conf(prm->tgt->ha, prm->cmd, 0)) {
                ctio->u.status0.flags |= cpu_to_le16(
                    CTIO7_FLAGS_EXPLICIT_CONFORM |
                    CTIO7_FLAGS_CONFORM_REQ);
        }
        ctio->u.status0.residual = cpu_to_le32(prm->residual);
        ctio->u.status0.scsi_status = cpu_to_le16(prm->rq_result);
        if (QLA_TGT_SENSE_VALID(prm->sense_buffer)) {
                int i;

                if (qlt_need_explicit_conf(prm->tgt->ha, prm->cmd, 1)) {
                        if (prm->cmd->se_cmd.scsi_status != 0) {
                                ql_dbg(ql_dbg_tgt, prm->cmd->vha, 0xe017,
                                    "Skipping EXPLICIT_CONFORM and "
                                    "CTIO7_FLAGS_CONFORM_REQ for FCP READ w/ "
                                    "non GOOD status\n");
                                goto skip_explict_conf;
                        }
                        ctio->u.status1.flags |= cpu_to_le16(
                            CTIO7_FLAGS_EXPLICIT_CONFORM |
                            CTIO7_FLAGS_CONFORM_REQ);
                }
skip_explict_conf:
                ctio->u.status1.flags &=
                    ~cpu_to_le16(CTIO7_FLAGS_STATUS_MODE_0);
                ctio->u.status1.flags |=
                    cpu_to_le16(CTIO7_FLAGS_STATUS_MODE_1);
                ctio->u.status1.scsi_status |=
                    cpu_to_le16(SS_SENSE_LEN_VALID);
                ctio->u.status1.sense_length =
                    cpu_to_le16(prm->sense_buffer_len);
                for (i = 0; i < prm->sense_buffer_len/4; i++)
                        ((uint32_t *)ctio->u.status1.sense_data)[i] =
                            cpu_to_be32(((uint32_t *)prm->sense_buffer)[i]);

                if (unlikely((prm->sense_buffer_len % 4) != 0)) {
                        ql_dbg(ql_dbg_tgt, prm->cmd->vha, 0xe04f,
                            "qla_target(%d): %d bytes of sense "
                            "lost", prm->tgt->ha->vp_idx,
                            prm->sense_buffer_len % 4);
                }
        } else {
                ctio->u.status1.flags &=
                    ~cpu_to_le16(CTIO7_FLAGS_STATUS_MODE_0);
                ctio->u.status1.flags |=
                    cpu_to_le16(CTIO7_FLAGS_STATUS_MODE_1);
                ctio->u.status1.sense_length = 0;
                memset(ctio->u.status1.sense_data, 0,
                    sizeof(ctio->u.status1.sense_data));
        }

        /* Sense with len > 24, is it possible ??? */
}
static inline int
qlt_hba_err_chk_enabled(struct se_cmd *se_cmd)
{
        /*
         * Uncomment when corresponding SCSI changes are done.
         *
         *      if (!sp->cmd->prot_chk)
         *              return 0;
         */
        switch (se_cmd->prot_op) {
        case TARGET_PROT_DOUT_INSERT:
        case TARGET_PROT_DIN_STRIP:
                if (ql2xenablehba_err_chk >= 1)
                        return 1;
                break;
        case TARGET_PROT_DOUT_PASS:
        case TARGET_PROT_DIN_PASS:
                if (ql2xenablehba_err_chk >= 2)
                        return 1;
                break;
        case TARGET_PROT_DIN_INSERT:
        case TARGET_PROT_DOUT_STRIP:
                return 1;
        default:
                break;
        }
        return 0;
}
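
/*
 * ql2xenablehba_err_chk acts as a threshold: level 1 enables hardware
 * verification when the HBA inserts or strips protection data on the wire
 * side, level 2 extends it to pass-through (DIN/DOUT_PASS) operations, and
 * DIN_INSERT/DOUT_STRIP always run with checking since the HBA must
 * generate the tags in those modes anyway.
 */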
/*
 * qla24xx_set_t10dif_tags_from_cmd - Extract Ref and App tags from SCSI command
 */
static inline void
qlt_set_t10dif_tags(struct se_cmd *se_cmd, struct crc_context *ctx)
{
	uint32_t lba = 0xffffffff & se_cmd->t_task_lba;

	/* wait until Mode Sense/Select cmd, modepage Ah, subpage 2
	 * have been implemented by TCM, before AppTag is avail.
	 * Look for modesense_handlers[]
	 */
	ctx->app_tag = 0;
	ctx->app_tag_mask[0] = 0x0;
	ctx->app_tag_mask[1] = 0x0;

	switch (se_cmd->prot_type) {
	case TARGET_DIF_TYPE0_PROT:
		/*
		 * No check for ql2xenablehba_err_chk, as it would be an
		 * I/O error if hba tag generation is not done.
		 */
		ctx->ref_tag = cpu_to_le32(lba);

		if (!qlt_hba_err_chk_enabled(se_cmd))
			break;

		/* enable ALL bytes of the ref tag */
		ctx->ref_tag_mask[0] = 0xff;
		ctx->ref_tag_mask[1] = 0xff;
		ctx->ref_tag_mask[2] = 0xff;
		ctx->ref_tag_mask[3] = 0xff;
		break;
	/*
	 * For Type 1 protection: 16 bit GUARD tag, 32 bit REF tag, and
	 * 16 bit app tag.
	 */
	case TARGET_DIF_TYPE1_PROT:
		ctx->ref_tag = cpu_to_le32(lba);

		if (!qlt_hba_err_chk_enabled(se_cmd))
			break;

		/* enable ALL bytes of the ref tag */
		ctx->ref_tag_mask[0] = 0xff;
		ctx->ref_tag_mask[1] = 0xff;
		ctx->ref_tag_mask[2] = 0xff;
		ctx->ref_tag_mask[3] = 0xff;
		break;
	/*
	 * For TYPE 2 protection: 16 bit GUARD + 32 bit REF tag has to
	 * match LBA in CDB + N
	 */
	case TARGET_DIF_TYPE2_PROT:
		ctx->ref_tag = cpu_to_le32(lba);

		if (!qlt_hba_err_chk_enabled(se_cmd))
			break;

		/* enable ALL bytes of the ref tag */
		ctx->ref_tag_mask[0] = 0xff;
		ctx->ref_tag_mask[1] = 0xff;
		ctx->ref_tag_mask[2] = 0xff;
		ctx->ref_tag_mask[3] = 0xff;
		break;
	/* For Type 3 protection: 16 bit GUARD only */
	case TARGET_DIF_TYPE3_PROT:
		ctx->ref_tag_mask[0] = ctx->ref_tag_mask[1] =
			ctx->ref_tag_mask[2] = ctx->ref_tag_mask[3] = 0x00;
		break;
	}
}
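/*
 * Building a CRC2 (T10-DIF) CTIO below proceeds in four steps: derive the
 * protection byte count from the block count (8 bytes of PI per block),
 * select the firmware protection opts (insert/remove/pass) from the
 * se_cmd prot_op, allocate a crc_context from the DMA pool and stamp it
 * with the ref/app tags, then walk the data (and, when bundling,
 * protection) scatterlists into DSD lists hung off that context.
 */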
static inline int
qlt_build_ctio_crc2_pkt(struct qla_tgt_prm *prm, scsi_qla_host_t *vha)
{
	uint32_t		*cur_dsd;
	uint32_t		transfer_length = 0;
	uint32_t		data_bytes;
	uint32_t		dif_bytes;
	uint8_t			bundling = 1;
	uint8_t			*clr_ptr;
	struct crc_context	*crc_ctx_pkt = NULL;
	struct qla_hw_data	*ha;
	struct ctio_crc2_to_fw	*pkt;
	dma_addr_t		crc_ctx_dma;
	uint16_t		fw_prot_opts = 0;
	struct qla_tgt_cmd	*cmd = prm->cmd;
	struct se_cmd		*se_cmd = &cmd->se_cmd;
	uint32_t h;
	struct atio_from_isp *atio = &prm->cmd->atio;
	uint16_t t16;

	ha = vha->hw;

	pkt = (struct ctio_crc2_to_fw *)vha->req->ring_ptr;
	prm->pkt = pkt;
	memset(pkt, 0, sizeof(*pkt));

	ql_dbg(ql_dbg_tgt, vha, 0xe071,
	    "qla_target(%d):%s: se_cmd[%p] CRC2 prot_op[0x%x] cmd prot sg:cnt[%p:%x] lba[%llu]\n",
	    vha->vp_idx, __func__, se_cmd, se_cmd->prot_op,
	    prm->prot_sg, prm->prot_seg_cnt, se_cmd->t_task_lba);

	if ((se_cmd->prot_op == TARGET_PROT_DIN_INSERT) ||
	    (se_cmd->prot_op == TARGET_PROT_DOUT_STRIP))
		bundling = 0;

	/* Compute dif len and adjust data len to include protection */
	data_bytes = cmd->bufflen;
	dif_bytes  = (data_bytes / cmd->blk_sz) * 8;

	switch (se_cmd->prot_op) {
	case TARGET_PROT_DIN_INSERT:
	case TARGET_PROT_DOUT_STRIP:
		transfer_length = data_bytes;
		data_bytes += dif_bytes;
		break;
	case TARGET_PROT_DIN_STRIP:
	case TARGET_PROT_DOUT_INSERT:
	case TARGET_PROT_DIN_PASS:
	case TARGET_PROT_DOUT_PASS:
		transfer_length = data_bytes + dif_bytes;
		break;
	default:
		BUG();
		break;
	}

	if (!qlt_hba_err_chk_enabled(se_cmd))
		fw_prot_opts |= 0x10; /* Disable Guard tag checking */
	/* HBA error checking enabled */
	else if (IS_PI_UNINIT_CAPABLE(ha)) {
		if ((se_cmd->prot_type == TARGET_DIF_TYPE1_PROT) ||
		    (se_cmd->prot_type == TARGET_DIF_TYPE2_PROT))
			fw_prot_opts |= PO_DIS_VALD_APP_ESC;
		else if (se_cmd->prot_type == TARGET_DIF_TYPE3_PROT)
			fw_prot_opts |= PO_DIS_VALD_APP_REF_ESC;
	}

	switch (se_cmd->prot_op) {
	case TARGET_PROT_DIN_INSERT:
	case TARGET_PROT_DOUT_INSERT:
		fw_prot_opts |= PO_MODE_DIF_INSERT;
		break;
	case TARGET_PROT_DIN_STRIP:
	case TARGET_PROT_DOUT_STRIP:
		fw_prot_opts |= PO_MODE_DIF_REMOVE;
		break;
	case TARGET_PROT_DIN_PASS:
	case TARGET_PROT_DOUT_PASS:
		fw_prot_opts |= PO_MODE_DIF_PASS;
		/* FUTURE: does tcm require T10CRC<->IPCKSUM conversion? */
		break;
	default:	/* Normal Request */
		fw_prot_opts |= PO_MODE_DIF_PASS;
		break;
	}

	/* Update entry type to indicate Command Type CRC_2 IOCB */
	pkt->entry_type  = CTIO_CRC2;
	pkt->entry_count = 1;
	pkt->vp_index = vha->vp_idx;

	h = qlt_make_handle(vha);
	if (unlikely(h == QLA_TGT_NULL_HANDLE)) {
		/*
		 * CTIO type 7 from the firmware doesn't provide a way to
		 * know the initiator's LOOP ID, hence we can't find
		 * the session and, so, the command.
		 */
		return -EAGAIN;
	} else
		ha->tgt.cmds[h-1] = prm->cmd;

	pkt->handle  = h | CTIO_COMPLETION_HANDLE_MARK;
	pkt->nport_handle = prm->cmd->loop_id;
	pkt->timeout = cpu_to_le16(QLA_TGT_TIMEOUT);
	pkt->initiator_id[0] = atio->u.isp24.fcp_hdr.s_id[2];
	pkt->initiator_id[1] = atio->u.isp24.fcp_hdr.s_id[1];
	pkt->initiator_id[2] = atio->u.isp24.fcp_hdr.s_id[0];
	pkt->exchange_addr   = atio->u.isp24.exchange_addr;

	/* silence compile warning */
	t16 = be16_to_cpu(atio->u.isp24.fcp_hdr.ox_id);
	pkt->ox_id  = cpu_to_le16(t16);

	t16 = (atio->u.isp24.attr << 9);
	pkt->flags |= cpu_to_le16(t16);
	pkt->relative_offset = cpu_to_le32(prm->cmd->offset);

	/* Set transfer direction */
	if (cmd->dma_data_direction == DMA_TO_DEVICE)
		pkt->flags = cpu_to_le16(CTIO7_FLAGS_DATA_IN);
	else if (cmd->dma_data_direction == DMA_FROM_DEVICE)
		pkt->flags = cpu_to_le16(CTIO7_FLAGS_DATA_OUT);

	pkt->dseg_count = prm->tot_dsds;
	/* Fibre channel byte count */
	pkt->transfer_length = cpu_to_le32(transfer_length);

	/* ----- CRC context -------- */

	/* Allocate CRC context from global pool */
	crc_ctx_pkt = cmd->ctx =
	    dma_pool_alloc(ha->dl_dma_pool, GFP_ATOMIC, &crc_ctx_dma);

	if (!crc_ctx_pkt)
		goto crc_queuing_error;

	/* Zero out CTX area. */
	clr_ptr = (uint8_t *)crc_ctx_pkt;
	memset(clr_ptr, 0, sizeof(*crc_ctx_pkt));

	crc_ctx_pkt->crc_ctx_dma = crc_ctx_dma;
	INIT_LIST_HEAD(&crc_ctx_pkt->dsd_list);

	/* Set handle */
	crc_ctx_pkt->handle = pkt->handle;

	qlt_set_t10dif_tags(se_cmd, crc_ctx_pkt);

	pkt->crc_context_address[0] = cpu_to_le32(LSD(crc_ctx_dma));
	pkt->crc_context_address[1] = cpu_to_le32(MSD(crc_ctx_dma));
	pkt->crc_context_len = CRC_CONTEXT_LEN_FW;

	if (!bundling) {
		cur_dsd = (uint32_t *) &crc_ctx_pkt->u.nobundling.data_address;
	} else {
		/*
		 * Configure Bundling if we need to fetch interleaving
		 * protection PCI accesses
		 */
		fw_prot_opts |= PO_ENABLE_DIF_BUNDLING;
		crc_ctx_pkt->u.bundling.dif_byte_count = cpu_to_le32(dif_bytes);
		crc_ctx_pkt->u.bundling.dseg_count =
			cpu_to_le16(prm->tot_dsds - prm->prot_seg_cnt);
		cur_dsd = (uint32_t *) &crc_ctx_pkt->u.bundling.data_address;
	}

	/* Finish the common fields of CRC pkt */
	crc_ctx_pkt->blk_size   = cpu_to_le16(cmd->blk_sz);
	crc_ctx_pkt->prot_opts  = cpu_to_le16(fw_prot_opts);
	crc_ctx_pkt->byte_count = cpu_to_le32(data_bytes);
	crc_ctx_pkt->guard_seed = cpu_to_le16(0);

	/* Walks data segments */
	pkt->flags |= cpu_to_le16(CTIO7_FLAGS_DSD_PTR);

	if (!bundling && prm->prot_seg_cnt) {
		if (qla24xx_walk_and_build_sglist_no_difb(ha, NULL, cur_dsd,
			prm->tot_dsds, cmd))
			goto crc_queuing_error;
	} else if (qla24xx_walk_and_build_sglist(ha, NULL, cur_dsd,
		(prm->tot_dsds - prm->prot_seg_cnt), cmd))
		goto crc_queuing_error;

	if (bundling && prm->prot_seg_cnt) {
		/* Walks dif segments */
		pkt->add_flags |= CTIO_CRC2_AF_DIF_DSD_ENA;

		cur_dsd = (uint32_t *) &crc_ctx_pkt->u.bundling.dif_address;
		if (qla24xx_walk_and_build_prot_sglist(ha, NULL, cur_dsd,
			prm->prot_seg_cnt, cmd))
			goto crc_queuing_error;
	}
	return QLA_SUCCESS;

crc_queuing_error:
	/* Cleanup will be performed by the caller */

	return QLA_FUNCTION_FAILED;
}
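/*
 * When both data and status are requested but the status does not fit in
 * the data CTIO (prm.add_status_pkt), qlt_xmit_response() below chains a
 * second CTIO7 behind the data CTIO and marks the first handle with
 * CTIO_INTERMEDIATE_HANDLE_MARK, so only the trailing status CTIO
 * completes the command.
 */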
/*
 * Callback to setup response of xmit_type of QLA_TGT_XMIT_DATA and
 * QLA_TGT_XMIT_STATUS for >= 24xx silicon
 */
int qlt_xmit_response(struct qla_tgt_cmd *cmd, int xmit_type,
	uint8_t scsi_status)
{
	struct scsi_qla_host *vha = cmd->vha;
	struct qla_hw_data *ha = vha->hw;
	struct ctio7_to_24xx *pkt;
	struct qla_tgt_prm prm;
	uint32_t full_req_cnt = 0;
	unsigned long flags = 0;
	int res;

	memset(&prm, 0, sizeof(prm));
	qlt_check_srr_debug(cmd, &xmit_type);

	ql_dbg(ql_dbg_tgt, cmd->vha, 0xe018,
	    "is_send_status=%d, cmd->bufflen=%d, cmd->sg_cnt=%d, cmd->dma_data_direction=%d se_cmd[%p]\n",
	    (xmit_type & QLA_TGT_XMIT_STATUS) ?
	    1 : 0, cmd->bufflen, cmd->sg_cnt, cmd->dma_data_direction,
	    &cmd->se_cmd);

	res = qlt_pre_xmit_response(cmd, &prm, xmit_type, scsi_status,
	    &full_req_cnt);
	if (unlikely(res != 0)) {
		if (res == QLA_TGT_PRE_XMIT_RESP_CMD_ABORTED)
			return 0;

		return res;
	}

	spin_lock_irqsave(&ha->hardware_lock, flags);

	if (qla2x00_reset_active(vha) || cmd->reset_count != ha->chip_reset) {
		/*
		 * Either a chip reset is active or this request was from
		 * previous life, just abort the processing.
		 */
		cmd->state = QLA_TGT_STATE_PROCESSED;
		qlt_abort_cmd_on_host_reset(cmd->vha, cmd);
		ql_dbg(ql_dbg_async, vha, 0xe101,
		    "RESET-RSP active/old-count/new-count = %d/%d/%d.\n",
		    qla2x00_reset_active(vha), cmd->reset_count,
		    ha->chip_reset);
		spin_unlock_irqrestore(&ha->hardware_lock, flags);
		return 0;
	}

	/* Does F/W have IOCBs for this request */
	res = qlt_check_reserve_free_req(vha, full_req_cnt);
	if (unlikely(res))
		goto out_unmap_unlock;

	if (cmd->se_cmd.prot_op && (xmit_type & QLA_TGT_XMIT_DATA))
		res = qlt_build_ctio_crc2_pkt(&prm, vha);
	else
		res = qlt_24xx_build_ctio_pkt(&prm, vha);
	if (unlikely(res != 0))
		goto out_unmap_unlock;

	pkt = (struct ctio7_to_24xx *)prm.pkt;

	if (qlt_has_data(cmd) && (xmit_type & QLA_TGT_XMIT_DATA)) {
		pkt->u.status0.flags |=
		    cpu_to_le16(CTIO7_FLAGS_DATA_IN |
			CTIO7_FLAGS_STATUS_MODE_0);

		if (cmd->se_cmd.prot_op == TARGET_PROT_NORMAL)
			qlt_load_data_segments(&prm, vha);

		if (prm.add_status_pkt == 0) {
			if (xmit_type & QLA_TGT_XMIT_STATUS) {
				pkt->u.status0.scsi_status =
				    cpu_to_le16(prm.rq_result);
				pkt->u.status0.residual =
				    cpu_to_le32(prm.residual);
				pkt->u.status0.flags |= cpu_to_le16(
				    CTIO7_FLAGS_SEND_STATUS);
				if (qlt_need_explicit_conf(ha, cmd, 0)) {
					pkt->u.status0.flags |=
					    cpu_to_le16(
						CTIO7_FLAGS_EXPLICIT_CONFORM |
						CTIO7_FLAGS_CONFORM_REQ);
				}
			}
		} else {
			/*
			 * We have already made sure that there is sufficient
			 * amount of request entries to not drop HW lock in
			 * req_pkt().
			 */
			struct ctio7_to_24xx *ctio =
			    (struct ctio7_to_24xx *)qlt_get_req_pkt(vha);

			ql_dbg(ql_dbg_io, vha, 0x305e,
			    "Building additional status packet 0x%p.\n",
			    ctio);

			/*
			 * T10Dif: ctio_crc2_to_fw overlay on top of
			 * ctio7_to_24xx
			 */
			memcpy(ctio, pkt, sizeof(*ctio));
			/* reset back to CTIO7 */
			ctio->entry_count = 1;
			ctio->entry_type = CTIO_TYPE7;
			ctio->dseg_count = 0;
			ctio->u.status1.flags &= ~cpu_to_le16(
			    CTIO7_FLAGS_DATA_IN);

			/* Real finish is ctio_m1's finish */
			pkt->handle |= CTIO_INTERMEDIATE_HANDLE_MARK;
			pkt->u.status0.flags |= cpu_to_le16(
			    CTIO7_FLAGS_DONT_RET_CTIO);

			/* qlt_24xx_init_ctio_to_isp will correct
			 * all necessary fields that are part of CTIO7.
			 * There should be no residual of CTIO-CRC2 data.
			 */
			qlt_24xx_init_ctio_to_isp((struct ctio7_to_24xx *)ctio,
			    &prm);
			pr_debug("Status CTIO7: %p\n", ctio);
		}
	} else
		qlt_24xx_init_ctio_to_isp(pkt, &prm);

	cmd->state = QLA_TGT_STATE_PROCESSED; /* Mid-level is done processing */
	cmd->cmd_sent_to_fw = 1;

	/* Memory Barrier */
	wmb();
	qla2x00_start_iocbs(vha, vha->req);
	spin_unlock_irqrestore(&ha->hardware_lock, flags);

	return 0;

out_unmap_unlock:
	qlt_unmap_sg(vha, cmd);
	spin_unlock_irqrestore(&ha->hardware_lock, flags);

	return res;
}
EXPORT_SYMBOL(qlt_xmit_response);
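/*
 * qlt_rdy_to_xfer() is the write-path counterpart of qlt_xmit_response():
 * it queues a CTIO flagged CTIO7_FLAGS_DATA_OUT so the firmware pulls the
 * initiator's write data into the command's SGL; the completion is later
 * routed through qlt_do_ctio_completion() into tgt_ops->handle_data().
 */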
int qlt_rdy_to_xfer(struct qla_tgt_cmd *cmd)
{
	struct ctio7_to_24xx *pkt;
	struct scsi_qla_host *vha = cmd->vha;
	struct qla_hw_data *ha = vha->hw;
	struct qla_tgt *tgt = cmd->tgt;
	struct qla_tgt_prm prm;
	unsigned long flags;
	int res = 0;

	memset(&prm, 0, sizeof(prm));
	prm.cmd = cmd;
	prm.tgt = tgt;
	prm.sg = NULL;
	prm.req_cnt = 1;

	/* Send marker if required */
	if (qlt_issue_marker(vha, 0) != QLA_SUCCESS)
		return -EIO;

	/* Calculate number of entries and segments required */
	if (qlt_pci_map_calc_cnt(&prm) != 0)
		return -EAGAIN;

	spin_lock_irqsave(&ha->hardware_lock, flags);

	if (qla2x00_reset_active(vha) || cmd->reset_count != ha->chip_reset) {
		/*
		 * Either a chip reset is active or this request was from
		 * previous life, just abort the processing.
		 */
		cmd->state = QLA_TGT_STATE_NEED_DATA;
		qlt_abort_cmd_on_host_reset(cmd->vha, cmd);
		ql_dbg(ql_dbg_async, vha, 0xe102,
		    "RESET-XFR active/old-count/new-count = %d/%d/%d.\n",
		    qla2x00_reset_active(vha), cmd->reset_count,
		    ha->chip_reset);
		spin_unlock_irqrestore(&ha->hardware_lock, flags);
		return 0;
	}

	/* Does F/W have IOCBs for this request */
	res = qlt_check_reserve_free_req(vha, prm.req_cnt);
	if (res != 0)
		goto out_unlock_free_unmap;
	if (cmd->se_cmd.prot_op)
		res = qlt_build_ctio_crc2_pkt(&prm, vha);
	else
		res = qlt_24xx_build_ctio_pkt(&prm, vha);

	if (unlikely(res != 0))
		goto out_unlock_free_unmap;
	pkt = (struct ctio7_to_24xx *)prm.pkt;
	pkt->u.status0.flags |= cpu_to_le16(CTIO7_FLAGS_DATA_OUT |
	    CTIO7_FLAGS_STATUS_MODE_0);

	if (cmd->se_cmd.prot_op == TARGET_PROT_NORMAL)
		qlt_load_data_segments(&prm, vha);

	cmd->state = QLA_TGT_STATE_NEED_DATA;
	cmd->cmd_sent_to_fw = 1;

	/* Memory Barrier */
	wmb();
	qla2x00_start_iocbs(vha, vha->req);
	spin_unlock_irqrestore(&ha->hardware_lock, flags);

	return res;

out_unlock_free_unmap:
	qlt_unmap_sg(vha, cmd);
	spin_unlock_irqrestore(&ha->hardware_lock, flags);

	return res;
}
EXPORT_SYMBOL(qlt_rdy_to_xfer);
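/*
 * The 8-byte DIF tuples decoded below follow the T10 PI layout reported
 * in ctio_crc_from_fw: a 16-bit guard (CRC) at offset 0, a 16-bit
 * application tag at offset 2 and a 32-bit reference tag at offset 4,
 * all big-endian.
 */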
/*
 * Checks the guard or meta-data for the type of error
 * detected by the HBA.
 */
static inline int
qlt_handle_dif_error(struct scsi_qla_host *vha, struct qla_tgt_cmd *cmd,
	struct ctio_crc_from_fw *sts)
{
	uint8_t		*ap = &sts->actual_dif[0];
	uint8_t		*ep = &sts->expected_dif[0];
	uint32_t	e_ref_tag, a_ref_tag;
	uint16_t	e_app_tag, a_app_tag;
	uint16_t	e_guard, a_guard;
	uint64_t	lba = cmd->se_cmd.t_task_lba;

	a_guard   = be16_to_cpu(*(uint16_t *)(ap + 0));
	a_app_tag = be16_to_cpu(*(uint16_t *)(ap + 2));
	a_ref_tag = be32_to_cpu(*(uint32_t *)(ap + 4));

	e_guard   = be16_to_cpu(*(uint16_t *)(ep + 0));
	e_app_tag = be16_to_cpu(*(uint16_t *)(ep + 2));
	e_ref_tag = be32_to_cpu(*(uint32_t *)(ep + 4));

	ql_dbg(ql_dbg_tgt, vha, 0xe075,
	    "iocb(s) %p Returned STATUS.\n", sts);

	ql_dbg(ql_dbg_tgt, vha, 0xf075,
	    "dif check TGT cdb 0x%x lba 0x%llx: [Actual|Expected] Ref Tag[0x%x|0x%x], App Tag [0x%x|0x%x], Guard [0x%x|0x%x]\n",
	    cmd->atio.u.isp24.fcp_cmnd.cdb[0], lba,
	    a_ref_tag, e_ref_tag, a_app_tag, e_app_tag, a_guard, e_guard);

	/*
	 * Ignore sector if:
	 * For type     3: ref & app tag is all 'f's
	 * For type 0,1,2: app tag is all 'f's
	 */
	if ((a_app_tag == 0xffff) &&
	    ((cmd->se_cmd.prot_type != TARGET_DIF_TYPE3_PROT) ||
	     (a_ref_tag == 0xffffffff))) {
		uint32_t blocks_done;

		/* 2TB boundary case covered automatically with this */
		blocks_done = e_ref_tag - (uint32_t)lba + 1;
		cmd->se_cmd.bad_sector = e_ref_tag;
		cmd->se_cmd.pi_err = 0;
		ql_dbg(ql_dbg_tgt, vha, 0xf074,
		    "need to return scsi good\n");

		/* Update protection tag */
		if (cmd->prot_sg_cnt) {
			uint32_t i, k = 0, num_ent;
			struct scatterlist *sg, *sgl;

			sgl = cmd->prot_sg;

			/* Patch the corresponding protection tags */
			for_each_sg(sgl, sg, cmd->prot_sg_cnt, i) {
				num_ent = sg_dma_len(sg) / 8;
				if (k + num_ent < blocks_done) {
					k += num_ent;
					continue;
				}
				k = blocks_done;
				break;
			}

			if (k != blocks_done) {
				ql_log(ql_log_warn, vha, 0xf076,
				    "unexpected tag values tag:lba=%u:%llu)\n",
				    e_ref_tag, (unsigned long long)lba);
				return 1;
			}

#if 0
			struct sd_dif_tuple *spt;
			/* TODO:
			 * This section came from the initiator. Is it valid
			 * here? Should ulp be overridden with the actual
			 * value???
			 */
			spt = page_address(sg_page(sg)) + sg->offset;

			spt->app_tag = 0xffff;
			if (cmd->se_cmd.prot_type == SCSI_PROT_DIF_TYPE3)
				spt->ref_tag = 0xffffffff;
#endif
		}

		return 0;
	}

	/* check guard */
	if (e_guard != a_guard) {
		cmd->se_cmd.pi_err = TCM_LOGICAL_BLOCK_GUARD_CHECK_FAILED;
		cmd->se_cmd.bad_sector = cmd->se_cmd.t_task_lba;

		ql_log(ql_log_warn, vha, 0xe076,
		    "Guard ERR: cdb 0x%x lba 0x%llx: [Actual|Expected] Ref Tag[0x%x|0x%x], App Tag [0x%x|0x%x], Guard [0x%x|0x%x] cmd=%p\n",
		    cmd->atio.u.isp24.fcp_cmnd.cdb[0], lba,
		    a_ref_tag, e_ref_tag, a_app_tag, e_app_tag,
		    a_guard, e_guard, cmd);
		return 1;
	}

	/* check ref tag */
	if (e_ref_tag != a_ref_tag) {
		cmd->se_cmd.pi_err = TCM_LOGICAL_BLOCK_REF_TAG_CHECK_FAILED;
		cmd->se_cmd.bad_sector = e_ref_tag;

		ql_log(ql_log_warn, vha, 0xe077,
		    "Ref Tag ERR: cdb 0x%x lba 0x%llx: [Actual|Expected] Ref Tag[0x%x|0x%x], App Tag [0x%x|0x%x], Guard [0x%x|0x%x] cmd=%p\n",
		    cmd->atio.u.isp24.fcp_cmnd.cdb[0], lba,
		    a_ref_tag, e_ref_tag, a_app_tag, e_app_tag,
		    a_guard, e_guard, cmd);
		return 1;
	}

	/* check appl tag */
	if (e_app_tag != a_app_tag) {
		cmd->se_cmd.pi_err = TCM_LOGICAL_BLOCK_APP_TAG_CHECK_FAILED;
		cmd->se_cmd.bad_sector = cmd->se_cmd.t_task_lba;

		ql_log(ql_log_warn, vha, 0xe078,
		    "App Tag ERR: cdb 0x%x lba 0x%llx: [Actual|Expected] Ref Tag[0x%x|0x%x], App Tag [0x%x|0x%x], Guard [0x%x|0x%x] cmd=%p\n",
		    cmd->atio.u.isp24.fcp_cmnd.cdb[0], lba,
		    a_ref_tag, e_ref_tag, a_app_tag, e_app_tag,
		    a_guard, e_guard, cmd);
		return 1;
	}

	return 1;
}
/* If hardware_lock held on entry, might drop it, then reacquire */
/* This function sends the appropriate CTIO to ISP 2xxx or 24xx */
static int __qlt_send_term_exchange(struct scsi_qla_host *vha,
	struct qla_tgt_cmd *cmd,
	struct atio_from_isp *atio)
{
	struct ctio7_to_24xx *ctio24;
	struct qla_hw_data *ha = vha->hw;
	request_t *pkt;
	int ret = 0;
	uint16_t temp;

	ql_dbg(ql_dbg_tgt, vha, 0xe01c, "Sending TERM EXCH CTIO (ha=%p)\n", ha);

	pkt = (request_t *)qla2x00_alloc_iocbs_ready(vha, NULL);
	if (pkt == NULL) {
		ql_dbg(ql_dbg_tgt, vha, 0xe050,
		    "qla_target(%d): %s failed: unable to allocate "
		    "request packet\n", vha->vp_idx, __func__);
		return -ENOMEM;
	}

	if (cmd != NULL) {
		if (cmd->state < QLA_TGT_STATE_PROCESSED) {
			ql_dbg(ql_dbg_tgt, vha, 0xe051,
			    "qla_target(%d): Terminating cmd %p with "
			    "incorrect state %d\n", vha->vp_idx, cmd,
			    cmd->state);
		} else
			ret = 1;
	}

	pkt->entry_count = 1;
	pkt->handle = QLA_TGT_SKIP_HANDLE | CTIO_COMPLETION_HANDLE_MARK;

	ctio24 = (struct ctio7_to_24xx *)pkt;
	ctio24->entry_type = CTIO_TYPE7;
	ctio24->nport_handle = cmd ? cmd->loop_id : CTIO7_NHANDLE_UNRECOGNIZED;
	ctio24->timeout = cpu_to_le16(QLA_TGT_TIMEOUT);
	ctio24->vp_index = vha->vp_idx;
	ctio24->initiator_id[0] = atio->u.isp24.fcp_hdr.s_id[2];
	ctio24->initiator_id[1] = atio->u.isp24.fcp_hdr.s_id[1];
	ctio24->initiator_id[2] = atio->u.isp24.fcp_hdr.s_id[0];
	ctio24->exchange_addr = atio->u.isp24.exchange_addr;
	ctio24->u.status1.flags = (atio->u.isp24.attr << 9) |
	    cpu_to_le16(CTIO7_FLAGS_STATUS_MODE_1 |
		CTIO7_FLAGS_TERMINATE);
	temp = be16_to_cpu(atio->u.isp24.fcp_hdr.ox_id);
	ctio24->u.status1.ox_id = cpu_to_le16(temp);

	/* Most likely, it isn't needed */
	ctio24->u.status1.residual = get_unaligned((uint32_t *)
	    &atio->u.isp24.fcp_cmnd.add_cdb[
	    atio->u.isp24.fcp_cmnd.add_cdb_len]);
	if (ctio24->u.status1.residual != 0)
		ctio24->u.status1.scsi_status |= SS_RESIDUAL_UNDER;

	/* Memory Barrier */
	wmb();
	qla2x00_start_iocbs(vha, vha->req);
	return ret;
}
static void qlt_send_term_exchange(struct scsi_qla_host *vha,
	struct qla_tgt_cmd *cmd, struct atio_from_isp *atio, int ha_locked)
{
	unsigned long flags;
	int rc;

	if (qlt_issue_marker(vha, ha_locked) < 0)
		return;

	if (ha_locked) {
		rc = __qlt_send_term_exchange(vha, cmd, atio);
		if (rc == -ENOMEM)
			qlt_alloc_qfull_cmd(vha, atio, 0, 0);
		goto done;
	}
	spin_lock_irqsave(&vha->hw->hardware_lock, flags);
	rc = __qlt_send_term_exchange(vha, cmd, atio);
	if (rc == -ENOMEM)
		qlt_alloc_qfull_cmd(vha, atio, 0, 0);
	spin_unlock_irqrestore(&vha->hw->hardware_lock, flags);

done:
	if (cmd && ((cmd->state != QLA_TGT_STATE_ABORTED) ||
	    !cmd->cmd_sent_to_fw)) {
		if (!ha_locked && !in_interrupt())
			msleep(250); /* just in case */

		qlt_unmap_sg(vha, cmd);
		vha->hw->tgt.tgt_ops->free_cmd(cmd);
	}
}
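/*
 * Terminated-but-unfreed exchanges are accounted against a leak threshold
 * sized as a percentage of the firmware exchange control blocks
 * (fw_xcb_count); qlt_chk_exch_leak_thresh_hold() below requests a chip
 * reset once the number of dropped q-full commands crosses it.
 */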
static void qlt_init_term_exchange(struct scsi_qla_host *vha)
{
	struct list_head free_list;
	struct qla_tgt_cmd *cmd, *tcmd;

	vha->hw->tgt.leak_exchg_thresh_hold =
	    (vha->hw->fw_xcb_count/100) * LEAK_EXCHG_THRESH_HOLD_PERCENT;

	cmd = tcmd = NULL;
	if (!list_empty(&vha->hw->tgt.q_full_list)) {
		INIT_LIST_HEAD(&free_list);
		list_splice_init(&vha->hw->tgt.q_full_list, &free_list);

		list_for_each_entry_safe(cmd, tcmd, &free_list, cmd_list) {
			list_del(&cmd->cmd_list);
			/* This cmd was never sent to TCM.  There is no need
			 * to schedule free or call free_cmd
			 */
			qlt_free_cmd(cmd);
			vha->hw->tgt.num_qfull_cmds_alloc--;
		}
	}
	vha->hw->tgt.num_qfull_cmds_dropped = 0;
}
static void qlt_chk_exch_leak_thresh_hold(struct scsi_qla_host *vha)
{
	uint32_t total_leaked;

	total_leaked = vha->hw->tgt.num_qfull_cmds_dropped;

	if (vha->hw->tgt.leak_exchg_thresh_hold &&
	    (total_leaked > vha->hw->tgt.leak_exchg_thresh_hold)) {

		ql_dbg(ql_dbg_tgt, vha, 0xe079,
		    "Chip reset due to exchange starvation: %d/%d.\n",
		    total_leaked, vha->hw->fw_xcb_count);

		if (IS_P3P_TYPE(vha->hw))
			set_bit(FCOE_CTX_RESET_NEEDED, &vha->dpc_flags);
		else
			set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
		qla2xxx_wake_dpc(vha);
	}
}
void qlt_free_cmd(struct qla_tgt_cmd *cmd)
{
	struct qla_tgt_sess *sess = cmd->sess;

	ql_dbg(ql_dbg_tgt, cmd->vha, 0xe074,
	    "%s: se_cmd[%p] ox_id %04x\n",
	    __func__, &cmd->se_cmd,
	    be16_to_cpu(cmd->atio.u.isp24.fcp_hdr.ox_id));

	BUG_ON(cmd->cmd_in_wq);

	if (!cmd->q_full)
		qlt_decr_num_pend_cmds(cmd->vha);

	BUG_ON(cmd->sg_mapped);
	cmd->jiffies_at_free = get_jiffies_64();
	if (unlikely(cmd->free_sg))
		kfree(cmd->sg);

	if (!sess || !sess->se_sess) {
		WARN_ON(1);
		return;
	}
	cmd->jiffies_at_free = get_jiffies_64();
	percpu_ida_free(&sess->se_sess->sess_tag_pool, cmd->se_cmd.map_tag);
}
EXPORT_SYMBOL(qlt_free_cmd);
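/*
 * SRR handling pairs two records by srr_id: a qla_tgt_srr_imm built from
 * the immediate notify IOCB and a qla_tgt_srr_ctio built from the CTIO
 * that completed with SRR status.  Whichever half arrives second schedules
 * tgt->srr_work, which matches the pair and replays the transfer.
 */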
/* ha->hardware_lock supposed to be held on entry */
static int qlt_prepare_srr_ctio(struct scsi_qla_host *vha,
	struct qla_tgt_cmd *cmd, void *ctio)
{
	struct qla_tgt_srr_ctio *sc;
	struct qla_tgt *tgt = vha->vha_tgt.qla_tgt;
	struct qla_tgt_srr_imm *imm;

	tgt->ctio_srr_id++;
	cmd->cmd_flags |= BIT_15;

	ql_dbg(ql_dbg_tgt_mgt, vha, 0xf019,
	    "qla_target(%d): CTIO with SRR status received\n", vha->vp_idx);

	if (!ctio) {
		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf055,
		    "qla_target(%d): SRR CTIO, but ctio is NULL\n",
		    vha->vp_idx);
		return -EINVAL;
	}

	sc = kzalloc(sizeof(*sc), GFP_ATOMIC);
	if (sc != NULL) {
		sc->cmd = cmd;
		/* IRQ is already OFF */
		spin_lock(&tgt->srr_lock);
		sc->srr_id = tgt->ctio_srr_id;
		list_add_tail(&sc->srr_list_entry,
		    &tgt->srr_ctio_list);
		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf01a,
		    "CTIO SRR %p added (id %d)\n", sc, sc->srr_id);
		if (tgt->imm_srr_id == tgt->ctio_srr_id) {
			int found = 0;
			list_for_each_entry(imm, &tgt->srr_imm_list,
			    srr_list_entry) {
				if (imm->srr_id == sc->srr_id) {
					found = 1;
					break;
				}
			}
			if (found) {
				ql_dbg(ql_dbg_tgt_mgt, vha, 0xf01b,
				    "Scheduling srr work\n");
				schedule_work(&tgt->srr_work);
			} else {
				ql_dbg(ql_dbg_tgt_mgt, vha, 0xf056,
				    "qla_target(%d): imm_srr_id "
				    "== ctio_srr_id (%d), but there is no "
				    "corresponding SRR IMM, deleting CTIO "
				    "SRR %p\n", vha->vp_idx,
				    tgt->ctio_srr_id, sc);
				list_del(&sc->srr_list_entry);
				spin_unlock(&tgt->srr_lock);

				kfree(sc);
				return -EINVAL;
			}
		}
		spin_unlock(&tgt->srr_lock);
	} else {
		struct qla_tgt_srr_imm *ti;

		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf057,
		    "qla_target(%d): Unable to allocate SRR CTIO entry\n",
		    vha->vp_idx);
		spin_lock(&tgt->srr_lock);
		list_for_each_entry_safe(imm, ti, &tgt->srr_imm_list,
		    srr_list_entry) {
			if (imm->srr_id == tgt->ctio_srr_id) {
				ql_dbg(ql_dbg_tgt_mgt, vha, 0xf01c,
				    "IMM SRR %p deleted (id %d)\n",
				    imm, imm->srr_id);
				list_del(&imm->srr_list_entry);
				qlt_reject_free_srr_imm(vha, imm, 1);
			}
		}
		spin_unlock(&tgt->srr_lock);

		return -ENOMEM;
	}

	return 0;
}
/*
 * ha->hardware_lock supposed to be held on entry. Might drop it, then
 * reacquire.
 */
static int qlt_term_ctio_exchange(struct scsi_qla_host *vha, void *ctio,
	struct qla_tgt_cmd *cmd, uint32_t status)
{
	int term = 0;

	if (ctio != NULL) {
		struct ctio7_from_24xx *c = (struct ctio7_from_24xx *)ctio;
		term = !(c->flags &
		    cpu_to_le16(OF_TERM_EXCH));
	} else
		term = 1;

	if (term)
		qlt_send_term_exchange(vha, cmd, &cmd->atio, 1);

	return term;
}
/* ha->hardware_lock supposed to be held on entry */
static inline struct qla_tgt_cmd *qlt_get_cmd(struct scsi_qla_host *vha,
	uint32_t handle)
{
	struct qla_hw_data *ha = vha->hw;

	handle--;
	if (ha->tgt.cmds[handle] != NULL) {
		struct qla_tgt_cmd *cmd = ha->tgt.cmds[handle];
		ha->tgt.cmds[handle] = NULL;
		return cmd;
	} else
		return NULL;
}
/* ha->hardware_lock supposed to be held on entry */
static struct qla_tgt_cmd *qlt_ctio_to_cmd(struct scsi_qla_host *vha,
	uint32_t handle, void *ctio)
{
	struct qla_tgt_cmd *cmd = NULL;

	/* Clear out internal marks */
	handle &= ~(CTIO_COMPLETION_HANDLE_MARK |
	    CTIO_INTERMEDIATE_HANDLE_MARK);

	if (handle != QLA_TGT_NULL_HANDLE) {
		if (unlikely(handle == QLA_TGT_SKIP_HANDLE))
			return NULL;

		/* handle-1 is actually used */
		if (unlikely(handle > DEFAULT_OUTSTANDING_COMMANDS)) {
			ql_dbg(ql_dbg_tgt, vha, 0xe052,
			    "qla_target(%d): Wrong handle %x received\n",
			    vha->vp_idx, handle);
			return NULL;
		}
		cmd = qlt_get_cmd(vha, handle);
		if (unlikely(cmd == NULL)) {
			ql_dbg(ql_dbg_tgt, vha, 0xe053,
			    "qla_target(%d): Suspicious: unable to "
			    "find the command with handle %x\n", vha->vp_idx,
			    handle);
			return NULL;
		}
	} else if (ctio != NULL) {
		/* We can't get loop ID from CTIO7 */
		ql_dbg(ql_dbg_tgt, vha, 0xe054,
		    "qla_target(%d): Wrong CTIO received: QLA24xx doesn't "
		    "support NULL handles\n", vha->vp_idx);
		return NULL;
	}

	return cmd;
}
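/*
 * On a host reset the exchange cannot be terminated through the
 * (resetting) firmware, so the handler below only unwinds driver state:
 * PROCESSED and ABORTED commands are freed outright, while NEED_DATA
 * commands are flipped to DATA_IN with write_data_transferred = 0 so the
 * upper layer observes the failed write completion.
 */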
/* hardware_lock should be held by caller. */
static void
qlt_abort_cmd_on_host_reset(struct scsi_qla_host *vha, struct qla_tgt_cmd *cmd)
{
	struct qla_hw_data *ha = vha->hw;
	uint32_t handle;

	if (cmd->sg_mapped)
		qlt_unmap_sg(vha, cmd);

	handle = qlt_make_handle(vha);

	/* TODO: fix debug message type and ids. */
	if (cmd->state == QLA_TGT_STATE_PROCESSED) {
		ql_dbg(ql_dbg_io, vha, 0xff00,
		    "HOST-ABORT: handle=%d, state=PROCESSED.\n", handle);
	} else if (cmd->state == QLA_TGT_STATE_NEED_DATA) {
		cmd->write_data_transferred = 0;
		cmd->state = QLA_TGT_STATE_DATA_IN;

		ql_dbg(ql_dbg_io, vha, 0xff01,
		    "HOST-ABORT: handle=%d, state=DATA_IN.\n", handle);

		ha->tgt.tgt_ops->handle_data(cmd);
		return;
	} else if (cmd->state == QLA_TGT_STATE_ABORTED) {
		ql_dbg(ql_dbg_io, vha, 0xff02,
		    "HOST-ABORT: handle=%d, state=ABORTED.\n", handle);
	} else {
		ql_dbg(ql_dbg_io, vha, 0xff03,
		    "HOST-ABORT: handle=%d, state=BAD(%d).\n", handle,
		    cmd->state);
		dump_stack();
	}

	cmd->cmd_flags |= BIT_12;
	ha->tgt.tgt_ops->free_cmd(cmd);
}
void
qlt_host_reset_handler(struct qla_hw_data *ha)
{
	struct qla_tgt_cmd *cmd;
	unsigned long flags;
	scsi_qla_host_t *base_vha = pci_get_drvdata(ha->pdev);
	scsi_qla_host_t *vha = NULL;
	struct qla_tgt *tgt = base_vha->vha_tgt.qla_tgt;
	uint32_t i;

	if (!base_vha->hw->tgt.tgt_ops)
		return;

	if (!tgt || qla_ini_mode_enabled(base_vha)) {
		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf003,
		    "Target mode disabled\n");
		return;
	}

	ql_dbg(ql_dbg_tgt_mgt, vha, 0xff10,
	    "HOST-ABORT-HNDLR: base_vha->dpc_flags=%lx.\n",
	    base_vha->dpc_flags);

	spin_lock_irqsave(&ha->hardware_lock, flags);
	for (i = 1; i < DEFAULT_OUTSTANDING_COMMANDS + 1; i++) {
		cmd = qlt_get_cmd(base_vha, i);
		if (!cmd)
			continue;
		/* ha->tgt.cmds entry is cleared by qlt_get_cmd. */
		vha = cmd->vha;
		qlt_abort_cmd_on_host_reset(vha, cmd);
	}
	spin_unlock_irqrestore(&ha->hardware_lock, flags);
}
/*
 * ha->hardware_lock supposed to be held on entry. Might drop it, then
 * reacquire.
 */
static void qlt_do_ctio_completion(struct scsi_qla_host *vha, uint32_t handle,
	uint32_t status, void *ctio)
{
	struct qla_hw_data *ha = vha->hw;
	struct se_cmd *se_cmd;
	struct qla_tgt_cmd *cmd;

	if (handle & CTIO_INTERMEDIATE_HANDLE_MARK) {
		/* That could happen only in case of an error/reset/abort */
		if (status != CTIO_SUCCESS) {
			ql_dbg(ql_dbg_tgt_mgt, vha, 0xf01d,
			    "Intermediate CTIO received"
			    " (status %x)\n", status);
		}
		return;
	}

	cmd = qlt_ctio_to_cmd(vha, handle, ctio);
	if (cmd == NULL)
		return;

	se_cmd = &cmd->se_cmd;
	cmd->cmd_sent_to_fw = 0;

	qlt_unmap_sg(vha, cmd);

	if (unlikely(status != CTIO_SUCCESS)) {
		switch (status & 0xFFFF) {
		case CTIO_LIP_RESET:
		case CTIO_TARGET_RESET:
		case CTIO_ABORTED:
			/* driver request abort via Terminate exchange */
		case CTIO_TIMEOUT:
		case CTIO_INVALID_RX_ID:
			/* They are OK */
			ql_dbg(ql_dbg_tgt_mgt, vha, 0xf058,
			    "qla_target(%d): CTIO with "
			    "status %#x received, state %x, se_cmd %p, "
			    "(LIP_RESET=e, ABORTED=2, TARGET_RESET=17, "
			    "TIMEOUT=b, INVALID_RX_ID=8)\n", vha->vp_idx,
			    status, cmd->state, se_cmd);
			break;

		case CTIO_PORT_LOGGED_OUT:
		case CTIO_PORT_UNAVAILABLE:
			ql_dbg(ql_dbg_tgt_mgt, vha, 0xf059,
			    "qla_target(%d): CTIO with PORT LOGGED "
			    "OUT (29) or PORT UNAVAILABLE (28) status %x "
			    "received (state %x, se_cmd %p)\n", vha->vp_idx,
			    status, cmd->state, se_cmd);
			break;

		case CTIO_SRR_RECEIVED:
			ql_dbg(ql_dbg_tgt_mgt, vha, 0xf05a,
			    "qla_target(%d): CTIO with SRR_RECEIVED"
			    " status %x received (state %x, se_cmd %p)\n",
			    vha->vp_idx, status, cmd->state, se_cmd);
			if (qlt_prepare_srr_ctio(vha, cmd, ctio) != 0)
				break;
			else
				return;

		case CTIO_DIF_ERROR: {
			struct ctio_crc_from_fw *crc =
			    (struct ctio_crc_from_fw *)ctio;
			ql_dbg(ql_dbg_tgt_mgt, vha, 0xf073,
			    "qla_target(%d): CTIO with DIF_ERROR status %x received (state %x, se_cmd %p) actual_dif[0x%llx] expect_dif[0x%llx]\n",
			    vha->vp_idx, status, cmd->state, se_cmd,
			    *((u64 *)&crc->actual_dif[0]),
			    *((u64 *)&crc->expected_dif[0]));

			if (qlt_handle_dif_error(vha, cmd, ctio)) {
				if (cmd->state == QLA_TGT_STATE_NEED_DATA) {
					/* scsi Write/xfer rdy complete */
					goto skip_term;
				} else {
					/* scsi read/xmit respond complete
					 * call handle dif to send scsi status
					 * rather than terminate exchange.
					 */
					cmd->state = QLA_TGT_STATE_PROCESSED;
					ha->tgt.tgt_ops->handle_dif_err(cmd);
					return;
				}
			} else {
				/* Need to generate a SCSI good completion.
				 * because FW did not send scsi status.
				 */
				status = 0;
				goto skip_term;
			}
			break;
		}
		default:
			ql_dbg(ql_dbg_tgt_mgt, vha, 0xf05b,
			    "qla_target(%d): CTIO with error status 0x%x received (state %x, se_cmd %p\n",
			    vha->vp_idx, status, cmd->state, se_cmd);
			break;
		}

		/* "cmd->state == QLA_TGT_STATE_ABORTED" means
		 * cmd is already aborted/terminated, we don't
		 * need to terminate again.  The exchange is already
		 * cleaned up/freed at FW level.  Just cleanup at driver
		 * level.
		 */
		if ((cmd->state != QLA_TGT_STATE_NEED_DATA) &&
		    (cmd->state != QLA_TGT_STATE_ABORTED)) {
			cmd->cmd_flags |= BIT_13;
			if (qlt_term_ctio_exchange(vha, ctio, cmd, status))
				return;
		}
	}
skip_term:

	if (cmd->state == QLA_TGT_STATE_PROCESSED) {
		;
	} else if (cmd->state == QLA_TGT_STATE_NEED_DATA) {
		cmd->state = QLA_TGT_STATE_DATA_IN;

		if (status == CTIO_SUCCESS)
			cmd->write_data_transferred = 1;

		ha->tgt.tgt_ops->handle_data(cmd);
		return;
	} else if (cmd->state == QLA_TGT_STATE_ABORTED) {
		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf01e,
		    "Aborted command %p (tag %lld) finished\n", cmd,
		    se_cmd->tag);
	} else {
		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf05c,
		    "qla_target(%d): A command in state (%d) should "
		    "not return a CTIO complete\n", vha->vp_idx, cmd->state);
	}

	if (unlikely(status != CTIO_SUCCESS) &&
	    (cmd->state != QLA_TGT_STATE_ABORTED)) {
		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf01f, "Finishing failed CTIO\n");
		dump_stack();
	}

	ha->tgt.tgt_ops->free_cmd(cmd);
}
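/*
 * Translate the FCP_CMND task attribute from the ATIO into the TCM task
 * attribute constants; unknown codes deliberately fall back to ORDERED,
 * the safest over-serialization.
 */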
static inline int qlt_get_fcp_task_attr(struct scsi_qla_host *vha,
	uint8_t task_codes)
{
	int fcp_task_attr;

	switch (task_codes) {
	case ATIO_SIMPLE_QUEUE:
		fcp_task_attr = TCM_SIMPLE_TAG;
		break;
	case ATIO_HEAD_OF_QUEUE:
		fcp_task_attr = TCM_HEAD_TAG;
		break;
	case ATIO_ORDERED_QUEUE:
		fcp_task_attr = TCM_ORDERED_TAG;
		break;
	case ATIO_ACA_QUEUE:
		fcp_task_attr = TCM_ACA_TAG;
		break;
	case ATIO_UNTAGGED:
		fcp_task_attr = TCM_SIMPLE_TAG;
		break;
	default:
		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf05d,
		    "qla_target: unknown task code %x, use ORDERED instead\n",
		    task_codes);
		fcp_task_attr = TCM_ORDERED_TAG;
		break;
	}

	return fcp_task_attr;
}
static struct qla_tgt_sess *qlt_make_local_sess(struct scsi_qla_host *,
	uint8_t *);
/*
 * Process context for I/O path into tcm_qla2xxx code
 */
static void __qlt_do_work(struct qla_tgt_cmd *cmd)
{
	scsi_qla_host_t *vha = cmd->vha;
	struct qla_hw_data *ha = vha->hw;
	struct qla_tgt *tgt = vha->vha_tgt.qla_tgt;
	struct qla_tgt_sess *sess = cmd->sess;
	struct atio_from_isp *atio = &cmd->atio;
	unsigned char *cdb;
	unsigned long flags;
	uint32_t data_length;
	int ret, fcp_task_attr, data_dir, bidi = 0;

	cmd->cmd_in_wq = 0;
	cmd->cmd_flags |= BIT_1;
	if (tgt->tgt_stop)
		goto out_term;

	cdb = &atio->u.isp24.fcp_cmnd.cdb[0];
	cmd->se_cmd.tag = atio->u.isp24.exchange_addr;
	cmd->unpacked_lun = scsilun_to_int(
	    (struct scsi_lun *)&atio->u.isp24.fcp_cmnd.lun);

	if (atio->u.isp24.fcp_cmnd.rddata &&
	    atio->u.isp24.fcp_cmnd.wrdata) {
		bidi = 1;
		data_dir = DMA_TO_DEVICE;
	} else if (atio->u.isp24.fcp_cmnd.rddata)
		data_dir = DMA_FROM_DEVICE;
	else if (atio->u.isp24.fcp_cmnd.wrdata)
		data_dir = DMA_TO_DEVICE;
	else
		data_dir = DMA_NONE;

	fcp_task_attr = qlt_get_fcp_task_attr(vha,
	    atio->u.isp24.fcp_cmnd.task_attr);
	data_length = be32_to_cpu(get_unaligned((uint32_t *)
	    &atio->u.isp24.fcp_cmnd.add_cdb[
	    atio->u.isp24.fcp_cmnd.add_cdb_len]));

	ret = ha->tgt.tgt_ops->handle_cmd(vha, cmd, cdb, data_length,
	    fcp_task_attr, data_dir, bidi);
	if (ret != 0)
		goto out_term;
	/*
	 * Drop extra session reference from qlt_handle_cmd_for_atio().
	 */
	spin_lock_irqsave(&ha->hardware_lock, flags);
	ha->tgt.tgt_ops->put_sess(sess);
	spin_unlock_irqrestore(&ha->hardware_lock, flags);
	return;

out_term:
	ql_dbg(ql_dbg_io, vha, 0x3060, "Terminating work cmd %p", cmd);
	/*
	 * cmd has not been sent to the target yet, so pass NULL as the second
	 * argument to qlt_send_term_exchange() and free the memory here.
	 */
	cmd->cmd_flags |= BIT_2;
	spin_lock_irqsave(&ha->hardware_lock, flags);
	qlt_send_term_exchange(vha, NULL, &cmd->atio, 1);

	qlt_decr_num_pend_cmds(vha);
	percpu_ida_free(&sess->se_sess->sess_tag_pool, cmd->se_cmd.map_tag);
	ha->tgt.tgt_ops->put_sess(sess);
	spin_unlock_irqrestore(&ha->hardware_lock, flags);
}
static void qlt_do_work(struct work_struct *work)
{
	struct qla_tgt_cmd *cmd = container_of(work, struct qla_tgt_cmd, work);

	__qlt_do_work(cmd);
}
static struct qla_tgt_cmd *qlt_get_tag(scsi_qla_host_t *vha,
	struct qla_tgt_sess *sess,
	struct atio_from_isp *atio)
{
	struct se_session *se_sess = sess->se_sess;
	struct qla_tgt_cmd *cmd;
	int tag;

	tag = percpu_ida_alloc(&se_sess->sess_tag_pool, TASK_RUNNING);
	if (tag < 0)
		return NULL;

	cmd = &((struct qla_tgt_cmd *)se_sess->sess_cmd_map)[tag];
	memset(cmd, 0, sizeof(struct qla_tgt_cmd));

	memcpy(&cmd->atio, atio, sizeof(*atio));
	cmd->state = QLA_TGT_STATE_NEW;
	cmd->tgt = vha->vha_tgt.qla_tgt;
	qlt_incr_num_pend_cmds(vha);
	cmd->vha = vha;
	cmd->se_cmd.map_tag = tag;
	cmd->sess = sess;
	cmd->loop_id = sess->loop_id;
	cmd->conf_compl_supported = sess->conf_compl_supported;

	return cmd;
}
static void qlt_send_busy(struct scsi_qla_host *, struct atio_from_isp *,
	uint16_t);
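/*
 * When a command arrives from an s_id with no known session,
 * qlt_handle_cmd_for_atio() defers to this work item: qlt_make_local_sess()
 * must be able to sleep (it takes vha_tgt.tgt_mutex), so the ATIO is copied
 * into a qla_tgt_sess_op and the command is replayed from the workqueue
 * once the session exists.
 */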
static void qlt_create_sess_from_atio(struct work_struct *work)
{
	struct qla_tgt_sess_op *op = container_of(work,
	    struct qla_tgt_sess_op, work);
	scsi_qla_host_t *vha = op->vha;
	struct qla_hw_data *ha = vha->hw;
	struct qla_tgt_sess *sess;
	struct qla_tgt_cmd *cmd;
	unsigned long flags;
	uint8_t *s_id = op->atio.u.isp24.fcp_hdr.s_id;

	ql_dbg(ql_dbg_tgt_mgt, vha, 0xf022,
	    "qla_target(%d): Unable to find wwn login"
	    " (s_id %x:%x:%x), trying to create it manually\n",
	    vha->vp_idx, s_id[0], s_id[1], s_id[2]);

	if (op->atio.u.raw.entry_count > 1) {
		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf023,
		    "Dropping multi entry atio %p\n", &op->atio);
		goto out_term;
	}

	mutex_lock(&vha->vha_tgt.tgt_mutex);
	sess = qlt_make_local_sess(vha, s_id);
	/* sess has an extra creation ref. */
	mutex_unlock(&vha->vha_tgt.tgt_mutex);

	if (!sess)
		goto out_term;
	/*
	 * Now obtain a pre-allocated session tag using the original op->atio
	 * packet header, and dispatch into __qlt_do_work() using the existing
	 * process context.
	 */
	cmd = qlt_get_tag(vha, sess, &op->atio);
	if (!cmd) {
		spin_lock_irqsave(&ha->hardware_lock, flags);
		qlt_send_busy(vha, &op->atio, SAM_STAT_BUSY);
		ha->tgt.tgt_ops->put_sess(sess);
		spin_unlock_irqrestore(&ha->hardware_lock, flags);
		kfree(op);
		return;
	}
	/*
	 * __qlt_do_work() will call ha->tgt.tgt_ops->put_sess() to release
	 * the extra reference taken above by qlt_make_local_sess()
	 */
	__qlt_do_work(cmd);
	kfree(op);
	return;

out_term:
	spin_lock_irqsave(&ha->hardware_lock, flags);
	qlt_send_term_exchange(vha, NULL, &op->atio, 1);
	spin_unlock_irqrestore(&ha->hardware_lock, flags);
	kfree(op);
}
/* ha->hardware_lock supposed to be held on entry */
static int qlt_handle_cmd_for_atio(struct scsi_qla_host *vha,
	struct atio_from_isp *atio)
{
	struct qla_hw_data *ha = vha->hw;
	struct qla_tgt *tgt = vha->vha_tgt.qla_tgt;
	struct qla_tgt_sess *sess;
	struct qla_tgt_cmd *cmd;

	if (unlikely(tgt->tgt_stop)) {
		ql_dbg(ql_dbg_io, vha, 0x3061,
		    "New command while device %p is shutting down\n", tgt);
		return -EFAULT;
	}

	sess = ha->tgt.tgt_ops->find_sess_by_s_id(vha, atio->u.isp24.fcp_hdr.s_id);
	if (unlikely(!sess)) {
		struct qla_tgt_sess_op *op = kzalloc(sizeof(struct qla_tgt_sess_op),
		    GFP_ATOMIC);
		if (!op)
			return -ENOMEM;

		memcpy(&op->atio, atio, sizeof(*atio));
		op->vha = vha;
		INIT_WORK(&op->work, qlt_create_sess_from_atio);
		queue_work(qla_tgt_wq, &op->work);
		return 0;
	}
	/*
	 * Do kref_get() before returning + dropping qla_hw_data->hardware_lock.
	 */
	kref_get(&sess->se_sess->sess_kref);

	cmd = qlt_get_tag(vha, sess, atio);
	if (!cmd) {
		ql_dbg(ql_dbg_io, vha, 0x3062,
		    "qla_target(%d): Allocation of cmd failed\n", vha->vp_idx);
		ha->tgt.tgt_ops->put_sess(sess);
		return -ENOMEM;
	}

	cmd->jiffies_at_alloc = get_jiffies_64();

	cmd->reset_count = vha->hw->chip_reset;

	cmd->cmd_in_wq = 1;
	cmd->cmd_flags |= BIT_0;
	INIT_WORK(&cmd->work, qlt_do_work);
	queue_work(qla_tgt_wq, &cmd->work);
	return 0;
}
/* ha->hardware_lock supposed to be held on entry */
static int qlt_issue_task_mgmt(struct qla_tgt_sess *sess, uint32_t lun,
	int fn, void *iocb, int flags)
{
	struct scsi_qla_host *vha = sess->vha;
	struct qla_hw_data *ha = vha->hw;
	struct qla_tgt_mgmt_cmd *mcmd;
	int res;
	uint8_t tmr_func;

	mcmd = mempool_alloc(qla_tgt_mgmt_cmd_mempool, GFP_ATOMIC);
	if (!mcmd) {
		ql_dbg(ql_dbg_tgt_tmr, vha, 0x10009,
		    "qla_target(%d): Allocation of management "
		    "command failed, some commands and their data could "
		    "leak\n", vha->vp_idx);
		return -ENOMEM;
	}
	memset(mcmd, 0, sizeof(*mcmd));
	mcmd->sess = sess;

	if (iocb) {
		memcpy(&mcmd->orig_iocb.imm_ntfy, iocb,
		    sizeof(mcmd->orig_iocb.imm_ntfy));
	}
	mcmd->tmr_func = fn;
	mcmd->flags = flags;
	mcmd->reset_count = vha->hw->chip_reset;

	switch (fn) {
	case QLA_TGT_CLEAR_ACA:
		ql_dbg(ql_dbg_tgt_tmr, vha, 0x10000,
		    "qla_target(%d): CLEAR_ACA received\n", sess->vha->vp_idx);
		tmr_func = TMR_CLEAR_ACA;
		break;

	case QLA_TGT_TARGET_RESET:
		ql_dbg(ql_dbg_tgt_tmr, vha, 0x10001,
		    "qla_target(%d): TARGET_RESET received\n",
		    sess->vha->vp_idx);
		tmr_func = TMR_TARGET_WARM_RESET;
		break;

	case QLA_TGT_LUN_RESET:
		ql_dbg(ql_dbg_tgt_tmr, vha, 0x10002,
		    "qla_target(%d): LUN_RESET received\n", sess->vha->vp_idx);
		tmr_func = TMR_LUN_RESET;
		break;

	case QLA_TGT_CLEAR_TS:
		ql_dbg(ql_dbg_tgt_tmr, vha, 0x10003,
		    "qla_target(%d): CLEAR_TS received\n", sess->vha->vp_idx);
		tmr_func = TMR_CLEAR_TASK_SET;
		break;

	case QLA_TGT_ABORT_TS:
		ql_dbg(ql_dbg_tgt_tmr, vha, 0x10004,
		    "qla_target(%d): ABORT_TS received\n", sess->vha->vp_idx);
		tmr_func = TMR_ABORT_TASK_SET;
		break;
#if 0
	case QLA_TGT_ABORT_ALL:
		ql_dbg(ql_dbg_tgt_tmr, vha, 0x10005,
		    "qla_target(%d): Doing ABORT_ALL_TASKS\n",
		    sess->vha->vp_idx);
		tmr_func = 0;
		break;

	case QLA_TGT_ABORT_ALL_SESS:
		ql_dbg(ql_dbg_tgt_tmr, vha, 0x10006,
		    "qla_target(%d): Doing ABORT_ALL_TASKS_SESS\n",
		    sess->vha->vp_idx);
		tmr_func = 0;
		break;

	case QLA_TGT_NEXUS_LOSS_SESS:
		ql_dbg(ql_dbg_tgt_tmr, vha, 0x10007,
		    "qla_target(%d): Doing NEXUS_LOSS_SESS\n",
		    sess->vha->vp_idx);
		tmr_func = 0;
		break;

	case QLA_TGT_NEXUS_LOSS:
		ql_dbg(ql_dbg_tgt_tmr, vha, 0x10008,
		    "qla_target(%d): Doing NEXUS_LOSS\n", sess->vha->vp_idx);
		tmr_func = 0;
		break;
#endif
	default:
		ql_dbg(ql_dbg_tgt_tmr, vha, 0x1000a,
		    "qla_target(%d): Unknown task mgmt fn 0x%x\n",
		    sess->vha->vp_idx, fn);
		mempool_free(mcmd, qla_tgt_mgmt_cmd_mempool);
		return -ENOSYS;
	}

	res = ha->tgt.tgt_ops->handle_tmr(mcmd, lun, tmr_func, 0);
	if (res != 0) {
		ql_dbg(ql_dbg_tgt_tmr, vha, 0x1000b,
		    "qla_target(%d): tgt.tgt_ops->handle_tmr() failed: %d\n",
		    sess->vha->vp_idx, res);
		mempool_free(mcmd, qla_tgt_mgmt_cmd_mempool);
		return -EFAULT;
	}

	return 0;
}
/* ha->hardware_lock supposed to be held on entry */
static int qlt_handle_task_mgmt(struct scsi_qla_host *vha, void *iocb)
{
	struct atio_from_isp *a = (struct atio_from_isp *)iocb;
	struct qla_hw_data *ha = vha->hw;
	struct qla_tgt *tgt;
	struct qla_tgt_sess *sess;
	uint32_t lun, unpacked_lun;
	int fn;

	tgt = vha->vha_tgt.qla_tgt;

	lun = a->u.isp24.fcp_cmnd.lun;
	fn = a->u.isp24.fcp_cmnd.task_mgmt_flags;
	sess = ha->tgt.tgt_ops->find_sess_by_s_id(vha,
	    a->u.isp24.fcp_hdr.s_id);
	unpacked_lun = scsilun_to_int((struct scsi_lun *)&lun);

	if (!sess) {
		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf024,
		    "qla_target(%d): task mgmt fn 0x%x for "
		    "non-existent session\n", vha->vp_idx, fn);
		return qlt_sched_sess_work(tgt, QLA_TGT_SESS_WORK_TM, iocb,
		    sizeof(struct atio_from_isp));
	}

	return qlt_issue_task_mgmt(sess, unpacked_lun, fn, iocb, 0);
}
/* ha->hardware_lock supposed to be held on entry */
static int __qlt_abort_task(struct scsi_qla_host *vha,
	struct imm_ntfy_from_isp *iocb, struct qla_tgt_sess *sess)
{
	struct atio_from_isp *a = (struct atio_from_isp *)iocb;
	struct qla_hw_data *ha = vha->hw;
	struct qla_tgt_mgmt_cmd *mcmd;
	uint32_t lun, unpacked_lun;
	int rc;

	mcmd = mempool_alloc(qla_tgt_mgmt_cmd_mempool, GFP_ATOMIC);
	if (mcmd == NULL) {
		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf05f,
		    "qla_target(%d): %s: Allocation of ABORT cmd failed\n",
		    vha->vp_idx, __func__);
		return -ENOMEM;
	}
	memset(mcmd, 0, sizeof(*mcmd));

	mcmd->sess = sess;
	memcpy(&mcmd->orig_iocb.imm_ntfy, iocb,
	    sizeof(mcmd->orig_iocb.imm_ntfy));

	lun = a->u.isp24.fcp_cmnd.lun;
	unpacked_lun = scsilun_to_int((struct scsi_lun *)&lun);
	mcmd->reset_count = vha->hw->chip_reset;

	rc = ha->tgt.tgt_ops->handle_tmr(mcmd, unpacked_lun, TMR_ABORT_TASK,
	    le16_to_cpu(iocb->u.isp2x.seq_id));
	if (rc != 0) {
		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf060,
		    "qla_target(%d): tgt_ops->handle_tmr() failed: %d\n",
		    vha->vp_idx, rc);
		mempool_free(mcmd, qla_tgt_mgmt_cmd_mempool);
		return -EFAULT;
	}

	return 0;
}
/* ha->hardware_lock supposed to be held on entry */
static int qlt_abort_task(struct scsi_qla_host *vha,
	struct imm_ntfy_from_isp *iocb)
{
	struct qla_hw_data *ha = vha->hw;
	struct qla_tgt_sess *sess;
	int loop_id;

	loop_id = GET_TARGET_ID(ha, (struct atio_from_isp *)iocb);

	sess = ha->tgt.tgt_ops->find_sess_by_loop_id(vha, loop_id);
	if (sess == NULL) {
		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf025,
		    "qla_target(%d): task abort for non-existent "
		    "session\n", vha->vp_idx);
		return qlt_sched_sess_work(vha->vha_tgt.qla_tgt,
		    QLA_TGT_SESS_WORK_ABORT, iocb, sizeof(*iocb));
	}

	return __qlt_abort_task(vha, iocb, sess);
}
/*
 * ha->hardware_lock supposed to be held on entry. Might drop it, then
 * reacquire.
 */
static int qlt_24xx_handle_els(struct scsi_qla_host *vha,
	struct imm_ntfy_from_isp *iocb)
{
	int res = 0;

	ql_dbg(ql_dbg_tgt_mgt, vha, 0xf026,
	    "qla_target(%d): Port ID: 0x%3phC ELS opcode: 0x%02x\n",
	    vha->vp_idx, iocb->u.isp24.port_id, iocb->u.isp24.status_subcode);

	switch (iocb->u.isp24.status_subcode) {
	case ELS_PLOGI:
	case ELS_FLOGI:
	case ELS_PRLI:
	case ELS_LOGO:
	case ELS_PRLO:
		res = qlt_reset(vha, iocb, QLA_TGT_NEXUS_LOSS_SESS);
		break;
	case ELS_PDISC:
	case ELS_ADISC:
	{
		struct qla_tgt *tgt = vha->vha_tgt.qla_tgt;
		if (tgt->link_reinit_iocb_pending) {
			qlt_send_notify_ack(vha, &tgt->link_reinit_iocb,
			    0, 0, 0, 0, 0, 0);
			tgt->link_reinit_iocb_pending = 0;
		}
		res = 1; /* send notify ack */
		break;
	}

	default:
		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf061,
		    "qla_target(%d): Unsupported ELS command %x "
		    "received\n", vha->vp_idx, iocb->u.isp24.status_subcode);
		res = qlt_reset(vha, iocb, QLA_TGT_NEXUS_LOSS_SESS);
		break;
	}

	return res;
}
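/*
 * For an SRR that asks to replay from a non-zero relative offset, the
 * intent of qlt_set_data_offset() is to rebuild the command's scatterlist
 * starting at that byte offset; note the live code currently rejects such
 * requests up front (see the FIXME), so only offset-zero replays are
 * exercised.
 */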
static int qlt_set_data_offset(struct qla_tgt_cmd *cmd, uint32_t offset)
{
#if 1
	/*
	 * FIXME: Reject non zero SRR relative offset until we can test
	 * this code properly.
	 */
	pr_debug("Rejecting non zero SRR rel_offs: %u\n", offset);
	return -1;
#else
	struct scatterlist *sg, *sgp, *sg_srr, *sg_srr_start = NULL;
	size_t first_offset = 0, rem_offset = offset, tmp = 0;
	int i, sg_srr_cnt, bufflen = 0;

	ql_dbg(ql_dbg_tgt, cmd->vha, 0xe023,
	    "Entering qla_tgt_set_data_offset: cmd: %p, cmd->sg: %p, "
	    "cmd->sg_cnt: %u, direction: %d\n",
	    cmd, cmd->sg, cmd->sg_cnt, cmd->dma_data_direction);

	if (!cmd->sg || !cmd->sg_cnt) {
		ql_dbg(ql_dbg_tgt, cmd->vha, 0xe055,
		    "Missing cmd->sg or zero cmd->sg_cnt in"
		    " qla_tgt_set_data_offset\n");
		return -EINVAL;
	}
	/*
	 * Walk the current cmd->sg list until we locate the new sg_srr_start
	 */
	for_each_sg(cmd->sg, sg, cmd->sg_cnt, i) {
		ql_dbg(ql_dbg_tgt, cmd->vha, 0xe024,
		    "sg[%d]: %p page: %p, length: %d, offset: %d\n",
		    i, sg, sg_page(sg), sg->length, sg->offset);

		if ((sg->length + tmp) > offset) {
			first_offset = rem_offset;
			sg_srr_start = sg;
			ql_dbg(ql_dbg_tgt, cmd->vha, 0xe025,
			    "Found matching sg[%d], using %p as sg_srr_start, "
			    "and using first_offset: %zu\n", i, sg,
			    first_offset);
			break;
		}
		tmp += sg->length;
		rem_offset -= sg->length;
	}

	if (!sg_srr_start) {
		ql_dbg(ql_dbg_tgt, cmd->vha, 0xe056,
		    "Unable to locate sg_srr_start for offset: %u\n", offset);
		return -EINVAL;
	}
	sg_srr_cnt = (cmd->sg_cnt - i);

	sg_srr = kzalloc(sizeof(struct scatterlist) * sg_srr_cnt, GFP_KERNEL);
	if (!sg_srr) {
		ql_dbg(ql_dbg_tgt, cmd->vha, 0xe057,
		    "Unable to allocate sgp\n");
		return -ENOMEM;
	}
	sg_init_table(sg_srr, sg_srr_cnt);
	sgp = &sg_srr[0];
	/*
	 * Walk the remaining list for sg_srr_start, mapping to the newly
	 * allocated sg_srr taking first_offset into account.
	 */
	for_each_sg(sg_srr_start, sg, sg_srr_cnt, i) {
		if (first_offset) {
			sg_set_page(sgp, sg_page(sg),
			    (sg->length - first_offset), first_offset);
			first_offset = 0;
		} else {
			sg_set_page(sgp, sg_page(sg), sg->length, 0);
		}
		bufflen += sgp->length;

		sgp = sg_next(sgp);
		if (!sgp)
			break;
	}

	cmd->sg = sg_srr;
	cmd->sg_cnt = sg_srr_cnt;
	cmd->bufflen = bufflen;
	cmd->offset += offset;
	cmd->free_sg = 1;

	ql_dbg(ql_dbg_tgt, cmd->vha, 0xe026, "New cmd->sg: %p\n", cmd->sg);
	ql_dbg(ql_dbg_tgt, cmd->vha, 0xe027, "New cmd->sg_cnt: %u\n",
	    cmd->sg_cnt);
	ql_dbg(ql_dbg_tgt, cmd->vha, 0xe028, "New cmd->bufflen: %u\n",
	    cmd->bufflen);
	ql_dbg(ql_dbg_tgt, cmd->vha, 0xe029, "New cmd->offset: %u\n",
	    cmd->offset);

	if (cmd->sg_cnt < 0)
		BUG();

	if (cmd->bufflen < 0)
		BUG();

	return 0;
#endif
}
static inline int qlt_srr_adjust_data(struct qla_tgt_cmd *cmd,
	uint32_t srr_rel_offs, int *xmit_type)
{
	int res = 0, rel_offs;

	rel_offs = srr_rel_offs - cmd->offset;
	ql_dbg(ql_dbg_tgt_mgt, cmd->vha, 0xf027, "srr_rel_offs=%d, rel_offs=%d",
	    srr_rel_offs, rel_offs);

	*xmit_type = QLA_TGT_XMIT_ALL;

	if (rel_offs < 0) {
		ql_dbg(ql_dbg_tgt_mgt, cmd->vha, 0xf062,
		    "qla_target(%d): SRR rel_offs (%d) < 0",
		    cmd->vha->vp_idx, rel_offs);
		res = -1;
	} else if (rel_offs == cmd->bufflen)
		*xmit_type = QLA_TGT_XMIT_STATUS;
	else if (rel_offs > 0)
		res = qlt_set_data_offset(cmd, rel_offs);

	return res;
}
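/*
 * The SRR IU code says what the initiator wants retransmitted:
 * SRR_IU_STATUS replays only the status IU, while SRR_IU_DATA_IN and
 * SRR_IU_DATA_OUT replay the data phase from the offset carried in the
 * notify IOCB, degenerating to a status-only replay when that offset
 * equals the full transfer length (see qlt_srr_adjust_data() above).
 */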
/* No locks, thread context */
static void qlt_handle_srr(struct scsi_qla_host *vha,
	struct qla_tgt_srr_ctio *sctio, struct qla_tgt_srr_imm *imm)
{
	struct imm_ntfy_from_isp *ntfy =
	    (struct imm_ntfy_from_isp *)&imm->imm_ntfy;
	struct qla_hw_data *ha = vha->hw;
	struct qla_tgt_cmd *cmd = sctio->cmd;
	struct se_cmd *se_cmd = &cmd->se_cmd;
	unsigned long flags;
	int xmit_type = 0, resp = 0;
	uint32_t offset;
	uint16_t srr_ui;

	offset = le32_to_cpu(ntfy->u.isp24.srr_rel_offs);
	srr_ui = ntfy->u.isp24.srr_ui;

	ql_dbg(ql_dbg_tgt_mgt, vha, 0xf028, "SRR cmd %p, srr_ui %x\n",
	    cmd, srr_ui);

	switch (srr_ui) {
	case SRR_IU_STATUS:
		spin_lock_irqsave(&ha->hardware_lock, flags);
		qlt_send_notify_ack(vha, ntfy,
		    0, 0, 0, NOTIFY_ACK_SRR_FLAGS_ACCEPT, 0, 0);
		spin_unlock_irqrestore(&ha->hardware_lock, flags);
		xmit_type = QLA_TGT_XMIT_STATUS;
		resp = 1;
		break;
	case SRR_IU_DATA_IN:
		if (!cmd->sg || !cmd->sg_cnt) {
			ql_dbg(ql_dbg_tgt_mgt, vha, 0xf063,
			    "Unable to process SRR_IU_DATA_IN due to"
			    " missing cmd->sg, state: %d\n", cmd->state);
			dump_stack();
			goto out_reject;
		}
		if (se_cmd->scsi_status != 0) {
			ql_dbg(ql_dbg_tgt, vha, 0xe02a,
			    "Rejecting SRR_IU_DATA_IN with non GOOD "
			    "scsi_status\n");
			goto out_reject;
		}
		cmd->bufflen = se_cmd->data_length;

		if (qlt_has_data(cmd)) {
			if (qlt_srr_adjust_data(cmd, offset, &xmit_type) != 0)
				goto out_reject;
			spin_lock_irqsave(&ha->hardware_lock, flags);
			qlt_send_notify_ack(vha, ntfy,
			    0, 0, 0, NOTIFY_ACK_SRR_FLAGS_ACCEPT, 0, 0);
			spin_unlock_irqrestore(&ha->hardware_lock, flags);
			resp = 1;
		} else {
			ql_dbg(ql_dbg_tgt_mgt, vha, 0xf064,
			    "qla_target(%d): SRR for in data for cmd without them (tag %lld, SCSI status %d), reject",
			    vha->vp_idx, se_cmd->tag,
			    cmd->se_cmd.scsi_status);
			goto out_reject;
		}
		break;
	case SRR_IU_DATA_OUT:
		if (!cmd->sg || !cmd->sg_cnt) {
			ql_dbg(ql_dbg_tgt_mgt, vha, 0xf065,
			    "Unable to process SRR_IU_DATA_OUT due to"
			    " missing cmd->sg\n");
			dump_stack();
			goto out_reject;
		}
		if (se_cmd->scsi_status != 0) {
			ql_dbg(ql_dbg_tgt, vha, 0xe02b,
			    "Rejecting SRR_IU_DATA_OUT"
			    " with non GOOD scsi_status\n");
			goto out_reject;
		}
		cmd->bufflen = se_cmd->data_length;

		if (qlt_has_data(cmd)) {
			if (qlt_srr_adjust_data(cmd, offset, &xmit_type) != 0)
				goto out_reject;
			spin_lock_irqsave(&ha->hardware_lock, flags);
			qlt_send_notify_ack(vha, ntfy,
			    0, 0, 0, NOTIFY_ACK_SRR_FLAGS_ACCEPT, 0, 0);
			spin_unlock_irqrestore(&ha->hardware_lock, flags);
			if (xmit_type & QLA_TGT_XMIT_DATA) {
				cmd->cmd_flags |= BIT_8;
				qlt_rdy_to_xfer(cmd);
			}
		} else {
			ql_dbg(ql_dbg_tgt_mgt, vha, 0xf066,
			    "qla_target(%d): SRR for out data for cmd without them (tag %lld, SCSI status %d), reject",
			    vha->vp_idx, se_cmd->tag, cmd->se_cmd.scsi_status);
			goto out_reject;
		}
		break;
	default:
		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf067,
		    "qla_target(%d): Unknown srr_ui value %x",
		    vha->vp_idx, srr_ui);
		goto out_reject;
	}

	/* Transmit response in case of status and data-in cases */
	if (resp) {
		cmd->cmd_flags |= BIT_7;
		qlt_xmit_response(cmd, xmit_type, se_cmd->scsi_status);
	}

	return;

out_reject:
	spin_lock_irqsave(&ha->hardware_lock, flags);
	qlt_send_notify_ack(vha, ntfy, 0, 0, 0,
	    NOTIFY_ACK_SRR_FLAGS_REJECT,
	    NOTIFY_ACK_SRR_REJECT_REASON_UNABLE_TO_PERFORM,
	    NOTIFY_ACK_SRR_FLAGS_REJECT_EXPL_NO_EXPL);
	if (cmd->state == QLA_TGT_STATE_NEED_DATA) {
		cmd->state = QLA_TGT_STATE_DATA_IN;
		dump_stack();
	} else {
		cmd->cmd_flags |= BIT_9;
		qlt_send_term_exchange(vha, cmd, &cmd->atio, 1);
	}
	spin_unlock_irqrestore(&ha->hardware_lock, flags);
}
static void qlt_reject_free_srr_imm(struct scsi_qla_host *vha,
	struct qla_tgt_srr_imm *imm, int ha_locked)
{
	struct qla_hw_data *ha = vha->hw;
	unsigned long flags = 0;

	if (!ha_locked)
		spin_lock_irqsave(&ha->hardware_lock, flags);

	qlt_send_notify_ack(vha, (void *)&imm->imm_ntfy, 0, 0, 0,
	    NOTIFY_ACK_SRR_FLAGS_REJECT,
	    NOTIFY_ACK_SRR_REJECT_REASON_UNABLE_TO_PERFORM,
	    NOTIFY_ACK_SRR_FLAGS_REJECT_EXPL_NO_EXPL);

	if (!ha_locked)
		spin_unlock_irqrestore(&ha->hardware_lock, flags);

	kfree(imm);
}
static void qlt_handle_srr_work(struct work_struct *work)
{
	struct qla_tgt *tgt = container_of(work, struct qla_tgt, srr_work);
	struct scsi_qla_host *vha = tgt->vha;
	struct qla_tgt_srr_ctio *sctio;
	unsigned long flags;

	ql_dbg(ql_dbg_tgt_mgt, vha, 0xf029, "Entering SRR work (tgt %p)\n",
	    tgt);

restart:
	spin_lock_irqsave(&tgt->srr_lock, flags);
	list_for_each_entry(sctio, &tgt->srr_ctio_list, srr_list_entry) {
		struct qla_tgt_srr_imm *imm, *i, *ti;
		struct qla_tgt_cmd *cmd;
		struct se_cmd *se_cmd;

		imm = NULL;
		list_for_each_entry_safe(i, ti, &tgt->srr_imm_list,
		    srr_list_entry) {
			if (i->srr_id == sctio->srr_id) {
				list_del(&i->srr_list_entry);
				if (imm) {
					ql_dbg(ql_dbg_tgt_mgt, vha, 0xf068,
					    "qla_target(%d): There must be "
					    "only one IMM SRR per CTIO SRR "
					    "(IMM SRR %p, id %d, CTIO %p\n",
					    vha->vp_idx, i, i->srr_id, sctio);
					qlt_reject_free_srr_imm(tgt->vha, i, 0);
				} else
					imm = i;
			}
		}

		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf02a,
		    "IMM SRR %p, CTIO SRR %p (id %d)\n", imm, sctio,
		    sctio->srr_id);

		if (imm == NULL) {
			ql_dbg(ql_dbg_tgt_mgt, vha, 0xf02b,
			    "Not found matching IMM for SRR CTIO (id %d)\n",
			    sctio->srr_id);
			continue;
		} else
			list_del(&sctio->srr_list_entry);

		spin_unlock_irqrestore(&tgt->srr_lock, flags);

		cmd = sctio->cmd;
		/*
		 * Reset qla_tgt_cmd SRR values and SGL pointer+count to follow
		 * tcm_qla2xxx_write_pending() and tcm_qla2xxx_queue_data_in()
		 * logic.
		 */
		cmd->offset = 0;
		if (cmd->free_sg) {
			kfree(cmd->sg);
			cmd->sg = NULL;
			cmd->free_sg = 0;
		}
		se_cmd = &cmd->se_cmd;

		cmd->sg_cnt = se_cmd->t_data_nents;
		cmd->sg = se_cmd->t_data_sg;

		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf02c,
		    "SRR cmd %p (se_cmd %p, tag %lld, op %x), sg_cnt=%d, offset=%d",
		    cmd, &cmd->se_cmd, se_cmd->tag, se_cmd->t_task_cdb ?
		    se_cmd->t_task_cdb[0] : 0, cmd->sg_cnt, cmd->offset);

		qlt_handle_srr(vha, sctio, imm);

		kfree(imm);
		kfree(sctio);
		goto restart;
	}
	spin_unlock_irqrestore(&tgt->srr_lock, flags);
}
/* ha->hardware_lock supposed to be held on entry */
static void qlt_prepare_srr_imm(struct scsi_qla_host *vha,
	struct imm_ntfy_from_isp *iocb)
{
	struct qla_tgt_srr_imm *imm;
	struct qla_tgt *tgt = vha->vha_tgt.qla_tgt;
	struct qla_tgt_srr_ctio *sctio;

	tgt->imm_srr_id++;

	ql_log(ql_log_warn, vha, 0xf02d, "qla_target(%d): SRR received\n",
	    vha->vp_idx);

	imm = kzalloc(sizeof(*imm), GFP_ATOMIC);
	if (imm != NULL) {
		memcpy(&imm->imm_ntfy, iocb, sizeof(imm->imm_ntfy));

		/* IRQ is already OFF */
		spin_lock(&tgt->srr_lock);
		imm->srr_id = tgt->imm_srr_id;
		list_add_tail(&imm->srr_list_entry,
		    &tgt->srr_imm_list);
		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf02e,
		    "IMM NTFY SRR %p added (id %d, ui %x)\n",
		    imm, imm->srr_id, iocb->u.isp24.srr_ui);
		if (tgt->imm_srr_id == tgt->ctio_srr_id) {
			int found = 0;
			list_for_each_entry(sctio, &tgt->srr_ctio_list,
			    srr_list_entry) {
				if (sctio->srr_id == imm->srr_id) {
					found = 1;
					break;
				}
			}
			if (found) {
				ql_dbg(ql_dbg_tgt_mgt, vha, 0xf02f, "%s",
				    "Scheduling srr work\n");
				schedule_work(&tgt->srr_work);
			} else {
				ql_dbg(ql_dbg_tgt_mgt, vha, 0xf030,
				    "qla_target(%d): imm_srr_id "
				    "== ctio_srr_id (%d), but there is no "
				    "corresponding SRR CTIO, deleting IMM "
				    "SRR %p\n", vha->vp_idx, tgt->ctio_srr_id,
				    imm);
				list_del(&imm->srr_list_entry);

				kfree(imm);

				spin_unlock(&tgt->srr_lock);
				goto out_reject;
			}
		}
		spin_unlock(&tgt->srr_lock);
	} else {
		struct qla_tgt_srr_ctio *ts;

		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf069,
		    "qla_target(%d): Unable to allocate SRR IMM "
		    "entry, SRR request will be rejected\n", vha->vp_idx);

		/* IRQ is already OFF */
		spin_lock(&tgt->srr_lock);
		list_for_each_entry_safe(sctio, ts, &tgt->srr_ctio_list,
		    srr_list_entry) {
			if (sctio->srr_id == tgt->imm_srr_id) {
				ql_dbg(ql_dbg_tgt_mgt, vha, 0xf031,
				    "CTIO SRR %p deleted (id %d)\n",
				    sctio, sctio->srr_id);
				list_del(&sctio->srr_list_entry);
				qlt_send_term_exchange(vha, sctio->cmd,
				    &sctio->cmd->atio, 1);
				kfree(sctio);
			}
		}
		spin_unlock(&tgt->srr_lock);
		goto out_reject;
	}

	return;

out_reject:
	qlt_send_notify_ack(vha, iocb, 0, 0, 0,
	    NOTIFY_ACK_SRR_FLAGS_REJECT,
	    NOTIFY_ACK_SRR_REJECT_REASON_UNABLE_TO_PERFORM,
	    NOTIFY_ACK_SRR_FLAGS_REJECT_EXPL_NO_EXPL);
}
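/*
 * Dispatcher for immediate notify IOCBs from the firmware (LIP reset,
 * link reinit, logouts, ELS, abort task, SRR, ...).  Branches clear
 * send_notify_ack when they take ownership of the acknowledgement,
 * e.g. because the reset or task-management path acks the IOCB from its
 * own completion.
 */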

/*
 * ha->hardware_lock supposed to be held on entry. Might drop it, then
 * reacquire.
 */
static void qlt_handle_imm_notify(struct scsi_qla_host *vha,
	struct imm_ntfy_from_isp *iocb)
{
	struct qla_hw_data *ha = vha->hw;
	uint32_t add_flags = 0;
	int send_notify_ack = 1;
	uint16_t status;

	status = le16_to_cpu(iocb->u.isp2x.status);
	switch (status) {
	case IMM_NTFY_LIP_RESET:
	{
		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf032,
		    "qla_target(%d): LIP reset (loop %#x), subcode %x\n",
		    vha->vp_idx, le16_to_cpu(iocb->u.isp24.nport_handle),
		    iocb->u.isp24.status_subcode);
		if (qlt_reset(vha, iocb, QLA_TGT_ABORT_ALL) == 0)
			send_notify_ack = 0;
		break;
	}

	case IMM_NTFY_LIP_LINK_REINIT:
	{
		struct qla_tgt *tgt = vha->vha_tgt.qla_tgt;
		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf033,
		    "qla_target(%d): LINK REINIT (loop %#x, "
		    "subcode %x)\n", vha->vp_idx,
		    le16_to_cpu(iocb->u.isp24.nport_handle),
		    iocb->u.isp24.status_subcode);
		if (tgt->link_reinit_iocb_pending) {
			qlt_send_notify_ack(vha, &tgt->link_reinit_iocb,
			    0, 0, 0, 0, 0, 0);
		}
		memcpy(&tgt->link_reinit_iocb, iocb, sizeof(*iocb));
		tgt->link_reinit_iocb_pending = 1;
		/*
		 * QLogic requires to wait after LINK REINIT for possible
		 * PDISC or ADISC ELS commands
		 */
		send_notify_ack = 0;
		break;
	}

	case IMM_NTFY_PORT_LOGOUT:
		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf034,
		    "qla_target(%d): Port logout (loop "
		    "%#x, subcode %x)\n", vha->vp_idx,
		    le16_to_cpu(iocb->u.isp24.nport_handle),
		    iocb->u.isp24.status_subcode);
		if (qlt_reset(vha, iocb, QLA_TGT_NEXUS_LOSS_SESS) == 0)
			send_notify_ack = 0;
		/* The sessions will be cleared in the callback, if needed */
		break;

	case IMM_NTFY_GLBL_TPRLO:
		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf035,
		    "qla_target(%d): Global TPRLO (%x)\n", vha->vp_idx, status);
		if (qlt_reset(vha, iocb, QLA_TGT_NEXUS_LOSS) == 0)
			send_notify_ack = 0;
		/* The sessions will be cleared in the callback, if needed */
		break;

	case IMM_NTFY_PORT_CONFIG:
		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf036,
		    "qla_target(%d): Port config changed (%x)\n", vha->vp_idx,
		    status);
		if (qlt_reset(vha, iocb, QLA_TGT_ABORT_ALL) == 0)
			send_notify_ack = 0;
		/* The sessions will be cleared in the callback, if needed */
		break;

	case IMM_NTFY_GLBL_LOGO:
		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf06a,
		    "qla_target(%d): Link failure detected\n",
		    vha->vp_idx);
		/* I_T nexus loss */
		if (qlt_reset(vha, iocb, QLA_TGT_NEXUS_LOSS) == 0)
			send_notify_ack = 0;
		break;

	case IMM_NTFY_IOCB_OVERFLOW:
		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf06b,
		    "qla_target(%d): Cannot provide requested "
		    "capability (IOCB overflowed the immediate notify "
		    "resource count)\n", vha->vp_idx);
		break;

	case IMM_NTFY_ABORT_TASK:
		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf037,
		    "qla_target(%d): Abort Task (S %08x I %#x -> "
		    "L %#x)\n", vha->vp_idx,
		    le16_to_cpu(iocb->u.isp2x.seq_id),
		    GET_TARGET_ID(ha, (struct atio_from_isp *)iocb),
		    le16_to_cpu(iocb->u.isp2x.lun));
		if (qlt_abort_task(vha, iocb) == 0)
			send_notify_ack = 0;
		break;

	case IMM_NTFY_RESOURCE:
		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf06c,
		    "qla_target(%d): Out of resources, host %ld\n",
		    vha->vp_idx, vha->host_no);
		break;

	case IMM_NTFY_MSG_RX:
		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf038,
		    "qla_target(%d): Immediate notify task %x\n",
		    vha->vp_idx, iocb->u.isp2x.task_flags);
		if (qlt_handle_task_mgmt(vha, iocb) == 0)
			send_notify_ack = 0;
		break;

	case IMM_NTFY_ELS:
		if (qlt_24xx_handle_els(vha, iocb) == 0)
			send_notify_ack = 0;
		break;

	case IMM_NTFY_SRR:
		qlt_prepare_srr_imm(vha, iocb);
		send_notify_ack = 0;
		break;

	default:
		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf06d,
		    "qla_target(%d): Received unknown immediate "
		    "notify status %x\n", vha->vp_idx, status);
		break;
	}

	if (send_notify_ack)
		qlt_send_notify_ack(vha, iocb, add_flags, 0, 0, 0, 0, 0);
}
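
/*
 * Byte-order note for the function below: the S_ID in the FCP header
 * arrives big-endian, while the CTIO7 IOCB wants the three address bytes
 * in the opposite order, hence the [2]/[1]/[0] swizzle when filling in
 * initiator_id.
 */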

/*
 * ha->hardware_lock supposed to be held on entry. Might drop it, then
 * reacquire.
 * This function sends busy to ISP 2xxx or 24xx.
 */
static int __qlt_send_busy(struct scsi_qla_host *vha,
	struct atio_from_isp *atio, uint16_t status)
{
	struct ctio7_to_24xx *ctio24;
	struct qla_hw_data *ha = vha->hw;
	request_t *pkt;
	struct qla_tgt_sess *sess = NULL;

	sess = ha->tgt.tgt_ops->find_sess_by_s_id(vha,
	    atio->u.isp24.fcp_hdr.s_id);
	if (!sess) {
		qlt_send_term_exchange(vha, NULL, atio, 1);
		return 0;
	}
	/* Sending marker isn't necessary, since we called from ISR */

	pkt = (request_t *)qla2x00_alloc_iocbs(vha, NULL);
	if (!pkt) {
		ql_dbg(ql_dbg_io, vha, 0x3063,
		    "qla_target(%d): %s failed: unable to allocate "
		    "request packet", vha->vp_idx, __func__);
		return -ENOMEM;
	}

	pkt->entry_count = 1;
	pkt->handle = QLA_TGT_SKIP_HANDLE | CTIO_COMPLETION_HANDLE_MARK;

	ctio24 = (struct ctio7_to_24xx *)pkt;
	ctio24->entry_type = CTIO_TYPE7;
	ctio24->nport_handle = sess->loop_id;
	ctio24->timeout = cpu_to_le16(QLA_TGT_TIMEOUT);
	ctio24->vp_index = vha->vp_idx;
	ctio24->initiator_id[0] = atio->u.isp24.fcp_hdr.s_id[2];
	ctio24->initiator_id[1] = atio->u.isp24.fcp_hdr.s_id[1];
	ctio24->initiator_id[2] = atio->u.isp24.fcp_hdr.s_id[0];
	ctio24->exchange_addr = atio->u.isp24.exchange_addr;
	ctio24->u.status1.flags = (atio->u.isp24.attr << 9) |
	    cpu_to_le16(
		CTIO7_FLAGS_STATUS_MODE_1 | CTIO7_FLAGS_SEND_STATUS |
		CTIO7_FLAGS_DONT_RET_CTIO);
	/*
	 * CTIO from fw w/o se_cmd doesn't provide enough info to retry it,
	 * if the explicit confirmation is used.
	 */
	ctio24->u.status1.ox_id = swab16(atio->u.isp24.fcp_hdr.ox_id);
	ctio24->u.status1.scsi_status = cpu_to_le16(status);
	/* Memory Barrier */
	wmb();
	qla2x00_start_iocbs(vha, vha->req);
	return 0;
}
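
/*
 * When __qlt_send_busy() cannot get an IOCB slot it returns -ENOMEM and
 * the exchange is parked on ha->tgt.q_full_list instead of being dropped.
 * A minimal sketch of that fallback (this is what qlt_send_busy() further
 * below does):
 *
 *	if (__qlt_send_busy(vha, atio, SAM_STAT_BUSY) == -ENOMEM)
 *		qlt_alloc_qfull_cmd(vha, atio, SAM_STAT_BUSY, 1);
 *
 * Parked entries are drained later by qlt_free_qfull_cmds().
 */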

/*
 * This routine is used to allocate a command for either a QFull condition
 * (ie reply SAM_STAT_BUSY) or to terminate an exchange that did not go
 * out previously.
 */
static void
qlt_alloc_qfull_cmd(struct scsi_qla_host *vha,
	struct atio_from_isp *atio, uint16_t status, int qfull)
{
	struct qla_tgt *tgt = vha->vha_tgt.qla_tgt;
	struct qla_hw_data *ha = vha->hw;
	struct qla_tgt_sess *sess;
	struct se_session *se_sess;
	struct qla_tgt_cmd *cmd;
	int tag;

	if (unlikely(tgt->tgt_stop)) {
		ql_dbg(ql_dbg_io, vha, 0x300a,
			"New command while device %p is shutting down\n", tgt);
		return;
	}

	if ((vha->hw->tgt.num_qfull_cmds_alloc + 1) > MAX_QFULL_CMDS_ALLOC) {
		vha->hw->tgt.num_qfull_cmds_dropped++;
		if (vha->hw->tgt.num_qfull_cmds_dropped >
			vha->hw->qla_stats.stat_max_qfull_cmds_dropped)
			vha->hw->qla_stats.stat_max_qfull_cmds_dropped =
				vha->hw->tgt.num_qfull_cmds_dropped;

		ql_dbg(ql_dbg_io, vha, 0x3068,
			"qla_target(%d): %s: QFull CMD dropped[%d]\n",
			vha->vp_idx, __func__,
			vha->hw->tgt.num_qfull_cmds_dropped);

		qlt_chk_exch_leak_thresh_hold(vha);
		return;
	}

	sess = ha->tgt.tgt_ops->find_sess_by_s_id
		(vha, atio->u.isp24.fcp_hdr.s_id);
	if (!sess)
		return;

	se_sess = sess->se_sess;

	tag = percpu_ida_alloc(&se_sess->sess_tag_pool, TASK_RUNNING);
	if (tag < 0)
		return;

	cmd = &((struct qla_tgt_cmd *)se_sess->sess_cmd_map)[tag];
	if (!cmd) {
		ql_dbg(ql_dbg_io, vha, 0x3009,
			"qla_target(%d): %s: Allocation of cmd failed\n",
			vha->vp_idx, __func__);

		vha->hw->tgt.num_qfull_cmds_dropped++;
		if (vha->hw->tgt.num_qfull_cmds_dropped >
			vha->hw->qla_stats.stat_max_qfull_cmds_dropped)
			vha->hw->qla_stats.stat_max_qfull_cmds_dropped =
				vha->hw->tgt.num_qfull_cmds_dropped;

		qlt_chk_exch_leak_thresh_hold(vha);
		return;
	}

	memset(cmd, 0, sizeof(struct qla_tgt_cmd));

	qlt_incr_num_pend_cmds(vha);
	INIT_LIST_HEAD(&cmd->cmd_list);
	memcpy(&cmd->atio, atio, sizeof(*atio));

	cmd->tgt = vha->vha_tgt.qla_tgt;
	cmd->vha = vha;
	cmd->reset_count = vha->hw->chip_reset;

	if (qfull) {
		cmd->q_full = 1;
		/* NOTE: borrowing the state field to carry the status */
		cmd->state = status;
	} else
		cmd->term_exchg = 1;

	list_add_tail(&cmd->cmd_list, &vha->hw->tgt.q_full_list);

	vha->hw->tgt.num_qfull_cmds_alloc++;
	if (vha->hw->tgt.num_qfull_cmds_alloc >
		vha->hw->qla_stats.stat_max_qfull_cmds_alloc)
		vha->hw->qla_stats.stat_max_qfull_cmds_alloc =
			vha->hw->tgt.num_qfull_cmds_alloc;
}
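
/*
 * qlt_free_qfull_cmds() drains ha->tgt.q_full_list in two phases: the
 * queued responses are (re)issued and the entries moved to a local
 * free_list while hardware_lock is held, then the commands are freed
 * after the lock is dropped.  Bailing out on -ENOMEM leaves the remaining
 * entries queued for a later retry.
 */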

void
qlt_free_qfull_cmds(struct scsi_qla_host *vha)
{
	struct qla_hw_data *ha = vha->hw;
	unsigned long flags;
	struct qla_tgt_cmd *cmd, *tcmd;
	struct list_head free_list;
	int rc = 0;

	if (list_empty(&ha->tgt.q_full_list))
		return;

	INIT_LIST_HEAD(&free_list);

	spin_lock_irqsave(&vha->hw->hardware_lock, flags);

	if (list_empty(&ha->tgt.q_full_list)) {
		spin_unlock_irqrestore(&vha->hw->hardware_lock, flags);
		return;
	}

	list_for_each_entry_safe(cmd, tcmd, &ha->tgt.q_full_list, cmd_list) {
		if (cmd->q_full)
			/* cmd->state is a borrowed field to hold status */
			rc = __qlt_send_busy(vha, &cmd->atio, cmd->state);
		else if (cmd->term_exchg)
			rc = __qlt_send_term_exchange(vha, NULL, &cmd->atio);

		if (rc == -ENOMEM)
			break;

		if (cmd->q_full)
			ql_dbg(ql_dbg_io, vha, 0x3006,
			    "%s: busy sent for ox_id[%04x]\n", __func__,
			    be16_to_cpu(cmd->atio.u.isp24.fcp_hdr.ox_id));
		else if (cmd->term_exchg)
			ql_dbg(ql_dbg_io, vha, 0x3007,
			    "%s: Term exchg sent for ox_id[%04x]\n", __func__,
			    be16_to_cpu(cmd->atio.u.isp24.fcp_hdr.ox_id));
		else
			ql_dbg(ql_dbg_io, vha, 0x3008,
			    "%s: Unexpected cmd in QFull list %p\n", __func__,
			    cmd);

		list_del(&cmd->cmd_list);
		list_add_tail(&cmd->cmd_list, &free_list);

		/* piggy back on hardware_lock for protection */
		vha->hw->tgt.num_qfull_cmds_alloc--;
	}

	spin_unlock_irqrestore(&vha->hw->hardware_lock, flags);

	list_for_each_entry_safe(cmd, tcmd, &free_list, cmd_list) {
		list_del(&cmd->cmd_list);
		/* This cmd was never sent to TCM.  There is no need
		 * to schedule free or call free_cmd
		 */
		qlt_free_cmd(cmd);
	}
}

static void
qlt_send_busy(struct scsi_qla_host *vha,
	struct atio_from_isp *atio, uint16_t status)
{
	int rc = 0;

	rc = __qlt_send_busy(vha, atio, status);
	if (rc == -ENOMEM)
		qlt_alloc_qfull_cmd(vha, atio, status, 1);
}

static int
qlt_chk_qfull_thresh_hold(struct scsi_qla_host *vha,
	struct atio_from_isp *atio)
{
	struct qla_hw_data *ha = vha->hw;
	uint16_t status;

	if (ha->tgt.num_pend_cmds < Q_FULL_THRESH_HOLD(ha))
		return 0;

	status = temp_sam_status;
	qlt_send_busy(vha, atio, status);
	return 1;
}
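
/*
 * irq_cmd_count in the handlers below is a plain nesting counter of
 * in-flight hardware callbacks.  As far as this file shows, the tgt_stop
 * path uses it to decide when no callback is still executing; treat that
 * as a reading of the surrounding code rather than a documented contract.
 */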

/* ha->hardware_lock supposed to be held on entry */
/* called via callback from qla2xxx */
static void qlt_24xx_atio_pkt(struct scsi_qla_host *vha,
	struct atio_from_isp *atio)
{
	struct qla_hw_data *ha = vha->hw;
	struct qla_tgt *tgt = vha->vha_tgt.qla_tgt;
	int rc;

	if (unlikely(tgt == NULL)) {
		ql_dbg(ql_dbg_io, vha, 0x3064,
		    "ATIO pkt, but no tgt (ha %p)", ha);
		return;
	}
	/*
	 * In tgt_stop mode we also should allow all requests to pass.
	 * Otherwise, some commands can get stuck.
	 */

	tgt->irq_cmd_count++;

	switch (atio->u.raw.entry_type) {
	case ATIO_TYPE7:
		if (unlikely(atio->u.isp24.exchange_addr ==
		    ATIO_EXCHANGE_ADDRESS_UNKNOWN)) {
			ql_dbg(ql_dbg_io, vha, 0x3065,
			    "qla_target(%d): ATIO_TYPE7 "
			    "received with UNKNOWN exchange address, "
			    "sending QUEUE_FULL\n", vha->vp_idx);
			qlt_send_busy(vha, atio, SAM_STAT_TASK_SET_FULL);
			break;
		}

		if (likely(atio->u.isp24.fcp_cmnd.task_mgmt_flags == 0)) {
			rc = qlt_chk_qfull_thresh_hold(vha, atio);
			if (rc != 0) {
				tgt->irq_cmd_count--;
				return;
			}
			rc = qlt_handle_cmd_for_atio(vha, atio);
		} else {
			rc = qlt_handle_task_mgmt(vha, atio);
		}
		if (unlikely(rc != 0)) {
			if (rc == -ESRCH) {
#if 1 /* With TERM EXCHANGE some FC cards refuse to boot */
				qlt_send_busy(vha, atio, SAM_STAT_BUSY);
#else
				qlt_send_term_exchange(vha, NULL, atio, 1);
#endif
			} else {
				if (tgt->tgt_stop) {
					ql_dbg(ql_dbg_tgt, vha, 0xe059,
					    "qla_target: Unable to send "
					    "command to target for req, "
					    "ignoring.\n");
				} else {
					ql_dbg(ql_dbg_tgt, vha, 0xe05a,
					    "qla_target(%d): Unable to send "
					    "command to target, sending BUSY "
					    "status.\n", vha->vp_idx);
					qlt_send_busy(vha, atio, SAM_STAT_BUSY);
				}
			}
		}
		break;

	case IMMED_NOTIFY_TYPE:
	{
		if (unlikely(atio->u.isp2x.entry_status != 0)) {
			ql_dbg(ql_dbg_tgt, vha, 0xe05b,
			    "qla_target(%d): Received ATIO packet %x "
			    "with error status %x\n", vha->vp_idx,
			    atio->u.raw.entry_type,
			    atio->u.isp2x.entry_status);
			break;
		}
		ql_dbg(ql_dbg_tgt, vha, 0xe02e, "%s", "IMMED_NOTIFY ATIO");
		qlt_handle_imm_notify(vha, (struct imm_ntfy_from_isp *)atio);
		break;
	}

	default:
		ql_dbg(ql_dbg_tgt, vha, 0xe05c,
		    "qla_target(%d): Received unknown ATIO atio "
		    "type %x\n", vha->vp_idx, atio->u.raw.entry_type);
		break;
	}

	tgt->irq_cmd_count--;
}

/* ha->hardware_lock supposed to be held on entry */
/* called via callback from qla2xxx */
static void qlt_response_pkt(struct scsi_qla_host *vha, response_t *pkt)
{
	struct qla_hw_data *ha = vha->hw;
	struct qla_tgt *tgt = vha->vha_tgt.qla_tgt;

	if (unlikely(tgt == NULL)) {
		ql_dbg(ql_dbg_tgt, vha, 0xe05d,
		    "qla_target(%d): Response pkt %x received, but no "
		    "tgt (ha %p)\n", vha->vp_idx, pkt->entry_type, ha);
		return;
	}

	/*
	 * In tgt_stop mode we also should allow all requests to pass.
	 * Otherwise, some commands can get stuck.
	 */

	tgt->irq_cmd_count++;

	switch (pkt->entry_type) {
	case CTIO_CRC2:
	case CTIO_TYPE7:
	{
		struct ctio7_from_24xx *entry = (struct ctio7_from_24xx *)pkt;
		qlt_do_ctio_completion(vha, entry->handle,
		    le16_to_cpu(entry->status)|(pkt->entry_status << 16),
		    entry);
		break;
	}

	case ACCEPT_TGT_IO_TYPE:
	{
		struct atio_from_isp *atio = (struct atio_from_isp *)pkt;
		int rc;
		if (atio->u.isp2x.status !=
		    cpu_to_le16(ATIO_CDB_VALID)) {
			ql_dbg(ql_dbg_tgt, vha, 0xe05e,
			    "qla_target(%d): ATIO with error "
			    "status %x received\n", vha->vp_idx,
			    le16_to_cpu(atio->u.isp2x.status));
			break;
		}

		rc = qlt_chk_qfull_thresh_hold(vha, atio);
		if (rc != 0) {
			tgt->irq_cmd_count--;
			return;
		}

		rc = qlt_handle_cmd_for_atio(vha, atio);
		if (unlikely(rc != 0)) {
			if (rc == -ESRCH) {
#if 1 /* With TERM EXCHANGE some FC cards refuse to boot */
				qlt_send_busy(vha, atio, 0);
#else
				qlt_send_term_exchange(vha, NULL, atio, 1);
#endif
			} else {
				if (tgt->tgt_stop) {
					ql_dbg(ql_dbg_tgt, vha, 0xe05f,
					    "qla_target: Unable to send "
					    "command to target, sending TERM "
					    "EXCHANGE for rsp\n");
					qlt_send_term_exchange(vha, NULL,
					    atio, 1);
				} else {
					ql_dbg(ql_dbg_tgt, vha, 0xe060,
					    "qla_target(%d): Unable to send "
					    "command to target, sending BUSY "
					    "status\n", vha->vp_idx);
					qlt_send_busy(vha, atio, 0);
				}
			}
		}
		break;
	}

	case CONTINUE_TGT_IO_TYPE:
	{
		struct ctio_to_2xxx *entry = (struct ctio_to_2xxx *)pkt;
		qlt_do_ctio_completion(vha, entry->handle,
		    le16_to_cpu(entry->status)|(pkt->entry_status << 16),
		    entry);
		break;
	}

	case CTIO_A64_TYPE:
	{
		struct ctio_to_2xxx *entry = (struct ctio_to_2xxx *)pkt;
		qlt_do_ctio_completion(vha, entry->handle,
		    le16_to_cpu(entry->status)|(pkt->entry_status << 16),
		    entry);
		break;
	}

	case IMMED_NOTIFY_TYPE:
		ql_dbg(ql_dbg_tgt, vha, 0xe035, "%s", "IMMED_NOTIFY\n");
		qlt_handle_imm_notify(vha, (struct imm_ntfy_from_isp *)pkt);
		break;

	case NOTIFY_ACK_TYPE:
		if (tgt->notify_ack_expected > 0) {
			struct nack_to_isp *entry = (struct nack_to_isp *)pkt;
			ql_dbg(ql_dbg_tgt, vha, 0xe036,
			    "NOTIFY_ACK seq %08x status %x\n",
			    le16_to_cpu(entry->u.isp2x.seq_id),
			    le16_to_cpu(entry->u.isp2x.status));
			tgt->notify_ack_expected--;
			if (entry->u.isp2x.status !=
			    cpu_to_le16(NOTIFY_ACK_SUCCESS)) {
				ql_dbg(ql_dbg_tgt, vha, 0xe061,
				    "qla_target(%d): NOTIFY_ACK "
				    "failed %x\n", vha->vp_idx,
				    le16_to_cpu(entry->u.isp2x.status));
			}
		} else {
			ql_dbg(ql_dbg_tgt, vha, 0xe062,
			    "qla_target(%d): Unexpected NOTIFY_ACK received\n",
			    vha->vp_idx);
		}
		break;

	case ABTS_RECV_24XX:
		ql_dbg(ql_dbg_tgt, vha, 0xe037,
		    "ABTS_RECV_24XX: instance %d\n", vha->vp_idx);
		qlt_24xx_handle_abts(vha, (struct abts_recv_from_24xx *)pkt);
		break;

	case ABTS_RESP_24XX:
		if (tgt->abts_resp_expected > 0) {
			struct abts_resp_from_24xx_fw *entry =
			    (struct abts_resp_from_24xx_fw *)pkt;
			ql_dbg(ql_dbg_tgt, vha, 0xe038,
			    "ABTS_RESP_24XX: compl_status %x\n",
			    entry->compl_status);
			tgt->abts_resp_expected--;
			if (le16_to_cpu(entry->compl_status) !=
			    ABTS_RESP_COMPL_SUCCESS) {
				if ((entry->error_subcode1 == 0x1E) &&
				    (entry->error_subcode2 == 0)) {
					/*
					 * We've got a race here: the aborted
					 * exchange was not terminated, i.e.
					 * the response for the aborted command
					 * was sent after the abort request was
					 * received but before it was
					 * processed.  Unfortunately, the
					 * firmware has a silly requirement
					 * that all aborted exchanges must be
					 * explicitly terminated, otherwise it
					 * refuses to send responses for the
					 * abort requests.  So, we have to
					 * (re)terminate the exchange and retry
					 * the abort response.
					 */
					qlt_24xx_retry_term_exchange(vha,
					    entry);
				} else
					ql_dbg(ql_dbg_tgt, vha, 0xe063,
					    "qla_target(%d): ABTS_RESP_24XX "
					    "failed %x (subcode %x:%x)",
					    vha->vp_idx, entry->compl_status,
					    entry->error_subcode1,
					    entry->error_subcode2);
			}
		} else {
			ql_dbg(ql_dbg_tgt, vha, 0xe064,
			    "qla_target(%d): Unexpected ABTS_RESP_24XX "
			    "received\n", vha->vp_idx);
		}
		break;

	default:
		ql_dbg(ql_dbg_tgt, vha, 0xe065,
		    "qla_target(%d): Received unknown response pkt "
		    "type %x\n", vha->vp_idx, pkt->entry_type);
		break;
	}

	tgt->irq_cmd_count--;
}

/*
 * ha->hardware_lock supposed to be held on entry. Might drop it, then
 * reacquire.
 */
void qlt_async_event(uint16_t code, struct scsi_qla_host *vha,
	uint16_t *mailbox)
{
	struct qla_hw_data *ha = vha->hw;
	struct qla_tgt *tgt = vha->vha_tgt.qla_tgt;
	int login_code;

	if (!ha->tgt.tgt_ops)
		return;

	if (unlikely(tgt == NULL)) {
		ql_dbg(ql_dbg_tgt, vha, 0xe03a,
		    "ASYNC EVENT %#x, but no tgt (ha %p)\n", code, ha);
		return;
	}

	if (((code == MBA_POINT_TO_POINT) || (code == MBA_CHG_IN_CONNECTION)) &&
	    IS_QLA2100(ha))
		return;
	/*
	 * In tgt_stop mode we also should allow all requests to pass.
	 * Otherwise, some commands can get stuck.
	 */

	tgt->irq_cmd_count++;

	switch (code) {
	case MBA_RESET:			/* Reset */
	case MBA_SYSTEM_ERR:		/* System Error */
	case MBA_REQ_TRANSFER_ERR:	/* Request Transfer Error */
	case MBA_RSP_TRANSFER_ERR:	/* Response Transfer Error */
		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf03a,
		    "qla_target(%d): System error async event %#x "
		    "occurred", vha->vp_idx, code);
		break;
	case MBA_WAKEUP_THRES:		/* Request Queue Wake-up. */
		set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
		break;

	case MBA_LOOP_UP:
	{
		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf03b,
		    "qla_target(%d): Async LOOP_UP occurred "
		    "(m[0]=%x, m[1]=%x, m[2]=%x, m[3]=%x)", vha->vp_idx,
		    le16_to_cpu(mailbox[0]), le16_to_cpu(mailbox[1]),
		    le16_to_cpu(mailbox[2]), le16_to_cpu(mailbox[3]));
		if (tgt->link_reinit_iocb_pending) {
			qlt_send_notify_ack(vha, (void *)&tgt->link_reinit_iocb,
			    0, 0, 0, 0, 0, 0);
			tgt->link_reinit_iocb_pending = 0;
		}
		break;
	}

	case MBA_LIP_OCCURRED:
	case MBA_LOOP_DOWN:
	case MBA_LIP_RESET:
	case MBA_RSCN_UPDATE:
		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf03c,
		    "qla_target(%d): Async event %#x occurred "
		    "(m[0]=%x, m[1]=%x, m[2]=%x, m[3]=%x)", vha->vp_idx, code,
		    le16_to_cpu(mailbox[0]), le16_to_cpu(mailbox[1]),
		    le16_to_cpu(mailbox[2]), le16_to_cpu(mailbox[3]));
		break;

	case MBA_PORT_UPDATE:
		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf03d,
		    "qla_target(%d): Port update async event %#x "
		    "occurred: updating the ports database (m[0]=%x, m[1]=%x, "
		    "m[2]=%x, m[3]=%x)", vha->vp_idx, code,
		    le16_to_cpu(mailbox[0]), le16_to_cpu(mailbox[1]),
		    le16_to_cpu(mailbox[2]), le16_to_cpu(mailbox[3]));

		login_code = le16_to_cpu(mailbox[2]);
		if (login_code == 0x4)
			ql_dbg(ql_dbg_tgt_mgt, vha, 0xf03e,
			    "Async MB 2: Got PLOGI Complete\n");
		else if (login_code == 0x7)
			ql_dbg(ql_dbg_tgt_mgt, vha, 0xf03f,
			    "Async MB 2: Port Logged Out\n");
		break;

	default:
		break;
	}

	tgt->irq_cmd_count--;
}
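
/*
 * The two helpers below construct a session for an initiator known only
 * by S_ID: translate the S_ID to a loop id, fetch the firmware's port
 * database entry for it, then create the session.  qlt_make_local_sess()
 * restarts the whole sequence if a global reset raced with the discovery.
 */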

static fc_port_t *qlt_get_port_database(struct scsi_qla_host *vha,
	uint16_t loop_id)
{
	fc_port_t *fcport;
	int rc;

	fcport = kzalloc(sizeof(*fcport), GFP_KERNEL);
	if (!fcport) {
		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf06f,
		    "qla_target(%d): Allocation of tmp FC port failed",
		    vha->vp_idx);
		return NULL;
	}

	fcport->loop_id = loop_id;

	rc = qla2x00_get_port_database(vha, fcport, 0);
	if (rc != QLA_SUCCESS) {
		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf070,
		    "qla_target(%d): Failed to retrieve fcport "
		    "information -- get_port_database() returned %x "
		    "(loop_id=0x%04x)", vha->vp_idx, rc, loop_id);
		kfree(fcport);
		return NULL;
	}

	return fcport;
}

/* Must be called under tgt_mutex */
static struct qla_tgt_sess *qlt_make_local_sess(struct scsi_qla_host *vha,
	uint8_t *s_id)
{
	struct qla_tgt_sess *sess = NULL;
	fc_port_t *fcport = NULL;
	int rc, global_resets;
	uint16_t loop_id = 0;

retry:
	global_resets =
	    atomic_read(&vha->vha_tgt.qla_tgt->tgt_global_resets_count);

	rc = qla24xx_get_loop_id(vha, s_id, &loop_id);
	if (rc != 0) {
		if ((s_id[0] == 0xFF) &&
		    (s_id[1] == 0xFC)) {
			/*
			 * This is Domain Controller, so it should be
			 * OK to drop SCSI commands from it.
			 */
			ql_dbg(ql_dbg_tgt_mgt, vha, 0xf042,
			    "Unable to find initiator with S_ID %x:%x:%x",
			    s_id[0], s_id[1], s_id[2]);
		} else
			ql_dbg(ql_dbg_tgt_mgt, vha, 0xf071,
			    "qla_target(%d): Unable to find "
			    "initiator with S_ID %x:%x:%x",
			    vha->vp_idx, s_id[0], s_id[1],
			    s_id[2]);
		return NULL;
	}

	fcport = qlt_get_port_database(vha, loop_id);
	if (!fcport)
		return NULL;

	if (global_resets !=
	    atomic_read(&vha->vha_tgt.qla_tgt->tgt_global_resets_count)) {
		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf043,
		    "qla_target(%d): global reset during session discovery "
		    "(counter was %d, new %d), retrying", vha->vp_idx,
		    global_resets,
		    atomic_read(&vha->vha_tgt.
			qla_tgt->tgt_global_resets_count));
		goto retry;
	}

	sess = qlt_create_sess(vha, fcport, true);

	kfree(fcport);
	return sess;
}

static void qlt_abort_work(struct qla_tgt *tgt,
	struct qla_tgt_sess_work_param *prm)
{
	struct scsi_qla_host *vha = tgt->vha;
	struct qla_hw_data *ha = vha->hw;
	struct qla_tgt_sess *sess = NULL;
	unsigned long flags;
	uint32_t be_s_id;
	uint8_t s_id[3];
	int rc;

	spin_lock_irqsave(&ha->hardware_lock, flags);

	if (tgt->tgt_stop)
		goto out_term;

	s_id[0] = prm->abts.fcp_hdr_le.s_id[2];
	s_id[1] = prm->abts.fcp_hdr_le.s_id[1];
	s_id[2] = prm->abts.fcp_hdr_le.s_id[0];

	sess = ha->tgt.tgt_ops->find_sess_by_s_id(vha,
	    (unsigned char *)&be_s_id);
	if (!sess) {
		spin_unlock_irqrestore(&ha->hardware_lock, flags);

		mutex_lock(&vha->vha_tgt.tgt_mutex);
		sess = qlt_make_local_sess(vha, s_id);
		/* sess has got an extra creation ref */
		mutex_unlock(&vha->vha_tgt.tgt_mutex);

		spin_lock_irqsave(&ha->hardware_lock, flags);
		if (!sess)
			goto out_term;
	} else {
		kref_get(&sess->se_sess->sess_kref);
	}

	if (tgt->tgt_stop)
		goto out_term;

	rc = __qlt_24xx_handle_abts(vha, &prm->abts, sess);
	if (rc != 0)
		goto out_term;

	ha->tgt.tgt_ops->put_sess(sess);
	spin_unlock_irqrestore(&ha->hardware_lock, flags);
	return;

out_term:
	qlt_24xx_send_abts_resp(vha, &prm->abts, FCP_TMF_REJECTED, false);
	if (sess)
		ha->tgt.tgt_ops->put_sess(sess);
	spin_unlock_irqrestore(&ha->hardware_lock, flags);
}

static void qlt_tmr_work(struct qla_tgt *tgt,
	struct qla_tgt_sess_work_param *prm)
{
	struct atio_from_isp *a = &prm->tm_iocb2;
	struct scsi_qla_host *vha = tgt->vha;
	struct qla_hw_data *ha = vha->hw;
	struct qla_tgt_sess *sess = NULL;
	unsigned long flags;
	uint8_t *s_id = NULL; /* to hide compiler warnings */
	int rc;
	uint32_t lun, unpacked_lun;
	int fn;
	void *iocb;

	spin_lock_irqsave(&ha->hardware_lock, flags);

	if (tgt->tgt_stop)
		goto out_term;

	s_id = prm->tm_iocb2.u.isp24.fcp_hdr.s_id;
	sess = ha->tgt.tgt_ops->find_sess_by_s_id(vha, s_id);
	if (!sess) {
		spin_unlock_irqrestore(&ha->hardware_lock, flags);

		mutex_lock(&vha->vha_tgt.tgt_mutex);
		sess = qlt_make_local_sess(vha, s_id);
		/* sess has got an extra creation ref */
		mutex_unlock(&vha->vha_tgt.tgt_mutex);

		spin_lock_irqsave(&ha->hardware_lock, flags);
		if (!sess)
			goto out_term;
	} else {
		kref_get(&sess->se_sess->sess_kref);
	}

	iocb = a;
	lun = a->u.isp24.fcp_cmnd.lun;
	fn = a->u.isp24.fcp_cmnd.task_mgmt_flags;
	unpacked_lun = scsilun_to_int((struct scsi_lun *)&lun);

	rc = qlt_issue_task_mgmt(sess, unpacked_lun, fn, iocb, 0);
	if (rc != 0)
		goto out_term;

	ha->tgt.tgt_ops->put_sess(sess);
	spin_unlock_irqrestore(&ha->hardware_lock, flags);
	return;

out_term:
	qlt_send_term_exchange(vha, NULL, &prm->tm_iocb2, 1);
	if (sess)
		ha->tgt.tgt_ops->put_sess(sess);
	spin_unlock_irqrestore(&ha->hardware_lock, flags);
}

static void qlt_sess_work_fn(struct work_struct *work)
{
	struct qla_tgt *tgt = container_of(work, struct qla_tgt, sess_work);
	struct scsi_qla_host *vha = tgt->vha;
	unsigned long flags;

	ql_dbg(ql_dbg_tgt_mgt, vha, 0xf000, "Sess work (tgt %p)", tgt);

	spin_lock_irqsave(&tgt->sess_work_lock, flags);
	while (!list_empty(&tgt->sess_works_list)) {
		struct qla_tgt_sess_work_param *prm = list_entry(
		    tgt->sess_works_list.next, typeof(*prm),
		    sess_works_list_entry);

		/*
		 * This work can be scheduled on several CPUs at a time, so we
		 * must delete the entry to eliminate double processing
		 */
		list_del(&prm->sess_works_list_entry);

		spin_unlock_irqrestore(&tgt->sess_work_lock, flags);

		switch (prm->type) {
		case QLA_TGT_SESS_WORK_ABORT:
			qlt_abort_work(tgt, prm);
			break;
		case QLA_TGT_SESS_WORK_TM:
			qlt_tmr_work(tgt, prm);
			break;
		default:
			BUG_ON(1);
			break;
		}

		kfree(prm);

		spin_lock_irqsave(&tgt->sess_work_lock, flags);
	}
	spin_unlock_irqrestore(&tgt->sess_work_lock, flags);
}

/* Must be called under tgt_host_action_mutex */
int qlt_add_target(struct qla_hw_data *ha, struct scsi_qla_host *base_vha)
{
	struct qla_tgt *tgt;

	if (!QLA_TGT_MODE_ENABLED())
		return 0;

	if (!IS_TGT_MODE_CAPABLE(ha)) {
		ql_log(ql_log_warn, base_vha, 0xe070,
		    "This adapter does not support target mode.\n");
		return 0;
	}

	ql_dbg(ql_dbg_tgt, base_vha, 0xe03b,
	    "Registering target for host %ld(%p).\n", base_vha->host_no, ha);

	BUG_ON(base_vha->vha_tgt.qla_tgt != NULL);

	tgt = kzalloc(sizeof(struct qla_tgt), GFP_KERNEL);
	if (!tgt) {
		ql_dbg(ql_dbg_tgt, base_vha, 0xe066,
		    "Unable to allocate struct qla_tgt\n");
		return -ENOMEM;
	}

	if (!(base_vha->host->hostt->supported_mode & MODE_TARGET))
		base_vha->host->hostt->supported_mode |= MODE_TARGET;

	tgt->ha = ha;
	tgt->vha = base_vha;
	init_waitqueue_head(&tgt->waitQ);
	INIT_LIST_HEAD(&tgt->sess_list);
	INIT_LIST_HEAD(&tgt->del_sess_list);
	INIT_DELAYED_WORK(&tgt->sess_del_work,
		(void (*)(struct work_struct *))qlt_del_sess_work_fn);
	spin_lock_init(&tgt->sess_work_lock);
	INIT_WORK(&tgt->sess_work, qlt_sess_work_fn);
	INIT_LIST_HEAD(&tgt->sess_works_list);
	spin_lock_init(&tgt->srr_lock);
	INIT_LIST_HEAD(&tgt->srr_ctio_list);
	INIT_LIST_HEAD(&tgt->srr_imm_list);
	INIT_WORK(&tgt->srr_work, qlt_handle_srr_work);
	atomic_set(&tgt->tgt_global_resets_count, 0);

	base_vha->vha_tgt.qla_tgt = tgt;

	ql_dbg(ql_dbg_tgt, base_vha, 0xe067,
		"qla_target(%d): using 64 Bit PCI addressing",
		base_vha->vp_idx);
	tgt->tgt_enable_64bit_addr = 1;
	/* 3 is reserved */
	tgt->sg_tablesize = QLA_TGT_MAX_SG_24XX(base_vha->req->length - 3);
	tgt->datasegs_per_cmd = QLA_TGT_DATASEGS_PER_CMD_24XX;
	tgt->datasegs_per_cont = QLA_TGT_DATASEGS_PER_CONT_24XX;

	if (base_vha->fc_vport)
		return 0;

	mutex_lock(&qla_tgt_mutex);
	list_add_tail(&tgt->tgt_list_entry, &qla_tgt_glist);
	mutex_unlock(&qla_tgt_mutex);

	return 0;
}

/* Must be called under tgt_host_action_mutex */
int qlt_remove_target(struct qla_hw_data *ha, struct scsi_qla_host *vha)
{
	if (!vha->vha_tgt.qla_tgt)
		return 0;

	if (vha->fc_vport) {
		qlt_release(vha->vha_tgt.qla_tgt);
		return 0;
	}

	/* free left over qfull cmds */
	qlt_init_term_exchange(vha);

	mutex_lock(&qla_tgt_mutex);
	list_del(&vha->vha_tgt.qla_tgt->tgt_list_entry);
	mutex_unlock(&qla_tgt_mutex);

	ql_dbg(ql_dbg_tgt, vha, 0xe03c, "Unregistering target for host %ld(%p)",
	    vha->host_no, ha);
	qlt_release(vha->vha_tgt.qla_tgt);

	return 0;
}

static void qlt_lport_dump(struct scsi_qla_host *vha, u64 wwpn,
	unsigned char *b)
{
	int i;

	pr_debug("qla2xxx HW vha->node_name: ");
	for (i = 0; i < WWN_SIZE; i++)
		pr_debug("%02x ", vha->node_name[i]);
	pr_debug("\n");
	pr_debug("qla2xxx HW vha->port_name: ");
	for (i = 0; i < WWN_SIZE; i++)
		pr_debug("%02x ", vha->port_name[i]);
	pr_debug("\n");

	pr_debug("qla2xxx passed configfs WWPN: ");
	put_unaligned_be64(wwpn, b);
	for (i = 0; i < WWN_SIZE; i++)
		pr_debug("%02x ", b[i]);
	pr_debug("\n");
}

/**
 * qla_tgt_lport_register - register lport with external module
 *
 * @target_lport_ptr: pointer for tcm_qla2xxx specific lport data
 * @phys_wwpn: passed FC target WWPN
 * @npiv_wwpn: NPIV WWPN, if any
 * @npiv_wwnn: NPIV WWNN, if any
 * @callback: lport initialization callback for tcm_qla2xxx code
 */
int qlt_lport_register(void *target_lport_ptr, u64 phys_wwpn,
		       u64 npiv_wwpn, u64 npiv_wwnn,
		       int (*callback)(struct scsi_qla_host *, void *, u64, u64))
{
	struct qla_tgt *tgt;
	struct scsi_qla_host *vha;
	struct qla_hw_data *ha;
	struct Scsi_Host *host;
	unsigned long flags;
	int rc;
	u8 b[WWN_SIZE];

	mutex_lock(&qla_tgt_mutex);
	list_for_each_entry(tgt, &qla_tgt_glist, tgt_list_entry) {
		vha = tgt->vha;
		ha = vha->hw;

		host = vha->host;
		if (!host)
			continue;

		if (!(host->hostt->supported_mode & MODE_TARGET))
			continue;

		spin_lock_irqsave(&ha->hardware_lock, flags);
		if ((!npiv_wwpn || !npiv_wwnn) && host->active_mode & MODE_TARGET) {
			pr_debug("MODE_TARGET already active on qla2xxx(%d)\n",
			    host->host_no);
			spin_unlock_irqrestore(&ha->hardware_lock, flags);
			continue;
		}
		if (tgt->tgt_stop) {
			pr_debug("MODE_TARGET in shutdown on qla2xxx(%d)\n",
				 host->host_no);
			spin_unlock_irqrestore(&ha->hardware_lock, flags);
			continue;
		}
		spin_unlock_irqrestore(&ha->hardware_lock, flags);

		if (!scsi_host_get(host)) {
			ql_dbg(ql_dbg_tgt, vha, 0xe068,
			    "Unable to scsi_host_get() for"
			    " qla2xxx scsi_host\n");
			continue;
		}
		qlt_lport_dump(vha, phys_wwpn, b);

		if (memcmp(vha->port_name, b, WWN_SIZE)) {
			scsi_host_put(host);
			continue;
		}
		rc = (*callback)(vha, target_lport_ptr, npiv_wwpn, npiv_wwnn);
		if (rc != 0)
			scsi_host_put(host);

		mutex_unlock(&qla_tgt_mutex);
		return rc;
	}
	mutex_unlock(&qla_tgt_mutex);

	return -ENODEV;
}
EXPORT_SYMBOL(qlt_lport_register);
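
/*
 * Illustrative only: an external fabric module is expected to register
 * its lport roughly like this, passing its own initialization callback
 * (the callback name here is hypothetical; see tcm_qla2xxx for the real
 * caller):
 *
 *	rc = qlt_lport_register(lport, wwpn, 0, 0, my_lport_register_cb);
 *	if (rc < 0)
 *		return rc;
 */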

/**
 * qla_tgt_lport_deregister - Deregister lport
 *
 * @vha: Registered scsi_qla_host pointer
 */
void qlt_lport_deregister(struct scsi_qla_host *vha)
{
	struct qla_hw_data *ha = vha->hw;
	struct Scsi_Host *sh = vha->host;
	/*
	 * Clear the target_lport_ptr qla_target_template pointer in qla_hw_data
	 */
	vha->vha_tgt.target_lport_ptr = NULL;
	ha->tgt.tgt_ops = NULL;
	/*
	 * Release the Scsi_Host reference for the underlying qla2xxx host
	 */
	scsi_host_put(sh);
}
EXPORT_SYMBOL(qlt_lport_deregister);

/* Must be called under HW lock */
static void qlt_set_mode(struct scsi_qla_host *vha)
{
	struct qla_hw_data *ha = vha->hw;

	switch (ql2x_ini_mode) {
	case QLA2XXX_INI_MODE_DISABLED:
	case QLA2XXX_INI_MODE_EXCLUSIVE:
		vha->host->active_mode = MODE_TARGET;
		break;
	case QLA2XXX_INI_MODE_ENABLED:
		vha->host->active_mode |= MODE_TARGET;
		break;
	default:
		break;
	}

	if (ha->tgt.ini_mode_force_reverse)
		qla_reverse_ini_mode(vha);
}

/* Must be called under HW lock */
static void qlt_clear_mode(struct scsi_qla_host *vha)
{
	struct qla_hw_data *ha = vha->hw;

	switch (ql2x_ini_mode) {
	case QLA2XXX_INI_MODE_DISABLED:
		vha->host->active_mode = MODE_UNKNOWN;
		break;
	case QLA2XXX_INI_MODE_EXCLUSIVE:
		vha->host->active_mode = MODE_INITIATOR;
		break;
	case QLA2XXX_INI_MODE_ENABLED:
		vha->host->active_mode &= ~MODE_TARGET;
		break;
	default:
		break;
	}

	if (ha->tgt.ini_mode_force_reverse)
		qla_reverse_ini_mode(vha);
}
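
/*
 * Mode matrix implemented by qlt_set_mode()/qlt_clear_mode() above:
 * with qlini_mode "disabled" or "exclusive", enabling target mode takes
 * over the port (active_mode becomes MODE_TARGET only); with "enabled",
 * MODE_TARGET is OR-ed in and initiator mode keeps running alongside.
 * qlt_clear_mode() performs the corresponding inverse transition.
 */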

/*
 * qla_tgt_enable_vha - NO LOCK HELD
 *
 * host_reset, bring up w/ Target Mode Enabled
 */
void
qlt_enable_vha(struct scsi_qla_host *vha)
{
	struct qla_hw_data *ha = vha->hw;
	struct qla_tgt *tgt = vha->vha_tgt.qla_tgt;
	unsigned long flags;
	scsi_qla_host_t *base_vha = pci_get_drvdata(ha->pdev);

	if (!tgt) {
		ql_dbg(ql_dbg_tgt, vha, 0xe069,
		    "Unable to locate qla_tgt pointer from"
		    " struct qla_hw_data\n");
		dump_stack();
		return;
	}

	spin_lock_irqsave(&ha->hardware_lock, flags);
	tgt->tgt_stopped = 0;
	qlt_set_mode(vha);
	spin_unlock_irqrestore(&ha->hardware_lock, flags);

	if (vha->vp_idx) {
		qla24xx_disable_vp(vha);
		qla24xx_enable_vp(vha);
	} else {
		set_bit(ISP_ABORT_NEEDED, &base_vha->dpc_flags);
		qla2xxx_wake_dpc(base_vha);
		qla2x00_wait_for_hba_online(base_vha);
	}
}
EXPORT_SYMBOL(qlt_enable_vha);

/*
 * qla_tgt_disable_vha - NO LOCK HELD
 *
 * Disable Target Mode and reset the adapter
 */
static void qlt_disable_vha(struct scsi_qla_host *vha)
{
	struct qla_hw_data *ha = vha->hw;
	struct qla_tgt *tgt = vha->vha_tgt.qla_tgt;
	unsigned long flags;

	if (!tgt) {
		ql_dbg(ql_dbg_tgt, vha, 0xe06a,
		    "Unable to locate qla_tgt pointer from"
		    " struct qla_hw_data\n");
		dump_stack();
		return;
	}

	spin_lock_irqsave(&ha->hardware_lock, flags);
	qlt_clear_mode(vha);
	spin_unlock_irqrestore(&ha->hardware_lock, flags);

	set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
	qla2xxx_wake_dpc(vha);
	qla2x00_wait_for_hba_online(vha);
}

/*
 * Called from qla_init.c:qla24xx_vport_create() context to setup
 * the target mode specific struct scsi_qla_host and struct qla_hw_data
 * members
 */
void
qlt_vport_create(struct scsi_qla_host *vha, struct qla_hw_data *ha)
{
	if (!qla_tgt_mode_enabled(vha))
		return;

	vha->vha_tgt.qla_tgt = NULL;

	mutex_init(&vha->vha_tgt.tgt_mutex);
	mutex_init(&vha->vha_tgt.tgt_host_action_mutex);

	qlt_clear_mode(vha);

	/*
	 * NOTE: Currently the value is kept the same for <24xx and
	 * >=24xx ISPs. If it is necessary to change it,
	 * the check should be added for specific ISPs,
	 * assigning the value appropriately.
	 */
	ha->tgt.atio_q_length = ATIO_ENTRY_CNT_24XX;

	qlt_add_target(ha, vha);
}

void
qlt_rff_id(struct scsi_qla_host *vha, struct ct_sns_req *ct_req)
{
	/*
	 * FC-4 Feature bit 0 indicates target functionality to the name server.
	 */
	if (qla_tgt_mode_enabled(vha)) {
		if (qla_ini_mode_enabled(vha))
			ct_req->req.rff_id.fc4_feature = BIT_0 | BIT_1;
		else
			ct_req->req.rff_id.fc4_feature = BIT_0;
	} else if (qla_ini_mode_enabled(vha)) {
		ct_req->req.rff_id.fc4_feature = BIT_1;
	}
}

/*
 * qlt_init_atio_q_entries() - Initializes ATIO queue entries.
 * @vha: SCSI driver HA context
 *
 * Beginning of ATIO ring has initialization control block already built
 * by nvram config routine.
 */
void
qlt_init_atio_q_entries(struct scsi_qla_host *vha)
{
	struct qla_hw_data *ha = vha->hw;
	uint16_t cnt;
	struct atio_from_isp *pkt = (struct atio_from_isp *)ha->tgt.atio_ring;

	if (!qla_tgt_mode_enabled(vha))
		return;

	for (cnt = 0; cnt < ha->tgt.atio_q_length; cnt++) {
		pkt->u.raw.signature = ATIO_PROCESSED;
		pkt++;
	}
}

/*
 * qlt_24xx_process_atio_queue() - Process ATIO queue entries.
 * @vha: SCSI driver HA context
 */
void
qlt_24xx_process_atio_queue(struct scsi_qla_host *vha)
{
	struct qla_hw_data *ha = vha->hw;
	struct atio_from_isp *pkt;
	int cnt, i;

	if (!vha->flags.online)
		return;

	while (ha->tgt.atio_ring_ptr->signature != ATIO_PROCESSED) {
		pkt = (struct atio_from_isp *)ha->tgt.atio_ring_ptr;
		cnt = pkt->u.raw.entry_count;

		qlt_24xx_atio_pkt_all_vps(vha, (struct atio_from_isp *)pkt);

		for (i = 0; i < cnt; i++) {
			ha->tgt.atio_ring_index++;
			if (ha->tgt.atio_ring_index == ha->tgt.atio_q_length) {
				ha->tgt.atio_ring_index = 0;
				ha->tgt.atio_ring_ptr = ha->tgt.atio_ring;
			} else
				ha->tgt.atio_ring_ptr++;

			pkt->u.raw.signature = ATIO_PROCESSED;
			pkt = (struct atio_from_isp *)ha->tgt.atio_ring_ptr;
		}
		wmb();
	}

	/* Adjust ring index */
	WRT_REG_DWORD(ISP_ATIO_Q_OUT(vha), ha->tgt.atio_ring_index);
}
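
/*
 * Worked example of the ring walk above: with atio_q_length == 4 and
 * atio_ring_index == 3, a packet with entry_count == 2 advances the index
 * to 0 (wrapping atio_ring_ptr back to the ring base) and then to 1,
 * marking each consumed entry ATIO_PROCESSED before the out-pointer
 * register is finally updated.
 */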

void
qlt_24xx_config_rings(struct scsi_qla_host *vha)
{
	struct qla_hw_data *ha = vha->hw;

	if (!QLA_TGT_MODE_ENABLED())
		return;

	WRT_REG_DWORD(ISP_ATIO_Q_IN(vha), 0);
	WRT_REG_DWORD(ISP_ATIO_Q_OUT(vha), 0);
	RD_REG_DWORD(ISP_ATIO_Q_OUT(vha));

	if (IS_ATIO_MSIX_CAPABLE(ha)) {
		struct qla_msix_entry *msix = &ha->msix_entries[2];
		struct init_cb_24xx *icb = (struct init_cb_24xx *)ha->init_cb;

		icb->msix_atio = cpu_to_le16(msix->entry);
		ql_dbg(ql_dbg_init, vha, 0xf072,
		    "Registering ICB vector 0x%x for atio que.\n",
		    msix->entry);
	}
}

void
qlt_24xx_config_nvram_stage1(struct scsi_qla_host *vha, struct nvram_24xx *nv)
{
	struct qla_hw_data *ha = vha->hw;

	if (qla_tgt_mode_enabled(vha)) {
		if (!ha->tgt.saved_set) {
			/* We save only once */
			ha->tgt.saved_exchange_count = nv->exchange_count;
			ha->tgt.saved_firmware_options_1 =
			    nv->firmware_options_1;
			ha->tgt.saved_firmware_options_2 =
			    nv->firmware_options_2;
			ha->tgt.saved_firmware_options_3 =
			    nv->firmware_options_3;
			ha->tgt.saved_set = 1;
		}

		nv->exchange_count = cpu_to_le16(0xFFFF);

		/* Enable target mode */
		nv->firmware_options_1 |= cpu_to_le32(BIT_4);

		/* Disable ini mode, if requested */
		if (!qla_ini_mode_enabled(vha))
			nv->firmware_options_1 |= cpu_to_le32(BIT_5);

		/* Disable Full Login after LIP */
		nv->firmware_options_1 &= cpu_to_le32(~BIT_13);
		/* Enable initial LIP */
		nv->firmware_options_1 &= cpu_to_le32(~BIT_9);
		if (ql2xtgt_tape_enable)
			/* Enable FC Tape support */
			nv->firmware_options_2 |= cpu_to_le32(BIT_12);
		else
			/* Disable FC Tape support */
			nv->firmware_options_2 &= cpu_to_le32(~BIT_12);

		/* Disable Full Login after LIP */
		nv->host_p &= cpu_to_le32(~BIT_10);
		/* Enable target PRLI control */
		nv->firmware_options_2 |= cpu_to_le32(BIT_14);
	} else {
		if (ha->tgt.saved_set) {
			nv->exchange_count = ha->tgt.saved_exchange_count;
			nv->firmware_options_1 =
			    ha->tgt.saved_firmware_options_1;
			nv->firmware_options_2 =
			    ha->tgt.saved_firmware_options_2;
			nv->firmware_options_3 =
			    ha->tgt.saved_firmware_options_3;
		}
		return;
	}

	/* out-of-order frames reassembly */
	nv->firmware_options_3 |= BIT_6|BIT_9;

	if (ha->tgt.enable_class_2) {
		if (vha->flags.init_done)
			fc_host_supported_classes(vha->host) =
				FC_COS_CLASS2 | FC_COS_CLASS3;

		nv->firmware_options_2 |= cpu_to_le32(BIT_8);
	} else {
		if (vha->flags.init_done)
			fc_host_supported_classes(vha->host) = FC_COS_CLASS3;

		nv->firmware_options_2 &= ~cpu_to_le32(BIT_8);
	}
}
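
/*
 * Note on exchange_count in stage1 above: writing 0xFFFF appears to
 * request the firmware's maximum number of exchanges while target mode
 * is active; the original NVRAM values are saved once and restored when
 * target mode is switched back off.  This is an interpretation of the
 * save/restore pattern, not a documented firmware contract.
 */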

void
qlt_24xx_config_nvram_stage2(struct scsi_qla_host *vha,
	struct init_cb_24xx *icb)
{
	struct qla_hw_data *ha = vha->hw;

	if (ha->tgt.node_name_set) {
		memcpy(icb->node_name, ha->tgt.tgt_node_name, WWN_SIZE);
		icb->firmware_options_1 |= cpu_to_le32(BIT_14);
	}
}

void
qlt_81xx_config_nvram_stage1(struct scsi_qla_host *vha, struct nvram_81xx *nv)
{
	struct qla_hw_data *ha = vha->hw;

	if (!QLA_TGT_MODE_ENABLED())
		return;

	if (qla_tgt_mode_enabled(vha)) {
		if (!ha->tgt.saved_set) {
			/* We save only once */
			ha->tgt.saved_exchange_count = nv->exchange_count;
			ha->tgt.saved_firmware_options_1 =
			    nv->firmware_options_1;
			ha->tgt.saved_firmware_options_2 =
			    nv->firmware_options_2;
			ha->tgt.saved_firmware_options_3 =
			    nv->firmware_options_3;
			ha->tgt.saved_set = 1;
		}

		nv->exchange_count = cpu_to_le16(0xFFFF);

		/* Enable target mode */
		nv->firmware_options_1 |= cpu_to_le32(BIT_4);

		/* Disable ini mode, if requested */
		if (!qla_ini_mode_enabled(vha))
			nv->firmware_options_1 |= cpu_to_le32(BIT_5);

		/* Disable Full Login after LIP */
		nv->firmware_options_1 &= cpu_to_le32(~BIT_13);
		/* Enable initial LIP */
		nv->firmware_options_1 &= cpu_to_le32(~BIT_9);
		if (ql2xtgt_tape_enable)
			/* Enable FC tape support */
			nv->firmware_options_2 |= cpu_to_le32(BIT_12);
		else
			/* Disable FC tape support */
			nv->firmware_options_2 &= cpu_to_le32(~BIT_12);

		/* Disable Full Login after LIP */
		nv->host_p &= cpu_to_le32(~BIT_10);
		/* Enable target PRLI control */
		nv->firmware_options_2 |= cpu_to_le32(BIT_14);
	} else {
		if (ha->tgt.saved_set) {
			nv->exchange_count = ha->tgt.saved_exchange_count;
			nv->firmware_options_1 =
			    ha->tgt.saved_firmware_options_1;
			nv->firmware_options_2 =
			    ha->tgt.saved_firmware_options_2;
			nv->firmware_options_3 =
			    ha->tgt.saved_firmware_options_3;
		}
		return;
	}

	/* out-of-order frames reassembly */
	nv->firmware_options_3 |= BIT_6|BIT_9;

	if (ha->tgt.enable_class_2) {
		if (vha->flags.init_done)
			fc_host_supported_classes(vha->host) =
				FC_COS_CLASS2 | FC_COS_CLASS3;

		nv->firmware_options_2 |= cpu_to_le32(BIT_8);
	} else {
		if (vha->flags.init_done)
			fc_host_supported_classes(vha->host) = FC_COS_CLASS3;

		nv->firmware_options_2 &= ~cpu_to_le32(BIT_8);
	}
}

void
qlt_81xx_config_nvram_stage2(struct scsi_qla_host *vha,
	struct init_cb_81xx *icb)
{
	struct qla_hw_data *ha = vha->hw;

	if (!QLA_TGT_MODE_ENABLED())
		return;

	if (ha->tgt.node_name_set) {
		memcpy(icb->node_name, ha->tgt.tgt_node_name, WWN_SIZE);
		icb->firmware_options_1 |= cpu_to_le32(BIT_14);
	}
}

void
qlt_83xx_iospace_config(struct qla_hw_data *ha)
{
	if (!QLA_TGT_MODE_ENABLED())
		return;

	ha->msix_count += 1; /* For ATIO Q */
}

int
qlt_24xx_process_response_error(struct scsi_qla_host *vha,
	struct sts_entry_24xx *pkt)
{
	switch (pkt->entry_type) {
	case ABTS_RECV_24XX:
	case ABTS_RESP_24XX:
	case CTIO_TYPE7:
	case NOTIFY_ACK_TYPE:
	case CTIO_CRC2:
		return 1;
	default:
		return 0;
	}
}

void
qlt_modify_vp_config(struct scsi_qla_host *vha,
	struct vp_config_entry_24xx *vpmod)
{
	if (qla_tgt_mode_enabled(vha))
		vpmod->options_idx1 &= ~BIT_5;
	/* Disable ini mode, if requested */
	if (!qla_ini_mode_enabled(vha))
		vpmod->options_idx1 &= ~BIT_4;
}

void
qlt_probe_one_stage1(struct scsi_qla_host *base_vha, struct qla_hw_data *ha)
{
	if (!QLA_TGT_MODE_ENABLED())
		return;

	if (ha->mqenable || IS_QLA83XX(ha)) {
		ISP_ATIO_Q_IN(base_vha) = &ha->mqiobase->isp25mq.atio_q_in;
		ISP_ATIO_Q_OUT(base_vha) = &ha->mqiobase->isp25mq.atio_q_out;
	} else {
		ISP_ATIO_Q_IN(base_vha) = &ha->iobase->isp24.atio_q_in;
		ISP_ATIO_Q_OUT(base_vha) = &ha->iobase->isp24.atio_q_out;
	}

	mutex_init(&base_vha->vha_tgt.tgt_mutex);
	mutex_init(&base_vha->vha_tgt.tgt_host_action_mutex);
	qlt_clear_mode(base_vha);
}

irqreturn_t
qla83xx_msix_atio_q(int irq, void *dev_id)
{
	struct rsp_que *rsp;
	scsi_qla_host_t *vha;
	struct qla_hw_data *ha;
	unsigned long flags;

	rsp = (struct rsp_que *)dev_id;
	ha = rsp->hw;
	vha = pci_get_drvdata(ha->pdev);

	spin_lock_irqsave(&ha->hardware_lock, flags);

	qlt_24xx_process_atio_queue(vha);
	qla24xx_process_response_queue(vha, rsp);

	spin_unlock_irqrestore(&ha->hardware_lock, flags);

	return IRQ_HANDLED;
}

int
qlt_mem_alloc(struct qla_hw_data *ha)
{
	if (!QLA_TGT_MODE_ENABLED())
		return 0;

	ha->tgt.tgt_vp_map = kzalloc(sizeof(struct qla_tgt_vp_map) *
	    MAX_MULTI_ID_FABRIC, GFP_KERNEL);
	if (!ha->tgt.tgt_vp_map)
		return -ENOMEM;

	ha->tgt.atio_ring = dma_alloc_coherent(&ha->pdev->dev,
	    (ha->tgt.atio_q_length + 1) * sizeof(struct atio_from_isp),
	    &ha->tgt.atio_dma, GFP_KERNEL);
	if (!ha->tgt.atio_ring) {
		kfree(ha->tgt.tgt_vp_map);
		return -ENOMEM;
	}
	return 0;
}

void
qlt_mem_free(struct qla_hw_data *ha)
{
	if (!QLA_TGT_MODE_ENABLED())
		return;

	if (ha->tgt.atio_ring) {
		dma_free_coherent(&ha->pdev->dev, (ha->tgt.atio_q_length + 1) *
		    sizeof(struct atio_from_isp), ha->tgt.atio_ring,
		    ha->tgt.atio_dma);
	}
	kfree(ha->tgt.tgt_vp_map);
}

/* vport_slock to be held by the caller */
void
qlt_update_vp_map(struct scsi_qla_host *vha, int cmd)
{
	if (!QLA_TGT_MODE_ENABLED())
		return;

	switch (cmd) {
	case SET_VP_IDX:
		vha->hw->tgt.tgt_vp_map[vha->vp_idx].vha = vha;
		break;
	case SET_AL_PA:
		vha->hw->tgt.tgt_vp_map[vha->d_id.b.al_pa].idx = vha->vp_idx;
		break;
	case RESET_VP_IDX:
		vha->hw->tgt.tgt_vp_map[vha->vp_idx].vha = NULL;
		break;
	case RESET_AL_PA:
		vha->hw->tgt.tgt_vp_map[vha->d_id.b.al_pa].idx = 0;
		break;
	}
}
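
/*
 * Sketch of how the map maintained above is consumed (an assumption based
 * on the vp lookup helpers earlier in this driver): given a vp_index from
 * an incoming IOCB,
 *
 *	vha = ha->tgt.tgt_vp_map[vp_index].vha;
 *
 * while tgt_vp_map[al_pa].idx translates an AL_PA back to the vp_index
 * registered via SET_VP_IDX/SET_AL_PA.
 */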

static int __init qlt_parse_ini_mode(void)
{
	if (strcasecmp(qlini_mode, QLA2XXX_INI_MODE_STR_EXCLUSIVE) == 0)
		ql2x_ini_mode = QLA2XXX_INI_MODE_EXCLUSIVE;
	else if (strcasecmp(qlini_mode, QLA2XXX_INI_MODE_STR_DISABLED) == 0)
		ql2x_ini_mode = QLA2XXX_INI_MODE_DISABLED;
	else if (strcasecmp(qlini_mode, QLA2XXX_INI_MODE_STR_ENABLED) == 0)
		ql2x_ini_mode = QLA2XXX_INI_MODE_ENABLED;
	else
		return false;

	return true;
}

int __init qlt_init(void)
{
	int ret;

	if (!qlt_parse_ini_mode()) {
		ql_log(ql_log_fatal, NULL, 0xe06b,
		    "qlt_parse_ini_mode() failed\n");
		return -EINVAL;
	}

	if (!QLA_TGT_MODE_ENABLED())
		return 0;

	qla_tgt_mgmt_cmd_cachep = kmem_cache_create("qla_tgt_mgmt_cmd_cachep",
	    sizeof(struct qla_tgt_mgmt_cmd), __alignof__(struct
	    qla_tgt_mgmt_cmd), 0, NULL);
	if (!qla_tgt_mgmt_cmd_cachep) {
		ql_log(ql_log_fatal, NULL, 0xe06d,
		    "kmem_cache_create for qla_tgt_mgmt_cmd_cachep failed\n");
		return -ENOMEM;
	}

	qla_tgt_mgmt_cmd_mempool = mempool_create(25, mempool_alloc_slab,
	    mempool_free_slab, qla_tgt_mgmt_cmd_cachep);
	if (!qla_tgt_mgmt_cmd_mempool) {
		ql_log(ql_log_fatal, NULL, 0xe06e,
		    "mempool_create for qla_tgt_mgmt_cmd_mempool failed\n");
		ret = -ENOMEM;
		goto out_mgmt_cmd_cachep;
	}

	qla_tgt_wq = alloc_workqueue("qla_tgt_wq", 0, 0);
	if (!qla_tgt_wq) {
		ql_log(ql_log_fatal, NULL, 0xe06f,
		    "alloc_workqueue for qla_tgt_wq failed\n");
		ret = -ENOMEM;
		goto out_cmd_mempool;
	}
	/*
	 * Return 1 to signal that initiator-mode is being disabled
	 */
	return (ql2x_ini_mode == QLA2XXX_INI_MODE_DISABLED) ? 1 : 0;

out_cmd_mempool:
	mempool_destroy(qla_tgt_mgmt_cmd_mempool);
out_mgmt_cmd_cachep:
	kmem_cache_destroy(qla_tgt_mgmt_cmd_cachep);
	return ret;
}

void qlt_exit(void)
{
	if (!QLA_TGT_MODE_ENABLED())
		return;

	destroy_workqueue(qla_tgt_wq);
	mempool_destroy(qla_tgt_mgmt_cmd_mempool);
	kmem_cache_destroy(qla_tgt_mgmt_cmd_cachep);
}