target: Change target_submit_cmd() to return void
drivers/target/target_core_transport.c
1/*******************************************************************************
2 * Filename: target_core_transport.c
3 *
4 * This file contains the Generic Target Engine Core.
5 *
6 * Copyright (c) 2002, 2003, 2004, 2005 PyX Technologies, Inc.
7 * Copyright (c) 2005, 2006, 2007 SBE, Inc.
8 * Copyright (c) 2007-2010 Rising Tide Systems
9 * Copyright (c) 2008-2010 Linux-iSCSI.org
10 *
11 * Nicholas A. Bellinger <nab@kernel.org>
12 *
13 * This program is free software; you can redistribute it and/or modify
14 * it under the terms of the GNU General Public License as published by
15 * the Free Software Foundation; either version 2 of the License, or
16 * (at your option) any later version.
17 *
18 * This program is distributed in the hope that it will be useful,
19 * but WITHOUT ANY WARRANTY; without even the implied warranty of
20 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
21 * GNU General Public License for more details.
22 *
23 * You should have received a copy of the GNU General Public License
24 * along with this program; if not, write to the Free Software
25 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
26 *
27 ******************************************************************************/
28
29#include <linux/net.h>
30#include <linux/delay.h>
31#include <linux/string.h>
32#include <linux/timer.h>
33#include <linux/slab.h>
34#include <linux/blkdev.h>
35#include <linux/spinlock.h>
36#include <linux/kthread.h>
37#include <linux/in.h>
38#include <linux/cdrom.h>
827509e3 39#include <linux/module.h>
40#include <asm/unaligned.h>
41#include <net/sock.h>
42#include <net/tcp.h>
43#include <scsi/scsi.h>
44#include <scsi/scsi_cmnd.h>
e66ecd50 45#include <scsi/scsi_tcq.h>
46
47#include <target/target_core_base.h>
48#include <target/target_core_backend.h>
49#include <target/target_core_fabric.h>
50#include <target/target_core_configfs.h>
51
e26d99ae 52#include "target_core_internal.h"
c66ac9db 53#include "target_core_alua.h"
c66ac9db 54#include "target_core_pr.h"
55#include "target_core_ua.h"
56
e3d6f909 57static int sub_api_initialized;
c66ac9db 58
35e0e757 59static struct workqueue_struct *target_completion_wq;
60static struct kmem_cache *se_sess_cache;
61struct kmem_cache *se_tmr_req_cache;
62struct kmem_cache *se_ua_cache;
63struct kmem_cache *t10_pr_reg_cache;
64struct kmem_cache *t10_alua_lu_gp_cache;
65struct kmem_cache *t10_alua_lu_gp_mem_cache;
66struct kmem_cache *t10_alua_tg_pt_gp_cache;
67struct kmem_cache *t10_alua_tg_pt_gp_mem_cache;
68
c66ac9db 69static int transport_generic_write_pending(struct se_cmd *);
5951146d 70static int transport_processing_thread(void *param);
4d2300cc 71static int __transport_execute_tasks(struct se_device *dev, struct se_cmd *);
c66ac9db 72static void transport_complete_task_attr(struct se_cmd *cmd);
07bde79a 73static void transport_handle_queue_full(struct se_cmd *cmd,
e057f533 74 struct se_device *dev);
c66ac9db 75static void transport_free_dev_tasks(struct se_cmd *cmd);
05d1c7c0 76static int transport_generic_get_mem(struct se_cmd *cmd);
39c05f32 77static void transport_put_cmd(struct se_cmd *cmd);
3df8d40b 78static void transport_remove_cmd_from_queue(struct se_cmd *cmd);
c66ac9db 79static int transport_set_sense_codes(struct se_cmd *cmd, u8 asc, u8 ascq);
03e98c9e 80static void transport_generic_request_failure(struct se_cmd *);
35e0e757 81static void target_complete_ok_work(struct work_struct *work);
c66ac9db 82
e3d6f909 83int init_se_kmem_caches(void)
c66ac9db 84{
85 se_tmr_req_cache = kmem_cache_create("se_tmr_cache",
86 sizeof(struct se_tmr_req), __alignof__(struct se_tmr_req),
87 0, NULL);
88 if (!se_tmr_req_cache) {
89 pr_err("kmem_cache_create() for struct se_tmr_req"
c66ac9db 90 " failed\n");
97c34f3b 91 goto out;
92 }
93 se_sess_cache = kmem_cache_create("se_sess_cache",
94 sizeof(struct se_session), __alignof__(struct se_session),
95 0, NULL);
96 if (!se_sess_cache) {
97 pr_err("kmem_cache_create() for struct se_session"
c66ac9db 98 " failed\n");
35e0e757 99 goto out_free_tmr_req_cache;
100 }
101 se_ua_cache = kmem_cache_create("se_ua_cache",
102 sizeof(struct se_ua), __alignof__(struct se_ua),
103 0, NULL);
104 if (!se_ua_cache) {
105 pr_err("kmem_cache_create() for struct se_ua failed\n");
35e0e757 106 goto out_free_sess_cache;
c66ac9db 107 }
108 t10_pr_reg_cache = kmem_cache_create("t10_pr_reg_cache",
109 sizeof(struct t10_pr_registration),
110 __alignof__(struct t10_pr_registration), 0, NULL);
111 if (!t10_pr_reg_cache) {
112 pr_err("kmem_cache_create() for struct t10_pr_registration"
c66ac9db 113 " failed\n");
35e0e757 114 goto out_free_ua_cache;
115 }
116 t10_alua_lu_gp_cache = kmem_cache_create("t10_alua_lu_gp_cache",
117 sizeof(struct t10_alua_lu_gp), __alignof__(struct t10_alua_lu_gp),
118 0, NULL);
119 if (!t10_alua_lu_gp_cache) {
120 pr_err("kmem_cache_create() for t10_alua_lu_gp_cache"
c66ac9db 121 " failed\n");
35e0e757 122 goto out_free_pr_reg_cache;
123 }
124 t10_alua_lu_gp_mem_cache = kmem_cache_create("t10_alua_lu_gp_mem_cache",
125 sizeof(struct t10_alua_lu_gp_member),
126 __alignof__(struct t10_alua_lu_gp_member), 0, NULL);
127 if (!t10_alua_lu_gp_mem_cache) {
128 pr_err("kmem_cache_create() for t10_alua_lu_gp_mem_"
c66ac9db 129 "cache failed\n");
35e0e757 130 goto out_free_lu_gp_cache;
131 }
132 t10_alua_tg_pt_gp_cache = kmem_cache_create("t10_alua_tg_pt_gp_cache",
133 sizeof(struct t10_alua_tg_pt_gp),
134 __alignof__(struct t10_alua_tg_pt_gp), 0, NULL);
135 if (!t10_alua_tg_pt_gp_cache) {
136 pr_err("kmem_cache_create() for t10_alua_tg_pt_gp_"
c66ac9db 137 "cache failed\n");
35e0e757 138 goto out_free_lu_gp_mem_cache;
139 }
140 t10_alua_tg_pt_gp_mem_cache = kmem_cache_create(
141 "t10_alua_tg_pt_gp_mem_cache",
142 sizeof(struct t10_alua_tg_pt_gp_member),
143 __alignof__(struct t10_alua_tg_pt_gp_member),
144 0, NULL);
145 if (!t10_alua_tg_pt_gp_mem_cache) {
146 pr_err("kmem_cache_create() for t10_alua_tg_pt_gp_"
c66ac9db 147 "mem_t failed\n");
35e0e757 148 goto out_free_tg_pt_gp_cache;
149 }
150
151 target_completion_wq = alloc_workqueue("target_completion",
152 WQ_MEM_RECLAIM, 0);
153 if (!target_completion_wq)
154 goto out_free_tg_pt_gp_mem_cache;
155
c66ac9db 156 return 0;
157
158out_free_tg_pt_gp_mem_cache:
159 kmem_cache_destroy(t10_alua_tg_pt_gp_mem_cache);
160out_free_tg_pt_gp_cache:
161 kmem_cache_destroy(t10_alua_tg_pt_gp_cache);
162out_free_lu_gp_mem_cache:
163 kmem_cache_destroy(t10_alua_lu_gp_mem_cache);
164out_free_lu_gp_cache:
165 kmem_cache_destroy(t10_alua_lu_gp_cache);
166out_free_pr_reg_cache:
167 kmem_cache_destroy(t10_pr_reg_cache);
168out_free_ua_cache:
169 kmem_cache_destroy(se_ua_cache);
170out_free_sess_cache:
171 kmem_cache_destroy(se_sess_cache);
172out_free_tmr_req_cache:
173 kmem_cache_destroy(se_tmr_req_cache);
c66ac9db 174out:
e3d6f909 175 return -ENOMEM;
176}
177
e3d6f909 178void release_se_kmem_caches(void)
c66ac9db 179{
35e0e757 180 destroy_workqueue(target_completion_wq);
181 kmem_cache_destroy(se_tmr_req_cache);
182 kmem_cache_destroy(se_sess_cache);
183 kmem_cache_destroy(se_ua_cache);
184 kmem_cache_destroy(t10_pr_reg_cache);
185 kmem_cache_destroy(t10_alua_lu_gp_cache);
186 kmem_cache_destroy(t10_alua_lu_gp_mem_cache);
187 kmem_cache_destroy(t10_alua_tg_pt_gp_cache);
188 kmem_cache_destroy(t10_alua_tg_pt_gp_mem_cache);
189}
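/*
 * Usage sketch (illustrative, not part of this file): init_se_kmem_caches()
 * and release_se_kmem_caches() are expected to be paired at subsystem
 * init/exit time, e.g. from a hypothetical module init path:
 *
 *	if (init_se_kmem_caches() < 0)
 *		return -ENOMEM;
 *	...
 *	release_se_kmem_caches();
 */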
190
191/* This code ensures unique mib indexes are handed out. */
192static DEFINE_SPINLOCK(scsi_mib_index_lock);
193static u32 scsi_mib_index[SCSI_INDEX_TYPE_MAX];
194
195/*
196 * Allocate a new row index for the entry type specified
197 */
198u32 scsi_get_new_index(scsi_index_t type)
199{
200 u32 new_index;
201
e3d6f909 202 BUG_ON((type < 0) || (type >= SCSI_INDEX_TYPE_MAX));
e89d15ee 203
204 spin_lock(&scsi_mib_index_lock);
205 new_index = ++scsi_mib_index[type];
206 spin_unlock(&scsi_mib_index_lock);
207
208 return new_index;
209}
210
e26d99ae 211static void transport_init_queue_obj(struct se_queue_obj *qobj)
212{
213 atomic_set(&qobj->queue_cnt, 0);
214 INIT_LIST_HEAD(&qobj->qobj_list);
215 init_waitqueue_head(&qobj->thread_wq);
216 spin_lock_init(&qobj->cmd_queue_lock);
217}
c66ac9db 218
dbc5623e 219void transport_subsystem_check_init(void)
220{
221 int ret;
222
223 if (sub_api_initialized)
224 return;
225
226 ret = request_module("target_core_iblock");
227 if (ret != 0)
6708bb27 228 pr_err("Unable to load target_core_iblock\n");
229
230 ret = request_module("target_core_file");
231 if (ret != 0)
6708bb27 232 pr_err("Unable to load target_core_file\n");
233
234 ret = request_module("target_core_pscsi");
235 if (ret != 0)
6708bb27 236 pr_err("Unable to load target_core_pscsi\n");
237
238 ret = request_module("target_core_stgt");
239 if (ret != 0)
6708bb27 240 pr_err("Unable to load target_core_stgt\n");
c66ac9db 241
e3d6f909 242 sub_api_initialized = 1;
dbc5623e 243 return;
244}
245
246struct se_session *transport_init_session(void)
247{
248 struct se_session *se_sess;
249
250 se_sess = kmem_cache_zalloc(se_sess_cache, GFP_KERNEL);
251 if (!se_sess) {
252 pr_err("Unable to allocate struct se_session from"
253 " se_sess_cache\n");
254 return ERR_PTR(-ENOMEM);
255 }
256 INIT_LIST_HEAD(&se_sess->sess_list);
257 INIT_LIST_HEAD(&se_sess->sess_acl_list);
258 INIT_LIST_HEAD(&se_sess->sess_cmd_list);
259 INIT_LIST_HEAD(&se_sess->sess_wait_list);
260 spin_lock_init(&se_sess->sess_cmd_lock);
261
262 return se_sess;
263}
264EXPORT_SYMBOL(transport_init_session);
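/*
 * Caller sketch (assumption): transport_init_session() reports failure via
 * ERR_PTR(), so fabric callers are expected to check the result with
 * IS_ERR()/PTR_ERR() rather than testing for NULL:
 *
 *	se_sess = transport_init_session();
 *	if (IS_ERR(se_sess))
 *		return PTR_ERR(se_sess);
 */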
265
266/*
267 * Called with spin_lock_bh(&struct se_portal_group->session_lock) held.
268 */
269void __transport_register_session(
270 struct se_portal_group *se_tpg,
271 struct se_node_acl *se_nacl,
272 struct se_session *se_sess,
273 void *fabric_sess_ptr)
274{
275 unsigned char buf[PR_REG_ISID_LEN];
276
277 se_sess->se_tpg = se_tpg;
278 se_sess->fabric_sess_ptr = fabric_sess_ptr;
279 /*
280 * Used by struct se_node_acl's under ConfigFS to locate the active struct se_session.
281 *
282 * Only set for struct se_session's that will actually be moving I/O.
283 * eg: *NOT* discovery sessions.
284 */
285 if (se_nacl) {
286 /*
287 * If the fabric module supports an ISID based TransportID,
288 * save this value in binary from the fabric I_T Nexus now.
289 */
e3d6f909 290 if (se_tpg->se_tpg_tfo->sess_get_initiator_sid != NULL) {
c66ac9db 291 memset(&buf[0], 0, PR_REG_ISID_LEN);
e3d6f909 292 se_tpg->se_tpg_tfo->sess_get_initiator_sid(se_sess,
293 &buf[0], PR_REG_ISID_LEN);
294 se_sess->sess_bin_isid = get_unaligned_be64(&buf[0]);
295 }
296 spin_lock_irq(&se_nacl->nacl_sess_lock);
297 /*
298 * The se_nacl->nacl_sess pointer will be set to the
299 * last active I_T Nexus for each struct se_node_acl.
300 */
301 se_nacl->nacl_sess = se_sess;
302
303 list_add_tail(&se_sess->sess_acl_list,
304 &se_nacl->acl_sess_list);
305 spin_unlock_irq(&se_nacl->nacl_sess_lock);
306 }
307 list_add_tail(&se_sess->sess_list, &se_tpg->tpg_sess_list);
308
6708bb27 309 pr_debug("TARGET_CORE[%s]: Registered fabric_sess_ptr: %p\n",
e3d6f909 310 se_tpg->se_tpg_tfo->get_fabric_name(), se_sess->fabric_sess_ptr);
311}
312EXPORT_SYMBOL(__transport_register_session);
313
314void transport_register_session(
315 struct se_portal_group *se_tpg,
316 struct se_node_acl *se_nacl,
317 struct se_session *se_sess,
318 void *fabric_sess_ptr)
319{
320 spin_lock_bh(&se_tpg->session_lock);
321 __transport_register_session(se_tpg, se_nacl, se_sess, fabric_sess_ptr);
322 spin_unlock_bh(&se_tpg->session_lock);
323}
324EXPORT_SYMBOL(transport_register_session);
325
326void transport_deregister_session_configfs(struct se_session *se_sess)
327{
328 struct se_node_acl *se_nacl;
23388864 329 unsigned long flags;
330 /*
331 * Used by struct se_node_acl's under ConfigFS to locate active struct se_session
332 */
333 se_nacl = se_sess->se_node_acl;
6708bb27 334 if (se_nacl) {
23388864 335 spin_lock_irqsave(&se_nacl->nacl_sess_lock, flags);
336 list_del(&se_sess->sess_acl_list);
337 /*
338 * If the session list is empty, then clear the pointer.
339 * Otherwise, set the struct se_session pointer from the tail
340 * element of the per struct se_node_acl active session list.
341 */
342 if (list_empty(&se_nacl->acl_sess_list))
343 se_nacl->nacl_sess = NULL;
344 else {
345 se_nacl->nacl_sess = container_of(
346 se_nacl->acl_sess_list.prev,
347 struct se_session, sess_acl_list);
348 }
23388864 349 spin_unlock_irqrestore(&se_nacl->nacl_sess_lock, flags);
350 }
351}
352EXPORT_SYMBOL(transport_deregister_session_configfs);
353
354void transport_free_session(struct se_session *se_sess)
355{
356 kmem_cache_free(se_sess_cache, se_sess);
357}
358EXPORT_SYMBOL(transport_free_session);
359
360void transport_deregister_session(struct se_session *se_sess)
361{
362 struct se_portal_group *se_tpg = se_sess->se_tpg;
363 struct se_node_acl *se_nacl;
e63a8e19 364 unsigned long flags;
c66ac9db 365
6708bb27 366 if (!se_tpg) {
367 transport_free_session(se_sess);
368 return;
369 }
c66ac9db 370
e63a8e19 371 spin_lock_irqsave(&se_tpg->session_lock, flags);
372 list_del(&se_sess->sess_list);
373 se_sess->se_tpg = NULL;
374 se_sess->fabric_sess_ptr = NULL;
e63a8e19 375 spin_unlock_irqrestore(&se_tpg->session_lock, flags);
376
377 /*
378 * Determine if we need to do extra work for this initiator node's
379 * struct se_node_acl if it had been previously dynamically generated.
380 */
381 se_nacl = se_sess->se_node_acl;
6708bb27 382 if (se_nacl) {
e63a8e19 383 spin_lock_irqsave(&se_tpg->acl_node_lock, flags);
c66ac9db 384 if (se_nacl->dynamic_node_acl) {
385 if (!se_tpg->se_tpg_tfo->tpg_check_demo_mode_cache(
386 se_tpg)) {
387 list_del(&se_nacl->acl_list);
388 se_tpg->num_node_acls--;
e63a8e19 389 spin_unlock_irqrestore(&se_tpg->acl_node_lock, flags);
390
391 core_tpg_wait_for_nacl_pr_ref(se_nacl);
c66ac9db 392 core_free_device_list_for_node(se_nacl, se_tpg);
e3d6f909 393 se_tpg->se_tpg_tfo->tpg_release_fabric_acl(se_tpg,
c66ac9db 394 se_nacl);
e63a8e19 395 spin_lock_irqsave(&se_tpg->acl_node_lock, flags);
396 }
397 }
e63a8e19 398 spin_unlock_irqrestore(&se_tpg->acl_node_lock, flags);
399 }
400
401 transport_free_session(se_sess);
402
6708bb27 403 pr_debug("TARGET_CORE[%s]: Deregistered fabric_sess\n",
e3d6f909 404 se_tpg->se_tpg_tfo->get_fabric_name());
405}
406EXPORT_SYMBOL(transport_deregister_session);
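/*
 * Session lifecycle sketch (illustrative; my_tpg and my_conn are
 * hypothetical fabric objects): a fabric module typically brackets an
 * I_T nexus with the register/deregister helpers above:
 *
 *	se_sess = transport_init_session();
 *	if (IS_ERR(se_sess))
 *		return PTR_ERR(se_sess);
 *	transport_register_session(&my_tpg->se_tpg, se_nacl, se_sess, my_conn);
 *	...
 *	transport_deregister_session_configfs(se_sess);
 *	transport_deregister_session(se_sess);
 */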
407
408/*
a1d8b49a 409 * Called with cmd->t_state_lock held.
410 */
411static void transport_all_task_dev_remove_state(struct se_cmd *cmd)
412{
42bf829e 413 struct se_device *dev = cmd->se_dev;
414 struct se_task *task;
415 unsigned long flags;
416
417 if (!dev)
418 return;
c66ac9db 419
42bf829e 420 list_for_each_entry(task, &cmd->t_task_list, t_list) {
6c76bf95 421 if (task->task_flags & TF_ACTIVE)
422 continue;
423
c66ac9db 424 spin_lock_irqsave(&dev->execute_task_lock, flags);
425 if (task->t_state_active) {
426 pr_debug("Removed ITT: 0x%08x dev: %p task[%p]\n",
427 cmd->se_tfo->get_task_tag(cmd), dev, task);
c66ac9db 428
429 list_del(&task->t_state_list);
430 atomic_dec(&cmd->t_task_cdbs_ex_left);
431 task->t_state_active = false;
432 }
433 spin_unlock_irqrestore(&dev->execute_task_lock, flags);
c66ac9db 434 }
436}
437
438/* transport_cmd_check_stop():
439 *
440 * 'transport_off = 1' determines if t_transport_active should be cleared.
441 * 'transport_off = 2' determines if task_dev_state should be removed.
442 *
443 * A non-zero u8 t_state sets cmd->t_state.
444 * Returns 1 when command is stopped, else 0.
445 */
446static int transport_cmd_check_stop(
447 struct se_cmd *cmd,
448 int transport_off,
449 u8 t_state)
450{
451 unsigned long flags;
452
a1d8b49a 453 spin_lock_irqsave(&cmd->t_state_lock, flags);
454 /*
455 * Determine if an IOCTL context caller is requesting the stopping of this
456 * command for LUN shutdown purposes.
457 */
a1d8b49a 458 if (atomic_read(&cmd->transport_lun_stop)) {
6708bb27 459 pr_debug("%s:%d atomic_read(&cmd->transport_lun_stop)"
c66ac9db 460 " == TRUE for ITT: 0x%08x\n", __func__, __LINE__,
e3d6f909 461 cmd->se_tfo->get_task_tag(cmd));
c66ac9db 462
a1d8b49a 463 atomic_set(&cmd->t_transport_active, 0);
464 if (transport_off == 2)
465 transport_all_task_dev_remove_state(cmd);
a1d8b49a 466 spin_unlock_irqrestore(&cmd->t_state_lock, flags);
c66ac9db 467
a1d8b49a 468 complete(&cmd->transport_lun_stop_comp);
469 return 1;
470 }
471 /*
472 * Determine if frontend context caller is requesting the stopping of
e3d6f909 473 * this command for frontend exceptions.
c66ac9db 474 */
a1d8b49a 475 if (atomic_read(&cmd->t_transport_stop)) {
6708bb27 476 pr_debug("%s:%d atomic_read(&cmd->t_transport_stop) =="
c66ac9db 477 " TRUE for ITT: 0x%08x\n", __func__, __LINE__,
e3d6f909 478 cmd->se_tfo->get_task_tag(cmd));
c66ac9db 479
480 if (transport_off == 2)
481 transport_all_task_dev_remove_state(cmd);
482
483 /*
484 * Clear struct se_cmd->se_lun before the transport_off == 2 handoff
485 * to FE.
486 */
487 if (transport_off == 2)
488 cmd->se_lun = NULL;
a1d8b49a 489 spin_unlock_irqrestore(&cmd->t_state_lock, flags);
c66ac9db 490
a1d8b49a 491 complete(&cmd->t_transport_stop_comp);
492 return 1;
493 }
494 if (transport_off) {
a1d8b49a 495 atomic_set(&cmd->t_transport_active, 0);
496 if (transport_off == 2) {
497 transport_all_task_dev_remove_state(cmd);
498 /*
499 * Clear struct se_cmd->se_lun before the transport_off == 2
500 * handoff to fabric module.
501 */
502 cmd->se_lun = NULL;
503 /*
504 * Some fabric modules like tcm_loop can release
505 * their internally allocated I/O reference and
506 * struct se_cmd now.
507 *
508 * Fabric modules are expected to return '1' here if the
509 * se_cmd being passed is released at this point,
510 * or zero if not being released.
c66ac9db 511 */
e3d6f909 512 if (cmd->se_tfo->check_stop_free != NULL) {
c66ac9db 513 spin_unlock_irqrestore(
a1d8b49a 514 &cmd->t_state_lock, flags);
c66ac9db 515
88dd9e26 516 return cmd->se_tfo->check_stop_free(cmd);
517 }
518 }
a1d8b49a 519 spin_unlock_irqrestore(&cmd->t_state_lock, flags);
520
521 return 0;
522 } else if (t_state)
523 cmd->t_state = t_state;
a1d8b49a 524 spin_unlock_irqrestore(&cmd->t_state_lock, flags);
525
526 return 0;
527}
528
529static int transport_cmd_check_stop_to_fabric(struct se_cmd *cmd)
530{
531 return transport_cmd_check_stop(cmd, 2, 0);
532}
533
534static void transport_lun_remove_cmd(struct se_cmd *cmd)
535{
e3d6f909 536 struct se_lun *lun = cmd->se_lun;
537 unsigned long flags;
538
539 if (!lun)
540 return;
541
a1d8b49a 542 spin_lock_irqsave(&cmd->t_state_lock, flags);
6708bb27 543 if (!atomic_read(&cmd->transport_dev_active)) {
a1d8b49a 544 spin_unlock_irqrestore(&cmd->t_state_lock, flags);
545 goto check_lun;
546 }
a1d8b49a 547 atomic_set(&cmd->transport_dev_active, 0);
c66ac9db 548 transport_all_task_dev_remove_state(cmd);
a1d8b49a 549 spin_unlock_irqrestore(&cmd->t_state_lock, flags);
c66ac9db 550
551
552check_lun:
553 spin_lock_irqsave(&lun->lun_cmd_lock, flags);
a1d8b49a 554 if (atomic_read(&cmd->transport_lun_active)) {
5951146d 555 list_del(&cmd->se_lun_node);
a1d8b49a 556 atomic_set(&cmd->transport_lun_active, 0);
c66ac9db 557#if 0
558 pr_debug("Removed ITT: 0x%08x from LUN LIST[%d]\n",
e3d6f909 559 cmd->se_tfo->get_task_tag(cmd), lun->unpacked_lun);
560#endif
561 }
562 spin_unlock_irqrestore(&lun->lun_cmd_lock, flags);
563}
564
565void transport_cmd_finish_abort(struct se_cmd *cmd, int remove)
566{
567 if (!cmd->se_tmr_req)
568 transport_lun_remove_cmd(cmd);
569
570 if (transport_cmd_check_stop_to_fabric(cmd))
571 return;
77039d1e 572 if (remove) {
3df8d40b 573 transport_remove_cmd_from_queue(cmd);
e6a2573f 574 transport_put_cmd(cmd);
77039d1e 575 }
576}
577
578static void transport_add_cmd_to_queue(struct se_cmd *cmd, int t_state,
579 bool at_head)
580{
581 struct se_device *dev = cmd->se_dev;
e3d6f909 582 struct se_queue_obj *qobj = &dev->dev_queue_obj;
583 unsigned long flags;
584
c66ac9db 585 if (t_state) {
a1d8b49a 586 spin_lock_irqsave(&cmd->t_state_lock, flags);
c66ac9db 587 cmd->t_state = t_state;
588 atomic_set(&cmd->t_transport_active, 1);
589 spin_unlock_irqrestore(&cmd->t_state_lock, flags);
590 }
591
592 spin_lock_irqsave(&qobj->cmd_queue_lock, flags);
593
594 /* If the cmd is already on the list, remove it before we add it */
595 if (!list_empty(&cmd->se_queue_node))
596 list_del(&cmd->se_queue_node);
597 else
598 atomic_inc(&qobj->queue_cnt);
599
f7a5cc0b 600 if (at_head)
07bde79a 601 list_add(&cmd->se_queue_node, &qobj->qobj_list);
f7a5cc0b 602 else
07bde79a 603 list_add_tail(&cmd->se_queue_node, &qobj->qobj_list);
79a7fef2 604 atomic_set(&cmd->t_transport_queue_active, 1);
605 spin_unlock_irqrestore(&qobj->cmd_queue_lock, flags);
606
c66ac9db 607 wake_up_interruptible(&qobj->thread_wq);
608}
609
610static struct se_cmd *
611transport_get_cmd_from_queue(struct se_queue_obj *qobj)
c66ac9db 612{
5951146d 613 struct se_cmd *cmd;
614 unsigned long flags;
615
616 spin_lock_irqsave(&qobj->cmd_queue_lock, flags);
617 if (list_empty(&qobj->qobj_list)) {
618 spin_unlock_irqrestore(&qobj->cmd_queue_lock, flags);
619 return NULL;
620 }
5951146d 621 cmd = list_first_entry(&qobj->qobj_list, struct se_cmd, se_queue_node);
c66ac9db 622
79a7fef2 623 atomic_set(&cmd->t_transport_queue_active, 0);
c66ac9db 624
79a7fef2 625 list_del_init(&cmd->se_queue_node);
626 atomic_dec(&qobj->queue_cnt);
627 spin_unlock_irqrestore(&qobj->cmd_queue_lock, flags);
628
5951146d 629 return cmd;
630}
631
3df8d40b 632static void transport_remove_cmd_from_queue(struct se_cmd *cmd)
c66ac9db 633{
3df8d40b 634 struct se_queue_obj *qobj = &cmd->se_dev->dev_queue_obj;
635 unsigned long flags;
636
637 spin_lock_irqsave(&qobj->cmd_queue_lock, flags);
6708bb27 638 if (!atomic_read(&cmd->t_transport_queue_active)) {
639 spin_unlock_irqrestore(&qobj->cmd_queue_lock, flags);
640 return;
641 }
642 atomic_set(&cmd->t_transport_queue_active, 0);
643 atomic_dec(&qobj->queue_cnt);
644 list_del_init(&cmd->se_queue_node);
645 spin_unlock_irqrestore(&qobj->cmd_queue_lock, flags);
646
a1d8b49a 647 if (atomic_read(&cmd->t_transport_queue_active)) {
6708bb27 648 pr_err("ITT: 0x%08x t_transport_queue_active: %d\n",
e3d6f909 649 cmd->se_tfo->get_task_tag(cmd),
a1d8b49a 650 atomic_read(&cmd->t_transport_queue_active));
651 }
652}
653
654/*
655 * Completion function used by TCM subsystem plugins (such as FILEIO)
656 * for queueing up response from struct se_subsystem_api->do_task()
657 */
658void transport_complete_sync_cache(struct se_cmd *cmd, int good)
659{
a1d8b49a 660 struct se_task *task = list_entry(cmd->t_task_list.next,
661 struct se_task, t_list);
662
663 if (good) {
664 cmd->scsi_status = SAM_STAT_GOOD;
665 task->task_scsi_status = GOOD;
666 } else {
667 task->task_scsi_status = SAM_STAT_CHECK_CONDITION;
668 task->task_se_cmd->scsi_sense_reason =
669 TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
670
671 }
672
673 transport_complete_task(task, good);
674}
675EXPORT_SYMBOL(transport_complete_sync_cache);
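/*
 * Backend usage sketch (assumption, not taken from this file): a
 * file-backed plugin would report SYNCHRONIZE_CACHE completion roughly as:
 *
 *	int ret = vfs_fsync_range(file, start, end, 1);
 *	transport_complete_sync_cache(cmd, ret == 0);
 */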
676
677static void target_complete_failure_work(struct work_struct *work)
678{
679 struct se_cmd *cmd = container_of(work, struct se_cmd, work);
680
03e98c9e 681 transport_generic_request_failure(cmd);
682}
683
684/* transport_complete_task():
685 *
686 * Called from interrupt and non interrupt context depending
687 * on the transport plugin.
688 */
689void transport_complete_task(struct se_task *task, int success)
690{
e3d6f909 691 struct se_cmd *cmd = task->task_se_cmd;
42bf829e 692 struct se_device *dev = cmd->se_dev;
c66ac9db 693 unsigned long flags;
c66ac9db 694
a1d8b49a 695 spin_lock_irqsave(&cmd->t_state_lock, flags);
6c76bf95 696 task->task_flags &= ~TF_ACTIVE;
697
698 /*
699 * See if any sense data exists, if so set the TASK_SENSE flag.
700 * Also check for any other post completion work that needs to be
701 * done by the plugins.
702 */
703 if (dev && dev->transport->transport_complete) {
704 if (dev->transport->transport_complete(task) != 0) {
705 cmd->se_cmd_flags |= SCF_TRANSPORT_TASK_SENSE;
ef804a84 706 task->task_flags |= TF_HAS_SENSE;
707 success = 1;
708 }
709 }
710
711 /*
712 * See if we are waiting for outstanding struct se_task
713 * to complete for an exception condition
714 */
6c76bf95 715 if (task->task_flags & TF_REQUEST_STOP) {
a1d8b49a 716 spin_unlock_irqrestore(&cmd->t_state_lock, flags);
717 complete(&task->task_stop_comp);
718 return;
719 }
720
721 if (!success)
722 cmd->t_tasks_failed = 1;
723
724 /*
725 * Decrement the outstanding t_task_cdbs_left count. The last
726 * struct se_task from struct se_cmd will complete itself into the
727 * device queue depending upon int success.
728 */
6708bb27 729 if (!atomic_dec_and_test(&cmd->t_task_cdbs_left)) {
a1d8b49a 730 spin_unlock_irqrestore(&cmd->t_state_lock, flags);
731 return;
732 }
733
2235007c 734 if (cmd->t_tasks_failed) {
41e16e98 735 cmd->scsi_sense_reason = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
35e0e757 736 INIT_WORK(&cmd->work, target_complete_failure_work);
c66ac9db 737 } else {
a1d8b49a 738 atomic_set(&cmd->t_transport_complete, 1);
35e0e757 739 INIT_WORK(&cmd->work, target_complete_ok_work);
c66ac9db 740 }
35e0e757 741
742 cmd->t_state = TRANSPORT_COMPLETE;
743 atomic_set(&cmd->t_transport_active, 1);
a1d8b49a 744 spin_unlock_irqrestore(&cmd->t_state_lock, flags);
c66ac9db 745
35e0e757 746 queue_work(target_completion_wq, &cmd->work);
747}
748EXPORT_SYMBOL(transport_complete_task);
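/*
 * Backend usage sketch (assumption): a plugin's per-task I/O completion
 * path calls this once per struct se_task, with success non-zero only when
 * the request finished cleanly:
 *
 *	transport_complete_task(task, !error);
 */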
749
750/*
751 * Called by transport_add_tasks_from_cmd() once a struct se_cmd's
752 * struct se_task list is ready to be added to the active execution list
753 * of the struct se_device.
754 *
755 * Called with se_dev_t->execute_task_lock held.
756 */
757static inline int transport_add_task_check_sam_attr(
758 struct se_task *task,
759 struct se_task *task_prev,
760 struct se_device *dev)
761{
762 /*
763 * No SAM Task attribute emulation enabled, add to tail of
764 * execution queue
765 */
766 if (dev->dev_task_attr_type != SAM_TASK_ATTR_EMULATED) {
767 list_add_tail(&task->t_execute_list, &dev->execute_task_list);
768 return 0;
769 }
770 /*
771 * HEAD_OF_QUEUE attribute for received CDB, which means
772 * the first task that is associated with a struct se_cmd goes to
773 * head of the struct se_device->execute_task_list, and task_prev
774 * after that for each subsequent task
775 */
e66ecd50 776 if (task->task_se_cmd->sam_task_attr == MSG_HEAD_TAG) {
777 list_add(&task->t_execute_list,
778 (task_prev != NULL) ?
779 &task_prev->t_execute_list :
780 &dev->execute_task_list);
781
6708bb27 782 pr_debug("Set HEAD_OF_QUEUE for task CDB: 0x%02x"
c66ac9db 783 " in execution queue\n",
6708bb27 784 task->task_se_cmd->t_task_cdb[0]);
785 return 1;
786 }
787 /*
788 * For ORDERED, SIMPLE or UNTAGGED attribute tasks once they have been
789 * transitioned from Dormant -> Active state, and are added to the end
790 * of the struct se_device->execute_task_list
791 */
792 list_add_tail(&task->t_execute_list, &dev->execute_task_list);
793 return 0;
794}
795
796/* __transport_add_task_to_execute_queue():
797 *
798 * Called with se_dev_t->execute_task_lock held.
799 */
800static void __transport_add_task_to_execute_queue(
801 struct se_task *task,
802 struct se_task *task_prev,
803 struct se_device *dev)
804{
805 int head_of_queue;
806
807 head_of_queue = transport_add_task_check_sam_attr(task, task_prev, dev);
808 atomic_inc(&dev->execute_tasks);
809
1880807a 810 if (task->t_state_active)
811 return;
812 /*
813 * Determine if this task needs to go to HEAD_OF_QUEUE for the
814 * state list as well. Running with SAM Task Attribute emulation
815 * will always return head_of_queue == 0 here
816 */
817 if (head_of_queue)
818 list_add(&task->t_state_list, (task_prev) ?
819 &task_prev->t_state_list :
820 &dev->state_task_list);
821 else
822 list_add_tail(&task->t_state_list, &dev->state_task_list);
823
1880807a 824 task->t_state_active = true;
c66ac9db 825
6708bb27 826 pr_debug("Added ITT: 0x%08x task[%p] to dev: %p\n",
e3d6f909 827 task->task_se_cmd->se_tfo->get_task_tag(task->task_se_cmd),
828 task, dev);
829}
830
831static void transport_add_tasks_to_state_queue(struct se_cmd *cmd)
832{
42bf829e 833 struct se_device *dev = cmd->se_dev;
834 struct se_task *task;
835 unsigned long flags;
836
837 spin_lock_irqsave(&cmd->t_state_lock, flags);
838 list_for_each_entry(task, &cmd->t_task_list, t_list) {
c66ac9db 839 spin_lock(&dev->execute_task_lock);
840 if (!task->t_state_active) {
841 list_add_tail(&task->t_state_list,
842 &dev->state_task_list);
843 task->t_state_active = true;
844
845 pr_debug("Added ITT: 0x%08x task[%p] to dev: %p\n",
846 task->task_se_cmd->se_tfo->get_task_tag(
847 task->task_se_cmd), task, dev);
848 }
849 spin_unlock(&dev->execute_task_lock);
850 }
a1d8b49a 851 spin_unlock_irqrestore(&cmd->t_state_lock, flags);
852}
853
4d2300cc 854static void __transport_add_tasks_from_cmd(struct se_cmd *cmd)
c66ac9db 855{
5951146d 856 struct se_device *dev = cmd->se_dev;
c66ac9db 857 struct se_task *task, *task_prev = NULL;
c66ac9db 858
a1d8b49a 859 list_for_each_entry(task, &cmd->t_task_list, t_list) {
04629b7b 860 if (!list_empty(&task->t_execute_list))
861 continue;
862 /*
863 * __transport_add_task_to_execute_queue() handles the
864 * SAM Task Attribute emulation if enabled
865 */
866 __transport_add_task_to_execute_queue(task, task_prev, dev);
867 task_prev = task;
868 }
869}
870
871static void transport_add_tasks_from_cmd(struct se_cmd *cmd)
872{
873 unsigned long flags;
874 struct se_device *dev = cmd->se_dev;
875
876 spin_lock_irqsave(&dev->execute_task_lock, flags);
877 __transport_add_tasks_from_cmd(cmd);
c66ac9db 878 spin_unlock_irqrestore(&dev->execute_task_lock, flags);
879}
880
881void __transport_remove_task_from_execute_queue(struct se_task *task,
882 struct se_device *dev)
883{
884 list_del_init(&task->t_execute_list);
885 atomic_dec(&dev->execute_tasks);
886}
887
e26d99ae 888static void transport_remove_task_from_execute_queue(
889 struct se_task *task,
890 struct se_device *dev)
891{
892 unsigned long flags;
893
04629b7b 894 if (WARN_ON(list_empty(&task->t_execute_list)))
af57c3ac 895 return;
af57c3ac 896
c66ac9db 897 spin_lock_irqsave(&dev->execute_task_lock, flags);
04629b7b 898 __transport_remove_task_from_execute_queue(task, dev);
899 spin_unlock_irqrestore(&dev->execute_task_lock, flags);
900}
901
07bde79a 902/*
f147abb4 903 * Handle QUEUE_FULL / -EAGAIN and -ENOMEM status
904 */
905
906static void target_qf_do_work(struct work_struct *work)
907{
908 struct se_device *dev = container_of(work, struct se_device,
909 qf_work_queue);
bcac364a 910 LIST_HEAD(qf_cmd_list);
911 struct se_cmd *cmd, *cmd_tmp;
912
913 spin_lock_irq(&dev->qf_cmd_lock);
914 list_splice_init(&dev->qf_cmd_list, &qf_cmd_list);
915 spin_unlock_irq(&dev->qf_cmd_lock);
07bde79a 916
bcac364a 917 list_for_each_entry_safe(cmd, cmd_tmp, &qf_cmd_list, se_qf_node) {
918 list_del(&cmd->se_qf_node);
919 atomic_dec(&dev->dev_qf_count);
920 smp_mb__after_atomic_dec();
07bde79a 921
6708bb27 922 pr_debug("Processing %s cmd: %p QUEUE_FULL in work queue"
07bde79a 923 " context: %s\n", cmd->se_tfo->get_fabric_name(), cmd,
e057f533 924 (cmd->t_state == TRANSPORT_COMPLETE_QF_OK) ? "COMPLETE_OK" :
925 (cmd->t_state == TRANSPORT_COMPLETE_QF_WP) ? "WRITE_PENDING"
926 : "UNKNOWN");
927
928 transport_add_cmd_to_queue(cmd, cmd->t_state, true);
07bde79a 929 }
930}
931
932unsigned char *transport_dump_cmd_direction(struct se_cmd *cmd)
933{
934 switch (cmd->data_direction) {
935 case DMA_NONE:
936 return "NONE";
937 case DMA_FROM_DEVICE:
938 return "READ";
939 case DMA_TO_DEVICE:
940 return "WRITE";
941 case DMA_BIDIRECTIONAL:
942 return "BIDI";
943 default:
944 break;
945 }
946
947 return "UNKNOWN";
948}
949
950void transport_dump_dev_state(
951 struct se_device *dev,
952 char *b,
953 int *bl)
954{
955 *bl += sprintf(b + *bl, "Status: ");
956 switch (dev->dev_status) {
957 case TRANSPORT_DEVICE_ACTIVATED:
958 *bl += sprintf(b + *bl, "ACTIVATED");
959 break;
960 case TRANSPORT_DEVICE_DEACTIVATED:
961 *bl += sprintf(b + *bl, "DEACTIVATED");
962 break;
963 case TRANSPORT_DEVICE_SHUTDOWN:
964 *bl += sprintf(b + *bl, "SHUTDOWN");
965 break;
966 case TRANSPORT_DEVICE_OFFLINE_ACTIVATED:
967 case TRANSPORT_DEVICE_OFFLINE_DEACTIVATED:
968 *bl += sprintf(b + *bl, "OFFLINE");
969 break;
970 default:
971 *bl += sprintf(b + *bl, "UNKNOWN=%d", dev->dev_status);
972 break;
973 }
974
975 *bl += sprintf(b + *bl, " Execute/Max Queue Depth: %d/%d",
976 atomic_read(&dev->execute_tasks), dev->queue_depth);
c66ac9db 977 *bl += sprintf(b + *bl, " SectorSize: %u MaxSectors: %u\n",
e3d6f909 978 dev->se_sub_dev->se_dev_attrib.block_size, dev->se_sub_dev->se_dev_attrib.max_sectors);
979 *bl += sprintf(b + *bl, " ");
980}
981
982void transport_dump_vpd_proto_id(
983 struct t10_vpd *vpd,
984 unsigned char *p_buf,
985 int p_buf_len)
986{
987 unsigned char buf[VPD_TMP_BUF_SIZE];
988 int len;
989
990 memset(buf, 0, VPD_TMP_BUF_SIZE);
991 len = sprintf(buf, "T10 VPD Protocol Identifier: ");
992
993 switch (vpd->protocol_identifier) {
994 case 0x00:
995 sprintf(buf+len, "Fibre Channel\n");
996 break;
997 case 0x10:
998 sprintf(buf+len, "Parallel SCSI\n");
999 break;
1000 case 0x20:
1001 sprintf(buf+len, "SSA\n");
1002 break;
1003 case 0x30:
1004 sprintf(buf+len, "IEEE 1394\n");
1005 break;
1006 case 0x40:
1007 sprintf(buf+len, "SCSI Remote Direct Memory Access"
1008 " Protocol\n");
1009 break;
1010 case 0x50:
1011 sprintf(buf+len, "Internet SCSI (iSCSI)\n");
1012 break;
1013 case 0x60:
1014 sprintf(buf+len, "SAS Serial SCSI Protocol\n");
1015 break;
1016 case 0x70:
1017 sprintf(buf+len, "Automation/Drive Interface Transport"
1018 " Protocol\n");
1019 break;
1020 case 0x80:
1021 sprintf(buf+len, "AT Attachment Interface ATA/ATAPI\n");
1022 break;
1023 default:
1024 sprintf(buf+len, "Unknown 0x%02x\n",
1025 vpd->protocol_identifier);
1026 break;
1027 }
1028
1029 if (p_buf)
1030 strncpy(p_buf, buf, p_buf_len);
1031 else
6708bb27 1032 pr_debug("%s", buf);
1033}
1034
1035void
1036transport_set_vpd_proto_id(struct t10_vpd *vpd, unsigned char *page_83)
1037{
1038 /*
1039 * Check if the Protocol Identifier Valid (PIV) bit is set..
1040 *
1041 * from spc3r23.pdf section 7.5.1
1042 */
1043 if (page_83[1] & 0x80) {
1044 vpd->protocol_identifier = (page_83[0] & 0xf0);
1045 vpd->protocol_identifier_set = 1;
1046 transport_dump_vpd_proto_id(vpd, NULL, 0);
1047 }
1048}
1049EXPORT_SYMBOL(transport_set_vpd_proto_id);
1050
1051int transport_dump_vpd_assoc(
1052 struct t10_vpd *vpd,
1053 unsigned char *p_buf,
1054 int p_buf_len)
1055{
1056 unsigned char buf[VPD_TMP_BUF_SIZE];
1057 int ret = 0;
1058 int len;
1059
1060 memset(buf, 0, VPD_TMP_BUF_SIZE);
1061 len = sprintf(buf, "T10 VPD Identifier Association: ");
1062
1063 switch (vpd->association) {
1064 case 0x00:
1065 sprintf(buf+len, "addressed logical unit\n");
1066 break;
1067 case 0x10:
1068 sprintf(buf+len, "target port\n");
1069 break;
1070 case 0x20:
1071 sprintf(buf+len, "SCSI target device\n");
1072 break;
1073 default:
1074 sprintf(buf+len, "Unknown 0x%02x\n", vpd->association);
e3d6f909 1075 ret = -EINVAL;
1076 break;
1077 }
1078
1079 if (p_buf)
1080 strncpy(p_buf, buf, p_buf_len);
1081 else
6708bb27 1082 pr_debug("%s", buf);
1083
1084 return ret;
1085}
1086
1087int transport_set_vpd_assoc(struct t10_vpd *vpd, unsigned char *page_83)
1088{
1089 /*
1090 * The VPD identification association..
1091 *
1092 * from spc3r23.pdf Section 7.6.3.1 Table 297
1093 */
1094 vpd->association = (page_83[1] & 0x30);
1095 return transport_dump_vpd_assoc(vpd, NULL, 0);
1096}
1097EXPORT_SYMBOL(transport_set_vpd_assoc);
1098
1099int transport_dump_vpd_ident_type(
1100 struct t10_vpd *vpd,
1101 unsigned char *p_buf,
1102 int p_buf_len)
1103{
1104 unsigned char buf[VPD_TMP_BUF_SIZE];
1105 int ret = 0;
1106 int len;
1107
1108 memset(buf, 0, VPD_TMP_BUF_SIZE);
1109 len = sprintf(buf, "T10 VPD Identifier Type: ");
1110
1111 switch (vpd->device_identifier_type) {
1112 case 0x00:
1113 sprintf(buf+len, "Vendor specific\n");
1114 break;
1115 case 0x01:
1116 sprintf(buf+len, "T10 Vendor ID based\n");
1117 break;
1118 case 0x02:
1119 sprintf(buf+len, "EUI-64 based\n");
1120 break;
1121 case 0x03:
1122 sprintf(buf+len, "NAA\n");
1123 break;
1124 case 0x04:
1125 sprintf(buf+len, "Relative target port identifier\n");
1126 break;
1127 case 0x08:
1128 sprintf(buf+len, "SCSI name string\n");
1129 break;
1130 default:
1131 sprintf(buf+len, "Unsupported: 0x%02x\n",
1132 vpd->device_identifier_type);
e3d6f909 1133 ret = -EINVAL;
1134 break;
1135 }
1136
1137 if (p_buf) {
1138 if (p_buf_len < strlen(buf)+1)
1139 return -EINVAL;
c66ac9db 1140 strncpy(p_buf, buf, p_buf_len);
e3d6f909 1141 } else {
6708bb27 1142 pr_debug("%s", buf);
e3d6f909 1143 }
1144
1145 return ret;
1146}
1147
1148int transport_set_vpd_ident_type(struct t10_vpd *vpd, unsigned char *page_83)
1149{
1150 /*
1151 * The VPD identifier type..
1152 *
1153 * from spc3r23.pdf Section 7.6.3.1 Table 298
1154 */
1155 vpd->device_identifier_type = (page_83[1] & 0x0f);
1156 return transport_dump_vpd_ident_type(vpd, NULL, 0);
1157}
1158EXPORT_SYMBOL(transport_set_vpd_ident_type);
1159
1160int transport_dump_vpd_ident(
1161 struct t10_vpd *vpd,
1162 unsigned char *p_buf,
1163 int p_buf_len)
1164{
1165 unsigned char buf[VPD_TMP_BUF_SIZE];
1166 int ret = 0;
1167
1168 memset(buf, 0, VPD_TMP_BUF_SIZE);
1169
1170 switch (vpd->device_identifier_code_set) {
1171 case 0x01: /* Binary */
1172 sprintf(buf, "T10 VPD Binary Device Identifier: %s\n",
1173 &vpd->device_identifier[0]);
1174 break;
1175 case 0x02: /* ASCII */
1176 sprintf(buf, "T10 VPD ASCII Device Identifier: %s\n",
1177 &vpd->device_identifier[0]);
1178 break;
1179 case 0x03: /* UTF-8 */
1180 sprintf(buf, "T10 VPD UTF-8 Device Identifier: %s\n",
1181 &vpd->device_identifier[0]);
1182 break;
1183 default:
1184 sprintf(buf, "T10 VPD Device Identifier encoding unsupported:"
1185 " 0x%02x", vpd->device_identifier_code_set);
e3d6f909 1186 ret = -EINVAL;
1187 break;
1188 }
1189
1190 if (p_buf)
1191 strncpy(p_buf, buf, p_buf_len);
1192 else
6708bb27 1193 pr_debug("%s", buf);
1194
1195 return ret;
1196}
1197
1198int
1199transport_set_vpd_ident(struct t10_vpd *vpd, unsigned char *page_83)
1200{
1201 static const char hex_str[] = "0123456789abcdef";
1202 int j = 0, i = 4; /* offset to start of the identifier */
1203
1204 /*
1205 * The VPD Code Set (encoding)
1206 *
1207 * from spc3r23.pdf Section 7.6.3.1 Table 296
1208 */
1209 vpd->device_identifier_code_set = (page_83[0] & 0x0f);
1210 switch (vpd->device_identifier_code_set) {
1211 case 0x01: /* Binary */
1212 vpd->device_identifier[j++] =
1213 hex_str[vpd->device_identifier_type];
1214 while (i < (4 + page_83[3])) {
1215 vpd->device_identifier[j++] =
1216 hex_str[(page_83[i] & 0xf0) >> 4];
1217 vpd->device_identifier[j++] =
1218 hex_str[page_83[i] & 0x0f];
1219 i++;
1220 }
1221 break;
1222 case 0x02: /* ASCII */
1223 case 0x03: /* UTF-8 */
1224 while (i < (4 + page_83[3]))
1225 vpd->device_identifier[j++] = page_83[i++];
1226 break;
1227 default:
1228 break;
1229 }
1230
1231 return transport_dump_vpd_ident(vpd, NULL, 0);
1232}
1233EXPORT_SYMBOL(transport_set_vpd_ident);
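/*
 * Parsing sketch (illustrative): for a designation descriptor "page_83"
 * taken from an INQUIRY EVPD 0x83 response, the setters above are applied
 * in sequence to populate a struct t10_vpd:
 *
 *	transport_set_vpd_proto_id(vpd, page_83);
 *	transport_set_vpd_assoc(vpd, page_83);
 *	transport_set_vpd_ident_type(vpd, page_83);
 *	transport_set_vpd_ident(vpd, page_83);
 */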
1234
1235static void core_setup_task_attr_emulation(struct se_device *dev)
1236{
1237 /*
1238 * If this device is from Target_Core_Mod/pSCSI, disable the
1239 * SAM Task Attribute emulation.
1240 *
1241 * This is currently not available in upstream Linux/SCSI Target
1242 * mode code, and is assumed to be disabled while using TCM/pSCSI.
1243 */
e3d6f909 1244 if (dev->transport->transport_type == TRANSPORT_PLUGIN_PHBA_PDEV) {
1245 dev->dev_task_attr_type = SAM_TASK_ATTR_PASSTHROUGH;
1246 return;
1247 }
1248
1249 dev->dev_task_attr_type = SAM_TASK_ATTR_EMULATED;
6708bb27 1250 pr_debug("%s: Using SAM_TASK_ATTR_EMULATED for SPC: 0x%02x"
1251 " device\n", dev->transport->name,
1252 dev->transport->get_device_rev(dev));
1253}
1254
1255static void scsi_dump_inquiry(struct se_device *dev)
1256{
e3d6f909 1257 struct t10_wwn *wwn = &dev->se_sub_dev->t10_wwn;
e59a41b6 1258 char buf[17];
1259 int i, device_type;
1260 /*
1261 * Print Linux/SCSI style INQUIRY formatting to the kernel ring buffer
1262 */
1263 for (i = 0; i < 8; i++)
1264 if (wwn->vendor[i] >= 0x20)
e59a41b6 1265 buf[i] = wwn->vendor[i];
c66ac9db 1266 else
1267 buf[i] = ' ';
1268 buf[i] = '\0';
1269 pr_debug(" Vendor: %s\n", buf);
c66ac9db 1270
1271 for (i = 0; i < 16; i++)
1272 if (wwn->model[i] >= 0x20)
e59a41b6 1273 buf[i] = wwn->model[i];
c66ac9db 1274 else
1275 buf[i] = ' ';
1276 buf[i] = '\0';
1277 pr_debug(" Model: %s\n", buf);
c66ac9db 1278
1279 for (i = 0; i < 4; i++)
1280 if (wwn->revision[i] >= 0x20)
e59a41b6 1281 buf[i] = wwn->revision[i];
c66ac9db 1282 else
1283 buf[i] = ' ';
1284 buf[i] = '\0';
1285 pr_debug(" Revision: %s\n", buf);
c66ac9db 1286
e3d6f909 1287 device_type = dev->transport->get_device_type(dev);
1288 pr_debug(" Type: %s ", scsi_device_type(device_type));
1289 pr_debug(" ANSI SCSI revision: %02x\n",
e3d6f909 1290 dev->transport->get_device_rev(dev));
1291}
1292
1293struct se_device *transport_add_device_to_core_hba(
1294 struct se_hba *hba,
1295 struct se_subsystem_api *transport,
1296 struct se_subsystem_dev *se_dev,
1297 u32 device_flags,
1298 void *transport_dev,
1299 struct se_dev_limits *dev_limits,
1300 const char *inquiry_prod,
1301 const char *inquiry_rev)
1302{
12a18bdc 1303 int force_pt;
1304 struct se_device *dev;
1305
1306 dev = kzalloc(sizeof(struct se_device), GFP_KERNEL);
1307 if (!dev) {
1308 pr_err("Unable to allocate memory for se_dev_t\n");
1309 return NULL;
1310 }
c66ac9db 1311
e3d6f909 1312 transport_init_queue_obj(&dev->dev_queue_obj);
1313 dev->dev_flags = device_flags;
1314 dev->dev_status |= TRANSPORT_DEVICE_DEACTIVATED;
5951146d 1315 dev->dev_ptr = transport_dev;
1316 dev->se_hba = hba;
1317 dev->se_sub_dev = se_dev;
1318 dev->transport = transport;
1319 INIT_LIST_HEAD(&dev->dev_list);
1320 INIT_LIST_HEAD(&dev->dev_sep_list);
1321 INIT_LIST_HEAD(&dev->dev_tmr_list);
1322 INIT_LIST_HEAD(&dev->execute_task_list);
1323 INIT_LIST_HEAD(&dev->delayed_cmd_list);
c66ac9db 1324 INIT_LIST_HEAD(&dev->state_task_list);
07bde79a 1325 INIT_LIST_HEAD(&dev->qf_cmd_list);
1326 spin_lock_init(&dev->execute_task_lock);
1327 spin_lock_init(&dev->delayed_cmd_lock);
1328 spin_lock_init(&dev->dev_reservation_lock);
1329 spin_lock_init(&dev->dev_status_lock);
1330 spin_lock_init(&dev->se_port_lock);
1331 spin_lock_init(&dev->se_tmr_lock);
07bde79a 1332 spin_lock_init(&dev->qf_cmd_lock);
1333 atomic_set(&dev->dev_ordered_id, 0);
1334
1335 se_dev_set_default_attribs(dev, dev_limits);
1336
1337 dev->dev_index = scsi_get_new_index(SCSI_DEVICE_INDEX);
1338 dev->creation_time = get_jiffies_64();
1339 spin_lock_init(&dev->stats_lock);
1340
1341 spin_lock(&hba->device_lock);
1342 list_add_tail(&dev->dev_list, &hba->hba_dev_list);
1343 hba->dev_count++;
1344 spin_unlock(&hba->device_lock);
1345 /*
1346 * Setup the SAM Task Attribute emulation for struct se_device
1347 */
1348 core_setup_task_attr_emulation(dev);
1349 /*
1350 * Force PR and ALUA passthrough emulation with internal object use.
1351 */
1352 force_pt = (hba->hba_flags & HBA_FLAGS_INTERNAL_USE);
1353 /*
1354 * Setup the Reservations infrastructure for struct se_device
1355 */
1356 core_setup_reservations(dev, force_pt);
1357 /*
1358 * Setup the Asymmetric Logical Unit Assignment for struct se_device
1359 */
1360 if (core_setup_alua(dev, force_pt) < 0)
1361 goto out;
1362
1363 /*
1364 * Startup the struct se_device processing thread
1365 */
1366 dev->process_thread = kthread_run(transport_processing_thread, dev,
e3d6f909 1367 "LIO_%s", dev->transport->name);
c66ac9db 1368 if (IS_ERR(dev->process_thread)) {
6708bb27 1369 pr_err("Unable to create kthread: LIO_%s\n",
e3d6f909 1370 dev->transport->name);
1371 goto out;
1372 }
1373 /*
1374 * Setup work_queue for QUEUE_FULL
1375 */
1376 INIT_WORK(&dev->qf_work_queue, target_qf_do_work);
1377 /*
1378 * Preload the initial INQUIRY const values if we are doing
1379 * anything virtual (IBLOCK, FILEIO, RAMDISK), but not for TCM/pSCSI
1380 * passthrough because this is being provided by the backend LLD.
1381 * This is required so that transport_get_inquiry() copies these
1382 * originals once back into DEV_T10_WWN(dev) for the virtual device
1383 * setup.
1384 */
e3d6f909 1385 if (dev->transport->transport_type != TRANSPORT_PLUGIN_PHBA_PDEV) {
f22c1196 1386 if (!inquiry_prod || !inquiry_rev) {
6708bb27 1387 pr_err("All non TCM/pSCSI plugins require"
1388 " INQUIRY consts\n");
1389 goto out;
1390 }
1391
1392 strncpy(&dev->se_sub_dev->t10_wwn.vendor[0], "LIO-ORG", 8);
1393 strncpy(&dev->se_sub_dev->t10_wwn.model[0], inquiry_prod, 16);
1394 strncpy(&dev->se_sub_dev->t10_wwn.revision[0], inquiry_rev, 4);
1395 }
1396 scsi_dump_inquiry(dev);
1397
12a18bdc 1398 return dev;
c66ac9db 1399out:
1400 kthread_stop(dev->process_thread);
1401
1402 spin_lock(&hba->device_lock);
1403 list_del(&dev->dev_list);
1404 hba->dev_count--;
1405 spin_unlock(&hba->device_lock);
1406
1407 se_release_vpd_for_dev(dev);
1408
1409 kfree(dev);
1410
1411 return NULL;
1412}
1413EXPORT_SYMBOL(transport_add_device_to_core_hba);
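/*
 * Call-shape sketch (illustrative; the my_* names are hypothetical backend
 * objects): a subsystem plugin creates its struct se_device through this
 * helper from its create_virtdevice() callback, roughly:
 *
 *	dev = transport_add_device_to_core_hba(hba, &my_subsystem_api,
 *			se_dev, my_dev_flags, my_dev_ptr, &dev_limits,
 *			"MY_PROD", "1.0");
 *	if (!dev)
 *		return ERR_PTR(-ENOMEM);
 */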
1414
1415/* transport_generic_prepare_cdb():
1416 *
1417 * Since the Initiator sees iSCSI devices as LUNs, the SCSI CDB will
1418 * contain the iSCSI LUN in bits 7-5 of byte 1 as per SAM-2.
1419 * The point of this is that, since we are mapping iSCSI LUNs to
1420 * SCSI Target IDs, a non-zero LUN in the CDB will throw the
1421 * devices and HBAs for a loop.
1422 */
1423static inline void transport_generic_prepare_cdb(
1424 unsigned char *cdb)
1425{
1426 switch (cdb[0]) {
1427 case READ_10: /* SBC - RDProtect */
1428 case READ_12: /* SBC - RDProtect */
1429 case READ_16: /* SBC - RDProtect */
1430 case SEND_DIAGNOSTIC: /* SPC - SELF-TEST Code */
1431 case VERIFY: /* SBC - VRProtect */
1432 case VERIFY_16: /* SBC - VRProtect */
1433 case WRITE_VERIFY: /* SBC - VRProtect */
1434 case WRITE_VERIFY_12: /* SBC - VRProtect */
1435 break;
1436 default:
1437 cdb[1] &= 0x1f; /* clear logical unit number */
1438 break;
1439 }
1440}
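/*
 * Worked example: a CDB whose byte 1 carries a SAM-2 encoded LUN in bits
 * 7-5, e.g. cdb[1] == 0x40, falls through to the default case above and is
 * masked with 0x1f, so the backend sees cdb[1] == 0x00.
 */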
1441
1442static struct se_task *
1443transport_generic_get_task(struct se_cmd *cmd,
1444 enum dma_data_direction data_direction)
1445{
1446 struct se_task *task;
5951146d 1447 struct se_device *dev = cmd->se_dev;
c66ac9db 1448
6708bb27 1449 task = dev->transport->alloc_task(cmd->t_task_cdb);
c66ac9db 1450 if (!task) {
6708bb27 1451 pr_err("Unable to allocate struct se_task\n");
1452 return NULL;
1453 }
1454
1455 INIT_LIST_HEAD(&task->t_list);
1456 INIT_LIST_HEAD(&task->t_execute_list);
1457 INIT_LIST_HEAD(&task->t_state_list);
1458 init_completion(&task->task_stop_comp);
c66ac9db 1459 task->task_se_cmd = cmd;
1460 task->task_data_direction = data_direction;
1461
1462 return task;
1463}
1464
1465static int transport_generic_cmd_sequencer(struct se_cmd *, unsigned char *);
1466
1467/*
1468 * Used by fabric modules containing a local struct se_cmd within their
1469 * fabric dependent per I/O descriptor.
1470 */
1471void transport_init_se_cmd(
1472 struct se_cmd *cmd,
1473 struct target_core_fabric_ops *tfo,
1474 struct se_session *se_sess,
1475 u32 data_length,
1476 int data_direction,
1477 int task_attr,
1478 unsigned char *sense_buffer)
1479{
1480 INIT_LIST_HEAD(&cmd->se_lun_node);
1481 INIT_LIST_HEAD(&cmd->se_delayed_node);
07bde79a 1482 INIT_LIST_HEAD(&cmd->se_qf_node);
79a7fef2 1483 INIT_LIST_HEAD(&cmd->se_queue_node);
a17f091d 1484 INIT_LIST_HEAD(&cmd->se_cmd_list);
1485 INIT_LIST_HEAD(&cmd->t_task_list);
1486 init_completion(&cmd->transport_lun_fe_stop_comp);
1487 init_completion(&cmd->transport_lun_stop_comp);
1488 init_completion(&cmd->t_transport_stop_comp);
a17f091d 1489 init_completion(&cmd->cmd_wait_comp);
1490 spin_lock_init(&cmd->t_state_lock);
1491 atomic_set(&cmd->transport_dev_active, 1);
1492
1493 cmd->se_tfo = tfo;
1494 cmd->se_sess = se_sess;
1495 cmd->data_length = data_length;
1496 cmd->data_direction = data_direction;
1497 cmd->sam_task_attr = task_attr;
1498 cmd->sense_buffer = sense_buffer;
1499}
1500EXPORT_SYMBOL(transport_init_se_cmd);
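/*
 * Caller sketch (assumption): target_submit_cmd() below is the canonical
 * user, but a fabric driving the setup itself would follow the same order:
 *
 *	transport_init_se_cmd(se_cmd, se_tpg->se_tpg_tfo, se_sess,
 *			data_length, data_dir, task_attr, sense);
 *	if (transport_lookup_cmd_lun(se_cmd, unpacked_lun) < 0)
 *		return;		// queue CHECK CONDITION via the fabric
 *	if (transport_generic_allocate_tasks(se_cmd, cdb) < 0)
 *		return;		// likewise
 */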
1501
1502static int transport_check_alloc_task_attr(struct se_cmd *cmd)
1503{
1504 /*
1505 * Check if SAM Task Attribute emulation is enabled for this
1506 * struct se_device storage object
1507 */
5951146d 1508 if (cmd->se_dev->dev_task_attr_type != SAM_TASK_ATTR_EMULATED)
1509 return 0;
1510
e66ecd50 1511 if (cmd->sam_task_attr == MSG_ACA_TAG) {
6708bb27 1512 pr_debug("SAM Task Attribute ACA"
c66ac9db 1513 " emulation is not supported\n");
e3d6f909 1514 return -EINVAL;
1515 }
1516 /*
1517 * Used to determine when ORDERED commands should go from
1518 * Dormant to Active status.
1519 */
5951146d 1520 cmd->se_ordered_id = atomic_inc_return(&cmd->se_dev->dev_ordered_id);
c66ac9db 1521 smp_mb__after_atomic_inc();
6708bb27 1522 pr_debug("Allocated se_ordered_id: %u for Task Attr: 0x%02x on %s\n",
c66ac9db 1523 cmd->se_ordered_id, cmd->sam_task_attr,
6708bb27 1524 cmd->se_dev->transport->name);
1525 return 0;
1526}
1527
1528/* transport_generic_allocate_tasks():
1529 *
1530 * Called from fabric RX Thread.
1531 */
1532int transport_generic_allocate_tasks(
1533 struct se_cmd *cmd,
1534 unsigned char *cdb)
1535{
1536 int ret;
1537
1538 transport_generic_prepare_cdb(cdb);
1539 /*
1540 * Ensure that the received CDB is less than the max (252 + 8) bytes
1541 * for VARIABLE_LENGTH_CMD
1542 */
1543 if (scsi_command_size(cdb) > SCSI_MAX_VARLEN_CDB_SIZE) {
6708bb27 1544 pr_err("Received SCSI CDB with command_size: %d that"
1545 " exceeds SCSI_MAX_VARLEN_CDB_SIZE: %d\n",
1546 scsi_command_size(cdb), SCSI_MAX_VARLEN_CDB_SIZE);
1547 cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION;
1548 cmd->scsi_sense_reason = TCM_INVALID_CDB_FIELD;
e3d6f909 1549 return -EINVAL;
1550 }
1551 /*
1552 * If the received CDB is larger than TCM_MAX_COMMAND_SIZE,
1553 * allocate the additional extended CDB buffer now.. Otherwise
1554 * setup the pointer from __t_task_cdb to t_task_cdb.
1555 */
1556 if (scsi_command_size(cdb) > sizeof(cmd->__t_task_cdb)) {
1557 cmd->t_task_cdb = kzalloc(scsi_command_size(cdb),
c66ac9db 1558 GFP_KERNEL);
1559 if (!cmd->t_task_cdb) {
1560 pr_err("Unable to allocate cmd->t_task_cdb"
a1d8b49a 1561 " %u > sizeof(cmd->__t_task_cdb): %lu ops\n",
c66ac9db 1562 scsi_command_size(cdb),
a1d8b49a 1563 (unsigned long)sizeof(cmd->__t_task_cdb));
1564 cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION;
1565 cmd->scsi_sense_reason =
1566 TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
e3d6f909 1567 return -ENOMEM;
1568 }
1569 } else
a1d8b49a 1570 cmd->t_task_cdb = &cmd->__t_task_cdb[0];
c66ac9db 1571 /*
1572 * Copy the original CDB into cmd->t_task_cdb.
c66ac9db 1573 */
a1d8b49a 1574 memcpy(cmd->t_task_cdb, cdb, scsi_command_size(cdb));
1575 /*
1576 * Setup the received CDB based on SCSI defined opcodes and
1577 * perform unit attention, persistent reservations and ALUA
a1d8b49a 1578 * checks for virtual device backends. The cmd->t_task_cdb
1579 * pointer is expected to be setup before we reach this point.
1580 */
1581 ret = transport_generic_cmd_sequencer(cmd, cdb);
1582 if (ret < 0)
1583 return ret;
1584 /*
1585 * Check for SAM Task Attribute Emulation
1586 */
1587 if (transport_check_alloc_task_attr(cmd) < 0) {
1588 cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION;
1589 cmd->scsi_sense_reason = TCM_INVALID_CDB_FIELD;
5951146d 1590 return -EINVAL;
1591 }
1592 spin_lock(&cmd->se_lun->lun_sep_lock);
1593 if (cmd->se_lun->lun_sep)
1594 cmd->se_lun->lun_sep->sep_stats.cmd_pdus++;
1595 spin_unlock(&cmd->se_lun->lun_sep_lock);
1596 return 0;
1597}
1598EXPORT_SYMBOL(transport_generic_allocate_tasks);
1599
1600/*
1601 * Used by fabric module frontends to queue tasks directly.
1602 * May only be used from process context.
1603 */
1604int transport_handle_cdb_direct(
1605 struct se_cmd *cmd)
1606{
1607 int ret;
1608
1609 if (!cmd->se_lun) {
1610 dump_stack();
6708bb27 1611 pr_err("cmd->se_lun is NULL\n");
1612 return -EINVAL;
1613 }
1614 if (in_interrupt()) {
1615 dump_stack();
6708bb27 1616 pr_err("transport_generic_handle_cdb cannot be called"
1617 " from interrupt context\n");
1618 return -EINVAL;
1619 }
1620 /*
1621 * Set TRANSPORT_NEW_CMD state and cmd->t_transport_active=1 following
1622 * transport_generic_handle_cdb*() -> transport_add_cmd_to_queue()
1623 * in existing usage to ensure that outstanding descriptors are handled
d14921d6 1624 * correctly during shutdown via transport_wait_for_tasks()
1625 *
1626 * Also, we don't take cmd->t_state_lock here as we only expect
1627 * this to be called for initial descriptor submission.
1628 */
1629 cmd->t_state = TRANSPORT_NEW_CMD;
1630 atomic_set(&cmd->t_transport_active, 1);
1631 /*
1632 * transport_generic_new_cmd() is already handling QUEUE_FULL,
1633 * so follow TRANSPORT_NEW_CMD processing thread context usage
1634 * and call transport_generic_request_failure() if necessary..
1635 */
1636 ret = transport_generic_new_cmd(cmd);
1637 if (ret < 0)
1638 transport_generic_request_failure(cmd);
1639
dd8ae59d 1640 return 0;
1641}
1642EXPORT_SYMBOL(transport_handle_cdb_direct);
1643
1644/**
1645 * target_submit_cmd - lookup unpacked lun and submit uninitialized se_cmd
1646 *
1647 * @se_cmd: command descriptor to submit
1648 * @se_sess: associated se_sess for endpoint
1649 * @cdb: pointer to SCSI CDB
1650 * @sense: pointer to SCSI sense buffer
1651 * @unpacked_lun: unpacked LUN to reference for struct se_lun
1652 * @data_length: fabric expected data transfer length
1653 * @task_attr: SAM task attribute
1654 * @data_dir: DMA data direction
1655 * @flags: flags for command submission from target_sc_flags_tables
1656 *
1657 * This may only be called from process context, and also currently
1658 * assumes internal allocation of fabric payload buffer by target-core.
1659 **/
1edcdb49 1660void target_submit_cmd(struct se_cmd *se_cmd, struct se_session *se_sess,
1661 unsigned char *cdb, unsigned char *sense, u32 unpacked_lun,
1662 u32 data_length, int task_attr, int data_dir, int flags)
1663{
1664 struct se_portal_group *se_tpg;
1665 int rc;
1666
1667 se_tpg = se_sess->se_tpg;
1668 BUG_ON(!se_tpg);
1669 BUG_ON(se_cmd->se_tfo || se_cmd->se_sess);
1670 BUG_ON(in_interrupt());
1671 /*
1672 * Initialize se_cmd for target operation. From this point
1673 * exceptions are handled by sending exception status via
1674 * target_core_fabric_ops->queue_status() callback
1675 */
1676 transport_init_se_cmd(se_cmd, se_tpg->se_tpg_tfo, se_sess,
1677 data_length, data_dir, task_attr, sense);
1678 /*
1679 * Obtain struct se_cmd->cmd_kref reference and add new cmd to
1680 * se_sess->sess_cmd_list. A second kref_get here is necessary
1681 * for fabrics using TARGET_SCF_ACK_KREF that expect a second
1682 * kref_put() to happen during fabric packet acknowledgement.
1683 */
1684 target_get_sess_cmd(se_sess, se_cmd, (flags & TARGET_SCF_ACK_KREF));
1685 /*
1686 * Signal bidirectional data payloads to target-core
1687 */
1688 if (flags & TARGET_SCF_BIDI_OP)
1689 se_cmd->se_cmd_flags |= SCF_BIDI;
1690 /*
1691 * Locate se_lun pointer and attach it to struct se_cmd
1692 */
1693 if (transport_lookup_cmd_lun(se_cmd, unpacked_lun) < 0)
1694 goto out_check_cond;
1695 /*
1696 * Sanitize CDBs via transport_generic_cmd_sequencer() and
1697 * allocate the necessary tasks to complete the received CDB+data
1698 */
1699 rc = transport_generic_allocate_tasks(se_cmd, cdb);
1700 if (rc != 0)
1701 goto out_check_cond;
1702 /*
1703 * Dispatch se_cmd descriptor to se_lun->lun_se_dev backend
1704 * for immediate execution of READs, otherwise wait for
1705 * transport_generic_handle_data() to be called for WRITEs
1706 * when fabric has filled the incoming buffer.
1707 */
1708 transport_handle_cdb_direct(se_cmd);
1edcdb49 1709 return;
a6360785
NB
1710
1711out_check_cond:
1712 transport_send_check_condition_and_sense(se_cmd,
1713 se_cmd->scsi_sense_reason, 0);
a6360785
NB
1714}
1715EXPORT_SYMBOL(target_submit_cmd);
1716
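/*
 * A minimal usage sketch of target_submit_cmd(): the my_fabric_cmd
 * structure, its fields and my_fabric_submit_io() are hypothetical
 * stand-ins for a real fabric driver's per-command descriptor and
 * per-I/O receive path, shown here only to illustrate the call.
 */
#if 0
struct my_fabric_cmd {
	struct se_cmd	se_cmd;		/* embedded target-core descriptor */
	unsigned char	cdb[16];	/* CDB copied from the fabric PDU */
	unsigned char	sense_buf[TRANSPORT_SENSE_BUFFER];
	u32		unpacked_lun;
	u32		data_length;
	bool		is_write;
};

static void my_fabric_submit_io(struct my_fabric_cmd *fcmd,
				struct se_session *se_sess)
{
	int data_dir = fcmd->is_write ? DMA_TO_DEVICE : DMA_FROM_DEVICE;

	/*
	 * target_submit_cmd() initializes the embedded se_cmd, takes the
	 * sess_cmd_list reference, performs the LUN lookup and CDB
	 * sequencing, and dispatches the command.  Failures are reported
	 * back via ->queue_status(), so no return value is checked.
	 */
	target_submit_cmd(&fcmd->se_cmd, se_sess, fcmd->cdb, fcmd->sense_buf,
			  fcmd->unpacked_lun, fcmd->data_length,
			  MSG_SIMPLE_TAG, data_dir, TARGET_SCF_ACK_KREF);
}
#endif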
c66ac9db
NB
1717/*
1718 * Used by fabric module frontends defining a TFO->new_cmd_map() caller
1719 * to queue up a newly setup se_cmd w/ TRANSPORT_NEW_CMD_MAP in order to
1720 * complete setup in TCM process context w/ TFO->new_cmd_map().
1721 */
1722int transport_generic_handle_cdb_map(
1723 struct se_cmd *cmd)
1724{
e3d6f909 1725 if (!cmd->se_lun) {
c66ac9db 1726 dump_stack();
6708bb27 1727 pr_err("cmd->se_lun is NULL\n");
e3d6f909 1728 return -EINVAL;
c66ac9db
NB
1729 }
1730
f7a5cc0b 1731 transport_add_cmd_to_queue(cmd, TRANSPORT_NEW_CMD_MAP, false);
c66ac9db
NB
1732 return 0;
1733}
1734EXPORT_SYMBOL(transport_generic_handle_cdb_map);
1735
1736/* transport_generic_handle_data():
1737 *
1738 *
1739 */
1740int transport_generic_handle_data(
1741 struct se_cmd *cmd)
1742{
1743 /*
1744 * For the software fabric case, we assume the nexus is being
1745 * failed/shutdown when signals are pending from the kthread context
1746 * caller, so we return a failure. For the HW target mode case running
1747 * in interrupt code, the signal_pending() check is skipped.
1748 */
1749 if (!in_interrupt() && signal_pending(current))
e3d6f909 1750 return -EPERM;
c66ac9db
NB
1751 /*
1752 * If the received CDB has already been ABORTED by the generic
1753 * target engine, we now call transport_check_aborted_status()
1754 * to queue any delayed TASK_ABORTED status for the received CDB to the
25985edc 1755 * fabric module as we are expecting no further incoming DATA OUT
c66ac9db
NB
1756 * sequences at this point.
1757 */
1758 if (transport_check_aborted_status(cmd, 1) != 0)
1759 return 0;
1760
f7a5cc0b 1761 transport_add_cmd_to_queue(cmd, TRANSPORT_PROCESS_WRITE, false);
c66ac9db
NB
1762 return 0;
1763}
1764EXPORT_SYMBOL(transport_generic_handle_data);
1765
1766/* transport_generic_handle_tmr():
1767 *
1768 *
1769 */
1770int transport_generic_handle_tmr(
1771 struct se_cmd *cmd)
1772{
f7a5cc0b 1773 transport_add_cmd_to_queue(cmd, TRANSPORT_PROCESS_TMR, false);
c66ac9db
NB
1774 return 0;
1775}
1776EXPORT_SYMBOL(transport_generic_handle_tmr);
1777
cdbb70bb
CH
1778/*
1779 * If the task is active, request it to be stopped and sleep until it
1780 * has completed.
1781 */
1782bool target_stop_task(struct se_task *task, unsigned long *flags)
1783{
1784 struct se_cmd *cmd = task->task_se_cmd;
1785 bool was_active = false;
1786
1787 if (task->task_flags & TF_ACTIVE) {
1788 task->task_flags |= TF_REQUEST_STOP;
1789 spin_unlock_irqrestore(&cmd->t_state_lock, *flags);
1790
1791 pr_debug("Task %p waiting to complete\n", task);
1792 wait_for_completion(&task->task_stop_comp);
1793 pr_debug("Task %p stopped successfully\n", task);
1794
1795 spin_lock_irqsave(&cmd->t_state_lock, *flags);
1796 atomic_dec(&cmd->t_task_cdbs_left);
1797 task->task_flags &= ~(TF_ACTIVE | TF_REQUEST_STOP);
1798 was_active = true;
1799 }
1800
cdbb70bb
CH
1801 return was_active;
1802}
1803
c66ac9db
NB
1804static int transport_stop_tasks_for_cmd(struct se_cmd *cmd)
1805{
1806 struct se_task *task, *task_tmp;
1807 unsigned long flags;
1808 int ret = 0;
1809
6708bb27 1810 pr_debug("ITT[0x%08x] - Stopping tasks\n",
e3d6f909 1811 cmd->se_tfo->get_task_tag(cmd));
c66ac9db
NB
1812
1813 /*
1814 * No tasks remain in the execution queue
1815 */
a1d8b49a 1816 spin_lock_irqsave(&cmd->t_state_lock, flags);
c66ac9db 1817 list_for_each_entry_safe(task, task_tmp,
a1d8b49a 1818 &cmd->t_task_list, t_list) {
04629b7b 1819 pr_debug("Processing task %p\n", task);
c66ac9db
NB
1820 /*
1821 * If the struct se_task has not been sent and is not active,
1822 * remove the struct se_task from the execution queue.
1823 */
6c76bf95 1824 if (!(task->task_flags & (TF_ACTIVE | TF_SENT))) {
a1d8b49a 1825 spin_unlock_irqrestore(&cmd->t_state_lock,
c66ac9db
NB
1826 flags);
1827 transport_remove_task_from_execute_queue(task,
42bf829e 1828 cmd->se_dev);
c66ac9db 1829
04629b7b 1830 pr_debug("Task %p removed from execute queue\n", task);
a1d8b49a 1831 spin_lock_irqsave(&cmd->t_state_lock, flags);
c66ac9db
NB
1832 continue;
1833 }
1834
cdbb70bb 1835 if (!target_stop_task(task, &flags)) {
04629b7b 1836 pr_debug("Task %p - did nothing\n", task);
c66ac9db
NB
1837 ret++;
1838 }
c66ac9db 1839 }
a1d8b49a 1840 spin_unlock_irqrestore(&cmd->t_state_lock, flags);
c66ac9db
NB
1841
1842 return ret;
1843}
1844
c66ac9db
NB
1845/*
1846 * Handle SAM-esque emulation for generic transport request failures.
1847 */
03e98c9e 1848static void transport_generic_request_failure(struct se_cmd *cmd)
c66ac9db 1849{
07bde79a
NB
1850 int ret = 0;
1851
6708bb27 1852 pr_debug("-----[ Storage Engine Exception for cmd: %p ITT: 0x%08x"
e3d6f909 1853 " CDB: 0x%02x\n", cmd, cmd->se_tfo->get_task_tag(cmd),
a1d8b49a 1854 cmd->t_task_cdb[0]);
03e98c9e 1855 pr_debug("-----[ i_state: %d t_state: %d scsi_sense_reason: %d\n",
e3d6f909 1856 cmd->se_tfo->get_cmd_state(cmd),
03e98c9e 1857 cmd->t_state, cmd->scsi_sense_reason);
6708bb27 1858 pr_debug("-----[ t_tasks: %d t_task_cdbs_left: %d"
c66ac9db
NB
1859 " t_task_cdbs_sent: %d t_task_cdbs_ex_left: %d --"
1860 " t_transport_active: %d t_transport_stop: %d"
6708bb27 1861 " t_transport_sent: %d\n", cmd->t_task_list_num,
a1d8b49a
AG
1862 atomic_read(&cmd->t_task_cdbs_left),
1863 atomic_read(&cmd->t_task_cdbs_sent),
1864 atomic_read(&cmd->t_task_cdbs_ex_left),
1865 atomic_read(&cmd->t_transport_active),
1866 atomic_read(&cmd->t_transport_stop),
1867 atomic_read(&cmd->t_transport_sent));
c66ac9db 1868
c66ac9db
NB
1869 /*
1870 * For SAM Task Attribute emulation for failed struct se_cmd
1871 */
1872 if (cmd->se_dev->dev_task_attr_type == SAM_TASK_ATTR_EMULATED)
1873 transport_complete_task_attr(cmd);
1874
03e98c9e
NB
1875 switch (cmd->scsi_sense_reason) {
1876 case TCM_NON_EXISTENT_LUN:
1877 case TCM_UNSUPPORTED_SCSI_OPCODE:
1878 case TCM_INVALID_CDB_FIELD:
1879 case TCM_INVALID_PARAMETER_LIST:
1880 case TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE:
1881 case TCM_UNKNOWN_MODE_PAGE:
1882 case TCM_WRITE_PROTECTED:
1883 case TCM_CHECK_CONDITION_ABORT_CMD:
1884 case TCM_CHECK_CONDITION_UNIT_ATTENTION:
1885 case TCM_CHECK_CONDITION_NOT_READY:
c66ac9db 1886 break;
03e98c9e 1887 case TCM_RESERVATION_CONFLICT:
c66ac9db
NB
1888 /*
1889 * No SENSE Data payload for this case, set SCSI Status
1890 * and queue the response to $FABRIC_MOD.
1891 *
1892 * Uses linux/include/scsi/scsi.h SAM status codes defs
1893 */
1894 cmd->scsi_status = SAM_STAT_RESERVATION_CONFLICT;
1895 /*
1896 * For UA Interlock Code 11b, a RESERVATION CONFLICT will
1897 * establish a UNIT ATTENTION with PREVIOUS RESERVATION
1898 * CONFLICT STATUS.
1899 *
1900 * See spc4r17, section 7.4.6 Control Mode Page, Table 349
1901 */
e3d6f909
AG
1902 if (cmd->se_sess &&
1903 cmd->se_dev->se_sub_dev->se_dev_attrib.emulate_ua_intlck_ctrl == 2)
1904 core_scsi3_ua_allocate(cmd->se_sess->se_node_acl,
c66ac9db
NB
1905 cmd->orig_fe_lun, 0x2C,
1906 ASCQ_2CH_PREVIOUS_RESERVATION_CONFLICT_STATUS);
1907
07bde79a 1908 ret = cmd->se_tfo->queue_status(cmd);
f147abb4 1909 if (ret == -EAGAIN || ret == -ENOMEM)
07bde79a 1910 goto queue_full;
c66ac9db 1911 goto check_stop;
c66ac9db 1912 default:
6708bb27 1913 pr_err("Unknown transport error for CDB 0x%02x: %d\n",
03e98c9e 1914 cmd->t_task_cdb[0], cmd->scsi_sense_reason);
c66ac9db
NB
1915 cmd->scsi_sense_reason = TCM_UNSUPPORTED_SCSI_OPCODE;
1916 break;
1917 }
16ab8e60
NB
1918 /*
1919 * If a fabric does not define a cmd->se_tfo->new_cmd_map caller,
1920 * make the call to transport_send_check_condition_and_sense()
1921 * directly. Otherwise expect the fabric to make the call to
1922 * transport_send_check_condition_and_sense() after handling
1923 * possible unsolicited write data payloads.
1924 */
03e98c9e
NB
1925 ret = transport_send_check_condition_and_sense(cmd,
1926 cmd->scsi_sense_reason, 0);
1927 if (ret == -EAGAIN || ret == -ENOMEM)
1928 goto queue_full;
07bde79a 1929
c66ac9db
NB
1930check_stop:
1931 transport_lun_remove_cmd(cmd);
6708bb27 1932 if (!transport_cmd_check_stop_to_fabric(cmd))
c66ac9db 1933 ;
07bde79a
NB
1934 return;
1935
1936queue_full:
e057f533
CH
1937 cmd->t_state = TRANSPORT_COMPLETE_QF_OK;
1938 transport_handle_queue_full(cmd, cmd->se_dev);
c66ac9db
NB
1939}
1940
c66ac9db
NB
1941static inline u32 transport_lba_21(unsigned char *cdb)
1942{
1943 return ((cdb[1] & 0x1f) << 16) | (cdb[2] << 8) | cdb[3];
1944}
1945
1946static inline u32 transport_lba_32(unsigned char *cdb)
1947{
1948 return (cdb[2] << 24) | (cdb[3] << 16) | (cdb[4] << 8) | cdb[5];
1949}
1950
1951static inline unsigned long long transport_lba_64(unsigned char *cdb)
1952{
1953 unsigned int __v1, __v2;
1954
1955 __v1 = (cdb[2] << 24) | (cdb[3] << 16) | (cdb[4] << 8) | cdb[5];
1956 __v2 = (cdb[6] << 24) | (cdb[7] << 16) | (cdb[8] << 8) | cdb[9];
1957
1958 return ((unsigned long long)__v2) | (unsigned long long)__v1 << 32;
1959}
1960
1961/*
1962 * For VARIABLE_LENGTH_CDB w/ 32 byte extended CDBs
1963 */
1964static inline unsigned long long transport_lba_64_ext(unsigned char *cdb)
1965{
1966 unsigned int __v1, __v2;
1967
1968 __v1 = (cdb[12] << 24) | (cdb[13] << 16) | (cdb[14] << 8) | cdb[15];
1969 __v2 = (cdb[16] << 24) | (cdb[17] << 16) | (cdb[18] << 8) | cdb[19];
1970
1971 return ((unsigned long long)__v2) | (unsigned long long)__v1 << 32;
1972}
1973
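/*
 * Worked example of the LBA helpers above (illustrative only; the
 * transport_lba_example() wrapper is not part of the driver): for a
 * READ(10) CDB whose bytes 2..5 carry 00h 12h 34h 56h, transport_lba_32()
 * assembles the big-endian LBA 0x00123456.  transport_lba_64() does the
 * same over bytes 2..9 of a 16-byte CDB, and transport_lba_64_ext() over
 * bytes 12..19 of a 32-byte variable-length CDB.
 */
#if 0
static void transport_lba_example(void)
{
	unsigned char cdb[10] = { READ_10, 0, 0x00, 0x12, 0x34, 0x56, 0, 0, 0, 0 };

	WARN_ON(transport_lba_32(cdb) != 0x00123456);
}
#endif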
1974static void transport_set_supported_SAM_opcode(struct se_cmd *se_cmd)
1975{
1976 unsigned long flags;
1977
a1d8b49a 1978 spin_lock_irqsave(&se_cmd->t_state_lock, flags);
c66ac9db 1979 se_cmd->se_cmd_flags |= SCF_SUPPORTED_SAM_OPCODE;
a1d8b49a 1980 spin_unlock_irqrestore(&se_cmd->t_state_lock, flags);
c66ac9db
NB
1981}
1982
c66ac9db
NB
1983/*
1984 * Called from Fabric Module context from transport_execute_tasks()
1985 *
1986 * The return of this function determines if the tasks from struct se_cmd
1987 * get added to the execution queue in transport_execute_tasks(),
1988 * or are added to the delayed or ordered lists here.
1989 */
1990static inline int transport_execute_task_attr(struct se_cmd *cmd)
1991{
5951146d 1992 if (cmd->se_dev->dev_task_attr_type != SAM_TASK_ATTR_EMULATED)
c66ac9db
NB
1993 return 1;
1994 /*
25985edc 1995 * Check for the existence of HEAD_OF_QUEUE, and if true return 1
c66ac9db
NB
1996 * to allow the passed struct se_cmd list of tasks to be added to the front of the list.
1997 */
e66ecd50 1998 if (cmd->sam_task_attr == MSG_HEAD_TAG) {
6708bb27 1999 pr_debug("Added HEAD_OF_QUEUE for CDB:"
c66ac9db 2000 " 0x%02x, se_ordered_id: %u\n",
6708bb27 2001 cmd->t_task_cdb[0],
c66ac9db
NB
2002 cmd->se_ordered_id);
2003 return 1;
e66ecd50 2004 } else if (cmd->sam_task_attr == MSG_ORDERED_TAG) {
5951146d 2005 atomic_inc(&cmd->se_dev->dev_ordered_sync);
c66ac9db
NB
2006 smp_mb__after_atomic_inc();
2007
6708bb27 2008 pr_debug("Added ORDERED for CDB: 0x%02x to ordered"
c66ac9db 2009 " list, se_ordered_id: %u\n",
a1d8b49a 2010 cmd->t_task_cdb[0],
c66ac9db
NB
2011 cmd->se_ordered_id);
2012 /*
2013 * Add ORDERED command to tail of execution queue if
2014 * no other older commands exist that need to be
2015 * completed first.
2016 */
6708bb27 2017 if (!atomic_read(&cmd->se_dev->simple_cmds))
c66ac9db
NB
2018 return 1;
2019 } else {
2020 /*
2021 * For SIMPLE and UNTAGGED Task Attribute commands
2022 */
5951146d 2023 atomic_inc(&cmd->se_dev->simple_cmds);
c66ac9db
NB
2024 smp_mb__after_atomic_inc();
2025 }
2026 /*
2027 * Otherwise if one or more outstanding ORDERED task attribute exist,
2028 * add the dormant task(s) built for the passed struct se_cmd to the
2029 * execution queue and become in Active state for this struct se_device.
2030 */
5951146d 2031 if (atomic_read(&cmd->se_dev->dev_ordered_sync) != 0) {
c66ac9db
NB
2032 /*
2033 * Otherwise, add cmd w/ tasks to delayed cmd queue that
25985edc 2034 * will be drained upon completion of HEAD_OF_QUEUE task.
c66ac9db 2035 */
5951146d 2036 spin_lock(&cmd->se_dev->delayed_cmd_lock);
c66ac9db 2037 cmd->se_cmd_flags |= SCF_DELAYED_CMD_FROM_SAM_ATTR;
5951146d
AG
2038 list_add_tail(&cmd->se_delayed_node,
2039 &cmd->se_dev->delayed_cmd_list);
2040 spin_unlock(&cmd->se_dev->delayed_cmd_lock);
c66ac9db 2041
6708bb27 2042 pr_debug("Added CDB: 0x%02x Task Attr: 0x%02x to"
c66ac9db 2043 " delayed CMD list, se_ordered_id: %u\n",
a1d8b49a 2044 cmd->t_task_cdb[0], cmd->sam_task_attr,
c66ac9db
NB
2045 cmd->se_ordered_id);
2046 /*
2047 * Return zero to let transport_execute_tasks() know
2048 * not to add the delayed tasks to the execution list.
2049 */
2050 return 0;
2051 }
2052 /*
2053 * Otherwise, no ORDERED task attributes exist..
2054 */
2055 return 1;
2056}
2057
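/*
 * Summary of the decision above, as implemented: HEAD_OF_QUEUE commands
 * always execute immediately; an ORDERED command executes immediately
 * only when no SIMPLE commands are outstanding; once an ORDERED command
 * is in flight, subsequent SIMPLE and ORDERED commands are parked on
 * dev->delayed_cmd_list and later released by
 * transport_complete_task_attr(), while HEAD_OF_QUEUE commands still
 * bypass the delayed list.
 */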
2058/*
2059 * Called from fabric module context in transport_generic_new_cmd() and
2060 * transport_generic_process_write()
2061 */
2062static int transport_execute_tasks(struct se_cmd *cmd)
2063{
2064 int add_tasks;
40be67f4 2065 struct se_device *se_dev = cmd->se_dev;
c66ac9db
NB
2066 /*
2067 * Call transport_cmd_check_stop() to see if a fabric exception
25985edc 2068 * has occurred that prevents execution.
c66ac9db 2069 */
6708bb27 2070 if (!transport_cmd_check_stop(cmd, 0, TRANSPORT_PROCESSING)) {
c66ac9db
NB
2071 /*
2072 * Check for SAM Task Attribute emulation and HEAD_OF_QUEUE
2073 * attribute for the tasks of the received struct se_cmd CDB
2074 */
2075 add_tasks = transport_execute_task_attr(cmd);
e3d6f909 2076 if (!add_tasks)
c66ac9db
NB
2077 goto execute_tasks;
2078 /*
4d2300cc
NB
2079 * __transport_execute_tasks() -> __transport_add_tasks_from_cmd()
2080 * adds associated se_tasks while holding dev->execute_task_lock
2081 * before I/O dispatch to avoid a double spinlock access.
c66ac9db 2082 */
4d2300cc
NB
2083 __transport_execute_tasks(se_dev, cmd);
2084 return 0;
c66ac9db 2085 }
4d2300cc 2086
c66ac9db 2087execute_tasks:
4d2300cc 2088 __transport_execute_tasks(se_dev, NULL);
c66ac9db
NB
2089 return 0;
2090}
2091
2092/*
2093 * Called to check struct se_device tcq depth window, and once open pull struct se_task
2094 * from struct se_device->execute_task_list and dispatch it for execution.
2095 *
2096 * Called from transport_processing_thread()
2097 */
4d2300cc 2098static int __transport_execute_tasks(struct se_device *dev, struct se_cmd *new_cmd)
c66ac9db
NB
2099{
2100 int error;
2101 struct se_cmd *cmd = NULL;
e3d6f909 2102 struct se_task *task = NULL;
c66ac9db
NB
2103 unsigned long flags;
2104
c66ac9db 2105check_depth:
e3d6f909 2106 spin_lock_irq(&dev->execute_task_lock);
4d2300cc
NB
2107 if (new_cmd != NULL)
2108 __transport_add_tasks_from_cmd(new_cmd);
2109
e3d6f909
AG
2110 if (list_empty(&dev->execute_task_list)) {
2111 spin_unlock_irq(&dev->execute_task_lock);
c66ac9db
NB
2112 return 0;
2113 }
e3d6f909
AG
2114 task = list_first_entry(&dev->execute_task_list,
2115 struct se_task, t_execute_list);
04629b7b 2116 __transport_remove_task_from_execute_queue(task, dev);
e3d6f909 2117 spin_unlock_irq(&dev->execute_task_lock);
c66ac9db 2118
e3d6f909 2119 cmd = task->task_se_cmd;
a1d8b49a 2120 spin_lock_irqsave(&cmd->t_state_lock, flags);
6c76bf95 2121 task->task_flags |= (TF_ACTIVE | TF_SENT);
a1d8b49a 2122 atomic_inc(&cmd->t_task_cdbs_sent);
c66ac9db 2123
a1d8b49a
AG
2124 if (atomic_read(&cmd->t_task_cdbs_sent) ==
2125 cmd->t_task_list_num)
415a090a 2126 atomic_set(&cmd->t_transport_sent, 1);
c66ac9db 2127
a1d8b49a 2128 spin_unlock_irqrestore(&cmd->t_state_lock, flags);
c66ac9db 2129
5bda90c8
CH
2130 if (cmd->execute_task)
2131 error = cmd->execute_task(task);
2132 else
2133 error = dev->transport->do_task(task);
d29a5b6a 2134 if (error != 0) {
d29a5b6a
CH
2135 spin_lock_irqsave(&cmd->t_state_lock, flags);
2136 task->task_flags &= ~TF_ACTIVE;
2137 spin_unlock_irqrestore(&cmd->t_state_lock, flags);
2138 atomic_set(&cmd->t_transport_sent, 0);
2139 transport_stop_tasks_for_cmd(cmd);
03e98c9e 2140 transport_generic_request_failure(cmd);
c66ac9db
NB
2141 }
2142
4d2300cc 2143 new_cmd = NULL;
c66ac9db
NB
2144 goto check_depth;
2145
2146 return 0;
2147}
2148
c66ac9db
NB
2149static inline u32 transport_get_sectors_6(
2150 unsigned char *cdb,
2151 struct se_cmd *cmd,
2152 int *ret)
2153{
5951146d 2154 struct se_device *dev = cmd->se_dev;
c66ac9db
NB
2155
2156 /*
2157 * Assume TYPE_DISK for non struct se_device objects.
2158 * Use 8-bit sector value.
2159 */
2160 if (!dev)
2161 goto type_disk;
2162
2163 /*
2164 * Use 24-bit allocation length for TYPE_TAPE.
2165 */
e3d6f909 2166 if (dev->transport->get_device_type(dev) == TYPE_TAPE)
c66ac9db
NB
2167 return (u32)(cdb[2] << 16) + (cdb[3] << 8) + cdb[4];
2168
2169 /*
2170 * Everything else assume TYPE_DISK Sector CDB location.
9b5cd7f3
RD
2171 * Use 8-bit sector value. SBC-3 says:
2172 *
2173 * A TRANSFER LENGTH field set to zero specifies that 256
2174 * logical blocks shall be written. Any other value
2175 * specifies the number of logical blocks that shall be
2176 * written.
c66ac9db
NB
2177 */
2178type_disk:
9b5cd7f3 2179 return cdb[4] ? : 256;
c66ac9db
NB
2180}
2181
2182static inline u32 transport_get_sectors_10(
2183 unsigned char *cdb,
2184 struct se_cmd *cmd,
2185 int *ret)
2186{
5951146d 2187 struct se_device *dev = cmd->se_dev;
c66ac9db
NB
2188
2189 /*
2190 * Assume TYPE_DISK for non struct se_device objects.
2191 * Use 16-bit sector value.
2192 */
2193 if (!dev)
2194 goto type_disk;
2195
2196 /*
2197 * XXX_10 is not defined in SSC, throw an exception
2198 */
e3d6f909
AG
2199 if (dev->transport->get_device_type(dev) == TYPE_TAPE) {
2200 *ret = -EINVAL;
c66ac9db
NB
2201 return 0;
2202 }
2203
2204 /*
2205 * Everything else assume TYPE_DISK Sector CDB location.
2206 * Use 16-bit sector value.
2207 */
2208type_disk:
2209 return (u32)(cdb[7] << 8) + cdb[8];
2210}
2211
2212static inline u32 transport_get_sectors_12(
2213 unsigned char *cdb,
2214 struct se_cmd *cmd,
2215 int *ret)
2216{
5951146d 2217 struct se_device *dev = cmd->se_dev;
c66ac9db
NB
2218
2219 /*
2220 * Assume TYPE_DISK for non struct se_device objects.
2221 * Use 32-bit sector value.
2222 */
2223 if (!dev)
2224 goto type_disk;
2225
2226 /*
2227 * XXX_12 is not defined in SSC, throw an exception
2228 */
e3d6f909
AG
2229 if (dev->transport->get_device_type(dev) == TYPE_TAPE) {
2230 *ret = -EINVAL;
c66ac9db
NB
2231 return 0;
2232 }
2233
2234 /*
2235 * Everything else assume TYPE_DISK Sector CDB location.
2236 * Use 32-bit sector value.
2237 */
2238type_disk:
2239 return (u32)(cdb[6] << 24) + (cdb[7] << 16) + (cdb[8] << 8) + cdb[9];
2240}
2241
2242static inline u32 transport_get_sectors_16(
2243 unsigned char *cdb,
2244 struct se_cmd *cmd,
2245 int *ret)
2246{
5951146d 2247 struct se_device *dev = cmd->se_dev;
c66ac9db
NB
2248
2249 /*
2250 * Assume TYPE_DISK for non struct se_device objects.
2251 * Use 32-bit sector value.
2252 */
2253 if (!dev)
2254 goto type_disk;
2255
2256 /*
2257 * Use 24-bit allocation length for TYPE_TAPE.
2258 */
e3d6f909 2259 if (dev->transport->get_device_type(dev) == TYPE_TAPE)
c66ac9db
NB
2260 return (u32)(cdb[12] << 16) + (cdb[13] << 8) + cdb[14];
2261
2262type_disk:
2263 return (u32)(cdb[10] << 24) + (cdb[11] << 16) +
2264 (cdb[12] << 8) + cdb[13];
2265}
2266
2267/*
2268 * Used for VARIABLE_LENGTH_CDB WRITE_32 and READ_32 variants
2269 */
2270static inline u32 transport_get_sectors_32(
2271 unsigned char *cdb,
2272 struct se_cmd *cmd,
2273 int *ret)
2274{
2275 /*
2276 * Assume TYPE_DISK for non struct se_device objects.
2277 * Use 32-bit sector value.
2278 */
2279 return (u32)(cdb[28] << 24) + (cdb[29] << 16) +
2280 (cdb[30] << 8) + cdb[31];
2281
2282}
2283
2284static inline u32 transport_get_size(
2285 u32 sectors,
2286 unsigned char *cdb,
2287 struct se_cmd *cmd)
2288{
5951146d 2289 struct se_device *dev = cmd->se_dev;
c66ac9db 2290
e3d6f909 2291 if (dev->transport->get_device_type(dev) == TYPE_TAPE) {
c66ac9db 2292 if (cdb[1] & 1) { /* sectors */
e3d6f909 2293 return dev->se_sub_dev->se_dev_attrib.block_size * sectors;
c66ac9db
NB
2294 } else /* bytes */
2295 return sectors;
2296 }
2297#if 0
6708bb27 2298 pr_debug("Returning block_size: %u, sectors: %u == %u for"
e3d6f909
AG
2299 " %s object\n", dev->se_sub_dev->se_dev_attrib.block_size, sectors,
2300 dev->se_sub_dev->se_dev_attrib.block_size * sectors,
2301 dev->transport->name);
c66ac9db 2302#endif
e3d6f909 2303 return dev->se_sub_dev->se_dev_attrib.block_size * sectors;
c66ac9db
NB
2304}
2305
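/*
 * Illustrative arithmetic for the helper above: on a TYPE_DISK backend
 * exporting a 512-byte block_size, a READ(10) with TRANSFER LENGTH 8
 * yields transport_get_size(8, cdb, cmd) == 8 * 512 == 4096 bytes, which
 * the sequencer later compares against the fabric-supplied
 * cmd->data_length.
 */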
c66ac9db
NB
2306static void transport_xor_callback(struct se_cmd *cmd)
2307{
2308 unsigned char *buf, *addr;
ec98f782 2309 struct scatterlist *sg;
c66ac9db
NB
2310 unsigned int offset;
2311 int i;
ec98f782 2312 int count;
c66ac9db
NB
2313 /*
2314 * From sbc3r22.pdf section 5.48 XDWRITEREAD (10) command
2315 *
2316 * 1) read the specified logical block(s);
2317 * 2) transfer logical blocks from the data-out buffer;
2318 * 3) XOR the logical blocks transferred from the data-out buffer with
2319 * the logical blocks read, storing the resulting XOR data in a buffer;
2320 * 4) if the DISABLE WRITE bit is set to zero, then write the logical
2321 * blocks transferred from the data-out buffer; and
2322 * 5) transfer the resulting XOR data to the data-in buffer.
2323 */
2324 buf = kmalloc(cmd->data_length, GFP_KERNEL);
6708bb27
AG
2325 if (!buf) {
2326 pr_err("Unable to allocate xor_callback buf\n");
c66ac9db
NB
2327 return;
2328 }
2329 /*
ec98f782 2330 * Copy the scatterlist WRITE buffer located at cmd->t_data_sg
c66ac9db
NB
2331 * into the locally allocated *buf
2332 */
ec98f782
AG
2333 sg_copy_to_buffer(cmd->t_data_sg,
2334 cmd->t_data_nents,
2335 buf,
2336 cmd->data_length);
2337
c66ac9db
NB
2338 /*
2339 * Now perform the XOR against the BIDI read memory located at
a1d8b49a 2340 * cmd->t_bidi_data_sg
c66ac9db
NB
2341 */
2342
2343 offset = 0;
ec98f782
AG
2344 for_each_sg(cmd->t_bidi_data_sg, sg, cmd->t_bidi_data_nents, count) {
2345 addr = kmap_atomic(sg_page(sg), KM_USER0);
2346 if (!addr)
c66ac9db
NB
2347 goto out;
2348
ec98f782
AG
2349 for (i = 0; i < sg->length; i++)
2350 *(addr + sg->offset + i) ^= *(buf + offset + i);
c66ac9db 2351
ec98f782 2352 offset += sg->length;
c66ac9db
NB
2353 kunmap_atomic(addr, KM_USER0);
2354 }
ec98f782 2355
c66ac9db
NB
2356out:
2357 kfree(buf);
2358}
2359
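/*
 * Worked example of the XOR step above: if a byte read back from the
 * medium is 0x0F and the corresponding byte in the data-out buffer copy
 * is 0xF0, the byte returned in the data-in (BIDI) buffer becomes
 * 0x0F ^ 0xF0 == 0xFF.
 */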
2360/*
2361 * Used to obtain Sense Data from underlying Linux/SCSI struct scsi_cmnd
2362 */
2363static int transport_get_sense_data(struct se_cmd *cmd)
2364{
2365 unsigned char *buffer = cmd->sense_buffer, *sense_buffer = NULL;
42bf829e 2366 struct se_device *dev = cmd->se_dev;
c66ac9db
NB
2367 struct se_task *task = NULL, *task_tmp;
2368 unsigned long flags;
2369 u32 offset = 0;
2370
e3d6f909
AG
2371 WARN_ON(!cmd->se_lun);
2372
42bf829e
CH
2373 if (!dev)
2374 return 0;
2375
a1d8b49a 2376 spin_lock_irqsave(&cmd->t_state_lock, flags);
c66ac9db 2377 if (cmd->se_cmd_flags & SCF_SENT_CHECK_CONDITION) {
a1d8b49a 2378 spin_unlock_irqrestore(&cmd->t_state_lock, flags);
c66ac9db
NB
2379 return 0;
2380 }
2381
2382 list_for_each_entry_safe(task, task_tmp,
a1d8b49a 2383 &cmd->t_task_list, t_list) {
ef804a84 2384 if (!(task->task_flags & TF_HAS_SENSE))
c66ac9db
NB
2385 continue;
2386
e3d6f909 2387 if (!dev->transport->get_sense_buffer) {
6708bb27 2388 pr_err("dev->transport->get_sense_buffer"
c66ac9db
NB
2389 " is NULL\n");
2390 continue;
2391 }
2392
e3d6f909 2393 sense_buffer = dev->transport->get_sense_buffer(task);
6708bb27 2394 if (!sense_buffer) {
04629b7b 2395 pr_err("ITT[0x%08x]_TASK[%p]: Unable to locate"
c66ac9db 2396 " sense buffer for task with sense\n",
04629b7b 2397 cmd->se_tfo->get_task_tag(cmd), task);
c66ac9db
NB
2398 continue;
2399 }
a1d8b49a 2400 spin_unlock_irqrestore(&cmd->t_state_lock, flags);
c66ac9db 2401
e3d6f909 2402 offset = cmd->se_tfo->set_fabric_sense_len(cmd,
c66ac9db
NB
2403 TRANSPORT_SENSE_BUFFER);
2404
5951146d 2405 memcpy(&buffer[offset], sense_buffer,
c66ac9db
NB
2406 TRANSPORT_SENSE_BUFFER);
2407 cmd->scsi_status = task->task_scsi_status;
2408 /* Automatically padded */
2409 cmd->scsi_sense_length =
2410 (TRANSPORT_SENSE_BUFFER + offset);
2411
6708bb27 2412 pr_debug("HBA_[%u]_PLUG[%s]: Set SAM STATUS: 0x%02x"
c66ac9db 2413 " and sense\n",
e3d6f909 2414 dev->se_hba->hba_id, dev->transport->name,
c66ac9db
NB
2415 cmd->scsi_status);
2416 return 0;
2417 }
a1d8b49a 2418 spin_unlock_irqrestore(&cmd->t_state_lock, flags);
c66ac9db
NB
2419
2420 return -1;
2421}
2422
ec98f782
AG
2423static inline long long transport_dev_end_lba(struct se_device *dev)
2424{
2425 return dev->transport->get_blocks(dev) + 1;
2426}
2427
2428static int transport_cmd_get_valid_sectors(struct se_cmd *cmd)
2429{
2430 struct se_device *dev = cmd->se_dev;
2431 u32 sectors;
2432
2433 if (dev->transport->get_device_type(dev) != TYPE_DISK)
2434 return 0;
2435
2436 sectors = (cmd->data_length / dev->se_sub_dev->se_dev_attrib.block_size);
2437
6708bb27
AG
2438 if ((cmd->t_task_lba + sectors) > transport_dev_end_lba(dev)) {
2439 pr_err("LBA: %llu Sectors: %u exceeds"
ec98f782
AG
2440 " transport_dev_end_lba(): %llu\n",
2441 cmd->t_task_lba, sectors,
2442 transport_dev_end_lba(dev));
7abbe7f3 2443 return -EINVAL;
ec98f782
AG
2444 }
2445
7abbe7f3 2446 return 0;
ec98f782
AG
2447}
2448
706d5860
NB
2449static int target_check_write_same_discard(unsigned char *flags, struct se_device *dev)
2450{
2451 /*
2452 * Determine if the received WRITE_SAME is used for direct
2453 * passthrough into Linux/SCSI with struct request via TCM/pSCSI
2454 * or we are signaling the use of internal WRITE_SAME + UNMAP=1
2455 * emulation for -> Linux/BLOCK discard with TCM/IBLOCK code.
2456 */
2457 int passthrough = (dev->transport->transport_type ==
2458 TRANSPORT_PLUGIN_PHBA_PDEV);
2459
2460 if (!passthrough) {
2461 if ((flags[0] & 0x04) || (flags[0] & 0x02)) {
2462 pr_err("WRITE_SAME PBDATA and LBDATA"
2463 " bits not supported for Block Discard"
2464 " Emulation\n");
2465 return -ENOSYS;
2466 }
2467 /*
2468 * Currently for the emulated case we only accept
2469 * tpws with the UNMAP=1 bit set.
2470 */
2471 if (!(flags[0] & 0x08)) {
2472 pr_err("WRITE_SAME w/o UNMAP bit not"
2473 " supported for Block Discard Emulation\n");
2474 return -ENOSYS;
2475 }
2476 }
2477
2478 return 0;
2479}
2480
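/*
 * Note on flags[0] above, derived from the WRITE_SAME handlers in
 * transport_generic_cmd_sequencer() below: it is CDB byte 1 of
 * WRITE_SAME(10)/(16) (byte 10 for WRITE_SAME_32), so 0x08 = UNMAP,
 * 0x04 = PBDATA and 0x02 = LBDATA.  The emulated (non-passthrough) path
 * therefore only accepts UNMAP=1 with PBDATA=LBDATA=0, e.g. a
 * WRITE_SAME(16) with byte 1 == 0x08.
 */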
c66ac9db
NB
2481/* transport_generic_cmd_sequencer():
2482 *
2483 * Generic Command Sequencer that should work for most DAS transport
2484 * drivers.
2485 *
2486 * Called from transport_generic_allocate_tasks() in the $FABRIC_MOD
2487 * RX Thread.
2488 *
2489 * FIXME: Need to support other SCSI OPCODES here as well.
2490 */
2491static int transport_generic_cmd_sequencer(
2492 struct se_cmd *cmd,
2493 unsigned char *cdb)
2494{
5951146d 2495 struct se_device *dev = cmd->se_dev;
c66ac9db
NB
2496 struct se_subsystem_dev *su_dev = dev->se_sub_dev;
2497 int ret = 0, sector_ret = 0, passthrough;
2498 u32 sectors = 0, size = 0, pr_reg_type = 0;
2499 u16 service_action;
2500 u8 alua_ascq = 0;
2501 /*
2502 * Check for an existing UNIT ATTENTION condition
2503 */
2504 if (core_scsi3_ua_check(cmd, cdb) < 0) {
c66ac9db
NB
2505 cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION;
2506 cmd->scsi_sense_reason = TCM_CHECK_CONDITION_UNIT_ATTENTION;
5951146d 2507 return -EINVAL;
c66ac9db
NB
2508 }
2509 /*
2510 * Check status of Asymmetric Logical Unit Assignment port
2511 */
e3d6f909 2512 ret = su_dev->t10_alua.alua_state_check(cmd, cdb, &alua_ascq);
c66ac9db 2513 if (ret != 0) {
c66ac9db 2514 /*
25985edc 2515 * Set SCSI additional sense code (ASC) to 'LUN Not Accessible';
c66ac9db
NB
2516 * The ALUA additional sense code qualifier (ASCQ) is determined
2517 * by the ALUA primary or secondary access state.
2518 */
2519 if (ret > 0) {
2520#if 0
6708bb27 2521 pr_debug("[%s]: ALUA TG Port not available,"
c66ac9db 2522 " SenseKey: NOT_READY, ASC/ASCQ: 0x04/0x%02x\n",
e3d6f909 2523 cmd->se_tfo->get_fabric_name(), alua_ascq);
c66ac9db
NB
2524#endif
2525 transport_set_sense_codes(cmd, 0x04, alua_ascq);
2526 cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION;
2527 cmd->scsi_sense_reason = TCM_CHECK_CONDITION_NOT_READY;
5951146d 2528 return -EINVAL;
c66ac9db
NB
2529 }
2530 goto out_invalid_cdb_field;
2531 }
2532 /*
2533 * Check status for SPC-3 Persistent Reservations
2534 */
e3d6f909
AG
2535 if (su_dev->t10_pr.pr_ops.t10_reservation_check(cmd, &pr_reg_type) != 0) {
2536 if (su_dev->t10_pr.pr_ops.t10_seq_non_holder(
03e98c9e
NB
2537 cmd, cdb, pr_reg_type) != 0) {
2538 cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION;
2539 cmd->se_cmd_flags |= SCF_SCSI_RESERVATION_CONFLICT;
2540 cmd->scsi_sense_reason = TCM_RESERVATION_CONFLICT;
2541 return -EBUSY;
2542 }
c66ac9db
NB
2543 /*
2544 * This means the CDB is allowed for the SCSI Initiator port
2545 * when said port is *NOT* holding the legacy SPC-2 or
2546 * SPC-3 Persistent Reservation.
2547 */
2548 }
2549
5bda90c8
CH
2550 /*
2551 * If we operate in passthrough mode we skip most CDB emulation and
2552 * instead hand the commands down to the physical SCSI device.
2553 */
2554 passthrough =
2555 (dev->transport->transport_type == TRANSPORT_PLUGIN_PHBA_PDEV);
2556
c66ac9db
NB
2557 switch (cdb[0]) {
2558 case READ_6:
2559 sectors = transport_get_sectors_6(cdb, cmd, &sector_ret);
2560 if (sector_ret)
2561 goto out_unsupported_cdb;
2562 size = transport_get_size(sectors, cdb, cmd);
a1d8b49a 2563 cmd->t_task_lba = transport_lba_21(cdb);
c66ac9db
NB
2564 cmd->se_cmd_flags |= SCF_SCSI_DATA_SG_IO_CDB;
2565 break;
2566 case READ_10:
2567 sectors = transport_get_sectors_10(cdb, cmd, &sector_ret);
2568 if (sector_ret)
2569 goto out_unsupported_cdb;
2570 size = transport_get_size(sectors, cdb, cmd);
a1d8b49a 2571 cmd->t_task_lba = transport_lba_32(cdb);
c66ac9db
NB
2572 cmd->se_cmd_flags |= SCF_SCSI_DATA_SG_IO_CDB;
2573 break;
2574 case READ_12:
2575 sectors = transport_get_sectors_12(cdb, cmd, &sector_ret);
2576 if (sector_ret)
2577 goto out_unsupported_cdb;
2578 size = transport_get_size(sectors, cdb, cmd);
a1d8b49a 2579 cmd->t_task_lba = transport_lba_32(cdb);
c66ac9db
NB
2580 cmd->se_cmd_flags |= SCF_SCSI_DATA_SG_IO_CDB;
2581 break;
2582 case READ_16:
2583 sectors = transport_get_sectors_16(cdb, cmd, &sector_ret);
2584 if (sector_ret)
2585 goto out_unsupported_cdb;
2586 size = transport_get_size(sectors, cdb, cmd);
a1d8b49a 2587 cmd->t_task_lba = transport_lba_64(cdb);
c66ac9db
NB
2588 cmd->se_cmd_flags |= SCF_SCSI_DATA_SG_IO_CDB;
2589 break;
2590 case WRITE_6:
2591 sectors = transport_get_sectors_6(cdb, cmd, &sector_ret);
2592 if (sector_ret)
2593 goto out_unsupported_cdb;
2594 size = transport_get_size(sectors, cdb, cmd);
a1d8b49a 2595 cmd->t_task_lba = transport_lba_21(cdb);
c66ac9db
NB
2596 cmd->se_cmd_flags |= SCF_SCSI_DATA_SG_IO_CDB;
2597 break;
2598 case WRITE_10:
2599 sectors = transport_get_sectors_10(cdb, cmd, &sector_ret);
2600 if (sector_ret)
2601 goto out_unsupported_cdb;
2602 size = transport_get_size(sectors, cdb, cmd);
a1d8b49a 2603 cmd->t_task_lba = transport_lba_32(cdb);
2d3a4b51
CH
2604 if (cdb[1] & 0x8)
2605 cmd->se_cmd_flags |= SCF_FUA;
c66ac9db
NB
2606 cmd->se_cmd_flags |= SCF_SCSI_DATA_SG_IO_CDB;
2607 break;
2608 case WRITE_12:
2609 sectors = transport_get_sectors_12(cdb, cmd, &sector_ret);
2610 if (sector_ret)
2611 goto out_unsupported_cdb;
2612 size = transport_get_size(sectors, cdb, cmd);
a1d8b49a 2613 cmd->t_task_lba = transport_lba_32(cdb);
2d3a4b51
CH
2614 if (cdb[1] & 0x8)
2615 cmd->se_cmd_flags |= SCF_FUA;
c66ac9db
NB
2616 cmd->se_cmd_flags |= SCF_SCSI_DATA_SG_IO_CDB;
2617 break;
2618 case WRITE_16:
2619 sectors = transport_get_sectors_16(cdb, cmd, &sector_ret);
2620 if (sector_ret)
2621 goto out_unsupported_cdb;
2622 size = transport_get_size(sectors, cdb, cmd);
a1d8b49a 2623 cmd->t_task_lba = transport_lba_64(cdb);
2d3a4b51
CH
2624 if (cdb[1] & 0x8)
2625 cmd->se_cmd_flags |= SCF_FUA;
c66ac9db
NB
2626 cmd->se_cmd_flags |= SCF_SCSI_DATA_SG_IO_CDB;
2627 break;
2628 case XDWRITEREAD_10:
2629 if ((cmd->data_direction != DMA_TO_DEVICE) ||
33c3fafc 2630 !(cmd->se_cmd_flags & SCF_BIDI))
c66ac9db
NB
2631 goto out_invalid_cdb_field;
2632 sectors = transport_get_sectors_10(cdb, cmd, &sector_ret);
2633 if (sector_ret)
2634 goto out_unsupported_cdb;
2635 size = transport_get_size(sectors, cdb, cmd);
a1d8b49a 2636 cmd->t_task_lba = transport_lba_32(cdb);
c66ac9db 2637 cmd->se_cmd_flags |= SCF_SCSI_DATA_SG_IO_CDB;
7c1c6af3 2638
5bda90c8
CH
2639 /*
2640 * Do not allow BIDI commands for passthrough mode.
2641 */
2642 if (passthrough)
7c1c6af3 2643 goto out_unsupported_cdb;
5bda90c8 2644
c66ac9db 2645 /*
35e0e757 2646 * Setup BIDI XOR callback to be run after I/O completion.
c66ac9db
NB
2647 */
2648 cmd->transport_complete_callback = &transport_xor_callback;
2d3a4b51
CH
2649 if (cdb[1] & 0x8)
2650 cmd->se_cmd_flags |= SCF_FUA;
c66ac9db
NB
2651 break;
2652 case VARIABLE_LENGTH_CMD:
2653 service_action = get_unaligned_be16(&cdb[8]);
c66ac9db
NB
2654 switch (service_action) {
2655 case XDWRITEREAD_32:
2656 sectors = transport_get_sectors_32(cdb, cmd, &sector_ret);
2657 if (sector_ret)
2658 goto out_unsupported_cdb;
2659 size = transport_get_size(sectors, cdb, cmd);
2660 /*
2661 * Use WRITE_32 and READ_32 opcodes for the emulated
2662 * XDWRITE_READ_32 logic.
2663 */
a1d8b49a 2664 cmd->t_task_lba = transport_lba_64_ext(cdb);
c66ac9db
NB
2665 cmd->se_cmd_flags |= SCF_SCSI_DATA_SG_IO_CDB;
2666
5bda90c8
CH
2667 /*
2668 * Do not allow BIDI commands for passthrough mode.
2669 */
c66ac9db 2670 if (passthrough)
7c1c6af3 2671 goto out_unsupported_cdb;
5bda90c8 2672
c66ac9db 2673 /*
35e0e757
CH
2674 * Setup BIDI XOR callback to be run after I/O
2675 * completion.
c66ac9db
NB
2676 */
2677 cmd->transport_complete_callback = &transport_xor_callback;
2d3a4b51
CH
2678 if (cdb[1] & 0x8)
2679 cmd->se_cmd_flags |= SCF_FUA;
c66ac9db
NB
2680 break;
2681 case WRITE_SAME_32:
2682 sectors = transport_get_sectors_32(cdb, cmd, &sector_ret);
2683 if (sector_ret)
2684 goto out_unsupported_cdb;
dd3a5ad8 2685
6708bb27 2686 if (sectors)
12850626 2687 size = transport_get_size(1, cdb, cmd);
6708bb27
AG
2688 else {
2689 pr_err("WSNZ=1, WRITE_SAME w/sectors=0 not"
2690 " supported\n");
2691 goto out_invalid_cdb_field;
2692 }
dd3a5ad8 2693
a1d8b49a 2694 cmd->t_task_lba = get_unaligned_be64(&cdb[12]);
c66ac9db
NB
2695 cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB;
2696
706d5860 2697 if (target_check_write_same_discard(&cdb[10], dev) < 0)
c66ac9db 2698 goto out_invalid_cdb_field;
5bda90c8
CH
2699 if (!passthrough)
2700 cmd->execute_task = target_emulate_write_same;
c66ac9db
NB
2701 break;
2702 default:
6708bb27 2703 pr_err("VARIABLE_LENGTH_CMD service action"
c66ac9db
NB
2704 " 0x%04x not supported\n", service_action);
2705 goto out_unsupported_cdb;
2706 }
2707 break;
e434f1f1 2708 case MAINTENANCE_IN:
e3d6f909 2709 if (dev->transport->get_device_type(dev) != TYPE_ROM) {
c66ac9db
NB
2710 /* MAINTENANCE_IN from SCC-2 */
2711 /*
2712 * Check for emulated MI_REPORT_TARGET_PGS.
2713 */
e76a35d6
CH
2714 if (cdb[1] == MI_REPORT_TARGET_PGS &&
2715 su_dev->t10_alua.alua_type == SPC3_ALUA_EMULATED) {
2716 cmd->execute_task =
2717 target_emulate_report_target_port_groups;
c66ac9db
NB
2718 }
2719 size = (cdb[6] << 24) | (cdb[7] << 16) |
2720 (cdb[8] << 8) | cdb[9];
2721 } else {
2722 /* GPCMD_SEND_KEY from multi media commands */
2723 size = (cdb[8] << 8) + cdb[9];
2724 }
05d1c7c0 2725 cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB;
c66ac9db
NB
2726 break;
2727 case MODE_SELECT:
2728 size = cdb[4];
2729 cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB;
2730 break;
2731 case MODE_SELECT_10:
2732 size = (cdb[7] << 8) + cdb[8];
2733 cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB;
2734 break;
2735 case MODE_SENSE:
2736 size = cdb[4];
05d1c7c0 2737 cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB;
5bda90c8
CH
2738 if (!passthrough)
2739 cmd->execute_task = target_emulate_modesense;
c66ac9db
NB
2740 break;
2741 case MODE_SENSE_10:
5bda90c8
CH
2742 size = (cdb[7] << 8) + cdb[8];
2743 cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB;
2744 if (!passthrough)
2745 cmd->execute_task = target_emulate_modesense;
2746 break;
c66ac9db
NB
2747 case GPCMD_READ_BUFFER_CAPACITY:
2748 case GPCMD_SEND_OPC:
2749 case LOG_SELECT:
2750 case LOG_SENSE:
2751 size = (cdb[7] << 8) + cdb[8];
05d1c7c0 2752 cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB;
c66ac9db
NB
2753 break;
2754 case READ_BLOCK_LIMITS:
2755 size = READ_BLOCK_LEN;
05d1c7c0 2756 cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB;
c66ac9db
NB
2757 break;
2758 case GPCMD_GET_CONFIGURATION:
2759 case GPCMD_READ_FORMAT_CAPACITIES:
2760 case GPCMD_READ_DISC_INFO:
2761 case GPCMD_READ_TRACK_RZONE_INFO:
2762 size = (cdb[7] << 8) + cdb[8];
2763 cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB;
2764 break;
2765 case PERSISTENT_RESERVE_IN:
617c0e06 2766 if (su_dev->t10_pr.res_type == SPC3_PERSISTENT_RESERVATIONS)
e76a35d6 2767 cmd->execute_task = target_scsi3_emulate_pr_in;
617c0e06
CH
2768 size = (cdb[7] << 8) + cdb[8];
2769 cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB;
2770 break;
c66ac9db 2771 case PERSISTENT_RESERVE_OUT:
617c0e06 2772 if (su_dev->t10_pr.res_type == SPC3_PERSISTENT_RESERVATIONS)
e76a35d6 2773 cmd->execute_task = target_scsi3_emulate_pr_out;
c66ac9db 2774 size = (cdb[7] << 8) + cdb[8];
05d1c7c0 2775 cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB;
c66ac9db
NB
2776 break;
2777 case GPCMD_MECHANISM_STATUS:
2778 case GPCMD_READ_DVD_STRUCTURE:
2779 size = (cdb[8] << 8) + cdb[9];
2780 cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB;
2781 break;
2782 case READ_POSITION:
2783 size = READ_POSITION_LEN;
05d1c7c0 2784 cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB;
c66ac9db 2785 break;
e434f1f1 2786 case MAINTENANCE_OUT:
e3d6f909 2787 if (dev->transport->get_device_type(dev) != TYPE_ROM) {
c66ac9db
NB
2788 /* MAINTENANCE_OUT from SCC-2
2789 *
2790 * Check for emulated MO_SET_TARGET_PGS.
2791 */
e76a35d6
CH
2792 if (cdb[1] == MO_SET_TARGET_PGS &&
2793 su_dev->t10_alua.alua_type == SPC3_ALUA_EMULATED) {
2794 cmd->execute_task =
2795 target_emulate_set_target_port_groups;
c66ac9db
NB
2796 }
2797
2798 size = (cdb[6] << 24) | (cdb[7] << 16) |
2799 (cdb[8] << 8) | cdb[9];
2800 } else {
2801 /* GPCMD_REPORT_KEY from multi media commands */
2802 size = (cdb[8] << 8) + cdb[9];
2803 }
05d1c7c0 2804 cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB;
c66ac9db
NB
2805 break;
2806 case INQUIRY:
2807 size = (cdb[3] << 8) + cdb[4];
2808 /*
2809 * Do implicit HEAD_OF_QUEUE processing for INQUIRY.
2810 * See spc4r17 section 5.3
2811 */
5951146d 2812 if (cmd->se_dev->dev_task_attr_type == SAM_TASK_ATTR_EMULATED)
e66ecd50 2813 cmd->sam_task_attr = MSG_HEAD_TAG;
05d1c7c0 2814 cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB;
5bda90c8
CH
2815 if (!passthrough)
2816 cmd->execute_task = target_emulate_inquiry;
c66ac9db
NB
2817 break;
2818 case READ_BUFFER:
2819 size = (cdb[6] << 16) + (cdb[7] << 8) + cdb[8];
05d1c7c0 2820 cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB;
c66ac9db
NB
2821 break;
2822 case READ_CAPACITY:
2823 size = READ_CAP_LEN;
05d1c7c0 2824 cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB;
5bda90c8
CH
2825 if (!passthrough)
2826 cmd->execute_task = target_emulate_readcapacity;
c66ac9db
NB
2827 break;
2828 case READ_MEDIA_SERIAL_NUMBER:
2829 case SECURITY_PROTOCOL_IN:
2830 case SECURITY_PROTOCOL_OUT:
2831 size = (cdb[6] << 24) | (cdb[7] << 16) | (cdb[8] << 8) | cdb[9];
05d1c7c0 2832 cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB;
c66ac9db
NB
2833 break;
2834 case SERVICE_ACTION_IN:
5bda90c8
CH
2835 switch (cmd->t_task_cdb[1] & 0x1f) {
2836 case SAI_READ_CAPACITY_16:
2837 if (!passthrough)
2838 cmd->execute_task =
2839 target_emulate_readcapacity_16;
2840 break;
2841 default:
2842 if (passthrough)
2843 break;
2844
2845 pr_err("Unsupported SA: 0x%02x\n",
2846 cmd->t_task_cdb[1] & 0x1f);
2847 goto out_unsupported_cdb;
2848 }
2849 /*FALLTHROUGH*/
c66ac9db
NB
2850 case ACCESS_CONTROL_IN:
2851 case ACCESS_CONTROL_OUT:
2852 case EXTENDED_COPY:
2853 case READ_ATTRIBUTE:
2854 case RECEIVE_COPY_RESULTS:
2855 case WRITE_ATTRIBUTE:
2856 size = (cdb[10] << 24) | (cdb[11] << 16) |
2857 (cdb[12] << 8) | cdb[13];
05d1c7c0 2858 cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB;
c66ac9db
NB
2859 break;
2860 case RECEIVE_DIAGNOSTIC:
2861 case SEND_DIAGNOSTIC:
2862 size = (cdb[3] << 8) | cdb[4];
05d1c7c0 2863 cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB;
c66ac9db
NB
2864 break;
2865/* #warning FIXME: Figure out correct GPCMD_READ_CD blocksize. */
2866#if 0
2867 case GPCMD_READ_CD:
2868 sectors = (cdb[6] << 16) + (cdb[7] << 8) + cdb[8];
2869 size = (2336 * sectors);
05d1c7c0 2870 cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB;
c66ac9db
NB
2871 break;
2872#endif
2873 case READ_TOC:
2874 size = cdb[8];
05d1c7c0 2875 cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB;
c66ac9db
NB
2876 break;
2877 case REQUEST_SENSE:
2878 size = cdb[4];
05d1c7c0 2879 cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB;
5bda90c8
CH
2880 if (!passthrough)
2881 cmd->execute_task = target_emulate_request_sense;
c66ac9db
NB
2882 break;
2883 case READ_ELEMENT_STATUS:
2884 size = 65536 * cdb[7] + 256 * cdb[8] + cdb[9];
05d1c7c0 2885 cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB;
c66ac9db
NB
2886 break;
2887 case WRITE_BUFFER:
2888 size = (cdb[6] << 16) + (cdb[7] << 8) + cdb[8];
05d1c7c0 2889 cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB;
c66ac9db
NB
2890 break;
2891 case RESERVE:
2892 case RESERVE_10:
2893 /*
2894 * The SPC-2 RESERVE does not contain a size in the SCSI CDB.
2895 * Assume the passthrough or $FABRIC_MOD will tell us about it.
2896 */
2897 if (cdb[0] == RESERVE_10)
2898 size = (cdb[7] << 8) | cdb[8];
2899 else
2900 size = cmd->data_length;
2901
2902 /*
2903 * Setup the legacy emulated handler for SPC-2 and
2904 * >= SPC-3 compatible reservation handling (CRH=1)
2905 * Otherwise, we assume the underlying SCSI logic
2906 * is running in SPC_PASSTHROUGH, and wants reservations
2907 * emulation disabled.
2908 */
e76a35d6
CH
2909 if (su_dev->t10_pr.res_type != SPC_PASSTHROUGH)
2910 cmd->execute_task = target_scsi2_reservation_reserve;
c66ac9db
NB
2911 cmd->se_cmd_flags |= SCF_SCSI_NON_DATA_CDB;
2912 break;
2913 case RELEASE:
2914 case RELEASE_10:
2915 /*
2916 * The SPC-2 RELEASE does not contain a size in the SCSI CDB.
2917 * Assume the passthrough or $FABRIC_MOD will tell us about it.
2918 */
2919 if (cdb[0] == RELEASE_10)
2920 size = (cdb[7] << 8) | cdb[8];
2921 else
2922 size = cmd->data_length;
2923
e76a35d6
CH
2924 if (su_dev->t10_pr.res_type != SPC_PASSTHROUGH)
2925 cmd->execute_task = target_scsi2_reservation_release;
c66ac9db
NB
2926 cmd->se_cmd_flags |= SCF_SCSI_NON_DATA_CDB;
2927 break;
2928 case SYNCHRONIZE_CACHE:
2929 case 0x91: /* SYNCHRONIZE_CACHE_16: */
2930 /*
2931 * Extract LBA and range to be flushed for emulated SYNCHRONIZE_CACHE
2932 */
2933 if (cdb[0] == SYNCHRONIZE_CACHE) {
2934 sectors = transport_get_sectors_10(cdb, cmd, &sector_ret);
a1d8b49a 2935 cmd->t_task_lba = transport_lba_32(cdb);
c66ac9db
NB
2936 } else {
2937 sectors = transport_get_sectors_16(cdb, cmd, &sector_ret);
a1d8b49a 2938 cmd->t_task_lba = transport_lba_64(cdb);
c66ac9db
NB
2939 }
2940 if (sector_ret)
2941 goto out_unsupported_cdb;
2942
2943 size = transport_get_size(sectors, cdb, cmd);
2944 cmd->se_cmd_flags |= SCF_SCSI_NON_DATA_CDB;
2945
5bda90c8 2946 if (passthrough)
c66ac9db 2947 break;
5bda90c8 2948
c66ac9db
NB
2949 /*
2950 * Check to ensure that LBA + Range does not exceed past end of
7abbe7f3 2951 * device for IBLOCK and FILEIO ->do_sync_cache() backend calls
c66ac9db 2952 */
7abbe7f3
NB
2953 if ((cmd->t_task_lba != 0) || (sectors != 0)) {
2954 if (transport_cmd_get_valid_sectors(cmd) < 0)
2955 goto out_invalid_cdb_field;
2956 }
5bda90c8 2957 cmd->execute_task = target_emulate_synchronize_cache;
c66ac9db
NB
2958 break;
2959 case UNMAP:
2960 size = get_unaligned_be16(&cdb[7]);
05d1c7c0 2961 cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB;
5bda90c8
CH
2962 if (!passthrough)
2963 cmd->execute_task = target_emulate_unmap;
c66ac9db
NB
2964 break;
2965 case WRITE_SAME_16:
2966 sectors = transport_get_sectors_16(cdb, cmd, &sector_ret);
2967 if (sector_ret)
2968 goto out_unsupported_cdb;
dd3a5ad8 2969
6708bb27 2970 if (sectors)
12850626 2971 size = transport_get_size(1, cdb, cmd);
6708bb27
AG
2972 else {
2973 pr_err("WSNZ=1, WRITE_SAME w/sectors=0 not supported\n");
2974 goto out_invalid_cdb_field;
2975 }
dd3a5ad8 2976
5db0753b 2977 cmd->t_task_lba = get_unaligned_be64(&cdb[2]);
706d5860
NB
2978 cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB;
2979
2980 if (target_check_write_same_discard(&cdb[1], dev) < 0)
2981 goto out_invalid_cdb_field;
5bda90c8
CH
2982 if (!passthrough)
2983 cmd->execute_task = target_emulate_write_same;
706d5860
NB
2984 break;
2985 case WRITE_SAME:
2986 sectors = transport_get_sectors_10(cdb, cmd, &sector_ret);
2987 if (sector_ret)
2988 goto out_unsupported_cdb;
2989
2990 if (sectors)
12850626 2991 size = transport_get_size(1, cdb, cmd);
706d5860
NB
2992 else {
2993 pr_err("WSNZ=1, WRITE_SAME w/sectors=0 not supported\n");
2994 goto out_invalid_cdb_field;
c66ac9db 2995 }
706d5860
NB
2996
2997 cmd->t_task_lba = get_unaligned_be32(&cdb[2]);
c66ac9db 2998 cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB;
706d5860
NB
2999 /*
3000 * Follow sbcr26 with WRITE_SAME (10) and check for the existence
3001 * of byte 1 bit 3 UNMAP instead of original reserved field
3002 */
3003 if (target_check_write_same_discard(&cdb[1], dev) < 0)
3004 goto out_invalid_cdb_field;
5bda90c8
CH
3005 if (!passthrough)
3006 cmd->execute_task = target_emulate_write_same;
c66ac9db
NB
3007 break;
3008 case ALLOW_MEDIUM_REMOVAL:
c66ac9db 3009 case ERASE:
c66ac9db
NB
3010 case REZERO_UNIT:
3011 case SEEK_10:
c66ac9db
NB
3012 case SPACE:
3013 case START_STOP:
3014 case TEST_UNIT_READY:
3015 case VERIFY:
3016 case WRITE_FILEMARKS:
5bda90c8
CH
3017 cmd->se_cmd_flags |= SCF_SCSI_NON_DATA_CDB;
3018 if (!passthrough)
3019 cmd->execute_task = target_emulate_noop;
3020 break;
3021 case GPCMD_CLOSE_TRACK:
3022 case INITIALIZE_ELEMENT_STATUS:
3023 case GPCMD_LOAD_UNLOAD:
3024 case GPCMD_SET_SPEED:
c66ac9db
NB
3025 case MOVE_MEDIUM:
3026 cmd->se_cmd_flags |= SCF_SCSI_NON_DATA_CDB;
3027 break;
3028 case REPORT_LUNS:
e76a35d6 3029 cmd->execute_task = target_report_luns;
c66ac9db
NB
3030 size = (cdb[6] << 24) | (cdb[7] << 16) | (cdb[8] << 8) | cdb[9];
3031 /*
3032 * Do implicit HEAD_OF_QUEUE processing for REPORT_LUNS
3033 * See spc4r17 section 5.3
3034 */
5951146d 3035 if (cmd->se_dev->dev_task_attr_type == SAM_TASK_ATTR_EMULATED)
e66ecd50 3036 cmd->sam_task_attr = MSG_HEAD_TAG;
05d1c7c0 3037 cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB;
c66ac9db
NB
3038 break;
3039 default:
6708bb27 3040 pr_warn("TARGET_CORE[%s]: Unsupported SCSI Opcode"
c66ac9db 3041 " 0x%02x, sending CHECK_CONDITION.\n",
e3d6f909 3042 cmd->se_tfo->get_fabric_name(), cdb[0]);
c66ac9db
NB
3043 goto out_unsupported_cdb;
3044 }
3045
3046 if (size != cmd->data_length) {
6708bb27 3047 pr_warn("TARGET_CORE[%s]: Expected Transfer Length:"
c66ac9db 3048 " %u does not match SCSI CDB Length: %u for SAM Opcode:"
e3d6f909 3049 " 0x%02x\n", cmd->se_tfo->get_fabric_name(),
c66ac9db
NB
3050 cmd->data_length, size, cdb[0]);
3051
3052 cmd->cmd_spdtl = size;
3053
3054 if (cmd->data_direction == DMA_TO_DEVICE) {
6708bb27 3055 pr_err("Rejecting underflow/overflow"
c66ac9db
NB
3056 " WRITE data\n");
3057 goto out_invalid_cdb_field;
3058 }
3059 /*
3060 * Reject READ_* or WRITE_* with overflow/underflow for
3061 * type SCF_SCSI_DATA_SG_IO_CDB.
3062 */
6708bb27
AG
3063 if (!ret && (dev->se_sub_dev->se_dev_attrib.block_size != 512)) {
3064 pr_err("Failing OVERFLOW/UNDERFLOW for LBA op"
c66ac9db 3065 " CDB on non 512-byte sector setup subsystem"
e3d6f909 3066 " plugin: %s\n", dev->transport->name);
c66ac9db
NB
3067 /* Returns CHECK_CONDITION + INVALID_CDB_FIELD */
3068 goto out_invalid_cdb_field;
3069 }
3070
3071 if (size > cmd->data_length) {
3072 cmd->se_cmd_flags |= SCF_OVERFLOW_BIT;
3073 cmd->residual_count = (size - cmd->data_length);
3074 } else {
3075 cmd->se_cmd_flags |= SCF_UNDERFLOW_BIT;
3076 cmd->residual_count = (cmd->data_length - size);
3077 }
3078 cmd->data_length = size;
3079 }
3080
5bda90c8
CH
3081 /* reject any command that we don't have a handler for */
3082 if (!(passthrough || cmd->execute_task ||
3083 (cmd->se_cmd_flags & SCF_SCSI_DATA_SG_IO_CDB)))
3084 goto out_unsupported_cdb;
3085
c66ac9db
NB
3086 transport_set_supported_SAM_opcode(cmd);
3087 return ret;
3088
3089out_unsupported_cdb:
3090 cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION;
3091 cmd->scsi_sense_reason = TCM_UNSUPPORTED_SCSI_OPCODE;
5951146d 3092 return -EINVAL;
c66ac9db
NB
3093out_invalid_cdb_field:
3094 cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION;
3095 cmd->scsi_sense_reason = TCM_INVALID_CDB_FIELD;
5951146d 3096 return -EINVAL;
c66ac9db
NB
3097}
3098
c66ac9db 3099/*
35e0e757 3100 * Called from I/O completion to determine which dormant/delayed
c66ac9db
NB
3101 * and ordered cmds need to have their tasks added to the execution queue.
3102 */
3103static void transport_complete_task_attr(struct se_cmd *cmd)
3104{
5951146d 3105 struct se_device *dev = cmd->se_dev;
c66ac9db
NB
3106 struct se_cmd *cmd_p, *cmd_tmp;
3107 int new_active_tasks = 0;
3108
e66ecd50 3109 if (cmd->sam_task_attr == MSG_SIMPLE_TAG) {
c66ac9db
NB
3110 atomic_dec(&dev->simple_cmds);
3111 smp_mb__after_atomic_dec();
3112 dev->dev_cur_ordered_id++;
6708bb27 3113 pr_debug("Incremented dev->dev_cur_ordered_id: %u for"
c66ac9db
NB
3114 " SIMPLE: %u\n", dev->dev_cur_ordered_id,
3115 cmd->se_ordered_id);
e66ecd50 3116 } else if (cmd->sam_task_attr == MSG_HEAD_TAG) {
c66ac9db 3117 dev->dev_cur_ordered_id++;
6708bb27 3118 pr_debug("Incremented dev_cur_ordered_id: %u for"
c66ac9db
NB
3119 " HEAD_OF_QUEUE: %u\n", dev->dev_cur_ordered_id,
3120 cmd->se_ordered_id);
e66ecd50 3121 } else if (cmd->sam_task_attr == MSG_ORDERED_TAG) {
c66ac9db
NB
3122 atomic_dec(&dev->dev_ordered_sync);
3123 smp_mb__after_atomic_dec();
c66ac9db
NB
3124
3125 dev->dev_cur_ordered_id++;
6708bb27 3126 pr_debug("Incremented dev_cur_ordered_id: %u for ORDERED:"
c66ac9db
NB
3127 " %u\n", dev->dev_cur_ordered_id, cmd->se_ordered_id);
3128 }
3129 /*
3130 * Process all commands up to the last received
3131 * ORDERED task attribute which requires another blocking
3132 * boundary
3133 */
3134 spin_lock(&dev->delayed_cmd_lock);
3135 list_for_each_entry_safe(cmd_p, cmd_tmp,
5951146d 3136 &dev->delayed_cmd_list, se_delayed_node) {
c66ac9db 3137
5951146d 3138 list_del(&cmd_p->se_delayed_node);
c66ac9db
NB
3139 spin_unlock(&dev->delayed_cmd_lock);
3140
6708bb27 3141 pr_debug("Calling add_tasks() for"
c66ac9db
NB
3142 " cmd_p: 0x%02x Task Attr: 0x%02x"
3143 " Dormant -> Active, se_ordered_id: %u\n",
6708bb27 3144 cmd_p->t_task_cdb[0],
c66ac9db
NB
3145 cmd_p->sam_task_attr, cmd_p->se_ordered_id);
3146
3147 transport_add_tasks_from_cmd(cmd_p);
3148 new_active_tasks++;
3149
3150 spin_lock(&dev->delayed_cmd_lock);
e66ecd50 3151 if (cmd_p->sam_task_attr == MSG_ORDERED_TAG)
c66ac9db
NB
3152 break;
3153 }
3154 spin_unlock(&dev->delayed_cmd_lock);
3155 /*
3156 * If new tasks have become active, wake up the transport thread
3157 * to do the processing of the Active tasks.
3158 */
3159 if (new_active_tasks != 0)
e3d6f909 3160 wake_up_interruptible(&dev->dev_queue_obj.thread_wq);
c66ac9db
NB
3161}
3162
e057f533 3163static void transport_complete_qf(struct se_cmd *cmd)
07bde79a
NB
3164{
3165 int ret = 0;
3166
e057f533
CH
3167 if (cmd->se_dev->dev_task_attr_type == SAM_TASK_ATTR_EMULATED)
3168 transport_complete_task_attr(cmd);
3169
3170 if (cmd->se_cmd_flags & SCF_TRANSPORT_TASK_SENSE) {
3171 ret = cmd->se_tfo->queue_status(cmd);
3172 if (ret)
3173 goto out;
3174 }
07bde79a
NB
3175
3176 switch (cmd->data_direction) {
3177 case DMA_FROM_DEVICE:
3178 ret = cmd->se_tfo->queue_data_in(cmd);
3179 break;
3180 case DMA_TO_DEVICE:
ec98f782 3181 if (cmd->t_bidi_data_sg) {
07bde79a
NB
3182 ret = cmd->se_tfo->queue_data_in(cmd);
3183 if (ret < 0)
e057f533 3184 break;
07bde79a
NB
3185 }
3186 /* Fall through for DMA_TO_DEVICE */
3187 case DMA_NONE:
3188 ret = cmd->se_tfo->queue_status(cmd);
3189 break;
3190 default:
3191 break;
3192 }
3193
e057f533
CH
3194out:
3195 if (ret < 0) {
3196 transport_handle_queue_full(cmd, cmd->se_dev);
3197 return;
3198 }
3199 transport_lun_remove_cmd(cmd);
3200 transport_cmd_check_stop_to_fabric(cmd);
07bde79a
NB
3201}
3202
3203static void transport_handle_queue_full(
3204 struct se_cmd *cmd,
e057f533 3205 struct se_device *dev)
07bde79a
NB
3206{
3207 spin_lock_irq(&dev->qf_cmd_lock);
07bde79a
NB
3208 list_add_tail(&cmd->se_qf_node, &cmd->se_dev->qf_cmd_list);
3209 atomic_inc(&dev->dev_qf_count);
3210 smp_mb__after_atomic_inc();
3211 spin_unlock_irq(&cmd->se_dev->qf_cmd_lock);
3212
3213 schedule_work(&cmd->se_dev->qf_work_queue);
3214}
3215
35e0e757 3216static void target_complete_ok_work(struct work_struct *work)
c66ac9db 3217{
35e0e757 3218 struct se_cmd *cmd = container_of(work, struct se_cmd, work);
07bde79a 3219 int reason = 0, ret;
35e0e757 3220
c66ac9db
NB
3221 /*
3222 * Check if we need to move delayed/dormant tasks from cmds on the
3223 * delayed execution list after a HEAD_OF_QUEUE or ORDERED Task
3224 * Attribute.
3225 */
5951146d 3226 if (cmd->se_dev->dev_task_attr_type == SAM_TASK_ATTR_EMULATED)
c66ac9db 3227 transport_complete_task_attr(cmd);
07bde79a
NB
3228 /*
3229 * Check to schedule QUEUE_FULL work, or execute an existing
3230 * cmd->transport_qf_callback()
3231 */
3232 if (atomic_read(&cmd->se_dev->dev_qf_count) != 0)
3233 schedule_work(&cmd->se_dev->qf_work_queue);
3234
c66ac9db
NB
3235 /*
3236 * Check if we need to retrieve a sense buffer from
3237 * the struct se_cmd in question.
3238 */
3239 if (cmd->se_cmd_flags & SCF_TRANSPORT_TASK_SENSE) {
3240 if (transport_get_sense_data(cmd) < 0)
3241 reason = TCM_NON_EXISTENT_LUN;
3242
3243 /*
3244 * Only set when a struct se_task->task_scsi_status returned
3245 * a non-GOOD status.
3246 */
3247 if (cmd->scsi_status) {
07bde79a 3248 ret = transport_send_check_condition_and_sense(
c66ac9db 3249 cmd, reason, 1);
f147abb4 3250 if (ret == -EAGAIN || ret == -ENOMEM)
07bde79a
NB
3251 goto queue_full;
3252
c66ac9db
NB
3253 transport_lun_remove_cmd(cmd);
3254 transport_cmd_check_stop_to_fabric(cmd);
3255 return;
3256 }
3257 }
3258 /*
25985edc 3259 * Check for a callback, used amongst other things by
c66ac9db
NB
3260 * XDWRITE_READ_10 emulation.
3261 */
3262 if (cmd->transport_complete_callback)
3263 cmd->transport_complete_callback(cmd);
3264
3265 switch (cmd->data_direction) {
3266 case DMA_FROM_DEVICE:
3267 spin_lock(&cmd->se_lun->lun_sep_lock);
e3d6f909
AG
3268 if (cmd->se_lun->lun_sep) {
3269 cmd->se_lun->lun_sep->sep_stats.tx_data_octets +=
c66ac9db
NB
3270 cmd->data_length;
3271 }
3272 spin_unlock(&cmd->se_lun->lun_sep_lock);
c66ac9db 3273
07bde79a 3274 ret = cmd->se_tfo->queue_data_in(cmd);
f147abb4 3275 if (ret == -EAGAIN || ret == -ENOMEM)
07bde79a 3276 goto queue_full;
c66ac9db
NB
3277 break;
3278 case DMA_TO_DEVICE:
3279 spin_lock(&cmd->se_lun->lun_sep_lock);
e3d6f909
AG
3280 if (cmd->se_lun->lun_sep) {
3281 cmd->se_lun->lun_sep->sep_stats.rx_data_octets +=
c66ac9db
NB
3282 cmd->data_length;
3283 }
3284 spin_unlock(&cmd->se_lun->lun_sep_lock);
3285 /*
3286 * Check if we need to send READ payload for BIDI-COMMAND
3287 */
ec98f782 3288 if (cmd->t_bidi_data_sg) {
c66ac9db 3289 spin_lock(&cmd->se_lun->lun_sep_lock);
e3d6f909
AG
3290 if (cmd->se_lun->lun_sep) {
3291 cmd->se_lun->lun_sep->sep_stats.tx_data_octets +=
c66ac9db
NB
3292 cmd->data_length;
3293 }
3294 spin_unlock(&cmd->se_lun->lun_sep_lock);
07bde79a 3295 ret = cmd->se_tfo->queue_data_in(cmd);
f147abb4 3296 if (ret == -EAGAIN || ret == -ENOMEM)
07bde79a 3297 goto queue_full;
c66ac9db
NB
3298 break;
3299 }
3300 /* Fall through for DMA_TO_DEVICE */
3301 case DMA_NONE:
07bde79a 3302 ret = cmd->se_tfo->queue_status(cmd);
f147abb4 3303 if (ret == -EAGAIN || ret == -ENOMEM)
07bde79a 3304 goto queue_full;
c66ac9db
NB
3305 break;
3306 default:
3307 break;
3308 }
3309
3310 transport_lun_remove_cmd(cmd);
3311 transport_cmd_check_stop_to_fabric(cmd);
07bde79a
NB
3312 return;
3313
3314queue_full:
6708bb27 3315 pr_debug("Handling complete_ok QUEUE_FULL: se_cmd: %p,"
07bde79a 3316 " data_direction: %d\n", cmd, cmd->data_direction);
e057f533
CH
3317 cmd->t_state = TRANSPORT_COMPLETE_QF_OK;
3318 transport_handle_queue_full(cmd, cmd->se_dev);
c66ac9db
NB
3319}
3320
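/*
 * Editorial note on the queue_full path above: a fabric queue_data_in() or
 * queue_status() returning -EAGAIN or -ENOMEM does not fail the command.
 * The command is parked in TRANSPORT_COMPLETE_QF_OK state, added to the
 * device's qf_cmd_list by transport_handle_queue_full(), and retried later
 * when qf_work_queue / the processing thread runs.
 */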
3321static void transport_free_dev_tasks(struct se_cmd *cmd)
3322{
3323 struct se_task *task, *task_tmp;
3324 unsigned long flags;
0c2cfe5f 3325 LIST_HEAD(dispose_list);
c66ac9db 3326
a1d8b49a 3327 spin_lock_irqsave(&cmd->t_state_lock, flags);
c66ac9db 3328 list_for_each_entry_safe(task, task_tmp,
a1d8b49a 3329 &cmd->t_task_list, t_list) {
0c2cfe5f
CH
3330 if (!(task->task_flags & TF_ACTIVE))
3331 list_move_tail(&task->t_list, &dispose_list);
3332 }
3333 spin_unlock_irqrestore(&cmd->t_state_lock, flags);
3334
3335 while (!list_empty(&dispose_list)) {
3336 task = list_first_entry(&dispose_list, struct se_task, t_list);
c66ac9db 3337
af3f00c7
CH
3338 if (task->task_sg != cmd->t_data_sg &&
3339 task->task_sg != cmd->t_bidi_data_sg)
3340 kfree(task->task_sg);
c66ac9db
NB
3341
3342 list_del(&task->t_list);
3343
42bf829e 3344 cmd->se_dev->transport->free_task(task);
c66ac9db 3345 }
c66ac9db
NB
3346}
3347
6708bb27 3348static inline void transport_free_sgl(struct scatterlist *sgl, int nents)
c66ac9db 3349{
ec98f782 3350 struct scatterlist *sg;
ec98f782 3351 int count;
c66ac9db 3352
6708bb27
AG
3353 for_each_sg(sgl, sg, nents, count)
3354 __free_page(sg_page(sg));
c66ac9db 3355
6708bb27
AG
3356 kfree(sgl);
3357}
c66ac9db 3358
6708bb27
AG
3359static inline void transport_free_pages(struct se_cmd *cmd)
3360{
3361 if (cmd->se_cmd_flags & SCF_PASSTHROUGH_SG_TO_MEM_NOALLOC)
3362 return;
3363
3364 transport_free_sgl(cmd->t_data_sg, cmd->t_data_nents);
ec98f782
AG
3365 cmd->t_data_sg = NULL;
3366 cmd->t_data_nents = 0;
c66ac9db 3367
6708bb27 3368 transport_free_sgl(cmd->t_bidi_data_sg, cmd->t_bidi_data_nents);
ec98f782
AG
3369 cmd->t_bidi_data_sg = NULL;
3370 cmd->t_bidi_data_nents = 0;
c66ac9db
NB
3371}
3372
e26d99ae
CH
3373/**
3374 * transport_release_cmd - free a command
3375 * @cmd: command to free
3376 *
3377 * This routine unconditionally frees a command, and reference counting
3378 * or list removal must be done in the caller.
3379 */
3380static void transport_release_cmd(struct se_cmd *cmd)
3381{
3382 BUG_ON(!cmd->se_tfo);
3383
3384 if (cmd->se_tmr_req)
3385 core_tmr_release_req(cmd->se_tmr_req);
3386 if (cmd->t_task_cdb != cmd->__t_task_cdb)
3387 kfree(cmd->t_task_cdb);
3388 /*
7481deb4
NB
3389 * If this cmd has been setup with target_get_sess_cmd(), drop
3390 * the kref and call ->release_cmd() in kref callback.
e26d99ae 3391 */
7481deb4
NB
3392 if (cmd->check_release != 0) {
3393 target_put_sess_cmd(cmd->se_sess, cmd);
3394 return;
3395 }
e26d99ae
CH
3396 cmd->se_tfo->release_cmd(cmd);
3397}
3398
d3df7825
CH
3399/**
3400 * transport_put_cmd - release a reference to a command
3401 * @cmd: command to release
3402 *
3403 * This routine releases our reference to the command and frees it if possible.
3404 */
39c05f32 3405static void transport_put_cmd(struct se_cmd *cmd)
c66ac9db
NB
3406{
3407 unsigned long flags;
4911e3cc 3408 int free_tasks = 0;
c66ac9db 3409
a1d8b49a 3410 spin_lock_irqsave(&cmd->t_state_lock, flags);
4911e3cc
CH
3411 if (atomic_read(&cmd->t_fe_count)) {
3412 if (!atomic_dec_and_test(&cmd->t_fe_count))
3413 goto out_busy;
3414 }
3415
3416 if (atomic_read(&cmd->t_se_count)) {
3417 if (!atomic_dec_and_test(&cmd->t_se_count))
3418 goto out_busy;
3419 }
3420
3421 if (atomic_read(&cmd->transport_dev_active)) {
3422 atomic_set(&cmd->transport_dev_active, 0);
3423 transport_all_task_dev_remove_state(cmd);
3424 free_tasks = 1;
c66ac9db 3425 }
a1d8b49a 3426 spin_unlock_irqrestore(&cmd->t_state_lock, flags);
c66ac9db 3427
4911e3cc
CH
3428 if (free_tasks != 0)
3429 transport_free_dev_tasks(cmd);
d3df7825 3430
c66ac9db 3431 transport_free_pages(cmd);
31afc39c 3432 transport_release_cmd(cmd);
39c05f32 3433 return;
4911e3cc
CH
3434out_busy:
3435 spin_unlock_irqrestore(&cmd->t_state_lock, flags);
c66ac9db
NB
3436}
3437
c66ac9db 3438/*
ec98f782
AG
3439 * transport_generic_map_mem_to_cmd - Use fabric-alloced pages instead of
3440 * allocating in the core.
c66ac9db
NB
3441 * @cmd: Associated se_cmd descriptor
 3442	 * @sgl: SGL style memory for TCM WRITE / READ
 3443	 * @sgl_count: Number of SGL elements
 3444	 * @sgl_bidi: SGL style memory for TCM BIDI READ
 3445	 * @sgl_bidi_count: Number of BIDI READ SGL elements
3446 *
 3447	 * Return: nonzero if cmd was rejected for -ENOMEM or improper usage
3448 * of parameters.
3449 */
3450int transport_generic_map_mem_to_cmd(
3451 struct se_cmd *cmd,
5951146d
AG
3452 struct scatterlist *sgl,
3453 u32 sgl_count,
3454 struct scatterlist *sgl_bidi,
3455 u32 sgl_bidi_count)
c66ac9db 3456{
5951146d 3457 if (!sgl || !sgl_count)
c66ac9db 3458 return 0;
c66ac9db 3459
c66ac9db
NB
3460 if ((cmd->se_cmd_flags & SCF_SCSI_DATA_SG_IO_CDB) ||
3461 (cmd->se_cmd_flags & SCF_SCSI_CONTROL_SG_IO_CDB)) {
fef58a60
NB
3462 /*
3463 * Reject SCSI data overflow with map_mem_to_cmd() as incoming
 3464		 * scatterlists have already been set to follow what the fabric
3465 * passes for the original expected data transfer length.
3466 */
3467 if (cmd->se_cmd_flags & SCF_OVERFLOW_BIT) {
3468 pr_warn("Rejecting SCSI DATA overflow for fabric using"
3469 " SCF_PASSTHROUGH_SG_TO_MEM_NOALLOC\n");
3470 cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION;
3471 cmd->scsi_sense_reason = TCM_INVALID_CDB_FIELD;
3472 return -EINVAL;
3473 }
c66ac9db 3474
ec98f782
AG
3475 cmd->t_data_sg = sgl;
3476 cmd->t_data_nents = sgl_count;
c66ac9db 3477
ec98f782
AG
3478 if (sgl_bidi && sgl_bidi_count) {
3479 cmd->t_bidi_data_sg = sgl_bidi;
3480 cmd->t_bidi_data_nents = sgl_bidi_count;
c66ac9db
NB
3481 }
3482 cmd->se_cmd_flags |= SCF_PASSTHROUGH_SG_TO_MEM_NOALLOC;
c66ac9db
NB
3483 }
3484
3485 return 0;
3486}
3487EXPORT_SYMBOL(transport_generic_map_mem_to_cmd);
3488
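/*
 * Editorial sketch (not part of this file): a minimal, hypothetical example
 * of a fabric module handing its own pre-allocated scatterlist to the core
 * instead of letting transport_generic_get_mem() allocate pages.  The
 * function name is an assumption for illustration only.
 */
static int example_fabric_map_data(struct se_cmd *se_cmd,
				   struct scatterlist *sgl, u32 sgl_count)
{
	/*
	 * The core takes the SGL as-is and marks the command with
	 * SCF_PASSTHROUGH_SG_TO_MEM_NOALLOC; a SCSI data overflow is
	 * rejected with -EINVAL and TCM_INVALID_CDB_FIELD sense.
	 */
	return transport_generic_map_mem_to_cmd(se_cmd, sgl, sgl_count,
						NULL, 0);
}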
4949314c 3489void *transport_kmap_data_sg(struct se_cmd *cmd)
05d1c7c0 3490{
ec98f782 3491 struct scatterlist *sg = cmd->t_data_sg;
4949314c
AG
3492 struct page **pages;
3493 int i;
05d1c7c0 3494
ec98f782 3495 BUG_ON(!sg);
05d1c7c0 3496 /*
ec98f782
AG
3497 * We need to take into account a possible offset here for fabrics like
 3498	 * tcm_loop that may be using a contiguous buffer from the SCSI midlayer for
3499 * control CDBs passed as SGLs via transport_generic_map_mem_to_cmd()
05d1c7c0 3500 */
4949314c
AG
3501 if (!cmd->t_data_nents)
3502 return NULL;
3503 else if (cmd->t_data_nents == 1)
3504 return kmap(sg_page(sg)) + sg->offset;
3505
3506 /* >1 page. use vmap */
3507 pages = kmalloc(sizeof(*pages) * cmd->t_data_nents, GFP_KERNEL);
3508 if (!pages)
3509 return NULL;
3510
3511 /* convert sg[] to pages[] */
3512 for_each_sg(cmd->t_data_sg, sg, cmd->t_data_nents, i) {
3513 pages[i] = sg_page(sg);
3514 }
3515
3516 cmd->t_data_vmap = vmap(pages, cmd->t_data_nents, VM_MAP, PAGE_KERNEL);
3517 kfree(pages);
3518 if (!cmd->t_data_vmap)
3519 return NULL;
3520
3521 return cmd->t_data_vmap + cmd->t_data_sg[0].offset;
05d1c7c0 3522}
4949314c 3523EXPORT_SYMBOL(transport_kmap_data_sg);
05d1c7c0 3524
4949314c 3525void transport_kunmap_data_sg(struct se_cmd *cmd)
05d1c7c0 3526{
4949314c
AG
3527 if (!cmd->t_data_nents)
3528 return;
3529 else if (cmd->t_data_nents == 1)
3530 kunmap(sg_page(cmd->t_data_sg));
3531
3532 vunmap(cmd->t_data_vmap);
3533 cmd->t_data_vmap = NULL;
05d1c7c0 3534}
4949314c 3535EXPORT_SYMBOL(transport_kunmap_data_sg);
05d1c7c0 3536
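/*
 * Editorial sketch: pairing transport_kmap_data_sg() with
 * transport_kunmap_data_sg() to get a temporary linear view of the data
 * buffer.  The function name and the peek itself are illustrative only.
 */
static void example_peek_payload(struct se_cmd *cmd)
{
	unsigned char *buf;

	buf = transport_kmap_data_sg(cmd);	/* kmap() or vmap() of t_data_sg */
	if (!buf)
		return;

	pr_debug("example: first payload byte: 0x%02x\n", buf[0]);

	transport_kunmap_data_sg(cmd);		/* kunmap() or vunmap() again */
}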
c66ac9db 3537static int
05d1c7c0 3538transport_generic_get_mem(struct se_cmd *cmd)
c66ac9db 3539{
ec98f782
AG
3540 u32 length = cmd->data_length;
3541 unsigned int nents;
3542 struct page *page;
9db9da33 3543 gfp_t zero_flag;
ec98f782 3544 int i = 0;
c66ac9db 3545
ec98f782
AG
3546 nents = DIV_ROUND_UP(length, PAGE_SIZE);
3547 cmd->t_data_sg = kmalloc(sizeof(struct scatterlist) * nents, GFP_KERNEL);
3548 if (!cmd->t_data_sg)
3549 return -ENOMEM;
c66ac9db 3550
ec98f782
AG
3551 cmd->t_data_nents = nents;
3552 sg_init_table(cmd->t_data_sg, nents);
c66ac9db 3553
9db9da33 3554 zero_flag = cmd->se_cmd_flags & SCF_SCSI_DATA_SG_IO_CDB ? 0 : __GFP_ZERO;
3555
ec98f782
AG
3556 while (length) {
3557 u32 page_len = min_t(u32, length, PAGE_SIZE);
9db9da33 3558 page = alloc_page(GFP_KERNEL | zero_flag);
ec98f782
AG
3559 if (!page)
3560 goto out;
c66ac9db 3561
ec98f782
AG
3562 sg_set_page(&cmd->t_data_sg[i], page, page_len, 0);
3563 length -= page_len;
3564 i++;
c66ac9db 3565 }
c66ac9db 3566 return 0;
c66ac9db 3567
ec98f782
AG
3568out:
3569 while (i >= 0) {
3570 __free_page(sg_page(&cmd->t_data_sg[i]));
3571 i--;
c66ac9db 3572 }
ec98f782
AG
3573 kfree(cmd->t_data_sg);
3574 cmd->t_data_sg = NULL;
3575 return -ENOMEM;
c66ac9db
NB
3576}
3577
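/*
 * Editorial note, worked example for the allocation above: with
 * cmd->data_length = 9000 and PAGE_SIZE = 4096, nents = DIV_ROUND_UP(9000,
 * 4096) = 3, so three pages are allocated and described by scatterlist
 * entries of 4096, 4096 and 808 bytes respectively.
 */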
a1d8b49a
AG
3578/* Reduce sectors if they are too long for the device */
3579static inline sector_t transport_limit_task_sectors(
c66ac9db
NB
3580 struct se_device *dev,
3581 unsigned long long lba,
a1d8b49a 3582 sector_t sectors)
c66ac9db 3583{
a1d8b49a 3584 sectors = min_t(sector_t, sectors, dev->se_sub_dev->se_dev_attrib.max_sectors);
c66ac9db 3585
a1d8b49a
AG
3586 if (dev->transport->get_device_type(dev) == TYPE_DISK)
3587 if ((lba + sectors) > transport_dev_end_lba(dev))
3588 sectors = ((transport_dev_end_lba(dev) - lba) + 1);
c66ac9db 3589
a1d8b49a 3590 return sectors;
c66ac9db
NB
3591}
3592
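/*
 * Editorial note, worked example for the clamping above: with
 * max_sectors = 128, a 256-sector request is first reduced to 128; if
 * transport_dev_end_lba() returns 99 and lba = 90, the TYPE_DISK branch
 * further trims the request to (99 - 90) + 1 = 10 sectors.
 */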
c66ac9db
NB
3593
3594/*
3595 * This function can be used by HW target mode drivers to create a linked
3596 * scatterlist from all contiguously allocated struct se_task->task_sg[].
3597 * This is intended to be called during the completion path by TCM Core
 3598	 * when struct target_core_fabric_ops->task_sg_chaining is enabled.
3599 */
3600void transport_do_task_sg_chain(struct se_cmd *cmd)
3601{
ec98f782
AG
3602 struct scatterlist *sg_first = NULL;
3603 struct scatterlist *sg_prev = NULL;
3604 int sg_prev_nents = 0;
3605 struct scatterlist *sg;
c66ac9db 3606 struct se_task *task;
ec98f782 3607 u32 chained_nents = 0;
c66ac9db
NB
3608 int i;
3609
ec98f782
AG
3610 BUG_ON(!cmd->se_tfo->task_sg_chaining);
3611
c66ac9db
NB
3612 /*
3613 * Walk the struct se_task list and setup scatterlist chains
a1d8b49a 3614 * for each contiguously allocated struct se_task->task_sg[].
c66ac9db 3615 */
a1d8b49a 3616 list_for_each_entry(task, &cmd->t_task_list, t_list) {
ec98f782 3617 if (!task->task_sg)
c66ac9db
NB
3618 continue;
3619
ec98f782
AG
3620 if (!sg_first) {
3621 sg_first = task->task_sg;
6708bb27 3622 chained_nents = task->task_sg_nents;
97868c89 3623 } else {
ec98f782 3624 sg_chain(sg_prev, sg_prev_nents, task->task_sg);
6708bb27 3625 chained_nents += task->task_sg_nents;
97868c89 3626 }
c3c74c7a
NB
3627 /*
3628 * For the padded tasks, use the extra SGL vector allocated
3629 * in transport_allocate_data_tasks() for the sg_prev_nents
04629b7b
CH
3630 * offset into sg_chain() above.
3631 *
3632 * We do not need the padding for the last task (or a single
3633 * task), but in that case we will never use the sg_prev_nents
3634 * value below which would be incorrect.
c3c74c7a 3635 */
04629b7b 3636 sg_prev_nents = (task->task_sg_nents + 1);
ec98f782 3637 sg_prev = task->task_sg;
c66ac9db
NB
3638 }
3639 /*
3640 * Setup the starting pointer and total t_tasks_sg_linked_no including
3641 * padding SGs for linking and to mark the end.
3642 */
a1d8b49a 3643 cmd->t_tasks_sg_chained = sg_first;
ec98f782 3644 cmd->t_tasks_sg_chained_no = chained_nents;
c66ac9db 3645
6708bb27 3646 pr_debug("Setup cmd: %p cmd->t_tasks_sg_chained: %p and"
a1d8b49a
AG
3647 " t_tasks_sg_chained_no: %u\n", cmd, cmd->t_tasks_sg_chained,
3648 cmd->t_tasks_sg_chained_no);
c66ac9db 3649
a1d8b49a
AG
3650 for_each_sg(cmd->t_tasks_sg_chained, sg,
3651 cmd->t_tasks_sg_chained_no, i) {
c66ac9db 3652
6708bb27 3653 pr_debug("SG[%d]: %p page: %p length: %d offset: %d\n",
5951146d 3654 i, sg, sg_page(sg), sg->length, sg->offset);
c66ac9db 3655 if (sg_is_chain(sg))
6708bb27 3656 pr_debug("SG: %p sg_is_chain=1\n", sg);
c66ac9db 3657 if (sg_is_last(sg))
6708bb27 3658 pr_debug("SG: %p sg_is_last=1\n", sg);
c66ac9db 3659 }
c66ac9db
NB
3660}
3661EXPORT_SYMBOL(transport_do_task_sg_chain);
3662
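/*
 * Editorial sketch: how a HW target mode driver with ->task_sg_chaining set
 * might walk the chained list built above, mirroring the debug loop in
 * transport_do_task_sg_chain().  The function name is hypothetical.
 */
static void example_walk_chained_sgl(struct se_cmd *cmd)
{
	struct scatterlist *sg;
	int i;

	for_each_sg(cmd->t_tasks_sg_chained, sg,
			cmd->t_tasks_sg_chained_no, i)
		pr_debug("example: chained sg[%d] page: %p length: %u offset: %u\n",
			i, sg_page(sg), sg->length, sg->offset);
}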
a1d8b49a
AG
3663/*
3664 * Break up cmd into chunks transport can handle
3665 */
38b40067
CH
3666static int
3667transport_allocate_data_tasks(struct se_cmd *cmd,
c66ac9db 3668 enum dma_data_direction data_direction,
38b40067 3669 struct scatterlist *cmd_sg, unsigned int sgl_nents)
c66ac9db 3670{
5951146d 3671 struct se_device *dev = cmd->se_dev;
a3eedc22 3672 int task_count, i;
38b40067
CH
3673 unsigned long long lba;
3674 sector_t sectors, dev_max_sectors;
3675 u32 sector_size;
3676
3677 if (transport_cmd_get_valid_sectors(cmd) < 0)
3678 return -EINVAL;
3679
3680 dev_max_sectors = dev->se_sub_dev->se_dev_attrib.max_sectors;
3681 sector_size = dev->se_sub_dev->se_dev_attrib.block_size;
a1d8b49a 3682
ec98f782 3683 WARN_ON(cmd->data_length % sector_size);
38b40067
CH
3684
3685 lba = cmd->t_task_lba;
ec98f782 3686 sectors = DIV_ROUND_UP(cmd->data_length, sector_size);
277c5f27 3687 task_count = DIV_ROUND_UP_SECTOR_T(sectors, dev_max_sectors);
af3f00c7
CH
3688
3689 /*
3690 * If we need just a single task reuse the SG list in the command
3691 * and avoid a lot of work.
3692 */
3693 if (task_count == 1) {
3694 struct se_task *task;
3695 unsigned long flags;
3696
3697 task = transport_generic_get_task(cmd, data_direction);
3698 if (!task)
3699 return -ENOMEM;
3700
3701 task->task_sg = cmd_sg;
3702 task->task_sg_nents = sgl_nents;
3703
3704 task->task_lba = lba;
3705 task->task_sectors = sectors;
3706 task->task_size = task->task_sectors * sector_size;
3707
3708 spin_lock_irqsave(&cmd->t_state_lock, flags);
3709 list_add_tail(&task->t_list, &cmd->t_task_list);
3710 spin_unlock_irqrestore(&cmd->t_state_lock, flags);
3711
3712 return task_count;
3713 }
3714
ec98f782 3715 for (i = 0; i < task_count; i++) {
38b40067 3716 struct se_task *task;
c3c74c7a 3717 unsigned int task_size, task_sg_nents_padded;
38b40067
CH
3718 struct scatterlist *sg;
3719 unsigned long flags;
ec98f782 3720 int count;
a1d8b49a 3721
c66ac9db 3722 task = transport_generic_get_task(cmd, data_direction);
a1d8b49a 3723 if (!task)
ec98f782 3724 return -ENOMEM;
c66ac9db 3725
c66ac9db 3726 task->task_lba = lba;
ec98f782
AG
3727 task->task_sectors = min(sectors, dev_max_sectors);
3728 task->task_size = task->task_sectors * sector_size;
c66ac9db 3729
525a48a2
NB
3730 /*
3731 * This now assumes that passed sg_ents are in PAGE_SIZE chunks
3732 * in order to calculate the number per task SGL entries
3733 */
3734 task->task_sg_nents = DIV_ROUND_UP(task->task_size, PAGE_SIZE);
c66ac9db 3735 /*
ec98f782
AG
3736 * Check if the fabric module driver is requesting that all
 3737		 * struct se_task->task_sg[] be chained together. If so,
3738 * then allocate an extra padding SG entry for linking and
c3c74c7a
NB
3739 * marking the end of the chained SGL for every task except
3740 * the last one for (task_count > 1) operation, or skipping
3741 * the extra padding for the (task_count == 1) case.
c66ac9db 3742 */
c3c74c7a
NB
3743 if (cmd->se_tfo->task_sg_chaining && (i < (task_count - 1))) {
3744 task_sg_nents_padded = (task->task_sg_nents + 1);
c3c74c7a
NB
3745 } else
3746 task_sg_nents_padded = task->task_sg_nents;
c66ac9db 3747
1d20bb61 3748 task->task_sg = kmalloc(sizeof(struct scatterlist) *
c3c74c7a 3749 task_sg_nents_padded, GFP_KERNEL);
ec98f782
AG
3750 if (!task->task_sg) {
3751 cmd->se_dev->transport->free_task(task);
3752 return -ENOMEM;
3753 }
3754
c3c74c7a 3755 sg_init_table(task->task_sg, task_sg_nents_padded);
c66ac9db 3756
ec98f782
AG
3757 task_size = task->task_size;
3758
3759 /* Build new sgl, only up to task_size */
6708bb27 3760 for_each_sg(task->task_sg, sg, task->task_sg_nents, count) {
ec98f782
AG
3761 if (cmd_sg->length > task_size)
3762 break;
3763
3764 *sg = *cmd_sg;
3765 task_size -= cmd_sg->length;
3766 cmd_sg = sg_next(cmd_sg);
c66ac9db 3767 }
c66ac9db 3768
ec98f782
AG
3769 lba += task->task_sectors;
3770 sectors -= task->task_sectors;
c66ac9db 3771
ec98f782
AG
3772 spin_lock_irqsave(&cmd->t_state_lock, flags);
3773 list_add_tail(&task->t_list, &cmd->t_task_list);
3774 spin_unlock_irqrestore(&cmd->t_state_lock, flags);
c66ac9db
NB
3775 }
3776
ec98f782 3777 return task_count;
c66ac9db
NB
3778}
3779
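/*
 * Editorial note, worked example for the split above: for a 1 MiB WRITE
 * with block_size = 512 and max_sectors = 1024, sectors = 2048 and
 * task_count = DIV_ROUND_UP_SECTOR_T(2048, 1024) = 2, so two se_tasks of
 * 1024 sectors (512 KiB) each are built, each pointing at its own slice
 * of the command's scatterlist.
 */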
3780static int
ec98f782 3781transport_allocate_control_task(struct se_cmd *cmd)
c66ac9db 3782{
c66ac9db 3783 struct se_task *task;
ec98f782 3784 unsigned long flags;
c66ac9db 3785
91ec1d35
NB
3786 /* Workaround for handling zero-length control CDBs */
3787 if ((cmd->se_cmd_flags & SCF_SCSI_CONTROL_SG_IO_CDB) &&
3788 !cmd->data_length)
3789 return 0;
3790
c66ac9db
NB
3791 task = transport_generic_get_task(cmd, cmd->data_direction);
3792 if (!task)
ec98f782 3793 return -ENOMEM;
c66ac9db 3794
af3f00c7 3795 task->task_sg = cmd->t_data_sg;
c66ac9db 3796 task->task_size = cmd->data_length;
6708bb27 3797 task->task_sg_nents = cmd->t_data_nents;
c66ac9db 3798
ec98f782
AG
3799 spin_lock_irqsave(&cmd->t_state_lock, flags);
3800 list_add_tail(&task->t_list, &cmd->t_task_list);
3801 spin_unlock_irqrestore(&cmd->t_state_lock, flags);
c66ac9db 3802
6708bb27 3803 /* Success! Return number of tasks allocated */
a3eedc22 3804 return 1;
ec98f782
AG
3805}
3806
da0f7619
CH
3807/*
 3808 * Allocate any resources required to execute the command, and place it
 3809 * on the execution queue if possible. For writes we might not have the
 3810 * payload yet, so notify the fabric via a call to ->write_pending instead.
c66ac9db 3811 */
a1d8b49a 3812int transport_generic_new_cmd(struct se_cmd *cmd)
c66ac9db 3813{
da0f7619 3814 struct se_device *dev = cmd->se_dev;
9ac54987 3815 int task_cdbs, task_cdbs_bidi = 0;
da0f7619 3816 int set_counts = 1;
c66ac9db
NB
3817 int ret = 0;
3818
3819 /*
 3820	 * Determine if the TCM fabric module has already allocated physical
 3821	 * memory, and has directly called transport_generic_map_mem_to_cmd()
ec98f782 3822 * beforehand.
c66ac9db 3823 */
ec98f782
AG
3824 if (!(cmd->se_cmd_flags & SCF_PASSTHROUGH_SG_TO_MEM_NOALLOC) &&
3825 cmd->data_length) {
05d1c7c0 3826 ret = transport_generic_get_mem(cmd);
c66ac9db 3827 if (ret < 0)
03e98c9e 3828 goto out_fail;
c66ac9db 3829 }
da0f7619 3830
1d20bb61 3831 /*
38b40067 3832 * For BIDI command set up the read tasks first.
1d20bb61 3833 */
da0f7619 3834 if (cmd->t_bidi_data_sg &&
38b40067
CH
3835 dev->transport->transport_type != TRANSPORT_PLUGIN_PHBA_PDEV) {
3836 BUG_ON(!(cmd->se_cmd_flags & SCF_SCSI_DATA_SG_IO_CDB));
3837
9ac54987
NB
3838 task_cdbs_bidi = transport_allocate_data_tasks(cmd,
3839 DMA_FROM_DEVICE, cmd->t_bidi_data_sg,
3840 cmd->t_bidi_data_nents);
3841 if (task_cdbs_bidi <= 0)
da0f7619
CH
3842 goto out_fail;
3843
3844 atomic_inc(&cmd->t_fe_count);
3845 atomic_inc(&cmd->t_se_count);
3846 set_counts = 0;
3847 }
38b40067
CH
3848
3849 if (cmd->se_cmd_flags & SCF_SCSI_DATA_SG_IO_CDB) {
3850 task_cdbs = transport_allocate_data_tasks(cmd,
3851 cmd->data_direction, cmd->t_data_sg,
3852 cmd->t_data_nents);
3853 } else {
3854 task_cdbs = transport_allocate_control_task(cmd);
3855 }
3856
410f6702 3857 if (task_cdbs < 0)
da0f7619 3858 goto out_fail;
410f6702
RD
3859 else if (!task_cdbs && (cmd->se_cmd_flags & SCF_SCSI_DATA_SG_IO_CDB)) {
3860 cmd->t_state = TRANSPORT_COMPLETE;
3861 atomic_set(&cmd->t_transport_active, 1);
91ec1d35
NB
3862
3863 if (cmd->t_task_cdb[0] == REQUEST_SENSE) {
3864 u8 ua_asc = 0, ua_ascq = 0;
3865
3866 core_scsi3_ua_clear_for_request_sense(cmd,
3867 &ua_asc, &ua_ascq);
3868 }
3869
410f6702
RD
3870 INIT_WORK(&cmd->work, target_complete_ok_work);
3871 queue_work(target_completion_wq, &cmd->work);
3872 return 0;
3873 }
da0f7619
CH
3874
3875 if (set_counts) {
3876 atomic_inc(&cmd->t_fe_count);
3877 atomic_inc(&cmd->t_se_count);
3878 }
3879
9ac54987
NB
3880 cmd->t_task_list_num = (task_cdbs + task_cdbs_bidi);
3881 atomic_set(&cmd->t_task_cdbs_left, cmd->t_task_list_num);
3882 atomic_set(&cmd->t_task_cdbs_ex_left, cmd->t_task_list_num);
da0f7619 3883
c66ac9db 3884 /*
a1d8b49a 3885	 * For WRITEs, let the fabric know its buffer is ready.
c66ac9db
NB
3886 * This WRITE struct se_cmd (and all of its associated struct se_task's)
3887 * will be added to the struct se_device execution queue after its WRITE
3888 * data has arrived. (ie: It gets handled by the transport processing
3889 * thread a second time)
3890 */
3891 if (cmd->data_direction == DMA_TO_DEVICE) {
3892 transport_add_tasks_to_state_queue(cmd);
3893 return transport_generic_write_pending(cmd);
3894 }
3895 /*
3896 * Everything else but a WRITE, add the struct se_cmd's struct se_task's
3897 * to the execution queue.
3898 */
3899 transport_execute_tasks(cmd);
3900 return 0;
da0f7619
CH
3901
3902out_fail:
3903 cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION;
3904 cmd->scsi_sense_reason = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
3905 return -EINVAL;
c66ac9db 3906}
a1d8b49a 3907EXPORT_SYMBOL(transport_generic_new_cmd);
c66ac9db
NB
3908
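/*
 * Editorial sketch of the outcomes a caller of transport_generic_new_cmd()
 * has to handle, mirroring the TRANSPORT_NEW_CMD_MAP handling in
 * transport_processing_thread() below.  The wrapper name is hypothetical.
 */
static void example_start_new_cmd(struct se_cmd *cmd)
{
	int ret;

	ret = transport_generic_new_cmd(cmd);
	if (ret < 0)
		transport_generic_request_failure(cmd);
	/*
	 * ret >= 0: tasks were queued for execution, or the fabric's
	 * ->write_pending() was invoked for a WRITE; completion continues
	 * asynchronously via target_complete_ok_work() or the QF path.
	 */
}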
3909/* transport_generic_process_write():
3910 *
3911 *
3912 */
3913void transport_generic_process_write(struct se_cmd *cmd)
3914{
c66ac9db
NB
3915 transport_execute_tasks(cmd);
3916}
3917EXPORT_SYMBOL(transport_generic_process_write);
3918
e057f533 3919static void transport_write_pending_qf(struct se_cmd *cmd)
07bde79a 3920{
f147abb4
NB
3921 int ret;
3922
3923 ret = cmd->se_tfo->write_pending(cmd);
3924 if (ret == -EAGAIN || ret == -ENOMEM) {
e057f533
CH
 3925		pr_debug("Handling write_pending QUEUE_FULL: se_cmd: %p\n",
3926 cmd);
3927 transport_handle_queue_full(cmd, cmd->se_dev);
3928 }
07bde79a
NB
3929}
3930
c66ac9db
NB
3931static int transport_generic_write_pending(struct se_cmd *cmd)
3932{
3933 unsigned long flags;
3934 int ret;
3935
a1d8b49a 3936 spin_lock_irqsave(&cmd->t_state_lock, flags);
c66ac9db 3937 cmd->t_state = TRANSPORT_WRITE_PENDING;
a1d8b49a 3938 spin_unlock_irqrestore(&cmd->t_state_lock, flags);
07bde79a 3939
c66ac9db
NB
3940 /*
3941 * Clear the se_cmd for WRITE_PENDING status in order to set
a1d8b49a 3942 * cmd->t_transport_active=0 so that transport_generic_handle_data
c66ac9db 3943 * can be called from HW target mode interrupt code. This is safe
e3d6f909 3944 * to be called with transport_off=1 before the cmd->se_tfo->write_pending
c66ac9db
NB
3945 * because the se_cmd->se_lun pointer is not being cleared.
3946 */
3947 transport_cmd_check_stop(cmd, 1, 0);
3948
3949 /*
3950 * Call the fabric write_pending function here to let the
3951 * frontend know that WRITE buffers are ready.
3952 */
e3d6f909 3953 ret = cmd->se_tfo->write_pending(cmd);
f147abb4 3954 if (ret == -EAGAIN || ret == -ENOMEM)
07bde79a
NB
3955 goto queue_full;
3956 else if (ret < 0)
c66ac9db
NB
3957 return ret;
3958
03e98c9e 3959 return 1;
07bde79a
NB
3960
3961queue_full:
6708bb27 3962	pr_debug("Handling write_pending QUEUE_FULL: se_cmd: %p\n", cmd);
07bde79a 3963 cmd->t_state = TRANSPORT_COMPLETE_QF_WP;
e057f533 3964 transport_handle_queue_full(cmd, cmd->se_dev);
f147abb4 3965 return 0;
c66ac9db
NB
3966}
3967
39c05f32 3968void transport_generic_free_cmd(struct se_cmd *cmd, int wait_for_tasks)
c66ac9db 3969{
d14921d6
NB
3970 if (!(cmd->se_cmd_flags & SCF_SE_LUN_CMD)) {
3971 if (wait_for_tasks && cmd->se_tmr_req)
3972 transport_wait_for_tasks(cmd);
3973
35462975 3974 transport_release_cmd(cmd);
d14921d6
NB
3975 } else {
3976 if (wait_for_tasks)
3977 transport_wait_for_tasks(cmd);
3978
c66ac9db
NB
3979 core_dec_lacl_count(cmd->se_sess->se_node_acl, cmd);
3980
82f1c8a4 3981 if (cmd->se_lun)
c66ac9db 3982 transport_lun_remove_cmd(cmd);
c66ac9db 3983
f4366772
NB
3984 transport_free_dev_tasks(cmd);
3985
39c05f32 3986 transport_put_cmd(cmd);
c66ac9db
NB
3987 }
3988}
3989EXPORT_SYMBOL(transport_generic_free_cmd);
3990
a17f091d
NB
3991/* target_get_sess_cmd - Add command to active ->sess_cmd_list
3992 * @se_sess: session to reference
3993 * @se_cmd: command descriptor to add
a6360785 3994 * @ack_kref: Signal that fabric will perform an ack target_put_sess_cmd()
a17f091d 3995 */
a6360785
NB
3996void target_get_sess_cmd(struct se_session *se_sess, struct se_cmd *se_cmd,
3997 bool ack_kref)
a17f091d
NB
3998{
3999 unsigned long flags;
4000
7481deb4 4001 kref_init(&se_cmd->cmd_kref);
a6360785
NB
4002 /*
4003 * Add a second kref if the fabric caller is expecting to handle
4004 * fabric acknowledgement that requires two target_put_sess_cmd()
4005 * invocations before se_cmd descriptor release.
4006 */
4007 if (ack_kref == true)
4008 kref_get(&se_cmd->cmd_kref);
7481deb4 4009
a17f091d
NB
4010 spin_lock_irqsave(&se_sess->sess_cmd_lock, flags);
4011 list_add_tail(&se_cmd->se_cmd_list, &se_sess->sess_cmd_list);
4012 se_cmd->check_release = 1;
4013 spin_unlock_irqrestore(&se_sess->sess_cmd_lock, flags);
4014}
4015EXPORT_SYMBOL(target_get_sess_cmd);
4016
7481deb4 4017static void target_release_cmd_kref(struct kref *kref)
a17f091d 4018{
7481deb4
NB
4019 struct se_cmd *se_cmd = container_of(kref, struct se_cmd, cmd_kref);
4020 struct se_session *se_sess = se_cmd->se_sess;
a17f091d
NB
4021 unsigned long flags;
4022
4023 spin_lock_irqsave(&se_sess->sess_cmd_lock, flags);
4024 if (list_empty(&se_cmd->se_cmd_list)) {
4025 spin_unlock_irqrestore(&se_sess->sess_cmd_lock, flags);
4026 WARN_ON(1);
7481deb4 4027 return;
a17f091d 4028 }
a17f091d
NB
4029 if (se_sess->sess_tearing_down && se_cmd->cmd_wait_set) {
4030 spin_unlock_irqrestore(&se_sess->sess_cmd_lock, flags);
4031 complete(&se_cmd->cmd_wait_comp);
7481deb4 4032 return;
a17f091d
NB
4033 }
4034 list_del(&se_cmd->se_cmd_list);
4035 spin_unlock_irqrestore(&se_sess->sess_cmd_lock, flags);
4036
7481deb4
NB
4037 se_cmd->se_tfo->release_cmd(se_cmd);
4038}
4039
4040/* target_put_sess_cmd - Check for active I/O shutdown via kref_put
4041 * @se_sess: session to reference
4042 * @se_cmd: command descriptor to drop
4043 */
4044int target_put_sess_cmd(struct se_session *se_sess, struct se_cmd *se_cmd)
4045{
4046 return kref_put(&se_cmd->cmd_kref, target_release_cmd_kref);
a17f091d
NB
4047}
4048EXPORT_SYMBOL(target_put_sess_cmd);
4049
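/*
 * Editorial sketch: reference pairing when a fabric passes ack_kref = true
 * to target_get_sess_cmd().  Two references are held, so both the normal
 * I/O completion path and the fabric acknowledgement path must drop one
 * with target_put_sess_cmd().  The function name is illustrative only.
 */
static void example_cmd_ref_lifetime(struct se_session *se_sess,
				     struct se_cmd *se_cmd)
{
	target_get_sess_cmd(se_sess, se_cmd, true);	/* kref_init + kref_get */

	/* ... command is executed and status is queued to the initiator ... */

	target_put_sess_cmd(se_sess, se_cmd);		/* drop I/O reference */
	target_put_sess_cmd(se_sess, se_cmd);		/* drop ack reference */
}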
4050/* target_splice_sess_cmd_list - Split active cmds into sess_wait_list
4051 * @se_sess: session to split
4052 */
4053void target_splice_sess_cmd_list(struct se_session *se_sess)
4054{
4055 struct se_cmd *se_cmd;
4056 unsigned long flags;
4057
4058 WARN_ON(!list_empty(&se_sess->sess_wait_list));
4059 INIT_LIST_HEAD(&se_sess->sess_wait_list);
4060
4061 spin_lock_irqsave(&se_sess->sess_cmd_lock, flags);
4062 se_sess->sess_tearing_down = 1;
4063
4064 list_splice_init(&se_sess->sess_cmd_list, &se_sess->sess_wait_list);
4065
4066 list_for_each_entry(se_cmd, &se_sess->sess_wait_list, se_cmd_list)
4067 se_cmd->cmd_wait_set = 1;
4068
4069 spin_unlock_irqrestore(&se_sess->sess_cmd_lock, flags);
4070}
4071EXPORT_SYMBOL(target_splice_sess_cmd_list);
4072
4073/* target_wait_for_sess_cmds - Wait for outstanding descriptors
4074 * @se_sess: session to wait for active I/O
4075 * @wait_for_tasks: Make extra transport_wait_for_tasks call
4076 */
4077void target_wait_for_sess_cmds(
4078 struct se_session *se_sess,
4079 int wait_for_tasks)
4080{
4081 struct se_cmd *se_cmd, *tmp_cmd;
4082 bool rc = false;
4083
4084 list_for_each_entry_safe(se_cmd, tmp_cmd,
4085 &se_sess->sess_wait_list, se_cmd_list) {
4086 list_del(&se_cmd->se_cmd_list);
4087
4088 pr_debug("Waiting for se_cmd: %p t_state: %d, fabric state:"
4089 " %d\n", se_cmd, se_cmd->t_state,
4090 se_cmd->se_tfo->get_cmd_state(se_cmd));
4091
4092 if (wait_for_tasks) {
4093 pr_debug("Calling transport_wait_for_tasks se_cmd: %p t_state: %d,"
4094 " fabric state: %d\n", se_cmd, se_cmd->t_state,
4095 se_cmd->se_tfo->get_cmd_state(se_cmd));
4096
4097 rc = transport_wait_for_tasks(se_cmd);
4098
4099 pr_debug("After transport_wait_for_tasks se_cmd: %p t_state: %d,"
4100 " fabric state: %d\n", se_cmd, se_cmd->t_state,
4101 se_cmd->se_tfo->get_cmd_state(se_cmd));
4102 }
4103
4104 if (!rc) {
4105 wait_for_completion(&se_cmd->cmd_wait_comp);
4106 pr_debug("After cmd_wait_comp: se_cmd: %p t_state: %d"
4107 " fabric state: %d\n", se_cmd, se_cmd->t_state,
4108 se_cmd->se_tfo->get_cmd_state(se_cmd));
4109 }
4110
4111 se_cmd->se_tfo->release_cmd(se_cmd);
4112 }
4113}
4114EXPORT_SYMBOL(target_wait_for_sess_cmds);
4115
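/*
 * Editorial sketch: session shutdown ordering implied by the two helpers
 * above - splice the active commands onto sess_wait_list first, then wait
 * for each one to be released.  The function name is hypothetical.
 */
static void example_session_shutdown(struct se_session *se_sess)
{
	target_splice_sess_cmd_list(se_sess);	/* sets sess_tearing_down */
	target_wait_for_sess_cmds(se_sess, 0);	/* no extra wait_for_tasks */
}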
c66ac9db
NB
4116/* transport_lun_wait_for_tasks():
4117 *
4118 * Called from ConfigFS context to stop the passed struct se_cmd to allow
 4119 * a struct se_lun to be successfully shut down.
4120 */
4121static int transport_lun_wait_for_tasks(struct se_cmd *cmd, struct se_lun *lun)
4122{
4123 unsigned long flags;
4124 int ret;
4125 /*
4126 * If the frontend has already requested this struct se_cmd to
4127 * be stopped, we can safely ignore this struct se_cmd.
4128 */
a1d8b49a
AG
4129 spin_lock_irqsave(&cmd->t_state_lock, flags);
4130 if (atomic_read(&cmd->t_transport_stop)) {
4131 atomic_set(&cmd->transport_lun_stop, 0);
6708bb27 4132 pr_debug("ConfigFS ITT[0x%08x] - t_transport_stop =="
e3d6f909 4133 " TRUE, skipping\n", cmd->se_tfo->get_task_tag(cmd));
a1d8b49a 4134 spin_unlock_irqrestore(&cmd->t_state_lock, flags);
c66ac9db 4135 transport_cmd_check_stop(cmd, 1, 0);
e3d6f909 4136 return -EPERM;
c66ac9db 4137 }
a1d8b49a
AG
4138 atomic_set(&cmd->transport_lun_fe_stop, 1);
4139 spin_unlock_irqrestore(&cmd->t_state_lock, flags);
c66ac9db 4140
5951146d 4141 wake_up_interruptible(&cmd->se_dev->dev_queue_obj.thread_wq);
c66ac9db
NB
4142
4143 ret = transport_stop_tasks_for_cmd(cmd);
4144
6708bb27
AG
4145 pr_debug("ConfigFS: cmd: %p t_tasks: %d stop tasks ret:"
4146 " %d\n", cmd, cmd->t_task_list_num, ret);
c66ac9db 4147 if (!ret) {
6708bb27 4148 pr_debug("ConfigFS: ITT[0x%08x] - stopping cmd....\n",
e3d6f909 4149 cmd->se_tfo->get_task_tag(cmd));
a1d8b49a 4150 wait_for_completion(&cmd->transport_lun_stop_comp);
6708bb27 4151 pr_debug("ConfigFS: ITT[0x%08x] - stopped cmd....\n",
e3d6f909 4152 cmd->se_tfo->get_task_tag(cmd));
c66ac9db 4153 }
3df8d40b 4154 transport_remove_cmd_from_queue(cmd);
c66ac9db
NB
4155
4156 return 0;
4157}
4158
c66ac9db
NB
4159static void __transport_clear_lun_from_sessions(struct se_lun *lun)
4160{
4161 struct se_cmd *cmd = NULL;
4162 unsigned long lun_flags, cmd_flags;
4163 /*
4164 * Do exception processing and return CHECK_CONDITION status to the
4165 * Initiator Port.
4166 */
4167 spin_lock_irqsave(&lun->lun_cmd_lock, lun_flags);
5951146d
AG
4168 while (!list_empty(&lun->lun_cmd_list)) {
4169 cmd = list_first_entry(&lun->lun_cmd_list,
4170 struct se_cmd, se_lun_node);
4171 list_del(&cmd->se_lun_node);
4172
a1d8b49a 4173 atomic_set(&cmd->transport_lun_active, 0);
c66ac9db
NB
4174 /*
4175 * This will notify iscsi_target_transport.c:
4176 * transport_cmd_check_stop() that a LUN shutdown is in
4177 * progress for the iscsi_cmd_t.
4178 */
a1d8b49a 4179 spin_lock(&cmd->t_state_lock);
6708bb27 4180 pr_debug("SE_LUN[%d] - Setting cmd->transport"
c66ac9db 4181 "_lun_stop for ITT: 0x%08x\n",
e3d6f909
AG
4182 cmd->se_lun->unpacked_lun,
4183 cmd->se_tfo->get_task_tag(cmd));
a1d8b49a
AG
4184 atomic_set(&cmd->transport_lun_stop, 1);
4185 spin_unlock(&cmd->t_state_lock);
c66ac9db
NB
4186
4187 spin_unlock_irqrestore(&lun->lun_cmd_lock, lun_flags);
4188
6708bb27
AG
4189 if (!cmd->se_lun) {
4190 pr_err("ITT: 0x%08x, [i,t]_state: %u/%u\n",
e3d6f909
AG
4191 cmd->se_tfo->get_task_tag(cmd),
4192 cmd->se_tfo->get_cmd_state(cmd), cmd->t_state);
c66ac9db
NB
4193 BUG();
4194 }
4195 /*
4196 * If the Storage engine still owns the iscsi_cmd_t, determine
4197 * and/or stop its context.
4198 */
6708bb27 4199 pr_debug("SE_LUN[%d] - ITT: 0x%08x before transport"
e3d6f909
AG
4200 "_lun_wait_for_tasks()\n", cmd->se_lun->unpacked_lun,
4201 cmd->se_tfo->get_task_tag(cmd));
c66ac9db 4202
e3d6f909 4203 if (transport_lun_wait_for_tasks(cmd, cmd->se_lun) < 0) {
c66ac9db
NB
4204 spin_lock_irqsave(&lun->lun_cmd_lock, lun_flags);
4205 continue;
4206 }
4207
6708bb27 4208 pr_debug("SE_LUN[%d] - ITT: 0x%08x after transport_lun"
c66ac9db 4209 "_wait_for_tasks(): SUCCESS\n",
e3d6f909
AG
4210 cmd->se_lun->unpacked_lun,
4211 cmd->se_tfo->get_task_tag(cmd));
c66ac9db 4212
a1d8b49a 4213 spin_lock_irqsave(&cmd->t_state_lock, cmd_flags);
6708bb27 4214 if (!atomic_read(&cmd->transport_dev_active)) {
a1d8b49a 4215 spin_unlock_irqrestore(&cmd->t_state_lock, cmd_flags);
c66ac9db
NB
4216 goto check_cond;
4217 }
a1d8b49a 4218 atomic_set(&cmd->transport_dev_active, 0);
c66ac9db 4219 transport_all_task_dev_remove_state(cmd);
a1d8b49a 4220 spin_unlock_irqrestore(&cmd->t_state_lock, cmd_flags);
c66ac9db
NB
4221
4222 transport_free_dev_tasks(cmd);
4223 /*
4224 * The Storage engine stopped this struct se_cmd before it was
4225 * send to the fabric frontend for delivery back to the
4226 * Initiator Node. Return this SCSI CDB back with an
4227 * CHECK_CONDITION status.
4228 */
4229check_cond:
4230 transport_send_check_condition_and_sense(cmd,
4231 TCM_NON_EXISTENT_LUN, 0);
4232 /*
4233 * If the fabric frontend is waiting for this iscsi_cmd_t to
4234 * be released, notify the waiting thread now that LU has
4235 * finished accessing it.
4236 */
a1d8b49a
AG
4237 spin_lock_irqsave(&cmd->t_state_lock, cmd_flags);
4238 if (atomic_read(&cmd->transport_lun_fe_stop)) {
6708bb27 4239 pr_debug("SE_LUN[%d] - Detected FE stop for"
c66ac9db
NB
4240 " struct se_cmd: %p ITT: 0x%08x\n",
4241 lun->unpacked_lun,
e3d6f909 4242 cmd, cmd->se_tfo->get_task_tag(cmd));
c66ac9db 4243
a1d8b49a 4244 spin_unlock_irqrestore(&cmd->t_state_lock,
c66ac9db
NB
4245 cmd_flags);
4246 transport_cmd_check_stop(cmd, 1, 0);
a1d8b49a 4247 complete(&cmd->transport_lun_fe_stop_comp);
c66ac9db
NB
4248 spin_lock_irqsave(&lun->lun_cmd_lock, lun_flags);
4249 continue;
4250 }
6708bb27 4251 pr_debug("SE_LUN[%d] - ITT: 0x%08x finished processing\n",
e3d6f909 4252 lun->unpacked_lun, cmd->se_tfo->get_task_tag(cmd));
c66ac9db 4253
a1d8b49a 4254 spin_unlock_irqrestore(&cmd->t_state_lock, cmd_flags);
c66ac9db
NB
4255 spin_lock_irqsave(&lun->lun_cmd_lock, lun_flags);
4256 }
4257 spin_unlock_irqrestore(&lun->lun_cmd_lock, lun_flags);
4258}
4259
4260static int transport_clear_lun_thread(void *p)
4261{
8359cf43 4262 struct se_lun *lun = p;
c66ac9db
NB
4263
4264 __transport_clear_lun_from_sessions(lun);
4265 complete(&lun->lun_shutdown_comp);
4266
4267 return 0;
4268}
4269
4270int transport_clear_lun_from_sessions(struct se_lun *lun)
4271{
4272 struct task_struct *kt;
4273
5951146d 4274 kt = kthread_run(transport_clear_lun_thread, lun,
c66ac9db
NB
4275 "tcm_cl_%u", lun->unpacked_lun);
4276 if (IS_ERR(kt)) {
6708bb27 4277 pr_err("Unable to start clear_lun thread\n");
e3d6f909 4278 return PTR_ERR(kt);
c66ac9db
NB
4279 }
4280 wait_for_completion(&lun->lun_shutdown_comp);
4281
4282 return 0;
4283}
4284
d14921d6
NB
4285/**
4286 * transport_wait_for_tasks - wait for completion to occur
4287 * @cmd: command to wait
c66ac9db 4288 *
d14921d6
NB
4289 * Called from frontend fabric context to wait for storage engine
4290 * to pause and/or release frontend generated struct se_cmd.
c66ac9db 4291 */
a17f091d 4292bool transport_wait_for_tasks(struct se_cmd *cmd)
c66ac9db
NB
4293{
4294 unsigned long flags;
4295
a1d8b49a 4296 spin_lock_irqsave(&cmd->t_state_lock, flags);
d14921d6
NB
4297 if (!(cmd->se_cmd_flags & SCF_SE_LUN_CMD) && !(cmd->se_tmr_req)) {
4298 spin_unlock_irqrestore(&cmd->t_state_lock, flags);
a17f091d 4299 return false;
d14921d6
NB
4300 }
4301 /*
4302 * Only perform a possible wait_for_tasks if SCF_SUPPORTED_SAM_OPCODE
4303 * has been set in transport_set_supported_SAM_opcode().
4304 */
4305 if (!(cmd->se_cmd_flags & SCF_SUPPORTED_SAM_OPCODE) && !cmd->se_tmr_req) {
4306 spin_unlock_irqrestore(&cmd->t_state_lock, flags);
a17f091d 4307 return false;
d14921d6 4308 }
c66ac9db
NB
4309 /*
4310 * If we are already stopped due to an external event (ie: LUN shutdown)
4311 * sleep until the connection can have the passed struct se_cmd back.
a1d8b49a 4312	 * The cmd->transport_lun_fe_stop_comp will be completed by
c66ac9db
NB
4313 * transport_clear_lun_from_sessions() once the ConfigFS context caller
4314 * has completed its operation on the struct se_cmd.
4315 */
a1d8b49a 4316 if (atomic_read(&cmd->transport_lun_stop)) {
c66ac9db 4317
6708bb27 4318 pr_debug("wait_for_tasks: Stopping"
e3d6f909 4319 " wait_for_completion(&cmd->t_tasktransport_lun_fe"
c66ac9db 4320 "_stop_comp); for ITT: 0x%08x\n",
e3d6f909 4321 cmd->se_tfo->get_task_tag(cmd));
c66ac9db
NB
4322 /*
4323 * There is a special case for WRITES where a FE exception +
4324 * LUN shutdown means ConfigFS context is still sleeping on
4325 * transport_lun_stop_comp in transport_lun_wait_for_tasks().
4326 * We go ahead and up transport_lun_stop_comp just to be sure
4327 * here.
4328 */
a1d8b49a
AG
4329 spin_unlock_irqrestore(&cmd->t_state_lock, flags);
4330 complete(&cmd->transport_lun_stop_comp);
4331 wait_for_completion(&cmd->transport_lun_fe_stop_comp);
4332 spin_lock_irqsave(&cmd->t_state_lock, flags);
c66ac9db
NB
4333
4334 transport_all_task_dev_remove_state(cmd);
4335 /*
4336 * At this point, the frontend who was the originator of this
4337 * struct se_cmd, now owns the structure and can be released through
4338 * normal means below.
4339 */
6708bb27 4340 pr_debug("wait_for_tasks: Stopped"
e3d6f909 4341 " wait_for_completion(&cmd->t_tasktransport_lun_fe_"
c66ac9db 4342 "stop_comp); for ITT: 0x%08x\n",
e3d6f909 4343 cmd->se_tfo->get_task_tag(cmd));
c66ac9db 4344
a1d8b49a 4345 atomic_set(&cmd->transport_lun_stop, 0);
c66ac9db 4346 }
a1d8b49a 4347 if (!atomic_read(&cmd->t_transport_active) ||
d14921d6
NB
4348 atomic_read(&cmd->t_transport_aborted)) {
4349 spin_unlock_irqrestore(&cmd->t_state_lock, flags);
a17f091d 4350 return false;
d14921d6 4351 }
c66ac9db 4352
a1d8b49a 4353 atomic_set(&cmd->t_transport_stop, 1);
c66ac9db 4354
6708bb27 4355 pr_debug("wait_for_tasks: Stopping %p ITT: 0x%08x"
f2da9dbd
CH
4356 " i_state: %d, t_state: %d, t_transport_stop = TRUE\n",
4357 cmd, cmd->se_tfo->get_task_tag(cmd),
4358 cmd->se_tfo->get_cmd_state(cmd), cmd->t_state);
c66ac9db 4359
a1d8b49a 4360 spin_unlock_irqrestore(&cmd->t_state_lock, flags);
c66ac9db 4361
5951146d 4362 wake_up_interruptible(&cmd->se_dev->dev_queue_obj.thread_wq);
c66ac9db 4363
a1d8b49a 4364 wait_for_completion(&cmd->t_transport_stop_comp);
c66ac9db 4365
a1d8b49a
AG
4366 spin_lock_irqsave(&cmd->t_state_lock, flags);
4367 atomic_set(&cmd->t_transport_active, 0);
4368 atomic_set(&cmd->t_transport_stop, 0);
c66ac9db 4369
6708bb27 4370	pr_debug("wait_for_tasks: Stopped wait_for_completion("
a1d8b49a 4371 "&cmd->t_transport_stop_comp) for ITT: 0x%08x\n",
e3d6f909 4372 cmd->se_tfo->get_task_tag(cmd));
c66ac9db 4373
d14921d6 4374 spin_unlock_irqrestore(&cmd->t_state_lock, flags);
a17f091d
NB
4375
4376 return true;
c66ac9db 4377}
d14921d6 4378EXPORT_SYMBOL(transport_wait_for_tasks);
c66ac9db
NB
4379
4380static int transport_get_sense_codes(
4381 struct se_cmd *cmd,
4382 u8 *asc,
4383 u8 *ascq)
4384{
4385 *asc = cmd->scsi_asc;
4386 *ascq = cmd->scsi_ascq;
4387
4388 return 0;
4389}
4390
4391static int transport_set_sense_codes(
4392 struct se_cmd *cmd,
4393 u8 asc,
4394 u8 ascq)
4395{
4396 cmd->scsi_asc = asc;
4397 cmd->scsi_ascq = ascq;
4398
4399 return 0;
4400}
4401
4402int transport_send_check_condition_and_sense(
4403 struct se_cmd *cmd,
4404 u8 reason,
4405 int from_transport)
4406{
4407 unsigned char *buffer = cmd->sense_buffer;
4408 unsigned long flags;
4409 int offset;
4410 u8 asc = 0, ascq = 0;
4411
a1d8b49a 4412 spin_lock_irqsave(&cmd->t_state_lock, flags);
c66ac9db 4413 if (cmd->se_cmd_flags & SCF_SENT_CHECK_CONDITION) {
a1d8b49a 4414 spin_unlock_irqrestore(&cmd->t_state_lock, flags);
c66ac9db
NB
4415 return 0;
4416 }
4417 cmd->se_cmd_flags |= SCF_SENT_CHECK_CONDITION;
a1d8b49a 4418 spin_unlock_irqrestore(&cmd->t_state_lock, flags);
c66ac9db
NB
4419
4420 if (!reason && from_transport)
4421 goto after_reason;
4422
4423 if (!from_transport)
4424 cmd->se_cmd_flags |= SCF_EMULATED_TASK_SENSE;
4425 /*
4426 * Data Segment and SenseLength of the fabric response PDU.
4427 *
4428 * TRANSPORT_SENSE_BUFFER is now set to SCSI_SENSE_BUFFERSIZE
4429 * from include/scsi/scsi_cmnd.h
4430 */
e3d6f909 4431 offset = cmd->se_tfo->set_fabric_sense_len(cmd,
c66ac9db
NB
4432 TRANSPORT_SENSE_BUFFER);
4433 /*
4434 * Actual SENSE DATA, see SPC-3 7.23.2 SPC_SENSE_KEY_OFFSET uses
4435 * SENSE KEY values from include/scsi/scsi.h
4436 */
4437 switch (reason) {
4438 case TCM_NON_EXISTENT_LUN:
eb39d340
NB
4439 /* CURRENT ERROR */
4440 buffer[offset] = 0x70;
895f3022 4441 buffer[offset+SPC_ADD_SENSE_LEN_OFFSET] = 10;
eb39d340
NB
4442 /* ILLEGAL REQUEST */
4443 buffer[offset+SPC_SENSE_KEY_OFFSET] = ILLEGAL_REQUEST;
4444 /* LOGICAL UNIT NOT SUPPORTED */
4445 buffer[offset+SPC_ASC_KEY_OFFSET] = 0x25;
4446 break;
c66ac9db
NB
4447 case TCM_UNSUPPORTED_SCSI_OPCODE:
4448 case TCM_SECTOR_COUNT_TOO_MANY:
4449 /* CURRENT ERROR */
4450 buffer[offset] = 0x70;
895f3022 4451 buffer[offset+SPC_ADD_SENSE_LEN_OFFSET] = 10;
c66ac9db
NB
4452 /* ILLEGAL REQUEST */
4453 buffer[offset+SPC_SENSE_KEY_OFFSET] = ILLEGAL_REQUEST;
4454 /* INVALID COMMAND OPERATION CODE */
4455 buffer[offset+SPC_ASC_KEY_OFFSET] = 0x20;
4456 break;
4457 case TCM_UNKNOWN_MODE_PAGE:
4458 /* CURRENT ERROR */
4459 buffer[offset] = 0x70;
895f3022 4460 buffer[offset+SPC_ADD_SENSE_LEN_OFFSET] = 10;
c66ac9db
NB
4461 /* ILLEGAL REQUEST */
4462 buffer[offset+SPC_SENSE_KEY_OFFSET] = ILLEGAL_REQUEST;
4463 /* INVALID FIELD IN CDB */
4464 buffer[offset+SPC_ASC_KEY_OFFSET] = 0x24;
4465 break;
4466 case TCM_CHECK_CONDITION_ABORT_CMD:
4467 /* CURRENT ERROR */
4468 buffer[offset] = 0x70;
895f3022 4469 buffer[offset+SPC_ADD_SENSE_LEN_OFFSET] = 10;
c66ac9db
NB
4470 /* ABORTED COMMAND */
4471 buffer[offset+SPC_SENSE_KEY_OFFSET] = ABORTED_COMMAND;
4472 /* BUS DEVICE RESET FUNCTION OCCURRED */
4473 buffer[offset+SPC_ASC_KEY_OFFSET] = 0x29;
4474 buffer[offset+SPC_ASCQ_KEY_OFFSET] = 0x03;
4475 break;
4476 case TCM_INCORRECT_AMOUNT_OF_DATA:
4477 /* CURRENT ERROR */
4478 buffer[offset] = 0x70;
895f3022 4479 buffer[offset+SPC_ADD_SENSE_LEN_OFFSET] = 10;
c66ac9db
NB
4480 /* ABORTED COMMAND */
4481 buffer[offset+SPC_SENSE_KEY_OFFSET] = ABORTED_COMMAND;
4482 /* WRITE ERROR */
4483 buffer[offset+SPC_ASC_KEY_OFFSET] = 0x0c;
4484 /* NOT ENOUGH UNSOLICITED DATA */
4485 buffer[offset+SPC_ASCQ_KEY_OFFSET] = 0x0d;
4486 break;
4487 case TCM_INVALID_CDB_FIELD:
4488 /* CURRENT ERROR */
4489 buffer[offset] = 0x70;
895f3022 4490 buffer[offset+SPC_ADD_SENSE_LEN_OFFSET] = 10;
9fbc8909
RD
4491 /* ILLEGAL REQUEST */
4492 buffer[offset+SPC_SENSE_KEY_OFFSET] = ILLEGAL_REQUEST;
c66ac9db
NB
4493 /* INVALID FIELD IN CDB */
4494 buffer[offset+SPC_ASC_KEY_OFFSET] = 0x24;
4495 break;
4496 case TCM_INVALID_PARAMETER_LIST:
4497 /* CURRENT ERROR */
4498 buffer[offset] = 0x70;
895f3022 4499 buffer[offset+SPC_ADD_SENSE_LEN_OFFSET] = 10;
9fbc8909
RD
4500 /* ILLEGAL REQUEST */
4501 buffer[offset+SPC_SENSE_KEY_OFFSET] = ILLEGAL_REQUEST;
c66ac9db
NB
4502 /* INVALID FIELD IN PARAMETER LIST */
4503 buffer[offset+SPC_ASC_KEY_OFFSET] = 0x26;
4504 break;
4505 case TCM_UNEXPECTED_UNSOLICITED_DATA:
4506 /* CURRENT ERROR */
4507 buffer[offset] = 0x70;
895f3022 4508 buffer[offset+SPC_ADD_SENSE_LEN_OFFSET] = 10;
c66ac9db
NB
4509 /* ABORTED COMMAND */
4510 buffer[offset+SPC_SENSE_KEY_OFFSET] = ABORTED_COMMAND;
4511 /* WRITE ERROR */
4512 buffer[offset+SPC_ASC_KEY_OFFSET] = 0x0c;
4513 /* UNEXPECTED_UNSOLICITED_DATA */
4514 buffer[offset+SPC_ASCQ_KEY_OFFSET] = 0x0c;
4515 break;
4516 case TCM_SERVICE_CRC_ERROR:
4517 /* CURRENT ERROR */
4518 buffer[offset] = 0x70;
895f3022 4519 buffer[offset+SPC_ADD_SENSE_LEN_OFFSET] = 10;
c66ac9db
NB
4520 /* ABORTED COMMAND */
4521 buffer[offset+SPC_SENSE_KEY_OFFSET] = ABORTED_COMMAND;
4522 /* PROTOCOL SERVICE CRC ERROR */
4523 buffer[offset+SPC_ASC_KEY_OFFSET] = 0x47;
4524 /* N/A */
4525 buffer[offset+SPC_ASCQ_KEY_OFFSET] = 0x05;
4526 break;
4527 case TCM_SNACK_REJECTED:
4528 /* CURRENT ERROR */
4529 buffer[offset] = 0x70;
895f3022 4530 buffer[offset+SPC_ADD_SENSE_LEN_OFFSET] = 10;
c66ac9db
NB
4531 /* ABORTED COMMAND */
4532 buffer[offset+SPC_SENSE_KEY_OFFSET] = ABORTED_COMMAND;
4533 /* READ ERROR */
4534 buffer[offset+SPC_ASC_KEY_OFFSET] = 0x11;
4535 /* FAILED RETRANSMISSION REQUEST */
4536 buffer[offset+SPC_ASCQ_KEY_OFFSET] = 0x13;
4537 break;
4538 case TCM_WRITE_PROTECTED:
4539 /* CURRENT ERROR */
4540 buffer[offset] = 0x70;
895f3022 4541 buffer[offset+SPC_ADD_SENSE_LEN_OFFSET] = 10;
c66ac9db
NB
4542 /* DATA PROTECT */
4543 buffer[offset+SPC_SENSE_KEY_OFFSET] = DATA_PROTECT;
4544 /* WRITE PROTECTED */
4545 buffer[offset+SPC_ASC_KEY_OFFSET] = 0x27;
4546 break;
4547 case TCM_CHECK_CONDITION_UNIT_ATTENTION:
4548 /* CURRENT ERROR */
4549 buffer[offset] = 0x70;
895f3022 4550 buffer[offset+SPC_ADD_SENSE_LEN_OFFSET] = 10;
c66ac9db
NB
4551 /* UNIT ATTENTION */
4552 buffer[offset+SPC_SENSE_KEY_OFFSET] = UNIT_ATTENTION;
4553 core_scsi3_ua_for_check_condition(cmd, &asc, &ascq);
4554 buffer[offset+SPC_ASC_KEY_OFFSET] = asc;
4555 buffer[offset+SPC_ASCQ_KEY_OFFSET] = ascq;
4556 break;
4557 case TCM_CHECK_CONDITION_NOT_READY:
4558 /* CURRENT ERROR */
4559 buffer[offset] = 0x70;
895f3022 4560 buffer[offset+SPC_ADD_SENSE_LEN_OFFSET] = 10;
c66ac9db
NB
4561 /* Not Ready */
4562 buffer[offset+SPC_SENSE_KEY_OFFSET] = NOT_READY;
4563 transport_get_sense_codes(cmd, &asc, &ascq);
4564 buffer[offset+SPC_ASC_KEY_OFFSET] = asc;
4565 buffer[offset+SPC_ASCQ_KEY_OFFSET] = ascq;
4566 break;
4567 case TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE:
4568 default:
4569 /* CURRENT ERROR */
4570 buffer[offset] = 0x70;
895f3022 4571 buffer[offset+SPC_ADD_SENSE_LEN_OFFSET] = 10;
c66ac9db
NB
4572 /* ILLEGAL REQUEST */
4573 buffer[offset+SPC_SENSE_KEY_OFFSET] = ILLEGAL_REQUEST;
4574 /* LOGICAL UNIT COMMUNICATION FAILURE */
4575 buffer[offset+SPC_ASC_KEY_OFFSET] = 0x80;
4576 break;
4577 }
4578 /*
4579 * This code uses linux/include/scsi/scsi.h SAM status codes!
4580 */
4581 cmd->scsi_status = SAM_STAT_CHECK_CONDITION;
4582 /*
4583 * Automatically padded, this value is encoded in the fabric's
4584 * data_length response PDU containing the SCSI defined sense data.
4585 */
4586 cmd->scsi_sense_length = TRANSPORT_SENSE_BUFFER + offset;
4587
4588after_reason:
07bde79a 4589 return cmd->se_tfo->queue_status(cmd);
c66ac9db
NB
4590}
4591EXPORT_SYMBOL(transport_send_check_condition_and_sense);
4592
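/*
 * Editorial note: example of the fixed-format sense data built above, for
 * the TCM_WRITE_PROTECTED case (offsets relative to 'offset'):
 *
 *   buffer[offset]                          = 0x70          CURRENT ERROR
 *   buffer[offset+SPC_ADD_SENSE_LEN_OFFSET] = 10            additional length
 *   buffer[offset+SPC_SENSE_KEY_OFFSET]     = DATA_PROTECT  sense key
 *   buffer[offset+SPC_ASC_KEY_OFFSET]       = 0x27          WRITE PROTECTED
 *
 * after which cmd->scsi_status is set to SAM_STAT_CHECK_CONDITION and the
 * response is pushed out via the fabric's queue_status().
 */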
4593int transport_check_aborted_status(struct se_cmd *cmd, int send_status)
4594{
4595 int ret = 0;
4596
a1d8b49a 4597 if (atomic_read(&cmd->t_transport_aborted) != 0) {
6708bb27 4598 if (!send_status ||
c66ac9db
NB
4599 (cmd->se_cmd_flags & SCF_SENT_DELAYED_TAS))
4600 return 1;
4601#if 0
6708bb27 4602 pr_debug("Sending delayed SAM_STAT_TASK_ABORTED"
c66ac9db 4603 " status for CDB: 0x%02x ITT: 0x%08x\n",
a1d8b49a 4604 cmd->t_task_cdb[0],
e3d6f909 4605 cmd->se_tfo->get_task_tag(cmd));
c66ac9db
NB
4606#endif
4607 cmd->se_cmd_flags |= SCF_SENT_DELAYED_TAS;
e3d6f909 4608 cmd->se_tfo->queue_status(cmd);
c66ac9db
NB
4609 ret = 1;
4610 }
4611 return ret;
4612}
4613EXPORT_SYMBOL(transport_check_aborted_status);
4614
4615void transport_send_task_abort(struct se_cmd *cmd)
4616{
c252f003
NB
4617 unsigned long flags;
4618
4619 spin_lock_irqsave(&cmd->t_state_lock, flags);
4620 if (cmd->se_cmd_flags & SCF_SENT_CHECK_CONDITION) {
4621 spin_unlock_irqrestore(&cmd->t_state_lock, flags);
4622 return;
4623 }
4624 spin_unlock_irqrestore(&cmd->t_state_lock, flags);
4625
c66ac9db
NB
4626 /*
4627 * If there are still expected incoming fabric WRITEs, we wait
 4628	 * until they have completed before sending a TASK_ABORTED
4629 * response. This response with TASK_ABORTED status will be
4630 * queued back to fabric module by transport_check_aborted_status().
4631 */
4632 if (cmd->data_direction == DMA_TO_DEVICE) {
e3d6f909 4633 if (cmd->se_tfo->write_pending_status(cmd) != 0) {
a1d8b49a 4634 atomic_inc(&cmd->t_transport_aborted);
c66ac9db 4635 smp_mb__after_atomic_inc();
c66ac9db
NB
4636 }
4637 }
4638 cmd->scsi_status = SAM_STAT_TASK_ABORTED;
4639#if 0
6708bb27 4640 pr_debug("Setting SAM_STAT_TASK_ABORTED status for CDB: 0x%02x,"
a1d8b49a 4641 " ITT: 0x%08x\n", cmd->t_task_cdb[0],
e3d6f909 4642 cmd->se_tfo->get_task_tag(cmd));
c66ac9db 4643#endif
e3d6f909 4644 cmd->se_tfo->queue_status(cmd);
c66ac9db
NB
4645}
4646
e26d99ae 4647static int transport_generic_do_tmr(struct se_cmd *cmd)
c66ac9db 4648{
5951146d 4649 struct se_device *dev = cmd->se_dev;
c66ac9db
NB
4650 struct se_tmr_req *tmr = cmd->se_tmr_req;
4651 int ret;
4652
4653 switch (tmr->function) {
5c6cd613 4654 case TMR_ABORT_TASK:
c66ac9db
NB
4655 tmr->response = TMR_FUNCTION_REJECTED;
4656 break;
5c6cd613
NB
4657 case TMR_ABORT_TASK_SET:
4658 case TMR_CLEAR_ACA:
4659 case TMR_CLEAR_TASK_SET:
c66ac9db
NB
4660 tmr->response = TMR_TASK_MGMT_FUNCTION_NOT_SUPPORTED;
4661 break;
5c6cd613 4662 case TMR_LUN_RESET:
c66ac9db
NB
4663 ret = core_tmr_lun_reset(dev, tmr, NULL, NULL);
4664 tmr->response = (!ret) ? TMR_FUNCTION_COMPLETE :
4665 TMR_FUNCTION_REJECTED;
4666 break;
5c6cd613 4667 case TMR_TARGET_WARM_RESET:
c66ac9db
NB
4668 tmr->response = TMR_FUNCTION_REJECTED;
4669 break;
5c6cd613 4670 case TMR_TARGET_COLD_RESET:
c66ac9db
NB
4671 tmr->response = TMR_FUNCTION_REJECTED;
4672 break;
c66ac9db 4673 default:
6708bb27 4674		pr_err("Unknown TMR function: 0x%02x.\n",
c66ac9db
NB
4675 tmr->function);
4676 tmr->response = TMR_FUNCTION_REJECTED;
4677 break;
4678 }
4679
4680 cmd->t_state = TRANSPORT_ISTATE_PROCESSING;
e3d6f909 4681 cmd->se_tfo->queue_tm_rsp(cmd);
c66ac9db 4682
b7b8bef7 4683 transport_cmd_check_stop_to_fabric(cmd);
c66ac9db
NB
4684 return 0;
4685}
4686
c66ac9db
NB
4687/* transport_processing_thread():
4688 *
4689 *
4690 */
4691static int transport_processing_thread(void *param)
4692{
5951146d 4693 int ret;
c66ac9db 4694 struct se_cmd *cmd;
8359cf43 4695 struct se_device *dev = param;
c66ac9db 4696
c66ac9db 4697 while (!kthread_should_stop()) {
e3d6f909
AG
4698 ret = wait_event_interruptible(dev->dev_queue_obj.thread_wq,
4699 atomic_read(&dev->dev_queue_obj.queue_cnt) ||
c66ac9db
NB
4700 kthread_should_stop());
4701 if (ret < 0)
4702 goto out;
4703
c66ac9db 4704get_cmd:
5951146d
AG
4705 cmd = transport_get_cmd_from_queue(&dev->dev_queue_obj);
4706 if (!cmd)
c66ac9db
NB
4707 continue;
4708
5951146d 4709 switch (cmd->t_state) {
680b73c5
CH
4710 case TRANSPORT_NEW_CMD:
4711 BUG();
4712 break;
c66ac9db 4713 case TRANSPORT_NEW_CMD_MAP:
6708bb27
AG
4714 if (!cmd->se_tfo->new_cmd_map) {
4715 pr_err("cmd->se_tfo->new_cmd_map is"
c66ac9db
NB
4716 " NULL for TRANSPORT_NEW_CMD_MAP\n");
4717 BUG();
4718 }
e3d6f909 4719 ret = cmd->se_tfo->new_cmd_map(cmd);
c66ac9db 4720 if (ret < 0) {
03e98c9e 4721 transport_generic_request_failure(cmd);
c66ac9db
NB
4722 break;
4723 }
c66ac9db 4724 ret = transport_generic_new_cmd(cmd);
f147abb4 4725 if (ret < 0) {
03e98c9e
NB
4726 transport_generic_request_failure(cmd);
4727 break;
c66ac9db
NB
4728 }
4729 break;
4730 case TRANSPORT_PROCESS_WRITE:
4731 transport_generic_process_write(cmd);
4732 break;
c66ac9db
NB
4733 case TRANSPORT_PROCESS_TMR:
4734 transport_generic_do_tmr(cmd);
4735 break;
07bde79a 4736 case TRANSPORT_COMPLETE_QF_WP:
e057f533
CH
4737 transport_write_pending_qf(cmd);
4738 break;
4739 case TRANSPORT_COMPLETE_QF_OK:
4740 transport_complete_qf(cmd);
07bde79a 4741 break;
c66ac9db 4742 default:
f2da9dbd
CH
4743 pr_err("Unknown t_state: %d for ITT: 0x%08x "
4744 "i_state: %d on SE LUN: %u\n",
4745 cmd->t_state,
e3d6f909
AG
4746 cmd->se_tfo->get_task_tag(cmd),
4747 cmd->se_tfo->get_cmd_state(cmd),
4748 cmd->se_lun->unpacked_lun);
c66ac9db
NB
4749 BUG();
4750 }
4751
4752 goto get_cmd;
4753 }
4754
4755out:
ce8762f6
NB
4756 WARN_ON(!list_empty(&dev->state_task_list));
4757 WARN_ON(!list_empty(&dev->dev_queue_obj.qobj_list));
c66ac9db
NB
4758 dev->process_thread = NULL;
4759 return 0;
4760}