target: Eliminate usage of struct se_mem
[GitHub/mt8127/android_kernel_alcatel_ttab.git] / drivers / target / target_core_transport.c
CommitLineData
c66ac9db
NB
1/*******************************************************************************
2 * Filename: target_core_transport.c
3 *
4 * This file contains the Generic Target Engine Core.
5 *
6 * Copyright (c) 2002, 2003, 2004, 2005 PyX Technologies, Inc.
7 * Copyright (c) 2005, 2006, 2007 SBE, Inc.
8 * Copyright (c) 2007-2010 Rising Tide Systems
9 * Copyright (c) 2008-2010 Linux-iSCSI.org
10 *
11 * Nicholas A. Bellinger <nab@kernel.org>
12 *
13 * This program is free software; you can redistribute it and/or modify
14 * it under the terms of the GNU General Public License as published by
15 * the Free Software Foundation; either version 2 of the License, or
16 * (at your option) any later version.
17 *
18 * This program is distributed in the hope that it will be useful,
19 * but WITHOUT ANY WARRANTY; without even the implied warranty of
20 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
21 * GNU General Public License for more details.
22 *
23 * You should have received a copy of the GNU General Public License
24 * along with this program; if not, write to the Free Software
25 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
26 *
27 ******************************************************************************/
28
29#include <linux/version.h>
30#include <linux/net.h>
31#include <linux/delay.h>
32#include <linux/string.h>
33#include <linux/timer.h>
34#include <linux/slab.h>
35#include <linux/blkdev.h>
36#include <linux/spinlock.h>
c66ac9db
NB
37#include <linux/kthread.h>
38#include <linux/in.h>
39#include <linux/cdrom.h>
40#include <asm/unaligned.h>
41#include <net/sock.h>
42#include <net/tcp.h>
43#include <scsi/scsi.h>
44#include <scsi/scsi_cmnd.h>
e66ecd50 45#include <scsi/scsi_tcq.h>
c66ac9db
NB
46
47#include <target/target_core_base.h>
48#include <target/target_core_device.h>
49#include <target/target_core_tmr.h>
50#include <target/target_core_tpg.h>
51#include <target/target_core_transport.h>
52#include <target/target_core_fabric_ops.h>
53#include <target/target_core_configfs.h>
54
55#include "target_core_alua.h"
56#include "target_core_hba.h"
57#include "target_core_pr.h"
58#include "target_core_scdb.h"
59#include "target_core_ua.h"
60
61/* #define DEBUG_CDB_HANDLER */
62#ifdef DEBUG_CDB_HANDLER
63#define DEBUG_CDB_H(x...) printk(KERN_INFO x)
64#else
65#define DEBUG_CDB_H(x...)
66#endif
67
68/* #define DEBUG_CMD_MAP */
69#ifdef DEBUG_CMD_MAP
70#define DEBUG_CMD_M(x...) printk(KERN_INFO x)
71#else
72#define DEBUG_CMD_M(x...)
73#endif
74
75/* #define DEBUG_MEM_ALLOC */
76#ifdef DEBUG_MEM_ALLOC
77#define DEBUG_MEM(x...) printk(KERN_INFO x)
78#else
79#define DEBUG_MEM(x...)
80#endif
81
82/* #define DEBUG_MEM2_ALLOC */
83#ifdef DEBUG_MEM2_ALLOC
84#define DEBUG_MEM2(x...) printk(KERN_INFO x)
85#else
86#define DEBUG_MEM2(x...)
87#endif
88
89/* #define DEBUG_SG_CALC */
90#ifdef DEBUG_SG_CALC
91#define DEBUG_SC(x...) printk(KERN_INFO x)
92#else
93#define DEBUG_SC(x...)
94#endif
95
96/* #define DEBUG_SE_OBJ */
97#ifdef DEBUG_SE_OBJ
98#define DEBUG_SO(x...) printk(KERN_INFO x)
99#else
100#define DEBUG_SO(x...)
101#endif
102
103/* #define DEBUG_CMD_VOL */
104#ifdef DEBUG_CMD_VOL
105#define DEBUG_VOL(x...) printk(KERN_INFO x)
106#else
107#define DEBUG_VOL(x...)
108#endif
109
110/* #define DEBUG_CMD_STOP */
111#ifdef DEBUG_CMD_STOP
112#define DEBUG_CS(x...) printk(KERN_INFO x)
113#else
114#define DEBUG_CS(x...)
115#endif
116
117/* #define DEBUG_PASSTHROUGH */
118#ifdef DEBUG_PASSTHROUGH
119#define DEBUG_PT(x...) printk(KERN_INFO x)
120#else
121#define DEBUG_PT(x...)
122#endif
123
124/* #define DEBUG_TASK_STOP */
125#ifdef DEBUG_TASK_STOP
126#define DEBUG_TS(x...) printk(KERN_INFO x)
127#else
128#define DEBUG_TS(x...)
129#endif
130
131/* #define DEBUG_TRANSPORT_STOP */
132#ifdef DEBUG_TRANSPORT_STOP
133#define DEBUG_TRANSPORT_S(x...) printk(KERN_INFO x)
134#else
135#define DEBUG_TRANSPORT_S(x...)
136#endif
137
138/* #define DEBUG_TASK_FAILURE */
139#ifdef DEBUG_TASK_FAILURE
140#define DEBUG_TF(x...) printk(KERN_INFO x)
141#else
142#define DEBUG_TF(x...)
143#endif
144
145/* #define DEBUG_DEV_OFFLINE */
146#ifdef DEBUG_DEV_OFFLINE
147#define DEBUG_DO(x...) printk(KERN_INFO x)
148#else
149#define DEBUG_DO(x...)
150#endif
151
152/* #define DEBUG_TASK_STATE */
153#ifdef DEBUG_TASK_STATE
154#define DEBUG_TSTATE(x...) printk(KERN_INFO x)
155#else
156#define DEBUG_TSTATE(x...)
157#endif
158
159/* #define DEBUG_STATUS_THR */
160#ifdef DEBUG_STATUS_THR
161#define DEBUG_ST(x...) printk(KERN_INFO x)
162#else
163#define DEBUG_ST(x...)
164#endif
165
166/* #define DEBUG_TASK_TIMEOUT */
167#ifdef DEBUG_TASK_TIMEOUT
168#define DEBUG_TT(x...) printk(KERN_INFO x)
169#else
170#define DEBUG_TT(x...)
171#endif
172
173/* #define DEBUG_GENERIC_REQUEST_FAILURE */
174#ifdef DEBUG_GENERIC_REQUEST_FAILURE
175#define DEBUG_GRF(x...) printk(KERN_INFO x)
176#else
177#define DEBUG_GRF(x...)
178#endif
179
180/* #define DEBUG_SAM_TASK_ATTRS */
181#ifdef DEBUG_SAM_TASK_ATTRS
182#define DEBUG_STA(x...) printk(KERN_INFO x)
183#else
184#define DEBUG_STA(x...)
185#endif
186
e3d6f909 187static int sub_api_initialized;
c66ac9db
NB
188
189static struct kmem_cache *se_cmd_cache;
190static struct kmem_cache *se_sess_cache;
191struct kmem_cache *se_tmr_req_cache;
192struct kmem_cache *se_ua_cache;
c66ac9db
NB
193struct kmem_cache *t10_pr_reg_cache;
194struct kmem_cache *t10_alua_lu_gp_cache;
195struct kmem_cache *t10_alua_lu_gp_mem_cache;
196struct kmem_cache *t10_alua_tg_pt_gp_cache;
197struct kmem_cache *t10_alua_tg_pt_gp_mem_cache;
198
199/* Used for transport_dev_get_map_*() */
200typedef int (*map_func_t)(struct se_task *, u32);
201
202static int transport_generic_write_pending(struct se_cmd *);
5951146d 203static int transport_processing_thread(void *param);
c66ac9db
NB
204static int __transport_execute_tasks(struct se_device *dev);
205static void transport_complete_task_attr(struct se_cmd *cmd);
07bde79a
NB
206static int transport_complete_qf(struct se_cmd *cmd);
207static void transport_handle_queue_full(struct se_cmd *cmd,
208 struct se_device *dev, int (*qf_callback)(struct se_cmd *));
c66ac9db
NB
209static void transport_direct_request_timeout(struct se_cmd *cmd);
210static void transport_free_dev_tasks(struct se_cmd *cmd);
a1d8b49a 211static u32 transport_allocate_tasks(struct se_cmd *cmd,
ec98f782 212 unsigned long long starting_lba,
c66ac9db 213 enum dma_data_direction data_direction,
ec98f782 214 struct scatterlist *sgl, unsigned int nents);
05d1c7c0 215static int transport_generic_get_mem(struct se_cmd *cmd);
c66ac9db 216static int transport_generic_remove(struct se_cmd *cmd,
35462975 217 int session_reinstatement);
c66ac9db
NB
218static void transport_release_fe_cmd(struct se_cmd *cmd);
219static void transport_remove_cmd_from_queue(struct se_cmd *cmd,
220 struct se_queue_obj *qobj);
221static int transport_set_sense_codes(struct se_cmd *cmd, u8 asc, u8 ascq);
222static void transport_stop_all_task_timers(struct se_cmd *cmd);
223
e3d6f909 224int init_se_kmem_caches(void)
c66ac9db 225{
c66ac9db
NB
226 se_cmd_cache = kmem_cache_create("se_cmd_cache",
227 sizeof(struct se_cmd), __alignof__(struct se_cmd), 0, NULL);
228 if (!(se_cmd_cache)) {
229 printk(KERN_ERR "kmem_cache_create for struct se_cmd failed\n");
230 goto out;
231 }
232 se_tmr_req_cache = kmem_cache_create("se_tmr_cache",
233 sizeof(struct se_tmr_req), __alignof__(struct se_tmr_req),
234 0, NULL);
235 if (!(se_tmr_req_cache)) {
236 printk(KERN_ERR "kmem_cache_create() for struct se_tmr_req"
237 " failed\n");
238 goto out;
239 }
240 se_sess_cache = kmem_cache_create("se_sess_cache",
241 sizeof(struct se_session), __alignof__(struct se_session),
242 0, NULL);
243 if (!(se_sess_cache)) {
244 printk(KERN_ERR "kmem_cache_create() for struct se_session"
245 " failed\n");
246 goto out;
247 }
248 se_ua_cache = kmem_cache_create("se_ua_cache",
249 sizeof(struct se_ua), __alignof__(struct se_ua),
250 0, NULL);
251 if (!(se_ua_cache)) {
252 printk(KERN_ERR "kmem_cache_create() for struct se_ua failed\n");
253 goto out;
254 }
c66ac9db
NB
255 t10_pr_reg_cache = kmem_cache_create("t10_pr_reg_cache",
256 sizeof(struct t10_pr_registration),
257 __alignof__(struct t10_pr_registration), 0, NULL);
258 if (!(t10_pr_reg_cache)) {
259 printk(KERN_ERR "kmem_cache_create() for struct t10_pr_registration"
260 " failed\n");
261 goto out;
262 }
263 t10_alua_lu_gp_cache = kmem_cache_create("t10_alua_lu_gp_cache",
264 sizeof(struct t10_alua_lu_gp), __alignof__(struct t10_alua_lu_gp),
265 0, NULL);
266 if (!(t10_alua_lu_gp_cache)) {
267 printk(KERN_ERR "kmem_cache_create() for t10_alua_lu_gp_cache"
268 " failed\n");
269 goto out;
270 }
271 t10_alua_lu_gp_mem_cache = kmem_cache_create("t10_alua_lu_gp_mem_cache",
272 sizeof(struct t10_alua_lu_gp_member),
273 __alignof__(struct t10_alua_lu_gp_member), 0, NULL);
274 if (!(t10_alua_lu_gp_mem_cache)) {
275 printk(KERN_ERR "kmem_cache_create() for t10_alua_lu_gp_mem_"
276 "cache failed\n");
277 goto out;
278 }
279 t10_alua_tg_pt_gp_cache = kmem_cache_create("t10_alua_tg_pt_gp_cache",
280 sizeof(struct t10_alua_tg_pt_gp),
281 __alignof__(struct t10_alua_tg_pt_gp), 0, NULL);
282 if (!(t10_alua_tg_pt_gp_cache)) {
283 printk(KERN_ERR "kmem_cache_create() for t10_alua_tg_pt_gp_"
284 "cache failed\n");
285 goto out;
286 }
287 t10_alua_tg_pt_gp_mem_cache = kmem_cache_create(
288 "t10_alua_tg_pt_gp_mem_cache",
289 sizeof(struct t10_alua_tg_pt_gp_member),
290 __alignof__(struct t10_alua_tg_pt_gp_member),
291 0, NULL);
292 if (!(t10_alua_tg_pt_gp_mem_cache)) {
293 printk(KERN_ERR "kmem_cache_create() for t10_alua_tg_pt_gp_"
294 "mem_t failed\n");
295 goto out;
296 }
297
c66ac9db
NB
298 return 0;
299out:
300 if (se_cmd_cache)
301 kmem_cache_destroy(se_cmd_cache);
302 if (se_tmr_req_cache)
303 kmem_cache_destroy(se_tmr_req_cache);
304 if (se_sess_cache)
305 kmem_cache_destroy(se_sess_cache);
306 if (se_ua_cache)
307 kmem_cache_destroy(se_ua_cache);
c66ac9db
NB
308 if (t10_pr_reg_cache)
309 kmem_cache_destroy(t10_pr_reg_cache);
310 if (t10_alua_lu_gp_cache)
311 kmem_cache_destroy(t10_alua_lu_gp_cache);
312 if (t10_alua_lu_gp_mem_cache)
313 kmem_cache_destroy(t10_alua_lu_gp_mem_cache);
314 if (t10_alua_tg_pt_gp_cache)
315 kmem_cache_destroy(t10_alua_tg_pt_gp_cache);
316 if (t10_alua_tg_pt_gp_mem_cache)
317 kmem_cache_destroy(t10_alua_tg_pt_gp_mem_cache);
e3d6f909 318 return -ENOMEM;
c66ac9db
NB
319}
320
e3d6f909 321void release_se_kmem_caches(void)
c66ac9db 322{
c66ac9db
NB
323 kmem_cache_destroy(se_cmd_cache);
324 kmem_cache_destroy(se_tmr_req_cache);
325 kmem_cache_destroy(se_sess_cache);
326 kmem_cache_destroy(se_ua_cache);
c66ac9db
NB
327 kmem_cache_destroy(t10_pr_reg_cache);
328 kmem_cache_destroy(t10_alua_lu_gp_cache);
329 kmem_cache_destroy(t10_alua_lu_gp_mem_cache);
330 kmem_cache_destroy(t10_alua_tg_pt_gp_cache);
331 kmem_cache_destroy(t10_alua_tg_pt_gp_mem_cache);
c66ac9db
NB
332}
333
e3d6f909
AG
334/* This code ensures unique mib indexes are handed out. */
335static DEFINE_SPINLOCK(scsi_mib_index_lock);
336static u32 scsi_mib_index[SCSI_INDEX_TYPE_MAX];
e89d15ee
NB
337
338/*
339 * Allocate a new row index for the entry type specified
340 */
341u32 scsi_get_new_index(scsi_index_t type)
342{
343 u32 new_index;
344
e3d6f909 345 BUG_ON((type < 0) || (type >= SCSI_INDEX_TYPE_MAX));
e89d15ee 346
e3d6f909
AG
347 spin_lock(&scsi_mib_index_lock);
348 new_index = ++scsi_mib_index[type];
349 spin_unlock(&scsi_mib_index_lock);
e89d15ee
NB
350
351 return new_index;
352}
353
c66ac9db
NB
354void transport_init_queue_obj(struct se_queue_obj *qobj)
355{
356 atomic_set(&qobj->queue_cnt, 0);
357 INIT_LIST_HEAD(&qobj->qobj_list);
358 init_waitqueue_head(&qobj->thread_wq);
359 spin_lock_init(&qobj->cmd_queue_lock);
360}
361EXPORT_SYMBOL(transport_init_queue_obj);
362
363static int transport_subsystem_reqmods(void)
364{
365 int ret;
366
367 ret = request_module("target_core_iblock");
368 if (ret != 0)
369 printk(KERN_ERR "Unable to load target_core_iblock\n");
370
371 ret = request_module("target_core_file");
372 if (ret != 0)
373 printk(KERN_ERR "Unable to load target_core_file\n");
374
375 ret = request_module("target_core_pscsi");
376 if (ret != 0)
377 printk(KERN_ERR "Unable to load target_core_pscsi\n");
378
379 ret = request_module("target_core_stgt");
380 if (ret != 0)
381 printk(KERN_ERR "Unable to load target_core_stgt\n");
382
383 return 0;
384}
385
386int transport_subsystem_check_init(void)
387{
e3d6f909
AG
388 int ret;
389
390 if (sub_api_initialized)
c66ac9db
NB
391 return 0;
392 /*
393 * Request the loading of known TCM subsystem plugins..
394 */
e3d6f909
AG
395 ret = transport_subsystem_reqmods();
396 if (ret < 0)
397 return ret;
c66ac9db 398
e3d6f909 399 sub_api_initialized = 1;
c66ac9db
NB
400 return 0;
401}
402
403struct se_session *transport_init_session(void)
404{
405 struct se_session *se_sess;
406
407 se_sess = kmem_cache_zalloc(se_sess_cache, GFP_KERNEL);
408 if (!(se_sess)) {
409 printk(KERN_ERR "Unable to allocate struct se_session from"
410 " se_sess_cache\n");
411 return ERR_PTR(-ENOMEM);
412 }
413 INIT_LIST_HEAD(&se_sess->sess_list);
414 INIT_LIST_HEAD(&se_sess->sess_acl_list);
c66ac9db
NB
415
416 return se_sess;
417}
418EXPORT_SYMBOL(transport_init_session);
419
420/*
421 * Called with spin_lock_bh(&struct se_portal_group->session_lock called.
422 */
423void __transport_register_session(
424 struct se_portal_group *se_tpg,
425 struct se_node_acl *se_nacl,
426 struct se_session *se_sess,
427 void *fabric_sess_ptr)
428{
429 unsigned char buf[PR_REG_ISID_LEN];
430
431 se_sess->se_tpg = se_tpg;
432 se_sess->fabric_sess_ptr = fabric_sess_ptr;
433 /*
434 * Used by struct se_node_acl's under ConfigFS to locate active se_session-t
435 *
436 * Only set for struct se_session's that will actually be moving I/O.
437 * eg: *NOT* discovery sessions.
438 */
439 if (se_nacl) {
440 /*
441 * If the fabric module supports an ISID based TransportID,
442 * save this value in binary from the fabric I_T Nexus now.
443 */
e3d6f909 444 if (se_tpg->se_tpg_tfo->sess_get_initiator_sid != NULL) {
c66ac9db 445 memset(&buf[0], 0, PR_REG_ISID_LEN);
e3d6f909 446 se_tpg->se_tpg_tfo->sess_get_initiator_sid(se_sess,
c66ac9db
NB
447 &buf[0], PR_REG_ISID_LEN);
448 se_sess->sess_bin_isid = get_unaligned_be64(&buf[0]);
449 }
450 spin_lock_irq(&se_nacl->nacl_sess_lock);
451 /*
452 * The se_nacl->nacl_sess pointer will be set to the
453 * last active I_T Nexus for each struct se_node_acl.
454 */
455 se_nacl->nacl_sess = se_sess;
456
457 list_add_tail(&se_sess->sess_acl_list,
458 &se_nacl->acl_sess_list);
459 spin_unlock_irq(&se_nacl->nacl_sess_lock);
460 }
461 list_add_tail(&se_sess->sess_list, &se_tpg->tpg_sess_list);
462
463 printk(KERN_INFO "TARGET_CORE[%s]: Registered fabric_sess_ptr: %p\n",
e3d6f909 464 se_tpg->se_tpg_tfo->get_fabric_name(), se_sess->fabric_sess_ptr);
c66ac9db
NB
465}
466EXPORT_SYMBOL(__transport_register_session);
467
468void transport_register_session(
469 struct se_portal_group *se_tpg,
470 struct se_node_acl *se_nacl,
471 struct se_session *se_sess,
472 void *fabric_sess_ptr)
473{
474 spin_lock_bh(&se_tpg->session_lock);
475 __transport_register_session(se_tpg, se_nacl, se_sess, fabric_sess_ptr);
476 spin_unlock_bh(&se_tpg->session_lock);
477}
478EXPORT_SYMBOL(transport_register_session);
479
480void transport_deregister_session_configfs(struct se_session *se_sess)
481{
482 struct se_node_acl *se_nacl;
23388864 483 unsigned long flags;
c66ac9db
NB
484 /*
485 * Used by struct se_node_acl's under ConfigFS to locate active struct se_session
486 */
487 se_nacl = se_sess->se_node_acl;
488 if ((se_nacl)) {
23388864 489 spin_lock_irqsave(&se_nacl->nacl_sess_lock, flags);
c66ac9db
NB
490 list_del(&se_sess->sess_acl_list);
491 /*
492 * If the session list is empty, then clear the pointer.
493 * Otherwise, set the struct se_session pointer from the tail
494 * element of the per struct se_node_acl active session list.
495 */
496 if (list_empty(&se_nacl->acl_sess_list))
497 se_nacl->nacl_sess = NULL;
498 else {
499 se_nacl->nacl_sess = container_of(
500 se_nacl->acl_sess_list.prev,
501 struct se_session, sess_acl_list);
502 }
23388864 503 spin_unlock_irqrestore(&se_nacl->nacl_sess_lock, flags);
c66ac9db
NB
504 }
505}
506EXPORT_SYMBOL(transport_deregister_session_configfs);
507
508void transport_free_session(struct se_session *se_sess)
509{
510 kmem_cache_free(se_sess_cache, se_sess);
511}
512EXPORT_SYMBOL(transport_free_session);
513
514void transport_deregister_session(struct se_session *se_sess)
515{
516 struct se_portal_group *se_tpg = se_sess->se_tpg;
517 struct se_node_acl *se_nacl;
518
519 if (!(se_tpg)) {
520 transport_free_session(se_sess);
521 return;
522 }
c66ac9db
NB
523
524 spin_lock_bh(&se_tpg->session_lock);
525 list_del(&se_sess->sess_list);
526 se_sess->se_tpg = NULL;
527 se_sess->fabric_sess_ptr = NULL;
528 spin_unlock_bh(&se_tpg->session_lock);
529
530 /*
531 * Determine if we need to do extra work for this initiator node's
532 * struct se_node_acl if it had been previously dynamically generated.
533 */
534 se_nacl = se_sess->se_node_acl;
535 if ((se_nacl)) {
536 spin_lock_bh(&se_tpg->acl_node_lock);
537 if (se_nacl->dynamic_node_acl) {
e3d6f909 538 if (!(se_tpg->se_tpg_tfo->tpg_check_demo_mode_cache(
c66ac9db
NB
539 se_tpg))) {
540 list_del(&se_nacl->acl_list);
541 se_tpg->num_node_acls--;
542 spin_unlock_bh(&se_tpg->acl_node_lock);
543
544 core_tpg_wait_for_nacl_pr_ref(se_nacl);
c66ac9db 545 core_free_device_list_for_node(se_nacl, se_tpg);
e3d6f909 546 se_tpg->se_tpg_tfo->tpg_release_fabric_acl(se_tpg,
c66ac9db
NB
547 se_nacl);
548 spin_lock_bh(&se_tpg->acl_node_lock);
549 }
550 }
551 spin_unlock_bh(&se_tpg->acl_node_lock);
552 }
553
554 transport_free_session(se_sess);
555
556 printk(KERN_INFO "TARGET_CORE[%s]: Deregistered fabric_sess\n",
e3d6f909 557 se_tpg->se_tpg_tfo->get_fabric_name());
c66ac9db
NB
558}
559EXPORT_SYMBOL(transport_deregister_session);
560
561/*
a1d8b49a 562 * Called with cmd->t_state_lock held.
c66ac9db
NB
563 */
564static void transport_all_task_dev_remove_state(struct se_cmd *cmd)
565{
566 struct se_device *dev;
567 struct se_task *task;
568 unsigned long flags;
569
a1d8b49a 570 list_for_each_entry(task, &cmd->t_task_list, t_list) {
c66ac9db
NB
571 dev = task->se_dev;
572 if (!(dev))
573 continue;
574
575 if (atomic_read(&task->task_active))
576 continue;
577
578 if (!(atomic_read(&task->task_state_active)))
579 continue;
580
581 spin_lock_irqsave(&dev->execute_task_lock, flags);
582 list_del(&task->t_state_list);
583 DEBUG_TSTATE("Removed ITT: 0x%08x dev: %p task[%p]\n",
e3d6f909 584 cmd->se_tfo->tfo_get_task_tag(cmd), dev, task);
c66ac9db
NB
585 spin_unlock_irqrestore(&dev->execute_task_lock, flags);
586
587 atomic_set(&task->task_state_active, 0);
a1d8b49a 588 atomic_dec(&cmd->t_task_cdbs_ex_left);
c66ac9db
NB
589 }
590}
591
592/* transport_cmd_check_stop():
593 *
594 * 'transport_off = 1' determines if t_transport_active should be cleared.
595 * 'transport_off = 2' determines if task_dev_state should be removed.
596 *
597 * A non-zero u8 t_state sets cmd->t_state.
598 * Returns 1 when command is stopped, else 0.
599 */
600static int transport_cmd_check_stop(
601 struct se_cmd *cmd,
602 int transport_off,
603 u8 t_state)
604{
605 unsigned long flags;
606
a1d8b49a 607 spin_lock_irqsave(&cmd->t_state_lock, flags);
c66ac9db
NB
608 /*
609 * Determine if IOCTL context caller in requesting the stopping of this
610 * command for LUN shutdown purposes.
611 */
a1d8b49a
AG
612 if (atomic_read(&cmd->transport_lun_stop)) {
613 DEBUG_CS("%s:%d atomic_read(&cmd->transport_lun_stop)"
c66ac9db 614 " == TRUE for ITT: 0x%08x\n", __func__, __LINE__,
e3d6f909 615 cmd->se_tfo->get_task_tag(cmd));
c66ac9db
NB
616
617 cmd->deferred_t_state = cmd->t_state;
618 cmd->t_state = TRANSPORT_DEFERRED_CMD;
a1d8b49a 619 atomic_set(&cmd->t_transport_active, 0);
c66ac9db
NB
620 if (transport_off == 2)
621 transport_all_task_dev_remove_state(cmd);
a1d8b49a 622 spin_unlock_irqrestore(&cmd->t_state_lock, flags);
c66ac9db 623
a1d8b49a 624 complete(&cmd->transport_lun_stop_comp);
c66ac9db
NB
625 return 1;
626 }
627 /*
628 * Determine if frontend context caller is requesting the stopping of
e3d6f909 629 * this command for frontend exceptions.
c66ac9db 630 */
a1d8b49a
AG
631 if (atomic_read(&cmd->t_transport_stop)) {
632 DEBUG_CS("%s:%d atomic_read(&cmd->t_transport_stop) =="
c66ac9db 633 " TRUE for ITT: 0x%08x\n", __func__, __LINE__,
e3d6f909 634 cmd->se_tfo->get_task_tag(cmd));
c66ac9db
NB
635
636 cmd->deferred_t_state = cmd->t_state;
637 cmd->t_state = TRANSPORT_DEFERRED_CMD;
638 if (transport_off == 2)
639 transport_all_task_dev_remove_state(cmd);
640
641 /*
642 * Clear struct se_cmd->se_lun before the transport_off == 2 handoff
643 * to FE.
644 */
645 if (transport_off == 2)
646 cmd->se_lun = NULL;
a1d8b49a 647 spin_unlock_irqrestore(&cmd->t_state_lock, flags);
c66ac9db 648
a1d8b49a 649 complete(&cmd->t_transport_stop_comp);
c66ac9db
NB
650 return 1;
651 }
652 if (transport_off) {
a1d8b49a 653 atomic_set(&cmd->t_transport_active, 0);
c66ac9db
NB
654 if (transport_off == 2) {
655 transport_all_task_dev_remove_state(cmd);
656 /*
657 * Clear struct se_cmd->se_lun before the transport_off == 2
658 * handoff to fabric module.
659 */
660 cmd->se_lun = NULL;
661 /*
662 * Some fabric modules like tcm_loop can release
25985edc 663 * their internally allocated I/O reference now and
c66ac9db
NB
664 * struct se_cmd now.
665 */
e3d6f909 666 if (cmd->se_tfo->check_stop_free != NULL) {
c66ac9db 667 spin_unlock_irqrestore(
a1d8b49a 668 &cmd->t_state_lock, flags);
c66ac9db 669
e3d6f909 670 cmd->se_tfo->check_stop_free(cmd);
c66ac9db
NB
671 return 1;
672 }
673 }
a1d8b49a 674 spin_unlock_irqrestore(&cmd->t_state_lock, flags);
c66ac9db
NB
675
676 return 0;
677 } else if (t_state)
678 cmd->t_state = t_state;
a1d8b49a 679 spin_unlock_irqrestore(&cmd->t_state_lock, flags);
c66ac9db
NB
680
681 return 0;
682}
683
684static int transport_cmd_check_stop_to_fabric(struct se_cmd *cmd)
685{
686 return transport_cmd_check_stop(cmd, 2, 0);
687}
688
689static void transport_lun_remove_cmd(struct se_cmd *cmd)
690{
e3d6f909 691 struct se_lun *lun = cmd->se_lun;
c66ac9db
NB
692 unsigned long flags;
693
694 if (!lun)
695 return;
696
a1d8b49a
AG
697 spin_lock_irqsave(&cmd->t_state_lock, flags);
698 if (!(atomic_read(&cmd->transport_dev_active))) {
699 spin_unlock_irqrestore(&cmd->t_state_lock, flags);
c66ac9db
NB
700 goto check_lun;
701 }
a1d8b49a 702 atomic_set(&cmd->transport_dev_active, 0);
c66ac9db 703 transport_all_task_dev_remove_state(cmd);
a1d8b49a 704 spin_unlock_irqrestore(&cmd->t_state_lock, flags);
c66ac9db 705
c66ac9db
NB
706
707check_lun:
708 spin_lock_irqsave(&lun->lun_cmd_lock, flags);
a1d8b49a 709 if (atomic_read(&cmd->transport_lun_active)) {
5951146d 710 list_del(&cmd->se_lun_node);
a1d8b49a 711 atomic_set(&cmd->transport_lun_active, 0);
c66ac9db
NB
712#if 0
713 printk(KERN_INFO "Removed ITT: 0x%08x from LUN LIST[%d]\n"
e3d6f909 714 cmd->se_tfo->get_task_tag(cmd), lun->unpacked_lun);
c66ac9db
NB
715#endif
716 }
717 spin_unlock_irqrestore(&lun->lun_cmd_lock, flags);
718}
719
720void transport_cmd_finish_abort(struct se_cmd *cmd, int remove)
721{
5951146d 722 transport_remove_cmd_from_queue(cmd, &cmd->se_dev->dev_queue_obj);
c66ac9db
NB
723 transport_lun_remove_cmd(cmd);
724
725 if (transport_cmd_check_stop_to_fabric(cmd))
726 return;
727 if (remove)
35462975 728 transport_generic_remove(cmd, 0);
c66ac9db
NB
729}
730
731void transport_cmd_finish_abort_tmr(struct se_cmd *cmd)
732{
5951146d 733 transport_remove_cmd_from_queue(cmd, &cmd->se_dev->dev_queue_obj);
c66ac9db
NB
734
735 if (transport_cmd_check_stop_to_fabric(cmd))
736 return;
737
35462975 738 transport_generic_remove(cmd, 0);
c66ac9db
NB
739}
740
5951146d 741static void transport_add_cmd_to_queue(
c66ac9db
NB
742 struct se_cmd *cmd,
743 int t_state)
744{
745 struct se_device *dev = cmd->se_dev;
e3d6f909 746 struct se_queue_obj *qobj = &dev->dev_queue_obj;
c66ac9db
NB
747 unsigned long flags;
748
5951146d 749 INIT_LIST_HEAD(&cmd->se_queue_node);
c66ac9db
NB
750
751 if (t_state) {
a1d8b49a 752 spin_lock_irqsave(&cmd->t_state_lock, flags);
c66ac9db 753 cmd->t_state = t_state;
a1d8b49a
AG
754 atomic_set(&cmd->t_transport_active, 1);
755 spin_unlock_irqrestore(&cmd->t_state_lock, flags);
c66ac9db
NB
756 }
757
758 spin_lock_irqsave(&qobj->cmd_queue_lock, flags);
07bde79a
NB
759 if (cmd->se_cmd_flags & SCF_EMULATE_QUEUE_FULL) {
760 cmd->se_cmd_flags &= ~SCF_EMULATE_QUEUE_FULL;
761 list_add(&cmd->se_queue_node, &qobj->qobj_list);
762 } else
763 list_add_tail(&cmd->se_queue_node, &qobj->qobj_list);
a1d8b49a 764 atomic_inc(&cmd->t_transport_queue_active);
c66ac9db
NB
765 spin_unlock_irqrestore(&qobj->cmd_queue_lock, flags);
766
767 atomic_inc(&qobj->queue_cnt);
768 wake_up_interruptible(&qobj->thread_wq);
c66ac9db
NB
769}
770
5951146d
AG
771static struct se_cmd *
772transport_get_cmd_from_queue(struct se_queue_obj *qobj)
c66ac9db 773{
5951146d 774 struct se_cmd *cmd;
c66ac9db
NB
775 unsigned long flags;
776
777 spin_lock_irqsave(&qobj->cmd_queue_lock, flags);
778 if (list_empty(&qobj->qobj_list)) {
779 spin_unlock_irqrestore(&qobj->cmd_queue_lock, flags);
780 return NULL;
781 }
5951146d 782 cmd = list_first_entry(&qobj->qobj_list, struct se_cmd, se_queue_node);
c66ac9db 783
a1d8b49a 784 atomic_dec(&cmd->t_transport_queue_active);
c66ac9db 785
5951146d 786 list_del(&cmd->se_queue_node);
c66ac9db
NB
787 atomic_dec(&qobj->queue_cnt);
788 spin_unlock_irqrestore(&qobj->cmd_queue_lock, flags);
789
5951146d 790 return cmd;
c66ac9db
NB
791}
792
793static void transport_remove_cmd_from_queue(struct se_cmd *cmd,
794 struct se_queue_obj *qobj)
795{
5951146d 796 struct se_cmd *t;
c66ac9db
NB
797 unsigned long flags;
798
799 spin_lock_irqsave(&qobj->cmd_queue_lock, flags);
a1d8b49a 800 if (!(atomic_read(&cmd->t_transport_queue_active))) {
c66ac9db
NB
801 spin_unlock_irqrestore(&qobj->cmd_queue_lock, flags);
802 return;
803 }
804
5951146d
AG
805 list_for_each_entry(t, &qobj->qobj_list, se_queue_node)
806 if (t == cmd) {
a1d8b49a 807 atomic_dec(&cmd->t_transport_queue_active);
5951146d
AG
808 atomic_dec(&qobj->queue_cnt);
809 list_del(&cmd->se_queue_node);
810 break;
811 }
c66ac9db
NB
812 spin_unlock_irqrestore(&qobj->cmd_queue_lock, flags);
813
a1d8b49a 814 if (atomic_read(&cmd->t_transport_queue_active)) {
c66ac9db 815 printk(KERN_ERR "ITT: 0x%08x t_transport_queue_active: %d\n",
e3d6f909 816 cmd->se_tfo->get_task_tag(cmd),
a1d8b49a 817 atomic_read(&cmd->t_transport_queue_active));
c66ac9db
NB
818 }
819}
820
821/*
822 * Completion function used by TCM subsystem plugins (such as FILEIO)
823 * for queueing up response from struct se_subsystem_api->do_task()
824 */
825void transport_complete_sync_cache(struct se_cmd *cmd, int good)
826{
a1d8b49a 827 struct se_task *task = list_entry(cmd->t_task_list.next,
c66ac9db
NB
828 struct se_task, t_list);
829
830 if (good) {
831 cmd->scsi_status = SAM_STAT_GOOD;
832 task->task_scsi_status = GOOD;
833 } else {
834 task->task_scsi_status = SAM_STAT_CHECK_CONDITION;
835 task->task_error_status = PYX_TRANSPORT_ILLEGAL_REQUEST;
e3d6f909 836 task->task_se_cmd->transport_error_status =
c66ac9db
NB
837 PYX_TRANSPORT_ILLEGAL_REQUEST;
838 }
839
840 transport_complete_task(task, good);
841}
842EXPORT_SYMBOL(transport_complete_sync_cache);
843
844/* transport_complete_task():
845 *
846 * Called from interrupt and non interrupt context depending
847 * on the transport plugin.
848 */
849void transport_complete_task(struct se_task *task, int success)
850{
e3d6f909 851 struct se_cmd *cmd = task->task_se_cmd;
c66ac9db
NB
852 struct se_device *dev = task->se_dev;
853 int t_state;
854 unsigned long flags;
855#if 0
856 printk(KERN_INFO "task: %p CDB: 0x%02x obj_ptr: %p\n", task,
a1d8b49a 857 cmd->t_task_cdb[0], dev);
c66ac9db 858#endif
e3d6f909 859 if (dev)
c66ac9db 860 atomic_inc(&dev->depth_left);
c66ac9db 861
a1d8b49a 862 spin_lock_irqsave(&cmd->t_state_lock, flags);
c66ac9db
NB
863 atomic_set(&task->task_active, 0);
864
865 /*
866 * See if any sense data exists, if so set the TASK_SENSE flag.
867 * Also check for any other post completion work that needs to be
868 * done by the plugins.
869 */
870 if (dev && dev->transport->transport_complete) {
871 if (dev->transport->transport_complete(task) != 0) {
872 cmd->se_cmd_flags |= SCF_TRANSPORT_TASK_SENSE;
873 task->task_sense = 1;
874 success = 1;
875 }
876 }
877
878 /*
879 * See if we are waiting for outstanding struct se_task
880 * to complete for an exception condition
881 */
882 if (atomic_read(&task->task_stop)) {
883 /*
a1d8b49a 884 * Decrement cmd->t_se_count if this task had
c66ac9db
NB
885 * previously thrown its timeout exception handler.
886 */
887 if (atomic_read(&task->task_timeout)) {
a1d8b49a 888 atomic_dec(&cmd->t_se_count);
c66ac9db
NB
889 atomic_set(&task->task_timeout, 0);
890 }
a1d8b49a 891 spin_unlock_irqrestore(&cmd->t_state_lock, flags);
c66ac9db
NB
892
893 complete(&task->task_stop_comp);
894 return;
895 }
896 /*
897 * If the task's timeout handler has fired, use the t_task_cdbs_timeout
898 * left counter to determine when the struct se_cmd is ready to be queued to
899 * the processing thread.
900 */
901 if (atomic_read(&task->task_timeout)) {
902 if (!(atomic_dec_and_test(
a1d8b49a
AG
903 &cmd->t_task_cdbs_timeout_left))) {
904 spin_unlock_irqrestore(&cmd->t_state_lock,
c66ac9db
NB
905 flags);
906 return;
907 }
908 t_state = TRANSPORT_COMPLETE_TIMEOUT;
a1d8b49a 909 spin_unlock_irqrestore(&cmd->t_state_lock, flags);
c66ac9db
NB
910
911 transport_add_cmd_to_queue(cmd, t_state);
912 return;
913 }
a1d8b49a 914 atomic_dec(&cmd->t_task_cdbs_timeout_left);
c66ac9db
NB
915
916 /*
917 * Decrement the outstanding t_task_cdbs_left count. The last
918 * struct se_task from struct se_cmd will complete itself into the
919 * device queue depending upon int success.
920 */
a1d8b49a 921 if (!(atomic_dec_and_test(&cmd->t_task_cdbs_left))) {
c66ac9db 922 if (!success)
a1d8b49a 923 cmd->t_tasks_failed = 1;
c66ac9db 924
a1d8b49a 925 spin_unlock_irqrestore(&cmd->t_state_lock, flags);
c66ac9db
NB
926 return;
927 }
928
a1d8b49a 929 if (!success || cmd->t_tasks_failed) {
c66ac9db
NB
930 t_state = TRANSPORT_COMPLETE_FAILURE;
931 if (!task->task_error_status) {
932 task->task_error_status =
933 PYX_TRANSPORT_UNKNOWN_SAM_OPCODE;
934 cmd->transport_error_status =
935 PYX_TRANSPORT_UNKNOWN_SAM_OPCODE;
936 }
937 } else {
a1d8b49a 938 atomic_set(&cmd->t_transport_complete, 1);
c66ac9db
NB
939 t_state = TRANSPORT_COMPLETE_OK;
940 }
a1d8b49a 941 spin_unlock_irqrestore(&cmd->t_state_lock, flags);
c66ac9db
NB
942
943 transport_add_cmd_to_queue(cmd, t_state);
944}
945EXPORT_SYMBOL(transport_complete_task);
946
947/*
948 * Called by transport_add_tasks_from_cmd() once a struct se_cmd's
949 * struct se_task list are ready to be added to the active execution list
950 * struct se_device
951
952 * Called with se_dev_t->execute_task_lock called.
953 */
954static inline int transport_add_task_check_sam_attr(
955 struct se_task *task,
956 struct se_task *task_prev,
957 struct se_device *dev)
958{
959 /*
960 * No SAM Task attribute emulation enabled, add to tail of
961 * execution queue
962 */
963 if (dev->dev_task_attr_type != SAM_TASK_ATTR_EMULATED) {
964 list_add_tail(&task->t_execute_list, &dev->execute_task_list);
965 return 0;
966 }
967 /*
968 * HEAD_OF_QUEUE attribute for received CDB, which means
969 * the first task that is associated with a struct se_cmd goes to
970 * head of the struct se_device->execute_task_list, and task_prev
971 * after that for each subsequent task
972 */
e66ecd50 973 if (task->task_se_cmd->sam_task_attr == MSG_HEAD_TAG) {
c66ac9db
NB
974 list_add(&task->t_execute_list,
975 (task_prev != NULL) ?
976 &task_prev->t_execute_list :
977 &dev->execute_task_list);
978
979 DEBUG_STA("Set HEAD_OF_QUEUE for task CDB: 0x%02x"
980 " in execution queue\n",
981 T_TASK(task->task_se_cmd)->t_task_cdb[0]);
982 return 1;
983 }
984 /*
985 * For ORDERED, SIMPLE or UNTAGGED attribute tasks once they have been
986 * transitioned from Dermant -> Active state, and are added to the end
987 * of the struct se_device->execute_task_list
988 */
989 list_add_tail(&task->t_execute_list, &dev->execute_task_list);
990 return 0;
991}
992
993/* __transport_add_task_to_execute_queue():
994 *
995 * Called with se_dev_t->execute_task_lock called.
996 */
997static void __transport_add_task_to_execute_queue(
998 struct se_task *task,
999 struct se_task *task_prev,
1000 struct se_device *dev)
1001{
1002 int head_of_queue;
1003
1004 head_of_queue = transport_add_task_check_sam_attr(task, task_prev, dev);
1005 atomic_inc(&dev->execute_tasks);
1006
1007 if (atomic_read(&task->task_state_active))
1008 return;
1009 /*
1010 * Determine if this task needs to go to HEAD_OF_QUEUE for the
1011 * state list as well. Running with SAM Task Attribute emulation
1012 * will always return head_of_queue == 0 here
1013 */
1014 if (head_of_queue)
1015 list_add(&task->t_state_list, (task_prev) ?
1016 &task_prev->t_state_list :
1017 &dev->state_task_list);
1018 else
1019 list_add_tail(&task->t_state_list, &dev->state_task_list);
1020
1021 atomic_set(&task->task_state_active, 1);
1022
1023 DEBUG_TSTATE("Added ITT: 0x%08x task[%p] to dev: %p\n",
e3d6f909 1024 task->task_se_cmd->se_tfo->get_task_tag(task->task_se_cmd),
c66ac9db
NB
1025 task, dev);
1026}
1027
1028static void transport_add_tasks_to_state_queue(struct se_cmd *cmd)
1029{
1030 struct se_device *dev;
1031 struct se_task *task;
1032 unsigned long flags;
1033
a1d8b49a
AG
1034 spin_lock_irqsave(&cmd->t_state_lock, flags);
1035 list_for_each_entry(task, &cmd->t_task_list, t_list) {
c66ac9db
NB
1036 dev = task->se_dev;
1037
1038 if (atomic_read(&task->task_state_active))
1039 continue;
1040
1041 spin_lock(&dev->execute_task_lock);
1042 list_add_tail(&task->t_state_list, &dev->state_task_list);
1043 atomic_set(&task->task_state_active, 1);
1044
1045 DEBUG_TSTATE("Added ITT: 0x%08x task[%p] to dev: %p\n",
e3d6f909 1046 task->se_cmd->se_tfo->get_task_tag(
c66ac9db
NB
1047 task->task_se_cmd), task, dev);
1048
1049 spin_unlock(&dev->execute_task_lock);
1050 }
a1d8b49a 1051 spin_unlock_irqrestore(&cmd->t_state_lock, flags);
c66ac9db
NB
1052}
1053
1054static void transport_add_tasks_from_cmd(struct se_cmd *cmd)
1055{
5951146d 1056 struct se_device *dev = cmd->se_dev;
c66ac9db
NB
1057 struct se_task *task, *task_prev = NULL;
1058 unsigned long flags;
1059
1060 spin_lock_irqsave(&dev->execute_task_lock, flags);
a1d8b49a 1061 list_for_each_entry(task, &cmd->t_task_list, t_list) {
c66ac9db
NB
1062 if (atomic_read(&task->task_execute_queue))
1063 continue;
1064 /*
1065 * __transport_add_task_to_execute_queue() handles the
1066 * SAM Task Attribute emulation if enabled
1067 */
1068 __transport_add_task_to_execute_queue(task, task_prev, dev);
1069 atomic_set(&task->task_execute_queue, 1);
1070 task_prev = task;
1071 }
1072 spin_unlock_irqrestore(&dev->execute_task_lock, flags);
c66ac9db
NB
1073}
1074
1075/* transport_remove_task_from_execute_queue():
1076 *
1077 *
1078 */
52208ae3 1079void transport_remove_task_from_execute_queue(
c66ac9db
NB
1080 struct se_task *task,
1081 struct se_device *dev)
1082{
1083 unsigned long flags;
1084
af57c3ac
NB
1085 if (atomic_read(&task->task_execute_queue) == 0) {
1086 dump_stack();
1087 return;
1088 }
1089
c66ac9db
NB
1090 spin_lock_irqsave(&dev->execute_task_lock, flags);
1091 list_del(&task->t_execute_list);
af57c3ac 1092 atomic_set(&task->task_execute_queue, 0);
c66ac9db
NB
1093 atomic_dec(&dev->execute_tasks);
1094 spin_unlock_irqrestore(&dev->execute_task_lock, flags);
1095}
1096
07bde79a
NB
1097/*
1098 * Handle QUEUE_FULL / -EAGAIN status
1099 */
1100
1101static void target_qf_do_work(struct work_struct *work)
1102{
1103 struct se_device *dev = container_of(work, struct se_device,
1104 qf_work_queue);
1105 struct se_cmd *cmd, *cmd_tmp;
1106
1107 spin_lock_irq(&dev->qf_cmd_lock);
1108 list_for_each_entry_safe(cmd, cmd_tmp, &dev->qf_cmd_list, se_qf_node) {
1109
1110 list_del(&cmd->se_qf_node);
1111 atomic_dec(&dev->dev_qf_count);
1112 smp_mb__after_atomic_dec();
1113 spin_unlock_irq(&dev->qf_cmd_lock);
1114
1115 printk(KERN_INFO "Processing %s cmd: %p QUEUE_FULL in work queue"
1116 " context: %s\n", cmd->se_tfo->get_fabric_name(), cmd,
1117 (cmd->t_state == TRANSPORT_COMPLETE_OK) ? "COMPLETE_OK" :
1118 (cmd->t_state == TRANSPORT_COMPLETE_QF_WP) ? "WRITE_PENDING"
1119 : "UNKNOWN");
1120 /*
1121 * The SCF_EMULATE_QUEUE_FULL flag will be cleared once se_cmd
1122 * has been added to head of queue
1123 */
1124 transport_add_cmd_to_queue(cmd, cmd->t_state);
1125
1126 spin_lock_irq(&dev->qf_cmd_lock);
1127 }
1128 spin_unlock_irq(&dev->qf_cmd_lock);
1129}
1130
c66ac9db
NB
1131unsigned char *transport_dump_cmd_direction(struct se_cmd *cmd)
1132{
1133 switch (cmd->data_direction) {
1134 case DMA_NONE:
1135 return "NONE";
1136 case DMA_FROM_DEVICE:
1137 return "READ";
1138 case DMA_TO_DEVICE:
1139 return "WRITE";
1140 case DMA_BIDIRECTIONAL:
1141 return "BIDI";
1142 default:
1143 break;
1144 }
1145
1146 return "UNKNOWN";
1147}
1148
1149void transport_dump_dev_state(
1150 struct se_device *dev,
1151 char *b,
1152 int *bl)
1153{
1154 *bl += sprintf(b + *bl, "Status: ");
1155 switch (dev->dev_status) {
1156 case TRANSPORT_DEVICE_ACTIVATED:
1157 *bl += sprintf(b + *bl, "ACTIVATED");
1158 break;
1159 case TRANSPORT_DEVICE_DEACTIVATED:
1160 *bl += sprintf(b + *bl, "DEACTIVATED");
1161 break;
1162 case TRANSPORT_DEVICE_SHUTDOWN:
1163 *bl += sprintf(b + *bl, "SHUTDOWN");
1164 break;
1165 case TRANSPORT_DEVICE_OFFLINE_ACTIVATED:
1166 case TRANSPORT_DEVICE_OFFLINE_DEACTIVATED:
1167 *bl += sprintf(b + *bl, "OFFLINE");
1168 break;
1169 default:
1170 *bl += sprintf(b + *bl, "UNKNOWN=%d", dev->dev_status);
1171 break;
1172 }
1173
1174 *bl += sprintf(b + *bl, " Execute/Left/Max Queue Depth: %d/%d/%d",
1175 atomic_read(&dev->execute_tasks), atomic_read(&dev->depth_left),
1176 dev->queue_depth);
1177 *bl += sprintf(b + *bl, " SectorSize: %u MaxSectors: %u\n",
e3d6f909 1178 dev->se_sub_dev->se_dev_attrib.block_size, dev->se_sub_dev->se_dev_attrib.max_sectors);
c66ac9db
NB
1179 *bl += sprintf(b + *bl, " ");
1180}
1181
1182/* transport_release_all_cmds():
1183 *
1184 *
1185 */
1186static void transport_release_all_cmds(struct se_device *dev)
1187{
5951146d 1188 struct se_cmd *cmd, *tcmd;
c66ac9db
NB
1189 int bug_out = 0, t_state;
1190 unsigned long flags;
1191
e3d6f909 1192 spin_lock_irqsave(&dev->dev_queue_obj.cmd_queue_lock, flags);
5951146d
AG
1193 list_for_each_entry_safe(cmd, tcmd, &dev->dev_queue_obj.qobj_list,
1194 se_queue_node) {
1195 t_state = cmd->t_state;
1196 list_del(&cmd->se_queue_node);
e3d6f909 1197 spin_unlock_irqrestore(&dev->dev_queue_obj.cmd_queue_lock,
c66ac9db
NB
1198 flags);
1199
1200 printk(KERN_ERR "Releasing ITT: 0x%08x, i_state: %u,"
1201 " t_state: %u directly\n",
e3d6f909
AG
1202 cmd->se_tfo->get_task_tag(cmd),
1203 cmd->se_tfo->get_cmd_state(cmd), t_state);
c66ac9db
NB
1204
1205 transport_release_fe_cmd(cmd);
1206 bug_out = 1;
1207
e3d6f909 1208 spin_lock_irqsave(&dev->dev_queue_obj.cmd_queue_lock, flags);
c66ac9db 1209 }
e3d6f909 1210 spin_unlock_irqrestore(&dev->dev_queue_obj.cmd_queue_lock, flags);
c66ac9db
NB
1211#if 0
1212 if (bug_out)
1213 BUG();
1214#endif
1215}
1216
1217void transport_dump_vpd_proto_id(
1218 struct t10_vpd *vpd,
1219 unsigned char *p_buf,
1220 int p_buf_len)
1221{
1222 unsigned char buf[VPD_TMP_BUF_SIZE];
1223 int len;
1224
1225 memset(buf, 0, VPD_TMP_BUF_SIZE);
1226 len = sprintf(buf, "T10 VPD Protocol Identifier: ");
1227
1228 switch (vpd->protocol_identifier) {
1229 case 0x00:
1230 sprintf(buf+len, "Fibre Channel\n");
1231 break;
1232 case 0x10:
1233 sprintf(buf+len, "Parallel SCSI\n");
1234 break;
1235 case 0x20:
1236 sprintf(buf+len, "SSA\n");
1237 break;
1238 case 0x30:
1239 sprintf(buf+len, "IEEE 1394\n");
1240 break;
1241 case 0x40:
1242 sprintf(buf+len, "SCSI Remote Direct Memory Access"
1243 " Protocol\n");
1244 break;
1245 case 0x50:
1246 sprintf(buf+len, "Internet SCSI (iSCSI)\n");
1247 break;
1248 case 0x60:
1249 sprintf(buf+len, "SAS Serial SCSI Protocol\n");
1250 break;
1251 case 0x70:
1252 sprintf(buf+len, "Automation/Drive Interface Transport"
1253 " Protocol\n");
1254 break;
1255 case 0x80:
1256 sprintf(buf+len, "AT Attachment Interface ATA/ATAPI\n");
1257 break;
1258 default:
1259 sprintf(buf+len, "Unknown 0x%02x\n",
1260 vpd->protocol_identifier);
1261 break;
1262 }
1263
1264 if (p_buf)
1265 strncpy(p_buf, buf, p_buf_len);
1266 else
1267 printk(KERN_INFO "%s", buf);
1268}
1269
1270void
1271transport_set_vpd_proto_id(struct t10_vpd *vpd, unsigned char *page_83)
1272{
1273 /*
1274 * Check if the Protocol Identifier Valid (PIV) bit is set..
1275 *
1276 * from spc3r23.pdf section 7.5.1
1277 */
1278 if (page_83[1] & 0x80) {
1279 vpd->protocol_identifier = (page_83[0] & 0xf0);
1280 vpd->protocol_identifier_set = 1;
1281 transport_dump_vpd_proto_id(vpd, NULL, 0);
1282 }
1283}
1284EXPORT_SYMBOL(transport_set_vpd_proto_id);
1285
1286int transport_dump_vpd_assoc(
1287 struct t10_vpd *vpd,
1288 unsigned char *p_buf,
1289 int p_buf_len)
1290{
1291 unsigned char buf[VPD_TMP_BUF_SIZE];
e3d6f909
AG
1292 int ret = 0;
1293 int len;
c66ac9db
NB
1294
1295 memset(buf, 0, VPD_TMP_BUF_SIZE);
1296 len = sprintf(buf, "T10 VPD Identifier Association: ");
1297
1298 switch (vpd->association) {
1299 case 0x00:
1300 sprintf(buf+len, "addressed logical unit\n");
1301 break;
1302 case 0x10:
1303 sprintf(buf+len, "target port\n");
1304 break;
1305 case 0x20:
1306 sprintf(buf+len, "SCSI target device\n");
1307 break;
1308 default:
1309 sprintf(buf+len, "Unknown 0x%02x\n", vpd->association);
e3d6f909 1310 ret = -EINVAL;
c66ac9db
NB
1311 break;
1312 }
1313
1314 if (p_buf)
1315 strncpy(p_buf, buf, p_buf_len);
1316 else
1317 printk("%s", buf);
1318
1319 return ret;
1320}
1321
1322int transport_set_vpd_assoc(struct t10_vpd *vpd, unsigned char *page_83)
1323{
1324 /*
1325 * The VPD identification association..
1326 *
1327 * from spc3r23.pdf Section 7.6.3.1 Table 297
1328 */
1329 vpd->association = (page_83[1] & 0x30);
1330 return transport_dump_vpd_assoc(vpd, NULL, 0);
1331}
1332EXPORT_SYMBOL(transport_set_vpd_assoc);
1333
1334int transport_dump_vpd_ident_type(
1335 struct t10_vpd *vpd,
1336 unsigned char *p_buf,
1337 int p_buf_len)
1338{
1339 unsigned char buf[VPD_TMP_BUF_SIZE];
e3d6f909
AG
1340 int ret = 0;
1341 int len;
c66ac9db
NB
1342
1343 memset(buf, 0, VPD_TMP_BUF_SIZE);
1344 len = sprintf(buf, "T10 VPD Identifier Type: ");
1345
1346 switch (vpd->device_identifier_type) {
1347 case 0x00:
1348 sprintf(buf+len, "Vendor specific\n");
1349 break;
1350 case 0x01:
1351 sprintf(buf+len, "T10 Vendor ID based\n");
1352 break;
1353 case 0x02:
1354 sprintf(buf+len, "EUI-64 based\n");
1355 break;
1356 case 0x03:
1357 sprintf(buf+len, "NAA\n");
1358 break;
1359 case 0x04:
1360 sprintf(buf+len, "Relative target port identifier\n");
1361 break;
1362 case 0x08:
1363 sprintf(buf+len, "SCSI name string\n");
1364 break;
1365 default:
1366 sprintf(buf+len, "Unsupported: 0x%02x\n",
1367 vpd->device_identifier_type);
e3d6f909 1368 ret = -EINVAL;
c66ac9db
NB
1369 break;
1370 }
1371
e3d6f909
AG
1372 if (p_buf) {
1373 if (p_buf_len < strlen(buf)+1)
1374 return -EINVAL;
c66ac9db 1375 strncpy(p_buf, buf, p_buf_len);
e3d6f909 1376 } else {
c66ac9db 1377 printk("%s", buf);
e3d6f909 1378 }
c66ac9db
NB
1379
1380 return ret;
1381}
1382
1383int transport_set_vpd_ident_type(struct t10_vpd *vpd, unsigned char *page_83)
1384{
1385 /*
1386 * The VPD identifier type..
1387 *
1388 * from spc3r23.pdf Section 7.6.3.1 Table 298
1389 */
1390 vpd->device_identifier_type = (page_83[1] & 0x0f);
1391 return transport_dump_vpd_ident_type(vpd, NULL, 0);
1392}
1393EXPORT_SYMBOL(transport_set_vpd_ident_type);
1394
1395int transport_dump_vpd_ident(
1396 struct t10_vpd *vpd,
1397 unsigned char *p_buf,
1398 int p_buf_len)
1399{
1400 unsigned char buf[VPD_TMP_BUF_SIZE];
1401 int ret = 0;
1402
1403 memset(buf, 0, VPD_TMP_BUF_SIZE);
1404
1405 switch (vpd->device_identifier_code_set) {
1406 case 0x01: /* Binary */
1407 sprintf(buf, "T10 VPD Binary Device Identifier: %s\n",
1408 &vpd->device_identifier[0]);
1409 break;
1410 case 0x02: /* ASCII */
1411 sprintf(buf, "T10 VPD ASCII Device Identifier: %s\n",
1412 &vpd->device_identifier[0]);
1413 break;
1414 case 0x03: /* UTF-8 */
1415 sprintf(buf, "T10 VPD UTF-8 Device Identifier: %s\n",
1416 &vpd->device_identifier[0]);
1417 break;
1418 default:
1419 sprintf(buf, "T10 VPD Device Identifier encoding unsupported:"
1420 " 0x%02x", vpd->device_identifier_code_set);
e3d6f909 1421 ret = -EINVAL;
c66ac9db
NB
1422 break;
1423 }
1424
1425 if (p_buf)
1426 strncpy(p_buf, buf, p_buf_len);
1427 else
1428 printk("%s", buf);
1429
1430 return ret;
1431}
1432
1433int
1434transport_set_vpd_ident(struct t10_vpd *vpd, unsigned char *page_83)
1435{
1436 static const char hex_str[] = "0123456789abcdef";
1437 int j = 0, i = 4; /* offset to start of the identifer */
1438
1439 /*
1440 * The VPD Code Set (encoding)
1441 *
1442 * from spc3r23.pdf Section 7.6.3.1 Table 296
1443 */
1444 vpd->device_identifier_code_set = (page_83[0] & 0x0f);
1445 switch (vpd->device_identifier_code_set) {
1446 case 0x01: /* Binary */
1447 vpd->device_identifier[j++] =
1448 hex_str[vpd->device_identifier_type];
1449 while (i < (4 + page_83[3])) {
1450 vpd->device_identifier[j++] =
1451 hex_str[(page_83[i] & 0xf0) >> 4];
1452 vpd->device_identifier[j++] =
1453 hex_str[page_83[i] & 0x0f];
1454 i++;
1455 }
1456 break;
1457 case 0x02: /* ASCII */
1458 case 0x03: /* UTF-8 */
1459 while (i < (4 + page_83[3]))
1460 vpd->device_identifier[j++] = page_83[i++];
1461 break;
1462 default:
1463 break;
1464 }
1465
1466 return transport_dump_vpd_ident(vpd, NULL, 0);
1467}
1468EXPORT_SYMBOL(transport_set_vpd_ident);
1469
1470static void core_setup_task_attr_emulation(struct se_device *dev)
1471{
1472 /*
1473 * If this device is from Target_Core_Mod/pSCSI, disable the
1474 * SAM Task Attribute emulation.
1475 *
1476 * This is currently not available in upsream Linux/SCSI Target
1477 * mode code, and is assumed to be disabled while using TCM/pSCSI.
1478 */
e3d6f909 1479 if (dev->transport->transport_type == TRANSPORT_PLUGIN_PHBA_PDEV) {
c66ac9db
NB
1480 dev->dev_task_attr_type = SAM_TASK_ATTR_PASSTHROUGH;
1481 return;
1482 }
1483
1484 dev->dev_task_attr_type = SAM_TASK_ATTR_EMULATED;
1485 DEBUG_STA("%s: Using SAM_TASK_ATTR_EMULATED for SPC: 0x%02x"
e3d6f909
AG
1486 " device\n", dev->transport->name,
1487 dev->transport->get_device_rev(dev));
c66ac9db
NB
1488}
1489
1490static void scsi_dump_inquiry(struct se_device *dev)
1491{
e3d6f909 1492 struct t10_wwn *wwn = &dev->se_sub_dev->t10_wwn;
c66ac9db
NB
1493 int i, device_type;
1494 /*
1495 * Print Linux/SCSI style INQUIRY formatting to the kernel ring buffer
1496 */
1497 printk(" Vendor: ");
1498 for (i = 0; i < 8; i++)
1499 if (wwn->vendor[i] >= 0x20)
1500 printk("%c", wwn->vendor[i]);
1501 else
1502 printk(" ");
1503
1504 printk(" Model: ");
1505 for (i = 0; i < 16; i++)
1506 if (wwn->model[i] >= 0x20)
1507 printk("%c", wwn->model[i]);
1508 else
1509 printk(" ");
1510
1511 printk(" Revision: ");
1512 for (i = 0; i < 4; i++)
1513 if (wwn->revision[i] >= 0x20)
1514 printk("%c", wwn->revision[i]);
1515 else
1516 printk(" ");
1517
1518 printk("\n");
1519
e3d6f909 1520 device_type = dev->transport->get_device_type(dev);
c66ac9db
NB
1521 printk(" Type: %s ", scsi_device_type(device_type));
1522 printk(" ANSI SCSI revision: %02x\n",
e3d6f909 1523 dev->transport->get_device_rev(dev));
c66ac9db
NB
1524}
1525
1526struct se_device *transport_add_device_to_core_hba(
1527 struct se_hba *hba,
1528 struct se_subsystem_api *transport,
1529 struct se_subsystem_dev *se_dev,
1530 u32 device_flags,
1531 void *transport_dev,
1532 struct se_dev_limits *dev_limits,
1533 const char *inquiry_prod,
1534 const char *inquiry_rev)
1535{
12a18bdc 1536 int force_pt;
c66ac9db
NB
1537 struct se_device *dev;
1538
1539 dev = kzalloc(sizeof(struct se_device), GFP_KERNEL);
1540 if (!(dev)) {
1541 printk(KERN_ERR "Unable to allocate memory for se_dev_t\n");
1542 return NULL;
1543 }
c66ac9db 1544
e3d6f909 1545 transport_init_queue_obj(&dev->dev_queue_obj);
c66ac9db
NB
1546 dev->dev_flags = device_flags;
1547 dev->dev_status |= TRANSPORT_DEVICE_DEACTIVATED;
5951146d 1548 dev->dev_ptr = transport_dev;
c66ac9db
NB
1549 dev->se_hba = hba;
1550 dev->se_sub_dev = se_dev;
1551 dev->transport = transport;
1552 atomic_set(&dev->active_cmds, 0);
1553 INIT_LIST_HEAD(&dev->dev_list);
1554 INIT_LIST_HEAD(&dev->dev_sep_list);
1555 INIT_LIST_HEAD(&dev->dev_tmr_list);
1556 INIT_LIST_HEAD(&dev->execute_task_list);
1557 INIT_LIST_HEAD(&dev->delayed_cmd_list);
1558 INIT_LIST_HEAD(&dev->ordered_cmd_list);
1559 INIT_LIST_HEAD(&dev->state_task_list);
07bde79a 1560 INIT_LIST_HEAD(&dev->qf_cmd_list);
c66ac9db
NB
1561 spin_lock_init(&dev->execute_task_lock);
1562 spin_lock_init(&dev->delayed_cmd_lock);
1563 spin_lock_init(&dev->ordered_cmd_lock);
1564 spin_lock_init(&dev->state_task_lock);
1565 spin_lock_init(&dev->dev_alua_lock);
1566 spin_lock_init(&dev->dev_reservation_lock);
1567 spin_lock_init(&dev->dev_status_lock);
1568 spin_lock_init(&dev->dev_status_thr_lock);
1569 spin_lock_init(&dev->se_port_lock);
1570 spin_lock_init(&dev->se_tmr_lock);
07bde79a 1571 spin_lock_init(&dev->qf_cmd_lock);
c66ac9db
NB
1572
1573 dev->queue_depth = dev_limits->queue_depth;
1574 atomic_set(&dev->depth_left, dev->queue_depth);
1575 atomic_set(&dev->dev_ordered_id, 0);
1576
1577 se_dev_set_default_attribs(dev, dev_limits);
1578
1579 dev->dev_index = scsi_get_new_index(SCSI_DEVICE_INDEX);
1580 dev->creation_time = get_jiffies_64();
1581 spin_lock_init(&dev->stats_lock);
1582
1583 spin_lock(&hba->device_lock);
1584 list_add_tail(&dev->dev_list, &hba->hba_dev_list);
1585 hba->dev_count++;
1586 spin_unlock(&hba->device_lock);
1587 /*
1588 * Setup the SAM Task Attribute emulation for struct se_device
1589 */
1590 core_setup_task_attr_emulation(dev);
1591 /*
1592 * Force PR and ALUA passthrough emulation with internal object use.
1593 */
1594 force_pt = (hba->hba_flags & HBA_FLAGS_INTERNAL_USE);
1595 /*
1596 * Setup the Reservations infrastructure for struct se_device
1597 */
1598 core_setup_reservations(dev, force_pt);
1599 /*
1600 * Setup the Asymmetric Logical Unit Assignment for struct se_device
1601 */
1602 if (core_setup_alua(dev, force_pt) < 0)
1603 goto out;
1604
1605 /*
1606 * Startup the struct se_device processing thread
1607 */
1608 dev->process_thread = kthread_run(transport_processing_thread, dev,
e3d6f909 1609 "LIO_%s", dev->transport->name);
c66ac9db
NB
1610 if (IS_ERR(dev->process_thread)) {
1611 printk(KERN_ERR "Unable to create kthread: LIO_%s\n",
e3d6f909 1612 dev->transport->name);
c66ac9db
NB
1613 goto out;
1614 }
07bde79a
NB
1615 /*
1616 * Setup work_queue for QUEUE_FULL
1617 */
1618 INIT_WORK(&dev->qf_work_queue, target_qf_do_work);
c66ac9db
NB
1619 /*
1620 * Preload the initial INQUIRY const values if we are doing
1621 * anything virtual (IBLOCK, FILEIO, RAMDISK), but not for TCM/pSCSI
1622 * passthrough because this is being provided by the backend LLD.
1623 * This is required so that transport_get_inquiry() copies these
1624 * originals once back into DEV_T10_WWN(dev) for the virtual device
1625 * setup.
1626 */
e3d6f909 1627 if (dev->transport->transport_type != TRANSPORT_PLUGIN_PHBA_PDEV) {
f22c1196 1628 if (!inquiry_prod || !inquiry_rev) {
c66ac9db
NB
1629 printk(KERN_ERR "All non TCM/pSCSI plugins require"
1630 " INQUIRY consts\n");
1631 goto out;
1632 }
1633
e3d6f909
AG
1634 strncpy(&dev->se_sub_dev->t10_wwn.vendor[0], "LIO-ORG", 8);
1635 strncpy(&dev->se_sub_dev->t10_wwn.model[0], inquiry_prod, 16);
1636 strncpy(&dev->se_sub_dev->t10_wwn.revision[0], inquiry_rev, 4);
c66ac9db
NB
1637 }
1638 scsi_dump_inquiry(dev);
1639
12a18bdc 1640 return dev;
c66ac9db 1641out:
c66ac9db
NB
1642 kthread_stop(dev->process_thread);
1643
1644 spin_lock(&hba->device_lock);
1645 list_del(&dev->dev_list);
1646 hba->dev_count--;
1647 spin_unlock(&hba->device_lock);
1648
1649 se_release_vpd_for_dev(dev);
1650
c66ac9db
NB
1651 kfree(dev);
1652
1653 return NULL;
1654}
1655EXPORT_SYMBOL(transport_add_device_to_core_hba);
1656
1657/* transport_generic_prepare_cdb():
1658 *
1659 * Since the Initiator sees iSCSI devices as LUNs, the SCSI CDB will
1660 * contain the iSCSI LUN in bits 7-5 of byte 1 as per SAM-2.
1661 * The point of this is since we are mapping iSCSI LUNs to
1662 * SCSI Target IDs having a non-zero LUN in the CDB will throw the
1663 * devices and HBAs for a loop.
1664 */
1665static inline void transport_generic_prepare_cdb(
1666 unsigned char *cdb)
1667{
1668 switch (cdb[0]) {
1669 case READ_10: /* SBC - RDProtect */
1670 case READ_12: /* SBC - RDProtect */
1671 case READ_16: /* SBC - RDProtect */
1672 case SEND_DIAGNOSTIC: /* SPC - SELF-TEST Code */
1673 case VERIFY: /* SBC - VRProtect */
1674 case VERIFY_16: /* SBC - VRProtect */
1675 case WRITE_VERIFY: /* SBC - VRProtect */
1676 case WRITE_VERIFY_12: /* SBC - VRProtect */
1677 break;
1678 default:
1679 cdb[1] &= 0x1f; /* clear logical unit number */
1680 break;
1681 }
1682}
1683
1684static struct se_task *
1685transport_generic_get_task(struct se_cmd *cmd,
1686 enum dma_data_direction data_direction)
1687{
1688 struct se_task *task;
5951146d 1689 struct se_device *dev = cmd->se_dev;
c66ac9db
NB
1690
1691 task = dev->transport->alloc_task(cmd);
1692 if (!task) {
1693 printk(KERN_ERR "Unable to allocate struct se_task\n");
1694 return NULL;
1695 }
1696
1697 INIT_LIST_HEAD(&task->t_list);
1698 INIT_LIST_HEAD(&task->t_execute_list);
1699 INIT_LIST_HEAD(&task->t_state_list);
1700 init_completion(&task->task_stop_comp);
c66ac9db
NB
1701 task->task_se_cmd = cmd;
1702 task->se_dev = dev;
1703 task->task_data_direction = data_direction;
1704
c66ac9db
NB
1705 return task;
1706}
1707
1708static int transport_generic_cmd_sequencer(struct se_cmd *, unsigned char *);
1709
c66ac9db
NB
1710/*
1711 * Used by fabric modules containing a local struct se_cmd within their
1712 * fabric dependent per I/O descriptor.
1713 */
1714void transport_init_se_cmd(
1715 struct se_cmd *cmd,
1716 struct target_core_fabric_ops *tfo,
1717 struct se_session *se_sess,
1718 u32 data_length,
1719 int data_direction,
1720 int task_attr,
1721 unsigned char *sense_buffer)
1722{
5951146d
AG
1723 INIT_LIST_HEAD(&cmd->se_lun_node);
1724 INIT_LIST_HEAD(&cmd->se_delayed_node);
1725 INIT_LIST_HEAD(&cmd->se_ordered_node);
07bde79a 1726 INIT_LIST_HEAD(&cmd->se_qf_node);
c66ac9db 1727
a1d8b49a
AG
1728 INIT_LIST_HEAD(&cmd->t_task_list);
1729 init_completion(&cmd->transport_lun_fe_stop_comp);
1730 init_completion(&cmd->transport_lun_stop_comp);
1731 init_completion(&cmd->t_transport_stop_comp);
1732 spin_lock_init(&cmd->t_state_lock);
1733 atomic_set(&cmd->transport_dev_active, 1);
c66ac9db
NB
1734
1735 cmd->se_tfo = tfo;
1736 cmd->se_sess = se_sess;
1737 cmd->data_length = data_length;
1738 cmd->data_direction = data_direction;
1739 cmd->sam_task_attr = task_attr;
1740 cmd->sense_buffer = sense_buffer;
1741}
1742EXPORT_SYMBOL(transport_init_se_cmd);
1743
1744static int transport_check_alloc_task_attr(struct se_cmd *cmd)
1745{
1746 /*
1747 * Check if SAM Task Attribute emulation is enabled for this
1748 * struct se_device storage object
1749 */
5951146d 1750 if (cmd->se_dev->dev_task_attr_type != SAM_TASK_ATTR_EMULATED)
c66ac9db
NB
1751 return 0;
1752
e66ecd50 1753 if (cmd->sam_task_attr == MSG_ACA_TAG) {
c66ac9db
NB
1754 DEBUG_STA("SAM Task Attribute ACA"
1755 " emulation is not supported\n");
e3d6f909 1756 return -EINVAL;
c66ac9db
NB
1757 }
1758 /*
1759 * Used to determine when ORDERED commands should go from
1760 * Dormant to Active status.
1761 */
5951146d 1762 cmd->se_ordered_id = atomic_inc_return(&cmd->se_dev->dev_ordered_id);
c66ac9db
NB
1763 smp_mb__after_atomic_inc();
1764 DEBUG_STA("Allocated se_ordered_id: %u for Task Attr: 0x%02x on %s\n",
1765 cmd->se_ordered_id, cmd->sam_task_attr,
1766 TRANSPORT(cmd->se_dev)->name);
1767 return 0;
1768}
1769
1770void transport_free_se_cmd(
1771 struct se_cmd *se_cmd)
1772{
1773 if (se_cmd->se_tmr_req)
1774 core_tmr_release_req(se_cmd->se_tmr_req);
1775 /*
1776 * Check and free any extended CDB buffer that was allocated
1777 */
a1d8b49a
AG
1778 if (se_cmd->t_task_cdb != se_cmd->__t_task_cdb)
1779 kfree(se_cmd->t_task_cdb);
c66ac9db
NB
1780}
1781EXPORT_SYMBOL(transport_free_se_cmd);
1782
1783static void transport_generic_wait_for_tasks(struct se_cmd *, int, int);
1784
1785/* transport_generic_allocate_tasks():
1786 *
1787 * Called from fabric RX Thread.
1788 */
1789int transport_generic_allocate_tasks(
1790 struct se_cmd *cmd,
1791 unsigned char *cdb)
1792{
1793 int ret;
1794
1795 transport_generic_prepare_cdb(cdb);
1796
1797 /*
1798 * This is needed for early exceptions.
1799 */
1800 cmd->transport_wait_for_tasks = &transport_generic_wait_for_tasks;
1801
c66ac9db
NB
1802 /*
1803 * Ensure that the received CDB is less than the max (252 + 8) bytes
1804 * for VARIABLE_LENGTH_CMD
1805 */
1806 if (scsi_command_size(cdb) > SCSI_MAX_VARLEN_CDB_SIZE) {
1807 printk(KERN_ERR "Received SCSI CDB with command_size: %d that"
1808 " exceeds SCSI_MAX_VARLEN_CDB_SIZE: %d\n",
1809 scsi_command_size(cdb), SCSI_MAX_VARLEN_CDB_SIZE);
e3d6f909 1810 return -EINVAL;
c66ac9db
NB
1811 }
1812 /*
1813 * If the received CDB is larger than TCM_MAX_COMMAND_SIZE,
1814 * allocate the additional extended CDB buffer now.. Otherwise
1815 * setup the pointer from __t_task_cdb to t_task_cdb.
1816 */
a1d8b49a
AG
1817 if (scsi_command_size(cdb) > sizeof(cmd->__t_task_cdb)) {
1818 cmd->t_task_cdb = kzalloc(scsi_command_size(cdb),
c66ac9db 1819 GFP_KERNEL);
a1d8b49a
AG
1820 if (!(cmd->t_task_cdb)) {
1821 printk(KERN_ERR "Unable to allocate cmd->t_task_cdb"
1822 " %u > sizeof(cmd->__t_task_cdb): %lu ops\n",
c66ac9db 1823 scsi_command_size(cdb),
a1d8b49a 1824 (unsigned long)sizeof(cmd->__t_task_cdb));
e3d6f909 1825 return -ENOMEM;
c66ac9db
NB
1826 }
1827 } else
a1d8b49a 1828 cmd->t_task_cdb = &cmd->__t_task_cdb[0];
c66ac9db 1829 /*
a1d8b49a 1830 * Copy the original CDB into cmd->
c66ac9db 1831 */
a1d8b49a 1832 memcpy(cmd->t_task_cdb, cdb, scsi_command_size(cdb));
c66ac9db
NB
1833 /*
1834 * Setup the received CDB based on SCSI defined opcodes and
1835 * perform unit attention, persistent reservations and ALUA
a1d8b49a 1836 * checks for virtual device backends. The cmd->t_task_cdb
c66ac9db
NB
1837 * pointer is expected to be setup before we reach this point.
1838 */
1839 ret = transport_generic_cmd_sequencer(cmd, cdb);
1840 if (ret < 0)
1841 return ret;
1842 /*
1843 * Check for SAM Task Attribute Emulation
1844 */
1845 if (transport_check_alloc_task_attr(cmd) < 0) {
1846 cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION;
1847 cmd->scsi_sense_reason = TCM_INVALID_CDB_FIELD;
5951146d 1848 return -EINVAL;
c66ac9db
NB
1849 }
1850 spin_lock(&cmd->se_lun->lun_sep_lock);
1851 if (cmd->se_lun->lun_sep)
1852 cmd->se_lun->lun_sep->sep_stats.cmd_pdus++;
1853 spin_unlock(&cmd->se_lun->lun_sep_lock);
1854 return 0;
1855}
1856EXPORT_SYMBOL(transport_generic_allocate_tasks);
1857
1858/*
1859 * Used by fabric module frontends not defining a TFO->new_cmd_map()
1860 * to queue up a newly setup se_cmd w/ TRANSPORT_NEW_CMD statis
1861 */
1862int transport_generic_handle_cdb(
1863 struct se_cmd *cmd)
1864{
e3d6f909 1865 if (!cmd->se_lun) {
c66ac9db 1866 dump_stack();
e3d6f909
AG
1867 printk(KERN_ERR "cmd->se_lun is NULL\n");
1868 return -EINVAL;
c66ac9db 1869 }
695434e1 1870
c66ac9db
NB
1871 transport_add_cmd_to_queue(cmd, TRANSPORT_NEW_CMD);
1872 return 0;
1873}
1874EXPORT_SYMBOL(transport_generic_handle_cdb);
1875
695434e1
NB
1876/*
1877 * Used by fabric module frontends to queue tasks directly.
1878 * Many only be used from process context only
1879 */
1880int transport_handle_cdb_direct(
1881 struct se_cmd *cmd)
1882{
1883 if (!cmd->se_lun) {
1884 dump_stack();
1885 printk(KERN_ERR "cmd->se_lun is NULL\n");
1886 return -EINVAL;
1887 }
1888 if (in_interrupt()) {
1889 dump_stack();
1890 printk(KERN_ERR "transport_generic_handle_cdb cannot be called"
1891 " from interrupt context\n");
1892 return -EINVAL;
1893 }
1894
1895 return transport_generic_new_cmd(cmd);
1896}
1897EXPORT_SYMBOL(transport_handle_cdb_direct);
1898
c66ac9db
NB
1899/*
1900 * Used by fabric module frontends defining a TFO->new_cmd_map() caller
1901 * to queue up a newly setup se_cmd w/ TRANSPORT_NEW_CMD_MAP in order to
1902 * complete setup in TCM process context w/ TFO->new_cmd_map().
1903 */
1904int transport_generic_handle_cdb_map(
1905 struct se_cmd *cmd)
1906{
e3d6f909 1907 if (!cmd->se_lun) {
c66ac9db 1908 dump_stack();
e3d6f909
AG
1909 printk(KERN_ERR "cmd->se_lun is NULL\n");
1910 return -EINVAL;
c66ac9db
NB
1911 }
1912
1913 transport_add_cmd_to_queue(cmd, TRANSPORT_NEW_CMD_MAP);
1914 return 0;
1915}
1916EXPORT_SYMBOL(transport_generic_handle_cdb_map);
1917
1918/* transport_generic_handle_data():
1919 *
1920 *
1921 */
1922int transport_generic_handle_data(
1923 struct se_cmd *cmd)
1924{
1925 /*
1926 * For the software fabric case, then we assume the nexus is being
1927 * failed/shutdown when signals are pending from the kthread context
1928 * caller, so we return a failure. For the HW target mode case running
1929 * in interrupt code, the signal_pending() check is skipped.
1930 */
1931 if (!in_interrupt() && signal_pending(current))
e3d6f909 1932 return -EPERM;
c66ac9db
NB
1933 /*
1934 * If the received CDB has aleady been ABORTED by the generic
1935 * target engine, we now call transport_check_aborted_status()
1936 * to queue any delated TASK_ABORTED status for the received CDB to the
25985edc 1937 * fabric module as we are expecting no further incoming DATA OUT
c66ac9db
NB
1938 * sequences at this point.
1939 */
1940 if (transport_check_aborted_status(cmd, 1) != 0)
1941 return 0;
1942
1943 transport_add_cmd_to_queue(cmd, TRANSPORT_PROCESS_WRITE);
1944 return 0;
1945}
1946EXPORT_SYMBOL(transport_generic_handle_data);
1947
1948/* transport_generic_handle_tmr():
1949 *
1950 *
1951 */
1952int transport_generic_handle_tmr(
1953 struct se_cmd *cmd)
1954{
1955 /*
1956 * This is needed for early exceptions.
1957 */
1958 cmd->transport_wait_for_tasks = &transport_generic_wait_for_tasks;
c66ac9db
NB
1959
1960 transport_add_cmd_to_queue(cmd, TRANSPORT_PROCESS_TMR);
1961 return 0;
1962}
1963EXPORT_SYMBOL(transport_generic_handle_tmr);
1964
f4366772
NB
1965void transport_generic_free_cmd_intr(
1966 struct se_cmd *cmd)
1967{
1968 transport_add_cmd_to_queue(cmd, TRANSPORT_FREE_CMD_INTR);
1969}
1970EXPORT_SYMBOL(transport_generic_free_cmd_intr);
1971
c66ac9db
NB
1972static int transport_stop_tasks_for_cmd(struct se_cmd *cmd)
1973{
1974 struct se_task *task, *task_tmp;
1975 unsigned long flags;
1976 int ret = 0;
1977
1978 DEBUG_TS("ITT[0x%08x] - Stopping tasks\n",
e3d6f909 1979 cmd->se_tfo->get_task_tag(cmd));
c66ac9db
NB
1980
1981 /*
1982 * No tasks remain in the execution queue
1983 */
a1d8b49a 1984 spin_lock_irqsave(&cmd->t_state_lock, flags);
c66ac9db 1985 list_for_each_entry_safe(task, task_tmp,
a1d8b49a 1986 &cmd->t_task_list, t_list) {
c66ac9db
NB
1987 DEBUG_TS("task_no[%d] - Processing task %p\n",
1988 task->task_no, task);
1989 /*
1990 * If the struct se_task has not been sent and is not active,
1991 * remove the struct se_task from the execution queue.
1992 */
1993 if (!atomic_read(&task->task_sent) &&
1994 !atomic_read(&task->task_active)) {
a1d8b49a 1995 spin_unlock_irqrestore(&cmd->t_state_lock,
c66ac9db
NB
1996 flags);
1997 transport_remove_task_from_execute_queue(task,
1998 task->se_dev);
1999
2000 DEBUG_TS("task_no[%d] - Removed from execute queue\n",
2001 task->task_no);
a1d8b49a 2002 spin_lock_irqsave(&cmd->t_state_lock, flags);
c66ac9db
NB
2003 continue;
2004 }
2005
2006 /*
2007 * If the struct se_task is active, sleep until it is returned
2008 * from the plugin.
2009 */
2010 if (atomic_read(&task->task_active)) {
2011 atomic_set(&task->task_stop, 1);
a1d8b49a 2012 spin_unlock_irqrestore(&cmd->t_state_lock,
c66ac9db
NB
2013 flags);
2014
2015 DEBUG_TS("task_no[%d] - Waiting to complete\n",
2016 task->task_no);
2017 wait_for_completion(&task->task_stop_comp);
2018 DEBUG_TS("task_no[%d] - Stopped successfully\n",
2019 task->task_no);
2020
a1d8b49a
AG
2021 spin_lock_irqsave(&cmd->t_state_lock, flags);
2022 atomic_dec(&cmd->t_task_cdbs_left);
c66ac9db
NB
2023
2024 atomic_set(&task->task_active, 0);
2025 atomic_set(&task->task_stop, 0);
2026 } else {
2027 DEBUG_TS("task_no[%d] - Did nothing\n", task->task_no);
2028 ret++;
2029 }
2030
2031 __transport_stop_task_timer(task, &flags);
2032 }
a1d8b49a 2033 spin_unlock_irqrestore(&cmd->t_state_lock, flags);
c66ac9db
NB
2034
2035 return ret;
2036}
2037
c66ac9db
NB
2038/*
2039 * Handle SAM-esque emulation for generic transport request failures.
2040 */
2041static void transport_generic_request_failure(
2042 struct se_cmd *cmd,
2043 struct se_device *dev,
2044 int complete,
2045 int sc)
2046{
07bde79a
NB
2047 int ret = 0;
2048
c66ac9db 2049 DEBUG_GRF("-----[ Storage Engine Exception for cmd: %p ITT: 0x%08x"
e3d6f909 2050 " CDB: 0x%02x\n", cmd, cmd->se_tfo->get_task_tag(cmd),
a1d8b49a 2051 cmd->t_task_cdb[0]);
c66ac9db
NB
2052 DEBUG_GRF("-----[ i_state: %d t_state/def_t_state:"
2053 " %d/%d transport_error_status: %d\n",
e3d6f909 2054 cmd->se_tfo->get_cmd_state(cmd),
c66ac9db
NB
2055 cmd->t_state, cmd->deferred_t_state,
2056 cmd->transport_error_status);
2057 DEBUG_GRF("-----[ t_task_cdbs: %d t_task_cdbs_left: %d"
2058 " t_task_cdbs_sent: %d t_task_cdbs_ex_left: %d --"
2059 " t_transport_active: %d t_transport_stop: %d"
a1d8b49a
AG
2060 " t_transport_sent: %d\n", cmd->t_task_cdbs,
2061 atomic_read(&cmd->t_task_cdbs_left),
2062 atomic_read(&cmd->t_task_cdbs_sent),
2063 atomic_read(&cmd->t_task_cdbs_ex_left),
2064 atomic_read(&cmd->t_transport_active),
2065 atomic_read(&cmd->t_transport_stop),
2066 atomic_read(&cmd->t_transport_sent));
c66ac9db
NB
2067
2068 transport_stop_all_task_timers(cmd);
2069
2070 if (dev)
e3d6f909 2071 atomic_inc(&dev->depth_left);
c66ac9db
NB
2072 /*
2073 * For SAM Task Attribute emulation for failed struct se_cmd
2074 */
2075 if (cmd->se_dev->dev_task_attr_type == SAM_TASK_ATTR_EMULATED)
2076 transport_complete_task_attr(cmd);
2077
2078 if (complete) {
2079 transport_direct_request_timeout(cmd);
2080 cmd->transport_error_status = PYX_TRANSPORT_LU_COMM_FAILURE;
2081 }
2082
2083 switch (cmd->transport_error_status) {
2084 case PYX_TRANSPORT_UNKNOWN_SAM_OPCODE:
2085 cmd->scsi_sense_reason = TCM_UNSUPPORTED_SCSI_OPCODE;
2086 break;
2087 case PYX_TRANSPORT_REQ_TOO_MANY_SECTORS:
2088 cmd->scsi_sense_reason = TCM_SECTOR_COUNT_TOO_MANY;
2089 break;
2090 case PYX_TRANSPORT_INVALID_CDB_FIELD:
2091 cmd->scsi_sense_reason = TCM_INVALID_CDB_FIELD;
2092 break;
2093 case PYX_TRANSPORT_INVALID_PARAMETER_LIST:
2094 cmd->scsi_sense_reason = TCM_INVALID_PARAMETER_LIST;
2095 break;
2096 case PYX_TRANSPORT_OUT_OF_MEMORY_RESOURCES:
2097 if (!sc)
2098 transport_new_cmd_failure(cmd);
2099 /*
2100 * Currently for PYX_TRANSPORT_OUT_OF_MEMORY_RESOURCES,
2101 * we force this session to fall back to session
2102 * recovery.
2103 */
e3d6f909
AG
2104 cmd->se_tfo->fall_back_to_erl0(cmd->se_sess);
2105 cmd->se_tfo->stop_session(cmd->se_sess, 0, 0);
c66ac9db
NB
2106
2107 goto check_stop;
2108 case PYX_TRANSPORT_LU_COMM_FAILURE:
2109 case PYX_TRANSPORT_ILLEGAL_REQUEST:
2110 cmd->scsi_sense_reason = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
2111 break;
2112 case PYX_TRANSPORT_UNKNOWN_MODE_PAGE:
2113 cmd->scsi_sense_reason = TCM_UNKNOWN_MODE_PAGE;
2114 break;
2115 case PYX_TRANSPORT_WRITE_PROTECTED:
2116 cmd->scsi_sense_reason = TCM_WRITE_PROTECTED;
2117 break;
2118 case PYX_TRANSPORT_RESERVATION_CONFLICT:
2119 /*
2120 * No SENSE Data payload for this case, set SCSI Status
2121 * and queue the response to $FABRIC_MOD.
2122 *
2123 * Uses linux/include/scsi/scsi.h SAM status codes defs
2124 */
2125 cmd->scsi_status = SAM_STAT_RESERVATION_CONFLICT;
2126 /*
2127 * For UA Interlock Code 11b, a RESERVATION CONFLICT will
2128 * establish a UNIT ATTENTION with PREVIOUS RESERVATION
2129 * CONFLICT STATUS.
2130 *
2131 * See spc4r17, section 7.4.6 Control Mode Page, Table 349
2132 */
e3d6f909
AG
2133 if (cmd->se_sess &&
2134 cmd->se_dev->se_sub_dev->se_dev_attrib.emulate_ua_intlck_ctrl == 2)
2135 core_scsi3_ua_allocate(cmd->se_sess->se_node_acl,
c66ac9db
NB
2136 cmd->orig_fe_lun, 0x2C,
2137 ASCQ_2CH_PREVIOUS_RESERVATION_CONFLICT_STATUS);
2138
07bde79a
NB
2139 ret = cmd->se_tfo->queue_status(cmd);
2140 if (ret == -EAGAIN)
2141 goto queue_full;
c66ac9db
NB
2142 goto check_stop;
2143 case PYX_TRANSPORT_USE_SENSE_REASON:
2144 /*
2145 * struct se_cmd->scsi_sense_reason already set
2146 */
2147 break;
2148 default:
2149 printk(KERN_ERR "Unknown transport error for CDB 0x%02x: %d\n",
a1d8b49a 2150 cmd->t_task_cdb[0],
c66ac9db
NB
2151 cmd->transport_error_status);
2152 cmd->scsi_sense_reason = TCM_UNSUPPORTED_SCSI_OPCODE;
2153 break;
2154 }
2155
2156 if (!sc)
2157 transport_new_cmd_failure(cmd);
07bde79a
NB
2158 else {
2159 ret = transport_send_check_condition_and_sense(cmd,
2160 cmd->scsi_sense_reason, 0);
2161 if (ret == -EAGAIN)
2162 goto queue_full;
2163 }
2164
c66ac9db
NB
2165check_stop:
2166 transport_lun_remove_cmd(cmd);
2167 if (!(transport_cmd_check_stop_to_fabric(cmd)))
2168 ;
07bde79a
NB
2169 return;
2170
2171queue_full:
2172 cmd->t_state = TRANSPORT_COMPLETE_OK;
2173 transport_handle_queue_full(cmd, cmd->se_dev, transport_complete_qf);
c66ac9db
NB
2174}
2175
2176static void transport_direct_request_timeout(struct se_cmd *cmd)
2177{
2178 unsigned long flags;
2179
a1d8b49a
AG
2180 spin_lock_irqsave(&cmd->t_state_lock, flags);
2181 if (!(atomic_read(&cmd->t_transport_timeout))) {
2182 spin_unlock_irqrestore(&cmd->t_state_lock, flags);
c66ac9db
NB
2183 return;
2184 }
a1d8b49a
AG
2185 if (atomic_read(&cmd->t_task_cdbs_timeout_left)) {
2186 spin_unlock_irqrestore(&cmd->t_state_lock, flags);
c66ac9db
NB
2187 return;
2188 }
2189
a1d8b49a
AG
2190 atomic_sub(atomic_read(&cmd->t_transport_timeout),
2191 &cmd->t_se_count);
2192 spin_unlock_irqrestore(&cmd->t_state_lock, flags);
c66ac9db
NB
2193}
2194
2195static void transport_generic_request_timeout(struct se_cmd *cmd)
2196{
2197 unsigned long flags;
2198
2199 /*
a1d8b49a 2200 * Reset cmd->t_se_count to allow transport_generic_remove()
c66ac9db
NB
2201 * to allow last call to free memory resources.
2202 */
a1d8b49a
AG
2203 spin_lock_irqsave(&cmd->t_state_lock, flags);
2204 if (atomic_read(&cmd->t_transport_timeout) > 1) {
2205 int tmp = (atomic_read(&cmd->t_transport_timeout) - 1);
c66ac9db 2206
a1d8b49a 2207 atomic_sub(tmp, &cmd->t_se_count);
c66ac9db 2208 }
a1d8b49a 2209 spin_unlock_irqrestore(&cmd->t_state_lock, flags);
c66ac9db 2210
35462975 2211 transport_generic_remove(cmd, 0);
c66ac9db
NB
2212}
2213
c66ac9db
NB
2214static inline u32 transport_lba_21(unsigned char *cdb)
2215{
2216 return ((cdb[1] & 0x1f) << 16) | (cdb[2] << 8) | cdb[3];
2217}
2218
2219static inline u32 transport_lba_32(unsigned char *cdb)
2220{
2221 return (cdb[2] << 24) | (cdb[3] << 16) | (cdb[4] << 8) | cdb[5];
2222}
2223
2224static inline unsigned long long transport_lba_64(unsigned char *cdb)
2225{
2226 unsigned int __v1, __v2;
2227
2228 __v1 = (cdb[2] << 24) | (cdb[3] << 16) | (cdb[4] << 8) | cdb[5];
2229 __v2 = (cdb[6] << 24) | (cdb[7] << 16) | (cdb[8] << 8) | cdb[9];
2230
2231 return ((unsigned long long)__v2) | (unsigned long long)__v1 << 32;
2232}
2233
2234/*
2235 * For VARIABLE_LENGTH_CDB w/ 32 byte extended CDBs
2236 */
2237static inline unsigned long long transport_lba_64_ext(unsigned char *cdb)
2238{
2239 unsigned int __v1, __v2;
2240
2241 __v1 = (cdb[12] << 24) | (cdb[13] << 16) | (cdb[14] << 8) | cdb[15];
2242 __v2 = (cdb[16] << 24) | (cdb[17] << 16) | (cdb[18] << 8) | cdb[19];
2243
2244 return ((unsigned long long)__v2) | (unsigned long long)__v1 << 32;
2245}
2246
2247static void transport_set_supported_SAM_opcode(struct se_cmd *se_cmd)
2248{
2249 unsigned long flags;
2250
a1d8b49a 2251 spin_lock_irqsave(&se_cmd->t_state_lock, flags);
c66ac9db 2252 se_cmd->se_cmd_flags |= SCF_SUPPORTED_SAM_OPCODE;
a1d8b49a 2253 spin_unlock_irqrestore(&se_cmd->t_state_lock, flags);
c66ac9db
NB
2254}
2255
2256/*
2257 * Called from interrupt context.
2258 */
2259static void transport_task_timeout_handler(unsigned long data)
2260{
2261 struct se_task *task = (struct se_task *)data;
e3d6f909 2262 struct se_cmd *cmd = task->task_se_cmd;
c66ac9db
NB
2263 unsigned long flags;
2264
2265 DEBUG_TT("transport task timeout fired! task: %p cmd: %p\n", task, cmd);
2266
a1d8b49a 2267 spin_lock_irqsave(&cmd->t_state_lock, flags);
c66ac9db 2268 if (task->task_flags & TF_STOP) {
a1d8b49a 2269 spin_unlock_irqrestore(&cmd->t_state_lock, flags);
c66ac9db
NB
2270 return;
2271 }
2272 task->task_flags &= ~TF_RUNNING;
2273
2274 /*
2275 * Determine if transport_complete_task() has already been called.
2276 */
2277 if (!(atomic_read(&task->task_active))) {
2278 DEBUG_TT("transport task: %p cmd: %p timeout task_active"
2279 " == 0\n", task, cmd);
a1d8b49a 2280 spin_unlock_irqrestore(&cmd->t_state_lock, flags);
c66ac9db
NB
2281 return;
2282 }
2283
a1d8b49a
AG
2284 atomic_inc(&cmd->t_se_count);
2285 atomic_inc(&cmd->t_transport_timeout);
2286 cmd->t_tasks_failed = 1;
c66ac9db
NB
2287
2288 atomic_set(&task->task_timeout, 1);
2289 task->task_error_status = PYX_TRANSPORT_TASK_TIMEOUT;
2290 task->task_scsi_status = 1;
2291
2292 if (atomic_read(&task->task_stop)) {
2293 DEBUG_TT("transport task: %p cmd: %p timeout task_stop"
2294 " == 1\n", task, cmd);
a1d8b49a 2295 spin_unlock_irqrestore(&cmd->t_state_lock, flags);
c66ac9db
NB
2296 complete(&task->task_stop_comp);
2297 return;
2298 }
2299
a1d8b49a 2300 if (!(atomic_dec_and_test(&cmd->t_task_cdbs_left))) {
c66ac9db
NB
2301 DEBUG_TT("transport task: %p cmd: %p timeout non zero"
2302 " t_task_cdbs_left\n", task, cmd);
a1d8b49a 2303 spin_unlock_irqrestore(&cmd->t_state_lock, flags);
c66ac9db
NB
2304 return;
2305 }
2306 DEBUG_TT("transport task: %p cmd: %p timeout ZERO t_task_cdbs_left\n",
2307 task, cmd);
2308
2309 cmd->t_state = TRANSPORT_COMPLETE_FAILURE;
a1d8b49a 2310 spin_unlock_irqrestore(&cmd->t_state_lock, flags);
c66ac9db
NB
2311
2312 transport_add_cmd_to_queue(cmd, TRANSPORT_COMPLETE_FAILURE);
2313}
2314
2315/*
a1d8b49a 2316 * Called with cmd->t_state_lock held.
c66ac9db
NB
2317 */
2318static void transport_start_task_timer(struct se_task *task)
2319{
2320 struct se_device *dev = task->se_dev;
2321 int timeout;
2322
2323 if (task->task_flags & TF_RUNNING)
2324 return;
2325 /*
2326 * If the task_timeout is disabled, exit now.
2327 */
e3d6f909 2328 timeout = dev->se_sub_dev->se_dev_attrib.task_timeout;
c66ac9db
NB
2329 if (!(timeout))
2330 return;
2331
2332 init_timer(&task->task_timer);
2333 task->task_timer.expires = (get_jiffies_64() + timeout * HZ);
2334 task->task_timer.data = (unsigned long) task;
2335 task->task_timer.function = transport_task_timeout_handler;
2336
2337 task->task_flags |= TF_RUNNING;
2338 add_timer(&task->task_timer);
2339#if 0
2340 printk(KERN_INFO "Starting task timer for cmd: %p task: %p seconds:"
2341 " %d\n", task->task_se_cmd, task, timeout);
2342#endif
2343}
2344
2345/*
a1d8b49a 2346 * Called with spin_lock_irq(&cmd->t_state_lock) held.
c66ac9db
NB
2347 */
2348void __transport_stop_task_timer(struct se_task *task, unsigned long *flags)
2349{
e3d6f909 2350 struct se_cmd *cmd = task->task_se_cmd;
c66ac9db
NB
2351
2352 if (!(task->task_flags & TF_RUNNING))
2353 return;
2354
2355 task->task_flags |= TF_STOP;
a1d8b49a 2356 spin_unlock_irqrestore(&cmd->t_state_lock, *flags);
c66ac9db
NB
2357
2358 del_timer_sync(&task->task_timer);
2359
a1d8b49a 2360 spin_lock_irqsave(&cmd->t_state_lock, *flags);
c66ac9db
NB
2361 task->task_flags &= ~TF_RUNNING;
2362 task->task_flags &= ~TF_STOP;
2363}
2364
2365static void transport_stop_all_task_timers(struct se_cmd *cmd)
2366{
2367 struct se_task *task = NULL, *task_tmp;
2368 unsigned long flags;
2369
a1d8b49a 2370 spin_lock_irqsave(&cmd->t_state_lock, flags);
c66ac9db 2371 list_for_each_entry_safe(task, task_tmp,
a1d8b49a 2372 &cmd->t_task_list, t_list)
c66ac9db 2373 __transport_stop_task_timer(task, &flags);
a1d8b49a 2374 spin_unlock_irqrestore(&cmd->t_state_lock, flags);
c66ac9db
NB
2375}
2376
2377static inline int transport_tcq_window_closed(struct se_device *dev)
2378{
2379 if (dev->dev_tcq_window_closed++ <
2380 PYX_TRANSPORT_WINDOW_CLOSED_THRESHOLD) {
2381 msleep(PYX_TRANSPORT_WINDOW_CLOSED_WAIT_SHORT);
2382 } else
2383 msleep(PYX_TRANSPORT_WINDOW_CLOSED_WAIT_LONG);
2384
e3d6f909 2385 wake_up_interruptible(&dev->dev_queue_obj.thread_wq);
c66ac9db
NB
2386 return 0;
2387}
2388
2389/*
2390 * Called from Fabric Module context from transport_execute_tasks()
2391 *
2392 * The return of this function determins if the tasks from struct se_cmd
2393 * get added to the execution queue in transport_execute_tasks(),
2394 * or are added to the delayed or ordered lists here.
2395 */
2396static inline int transport_execute_task_attr(struct se_cmd *cmd)
2397{
5951146d 2398 if (cmd->se_dev->dev_task_attr_type != SAM_TASK_ATTR_EMULATED)
c66ac9db
NB
2399 return 1;
2400 /*
25985edc 2401 * Check for the existence of HEAD_OF_QUEUE, and if true return 1
c66ac9db
NB
2402 * to allow the passed struct se_cmd list of tasks to the front of the list.
2403 */
e66ecd50 2404 if (cmd->sam_task_attr == MSG_HEAD_TAG) {
5951146d 2405 atomic_inc(&cmd->se_dev->dev_hoq_count);
c66ac9db
NB
2406 smp_mb__after_atomic_inc();
2407 DEBUG_STA("Added HEAD_OF_QUEUE for CDB:"
2408 " 0x%02x, se_ordered_id: %u\n",
a1d8b49a 2409 cmd->_task_cdb[0],
c66ac9db
NB
2410 cmd->se_ordered_id);
2411 return 1;
e66ecd50 2412 } else if (cmd->sam_task_attr == MSG_ORDERED_TAG) {
5951146d
AG
2413 spin_lock(&cmd->se_dev->ordered_cmd_lock);
2414 list_add_tail(&cmd->se_ordered_node,
2415 &cmd->se_dev->ordered_cmd_list);
2416 spin_unlock(&cmd->se_dev->ordered_cmd_lock);
c66ac9db 2417
5951146d 2418 atomic_inc(&cmd->se_dev->dev_ordered_sync);
c66ac9db
NB
2419 smp_mb__after_atomic_inc();
2420
2421 DEBUG_STA("Added ORDERED for CDB: 0x%02x to ordered"
2422 " list, se_ordered_id: %u\n",
a1d8b49a 2423 cmd->t_task_cdb[0],
c66ac9db
NB
2424 cmd->se_ordered_id);
2425 /*
2426 * Add ORDERED command to tail of execution queue if
2427 * no other older commands exist that need to be
2428 * completed first.
2429 */
5951146d 2430 if (!(atomic_read(&cmd->se_dev->simple_cmds)))
c66ac9db
NB
2431 return 1;
2432 } else {
2433 /*
2434 * For SIMPLE and UNTAGGED Task Attribute commands
2435 */
5951146d 2436 atomic_inc(&cmd->se_dev->simple_cmds);
c66ac9db
NB
2437 smp_mb__after_atomic_inc();
2438 }
2439 /*
2440 * Otherwise if one or more outstanding ORDERED task attribute exist,
2441 * add the dormant task(s) built for the passed struct se_cmd to the
2442 * execution queue and become in Active state for this struct se_device.
2443 */
5951146d 2444 if (atomic_read(&cmd->se_dev->dev_ordered_sync) != 0) {
c66ac9db
NB
2445 /*
2446 * Otherwise, add cmd w/ tasks to delayed cmd queue that
25985edc 2447 * will be drained upon completion of HEAD_OF_QUEUE task.
c66ac9db 2448 */
5951146d 2449 spin_lock(&cmd->se_dev->delayed_cmd_lock);
c66ac9db 2450 cmd->se_cmd_flags |= SCF_DELAYED_CMD_FROM_SAM_ATTR;
5951146d
AG
2451 list_add_tail(&cmd->se_delayed_node,
2452 &cmd->se_dev->delayed_cmd_list);
2453 spin_unlock(&cmd->se_dev->delayed_cmd_lock);
c66ac9db
NB
2454
2455 DEBUG_STA("Added CDB: 0x%02x Task Attr: 0x%02x to"
2456 " delayed CMD list, se_ordered_id: %u\n",
a1d8b49a 2457 cmd->t_task_cdb[0], cmd->sam_task_attr,
c66ac9db
NB
2458 cmd->se_ordered_id);
2459 /*
2460 * Return zero to let transport_execute_tasks() know
2461 * not to add the delayed tasks to the execution list.
2462 */
2463 return 0;
2464 }
2465 /*
2466 * Otherwise, no ORDERED task attributes exist..
2467 */
2468 return 1;
2469}
2470
2471/*
2472 * Called from fabric module context in transport_generic_new_cmd() and
2473 * transport_generic_process_write()
2474 */
2475static int transport_execute_tasks(struct se_cmd *cmd)
2476{
2477 int add_tasks;
2478
db1620a2
CH
2479 if (se_dev_check_online(cmd->se_orig_obj_ptr) != 0) {
2480 cmd->transport_error_status = PYX_TRANSPORT_LU_COMM_FAILURE;
2481 transport_generic_request_failure(cmd, NULL, 0, 1);
2482 return 0;
c66ac9db 2483 }
db1620a2 2484
c66ac9db
NB
2485 /*
2486 * Call transport_cmd_check_stop() to see if a fabric exception
25985edc 2487 * has occurred that prevents execution.
c66ac9db
NB
2488 */
2489 if (!(transport_cmd_check_stop(cmd, 0, TRANSPORT_PROCESSING))) {
2490 /*
2491 * Check for SAM Task Attribute emulation and HEAD_OF_QUEUE
2492 * attribute for the tasks of the received struct se_cmd CDB
2493 */
2494 add_tasks = transport_execute_task_attr(cmd);
e3d6f909 2495 if (!add_tasks)
c66ac9db
NB
2496 goto execute_tasks;
2497 /*
2498 * This calls transport_add_tasks_from_cmd() to handle
2499 * HEAD_OF_QUEUE ordering for SAM Task Attribute emulation
2500 * (if enabled) in __transport_add_task_to_execute_queue() and
2501 * transport_add_task_check_sam_attr().
2502 */
2503 transport_add_tasks_from_cmd(cmd);
2504 }
2505 /*
2506 * Kick the execution queue for the cmd associated struct se_device
2507 * storage object.
2508 */
2509execute_tasks:
5951146d 2510 __transport_execute_tasks(cmd->se_dev);
c66ac9db
NB
2511 return 0;
2512}
2513
2514/*
2515 * Called to check struct se_device tcq depth window, and once open pull struct se_task
2516 * from struct se_device->execute_task_list and
2517 *
2518 * Called from transport_processing_thread()
2519 */
2520static int __transport_execute_tasks(struct se_device *dev)
2521{
2522 int error;
2523 struct se_cmd *cmd = NULL;
e3d6f909 2524 struct se_task *task = NULL;
c66ac9db
NB
2525 unsigned long flags;
2526
2527 /*
2528 * Check if there is enough room in the device and HBA queue to send
a1d8b49a 2529 * struct se_tasks to the selected transport.
c66ac9db
NB
2530 */
2531check_depth:
e3d6f909 2532 if (!atomic_read(&dev->depth_left))
c66ac9db 2533 return transport_tcq_window_closed(dev);
c66ac9db 2534
e3d6f909 2535 dev->dev_tcq_window_closed = 0;
c66ac9db 2536
e3d6f909
AG
2537 spin_lock_irq(&dev->execute_task_lock);
2538 if (list_empty(&dev->execute_task_list)) {
2539 spin_unlock_irq(&dev->execute_task_lock);
c66ac9db
NB
2540 return 0;
2541 }
e3d6f909
AG
2542 task = list_first_entry(&dev->execute_task_list,
2543 struct se_task, t_execute_list);
2544 list_del(&task->t_execute_list);
2545 atomic_set(&task->task_execute_queue, 0);
2546 atomic_dec(&dev->execute_tasks);
2547 spin_unlock_irq(&dev->execute_task_lock);
c66ac9db
NB
2548
2549 atomic_dec(&dev->depth_left);
c66ac9db 2550
e3d6f909 2551 cmd = task->task_se_cmd;
c66ac9db 2552
a1d8b49a 2553 spin_lock_irqsave(&cmd->t_state_lock, flags);
c66ac9db
NB
2554 atomic_set(&task->task_active, 1);
2555 atomic_set(&task->task_sent, 1);
a1d8b49a 2556 atomic_inc(&cmd->t_task_cdbs_sent);
c66ac9db 2557
a1d8b49a
AG
2558 if (atomic_read(&cmd->t_task_cdbs_sent) ==
2559 cmd->t_task_list_num)
c66ac9db
NB
2560 atomic_set(&cmd->transport_sent, 1);
2561
2562 transport_start_task_timer(task);
a1d8b49a 2563 spin_unlock_irqrestore(&cmd->t_state_lock, flags);
c66ac9db
NB
2564 /*
2565 * The struct se_cmd->transport_emulate_cdb() function pointer is used
e3d6f909 2566 * to grab REPORT_LUNS and other CDBs we want to handle before they hit the
c66ac9db
NB
2567 * struct se_subsystem_api->do_task() caller below.
2568 */
2569 if (cmd->transport_emulate_cdb) {
2570 error = cmd->transport_emulate_cdb(cmd);
2571 if (error != 0) {
2572 cmd->transport_error_status = error;
2573 atomic_set(&task->task_active, 0);
2574 atomic_set(&cmd->transport_sent, 0);
2575 transport_stop_tasks_for_cmd(cmd);
2576 transport_generic_request_failure(cmd, dev, 0, 1);
2577 goto check_depth;
2578 }
2579 /*
2580 * Handle the successful completion for transport_emulate_cdb()
2581 * for synchronous operation, following SCF_EMULATE_CDB_ASYNC
2582 * Otherwise the caller is expected to complete the task with
2583 * proper status.
2584 */
2585 if (!(cmd->se_cmd_flags & SCF_EMULATE_CDB_ASYNC)) {
2586 cmd->scsi_status = SAM_STAT_GOOD;
2587 task->task_scsi_status = GOOD;
2588 transport_complete_task(task, 1);
2589 }
2590 } else {
2591 /*
2592 * Currently for all virtual TCM plugins including IBLOCK, FILEIO and
2593 * RAMDISK we use the internal transport_emulate_control_cdb() logic
2594 * with struct se_subsystem_api callers for the primary SPC-3 TYPE_DISK
2595 * LUN emulation code.
2596 *
2597 * For TCM/pSCSI and all other SCF_SCSI_DATA_SG_IO_CDB I/O tasks we
2598 * call ->do_task() directly and let the underlying TCM subsystem plugin
2599 * code handle the CDB emulation.
2600 */
e3d6f909
AG
2601 if ((dev->transport->transport_type != TRANSPORT_PLUGIN_PHBA_PDEV) &&
2602 (!(task->task_se_cmd->se_cmd_flags & SCF_SCSI_DATA_SG_IO_CDB)))
c66ac9db
NB
2603 error = transport_emulate_control_cdb(task);
2604 else
e3d6f909 2605 error = dev->transport->do_task(task);
c66ac9db
NB
2606
2607 if (error != 0) {
2608 cmd->transport_error_status = error;
2609 atomic_set(&task->task_active, 0);
2610 atomic_set(&cmd->transport_sent, 0);
2611 transport_stop_tasks_for_cmd(cmd);
2612 transport_generic_request_failure(cmd, dev, 0, 1);
2613 }
2614 }
2615
2616 goto check_depth;
2617
2618 return 0;
2619}
2620
2621void transport_new_cmd_failure(struct se_cmd *se_cmd)
2622{
2623 unsigned long flags;
2624 /*
2625 * Any unsolicited data will get dumped for failed command inside of
2626 * the fabric plugin
2627 */
a1d8b49a 2628 spin_lock_irqsave(&se_cmd->t_state_lock, flags);
c66ac9db
NB
2629 se_cmd->se_cmd_flags |= SCF_SE_CMD_FAILED;
2630 se_cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION;
a1d8b49a 2631 spin_unlock_irqrestore(&se_cmd->t_state_lock, flags);
c66ac9db
NB
2632}
2633
2634static void transport_nop_wait_for_tasks(struct se_cmd *, int, int);
2635
2636static inline u32 transport_get_sectors_6(
2637 unsigned char *cdb,
2638 struct se_cmd *cmd,
2639 int *ret)
2640{
5951146d 2641 struct se_device *dev = cmd->se_dev;
c66ac9db
NB
2642
2643 /*
2644 * Assume TYPE_DISK for non struct se_device objects.
2645 * Use 8-bit sector value.
2646 */
2647 if (!dev)
2648 goto type_disk;
2649
2650 /*
2651 * Use 24-bit allocation length for TYPE_TAPE.
2652 */
e3d6f909 2653 if (dev->transport->get_device_type(dev) == TYPE_TAPE)
c66ac9db
NB
2654 return (u32)(cdb[2] << 16) + (cdb[3] << 8) + cdb[4];
2655
2656 /*
2657 * Everything else assume TYPE_DISK Sector CDB location.
2658 * Use 8-bit sector value.
2659 */
2660type_disk:
2661 return (u32)cdb[4];
2662}
2663
2664static inline u32 transport_get_sectors_10(
2665 unsigned char *cdb,
2666 struct se_cmd *cmd,
2667 int *ret)
2668{
5951146d 2669 struct se_device *dev = cmd->se_dev;
c66ac9db
NB
2670
2671 /*
2672 * Assume TYPE_DISK for non struct se_device objects.
2673 * Use 16-bit sector value.
2674 */
2675 if (!dev)
2676 goto type_disk;
2677
2678 /*
2679 * XXX_10 is not defined in SSC, throw an exception
2680 */
e3d6f909
AG
2681 if (dev->transport->get_device_type(dev) == TYPE_TAPE) {
2682 *ret = -EINVAL;
c66ac9db
NB
2683 return 0;
2684 }
2685
2686 /*
2687 * Everything else assume TYPE_DISK Sector CDB location.
2688 * Use 16-bit sector value.
2689 */
2690type_disk:
2691 return (u32)(cdb[7] << 8) + cdb[8];
2692}
2693
2694static inline u32 transport_get_sectors_12(
2695 unsigned char *cdb,
2696 struct se_cmd *cmd,
2697 int *ret)
2698{
5951146d 2699 struct se_device *dev = cmd->se_dev;
c66ac9db
NB
2700
2701 /*
2702 * Assume TYPE_DISK for non struct se_device objects.
2703 * Use 32-bit sector value.
2704 */
2705 if (!dev)
2706 goto type_disk;
2707
2708 /*
2709 * XXX_12 is not defined in SSC, throw an exception
2710 */
e3d6f909
AG
2711 if (dev->transport->get_device_type(dev) == TYPE_TAPE) {
2712 *ret = -EINVAL;
c66ac9db
NB
2713 return 0;
2714 }
2715
2716 /*
2717 * Everything else assume TYPE_DISK Sector CDB location.
2718 * Use 32-bit sector value.
2719 */
2720type_disk:
2721 return (u32)(cdb[6] << 24) + (cdb[7] << 16) + (cdb[8] << 8) + cdb[9];
2722}
2723
2724static inline u32 transport_get_sectors_16(
2725 unsigned char *cdb,
2726 struct se_cmd *cmd,
2727 int *ret)
2728{
5951146d 2729 struct se_device *dev = cmd->se_dev;
c66ac9db
NB
2730
2731 /*
2732 * Assume TYPE_DISK for non struct se_device objects.
2733 * Use 32-bit sector value.
2734 */
2735 if (!dev)
2736 goto type_disk;
2737
2738 /*
2739 * Use 24-bit allocation length for TYPE_TAPE.
2740 */
e3d6f909 2741 if (dev->transport->get_device_type(dev) == TYPE_TAPE)
c66ac9db
NB
2742 return (u32)(cdb[12] << 16) + (cdb[13] << 8) + cdb[14];
2743
2744type_disk:
2745 return (u32)(cdb[10] << 24) + (cdb[11] << 16) +
2746 (cdb[12] << 8) + cdb[13];
2747}
2748
2749/*
2750 * Used for VARIABLE_LENGTH_CDB WRITE_32 and READ_32 variants
2751 */
2752static inline u32 transport_get_sectors_32(
2753 unsigned char *cdb,
2754 struct se_cmd *cmd,
2755 int *ret)
2756{
2757 /*
2758 * Assume TYPE_DISK for non struct se_device objects.
2759 * Use 32-bit sector value.
2760 */
2761 return (u32)(cdb[28] << 24) + (cdb[29] << 16) +
2762 (cdb[30] << 8) + cdb[31];
2763
2764}
2765
2766static inline u32 transport_get_size(
2767 u32 sectors,
2768 unsigned char *cdb,
2769 struct se_cmd *cmd)
2770{
5951146d 2771 struct se_device *dev = cmd->se_dev;
c66ac9db 2772
e3d6f909 2773 if (dev->transport->get_device_type(dev) == TYPE_TAPE) {
c66ac9db 2774 if (cdb[1] & 1) { /* sectors */
e3d6f909 2775 return dev->se_sub_dev->se_dev_attrib.block_size * sectors;
c66ac9db
NB
2776 } else /* bytes */
2777 return sectors;
2778 }
2779#if 0
2780 printk(KERN_INFO "Returning block_size: %u, sectors: %u == %u for"
e3d6f909
AG
2781 " %s object\n", dev->se_sub_dev->se_dev_attrib.block_size, sectors,
2782 dev->se_sub_dev->se_dev_attrib.block_size * sectors,
2783 dev->transport->name);
c66ac9db 2784#endif
e3d6f909 2785 return dev->se_sub_dev->se_dev_attrib.block_size * sectors;
c66ac9db
NB
2786}
2787
2788unsigned char transport_asciihex_to_binaryhex(unsigned char val[2])
2789{
2790 unsigned char result = 0;
2791 /*
2792 * MSB
2793 */
2794 if ((val[0] >= 'a') && (val[0] <= 'f'))
2795 result = ((val[0] - 'a' + 10) & 0xf) << 4;
2796 else
2797 if ((val[0] >= 'A') && (val[0] <= 'F'))
2798 result = ((val[0] - 'A' + 10) & 0xf) << 4;
2799 else /* digit */
2800 result = ((val[0] - '0') & 0xf) << 4;
2801 /*
2802 * LSB
2803 */
2804 if ((val[1] >= 'a') && (val[1] <= 'f'))
2805 result |= ((val[1] - 'a' + 10) & 0xf);
2806 else
2807 if ((val[1] >= 'A') && (val[1] <= 'F'))
2808 result |= ((val[1] - 'A' + 10) & 0xf);
2809 else /* digit */
2810 result |= ((val[1] - '0') & 0xf);
2811
2812 return result;
2813}
2814EXPORT_SYMBOL(transport_asciihex_to_binaryhex);
2815
2816static void transport_xor_callback(struct se_cmd *cmd)
2817{
2818 unsigned char *buf, *addr;
ec98f782 2819 struct scatterlist *sg;
c66ac9db
NB
2820 unsigned int offset;
2821 int i;
ec98f782 2822 int count;
c66ac9db
NB
2823 /*
2824 * From sbc3r22.pdf section 5.48 XDWRITEREAD (10) command
2825 *
2826 * 1) read the specified logical block(s);
2827 * 2) transfer logical blocks from the data-out buffer;
2828 * 3) XOR the logical blocks transferred from the data-out buffer with
2829 * the logical blocks read, storing the resulting XOR data in a buffer;
2830 * 4) if the DISABLE WRITE bit is set to zero, then write the logical
2831 * blocks transferred from the data-out buffer; and
2832 * 5) transfer the resulting XOR data to the data-in buffer.
2833 */
2834 buf = kmalloc(cmd->data_length, GFP_KERNEL);
2835 if (!(buf)) {
2836 printk(KERN_ERR "Unable to allocate xor_callback buf\n");
2837 return;
2838 }
2839 /*
ec98f782 2840 * Copy the scatterlist WRITE buffer located at cmd->t_data_sg
c66ac9db
NB
2841 * into the locally allocated *buf
2842 */
ec98f782
AG
2843 sg_copy_to_buffer(cmd->t_data_sg,
2844 cmd->t_data_nents,
2845 buf,
2846 cmd->data_length);
2847
c66ac9db
NB
2848 /*
2849 * Now perform the XOR against the BIDI read memory located at
a1d8b49a 2850 * cmd->t_mem_bidi_list
c66ac9db
NB
2851 */
2852
2853 offset = 0;
ec98f782
AG
2854 for_each_sg(cmd->t_bidi_data_sg, sg, cmd->t_bidi_data_nents, count) {
2855 addr = kmap_atomic(sg_page(sg), KM_USER0);
2856 if (!addr)
c66ac9db
NB
2857 goto out;
2858
ec98f782
AG
2859 for (i = 0; i < sg->length; i++)
2860 *(addr + sg->offset + i) ^= *(buf + offset + i);
c66ac9db 2861
ec98f782 2862 offset += sg->length;
c66ac9db
NB
2863 kunmap_atomic(addr, KM_USER0);
2864 }
ec98f782 2865
c66ac9db
NB
2866out:
2867 kfree(buf);
2868}
2869
2870/*
2871 * Used to obtain Sense Data from underlying Linux/SCSI struct scsi_cmnd
2872 */
2873static int transport_get_sense_data(struct se_cmd *cmd)
2874{
2875 unsigned char *buffer = cmd->sense_buffer, *sense_buffer = NULL;
2876 struct se_device *dev;
2877 struct se_task *task = NULL, *task_tmp;
2878 unsigned long flags;
2879 u32 offset = 0;
2880
e3d6f909
AG
2881 WARN_ON(!cmd->se_lun);
2882
a1d8b49a 2883 spin_lock_irqsave(&cmd->t_state_lock, flags);
c66ac9db 2884 if (cmd->se_cmd_flags & SCF_SENT_CHECK_CONDITION) {
a1d8b49a 2885 spin_unlock_irqrestore(&cmd->t_state_lock, flags);
c66ac9db
NB
2886 return 0;
2887 }
2888
2889 list_for_each_entry_safe(task, task_tmp,
a1d8b49a 2890 &cmd->t_task_list, t_list) {
c66ac9db
NB
2891
2892 if (!task->task_sense)
2893 continue;
2894
2895 dev = task->se_dev;
2896 if (!(dev))
2897 continue;
2898
e3d6f909
AG
2899 if (!dev->transport->get_sense_buffer) {
2900 printk(KERN_ERR "dev->transport->get_sense_buffer"
c66ac9db
NB
2901 " is NULL\n");
2902 continue;
2903 }
2904
e3d6f909 2905 sense_buffer = dev->transport->get_sense_buffer(task);
c66ac9db
NB
2906 if (!(sense_buffer)) {
2907 printk(KERN_ERR "ITT[0x%08x]_TASK[%d]: Unable to locate"
2908 " sense buffer for task with sense\n",
e3d6f909 2909 cmd->se_tfo->get_task_tag(cmd), task->task_no);
c66ac9db
NB
2910 continue;
2911 }
a1d8b49a 2912 spin_unlock_irqrestore(&cmd->t_state_lock, flags);
c66ac9db 2913
e3d6f909 2914 offset = cmd->se_tfo->set_fabric_sense_len(cmd,
c66ac9db
NB
2915 TRANSPORT_SENSE_BUFFER);
2916
5951146d 2917 memcpy(&buffer[offset], sense_buffer,
c66ac9db
NB
2918 TRANSPORT_SENSE_BUFFER);
2919 cmd->scsi_status = task->task_scsi_status;
2920 /* Automatically padded */
2921 cmd->scsi_sense_length =
2922 (TRANSPORT_SENSE_BUFFER + offset);
2923
2924 printk(KERN_INFO "HBA_[%u]_PLUG[%s]: Set SAM STATUS: 0x%02x"
2925 " and sense\n",
e3d6f909 2926 dev->se_hba->hba_id, dev->transport->name,
c66ac9db
NB
2927 cmd->scsi_status);
2928 return 0;
2929 }
a1d8b49a 2930 spin_unlock_irqrestore(&cmd->t_state_lock, flags);
c66ac9db
NB
2931
2932 return -1;
2933}
2934
c66ac9db
NB
2935static int
2936transport_handle_reservation_conflict(struct se_cmd *cmd)
2937{
2938 cmd->transport_wait_for_tasks = &transport_nop_wait_for_tasks;
2939 cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION;
2940 cmd->se_cmd_flags |= SCF_SCSI_RESERVATION_CONFLICT;
2941 cmd->scsi_status = SAM_STAT_RESERVATION_CONFLICT;
2942 /*
2943 * For UA Interlock Code 11b, a RESERVATION CONFLICT will
2944 * establish a UNIT ATTENTION with PREVIOUS RESERVATION
2945 * CONFLICT STATUS.
2946 *
2947 * See spc4r17, section 7.4.6 Control Mode Page, Table 349
2948 */
e3d6f909
AG
2949 if (cmd->se_sess &&
2950 cmd->se_dev->se_sub_dev->se_dev_attrib.emulate_ua_intlck_ctrl == 2)
2951 core_scsi3_ua_allocate(cmd->se_sess->se_node_acl,
c66ac9db
NB
2952 cmd->orig_fe_lun, 0x2C,
2953 ASCQ_2CH_PREVIOUS_RESERVATION_CONFLICT_STATUS);
5951146d 2954 return -EINVAL;
c66ac9db
NB
2955}
2956
ec98f782
AG
2957static inline long long transport_dev_end_lba(struct se_device *dev)
2958{
2959 return dev->transport->get_blocks(dev) + 1;
2960}
2961
2962static int transport_cmd_get_valid_sectors(struct se_cmd *cmd)
2963{
2964 struct se_device *dev = cmd->se_dev;
2965 u32 sectors;
2966
2967 if (dev->transport->get_device_type(dev) != TYPE_DISK)
2968 return 0;
2969
2970 sectors = (cmd->data_length / dev->se_sub_dev->se_dev_attrib.block_size);
2971
2972 if ((cmd->t_task_lba + sectors) >
2973 transport_dev_end_lba(dev)) {
2974 printk(KERN_ERR "LBA: %llu Sectors: %u exceeds"
2975 " transport_dev_end_lba(): %llu\n",
2976 cmd->t_task_lba, sectors,
2977 transport_dev_end_lba(dev));
2978 printk(KERN_ERR " We should return CHECK_CONDITION"
2979 " but we don't yet\n");
2980 return 0;
2981 }
2982
2983 return sectors;
2984}
2985
c66ac9db
NB
2986/* transport_generic_cmd_sequencer():
2987 *
2988 * Generic Command Sequencer that should work for most DAS transport
2989 * drivers.
2990 *
2991 * Called from transport_generic_allocate_tasks() in the $FABRIC_MOD
2992 * RX Thread.
2993 *
2994 * FIXME: Need to support other SCSI OPCODES where as well.
2995 */
2996static int transport_generic_cmd_sequencer(
2997 struct se_cmd *cmd,
2998 unsigned char *cdb)
2999{
5951146d 3000 struct se_device *dev = cmd->se_dev;
c66ac9db
NB
3001 struct se_subsystem_dev *su_dev = dev->se_sub_dev;
3002 int ret = 0, sector_ret = 0, passthrough;
3003 u32 sectors = 0, size = 0, pr_reg_type = 0;
3004 u16 service_action;
3005 u8 alua_ascq = 0;
3006 /*
3007 * Check for an existing UNIT ATTENTION condition
3008 */
3009 if (core_scsi3_ua_check(cmd, cdb) < 0) {
3010 cmd->transport_wait_for_tasks =
3011 &transport_nop_wait_for_tasks;
3012 cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION;
3013 cmd->scsi_sense_reason = TCM_CHECK_CONDITION_UNIT_ATTENTION;
5951146d 3014 return -EINVAL;
c66ac9db
NB
3015 }
3016 /*
3017 * Check status of Asymmetric Logical Unit Assignment port
3018 */
e3d6f909 3019 ret = su_dev->t10_alua.alua_state_check(cmd, cdb, &alua_ascq);
c66ac9db
NB
3020 if (ret != 0) {
3021 cmd->transport_wait_for_tasks = &transport_nop_wait_for_tasks;
3022 /*
25985edc 3023 * Set SCSI additional sense code (ASC) to 'LUN Not Accessible';
c66ac9db
NB
3024 * The ALUA additional sense code qualifier (ASCQ) is determined
3025 * by the ALUA primary or secondary access state..
3026 */
3027 if (ret > 0) {
3028#if 0
3029 printk(KERN_INFO "[%s]: ALUA TG Port not available,"
3030 " SenseKey: NOT_READY, ASC/ASCQ: 0x04/0x%02x\n",
e3d6f909 3031 cmd->se_tfo->get_fabric_name(), alua_ascq);
c66ac9db
NB
3032#endif
3033 transport_set_sense_codes(cmd, 0x04, alua_ascq);
3034 cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION;
3035 cmd->scsi_sense_reason = TCM_CHECK_CONDITION_NOT_READY;
5951146d 3036 return -EINVAL;
c66ac9db
NB
3037 }
3038 goto out_invalid_cdb_field;
3039 }
3040 /*
3041 * Check status for SPC-3 Persistent Reservations
3042 */
e3d6f909
AG
3043 if (su_dev->t10_pr.pr_ops.t10_reservation_check(cmd, &pr_reg_type) != 0) {
3044 if (su_dev->t10_pr.pr_ops.t10_seq_non_holder(
c66ac9db
NB
3045 cmd, cdb, pr_reg_type) != 0)
3046 return transport_handle_reservation_conflict(cmd);
3047 /*
3048 * This means the CDB is allowed for the SCSI Initiator port
3049 * when said port is *NOT* holding the legacy SPC-2 or
3050 * SPC-3 Persistent Reservation.
3051 */
3052 }
3053
3054 switch (cdb[0]) {
3055 case READ_6:
3056 sectors = transport_get_sectors_6(cdb, cmd, &sector_ret);
3057 if (sector_ret)
3058 goto out_unsupported_cdb;
3059 size = transport_get_size(sectors, cdb, cmd);
3060 cmd->transport_split_cdb = &split_cdb_XX_6;
a1d8b49a 3061 cmd->t_task_lba = transport_lba_21(cdb);
c66ac9db
NB
3062 cmd->se_cmd_flags |= SCF_SCSI_DATA_SG_IO_CDB;
3063 break;
3064 case READ_10:
3065 sectors = transport_get_sectors_10(cdb, cmd, &sector_ret);
3066 if (sector_ret)
3067 goto out_unsupported_cdb;
3068 size = transport_get_size(sectors, cdb, cmd);
3069 cmd->transport_split_cdb = &split_cdb_XX_10;
a1d8b49a 3070 cmd->t_task_lba = transport_lba_32(cdb);
c66ac9db
NB
3071 cmd->se_cmd_flags |= SCF_SCSI_DATA_SG_IO_CDB;
3072 break;
3073 case READ_12:
3074 sectors = transport_get_sectors_12(cdb, cmd, &sector_ret);
3075 if (sector_ret)
3076 goto out_unsupported_cdb;
3077 size = transport_get_size(sectors, cdb, cmd);
3078 cmd->transport_split_cdb = &split_cdb_XX_12;
a1d8b49a 3079 cmd->t_task_lba = transport_lba_32(cdb);
c66ac9db
NB
3080 cmd->se_cmd_flags |= SCF_SCSI_DATA_SG_IO_CDB;
3081 break;
3082 case READ_16:
3083 sectors = transport_get_sectors_16(cdb, cmd, &sector_ret);
3084 if (sector_ret)
3085 goto out_unsupported_cdb;
3086 size = transport_get_size(sectors, cdb, cmd);
3087 cmd->transport_split_cdb = &split_cdb_XX_16;
a1d8b49a 3088 cmd->t_task_lba = transport_lba_64(cdb);
c66ac9db
NB
3089 cmd->se_cmd_flags |= SCF_SCSI_DATA_SG_IO_CDB;
3090 break;
3091 case WRITE_6:
3092 sectors = transport_get_sectors_6(cdb, cmd, &sector_ret);
3093 if (sector_ret)
3094 goto out_unsupported_cdb;
3095 size = transport_get_size(sectors, cdb, cmd);
3096 cmd->transport_split_cdb = &split_cdb_XX_6;
a1d8b49a 3097 cmd->t_task_lba = transport_lba_21(cdb);
c66ac9db
NB
3098 cmd->se_cmd_flags |= SCF_SCSI_DATA_SG_IO_CDB;
3099 break;
3100 case WRITE_10:
3101 sectors = transport_get_sectors_10(cdb, cmd, &sector_ret);
3102 if (sector_ret)
3103 goto out_unsupported_cdb;
3104 size = transport_get_size(sectors, cdb, cmd);
3105 cmd->transport_split_cdb = &split_cdb_XX_10;
a1d8b49a
AG
3106 cmd->t_task_lba = transport_lba_32(cdb);
3107 cmd->t_tasks_fua = (cdb[1] & 0x8);
c66ac9db
NB
3108 cmd->se_cmd_flags |= SCF_SCSI_DATA_SG_IO_CDB;
3109 break;
3110 case WRITE_12:
3111 sectors = transport_get_sectors_12(cdb, cmd, &sector_ret);
3112 if (sector_ret)
3113 goto out_unsupported_cdb;
3114 size = transport_get_size(sectors, cdb, cmd);
3115 cmd->transport_split_cdb = &split_cdb_XX_12;
a1d8b49a
AG
3116 cmd->t_task_lba = transport_lba_32(cdb);
3117 cmd->t_tasks_fua = (cdb[1] & 0x8);
c66ac9db
NB
3118 cmd->se_cmd_flags |= SCF_SCSI_DATA_SG_IO_CDB;
3119 break;
3120 case WRITE_16:
3121 sectors = transport_get_sectors_16(cdb, cmd, &sector_ret);
3122 if (sector_ret)
3123 goto out_unsupported_cdb;
3124 size = transport_get_size(sectors, cdb, cmd);
3125 cmd->transport_split_cdb = &split_cdb_XX_16;
a1d8b49a
AG
3126 cmd->t_task_lba = transport_lba_64(cdb);
3127 cmd->t_tasks_fua = (cdb[1] & 0x8);
c66ac9db
NB
3128 cmd->se_cmd_flags |= SCF_SCSI_DATA_SG_IO_CDB;
3129 break;
3130 case XDWRITEREAD_10:
3131 if ((cmd->data_direction != DMA_TO_DEVICE) ||
a1d8b49a 3132 !(cmd->t_tasks_bidi))
c66ac9db
NB
3133 goto out_invalid_cdb_field;
3134 sectors = transport_get_sectors_10(cdb, cmd, &sector_ret);
3135 if (sector_ret)
3136 goto out_unsupported_cdb;
3137 size = transport_get_size(sectors, cdb, cmd);
3138 cmd->transport_split_cdb = &split_cdb_XX_10;
a1d8b49a 3139 cmd->t_task_lba = transport_lba_32(cdb);
c66ac9db 3140 cmd->se_cmd_flags |= SCF_SCSI_DATA_SG_IO_CDB;
e3d6f909 3141 passthrough = (dev->transport->transport_type ==
c66ac9db
NB
3142 TRANSPORT_PLUGIN_PHBA_PDEV);
3143 /*
3144 * Skip the remaining assignments for TCM/PSCSI passthrough
3145 */
3146 if (passthrough)
3147 break;
3148 /*
3149 * Setup BIDI XOR callback to be run during transport_generic_complete_ok()
3150 */
3151 cmd->transport_complete_callback = &transport_xor_callback;
a1d8b49a 3152 cmd->t_tasks_fua = (cdb[1] & 0x8);
c66ac9db
NB
3153 break;
3154 case VARIABLE_LENGTH_CMD:
3155 service_action = get_unaligned_be16(&cdb[8]);
3156 /*
3157 * Determine if this is TCM/PSCSI device and we should disable
3158 * internal emulation for this CDB.
3159 */
e3d6f909 3160 passthrough = (dev->transport->transport_type ==
c66ac9db
NB
3161 TRANSPORT_PLUGIN_PHBA_PDEV);
3162
3163 switch (service_action) {
3164 case XDWRITEREAD_32:
3165 sectors = transport_get_sectors_32(cdb, cmd, &sector_ret);
3166 if (sector_ret)
3167 goto out_unsupported_cdb;
3168 size = transport_get_size(sectors, cdb, cmd);
3169 /*
3170 * Use WRITE_32 and READ_32 opcodes for the emulated
3171 * XDWRITE_READ_32 logic.
3172 */
3173 cmd->transport_split_cdb = &split_cdb_XX_32;
a1d8b49a 3174 cmd->t_task_lba = transport_lba_64_ext(cdb);
c66ac9db
NB
3175 cmd->se_cmd_flags |= SCF_SCSI_DATA_SG_IO_CDB;
3176
3177 /*
3178 * Skip the remaining assignments for TCM/PSCSI passthrough
3179 */
3180 if (passthrough)
3181 break;
3182
3183 /*
3184 * Setup BIDI XOR callback to be run during
3185 * transport_generic_complete_ok()
3186 */
3187 cmd->transport_complete_callback = &transport_xor_callback;
a1d8b49a 3188 cmd->t_tasks_fua = (cdb[10] & 0x8);
c66ac9db
NB
3189 break;
3190 case WRITE_SAME_32:
3191 sectors = transport_get_sectors_32(cdb, cmd, &sector_ret);
3192 if (sector_ret)
3193 goto out_unsupported_cdb;
dd3a5ad8
NB
3194
3195 if (sectors != 0)
3196 size = transport_get_size(sectors, cdb, cmd);
3197 else
3198 size = dev->se_sub_dev->se_dev_attrib.block_size;
3199
a1d8b49a 3200 cmd->t_task_lba = get_unaligned_be64(&cdb[12]);
c66ac9db
NB
3201 cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB;
3202
3203 /*
3204 * Skip the remaining assignments for TCM/PSCSI passthrough
3205 */
3206 if (passthrough)
3207 break;
3208
3209 if ((cdb[10] & 0x04) || (cdb[10] & 0x02)) {
3210 printk(KERN_ERR "WRITE_SAME PBDATA and LBDATA"
3211 " bits not supported for Block Discard"
3212 " Emulation\n");
3213 goto out_invalid_cdb_field;
3214 }
3215 /*
3216 * Currently for the emulated case we only accept
3217 * tpws with the UNMAP=1 bit set.
3218 */
3219 if (!(cdb[10] & 0x08)) {
3220 printk(KERN_ERR "WRITE_SAME w/o UNMAP bit not"
3221 " supported for Block Discard Emulation\n");
3222 goto out_invalid_cdb_field;
3223 }
3224 break;
3225 default:
3226 printk(KERN_ERR "VARIABLE_LENGTH_CMD service action"
3227 " 0x%04x not supported\n", service_action);
3228 goto out_unsupported_cdb;
3229 }
3230 break;
e434f1f1 3231 case MAINTENANCE_IN:
e3d6f909 3232 if (dev->transport->get_device_type(dev) != TYPE_ROM) {
c66ac9db
NB
3233 /* MAINTENANCE_IN from SCC-2 */
3234 /*
3235 * Check for emulated MI_REPORT_TARGET_PGS.
3236 */
3237 if (cdb[1] == MI_REPORT_TARGET_PGS) {
3238 cmd->transport_emulate_cdb =
e3d6f909 3239 (su_dev->t10_alua.alua_type ==
c66ac9db 3240 SPC3_ALUA_EMULATED) ?
e3d6f909 3241 core_emulate_report_target_port_groups :
c66ac9db
NB
3242 NULL;
3243 }
3244 size = (cdb[6] << 24) | (cdb[7] << 16) |
3245 (cdb[8] << 8) | cdb[9];
3246 } else {
3247 /* GPCMD_SEND_KEY from multi media commands */
3248 size = (cdb[8] << 8) + cdb[9];
3249 }
05d1c7c0 3250 cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB;
c66ac9db
NB
3251 break;
3252 case MODE_SELECT:
3253 size = cdb[4];
3254 cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB;
3255 break;
3256 case MODE_SELECT_10:
3257 size = (cdb[7] << 8) + cdb[8];
3258 cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB;
3259 break;
3260 case MODE_SENSE:
3261 size = cdb[4];
05d1c7c0 3262 cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB;
c66ac9db
NB
3263 break;
3264 case MODE_SENSE_10:
3265 case GPCMD_READ_BUFFER_CAPACITY:
3266 case GPCMD_SEND_OPC:
3267 case LOG_SELECT:
3268 case LOG_SENSE:
3269 size = (cdb[7] << 8) + cdb[8];
05d1c7c0 3270 cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB;
c66ac9db
NB
3271 break;
3272 case READ_BLOCK_LIMITS:
3273 size = READ_BLOCK_LEN;
05d1c7c0 3274 cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB;
c66ac9db
NB
3275 break;
3276 case GPCMD_GET_CONFIGURATION:
3277 case GPCMD_READ_FORMAT_CAPACITIES:
3278 case GPCMD_READ_DISC_INFO:
3279 case GPCMD_READ_TRACK_RZONE_INFO:
3280 size = (cdb[7] << 8) + cdb[8];
3281 cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB;
3282 break;
3283 case PERSISTENT_RESERVE_IN:
3284 case PERSISTENT_RESERVE_OUT:
3285 cmd->transport_emulate_cdb =
e3d6f909 3286 (su_dev->t10_pr.res_type ==
c66ac9db 3287 SPC3_PERSISTENT_RESERVATIONS) ?
e3d6f909 3288 core_scsi3_emulate_pr : NULL;
c66ac9db 3289 size = (cdb[7] << 8) + cdb[8];
05d1c7c0 3290 cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB;
c66ac9db
NB
3291 break;
3292 case GPCMD_MECHANISM_STATUS:
3293 case GPCMD_READ_DVD_STRUCTURE:
3294 size = (cdb[8] << 8) + cdb[9];
3295 cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB;
3296 break;
3297 case READ_POSITION:
3298 size = READ_POSITION_LEN;
05d1c7c0 3299 cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB;
c66ac9db 3300 break;
e434f1f1 3301 case MAINTENANCE_OUT:
e3d6f909 3302 if (dev->transport->get_device_type(dev) != TYPE_ROM) {
c66ac9db
NB
3303 /* MAINTENANCE_OUT from SCC-2
3304 *
3305 * Check for emulated MO_SET_TARGET_PGS.
3306 */
3307 if (cdb[1] == MO_SET_TARGET_PGS) {
3308 cmd->transport_emulate_cdb =
e3d6f909 3309 (su_dev->t10_alua.alua_type ==
c66ac9db 3310 SPC3_ALUA_EMULATED) ?
e3d6f909 3311 core_emulate_set_target_port_groups :
c66ac9db
NB
3312 NULL;
3313 }
3314
3315 size = (cdb[6] << 24) | (cdb[7] << 16) |
3316 (cdb[8] << 8) | cdb[9];
3317 } else {
3318 /* GPCMD_REPORT_KEY from multi media commands */
3319 size = (cdb[8] << 8) + cdb[9];
3320 }
05d1c7c0 3321 cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB;
c66ac9db
NB
3322 break;
3323 case INQUIRY:
3324 size = (cdb[3] << 8) + cdb[4];
3325 /*
3326 * Do implict HEAD_OF_QUEUE processing for INQUIRY.
3327 * See spc4r17 section 5.3
3328 */
5951146d 3329 if (cmd->se_dev->dev_task_attr_type == SAM_TASK_ATTR_EMULATED)
e66ecd50 3330 cmd->sam_task_attr = MSG_HEAD_TAG;
05d1c7c0 3331 cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB;
c66ac9db
NB
3332 break;
3333 case READ_BUFFER:
3334 size = (cdb[6] << 16) + (cdb[7] << 8) + cdb[8];
05d1c7c0 3335 cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB;
c66ac9db
NB
3336 break;
3337 case READ_CAPACITY:
3338 size = READ_CAP_LEN;
05d1c7c0 3339 cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB;
c66ac9db
NB
3340 break;
3341 case READ_MEDIA_SERIAL_NUMBER:
3342 case SECURITY_PROTOCOL_IN:
3343 case SECURITY_PROTOCOL_OUT:
3344 size = (cdb[6] << 24) | (cdb[7] << 16) | (cdb[8] << 8) | cdb[9];
05d1c7c0 3345 cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB;
c66ac9db
NB
3346 break;
3347 case SERVICE_ACTION_IN:
3348 case ACCESS_CONTROL_IN:
3349 case ACCESS_CONTROL_OUT:
3350 case EXTENDED_COPY:
3351 case READ_ATTRIBUTE:
3352 case RECEIVE_COPY_RESULTS:
3353 case WRITE_ATTRIBUTE:
3354 size = (cdb[10] << 24) | (cdb[11] << 16) |
3355 (cdb[12] << 8) | cdb[13];
05d1c7c0 3356 cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB;
c66ac9db
NB
3357 break;
3358 case RECEIVE_DIAGNOSTIC:
3359 case SEND_DIAGNOSTIC:
3360 size = (cdb[3] << 8) | cdb[4];
05d1c7c0 3361 cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB;
c66ac9db
NB
3362 break;
3363/* #warning FIXME: Figure out correct GPCMD_READ_CD blocksize. */
3364#if 0
3365 case GPCMD_READ_CD:
3366 sectors = (cdb[6] << 16) + (cdb[7] << 8) + cdb[8];
3367 size = (2336 * sectors);
05d1c7c0 3368 cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB;
c66ac9db
NB
3369 break;
3370#endif
3371 case READ_TOC:
3372 size = cdb[8];
05d1c7c0 3373 cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB;
c66ac9db
NB
3374 break;
3375 case REQUEST_SENSE:
3376 size = cdb[4];
05d1c7c0 3377 cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB;
c66ac9db
NB
3378 break;
3379 case READ_ELEMENT_STATUS:
3380 size = 65536 * cdb[7] + 256 * cdb[8] + cdb[9];
05d1c7c0 3381 cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB;
c66ac9db
NB
3382 break;
3383 case WRITE_BUFFER:
3384 size = (cdb[6] << 16) + (cdb[7] << 8) + cdb[8];
05d1c7c0 3385 cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB;
c66ac9db
NB
3386 break;
3387 case RESERVE:
3388 case RESERVE_10:
3389 /*
3390 * The SPC-2 RESERVE does not contain a size in the SCSI CDB.
3391 * Assume the passthrough or $FABRIC_MOD will tell us about it.
3392 */
3393 if (cdb[0] == RESERVE_10)
3394 size = (cdb[7] << 8) | cdb[8];
3395 else
3396 size = cmd->data_length;
3397
3398 /*
3399 * Setup the legacy emulated handler for SPC-2 and
3400 * >= SPC-3 compatible reservation handling (CRH=1)
3401 * Otherwise, we assume the underlying SCSI logic is
3402 * is running in SPC_PASSTHROUGH, and wants reservations
3403 * emulation disabled.
3404 */
3405 cmd->transport_emulate_cdb =
e3d6f909 3406 (su_dev->t10_pr.res_type !=
c66ac9db 3407 SPC_PASSTHROUGH) ?
e3d6f909 3408 core_scsi2_emulate_crh : NULL;
c66ac9db
NB
3409 cmd->se_cmd_flags |= SCF_SCSI_NON_DATA_CDB;
3410 break;
3411 case RELEASE:
3412 case RELEASE_10:
3413 /*
3414 * The SPC-2 RELEASE does not contain a size in the SCSI CDB.
3415 * Assume the passthrough or $FABRIC_MOD will tell us about it.
3416 */
3417 if (cdb[0] == RELEASE_10)
3418 size = (cdb[7] << 8) | cdb[8];
3419 else
3420 size = cmd->data_length;
3421
3422 cmd->transport_emulate_cdb =
e3d6f909 3423 (su_dev->t10_pr.res_type !=
c66ac9db 3424 SPC_PASSTHROUGH) ?
e3d6f909 3425 core_scsi2_emulate_crh : NULL;
c66ac9db
NB
3426 cmd->se_cmd_flags |= SCF_SCSI_NON_DATA_CDB;
3427 break;
3428 case SYNCHRONIZE_CACHE:
3429 case 0x91: /* SYNCHRONIZE_CACHE_16: */
3430 /*
3431 * Extract LBA and range to be flushed for emulated SYNCHRONIZE_CACHE
3432 */
3433 if (cdb[0] == SYNCHRONIZE_CACHE) {
3434 sectors = transport_get_sectors_10(cdb, cmd, &sector_ret);
a1d8b49a 3435 cmd->t_task_lba = transport_lba_32(cdb);
c66ac9db
NB
3436 } else {
3437 sectors = transport_get_sectors_16(cdb, cmd, &sector_ret);
a1d8b49a 3438 cmd->t_task_lba = transport_lba_64(cdb);
c66ac9db
NB
3439 }
3440 if (sector_ret)
3441 goto out_unsupported_cdb;
3442
3443 size = transport_get_size(sectors, cdb, cmd);
3444 cmd->se_cmd_flags |= SCF_SCSI_NON_DATA_CDB;
3445
3446 /*
3447 * For TCM/pSCSI passthrough, skip cmd->transport_emulate_cdb()
3448 */
e3d6f909 3449 if (dev->transport->transport_type == TRANSPORT_PLUGIN_PHBA_PDEV)
c66ac9db
NB
3450 break;
3451 /*
3452 * Set SCF_EMULATE_CDB_ASYNC to ensure asynchronous operation
3453 * for SYNCHRONIZE_CACHE* Immed=1 case in __transport_execute_tasks()
3454 */
3455 cmd->se_cmd_flags |= SCF_EMULATE_CDB_ASYNC;
3456 /*
3457 * Check to ensure that LBA + Range does not exceed past end of
3458 * device.
3459 */
a1d8b49a 3460 if (!transport_cmd_get_valid_sectors(cmd))
c66ac9db
NB
3461 goto out_invalid_cdb_field;
3462 break;
3463 case UNMAP:
3464 size = get_unaligned_be16(&cdb[7]);
05d1c7c0 3465 cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB;
c66ac9db
NB
3466 break;
3467 case WRITE_SAME_16:
3468 sectors = transport_get_sectors_16(cdb, cmd, &sector_ret);
3469 if (sector_ret)
3470 goto out_unsupported_cdb;
dd3a5ad8
NB
3471
3472 if (sectors != 0)
3473 size = transport_get_size(sectors, cdb, cmd);
3474 else
3475 size = dev->se_sub_dev->se_dev_attrib.block_size;
3476
a1d8b49a 3477 cmd->t_task_lba = get_unaligned_be16(&cdb[2]);
e3d6f909 3478 passthrough = (dev->transport->transport_type ==
c66ac9db
NB
3479 TRANSPORT_PLUGIN_PHBA_PDEV);
3480 /*
3481 * Determine if the received WRITE_SAME_16 is used to for direct
3482 * passthrough into Linux/SCSI with struct request via TCM/pSCSI
3483 * or we are signaling the use of internal WRITE_SAME + UNMAP=1
3484 * emulation for -> Linux/BLOCK disbard with TCM/IBLOCK and
3485 * TCM/FILEIO subsystem plugin backstores.
3486 */
3487 if (!(passthrough)) {
3488 if ((cdb[1] & 0x04) || (cdb[1] & 0x02)) {
3489 printk(KERN_ERR "WRITE_SAME PBDATA and LBDATA"
3490 " bits not supported for Block Discard"
3491 " Emulation\n");
3492 goto out_invalid_cdb_field;
3493 }
3494 /*
3495 * Currently for the emulated case we only accept
3496 * tpws with the UNMAP=1 bit set.
3497 */
3498 if (!(cdb[1] & 0x08)) {
3499 printk(KERN_ERR "WRITE_SAME w/o UNMAP bit not "
3500 " supported for Block Discard Emulation\n");
3501 goto out_invalid_cdb_field;
3502 }
3503 }
3504 cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB;
3505 break;
3506 case ALLOW_MEDIUM_REMOVAL:
3507 case GPCMD_CLOSE_TRACK:
3508 case ERASE:
3509 case INITIALIZE_ELEMENT_STATUS:
3510 case GPCMD_LOAD_UNLOAD:
3511 case REZERO_UNIT:
3512 case SEEK_10:
3513 case GPCMD_SET_SPEED:
3514 case SPACE:
3515 case START_STOP:
3516 case TEST_UNIT_READY:
3517 case VERIFY:
3518 case WRITE_FILEMARKS:
3519 case MOVE_MEDIUM:
3520 cmd->se_cmd_flags |= SCF_SCSI_NON_DATA_CDB;
3521 break;
3522 case REPORT_LUNS:
3523 cmd->transport_emulate_cdb =
e3d6f909 3524 transport_core_report_lun_response;
c66ac9db
NB
3525 size = (cdb[6] << 24) | (cdb[7] << 16) | (cdb[8] << 8) | cdb[9];
3526 /*
3527 * Do implict HEAD_OF_QUEUE processing for REPORT_LUNS
3528 * See spc4r17 section 5.3
3529 */
5951146d 3530 if (cmd->se_dev->dev_task_attr_type == SAM_TASK_ATTR_EMULATED)
e66ecd50 3531 cmd->sam_task_attr = MSG_HEAD_TAG;
05d1c7c0 3532 cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB;
c66ac9db
NB
3533 break;
3534 default:
3535 printk(KERN_WARNING "TARGET_CORE[%s]: Unsupported SCSI Opcode"
3536 " 0x%02x, sending CHECK_CONDITION.\n",
e3d6f909 3537 cmd->se_tfo->get_fabric_name(), cdb[0]);
c66ac9db
NB
3538 cmd->transport_wait_for_tasks = &transport_nop_wait_for_tasks;
3539 goto out_unsupported_cdb;
3540 }
3541
3542 if (size != cmd->data_length) {
3543 printk(KERN_WARNING "TARGET_CORE[%s]: Expected Transfer Length:"
3544 " %u does not match SCSI CDB Length: %u for SAM Opcode:"
e3d6f909 3545 " 0x%02x\n", cmd->se_tfo->get_fabric_name(),
c66ac9db
NB
3546 cmd->data_length, size, cdb[0]);
3547
3548 cmd->cmd_spdtl = size;
3549
3550 if (cmd->data_direction == DMA_TO_DEVICE) {
3551 printk(KERN_ERR "Rejecting underflow/overflow"
3552 " WRITE data\n");
3553 goto out_invalid_cdb_field;
3554 }
3555 /*
3556 * Reject READ_* or WRITE_* with overflow/underflow for
3557 * type SCF_SCSI_DATA_SG_IO_CDB.
3558 */
e3d6f909 3559 if (!(ret) && (dev->se_sub_dev->se_dev_attrib.block_size != 512)) {
c66ac9db
NB
3560 printk(KERN_ERR "Failing OVERFLOW/UNDERFLOW for LBA op"
3561 " CDB on non 512-byte sector setup subsystem"
e3d6f909 3562 " plugin: %s\n", dev->transport->name);
c66ac9db
NB
3563 /* Returns CHECK_CONDITION + INVALID_CDB_FIELD */
3564 goto out_invalid_cdb_field;
3565 }
3566
3567 if (size > cmd->data_length) {
3568 cmd->se_cmd_flags |= SCF_OVERFLOW_BIT;
3569 cmd->residual_count = (size - cmd->data_length);
3570 } else {
3571 cmd->se_cmd_flags |= SCF_UNDERFLOW_BIT;
3572 cmd->residual_count = (cmd->data_length - size);
3573 }
3574 cmd->data_length = size;
3575 }
3576
d0229ae3
AG
3577 /* Let's limit control cdbs to a page, for simplicity's sake. */
3578 if ((cmd->se_cmd_flags & SCF_SCSI_CONTROL_SG_IO_CDB) &&
3579 size > PAGE_SIZE)
3580 goto out_invalid_cdb_field;
3581
c66ac9db
NB
3582 transport_set_supported_SAM_opcode(cmd);
3583 return ret;
3584
3585out_unsupported_cdb:
3586 cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION;
3587 cmd->scsi_sense_reason = TCM_UNSUPPORTED_SCSI_OPCODE;
5951146d 3588 return -EINVAL;
c66ac9db
NB
3589out_invalid_cdb_field:
3590 cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION;
3591 cmd->scsi_sense_reason = TCM_INVALID_CDB_FIELD;
5951146d 3592 return -EINVAL;
c66ac9db
NB
3593}
3594
c66ac9db
NB
3595/*
3596 * Called from transport_generic_complete_ok() and
3597 * transport_generic_request_failure() to determine which dormant/delayed
3598 * and ordered cmds need to have their tasks added to the execution queue.
3599 */
3600static void transport_complete_task_attr(struct se_cmd *cmd)
3601{
5951146d 3602 struct se_device *dev = cmd->se_dev;
c66ac9db
NB
3603 struct se_cmd *cmd_p, *cmd_tmp;
3604 int new_active_tasks = 0;
3605
e66ecd50 3606 if (cmd->sam_task_attr == MSG_SIMPLE_TAG) {
c66ac9db
NB
3607 atomic_dec(&dev->simple_cmds);
3608 smp_mb__after_atomic_dec();
3609 dev->dev_cur_ordered_id++;
3610 DEBUG_STA("Incremented dev->dev_cur_ordered_id: %u for"
3611 " SIMPLE: %u\n", dev->dev_cur_ordered_id,
3612 cmd->se_ordered_id);
e66ecd50 3613 } else if (cmd->sam_task_attr == MSG_HEAD_TAG) {
c66ac9db
NB
3614 atomic_dec(&dev->dev_hoq_count);
3615 smp_mb__after_atomic_dec();
3616 dev->dev_cur_ordered_id++;
3617 DEBUG_STA("Incremented dev_cur_ordered_id: %u for"
3618 " HEAD_OF_QUEUE: %u\n", dev->dev_cur_ordered_id,
3619 cmd->se_ordered_id);
e66ecd50 3620 } else if (cmd->sam_task_attr == MSG_ORDERED_TAG) {
c66ac9db 3621 spin_lock(&dev->ordered_cmd_lock);
5951146d 3622 list_del(&cmd->se_ordered_node);
c66ac9db
NB
3623 atomic_dec(&dev->dev_ordered_sync);
3624 smp_mb__after_atomic_dec();
3625 spin_unlock(&dev->ordered_cmd_lock);
3626
3627 dev->dev_cur_ordered_id++;
3628 DEBUG_STA("Incremented dev_cur_ordered_id: %u for ORDERED:"
3629 " %u\n", dev->dev_cur_ordered_id, cmd->se_ordered_id);
3630 }
3631 /*
3632 * Process all commands up to the last received
3633 * ORDERED task attribute which requires another blocking
3634 * boundary
3635 */
3636 spin_lock(&dev->delayed_cmd_lock);
3637 list_for_each_entry_safe(cmd_p, cmd_tmp,
5951146d 3638 &dev->delayed_cmd_list, se_delayed_node) {
c66ac9db 3639
5951146d 3640 list_del(&cmd_p->se_delayed_node);
c66ac9db
NB
3641 spin_unlock(&dev->delayed_cmd_lock);
3642
3643 DEBUG_STA("Calling add_tasks() for"
3644 " cmd_p: 0x%02x Task Attr: 0x%02x"
3645 " Dormant -> Active, se_ordered_id: %u\n",
3646 T_TASK(cmd_p)->t_task_cdb[0],
3647 cmd_p->sam_task_attr, cmd_p->se_ordered_id);
3648
3649 transport_add_tasks_from_cmd(cmd_p);
3650 new_active_tasks++;
3651
3652 spin_lock(&dev->delayed_cmd_lock);
e66ecd50 3653 if (cmd_p->sam_task_attr == MSG_ORDERED_TAG)
c66ac9db
NB
3654 break;
3655 }
3656 spin_unlock(&dev->delayed_cmd_lock);
3657 /*
3658 * If new tasks have become active, wake up the transport thread
3659 * to do the processing of the Active tasks.
3660 */
3661 if (new_active_tasks != 0)
e3d6f909 3662 wake_up_interruptible(&dev->dev_queue_obj.thread_wq);
c66ac9db
NB
3663}
3664
07bde79a
NB
3665static int transport_complete_qf(struct se_cmd *cmd)
3666{
3667 int ret = 0;
3668
3669 if (cmd->se_cmd_flags & SCF_TRANSPORT_TASK_SENSE)
3670 return cmd->se_tfo->queue_status(cmd);
3671
3672 switch (cmd->data_direction) {
3673 case DMA_FROM_DEVICE:
3674 ret = cmd->se_tfo->queue_data_in(cmd);
3675 break;
3676 case DMA_TO_DEVICE:
ec98f782 3677 if (cmd->t_bidi_data_sg) {
07bde79a
NB
3678 ret = cmd->se_tfo->queue_data_in(cmd);
3679 if (ret < 0)
3680 return ret;
3681 }
3682 /* Fall through for DMA_TO_DEVICE */
3683 case DMA_NONE:
3684 ret = cmd->se_tfo->queue_status(cmd);
3685 break;
3686 default:
3687 break;
3688 }
3689
3690 return ret;
3691}
3692
3693static void transport_handle_queue_full(
3694 struct se_cmd *cmd,
3695 struct se_device *dev,
3696 int (*qf_callback)(struct se_cmd *))
3697{
3698 spin_lock_irq(&dev->qf_cmd_lock);
3699 cmd->se_cmd_flags |= SCF_EMULATE_QUEUE_FULL;
3700 cmd->transport_qf_callback = qf_callback;
3701 list_add_tail(&cmd->se_qf_node, &cmd->se_dev->qf_cmd_list);
3702 atomic_inc(&dev->dev_qf_count);
3703 smp_mb__after_atomic_inc();
3704 spin_unlock_irq(&cmd->se_dev->qf_cmd_lock);
3705
3706 schedule_work(&cmd->se_dev->qf_work_queue);
3707}
3708
c66ac9db
NB
3709static void transport_generic_complete_ok(struct se_cmd *cmd)
3710{
07bde79a 3711 int reason = 0, ret;
c66ac9db
NB
3712 /*
3713 * Check if we need to move delayed/dormant tasks from cmds on the
3714 * delayed execution list after a HEAD_OF_QUEUE or ORDERED Task
3715 * Attribute.
3716 */
5951146d 3717 if (cmd->se_dev->dev_task_attr_type == SAM_TASK_ATTR_EMULATED)
c66ac9db 3718 transport_complete_task_attr(cmd);
07bde79a
NB
3719 /*
3720 * Check to schedule QUEUE_FULL work, or execute an existing
3721 * cmd->transport_qf_callback()
3722 */
3723 if (atomic_read(&cmd->se_dev->dev_qf_count) != 0)
3724 schedule_work(&cmd->se_dev->qf_work_queue);
3725
3726 if (cmd->transport_qf_callback) {
3727 ret = cmd->transport_qf_callback(cmd);
3728 if (ret < 0)
3729 goto queue_full;
3730
3731 cmd->transport_qf_callback = NULL;
3732 goto done;
3733 }
c66ac9db
NB
3734 /*
3735 * Check if we need to retrieve a sense buffer from
3736 * the struct se_cmd in question.
3737 */
3738 if (cmd->se_cmd_flags & SCF_TRANSPORT_TASK_SENSE) {
3739 if (transport_get_sense_data(cmd) < 0)
3740 reason = TCM_NON_EXISTENT_LUN;
3741
3742 /*
3743 * Only set when an struct se_task->task_scsi_status returned
3744 * a non GOOD status.
3745 */
3746 if (cmd->scsi_status) {
07bde79a 3747 ret = transport_send_check_condition_and_sense(
c66ac9db 3748 cmd, reason, 1);
07bde79a
NB
3749 if (ret == -EAGAIN)
3750 goto queue_full;
3751
c66ac9db
NB
3752 transport_lun_remove_cmd(cmd);
3753 transport_cmd_check_stop_to_fabric(cmd);
3754 return;
3755 }
3756 }
3757 /*
25985edc 3758 * Check for a callback, used by amongst other things
c66ac9db
NB
3759 * XDWRITE_READ_10 emulation.
3760 */
3761 if (cmd->transport_complete_callback)
3762 cmd->transport_complete_callback(cmd);
3763
3764 switch (cmd->data_direction) {
3765 case DMA_FROM_DEVICE:
3766 spin_lock(&cmd->se_lun->lun_sep_lock);
e3d6f909
AG
3767 if (cmd->se_lun->lun_sep) {
3768 cmd->se_lun->lun_sep->sep_stats.tx_data_octets +=
c66ac9db
NB
3769 cmd->data_length;
3770 }
3771 spin_unlock(&cmd->se_lun->lun_sep_lock);
c66ac9db 3772
07bde79a
NB
3773 ret = cmd->se_tfo->queue_data_in(cmd);
3774 if (ret == -EAGAIN)
3775 goto queue_full;
c66ac9db
NB
3776 break;
3777 case DMA_TO_DEVICE:
3778 spin_lock(&cmd->se_lun->lun_sep_lock);
e3d6f909
AG
3779 if (cmd->se_lun->lun_sep) {
3780 cmd->se_lun->lun_sep->sep_stats.rx_data_octets +=
c66ac9db
NB
3781 cmd->data_length;
3782 }
3783 spin_unlock(&cmd->se_lun->lun_sep_lock);
3784 /*
3785 * Check if we need to send READ payload for BIDI-COMMAND
3786 */
ec98f782 3787 if (cmd->t_bidi_data_sg) {
c66ac9db 3788 spin_lock(&cmd->se_lun->lun_sep_lock);
e3d6f909
AG
3789 if (cmd->se_lun->lun_sep) {
3790 cmd->se_lun->lun_sep->sep_stats.tx_data_octets +=
c66ac9db
NB
3791 cmd->data_length;
3792 }
3793 spin_unlock(&cmd->se_lun->lun_sep_lock);
07bde79a
NB
3794 ret = cmd->se_tfo->queue_data_in(cmd);
3795 if (ret == -EAGAIN)
3796 goto queue_full;
c66ac9db
NB
3797 break;
3798 }
3799 /* Fall through for DMA_TO_DEVICE */
3800 case DMA_NONE:
07bde79a
NB
3801 ret = cmd->se_tfo->queue_status(cmd);
3802 if (ret == -EAGAIN)
3803 goto queue_full;
c66ac9db
NB
3804 break;
3805 default:
3806 break;
3807 }
3808
07bde79a 3809done:
c66ac9db
NB
3810 transport_lun_remove_cmd(cmd);
3811 transport_cmd_check_stop_to_fabric(cmd);
07bde79a
NB
3812 return;
3813
3814queue_full:
3815 printk(KERN_INFO "Handling complete_ok QUEUE_FULL: se_cmd: %p,"
3816 " data_direction: %d\n", cmd, cmd->data_direction);
3817 transport_handle_queue_full(cmd, cmd->se_dev, transport_complete_qf);
c66ac9db
NB
3818}
3819
3820static void transport_free_dev_tasks(struct se_cmd *cmd)
3821{
3822 struct se_task *task, *task_tmp;
3823 unsigned long flags;
3824
a1d8b49a 3825 spin_lock_irqsave(&cmd->t_state_lock, flags);
c66ac9db 3826 list_for_each_entry_safe(task, task_tmp,
a1d8b49a 3827 &cmd->t_task_list, t_list) {
c66ac9db
NB
3828 if (atomic_read(&task->task_active))
3829 continue;
3830
3831 kfree(task->task_sg_bidi);
3832 kfree(task->task_sg);
3833
3834 list_del(&task->t_list);
3835
a1d8b49a 3836 spin_unlock_irqrestore(&cmd->t_state_lock, flags);
c66ac9db 3837 if (task->se_dev)
e3d6f909 3838 task->se_dev->transport->free_task(task);
c66ac9db
NB
3839 else
3840 printk(KERN_ERR "task[%u] - task->se_dev is NULL\n",
3841 task->task_no);
a1d8b49a 3842 spin_lock_irqsave(&cmd->t_state_lock, flags);
c66ac9db 3843 }
a1d8b49a 3844 spin_unlock_irqrestore(&cmd->t_state_lock, flags);
c66ac9db
NB
3845}
3846
3847static inline void transport_free_pages(struct se_cmd *cmd)
3848{
ec98f782 3849 struct scatterlist *sg;
c66ac9db 3850 int free_page = 1;
ec98f782 3851 int count;
c66ac9db
NB
3852
3853 if (cmd->se_cmd_flags & SCF_PASSTHROUGH_SG_TO_MEM_NOALLOC)
3854 free_page = 0;
3855 if (cmd->se_dev->transport->do_se_mem_map)
3856 free_page = 0;
3857
ec98f782 3858 for_each_sg(cmd->t_data_sg, sg, cmd->t_data_nents, count) {
c66ac9db 3859 /*
ec98f782 3860 * Only called if
c66ac9db
NB
3861 * SCF_PASSTHROUGH_SG_TO_MEM_NOALLOC is NOT in use,
3862 */
3863 if (free_page)
ec98f782 3864 __free_page(sg_page(sg));
c66ac9db 3865
c66ac9db 3866 }
ec98f782
AG
3867 if (free_page)
3868 kfree(cmd->t_data_sg);
3869 cmd->t_data_sg = NULL;
3870 cmd->t_data_nents = 0;
c66ac9db 3871
ec98f782 3872 for_each_sg(cmd->t_bidi_data_sg, sg, cmd->t_bidi_data_nents, count) {
5951146d 3873 /*
ec98f782 3874 * Only called if
5951146d
AG
3875 * SCF_PASSTHROUGH_SG_TO_MEM_NOALLOC is NOT in use,
3876 */
3877 if (free_page)
ec98f782 3878 __free_page(sg_page(sg));
c66ac9db 3879
c66ac9db 3880 }
ec98f782
AG
3881 if (free_page)
3882 kfree(cmd->t_bidi_data_sg);
3883 cmd->t_bidi_data_sg = NULL;
3884 cmd->t_bidi_data_nents = 0;
c66ac9db
NB
3885}
3886
3887static inline void transport_release_tasks(struct se_cmd *cmd)
3888{
3889 transport_free_dev_tasks(cmd);
3890}
3891
3892static inline int transport_dec_and_check(struct se_cmd *cmd)
3893{
3894 unsigned long flags;
3895
a1d8b49a
AG
3896 spin_lock_irqsave(&cmd->t_state_lock, flags);
3897 if (atomic_read(&cmd->t_fe_count)) {
3898 if (!(atomic_dec_and_test(&cmd->t_fe_count))) {
3899 spin_unlock_irqrestore(&cmd->t_state_lock,
c66ac9db
NB
3900 flags);
3901 return 1;
3902 }
3903 }
3904
a1d8b49a
AG
3905 if (atomic_read(&cmd->t_se_count)) {
3906 if (!(atomic_dec_and_test(&cmd->t_se_count))) {
3907 spin_unlock_irqrestore(&cmd->t_state_lock,
c66ac9db
NB
3908 flags);
3909 return 1;
3910 }
3911 }
a1d8b49a 3912 spin_unlock_irqrestore(&cmd->t_state_lock, flags);
c66ac9db
NB
3913
3914 return 0;
3915}
3916
3917static void transport_release_fe_cmd(struct se_cmd *cmd)
3918{
3919 unsigned long flags;
3920
3921 if (transport_dec_and_check(cmd))
3922 return;
3923
a1d8b49a
AG
3924 spin_lock_irqsave(&cmd->t_state_lock, flags);
3925 if (!(atomic_read(&cmd->transport_dev_active))) {
3926 spin_unlock_irqrestore(&cmd->t_state_lock, flags);
c66ac9db
NB
3927 goto free_pages;
3928 }
a1d8b49a 3929 atomic_set(&cmd->transport_dev_active, 0);
c66ac9db 3930 transport_all_task_dev_remove_state(cmd);
a1d8b49a 3931 spin_unlock_irqrestore(&cmd->t_state_lock, flags);
c66ac9db
NB
3932
3933 transport_release_tasks(cmd);
3934free_pages:
3935 transport_free_pages(cmd);
3936 transport_free_se_cmd(cmd);
35462975 3937 cmd->se_tfo->release_cmd(cmd);
c66ac9db
NB
3938}
3939
35462975
CH
3940static int
3941transport_generic_remove(struct se_cmd *cmd, int session_reinstatement)
c66ac9db
NB
3942{
3943 unsigned long flags;
3944
c66ac9db
NB
3945 if (transport_dec_and_check(cmd)) {
3946 if (session_reinstatement) {
a1d8b49a 3947 spin_lock_irqsave(&cmd->t_state_lock, flags);
c66ac9db 3948 transport_all_task_dev_remove_state(cmd);
a1d8b49a 3949 spin_unlock_irqrestore(&cmd->t_state_lock,
c66ac9db
NB
3950 flags);
3951 }
3952 return 1;
3953 }
3954
a1d8b49a
AG
3955 spin_lock_irqsave(&cmd->t_state_lock, flags);
3956 if (!(atomic_read(&cmd->transport_dev_active))) {
3957 spin_unlock_irqrestore(&cmd->t_state_lock, flags);
c66ac9db
NB
3958 goto free_pages;
3959 }
a1d8b49a 3960 atomic_set(&cmd->transport_dev_active, 0);
c66ac9db 3961 transport_all_task_dev_remove_state(cmd);
a1d8b49a 3962 spin_unlock_irqrestore(&cmd->t_state_lock, flags);
c66ac9db
NB
3963
3964 transport_release_tasks(cmd);
5951146d 3965
c66ac9db
NB
3966free_pages:
3967 transport_free_pages(cmd);
35462975 3968 transport_release_cmd(cmd);
c66ac9db
NB
3969 return 0;
3970}
3971
3972/*
ec98f782
AG
3973 * transport_generic_map_mem_to_cmd - Use fabric-alloced pages instead of
3974 * allocating in the core.
c66ac9db
NB
3975 * @cmd: Associated se_cmd descriptor
3976 * @mem: SGL style memory for TCM WRITE / READ
3977 * @sg_mem_num: Number of SGL elements
3978 * @mem_bidi_in: SGL style memory for TCM BIDI READ
3979 * @sg_mem_bidi_num: Number of BIDI READ SGL elements
3980 *
3981 * Return: nonzero return cmd was rejected for -ENOMEM or inproper usage
3982 * of parameters.
3983 */
3984int transport_generic_map_mem_to_cmd(
3985 struct se_cmd *cmd,
5951146d
AG
3986 struct scatterlist *sgl,
3987 u32 sgl_count,
3988 struct scatterlist *sgl_bidi,
3989 u32 sgl_bidi_count)
c66ac9db 3990{
5951146d 3991 if (!sgl || !sgl_count)
c66ac9db 3992 return 0;
c66ac9db 3993
c66ac9db
NB
3994 if ((cmd->se_cmd_flags & SCF_SCSI_DATA_SG_IO_CDB) ||
3995 (cmd->se_cmd_flags & SCF_SCSI_CONTROL_SG_IO_CDB)) {
c66ac9db 3996
ec98f782
AG
3997 cmd->t_data_sg = sgl;
3998 cmd->t_data_nents = sgl_count;
c66ac9db 3999
ec98f782
AG
4000 if (sgl_bidi && sgl_bidi_count) {
4001 cmd->t_bidi_data_sg = sgl_bidi;
4002 cmd->t_bidi_data_nents = sgl_bidi_count;
c66ac9db
NB
4003 }
4004 cmd->se_cmd_flags |= SCF_PASSTHROUGH_SG_TO_MEM_NOALLOC;
c66ac9db
NB
4005 }
4006
4007 return 0;
4008}
4009EXPORT_SYMBOL(transport_generic_map_mem_to_cmd);
4010
c66ac9db
NB
4011static int transport_new_cmd_obj(struct se_cmd *cmd)
4012{
5951146d 4013 struct se_device *dev = cmd->se_dev;
a1d8b49a
AG
4014 u32 task_cdbs;
4015 u32 rc;
ec98f782 4016 int set_counts = 1;
c66ac9db 4017
ec98f782
AG
4018 /*
4019 * Setup any BIDI READ tasks and memory from
4020 * cmd->t_mem_bidi_list so the READ struct se_tasks
4021 * are queued first for the non pSCSI passthrough case.
4022 */
4023 if (cmd->t_bidi_data_sg &&
4024 (dev->transport->transport_type != TRANSPORT_PLUGIN_PHBA_PDEV)) {
4025 rc = transport_allocate_tasks(cmd,
4026 cmd->t_task_lba,
4027 DMA_FROM_DEVICE,
4028 cmd->t_bidi_data_sg,
4029 cmd->t_bidi_data_nents);
4030 if (!rc) {
c66ac9db
NB
4031 cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION;
4032 cmd->scsi_sense_reason =
ec98f782 4033 TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
c66ac9db
NB
4034 return PYX_TRANSPORT_LU_COMM_FAILURE;
4035 }
ec98f782
AG
4036 atomic_inc(&cmd->t_fe_count);
4037 atomic_inc(&cmd->t_se_count);
4038 set_counts = 0;
4039 }
4040 /*
4041 * Setup the tasks and memory from cmd->t_mem_list
4042 * Note for BIDI transfers this will contain the WRITE payload
4043 */
4044 task_cdbs = transport_allocate_tasks(cmd,
4045 cmd->t_task_lba,
4046 cmd->data_direction,
4047 cmd->t_data_sg,
4048 cmd->t_data_nents);
4049 if (!task_cdbs) {
4050 cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION;
4051 cmd->scsi_sense_reason =
4052 TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
4053 return PYX_TRANSPORT_LU_COMM_FAILURE;
4054 }
c66ac9db 4055
ec98f782
AG
4056 if (set_counts) {
4057 atomic_inc(&cmd->t_fe_count);
4058 atomic_inc(&cmd->t_se_count);
c66ac9db
NB
4059 }
4060
ec98f782
AG
4061 cmd->t_task_list_num = task_cdbs;
4062
a1d8b49a
AG
4063 atomic_set(&cmd->t_task_cdbs_left, task_cdbs);
4064 atomic_set(&cmd->t_task_cdbs_ex_left, task_cdbs);
4065 atomic_set(&cmd->t_task_cdbs_timeout_left, task_cdbs);
c66ac9db
NB
4066 return 0;
4067}
4068
05d1c7c0
AG
4069void *transport_kmap_first_data_page(struct se_cmd *cmd)
4070{
ec98f782 4071 struct scatterlist *sg = cmd->t_data_sg;
05d1c7c0 4072
ec98f782 4073 BUG_ON(!sg);
05d1c7c0 4074 /*
ec98f782
AG
4075 * We need to take into account a possible offset here for fabrics like
4076 * tcm_loop who may be using a contig buffer from the SCSI midlayer for
4077 * control CDBs passed as SGLs via transport_generic_map_mem_to_cmd()
05d1c7c0 4078 */
ec98f782 4079 return kmap(sg_page(sg)) + sg->offset;
05d1c7c0
AG
4080}
4081EXPORT_SYMBOL(transport_kmap_first_data_page);
4082
4083void transport_kunmap_first_data_page(struct se_cmd *cmd)
4084{
ec98f782 4085 kunmap(sg_page(cmd->t_data_sg));
05d1c7c0
AG
4086}
4087EXPORT_SYMBOL(transport_kunmap_first_data_page);
4088
c66ac9db 4089static int
05d1c7c0 4090transport_generic_get_mem(struct se_cmd *cmd)
c66ac9db 4091{
ec98f782
AG
4092 u32 length = cmd->data_length;
4093 unsigned int nents;
4094 struct page *page;
4095 int i = 0;
c66ac9db 4096
c66ac9db
NB
4097 /*
4098 * If the device uses memory mapping this is enough.
4099 */
4100 if (cmd->se_dev->transport->do_se_mem_map)
4101 return 0;
4102
ec98f782
AG
4103 nents = DIV_ROUND_UP(length, PAGE_SIZE);
4104 cmd->t_data_sg = kmalloc(sizeof(struct scatterlist) * nents, GFP_KERNEL);
4105 if (!cmd->t_data_sg)
4106 return -ENOMEM;
c66ac9db 4107
ec98f782
AG
4108 cmd->t_data_nents = nents;
4109 sg_init_table(cmd->t_data_sg, nents);
c66ac9db 4110
ec98f782
AG
4111 while (length) {
4112 u32 page_len = min_t(u32, length, PAGE_SIZE);
4113 page = alloc_page(GFP_KERNEL | __GFP_ZERO);
4114 if (!page)
4115 goto out;
c66ac9db 4116
ec98f782
AG
4117 sg_set_page(&cmd->t_data_sg[i], page, page_len, 0);
4118 length -= page_len;
4119 i++;
c66ac9db 4120 }
c66ac9db 4121 return 0;
c66ac9db 4122
ec98f782
AG
4123out:
4124 while (i >= 0) {
4125 __free_page(sg_page(&cmd->t_data_sg[i]));
4126 i--;
c66ac9db 4127 }
ec98f782
AG
4128 kfree(cmd->t_data_sg);
4129 cmd->t_data_sg = NULL;
4130 return -ENOMEM;
c66ac9db
NB
4131}
4132
a1d8b49a
AG
4133/* Reduce sectors if they are too long for the device */
4134static inline sector_t transport_limit_task_sectors(
c66ac9db
NB
4135 struct se_device *dev,
4136 unsigned long long lba,
a1d8b49a 4137 sector_t sectors)
c66ac9db 4138{
a1d8b49a 4139 sectors = min_t(sector_t, sectors, dev->se_sub_dev->se_dev_attrib.max_sectors);
c66ac9db 4140
a1d8b49a
AG
4141 if (dev->transport->get_device_type(dev) == TYPE_DISK)
4142 if ((lba + sectors) > transport_dev_end_lba(dev))
4143 sectors = ((transport_dev_end_lba(dev) - lba) + 1);
c66ac9db 4144
a1d8b49a 4145 return sectors;
c66ac9db
NB
4146}
4147
c66ac9db
NB
4148
4149/*
4150 * This function can be used by HW target mode drivers to create a linked
4151 * scatterlist from all contiguously allocated struct se_task->task_sg[].
4152 * This is intended to be called during the completion path by TCM Core
4153 * when struct target_core_fabric_ops->check_task_sg_chaining is enabled.
4154 */
4155void transport_do_task_sg_chain(struct se_cmd *cmd)
4156{
ec98f782
AG
4157 struct scatterlist *sg_first = NULL;
4158 struct scatterlist *sg_prev = NULL;
4159 int sg_prev_nents = 0;
4160 struct scatterlist *sg;
c66ac9db 4161 struct se_task *task;
ec98f782 4162 u32 chained_nents = 0;
c66ac9db
NB
4163 int i;
4164
ec98f782
AG
4165 BUG_ON(!cmd->se_tfo->task_sg_chaining);
4166
c66ac9db
NB
4167 /*
4168 * Walk the struct se_task list and setup scatterlist chains
a1d8b49a 4169 * for each contiguously allocated struct se_task->task_sg[].
c66ac9db 4170 */
a1d8b49a 4171 list_for_each_entry(task, &cmd->t_task_list, t_list) {
ec98f782 4172 if (!task->task_sg)
c66ac9db
NB
4173 continue;
4174
ec98f782 4175 BUG_ON(!task->task_padded_sg);
c66ac9db 4176
ec98f782
AG
4177 if (!sg_first) {
4178 sg_first = task->task_sg;
4179 chained_nents = task->task_sg_num;
97868c89 4180 } else {
ec98f782
AG
4181 sg_chain(sg_prev, sg_prev_nents, task->task_sg);
4182 chained_nents += task->task_sg_num;
97868c89 4183 }
ec98f782
AG
4184
4185 sg_prev = task->task_sg;
4186 sg_prev_nents = task->task_sg_num;
c66ac9db
NB
4187 }
4188 /*
4189 * Setup the starting pointer and total t_tasks_sg_linked_no including
4190 * padding SGs for linking and to mark the end.
4191 */
a1d8b49a 4192 cmd->t_tasks_sg_chained = sg_first;
ec98f782 4193 cmd->t_tasks_sg_chained_no = chained_nents;
c66ac9db 4194
a1d8b49a
AG
4195 DEBUG_CMD_M("Setup cmd: %p cmd->t_tasks_sg_chained: %p and"
4196 " t_tasks_sg_chained_no: %u\n", cmd, cmd->t_tasks_sg_chained,
4197 cmd->t_tasks_sg_chained_no);
c66ac9db 4198
a1d8b49a
AG
4199 for_each_sg(cmd->t_tasks_sg_chained, sg,
4200 cmd->t_tasks_sg_chained_no, i) {
c66ac9db 4201
5951146d
AG
4202 DEBUG_CMD_M("SG[%d]: %p page: %p length: %d offset: %d\n",
4203 i, sg, sg_page(sg), sg->length, sg->offset);
c66ac9db
NB
4204 if (sg_is_chain(sg))
4205 DEBUG_CMD_M("SG: %p sg_is_chain=1\n", sg);
4206 if (sg_is_last(sg))
4207 DEBUG_CMD_M("SG: %p sg_is_last=1\n", sg);
4208 }
c66ac9db
NB
4209}
4210EXPORT_SYMBOL(transport_do_task_sg_chain);
4211
a1d8b49a
AG
4212/*
4213 * Break up cmd into chunks transport can handle
4214 */
ec98f782 4215static int transport_allocate_data_tasks(
c66ac9db
NB
4216 struct se_cmd *cmd,
4217 unsigned long long lba,
c66ac9db 4218 enum dma_data_direction data_direction,
ec98f782
AG
4219 struct scatterlist *sgl,
4220 unsigned int sgl_nents)
c66ac9db
NB
4221{
4222 unsigned char *cdb = NULL;
4223 struct se_task *task;
5951146d 4224 struct se_device *dev = cmd->se_dev;
ec98f782
AG
4225 unsigned long flags;
4226 sector_t sectors;
4227 int task_count;
4228 int i;
4229 sector_t dev_max_sectors = dev->se_sub_dev->se_dev_attrib.max_sectors;
4230 u32 sector_size = dev->se_sub_dev->se_dev_attrib.block_size;
4231 struct scatterlist *sg;
4232 struct scatterlist *cmd_sg;
a1d8b49a 4233
ec98f782
AG
4234 WARN_ON(cmd->data_length % sector_size);
4235 sectors = DIV_ROUND_UP(cmd->data_length, sector_size);
4236 task_count = DIV_ROUND_UP(sectors, dev_max_sectors);
c66ac9db 4237
ec98f782
AG
4238 cmd_sg = sgl;
4239 for (i = 0; i < task_count; i++) {
4240 unsigned int task_size;
4241 int count;
a1d8b49a 4242
c66ac9db 4243 task = transport_generic_get_task(cmd, data_direction);
a1d8b49a 4244 if (!task)
ec98f782 4245 return -ENOMEM;
c66ac9db 4246
c66ac9db 4247 task->task_lba = lba;
ec98f782
AG
4248 task->task_sectors = min(sectors, dev_max_sectors);
4249 task->task_size = task->task_sectors * sector_size;
c66ac9db 4250
e3d6f909 4251 cdb = dev->transport->get_cdb(task);
a1d8b49a
AG
4252 BUG_ON(!cdb);
4253
4254 memcpy(cdb, cmd->t_task_cdb,
4255 scsi_command_size(cmd->t_task_cdb));
4256
4257 /* Update new cdb with updated lba/sectors */
3a867205 4258 cmd->transport_split_cdb(task->task_lba, task->task_sectors, cdb);
c66ac9db
NB
4259
4260 /*
ec98f782
AG
4261 * Check if the fabric module driver is requesting that all
4262 * struct se_task->task_sg[] be chained together.. If so,
4263 * then allocate an extra padding SG entry for linking and
4264 * marking the end of the chained SGL.
4265 * Possibly over-allocate task sgl size by using cmd sgl size.
4266 * It's so much easier and only a waste when task_count > 1.
4267 * That is extremely rare.
c66ac9db 4268 */
ec98f782
AG
4269 task->task_sg_num = sgl_nents;
4270 if (cmd->se_tfo->task_sg_chaining) {
4271 task->task_sg_num++;
4272 task->task_padded_sg = 1;
4273 }
c66ac9db 4274
ec98f782
AG
4275 task->task_sg = kmalloc(sizeof(struct scatterlist) * \
4276 task->task_sg_num, GFP_KERNEL);
4277 if (!task->task_sg) {
4278 cmd->se_dev->transport->free_task(task);
4279 return -ENOMEM;
4280 }
4281
4282 sg_init_table(task->task_sg, task->task_sg_num);
c66ac9db 4283
ec98f782
AG
4284 task_size = task->task_size;
4285
4286 /* Build new sgl, only up to task_size */
4287 for_each_sg(task->task_sg, sg, task->task_sg_num, count) {
4288 if (cmd_sg->length > task_size)
4289 break;
4290
4291 *sg = *cmd_sg;
4292 task_size -= cmd_sg->length;
4293 cmd_sg = sg_next(cmd_sg);
c66ac9db 4294 }
c66ac9db 4295
ec98f782
AG
4296 lba += task->task_sectors;
4297 sectors -= task->task_sectors;
c66ac9db 4298
ec98f782
AG
4299 spin_lock_irqsave(&cmd->t_state_lock, flags);
4300 list_add_tail(&task->t_list, &cmd->t_task_list);
4301 spin_unlock_irqrestore(&cmd->t_state_lock, flags);
c66ac9db
NB
4302 }
4303
ec98f782 4304 return task_count;
c66ac9db
NB
4305}
4306
4307static int
ec98f782 4308transport_allocate_control_task(struct se_cmd *cmd)
c66ac9db 4309{
5951146d 4310 struct se_device *dev = cmd->se_dev;
c66ac9db
NB
4311 unsigned char *cdb;
4312 struct se_task *task;
ec98f782 4313 unsigned long flags;
c66ac9db
NB
4314
4315 task = transport_generic_get_task(cmd, cmd->data_direction);
4316 if (!task)
ec98f782 4317 return -ENOMEM;
c66ac9db 4318
e3d6f909 4319 cdb = dev->transport->get_cdb(task);
a1d8b49a
AG
4320 BUG_ON(!cdb);
4321 memcpy(cdb, cmd->t_task_cdb,
4322 scsi_command_size(cmd->t_task_cdb));
c66ac9db 4323
ec98f782
AG
4324 task->task_sg = kmalloc(sizeof(struct scatterlist) * cmd->t_data_nents,
4325 GFP_KERNEL);
4326 if (!task->task_sg) {
4327 cmd->se_dev->transport->free_task(task);
4328 return -ENOMEM;
4329 }
4330
4331 memcpy(task->task_sg, cmd->t_data_sg,
4332 sizeof(struct scatterlist) * cmd->t_data_nents);
c66ac9db 4333 task->task_size = cmd->data_length;
ec98f782 4334 task->task_sg_num = cmd->t_data_nents;
c66ac9db 4335
ec98f782
AG
4336 spin_lock_irqsave(&cmd->t_state_lock, flags);
4337 list_add_tail(&task->t_list, &cmd->t_task_list);
4338 spin_unlock_irqrestore(&cmd->t_state_lock, flags);
c66ac9db
NB
4339
4340 if (cmd->se_cmd_flags & SCF_SCSI_CONTROL_SG_IO_CDB) {
c66ac9db
NB
4341 if (dev->transport->map_task_SG)
4342 return dev->transport->map_task_SG(task);
4343 return 0;
c66ac9db
NB
4344 } else if (cmd->se_cmd_flags & SCF_SCSI_NON_DATA_CDB) {
4345 if (dev->transport->cdb_none)
4346 return dev->transport->cdb_none(task);
4347 return 0;
4348 } else {
4349 BUG();
ec98f782
AG
4350 return -ENOMEM;
4351 }
4352}
4353
4354static u32 transport_allocate_tasks(
4355 struct se_cmd *cmd,
4356 unsigned long long lba,
4357 enum dma_data_direction data_direction,
4358 struct scatterlist *sgl,
4359 unsigned int sgl_nents)
4360{
4361 int ret;
4362
4363 if (cmd->se_cmd_flags & SCF_SCSI_DATA_SG_IO_CDB) {
4364 return transport_allocate_data_tasks(cmd, lba, data_direction,
4365 sgl, sgl_nents);
4366 } else {
4367 ret = transport_allocate_control_task(cmd);
4368 if (ret < 0)
4369 return ret;
4370 else
4371 return 1;
c66ac9db
NB
4372 }
4373}
4374
ec98f782 4375
c66ac9db
NB
4376/* transport_generic_new_cmd(): Called from transport_processing_thread()
4377 *
4378 * Allocate storage transport resources from a set of values predefined
4379 * by transport_generic_cmd_sequencer() from the iSCSI Target RX process.
4380 * Any non zero return here is treated as an "out of resource' op here.
4381 */
4382 /*
4383 * Generate struct se_task(s) and/or their payloads for this CDB.
4384 */
a1d8b49a 4385int transport_generic_new_cmd(struct se_cmd *cmd)
c66ac9db 4386{
c66ac9db 4387 struct se_task *task;
5951146d 4388 struct se_device *dev = cmd->se_dev;
c66ac9db
NB
4389 int ret = 0;
4390
4391 /*
4392 * Determine is the TCM fabric module has already allocated physical
4393 * memory, and is directly calling transport_generic_map_mem_to_cmd()
ec98f782 4394 * beforehand.
c66ac9db 4395 */
ec98f782
AG
4396 if (!(cmd->se_cmd_flags & SCF_PASSTHROUGH_SG_TO_MEM_NOALLOC) &&
4397 cmd->data_length) {
05d1c7c0 4398 ret = transport_generic_get_mem(cmd);
c66ac9db
NB
4399 if (ret < 0)
4400 return ret;
4401 }
4402
c66ac9db
NB
4403 ret = transport_new_cmd_obj(cmd);
4404 if (ret < 0)
4405 return ret;
4406
ec98f782
AG
4407 list_for_each_entry(task, &cmd->t_task_list, t_list) {
4408 if (atomic_read(&task->task_sent))
4409 continue;
4410 if (!dev->transport->map_task_SG)
4411 continue;
c66ac9db 4412
ec98f782 4413 ret = dev->transport->map_task_SG(task);
c66ac9db
NB
4414 if (ret < 0)
4415 return ret;
4416 }
4417
4418 /*
a1d8b49a 4419 * For WRITEs, let the fabric know its buffer is ready..
c66ac9db
NB
4420 * This WRITE struct se_cmd (and all of its associated struct se_task's)
4421 * will be added to the struct se_device execution queue after its WRITE
4422 * data has arrived. (ie: It gets handled by the transport processing
4423 * thread a second time)
4424 */
4425 if (cmd->data_direction == DMA_TO_DEVICE) {
4426 transport_add_tasks_to_state_queue(cmd);
4427 return transport_generic_write_pending(cmd);
4428 }
4429 /*
4430 * Everything else but a WRITE, add the struct se_cmd's struct se_task's
4431 * to the execution queue.
4432 */
4433 transport_execute_tasks(cmd);
4434 return 0;
4435}
a1d8b49a 4436EXPORT_SYMBOL(transport_generic_new_cmd);
c66ac9db
NB
4437
4438/* transport_generic_process_write():
4439 *
4440 *
4441 */
4442void transport_generic_process_write(struct se_cmd *cmd)
4443{
4444#if 0
4445 /*
4446 * Copy SCSI Presented DTL sector(s) from received buffers allocated to
4447 * original EDTL
4448 */
4449 if (cmd->se_cmd_flags & SCF_UNDERFLOW_BIT) {
a1d8b49a 4450 if (!cmd->t_tasks_se_num) {
c66ac9db 4451 unsigned char *dst, *buf =
a1d8b49a 4452 (unsigned char *)cmd->t_task_buf;
c66ac9db
NB
4453
4454 dst = kzalloc(cmd->cmd_spdtl), GFP_KERNEL);
4455 if (!(dst)) {
4456 printk(KERN_ERR "Unable to allocate memory for"
4457 " WRITE underflow\n");
4458 transport_generic_request_failure(cmd, NULL,
4459 PYX_TRANSPORT_REQ_TOO_MANY_SECTORS, 1);
4460 return;
4461 }
4462 memcpy(dst, buf, cmd->cmd_spdtl);
4463
a1d8b49a
AG
4464 kfree(cmd->t_task_buf);
4465 cmd->t_task_buf = dst;
c66ac9db
NB
4466 } else {
4467 struct scatterlist *sg =
a1d8b49a 4468 (struct scatterlist *sg)cmd->t_task_buf;
c66ac9db
NB
4469 struct scatterlist *orig_sg;
4470
4471 orig_sg = kzalloc(sizeof(struct scatterlist) *
a1d8b49a 4472 cmd->t_tasks_se_num,
c66ac9db
NB
4473 GFP_KERNEL))) {
4474 if (!(orig_sg)) {
4475 printk(KERN_ERR "Unable to allocate memory"
4476 " for WRITE underflow\n");
4477 transport_generic_request_failure(cmd, NULL,
4478 PYX_TRANSPORT_REQ_TOO_MANY_SECTORS, 1);
4479 return;
4480 }
4481
a1d8b49a 4482 memcpy(orig_sg, cmd->t_task_buf,
c66ac9db 4483 sizeof(struct scatterlist) *
a1d8b49a 4484 cmd->t_tasks_se_num);
c66ac9db
NB
4485
4486 cmd->data_length = cmd->cmd_spdtl;
4487 /*
4488 * FIXME, clear out original struct se_task and state
4489 * information.
4490 */
4491 if (transport_generic_new_cmd(cmd) < 0) {
4492 transport_generic_request_failure(cmd, NULL,
4493 PYX_TRANSPORT_REQ_TOO_MANY_SECTORS, 1);
4494 kfree(orig_sg);
4495 return;
4496 }
4497
4498 transport_memcpy_write_sg(cmd, orig_sg);
4499 }
4500 }
4501#endif
4502 transport_execute_tasks(cmd);
4503}
4504EXPORT_SYMBOL(transport_generic_process_write);
4505
07bde79a
NB
4506static int transport_write_pending_qf(struct se_cmd *cmd)
4507{
4508 return cmd->se_tfo->write_pending(cmd);
4509}
4510
c66ac9db
NB
4511/* transport_generic_write_pending():
4512 *
4513 *
4514 */
4515static int transport_generic_write_pending(struct se_cmd *cmd)
4516{
4517 unsigned long flags;
4518 int ret;
4519
a1d8b49a 4520 spin_lock_irqsave(&cmd->t_state_lock, flags);
c66ac9db 4521 cmd->t_state = TRANSPORT_WRITE_PENDING;
a1d8b49a 4522 spin_unlock_irqrestore(&cmd->t_state_lock, flags);
07bde79a
NB
4523
4524 if (cmd->transport_qf_callback) {
4525 ret = cmd->transport_qf_callback(cmd);
4526 if (ret == -EAGAIN)
4527 goto queue_full;
4528 else if (ret < 0)
4529 return ret;
4530
4531 cmd->transport_qf_callback = NULL;
4532 return 0;
4533 }
05d1c7c0 4534
c66ac9db
NB
4535 /*
4536 * Clear the se_cmd for WRITE_PENDING status in order to set
a1d8b49a 4537 * cmd->t_transport_active=0 so that transport_generic_handle_data
c66ac9db 4538 * can be called from HW target mode interrupt code. This is safe
e3d6f909 4539 * to be called with transport_off=1 before the cmd->se_tfo->write_pending
c66ac9db
NB
4540 * because the se_cmd->se_lun pointer is not being cleared.
4541 */
4542 transport_cmd_check_stop(cmd, 1, 0);
4543
4544 /*
4545 * Call the fabric write_pending function here to let the
4546 * frontend know that WRITE buffers are ready.
4547 */
e3d6f909 4548 ret = cmd->se_tfo->write_pending(cmd);
07bde79a
NB
4549 if (ret == -EAGAIN)
4550 goto queue_full;
4551 else if (ret < 0)
c66ac9db
NB
4552 return ret;
4553
4554 return PYX_TRANSPORT_WRITE_PENDING;
07bde79a
NB
4555
4556queue_full:
4557 printk(KERN_INFO "Handling write_pending QUEUE__FULL: se_cmd: %p\n", cmd);
4558 cmd->t_state = TRANSPORT_COMPLETE_QF_WP;
4559 transport_handle_queue_full(cmd, cmd->se_dev,
4560 transport_write_pending_qf);
4561 return ret;
c66ac9db
NB
4562}
4563
35462975 4564void transport_release_cmd(struct se_cmd *cmd)
c66ac9db 4565{
e3d6f909 4566 BUG_ON(!cmd->se_tfo);
c66ac9db
NB
4567
4568 transport_free_se_cmd(cmd);
35462975 4569 cmd->se_tfo->release_cmd(cmd);
c66ac9db 4570}
35462975 4571EXPORT_SYMBOL(transport_release_cmd);
c66ac9db
NB
4572
4573/* transport_generic_free_cmd():
4574 *
4575 * Called from processing frontend to release storage engine resources
4576 */
4577void transport_generic_free_cmd(
4578 struct se_cmd *cmd,
4579 int wait_for_tasks,
c66ac9db
NB
4580 int session_reinstatement)
4581{
5951146d 4582 if (!(cmd->se_cmd_flags & SCF_SE_LUN_CMD))
35462975 4583 transport_release_cmd(cmd);
c66ac9db
NB
4584 else {
4585 core_dec_lacl_count(cmd->se_sess->se_node_acl, cmd);
4586
e3d6f909 4587 if (cmd->se_lun) {
c66ac9db
NB
4588#if 0
4589 printk(KERN_INFO "cmd: %p ITT: 0x%08x contains"
e3d6f909
AG
4590 " cmd->se_lun\n", cmd,
4591 cmd->se_tfo->get_task_tag(cmd));
c66ac9db
NB
4592#endif
4593 transport_lun_remove_cmd(cmd);
4594 }
4595
4596 if (wait_for_tasks && cmd->transport_wait_for_tasks)
4597 cmd->transport_wait_for_tasks(cmd, 0, 0);
4598
f4366772
NB
4599 transport_free_dev_tasks(cmd);
4600
35462975 4601 transport_generic_remove(cmd, session_reinstatement);
c66ac9db
NB
4602 }
4603}
4604EXPORT_SYMBOL(transport_generic_free_cmd);
4605
4606static void transport_nop_wait_for_tasks(
4607 struct se_cmd *cmd,
4608 int remove_cmd,
4609 int session_reinstatement)
4610{
4611 return;
4612}
4613
4614/* transport_lun_wait_for_tasks():
4615 *
4616 * Called from ConfigFS context to stop the passed struct se_cmd to allow
4617 * an struct se_lun to be successfully shutdown.
4618 */
4619static int transport_lun_wait_for_tasks(struct se_cmd *cmd, struct se_lun *lun)
4620{
4621 unsigned long flags;
4622 int ret;
4623 /*
4624 * If the frontend has already requested this struct se_cmd to
4625 * be stopped, we can safely ignore this struct se_cmd.
4626 */
a1d8b49a
AG
4627 spin_lock_irqsave(&cmd->t_state_lock, flags);
4628 if (atomic_read(&cmd->t_transport_stop)) {
4629 atomic_set(&cmd->transport_lun_stop, 0);
c66ac9db 4630 DEBUG_TRANSPORT_S("ConfigFS ITT[0x%08x] - t_transport_stop =="
e3d6f909 4631 " TRUE, skipping\n", cmd->se_tfo->get_task_tag(cmd));
a1d8b49a 4632 spin_unlock_irqrestore(&cmd->t_state_lock, flags);
c66ac9db 4633 transport_cmd_check_stop(cmd, 1, 0);
e3d6f909 4634 return -EPERM;
c66ac9db 4635 }
a1d8b49a
AG
4636 atomic_set(&cmd->transport_lun_fe_stop, 1);
4637 spin_unlock_irqrestore(&cmd->t_state_lock, flags);
c66ac9db 4638
5951146d 4639 wake_up_interruptible(&cmd->se_dev->dev_queue_obj.thread_wq);
c66ac9db
NB
4640
4641 ret = transport_stop_tasks_for_cmd(cmd);
4642
4643 DEBUG_TRANSPORT_S("ConfigFS: cmd: %p t_task_cdbs: %d stop tasks ret:"
a1d8b49a 4644 " %d\n", cmd, cmd->t_task_cdbs, ret);
c66ac9db
NB
4645 if (!ret) {
4646 DEBUG_TRANSPORT_S("ConfigFS: ITT[0x%08x] - stopping cmd....\n",
e3d6f909 4647 cmd->se_tfo->get_task_tag(cmd));
a1d8b49a 4648 wait_for_completion(&cmd->transport_lun_stop_comp);
c66ac9db 4649 DEBUG_TRANSPORT_S("ConfigFS: ITT[0x%08x] - stopped cmd....\n",
e3d6f909 4650 cmd->se_tfo->get_task_tag(cmd));
c66ac9db 4651 }
5951146d 4652 transport_remove_cmd_from_queue(cmd, &cmd->se_dev->dev_queue_obj);
c66ac9db
NB
4653
4654 return 0;
4655}
4656
4657/* #define DEBUG_CLEAR_LUN */
4658#ifdef DEBUG_CLEAR_LUN
4659#define DEBUG_CLEAR_L(x...) printk(KERN_INFO x)
4660#else
4661#define DEBUG_CLEAR_L(x...)
4662#endif
4663
4664static void __transport_clear_lun_from_sessions(struct se_lun *lun)
4665{
4666 struct se_cmd *cmd = NULL;
4667 unsigned long lun_flags, cmd_flags;
4668 /*
4669 * Do exception processing and return CHECK_CONDITION status to the
4670 * Initiator Port.
4671 */
4672 spin_lock_irqsave(&lun->lun_cmd_lock, lun_flags);
5951146d
AG
4673 while (!list_empty(&lun->lun_cmd_list)) {
4674 cmd = list_first_entry(&lun->lun_cmd_list,
4675 struct se_cmd, se_lun_node);
4676 list_del(&cmd->se_lun_node);
4677
a1d8b49a 4678 atomic_set(&cmd->transport_lun_active, 0);
c66ac9db
NB
4679 /*
4680 * This will notify iscsi_target_transport.c:
4681 * transport_cmd_check_stop() that a LUN shutdown is in
4682 * progress for the iscsi_cmd_t.
4683 */
a1d8b49a
AG
4684 spin_lock(&cmd->t_state_lock);
4685 DEBUG_CLEAR_L("SE_LUN[%d] - Setting cmd->transport"
c66ac9db 4686 "_lun_stop for ITT: 0x%08x\n",
e3d6f909
AG
4687 cmd->se_lun->unpacked_lun,
4688 cmd->se_tfo->get_task_tag(cmd));
a1d8b49a
AG
4689 atomic_set(&cmd->transport_lun_stop, 1);
4690 spin_unlock(&cmd->t_state_lock);
c66ac9db
NB
4691
4692 spin_unlock_irqrestore(&lun->lun_cmd_lock, lun_flags);
4693
e3d6f909 4694 if (!(cmd->se_lun)) {
c66ac9db 4695 printk(KERN_ERR "ITT: 0x%08x, [i,t]_state: %u/%u\n",
e3d6f909
AG
4696 cmd->se_tfo->get_task_tag(cmd),
4697 cmd->se_tfo->get_cmd_state(cmd), cmd->t_state);
c66ac9db
NB
4698 BUG();
4699 }
4700 /*
4701 * If the Storage engine still owns the iscsi_cmd_t, determine
4702 * and/or stop its context.
4703 */
4704 DEBUG_CLEAR_L("SE_LUN[%d] - ITT: 0x%08x before transport"
e3d6f909
AG
4705 "_lun_wait_for_tasks()\n", cmd->se_lun->unpacked_lun,
4706 cmd->se_tfo->get_task_tag(cmd));
c66ac9db 4707
e3d6f909 4708 if (transport_lun_wait_for_tasks(cmd, cmd->se_lun) < 0) {
c66ac9db
NB
4709 spin_lock_irqsave(&lun->lun_cmd_lock, lun_flags);
4710 continue;
4711 }
4712
4713 DEBUG_CLEAR_L("SE_LUN[%d] - ITT: 0x%08x after transport_lun"
4714 "_wait_for_tasks(): SUCCESS\n",
e3d6f909
AG
4715 cmd->se_lun->unpacked_lun,
4716 cmd->se_tfo->get_task_tag(cmd));
c66ac9db 4717
a1d8b49a
AG
4718 spin_lock_irqsave(&cmd->t_state_lock, cmd_flags);
4719 if (!(atomic_read(&cmd->transport_dev_active))) {
4720 spin_unlock_irqrestore(&cmd->t_state_lock, cmd_flags);
c66ac9db
NB
4721 goto check_cond;
4722 }
a1d8b49a 4723 atomic_set(&cmd->transport_dev_active, 0);
c66ac9db 4724 transport_all_task_dev_remove_state(cmd);
a1d8b49a 4725 spin_unlock_irqrestore(&cmd->t_state_lock, cmd_flags);
c66ac9db
NB
4726
4727 transport_free_dev_tasks(cmd);
4728 /*
4729 * The Storage engine stopped this struct se_cmd before it was
4730 * send to the fabric frontend for delivery back to the
4731 * Initiator Node. Return this SCSI CDB back with an
4732 * CHECK_CONDITION status.
4733 */
4734check_cond:
4735 transport_send_check_condition_and_sense(cmd,
4736 TCM_NON_EXISTENT_LUN, 0);
4737 /*
4738 * If the fabric frontend is waiting for this iscsi_cmd_t to
4739 * be released, notify the waiting thread now that LU has
4740 * finished accessing it.
4741 */
a1d8b49a
AG
4742 spin_lock_irqsave(&cmd->t_state_lock, cmd_flags);
4743 if (atomic_read(&cmd->transport_lun_fe_stop)) {
c66ac9db
NB
4744 DEBUG_CLEAR_L("SE_LUN[%d] - Detected FE stop for"
4745 " struct se_cmd: %p ITT: 0x%08x\n",
4746 lun->unpacked_lun,
e3d6f909 4747 cmd, cmd->se_tfo->get_task_tag(cmd));
c66ac9db 4748
a1d8b49a 4749 spin_unlock_irqrestore(&cmd->t_state_lock,
c66ac9db
NB
4750 cmd_flags);
4751 transport_cmd_check_stop(cmd, 1, 0);
a1d8b49a 4752 complete(&cmd->transport_lun_fe_stop_comp);
c66ac9db
NB
4753 spin_lock_irqsave(&lun->lun_cmd_lock, lun_flags);
4754 continue;
4755 }
4756 DEBUG_CLEAR_L("SE_LUN[%d] - ITT: 0x%08x finished processing\n",
e3d6f909 4757 lun->unpacked_lun, cmd->se_tfo->get_task_tag(cmd));
c66ac9db 4758
a1d8b49a 4759 spin_unlock_irqrestore(&cmd->t_state_lock, cmd_flags);
c66ac9db
NB
4760 spin_lock_irqsave(&lun->lun_cmd_lock, lun_flags);
4761 }
4762 spin_unlock_irqrestore(&lun->lun_cmd_lock, lun_flags);
4763}
4764
4765static int transport_clear_lun_thread(void *p)
4766{
4767 struct se_lun *lun = (struct se_lun *)p;
4768
4769 __transport_clear_lun_from_sessions(lun);
4770 complete(&lun->lun_shutdown_comp);
4771
4772 return 0;
4773}
4774
4775int transport_clear_lun_from_sessions(struct se_lun *lun)
4776{
4777 struct task_struct *kt;
4778
5951146d 4779 kt = kthread_run(transport_clear_lun_thread, lun,
c66ac9db
NB
4780 "tcm_cl_%u", lun->unpacked_lun);
4781 if (IS_ERR(kt)) {
4782 printk(KERN_ERR "Unable to start clear_lun thread\n");
e3d6f909 4783 return PTR_ERR(kt);
c66ac9db
NB
4784 }
4785 wait_for_completion(&lun->lun_shutdown_comp);
4786
4787 return 0;
4788}
4789
4790/* transport_generic_wait_for_tasks():
4791 *
4792 * Called from frontend or passthrough context to wait for storage engine
4793 * to pause and/or release frontend generated struct se_cmd.
4794 */
4795static void transport_generic_wait_for_tasks(
4796 struct se_cmd *cmd,
4797 int remove_cmd,
4798 int session_reinstatement)
4799{
4800 unsigned long flags;
4801
4802 if (!(cmd->se_cmd_flags & SCF_SE_LUN_CMD) && !(cmd->se_tmr_req))
4803 return;
4804
a1d8b49a 4805 spin_lock_irqsave(&cmd->t_state_lock, flags);
c66ac9db
NB
4806 /*
4807 * If we are already stopped due to an external event (ie: LUN shutdown)
4808 * sleep until the connection can have the passed struct se_cmd back.
a1d8b49a 4809 * The cmd->transport_lun_stopped_sem will be upped by
c66ac9db
NB
4810 * transport_clear_lun_from_sessions() once the ConfigFS context caller
4811 * has completed its operation on the struct se_cmd.
4812 */
a1d8b49a 4813 if (atomic_read(&cmd->transport_lun_stop)) {
c66ac9db
NB
4814
4815 DEBUG_TRANSPORT_S("wait_for_tasks: Stopping"
e3d6f909 4816 " wait_for_completion(&cmd->t_tasktransport_lun_fe"
c66ac9db 4817 "_stop_comp); for ITT: 0x%08x\n",
e3d6f909 4818 cmd->se_tfo->get_task_tag(cmd));
c66ac9db
NB
4819 /*
4820 * There is a special case for WRITES where a FE exception +
4821 * LUN shutdown means ConfigFS context is still sleeping on
4822 * transport_lun_stop_comp in transport_lun_wait_for_tasks().
4823 * We go ahead and up transport_lun_stop_comp just to be sure
4824 * here.
4825 */
a1d8b49a
AG
4826 spin_unlock_irqrestore(&cmd->t_state_lock, flags);
4827 complete(&cmd->transport_lun_stop_comp);
4828 wait_for_completion(&cmd->transport_lun_fe_stop_comp);
4829 spin_lock_irqsave(&cmd->t_state_lock, flags);
c66ac9db
NB
4830
4831 transport_all_task_dev_remove_state(cmd);
4832 /*
4833 * At this point, the frontend who was the originator of this
4834 * struct se_cmd, now owns the structure and can be released through
4835 * normal means below.
4836 */
4837 DEBUG_TRANSPORT_S("wait_for_tasks: Stopped"
e3d6f909 4838 " wait_for_completion(&cmd->t_tasktransport_lun_fe_"
c66ac9db 4839 "stop_comp); for ITT: 0x%08x\n",
e3d6f909 4840 cmd->se_tfo->get_task_tag(cmd));
c66ac9db 4841
a1d8b49a 4842 atomic_set(&cmd->transport_lun_stop, 0);
c66ac9db 4843 }
a1d8b49a
AG
4844 if (!atomic_read(&cmd->t_transport_active) ||
4845 atomic_read(&cmd->t_transport_aborted))
c66ac9db
NB
4846 goto remove;
4847
a1d8b49a 4848 atomic_set(&cmd->t_transport_stop, 1);
c66ac9db
NB
4849
4850 DEBUG_TRANSPORT_S("wait_for_tasks: Stopping %p ITT: 0x%08x"
4851 " i_state: %d, t_state/def_t_state: %d/%d, t_transport_stop"
e3d6f909
AG
4852 " = TRUE\n", cmd, cmd->se_tfo->get_task_tag(cmd),
4853 cmd->se_tfo->get_cmd_state(cmd), cmd->t_state,
c66ac9db
NB
4854 cmd->deferred_t_state);
4855
a1d8b49a 4856 spin_unlock_irqrestore(&cmd->t_state_lock, flags);
c66ac9db 4857
5951146d 4858 wake_up_interruptible(&cmd->se_dev->dev_queue_obj.thread_wq);
c66ac9db 4859
a1d8b49a 4860 wait_for_completion(&cmd->t_transport_stop_comp);
c66ac9db 4861
a1d8b49a
AG
4862 spin_lock_irqsave(&cmd->t_state_lock, flags);
4863 atomic_set(&cmd->t_transport_active, 0);
4864 atomic_set(&cmd->t_transport_stop, 0);
c66ac9db
NB
4865
4866 DEBUG_TRANSPORT_S("wait_for_tasks: Stopped wait_for_compltion("
a1d8b49a 4867 "&cmd->t_transport_stop_comp) for ITT: 0x%08x\n",
e3d6f909 4868 cmd->se_tfo->get_task_tag(cmd));
c66ac9db 4869remove:
a1d8b49a 4870 spin_unlock_irqrestore(&cmd->t_state_lock, flags);
c66ac9db
NB
4871 if (!remove_cmd)
4872 return;
4873
35462975 4874 transport_generic_free_cmd(cmd, 0, session_reinstatement);
c66ac9db
NB
4875}
4876
4877static int transport_get_sense_codes(
4878 struct se_cmd *cmd,
4879 u8 *asc,
4880 u8 *ascq)
4881{
4882 *asc = cmd->scsi_asc;
4883 *ascq = cmd->scsi_ascq;
4884
4885 return 0;
4886}
4887
4888static int transport_set_sense_codes(
4889 struct se_cmd *cmd,
4890 u8 asc,
4891 u8 ascq)
4892{
4893 cmd->scsi_asc = asc;
4894 cmd->scsi_ascq = ascq;
4895
4896 return 0;
4897}
4898
4899int transport_send_check_condition_and_sense(
4900 struct se_cmd *cmd,
4901 u8 reason,
4902 int from_transport)
4903{
4904 unsigned char *buffer = cmd->sense_buffer;
4905 unsigned long flags;
4906 int offset;
4907 u8 asc = 0, ascq = 0;
4908
a1d8b49a 4909 spin_lock_irqsave(&cmd->t_state_lock, flags);
c66ac9db 4910 if (cmd->se_cmd_flags & SCF_SENT_CHECK_CONDITION) {
a1d8b49a 4911 spin_unlock_irqrestore(&cmd->t_state_lock, flags);
c66ac9db
NB
4912 return 0;
4913 }
4914 cmd->se_cmd_flags |= SCF_SENT_CHECK_CONDITION;
a1d8b49a 4915 spin_unlock_irqrestore(&cmd->t_state_lock, flags);
c66ac9db
NB
4916
4917 if (!reason && from_transport)
4918 goto after_reason;
4919
4920 if (!from_transport)
4921 cmd->se_cmd_flags |= SCF_EMULATED_TASK_SENSE;
4922 /*
4923 * Data Segment and SenseLength of the fabric response PDU.
4924 *
4925 * TRANSPORT_SENSE_BUFFER is now set to SCSI_SENSE_BUFFERSIZE
4926 * from include/scsi/scsi_cmnd.h
4927 */
e3d6f909 4928 offset = cmd->se_tfo->set_fabric_sense_len(cmd,
c66ac9db
NB
4929 TRANSPORT_SENSE_BUFFER);
4930 /*
4931 * Actual SENSE DATA, see SPC-3 7.23.2 SPC_SENSE_KEY_OFFSET uses
4932 * SENSE KEY values from include/scsi/scsi.h
4933 */
4934 switch (reason) {
4935 case TCM_NON_EXISTENT_LUN:
4936 case TCM_UNSUPPORTED_SCSI_OPCODE:
4937 case TCM_SECTOR_COUNT_TOO_MANY:
4938 /* CURRENT ERROR */
4939 buffer[offset] = 0x70;
4940 /* ILLEGAL REQUEST */
4941 buffer[offset+SPC_SENSE_KEY_OFFSET] = ILLEGAL_REQUEST;
4942 /* INVALID COMMAND OPERATION CODE */
4943 buffer[offset+SPC_ASC_KEY_OFFSET] = 0x20;
4944 break;
4945 case TCM_UNKNOWN_MODE_PAGE:
4946 /* CURRENT ERROR */
4947 buffer[offset] = 0x70;
4948 /* ILLEGAL REQUEST */
4949 buffer[offset+SPC_SENSE_KEY_OFFSET] = ILLEGAL_REQUEST;
4950 /* INVALID FIELD IN CDB */
4951 buffer[offset+SPC_ASC_KEY_OFFSET] = 0x24;
4952 break;
4953 case TCM_CHECK_CONDITION_ABORT_CMD:
4954 /* CURRENT ERROR */
4955 buffer[offset] = 0x70;
4956 /* ABORTED COMMAND */
4957 buffer[offset+SPC_SENSE_KEY_OFFSET] = ABORTED_COMMAND;
4958 /* BUS DEVICE RESET FUNCTION OCCURRED */
4959 buffer[offset+SPC_ASC_KEY_OFFSET] = 0x29;
4960 buffer[offset+SPC_ASCQ_KEY_OFFSET] = 0x03;
4961 break;
4962 case TCM_INCORRECT_AMOUNT_OF_DATA:
4963 /* CURRENT ERROR */
4964 buffer[offset] = 0x70;
4965 /* ABORTED COMMAND */
4966 buffer[offset+SPC_SENSE_KEY_OFFSET] = ABORTED_COMMAND;
4967 /* WRITE ERROR */
4968 buffer[offset+SPC_ASC_KEY_OFFSET] = 0x0c;
4969 /* NOT ENOUGH UNSOLICITED DATA */
4970 buffer[offset+SPC_ASCQ_KEY_OFFSET] = 0x0d;
4971 break;
4972 case TCM_INVALID_CDB_FIELD:
4973 /* CURRENT ERROR */
4974 buffer[offset] = 0x70;
4975 /* ABORTED COMMAND */
4976 buffer[offset+SPC_SENSE_KEY_OFFSET] = ABORTED_COMMAND;
4977 /* INVALID FIELD IN CDB */
4978 buffer[offset+SPC_ASC_KEY_OFFSET] = 0x24;
4979 break;
4980 case TCM_INVALID_PARAMETER_LIST:
4981 /* CURRENT ERROR */
4982 buffer[offset] = 0x70;
4983 /* ABORTED COMMAND */
4984 buffer[offset+SPC_SENSE_KEY_OFFSET] = ABORTED_COMMAND;
4985 /* INVALID FIELD IN PARAMETER LIST */
4986 buffer[offset+SPC_ASC_KEY_OFFSET] = 0x26;
4987 break;
4988 case TCM_UNEXPECTED_UNSOLICITED_DATA:
4989 /* CURRENT ERROR */
4990 buffer[offset] = 0x70;
4991 /* ABORTED COMMAND */
4992 buffer[offset+SPC_SENSE_KEY_OFFSET] = ABORTED_COMMAND;
4993 /* WRITE ERROR */
4994 buffer[offset+SPC_ASC_KEY_OFFSET] = 0x0c;
4995 /* UNEXPECTED_UNSOLICITED_DATA */
4996 buffer[offset+SPC_ASCQ_KEY_OFFSET] = 0x0c;
4997 break;
4998 case TCM_SERVICE_CRC_ERROR:
4999 /* CURRENT ERROR */
5000 buffer[offset] = 0x70;
5001 /* ABORTED COMMAND */
5002 buffer[offset+SPC_SENSE_KEY_OFFSET] = ABORTED_COMMAND;
5003 /* PROTOCOL SERVICE CRC ERROR */
5004 buffer[offset+SPC_ASC_KEY_OFFSET] = 0x47;
5005 /* N/A */
5006 buffer[offset+SPC_ASCQ_KEY_OFFSET] = 0x05;
5007 break;
5008 case TCM_SNACK_REJECTED:
5009 /* CURRENT ERROR */
5010 buffer[offset] = 0x70;
5011 /* ABORTED COMMAND */
5012 buffer[offset+SPC_SENSE_KEY_OFFSET] = ABORTED_COMMAND;
5013 /* READ ERROR */
5014 buffer[offset+SPC_ASC_KEY_OFFSET] = 0x11;
5015 /* FAILED RETRANSMISSION REQUEST */
5016 buffer[offset+SPC_ASCQ_KEY_OFFSET] = 0x13;
5017 break;
5018 case TCM_WRITE_PROTECTED:
5019 /* CURRENT ERROR */
5020 buffer[offset] = 0x70;
5021 /* DATA PROTECT */
5022 buffer[offset+SPC_SENSE_KEY_OFFSET] = DATA_PROTECT;
5023 /* WRITE PROTECTED */
5024 buffer[offset+SPC_ASC_KEY_OFFSET] = 0x27;
5025 break;
5026 case TCM_CHECK_CONDITION_UNIT_ATTENTION:
5027 /* CURRENT ERROR */
5028 buffer[offset] = 0x70;
5029 /* UNIT ATTENTION */
5030 buffer[offset+SPC_SENSE_KEY_OFFSET] = UNIT_ATTENTION;
5031 core_scsi3_ua_for_check_condition(cmd, &asc, &ascq);
5032 buffer[offset+SPC_ASC_KEY_OFFSET] = asc;
5033 buffer[offset+SPC_ASCQ_KEY_OFFSET] = ascq;
5034 break;
5035 case TCM_CHECK_CONDITION_NOT_READY:
5036 /* CURRENT ERROR */
5037 buffer[offset] = 0x70;
5038 /* Not Ready */
5039 buffer[offset+SPC_SENSE_KEY_OFFSET] = NOT_READY;
5040 transport_get_sense_codes(cmd, &asc, &ascq);
5041 buffer[offset+SPC_ASC_KEY_OFFSET] = asc;
5042 buffer[offset+SPC_ASCQ_KEY_OFFSET] = ascq;
5043 break;
5044 case TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE:
5045 default:
5046 /* CURRENT ERROR */
5047 buffer[offset] = 0x70;
5048 /* ILLEGAL REQUEST */
5049 buffer[offset+SPC_SENSE_KEY_OFFSET] = ILLEGAL_REQUEST;
5050 /* LOGICAL UNIT COMMUNICATION FAILURE */
5051 buffer[offset+SPC_ASC_KEY_OFFSET] = 0x80;
5052 break;
5053 }
5054 /*
5055 * This code uses linux/include/scsi/scsi.h SAM status codes!
5056 */
5057 cmd->scsi_status = SAM_STAT_CHECK_CONDITION;
5058 /*
5059 * Automatically padded, this value is encoded in the fabric's
5060 * data_length response PDU containing the SCSI defined sense data.
5061 */
5062 cmd->scsi_sense_length = TRANSPORT_SENSE_BUFFER + offset;
5063
5064after_reason:
07bde79a 5065 return cmd->se_tfo->queue_status(cmd);
c66ac9db
NB
5066}
5067EXPORT_SYMBOL(transport_send_check_condition_and_sense);
5068
5069int transport_check_aborted_status(struct se_cmd *cmd, int send_status)
5070{
5071 int ret = 0;
5072
a1d8b49a 5073 if (atomic_read(&cmd->t_transport_aborted) != 0) {
c66ac9db
NB
5074 if (!(send_status) ||
5075 (cmd->se_cmd_flags & SCF_SENT_DELAYED_TAS))
5076 return 1;
5077#if 0
5078 printk(KERN_INFO "Sending delayed SAM_STAT_TASK_ABORTED"
5079 " status for CDB: 0x%02x ITT: 0x%08x\n",
a1d8b49a 5080 cmd->t_task_cdb[0],
e3d6f909 5081 cmd->se_tfo->get_task_tag(cmd));
c66ac9db
NB
5082#endif
5083 cmd->se_cmd_flags |= SCF_SENT_DELAYED_TAS;
e3d6f909 5084 cmd->se_tfo->queue_status(cmd);
c66ac9db
NB
5085 ret = 1;
5086 }
5087 return ret;
5088}
5089EXPORT_SYMBOL(transport_check_aborted_status);
5090
5091void transport_send_task_abort(struct se_cmd *cmd)
5092{
5093 /*
5094 * If there are still expected incoming fabric WRITEs, we wait
5095 * until until they have completed before sending a TASK_ABORTED
5096 * response. This response with TASK_ABORTED status will be
5097 * queued back to fabric module by transport_check_aborted_status().
5098 */
5099 if (cmd->data_direction == DMA_TO_DEVICE) {
e3d6f909 5100 if (cmd->se_tfo->write_pending_status(cmd) != 0) {
a1d8b49a 5101 atomic_inc(&cmd->t_transport_aborted);
c66ac9db
NB
5102 smp_mb__after_atomic_inc();
5103 cmd->scsi_status = SAM_STAT_TASK_ABORTED;
5104 transport_new_cmd_failure(cmd);
5105 return;
5106 }
5107 }
5108 cmd->scsi_status = SAM_STAT_TASK_ABORTED;
5109#if 0
5110 printk(KERN_INFO "Setting SAM_STAT_TASK_ABORTED status for CDB: 0x%02x,"
a1d8b49a 5111 " ITT: 0x%08x\n", cmd->t_task_cdb[0],
e3d6f909 5112 cmd->se_tfo->get_task_tag(cmd));
c66ac9db 5113#endif
e3d6f909 5114 cmd->se_tfo->queue_status(cmd);
c66ac9db
NB
5115}
5116
5117/* transport_generic_do_tmr():
5118 *
5119 *
5120 */
5121int transport_generic_do_tmr(struct se_cmd *cmd)
5122{
5951146d 5123 struct se_device *dev = cmd->se_dev;
c66ac9db
NB
5124 struct se_tmr_req *tmr = cmd->se_tmr_req;
5125 int ret;
5126
5127 switch (tmr->function) {
5c6cd613 5128 case TMR_ABORT_TASK:
c66ac9db
NB
5129 tmr->response = TMR_FUNCTION_REJECTED;
5130 break;
5c6cd613
NB
5131 case TMR_ABORT_TASK_SET:
5132 case TMR_CLEAR_ACA:
5133 case TMR_CLEAR_TASK_SET:
c66ac9db
NB
5134 tmr->response = TMR_TASK_MGMT_FUNCTION_NOT_SUPPORTED;
5135 break;
5c6cd613 5136 case TMR_LUN_RESET:
c66ac9db
NB
5137 ret = core_tmr_lun_reset(dev, tmr, NULL, NULL);
5138 tmr->response = (!ret) ? TMR_FUNCTION_COMPLETE :
5139 TMR_FUNCTION_REJECTED;
5140 break;
5c6cd613 5141 case TMR_TARGET_WARM_RESET:
c66ac9db
NB
5142 tmr->response = TMR_FUNCTION_REJECTED;
5143 break;
5c6cd613 5144 case TMR_TARGET_COLD_RESET:
c66ac9db
NB
5145 tmr->response = TMR_FUNCTION_REJECTED;
5146 break;
c66ac9db
NB
5147 default:
5148 printk(KERN_ERR "Uknown TMR function: 0x%02x.\n",
5149 tmr->function);
5150 tmr->response = TMR_FUNCTION_REJECTED;
5151 break;
5152 }
5153
5154 cmd->t_state = TRANSPORT_ISTATE_PROCESSING;
e3d6f909 5155 cmd->se_tfo->queue_tm_rsp(cmd);
c66ac9db
NB
5156
5157 transport_cmd_check_stop(cmd, 2, 0);
5158 return 0;
5159}
5160
5161/*
5162 * Called with spin_lock_irq(&dev->execute_task_lock); held
5163 *
5164 */
5165static struct se_task *
5166transport_get_task_from_state_list(struct se_device *dev)
5167{
5168 struct se_task *task;
5169
5170 if (list_empty(&dev->state_task_list))
5171 return NULL;
5172
5173 list_for_each_entry(task, &dev->state_task_list, t_state_list)
5174 break;
5175
5176 list_del(&task->t_state_list);
5177 atomic_set(&task->task_state_active, 0);
5178
5179 return task;
5180}
5181
5182static void transport_processing_shutdown(struct se_device *dev)
5183{
5184 struct se_cmd *cmd;
c66ac9db 5185 struct se_task *task;
c66ac9db
NB
5186 unsigned long flags;
5187 /*
5188 * Empty the struct se_device's struct se_task state list.
5189 */
5190 spin_lock_irqsave(&dev->execute_task_lock, flags);
5191 while ((task = transport_get_task_from_state_list(dev))) {
e3d6f909
AG
5192 if (!task->task_se_cmd) {
5193 printk(KERN_ERR "task->task_se_cmd is NULL!\n");
c66ac9db
NB
5194 continue;
5195 }
e3d6f909 5196 cmd = task->task_se_cmd;
c66ac9db 5197
c66ac9db
NB
5198 spin_unlock_irqrestore(&dev->execute_task_lock, flags);
5199
a1d8b49a 5200 spin_lock_irqsave(&cmd->t_state_lock, flags);
c66ac9db
NB
5201
5202 DEBUG_DO("PT: cmd: %p task: %p ITT/CmdSN: 0x%08x/0x%08x,"
5203 " i_state/def_i_state: %d/%d, t_state/def_t_state:"
5204 " %d/%d cdb: 0x%02x\n", cmd, task,
e3d6f909
AG
5205 cmd->se_tfo->get_task_tag(cmd), cmd->cmd_sn,
5206 cmd->se_tfo->get_cmd_state(cmd), cmd->deferred_i_state,
c66ac9db 5207 cmd->t_state, cmd->deferred_t_state,
a1d8b49a 5208 cmd->t_task_cdb[0]);
c66ac9db
NB
5209 DEBUG_DO("PT: ITT[0x%08x] - t_task_cdbs: %d t_task_cdbs_left:"
5210 " %d t_task_cdbs_sent: %d -- t_transport_active: %d"
5211 " t_transport_stop: %d t_transport_sent: %d\n",
e3d6f909 5212 cmd->se_tfo->get_task_tag(cmd),
a1d8b49a
AG
5213 cmd->t_task_cdbs,
5214 atomic_read(&cmd->t_task_cdbs_left),
5215 atomic_read(&cmd->t_task_cdbs_sent),
5216 atomic_read(&cmd->t_transport_active),
5217 atomic_read(&cmd->t_transport_stop),
5218 atomic_read(&cmd->t_transport_sent));
c66ac9db
NB
5219
5220 if (atomic_read(&task->task_active)) {
5221 atomic_set(&task->task_stop, 1);
5222 spin_unlock_irqrestore(
a1d8b49a 5223 &cmd->t_state_lock, flags);
c66ac9db
NB
5224
5225 DEBUG_DO("Waiting for task: %p to shutdown for dev:"
5226 " %p\n", task, dev);
5227 wait_for_completion(&task->task_stop_comp);
5228 DEBUG_DO("Completed task: %p shutdown for dev: %p\n",
5229 task, dev);
5230
a1d8b49a
AG
5231 spin_lock_irqsave(&cmd->t_state_lock, flags);
5232 atomic_dec(&cmd->t_task_cdbs_left);
c66ac9db
NB
5233
5234 atomic_set(&task->task_active, 0);
5235 atomic_set(&task->task_stop, 0);
52208ae3
NB
5236 } else {
5237 if (atomic_read(&task->task_execute_queue) != 0)
5238 transport_remove_task_from_execute_queue(task, dev);
c66ac9db
NB
5239 }
5240 __transport_stop_task_timer(task, &flags);
5241
a1d8b49a 5242 if (!(atomic_dec_and_test(&cmd->t_task_cdbs_ex_left))) {
c66ac9db 5243 spin_unlock_irqrestore(
a1d8b49a 5244 &cmd->t_state_lock, flags);
c66ac9db
NB
5245
5246 DEBUG_DO("Skipping task: %p, dev: %p for"
5247 " t_task_cdbs_ex_left: %d\n", task, dev,
a1d8b49a 5248 atomic_read(&cmd->t_task_cdbs_ex_left));
c66ac9db
NB
5249
5250 spin_lock_irqsave(&dev->execute_task_lock, flags);
5251 continue;
5252 }
5253
a1d8b49a 5254 if (atomic_read(&cmd->t_transport_active)) {
c66ac9db
NB
5255 DEBUG_DO("got t_transport_active = 1 for task: %p, dev:"
5256 " %p\n", task, dev);
5257
a1d8b49a 5258 if (atomic_read(&cmd->t_fe_count)) {
c66ac9db 5259 spin_unlock_irqrestore(
a1d8b49a 5260 &cmd->t_state_lock, flags);
c66ac9db
NB
5261 transport_send_check_condition_and_sense(
5262 cmd, TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE,
5263 0);
5264 transport_remove_cmd_from_queue(cmd,
5951146d 5265 &cmd->se_dev->dev_queue_obj);
c66ac9db
NB
5266
5267 transport_lun_remove_cmd(cmd);
5268 transport_cmd_check_stop(cmd, 1, 0);
5269 } else {
5270 spin_unlock_irqrestore(
a1d8b49a 5271 &cmd->t_state_lock, flags);
c66ac9db
NB
5272
5273 transport_remove_cmd_from_queue(cmd,
5951146d 5274 &cmd->se_dev->dev_queue_obj);
c66ac9db
NB
5275
5276 transport_lun_remove_cmd(cmd);
5277
5278 if (transport_cmd_check_stop(cmd, 1, 0))
35462975 5279 transport_generic_remove(cmd, 0);
c66ac9db
NB
5280 }
5281
5282 spin_lock_irqsave(&dev->execute_task_lock, flags);
5283 continue;
5284 }
5285 DEBUG_DO("Got t_transport_active = 0 for task: %p, dev: %p\n",
5286 task, dev);
5287
a1d8b49a 5288 if (atomic_read(&cmd->t_fe_count)) {
c66ac9db 5289 spin_unlock_irqrestore(
a1d8b49a 5290 &cmd->t_state_lock, flags);
c66ac9db
NB
5291 transport_send_check_condition_and_sense(cmd,
5292 TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE, 0);
5293 transport_remove_cmd_from_queue(cmd,
5951146d 5294 &cmd->se_dev->dev_queue_obj);
c66ac9db
NB
5295
5296 transport_lun_remove_cmd(cmd);
5297 transport_cmd_check_stop(cmd, 1, 0);
5298 } else {
5299 spin_unlock_irqrestore(
a1d8b49a 5300 &cmd->t_state_lock, flags);
c66ac9db
NB
5301
5302 transport_remove_cmd_from_queue(cmd,
5951146d 5303 &cmd->se_dev->dev_queue_obj);
c66ac9db
NB
5304 transport_lun_remove_cmd(cmd);
5305
5306 if (transport_cmd_check_stop(cmd, 1, 0))
35462975 5307 transport_generic_remove(cmd, 0);
c66ac9db
NB
5308 }
5309
5310 spin_lock_irqsave(&dev->execute_task_lock, flags);
5311 }
5312 spin_unlock_irqrestore(&dev->execute_task_lock, flags);
5313 /*
5314 * Empty the struct se_device's struct se_cmd list.
5315 */
5951146d 5316 while ((cmd = transport_get_cmd_from_queue(&dev->dev_queue_obj))) {
c66ac9db
NB
5317
5318 DEBUG_DO("From Device Queue: cmd: %p t_state: %d\n",
5951146d 5319 cmd, cmd->t_state);
c66ac9db 5320
a1d8b49a 5321 if (atomic_read(&cmd->t_fe_count)) {
c66ac9db
NB
5322 transport_send_check_condition_and_sense(cmd,
5323 TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE, 0);
5324
5325 transport_lun_remove_cmd(cmd);
5326 transport_cmd_check_stop(cmd, 1, 0);
5327 } else {
5328 transport_lun_remove_cmd(cmd);
5329 if (transport_cmd_check_stop(cmd, 1, 0))
35462975 5330 transport_generic_remove(cmd, 0);
c66ac9db 5331 }
c66ac9db 5332 }
c66ac9db
NB
5333}
5334
5335/* transport_processing_thread():
5336 *
5337 *
5338 */
5339static int transport_processing_thread(void *param)
5340{
5951146d 5341 int ret;
c66ac9db
NB
5342 struct se_cmd *cmd;
5343 struct se_device *dev = (struct se_device *) param;
c66ac9db
NB
5344
5345 set_user_nice(current, -20);
5346
5347 while (!kthread_should_stop()) {
e3d6f909
AG
5348 ret = wait_event_interruptible(dev->dev_queue_obj.thread_wq,
5349 atomic_read(&dev->dev_queue_obj.queue_cnt) ||
c66ac9db
NB
5350 kthread_should_stop());
5351 if (ret < 0)
5352 goto out;
5353
5354 spin_lock_irq(&dev->dev_status_lock);
5355 if (dev->dev_status & TRANSPORT_DEVICE_SHUTDOWN) {
5356 spin_unlock_irq(&dev->dev_status_lock);
5357 transport_processing_shutdown(dev);
5358 continue;
5359 }
5360 spin_unlock_irq(&dev->dev_status_lock);
5361
5362get_cmd:
5363 __transport_execute_tasks(dev);
5364
5951146d
AG
5365 cmd = transport_get_cmd_from_queue(&dev->dev_queue_obj);
5366 if (!cmd)
c66ac9db
NB
5367 continue;
5368
5951146d 5369 switch (cmd->t_state) {
c66ac9db 5370 case TRANSPORT_NEW_CMD_MAP:
e3d6f909
AG
5371 if (!(cmd->se_tfo->new_cmd_map)) {
5372 printk(KERN_ERR "cmd->se_tfo->new_cmd_map is"
c66ac9db
NB
5373 " NULL for TRANSPORT_NEW_CMD_MAP\n");
5374 BUG();
5375 }
e3d6f909 5376 ret = cmd->se_tfo->new_cmd_map(cmd);
c66ac9db
NB
5377 if (ret < 0) {
5378 cmd->transport_error_status = ret;
5379 transport_generic_request_failure(cmd, NULL,
5380 0, (cmd->data_direction !=
5381 DMA_TO_DEVICE));
5382 break;
5383 }
5384 /* Fall through */
5385 case TRANSPORT_NEW_CMD:
5386 ret = transport_generic_new_cmd(cmd);
07bde79a
NB
5387 if (ret == -EAGAIN)
5388 break;
5389 else if (ret < 0) {
c66ac9db
NB
5390 cmd->transport_error_status = ret;
5391 transport_generic_request_failure(cmd, NULL,
5392 0, (cmd->data_direction !=
5393 DMA_TO_DEVICE));
5394 }
5395 break;
5396 case TRANSPORT_PROCESS_WRITE:
5397 transport_generic_process_write(cmd);
5398 break;
5399 case TRANSPORT_COMPLETE_OK:
5400 transport_stop_all_task_timers(cmd);
5401 transport_generic_complete_ok(cmd);
5402 break;
5403 case TRANSPORT_REMOVE:
35462975 5404 transport_generic_remove(cmd, 0);
c66ac9db 5405 break;
f4366772 5406 case TRANSPORT_FREE_CMD_INTR:
35462975 5407 transport_generic_free_cmd(cmd, 0, 0);
f4366772 5408 break;
c66ac9db
NB
5409 case TRANSPORT_PROCESS_TMR:
5410 transport_generic_do_tmr(cmd);
5411 break;
5412 case TRANSPORT_COMPLETE_FAILURE:
5413 transport_generic_request_failure(cmd, NULL, 1, 1);
5414 break;
5415 case TRANSPORT_COMPLETE_TIMEOUT:
5416 transport_stop_all_task_timers(cmd);
5417 transport_generic_request_timeout(cmd);
5418 break;
07bde79a
NB
5419 case TRANSPORT_COMPLETE_QF_WP:
5420 transport_generic_write_pending(cmd);
5421 break;
c66ac9db
NB
5422 default:
5423 printk(KERN_ERR "Unknown t_state: %d deferred_t_state:"
5424 " %d for ITT: 0x%08x i_state: %d on SE LUN:"
5951146d 5425 " %u\n", cmd->t_state, cmd->deferred_t_state,
e3d6f909
AG
5426 cmd->se_tfo->get_task_tag(cmd),
5427 cmd->se_tfo->get_cmd_state(cmd),
5428 cmd->se_lun->unpacked_lun);
c66ac9db
NB
5429 BUG();
5430 }
5431
5432 goto get_cmd;
5433 }
5434
5435out:
5436 transport_release_all_cmds(dev);
5437 dev->process_thread = NULL;
5438 return 0;
5439}