/*******************************************************************************
 * Filename:  target_core_device.c (based on iscsi_target_device.c)
 *
 * This file contains the TCM Virtual Device and Disk Transport
 * agnostic related functions.
 *
 * Copyright (c) 2003, 2004, 2005 PyX Technologies, Inc.
 * Copyright (c) 2005-2006 SBE, Inc.  All Rights Reserved.
 * Copyright (c) 2007-2010 Rising Tide Systems
 * Copyright (c) 2008-2010 Linux-iSCSI.org
 *
 * Nicholas A. Bellinger <nab@kernel.org>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
 *
 ******************************************************************************/
#include <linux/net.h>
#include <linux/string.h>
#include <linux/delay.h>
#include <linux/timer.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/kthread.h>
#include <scsi/scsi.h>
#include <scsi/scsi_device.h>

#include <target/target_core_base.h>
#include <target/target_core_device.h>
#include <target/target_core_tpg.h>
#include <target/target_core_transport.h>
#include <target/target_core_fabric_ops.h>

#include "target_core_alua.h"
#include "target_core_hba.h"
#include "target_core_pr.h"
#include "target_core_ua.h"
static void se_dev_start(struct se_device *dev);
static void se_dev_stop(struct se_device *dev);

static struct se_hba *lun0_hba;
static struct se_subsystem_dev *lun0_su_dev;
/* not static, needed by tpg.c */
struct se_device *g_lun0_dev;
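
/*
 * Map unpacked_lun to a struct se_lun through the session NodeACL's
 * device_list, falling back to the portal group's tpg_virt_lun0 so that
 * REPORT_LUNS et al still work when no MappedLUN=0 exists.  Returns 0 on
 * success, or -1 with scsi_sense_reason and SCF_SCSI_CDB_EXCEPTION set.
 */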
int transport_get_lun_for_cmd(
        struct se_cmd *se_cmd,
        u32 unpacked_lun)
{
        struct se_dev_entry *deve;
        struct se_lun *se_lun = NULL;
        struct se_session *se_sess = se_cmd->se_sess;
        unsigned long flags;
        int read_only = 0;

        if (unpacked_lun >= TRANSPORT_MAX_LUNS_PER_TPG) {
                se_cmd->scsi_sense_reason = TCM_NON_EXISTENT_LUN;
                se_cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION;
                return -1;
        }

        spin_lock_irq(&se_sess->se_node_acl->device_list_lock);
        deve = se_cmd->se_deve =
                        &se_sess->se_node_acl->device_list[unpacked_lun];
        if (deve->lun_flags & TRANSPORT_LUNFLAGS_INITIATOR_ACCESS) {
                deve->total_cmds++;
                deve->total_bytes += se_cmd->data_length;

                if (se_cmd->data_direction == DMA_TO_DEVICE) {
                        if (deve->lun_flags &
                                        TRANSPORT_LUNFLAGS_READ_ONLY) {
                                read_only = 1;
                                goto out;
                        }
                        deve->write_bytes += se_cmd->data_length;
                } else if (se_cmd->data_direction ==
                           DMA_FROM_DEVICE) {
                        deve->read_bytes += se_cmd->data_length;
                }
                deve->deve_cmds++;

                se_lun = se_cmd->se_lun = deve->se_lun;
                se_cmd->pr_res_key = deve->pr_res_key;
                se_cmd->orig_fe_lun = unpacked_lun;
                se_cmd->se_orig_obj_ptr = se_cmd->se_lun->lun_se_dev;
                se_cmd->se_cmd_flags |= SCF_SE_LUN_CMD;
        }
out:
        spin_unlock_irq(&se_sess->se_node_acl->device_list_lock);

        if (!se_lun) {
                if (read_only) {
                        se_cmd->scsi_sense_reason = TCM_WRITE_PROTECTED;
                        se_cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION;
                        printk("TARGET_CORE[%s]: Detected WRITE_PROTECTED LUN"
                                " Access for 0x%08x\n",
                                se_cmd->se_tfo->get_fabric_name(),
                                unpacked_lun);
                        return -1;
                } else {
                        /*
                         * Use the se_portal_group->tpg_virt_lun0 to allow for
                         * REPORT_LUNS, et al to be returned when no active
                         * MappedLUN=0 exists for this Initiator Port.
                         */
                        if (unpacked_lun != 0) {
                                se_cmd->scsi_sense_reason = TCM_NON_EXISTENT_LUN;
                                se_cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION;
                                printk("TARGET_CORE[%s]: Detected NON_EXISTENT_LUN"
                                        " Access for 0x%08x\n",
                                        se_cmd->se_tfo->get_fabric_name(),
                                        unpacked_lun);
                                return -1;
                        }
                        /*
                         * Force WRITE PROTECT for virtual LUN 0
                         */
                        if ((se_cmd->data_direction != DMA_FROM_DEVICE) &&
                            (se_cmd->data_direction != DMA_NONE)) {
                                se_cmd->scsi_sense_reason = TCM_WRITE_PROTECTED;
                                se_cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION;
                                return -1;
                        }
#if 0
                        printk("TARGET_CORE[%s]: Using virtual LUN0! :-)\n",
                                se_cmd->se_tfo->get_fabric_name());
#endif
                        se_lun = se_cmd->se_lun = &se_sess->se_tpg->tpg_virt_lun0;
                        se_cmd->orig_fe_lun = 0;
                        se_cmd->se_orig_obj_ptr = se_cmd->se_lun->lun_se_dev;
                        se_cmd->se_cmd_flags |= SCF_SE_LUN_CMD;
                }
        }
        /*
         * Determine if the struct se_lun is online.
         */
/* #warning FIXME: Check for LUN_RESET + UNIT Attention */
        if (se_dev_check_online(se_lun->lun_se_dev) != 0) {
                se_cmd->scsi_sense_reason = TCM_NON_EXISTENT_LUN;
                se_cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION;
                return -1;
        }

        {
        struct se_device *dev = se_lun->lun_se_dev;
        spin_lock_irq(&dev->stats_lock);
        dev->num_cmds++;
        if (se_cmd->data_direction == DMA_TO_DEVICE)
                dev->write_bytes += se_cmd->data_length;
        else if (se_cmd->data_direction == DMA_FROM_DEVICE)
                dev->read_bytes += se_cmd->data_length;
        spin_unlock_irq(&dev->stats_lock);
        }

        /*
         * Add the struct se_cmd to the struct se_lun's cmd list.  This list is
         * used for tracking state of struct se_cmds during LUN shutdown events.
         */
        spin_lock_irqsave(&se_lun->lun_cmd_lock, flags);
        list_add_tail(&se_cmd->se_lun_list, &se_lun->lun_cmd_list);
        atomic_set(&se_cmd->t_task->transport_lun_active, 1);
#if 0
        printk(KERN_INFO "Adding ITT: 0x%08x to LUN LIST[%d]\n",
                se_cmd->se_tfo->get_task_tag(se_cmd), se_lun->unpacked_lun);
#endif
        spin_unlock_irqrestore(&se_lun->lun_cmd_lock, flags);

        return 0;
}
EXPORT_SYMBOL(transport_get_lun_for_cmd);
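
/*
 * The task management variant of the LUN lookup above: resolves
 * se_tmr->tmr_lun and tmr_dev, and links the TMR onto the device's
 * dev_tmr_list under se_tmr_lock.  There is no virtual LUN0 fallback
 * for TMRs.
 */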
int transport_get_lun_for_tmr(
        struct se_cmd *se_cmd,
        u32 unpacked_lun)
{
        struct se_device *dev = NULL;
        struct se_dev_entry *deve;
        struct se_lun *se_lun = NULL;
        struct se_session *se_sess = se_cmd->se_sess;
        struct se_tmr_req *se_tmr = se_cmd->se_tmr_req;

        if (unpacked_lun >= TRANSPORT_MAX_LUNS_PER_TPG) {
                se_cmd->scsi_sense_reason = TCM_NON_EXISTENT_LUN;
                se_cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION;
                return -1;
        }

        spin_lock_irq(&se_sess->se_node_acl->device_list_lock);
        deve = se_cmd->se_deve =
                        &se_sess->se_node_acl->device_list[unpacked_lun];
        if (deve->lun_flags & TRANSPORT_LUNFLAGS_INITIATOR_ACCESS) {
                se_lun = se_cmd->se_lun = se_tmr->tmr_lun = deve->se_lun;
                dev = se_lun->lun_se_dev;
                se_cmd->pr_res_key = deve->pr_res_key;
                se_cmd->orig_fe_lun = unpacked_lun;
                se_cmd->se_orig_obj_ptr = se_cmd->se_lun->lun_se_dev;
/*              se_cmd->se_cmd_flags |= SCF_SE_LUN_CMD; */
        }
        spin_unlock_irq(&se_sess->se_node_acl->device_list_lock);

        if (!se_lun) {
                printk(KERN_INFO "TARGET_CORE[%s]: Detected NON_EXISTENT_LUN"
                        " Access for 0x%08x\n",
                        se_cmd->se_tfo->get_fabric_name(),
                        unpacked_lun);
                se_cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION;
                return -1;
        }
        /*
         * Determine if the struct se_lun is online.
         */
/* #warning FIXME: Check for LUN_RESET + UNIT Attention */
        if (se_dev_check_online(se_lun->lun_se_dev) != 0) {
                se_cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION;
                return -1;
        }
        se_tmr->tmr_dev = dev;

        spin_lock(&dev->se_tmr_lock);
        list_add_tail(&se_tmr->tmr_list, &dev->dev_tmr_list);
        spin_unlock(&dev->se_tmr_lock);

        return 0;
}
EXPORT_SYMBOL(transport_get_lun_for_tmr);
/*
 * This function is called from core_scsi3_emulate_pro_register_and_move()
 * and core_scsi3_decode_spec_i_port(), and will increment &deve->pr_ref_count
 * when a matching rtpi is found.
 */
struct se_dev_entry *core_get_se_deve_from_rtpi(
        struct se_node_acl *nacl,
        u16 rtpi)
{
        struct se_dev_entry *deve;
        struct se_lun *lun;
        struct se_port *port;
        struct se_portal_group *tpg = nacl->se_tpg;
        u32 i;

        spin_lock_irq(&nacl->device_list_lock);
        for (i = 0; i < TRANSPORT_MAX_LUNS_PER_TPG; i++) {
                deve = &nacl->device_list[i];

                if (!(deve->lun_flags & TRANSPORT_LUNFLAGS_INITIATOR_ACCESS))
                        continue;

                lun = deve->se_lun;
                if (!(lun)) {
                        printk(KERN_ERR "%s device entries device pointer is"
                                " NULL, but Initiator has access.\n",
                                tpg->se_tpg_tfo->get_fabric_name());
                        continue;
                }
                port = lun->lun_sep;
                if (!(port)) {
                        printk(KERN_ERR "%s device entries device pointer is"
                                " NULL, but Initiator has access.\n",
                                tpg->se_tpg_tfo->get_fabric_name());
                        continue;
                }
                if (port->sep_rtpi != rtpi)
                        continue;

                atomic_inc(&deve->pr_ref_count);
                smp_mb__after_atomic_inc();
                spin_unlock_irq(&nacl->device_list_lock);

                return deve;
        }
        spin_unlock_irq(&nacl->device_list_lock);

        return NULL;
}
int core_free_device_list_for_node(
        struct se_node_acl *nacl,
        struct se_portal_group *tpg)
{
        struct se_dev_entry *deve;
        struct se_lun *lun;
        u32 i;

        if (!nacl->device_list)
                return 0;

        spin_lock_irq(&nacl->device_list_lock);
        for (i = 0; i < TRANSPORT_MAX_LUNS_PER_TPG; i++) {
                deve = &nacl->device_list[i];

                if (!(deve->lun_flags & TRANSPORT_LUNFLAGS_INITIATOR_ACCESS))
                        continue;

                if (!(deve->se_lun)) {
                        printk(KERN_ERR "%s device entries device pointer is"
                                " NULL, but Initiator has access.\n",
                                tpg->se_tpg_tfo->get_fabric_name());
                        continue;
                }
                lun = deve->se_lun;

                spin_unlock_irq(&nacl->device_list_lock);
                core_update_device_list_for_node(lun, NULL, deve->mapped_lun,
                        TRANSPORT_LUNFLAGS_NO_ACCESS, nacl, tpg, 0);
                spin_lock_irq(&nacl->device_list_lock);
        }
        spin_unlock_irq(&nacl->device_list_lock);

        kfree(nacl->device_list);
        nacl->device_list = NULL;

        return 0;
}
void core_dec_lacl_count(struct se_node_acl *se_nacl, struct se_cmd *se_cmd)
{
        struct se_dev_entry *deve;

        spin_lock_irq(&se_nacl->device_list_lock);
        deve = &se_nacl->device_list[se_cmd->orig_fe_lun];
        deve->deve_cmds--;
        spin_unlock_irq(&se_nacl->device_list_lock);
}
void core_update_device_list_access(
        u32 mapped_lun,
        u32 lun_access,
        struct se_node_acl *nacl)
{
        struct se_dev_entry *deve;

        spin_lock_irq(&nacl->device_list_lock);
        deve = &nacl->device_list[mapped_lun];
        if (lun_access & TRANSPORT_LUNFLAGS_READ_WRITE) {
                deve->lun_flags &= ~TRANSPORT_LUNFLAGS_READ_ONLY;
                deve->lun_flags |= TRANSPORT_LUNFLAGS_READ_WRITE;
        } else {
                deve->lun_flags &= ~TRANSPORT_LUNFLAGS_READ_WRITE;
                deve->lun_flags |= TRANSPORT_LUNFLAGS_READ_ONLY;
        }
        spin_unlock_irq(&nacl->device_list_lock);
}
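
/*
 * Central enable/disable path for a struct se_dev_entry.  With enable=1 this
 * creates the MappedLUN entry (or converts a demo-mode entry to an explicit
 * LUN ACL) and links it onto port->sep_alua_list; with enable=0 it waits for
 * outstanding PR references to drop, then clears the entry and releases the
 * NodeACL's Unit Attention and PR state for this mapping.
 */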
/*      core_update_device_list_for_node():
 *
 *
 */
int core_update_device_list_for_node(
        struct se_lun *lun,
        struct se_lun_acl *lun_acl,
        u32 mapped_lun,
        u32 lun_access,
        struct se_node_acl *nacl,
        struct se_portal_group *tpg,
        int enable)
{
        struct se_port *port = lun->lun_sep;
        struct se_dev_entry *deve = &nacl->device_list[mapped_lun];
        int trans = 0;
        /*
         * If the MappedLUN entry is being disabled, the entry in
         * port->sep_alua_list must be removed now before clearing the
         * struct se_dev_entry pointers below as logic in
         * core_alua_do_transition_tg_pt() depends on these being present.
         */
        if (!(enable)) {
                /*
                 * deve->se_lun_acl will be NULL for demo-mode created LUNs
                 * that have not been explicitly converted to MappedLUNs ->
                 * struct se_lun_acl, but we remove deve->alua_port_list from
                 * port->sep_alua_list. This also means that active UAs and
                 * NodeACL context specific PR metadata for demo-mode
                 * MappedLUN *deve will be released below..
                 */
                spin_lock_bh(&port->sep_alua_lock);
                list_del(&deve->alua_port_list);
                spin_unlock_bh(&port->sep_alua_lock);
        }

        spin_lock_irq(&nacl->device_list_lock);
        if (enable) {
                /*
                 * Check if the call is handling demo mode -> explicit LUN ACL
                 * transition.  This transition must be for the same struct se_lun
                 * + mapped_lun that was setup in demo mode..
                 */
                if (deve->lun_flags & TRANSPORT_LUNFLAGS_INITIATOR_ACCESS) {
                        if (deve->se_lun_acl != NULL) {
                                printk(KERN_ERR "struct se_dev_entry->se_lun_acl"
                                        " already set for demo mode -> explicit"
                                        " LUN ACL transition\n");
                                spin_unlock_irq(&nacl->device_list_lock);
                                return -1;
                        }
                        if (deve->se_lun != lun) {
                                printk(KERN_ERR "struct se_dev_entry->se_lun does"
                                        " not match passed struct se_lun for demo mode"
                                        " -> explicit LUN ACL transition\n");
                                spin_unlock_irq(&nacl->device_list_lock);
                                return -1;
                        }
                        deve->se_lun_acl = lun_acl;
                        trans = 1;
                } else {
                        deve->se_lun = lun;
                        deve->se_lun_acl = lun_acl;
                        deve->mapped_lun = mapped_lun;
                        deve->lun_flags |= TRANSPORT_LUNFLAGS_INITIATOR_ACCESS;
                }

                if (lun_access & TRANSPORT_LUNFLAGS_READ_WRITE) {
                        deve->lun_flags &= ~TRANSPORT_LUNFLAGS_READ_ONLY;
                        deve->lun_flags |= TRANSPORT_LUNFLAGS_READ_WRITE;
                } else {
                        deve->lun_flags &= ~TRANSPORT_LUNFLAGS_READ_WRITE;
                        deve->lun_flags |= TRANSPORT_LUNFLAGS_READ_ONLY;
                }

                if (trans) {
                        spin_unlock_irq(&nacl->device_list_lock);
                        return 0;
                }
                deve->creation_time = get_jiffies_64();
                deve->attach_count++;
                spin_unlock_irq(&nacl->device_list_lock);

                spin_lock_bh(&port->sep_alua_lock);
                list_add_tail(&deve->alua_port_list, &port->sep_alua_list);
                spin_unlock_bh(&port->sep_alua_lock);

                return 0;
        }
        /*
         * Wait for any in process SPEC_I_PT=1 or REGISTER_AND_MOVE
         * PR operation to complete.
         */
        spin_unlock_irq(&nacl->device_list_lock);
        while (atomic_read(&deve->pr_ref_count) != 0)
                cpu_relax();
        spin_lock_irq(&nacl->device_list_lock);
        /*
         * Disable struct se_dev_entry LUN ACL mapping
         */
        core_scsi3_ua_release_all(deve);
        deve->se_lun = NULL;
        deve->se_lun_acl = NULL;
        deve->lun_flags = 0;
        deve->creation_time = 0;
        deve->attach_count--;
        spin_unlock_irq(&nacl->device_list_lock);

        core_scsi3_free_pr_reg_from_nacl(lun->lun_se_dev, nacl);
        return 0;
}
/*      core_clear_lun_from_tpg():
 *
 *
 */
void core_clear_lun_from_tpg(struct se_lun *lun, struct se_portal_group *tpg)
{
        struct se_node_acl *nacl;
        struct se_dev_entry *deve;
        u32 i;

        spin_lock_bh(&tpg->acl_node_lock);
        list_for_each_entry(nacl, &tpg->acl_node_list, acl_list) {
                spin_unlock_bh(&tpg->acl_node_lock);

                spin_lock_irq(&nacl->device_list_lock);
                for (i = 0; i < TRANSPORT_MAX_LUNS_PER_TPG; i++) {
                        deve = &nacl->device_list[i];
                        if (lun != deve->se_lun)
                                continue;
                        spin_unlock_irq(&nacl->device_list_lock);

                        core_update_device_list_for_node(lun, NULL,
                                deve->mapped_lun, TRANSPORT_LUNFLAGS_NO_ACCESS,
                                nacl, tpg, 0);

                        spin_lock_irq(&nacl->device_list_lock);
                }
                spin_unlock_irq(&nacl->device_list_lock);

                spin_lock_bh(&tpg->acl_node_lock);
        }
        spin_unlock_bh(&tpg->acl_node_lock);
}
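
/*
 * Allocate a struct se_port and assign it the next free RELATIVE TARGET PORT
 * IDENTIFIER, which must be non-zero and unique across dev->dev_sep_list;
 * see the spc4r17 table quoted in the allocation loop below.
 */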
static struct se_port *core_alloc_port(struct se_device *dev)
{
        struct se_port *port, *port_tmp;

        port = kzalloc(sizeof(struct se_port), GFP_KERNEL);
        if (!(port)) {
                printk(KERN_ERR "Unable to allocate struct se_port\n");
                return ERR_PTR(-ENOMEM);
        }
        INIT_LIST_HEAD(&port->sep_alua_list);
        INIT_LIST_HEAD(&port->sep_list);
        atomic_set(&port->sep_tg_pt_secondary_offline, 0);
        spin_lock_init(&port->sep_alua_lock);
        mutex_init(&port->sep_tg_pt_md_mutex);

        spin_lock(&dev->se_port_lock);
        if (dev->dev_port_count == 0x0000ffff) {
                printk(KERN_WARNING "Reached dev->dev_port_count =="
                                " 0x0000ffff\n");
                spin_unlock(&dev->se_port_lock);
                return ERR_PTR(-ENOSPC);
        }
again:
        /*
         * Allocate the next RELATIVE TARGET PORT IDENTIFIER for this struct se_device
         * Here is the table from spc4r17 section 7.7.3.8.
         *
         * Table 473 -- RELATIVE TARGET PORT IDENTIFIER field
         *
         * Code      Description
         * 0h        Reserved
         * 1h        Relative port 1, historically known as port A
         * 2h        Relative port 2, historically known as port B
         * 3h to FFFFh    Relative port 3 through 65 535
         */
        port->sep_rtpi = dev->dev_rpti_counter++;
        if (!(port->sep_rtpi))
                goto again;

        list_for_each_entry(port_tmp, &dev->dev_sep_list, sep_list) {
                /*
                 * Make sure RELATIVE TARGET PORT IDENTIFIER is unique
                 * for 16-bit wrap..
                 */
                if (port->sep_rtpi == port_tmp->sep_rtpi)
                        goto again;
        }
        spin_unlock(&dev->se_port_lock);

        return port;
}
static void core_export_port(
        struct se_device *dev,
        struct se_portal_group *tpg,
        struct se_port *port,
        struct se_lun *lun)
{
        struct se_subsystem_dev *su_dev = dev->se_sub_dev;
        struct t10_alua_tg_pt_gp_member *tg_pt_gp_mem = NULL;

        spin_lock(&dev->se_port_lock);
        spin_lock(&lun->lun_sep_lock);
        port->sep_tpg = tpg;
        port->sep_lun = lun;
        lun->lun_sep = port;
        spin_unlock(&lun->lun_sep_lock);

        list_add_tail(&port->sep_list, &dev->dev_sep_list);
        spin_unlock(&dev->se_port_lock);

        if (su_dev->t10_alua.alua_type == SPC3_ALUA_EMULATED) {
                tg_pt_gp_mem = core_alua_allocate_tg_pt_gp_mem(port);
                if (IS_ERR(tg_pt_gp_mem) || !tg_pt_gp_mem) {
                        printk(KERN_ERR "Unable to allocate t10_alua_tg_pt"
                                        "_gp_member_t\n");
                        return;
                }
                spin_lock(&tg_pt_gp_mem->tg_pt_gp_mem_lock);
                __core_alua_attach_tg_pt_gp_mem(tg_pt_gp_mem,
                        su_dev->t10_alua.default_tg_pt_gp);
                spin_unlock(&tg_pt_gp_mem->tg_pt_gp_mem_lock);
                printk(KERN_INFO "%s/%s: Adding to default ALUA Target Port"
                        " Group: alua/default_tg_pt_gp\n",
                        dev->transport->name, tpg->se_tpg_tfo->get_fabric_name());
        }

        dev->dev_port_count++;
        port->sep_index = port->sep_rtpi; /* RELATIVE TARGET PORT IDENTIFIER */
}
/*
 *      Called with struct se_device->se_port_lock spinlock held.
 */
static void core_release_port(struct se_device *dev, struct se_port *port)
        __releases(&dev->se_port_lock) __acquires(&dev->se_port_lock)
{
        /*
         * Wait for any port reference for PR ALL_TG_PT=1 operation
         * to complete in __core_scsi3_alloc_registration()
         */
        spin_unlock(&dev->se_port_lock);
        if (atomic_read(&port->sep_tg_pt_ref_cnt))
                cpu_relax();
        spin_lock(&dev->se_port_lock);

        core_alua_free_tg_pt_gp_mem(port);

        list_del(&port->sep_list);
        dev->dev_port_count--;
        kfree(port);
}
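
/*
 * Export a struct se_device through a fabric LUN: allocate the se_port,
 * start the device, and bump dev_export_obj.obj_access_count, which the
 * attribute setters below consult to refuse changes on an exported device.
 */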
int core_dev_export(
        struct se_device *dev,
        struct se_portal_group *tpg,
        struct se_lun *lun)
{
        struct se_port *port;

        port = core_alloc_port(dev);
        if (IS_ERR(port))
                return PTR_ERR(port);

        lun->lun_se_dev = dev;
        se_dev_start(dev);

        atomic_inc(&dev->dev_export_obj.obj_access_count);
        core_export_port(dev, tpg, port, lun);
        return 0;
}
void core_dev_unexport(
        struct se_device *dev,
        struct se_portal_group *tpg,
        struct se_lun *lun)
{
        struct se_port *port = lun->lun_sep;

        spin_lock(&lun->lun_sep_lock);
        if (lun->lun_se_dev == NULL) {
                spin_unlock(&lun->lun_sep_lock);
                return;
        }
        spin_unlock(&lun->lun_sep_lock);

        spin_lock(&dev->se_port_lock);
        atomic_dec(&dev->dev_export_obj.obj_access_count);
        core_release_port(dev, port);
        spin_unlock(&dev->se_port_lock);

        se_dev_stop(dev);
        lun->lun_se_dev = NULL;
}
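
/*
 * Emulated REPORT LUNS: an 8-byte LUN entry is written per MappedLUN starting
 * at offset 8, and the 4-byte LUN LIST LENGTH (lun_count * 8) lands in bytes
 * 0-3.  lun_count keeps accumulating past the allocation length so the
 * initiator still sees the full LUN LIST LENGTH with a short buffer.
 */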
int transport_core_report_lun_response(struct se_cmd *se_cmd)
{
        struct se_dev_entry *deve;
        struct se_lun *se_lun;
        struct se_session *se_sess = se_cmd->se_sess;
        struct se_task *se_task;
        unsigned char *buf = se_cmd->t_task->t_task_buf;
        u32 cdb_offset = 0, lun_count = 0, offset = 8, i;

        list_for_each_entry(se_task, &se_cmd->t_task->t_task_list, t_list)
                break;

        if (!(se_task)) {
                printk(KERN_ERR "Unable to locate struct se_task for struct se_cmd\n");
                return PYX_TRANSPORT_LU_COMM_FAILURE;
        }

        /*
         * If no struct se_session pointer is present, this struct se_cmd is
         * coming via a target_core_mod PASSTHROUGH op, and not through
         * a $FABRIC_MOD.  In that case, report LUN=0 only.
         */
        if (!(se_sess)) {
                int_to_scsilun(0, (struct scsi_lun *)&buf[offset]);
                lun_count = 1;
                goto done;
        }

        spin_lock_irq(&se_sess->se_node_acl->device_list_lock);
        for (i = 0; i < TRANSPORT_MAX_LUNS_PER_TPG; i++) {
                deve = &se_sess->se_node_acl->device_list[i];
                if (!(deve->lun_flags & TRANSPORT_LUNFLAGS_INITIATOR_ACCESS))
                        continue;
                se_lun = deve->se_lun;
                /*
                 * We determine the correct LUN LIST LENGTH even once we
                 * have reached the initial allocation length.
                 */
                lun_count++;
                if ((cdb_offset + 8) >= se_cmd->data_length)
                        continue;

                int_to_scsilun(deve->mapped_lun, (struct scsi_lun *)&buf[offset]);
                offset += 8;
                cdb_offset += 8;
        }
        spin_unlock_irq(&se_sess->se_node_acl->device_list_lock);

        /*
         * See SPC3 r07, page 159.
         */
done:
        lun_count *= 8;
        buf[0] = ((lun_count >> 24) & 0xff);
        buf[1] = ((lun_count >> 16) & 0xff);
        buf[2] = ((lun_count >> 8) & 0xff);
        buf[3] = (lun_count & 0xff);

        return PYX_TRANSPORT_SENT_TO_TRANSPORT;
}
/*      se_release_device_for_hba():
 *
 *
 */
void se_release_device_for_hba(struct se_device *dev)
{
        struct se_hba *hba = dev->se_hba;

        if ((dev->dev_status & TRANSPORT_DEVICE_ACTIVATED) ||
            (dev->dev_status & TRANSPORT_DEVICE_DEACTIVATED) ||
            (dev->dev_status & TRANSPORT_DEVICE_SHUTDOWN) ||
            (dev->dev_status & TRANSPORT_DEVICE_OFFLINE_ACTIVATED) ||
            (dev->dev_status & TRANSPORT_DEVICE_OFFLINE_DEACTIVATED))
                se_dev_stop(dev);

        if (dev->dev_ptr) {
                kthread_stop(dev->process_thread);
                if (dev->transport->free_device)
                        dev->transport->free_device(dev->dev_ptr);
        }

        spin_lock(&hba->device_lock);
        list_del(&dev->dev_list);
        hba->dev_count--;
        spin_unlock(&hba->device_lock);

        core_scsi3_free_all_registrations(dev);
        se_release_vpd_for_dev(dev);

        kfree(dev);
}
void se_release_vpd_for_dev(struct se_device *dev)
{
        struct t10_vpd *vpd, *vpd_tmp;

        spin_lock(&dev->se_sub_dev->t10_wwn.t10_vpd_lock);
        list_for_each_entry_safe(vpd, vpd_tmp,
                        &dev->se_sub_dev->t10_wwn.t10_vpd_list, vpd_list) {
                list_del(&vpd->vpd_list);
                kfree(vpd);
        }
        spin_unlock(&dev->se_sub_dev->t10_wwn.t10_vpd_lock);
}
/*      se_free_virtual_device():
 *
 *      Used for IBLOCK, RAMDISK, and FILEIO Transport Drivers.
 */
int se_free_virtual_device(struct se_device *dev, struct se_hba *hba)
{
        if (!list_empty(&dev->dev_sep_list))
                dump_stack();

        core_alua_free_lu_gp_mem(dev);
        se_release_device_for_hba(dev);

        return 0;
}
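
/*
 * se_dev_start()/se_dev_stop() refcount the device via
 * dev_obj.obj_access_count under hba->device_lock: the first reference moves
 * the device from DEACTIVATED to ACTIVATED (or between the OFFLINE_*
 * equivalents), and dropping the last reference moves it back.
 */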
static void se_dev_start(struct se_device *dev)
{
        struct se_hba *hba = dev->se_hba;

        spin_lock(&hba->device_lock);
        atomic_inc(&dev->dev_obj.obj_access_count);
        if (atomic_read(&dev->dev_obj.obj_access_count) == 1) {
                if (dev->dev_status & TRANSPORT_DEVICE_DEACTIVATED) {
                        dev->dev_status &= ~TRANSPORT_DEVICE_DEACTIVATED;
                        dev->dev_status |= TRANSPORT_DEVICE_ACTIVATED;
                } else if (dev->dev_status &
                           TRANSPORT_DEVICE_OFFLINE_DEACTIVATED) {
                        dev->dev_status &=
                                ~TRANSPORT_DEVICE_OFFLINE_DEACTIVATED;
                        dev->dev_status |= TRANSPORT_DEVICE_OFFLINE_ACTIVATED;
                }
        }
        spin_unlock(&hba->device_lock);
}
static void se_dev_stop(struct se_device *dev)
{
        struct se_hba *hba = dev->se_hba;

        spin_lock(&hba->device_lock);
        atomic_dec(&dev->dev_obj.obj_access_count);
        if (atomic_read(&dev->dev_obj.obj_access_count) == 0) {
                if (dev->dev_status & TRANSPORT_DEVICE_ACTIVATED) {
                        dev->dev_status &= ~TRANSPORT_DEVICE_ACTIVATED;
                        dev->dev_status |= TRANSPORT_DEVICE_DEACTIVATED;
                } else if (dev->dev_status &
                           TRANSPORT_DEVICE_OFFLINE_ACTIVATED) {
                        dev->dev_status &= ~TRANSPORT_DEVICE_OFFLINE_ACTIVATED;
                        dev->dev_status |= TRANSPORT_DEVICE_OFFLINE_DEACTIVATED;
                }
        }
        spin_unlock(&hba->device_lock);
}
int se_dev_check_online(struct se_device *dev)
{
        int ret;

        spin_lock_irq(&dev->dev_status_lock);
        ret = ((dev->dev_status & TRANSPORT_DEVICE_ACTIVATED) ||
               (dev->dev_status & TRANSPORT_DEVICE_DEACTIVATED)) ? 0 : 1;
        spin_unlock_irq(&dev->dev_status_lock);

        return ret;
}
int se_dev_check_shutdown(struct se_device *dev)
{
        int ret;

        spin_lock_irq(&dev->dev_status_lock);
        ret = (dev->dev_status & TRANSPORT_DEVICE_SHUTDOWN);
        spin_unlock_irq(&dev->dev_status_lock);

        return ret;
}
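
/*
 * Seed the device attributes from the compile-time DA_* defaults plus the
 * subsystem plugin's struct se_dev_limits, whose queue_limits are expected
 * to mirror those of the backing struct block_device, where one exists.
 */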
void se_dev_set_default_attribs(
        struct se_device *dev,
        struct se_dev_limits *dev_limits)
{
        struct queue_limits *limits = &dev_limits->limits;

        dev->se_sub_dev->se_dev_attrib.emulate_dpo = DA_EMULATE_DPO;
        dev->se_sub_dev->se_dev_attrib.emulate_fua_write = DA_EMULATE_FUA_WRITE;
        dev->se_sub_dev->se_dev_attrib.emulate_fua_read = DA_EMULATE_FUA_READ;
        dev->se_sub_dev->se_dev_attrib.emulate_write_cache = DA_EMULATE_WRITE_CACHE;
        dev->se_sub_dev->se_dev_attrib.emulate_ua_intlck_ctrl = DA_EMULATE_UA_INTLLCK_CTRL;
        dev->se_sub_dev->se_dev_attrib.emulate_tas = DA_EMULATE_TAS;
        dev->se_sub_dev->se_dev_attrib.emulate_tpu = DA_EMULATE_TPU;
        dev->se_sub_dev->se_dev_attrib.emulate_tpws = DA_EMULATE_TPWS;
        dev->se_sub_dev->se_dev_attrib.emulate_reservations = DA_EMULATE_RESERVATIONS;
        dev->se_sub_dev->se_dev_attrib.emulate_alua = DA_EMULATE_ALUA;
        dev->se_sub_dev->se_dev_attrib.enforce_pr_isids = DA_ENFORCE_PR_ISIDS;
        /*
         * The TPU=1 and TPWS=1 settings will be set in TCM/IBLOCK
         * iblock_create_virtdevice() from struct queue_limits values
         * if blk_queue_discard()==1
         */
        dev->se_sub_dev->se_dev_attrib.max_unmap_lba_count = DA_MAX_UNMAP_LBA_COUNT;
        dev->se_sub_dev->se_dev_attrib.max_unmap_block_desc_count =
                DA_MAX_UNMAP_BLOCK_DESC_COUNT;
        dev->se_sub_dev->se_dev_attrib.unmap_granularity = DA_UNMAP_GRANULARITY_DEFAULT;
        dev->se_sub_dev->se_dev_attrib.unmap_granularity_alignment =
                DA_UNMAP_GRANULARITY_ALIGNMENT_DEFAULT;
        /*
         * block_size is based on subsystem plugin dependent requirements.
         */
        dev->se_sub_dev->se_dev_attrib.hw_block_size = limits->logical_block_size;
        dev->se_sub_dev->se_dev_attrib.block_size = limits->logical_block_size;
        /*
         * max_sectors is based on subsystem plugin dependent requirements.
         */
        dev->se_sub_dev->se_dev_attrib.hw_max_sectors = limits->max_hw_sectors;
        dev->se_sub_dev->se_dev_attrib.max_sectors = limits->max_sectors;
        /*
         * Set optimal_sectors from max_sectors, which can be lowered via
         * configfs.
         */
        dev->se_sub_dev->se_dev_attrib.optimal_sectors = limits->max_sectors;
        /*
         * queue_depth is based on subsystem plugin dependent requirements.
         */
        dev->se_sub_dev->se_dev_attrib.hw_queue_depth = dev_limits->hw_queue_depth;
        dev->se_sub_dev->se_dev_attrib.queue_depth = dev_limits->queue_depth;
}
int se_dev_set_task_timeout(struct se_device *dev, u32 task_timeout)
{
        if (task_timeout > DA_TASK_TIMEOUT_MAX) {
                printk(KERN_ERR "dev[%p]: Passed task_timeout: %u larger than"
                        " DA_TASK_TIMEOUT_MAX\n", dev, task_timeout);
                return -1;
        }
        dev->se_sub_dev->se_dev_attrib.task_timeout = task_timeout;
        printk(KERN_INFO "dev[%p]: Set SE Device task_timeout: %u\n",
                dev, task_timeout);

        return 0;
}
int se_dev_set_max_unmap_lba_count(
        struct se_device *dev,
        u32 max_unmap_lba_count)
{
        dev->se_sub_dev->se_dev_attrib.max_unmap_lba_count = max_unmap_lba_count;
        printk(KERN_INFO "dev[%p]: Set max_unmap_lba_count: %u\n",
                dev, dev->se_sub_dev->se_dev_attrib.max_unmap_lba_count);

        return 0;
}

int se_dev_set_max_unmap_block_desc_count(
        struct se_device *dev,
        u32 max_unmap_block_desc_count)
{
        dev->se_sub_dev->se_dev_attrib.max_unmap_block_desc_count =
                max_unmap_block_desc_count;
        printk(KERN_INFO "dev[%p]: Set max_unmap_block_desc_count: %u\n",
                dev, dev->se_sub_dev->se_dev_attrib.max_unmap_block_desc_count);

        return 0;
}

int se_dev_set_unmap_granularity(
        struct se_device *dev,
        u32 unmap_granularity)
{
        dev->se_sub_dev->se_dev_attrib.unmap_granularity = unmap_granularity;
        printk(KERN_INFO "dev[%p]: Set unmap_granularity: %u\n",
                dev, dev->se_sub_dev->se_dev_attrib.unmap_granularity);

        return 0;
}

int se_dev_set_unmap_granularity_alignment(
        struct se_device *dev,
        u32 unmap_granularity_alignment)
{
        dev->se_sub_dev->se_dev_attrib.unmap_granularity_alignment = unmap_granularity_alignment;
        printk(KERN_INFO "dev[%p]: Set unmap_granularity_alignment: %u\n",
                dev, dev->se_sub_dev->se_dev_attrib.unmap_granularity_alignment);

        return 0;
}
int se_dev_set_emulate_dpo(struct se_device *dev, int flag)
{
        if ((flag != 0) && (flag != 1)) {
                printk(KERN_ERR "Illegal value %d\n", flag);
                return -1;
        }
        if (dev->transport->dpo_emulated == NULL) {
                printk(KERN_ERR "dev->transport->dpo_emulated is NULL\n");
                return -1;
        }
        if (dev->transport->dpo_emulated(dev) == 0) {
                printk(KERN_ERR "dev->transport->dpo_emulated not supported\n");
                return -1;
        }
        dev->se_sub_dev->se_dev_attrib.emulate_dpo = flag;
        printk(KERN_INFO "dev[%p]: SE Device Page Out (DPO) Emulation"
                " bit: %d\n", dev, dev->se_sub_dev->se_dev_attrib.emulate_dpo);
        return 0;
}

int se_dev_set_emulate_fua_write(struct se_device *dev, int flag)
{
        if ((flag != 0) && (flag != 1)) {
                printk(KERN_ERR "Illegal value %d\n", flag);
                return -1;
        }
        if (dev->transport->fua_write_emulated == NULL) {
                printk(KERN_ERR "dev->transport->fua_write_emulated is NULL\n");
                return -1;
        }
        if (dev->transport->fua_write_emulated(dev) == 0) {
                printk(KERN_ERR "dev->transport->fua_write_emulated not supported\n");
                return -1;
        }
        dev->se_sub_dev->se_dev_attrib.emulate_fua_write = flag;
        printk(KERN_INFO "dev[%p]: SE Device Forced Unit Access WRITEs: %d\n",
                dev, dev->se_sub_dev->se_dev_attrib.emulate_fua_write);
        return 0;
}

int se_dev_set_emulate_fua_read(struct se_device *dev, int flag)
{
        if ((flag != 0) && (flag != 1)) {
                printk(KERN_ERR "Illegal value %d\n", flag);
                return -1;
        }
        if (dev->transport->fua_read_emulated == NULL) {
                printk(KERN_ERR "dev->transport->fua_read_emulated is NULL\n");
                return -1;
        }
        if (dev->transport->fua_read_emulated(dev) == 0) {
                printk(KERN_ERR "dev->transport->fua_read_emulated not supported\n");
                return -1;
        }
        dev->se_sub_dev->se_dev_attrib.emulate_fua_read = flag;
        printk(KERN_INFO "dev[%p]: SE Device Forced Unit Access READs: %d\n",
                dev, dev->se_sub_dev->se_dev_attrib.emulate_fua_read);
        return 0;
}

int se_dev_set_emulate_write_cache(struct se_device *dev, int flag)
{
        if ((flag != 0) && (flag != 1)) {
                printk(KERN_ERR "Illegal value %d\n", flag);
                return -1;
        }
        if (dev->transport->write_cache_emulated == NULL) {
                printk(KERN_ERR "dev->transport->write_cache_emulated is NULL\n");
                return -1;
        }
        if (dev->transport->write_cache_emulated(dev) == 0) {
                printk(KERN_ERR "dev->transport->write_cache_emulated not supported\n");
                return -1;
        }
        dev->se_sub_dev->se_dev_attrib.emulate_write_cache = flag;
        printk(KERN_INFO "dev[%p]: SE Device WRITE_CACHE_EMULATION flag: %d\n",
                dev, dev->se_sub_dev->se_dev_attrib.emulate_write_cache);
        return 0;
}
int se_dev_set_emulate_ua_intlck_ctrl(struct se_device *dev, int flag)
{
        if ((flag != 0) && (flag != 1) && (flag != 2)) {
                printk(KERN_ERR "Illegal value %d\n", flag);
                return -1;
        }
        if (atomic_read(&dev->dev_export_obj.obj_access_count)) {
                printk(KERN_ERR "dev[%p]: Unable to change SE Device"
                        " UA_INTRLCK_CTRL while dev_export_obj: %d count"
                        " exists\n", dev,
                        atomic_read(&dev->dev_export_obj.obj_access_count));
                return -1;
        }
        dev->se_sub_dev->se_dev_attrib.emulate_ua_intlck_ctrl = flag;
        printk(KERN_INFO "dev[%p]: SE Device UA_INTRLCK_CTRL flag: %d\n",
                dev, dev->se_sub_dev->se_dev_attrib.emulate_ua_intlck_ctrl);

        return 0;
}

int se_dev_set_emulate_tas(struct se_device *dev, int flag)
{
        if ((flag != 0) && (flag != 1)) {
                printk(KERN_ERR "Illegal value %d\n", flag);
                return -1;
        }
        if (atomic_read(&dev->dev_export_obj.obj_access_count)) {
                printk(KERN_ERR "dev[%p]: Unable to change SE Device TAS while"
                        " dev_export_obj: %d count exists\n", dev,
                        atomic_read(&dev->dev_export_obj.obj_access_count));
                return -1;
        }
        dev->se_sub_dev->se_dev_attrib.emulate_tas = flag;
        printk(KERN_INFO "dev[%p]: SE Device TASK_ABORTED status bit: %s\n",
                dev, (dev->se_sub_dev->se_dev_attrib.emulate_tas) ? "Enabled" : "Disabled");

        return 0;
}
int se_dev_set_emulate_tpu(struct se_device *dev, int flag)
{
        if ((flag != 0) && (flag != 1)) {
                printk(KERN_ERR "Illegal value %d\n", flag);
                return -1;
        }
        /*
         * We expect this value to be non-zero when generic Block Layer
         * Discard support is detected in iblock_create_virtdevice().
         */
        if (!(dev->se_sub_dev->se_dev_attrib.max_unmap_block_desc_count)) {
                printk(KERN_ERR "Generic Block Discard not supported\n");
                return -1;
        }

        dev->se_sub_dev->se_dev_attrib.emulate_tpu = flag;
        printk(KERN_INFO "dev[%p]: SE Device Thin Provisioning UNMAP bit: %d\n",
                dev, flag);
        return 0;
}

int se_dev_set_emulate_tpws(struct se_device *dev, int flag)
{
        if ((flag != 0) && (flag != 1)) {
                printk(KERN_ERR "Illegal value %d\n", flag);
                return -1;
        }
        /*
         * We expect this value to be non-zero when generic Block Layer
         * Discard support is detected in iblock_create_virtdevice().
         */
        if (!(dev->se_sub_dev->se_dev_attrib.max_unmap_block_desc_count)) {
                printk(KERN_ERR "Generic Block Discard not supported\n");
                return -1;
        }

        dev->se_sub_dev->se_dev_attrib.emulate_tpws = flag;
        printk(KERN_INFO "dev[%p]: SE Device Thin Provisioning WRITE_SAME: %d\n",
                dev, flag);
        return 0;
}
int se_dev_set_enforce_pr_isids(struct se_device *dev, int flag)
{
        if ((flag != 0) && (flag != 1)) {
                printk(KERN_ERR "Illegal value %d\n", flag);
                return -1;
        }
        dev->se_sub_dev->se_dev_attrib.enforce_pr_isids = flag;
        printk(KERN_INFO "dev[%p]: SE Device enforce_pr_isids bit: %s\n", dev,
                (dev->se_sub_dev->se_dev_attrib.enforce_pr_isids) ? "Enabled" : "Disabled");
        return 0;
}
/*
 * Note, this can only be called on unexported SE Device Object.
 */
int se_dev_set_queue_depth(struct se_device *dev, u32 queue_depth)
{
        u32 orig_queue_depth = dev->queue_depth;

        if (atomic_read(&dev->dev_export_obj.obj_access_count)) {
                printk(KERN_ERR "dev[%p]: Unable to change SE Device TCQ while"
                        " dev_export_obj: %d count exists\n", dev,
                        atomic_read(&dev->dev_export_obj.obj_access_count));
                return -1;
        }
        if (!(queue_depth)) {
                printk(KERN_ERR "dev[%p]: Illegal ZERO value for queue"
                        "_depth\n", dev);
                return -1;
        }

        if (dev->transport->transport_type == TRANSPORT_PLUGIN_PHBA_PDEV) {
                if (queue_depth > dev->se_sub_dev->se_dev_attrib.hw_queue_depth) {
                        printk(KERN_ERR "dev[%p]: Passed queue_depth: %u"
                                " exceeds TCM/SE_Device TCQ: %u\n",
                                dev, queue_depth,
                                dev->se_sub_dev->se_dev_attrib.hw_queue_depth);
                        return -1;
                }
        } else {
                if (queue_depth > dev->se_sub_dev->se_dev_attrib.queue_depth) {
                        if (queue_depth > dev->se_sub_dev->se_dev_attrib.hw_queue_depth) {
                                printk(KERN_ERR "dev[%p]: Passed queue_depth:"
                                        " %u exceeds TCM/SE_Device MAX"
                                        " TCQ: %u\n", dev, queue_depth,
                                        dev->se_sub_dev->se_dev_attrib.hw_queue_depth);
                                return -1;
                        }
                }
        }

        dev->se_sub_dev->se_dev_attrib.queue_depth = dev->queue_depth = queue_depth;
        if (queue_depth > orig_queue_depth)
                atomic_add(queue_depth - orig_queue_depth, &dev->depth_left);
        else if (queue_depth < orig_queue_depth)
                atomic_sub(orig_queue_depth - queue_depth, &dev->depth_left);

        printk(KERN_INFO "dev[%p]: SE Device TCQ Depth changed to: %u\n",
                dev, queue_depth);
        return 0;
}
int se_dev_set_max_sectors(struct se_device *dev, u32 max_sectors)
{
        int force = 0; /* Force setting for VDEVS */

        if (atomic_read(&dev->dev_export_obj.obj_access_count)) {
                printk(KERN_ERR "dev[%p]: Unable to change SE Device"
                        " max_sectors while dev_export_obj: %d count exists\n",
                        dev, atomic_read(&dev->dev_export_obj.obj_access_count));
                return -1;
        }
        if (!(max_sectors)) {
                printk(KERN_ERR "dev[%p]: Illegal ZERO value for"
                        " max_sectors\n", dev);
                return -1;
        }
        if (max_sectors < DA_STATUS_MAX_SECTORS_MIN) {
                printk(KERN_ERR "dev[%p]: Passed max_sectors: %u less than"
                        " DA_STATUS_MAX_SECTORS_MIN: %u\n", dev, max_sectors,
                        DA_STATUS_MAX_SECTORS_MIN);
                return -1;
        }
        if (dev->transport->transport_type == TRANSPORT_PLUGIN_PHBA_PDEV) {
                if (max_sectors > dev->se_sub_dev->se_dev_attrib.hw_max_sectors) {
                        printk(KERN_ERR "dev[%p]: Passed max_sectors: %u"
                                " greater than TCM/SE_Device max_sectors:"
                                " %u\n", dev, max_sectors,
                                dev->se_sub_dev->se_dev_attrib.hw_max_sectors);
                        return -1;
                }
        } else {
                if (!(force) && (max_sectors >
                                 dev->se_sub_dev->se_dev_attrib.hw_max_sectors)) {
                        printk(KERN_ERR "dev[%p]: Passed max_sectors: %u"
                                " greater than TCM/SE_Device max_sectors"
                                ": %u, use force=1 to override.\n", dev,
                                max_sectors, dev->se_sub_dev->se_dev_attrib.hw_max_sectors);
                        return -1;
                }
                if (max_sectors > DA_STATUS_MAX_SECTORS_MAX) {
                        printk(KERN_ERR "dev[%p]: Passed max_sectors: %u"
                                " greater than DA_STATUS_MAX_SECTORS_MAX:"
                                " %u\n", dev, max_sectors,
                                DA_STATUS_MAX_SECTORS_MAX);
                        return -1;
                }
        }

        dev->se_sub_dev->se_dev_attrib.max_sectors = max_sectors;
        printk("dev[%p]: SE Device max_sectors changed to %u\n",
                dev, max_sectors);
        return 0;
}
int se_dev_set_optimal_sectors(struct se_device *dev, u32 optimal_sectors)
{
        if (atomic_read(&dev->dev_export_obj.obj_access_count)) {
                printk(KERN_ERR "dev[%p]: Unable to change SE Device"
                        " optimal_sectors while dev_export_obj: %d count exists\n",
                        dev, atomic_read(&dev->dev_export_obj.obj_access_count));
                return -EINVAL;
        }
        if (dev->transport->transport_type == TRANSPORT_PLUGIN_PHBA_PDEV) {
                printk(KERN_ERR "dev[%p]: Passed optimal_sectors cannot be"
                        " changed for TCM/pSCSI\n", dev);
                return -EINVAL;
        }
        if (optimal_sectors > dev->se_sub_dev->se_dev_attrib.max_sectors) {
                printk(KERN_ERR "dev[%p]: Passed optimal_sectors %u cannot be"
                        " greater than max_sectors: %u\n", dev,
                        optimal_sectors, dev->se_sub_dev->se_dev_attrib.max_sectors);
                return -EINVAL;
        }

        dev->se_sub_dev->se_dev_attrib.optimal_sectors = optimal_sectors;
        printk(KERN_INFO "dev[%p]: SE Device optimal_sectors changed to %u\n",
                dev, optimal_sectors);

        return 0;
}
int se_dev_set_block_size(struct se_device *dev, u32 block_size)
{
        if (atomic_read(&dev->dev_export_obj.obj_access_count)) {
                printk(KERN_ERR "dev[%p]: Unable to change SE Device block_size"
                        " while dev_export_obj: %d count exists\n", dev,
                        atomic_read(&dev->dev_export_obj.obj_access_count));
                return -1;
        }

        if ((block_size != 512) &&
            (block_size != 1024) &&
            (block_size != 2048) &&
            (block_size != 4096)) {
                printk(KERN_ERR "dev[%p]: Illegal value for block_size: %u"
                        " for SE device, must be 512, 1024, 2048 or 4096\n",
                        dev, block_size);
                return -1;
        }

        if (dev->transport->transport_type == TRANSPORT_PLUGIN_PHBA_PDEV) {
                printk(KERN_ERR "dev[%p]: Not allowed to change block_size for"
                        " Physical Device, use for Linux/SCSI to change"
                        " block_size for underlying hardware\n", dev);
                return -1;
        }

        dev->se_sub_dev->se_dev_attrib.block_size = block_size;
        printk(KERN_INFO "dev[%p]: SE Device block_size changed to %u\n",
                dev, block_size);
        return 0;
}
struct se_lun *core_dev_add_lun(
        struct se_portal_group *tpg,
        struct se_hba *hba,
        struct se_device *dev,
        u32 lun)
{
        struct se_lun *lun_p;
        u32 lun_access = 0;

        if (atomic_read(&dev->dev_access_obj.obj_access_count) != 0) {
                printk(KERN_ERR "Unable to export struct se_device while dev_access_obj: %d\n",
                        atomic_read(&dev->dev_access_obj.obj_access_count));
                return NULL;
        }

        lun_p = core_tpg_pre_addlun(tpg, lun);
        if ((IS_ERR(lun_p)) || !(lun_p))
                return NULL;

        if (dev->dev_flags & DF_READ_ONLY)
                lun_access = TRANSPORT_LUNFLAGS_READ_ONLY;
        else
                lun_access = TRANSPORT_LUNFLAGS_READ_WRITE;

        if (core_tpg_post_addlun(tpg, lun_p, lun_access, dev) < 0)
                return NULL;

        printk(KERN_INFO "%s_TPG[%u]_LUN[%u] - Activated %s Logical Unit from"
                " CORE HBA: %u\n", tpg->se_tpg_tfo->get_fabric_name(),
                tpg->se_tpg_tfo->tpg_get_tag(tpg), lun_p->unpacked_lun,
                tpg->se_tpg_tfo->get_fabric_name(), hba->hba_id);
        /*
         * Update LUN maps for dynamically added initiators when
         * generate_node_acl is enabled.
         */
        if (tpg->se_tpg_tfo->tpg_check_demo_mode(tpg)) {
                struct se_node_acl *acl;
                spin_lock_bh(&tpg->acl_node_lock);
                list_for_each_entry(acl, &tpg->acl_node_list, acl_list) {
                        if (acl->dynamic_node_acl) {
                                spin_unlock_bh(&tpg->acl_node_lock);
                                core_tpg_add_node_to_devs(acl, tpg);
                                spin_lock_bh(&tpg->acl_node_lock);
                        }
                }
                spin_unlock_bh(&tpg->acl_node_lock);
        }

        return lun_p;
}
/*      core_dev_del_lun():
 *
 *
 */
int core_dev_del_lun(
        struct se_portal_group *tpg,
        u32 unpacked_lun)
{
        struct se_lun *lun;
        int ret = 0;

        lun = core_tpg_pre_dellun(tpg, unpacked_lun, &ret);
        if (!(lun))
                return ret;

        core_tpg_post_dellun(tpg, lun);

        printk(KERN_INFO "%s_TPG[%u]_LUN[%u] - Deactivated %s Logical Unit from"
                " device object\n", tpg->se_tpg_tfo->get_fabric_name(),
                tpg->se_tpg_tfo->tpg_get_tag(tpg), unpacked_lun,
                tpg->se_tpg_tfo->get_fabric_name());

        return 0;
}
struct se_lun *core_get_lun_from_tpg(struct se_portal_group *tpg, u32 unpacked_lun)
{
        struct se_lun *lun;

        spin_lock(&tpg->tpg_lun_lock);
        if (unpacked_lun > (TRANSPORT_MAX_LUNS_PER_TPG-1)) {
                printk(KERN_ERR "%s LUN: %u exceeds TRANSPORT_MAX_LUNS"
                        "_PER_TPG-1: %u for Target Portal Group: %hu\n",
                        tpg->se_tpg_tfo->get_fabric_name(), unpacked_lun,
                        TRANSPORT_MAX_LUNS_PER_TPG-1,
                        tpg->se_tpg_tfo->tpg_get_tag(tpg));
                spin_unlock(&tpg->tpg_lun_lock);
                return NULL;
        }
        lun = &tpg->tpg_lun_list[unpacked_lun];

        if (lun->lun_status != TRANSPORT_LUN_STATUS_FREE) {
                printk(KERN_ERR "%s Logical Unit Number: %u is not free on"
                        " Target Portal Group: %hu, ignoring request.\n",
                        tpg->se_tpg_tfo->get_fabric_name(), unpacked_lun,
                        tpg->se_tpg_tfo->tpg_get_tag(tpg));
                spin_unlock(&tpg->tpg_lun_lock);
                return NULL;
        }
        spin_unlock(&tpg->tpg_lun_lock);

        return lun;
}
/*      core_dev_get_lun():
 *
 *
 */
static struct se_lun *core_dev_get_lun(struct se_portal_group *tpg, u32 unpacked_lun)
{
        struct se_lun *lun;

        spin_lock(&tpg->tpg_lun_lock);
        if (unpacked_lun > (TRANSPORT_MAX_LUNS_PER_TPG-1)) {
                printk(KERN_ERR "%s LUN: %u exceeds TRANSPORT_MAX_LUNS_PER"
                        "_TPG-1: %u for Target Portal Group: %hu\n",
                        tpg->se_tpg_tfo->get_fabric_name(), unpacked_lun,
                        TRANSPORT_MAX_LUNS_PER_TPG-1,
                        tpg->se_tpg_tfo->tpg_get_tag(tpg));
                spin_unlock(&tpg->tpg_lun_lock);
                return NULL;
        }
        lun = &tpg->tpg_lun_list[unpacked_lun];

        if (lun->lun_status != TRANSPORT_LUN_STATUS_ACTIVE) {
                printk(KERN_ERR "%s Logical Unit Number: %u is not active on"
                        " Target Portal Group: %hu, ignoring request.\n",
                        tpg->se_tpg_tfo->get_fabric_name(), unpacked_lun,
                        tpg->se_tpg_tfo->tpg_get_tag(tpg));
                spin_unlock(&tpg->tpg_lun_lock);
                return NULL;
        }
        spin_unlock(&tpg->tpg_lun_lock);

        return lun;
}
struct se_lun_acl *core_dev_init_initiator_node_lun_acl(
        struct se_portal_group *tpg,
        u32 mapped_lun,
        char *initiatorname,
        int *ret)
{
        struct se_lun_acl *lacl;
        struct se_node_acl *nacl;

        if (strlen(initiatorname) >= TRANSPORT_IQN_LEN) {
                printk(KERN_ERR "%s InitiatorName exceeds maximum size.\n",
                        tpg->se_tpg_tfo->get_fabric_name());
                *ret = -EOVERFLOW;
                return NULL;
        }
        nacl = core_tpg_get_initiator_node_acl(tpg, initiatorname);
        if (!(nacl)) {
                *ret = -EINVAL;
                return NULL;
        }
        lacl = kzalloc(sizeof(struct se_lun_acl), GFP_KERNEL);
        if (!(lacl)) {
                printk(KERN_ERR "Unable to allocate memory for struct se_lun_acl.\n");
                *ret = -ENOMEM;
                return NULL;
        }

        INIT_LIST_HEAD(&lacl->lacl_list);
        lacl->mapped_lun = mapped_lun;
        lacl->se_lun_nacl = nacl;
        snprintf(lacl->initiatorname, TRANSPORT_IQN_LEN, "%s", initiatorname);

        return lacl;
}
int core_dev_add_initiator_node_lun_acl(
        struct se_portal_group *tpg,
        struct se_lun_acl *lacl,
        u32 unpacked_lun,
        u32 lun_access)
{
        struct se_lun *lun;
        struct se_node_acl *nacl;

        lun = core_dev_get_lun(tpg, unpacked_lun);
        if (!(lun)) {
                printk(KERN_ERR "%s Logical Unit Number: %u is not active on"
                        " Target Portal Group: %hu, ignoring request.\n",
                        tpg->se_tpg_tfo->get_fabric_name(), unpacked_lun,
                        tpg->se_tpg_tfo->tpg_get_tag(tpg));
                return -EINVAL;
        }

        nacl = lacl->se_lun_nacl;
        if (!(nacl))
                return -EINVAL;

        if ((lun->lun_access & TRANSPORT_LUNFLAGS_READ_ONLY) &&
            (lun_access & TRANSPORT_LUNFLAGS_READ_WRITE))
                lun_access = TRANSPORT_LUNFLAGS_READ_ONLY;

        lacl->se_lun = lun;

        if (core_update_device_list_for_node(lun, lacl, lacl->mapped_lun,
                        lun_access, nacl, tpg, 1) < 0)
                return -EINVAL;

        spin_lock(&lun->lun_acl_lock);
        list_add_tail(&lacl->lacl_list, &lun->lun_acl_list);
        atomic_inc(&lun->lun_acl_count);
        smp_mb__after_atomic_inc();
        spin_unlock(&lun->lun_acl_lock);

        printk(KERN_INFO "%s_TPG[%hu]_LUN[%u->%u] - Added %s ACL for "
                " InitiatorNode: %s\n", tpg->se_tpg_tfo->get_fabric_name(),
                tpg->se_tpg_tfo->tpg_get_tag(tpg), unpacked_lun, lacl->mapped_lun,
                (lun_access & TRANSPORT_LUNFLAGS_READ_WRITE) ? "RW" : "RO",
                lacl->initiatorname);
        /*
         * Check to see if there are any existing persistent reservation APTPL
         * pre-registrations that need to be enabled for this LUN ACL..
         */
        core_scsi3_check_aptpl_registration(lun->lun_se_dev, tpg, lun, lacl);
        return 0;
}
/*      core_dev_del_initiator_node_lun_acl():
 *
 *
 */
int core_dev_del_initiator_node_lun_acl(
        struct se_portal_group *tpg,
        struct se_lun *lun,
        struct se_lun_acl *lacl)
{
        struct se_node_acl *nacl;

        nacl = lacl->se_lun_nacl;
        if (!(nacl))
                return -EINVAL;

        spin_lock(&lun->lun_acl_lock);
        list_del(&lacl->lacl_list);
        atomic_dec(&lun->lun_acl_count);
        smp_mb__after_atomic_dec();
        spin_unlock(&lun->lun_acl_lock);

        core_update_device_list_for_node(lun, NULL, lacl->mapped_lun,
                TRANSPORT_LUNFLAGS_NO_ACCESS, nacl, tpg, 0);

        lacl->se_lun = NULL;

        printk(KERN_INFO "%s_TPG[%hu]_LUN[%u] - Removed ACL for"
                " InitiatorNode: %s Mapped LUN: %u\n",
                tpg->se_tpg_tfo->get_fabric_name(),
                tpg->se_tpg_tfo->tpg_get_tag(tpg), lun->unpacked_lun,
                lacl->initiatorname, lacl->mapped_lun);

        return 0;
}
void core_dev_free_initiator_node_lun_acl(
        struct se_portal_group *tpg,
        struct se_lun_acl *lacl)
{
        printk("%s_TPG[%hu] - Freeing ACL for %s InitiatorNode: %s"
                " Mapped LUN: %u\n", tpg->se_tpg_tfo->get_fabric_name(),
                tpg->se_tpg_tfo->tpg_get_tag(tpg),
                tpg->se_tpg_tfo->get_fabric_name(),
                lacl->initiatorname, lacl->mapped_lun);

        kfree(lacl);
}
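
/*
 * Set up the global virtual LUN0 device on an internal-use "rd_dr" ramdisk
 * HBA with a small rd_pages allocation.  This backs
 * se_portal_group->tpg_virt_lun0, used by transport_get_lun_for_cmd() when
 * an initiator port has no active MappedLUN=0.
 */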
int core_dev_setup_virtual_lun0(void)
{
        struct se_hba *hba;
        struct se_device *dev;
        struct se_subsystem_dev *se_dev = NULL;
        struct se_subsystem_api *t;
        char buf[16];
        int ret;

        hba = core_alloc_hba("rd_dr", 0, HBA_FLAGS_INTERNAL_USE);
        if (IS_ERR(hba))
                return PTR_ERR(hba);

        lun0_hba = hba;
        t = hba->transport;

        se_dev = kzalloc(sizeof(struct se_subsystem_dev), GFP_KERNEL);
        if (!(se_dev)) {
                printk(KERN_ERR "Unable to allocate memory for"
                                " struct se_subsystem_dev\n");
                ret = -ENOMEM;
                goto out;
        }
        INIT_LIST_HEAD(&se_dev->se_dev_node);
        INIT_LIST_HEAD(&se_dev->t10_wwn.t10_vpd_list);
        spin_lock_init(&se_dev->t10_wwn.t10_vpd_lock);
        INIT_LIST_HEAD(&se_dev->t10_pr.registration_list);
        INIT_LIST_HEAD(&se_dev->t10_pr.aptpl_reg_list);
        spin_lock_init(&se_dev->t10_pr.registration_lock);
        spin_lock_init(&se_dev->t10_pr.aptpl_reg_lock);
        INIT_LIST_HEAD(&se_dev->t10_alua.tg_pt_gps_list);
        spin_lock_init(&se_dev->t10_alua.tg_pt_gps_lock);
        spin_lock_init(&se_dev->se_dev_lock);
        se_dev->t10_pr.pr_aptpl_buf_len = PR_APTPL_BUF_LEN;
        se_dev->t10_wwn.t10_sub_dev = se_dev;
        se_dev->t10_alua.t10_sub_dev = se_dev;
        se_dev->se_dev_attrib.da_sub_dev = se_dev;
        se_dev->se_dev_hba = hba;

        se_dev->se_dev_su_ptr = t->allocate_virtdevice(hba, "virt_lun0");
        if (!(se_dev->se_dev_su_ptr)) {
                printk(KERN_ERR "Unable to locate subsystem dependent pointer"
                        " from allocate_virtdevice()\n");
                ret = -ENOMEM;
                goto out;
        }
        lun0_su_dev = se_dev;

        memset(buf, 0, 16);
        sprintf(buf, "rd_pages=8");
        t->set_configfs_dev_params(hba, se_dev, buf, sizeof(buf));

        dev = t->create_virtdevice(hba, se_dev, se_dev->se_dev_su_ptr);
        if (IS_ERR(dev) || !dev) {
                ret = -ENOMEM;
                goto out;
        }
        se_dev->se_dev_ptr = dev;
        g_lun0_dev = dev;

        return 0;
out:
        lun0_su_dev = NULL;
        kfree(se_dev);
        if (lun0_hba) {
                core_delete_hba(lun0_hba);
                lun0_hba = NULL;
        }
        return ret;
}
void core_dev_release_virtual_lun0(void)
{
        struct se_hba *hba = lun0_hba;
        struct se_subsystem_dev *su_dev = lun0_su_dev;

        if (!(hba))
                return;

        if (g_lun0_dev)
                se_free_virtual_device(g_lun0_dev, hba);

        kfree(su_dev);
        core_delete_hba(hba);
}