/*******************************************************************************
 * Filename:  target_core_device.c (based on iscsi_target_device.c)
 *
 * This file contains the TCM Virtual Device and Disk Transport
 * agnostic related functions.
 *
 * Copyright (c) 2003, 2004, 2005 PyX Technologies, Inc.
 * Copyright (c) 2005-2006 SBE, Inc.  All Rights Reserved.
 * Copyright (c) 2007-2010 Rising Tide Systems
 * Copyright (c) 2008-2010 Linux-iSCSI.org
 *
 * Nicholas A. Bellinger <nab@kernel.org>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
 *
 ******************************************************************************/

#include <linux/net.h>
#include <linux/string.h>
#include <linux/delay.h>
#include <linux/timer.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/kthread.h>
#include <linux/in.h>
#include <net/sock.h>
#include <net/tcp.h>
#include <scsi/scsi.h>
#include <scsi/scsi_device.h>

#include <target/target_core_base.h>
#include <target/target_core_device.h>
#include <target/target_core_tpg.h>
#include <target/target_core_transport.h>
#include <target/target_core_fabric_ops.h>

#include "target_core_alua.h"
#include "target_core_hba.h"
#include "target_core_pr.h"
#include "target_core_ua.h"

static void se_dev_start(struct se_device *dev);
static void se_dev_stop(struct se_device *dev);

static struct se_hba *lun0_hba;
static struct se_subsystem_dev *lun0_su_dev;
/* not static, needed by tpg.c */
struct se_device *g_lun0_dev;

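/*
 * transport_lookup_cmd_lun():
 *
 * Map unpacked_lun to this session's struct se_dev_entry under
 * device_list_lock, enforce READ_ONLY mappings, update per-entry and
 * per-device statistics, and attach the command to the LUN's
 * lun_cmd_list for shutdown tracking.  When no MappedLUN=0 exists for
 * the initiator, LUN 0 falls back to the write-protected
 * se_portal_group->tpg_virt_lun0 so that REPORT LUNS and friends
 * still work.
 */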
int transport_lookup_cmd_lun(struct se_cmd *se_cmd, u32 unpacked_lun)
{
	struct se_lun *se_lun = NULL;
	struct se_session *se_sess = se_cmd->se_sess;
	struct se_device *dev;
	unsigned long flags;

	if (unpacked_lun >= TRANSPORT_MAX_LUNS_PER_TPG) {
		se_cmd->scsi_sense_reason = TCM_NON_EXISTENT_LUN;
		se_cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION;
		return -ENODEV;
	}

	spin_lock_irqsave(&se_sess->se_node_acl->device_list_lock, flags);
	se_cmd->se_deve = &se_sess->se_node_acl->device_list[unpacked_lun];
	if (se_cmd->se_deve->lun_flags & TRANSPORT_LUNFLAGS_INITIATOR_ACCESS) {
		struct se_dev_entry *deve = se_cmd->se_deve;

		deve->total_cmds++;
		deve->total_bytes += se_cmd->data_length;

		if ((se_cmd->data_direction == DMA_TO_DEVICE) &&
		    (deve->lun_flags & TRANSPORT_LUNFLAGS_READ_ONLY)) {
			se_cmd->scsi_sense_reason = TCM_WRITE_PROTECTED;
			se_cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION;
			pr_err("TARGET_CORE[%s]: Detected WRITE_PROTECTED LUN"
				" Access for 0x%08x\n",
				se_cmd->se_tfo->get_fabric_name(),
				unpacked_lun);
			spin_unlock_irqrestore(&se_sess->se_node_acl->device_list_lock, flags);
			return -EACCES;
		}

		if (se_cmd->data_direction == DMA_TO_DEVICE)
			deve->write_bytes += se_cmd->data_length;
		else if (se_cmd->data_direction == DMA_FROM_DEVICE)
			deve->read_bytes += se_cmd->data_length;

		deve->deve_cmds++;

		se_lun = deve->se_lun;
		se_cmd->se_lun = deve->se_lun;
		se_cmd->pr_res_key = deve->pr_res_key;
		se_cmd->orig_fe_lun = unpacked_lun;
		se_cmd->se_orig_obj_ptr = se_cmd->se_lun->lun_se_dev;
		se_cmd->se_cmd_flags |= SCF_SE_LUN_CMD;
	}
	spin_unlock_irqrestore(&se_sess->se_node_acl->device_list_lock, flags);

	if (!se_lun) {
		/*
		 * Use the se_portal_group->tpg_virt_lun0 to allow for
		 * REPORT_LUNS, et al to be returned when no active
		 * MappedLUN=0 exists for this Initiator Port.
		 */
		if (unpacked_lun != 0) {
			se_cmd->scsi_sense_reason = TCM_NON_EXISTENT_LUN;
			se_cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION;
			pr_err("TARGET_CORE[%s]: Detected NON_EXISTENT_LUN"
				" Access for 0x%08x\n",
				se_cmd->se_tfo->get_fabric_name(),
				unpacked_lun);
			return -ENODEV;
		}
		/*
		 * Force WRITE PROTECT for virtual LUN 0
		 */
		if ((se_cmd->data_direction != DMA_FROM_DEVICE) &&
		    (se_cmd->data_direction != DMA_NONE)) {
			se_cmd->scsi_sense_reason = TCM_WRITE_PROTECTED;
			se_cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION;
			return -EACCES;
		}

		se_lun = &se_sess->se_tpg->tpg_virt_lun0;
		se_cmd->se_lun = &se_sess->se_tpg->tpg_virt_lun0;
		se_cmd->orig_fe_lun = 0;
		se_cmd->se_orig_obj_ptr = se_cmd->se_lun->lun_se_dev;
		se_cmd->se_cmd_flags |= SCF_SE_LUN_CMD;
	}
	/*
	 * Determine if the struct se_lun is online.
	 * FIXME: Check for LUN_RESET + UNIT Attention
	 */
	if (se_dev_check_online(se_lun->lun_se_dev) != 0) {
		se_cmd->scsi_sense_reason = TCM_NON_EXISTENT_LUN;
		se_cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION;
		return -ENODEV;
	}

	/* Directly associate cmd with se_dev */
	se_cmd->se_dev = se_lun->lun_se_dev;

	/* TODO: get rid of this and use atomics for stats */
	dev = se_lun->lun_se_dev;
	spin_lock_irqsave(&dev->stats_lock, flags);
	dev->num_cmds++;
	if (se_cmd->data_direction == DMA_TO_DEVICE)
		dev->write_bytes += se_cmd->data_length;
	else if (se_cmd->data_direction == DMA_FROM_DEVICE)
		dev->read_bytes += se_cmd->data_length;
	spin_unlock_irqrestore(&dev->stats_lock, flags);

	/*
	 * Add the struct se_cmd to the struct se_lun's cmd list.  This list
	 * is used for tracking state of struct se_cmds during LUN shutdown
	 * events.
	 */
	spin_lock_irqsave(&se_lun->lun_cmd_lock, flags);
	list_add_tail(&se_cmd->se_lun_node, &se_lun->lun_cmd_list);
	atomic_set(&se_cmd->transport_lun_active, 1);
	spin_unlock_irqrestore(&se_lun->lun_cmd_lock, flags);

	return 0;
}
EXPORT_SYMBOL(transport_lookup_cmd_lun);

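/*
 * transport_lookup_tmr_lun():
 *
 * TMR variant of the LUN lookup above: resolves unpacked_lun to the
 * mapped struct se_lun, but skips the I/O statistics and instead
 * registers the struct se_tmr_req on the device's dev_tmr_list.
 */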
int transport_lookup_tmr_lun(struct se_cmd *se_cmd, u32 unpacked_lun)
{
	struct se_dev_entry *deve;
	struct se_lun *se_lun = NULL;
	struct se_session *se_sess = se_cmd->se_sess;
	struct se_tmr_req *se_tmr = se_cmd->se_tmr_req;
	unsigned long flags;

	if (unpacked_lun >= TRANSPORT_MAX_LUNS_PER_TPG) {
		se_cmd->scsi_sense_reason = TCM_NON_EXISTENT_LUN;
		se_cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION;
		return -ENODEV;
	}

	spin_lock_irqsave(&se_sess->se_node_acl->device_list_lock, flags);
	se_cmd->se_deve = &se_sess->se_node_acl->device_list[unpacked_lun];
	deve = se_cmd->se_deve;

	if (deve->lun_flags & TRANSPORT_LUNFLAGS_INITIATOR_ACCESS) {
		se_tmr->tmr_lun = deve->se_lun;
		se_cmd->se_lun = deve->se_lun;
		se_lun = deve->se_lun;
		se_cmd->pr_res_key = deve->pr_res_key;
		se_cmd->orig_fe_lun = unpacked_lun;
		se_cmd->se_orig_obj_ptr = se_cmd->se_dev;
	}
	spin_unlock_irqrestore(&se_sess->se_node_acl->device_list_lock, flags);

	if (!se_lun) {
		pr_debug("TARGET_CORE[%s]: Detected NON_EXISTENT_LUN"
			" Access for 0x%08x\n",
			se_cmd->se_tfo->get_fabric_name(),
			unpacked_lun);
		se_cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION;
		return -ENODEV;
	}
	/*
	 * Determine if the struct se_lun is online.
	 * FIXME: Check for LUN_RESET + UNIT Attention
	 */
	if (se_dev_check_online(se_lun->lun_se_dev) != 0) {
		se_cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION;
		return -ENODEV;
	}

	/* Directly associate cmd with se_dev */
	se_cmd->se_dev = se_lun->lun_se_dev;
	se_tmr->tmr_dev = se_lun->lun_se_dev;

	spin_lock_irqsave(&se_tmr->tmr_dev->se_tmr_lock, flags);
	list_add_tail(&se_tmr->tmr_list, &se_tmr->tmr_dev->dev_tmr_list);
	spin_unlock_irqrestore(&se_tmr->tmr_dev->se_tmr_lock, flags);

	return 0;
}
EXPORT_SYMBOL(transport_lookup_tmr_lun);

/*
 * This function is called from core_scsi3_emulate_pro_register_and_move()
 * and core_scsi3_decode_spec_i_port(), and will increment &deve->pr_ref_count
 * when a matching rtpi is found.
 */
struct se_dev_entry *core_get_se_deve_from_rtpi(
	struct se_node_acl *nacl,
	u16 rtpi)
{
	struct se_dev_entry *deve;
	struct se_lun *lun;
	struct se_port *port;
	struct se_portal_group *tpg = nacl->se_tpg;
	u32 i;

	spin_lock_irq(&nacl->device_list_lock);
	for (i = 0; i < TRANSPORT_MAX_LUNS_PER_TPG; i++) {
		deve = &nacl->device_list[i];

		if (!(deve->lun_flags & TRANSPORT_LUNFLAGS_INITIATOR_ACCESS))
			continue;

		lun = deve->se_lun;
		if (!lun) {
			pr_err("%s device entries device pointer is"
				" NULL, but Initiator has access.\n",
				tpg->se_tpg_tfo->get_fabric_name());
			continue;
		}
		port = lun->lun_sep;
		if (!port) {
			pr_err("%s device entries device pointer is"
				" NULL, but Initiator has access.\n",
				tpg->se_tpg_tfo->get_fabric_name());
			continue;
		}
		if (port->sep_rtpi != rtpi)
			continue;

		atomic_inc(&deve->pr_ref_count);
		smp_mb__after_atomic_inc();
		spin_unlock_irq(&nacl->device_list_lock);

		return deve;
	}
	spin_unlock_irq(&nacl->device_list_lock);

	return NULL;
}

int core_free_device_list_for_node(
	struct se_node_acl *nacl,
	struct se_portal_group *tpg)
{
	struct se_dev_entry *deve;
	struct se_lun *lun;
	u32 i;

	if (!nacl->device_list)
		return 0;

	spin_lock_irq(&nacl->device_list_lock);
	for (i = 0; i < TRANSPORT_MAX_LUNS_PER_TPG; i++) {
		deve = &nacl->device_list[i];

		if (!(deve->lun_flags & TRANSPORT_LUNFLAGS_INITIATOR_ACCESS))
			continue;

		if (!deve->se_lun) {
			pr_err("%s device entries device pointer is"
				" NULL, but Initiator has access.\n",
				tpg->se_tpg_tfo->get_fabric_name());
			continue;
		}
		lun = deve->se_lun;

		spin_unlock_irq(&nacl->device_list_lock);
		core_update_device_list_for_node(lun, NULL, deve->mapped_lun,
			TRANSPORT_LUNFLAGS_NO_ACCESS, nacl, tpg, 0);
		spin_lock_irq(&nacl->device_list_lock);
	}
	spin_unlock_irq(&nacl->device_list_lock);

	kfree(nacl->device_list);
	nacl->device_list = NULL;

	return 0;
}

void core_dec_lacl_count(struct se_node_acl *se_nacl, struct se_cmd *se_cmd)
{
	struct se_dev_entry *deve;

	spin_lock_irq(&se_nacl->device_list_lock);
	deve = &se_nacl->device_list[se_cmd->orig_fe_lun];
	deve->deve_cmds--;
	spin_unlock_irq(&se_nacl->device_list_lock);
}

void core_update_device_list_access(
	u32 mapped_lun,
	u32 lun_access,
	struct se_node_acl *nacl)
{
	struct se_dev_entry *deve;

	spin_lock_irq(&nacl->device_list_lock);
	deve = &nacl->device_list[mapped_lun];
	if (lun_access & TRANSPORT_LUNFLAGS_READ_WRITE) {
		deve->lun_flags &= ~TRANSPORT_LUNFLAGS_READ_ONLY;
		deve->lun_flags |= TRANSPORT_LUNFLAGS_READ_WRITE;
	} else {
		deve->lun_flags &= ~TRANSPORT_LUNFLAGS_READ_WRITE;
		deve->lun_flags |= TRANSPORT_LUNFLAGS_READ_ONLY;
	}
	spin_unlock_irq(&nacl->device_list_lock);
}

/* core_update_device_list_for_node():
 *
 * Enable (enable=1) or disable (enable=0) a struct se_dev_entry mapping
 * for the given node ACL, handling the demo mode -> explicit LUN ACL
 * transition as well as the port's ALUA list membership.
 */
int core_update_device_list_for_node(
	struct se_lun *lun,
	struct se_lun_acl *lun_acl,
	u32 mapped_lun,
	u32 lun_access,
	struct se_node_acl *nacl,
	struct se_portal_group *tpg,
	int enable)
{
	struct se_port *port = lun->lun_sep;
	struct se_dev_entry *deve = &nacl->device_list[mapped_lun];
	int trans = 0;
	/*
	 * If the MappedLUN entry is being disabled, the entry in
	 * port->sep_alua_list must be removed now before clearing the
	 * struct se_dev_entry pointers below as logic in
	 * core_alua_do_transition_tg_pt() depends on these being present.
	 */
	if (!enable) {
		/*
		 * deve->se_lun_acl will be NULL for demo-mode created LUNs
		 * that have not been explicitly converted to MappedLUNs ->
		 * struct se_lun_acl, but we remove deve->alua_port_list from
		 * port->sep_alua_list.  This also means that active UAs and
		 * NodeACL context specific PR metadata for demo-mode
		 * MappedLUN *deve will be released below.
		 */
		spin_lock_bh(&port->sep_alua_lock);
		list_del(&deve->alua_port_list);
		spin_unlock_bh(&port->sep_alua_lock);
	}

	spin_lock_irq(&nacl->device_list_lock);
	if (enable) {
		/*
		 * Check if the call is handling demo mode -> explicit LUN ACL
		 * transition.  This transition must be for the same struct
		 * se_lun + mapped_lun that was setup in demo mode.
		 */
		if (deve->lun_flags & TRANSPORT_LUNFLAGS_INITIATOR_ACCESS) {
			if (deve->se_lun_acl != NULL) {
				pr_err("struct se_dev_entry->se_lun_acl"
					" already set for demo mode -> explicit"
					" LUN ACL transition\n");
				spin_unlock_irq(&nacl->device_list_lock);
				return -EINVAL;
			}
			if (deve->se_lun != lun) {
				pr_err("struct se_dev_entry->se_lun does"
					" not match passed struct se_lun for demo mode"
					" -> explicit LUN ACL transition\n");
				spin_unlock_irq(&nacl->device_list_lock);
				return -EINVAL;
			}
			deve->se_lun_acl = lun_acl;
			trans = 1;
		} else {
			deve->se_lun = lun;
			deve->se_lun_acl = lun_acl;
			deve->mapped_lun = mapped_lun;
			deve->lun_flags |= TRANSPORT_LUNFLAGS_INITIATOR_ACCESS;
		}

		if (lun_access & TRANSPORT_LUNFLAGS_READ_WRITE) {
			deve->lun_flags &= ~TRANSPORT_LUNFLAGS_READ_ONLY;
			deve->lun_flags |= TRANSPORT_LUNFLAGS_READ_WRITE;
		} else {
			deve->lun_flags &= ~TRANSPORT_LUNFLAGS_READ_WRITE;
			deve->lun_flags |= TRANSPORT_LUNFLAGS_READ_ONLY;
		}

		if (trans) {
			spin_unlock_irq(&nacl->device_list_lock);
			return 0;
		}
		deve->creation_time = get_jiffies_64();
		deve->attach_count++;
		spin_unlock_irq(&nacl->device_list_lock);

		spin_lock_bh(&port->sep_alua_lock);
		list_add_tail(&deve->alua_port_list, &port->sep_alua_list);
		spin_unlock_bh(&port->sep_alua_lock);

		return 0;
	}
	/*
	 * Wait for any in process SPEC_I_PT=1 or REGISTER_AND_MOVE
	 * PR operation to complete.
	 */
	spin_unlock_irq(&nacl->device_list_lock);
	while (atomic_read(&deve->pr_ref_count) != 0)
		cpu_relax();
	spin_lock_irq(&nacl->device_list_lock);
	/*
	 * Disable struct se_dev_entry LUN ACL mapping
	 */
	core_scsi3_ua_release_all(deve);
	deve->se_lun = NULL;
	deve->se_lun_acl = NULL;
	deve->lun_flags = 0;
	deve->creation_time = 0;
	deve->attach_count--;
	spin_unlock_irq(&nacl->device_list_lock);

	core_scsi3_free_pr_reg_from_nacl(lun->lun_se_dev, nacl);
	return 0;
}

/* core_clear_lun_from_tpg():
 *
 * Walk every node ACL in the TPG and drop any struct se_dev_entry
 * mapping that still points at the struct se_lun being removed.
 */
void core_clear_lun_from_tpg(struct se_lun *lun, struct se_portal_group *tpg)
{
	struct se_node_acl *nacl;
	struct se_dev_entry *deve;
	u32 i;

	spin_lock_bh(&tpg->acl_node_lock);
	list_for_each_entry(nacl, &tpg->acl_node_list, acl_list) {
		spin_unlock_bh(&tpg->acl_node_lock);

		spin_lock_irq(&nacl->device_list_lock);
		for (i = 0; i < TRANSPORT_MAX_LUNS_PER_TPG; i++) {
			deve = &nacl->device_list[i];
			if (lun != deve->se_lun)
				continue;
			spin_unlock_irq(&nacl->device_list_lock);

			core_update_device_list_for_node(lun, NULL,
				deve->mapped_lun, TRANSPORT_LUNFLAGS_NO_ACCESS,
				nacl, tpg, 0);

			spin_lock_irq(&nacl->device_list_lock);
		}
		spin_unlock_irq(&nacl->device_list_lock);

		spin_lock_bh(&tpg->acl_node_lock);
	}
	spin_unlock_bh(&tpg->acl_node_lock);
}

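/*
 * Allocate a struct se_port and reserve the next free RELATIVE TARGET
 * PORT IDENTIFIER for it.  Called during LUN export; the RTPI table
 * from spc4r17 is reproduced inside the function body below.
 */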
static struct se_port *core_alloc_port(struct se_device *dev)
{
	struct se_port *port, *port_tmp;

	port = kzalloc(sizeof(struct se_port), GFP_KERNEL);
	if (!port) {
		pr_err("Unable to allocate struct se_port\n");
		return ERR_PTR(-ENOMEM);
	}
	INIT_LIST_HEAD(&port->sep_alua_list);
	INIT_LIST_HEAD(&port->sep_list);
	atomic_set(&port->sep_tg_pt_secondary_offline, 0);
	spin_lock_init(&port->sep_alua_lock);
	mutex_init(&port->sep_tg_pt_md_mutex);

	spin_lock(&dev->se_port_lock);
	if (dev->dev_port_count == 0x0000ffff) {
		pr_warn("Reached dev->dev_port_count =="
			" 0x0000ffff\n");
		spin_unlock(&dev->se_port_lock);
		return ERR_PTR(-ENOSPC);
	}
again:
	/*
	 * Allocate the next RELATIVE TARGET PORT IDENTIFIER for this
	 * struct se_device.  Here is the table from spc4r17 section 7.7.3.8.
	 *
	 * Table 473 -- RELATIVE TARGET PORT IDENTIFIER field
	 *
	 * Code		Description
	 * 0h		Reserved
	 * 1h		Relative port 1, historically known as port A
	 * 2h		Relative port 2, historically known as port B
	 * 3h to FFFFh	Relative port 3 through 65 535
	 */
	port->sep_rtpi = dev->dev_rpti_counter++;
	if (!port->sep_rtpi)
		goto again;

	list_for_each_entry(port_tmp, &dev->dev_sep_list, sep_list) {
		/*
		 * Make sure RELATIVE TARGET PORT IDENTIFIER is unique
		 * for 16-bit wrap.
		 */
		if (port->sep_rtpi == port_tmp->sep_rtpi)
			goto again;
	}
	spin_unlock(&dev->se_port_lock);

	return port;
}

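/*
 * Wire up an allocated struct se_port to its target portal group and
 * struct se_lun, and (for SPC3_ALUA_EMULATED devices) attach the port
 * to the default ALUA target port group.
 */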
static void core_export_port(
	struct se_device *dev,
	struct se_portal_group *tpg,
	struct se_port *port,
	struct se_lun *lun)
{
	struct se_subsystem_dev *su_dev = dev->se_sub_dev;
	struct t10_alua_tg_pt_gp_member *tg_pt_gp_mem = NULL;

	spin_lock(&dev->se_port_lock);
	spin_lock(&lun->lun_sep_lock);
	port->sep_tpg = tpg;
	port->sep_lun = lun;
	lun->lun_sep = port;
	spin_unlock(&lun->lun_sep_lock);

	list_add_tail(&port->sep_list, &dev->dev_sep_list);
	spin_unlock(&dev->se_port_lock);

	if (su_dev->t10_alua.alua_type == SPC3_ALUA_EMULATED) {
		tg_pt_gp_mem = core_alua_allocate_tg_pt_gp_mem(port);
		if (IS_ERR(tg_pt_gp_mem) || !tg_pt_gp_mem) {
			pr_err("Unable to allocate t10_alua_tg_pt"
				"_gp_member_t\n");
			return;
		}
		spin_lock(&tg_pt_gp_mem->tg_pt_gp_mem_lock);
		__core_alua_attach_tg_pt_gp_mem(tg_pt_gp_mem,
			su_dev->t10_alua.default_tg_pt_gp);
		spin_unlock(&tg_pt_gp_mem->tg_pt_gp_mem_lock);
		pr_debug("%s/%s: Adding to default ALUA Target Port"
			" Group: alua/default_tg_pt_gp\n",
			dev->transport->name, tpg->se_tpg_tfo->get_fabric_name());
	}

	dev->dev_port_count++;
	port->sep_index = port->sep_rtpi; /* RELATIVE TARGET PORT IDENTIFIER */
}

/*
 * Called with struct se_device->se_port_lock spinlock held.
 */
static void core_release_port(struct se_device *dev, struct se_port *port)
	__releases(&dev->se_port_lock) __acquires(&dev->se_port_lock)
{
	/*
	 * Wait for any port reference for PR ALL_TG_PT=1 operation
	 * to complete in __core_scsi3_alloc_registration()
	 */
	spin_unlock(&dev->se_port_lock);
	while (atomic_read(&port->sep_tg_pt_ref_cnt))
		cpu_relax();
	spin_lock(&dev->se_port_lock);

	core_alua_free_tg_pt_gp_mem(port);

	list_del(&port->sep_list);
	dev->dev_port_count--;
	kfree(port);
}

int core_dev_export(
	struct se_device *dev,
	struct se_portal_group *tpg,
	struct se_lun *lun)
{
	struct se_port *port;

	port = core_alloc_port(dev);
	if (IS_ERR(port))
		return PTR_ERR(port);

	lun->lun_se_dev = dev;
	se_dev_start(dev);

	atomic_inc(&dev->dev_export_obj.obj_access_count);
	core_export_port(dev, tpg, port, lun);
	return 0;
}

void core_dev_unexport(
	struct se_device *dev,
	struct se_portal_group *tpg,
	struct se_lun *lun)
{
	struct se_port *port = lun->lun_sep;

	spin_lock(&lun->lun_sep_lock);
	if (lun->lun_se_dev == NULL) {
		spin_unlock(&lun->lun_sep_lock);
		return;
	}
	spin_unlock(&lun->lun_sep_lock);

	spin_lock(&dev->se_port_lock);
	atomic_dec(&dev->dev_export_obj.obj_access_count);
	core_release_port(dev, port);
	spin_unlock(&dev->se_port_lock);

	se_dev_stop(dev);
	lun->lun_se_dev = NULL;
}

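/*
 * Emulate REPORT LUNS and fill the command's first data page with the
 * parameter data defined in SPC-3.  Sketch of the returned layout:
 *
 *	byte 0-3	LUN LIST LENGTH (number of mapped LUNs * 8)
 *	byte 4-7	Reserved
 *	byte 8+		one 8-byte struct scsi_lun per mapped LUN
 *
 * LUN LIST LENGTH always reflects the full count, even when the
 * allocation length truncates the list itself.
 */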
int transport_core_report_lun_response(struct se_cmd *se_cmd)
{
	struct se_dev_entry *deve;
	struct se_lun *se_lun;
	struct se_session *se_sess = se_cmd->se_sess;
	unsigned char *buf;
	u32 cdb_offset = 0, lun_count = 0, offset = 8, i;

	if (list_empty(&se_cmd->t_task_list)) {
		pr_err("Unable to locate struct se_task for struct se_cmd\n");
		return PYX_TRANSPORT_LU_COMM_FAILURE;
	}

	buf = transport_kmap_first_data_page(se_cmd);

	/*
	 * If no struct se_session pointer is present, this struct se_cmd is
	 * coming via a target_core_mod PASSTHROUGH op, and not through
	 * a $FABRIC_MOD.  In that case, report LUN=0 only.
	 */
	if (!se_sess) {
		int_to_scsilun(0, (struct scsi_lun *)&buf[offset]);
		lun_count = 1;
		goto done;
	}

	spin_lock_irq(&se_sess->se_node_acl->device_list_lock);
	for (i = 0; i < TRANSPORT_MAX_LUNS_PER_TPG; i++) {
		deve = &se_sess->se_node_acl->device_list[i];
		if (!(deve->lun_flags & TRANSPORT_LUNFLAGS_INITIATOR_ACCESS))
			continue;
		se_lun = deve->se_lun;
		/*
		 * We determine the correct LUN LIST LENGTH even once we
		 * have reached the initial allocation length.
		 * See SPC2-R20 7.19.
		 */
		lun_count++;
		if ((cdb_offset + 8) >= se_cmd->data_length)
			continue;

		int_to_scsilun(deve->mapped_lun, (struct scsi_lun *)&buf[offset]);
		offset += 8;
		cdb_offset += 8;
	}
	spin_unlock_irq(&se_sess->se_node_acl->device_list_lock);

	/*
	 * See SPC3 r07, page 159.
	 */
done:
	lun_count *= 8;
	buf[0] = ((lun_count >> 24) & 0xff);
	buf[1] = ((lun_count >> 16) & 0xff);
	buf[2] = ((lun_count >> 8) & 0xff);
	buf[3] = (lun_count & 0xff);

	transport_kunmap_first_data_page(se_cmd);

	return PYX_TRANSPORT_SENT_TO_TRANSPORT;
}

/* se_release_device_for_hba():
 *
 * Stop the device if it is still active, kill its processing thread,
 * release the subsystem dependent dev_ptr, and unlink the device from
 * its struct se_hba before freeing it.
 */
void se_release_device_for_hba(struct se_device *dev)
{
	struct se_hba *hba = dev->se_hba;

	if ((dev->dev_status & TRANSPORT_DEVICE_ACTIVATED) ||
	    (dev->dev_status & TRANSPORT_DEVICE_DEACTIVATED) ||
	    (dev->dev_status & TRANSPORT_DEVICE_SHUTDOWN) ||
	    (dev->dev_status & TRANSPORT_DEVICE_OFFLINE_ACTIVATED) ||
	    (dev->dev_status & TRANSPORT_DEVICE_OFFLINE_DEACTIVATED))
		se_dev_stop(dev);

	if (dev->dev_ptr) {
		kthread_stop(dev->process_thread);
		if (dev->transport->free_device)
			dev->transport->free_device(dev->dev_ptr);
	}

	spin_lock(&hba->device_lock);
	list_del(&dev->dev_list);
	hba->dev_count--;
	spin_unlock(&hba->device_lock);

	core_scsi3_free_all_registrations(dev);
	se_release_vpd_for_dev(dev);

	kfree(dev);
}

void se_release_vpd_for_dev(struct se_device *dev)
{
	struct t10_vpd *vpd, *vpd_tmp;

	spin_lock(&dev->se_sub_dev->t10_wwn.t10_vpd_lock);
	list_for_each_entry_safe(vpd, vpd_tmp,
			&dev->se_sub_dev->t10_wwn.t10_vpd_list, vpd_list) {
		list_del(&vpd->vpd_list);
		kfree(vpd);
	}
	spin_unlock(&dev->se_sub_dev->t10_wwn.t10_vpd_lock);
}

/* se_free_virtual_device():
 *
 * Used for IBLOCK, RAMDISK, and FILEIO Transport Drivers.
 */
int se_free_virtual_device(struct se_device *dev, struct se_hba *hba)
{
	if (!list_empty(&dev->dev_sep_list))
		dump_stack();

	core_alua_free_lu_gp_mem(dev);
	se_release_device_for_hba(dev);

	return 0;
}

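/*
 * se_dev_start() / se_dev_stop() below track exports via
 * dev_obj.obj_access_count: the first export flips a DEACTIVATED
 * status to ACTIVATED (and the OFFLINE variants likewise), and the
 * last un-export flips it back.
 */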
static void se_dev_start(struct se_device *dev)
{
	struct se_hba *hba = dev->se_hba;

	spin_lock(&hba->device_lock);
	atomic_inc(&dev->dev_obj.obj_access_count);
	if (atomic_read(&dev->dev_obj.obj_access_count) == 1) {
		if (dev->dev_status & TRANSPORT_DEVICE_DEACTIVATED) {
			dev->dev_status &= ~TRANSPORT_DEVICE_DEACTIVATED;
			dev->dev_status |= TRANSPORT_DEVICE_ACTIVATED;
		} else if (dev->dev_status &
			   TRANSPORT_DEVICE_OFFLINE_DEACTIVATED) {
			dev->dev_status &=
				~TRANSPORT_DEVICE_OFFLINE_DEACTIVATED;
			dev->dev_status |= TRANSPORT_DEVICE_OFFLINE_ACTIVATED;
		}
	}
	spin_unlock(&hba->device_lock);
}

static void se_dev_stop(struct se_device *dev)
{
	struct se_hba *hba = dev->se_hba;

	spin_lock(&hba->device_lock);
	atomic_dec(&dev->dev_obj.obj_access_count);
	if (atomic_read(&dev->dev_obj.obj_access_count) == 0) {
		if (dev->dev_status & TRANSPORT_DEVICE_ACTIVATED) {
			dev->dev_status &= ~TRANSPORT_DEVICE_ACTIVATED;
			dev->dev_status |= TRANSPORT_DEVICE_DEACTIVATED;
		} else if (dev->dev_status &
			   TRANSPORT_DEVICE_OFFLINE_ACTIVATED) {
			dev->dev_status &= ~TRANSPORT_DEVICE_OFFLINE_ACTIVATED;
			dev->dev_status |= TRANSPORT_DEVICE_OFFLINE_DEACTIVATED;
		}
	}
	spin_unlock(&hba->device_lock);
}

int se_dev_check_online(struct se_device *dev)
{
	unsigned long flags;
	int ret;

	spin_lock_irqsave(&dev->dev_status_lock, flags);
	ret = ((dev->dev_status & TRANSPORT_DEVICE_ACTIVATED) ||
	       (dev->dev_status & TRANSPORT_DEVICE_DEACTIVATED)) ? 0 : 1;
	spin_unlock_irqrestore(&dev->dev_status_lock, flags);

	return ret;
}

int se_dev_check_shutdown(struct se_device *dev)
{
	int ret;

	spin_lock_irq(&dev->dev_status_lock);
	ret = (dev->dev_status & TRANSPORT_DEVICE_SHUTDOWN);
	spin_unlock_irq(&dev->dev_status_lock);

	return ret;
}

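/*
 * Seed se_dev_attrib with the DA_* defaults and the block/queue limits
 * reported by the subsystem plugin; individual values may be changed
 * later through the configfs attribute store methods below.
 */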
void se_dev_set_default_attribs(
	struct se_device *dev,
	struct se_dev_limits *dev_limits)
{
	struct queue_limits *limits = &dev_limits->limits;

	dev->se_sub_dev->se_dev_attrib.emulate_dpo = DA_EMULATE_DPO;
	dev->se_sub_dev->se_dev_attrib.emulate_fua_write = DA_EMULATE_FUA_WRITE;
	dev->se_sub_dev->se_dev_attrib.emulate_fua_read = DA_EMULATE_FUA_READ;
	dev->se_sub_dev->se_dev_attrib.emulate_write_cache = DA_EMULATE_WRITE_CACHE;
	dev->se_sub_dev->se_dev_attrib.emulate_ua_intlck_ctrl = DA_EMULATE_UA_INTLLCK_CTRL;
	dev->se_sub_dev->se_dev_attrib.emulate_tas = DA_EMULATE_TAS;
	dev->se_sub_dev->se_dev_attrib.emulate_tpu = DA_EMULATE_TPU;
	dev->se_sub_dev->se_dev_attrib.emulate_tpws = DA_EMULATE_TPWS;
	dev->se_sub_dev->se_dev_attrib.emulate_reservations = DA_EMULATE_RESERVATIONS;
	dev->se_sub_dev->se_dev_attrib.emulate_alua = DA_EMULATE_ALUA;
	dev->se_sub_dev->se_dev_attrib.enforce_pr_isids = DA_ENFORCE_PR_ISIDS;
	dev->se_sub_dev->se_dev_attrib.is_nonrot = DA_IS_NONROT;
	dev->se_sub_dev->se_dev_attrib.emulate_rest_reord = DA_EMULATE_REST_REORD;
	/*
	 * The TPU=1 and TPWS=1 settings will be set in TCM/IBLOCK
	 * iblock_create_virtdevice() from struct queue_limits values
	 * if blk_queue_discard()==1
	 */
	dev->se_sub_dev->se_dev_attrib.max_unmap_lba_count = DA_MAX_UNMAP_LBA_COUNT;
	dev->se_sub_dev->se_dev_attrib.max_unmap_block_desc_count =
		DA_MAX_UNMAP_BLOCK_DESC_COUNT;
	dev->se_sub_dev->se_dev_attrib.unmap_granularity = DA_UNMAP_GRANULARITY_DEFAULT;
	dev->se_sub_dev->se_dev_attrib.unmap_granularity_alignment =
		DA_UNMAP_GRANULARITY_ALIGNMENT_DEFAULT;
	/*
	 * block_size is based on subsystem plugin dependent requirements.
	 */
	dev->se_sub_dev->se_dev_attrib.hw_block_size = limits->logical_block_size;
	dev->se_sub_dev->se_dev_attrib.block_size = limits->logical_block_size;
	/*
	 * max_sectors is based on subsystem plugin dependent requirements.
	 */
	dev->se_sub_dev->se_dev_attrib.hw_max_sectors = limits->max_hw_sectors;
	dev->se_sub_dev->se_dev_attrib.max_sectors = limits->max_sectors;
	/*
	 * Set optimal_sectors from max_sectors, which can be lowered via
	 * configfs.
	 */
	dev->se_sub_dev->se_dev_attrib.optimal_sectors = limits->max_sectors;
	/*
	 * queue_depth is based on subsystem plugin dependent requirements.
	 */
	dev->se_sub_dev->se_dev_attrib.hw_queue_depth = dev_limits->hw_queue_depth;
	dev->se_sub_dev->se_dev_attrib.queue_depth = dev_limits->queue_depth;
}

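/*
 * The se_dev_set_*() functions below back the per-device configfs
 * attributes.  They share a common pattern: validate the new value
 * (and, where it matters, refuse the change while the device is
 * exported), then update se_dev_attrib and log the result.
 */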
int se_dev_set_task_timeout(struct se_device *dev, u32 task_timeout)
{
	if (task_timeout > DA_TASK_TIMEOUT_MAX) {
		pr_err("dev[%p]: Passed task_timeout: %u larger than"
			" DA_TASK_TIMEOUT_MAX\n", dev, task_timeout);
		return -EINVAL;
	} else {
		dev->se_sub_dev->se_dev_attrib.task_timeout = task_timeout;
		pr_debug("dev[%p]: Set SE Device task_timeout: %u\n",
			dev, task_timeout);
	}

	return 0;
}

int se_dev_set_max_unmap_lba_count(
	struct se_device *dev,
	u32 max_unmap_lba_count)
{
	dev->se_sub_dev->se_dev_attrib.max_unmap_lba_count = max_unmap_lba_count;
	pr_debug("dev[%p]: Set max_unmap_lba_count: %u\n",
		dev, dev->se_sub_dev->se_dev_attrib.max_unmap_lba_count);
	return 0;
}

int se_dev_set_max_unmap_block_desc_count(
	struct se_device *dev,
	u32 max_unmap_block_desc_count)
{
	dev->se_sub_dev->se_dev_attrib.max_unmap_block_desc_count =
		max_unmap_block_desc_count;
	pr_debug("dev[%p]: Set max_unmap_block_desc_count: %u\n",
		dev, dev->se_sub_dev->se_dev_attrib.max_unmap_block_desc_count);
	return 0;
}

int se_dev_set_unmap_granularity(
	struct se_device *dev,
	u32 unmap_granularity)
{
	dev->se_sub_dev->se_dev_attrib.unmap_granularity = unmap_granularity;
	pr_debug("dev[%p]: Set unmap_granularity: %u\n",
		dev, dev->se_sub_dev->se_dev_attrib.unmap_granularity);
	return 0;
}

int se_dev_set_unmap_granularity_alignment(
	struct se_device *dev,
	u32 unmap_granularity_alignment)
{
	dev->se_sub_dev->se_dev_attrib.unmap_granularity_alignment = unmap_granularity_alignment;
	pr_debug("dev[%p]: Set unmap_granularity_alignment: %u\n",
		dev, dev->se_sub_dev->se_dev_attrib.unmap_granularity_alignment);
	return 0;
}

int se_dev_set_emulate_dpo(struct se_device *dev, int flag)
{
	if ((flag != 0) && (flag != 1)) {
		pr_err("Illegal value %d\n", flag);
		return -EINVAL;
	}
	if (dev->transport->dpo_emulated == NULL) {
		pr_err("dev->transport->dpo_emulated is NULL\n");
		return -EINVAL;
	}
	if (dev->transport->dpo_emulated(dev) == 0) {
		pr_err("dev->transport->dpo_emulated not supported\n");
		return -EINVAL;
	}
	dev->se_sub_dev->se_dev_attrib.emulate_dpo = flag;
	pr_debug("dev[%p]: SE Device Page Out (DPO) Emulation"
		" bit: %d\n", dev, dev->se_sub_dev->se_dev_attrib.emulate_dpo);
	return 0;
}

int se_dev_set_emulate_fua_write(struct se_device *dev, int flag)
{
	if ((flag != 0) && (flag != 1)) {
		pr_err("Illegal value %d\n", flag);
		return -EINVAL;
	}
	if (dev->transport->fua_write_emulated == NULL) {
		pr_err("dev->transport->fua_write_emulated is NULL\n");
		return -EINVAL;
	}
	if (dev->transport->fua_write_emulated(dev) == 0) {
		pr_err("dev->transport->fua_write_emulated not supported\n");
		return -EINVAL;
	}
	dev->se_sub_dev->se_dev_attrib.emulate_fua_write = flag;
	pr_debug("dev[%p]: SE Device Forced Unit Access WRITEs: %d\n",
		dev, dev->se_sub_dev->se_dev_attrib.emulate_fua_write);
	return 0;
}

int se_dev_set_emulate_fua_read(struct se_device *dev, int flag)
{
	if ((flag != 0) && (flag != 1)) {
		pr_err("Illegal value %d\n", flag);
		return -EINVAL;
	}
	if (dev->transport->fua_read_emulated == NULL) {
		pr_err("dev->transport->fua_read_emulated is NULL\n");
		return -EINVAL;
	}
	if (dev->transport->fua_read_emulated(dev) == 0) {
		pr_err("dev->transport->fua_read_emulated not supported\n");
		return -EINVAL;
	}
	dev->se_sub_dev->se_dev_attrib.emulate_fua_read = flag;
	pr_debug("dev[%p]: SE Device Forced Unit Access READs: %d\n",
		dev, dev->se_sub_dev->se_dev_attrib.emulate_fua_read);
	return 0;
}

int se_dev_set_emulate_write_cache(struct se_device *dev, int flag)
{
	if ((flag != 0) && (flag != 1)) {
		pr_err("Illegal value %d\n", flag);
		return -EINVAL;
	}
	if (dev->transport->write_cache_emulated == NULL) {
		pr_err("dev->transport->write_cache_emulated is NULL\n");
		return -EINVAL;
	}
	if (dev->transport->write_cache_emulated(dev) == 0) {
		pr_err("dev->transport->write_cache_emulated not supported\n");
		return -EINVAL;
	}
	dev->se_sub_dev->se_dev_attrib.emulate_write_cache = flag;
	pr_debug("dev[%p]: SE Device WRITE_CACHE_EMULATION flag: %d\n",
		dev, dev->se_sub_dev->se_dev_attrib.emulate_write_cache);
	return 0;
}

int se_dev_set_emulate_ua_intlck_ctrl(struct se_device *dev, int flag)
{
	if ((flag != 0) && (flag != 1) && (flag != 2)) {
		pr_err("Illegal value %d\n", flag);
		return -EINVAL;
	}

	if (atomic_read(&dev->dev_export_obj.obj_access_count)) {
		pr_err("dev[%p]: Unable to change SE Device"
			" UA_INTRLCK_CTRL while dev_export_obj: %d count"
			" exists\n", dev,
			atomic_read(&dev->dev_export_obj.obj_access_count));
		return -EINVAL;
	}
	dev->se_sub_dev->se_dev_attrib.emulate_ua_intlck_ctrl = flag;
	pr_debug("dev[%p]: SE Device UA_INTRLCK_CTRL flag: %d\n",
		dev, dev->se_sub_dev->se_dev_attrib.emulate_ua_intlck_ctrl);

	return 0;
}

int se_dev_set_emulate_tas(struct se_device *dev, int flag)
{
	if ((flag != 0) && (flag != 1)) {
		pr_err("Illegal value %d\n", flag);
		return -EINVAL;
	}

	if (atomic_read(&dev->dev_export_obj.obj_access_count)) {
		pr_err("dev[%p]: Unable to change SE Device TAS while"
			" dev_export_obj: %d count exists\n", dev,
			atomic_read(&dev->dev_export_obj.obj_access_count));
		return -EINVAL;
	}
	dev->se_sub_dev->se_dev_attrib.emulate_tas = flag;
	pr_debug("dev[%p]: SE Device TASK_ABORTED status bit: %s\n",
		dev, (dev->se_sub_dev->se_dev_attrib.emulate_tas) ? "Enabled" : "Disabled");

	return 0;
}

int se_dev_set_emulate_tpu(struct se_device *dev, int flag)
{
	if ((flag != 0) && (flag != 1)) {
		pr_err("Illegal value %d\n", flag);
		return -EINVAL;
	}
	/*
	 * We expect this value to be non-zero when generic Block Layer
	 * Discard support is detected in iblock_create_virtdevice().
	 */
	if (!dev->se_sub_dev->se_dev_attrib.max_unmap_block_desc_count) {
		pr_err("Generic Block Discard not supported\n");
		return -ENOSYS;
	}

	dev->se_sub_dev->se_dev_attrib.emulate_tpu = flag;
	pr_debug("dev[%p]: SE Device Thin Provisioning UNMAP bit: %d\n",
		dev, flag);
	return 0;
}

int se_dev_set_emulate_tpws(struct se_device *dev, int flag)
{
	if ((flag != 0) && (flag != 1)) {
		pr_err("Illegal value %d\n", flag);
		return -EINVAL;
	}
	/*
	 * We expect this value to be non-zero when generic Block Layer
	 * Discard support is detected in iblock_create_virtdevice().
	 */
	if (!dev->se_sub_dev->se_dev_attrib.max_unmap_block_desc_count) {
		pr_err("Generic Block Discard not supported\n");
		return -ENOSYS;
	}

	dev->se_sub_dev->se_dev_attrib.emulate_tpws = flag;
	pr_debug("dev[%p]: SE Device Thin Provisioning WRITE_SAME: %d\n",
		dev, flag);
	return 0;
}

int se_dev_set_enforce_pr_isids(struct se_device *dev, int flag)
{
	if ((flag != 0) && (flag != 1)) {
		pr_err("Illegal value %d\n", flag);
		return -EINVAL;
	}
	dev->se_sub_dev->se_dev_attrib.enforce_pr_isids = flag;
	pr_debug("dev[%p]: SE Device enforce_pr_isids bit: %s\n", dev,
		(dev->se_sub_dev->se_dev_attrib.enforce_pr_isids) ? "Enabled" : "Disabled");
	return 0;
}

int se_dev_set_is_nonrot(struct se_device *dev, int flag)
{
	if ((flag != 0) && (flag != 1)) {
		pr_err("Illegal value %d\n", flag);
		return -EINVAL;
	}
	dev->se_sub_dev->se_dev_attrib.is_nonrot = flag;
	pr_debug("dev[%p]: SE Device is_nonrot bit: %d\n",
		dev, flag);
	return 0;
}

int se_dev_set_emulate_rest_reord(struct se_device *dev, int flag)
{
	if (flag != 0) {
		pr_err("dev[%p]: SE Device emulation of restricted"
			" reordering not implemented\n", dev);
		return -ENOSYS;
	}
	dev->se_sub_dev->se_dev_attrib.emulate_rest_reord = flag;
	pr_debug("dev[%p]: SE Device emulate_rest_reord: %d\n", dev, flag);
	return 0;
}

/*
 * Note, this can only be called on unexported SE Device Object.
 */
int se_dev_set_queue_depth(struct se_device *dev, u32 queue_depth)
{
	u32 orig_queue_depth = dev->queue_depth;

	if (atomic_read(&dev->dev_export_obj.obj_access_count)) {
		pr_err("dev[%p]: Unable to change SE Device TCQ while"
			" dev_export_obj: %d count exists\n", dev,
			atomic_read(&dev->dev_export_obj.obj_access_count));
		return -EINVAL;
	}
	if (!queue_depth) {
		pr_err("dev[%p]: Illegal ZERO value for queue"
			"_depth\n", dev);
		return -EINVAL;
	}

	if (dev->transport->transport_type == TRANSPORT_PLUGIN_PHBA_PDEV) {
		if (queue_depth > dev->se_sub_dev->se_dev_attrib.hw_queue_depth) {
			pr_err("dev[%p]: Passed queue_depth: %u"
				" exceeds TCM/SE_Device TCQ: %u\n",
				dev, queue_depth,
				dev->se_sub_dev->se_dev_attrib.hw_queue_depth);
			return -EINVAL;
		}
	} else {
		if (queue_depth > dev->se_sub_dev->se_dev_attrib.queue_depth) {
			if (queue_depth > dev->se_sub_dev->se_dev_attrib.hw_queue_depth) {
				pr_err("dev[%p]: Passed queue_depth:"
					" %u exceeds TCM/SE_Device MAX"
					" TCQ: %u\n", dev, queue_depth,
					dev->se_sub_dev->se_dev_attrib.hw_queue_depth);
				return -EINVAL;
			}
		}
	}

	dev->se_sub_dev->se_dev_attrib.queue_depth = dev->queue_depth = queue_depth;
	if (queue_depth > orig_queue_depth)
		atomic_add(queue_depth - orig_queue_depth, &dev->depth_left);
	else if (queue_depth < orig_queue_depth)
		atomic_sub(orig_queue_depth - queue_depth, &dev->depth_left);

	pr_debug("dev[%p]: SE Device TCQ Depth changed to: %u\n",
		dev, queue_depth);
	return 0;
}

int se_dev_set_max_sectors(struct se_device *dev, u32 max_sectors)
{
	int force = 0; /* Force setting for VDEVS */

	if (atomic_read(&dev->dev_export_obj.obj_access_count)) {
		pr_err("dev[%p]: Unable to change SE Device"
			" max_sectors while dev_export_obj: %d count exists\n",
			dev, atomic_read(&dev->dev_export_obj.obj_access_count));
		return -EINVAL;
	}
	if (!max_sectors) {
		pr_err("dev[%p]: Illegal ZERO value for"
			" max_sectors\n", dev);
		return -EINVAL;
	}
	if (max_sectors < DA_STATUS_MAX_SECTORS_MIN) {
		pr_err("dev[%p]: Passed max_sectors: %u less than"
			" DA_STATUS_MAX_SECTORS_MIN: %u\n", dev, max_sectors,
			DA_STATUS_MAX_SECTORS_MIN);
		return -EINVAL;
	}
	if (dev->transport->transport_type == TRANSPORT_PLUGIN_PHBA_PDEV) {
		if (max_sectors > dev->se_sub_dev->se_dev_attrib.hw_max_sectors) {
			pr_err("dev[%p]: Passed max_sectors: %u"
				" greater than TCM/SE_Device max_sectors:"
				" %u\n", dev, max_sectors,
				dev->se_sub_dev->se_dev_attrib.hw_max_sectors);
			return -EINVAL;
		}
	} else {
		if (!force && (max_sectors >
				dev->se_sub_dev->se_dev_attrib.hw_max_sectors)) {
			pr_err("dev[%p]: Passed max_sectors: %u"
				" greater than TCM/SE_Device max_sectors"
				": %u, use force=1 to override.\n", dev,
				max_sectors, dev->se_sub_dev->se_dev_attrib.hw_max_sectors);
			return -EINVAL;
		}
		if (max_sectors > DA_STATUS_MAX_SECTORS_MAX) {
			pr_err("dev[%p]: Passed max_sectors: %u"
				" greater than DA_STATUS_MAX_SECTORS_MAX:"
				" %u\n", dev, max_sectors,
				DA_STATUS_MAX_SECTORS_MAX);
			return -EINVAL;
		}
	}

	dev->se_sub_dev->se_dev_attrib.max_sectors = max_sectors;
	pr_debug("dev[%p]: SE Device max_sectors changed to %u\n",
		dev, max_sectors);
	return 0;
}

int se_dev_set_optimal_sectors(struct se_device *dev, u32 optimal_sectors)
{
	if (atomic_read(&dev->dev_export_obj.obj_access_count)) {
		pr_err("dev[%p]: Unable to change SE Device"
			" optimal_sectors while dev_export_obj: %d count exists\n",
			dev, atomic_read(&dev->dev_export_obj.obj_access_count));
		return -EINVAL;
	}
	if (dev->transport->transport_type == TRANSPORT_PLUGIN_PHBA_PDEV) {
		pr_err("dev[%p]: Passed optimal_sectors cannot be"
			" changed for TCM/pSCSI\n", dev);
		return -EINVAL;
	}
	if (optimal_sectors > dev->se_sub_dev->se_dev_attrib.max_sectors) {
		pr_err("dev[%p]: Passed optimal_sectors %u cannot be"
			" greater than max_sectors: %u\n", dev,
			optimal_sectors, dev->se_sub_dev->se_dev_attrib.max_sectors);
		return -EINVAL;
	}

	dev->se_sub_dev->se_dev_attrib.optimal_sectors = optimal_sectors;
	pr_debug("dev[%p]: SE Device optimal_sectors changed to %u\n",
		dev, optimal_sectors);
	return 0;
}

int se_dev_set_block_size(struct se_device *dev, u32 block_size)
{
	if (atomic_read(&dev->dev_export_obj.obj_access_count)) {
		pr_err("dev[%p]: Unable to change SE Device block_size"
			" while dev_export_obj: %d count exists\n", dev,
			atomic_read(&dev->dev_export_obj.obj_access_count));
		return -EINVAL;
	}

	if ((block_size != 512) &&
	    (block_size != 1024) &&
	    (block_size != 2048) &&
	    (block_size != 4096)) {
		pr_err("dev[%p]: Illegal value for block_size: %u"
			" for SE device, must be 512, 1024, 2048 or 4096\n",
			dev, block_size);
		return -EINVAL;
	}

	if (dev->transport->transport_type == TRANSPORT_PLUGIN_PHBA_PDEV) {
		pr_err("dev[%p]: Not allowed to change block_size for"
			" Physical Device; use the Linux/SCSI layer to change"
			" the block_size of the underlying hardware\n", dev);
		return -EINVAL;
	}

	dev->se_sub_dev->se_dev_attrib.block_size = block_size;
	pr_debug("dev[%p]: SE Device block_size changed to %u\n",
		dev, block_size);
	return 0;
}

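/*
 * Export a struct se_device as a LUN of the given target portal group.
 * On success the LUN is activated and, when the fabric runs in demo
 * mode, every dynamically generated node ACL is remapped so the new
 * LUN becomes visible to those initiators immediately.
 */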
struct se_lun *core_dev_add_lun(
	struct se_portal_group *tpg,
	struct se_hba *hba,
	struct se_device *dev,
	u32 lun)
{
	struct se_lun *lun_p;
	u32 lun_access = 0;

	if (atomic_read(&dev->dev_access_obj.obj_access_count) != 0) {
		pr_err("Unable to export struct se_device while dev_access_obj: %d\n",
			atomic_read(&dev->dev_access_obj.obj_access_count));
		return NULL;
	}

	lun_p = core_tpg_pre_addlun(tpg, lun);
	if (IS_ERR(lun_p) || !lun_p)
		return NULL;

	if (dev->dev_flags & DF_READ_ONLY)
		lun_access = TRANSPORT_LUNFLAGS_READ_ONLY;
	else
		lun_access = TRANSPORT_LUNFLAGS_READ_WRITE;

	if (core_tpg_post_addlun(tpg, lun_p, lun_access, dev) < 0)
		return NULL;

	pr_debug("%s_TPG[%u]_LUN[%u] - Activated %s Logical Unit from"
		" CORE HBA: %u\n", tpg->se_tpg_tfo->get_fabric_name(),
		tpg->se_tpg_tfo->tpg_get_tag(tpg), lun_p->unpacked_lun,
		tpg->se_tpg_tfo->get_fabric_name(), hba->hba_id);
	/*
	 * Update LUN maps for dynamically added initiators when
	 * generate_node_acl is enabled.
	 */
	if (tpg->se_tpg_tfo->tpg_check_demo_mode(tpg)) {
		struct se_node_acl *acl;
		spin_lock_bh(&tpg->acl_node_lock);
		list_for_each_entry(acl, &tpg->acl_node_list, acl_list) {
			if (acl->dynamic_node_acl) {
				spin_unlock_bh(&tpg->acl_node_lock);
				core_tpg_add_node_to_devs(acl, tpg);
				spin_lock_bh(&tpg->acl_node_lock);
			}
		}
		spin_unlock_bh(&tpg->acl_node_lock);
	}

	return lun_p;
}

/* core_dev_del_lun():
 *
 * Deactivate and release the struct se_lun mapped at unpacked_lun
 * within the given target portal group.
 */
int core_dev_del_lun(
	struct se_portal_group *tpg,
	u32 unpacked_lun)
{
	struct se_lun *lun;
	int ret = 0;

	lun = core_tpg_pre_dellun(tpg, unpacked_lun, &ret);
	if (!lun)
		return ret;

	core_tpg_post_dellun(tpg, lun);

	pr_debug("%s_TPG[%u]_LUN[%u] - Deactivated %s Logical Unit from"
		" device object\n", tpg->se_tpg_tfo->get_fabric_name(),
		tpg->se_tpg_tfo->tpg_get_tag(tpg), unpacked_lun,
		tpg->se_tpg_tfo->get_fabric_name());

	return 0;
}

struct se_lun *core_get_lun_from_tpg(struct se_portal_group *tpg, u32 unpacked_lun)
{
	struct se_lun *lun;

	spin_lock(&tpg->tpg_lun_lock);
	if (unpacked_lun > (TRANSPORT_MAX_LUNS_PER_TPG-1)) {
		pr_err("%s LUN: %u exceeds TRANSPORT_MAX_LUNS"
			"_PER_TPG-1: %u for Target Portal Group: %hu\n",
			tpg->se_tpg_tfo->get_fabric_name(), unpacked_lun,
			TRANSPORT_MAX_LUNS_PER_TPG-1,
			tpg->se_tpg_tfo->tpg_get_tag(tpg));
		spin_unlock(&tpg->tpg_lun_lock);
		return NULL;
	}
	lun = &tpg->tpg_lun_list[unpacked_lun];

	if (lun->lun_status != TRANSPORT_LUN_STATUS_FREE) {
		pr_err("%s Logical Unit Number: %u is not free on"
			" Target Portal Group: %hu, ignoring request.\n",
			tpg->se_tpg_tfo->get_fabric_name(), unpacked_lun,
			tpg->se_tpg_tfo->tpg_get_tag(tpg));
		spin_unlock(&tpg->tpg_lun_lock);
		return NULL;
	}
	spin_unlock(&tpg->tpg_lun_lock);

	return lun;
}

/* core_dev_get_lun():
 *
 * Return the struct se_lun at unpacked_lun if it is currently active,
 * or NULL otherwise.
 */
static struct se_lun *core_dev_get_lun(struct se_portal_group *tpg, u32 unpacked_lun)
{
	struct se_lun *lun;

	spin_lock(&tpg->tpg_lun_lock);
	if (unpacked_lun > (TRANSPORT_MAX_LUNS_PER_TPG-1)) {
		pr_err("%s LUN: %u exceeds TRANSPORT_MAX_LUNS_PER"
			"_TPG-1: %u for Target Portal Group: %hu\n",
			tpg->se_tpg_tfo->get_fabric_name(), unpacked_lun,
			TRANSPORT_MAX_LUNS_PER_TPG-1,
			tpg->se_tpg_tfo->tpg_get_tag(tpg));
		spin_unlock(&tpg->tpg_lun_lock);
		return NULL;
	}
	lun = &tpg->tpg_lun_list[unpacked_lun];

	if (lun->lun_status != TRANSPORT_LUN_STATUS_ACTIVE) {
		pr_err("%s Logical Unit Number: %u is not active on"
			" Target Portal Group: %hu, ignoring request.\n",
			tpg->se_tpg_tfo->get_fabric_name(), unpacked_lun,
			tpg->se_tpg_tfo->tpg_get_tag(tpg));
		spin_unlock(&tpg->tpg_lun_lock);
		return NULL;
	}
	spin_unlock(&tpg->tpg_lun_lock);

	return lun;
}

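/*
 * Allocate a struct se_lun_acl for the named initiator.  The caller
 * still has to activate it via core_dev_add_initiator_node_lun_acl()
 * below; *ret carries the errno on failure since the return value is
 * the ACL pointer itself.
 */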
struct se_lun_acl *core_dev_init_initiator_node_lun_acl(
	struct se_portal_group *tpg,
	u32 mapped_lun,
	char *initiatorname,
	int *ret)
{
	struct se_lun_acl *lacl;
	struct se_node_acl *nacl;

	if (strlen(initiatorname) >= TRANSPORT_IQN_LEN) {
		pr_err("%s InitiatorName exceeds maximum size.\n",
			tpg->se_tpg_tfo->get_fabric_name());
		*ret = -EOVERFLOW;
		return NULL;
	}
	nacl = core_tpg_get_initiator_node_acl(tpg, initiatorname);
	if (!nacl) {
		*ret = -EINVAL;
		return NULL;
	}
	lacl = kzalloc(sizeof(struct se_lun_acl), GFP_KERNEL);
	if (!lacl) {
		pr_err("Unable to allocate memory for struct se_lun_acl.\n");
		*ret = -ENOMEM;
		return NULL;
	}

	INIT_LIST_HEAD(&lacl->lacl_list);
	lacl->mapped_lun = mapped_lun;
	lacl->se_lun_nacl = nacl;
	snprintf(lacl->initiatorname, TRANSPORT_IQN_LEN, "%s", initiatorname);

	return lacl;
}

int core_dev_add_initiator_node_lun_acl(
	struct se_portal_group *tpg,
	struct se_lun_acl *lacl,
	u32 unpacked_lun,
	u32 lun_access)
{
	struct se_lun *lun;
	struct se_node_acl *nacl;

	lun = core_dev_get_lun(tpg, unpacked_lun);
	if (!lun) {
		pr_err("%s Logical Unit Number: %u is not active on"
			" Target Portal Group: %hu, ignoring request.\n",
			tpg->se_tpg_tfo->get_fabric_name(), unpacked_lun,
			tpg->se_tpg_tfo->tpg_get_tag(tpg));
		return -EINVAL;
	}

	nacl = lacl->se_lun_nacl;
	if (!nacl)
		return -EINVAL;

	if ((lun->lun_access & TRANSPORT_LUNFLAGS_READ_ONLY) &&
	    (lun_access & TRANSPORT_LUNFLAGS_READ_WRITE))
		lun_access = TRANSPORT_LUNFLAGS_READ_ONLY;

	lacl->se_lun = lun;

	if (core_update_device_list_for_node(lun, lacl, lacl->mapped_lun,
			lun_access, nacl, tpg, 1) < 0)
		return -EINVAL;

	spin_lock(&lun->lun_acl_lock);
	list_add_tail(&lacl->lacl_list, &lun->lun_acl_list);
	atomic_inc(&lun->lun_acl_count);
	smp_mb__after_atomic_inc();
	spin_unlock(&lun->lun_acl_lock);

	pr_debug("%s_TPG[%hu]_LUN[%u->%u] - Added %s ACL for"
		" InitiatorNode: %s\n", tpg->se_tpg_tfo->get_fabric_name(),
		tpg->se_tpg_tfo->tpg_get_tag(tpg), unpacked_lun, lacl->mapped_lun,
		(lun_access & TRANSPORT_LUNFLAGS_READ_WRITE) ? "RW" : "RO",
		lacl->initiatorname);
	/*
	 * Check to see if there are any existing persistent reservation APTPL
	 * pre-registrations that need to be enabled for this LUN ACL.
	 */
	core_scsi3_check_aptpl_registration(lun->lun_se_dev, tpg, lun, lacl);
	return 0;
}

/* core_dev_del_initiator_node_lun_acl():
 *
 * Unlink a struct se_lun_acl from its struct se_lun and drop the node
 * ACL's device list mapping for it.
 */
int core_dev_del_initiator_node_lun_acl(
	struct se_portal_group *tpg,
	struct se_lun *lun,
	struct se_lun_acl *lacl)
{
	struct se_node_acl *nacl;

	nacl = lacl->se_lun_nacl;
	if (!nacl)
		return -EINVAL;

	spin_lock(&lun->lun_acl_lock);
	list_del(&lacl->lacl_list);
	atomic_dec(&lun->lun_acl_count);
	smp_mb__after_atomic_dec();
	spin_unlock(&lun->lun_acl_lock);

	core_update_device_list_for_node(lun, NULL, lacl->mapped_lun,
		TRANSPORT_LUNFLAGS_NO_ACCESS, nacl, tpg, 0);

	lacl->se_lun = NULL;

	pr_debug("%s_TPG[%hu]_LUN[%u] - Removed ACL for"
		" InitiatorNode: %s Mapped LUN: %u\n",
		tpg->se_tpg_tfo->get_fabric_name(),
		tpg->se_tpg_tfo->tpg_get_tag(tpg), lun->unpacked_lun,
		lacl->initiatorname, lacl->mapped_lun);

	return 0;
}

void core_dev_free_initiator_node_lun_acl(
	struct se_portal_group *tpg,
	struct se_lun_acl *lacl)
{
	pr_debug("%s_TPG[%hu] - Freeing ACL for %s InitiatorNode: %s"
		" Mapped LUN: %u\n", tpg->se_tpg_tfo->get_fabric_name(),
		tpg->se_tpg_tfo->tpg_get_tag(tpg),
		tpg->se_tpg_tfo->get_fabric_name(),
		lacl->initiatorname, lacl->mapped_lun);

	kfree(lacl);
}

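/*
 * Create the global virtual LUN 0 device backed by an internal rd_mcp
 * (ramdisk) HBA, sized via the "rd_pages=8" configfs parameter string.
 * g_lun0_dev is later wired into each TPG's tpg_virt_lun0 so LUN 0 is
 * always answerable (see transport_lookup_cmd_lun() above).
 */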
int core_dev_setup_virtual_lun0(void)
{
	struct se_hba *hba;
	struct se_device *dev;
	struct se_subsystem_dev *se_dev = NULL;
	struct se_subsystem_api *t;
	char buf[16];
	int ret;

	hba = core_alloc_hba("rd_mcp", 0, HBA_FLAGS_INTERNAL_USE);
	if (IS_ERR(hba))
		return PTR_ERR(hba);

	lun0_hba = hba;
	t = hba->transport;

	se_dev = kzalloc(sizeof(struct se_subsystem_dev), GFP_KERNEL);
	if (!se_dev) {
		pr_err("Unable to allocate memory for"
			" struct se_subsystem_dev\n");
		ret = -ENOMEM;
		goto out;
	}
	INIT_LIST_HEAD(&se_dev->se_dev_node);
	INIT_LIST_HEAD(&se_dev->t10_wwn.t10_vpd_list);
	spin_lock_init(&se_dev->t10_wwn.t10_vpd_lock);
	INIT_LIST_HEAD(&se_dev->t10_pr.registration_list);
	INIT_LIST_HEAD(&se_dev->t10_pr.aptpl_reg_list);
	spin_lock_init(&se_dev->t10_pr.registration_lock);
	spin_lock_init(&se_dev->t10_pr.aptpl_reg_lock);
	INIT_LIST_HEAD(&se_dev->t10_alua.tg_pt_gps_list);
	spin_lock_init(&se_dev->t10_alua.tg_pt_gps_lock);
	spin_lock_init(&se_dev->se_dev_lock);
	se_dev->t10_pr.pr_aptpl_buf_len = PR_APTPL_BUF_LEN;
	se_dev->t10_wwn.t10_sub_dev = se_dev;
	se_dev->t10_alua.t10_sub_dev = se_dev;
	se_dev->se_dev_attrib.da_sub_dev = se_dev;
	se_dev->se_dev_hba = hba;

	se_dev->se_dev_su_ptr = t->allocate_virtdevice(hba, "virt_lun0");
	if (!se_dev->se_dev_su_ptr) {
		pr_err("Unable to locate subsystem dependent pointer"
			" from allocate_virtdevice()\n");
		ret = -ENOMEM;
		goto out;
	}
	lun0_su_dev = se_dev;

	memset(buf, 0, 16);
	sprintf(buf, "rd_pages=8");
	t->set_configfs_dev_params(hba, se_dev, buf, sizeof(buf));

	dev = t->create_virtdevice(hba, se_dev, se_dev->se_dev_su_ptr);
	if (IS_ERR(dev)) {
		ret = PTR_ERR(dev);
		goto out;
	}
	se_dev->se_dev_ptr = dev;
	g_lun0_dev = dev;

	return 0;
out:
	lun0_su_dev = NULL;
	kfree(se_dev);
	if (lun0_hba) {
		core_delete_hba(lun0_hba);
		lun0_hba = NULL;
	}
	return ret;
}

void core_dev_release_virtual_lun0(void)
{
	struct se_hba *hba = lun0_hba;
	struct se_subsystem_dev *su_dev = lun0_su_dev;

	if (!hba)
		return;

	if (g_lun0_dev)
		se_free_virtual_device(g_lun0_dev, hba);

	kfree(su_dev);
	core_delete_hba(hba);
}