/*
 * drivers/target/target_core_cdb.c
 *
 * CDB emulation for non-READ/WRITE commands.
 *
 * Copyright (c) 2002, 2003, 2004, 2005 PyX Technologies, Inc.
 * Copyright (c) 2005, 2006, 2007 SBE, Inc.
 * Copyright (c) 2007-2010 Rising Tide Systems
 * Copyright (c) 2008-2010 Linux-iSCSI.org
 *
 * Nicholas A. Bellinger <nab@kernel.org>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
 */

#include <linux/kernel.h>
#include <asm/unaligned.h>
#include <scsi/scsi.h>

#include <target/target_core_base.h>
#include <target/target_core_transport.h>
#include <target/target_core_fabric_ops.h>
#include "target_core_ua.h"

static void
target_fill_alua_data(struct se_port *port, unsigned char *buf)
{
	struct t10_alua_tg_pt_gp *tg_pt_gp;
	struct t10_alua_tg_pt_gp_member *tg_pt_gp_mem;

	/*
	 * Set SCCS for MAINTENANCE_IN + REPORT_TARGET_PORT_GROUPS.
	 */
	buf[5] = 0x80;

	/*
	 * Set TPGS field for explicit and/or implicit ALUA access type
	 * and operation.
	 *
	 * See spc4r17 section 6.4.2 Table 135
	 */
	if (!port)
		return;
	tg_pt_gp_mem = port->sep_alua_tg_pt_gp_mem;
	if (!tg_pt_gp_mem)
		return;

	spin_lock(&tg_pt_gp_mem->tg_pt_gp_mem_lock);
	tg_pt_gp = tg_pt_gp_mem->tg_pt_gp;
	if (tg_pt_gp)
		buf[5] |= tg_pt_gp->tg_pt_gp_alua_access_type;
	spin_unlock(&tg_pt_gp_mem->tg_pt_gp_mem_lock);
}

static int
target_emulate_inquiry_std(struct se_cmd *cmd)
{
	struct se_lun *lun = cmd->se_lun;
	struct se_device *dev = cmd->se_dev;
	unsigned char *buf;

	/*
	 * Make sure we at least have 6 bytes of INQUIRY response
	 * payload going back for EVPD=0
	 */
	if (cmd->data_length < 6) {
		pr_err("SCSI Inquiry payload length: %u"
			" too small for EVPD=0\n", cmd->data_length);
		return -EINVAL;
	}

	buf = transport_kmap_first_data_page(cmd);

	buf[0] = dev->transport->get_device_type(dev);
	if (buf[0] == TYPE_TAPE)
		buf[1] = 0x80;
	buf[2] = dev->transport->get_device_rev(dev);

	/*
	 * Enable SCCS and TPGS fields for Emulated ALUA
	 */
	if (dev->se_sub_dev->t10_alua.alua_type == SPC3_ALUA_EMULATED)
		target_fill_alua_data(lun->lun_sep, buf);

	if (cmd->data_length < 8) {
		buf[4] = 1; /* Set additional length to 1 */
		goto out;
	}

	buf[7] = 0x32; /* Sync=1 and CmdQue=1 */

	/*
	 * Do not include vendor, product, revision info in INQUIRY
	 * response payload for CDBs with a small allocation length.
	 */
	if (cmd->data_length < 36) {
		buf[4] = 3; /* Set additional length to 3 */
		goto out;
	}

	snprintf((unsigned char *)&buf[8], 8, "LIO-ORG");
	snprintf((unsigned char *)&buf[16], 16, "%s",
		 &dev->se_sub_dev->t10_wwn.model[0]);
	snprintf((unsigned char *)&buf[32], 4, "%s",
		 &dev->se_sub_dev->t10_wwn.revision[0]);
	buf[4] = 31; /* Set additional length to 31 */

out:
	transport_kunmap_first_data_page(cmd);
	return 0;
}

/* unit serial number */
static int
target_emulate_evpd_80(struct se_cmd *cmd, unsigned char *buf)
{
	struct se_device *dev = cmd->se_dev;
	u16 len = 0;

	if (dev->se_sub_dev->su_dev_flags &
			SDF_EMULATED_VPD_UNIT_SERIAL) {
		u32 unit_serial_len;

		unit_serial_len =
			strlen(&dev->se_sub_dev->t10_wwn.unit_serial[0]);
		unit_serial_len++; /* For NULL Terminator */

		if (((len + 4) + unit_serial_len) > cmd->data_length) {
			len += unit_serial_len;
			buf[2] = ((len >> 8) & 0xff);
			buf[3] = (len & 0xff);
			return 0;
		}
		len += sprintf((unsigned char *)&buf[4], "%s",
			&dev->se_sub_dev->t10_wwn.unit_serial[0]);
		len++; /* Extra Byte for NULL Terminator */
		buf[3] = len;
	}
	return 0;
}

/*
 * Device identification VPD, for a complete list of
 * DESIGNATOR TYPEs see spc4r17 Table 459.
 */
static int
target_emulate_evpd_83(struct se_cmd *cmd, unsigned char *buf)
{
	struct se_device *dev = cmd->se_dev;
	struct se_lun *lun = cmd->se_lun;
	struct se_port *port = NULL;
	struct se_portal_group *tpg = NULL;
	struct t10_alua_lu_gp_member *lu_gp_mem;
	struct t10_alua_tg_pt_gp *tg_pt_gp;
	struct t10_alua_tg_pt_gp_member *tg_pt_gp_mem;
	unsigned char *prod = &dev->se_sub_dev->t10_wwn.model[0];
	u32 prod_len;
	u32 unit_serial_len, off = 0;
	u16 len = 0, id_len;

	off = 4;

	/*
	 * NAA IEEE Registered Extended Assigned designator format, see
	 * spc4r17 section 7.7.3.6.5
	 *
	 * We depend upon a target_core_mod/ConfigFS provided
	 * /sys/kernel/config/target/core/$HBA/$DEV/wwn/vpd_unit_serial
	 * value in order to return the NAA id.
	 */
	if (!(dev->se_sub_dev->su_dev_flags & SDF_EMULATED_VPD_UNIT_SERIAL))
		goto check_t10_vend_desc;

	if (off + 20 > cmd->data_length)
		goto check_t10_vend_desc;

	/* CODE SET == Binary */
	buf[off++] = 0x1;

	/* Set ASSOCIATION == addressed logical unit: 00b */
	buf[off] = 0x00;

	/* Identifier/Designator type == NAA identifier */
	buf[off++] |= 0x3;
	off++;

	/* Identifier/Designator length */
	buf[off++] = 0x10;

	/*
	 * Start NAA IEEE Registered Extended Identifier/Designator
	 */
	buf[off++] = (0x6 << 4);

	/*
	 * Use OpenFabrics IEEE Company ID: 00 14 05
	 */
	buf[off++] = 0x01;
	buf[off++] = 0x40;
	buf[off] = (0x5 << 4);

	/*
	 * Return ConfigFS Unit Serial Number information for
	 * VENDOR_SPECIFIC_IDENTIFIER and
	 * VENDOR_SPECIFIC_IDENTIFIER_EXTENSION
	 */
	buf[off++] |= hex_to_bin(dev->se_sub_dev->t10_wwn.unit_serial[0]);
	hex2bin(&buf[off], &dev->se_sub_dev->t10_wwn.unit_serial[1], 12);
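	/*
	 * The resulting 16-byte NAA designator is: NAA nibble 6h, the 24-bit
	 * OpenFabrics IEEE Company ID 00 14 05, then 100 bits of vendor
	 * specific identifier taken from the vpd_unit_serial hex digits
	 * (one nibble above plus 12 bytes via hex2bin()).
	 */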

	len = 20;
	off = (len + 4);

check_t10_vend_desc:
	/*
	 * T10 Vendor Identifier Page, see spc4r17 section 7.7.3.4
	 */
	id_len = 8; /* For Vendor field */
	prod_len = 4; /* For VPD Header */
	prod_len += 8; /* For Vendor field */
	prod_len += strlen(prod);
	prod_len++; /* For : */

	if (dev->se_sub_dev->su_dev_flags &
			SDF_EMULATED_VPD_UNIT_SERIAL) {
		unit_serial_len =
			strlen(&dev->se_sub_dev->t10_wwn.unit_serial[0]);
		unit_serial_len++; /* For NULL Terminator */

		if ((len + (id_len + 4) +
		    (prod_len + unit_serial_len)) >
				cmd->data_length) {
			len += (prod_len + unit_serial_len);
			goto check_port;
		}
		id_len += sprintf((unsigned char *)&buf[off+12],
				"%s:%s", prod,
				&dev->se_sub_dev->t10_wwn.unit_serial[0]);
	}
	buf[off] = 0x2; /* ASCII */
	buf[off+1] = 0x1; /* T10 Vendor ID */
	buf[off+2] = 0x0;
	memcpy((unsigned char *)&buf[off+4], "LIO-ORG", 8);
	/* Extra Byte for NULL Terminator */
	id_len++;
	/* Identifier Length */
	buf[off+3] = id_len;
	/* Header size for Designation descriptor */
	len += (id_len + 4);
	off += (id_len + 4);
	/*
	 * struct se_port is only set for INQUIRY VPD=1 through $FABRIC_MOD
	 */
check_port:
	port = lun->lun_sep;
	if (port) {
		struct t10_alua_lu_gp *lu_gp;
		u32 padding, scsi_name_len;
		u16 lu_gp_id = 0;
		u16 tg_pt_gp_id = 0;
		u16 tpgt;

		tpg = port->sep_tpg;
		/*
		 * Relative target port identifier, see spc4r17
		 * section 7.7.3.7
		 *
		 * Get the PROTOCOL IDENTIFIER as defined by spc4r17
		 * section 7.5.1 Table 362
		 */
		if (((len + 4) + 8) > cmd->data_length) {
			len += 8;
			goto check_tpgi;
		}
		buf[off] =
			(tpg->se_tpg_tfo->get_fabric_proto_ident(tpg) << 4);
		buf[off++] |= 0x1; /* CODE SET == Binary */
		buf[off] = 0x80; /* Set PIV=1 */
		/* Set ASSOCIATION == target port: 01b */
		buf[off] |= 0x10;
		/* DESIGNATOR TYPE == Relative target port identifier */
		buf[off++] |= 0x4;
		off++; /* Skip over Reserved */
		buf[off++] = 4; /* DESIGNATOR LENGTH */
		/* Skip over Obsolete field in RTPI payload
		 * in Table 472 */
		off += 2;
		buf[off++] = ((port->sep_rtpi >> 8) & 0xff);
		buf[off++] = (port->sep_rtpi & 0xff);
		len += 8; /* Header size + Designation descriptor */
		/*
		 * Target port group identifier, see spc4r17
		 * section 7.7.3.8
		 *
		 * Get the PROTOCOL IDENTIFIER as defined by spc4r17
		 * section 7.5.1 Table 362
		 */
check_tpgi:
		if (dev->se_sub_dev->t10_alua.alua_type !=
				SPC3_ALUA_EMULATED)
			goto check_scsi_name;

		if (((len + 4) + 8) > cmd->data_length) {
			len += 8;
			goto check_lu_gp;
		}
		tg_pt_gp_mem = port->sep_alua_tg_pt_gp_mem;
		if (!tg_pt_gp_mem)
			goto check_lu_gp;

		spin_lock(&tg_pt_gp_mem->tg_pt_gp_mem_lock);
		tg_pt_gp = tg_pt_gp_mem->tg_pt_gp;
		if (!tg_pt_gp) {
			spin_unlock(&tg_pt_gp_mem->tg_pt_gp_mem_lock);
			goto check_lu_gp;
		}
		tg_pt_gp_id = tg_pt_gp->tg_pt_gp_id;
		spin_unlock(&tg_pt_gp_mem->tg_pt_gp_mem_lock);

		buf[off] =
			(tpg->se_tpg_tfo->get_fabric_proto_ident(tpg) << 4);
		buf[off++] |= 0x1; /* CODE SET == Binary */
		buf[off] = 0x80; /* Set PIV=1 */
		/* Set ASSOCIATION == target port: 01b */
		buf[off] |= 0x10;
		/* DESIGNATOR TYPE == Target port group identifier */
		buf[off++] |= 0x5;
		off++; /* Skip over Reserved */
		buf[off++] = 4; /* DESIGNATOR LENGTH */
		off += 2; /* Skip over Reserved Field */
		buf[off++] = ((tg_pt_gp_id >> 8) & 0xff);
		buf[off++] = (tg_pt_gp_id & 0xff);
		len += 8; /* Header size + Designation descriptor */
		/*
		 * Logical Unit Group identifier, see spc4r17
		 * section 7.7.3.8
		 */
check_lu_gp:
		if (((len + 4) + 8) > cmd->data_length) {
			len += 8;
			goto check_scsi_name;
		}
		lu_gp_mem = dev->dev_alua_lu_gp_mem;
		if (!lu_gp_mem)
			goto check_scsi_name;

		spin_lock(&lu_gp_mem->lu_gp_mem_lock);
		lu_gp = lu_gp_mem->lu_gp;
		if (!lu_gp) {
			spin_unlock(&lu_gp_mem->lu_gp_mem_lock);
			goto check_scsi_name;
		}
		lu_gp_id = lu_gp->lu_gp_id;
		spin_unlock(&lu_gp_mem->lu_gp_mem_lock);

		buf[off++] |= 0x1; /* CODE SET == Binary */
		/* DESIGNATOR TYPE == Logical Unit Group identifier */
		buf[off++] |= 0x6;
		off++; /* Skip over Reserved */
		buf[off++] = 4; /* DESIGNATOR LENGTH */
		off += 2; /* Skip over Reserved Field */
		buf[off++] = ((lu_gp_id >> 8) & 0xff);
		buf[off++] = (lu_gp_id & 0xff);
		len += 8; /* Header size + Designation descriptor */
		/*
		 * SCSI name string designator, see spc4r17
		 * section 7.7.3.11
		 *
		 * Get the PROTOCOL IDENTIFIER as defined by spc4r17
		 * section 7.5.1 Table 362
		 */
check_scsi_name:
		scsi_name_len = strlen(tpg->se_tpg_tfo->tpg_get_wwn(tpg));
		/* UTF-8 ",t,0x<16-bit TPGT>" + NULL Terminator */
		scsi_name_len += 10;
		/* Pad to a multiple of 4 bytes: (-len) & 3 is the padding needed */
		padding = ((-scsi_name_len) & 3);
		if (padding != 0)
			scsi_name_len += padding;
		/* Header size + Designation descriptor */
		scsi_name_len += 4;

		if (((len + 4) + scsi_name_len) > cmd->data_length) {
			len += scsi_name_len;
			goto set_len;
		}
		buf[off] =
			(tpg->se_tpg_tfo->get_fabric_proto_ident(tpg) << 4);
		buf[off++] |= 0x3; /* CODE SET == UTF-8 */
		buf[off] = 0x80; /* Set PIV=1 */
		/* Set ASSOCIATION == target port: 01b */
		buf[off] |= 0x10;
		/* DESIGNATOR TYPE == SCSI name string */
		buf[off++] |= 0x8;
		off += 2; /* Skip over Reserved and length */
		/*
		 * SCSI name string identifier containing $FABRIC_MOD
		 * dependent information. For LIO-Target and iSCSI
		 * Target Port, this means "<iSCSI name>,t,0x<TPGT>" in
		 * UTF-8 encoding.
		 */
		tpgt = tpg->se_tpg_tfo->tpg_get_tag(tpg);
		scsi_name_len = sprintf(&buf[off], "%s,t,0x%04x",
					tpg->se_tpg_tfo->tpg_get_wwn(tpg), tpgt);
		scsi_name_len += 1 /* Include NULL terminator */;
		/*
		 * The null-terminated, null-padded (see 4.4.2) SCSI
		 * NAME STRING field contains a UTF-8 format string.
		 * The number of bytes in the SCSI NAME STRING field
		 * (i.e., the value in the DESIGNATOR LENGTH field)
		 * shall be no larger than 256 and shall be a multiple
		 * of four.
		 */
		if (padding)
			scsi_name_len += padding;

		buf[off-1] = scsi_name_len;
		off += scsi_name_len;
		/* Header size + Designation descriptor */
		len += (scsi_name_len + 4);
	}
set_len:
	buf[2] = ((len >> 8) & 0xff);
	buf[3] = (len & 0xff); /* Page Length for VPD 0x83 */
	return 0;
}

/* Extended INQUIRY Data VPD Page */
static int
target_emulate_evpd_86(struct se_cmd *cmd, unsigned char *buf)
{
	if (cmd->data_length < 60)
		return 0;

	buf[2] = 0x3c;
	/* Set HEADSUP, ORDSUP, SIMPSUP */
	buf[5] = 0x07;

	/* If WriteCache emulation is enabled, set V_SUP */
	if (cmd->se_dev->se_sub_dev->se_dev_attrib.emulate_write_cache > 0)
		buf[6] = 0x01;
	return 0;
}

/* Block Limits VPD page */
static int
target_emulate_evpd_b0(struct se_cmd *cmd, unsigned char *buf)
{
	struct se_device *dev = cmd->se_dev;
	int have_tp = 0;

	/*
	 * Following sbc3r22 section 6.5.3 Block Limits VPD page, when
	 * emulate_tpu=1 or emulate_tpws=1 we expect a different page
	 * length for Thin Provisioning.
	 */
	if (dev->se_sub_dev->se_dev_attrib.emulate_tpu || dev->se_sub_dev->se_dev_attrib.emulate_tpws)
		have_tp = 1;

	if (cmd->data_length < (0x10 + 4)) {
		pr_debug("Received data_length: %u"
			" too small for EVPD 0xb0\n",
			cmd->data_length);
		return -EINVAL;
	}

	if (have_tp && cmd->data_length < (0x3c + 4)) {
		pr_debug("Received data_length: %u"
			" too small for TPE=1 EVPD 0xb0\n",
			cmd->data_length);
		have_tp = 0;
	}

	buf[0] = dev->transport->get_device_type(dev);
	buf[3] = have_tp ? 0x3c : 0x10;

	/* Set WSNZ to 1 */
	buf[4] = 0x01;

	/*
	 * Set OPTIMAL TRANSFER LENGTH GRANULARITY
	 */
	put_unaligned_be16(1, &buf[6]);

	/*
	 * Set MAXIMUM TRANSFER LENGTH
	 */
	put_unaligned_be32(dev->se_sub_dev->se_dev_attrib.max_sectors, &buf[8]);

	/*
	 * Set OPTIMAL TRANSFER LENGTH
	 */
	put_unaligned_be32(dev->se_sub_dev->se_dev_attrib.optimal_sectors, &buf[12]);

	/*
	 * Exit now if we don't support TP or the initiator sent a too
	 * short buffer.
	 */
	if (!have_tp || cmd->data_length < (0x3c + 4))
		return 0;

	/*
	 * Set MAXIMUM UNMAP LBA COUNT
	 */
	put_unaligned_be32(dev->se_sub_dev->se_dev_attrib.max_unmap_lba_count, &buf[20]);

	/*
	 * Set MAXIMUM UNMAP BLOCK DESCRIPTOR COUNT
	 */
	put_unaligned_be32(dev->se_sub_dev->se_dev_attrib.max_unmap_block_desc_count,
			   &buf[24]);

	/*
	 * Set OPTIMAL UNMAP GRANULARITY
	 */
	put_unaligned_be32(dev->se_sub_dev->se_dev_attrib.unmap_granularity, &buf[28]);

	/*
	 * UNMAP GRANULARITY ALIGNMENT
	 */
	put_unaligned_be32(dev->se_sub_dev->se_dev_attrib.unmap_granularity_alignment,
			   &buf[32]);
	if (dev->se_sub_dev->se_dev_attrib.unmap_granularity_alignment != 0)
		buf[32] |= 0x80; /* Set the UGAVALID bit */

	return 0;
}

/* Block Device Characteristics VPD page */
static int
target_emulate_evpd_b1(struct se_cmd *cmd, unsigned char *buf)
{
	struct se_device *dev = cmd->se_dev;

	buf[0] = dev->transport->get_device_type(dev);
	buf[3] = 0x3c;

	if (cmd->data_length >= 5 &&
	    dev->se_sub_dev->se_dev_attrib.is_nonrot)
		buf[5] = 1;

	return 0;
}

/* Thin Provisioning VPD */
static int
target_emulate_evpd_b2(struct se_cmd *cmd, unsigned char *buf)
{
	struct se_device *dev = cmd->se_dev;

	/*
	 * From sbc3r22 section 6.5.4 Thin Provisioning VPD page:
	 *
	 * The PAGE LENGTH field is defined in SPC-4. If the DP bit is set to
	 * zero, then the page length shall be set to 0004h. If the DP bit
	 * is set to one, then the page length shall be set to the value
	 * defined in table 162.
	 */
	buf[0] = dev->transport->get_device_type(dev);

	/*
	 * Set Hardcoded length mentioned above for DP=0
	 */
	put_unaligned_be16(0x0004, &buf[2]);

	/*
	 * The THRESHOLD EXPONENT field indicates the threshold set size in
	 * LBAs as a power of 2 (i.e., the threshold set size is equal to
	 * 2^(threshold exponent)).
	 *
	 * Note that this is currently set to 0x00 as mkp says it will be
	 * changing again. We can enable this once it has settled in T10
	 * and is actually used by Linux/SCSI ML code.
	 */
	buf[4] = 0x00;

	/*
	 * A TPU bit set to one indicates that the device server supports
	 * the UNMAP command (see 5.25). A TPU bit set to zero indicates
	 * that the device server does not support the UNMAP command.
	 */
	if (dev->se_sub_dev->se_dev_attrib.emulate_tpu != 0)
		buf[5] = 0x80;

	/*
	 * A TPWS bit set to one indicates that the device server supports
	 * the use of the WRITE SAME (16) command (see 5.42) to unmap LBAs.
	 * A TPWS bit set to zero indicates that the device server does not
	 * support the use of the WRITE SAME (16) command to unmap LBAs.
	 */
	if (dev->se_sub_dev->se_dev_attrib.emulate_tpws != 0)
		buf[5] |= 0x40;

	return 0;
}

static int
target_emulate_evpd_00(struct se_cmd *cmd, unsigned char *buf);

static struct {
	uint8_t page;
	int (*emulate)(struct se_cmd *, unsigned char *);
} evpd_handlers[] = {
	{ .page = 0x00, .emulate = target_emulate_evpd_00 },
	{ .page = 0x80, .emulate = target_emulate_evpd_80 },
	{ .page = 0x83, .emulate = target_emulate_evpd_83 },
	{ .page = 0x86, .emulate = target_emulate_evpd_86 },
	{ .page = 0xb0, .emulate = target_emulate_evpd_b0 },
	{ .page = 0xb1, .emulate = target_emulate_evpd_b1 },
	{ .page = 0xb2, .emulate = target_emulate_evpd_b2 },
};
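
/*
 * Note: target_emulate_evpd_00() below walks this table in order to build
 * the supported VPD pages list, so adding a new page only requires a new
 * entry here plus its emulation handler.
 */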

/* supported vital product data pages */
static int
target_emulate_evpd_00(struct se_cmd *cmd, unsigned char *buf)
{
	int p;

	if (cmd->data_length < 8)
		return 0;
	/*
	 * Only report the INQUIRY EVPD=1 pages after a valid NAA
	 * Registered Extended LUN WWN has been set via ConfigFS
	 * during device creation/restart.
	 */
	if (cmd->se_dev->se_sub_dev->su_dev_flags &
			SDF_EMULATED_VPD_UNIT_SERIAL) {
		buf[3] = ARRAY_SIZE(evpd_handlers);
		for (p = 0; p < min_t(int, ARRAY_SIZE(evpd_handlers),
				cmd->data_length - 4); ++p)
			buf[p + 4] = evpd_handlers[p].page;
	}

	return 0;
}

static int
target_emulate_inquiry(struct se_cmd *cmd)
{
	struct se_device *dev = cmd->se_dev;
	unsigned char *buf;
	unsigned char *cdb = cmd->t_task_cdb;
	int p, ret;

	if (!(cdb[1] & 0x1))
		return target_emulate_inquiry_std(cmd);

	/*
	 * Make sure we at least have 4 bytes of INQUIRY response
	 * payload for 0x00 going back for EVPD=1. Note that 0x80
	 * and 0x83 will check for enough payload data length and
	 * jump to set_len: label when there is not enough inquiry EVPD
	 * payload length left for the next outgoing EVPD metadata
	 */
	if (cmd->data_length < 4) {
		pr_err("SCSI Inquiry payload length: %u"
			" too small for EVPD=1\n", cmd->data_length);
		return -EINVAL;
	}

	buf = transport_kmap_first_data_page(cmd);

	buf[0] = dev->transport->get_device_type(dev);

	for (p = 0; p < ARRAY_SIZE(evpd_handlers); ++p)
		if (cdb[2] == evpd_handlers[p].page) {
			buf[1] = cdb[2];
			ret = evpd_handlers[p].emulate(cmd, buf);
			transport_kunmap_first_data_page(cmd);
			return ret;
		}

	transport_kunmap_first_data_page(cmd);
	pr_err("Unknown VPD Code: 0x%02x\n", cdb[2]);
	return -EINVAL;
}

static int
target_emulate_readcapacity(struct se_cmd *cmd)
{
	struct se_device *dev = cmd->se_dev;
	unsigned char *buf;
	unsigned long long blocks_long = dev->transport->get_blocks(dev);
	u32 blocks;

	if (blocks_long >= 0x00000000ffffffff)
		blocks = 0xffffffff;
	else
		blocks = (u32)blocks_long;

	buf = transport_kmap_first_data_page(cmd);

	buf[0] = (blocks >> 24) & 0xff;
	buf[1] = (blocks >> 16) & 0xff;
	buf[2] = (blocks >> 8) & 0xff;
	buf[3] = blocks & 0xff;
	buf[4] = (dev->se_sub_dev->se_dev_attrib.block_size >> 24) & 0xff;
	buf[5] = (dev->se_sub_dev->se_dev_attrib.block_size >> 16) & 0xff;
	buf[6] = (dev->se_sub_dev->se_dev_attrib.block_size >> 8) & 0xff;
	buf[7] = dev->se_sub_dev->se_dev_attrib.block_size & 0xff;
	/*
	 * Set max 32-bit blocks to signal SERVICE ACTION READ_CAPACITY_16
	 */
	if (dev->se_sub_dev->se_dev_attrib.emulate_tpu || dev->se_sub_dev->se_dev_attrib.emulate_tpws)
		put_unaligned_be32(0xFFFFFFFF, &buf[0]);

	transport_kunmap_first_data_page(cmd);

	return 0;
}

static int
target_emulate_readcapacity_16(struct se_cmd *cmd)
{
	struct se_device *dev = cmd->se_dev;
	unsigned char *buf;
	unsigned long long blocks = dev->transport->get_blocks(dev);

	buf = transport_kmap_first_data_page(cmd);

	buf[0] = (blocks >> 56) & 0xff;
	buf[1] = (blocks >> 48) & 0xff;
	buf[2] = (blocks >> 40) & 0xff;
	buf[3] = (blocks >> 32) & 0xff;
	buf[4] = (blocks >> 24) & 0xff;
	buf[5] = (blocks >> 16) & 0xff;
	buf[6] = (blocks >> 8) & 0xff;
	buf[7] = blocks & 0xff;
	buf[8] = (dev->se_sub_dev->se_dev_attrib.block_size >> 24) & 0xff;
	buf[9] = (dev->se_sub_dev->se_dev_attrib.block_size >> 16) & 0xff;
	buf[10] = (dev->se_sub_dev->se_dev_attrib.block_size >> 8) & 0xff;
	buf[11] = dev->se_sub_dev->se_dev_attrib.block_size & 0xff;
	/*
	 * Set Thin Provisioning Enable bit following sbc3r22 in section
	 * READ CAPACITY (16) byte 14 if emulate_tpu or emulate_tpws is enabled.
	 */
	if (dev->se_sub_dev->se_dev_attrib.emulate_tpu || dev->se_sub_dev->se_dev_attrib.emulate_tpws)
		buf[14] = 0x80;

	transport_kunmap_first_data_page(cmd);

	return 0;
}

static int
target_modesense_rwrecovery(unsigned char *p)
{
	p[0] = 0x01;
	p[1] = 0x0a;

	return 12;
}

static int
target_modesense_control(struct se_device *dev, unsigned char *p)
{
	p[0] = 0x0a;
	p[1] = 0x0a;
	p[2] = 2;
	/*
	 * From spc4r23, 7.4.7 Control mode page
	 *
	 * The QUEUE ALGORITHM MODIFIER field (see table 368) specifies
	 * restrictions on the algorithm used for reordering commands
	 * having the SIMPLE task attribute (see SAM-4).
	 *
	 * Table 368 -- QUEUE ALGORITHM MODIFIER field
	 * Code      Description
	 * 0h        Restricted reordering
	 * 1h        Unrestricted reordering allowed
	 * 2h to 7h  Reserved
	 * 8h to Fh  Vendor specific
	 *
	 * A value of zero in the QUEUE ALGORITHM MODIFIER field specifies that
	 * the device server shall order the processing sequence of commands
	 * having the SIMPLE task attribute such that data integrity is maintained
	 * for that I_T nexus (i.e., if the transmission of new SCSI transport protocol
	 * requests is halted at any time, the final value of all data observable
	 * on the medium shall be the same as if all the commands had been processed
	 * with the ORDERED task attribute).
	 *
	 * A value of one in the QUEUE ALGORITHM MODIFIER field specifies that the
	 * device server may reorder the processing sequence of commands having the
	 * SIMPLE task attribute in any manner. Any data integrity exposures related to
	 * command sequence order shall be explicitly handled by the application client
	 * through the selection of appropriate commands and task attributes.
	 */
	p[3] = (dev->se_sub_dev->se_dev_attrib.emulate_rest_reord == 1) ? 0x00 : 0x10;
	/*
	 * From spc4r17, section 7.4.6 Control mode Page
	 *
	 * Unit Attention interlocks control (UN_INTLCK_CTRL) to code 00b
	 *
	 * 00b: The logical unit shall clear any unit attention condition
	 * reported in the same I_T_L_Q nexus transaction as a CHECK CONDITION
	 * status and shall not establish a unit attention condition when a com-
	 * mand is completed with BUSY, TASK SET FULL, or RESERVATION CONFLICT
	 * status.
	 *
	 * 10b: The logical unit shall not clear any unit attention condition
	 * reported in the same I_T_L_Q nexus transaction as a CHECK CONDITION
	 * status and shall not establish a unit attention condition when
	 * a command is completed with BUSY, TASK SET FULL, or RESERVATION
	 * CONFLICT status.
	 *
	 * 11b: The logical unit shall not clear any unit attention condition
	 * reported in the same I_T_L_Q nexus transaction as a CHECK CONDITION
	 * status and shall establish a unit attention condition for the
	 * initiator port associated with the I_T nexus on which the BUSY,
	 * TASK SET FULL, or RESERVATION CONFLICT status is being returned.
	 * Depending on the status, the additional sense code shall be set to
	 * PREVIOUS BUSY STATUS, PREVIOUS TASK SET FULL STATUS, or PREVIOUS
	 * RESERVATION CONFLICT STATUS. Until it is cleared by a REQUEST SENSE
	 * command, a unit attention condition shall be established only once
	 * for a BUSY, TASK SET FULL, or RESERVATION CONFLICT status regardless
	 * to the number of commands completed with one of those status codes.
	 */
	p[4] = (dev->se_sub_dev->se_dev_attrib.emulate_ua_intlck_ctrl == 2) ? 0x30 :
	       (dev->se_sub_dev->se_dev_attrib.emulate_ua_intlck_ctrl == 1) ? 0x20 : 0x00;
	/*
	 * From spc4r17, section 7.4.6 Control mode Page
	 *
	 * Task Aborted Status (TAS) bit set to zero.
	 *
	 * A task aborted status (TAS) bit set to zero specifies that aborted
	 * tasks shall be terminated by the device server without any response
	 * to the application client. A TAS bit set to one specifies that tasks
	 * aborted by the actions of an I_T nexus other than the I_T nexus on
	 * which the command was received shall be completed with TASK ABORTED
	 * status (see SAM-4).
	 */
	p[5] = (dev->se_sub_dev->se_dev_attrib.emulate_tas) ? 0x40 : 0x00;
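	/*
	 * p[8..9]: BUSY TIMEOUT PERIOD (FFFFh == unlimited),
	 * p[10..11]: EXTENDED SELF-TEST ROUTINE COMPLETION TIME.
	 */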
	p[8] = 0xff;
	p[9] = 0xff;
	p[11] = 30;

	return 12;
}

static int
target_modesense_caching(struct se_device *dev, unsigned char *p)
{
	p[0] = 0x08;
	p[1] = 0x12;
	if (dev->se_sub_dev->se_dev_attrib.emulate_write_cache > 0)
		p[2] = 0x04; /* Write Cache Enable */
	p[12] = 0x20; /* Disabled Read Ahead */

	return 20;
}

static void
target_modesense_write_protect(unsigned char *buf, int type)
{
	/*
	 * I believe that the WP bit (bit 7) in the mode header is the same for
	 * all device types.
	 */
	switch (type) {
	case TYPE_DISK:
	case TYPE_TAPE:
	default:
		buf[0] |= 0x80; /* WP bit */
		break;
	}
}

static void
target_modesense_dpofua(unsigned char *buf, int type)
{
	switch (type) {
	case TYPE_DISK:
		buf[0] |= 0x10; /* DPOFUA bit */
		break;
	default:
		break;
	}
}

static int
target_emulate_modesense(struct se_cmd *cmd, int ten)
{
	struct se_device *dev = cmd->se_dev;
	char *cdb = cmd->t_task_cdb;
	unsigned char *rbuf;
	int type = dev->transport->get_device_type(dev);
	int offset = (ten) ? 8 : 4;
	int length = 0;
	unsigned char buf[SE_MODE_PAGE_BUF];

	memset(buf, 0, SE_MODE_PAGE_BUF);
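	/*
	 * Mode parameter header is 4 bytes for MODE SENSE(6) and 8 bytes for
	 * MODE SENSE(10); the MODE DATA LENGTH field excludes itself, which
	 * is why offset is reduced by 1 or 2 before it is written out below.
	 */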

	switch (cdb[2] & 0x3f) {
	case 0x01:
		length = target_modesense_rwrecovery(&buf[offset]);
		break;
	case 0x08:
		length = target_modesense_caching(dev, &buf[offset]);
		break;
	case 0x0a:
		length = target_modesense_control(dev, &buf[offset]);
		break;
	case 0x3f:
		length = target_modesense_rwrecovery(&buf[offset]);
		length += target_modesense_caching(dev, &buf[offset+length]);
		length += target_modesense_control(dev, &buf[offset+length]);
		break;
	default:
		pr_err("Got Unknown Mode Page: 0x%02x\n",
			cdb[2] & 0x3f);
		return PYX_TRANSPORT_UNKNOWN_MODE_PAGE;
	}
	offset += length;

	if (ten) {
		offset -= 2;
		buf[0] = (offset >> 8) & 0xff;
		buf[1] = offset & 0xff;

		if ((cmd->se_lun->lun_access & TRANSPORT_LUNFLAGS_READ_ONLY) ||
		    (cmd->se_deve &&
		    (cmd->se_deve->lun_flags & TRANSPORT_LUNFLAGS_READ_ONLY)))
			target_modesense_write_protect(&buf[3], type);

		if ((dev->se_sub_dev->se_dev_attrib.emulate_write_cache > 0) &&
		    (dev->se_sub_dev->se_dev_attrib.emulate_fua_write > 0))
			target_modesense_dpofua(&buf[3], type);

		if ((offset + 2) > cmd->data_length)
			offset = cmd->data_length;

	} else {
		offset -= 1;
		buf[0] = offset & 0xff;

		if ((cmd->se_lun->lun_access & TRANSPORT_LUNFLAGS_READ_ONLY) ||
		    (cmd->se_deve &&
		    (cmd->se_deve->lun_flags & TRANSPORT_LUNFLAGS_READ_ONLY)))
			target_modesense_write_protect(&buf[2], type);

		if ((dev->se_sub_dev->se_dev_attrib.emulate_write_cache > 0) &&
		    (dev->se_sub_dev->se_dev_attrib.emulate_fua_write > 0))
			target_modesense_dpofua(&buf[2], type);

		if ((offset + 1) > cmd->data_length)
			offset = cmd->data_length;
	}

	rbuf = transport_kmap_first_data_page(cmd);
	memcpy(rbuf, buf, offset);
	transport_kunmap_first_data_page(cmd);

	return 0;
}

static int
target_emulate_request_sense(struct se_cmd *cmd)
{
	unsigned char *cdb = cmd->t_task_cdb;
	unsigned char *buf;
	u8 ua_asc = 0, ua_ascq = 0;
	int err = 0;

	if (cdb[1] & 0x01) {
		pr_err("REQUEST_SENSE description emulation not"
			" supported\n");
		return PYX_TRANSPORT_INVALID_CDB_FIELD;
	}

	buf = transport_kmap_first_data_page(cmd);

	if (!core_scsi3_ua_clear_for_request_sense(cmd, &ua_asc, &ua_ascq)) {
		/*
		 * CURRENT ERROR, UNIT ATTENTION
		 */
		buf[0] = 0x70;
		buf[SPC_SENSE_KEY_OFFSET] = UNIT_ATTENTION;
		/*
		 * Make sure request data length is enough for additional
		 * sense data.
		 */
		if (cmd->data_length <= 18) {
			buf[7] = 0x00;
			err = -EINVAL;
			goto end;
		}
		/*
		 * The Additional Sense Code (ASC) from the UNIT ATTENTION
		 */
		buf[SPC_ASC_KEY_OFFSET] = ua_asc;
		buf[SPC_ASCQ_KEY_OFFSET] = ua_ascq;
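		/* Byte 7 is ADDITIONAL SENSE LENGTH: 0x0A -> 10 more bytes follow */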
		buf[7] = 0x0A;
	} else {
		/*
		 * CURRENT ERROR, NO SENSE
		 */
		buf[0] = 0x70;
		buf[SPC_SENSE_KEY_OFFSET] = NO_SENSE;
		/*
		 * Make sure request data length is enough for additional
		 * sense data.
		 */
		if (cmd->data_length <= 18) {
			buf[7] = 0x00;
			err = -EINVAL;
			goto end;
		}
		/*
		 * NO ADDITIONAL SENSE INFORMATION
		 */
		buf[SPC_ASC_KEY_OFFSET] = 0x00;
		buf[7] = 0x0A;
	}

end:
	transport_kunmap_first_data_page(cmd);

	return 0;
}

/*
 * Used for TCM/IBLOCK and TCM/FILEIO for block/blk-lib.c level discard support.
 * Note this is not used for TCM/pSCSI passthrough
 */
static int
target_emulate_unmap(struct se_task *task)
{
	struct se_cmd *cmd = task->task_se_cmd;
	struct se_device *dev = cmd->se_dev;
	unsigned char *buf, *ptr = NULL;
	sector_t lba;
	unsigned int size = cmd->data_length, range;
	int ret = 0, offset;
	unsigned short dl, bd_dl;

	/* First UNMAP block descriptor starts at 8 byte offset */
	offset = 8;
	size -= 8;
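	/*
	 * UNMAP parameter list layout (see SBC-3, UNMAP command): a 2-byte
	 * UNMAP DATA LENGTH, a 2-byte UNMAP BLOCK DESCRIPTOR DATA LENGTH and
	 * 4 reserved bytes, followed by 16-byte block descriptors holding an
	 * 8-byte LBA, a 4-byte NUMBER OF LOGICAL BLOCKS and 4 reserved bytes.
	 */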

	buf = transport_kmap_first_data_page(cmd);
	dl = get_unaligned_be16(&buf[0]);
	bd_dl = get_unaligned_be16(&buf[2]);

	ptr = &buf[offset];
	pr_debug("UNMAP: Sub: %s Using dl: %hu bd_dl: %hu size: %u"
		" ptr: %p\n", dev->transport->name, dl, bd_dl, size, ptr);

	while (size) {
		lba = get_unaligned_be64(&ptr[0]);
		range = get_unaligned_be32(&ptr[8]);
		pr_debug("UNMAP: Using lba: %llu and range: %u\n",
			(unsigned long long)lba, range);

		ret = dev->transport->do_discard(dev, lba, range);
		if (ret < 0) {
			pr_err("blkdev_issue_discard() failed: %d\n",
				ret);
			goto err;
		}

		ptr += 16;
		size -= 16;
	}

	task->task_scsi_status = GOOD;
	transport_complete_task(task, 1);
err:
	transport_kunmap_first_data_page(cmd);

	return ret;
}

/*
 * Used for TCM/IBLOCK and TCM/FILEIO for block/blk-lib.c level discard support.
 * Note this is not used for TCM/pSCSI passthrough
 */
static int
target_emulate_write_same(struct se_task *task, int write_same32)
{
	struct se_cmd *cmd = task->task_se_cmd;
	struct se_device *dev = cmd->se_dev;
	sector_t range;
	sector_t lba = cmd->t_task_lba;
	unsigned int num_blocks;
	int ret;
	/*
	 * Extract num_blocks from the WRITE_SAME_* CDB. Then use the explicit
	 * range when non-zero is supplied, otherwise calculate the remaining
	 * range based on ->get_blocks() - starting LBA.
	 */
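	/*
	 * NUMBER OF LOGICAL BLOCKS lives at CDB bytes 28-31 for
	 * WRITE SAME (32) and at bytes 10-13 for WRITE SAME (16).
	 */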
	if (write_same32)
		num_blocks = get_unaligned_be32(&cmd->t_task_cdb[28]);
	else
		num_blocks = get_unaligned_be32(&cmd->t_task_cdb[10]);

	if (num_blocks != 0)
		range = num_blocks;
	else
		range = (dev->transport->get_blocks(dev) - lba);

	pr_debug("WRITE_SAME UNMAP: LBA: %llu Range: %llu\n",
		(unsigned long long)lba, (unsigned long long)range);

	ret = dev->transport->do_discard(dev, lba, range);
	if (ret < 0) {
		pr_debug("blkdev_issue_discard() failed for WRITE_SAME\n");
		return ret;
	}

	task->task_scsi_status = GOOD;
	transport_complete_task(task, 1);
	return 0;
}

int
transport_emulate_control_cdb(struct se_task *task)
{
	struct se_cmd *cmd = task->task_se_cmd;
	struct se_device *dev = cmd->se_dev;
	unsigned short service_action;
	int ret = 0;

	switch (cmd->t_task_cdb[0]) {
	case INQUIRY:
		ret = target_emulate_inquiry(cmd);
		break;
	case READ_CAPACITY:
		ret = target_emulate_readcapacity(cmd);
		break;
	case MODE_SENSE:
		ret = target_emulate_modesense(cmd, 0);
		break;
	case MODE_SENSE_10:
		ret = target_emulate_modesense(cmd, 1);
		break;
	case SERVICE_ACTION_IN:
		switch (cmd->t_task_cdb[1] & 0x1f) {
		case SAI_READ_CAPACITY_16:
			ret = target_emulate_readcapacity_16(cmd);
			break;
		default:
			pr_err("Unsupported SA: 0x%02x\n",
				cmd->t_task_cdb[1] & 0x1f);
			return PYX_TRANSPORT_UNKNOWN_SAM_OPCODE;
		}
		break;
	case REQUEST_SENSE:
		ret = target_emulate_request_sense(cmd);
		break;
	case UNMAP:
		if (!dev->transport->do_discard) {
			pr_err("UNMAP emulation not supported for: %s\n",
				dev->transport->name);
			return PYX_TRANSPORT_UNKNOWN_SAM_OPCODE;
		}
		ret = target_emulate_unmap(task);
		break;
	case WRITE_SAME_16:
		if (!dev->transport->do_discard) {
			pr_err("WRITE_SAME_16 emulation not supported"
				" for: %s\n", dev->transport->name);
			return PYX_TRANSPORT_UNKNOWN_SAM_OPCODE;
		}
		ret = target_emulate_write_same(task, 0);
		break;
	case VARIABLE_LENGTH_CMD:
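		/* For variable-length CDBs the SERVICE ACTION is at bytes 8-9 */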
		service_action =
			get_unaligned_be16(&cmd->t_task_cdb[8]);
		switch (service_action) {
		case WRITE_SAME_32:
			if (!dev->transport->do_discard) {
				pr_err("WRITE_SAME_32 SA emulation not"
					" supported for: %s\n",
					dev->transport->name);
				return PYX_TRANSPORT_UNKNOWN_SAM_OPCODE;
			}
			ret = target_emulate_write_same(task, 1);
			break;
		default:
			pr_err("Unsupported VARIABLE_LENGTH_CMD SA:"
				" 0x%02x\n", service_action);
			break;
		}
		break;
	case SYNCHRONIZE_CACHE:
	case 0x91: /* SYNCHRONIZE_CACHE_16: */
		if (!dev->transport->do_sync_cache) {
			pr_err("SYNCHRONIZE_CACHE emulation not supported"
				" for: %s\n", dev->transport->name);
			return PYX_TRANSPORT_UNKNOWN_SAM_OPCODE;
		}
		dev->transport->do_sync_cache(task);
		break;
	case ALLOW_MEDIUM_REMOVAL:
	case ERASE:
	case REZERO_UNIT:
	case SEEK_10:
	case SPACE:
	case START_STOP:
	case TEST_UNIT_READY:
	case VERIFY:
	case WRITE_FILEMARKS:
		break;
	default:
		pr_err("Unsupported SCSI Opcode: 0x%02x for %s\n",
			cmd->t_task_cdb[0], dev->transport->name);
		return PYX_TRANSPORT_UNKNOWN_SAM_OPCODE;
	}

	if (ret < 0)
		return ret;
	task->task_scsi_status = GOOD;
	transport_complete_task(task, 1);

	return PYX_TRANSPORT_SENT_TO_TRANSPORT;
}