drivers/scsi/scsi_debug.c
1 /*
2 * vvvvvvvvvvvvvvvvvvvvvvv Original vvvvvvvvvvvvvvvvvvvvvvvvvvvvvvv
3 * Copyright (C) 1992 Eric Youngdale
4 * Simulate a host adapter with 2 disks attached. Do a lot of checking
5 * to make sure that we are not getting blocks mixed up, and PANIC if
6 * anything out of the ordinary is seen.
7 * ^^^^^^^^^^^^^^^^^^^^^^^ Original ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
8 *
9 * This version is more generic, simulating a variable number of disks
10 * (or disk-like devices) sharing a common amount of RAM. To be more
11 * realistic, the simulated devices have the transport attributes of
12 * SAS disks.
13 *
14 *
15 * For documentation see http://sg.danny.cz/sg/sdebug26.html
16 *
17 * D. Gilbert (dpg) work for Magneto-Optical device test [20010421]
18 * dpg: work for devfs large number of disks [20010809]
19 * forked for lk 2.5 series [20011216, 20020101]
20 * use vmalloc() more inquiry+mode_sense [20020302]
21 * add timers for delayed responses [20020721]
22 * Patrick Mansfield <patmans@us.ibm.com> max_luns+scsi_level [20021031]
23 * Mike Anderson <andmike@us.ibm.com> sysfs work [20021118]
24 * dpg: change style of boot options to "scsi_debug.num_tgts=2" and
25 * module options to "modprobe scsi_debug num_tgts=2" [20021221]
26 */
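/*
 * Illustrative usage (editorial addition, not part of the original header):
 * with the module option style described above, a small test rig might be
 * loaded as
 *
 *     modprobe scsi_debug dev_size_mb=256 num_tgts=2 max_luns=4 delay=1
 *
 * The parameter names are assumed to mirror the scsi_debug_* variables and
 * DEF_* defaults declared below.
 */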
27
28 #include <linux/module.h>
29
30 #include <linux/kernel.h>
31 #include <linux/errno.h>
32 #include <linux/timer.h>
33 #include <linux/slab.h>
34 #include <linux/types.h>
35 #include <linux/string.h>
36 #include <linux/genhd.h>
37 #include <linux/fs.h>
38 #include <linux/init.h>
39 #include <linux/proc_fs.h>
40 #include <linux/vmalloc.h>
41 #include <linux/moduleparam.h>
42 #include <linux/scatterlist.h>
43 #include <linux/blkdev.h>
44 #include <linux/crc-t10dif.h>
45
46 #include <net/checksum.h>
47
48 #include <asm/unaligned.h>
49
50 #include <scsi/scsi.h>
51 #include <scsi/scsi_cmnd.h>
52 #include <scsi/scsi_device.h>
53 #include <scsi/scsi_host.h>
54 #include <scsi/scsicam.h>
55 #include <scsi/scsi_eh.h>
56 #include <scsi/scsi_dbg.h>
57
58 #include "sd.h"
59 #include "scsi_logging.h"
60
61 #define SCSI_DEBUG_VERSION "1.82"
62 static const char * scsi_debug_version_date = "20100324";
63
64 /* Additional Sense Code (ASC) */
65 #define NO_ADDITIONAL_SENSE 0x0
66 #define LOGICAL_UNIT_NOT_READY 0x4
67 #define UNRECOVERED_READ_ERR 0x11
68 #define PARAMETER_LIST_LENGTH_ERR 0x1a
69 #define INVALID_OPCODE 0x20
70 #define ADDR_OUT_OF_RANGE 0x21
71 #define INVALID_COMMAND_OPCODE 0x20
72 #define INVALID_FIELD_IN_CDB 0x24
73 #define INVALID_FIELD_IN_PARAM_LIST 0x26
74 #define POWERON_RESET 0x29
75 #define SAVING_PARAMS_UNSUP 0x39
76 #define TRANSPORT_PROBLEM 0x4b
77 #define THRESHOLD_EXCEEDED 0x5d
78 #define LOW_POWER_COND_ON 0x5e
79
80 /* Additional Sense Code Qualifier (ASCQ) */
81 #define ACK_NAK_TO 0x3
82
83 #define SDEBUG_TAGGED_QUEUING 0 /* 0 | MSG_SIMPLE_TAG | MSG_ORDERED_TAG */
84
85 /* Default values for driver parameters */
86 #define DEF_NUM_HOST 1
87 #define DEF_NUM_TGTS 1
88 #define DEF_MAX_LUNS 1
89 /* With these defaults, this driver will make 1 host with 1 target
90 * (id 0) containing 1 logical unit (lun 0). That is 1 device.
91 */
92 #define DEF_DELAY 1
93 #define DEF_DEV_SIZE_MB 8
94 #define DEF_EVERY_NTH 0
95 #define DEF_NUM_PARTS 0
96 #define DEF_OPTS 0
97 #define DEF_SCSI_LEVEL 5 /* INQUIRY, byte2 [5->SPC-3] */
98 #define DEF_PTYPE 0
99 #define DEF_D_SENSE 0
100 #define DEF_NO_LUN_0 0
101 #define DEF_VIRTUAL_GB 0
102 #define DEF_FAKE_RW 0
103 #define DEF_VPD_USE_HOSTNO 1
104 #define DEF_SECTOR_SIZE 512
105 #define DEF_DIX 0
106 #define DEF_DIF 0
107 #define DEF_GUARD 0
108 #define DEF_ATO 1
109 #define DEF_PHYSBLK_EXP 0
110 #define DEF_LOWEST_ALIGNED 0
111 #define DEF_OPT_BLKS 64
112 #define DEF_UNMAP_MAX_BLOCKS 0xFFFFFFFF
113 #define DEF_UNMAP_MAX_DESC 256
114 #define DEF_UNMAP_GRANULARITY 1
115 #define DEF_UNMAP_ALIGNMENT 0
116 #define DEF_TPWS 0
117 #define DEF_TPU 0
118
119 /* bit mask values for scsi_debug_opts */
120 #define SCSI_DEBUG_OPT_NOISE 1
121 #define SCSI_DEBUG_OPT_MEDIUM_ERR 2
122 #define SCSI_DEBUG_OPT_TIMEOUT 4
123 #define SCSI_DEBUG_OPT_RECOVERED_ERR 8
124 #define SCSI_DEBUG_OPT_TRANSPORT_ERR 16
125 #define SCSI_DEBUG_OPT_DIF_ERR 32
126 #define SCSI_DEBUG_OPT_DIX_ERR 64
127 /* When "every_nth" > 0 then modulo "every_nth" commands:
128 * - a no response is simulated if SCSI_DEBUG_OPT_TIMEOUT is set
129 * - a RECOVERED_ERROR is simulated on successful read and write
130 * commands if SCSI_DEBUG_OPT_RECOVERED_ERR is set.
131 * - a TRANSPORT_ERROR is simulated on successful read and write
132 * commands if SCSI_DEBUG_OPT_TRANSPORT_ERR is set.
133 *
134 * When "every_nth" < 0 then after "- every_nth" commands:
135 * - a no response is simulated if SCSI_DEBUG_OPT_TIMEOUT is set
136 * - a RECOVERED_ERROR is simulated on successful read and write
137 * commands if SCSI_DEBUG_OPT_RECOVERED_ERR is set.
138 * - a TRANSPORT_ERROR is simulated on successful read and write
139 * commands if SCSI_DEBUG_OPT_TRANSPORT_ERR is set.
140 * This will continue until some other action occurs (e.g. the user
141 * writing a new value (other than -1 or 1) to every_nth via sysfs).
142 */
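/*
 * Worked example (added for clarity): "every_nth=100 opts=4" makes every
 * 100th command appear to get no response (SCSI_DEBUG_OPT_TIMEOUT), which is
 * a convenient way to exercise the mid-layer's command timeout and abort
 * handling against this driver.
 */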
143
144 /* When the SCSI_DEBUG_OPT_MEDIUM_ERR bit is set in scsi_debug_opts, a medium
145  * error is simulated at this sector on read commands: */
146 #define OPT_MEDIUM_ERR_ADDR 0x1234 /* that's sector 4660 in decimal */
147
148 /* If REPORT LUNS has luns >= 256 it can choose "flat space" (value 1)
149 * or "peripheral device" addressing (value 0) */
150 #define SAM2_LUN_ADDRESS_METHOD 0
151 #define SAM2_WLUN_REPORT_LUNS 0xc101
152
153 /* Can queue up to this number of commands. Typically commands that
154  * have a non-zero delay are queued. */
155 #define SCSI_DEBUG_CANQUEUE 255
156
157 static int scsi_debug_add_host = DEF_NUM_HOST;
158 static int scsi_debug_delay = DEF_DELAY;
159 static int scsi_debug_dev_size_mb = DEF_DEV_SIZE_MB;
160 static int scsi_debug_every_nth = DEF_EVERY_NTH;
161 static int scsi_debug_max_luns = DEF_MAX_LUNS;
162 static int scsi_debug_max_queue = SCSI_DEBUG_CANQUEUE;
163 static int scsi_debug_num_parts = DEF_NUM_PARTS;
164 static int scsi_debug_no_uld = 0;
165 static int scsi_debug_num_tgts = DEF_NUM_TGTS; /* targets per host */
166 static int scsi_debug_opts = DEF_OPTS;
167 static int scsi_debug_scsi_level = DEF_SCSI_LEVEL;
168 static int scsi_debug_ptype = DEF_PTYPE; /* SCSI peripheral type (0==disk) */
169 static int scsi_debug_dsense = DEF_D_SENSE;
170 static int scsi_debug_no_lun_0 = DEF_NO_LUN_0;
171 static int scsi_debug_virtual_gb = DEF_VIRTUAL_GB;
172 static int scsi_debug_fake_rw = DEF_FAKE_RW;
173 static int scsi_debug_vpd_use_hostno = DEF_VPD_USE_HOSTNO;
174 static int scsi_debug_sector_size = DEF_SECTOR_SIZE;
175 static int scsi_debug_dix = DEF_DIX;
176 static int scsi_debug_dif = DEF_DIF;
177 static int scsi_debug_guard = DEF_GUARD;
178 static int scsi_debug_ato = DEF_ATO;
179 static int scsi_debug_physblk_exp = DEF_PHYSBLK_EXP;
180 static int scsi_debug_lowest_aligned = DEF_LOWEST_ALIGNED;
181 static int scsi_debug_opt_blks = DEF_OPT_BLKS;
182 static unsigned int scsi_debug_unmap_max_desc = DEF_UNMAP_MAX_DESC;
183 static unsigned int scsi_debug_unmap_max_blocks = DEF_UNMAP_MAX_BLOCKS;
184 static unsigned int scsi_debug_unmap_granularity = DEF_UNMAP_GRANULARITY;
185 static unsigned int scsi_debug_unmap_alignment = DEF_UNMAP_ALIGNMENT;
186 static unsigned int scsi_debug_tpws = DEF_TPWS;
187 static unsigned int scsi_debug_tpu = DEF_TPU;
188
189 static int scsi_debug_cmnd_count = 0;
190
191 #define DEV_READONLY(TGT) (0)
192 #define DEV_REMOVEABLE(TGT) (0)
193
194 static unsigned int sdebug_store_sectors;
195 static sector_t sdebug_capacity; /* in sectors */
196
197 /* old BIOS stuff, kernel may get rid of them but some mode sense pages
198 may still need them */
199 static int sdebug_heads; /* heads per disk */
200 static int sdebug_cylinders_per; /* cylinders per surface */
201 static int sdebug_sectors_per; /* sectors per cylinder */
202
203 #define SDEBUG_MAX_PARTS 4
204
205 #define SDEBUG_SENSE_LEN 32
206
207 #define SCSI_DEBUG_MAX_CMD_LEN 32
208
209 struct sdebug_dev_info {
210 struct list_head dev_list;
211 unsigned char sense_buff[SDEBUG_SENSE_LEN]; /* weak nexus */
212 unsigned int channel;
213 unsigned int target;
214 unsigned int lun;
215 struct sdebug_host_info *sdbg_host;
216 unsigned int wlun;
217 char reset;
218 char stopped;
219 char used;
220 };
221
222 struct sdebug_host_info {
223 struct list_head host_list;
224 struct Scsi_Host *shost;
225 struct device dev;
226 struct list_head dev_info_list;
227 };
228
229 #define to_sdebug_host(d) \
230 container_of(d, struct sdebug_host_info, dev)
231
232 static LIST_HEAD(sdebug_host_list);
233 static DEFINE_SPINLOCK(sdebug_host_list_lock);
234
235 typedef void (* done_funct_t) (struct scsi_cmnd *);
236
237 struct sdebug_queued_cmd {
238 int in_use;
239 struct timer_list cmnd_timer;
240 done_funct_t done_funct;
241 struct scsi_cmnd * a_cmnd;
242 int scsi_result;
243 };
244 static struct sdebug_queued_cmd queued_arr[SCSI_DEBUG_CANQUEUE];
245
246 static unsigned char * fake_storep; /* ramdisk storage */
247 static unsigned char *dif_storep; /* protection info */
248 static void *map_storep; /* provisioning map */
249
250 static unsigned long map_size;
251 static int num_aborts = 0;
252 static int num_dev_resets = 0;
253 static int num_bus_resets = 0;
254 static int num_host_resets = 0;
255 static int dix_writes;
256 static int dix_reads;
257 static int dif_errors;
258
259 static DEFINE_SPINLOCK(queued_arr_lock);
260 static DEFINE_RWLOCK(atomic_rw);
261
262 static char sdebug_proc_name[] = "scsi_debug";
263
264 static struct bus_type pseudo_lld_bus;
265
266 static inline sector_t dif_offset(sector_t sector)
267 {
268 return sector << 3;
269 }
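/*
 * Note (editorial): each logical block carries one 8-byte DIF tuple, so the
 * byte offset of a sector's protection information in dif_storep is simply
 * sector * 8, i.e. the sector << 3 computed above.
 */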
270
271 static struct device_driver sdebug_driverfs_driver = {
272 .name = sdebug_proc_name,
273 .bus = &pseudo_lld_bus,
274 };
275
276 static const int check_condition_result =
277 (DRIVER_SENSE << 24) | SAM_STAT_CHECK_CONDITION;
278
279 static const int illegal_condition_result =
280 (DRIVER_SENSE << 24) | (DID_ABORT << 16) | SAM_STAT_CHECK_CONDITION;
281
282 static unsigned char ctrl_m_pg[] = {0xa, 10, 2, 0, 0, 0, 0, 0,
283 0, 0, 0x2, 0x4b};
284 static unsigned char iec_m_pg[] = {0x1c, 0xa, 0x08, 0, 0, 0, 0, 0,
285 0, 0, 0x0, 0x0};
286
287 static int sdebug_add_adapter(void);
288 static void sdebug_remove_adapter(void);
289
290 static void sdebug_max_tgts_luns(void)
291 {
292 struct sdebug_host_info *sdbg_host;
293 struct Scsi_Host *hpnt;
294
295 spin_lock(&sdebug_host_list_lock);
296 list_for_each_entry(sdbg_host, &sdebug_host_list, host_list) {
297 hpnt = sdbg_host->shost;
298 if ((hpnt->this_id >= 0) &&
299 (scsi_debug_num_tgts > hpnt->this_id))
300 hpnt->max_id = scsi_debug_num_tgts + 1;
301 else
302 hpnt->max_id = scsi_debug_num_tgts;
303 /* scsi_debug_max_luns; */
304 hpnt->max_lun = SAM2_WLUN_REPORT_LUNS;
305 }
306 spin_unlock(&sdebug_host_list_lock);
307 }
308
309 static void mk_sense_buffer(struct sdebug_dev_info *devip, int key,
310 int asc, int asq)
311 {
312 unsigned char *sbuff;
313
314 sbuff = devip->sense_buff;
315 memset(sbuff, 0, SDEBUG_SENSE_LEN);
316
317 scsi_build_sense_buffer(scsi_debug_dsense, sbuff, key, asc, asq);
318
319 if (SCSI_DEBUG_OPT_NOISE & scsi_debug_opts)
320 printk(KERN_INFO "scsi_debug: [sense_key,asc,ascq]: "
321 "[0x%x,0x%x,0x%x]\n", key, asc, asq);
322 }
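/*
 * Note (editorial): scsi_build_sense_buffer() emits either fixed-format
 * (response code 0x70) or descriptor-format (0x72) sense data depending on
 * scsi_debug_dsense, so the same key/asc/ascq triple lands at different
 * offsets in devip->sense_buff; resp_requests() below handles both layouts.
 */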
323
324 static void get_data_transfer_info(unsigned char *cmd,
325 unsigned long long *lba, unsigned int *num,
326 u32 *ei_lba)
327 {
328 *ei_lba = 0;
329
330 switch (*cmd) {
331 case VARIABLE_LENGTH_CMD:
332 *lba = (u64)cmd[19] | (u64)cmd[18] << 8 |
333 (u64)cmd[17] << 16 | (u64)cmd[16] << 24 |
334 (u64)cmd[15] << 32 | (u64)cmd[14] << 40 |
335 (u64)cmd[13] << 48 | (u64)cmd[12] << 56;
336
337 *ei_lba = (u32)cmd[23] | (u32)cmd[22] << 8 |
338 (u32)cmd[21] << 16 | (u32)cmd[20] << 24;
339
340 *num = (u32)cmd[31] | (u32)cmd[30] << 8 | (u32)cmd[29] << 16 |
341 (u32)cmd[28] << 24;
342 break;
343
344 case WRITE_SAME_16:
345 case WRITE_16:
346 case READ_16:
347 *lba = (u64)cmd[9] | (u64)cmd[8] << 8 |
348 (u64)cmd[7] << 16 | (u64)cmd[6] << 24 |
349 (u64)cmd[5] << 32 | (u64)cmd[4] << 40 |
350 (u64)cmd[3] << 48 | (u64)cmd[2] << 56;
351
352 *num = (u32)cmd[13] | (u32)cmd[12] << 8 | (u32)cmd[11] << 16 |
353 (u32)cmd[10] << 24;
354 break;
355 case WRITE_12:
356 case READ_12:
357 *lba = (u32)cmd[5] | (u32)cmd[4] << 8 | (u32)cmd[3] << 16 |
358 (u32)cmd[2] << 24;
359
360 *num = (u32)cmd[9] | (u32)cmd[8] << 8 | (u32)cmd[7] << 16 |
361 (u32)cmd[6] << 24;
362 break;
363 case WRITE_SAME:
364 case WRITE_10:
365 case READ_10:
366 case XDWRITEREAD_10:
367 *lba = (u32)cmd[5] | (u32)cmd[4] << 8 | (u32)cmd[3] << 16 |
368 (u32)cmd[2] << 24;
369
370 *num = (u32)cmd[8] | (u32)cmd[7] << 8;
371 break;
372 case WRITE_6:
373 case READ_6:
374 *lba = (u32)cmd[3] | (u32)cmd[2] << 8 |
375 (u32)(cmd[1] & 0x1f) << 16;
376 *num = (0 == cmd[4]) ? 256 : cmd[4];
377 break;
378 default:
379 break;
380 }
381 }
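/*
 * Worked example (added for clarity): for a READ(10) CDB of
 *     28 00 00 00 12 34 00 00 08 00
 * the switch above yields *lba = 0x1234 (cmd[2..5], big-endian) and
 * *num = 8 blocks (cmd[7..8]); *ei_lba stays 0 because only the 32-byte
 * variable-length CDB carries an expected initial LBA.
 */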
382
383 static int scsi_debug_ioctl(struct scsi_device *dev, int cmd, void __user *arg)
384 {
385 if (SCSI_DEBUG_OPT_NOISE & scsi_debug_opts) {
386 printk(KERN_INFO "scsi_debug: ioctl: cmd=0x%x\n", cmd);
387 }
388 return -EINVAL;
389 /* return -ENOTTY; // correct return but upsets fdisk */
390 }
391
392 static int check_readiness(struct scsi_cmnd * SCpnt, int reset_only,
393 struct sdebug_dev_info * devip)
394 {
395 if (devip->reset) {
396 if (SCSI_DEBUG_OPT_NOISE & scsi_debug_opts)
397 printk(KERN_INFO "scsi_debug: Reporting Unit "
398 "attention: power on reset\n");
399 devip->reset = 0;
400 mk_sense_buffer(devip, UNIT_ATTENTION, POWERON_RESET, 0);
401 return check_condition_result;
402 }
403 if ((0 == reset_only) && devip->stopped) {
404 if (SCSI_DEBUG_OPT_NOISE & scsi_debug_opts)
405 printk(KERN_INFO "scsi_debug: Reporting Not "
406 "ready: initializing command required\n");
407 mk_sense_buffer(devip, NOT_READY, LOGICAL_UNIT_NOT_READY,
408 0x2);
409 return check_condition_result;
410 }
411 return 0;
412 }
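/*
 * Note (editorial): the first command checked after a simulated reset is
 * answered with UNIT ATTENTION / power-on-reset sense, and commands issued
 * while the logical unit is stopped get NOT READY / "initializing command
 * required" (ASCQ 0x2), mirroring how a real disk surfaces these conditions.
 */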
413
414 /* Returns 0 if ok else (DID_ERROR << 16). Sets scp->resid . */
415 static int fill_from_dev_buffer(struct scsi_cmnd *scp, unsigned char *arr,
416 int arr_len)
417 {
418 int act_len;
419 struct scsi_data_buffer *sdb = scsi_in(scp);
420
421 if (!sdb->length)
422 return 0;
423 if (!(scsi_bidi_cmnd(scp) || scp->sc_data_direction == DMA_FROM_DEVICE))
424 return (DID_ERROR << 16);
425
426 act_len = sg_copy_from_buffer(sdb->table.sgl, sdb->table.nents,
427 arr, arr_len);
428 if (sdb->resid)
429 sdb->resid -= act_len;
430 else
431 sdb->resid = scsi_bufflen(scp) - act_len;
432
433 return 0;
434 }
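/*
 * Note (editorial): fill_from_dev_buffer() is the common path for returning
 * response data to the mid-layer; it copies 'arr' into the command's
 * scatter-gather list and adjusts resid so that short transfers report how
 * many requested bytes were not moved.
 */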
435
436 /* Returns number of bytes fetched into 'arr' or -1 if error. */
437 static int fetch_to_dev_buffer(struct scsi_cmnd *scp, unsigned char *arr,
438 int arr_len)
439 {
440 if (!scsi_bufflen(scp))
441 return 0;
442 if (!(scsi_bidi_cmnd(scp) || scp->sc_data_direction == DMA_TO_DEVICE))
443 return -1;
444
445 return scsi_sg_copy_to_buffer(scp, arr, arr_len);
446 }
447
448
449 static const char * inq_vendor_id = "Linux ";
450 static const char * inq_product_id = "scsi_debug ";
451 static const char * inq_product_rev = "0004";
452
453 static int inquiry_evpd_83(unsigned char * arr, int port_group_id,
454 int target_dev_id, int dev_id_num,
455 const char * dev_id_str,
456 int dev_id_str_len)
457 {
458 int num, port_a;
459 char b[32];
460
461 port_a = target_dev_id + 1;
462 /* T10 vendor identifier field format (faked) */
463 arr[0] = 0x2; /* ASCII */
464 arr[1] = 0x1;
465 arr[2] = 0x0;
466 memcpy(&arr[4], inq_vendor_id, 8);
467 memcpy(&arr[12], inq_product_id, 16);
468 memcpy(&arr[28], dev_id_str, dev_id_str_len);
469 num = 8 + 16 + dev_id_str_len;
470 arr[3] = num;
471 num += 4;
472 if (dev_id_num >= 0) {
473 /* NAA-5, Logical unit identifier (binary) */
474 arr[num++] = 0x1; /* binary (not necessarily sas) */
475 arr[num++] = 0x3; /* PIV=0, lu, naa */
476 arr[num++] = 0x0;
477 arr[num++] = 0x8;
478 arr[num++] = 0x53; /* naa-5 ieee company id=0x333333 (fake) */
479 arr[num++] = 0x33;
480 arr[num++] = 0x33;
481 arr[num++] = 0x30;
482 arr[num++] = (dev_id_num >> 24);
483 arr[num++] = (dev_id_num >> 16) & 0xff;
484 arr[num++] = (dev_id_num >> 8) & 0xff;
485 arr[num++] = dev_id_num & 0xff;
486 /* Target relative port number */
487 arr[num++] = 0x61; /* proto=sas, binary */
488 arr[num++] = 0x94; /* PIV=1, target port, rel port */
489 arr[num++] = 0x0; /* reserved */
490 arr[num++] = 0x4; /* length */
491 arr[num++] = 0x0; /* reserved */
492 arr[num++] = 0x0; /* reserved */
493 arr[num++] = 0x0;
494 arr[num++] = 0x1; /* relative port A */
495 }
496 /* NAA-5, Target port identifier */
497 arr[num++] = 0x61; /* proto=sas, binary */
498 arr[num++] = 0x93; /* piv=1, target port, naa */
499 arr[num++] = 0x0;
500 arr[num++] = 0x8;
501 arr[num++] = 0x52; /* naa-5, company id=0x222222 (fake) */
502 arr[num++] = 0x22;
503 arr[num++] = 0x22;
504 arr[num++] = 0x20;
505 arr[num++] = (port_a >> 24);
506 arr[num++] = (port_a >> 16) & 0xff;
507 arr[num++] = (port_a >> 8) & 0xff;
508 arr[num++] = port_a & 0xff;
509 /* NAA-5, Target port group identifier */
510 arr[num++] = 0x61; /* proto=sas, binary */
511 arr[num++] = 0x95; /* piv=1, target port group id */
512 arr[num++] = 0x0;
513 arr[num++] = 0x4;
514 arr[num++] = 0;
515 arr[num++] = 0;
516 arr[num++] = (port_group_id >> 8) & 0xff;
517 arr[num++] = port_group_id & 0xff;
518 /* NAA-5, Target device identifier */
519 arr[num++] = 0x61; /* proto=sas, binary */
520 arr[num++] = 0xa3; /* piv=1, target device, naa */
521 arr[num++] = 0x0;
522 arr[num++] = 0x8;
523 arr[num++] = 0x52; /* naa-5, company id=0x222222 (fake) */
524 arr[num++] = 0x22;
525 arr[num++] = 0x22;
526 arr[num++] = 0x20;
527 arr[num++] = (target_dev_id >> 24);
528 arr[num++] = (target_dev_id >> 16) & 0xff;
529 arr[num++] = (target_dev_id >> 8) & 0xff;
530 arr[num++] = target_dev_id & 0xff;
531 /* SCSI name string: Target device identifier */
532 arr[num++] = 0x63; /* proto=sas, UTF-8 */
533 arr[num++] = 0xa8; /* piv=1, target device, SCSI name string */
534 arr[num++] = 0x0;
535 arr[num++] = 24;
536 memcpy(arr + num, "naa.52222220", 12);
537 num += 12;
538 snprintf(b, sizeof(b), "%08X", target_dev_id);
539 memcpy(arr + num, b, 8);
540 num += 8;
541 memset(arr + num, 0, 4);
542 num += 4;
543 return num;
544 }
545
546
547 static unsigned char vpd84_data[] = {
548 /* from 4th byte */ 0x22,0x22,0x22,0x0,0xbb,0x0,
549 0x22,0x22,0x22,0x0,0xbb,0x1,
550 0x22,0x22,0x22,0x0,0xbb,0x2,
551 };
552
553 static int inquiry_evpd_84(unsigned char * arr)
554 {
555 memcpy(arr, vpd84_data, sizeof(vpd84_data));
556 return sizeof(vpd84_data);
557 }
558
559 static int inquiry_evpd_85(unsigned char * arr)
560 {
561 int num = 0;
562 const char * na1 = "https://www.kernel.org/config";
563 const char * na2 = "http://www.kernel.org/log";
564 int plen, olen;
565
566 arr[num++] = 0x1; /* lu, storage config */
567 arr[num++] = 0x0; /* reserved */
568 arr[num++] = 0x0;
569 olen = strlen(na1);
570 plen = olen + 1;
571 if (plen % 4)
572 plen = ((plen / 4) + 1) * 4;
573 arr[num++] = plen; /* length, null terminated, padded */
574 memcpy(arr + num, na1, olen);
575 memset(arr + num + olen, 0, plen - olen);
576 num += plen;
577
578 arr[num++] = 0x4; /* lu, logging */
579 arr[num++] = 0x0; /* reserved */
580 arr[num++] = 0x0;
581 olen = strlen(na2);
582 plen = olen + 1;
583 if (plen % 4)
584 plen = ((plen / 4) + 1) * 4;
585 arr[num++] = plen; /* length, null terminated, padded */
586 memcpy(arr + num, na2, olen);
587 memset(arr + num + olen, 0, plen - olen);
588 num += plen;
589
590 return num;
591 }
592
593 /* SCSI ports VPD page */
594 static int inquiry_evpd_88(unsigned char * arr, int target_dev_id)
595 {
596 int num = 0;
597 int port_a, port_b;
598
599 port_a = target_dev_id + 1;
600 port_b = port_a + 1;
601 arr[num++] = 0x0; /* reserved */
602 arr[num++] = 0x0; /* reserved */
603 arr[num++] = 0x0;
604 arr[num++] = 0x1; /* relative port 1 (primary) */
605 memset(arr + num, 0, 6);
606 num += 6;
607 arr[num++] = 0x0;
608 arr[num++] = 12; /* length tp descriptor */
609 /* naa-5 target port identifier (A) */
610 arr[num++] = 0x61; /* proto=sas, binary */
611 arr[num++] = 0x93; /* PIV=1, target port, NAA */
612 arr[num++] = 0x0; /* reserved */
613 arr[num++] = 0x8; /* length */
614 arr[num++] = 0x52; /* NAA-5, company_id=0x222222 (fake) */
615 arr[num++] = 0x22;
616 arr[num++] = 0x22;
617 arr[num++] = 0x20;
618 arr[num++] = (port_a >> 24);
619 arr[num++] = (port_a >> 16) & 0xff;
620 arr[num++] = (port_a >> 8) & 0xff;
621 arr[num++] = port_a & 0xff;
622
623 arr[num++] = 0x0; /* reserved */
624 arr[num++] = 0x0; /* reserved */
625 arr[num++] = 0x0;
626 arr[num++] = 0x2; /* relative port 2 (secondary) */
627 memset(arr + num, 0, 6);
628 num += 6;
629 arr[num++] = 0x0;
630 arr[num++] = 12; /* length tp descriptor */
631 /* naa-5 target port identifier (B) */
632 arr[num++] = 0x61; /* proto=sas, binary */
633 arr[num++] = 0x93; /* PIV=1, target port, NAA */
634 arr[num++] = 0x0; /* reserved */
635 arr[num++] = 0x8; /* length */
636 arr[num++] = 0x52; /* NAA-5, company_id=0x222222 (fake) */
637 arr[num++] = 0x22;
638 arr[num++] = 0x22;
639 arr[num++] = 0x20;
640 arr[num++] = (port_b >> 24);
641 arr[num++] = (port_b >> 16) & 0xff;
642 arr[num++] = (port_b >> 8) & 0xff;
643 arr[num++] = port_b & 0xff;
644
645 return num;
646 }
647
648
649 static unsigned char vpd89_data[] = {
650 /* from 4th byte */ 0,0,0,0,
651 'l','i','n','u','x',' ',' ',' ',
652 'S','A','T',' ','s','c','s','i','_','d','e','b','u','g',' ',' ',
653 '1','2','3','4',
654 0x34,0,0,0,1,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,
655 0xec,0,0,0,
656 0x5a,0xc,0xff,0x3f,0x37,0xc8,0x10,0,0,0,0,0,0x3f,0,0,0,
657 0,0,0,0,0x58,0x58,0x58,0x58,0x58,0x58,0x58,0x58,0x20,0x20,0x20,0x20,
658 0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0,0,0,0x40,0x4,0,0x2e,0x33,
659 0x38,0x31,0x20,0x20,0x20,0x20,0x54,0x53,0x38,0x33,0x30,0x30,0x33,0x31,
660 0x53,0x41,
661 0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,
662 0x20,0x20,
663 0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,
664 0x10,0x80,
665 0,0,0,0x2f,0,0,0,0x2,0,0x2,0x7,0,0xff,0xff,0x1,0,
666 0x3f,0,0xc1,0xff,0x3e,0,0x10,0x1,0xb0,0xf8,0x50,0x9,0,0,0x7,0,
667 0x3,0,0x78,0,0x78,0,0xf0,0,0x78,0,0,0,0,0,0,0,
668 0,0,0,0,0,0,0,0,0x2,0,0,0,0,0,0,0,
669 0x7e,0,0x1b,0,0x6b,0x34,0x1,0x7d,0x3,0x40,0x69,0x34,0x1,0x3c,0x3,0x40,
670 0x7f,0x40,0,0,0,0,0xfe,0xfe,0,0,0,0,0,0xfe,0,0,
671 0,0,0,0,0,0,0,0,0xb0,0xf8,0x50,0x9,0,0,0,0,
672 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
673 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
674 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
675 0x1,0,0xb0,0xf8,0x50,0x9,0xb0,0xf8,0x50,0x9,0x20,0x20,0x2,0,0xb6,0x42,
676 0,0x80,0x8a,0,0x6,0x3c,0xa,0x3c,0xff,0xff,0xc6,0x7,0,0x1,0,0x8,
677 0xf0,0xf,0,0x10,0x2,0,0x30,0,0,0,0,0,0,0,0x6,0xfe,
678 0,0,0x2,0,0x50,0,0x8a,0,0x4f,0x95,0,0,0x21,0,0xb,0,
679 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
680 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
681 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
682 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
683 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
684 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
685 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
686 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
687 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
688 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
689 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
690 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0xa5,0x51,
691 };
692
693 static int inquiry_evpd_89(unsigned char * arr)
694 {
695 memcpy(arr, vpd89_data, sizeof(vpd89_data));
696 return sizeof(vpd89_data);
697 }
698
699
700 /* Block limits VPD page (SBC-3) */
701 static unsigned char vpdb0_data[] = {
702 /* from 4th byte */ 0,0,0,4, 0,0,0x4,0, 0,0,0,64,
703 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
704 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
705 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
706 };
707
708 static int inquiry_evpd_b0(unsigned char * arr)
709 {
710 unsigned int gran;
711
712 memcpy(arr, vpdb0_data, sizeof(vpdb0_data));
713
714 /* Optimal transfer length granularity */
715 gran = 1 << scsi_debug_physblk_exp;
716 arr[2] = (gran >> 8) & 0xff;
717 arr[3] = gran & 0xff;
718
719 /* Maximum Transfer Length */
720 if (sdebug_store_sectors > 0x400) {
721 arr[4] = (sdebug_store_sectors >> 24) & 0xff;
722 arr[5] = (sdebug_store_sectors >> 16) & 0xff;
723 arr[6] = (sdebug_store_sectors >> 8) & 0xff;
724 arr[7] = sdebug_store_sectors & 0xff;
725 }
726
727 /* Optimal Transfer Length */
728 put_unaligned_be32(scsi_debug_opt_blks, &arr[8]);
729
730 if (scsi_debug_tpu) {
731 /* Maximum Unmap LBA Count */
732 put_unaligned_be32(scsi_debug_unmap_max_blocks, &arr[16]);
733
734 /* Maximum Unmap Block Descriptor Count */
735 put_unaligned_be32(scsi_debug_unmap_max_desc, &arr[20]);
736 }
737
738 /* Unmap Granularity Alignment */
739 if (scsi_debug_unmap_alignment) {
740 put_unaligned_be32(scsi_debug_unmap_alignment, &arr[28]);
741 arr[28] |= 0x80; /* UGAVALID */
742 }
743
744 /* Optimal Unmap Granularity */
745 put_unaligned_be32(scsi_debug_unmap_granularity, &arr[24]);
746
747 return 0x3c; /* Mandatory page length for thin provisioning */
750 }
751
752 /* Block device characteristics VPD page (SBC-3) */
753 static int inquiry_evpd_b1(unsigned char *arr)
754 {
755 memset(arr, 0, 0x3c);
756 arr[0] = 0;
757 arr[1] = 1; /* non rotating medium (e.g. solid state) */
758 arr[2] = 0;
759 arr[3] = 5; /* less than 1.8" */
760
761 return 0x3c;
762 }
763
764 /* Thin provisioning VPD page (SBC-3) */
765 static int inquiry_evpd_b2(unsigned char *arr)
766 {
767 memset(arr, 0, 0x8);
768 arr[0] = 0; /* threshold exponent */
769
770 if (scsi_debug_tpu)
771 arr[1] = 1 << 7;
772
773 if (scsi_debug_tpws)
774 arr[1] |= 1 << 6;
775
776 return 0x8;
777 }
778
779 #define SDEBUG_LONG_INQ_SZ 96
780 #define SDEBUG_MAX_INQ_ARR_SZ 584
781
782 static int resp_inquiry(struct scsi_cmnd * scp, int target,
783 struct sdebug_dev_info * devip)
784 {
785 unsigned char pq_pdt;
786 unsigned char * arr;
787 unsigned char *cmd = (unsigned char *)scp->cmnd;
788 int alloc_len, n, ret;
789
790 alloc_len = (cmd[3] << 8) + cmd[4];
791 arr = kzalloc(SDEBUG_MAX_INQ_ARR_SZ, GFP_ATOMIC);
792 if (! arr)
793 return DID_REQUEUE << 16;
794 if (devip->wlun)
795 pq_pdt = 0x1e; /* present, wlun */
796 else if (scsi_debug_no_lun_0 && (0 == devip->lun))
797 pq_pdt = 0x7f; /* not present, no device type */
798 else
799 pq_pdt = (scsi_debug_ptype & 0x1f);
800 arr[0] = pq_pdt;
801 if (0x2 & cmd[1]) { /* CMDDT bit set */
802 mk_sense_buffer(devip, ILLEGAL_REQUEST, INVALID_FIELD_IN_CDB,
803 0);
804 kfree(arr);
805 return check_condition_result;
806 } else if (0x1 & cmd[1]) { /* EVPD bit set */
807 int lu_id_num, port_group_id, target_dev_id, len;
808 char lu_id_str[6];
809 int host_no = devip->sdbg_host->shost->host_no;
810
811 port_group_id = (((host_no + 1) & 0x7f) << 8) +
812 (devip->channel & 0x7f);
813 if (0 == scsi_debug_vpd_use_hostno)
814 host_no = 0;
815 lu_id_num = devip->wlun ? -1 : (((host_no + 1) * 2000) +
816 (devip->target * 1000) + devip->lun);
817 target_dev_id = ((host_no + 1) * 2000) +
818 (devip->target * 1000) - 3;
819 len = scnprintf(lu_id_str, 6, "%d", lu_id_num);
820 if (0 == cmd[2]) { /* supported vital product data pages */
821 arr[1] = cmd[2]; /*sanity */
822 n = 4;
823 arr[n++] = 0x0; /* this page */
824 arr[n++] = 0x80; /* unit serial number */
825 arr[n++] = 0x83; /* device identification */
826 arr[n++] = 0x84; /* software interface ident. */
827 arr[n++] = 0x85; /* management network addresses */
828 arr[n++] = 0x86; /* extended inquiry */
829 arr[n++] = 0x87; /* mode page policy */
830 arr[n++] = 0x88; /* SCSI ports */
831 arr[n++] = 0x89; /* ATA information */
832 arr[n++] = 0xb0; /* Block limits (SBC) */
833 arr[n++] = 0xb1; /* Block characteristics (SBC) */
834 arr[n++] = 0xb2; /* Thin provisioning (SBC) */
835 arr[3] = n - 4; /* number of supported VPD pages */
836 } else if (0x80 == cmd[2]) { /* unit serial number */
837 arr[1] = cmd[2]; /*sanity */
838 arr[3] = len;
839 memcpy(&arr[4], lu_id_str, len);
840 } else if (0x83 == cmd[2]) { /* device identification */
841 arr[1] = cmd[2]; /*sanity */
842 arr[3] = inquiry_evpd_83(&arr[4], port_group_id,
843 target_dev_id, lu_id_num,
844 lu_id_str, len);
845 } else if (0x84 == cmd[2]) { /* Software interface ident. */
846 arr[1] = cmd[2]; /*sanity */
847 arr[3] = inquiry_evpd_84(&arr[4]);
848 } else if (0x85 == cmd[2]) { /* Management network addresses */
849 arr[1] = cmd[2]; /*sanity */
850 arr[3] = inquiry_evpd_85(&arr[4]);
851 } else if (0x86 == cmd[2]) { /* extended inquiry */
852 arr[1] = cmd[2]; /*sanity */
853 arr[3] = 0x3c; /* number of following entries */
854 if (scsi_debug_dif == SD_DIF_TYPE3_PROTECTION)
855 arr[4] = 0x4; /* SPT: GRD_CHK:1 */
856 else if (scsi_debug_dif)
857 arr[4] = 0x5; /* SPT: GRD_CHK:1, REF_CHK:1 */
858 else
859 arr[4] = 0x0; /* no protection stuff */
860 arr[5] = 0x7; /* head of q, ordered + simple q's */
861 } else if (0x87 == cmd[2]) { /* mode page policy */
862 arr[1] = cmd[2]; /*sanity */
863 arr[3] = 0x8; /* number of following entries */
864 arr[4] = 0x2; /* disconnect-reconnect mp */
865 arr[6] = 0x80; /* mlus, shared */
866 arr[8] = 0x18; /* protocol specific lu */
867 arr[10] = 0x82; /* mlus, per initiator port */
868 } else if (0x88 == cmd[2]) { /* SCSI Ports */
869 arr[1] = cmd[2]; /*sanity */
870 arr[3] = inquiry_evpd_88(&arr[4], target_dev_id);
871 } else if (0x89 == cmd[2]) { /* ATA information */
872 arr[1] = cmd[2]; /*sanity */
873 n = inquiry_evpd_89(&arr[4]);
874 arr[2] = (n >> 8);
875 arr[3] = (n & 0xff);
876 } else if (0xb0 == cmd[2]) { /* Block limits (SBC) */
877 arr[1] = cmd[2]; /*sanity */
878 arr[3] = inquiry_evpd_b0(&arr[4]);
879 } else if (0xb1 == cmd[2]) { /* Block characteristics (SBC) */
880 arr[1] = cmd[2]; /*sanity */
881 arr[3] = inquiry_evpd_b1(&arr[4]);
882 } else if (0xb2 == cmd[2]) { /* Thin provisioning (SBC) */
883 arr[1] = cmd[2]; /*sanity */
884 arr[3] = inquiry_evpd_b2(&arr[4]);
885 } else {
886 /* Illegal request, invalid field in cdb */
887 mk_sense_buffer(devip, ILLEGAL_REQUEST,
888 INVALID_FIELD_IN_CDB, 0);
889 kfree(arr);
890 return check_condition_result;
891 }
892 len = min(((arr[2] << 8) + arr[3]) + 4, alloc_len);
893 ret = fill_from_dev_buffer(scp, arr,
894 min(len, SDEBUG_MAX_INQ_ARR_SZ));
895 kfree(arr);
896 return ret;
897 }
898 /* drops through here for a standard inquiry */
899 arr[1] = DEV_REMOVEABLE(target) ? 0x80 : 0; /* Removable disk */
900 arr[2] = scsi_debug_scsi_level;
901 arr[3] = 2; /* response_data_format==2 */
902 arr[4] = SDEBUG_LONG_INQ_SZ - 5;
903 arr[5] = scsi_debug_dif ? 1 : 0; /* PROTECT bit */
904 if (0 == scsi_debug_vpd_use_hostno)
905 arr[5] = 0x10; /* claim: implicit TGPS */
906 arr[6] = 0x10; /* claim: MultiP */
907 /* arr[6] |= 0x40; ... claim: EncServ (enclosure services) */
908 arr[7] = 0xa; /* claim: LINKED + CMDQUE */
909 memcpy(&arr[8], inq_vendor_id, 8);
910 memcpy(&arr[16], inq_product_id, 16);
911 memcpy(&arr[32], inq_product_rev, 4);
912 /* version descriptors (2 bytes each) follow */
913 arr[58] = 0x0; arr[59] = 0x77; /* SAM-3 ANSI */
914 arr[60] = 0x3; arr[61] = 0x14; /* SPC-3 ANSI */
915 n = 62;
916 if (scsi_debug_ptype == 0) {
917 arr[n++] = 0x3; arr[n++] = 0x3d; /* SBC-2 ANSI */
918 } else if (scsi_debug_ptype == 1) {
919 arr[n++] = 0x3; arr[n++] = 0x60; /* SSC-2 no version */
920 }
921 arr[n++] = 0xc; arr[n++] = 0xf; /* SAS-1.1 rev 10 */
922 ret = fill_from_dev_buffer(scp, arr,
923 min(alloc_len, SDEBUG_LONG_INQ_SZ));
924 kfree(arr);
925 return ret;
926 }
927
928 static int resp_requests(struct scsi_cmnd * scp,
929 struct sdebug_dev_info * devip)
930 {
931 unsigned char * sbuff;
932 unsigned char *cmd = (unsigned char *)scp->cmnd;
933 unsigned char arr[SDEBUG_SENSE_LEN];
934 int want_dsense;
935 int len = 18;
936
937 memset(arr, 0, sizeof(arr));
938 if (devip->reset == 1)
939 mk_sense_buffer(devip, 0, NO_ADDITIONAL_SENSE, 0);
940 want_dsense = !!(cmd[1] & 1) || scsi_debug_dsense;
941 sbuff = devip->sense_buff;
942 if ((iec_m_pg[2] & 0x4) && (6 == (iec_m_pg[3] & 0xf))) {
943 if (want_dsense) {
944 arr[0] = 0x72;
945 arr[1] = 0x0; /* NO_SENSE in sense_key */
946 arr[2] = THRESHOLD_EXCEEDED;
947 arr[3] = 0xff; /* TEST set and MRIE==6 */
948 } else {
949 arr[0] = 0x70;
950 arr[2] = 0x0; /* NO_SENSE in sense_key */
951 arr[7] = 0xa; /* 18 byte sense buffer */
952 arr[12] = THRESHOLD_EXCEEDED;
953 arr[13] = 0xff; /* TEST set and MRIE==6 */
954 }
955 } else {
956 memcpy(arr, sbuff, SDEBUG_SENSE_LEN);
957 if ((cmd[1] & 1) && (! scsi_debug_dsense)) {
958 /* DESC bit set and sense_buff in fixed format */
959 memset(arr, 0, sizeof(arr));
960 arr[0] = 0x72;
961 arr[1] = sbuff[2]; /* sense key */
962 arr[2] = sbuff[12]; /* asc */
963 arr[3] = sbuff[13]; /* ascq */
964 len = 8;
965 }
966 }
967 mk_sense_buffer(devip, 0, NO_ADDITIONAL_SENSE, 0);
968 return fill_from_dev_buffer(scp, arr, len);
969 }
970
971 static int resp_start_stop(struct scsi_cmnd * scp,
972 struct sdebug_dev_info * devip)
973 {
974 unsigned char *cmd = (unsigned char *)scp->cmnd;
975 int power_cond, errsts, start;
976
977 if ((errsts = check_readiness(scp, 1, devip)))
978 return errsts;
979 power_cond = (cmd[4] & 0xf0) >> 4;
980 if (power_cond) {
981 mk_sense_buffer(devip, ILLEGAL_REQUEST, INVALID_FIELD_IN_CDB,
982 0);
983 return check_condition_result;
984 }
985 start = cmd[4] & 1;
986 if (start == devip->stopped)
987 devip->stopped = !start;
988 return 0;
989 }
990
991 static sector_t get_sdebug_capacity(void)
992 {
993 if (scsi_debug_virtual_gb > 0)
994 return (sector_t)scsi_debug_virtual_gb *
995 (1073741824 / scsi_debug_sector_size);
996 else
997 return sdebug_store_sectors;
998 }
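/*
 * Worked example (added for clarity): with scsi_debug_virtual_gb=4 and a
 * 512-byte sector size the reported capacity is
 *     4 * (1073741824 / 512) = 8388608 sectors (4 GiB),
 * even though the RAM backing store may be much smaller; accesses beyond the
 * store wrap around (see do_device_access()).
 */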
999
1000 #define SDEBUG_READCAP_ARR_SZ 8
1001 static int resp_readcap(struct scsi_cmnd * scp,
1002 struct sdebug_dev_info * devip)
1003 {
1004 unsigned char arr[SDEBUG_READCAP_ARR_SZ];
1005 unsigned int capac;
1006 int errsts;
1007
1008 if ((errsts = check_readiness(scp, 1, devip)))
1009 return errsts;
1010 /* following just in case virtual_gb changed */
1011 sdebug_capacity = get_sdebug_capacity();
1012 memset(arr, 0, SDEBUG_READCAP_ARR_SZ);
1013 if (sdebug_capacity < 0xffffffff) {
1014 capac = (unsigned int)sdebug_capacity - 1;
1015 arr[0] = (capac >> 24);
1016 arr[1] = (capac >> 16) & 0xff;
1017 arr[2] = (capac >> 8) & 0xff;
1018 arr[3] = capac & 0xff;
1019 } else {
1020 arr[0] = 0xff;
1021 arr[1] = 0xff;
1022 arr[2] = 0xff;
1023 arr[3] = 0xff;
1024 }
1025 arr[6] = (scsi_debug_sector_size >> 8) & 0xff;
1026 arr[7] = scsi_debug_sector_size & 0xff;
1027 return fill_from_dev_buffer(scp, arr, SDEBUG_READCAP_ARR_SZ);
1028 }
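/*
 * Note (editorial): as in SBC, READ CAPACITY(10) reports 0xffffffff when the
 * capacity is too large for 32 bits, which tells the initiator to retry with
 * READ CAPACITY(16) (resp_readcap16() below).
 */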
1029
1030 #define SDEBUG_READCAP16_ARR_SZ 32
1031 static int resp_readcap16(struct scsi_cmnd * scp,
1032 struct sdebug_dev_info * devip)
1033 {
1034 unsigned char *cmd = (unsigned char *)scp->cmnd;
1035 unsigned char arr[SDEBUG_READCAP16_ARR_SZ];
1036 unsigned long long capac;
1037 int errsts, k, alloc_len;
1038
1039 if ((errsts = check_readiness(scp, 1, devip)))
1040 return errsts;
1041 alloc_len = ((cmd[10] << 24) + (cmd[11] << 16) + (cmd[12] << 8)
1042 + cmd[13]);
1043 /* following just in case virtual_gb changed */
1044 sdebug_capacity = get_sdebug_capacity();
1045 memset(arr, 0, SDEBUG_READCAP16_ARR_SZ);
1046 capac = sdebug_capacity - 1;
1047 for (k = 0; k < 8; ++k, capac >>= 8)
1048 arr[7 - k] = capac & 0xff;
1049 arr[8] = (scsi_debug_sector_size >> 24) & 0xff;
1050 arr[9] = (scsi_debug_sector_size >> 16) & 0xff;
1051 arr[10] = (scsi_debug_sector_size >> 8) & 0xff;
1052 arr[11] = scsi_debug_sector_size & 0xff;
1053 arr[13] = scsi_debug_physblk_exp & 0xf;
1054 arr[14] = (scsi_debug_lowest_aligned >> 8) & 0x3f;
1055
1056 if (scsi_debug_tpu || scsi_debug_tpws)
1057 arr[14] |= 0x80; /* TPE */
1058
1059 arr[15] = scsi_debug_lowest_aligned & 0xff;
1060
1061 if (scsi_debug_dif) {
1062 arr[12] = (scsi_debug_dif - 1) << 1; /* P_TYPE */
1063 arr[12] |= 1; /* PROT_EN */
1064 }
1065
1066 return fill_from_dev_buffer(scp, arr,
1067 min(alloc_len, SDEBUG_READCAP16_ARR_SZ));
1068 }
1069
1070 #define SDEBUG_MAX_TGTPGS_ARR_SZ 1412
1071
1072 static int resp_report_tgtpgs(struct scsi_cmnd * scp,
1073 struct sdebug_dev_info * devip)
1074 {
1075 unsigned char *cmd = (unsigned char *)scp->cmnd;
1076 unsigned char * arr;
1077 int host_no = devip->sdbg_host->shost->host_no;
1078 int n, ret, alen, rlen;
1079 int port_group_a, port_group_b, port_a, port_b;
1080
1081 alen = ((cmd[6] << 24) + (cmd[7] << 16) + (cmd[8] << 8)
1082 + cmd[9]);
1083
1084 arr = kzalloc(SDEBUG_MAX_TGTPGS_ARR_SZ, GFP_ATOMIC);
1085 if (! arr)
1086 return DID_REQUEUE << 16;
1087 /*
1088 * EVPD page 0x88 states we have two ports, one
1089 * real and a fake port with no device connected.
1090 * So we create two port groups with one port each
1091 * and set the group with port B to unavailable.
1092 */
1093 port_a = 0x1; /* relative port A */
1094 port_b = 0x2; /* relative port B */
1095 port_group_a = (((host_no + 1) & 0x7f) << 8) +
1096 (devip->channel & 0x7f);
1097 port_group_b = (((host_no + 1) & 0x7f) << 8) +
1098 (devip->channel & 0x7f) + 0x80;
1099
1100 /*
1101 * The asymmetric access state is cycled according to the host_id.
1102 */
1103 n = 4;
1104 if (0 == scsi_debug_vpd_use_hostno) {
1105 arr[n++] = host_no % 3; /* Asymm access state */
1106 arr[n++] = 0x0F; /* claim: all states are supported */
1107 } else {
1108 arr[n++] = 0x0; /* Active/Optimized path */
1109 arr[n++] = 0x01; /* claim: only support active/optimized paths */
1110 }
1111 arr[n++] = (port_group_a >> 8) & 0xff;
1112 arr[n++] = port_group_a & 0xff;
1113 arr[n++] = 0; /* Reserved */
1114 arr[n++] = 0; /* Status code */
1115 arr[n++] = 0; /* Vendor unique */
1116 arr[n++] = 0x1; /* One port per group */
1117 arr[n++] = 0; /* Reserved */
1118 arr[n++] = 0; /* Reserved */
1119 arr[n++] = (port_a >> 8) & 0xff;
1120 arr[n++] = port_a & 0xff;
1121 arr[n++] = 3; /* Port unavailable */
1122 arr[n++] = 0x08; /* claim: only unavailable paths are supported */
1123 arr[n++] = (port_group_b >> 8) & 0xff;
1124 arr[n++] = port_group_b & 0xff;
1125 arr[n++] = 0; /* Reserved */
1126 arr[n++] = 0; /* Status code */
1127 arr[n++] = 0; /* Vendor unique */
1128 arr[n++] = 0x1; /* One port per group */
1129 arr[n++] = 0; /* Reserved */
1130 arr[n++] = 0; /* Reserved */
1131 arr[n++] = (port_b >> 8) & 0xff;
1132 arr[n++] = port_b & 0xff;
1133
1134 rlen = n - 4;
1135 arr[0] = (rlen >> 24) & 0xff;
1136 arr[1] = (rlen >> 16) & 0xff;
1137 arr[2] = (rlen >> 8) & 0xff;
1138 arr[3] = rlen & 0xff;
1139
1140 /*
1141 * Return the smallest value of either
1142 * - The allocated length
1143 * - The constructed command length
1144 * - The maximum array size
1145 */
1146 rlen = min(alen, n);
1147 ret = fill_from_dev_buffer(scp, arr,
1148 min(rlen, SDEBUG_MAX_TGTPGS_ARR_SZ));
1149 kfree(arr);
1150 return ret;
1151 }
1152
1153 /* <<Following mode page info copied from ST318451LW>> */
1154
1155 static int resp_err_recov_pg(unsigned char * p, int pcontrol, int target)
1156 { /* Read-Write Error Recovery page for mode_sense */
1157 unsigned char err_recov_pg[] = {0x1, 0xa, 0xc0, 11, 240, 0, 0, 0,
1158 5, 0, 0xff, 0xff};
1159
1160 memcpy(p, err_recov_pg, sizeof(err_recov_pg));
1161 if (1 == pcontrol)
1162 memset(p + 2, 0, sizeof(err_recov_pg) - 2);
1163 return sizeof(err_recov_pg);
1164 }
1165
1166 static int resp_disconnect_pg(unsigned char * p, int pcontrol, int target)
1167 { /* Disconnect-Reconnect page for mode_sense */
1168 unsigned char disconnect_pg[] = {0x2, 0xe, 128, 128, 0, 10, 0, 0,
1169 0, 0, 0, 0, 0, 0, 0, 0};
1170
1171 memcpy(p, disconnect_pg, sizeof(disconnect_pg));
1172 if (1 == pcontrol)
1173 memset(p + 2, 0, sizeof(disconnect_pg) - 2);
1174 return sizeof(disconnect_pg);
1175 }
1176
1177 static int resp_format_pg(unsigned char * p, int pcontrol, int target)
1178 { /* Format device page for mode_sense */
1179 unsigned char format_pg[] = {0x3, 0x16, 0, 0, 0, 0, 0, 0,
1180 0, 0, 0, 0, 0, 0, 0, 0,
1181 0, 0, 0, 0, 0x40, 0, 0, 0};
1182
1183 memcpy(p, format_pg, sizeof(format_pg));
1184 p[10] = (sdebug_sectors_per >> 8) & 0xff;
1185 p[11] = sdebug_sectors_per & 0xff;
1186 p[12] = (scsi_debug_sector_size >> 8) & 0xff;
1187 p[13] = scsi_debug_sector_size & 0xff;
1188 if (DEV_REMOVEABLE(target))
1189 p[20] |= 0x20; /* should agree with INQUIRY */
1190 if (1 == pcontrol)
1191 memset(p + 2, 0, sizeof(format_pg) - 2);
1192 return sizeof(format_pg);
1193 }
1194
1195 static int resp_caching_pg(unsigned char * p, int pcontrol, int target)
1196 { /* Caching page for mode_sense */
1197 unsigned char caching_pg[] = {0x8, 18, 0x14, 0, 0xff, 0xff, 0, 0,
1198 0xff, 0xff, 0xff, 0xff, 0x80, 0x14, 0, 0, 0, 0, 0, 0};
1199
1200 memcpy(p, caching_pg, sizeof(caching_pg));
1201 if (1 == pcontrol)
1202 memset(p + 2, 0, sizeof(caching_pg) - 2);
1203 return sizeof(caching_pg);
1204 }
1205
1206 static int resp_ctrl_m_pg(unsigned char * p, int pcontrol, int target)
1207 { /* Control mode page for mode_sense */
1208 unsigned char ch_ctrl_m_pg[] = {/* 0xa, 10, */ 0x6, 0, 0, 0, 0, 0,
1209 0, 0, 0, 0};
1210 unsigned char d_ctrl_m_pg[] = {0xa, 10, 2, 0, 0, 0, 0, 0,
1211 0, 0, 0x2, 0x4b};
1212
1213 if (scsi_debug_dsense)
1214 ctrl_m_pg[2] |= 0x4;
1215 else
1216 ctrl_m_pg[2] &= ~0x4;
1217
1218 if (scsi_debug_ato)
1219 ctrl_m_pg[5] |= 0x80; /* ATO=1 */
1220
1221 memcpy(p, ctrl_m_pg, sizeof(ctrl_m_pg));
1222 if (1 == pcontrol)
1223 memcpy(p + 2, ch_ctrl_m_pg, sizeof(ch_ctrl_m_pg));
1224 else if (2 == pcontrol)
1225 memcpy(p, d_ctrl_m_pg, sizeof(d_ctrl_m_pg));
1226 return sizeof(ctrl_m_pg);
1227 }
1228
1229
1230 static int resp_iec_m_pg(unsigned char * p, int pcontrol, int target)
1231 { /* Informational Exceptions control mode page for mode_sense */
1232 unsigned char ch_iec_m_pg[] = {/* 0x1c, 0xa, */ 0x4, 0xf, 0, 0, 0, 0,
1233 0, 0, 0x0, 0x0};
1234 unsigned char d_iec_m_pg[] = {0x1c, 0xa, 0x08, 0, 0, 0, 0, 0,
1235 0, 0, 0x0, 0x0};
1236
1237 memcpy(p, iec_m_pg, sizeof(iec_m_pg));
1238 if (1 == pcontrol)
1239 memcpy(p + 2, ch_iec_m_pg, sizeof(ch_iec_m_pg));
1240 else if (2 == pcontrol)
1241 memcpy(p, d_iec_m_pg, sizeof(d_iec_m_pg));
1242 return sizeof(iec_m_pg);
1243 }
1244
1245 static int resp_sas_sf_m_pg(unsigned char * p, int pcontrol, int target)
1246 { /* SAS SSP mode page - short format for mode_sense */
1247 unsigned char sas_sf_m_pg[] = {0x19, 0x6,
1248 0x6, 0x0, 0x7, 0xd0, 0x0, 0x0};
1249
1250 memcpy(p, sas_sf_m_pg, sizeof(sas_sf_m_pg));
1251 if (1 == pcontrol)
1252 memset(p + 2, 0, sizeof(sas_sf_m_pg) - 2);
1253 return sizeof(sas_sf_m_pg);
1254 }
1255
1256
1257 static int resp_sas_pcd_m_spg(unsigned char * p, int pcontrol, int target,
1258 int target_dev_id)
1259 { /* SAS phy control and discover mode page for mode_sense */
1260 unsigned char sas_pcd_m_pg[] = {0x59, 0x1, 0, 0x64, 0, 0x6, 0, 2,
1261 0, 0, 0, 0, 0x10, 0x9, 0x8, 0x0,
1262 0x52, 0x22, 0x22, 0x20, 0x0, 0x0, 0x0, 0x0,
1263 0x51, 0x11, 0x11, 0x10, 0x0, 0x0, 0x0, 0x1,
1264 0x2, 0, 0, 0, 0, 0, 0, 0,
1265 0x88, 0x99, 0, 0, 0, 0, 0, 0,
1266 0, 0, 0, 0, 0, 0, 0, 0,
1267 0, 1, 0, 0, 0x10, 0x9, 0x8, 0x0,
1268 0x52, 0x22, 0x22, 0x20, 0x0, 0x0, 0x0, 0x0,
1269 0x51, 0x11, 0x11, 0x10, 0x0, 0x0, 0x0, 0x1,
1270 0x3, 0, 0, 0, 0, 0, 0, 0,
1271 0x88, 0x99, 0, 0, 0, 0, 0, 0,
1272 0, 0, 0, 0, 0, 0, 0, 0,
1273 };
1274 int port_a, port_b;
1275
1276 port_a = target_dev_id + 1;
1277 port_b = port_a + 1;
1278 memcpy(p, sas_pcd_m_pg, sizeof(sas_pcd_m_pg));
1279 p[20] = (port_a >> 24);
1280 p[21] = (port_a >> 16) & 0xff;
1281 p[22] = (port_a >> 8) & 0xff;
1282 p[23] = port_a & 0xff;
1283 p[48 + 20] = (port_b >> 24);
1284 p[48 + 21] = (port_b >> 16) & 0xff;
1285 p[48 + 22] = (port_b >> 8) & 0xff;
1286 p[48 + 23] = port_b & 0xff;
1287 if (1 == pcontrol)
1288 memset(p + 4, 0, sizeof(sas_pcd_m_pg) - 4);
1289 return sizeof(sas_pcd_m_pg);
1290 }
1291
1292 static int resp_sas_sha_m_spg(unsigned char * p, int pcontrol)
1293 { /* SAS SSP shared protocol specific port mode subpage */
1294 unsigned char sas_sha_m_pg[] = {0x59, 0x2, 0, 0xc, 0, 0x6, 0x10, 0,
1295 0, 0, 0, 0, 0, 0, 0, 0,
1296 };
1297
1298 memcpy(p, sas_sha_m_pg, sizeof(sas_sha_m_pg));
1299 if (1 == pcontrol)
1300 memset(p + 4, 0, sizeof(sas_sha_m_pg) - 4);
1301 return sizeof(sas_sha_m_pg);
1302 }
1303
1304 #define SDEBUG_MAX_MSENSE_SZ 256
1305
1306 static int resp_mode_sense(struct scsi_cmnd * scp, int target,
1307 struct sdebug_dev_info * devip)
1308 {
1309 unsigned char dbd, llbaa;
1310 int pcontrol, pcode, subpcode, bd_len;
1311 unsigned char dev_spec;
1312 int k, alloc_len, msense_6, offset, len, errsts, target_dev_id;
1313 unsigned char * ap;
1314 unsigned char arr[SDEBUG_MAX_MSENSE_SZ];
1315 unsigned char *cmd = (unsigned char *)scp->cmnd;
1316
1317 if ((errsts = check_readiness(scp, 1, devip)))
1318 return errsts;
1319 dbd = !!(cmd[1] & 0x8);
1320 pcontrol = (cmd[2] & 0xc0) >> 6;
1321 pcode = cmd[2] & 0x3f;
1322 subpcode = cmd[3];
1323 msense_6 = (MODE_SENSE == cmd[0]);
1324 llbaa = msense_6 ? 0 : !!(cmd[1] & 0x10);
1325 if ((0 == scsi_debug_ptype) && (0 == dbd))
1326 bd_len = llbaa ? 16 : 8;
1327 else
1328 bd_len = 0;
1329 alloc_len = msense_6 ? cmd[4] : ((cmd[7] << 8) | cmd[8]);
1330 memset(arr, 0, SDEBUG_MAX_MSENSE_SZ);
1331 if (0x3 == pcontrol) { /* Saving values not supported */
1332 mk_sense_buffer(devip, ILLEGAL_REQUEST, SAVING_PARAMS_UNSUP,
1333 0);
1334 return check_condition_result;
1335 }
1336 target_dev_id = ((devip->sdbg_host->shost->host_no + 1) * 2000) +
1337 (devip->target * 1000) - 3;
1338 /* set DPOFUA bit for disks */
1339 if (0 == scsi_debug_ptype)
1340 dev_spec = (DEV_READONLY(target) ? 0x80 : 0x0) | 0x10;
1341 else
1342 dev_spec = 0x0;
1343 if (msense_6) {
1344 arr[2] = dev_spec;
1345 arr[3] = bd_len;
1346 offset = 4;
1347 } else {
1348 arr[3] = dev_spec;
1349 if (16 == bd_len)
1350 arr[4] = 0x1; /* set LONGLBA bit */
1351 arr[7] = bd_len; /* assume 255 or less */
1352 offset = 8;
1353 }
1354 ap = arr + offset;
1355 if ((bd_len > 0) && (!sdebug_capacity))
1356 sdebug_capacity = get_sdebug_capacity();
1357
1358 if (8 == bd_len) {
1359 if (sdebug_capacity > 0xfffffffe) {
1360 ap[0] = 0xff;
1361 ap[1] = 0xff;
1362 ap[2] = 0xff;
1363 ap[3] = 0xff;
1364 } else {
1365 ap[0] = (sdebug_capacity >> 24) & 0xff;
1366 ap[1] = (sdebug_capacity >> 16) & 0xff;
1367 ap[2] = (sdebug_capacity >> 8) & 0xff;
1368 ap[3] = sdebug_capacity & 0xff;
1369 }
1370 ap[6] = (scsi_debug_sector_size >> 8) & 0xff;
1371 ap[7] = scsi_debug_sector_size & 0xff;
1372 offset += bd_len;
1373 ap = arr + offset;
1374 } else if (16 == bd_len) {
1375 unsigned long long capac = sdebug_capacity;
1376
1377 for (k = 0; k < 8; ++k, capac >>= 8)
1378 ap[7 - k] = capac & 0xff;
1379 ap[12] = (scsi_debug_sector_size >> 24) & 0xff;
1380 ap[13] = (scsi_debug_sector_size >> 16) & 0xff;
1381 ap[14] = (scsi_debug_sector_size >> 8) & 0xff;
1382 ap[15] = scsi_debug_sector_size & 0xff;
1383 offset += bd_len;
1384 ap = arr + offset;
1385 }
1386
1387 if ((subpcode > 0x0) && (subpcode < 0xff) && (0x19 != pcode)) {
1388 /* TODO: Control Extension page */
1389 mk_sense_buffer(devip, ILLEGAL_REQUEST, INVALID_FIELD_IN_CDB,
1390 0);
1391 return check_condition_result;
1392 }
1393 switch (pcode) {
1394 case 0x1: /* Read-Write error recovery page, direct access */
1395 len = resp_err_recov_pg(ap, pcontrol, target);
1396 offset += len;
1397 break;
1398 case 0x2: /* Disconnect-Reconnect page, all devices */
1399 len = resp_disconnect_pg(ap, pcontrol, target);
1400 offset += len;
1401 break;
1402 case 0x3: /* Format device page, direct access */
1403 len = resp_format_pg(ap, pcontrol, target);
1404 offset += len;
1405 break;
1406 case 0x8: /* Caching page, direct access */
1407 len = resp_caching_pg(ap, pcontrol, target);
1408 offset += len;
1409 break;
1410 case 0xa: /* Control Mode page, all devices */
1411 len = resp_ctrl_m_pg(ap, pcontrol, target);
1412 offset += len;
1413 break;
1414 case 0x19: /* if spc==1 then sas phy, control+discover */
1415 if ((subpcode > 0x2) && (subpcode < 0xff)) {
1416 mk_sense_buffer(devip, ILLEGAL_REQUEST,
1417 INVALID_FIELD_IN_CDB, 0);
1418 return check_condition_result;
1419 }
1420 len = 0;
1421 if ((0x0 == subpcode) || (0xff == subpcode))
1422 len += resp_sas_sf_m_pg(ap + len, pcontrol, target);
1423 if ((0x1 == subpcode) || (0xff == subpcode))
1424 len += resp_sas_pcd_m_spg(ap + len, pcontrol, target,
1425 target_dev_id);
1426 if ((0x2 == subpcode) || (0xff == subpcode))
1427 len += resp_sas_sha_m_spg(ap + len, pcontrol);
1428 offset += len;
1429 break;
1430 case 0x1c: /* Informational Exceptions Mode page, all devices */
1431 len = resp_iec_m_pg(ap, pcontrol, target);
1432 offset += len;
1433 break;
1434 case 0x3f: /* Read all Mode pages */
1435 if ((0 == subpcode) || (0xff == subpcode)) {
1436 len = resp_err_recov_pg(ap, pcontrol, target);
1437 len += resp_disconnect_pg(ap + len, pcontrol, target);
1438 len += resp_format_pg(ap + len, pcontrol, target);
1439 len += resp_caching_pg(ap + len, pcontrol, target);
1440 len += resp_ctrl_m_pg(ap + len, pcontrol, target);
1441 len += resp_sas_sf_m_pg(ap + len, pcontrol, target);
1442 if (0xff == subpcode) {
1443 len += resp_sas_pcd_m_spg(ap + len, pcontrol,
1444 target, target_dev_id);
1445 len += resp_sas_sha_m_spg(ap + len, pcontrol);
1446 }
1447 len += resp_iec_m_pg(ap + len, pcontrol, target);
1448 } else {
1449 mk_sense_buffer(devip, ILLEGAL_REQUEST,
1450 INVALID_FIELD_IN_CDB, 0);
1451 return check_condition_result;
1452 }
1453 offset += len;
1454 break;
1455 default:
1456 mk_sense_buffer(devip, ILLEGAL_REQUEST, INVALID_FIELD_IN_CDB,
1457 0);
1458 return check_condition_result;
1459 }
1460 if (msense_6)
1461 arr[0] = offset - 1;
1462 else {
1463 arr[0] = ((offset - 2) >> 8) & 0xff;
1464 arr[1] = (offset - 2) & 0xff;
1465 }
1466 return fill_from_dev_buffer(scp, arr, min(alloc_len, offset));
1467 }
1468
1469 #define SDEBUG_MAX_MSELECT_SZ 512
1470
1471 static int resp_mode_select(struct scsi_cmnd * scp, int mselect6,
1472 struct sdebug_dev_info * devip)
1473 {
1474 int pf, sp, ps, md_len, bd_len, off, spf, pg_len;
1475 int param_len, res, errsts, mpage;
1476 unsigned char arr[SDEBUG_MAX_MSELECT_SZ];
1477 unsigned char *cmd = (unsigned char *)scp->cmnd;
1478
1479 if ((errsts = check_readiness(scp, 1, devip)))
1480 return errsts;
1481 memset(arr, 0, sizeof(arr));
1482 pf = cmd[1] & 0x10;
1483 sp = cmd[1] & 0x1;
1484 param_len = mselect6 ? cmd[4] : ((cmd[7] << 8) + cmd[8]);
1485 if ((0 == pf) || sp || (param_len > SDEBUG_MAX_MSELECT_SZ)) {
1486 mk_sense_buffer(devip, ILLEGAL_REQUEST,
1487 INVALID_FIELD_IN_CDB, 0);
1488 return check_condition_result;
1489 }
1490 res = fetch_to_dev_buffer(scp, arr, param_len);
1491 if (-1 == res)
1492 return (DID_ERROR << 16);
1493 else if ((res < param_len) &&
1494 (SCSI_DEBUG_OPT_NOISE & scsi_debug_opts))
1495 printk(KERN_INFO "scsi_debug: mode_select: cdb indicated=%d, "
1496 " IO sent=%d bytes\n", param_len, res);
1497 md_len = mselect6 ? (arr[0] + 1) : ((arr[0] << 8) + arr[1] + 2);
1498 bd_len = mselect6 ? arr[3] : ((arr[6] << 8) + arr[7]);
1499 if (md_len > 2) {
1500 mk_sense_buffer(devip, ILLEGAL_REQUEST,
1501 INVALID_FIELD_IN_PARAM_LIST, 0);
1502 return check_condition_result;
1503 }
1504 off = bd_len + (mselect6 ? 4 : 8);
1505 mpage = arr[off] & 0x3f;
1506 ps = !!(arr[off] & 0x80);
1507 if (ps) {
1508 mk_sense_buffer(devip, ILLEGAL_REQUEST,
1509 INVALID_FIELD_IN_PARAM_LIST, 0);
1510 return check_condition_result;
1511 }
1512 spf = !!(arr[off] & 0x40);
1513 pg_len = spf ? ((arr[off + 2] << 8) + arr[off + 3] + 4) :
1514 (arr[off + 1] + 2);
1515 if ((pg_len + off) > param_len) {
1516 mk_sense_buffer(devip, ILLEGAL_REQUEST,
1517 PARAMETER_LIST_LENGTH_ERR, 0);
1518 return check_condition_result;
1519 }
1520 switch (mpage) {
1521 case 0xa: /* Control Mode page */
1522 if (ctrl_m_pg[1] == arr[off + 1]) {
1523 memcpy(ctrl_m_pg + 2, arr + off + 2,
1524 sizeof(ctrl_m_pg) - 2);
1525 scsi_debug_dsense = !!(ctrl_m_pg[2] & 0x4);
1526 return 0;
1527 }
1528 break;
1529 case 0x1c: /* Informational Exceptions Mode page */
1530 if (iec_m_pg[1] == arr[off + 1]) {
1531 memcpy(iec_m_pg + 2, arr + off + 2,
1532 sizeof(iec_m_pg) - 2);
1533 return 0;
1534 }
1535 break;
1536 default:
1537 break;
1538 }
1539 mk_sense_buffer(devip, ILLEGAL_REQUEST,
1540 INVALID_FIELD_IN_PARAM_LIST, 0);
1541 return check_condition_result;
1542 }
1543
1544 static int resp_temp_l_pg(unsigned char * arr)
1545 {
1546 unsigned char temp_l_pg[] = {0x0, 0x0, 0x3, 0x2, 0x0, 38,
1547 0x0, 0x1, 0x3, 0x2, 0x0, 65,
1548 };
1549
1550 memcpy(arr, temp_l_pg, sizeof(temp_l_pg));
1551 return sizeof(temp_l_pg);
1552 }
1553
1554 static int resp_ie_l_pg(unsigned char * arr)
1555 {
1556 unsigned char ie_l_pg[] = {0x0, 0x0, 0x3, 0x3, 0x0, 0x0, 38,
1557 };
1558
1559 memcpy(arr, ie_l_pg, sizeof(ie_l_pg));
1560 if (iec_m_pg[2] & 0x4) { /* TEST bit set */
1561 arr[4] = THRESHOLD_EXCEEDED;
1562 arr[5] = 0xff;
1563 }
1564 return sizeof(ie_l_pg);
1565 }
1566
1567 #define SDEBUG_MAX_LSENSE_SZ 512
1568
1569 static int resp_log_sense(struct scsi_cmnd * scp,
1570 struct sdebug_dev_info * devip)
1571 {
1572 int ppc, sp, pcontrol, pcode, subpcode, alloc_len, errsts, len, n;
1573 unsigned char arr[SDEBUG_MAX_LSENSE_SZ];
1574 unsigned char *cmd = (unsigned char *)scp->cmnd;
1575
1576 if ((errsts = check_readiness(scp, 1, devip)))
1577 return errsts;
1578 memset(arr, 0, sizeof(arr));
1579 ppc = cmd[1] & 0x2;
1580 sp = cmd[1] & 0x1;
1581 if (ppc || sp) {
1582 mk_sense_buffer(devip, ILLEGAL_REQUEST,
1583 INVALID_FIELD_IN_CDB, 0);
1584 return check_condition_result;
1585 }
1586 pcontrol = (cmd[2] & 0xc0) >> 6;
1587 pcode = cmd[2] & 0x3f;
1588 subpcode = cmd[3] & 0xff;
1589 alloc_len = (cmd[7] << 8) + cmd[8];
1590 arr[0] = pcode;
1591 if (0 == subpcode) {
1592 switch (pcode) {
1593 case 0x0: /* Supported log pages log page */
1594 n = 4;
1595 arr[n++] = 0x0; /* this page */
1596 arr[n++] = 0xd; /* Temperature */
1597 arr[n++] = 0x2f; /* Informational exceptions */
1598 arr[3] = n - 4;
1599 break;
1600 case 0xd: /* Temperature log page */
1601 arr[3] = resp_temp_l_pg(arr + 4);
1602 break;
1603 case 0x2f: /* Informational exceptions log page */
1604 arr[3] = resp_ie_l_pg(arr + 4);
1605 break;
1606 default:
1607 mk_sense_buffer(devip, ILLEGAL_REQUEST,
1608 INVALID_FIELD_IN_CDB, 0);
1609 return check_condition_result;
1610 }
1611 } else if (0xff == subpcode) {
1612 arr[0] |= 0x40;
1613 arr[1] = subpcode;
1614 switch (pcode) {
1615 case 0x0: /* Supported log pages and subpages log page */
1616 n = 4;
1617 arr[n++] = 0x0;
1618 arr[n++] = 0x0; /* 0,0 page */
1619 arr[n++] = 0x0;
1620 arr[n++] = 0xff; /* this page */
1621 arr[n++] = 0xd;
1622 arr[n++] = 0x0; /* Temperature */
1623 arr[n++] = 0x2f;
1624 arr[n++] = 0x0; /* Informational exceptions */
1625 arr[3] = n - 4;
1626 break;
1627 case 0xd: /* Temperature subpages */
1628 n = 4;
1629 arr[n++] = 0xd;
1630 arr[n++] = 0x0; /* Temperature */
1631 arr[3] = n - 4;
1632 break;
1633 case 0x2f: /* Informational exceptions subpages */
1634 n = 4;
1635 arr[n++] = 0x2f;
1636 arr[n++] = 0x0; /* Informational exceptions */
1637 arr[3] = n - 4;
1638 break;
1639 default:
1640 mk_sense_buffer(devip, ILLEGAL_REQUEST,
1641 INVALID_FIELD_IN_CDB, 0);
1642 return check_condition_result;
1643 }
1644 } else {
1645 mk_sense_buffer(devip, ILLEGAL_REQUEST,
1646 INVALID_FIELD_IN_CDB, 0);
1647 return check_condition_result;
1648 }
1649 len = min(((arr[2] << 8) + arr[3]) + 4, alloc_len);
1650 return fill_from_dev_buffer(scp, arr,
1651 min(len, SDEBUG_MAX_INQ_ARR_SZ));
1652 }
1653
1654 static int check_device_access_params(struct sdebug_dev_info *devi,
1655 unsigned long long lba, unsigned int num)
1656 {
1657 if (lba + num > sdebug_capacity) {
1658 mk_sense_buffer(devi, ILLEGAL_REQUEST, ADDR_OUT_OF_RANGE, 0);
1659 return check_condition_result;
1660 }
1661 /* transfer length excessive (tie in to block limits VPD page) */
1662 if (num > sdebug_store_sectors) {
1663 mk_sense_buffer(devi, ILLEGAL_REQUEST, INVALID_FIELD_IN_CDB, 0);
1664 return check_condition_result;
1665 }
1666 return 0;
1667 }
1668
1669 static int do_device_access(struct scsi_cmnd *scmd,
1670 struct sdebug_dev_info *devi,
1671 unsigned long long lba, unsigned int num, int write)
1672 {
1673 int ret;
1674 unsigned int block, rest = 0;
1675 int (*func)(struct scsi_cmnd *, unsigned char *, int);
1676
1677 func = write ? fetch_to_dev_buffer : fill_from_dev_buffer;
1678
1679 block = do_div(lba, sdebug_store_sectors);
1680 if (block + num > sdebug_store_sectors)
1681 rest = block + num - sdebug_store_sectors;
1682
1683 ret = func(scmd, fake_storep + (block * scsi_debug_sector_size),
1684 (num - rest) * scsi_debug_sector_size);
1685 if (!ret && rest)
1686 ret = func(scmd, fake_storep, rest * scsi_debug_sector_size);
1687
1688 return ret;
1689 }
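/*
 * Note (editorial): do_div() maps the requested LBA into the RAM backing
 * store modulo sdebug_store_sectors, and a transfer that runs off the end of
 * the store is split into two copies so it wraps back to offset 0. This is
 * what lets virtual_gb advertise a capacity larger than dev_size_mb.
 */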
1690
1691 static int prot_verify_read(struct scsi_cmnd *SCpnt, sector_t start_sec,
1692 unsigned int sectors, u32 ei_lba)
1693 {
1694 unsigned int i, resid;
1695 struct scatterlist *psgl;
1696 struct sd_dif_tuple *sdt;
1697 sector_t sector;
1698 sector_t tmp_sec = start_sec;
1699 void *paddr;
1700
1701 start_sec = do_div(tmp_sec, sdebug_store_sectors);
1702
1703 sdt = (struct sd_dif_tuple *)(dif_storep + dif_offset(start_sec));
1704
1705 for (i = 0 ; i < sectors ; i++) {
1706 u16 csum;
1707
1708 if (sdt[i].app_tag == 0xffff)
1709 continue;
1710
1711 sector = start_sec + i;
1712
1713 switch (scsi_debug_guard) {
1714 case 1:
1715 csum = ip_compute_csum(fake_storep +
1716 sector * scsi_debug_sector_size,
1717 scsi_debug_sector_size);
1718 break;
1719 case 0:
1720 csum = crc_t10dif(fake_storep +
1721 sector * scsi_debug_sector_size,
1722 scsi_debug_sector_size);
1723 csum = cpu_to_be16(csum);
1724 break;
1725 default:
1726 BUG();
1727 }
1728
1729 if (sdt[i].guard_tag != csum) {
1730 printk(KERN_ERR "%s: GUARD check failed on sector %lu" \
1731 " rcvd 0x%04x, data 0x%04x\n", __func__,
1732 (unsigned long)sector,
1733 be16_to_cpu(sdt[i].guard_tag),
1734 be16_to_cpu(csum));
1735 dif_errors++;
1736 return 0x01;
1737 }
1738
1739 if (scsi_debug_dif == SD_DIF_TYPE1_PROTECTION &&
1740 be32_to_cpu(sdt[i].ref_tag) != (sector & 0xffffffff)) {
1741 printk(KERN_ERR "%s: REF check failed on sector %lu\n",
1742 __func__, (unsigned long)sector);
1743 dif_errors++;
1744 return 0x03;
1745 }
1746
1747 if (scsi_debug_dif == SD_DIF_TYPE2_PROTECTION &&
1748 be32_to_cpu(sdt[i].ref_tag) != ei_lba) {
1749 printk(KERN_ERR "%s: REF check failed on sector %lu\n",
1750 __func__, (unsigned long)sector);
1751 dif_errors++;
1752 return 0x03;
1753 }
1754
1755 ei_lba++;
1756 }
1757
1758 resid = sectors * 8; /* Bytes of protection data to copy into sgl */
1759 sector = start_sec;
1760
1761 scsi_for_each_prot_sg(SCpnt, psgl, scsi_prot_sg_count(SCpnt), i) {
1762 int len = min(psgl->length, resid);
1763
1764 paddr = kmap_atomic(sg_page(psgl), KM_IRQ0) + psgl->offset;
1765 memcpy(paddr, dif_storep + dif_offset(sector), len);
1766
1767 sector += len >> 3;
1768 if (sector >= sdebug_store_sectors) {
1769 /* Force wrap */
1770 tmp_sec = sector;
1771 sector = do_div(tmp_sec, sdebug_store_sectors);
1772 }
1773 resid -= len;
1774 kunmap_atomic(paddr, KM_IRQ0);
1775 }
1776
1777 dix_reads++;
1778
1779 return 0;
1780 }
1781
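/* READ response: range-check the request, optionally inject an unrecoverable
 * medium error around OPT_MEDIUM_ERR_ADDR, run DIF/DIX verification when
 * enabled, then copy data out of the ramdisk under the shared read lock.
 */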
1782 static int resp_read(struct scsi_cmnd *SCpnt, unsigned long long lba,
1783 unsigned int num, struct sdebug_dev_info *devip,
1784 u32 ei_lba)
1785 {
1786 unsigned long iflags;
1787 int ret;
1788
1789 ret = check_device_access_params(devip, lba, num);
1790 if (ret)
1791 return ret;
1792
1793 if ((SCSI_DEBUG_OPT_MEDIUM_ERR & scsi_debug_opts) &&
1794 (lba <= OPT_MEDIUM_ERR_ADDR) &&
1795 ((lba + num) > OPT_MEDIUM_ERR_ADDR)) {
1796 /* claim unrecoverable read error */
1797 mk_sense_buffer(devip, MEDIUM_ERROR, UNRECOVERED_READ_ERR,
1798 0);
1799 /* set info field and valid bit for fixed descriptor */
1800 if (0x70 == (devip->sense_buff[0] & 0x7f)) {
1801 devip->sense_buff[0] |= 0x80; /* Valid bit */
1802 ret = OPT_MEDIUM_ERR_ADDR;
1803 devip->sense_buff[3] = (ret >> 24) & 0xff;
1804 devip->sense_buff[4] = (ret >> 16) & 0xff;
1805 devip->sense_buff[5] = (ret >> 8) & 0xff;
1806 devip->sense_buff[6] = ret & 0xff;
1807 }
1808 return check_condition_result;
1809 }
1810
1811 /* DIX + T10 DIF */
1812 if (scsi_debug_dix && scsi_prot_sg_count(SCpnt)) {
1813 int prot_ret = prot_verify_read(SCpnt, lba, num, ei_lba);
1814
1815 if (prot_ret) {
1816 mk_sense_buffer(devip, ABORTED_COMMAND, 0x10, prot_ret);
1817 return illegal_condition_result;
1818 }
1819 }
1820
1821 read_lock_irqsave(&atomic_rw, iflags);
1822 ret = do_device_access(SCpnt, devip, lba, num, 0);
1823 read_unlock_irqrestore(&atomic_rw, iflags);
1824 return ret;
1825 }
1826
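/* Dump one logical block as ASCII/hex when a protection check fails. */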
1827 void dump_sector(unsigned char *buf, int len)
1828 {
1829 int i, j;
1830
1831 printk(KERN_ERR ">>> Sector Dump <<<\n");
1832
1833 for (i = 0 ; i < len ; i += 16) {
1834 printk(KERN_ERR "%04d: ", i);
1835
1836 for (j = 0 ; j < 16 ; j++) {
1837 unsigned char c = buf[i+j];
1838 if (c >= 0x20 && c < 0x7e)
1839 printk(" %c ", buf[i+j]);
1840 else
1841 printk("%02x ", buf[i+j]);
1842 }
1843
1844 printk("\n");
1845 }
1846 }
1847
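/* WRITE-side DIF/DIX verification: walk the data and protection scatterlists
 * in lock-step, verify each sector's guard and reference tags against the
 * received data, and only then commit the 8-byte tuple to dif_storep.
 */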
1848 static int prot_verify_write(struct scsi_cmnd *SCpnt, sector_t start_sec,
1849 unsigned int sectors, u32 ei_lba)
1850 {
1851 int i, j, ret;
1852 struct sd_dif_tuple *sdt;
1853 struct scatterlist *dsgl = scsi_sglist(SCpnt);
1854 struct scatterlist *psgl = scsi_prot_sglist(SCpnt);
1855 void *daddr, *paddr;
1856 sector_t tmp_sec = start_sec;
1857 sector_t sector;
1858 int ppage_offset;
1859 unsigned short csum;
1860
1861 sector = do_div(tmp_sec, sdebug_store_sectors);
1862
1863 BUG_ON(scsi_sg_count(SCpnt) == 0);
1864 BUG_ON(scsi_prot_sg_count(SCpnt) == 0);
1865
1866 paddr = kmap_atomic(sg_page(psgl), KM_IRQ1) + psgl->offset;
1867 ppage_offset = 0;
1868
1869 /* For each data page */
1870 scsi_for_each_sg(SCpnt, dsgl, scsi_sg_count(SCpnt), i) {
1871 daddr = kmap_atomic(sg_page(dsgl), KM_IRQ0) + dsgl->offset;
1872
1873 /* For each sector-sized chunk in data page */
1874 for (j = 0 ; j < dsgl->length ; j += scsi_debug_sector_size) {
1875
1876 /* If we're at the end of the current
1877 * protection page advance to the next one
1878 */
1879 if (ppage_offset >= psgl->length) {
1880 kunmap_atomic(paddr, KM_IRQ1);
1881 psgl = sg_next(psgl);
1882 BUG_ON(psgl == NULL);
1883 paddr = kmap_atomic(sg_page(psgl), KM_IRQ1)
1884 + psgl->offset;
1885 ppage_offset = 0;
1886 }
1887
1888 sdt = paddr + ppage_offset;
1889
1890 switch (scsi_debug_guard) {
1891 case 1:
1892 csum = ip_compute_csum(daddr,
1893 scsi_debug_sector_size);
1894 break;
1895 case 0:
1896 csum = cpu_to_be16(crc_t10dif(daddr,
1897 scsi_debug_sector_size));
1898 break;
1899 default:
1900 BUG();
1901 ret = 0;
1902 goto out;
1903 }
1904
1905 if (sdt->guard_tag != csum) {
1906 printk(KERN_ERR
1907 "%s: GUARD check failed on sector %lu " \
1908 "rcvd 0x%04x, calculated 0x%04x\n",
1909 __func__, (unsigned long)sector,
1910 be16_to_cpu(sdt->guard_tag),
1911 be16_to_cpu(csum));
1912 ret = 0x01;
1913 dump_sector(daddr, scsi_debug_sector_size);
1914 goto out;
1915 }
1916
1917 if (scsi_debug_dif == SD_DIF_TYPE1_PROTECTION &&
1918 be32_to_cpu(sdt->ref_tag)
1919 != (start_sec & 0xffffffff)) {
1920 printk(KERN_ERR
1921 "%s: REF check failed on sector %lu\n",
1922 __func__, (unsigned long)sector);
1923 ret = 0x03;
1924 dump_sector(daddr, scsi_debug_sector_size);
1925 goto out;
1926 }
1927
1928 if (scsi_debug_dif == SD_DIF_TYPE2_PROTECTION &&
1929 be32_to_cpu(sdt->ref_tag) != ei_lba) {
1930 printk(KERN_ERR
1931 "%s: REF check failed on sector %lu\n",
1932 __func__, (unsigned long)sector);
1933 ret = 0x03;
1934 dump_sector(daddr, scsi_debug_sector_size);
1935 goto out;
1936 }
1937
1938 /* Would be great to copy this in bigger
1939 * chunks. However, for the sake of
1940 * correctness we need to verify each sector
1941 * before writing it to "stable" storage
1942 */
1943 memcpy(dif_storep + dif_offset(sector), sdt, 8);
1944
1945 sector++;
1946
1947 if (sector == sdebug_store_sectors)
1948 sector = 0; /* Force wrap */
1949
1950 start_sec++;
1951 ei_lba++;
1952 daddr += scsi_debug_sector_size;
1953 ppage_offset += sizeof(struct sd_dif_tuple);
1954 }
1955
1956 kunmap_atomic(daddr, KM_IRQ0);
1957 }
1958
1959 kunmap_atomic(paddr, KM_IRQ1);
1960
1961 dix_writes++;
1962
1963 return 0;
1964
1965 out:
1966 dif_errors++;
1967 kunmap_atomic(daddr, KM_IRQ0);
1968 kunmap_atomic(paddr, KM_IRQ1);
1969 return ret;
1970 }
1971
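/* Thin provisioning bitmap helper: report whether the provisioning block
 * containing 'lba' is mapped and, via *num, how many following logical
 * blocks share that state (one bit per unmap_granularity logical blocks).
 */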
1972 static unsigned int map_state(sector_t lba, unsigned int *num)
1973 {
1974 unsigned int granularity, alignment, mapped;
1975 sector_t block, next, end;
1976
1977 granularity = scsi_debug_unmap_granularity;
1978 alignment = granularity - scsi_debug_unmap_alignment;
1979 block = lba + alignment;
1980 do_div(block, granularity);
1981
1982 mapped = test_bit(block, map_storep);
1983
1984 if (mapped)
1985 next = find_next_zero_bit(map_storep, map_size, block);
1986 else
1987 next = find_next_bit(map_storep, map_size, block);
1988
1989 end = next * granularity - scsi_debug_unmap_alignment;
1990 *num = end - lba;
1991
1992 return mapped;
1993 }
1994
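/* map_region()/unmap_region() set or clear the provisioning bits covering
 * [lba, lba + len). unmap_region() only clears blocks that lie entirely
 * within the range, so partially covered blocks stay mapped.
 */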
1995 static void map_region(sector_t lba, unsigned int len)
1996 {
1997 unsigned int granularity, alignment;
1998 sector_t end = lba + len;
1999
2000 granularity = scsi_debug_unmap_granularity;
2001 alignment = granularity - scsi_debug_unmap_alignment;
2002
2003 while (lba < end) {
2004 sector_t block, rem;
2005
2006 block = lba + alignment;
2007 rem = do_div(block, granularity);
2008
2009 if (block < map_size)
2010 set_bit(block, map_storep);
2011
2012 lba += granularity - rem;
2013 }
2014 }
2015
2016 static void unmap_region(sector_t lba, unsigned int len)
2017 {
2018 unsigned int granularity, alignment;
2019 sector_t end = lba + len;
2020
2021 granularity = scsi_debug_unmap_granularity;
2022 alignment = granularity - scsi_debug_unmap_alignment;
2023
2024 while (lba < end) {
2025 sector_t block, rem;
2026
2027 block = lba + alignment;
2028 rem = do_div(block, granularity);
2029
2030 if (rem == 0 && lba + granularity <= end &&
2031 block < map_size)
2032 clear_bit(block, map_storep);
2033
2034 lba += granularity - rem;
2035 }
2036 }
2037
2038 static int resp_write(struct scsi_cmnd *SCpnt, unsigned long long lba,
2039 unsigned int num, struct sdebug_dev_info *devip,
2040 u32 ei_lba)
2041 {
2042 unsigned long iflags;
2043 int ret;
2044
2045 ret = check_device_access_params(devip, lba, num);
2046 if (ret)
2047 return ret;
2048
2049 /* DIX + T10 DIF */
2050 if (scsi_debug_dix && scsi_prot_sg_count(SCpnt)) {
2051 int prot_ret = prot_verify_write(SCpnt, lba, num, ei_lba);
2052
2053 if (prot_ret) {
2054 mk_sense_buffer(devip, ILLEGAL_REQUEST, 0x10, prot_ret);
2055 return illegal_condition_result;
2056 }
2057 }
2058
2059 write_lock_irqsave(&atomic_rw, iflags);
2060 ret = do_device_access(SCpnt, devip, lba, num, 1);
2061 if (scsi_debug_unmap_granularity)
2062 map_region(lba, num);
2063 write_unlock_irqrestore(&atomic_rw, iflags);
2064 if (-1 == ret)
2065 return (DID_ERROR << 16);
2066 else if ((ret < (num * scsi_debug_sector_size)) &&
2067 (SCSI_DEBUG_OPT_NOISE & scsi_debug_opts))
2068 printk(KERN_INFO "scsi_debug: write: cdb indicated=%u, "
2069 " IO sent=%d bytes\n", num * scsi_debug_sector_size, ret);
2070
2071 return 0;
2072 }
2073
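/* WRITE SAME: with the UNMAP bit set (and thin provisioning enabled) the
 * range is simply deallocated; otherwise one logical block is fetched from
 * the initiator and replicated across the remaining blocks of the range.
 */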
2074 static int resp_write_same(struct scsi_cmnd *scmd, unsigned long long lba,
2075 unsigned int num, struct sdebug_dev_info *devip,
2076 u32 ei_lba, unsigned int unmap)
2077 {
2078 unsigned long iflags;
2079 unsigned long long i;
2080 int ret;
2081
2082 ret = check_device_access_params(devip, lba, num);
2083 if (ret)
2084 return ret;
2085
2086 write_lock_irqsave(&atomic_rw, iflags);
2087
2088 if (unmap && scsi_debug_unmap_granularity) {
2089 unmap_region(lba, num);
2090 goto out;
2091 }
2092
2093 /* Else fetch one logical block */
2094 ret = fetch_to_dev_buffer(scmd,
2095 fake_storep + (lba * scsi_debug_sector_size),
2096 scsi_debug_sector_size);
2097
2098 if (-1 == ret) {
2099 write_unlock_irqrestore(&atomic_rw, iflags);
2100 return (DID_ERROR << 16);
2101 } else if ((ret < (num * scsi_debug_sector_size)) &&
2102 (SCSI_DEBUG_OPT_NOISE & scsi_debug_opts))
2103 printk(KERN_INFO "scsi_debug: write same: cdb indicated=%u, "
2104 " IO sent=%d bytes\n", num * scsi_debug_sector_size, ret);
2105
2106 /* Copy first sector to remaining blocks */
2107 for (i = 1 ; i < num ; i++)
2108 memcpy(fake_storep + ((lba + i) * scsi_debug_sector_size),
2109 fake_storep + (lba * scsi_debug_sector_size),
2110 scsi_debug_sector_size);
2111
2112 if (scsi_debug_unmap_granularity)
2113 map_region(lba, num);
2114 out:
2115 write_unlock_irqrestore(&atomic_rw, iflags);
2116
2117 return 0;
2118 }
2119
2120 struct unmap_block_desc {
2121 __be64 lba;
2122 __be32 blocks;
2123 __be32 __reserved;
2124 };
2125
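/* UNMAP: the parameter list is an 8-byte header followed by 16-byte block
 * descriptors (LBA, number of blocks, reserved); each descriptor is
 * range-checked and then deallocated.
 */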
2126 static int resp_unmap(struct scsi_cmnd * scmd, struct sdebug_dev_info * devip)
2127 {
2128 unsigned char *buf;
2129 struct unmap_block_desc *desc;
2130 unsigned int i, payload_len, descriptors;
2131 int ret;
2132
2133 ret = check_readiness(scmd, 1, devip);
2134 if (ret)
2135 return ret;
2136
2137 payload_len = get_unaligned_be16(&scmd->cmnd[7]);
2138 BUG_ON(scsi_bufflen(scmd) != payload_len);
2139
2140 descriptors = (payload_len - 8) / 16;
2141
2142 buf = kmalloc(scsi_bufflen(scmd), GFP_ATOMIC);
2143 if (!buf)
2144 return check_condition_result;
2145
2146 scsi_sg_copy_to_buffer(scmd, buf, scsi_bufflen(scmd));
2147
2148 BUG_ON(get_unaligned_be16(&buf[0]) != payload_len - 2);
2149 BUG_ON(get_unaligned_be16(&buf[2]) != descriptors * 16);
2150
2151 desc = (void *)&buf[8];
2152
2153 for (i = 0 ; i < descriptors ; i++) {
2154 unsigned long long lba = get_unaligned_be64(&desc[i].lba);
2155 unsigned int num = get_unaligned_be32(&desc[i].blocks);
2156
2157 ret = check_device_access_params(devip, lba, num);
2158 if (ret)
2159 goto out;
2160
2161 unmap_region(lba, num);
2162 }
2163
2164 ret = 0;
2165
2166 out:
2167 kfree(buf);
2168
2169 return ret;
2170 }
2171
2172 #define SDEBUG_GET_LBA_STATUS_LEN 32
2173
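/* GET LBA STATUS: return a single descriptor for the requested LBA giving
 * its mapped/deallocated state and the length of the run sharing that
 * state, as computed by map_state().
 */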
2174 static int resp_get_lba_status(struct scsi_cmnd * scmd,
2175 struct sdebug_dev_info * devip)
2176 {
2177 unsigned long long lba;
2178 unsigned int alloc_len, mapped, num;
2179 unsigned char arr[SDEBUG_GET_LBA_STATUS_LEN];
2180 int ret;
2181
2182 ret = check_readiness(scmd, 1, devip);
2183 if (ret)
2184 return ret;
2185
2186 lba = get_unaligned_be64(&scmd->cmnd[2]);
2187 alloc_len = get_unaligned_be32(&scmd->cmnd[10]);
2188
2189 if (alloc_len < 24)
2190 return 0;
2191
2192 ret = check_device_access_params(devip, lba, 1);
2193 if (ret)
2194 return ret;
2195
2196 mapped = map_state(lba, &num);
2197
2198 memset(arr, 0, SDEBUG_GET_LBA_STATUS_LEN);
2199 put_unaligned_be32(16, &arr[0]); /* Parameter Data Length */
2200 put_unaligned_be64(lba, &arr[8]); /* LBA */
2201 put_unaligned_be32(num, &arr[16]); /* Number of blocks */
2202 arr[20] = !mapped; /* mapped = 0, unmapped = 1 */
2203
2204 return fill_from_dev_buffer(scmd, arr, SDEBUG_GET_LBA_STATUS_LEN);
2205 }
2206
2207 #define SDEBUG_RLUN_ARR_SZ 256
2208
2209 static int resp_report_luns(struct scsi_cmnd * scp,
2210 struct sdebug_dev_info * devip)
2211 {
2212 unsigned int alloc_len;
2213 int lun_cnt, i, upper, num, n, wlun, lun;
2214 unsigned char *cmd = (unsigned char *)scp->cmnd;
2215 int select_report = (int)cmd[2];
2216 struct scsi_lun *one_lun;
2217 unsigned char arr[SDEBUG_RLUN_ARR_SZ];
2218 unsigned char * max_addr;
2219
2220 alloc_len = cmd[9] + (cmd[8] << 8) + (cmd[7] << 16) + (cmd[6] << 24);
2221 if ((alloc_len < 4) || (select_report > 2)) {
2222 mk_sense_buffer(devip, ILLEGAL_REQUEST, INVALID_FIELD_IN_CDB,
2223 0);
2224 return check_condition_result;
2225 }
2226 /* can produce response with up to 16k luns (lun 0 to lun 16383) */
2227 memset(arr, 0, SDEBUG_RLUN_ARR_SZ);
2228 lun_cnt = scsi_debug_max_luns;
2229 if (1 == select_report)
2230 lun_cnt = 0;
2231 else if (scsi_debug_no_lun_0 && (lun_cnt > 0))
2232 --lun_cnt;
2233 wlun = (select_report > 0) ? 1 : 0;
2234 num = lun_cnt + wlun;
2235 arr[2] = ((sizeof(struct scsi_lun) * num) >> 8) & 0xff;
2236 arr[3] = (sizeof(struct scsi_lun) * num) & 0xff;
2237 n = min((int)((SDEBUG_RLUN_ARR_SZ - 8) /
2238 sizeof(struct scsi_lun)), num);
2239 if (n < num) {
2240 wlun = 0;
2241 lun_cnt = n;
2242 }
2243 one_lun = (struct scsi_lun *) &arr[8];
2244 max_addr = arr + SDEBUG_RLUN_ARR_SZ;
2245 for (i = 0, lun = (scsi_debug_no_lun_0 ? 1 : 0);
2246 ((i < lun_cnt) && ((unsigned char *)(one_lun + i) < max_addr));
2247 i++, lun++) {
2248 upper = (lun >> 8) & 0x3f;
2249 if (upper)
2250 one_lun[i].scsi_lun[0] =
2251 (upper | (SAM2_LUN_ADDRESS_METHOD << 6));
2252 one_lun[i].scsi_lun[1] = lun & 0xff;
2253 }
2254 if (wlun) {
2255 one_lun[i].scsi_lun[0] = (SAM2_WLUN_REPORT_LUNS >> 8) & 0xff;
2256 one_lun[i].scsi_lun[1] = SAM2_WLUN_REPORT_LUNS & 0xff;
2257 i++;
2258 }
2259 alloc_len = (unsigned char *)(one_lun + i) - arr;
2260 return fill_from_dev_buffer(scp, arr,
2261 min((int)alloc_len, SDEBUG_RLUN_ARR_SZ));
2262 }
2263
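/* XDWRITEREAD: XOR the command's data-out buffer into its bidirectional
 * data-in buffer, walking both scatterlists via a temporary copy.
 */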
2264 static int resp_xdwriteread(struct scsi_cmnd *scp, unsigned long long lba,
2265 unsigned int num, struct sdebug_dev_info *devip)
2266 {
2267 int i, j, ret = -1;
2268 unsigned char *kaddr, *buf;
2269 unsigned int offset;
2270 struct scatterlist *sg;
2271 struct scsi_data_buffer *sdb = scsi_in(scp);
2272
2273 /* better not to use temporary buffer. */
2274 buf = kmalloc(scsi_bufflen(scp), GFP_ATOMIC);
2275 if (!buf)
2276 return ret;
2277
2278 scsi_sg_copy_to_buffer(scp, buf, scsi_bufflen(scp));
2279
2280 offset = 0;
2281 for_each_sg(sdb->table.sgl, sg, sdb->table.nents, i) {
2282 kaddr = (unsigned char *)kmap_atomic(sg_page(sg), KM_USER0);
2283 if (!kaddr)
2284 goto out;
2285
2286 for (j = 0; j < sg->length; j++)
2287 *(kaddr + sg->offset + j) ^= *(buf + offset + j);
2288
2289 offset += sg->length;
2290 kunmap_atomic(kaddr, KM_USER0);
2291 }
2292 ret = 0;
2293 out:
2294 kfree(buf);
2295
2296 return ret;
2297 }
2298
2299 /* When timer goes off this function is called. */
2300 static void timer_intr_handler(unsigned long indx)
2301 {
2302 struct sdebug_queued_cmd * sqcp;
2303 unsigned long iflags;
2304
2305 if (indx >= scsi_debug_max_queue) {
2306 printk(KERN_ERR "scsi_debug:timer_intr_handler: indx too "
2307 "large\n");
2308 return;
2309 }
2310 spin_lock_irqsave(&queued_arr_lock, iflags);
2311 sqcp = &queued_arr[(int)indx];
2312 if (! sqcp->in_use) {
2313 printk(KERN_ERR "scsi_debug:timer_intr_handler: Unexpected "
2314 "interrupt\n");
2315 spin_unlock_irqrestore(&queued_arr_lock, iflags);
2316 return;
2317 }
2318 sqcp->in_use = 0;
2319 if (sqcp->done_funct) {
2320 sqcp->a_cmnd->result = sqcp->scsi_result;
2321 sqcp->done_funct(sqcp->a_cmnd); /* callback to mid level */
2322 }
2323 sqcp->done_funct = NULL;
2324 spin_unlock_irqrestore(&queued_arr_lock, iflags);
2325 }
2326
2327
2328 static struct sdebug_dev_info *
2329 sdebug_device_create(struct sdebug_host_info *sdbg_host, gfp_t flags)
2330 {
2331 struct sdebug_dev_info *devip;
2332
2333 devip = kzalloc(sizeof(*devip), flags);
2334 if (devip) {
2335 devip->sdbg_host = sdbg_host;
2336 list_add_tail(&devip->dev_list, &sdbg_host->dev_info_list);
2337 }
2338 return devip;
2339 }
2340
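/* Look up the per-LUN state for 'sdev': return existing hostdata, reuse a
 * free slot on the host's list, or allocate a new sdebug_dev_info, and
 * preformat its sense buffer (descriptor or fixed format per 'dsense').
 */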
2341 static struct sdebug_dev_info * devInfoReg(struct scsi_device * sdev)
2342 {
2343 struct sdebug_host_info * sdbg_host;
2344 struct sdebug_dev_info * open_devip = NULL;
2345 struct sdebug_dev_info * devip =
2346 (struct sdebug_dev_info *)sdev->hostdata;
2347
2348 if (devip)
2349 return devip;
2350 sdbg_host = *(struct sdebug_host_info **)shost_priv(sdev->host);
2351 if (!sdbg_host) {
2352 printk(KERN_ERR "Host info NULL\n");
2353 return NULL;
2354 }
2355 list_for_each_entry(devip, &sdbg_host->dev_info_list, dev_list) {
2356 if ((devip->used) && (devip->channel == sdev->channel) &&
2357 (devip->target == sdev->id) &&
2358 (devip->lun == sdev->lun))
2359 return devip;
2360 else {
2361 if ((!devip->used) && (!open_devip))
2362 open_devip = devip;
2363 }
2364 }
2365 if (!open_devip) { /* try and make a new one */
2366 open_devip = sdebug_device_create(sdbg_host, GFP_ATOMIC);
2367 if (!open_devip) {
2368 printk(KERN_ERR "%s: out of memory at line %d\n",
2369 __func__, __LINE__);
2370 return NULL;
2371 }
2372 }
2373
2374 open_devip->channel = sdev->channel;
2375 open_devip->target = sdev->id;
2376 open_devip->lun = sdev->lun;
2377 open_devip->sdbg_host = sdbg_host;
2378 open_devip->reset = 1;
2379 open_devip->used = 1;
2380 memset(open_devip->sense_buff, 0, SDEBUG_SENSE_LEN);
2381 if (scsi_debug_dsense)
2382 open_devip->sense_buff[0] = 0x72;
2383 else {
2384 open_devip->sense_buff[0] = 0x70;
2385 open_devip->sense_buff[7] = 0xa;
2386 }
2387 if (sdev->lun == SAM2_WLUN_REPORT_LUNS)
2388 open_devip->wlun = SAM2_WLUN_REPORT_LUNS & 0xff;
2389
2390 return open_devip;
2391 }
2392
2393 static int scsi_debug_slave_alloc(struct scsi_device *sdp)
2394 {
2395 if (SCSI_DEBUG_OPT_NOISE & scsi_debug_opts)
2396 printk(KERN_INFO "scsi_debug: slave_alloc <%u %u %u %u>\n",
2397 sdp->host->host_no, sdp->channel, sdp->id, sdp->lun);
2398 queue_flag_set_unlocked(QUEUE_FLAG_BIDI, sdp->request_queue);
2399 return 0;
2400 }
2401
2402 static int scsi_debug_slave_configure(struct scsi_device *sdp)
2403 {
2404 struct sdebug_dev_info *devip;
2405
2406 if (SCSI_DEBUG_OPT_NOISE & scsi_debug_opts)
2407 printk(KERN_INFO "scsi_debug: slave_configure <%u %u %u %u>\n",
2408 sdp->host->host_no, sdp->channel, sdp->id, sdp->lun);
2409 if (sdp->host->max_cmd_len != SCSI_DEBUG_MAX_CMD_LEN)
2410 sdp->host->max_cmd_len = SCSI_DEBUG_MAX_CMD_LEN;
2411 devip = devInfoReg(sdp);
2412 if (NULL == devip)
2413 return 1; /* no resources, will be marked offline */
2414 sdp->hostdata = devip;
2415 if (sdp->host->cmd_per_lun)
2416 scsi_adjust_queue_depth(sdp, SDEBUG_TAGGED_QUEUING,
2417 sdp->host->cmd_per_lun);
2418 blk_queue_max_segment_size(sdp->request_queue, 256 * 1024);
2419 if (scsi_debug_no_uld)
2420 sdp->no_uld_attach = 1;
2421 return 0;
2422 }
2423
2424 static void scsi_debug_slave_destroy(struct scsi_device *sdp)
2425 {
2426 struct sdebug_dev_info *devip =
2427 (struct sdebug_dev_info *)sdp->hostdata;
2428
2429 if (SCSI_DEBUG_OPT_NOISE & scsi_debug_opts)
2430 printk(KERN_INFO "scsi_debug: slave_destroy <%u %u %u %u>\n",
2431 sdp->host->host_no, sdp->channel, sdp->id, sdp->lun);
2432 if (devip) {
2433 /* make this slot available for re-use */
2434 devip->used = 0;
2435 sdp->hostdata = NULL;
2436 }
2437 }
2438
2439 /* Returns 1 if 'cmnd' was found and its timer deleted; otherwise returns 0 */
2440 static int stop_queued_cmnd(struct scsi_cmnd *cmnd)
2441 {
2442 unsigned long iflags;
2443 int k;
2444 struct sdebug_queued_cmd *sqcp;
2445
2446 spin_lock_irqsave(&queued_arr_lock, iflags);
2447 for (k = 0; k < scsi_debug_max_queue; ++k) {
2448 sqcp = &queued_arr[k];
2449 if (sqcp->in_use && (cmnd == sqcp->a_cmnd)) {
2450 del_timer_sync(&sqcp->cmnd_timer);
2451 sqcp->in_use = 0;
2452 sqcp->a_cmnd = NULL;
2453 break;
2454 }
2455 }
2456 spin_unlock_irqrestore(&queued_arr_lock, iflags);
2457 return (k < scsi_debug_max_queue) ? 1 : 0;
2458 }
2459
2460 /* Deletes (stops) timers of all queued commands */
2461 static void stop_all_queued(void)
2462 {
2463 unsigned long iflags;
2464 int k;
2465 struct sdebug_queued_cmd *sqcp;
2466
2467 spin_lock_irqsave(&queued_arr_lock, iflags);
2468 for (k = 0; k < scsi_debug_max_queue; ++k) {
2469 sqcp = &queued_arr[k];
2470 if (sqcp->in_use && sqcp->a_cmnd) {
2471 del_timer_sync(&sqcp->cmnd_timer);
2472 sqcp->in_use = 0;
2473 sqcp->a_cmnd = NULL;
2474 }
2475 }
2476 spin_unlock_irqrestore(&queued_arr_lock, iflags);
2477 }
2478
2479 static int scsi_debug_abort(struct scsi_cmnd * SCpnt)
2480 {
2481 if (SCSI_DEBUG_OPT_NOISE & scsi_debug_opts)
2482 printk(KERN_INFO "scsi_debug: abort\n");
2483 ++num_aborts;
2484 stop_queued_cmnd(SCpnt);
2485 return SUCCESS;
2486 }
2487
2488 static int scsi_debug_biosparam(struct scsi_device *sdev,
2489 struct block_device * bdev, sector_t capacity, int *info)
2490 {
2491 int res;
2492 unsigned char *buf;
2493
2494 if (SCSI_DEBUG_OPT_NOISE & scsi_debug_opts)
2495 printk(KERN_INFO "scsi_debug: biosparam\n");
2496 buf = scsi_bios_ptable(bdev);
2497 if (buf) {
2498 res = scsi_partsize(buf, capacity,
2499 &info[2], &info[0], &info[1]);
2500 kfree(buf);
2501 if (! res)
2502 return res;
2503 }
2504 info[0] = sdebug_heads;
2505 info[1] = sdebug_sectors_per;
2506 info[2] = sdebug_cylinders_per;
2507 return 0;
2508 }
2509
2510 static int scsi_debug_device_reset(struct scsi_cmnd * SCpnt)
2511 {
2512 struct sdebug_dev_info * devip;
2513
2514 if (SCSI_DEBUG_OPT_NOISE & scsi_debug_opts)
2515 printk(KERN_INFO "scsi_debug: device_reset\n");
2516 ++num_dev_resets;
2517 if (SCpnt) {
2518 devip = devInfoReg(SCpnt->device);
2519 if (devip)
2520 devip->reset = 1;
2521 }
2522 return SUCCESS;
2523 }
2524
2525 static int scsi_debug_bus_reset(struct scsi_cmnd * SCpnt)
2526 {
2527 struct sdebug_host_info *sdbg_host;
2528 struct sdebug_dev_info * dev_info;
2529 struct scsi_device * sdp;
2530 struct Scsi_Host * hp;
2531
2532 if (SCSI_DEBUG_OPT_NOISE & scsi_debug_opts)
2533 printk(KERN_INFO "scsi_debug: bus_reset\n");
2534 ++num_bus_resets;
2535 if (SCpnt && ((sdp = SCpnt->device)) && ((hp = sdp->host))) {
2536 sdbg_host = *(struct sdebug_host_info **)shost_priv(hp);
2537 if (sdbg_host) {
2538 list_for_each_entry(dev_info,
2539 &sdbg_host->dev_info_list,
2540 dev_list)
2541 dev_info->reset = 1;
2542 }
2543 }
2544 return SUCCESS;
2545 }
2546
2547 static int scsi_debug_host_reset(struct scsi_cmnd * SCpnt)
2548 {
2549 struct sdebug_host_info * sdbg_host;
2550 struct sdebug_dev_info * dev_info;
2551
2552 if (SCSI_DEBUG_OPT_NOISE & scsi_debug_opts)
2553 printk(KERN_INFO "scsi_debug: host_reset\n");
2554 ++num_host_resets;
2555 spin_lock(&sdebug_host_list_lock);
2556 list_for_each_entry(sdbg_host, &sdebug_host_list, host_list) {
2557 list_for_each_entry(dev_info, &sdbg_host->dev_info_list,
2558 dev_list)
2559 dev_info->reset = 1;
2560 }
2561 spin_unlock(&sdebug_host_list_lock);
2562 stop_all_queued();
2563 return SUCCESS;
2564 }
2565
2566 /* Initializes timers in queued array */
2567 static void __init init_all_queued(void)
2568 {
2569 unsigned long iflags;
2570 int k;
2571 struct sdebug_queued_cmd * sqcp;
2572
2573 spin_lock_irqsave(&queued_arr_lock, iflags);
2574 for (k = 0; k < scsi_debug_max_queue; ++k) {
2575 sqcp = &queued_arr[k];
2576 init_timer(&sqcp->cmnd_timer);
2577 sqcp->in_use = 0;
2578 sqcp->a_cmnd = NULL;
2579 }
2580 spin_unlock_irqrestore(&queued_arr_lock, iflags);
2581 }
2582
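/* Write a DOS/MBR-style partition table into the start of the ramdisk:
 * 0x55 0xAA signature at offset 510 and up to scsi_debug_num_parts entries
 * at offset 0x1be, each marked as a plain Linux partition (0x83).
 */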
2583 static void __init sdebug_build_parts(unsigned char *ramp,
2584 unsigned long store_size)
2585 {
2586 struct partition * pp;
2587 int starts[SDEBUG_MAX_PARTS + 2];
2588 int sectors_per_part, num_sectors, k;
2589 int heads_by_sects, start_sec, end_sec;
2590
2591 /* assume partition table already zeroed */
2592 if ((scsi_debug_num_parts < 1) || (store_size < 1048576))
2593 return;
2594 if (scsi_debug_num_parts > SDEBUG_MAX_PARTS) {
2595 scsi_debug_num_parts = SDEBUG_MAX_PARTS;
2596 printk(KERN_WARNING "scsi_debug:build_parts: reducing "
2597 "partitions to %d\n", SDEBUG_MAX_PARTS);
2598 }
2599 num_sectors = (int)sdebug_store_sectors;
2600 sectors_per_part = (num_sectors - sdebug_sectors_per)
2601 / scsi_debug_num_parts;
2602 heads_by_sects = sdebug_heads * sdebug_sectors_per;
2603 starts[0] = sdebug_sectors_per;
2604 for (k = 1; k < scsi_debug_num_parts; ++k)
2605 starts[k] = ((k * sectors_per_part) / heads_by_sects)
2606 * heads_by_sects;
2607 starts[scsi_debug_num_parts] = num_sectors;
2608 starts[scsi_debug_num_parts + 1] = 0;
2609
2610 ramp[510] = 0x55; /* magic partition markings */
2611 ramp[511] = 0xAA;
2612 pp = (struct partition *)(ramp + 0x1be);
2613 for (k = 0; starts[k + 1]; ++k, ++pp) {
2614 start_sec = starts[k];
2615 end_sec = starts[k + 1] - 1;
2616 pp->boot_ind = 0;
2617
2618 pp->cyl = start_sec / heads_by_sects;
2619 pp->head = (start_sec - (pp->cyl * heads_by_sects))
2620 / sdebug_sectors_per;
2621 pp->sector = (start_sec % sdebug_sectors_per) + 1;
2622
2623 pp->end_cyl = end_sec / heads_by_sects;
2624 pp->end_head = (end_sec - (pp->end_cyl * heads_by_sects))
2625 / sdebug_sectors_per;
2626 pp->end_sector = (end_sec % sdebug_sectors_per) + 1;
2627
2628 pp->start_sect = start_sec;
2629 pp->nr_sects = end_sec - start_sec + 1;
2630 pp->sys_ind = 0x83; /* plain Linux partition */
2631 }
2632 }
2633
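/* Complete a command either immediately (delta_jiff <= 0) or after a delay:
 * simulated autosense is copied for CHECK CONDITION results, then the
 * command is either finished via done() or parked in queued_arr[] with a
 * timer that later fires timer_intr_handler().
 */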
2634 static int schedule_resp(struct scsi_cmnd * cmnd,
2635 struct sdebug_dev_info * devip,
2636 done_funct_t done, int scsi_result, int delta_jiff)
2637 {
2638 if ((SCSI_DEBUG_OPT_NOISE & scsi_debug_opts) && cmnd) {
2639 if (scsi_result) {
2640 struct scsi_device * sdp = cmnd->device;
2641
2642 printk(KERN_INFO "scsi_debug: <%u %u %u %u> "
2643 "non-zero result=0x%x\n", sdp->host->host_no,
2644 sdp->channel, sdp->id, sdp->lun, scsi_result);
2645 }
2646 }
2647 if (cmnd && devip) {
2648 /* simulate autosense by this driver */
2649 if (SAM_STAT_CHECK_CONDITION == (scsi_result & 0xff))
2650 memcpy(cmnd->sense_buffer, devip->sense_buff,
2651 (SCSI_SENSE_BUFFERSIZE > SDEBUG_SENSE_LEN) ?
2652 SDEBUG_SENSE_LEN : SCSI_SENSE_BUFFERSIZE);
2653 }
2654 if (delta_jiff <= 0) {
2655 if (cmnd)
2656 cmnd->result = scsi_result;
2657 if (done)
2658 done(cmnd);
2659 return 0;
2660 } else {
2661 unsigned long iflags;
2662 int k;
2663 struct sdebug_queued_cmd * sqcp = NULL;
2664
2665 spin_lock_irqsave(&queued_arr_lock, iflags);
2666 for (k = 0; k < scsi_debug_max_queue; ++k) {
2667 sqcp = &queued_arr[k];
2668 if (! sqcp->in_use)
2669 break;
2670 }
2671 if (k >= scsi_debug_max_queue) {
2672 spin_unlock_irqrestore(&queued_arr_lock, iflags);
2673 printk(KERN_WARNING "scsi_debug: can_queue exceeded\n");
2674 return 1; /* report busy to mid level */
2675 }
2676 sqcp->in_use = 1;
2677 sqcp->a_cmnd = cmnd;
2678 sqcp->scsi_result = scsi_result;
2679 sqcp->done_funct = done;
2680 sqcp->cmnd_timer.function = timer_intr_handler;
2681 sqcp->cmnd_timer.data = k;
2682 sqcp->cmnd_timer.expires = jiffies + delta_jiff;
2683 add_timer(&sqcp->cmnd_timer);
2684 spin_unlock_irqrestore(&queued_arr_lock, iflags);
2685 if (cmnd)
2686 cmnd->result = 0;
2687 return 0;
2688 }
2689 }
2690 /* Note: The following macros create attribute files in the
2691 /sys/module/scsi_debug/parameters directory. Unfortunately this
2692 driver is unaware of changes made through those files and cannot
2693 trigger auxiliary actions, as it can when the corresponding attribute
2694 in the /sys/bus/pseudo/drivers/scsi_debug directory is changed.
2695 */
2696 module_param_named(add_host, scsi_debug_add_host, int, S_IRUGO | S_IWUSR);
2697 module_param_named(delay, scsi_debug_delay, int, S_IRUGO | S_IWUSR);
2698 module_param_named(dev_size_mb, scsi_debug_dev_size_mb, int, S_IRUGO);
2699 module_param_named(dsense, scsi_debug_dsense, int, S_IRUGO | S_IWUSR);
2700 module_param_named(every_nth, scsi_debug_every_nth, int, S_IRUGO | S_IWUSR);
2701 module_param_named(fake_rw, scsi_debug_fake_rw, int, S_IRUGO | S_IWUSR);
2702 module_param_named(max_luns, scsi_debug_max_luns, int, S_IRUGO | S_IWUSR);
2703 module_param_named(max_queue, scsi_debug_max_queue, int, S_IRUGO | S_IWUSR);
2704 module_param_named(no_lun_0, scsi_debug_no_lun_0, int, S_IRUGO | S_IWUSR);
2705 module_param_named(no_uld, scsi_debug_no_uld, int, S_IRUGO);
2706 module_param_named(num_parts, scsi_debug_num_parts, int, S_IRUGO);
2707 module_param_named(num_tgts, scsi_debug_num_tgts, int, S_IRUGO | S_IWUSR);
2708 module_param_named(opts, scsi_debug_opts, int, S_IRUGO | S_IWUSR);
2709 module_param_named(ptype, scsi_debug_ptype, int, S_IRUGO | S_IWUSR);
2710 module_param_named(scsi_level, scsi_debug_scsi_level, int, S_IRUGO);
2711 module_param_named(virtual_gb, scsi_debug_virtual_gb, int, S_IRUGO | S_IWUSR);
2712 module_param_named(vpd_use_hostno, scsi_debug_vpd_use_hostno, int,
2713 S_IRUGO | S_IWUSR);
2714 module_param_named(sector_size, scsi_debug_sector_size, int, S_IRUGO);
2715 module_param_named(dix, scsi_debug_dix, int, S_IRUGO);
2716 module_param_named(dif, scsi_debug_dif, int, S_IRUGO);
2717 module_param_named(guard, scsi_debug_guard, int, S_IRUGO);
2718 module_param_named(ato, scsi_debug_ato, int, S_IRUGO);
2719 module_param_named(physblk_exp, scsi_debug_physblk_exp, int, S_IRUGO);
2720 module_param_named(opt_blks, scsi_debug_opt_blks, int, S_IRUGO);
2721 module_param_named(lowest_aligned, scsi_debug_lowest_aligned, int, S_IRUGO);
2722 module_param_named(unmap_max_blocks, scsi_debug_unmap_max_blocks, int, S_IRUGO);
2723 module_param_named(unmap_max_desc, scsi_debug_unmap_max_desc, int, S_IRUGO);
2724 module_param_named(unmap_granularity, scsi_debug_unmap_granularity, int, S_IRUGO);
2725 module_param_named(unmap_alignment, scsi_debug_unmap_alignment, int, S_IRUGO);
2726 module_param_named(tpu, scsi_debug_tpu, int, S_IRUGO);
2727 module_param_named(tpws, scsi_debug_tpws, int, S_IRUGO);
2728
2729 MODULE_AUTHOR("Eric Youngdale + Douglas Gilbert");
2730 MODULE_DESCRIPTION("SCSI debug adapter driver");
2731 MODULE_LICENSE("GPL");
2732 MODULE_VERSION(SCSI_DEBUG_VERSION);
2733
2734 MODULE_PARM_DESC(add_host, "0..127 hosts allowed(def=1)");
2735 MODULE_PARM_DESC(delay, "# of jiffies to delay response(def=1)");
2736 MODULE_PARM_DESC(dev_size_mb, "size in MB of ram shared by devs(def=8)");
2737 MODULE_PARM_DESC(dsense, "use descriptor sense format(def=0 -> fixed)");
2738 MODULE_PARM_DESC(every_nth, "timeout every nth command(def=0)");
2739 MODULE_PARM_DESC(fake_rw, "fake reads/writes instead of copying (def=0)");
2740 MODULE_PARM_DESC(max_luns, "number of LUNs per target to simulate(def=1)");
2741 MODULE_PARM_DESC(max_queue, "max number of queued commands (1 to 255(def))");
2742 MODULE_PARM_DESC(no_lun_0, "no LU number 0 (def=0 -> have lun 0)");
2743 MODULE_PARM_DESC(no_uld, "stop ULD (e.g. sd driver) attaching (def=0)");
2744 MODULE_PARM_DESC(num_parts, "number of partitions(def=0)");
2745 MODULE_PARM_DESC(num_tgts, "number of targets per host to simulate(def=1)");
2746 MODULE_PARM_DESC(opts, "1->noise, 2->medium_err, 4->timeout, 8->recovered_err... (def=0)");
2747 MODULE_PARM_DESC(ptype, "SCSI peripheral type(def=0[disk])");
2748 MODULE_PARM_DESC(scsi_level, "SCSI level to simulate(def=5[SPC-3])");
2749 MODULE_PARM_DESC(virtual_gb, "virtual gigabyte size (def=0 -> use dev_size_mb)");
2750 MODULE_PARM_DESC(vpd_use_hostno, "0 -> dev ids ignore hostno (def=1 -> unique dev ids)");
2751 MODULE_PARM_DESC(sector_size, "logical block size in bytes (def=512)");
2752 MODULE_PARM_DESC(physblk_exp, "physical block exponent (def=0)");
2753 MODULE_PARM_DESC(opt_blks, "optimal transfer length in blocks (def=64)");
2754 MODULE_PARM_DESC(lowest_aligned, "lowest aligned lba (def=0)");
2755 MODULE_PARM_DESC(dix, "data integrity extensions mask (def=0)");
2756 MODULE_PARM_DESC(dif, "data integrity field type: 0-3 (def=0)");
2757 MODULE_PARM_DESC(guard, "protection checksum: 0=crc, 1=ip (def=0)");
2758 MODULE_PARM_DESC(ato, "application tag ownership: 0=disk 1=host (def=1)");
2759 MODULE_PARM_DESC(unmap_max_blocks, "max # of blocks that can be unmapped in one cmd (def=0xffffffff)");
2760 MODULE_PARM_DESC(unmap_max_desc, "max # of ranges that can be unmapped in one cmd (def=256)");
2761 MODULE_PARM_DESC(unmap_granularity, "thin provisioning granularity in blocks (def=1)");
2762 MODULE_PARM_DESC(unmap_alignment, "lowest aligned thin provisioning lba (def=0)");
2763 MODULE_PARM_DESC(tpu, "enable TP, support UNMAP command (def=0)");
2764 MODULE_PARM_DESC(tpws, "enable TP, support WRITE SAME(16) with UNMAP bit (def=0)");
2765
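/*
 * Example invocation (illustrative values only): a 64 MB ramdisk with
 * 4096-byte logical blocks, DIX and T10 DIF type 1 protection:
 *
 *   modprobe scsi_debug dev_size_mb=64 sector_size=4096 dix=1 dif=1 guard=0
 */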
2766 static char sdebug_info[256];
2767
2768 static const char * scsi_debug_info(struct Scsi_Host * shp)
2769 {
2770 sprintf(sdebug_info, "scsi_debug, version %s [%s], "
2771 "dev_size_mb=%d, opts=0x%x", SCSI_DEBUG_VERSION,
2772 scsi_debug_version_date, scsi_debug_dev_size_mb,
2773 scsi_debug_opts);
2774 return sdebug_info;
2775 }
2776
2777 /* scsi_debug_proc_info
2778 * Used when the driver has no /proc/scsi support of its own
2779 */
2780 static int scsi_debug_proc_info(struct Scsi_Host *host, char *buffer, char **start, off_t offset,
2781 int length, int inout)
2782 {
2783 int len, pos, begin;
2784 int orig_length;
2785
2786 orig_length = length;
2787
2788 if (inout == 1) {
2789 char arr[16];
2790 int minLen = length > 15 ? 15 : length;
2791
2792 if (!capable(CAP_SYS_ADMIN) || !capable(CAP_SYS_RAWIO))
2793 return -EACCES;
2794 memcpy(arr, buffer, minLen);
2795 arr[minLen] = '\0';
2796 if (1 != sscanf(arr, "%d", &pos))
2797 return -EINVAL;
2798 scsi_debug_opts = pos;
2799 if (scsi_debug_every_nth != 0)
2800 scsi_debug_cmnd_count = 0;
2801 return length;
2802 }
2803 begin = 0;
2804 pos = len = sprintf(buffer, "scsi_debug adapter driver, version "
2805 "%s [%s]\n"
2806 "num_tgts=%d, shared (ram) size=%d MB, opts=0x%x, "
2807 "every_nth=%d(curr:%d)\n"
2808 "delay=%d, max_luns=%d, scsi_level=%d\n"
2809 "sector_size=%d bytes, cylinders=%d, heads=%d, sectors=%d\n"
2810 "number of aborts=%d, device_reset=%d, bus_resets=%d, "
2811 "host_resets=%d\ndix_reads=%d dix_writes=%d dif_errors=%d\n",
2812 SCSI_DEBUG_VERSION, scsi_debug_version_date, scsi_debug_num_tgts,
2813 scsi_debug_dev_size_mb, scsi_debug_opts, scsi_debug_every_nth,
2814 scsi_debug_cmnd_count, scsi_debug_delay,
2815 scsi_debug_max_luns, scsi_debug_scsi_level,
2816 scsi_debug_sector_size, sdebug_cylinders_per, sdebug_heads,
2817 sdebug_sectors_per, num_aborts, num_dev_resets, num_bus_resets,
2818 num_host_resets, dix_reads, dix_writes, dif_errors);
2819 if (pos < offset) {
2820 len = 0;
2821 begin = pos;
2822 }
2823 *start = buffer + (offset - begin); /* Start of wanted data */
2824 len -= (offset - begin);
2825 if (len > length)
2826 len = length;
2827 return len;
2828 }
2829
2830 static ssize_t sdebug_delay_show(struct device_driver * ddp, char * buf)
2831 {
2832 return scnprintf(buf, PAGE_SIZE, "%d\n", scsi_debug_delay);
2833 }
2834
2835 static ssize_t sdebug_delay_store(struct device_driver * ddp,
2836 const char * buf, size_t count)
2837 {
2838 int delay;
2839 char work[20];
2840
2841 if (1 == sscanf(buf, "%10s", work)) {
2842 if ((1 == sscanf(work, "%d", &delay)) && (delay >= 0)) {
2843 scsi_debug_delay = delay;
2844 return count;
2845 }
2846 }
2847 return -EINVAL;
2848 }
2849 DRIVER_ATTR(delay, S_IRUGO | S_IWUSR, sdebug_delay_show,
2850 sdebug_delay_store);
2851
2852 static ssize_t sdebug_opts_show(struct device_driver * ddp, char * buf)
2853 {
2854 return scnprintf(buf, PAGE_SIZE, "0x%x\n", scsi_debug_opts);
2855 }
2856
2857 static ssize_t sdebug_opts_store(struct device_driver * ddp,
2858 const char * buf, size_t count)
2859 {
2860 int opts;
2861 char work[20];
2862
2863 if (1 == sscanf(buf, "%10s", work)) {
2864 if (0 == strnicmp(work,"0x", 2)) {
2865 if (1 == sscanf(&work[2], "%x", &opts))
2866 goto opts_done;
2867 } else {
2868 if (1 == sscanf(work, "%d", &opts))
2869 goto opts_done;
2870 }
2871 }
2872 return -EINVAL;
2873 opts_done:
2874 scsi_debug_opts = opts;
2875 scsi_debug_cmnd_count = 0;
2876 return count;
2877 }
2878 DRIVER_ATTR(opts, S_IRUGO | S_IWUSR, sdebug_opts_show,
2879 sdebug_opts_store);
2880
2881 static ssize_t sdebug_ptype_show(struct device_driver * ddp, char * buf)
2882 {
2883 return scnprintf(buf, PAGE_SIZE, "%d\n", scsi_debug_ptype);
2884 }
2885 static ssize_t sdebug_ptype_store(struct device_driver * ddp,
2886 const char * buf, size_t count)
2887 {
2888 int n;
2889
2890 if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
2891 scsi_debug_ptype = n;
2892 return count;
2893 }
2894 return -EINVAL;
2895 }
2896 DRIVER_ATTR(ptype, S_IRUGO | S_IWUSR, sdebug_ptype_show, sdebug_ptype_store);
2897
2898 static ssize_t sdebug_dsense_show(struct device_driver * ddp, char * buf)
2899 {
2900 return scnprintf(buf, PAGE_SIZE, "%d\n", scsi_debug_dsense);
2901 }
2902 static ssize_t sdebug_dsense_store(struct device_driver * ddp,
2903 const char * buf, size_t count)
2904 {
2905 int n;
2906
2907 if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
2908 scsi_debug_dsense = n;
2909 return count;
2910 }
2911 return -EINVAL;
2912 }
2913 DRIVER_ATTR(dsense, S_IRUGO | S_IWUSR, sdebug_dsense_show,
2914 sdebug_dsense_store);
2915
2916 static ssize_t sdebug_fake_rw_show(struct device_driver * ddp, char * buf)
2917 {
2918 return scnprintf(buf, PAGE_SIZE, "%d\n", scsi_debug_fake_rw);
2919 }
2920 static ssize_t sdebug_fake_rw_store(struct device_driver * ddp,
2921 const char * buf, size_t count)
2922 {
2923 int n;
2924
2925 if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
2926 scsi_debug_fake_rw = n;
2927 return count;
2928 }
2929 return -EINVAL;
2930 }
2931 DRIVER_ATTR(fake_rw, S_IRUGO | S_IWUSR, sdebug_fake_rw_show,
2932 sdebug_fake_rw_store);
2933
2934 static ssize_t sdebug_no_lun_0_show(struct device_driver * ddp, char * buf)
2935 {
2936 return scnprintf(buf, PAGE_SIZE, "%d\n", scsi_debug_no_lun_0);
2937 }
2938 static ssize_t sdebug_no_lun_0_store(struct device_driver * ddp,
2939 const char * buf, size_t count)
2940 {
2941 int n;
2942
2943 if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
2944 scsi_debug_no_lun_0 = n;
2945 return count;
2946 }
2947 return -EINVAL;
2948 }
2949 DRIVER_ATTR(no_lun_0, S_IRUGO | S_IWUSR, sdebug_no_lun_0_show,
2950 sdebug_no_lun_0_store);
2951
2952 static ssize_t sdebug_num_tgts_show(struct device_driver * ddp, char * buf)
2953 {
2954 return scnprintf(buf, PAGE_SIZE, "%d\n", scsi_debug_num_tgts);
2955 }
2956 static ssize_t sdebug_num_tgts_store(struct device_driver * ddp,
2957 const char * buf, size_t count)
2958 {
2959 int n;
2960
2961 if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
2962 scsi_debug_num_tgts = n;
2963 sdebug_max_tgts_luns();
2964 return count;
2965 }
2966 return -EINVAL;
2967 }
2968 DRIVER_ATTR(num_tgts, S_IRUGO | S_IWUSR, sdebug_num_tgts_show,
2969 sdebug_num_tgts_store);
2970
2971 static ssize_t sdebug_dev_size_mb_show(struct device_driver * ddp, char * buf)
2972 {
2973 return scnprintf(buf, PAGE_SIZE, "%d\n", scsi_debug_dev_size_mb);
2974 }
2975 DRIVER_ATTR(dev_size_mb, S_IRUGO, sdebug_dev_size_mb_show, NULL);
2976
2977 static ssize_t sdebug_num_parts_show(struct device_driver * ddp, char * buf)
2978 {
2979 return scnprintf(buf, PAGE_SIZE, "%d\n", scsi_debug_num_parts);
2980 }
2981 DRIVER_ATTR(num_parts, S_IRUGO, sdebug_num_parts_show, NULL);
2982
2983 static ssize_t sdebug_every_nth_show(struct device_driver * ddp, char * buf)
2984 {
2985 return scnprintf(buf, PAGE_SIZE, "%d\n", scsi_debug_every_nth);
2986 }
2987 static ssize_t sdebug_every_nth_store(struct device_driver * ddp,
2988 const char * buf, size_t count)
2989 {
2990 int nth;
2991
2992 if ((count > 0) && (1 == sscanf(buf, "%d", &nth))) {
2993 scsi_debug_every_nth = nth;
2994 scsi_debug_cmnd_count = 0;
2995 return count;
2996 }
2997 return -EINVAL;
2998 }
2999 DRIVER_ATTR(every_nth, S_IRUGO | S_IWUSR, sdebug_every_nth_show,
3000 sdebug_every_nth_store);
3001
3002 static ssize_t sdebug_max_luns_show(struct device_driver * ddp, char * buf)
3003 {
3004 return scnprintf(buf, PAGE_SIZE, "%d\n", scsi_debug_max_luns);
3005 }
3006 static ssize_t sdebug_max_luns_store(struct device_driver * ddp,
3007 const char * buf, size_t count)
3008 {
3009 int n;
3010
3011 if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
3012 scsi_debug_max_luns = n;
3013 sdebug_max_tgts_luns();
3014 return count;
3015 }
3016 return -EINVAL;
3017 }
3018 DRIVER_ATTR(max_luns, S_IRUGO | S_IWUSR, sdebug_max_luns_show,
3019 sdebug_max_luns_store);
3020
3021 static ssize_t sdebug_max_queue_show(struct device_driver * ddp, char * buf)
3022 {
3023 return scnprintf(buf, PAGE_SIZE, "%d\n", scsi_debug_max_queue);
3024 }
3025 static ssize_t sdebug_max_queue_store(struct device_driver * ddp,
3026 const char * buf, size_t count)
3027 {
3028 int n;
3029
3030 if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n > 0) &&
3031 (n <= SCSI_DEBUG_CANQUEUE)) {
3032 scsi_debug_max_queue = n;
3033 return count;
3034 }
3035 return -EINVAL;
3036 }
3037 DRIVER_ATTR(max_queue, S_IRUGO | S_IWUSR, sdebug_max_queue_show,
3038 sdebug_max_queue_store);
3039
3040 static ssize_t sdebug_no_uld_show(struct device_driver * ddp, char * buf)
3041 {
3042 return scnprintf(buf, PAGE_SIZE, "%d\n", scsi_debug_no_uld);
3043 }
3044 DRIVER_ATTR(no_uld, S_IRUGO, sdebug_no_uld_show, NULL);
3045
3046 static ssize_t sdebug_scsi_level_show(struct device_driver * ddp, char * buf)
3047 {
3048 return scnprintf(buf, PAGE_SIZE, "%d\n", scsi_debug_scsi_level);
3049 }
3050 DRIVER_ATTR(scsi_level, S_IRUGO, sdebug_scsi_level_show, NULL);
3051
3052 static ssize_t sdebug_virtual_gb_show(struct device_driver * ddp, char * buf)
3053 {
3054 return scnprintf(buf, PAGE_SIZE, "%d\n", scsi_debug_virtual_gb);
3055 }
3056 static ssize_t sdebug_virtual_gb_store(struct device_driver * ddp,
3057 const char * buf, size_t count)
3058 {
3059 int n;
3060
3061 if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
3062 scsi_debug_virtual_gb = n;
3063
3064 sdebug_capacity = get_sdebug_capacity();
3065
3066 return count;
3067 }
3068 return -EINVAL;
3069 }
3070 DRIVER_ATTR(virtual_gb, S_IRUGO | S_IWUSR, sdebug_virtual_gb_show,
3071 sdebug_virtual_gb_store);
3072
3073 static ssize_t sdebug_add_host_show(struct device_driver * ddp, char * buf)
3074 {
3075 return scnprintf(buf, PAGE_SIZE, "%d\n", scsi_debug_add_host);
3076 }
3077
3078 static ssize_t sdebug_add_host_store(struct device_driver * ddp,
3079 const char * buf, size_t count)
3080 {
3081 int delta_hosts;
3082
3083 if (sscanf(buf, "%d", &delta_hosts) != 1)
3084 return -EINVAL;
3085 if (delta_hosts > 0) {
3086 do {
3087 sdebug_add_adapter();
3088 } while (--delta_hosts);
3089 } else if (delta_hosts < 0) {
3090 do {
3091 sdebug_remove_adapter();
3092 } while (++delta_hosts);
3093 }
3094 return count;
3095 }
3096 DRIVER_ATTR(add_host, S_IRUGO | S_IWUSR, sdebug_add_host_show,
3097 sdebug_add_host_store);
3098
3099 static ssize_t sdebug_vpd_use_hostno_show(struct device_driver * ddp,
3100 char * buf)
3101 {
3102 return scnprintf(buf, PAGE_SIZE, "%d\n", scsi_debug_vpd_use_hostno);
3103 }
3104 static ssize_t sdebug_vpd_use_hostno_store(struct device_driver * ddp,
3105 const char * buf, size_t count)
3106 {
3107 int n;
3108
3109 if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
3110 scsi_debug_vpd_use_hostno = n;
3111 return count;
3112 }
3113 return -EINVAL;
3114 }
3115 DRIVER_ATTR(vpd_use_hostno, S_IRUGO | S_IWUSR, sdebug_vpd_use_hostno_show,
3116 sdebug_vpd_use_hostno_store);
3117
3118 static ssize_t sdebug_sector_size_show(struct device_driver * ddp, char * buf)
3119 {
3120 return scnprintf(buf, PAGE_SIZE, "%u\n", scsi_debug_sector_size);
3121 }
3122 DRIVER_ATTR(sector_size, S_IRUGO, sdebug_sector_size_show, NULL);
3123
3124 static ssize_t sdebug_dix_show(struct device_driver *ddp, char *buf)
3125 {
3126 return scnprintf(buf, PAGE_SIZE, "%d\n", scsi_debug_dix);
3127 }
3128 DRIVER_ATTR(dix, S_IRUGO, sdebug_dix_show, NULL);
3129
3130 static ssize_t sdebug_dif_show(struct device_driver *ddp, char *buf)
3131 {
3132 return scnprintf(buf, PAGE_SIZE, "%d\n", scsi_debug_dif);
3133 }
3134 DRIVER_ATTR(dif, S_IRUGO, sdebug_dif_show, NULL);
3135
3136 static ssize_t sdebug_guard_show(struct device_driver *ddp, char *buf)
3137 {
3138 return scnprintf(buf, PAGE_SIZE, "%d\n", scsi_debug_guard);
3139 }
3140 DRIVER_ATTR(guard, S_IRUGO, sdebug_guard_show, NULL);
3141
3142 static ssize_t sdebug_ato_show(struct device_driver *ddp, char *buf)
3143 {
3144 return scnprintf(buf, PAGE_SIZE, "%d\n", scsi_debug_ato);
3145 }
3146 DRIVER_ATTR(ato, S_IRUGO, sdebug_ato_show, NULL);
3147
3148 static ssize_t sdebug_map_show(struct device_driver *ddp, char *buf)
3149 {
3150 ssize_t count;
3151
3152 if (scsi_debug_tpu == 0 && scsi_debug_tpws == 0)
3153 return scnprintf(buf, PAGE_SIZE, "0-%u\n",
3154 sdebug_store_sectors);
3155
3156 count = bitmap_scnlistprintf(buf, PAGE_SIZE, map_storep, map_size);
3157
3158 buf[count++] = '\n';
3159 buf[count++] = 0;
3160
3161 return count;
3162 }
3163 DRIVER_ATTR(map, S_IRUGO, sdebug_map_show, NULL);
3164
3165
3166 /* Note: The following function creates attribute files in the
3167 /sys/bus/pseudo/drivers/scsi_debug directory. The advantage of these
3168 files (over those found in the /sys/module/scsi_debug/parameters
3169 directory) is that auxiliary actions can be triggered when an attribute
3170 is changed. For example see: sdebug_add_host_store() above.
3171 */
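/* For example (illustrative), adding one more simulated host at runtime:
 *
 *   echo 1 > /sys/bus/pseudo/drivers/scsi_debug/add_host
 */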
3172 static int do_create_driverfs_files(void)
3173 {
3174 int ret;
3175
3176 ret = driver_create_file(&sdebug_driverfs_driver, &driver_attr_add_host);
3177 ret |= driver_create_file(&sdebug_driverfs_driver, &driver_attr_delay);
3178 ret |= driver_create_file(&sdebug_driverfs_driver, &driver_attr_dev_size_mb);
3179 ret |= driver_create_file(&sdebug_driverfs_driver, &driver_attr_dsense);
3180 ret |= driver_create_file(&sdebug_driverfs_driver, &driver_attr_every_nth);
3181 ret |= driver_create_file(&sdebug_driverfs_driver, &driver_attr_fake_rw);
3182 ret |= driver_create_file(&sdebug_driverfs_driver, &driver_attr_max_luns);
3183 ret |= driver_create_file(&sdebug_driverfs_driver, &driver_attr_max_queue);
3184 ret |= driver_create_file(&sdebug_driverfs_driver, &driver_attr_no_lun_0);
3185 ret |= driver_create_file(&sdebug_driverfs_driver, &driver_attr_no_uld);
3186 ret |= driver_create_file(&sdebug_driverfs_driver, &driver_attr_num_parts);
3187 ret |= driver_create_file(&sdebug_driverfs_driver, &driver_attr_num_tgts);
3188 ret |= driver_create_file(&sdebug_driverfs_driver, &driver_attr_ptype);
3189 ret |= driver_create_file(&sdebug_driverfs_driver, &driver_attr_opts);
3190 ret |= driver_create_file(&sdebug_driverfs_driver, &driver_attr_scsi_level);
3191 ret |= driver_create_file(&sdebug_driverfs_driver, &driver_attr_virtual_gb);
3192 ret |= driver_create_file(&sdebug_driverfs_driver, &driver_attr_vpd_use_hostno);
3193 ret |= driver_create_file(&sdebug_driverfs_driver, &driver_attr_sector_size);
3194 ret |= driver_create_file(&sdebug_driverfs_driver, &driver_attr_dix);
3195 ret |= driver_create_file(&sdebug_driverfs_driver, &driver_attr_dif);
3196 ret |= driver_create_file(&sdebug_driverfs_driver, &driver_attr_guard);
3197 ret |= driver_create_file(&sdebug_driverfs_driver, &driver_attr_ato);
3198 ret |= driver_create_file(&sdebug_driverfs_driver, &driver_attr_map);
3199 return ret;
3200 }
3201
3202 static void do_remove_driverfs_files(void)
3203 {
3204 driver_remove_file(&sdebug_driverfs_driver, &driver_attr_map);
3205 driver_remove_file(&sdebug_driverfs_driver, &driver_attr_ato);
3206 driver_remove_file(&sdebug_driverfs_driver, &driver_attr_guard);
3207 driver_remove_file(&sdebug_driverfs_driver, &driver_attr_dif);
3208 driver_remove_file(&sdebug_driverfs_driver, &driver_attr_dix);
3209 driver_remove_file(&sdebug_driverfs_driver, &driver_attr_sector_size);
3210 driver_remove_file(&sdebug_driverfs_driver, &driver_attr_vpd_use_hostno);
3211 driver_remove_file(&sdebug_driverfs_driver, &driver_attr_virtual_gb);
3212 driver_remove_file(&sdebug_driverfs_driver, &driver_attr_scsi_level);
3213 driver_remove_file(&sdebug_driverfs_driver, &driver_attr_opts);
3214 driver_remove_file(&sdebug_driverfs_driver, &driver_attr_ptype);
3215 driver_remove_file(&sdebug_driverfs_driver, &driver_attr_num_tgts);
3216 driver_remove_file(&sdebug_driverfs_driver, &driver_attr_num_parts);
3217 driver_remove_file(&sdebug_driverfs_driver, &driver_attr_no_uld);
3218 driver_remove_file(&sdebug_driverfs_driver, &driver_attr_no_lun_0);
3219 driver_remove_file(&sdebug_driverfs_driver, &driver_attr_max_queue);
3220 driver_remove_file(&sdebug_driverfs_driver, &driver_attr_max_luns);
3221 driver_remove_file(&sdebug_driverfs_driver, &driver_attr_fake_rw);
3222 driver_remove_file(&sdebug_driverfs_driver, &driver_attr_every_nth);
3223 driver_remove_file(&sdebug_driverfs_driver, &driver_attr_dsense);
3224 driver_remove_file(&sdebug_driverfs_driver, &driver_attr_dev_size_mb);
3225 driver_remove_file(&sdebug_driverfs_driver, &driver_attr_delay);
3226 driver_remove_file(&sdebug_driverfs_driver, &driver_attr_add_host);
3227 }
3228
3229 struct device *pseudo_primary;
3230
3231 static int __init scsi_debug_init(void)
3232 {
3233 unsigned long sz;
3234 int host_to_add;
3235 int k;
3236 int ret;
3237
3238 switch (scsi_debug_sector_size) {
3239 case 512:
3240 case 1024:
3241 case 2048:
3242 case 4096:
3243 break;
3244 default:
3245 printk(KERN_ERR "scsi_debug_init: invalid sector_size %d\n",
3246 scsi_debug_sector_size);
3247 return -EINVAL;
3248 }
3249
3250 switch (scsi_debug_dif) {
3251
3252 case SD_DIF_TYPE0_PROTECTION:
3253 case SD_DIF_TYPE1_PROTECTION:
3254 case SD_DIF_TYPE2_PROTECTION:
3255 case SD_DIF_TYPE3_PROTECTION:
3256 break;
3257
3258 default:
3259 printk(KERN_ERR "scsi_debug_init: dif must be 0, 1, 2 or 3\n");
3260 return -EINVAL;
3261 }
3262
3263 if (scsi_debug_guard > 1) {
3264 printk(KERN_ERR "scsi_debug_init: guard must be 0 or 1\n");
3265 return -EINVAL;
3266 }
3267
3268 if (scsi_debug_ato > 1) {
3269 printk(KERN_ERR "scsi_debug_init: ato must be 0 or 1\n");
3270 return -EINVAL;
3271 }
3272
3273 if (scsi_debug_physblk_exp > 15) {
3274 printk(KERN_ERR "scsi_debug_init: invalid physblk_exp %u\n",
3275 scsi_debug_physblk_exp);
3276 return -EINVAL;
3277 }
3278
3279 if (scsi_debug_lowest_aligned > 0x3fff) {
3280 printk(KERN_ERR "scsi_debug_init: lowest_aligned too big: %u\n",
3281 scsi_debug_lowest_aligned);
3282 return -EINVAL;
3283 }
3284
3285 if (scsi_debug_dev_size_mb < 1)
3286 scsi_debug_dev_size_mb = 1; /* force minimum 1 MB ramdisk */
3287 sz = (unsigned long)scsi_debug_dev_size_mb * 1048576;
3288 sdebug_store_sectors = sz / scsi_debug_sector_size;
3289 sdebug_capacity = get_sdebug_capacity();
3290
3291 /* play around with geometry, don't waste too much on track 0 */
3292 sdebug_heads = 8;
3293 sdebug_sectors_per = 32;
3294 if (scsi_debug_dev_size_mb >= 256)
3295 sdebug_heads = 64;
3296 else if (scsi_debug_dev_size_mb >= 16)
3297 sdebug_heads = 32;
3298 sdebug_cylinders_per = (unsigned long)sdebug_capacity /
3299 (sdebug_sectors_per * sdebug_heads);
3300 if (sdebug_cylinders_per >= 1024) {
3301 /* other LLDs do this; implies >= 1GB ram disk ... */
3302 sdebug_heads = 255;
3303 sdebug_sectors_per = 63;
3304 sdebug_cylinders_per = (unsigned long)sdebug_capacity /
3305 (sdebug_sectors_per * sdebug_heads);
3306 }
3307
3308 fake_storep = vmalloc(sz);
3309 if (NULL == fake_storep) {
3310 printk(KERN_ERR "scsi_debug_init: out of memory, 1\n");
3311 return -ENOMEM;
3312 }
3313 memset(fake_storep, 0, sz);
3314 if (scsi_debug_num_parts > 0)
3315 sdebug_build_parts(fake_storep, sz);
3316
3317 if (scsi_debug_dif) {
3318 int dif_size;
3319
3320 dif_size = sdebug_store_sectors * sizeof(struct sd_dif_tuple);
3321 dif_storep = vmalloc(dif_size);
3322
3323 printk(KERN_ERR "scsi_debug_init: dif_storep %u bytes @ %p\n",
3324 dif_size, dif_storep);
3325
3326 if (dif_storep == NULL) {
3327 printk(KERN_ERR "scsi_debug_init: out of mem. (DIX)\n");
3328 ret = -ENOMEM;
3329 goto free_vm;
3330 }
3331
3332 memset(dif_storep, 0xff, dif_size);
3333 }
3334
3335 /* Thin Provisioning */
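/* One bit in map_storep covers unmap_granularity logical blocks; the bitmap
 * therefore needs map_size bits, i.e. map_size >> 3 bytes.
 */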
3336 if (scsi_debug_tpu || scsi_debug_tpws) {
3337 unsigned int map_bytes;
3338
3339 scsi_debug_unmap_max_blocks =
3340 clamp(scsi_debug_unmap_max_blocks, 0U, 0xffffffffU);
3341
3342 scsi_debug_unmap_max_desc =
3343 clamp(scsi_debug_unmap_max_desc, 0U, 256U);
3344
3345 scsi_debug_unmap_granularity =
3346 clamp(scsi_debug_unmap_granularity, 1U, 0xffffffffU);
3347
3348 if (scsi_debug_unmap_alignment &&
3349 scsi_debug_unmap_granularity < scsi_debug_unmap_alignment) {
3350 printk(KERN_ERR
3351 "%s: ERR: unmap_granularity < unmap_alignment\n",
3352 __func__);
3353 return -EINVAL;
3354 }
3355
3356 map_size = (sdebug_store_sectors / scsi_debug_unmap_granularity);
3357 map_bytes = map_size >> 3;
3358 map_storep = vmalloc(map_bytes);
3359
3360 printk(KERN_INFO "scsi_debug_init: %lu provisioning blocks\n",
3361 map_size);
3362
3363 if (map_storep == NULL) {
3364 printk(KERN_ERR "scsi_debug_init: out of mem. (MAP)\n");
3365 ret = -ENOMEM;
3366 goto free_vm;
3367 }
3368
3369 memset(map_storep, 0x0, map_bytes);
3370
3371 /* Map first 1KB for partition table */
3372 if (scsi_debug_num_parts)
3373 map_region(0, 2);
3374 }
3375
3376 pseudo_primary = root_device_register("pseudo_0");
3377 if (IS_ERR(pseudo_primary)) {
3378 printk(KERN_WARNING "scsi_debug: root_device_register() error\n");
3379 ret = PTR_ERR(pseudo_primary);
3380 goto free_vm;
3381 }
3382 ret = bus_register(&pseudo_lld_bus);
3383 if (ret < 0) {
3384 printk(KERN_WARNING "scsi_debug: bus_register error: %d\n",
3385 ret);
3386 goto dev_unreg;
3387 }
3388 ret = driver_register(&sdebug_driverfs_driver);
3389 if (ret < 0) {
3390 printk(KERN_WARNING "scsi_debug: driver_register error: %d\n",
3391 ret);
3392 goto bus_unreg;
3393 }
3394 ret = do_create_driverfs_files();
3395 if (ret < 0) {
3396 printk(KERN_WARNING "scsi_debug: driver_create_file error: %d\n",
3397 ret);
3398 goto del_files;
3399 }
3400
3401 init_all_queued();
3402
3403 host_to_add = scsi_debug_add_host;
3404 scsi_debug_add_host = 0;
3405
3406 for (k = 0; k < host_to_add; k++) {
3407 if (sdebug_add_adapter()) {
3408 printk(KERN_ERR "scsi_debug_init: "
3409 "sdebug_add_adapter failed k=%d\n", k);
3410 break;
3411 }
3412 }
3413
3414 if (SCSI_DEBUG_OPT_NOISE & scsi_debug_opts) {
3415 printk(KERN_INFO "scsi_debug_init: built %d host(s)\n",
3416 scsi_debug_add_host);
3417 }
3418 return 0;
3419
3420 del_files:
3421 do_remove_driverfs_files();
3422 driver_unregister(&sdebug_driverfs_driver);
3423 bus_unreg:
3424 bus_unregister(&pseudo_lld_bus);
3425 dev_unreg:
3426 root_device_unregister(pseudo_primary);
3427 free_vm:
3428 if (map_storep)
3429 vfree(map_storep);
3430 if (dif_storep)
3431 vfree(dif_storep);
3432 vfree(fake_storep);
3433
3434 return ret;
3435 }
3436
3437 static void __exit scsi_debug_exit(void)
3438 {
3439 int k = scsi_debug_add_host;
3440
3441 stop_all_queued();
3442 for (; k; k--)
3443 sdebug_remove_adapter();
3444 do_remove_driverfs_files();
3445 driver_unregister(&sdebug_driverfs_driver);
3446 bus_unregister(&pseudo_lld_bus);
3447 root_device_unregister(pseudo_primary);
3448
3449 if (dif_storep)
3450 vfree(dif_storep);
3451
3452 vfree(fake_storep);
3453 }
3454
3455 device_initcall(scsi_debug_init);
3456 module_exit(scsi_debug_exit);
3457
static void sdebug_release_adapter(struct device * dev)
{
	struct sdebug_host_info *sdbg_host;

	sdbg_host = to_sdebug_host(dev);
	kfree(sdbg_host);
}

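/*
 * Create one simulated adapter: allocate the host bookkeeping structure,
 * pre-create num_tgts * max_luns device-info entries, then register a
 * device on the pseudo bus.  Registration triggers the bus match/probe
 * path (sdebug_driver_probe), which is where the Scsi_Host is actually
 * allocated and scanned.  E.g. num_tgts=2 with max_luns=4 pre-creates
 * 8 device-info entries for the new host.
 */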
static int sdebug_add_adapter(void)
{
	int k, devs_per_host;
	int error = 0;
	struct sdebug_host_info *sdbg_host;
	struct sdebug_dev_info *sdbg_devinfo, *tmp;

	sdbg_host = kzalloc(sizeof(*sdbg_host), GFP_KERNEL);
	if (NULL == sdbg_host) {
		printk(KERN_ERR "%s: out of memory at line %d\n",
		       __func__, __LINE__);
		return -ENOMEM;
	}

	INIT_LIST_HEAD(&sdbg_host->dev_info_list);

	devs_per_host = scsi_debug_num_tgts * scsi_debug_max_luns;
	for (k = 0; k < devs_per_host; k++) {
		sdbg_devinfo = sdebug_device_create(sdbg_host, GFP_KERNEL);
		if (!sdbg_devinfo) {
			printk(KERN_ERR "%s: out of memory at line %d\n",
			       __func__, __LINE__);
			error = -ENOMEM;
			goto clean;
		}
	}

	spin_lock(&sdebug_host_list_lock);
	list_add_tail(&sdbg_host->host_list, &sdebug_host_list);
	spin_unlock(&sdebug_host_list_lock);

	sdbg_host->dev.bus = &pseudo_lld_bus;
	sdbg_host->dev.parent = pseudo_primary;
	sdbg_host->dev.release = &sdebug_release_adapter;
	dev_set_name(&sdbg_host->dev, "adapter%d", scsi_debug_add_host);

	error = device_register(&sdbg_host->dev);

	if (error)
		goto clean;

	++scsi_debug_add_host;
	return error;

clean:
	list_for_each_entry_safe(sdbg_devinfo, tmp, &sdbg_host->dev_info_list,
				 dev_list) {
		list_del(&sdbg_devinfo->dev_list);
		kfree(sdbg_devinfo);
	}

	kfree(sdbg_host);
	return error;
}

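/*
 * Remove the most recently added adapter: take the tail of
 * sdebug_host_list under the list lock, then unregister its device,
 * which in turn runs sdebug_driver_remove() and, once the last
 * reference is gone, sdebug_release_adapter().
 */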
static void sdebug_remove_adapter(void)
{
	struct sdebug_host_info * sdbg_host = NULL;

	spin_lock(&sdebug_host_list_lock);
	if (!list_empty(&sdebug_host_list)) {
		sdbg_host = list_entry(sdebug_host_list.prev,
				       struct sdebug_host_info, host_list);
		list_del(&sdbg_host->host_list);
	}
	spin_unlock(&sdebug_host_list_lock);

	if (!sdbg_host)
		return;

	device_unregister(&sdbg_host->dev);
	--scsi_debug_add_host;
}

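/*
 * Main command dispatcher.  Decodes the CDB opcode, fabricates the
 * response and/or sense data for the simulated device, and hands the
 * result to schedule_resp(), which completes the command either
 * immediately or after the configured delay.  The "every_nth" option
 * below periodically injects timeouts or recovered/transport/DIF/DIX
 * errors into READs and WRITEs.
 */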
static int scsi_debug_queuecommand(struct scsi_cmnd *SCpnt, done_funct_t done)
{
	unsigned char *cmd = (unsigned char *) SCpnt->cmnd;
	int len, k;
	unsigned int num;
	unsigned long long lba;
	u32 ei_lba;
	int errsts = 0;
	int target = SCpnt->device->id;
	struct sdebug_dev_info *devip = NULL;
	int inj_recovered = 0;
	int inj_transport = 0;
	int inj_dif = 0;
	int inj_dix = 0;
	int delay_override = 0;
	int unmap = 0;

	scsi_set_resid(SCpnt, 0);
	if ((SCSI_DEBUG_OPT_NOISE & scsi_debug_opts) && cmd) {
		printk(KERN_INFO "scsi_debug: cmd ");
		for (k = 0, len = SCpnt->cmd_len; k < len; ++k)
			printk("%02x ", (int)cmd[k]);
		printk("\n");
	}

	if (target == SCpnt->device->host->hostt->this_id) {
		printk(KERN_INFO "scsi_debug: initiator's id used as "
		       "target!\n");
		return schedule_resp(SCpnt, NULL, done,
				     DID_NO_CONNECT << 16, 0);
	}

	if ((SCpnt->device->lun >= scsi_debug_max_luns) &&
	    (SCpnt->device->lun != SAM2_WLUN_REPORT_LUNS))
		return schedule_resp(SCpnt, NULL, done,
				     DID_NO_CONNECT << 16, 0);
	devip = devInfoReg(SCpnt->device);
	if (NULL == devip)
		return schedule_resp(SCpnt, NULL, done,
				     DID_NO_CONNECT << 16, 0);

	if ((scsi_debug_every_nth != 0) &&
	    (++scsi_debug_cmnd_count >= abs(scsi_debug_every_nth))) {
		scsi_debug_cmnd_count = 0;
		if (scsi_debug_every_nth < -1)
			scsi_debug_every_nth = -1;
		if (SCSI_DEBUG_OPT_TIMEOUT & scsi_debug_opts)
			return 0; /* ignore command causing timeout */
		else if (SCSI_DEBUG_OPT_RECOVERED_ERR & scsi_debug_opts)
			inj_recovered = 1; /* to reads and writes below */
		else if (SCSI_DEBUG_OPT_TRANSPORT_ERR & scsi_debug_opts)
			inj_transport = 1; /* to reads and writes below */
		else if (SCSI_DEBUG_OPT_DIF_ERR & scsi_debug_opts)
			inj_dif = 1; /* to reads and writes below */
		else if (SCSI_DEBUG_OPT_DIX_ERR & scsi_debug_opts)
			inj_dix = 1; /* to reads and writes below */
	}
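	/*
	 * The inj_* flags set above do not fail the command here; they are
	 * checked again after resp_read()/resp_write() so the injected
	 * sense data is attached to an otherwise successful transfer.
	 */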

	if (devip->wlun) {
		switch (*cmd) {
		case INQUIRY:
		case REQUEST_SENSE:
		case TEST_UNIT_READY:
		case REPORT_LUNS:
			break;  /* only allowable wlun commands */
		default:
			if (SCSI_DEBUG_OPT_NOISE & scsi_debug_opts)
				printk(KERN_INFO "scsi_debug: Opcode: 0x%x "
				       "not supported for wlun\n", *cmd);
			mk_sense_buffer(devip, ILLEGAL_REQUEST,
					INVALID_OPCODE, 0);
			errsts = check_condition_result;
			return schedule_resp(SCpnt, devip, done, errsts,
					     0);
		}
	}

	switch (*cmd) {
	case INQUIRY:		/* mandatory, ignore unit attention */
		delay_override = 1;
		errsts = resp_inquiry(SCpnt, target, devip);
		break;
	case REQUEST_SENSE:	/* mandatory, ignore unit attention */
		delay_override = 1;
		errsts = resp_requests(SCpnt, devip);
		break;
	case REZERO_UNIT:	/* actually this is REWIND for SSC */
	case START_STOP:
		errsts = resp_start_stop(SCpnt, devip);
		break;
	case ALLOW_MEDIUM_REMOVAL:
		errsts = check_readiness(SCpnt, 1, devip);
		if (errsts)
			break;
		if (SCSI_DEBUG_OPT_NOISE & scsi_debug_opts)
			printk(KERN_INFO "scsi_debug: Medium removal %s\n",
			       cmd[4] ? "inhibited" : "enabled");
		break;
	case SEND_DIAGNOSTIC:	/* mandatory */
		errsts = check_readiness(SCpnt, 1, devip);
		break;
	case TEST_UNIT_READY:	/* mandatory */
		delay_override = 1;
		errsts = check_readiness(SCpnt, 0, devip);
		break;
	case RESERVE:
		errsts = check_readiness(SCpnt, 1, devip);
		break;
	case RESERVE_10:
		errsts = check_readiness(SCpnt, 1, devip);
		break;
	case RELEASE:
		errsts = check_readiness(SCpnt, 1, devip);
		break;
	case RELEASE_10:
		errsts = check_readiness(SCpnt, 1, devip);
		break;
	case READ_CAPACITY:
		errsts = resp_readcap(SCpnt, devip);
		break;
	case SERVICE_ACTION_IN:
		if (cmd[1] == SAI_READ_CAPACITY_16)
			errsts = resp_readcap16(SCpnt, devip);
		else if (cmd[1] == SAI_GET_LBA_STATUS) {

			if (scsi_debug_tpu == 0 && scsi_debug_tpws == 0) {
				mk_sense_buffer(devip, ILLEGAL_REQUEST,
						INVALID_COMMAND_OPCODE, 0);
				errsts = check_condition_result;
			} else
				errsts = resp_get_lba_status(SCpnt, devip);
		} else {
			mk_sense_buffer(devip, ILLEGAL_REQUEST,
					INVALID_OPCODE, 0);
			errsts = check_condition_result;
		}
		break;
	case MAINTENANCE_IN:
		if (MI_REPORT_TARGET_PGS != cmd[1]) {
			mk_sense_buffer(devip, ILLEGAL_REQUEST,
					INVALID_OPCODE, 0);
			errsts = check_condition_result;
			break;
		}
		errsts = resp_report_tgtpgs(SCpnt, devip);
		break;
	case READ_16:
	case READ_12:
	case READ_10:
		/* READ{10,12,16} and DIF Type 2 are natural enemies */
		if (scsi_debug_dif == SD_DIF_TYPE2_PROTECTION &&
		    cmd[1] & 0xe0) {
			mk_sense_buffer(devip, ILLEGAL_REQUEST,
					INVALID_COMMAND_OPCODE, 0);
			errsts = check_condition_result;
			break;
		}

		if ((scsi_debug_dif == SD_DIF_TYPE1_PROTECTION ||
		     scsi_debug_dif == SD_DIF_TYPE3_PROTECTION) &&
		    (cmd[1] & 0xe0) == 0)
			printk(KERN_ERR "Unprotected RD/WR to DIF device\n");

		/* fall through */
	case READ_6:
read:
		errsts = check_readiness(SCpnt, 0, devip);
		if (errsts)
			break;
		if (scsi_debug_fake_rw)
			break;
		get_data_transfer_info(cmd, &lba, &num, &ei_lba);
		errsts = resp_read(SCpnt, lba, num, devip, ei_lba);
		if (inj_recovered && (0 == errsts)) {
			mk_sense_buffer(devip, RECOVERED_ERROR,
					THRESHOLD_EXCEEDED, 0);
			errsts = check_condition_result;
		} else if (inj_transport && (0 == errsts)) {
			mk_sense_buffer(devip, ABORTED_COMMAND,
					TRANSPORT_PROBLEM, ACK_NAK_TO);
			errsts = check_condition_result;
		} else if (inj_dif && (0 == errsts)) {
			mk_sense_buffer(devip, ABORTED_COMMAND, 0x10, 1);
			errsts = illegal_condition_result;
		} else if (inj_dix && (0 == errsts)) {
			mk_sense_buffer(devip, ILLEGAL_REQUEST, 0x10, 1);
			errsts = illegal_condition_result;
		}
		break;
	case REPORT_LUNS:	/* mandatory, ignore unit attention */
		delay_override = 1;
		errsts = resp_report_luns(SCpnt, devip);
		break;
	case VERIFY:		/* 10 byte SBC-2 command */
		errsts = check_readiness(SCpnt, 0, devip);
		break;
	case WRITE_16:
	case WRITE_12:
	case WRITE_10:
		/* WRITE{10,12,16} and DIF Type 2 are natural enemies */
		if (scsi_debug_dif == SD_DIF_TYPE2_PROTECTION &&
		    cmd[1] & 0xe0) {
			mk_sense_buffer(devip, ILLEGAL_REQUEST,
					INVALID_COMMAND_OPCODE, 0);
			errsts = check_condition_result;
			break;
		}

		if ((scsi_debug_dif == SD_DIF_TYPE1_PROTECTION ||
		     scsi_debug_dif == SD_DIF_TYPE3_PROTECTION) &&
		    (cmd[1] & 0xe0) == 0)
			printk(KERN_ERR "Unprotected RD/WR to DIF device\n");

		/* fall through */
	case WRITE_6:
write:
		errsts = check_readiness(SCpnt, 0, devip);
		if (errsts)
			break;
		if (scsi_debug_fake_rw)
			break;
		get_data_transfer_info(cmd, &lba, &num, &ei_lba);
		errsts = resp_write(SCpnt, lba, num, devip, ei_lba);
		if (inj_recovered && (0 == errsts)) {
			mk_sense_buffer(devip, RECOVERED_ERROR,
					THRESHOLD_EXCEEDED, 0);
			errsts = check_condition_result;
		} else if (inj_dif && (0 == errsts)) {
			mk_sense_buffer(devip, ABORTED_COMMAND, 0x10, 1);
			errsts = illegal_condition_result;
		} else if (inj_dix && (0 == errsts)) {
			mk_sense_buffer(devip, ILLEGAL_REQUEST, 0x10, 1);
			errsts = illegal_condition_result;
		}
		break;
	case WRITE_SAME_16:
		if (cmd[1] & 0x8) {
			if (scsi_debug_tpws == 0) {
				mk_sense_buffer(devip, ILLEGAL_REQUEST,
						INVALID_FIELD_IN_CDB, 0);
				errsts = check_condition_result;
			} else
				unmap = 1;
		}
		if (errsts)
			break;
		/* fall through */
	case WRITE_SAME:
		errsts = check_readiness(SCpnt, 0, devip);
		if (errsts)
			break;
		get_data_transfer_info(cmd, &lba, &num, &ei_lba);
		errsts = resp_write_same(SCpnt, lba, num, devip, ei_lba, unmap);
		break;
	case UNMAP:
		errsts = check_readiness(SCpnt, 0, devip);
		if (errsts)
			break;

		if (scsi_debug_unmap_max_desc == 0 || scsi_debug_tpu == 0) {
			mk_sense_buffer(devip, ILLEGAL_REQUEST,
					INVALID_COMMAND_OPCODE, 0);
			errsts = check_condition_result;
		} else
			errsts = resp_unmap(SCpnt, devip);
		break;
	case MODE_SENSE:
	case MODE_SENSE_10:
		errsts = resp_mode_sense(SCpnt, target, devip);
		break;
	case MODE_SELECT:
		errsts = resp_mode_select(SCpnt, 1, devip);
		break;
	case MODE_SELECT_10:
		errsts = resp_mode_select(SCpnt, 0, devip);
		break;
	case LOG_SENSE:
		errsts = resp_log_sense(SCpnt, devip);
		break;
	case SYNCHRONIZE_CACHE:
		delay_override = 1;
		errsts = check_readiness(SCpnt, 0, devip);
		break;
	case WRITE_BUFFER:
		errsts = check_readiness(SCpnt, 1, devip);
		break;
	case XDWRITEREAD_10:
		if (!scsi_bidi_cmnd(SCpnt)) {
			mk_sense_buffer(devip, ILLEGAL_REQUEST,
					INVALID_FIELD_IN_CDB, 0);
			errsts = check_condition_result;
			break;
		}

		errsts = check_readiness(SCpnt, 0, devip);
		if (errsts)
			break;
		if (scsi_debug_fake_rw)
			break;
		get_data_transfer_info(cmd, &lba, &num, &ei_lba);
		errsts = resp_read(SCpnt, lba, num, devip, ei_lba);
		if (errsts)
			break;
		errsts = resp_write(SCpnt, lba, num, devip, ei_lba);
		if (errsts)
			break;
		errsts = resp_xdwriteread(SCpnt, lba, num, devip);
		break;
	case VARIABLE_LENGTH_CMD:
		if (scsi_debug_dif == SD_DIF_TYPE2_PROTECTION) {

			if ((cmd[10] & 0xe0) == 0)
				printk(KERN_ERR
				       "Unprotected RD/WR to DIF device\n");

			if (cmd[9] == READ_32) {
				BUG_ON(SCpnt->cmd_len < 32);
				goto read;
			}

			if (cmd[9] == WRITE_32) {
				BUG_ON(SCpnt->cmd_len < 32);
				goto write;
			}
		}

		mk_sense_buffer(devip, ILLEGAL_REQUEST,
				INVALID_FIELD_IN_CDB, 0);
		errsts = check_condition_result;
		break;

	default:
		if (SCSI_DEBUG_OPT_NOISE & scsi_debug_opts)
			printk(KERN_INFO "scsi_debug: Opcode: 0x%x not "
			       "supported\n", *cmd);
		errsts = check_readiness(SCpnt, 1, devip);
		if (errsts)
			break;	/* Unit attention takes precedence */
		mk_sense_buffer(devip, ILLEGAL_REQUEST, INVALID_OPCODE, 0);
		errsts = check_condition_result;
		break;
	}
	return schedule_resp(SCpnt, devip, done, errsts,
			     (delay_override ? 0 : scsi_debug_delay));
}

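/*
 * Host template for the simulated HBA.  can_queue is overwritten with
 * scsi_debug_max_queue in sdebug_driver_probe() before the host is
 * allocated; this_id = 7 is the simulated initiator ID, which
 * scsi_debug_queuecommand() above rejects as a target.
 */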
static struct scsi_host_template sdebug_driver_template = {
	.proc_info = scsi_debug_proc_info,
	.proc_name = sdebug_proc_name,
	.name = "SCSI DEBUG",
	.info = scsi_debug_info,
	.slave_alloc = scsi_debug_slave_alloc,
	.slave_configure = scsi_debug_slave_configure,
	.slave_destroy = scsi_debug_slave_destroy,
	.ioctl = scsi_debug_ioctl,
	.queuecommand = scsi_debug_queuecommand,
	.eh_abort_handler = scsi_debug_abort,
	.eh_bus_reset_handler = scsi_debug_bus_reset,
	.eh_device_reset_handler = scsi_debug_device_reset,
	.eh_host_reset_handler = scsi_debug_host_reset,
	.bios_param = scsi_debug_biosparam,
	.can_queue = SCSI_DEBUG_CANQUEUE,
	.this_id = 7,
	.sg_tablesize = 256,
	.cmd_per_lun = 16,
	.max_sectors = 0xffff,
	.use_clustering = DISABLE_CLUSTERING,
	.module = THIS_MODULE,
};

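/*
 * Bus probe callback, run for every adapter device registered by
 * sdebug_add_adapter(): allocate the Scsi_Host, advertise the DIF/DIX
 * protection capabilities and guard type selected by the scsi_debug_dif,
 * scsi_debug_dix and scsi_debug_guard options, then add and scan the
 * host so the simulated targets and LUNs appear.
 */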
static int sdebug_driver_probe(struct device * dev)
{
	int error = 0;
	struct sdebug_host_info *sdbg_host;
	struct Scsi_Host *hpnt;
	int host_prot;

	sdbg_host = to_sdebug_host(dev);

	sdebug_driver_template.can_queue = scsi_debug_max_queue;
	hpnt = scsi_host_alloc(&sdebug_driver_template, sizeof(sdbg_host));
	if (NULL == hpnt) {
		printk(KERN_ERR "%s: scsi_host_alloc failed\n", __func__);
		error = -ENODEV;
		return error;
	}

	sdbg_host->shost = hpnt;
	*((struct sdebug_host_info **)hpnt->hostdata) = sdbg_host;
	if ((hpnt->this_id >= 0) && (scsi_debug_num_tgts > hpnt->this_id))
		hpnt->max_id = scsi_debug_num_tgts + 1;
	else
		hpnt->max_id = scsi_debug_num_tgts;
	hpnt->max_lun = SAM2_WLUN_REPORT_LUNS;	/* = scsi_debug_max_luns; */

	host_prot = 0;

	switch (scsi_debug_dif) {

	case SD_DIF_TYPE1_PROTECTION:
		host_prot = SHOST_DIF_TYPE1_PROTECTION;
		if (scsi_debug_dix)
			host_prot |= SHOST_DIX_TYPE1_PROTECTION;
		break;

	case SD_DIF_TYPE2_PROTECTION:
		host_prot = SHOST_DIF_TYPE2_PROTECTION;
		if (scsi_debug_dix)
			host_prot |= SHOST_DIX_TYPE2_PROTECTION;
		break;

	case SD_DIF_TYPE3_PROTECTION:
		host_prot = SHOST_DIF_TYPE3_PROTECTION;
		if (scsi_debug_dix)
			host_prot |= SHOST_DIX_TYPE3_PROTECTION;
		break;

	default:
		if (scsi_debug_dix)
			host_prot |= SHOST_DIX_TYPE0_PROTECTION;
		break;
	}

	scsi_host_set_prot(hpnt, host_prot);

	printk(KERN_INFO "scsi_debug: host protection%s%s%s%s%s%s%s\n",
	       (host_prot & SHOST_DIF_TYPE1_PROTECTION) ? " DIF1" : "",
	       (host_prot & SHOST_DIF_TYPE2_PROTECTION) ? " DIF2" : "",
	       (host_prot & SHOST_DIF_TYPE3_PROTECTION) ? " DIF3" : "",
	       (host_prot & SHOST_DIX_TYPE0_PROTECTION) ? " DIX0" : "",
	       (host_prot & SHOST_DIX_TYPE1_PROTECTION) ? " DIX1" : "",
	       (host_prot & SHOST_DIX_TYPE2_PROTECTION) ? " DIX2" : "",
	       (host_prot & SHOST_DIX_TYPE3_PROTECTION) ? " DIX3" : "");

	if (scsi_debug_guard == 1)
		scsi_host_set_guard(hpnt, SHOST_DIX_GUARD_IP);
	else
		scsi_host_set_guard(hpnt, SHOST_DIX_GUARD_CRC);

	error = scsi_add_host(hpnt, &sdbg_host->dev);
	if (error) {
		printk(KERN_ERR "%s: scsi_add_host failed\n", __func__);
		error = -ENODEV;
		scsi_host_put(hpnt);
	} else
		scsi_scan_host(hpnt);

	return error;
}

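/*
 * Bus remove callback: detach the Scsi_Host from the midlayer, free the
 * per-device info entries created for this adapter, and drop the host
 * reference taken by scsi_host_alloc().
 */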
static int sdebug_driver_remove(struct device * dev)
{
	struct sdebug_host_info *sdbg_host;
	struct sdebug_dev_info *sdbg_devinfo, *tmp;

	sdbg_host = to_sdebug_host(dev);

	if (!sdbg_host) {
		printk(KERN_ERR "%s: Unable to locate host info\n",
		       __func__);
		return -ENODEV;
	}

	scsi_remove_host(sdbg_host->shost);

	list_for_each_entry_safe(sdbg_devinfo, tmp, &sdbg_host->dev_info_list,
				 dev_list) {
		list_del(&sdbg_devinfo->dev_list);
		kfree(sdbg_devinfo);
	}

	scsi_host_put(sdbg_host->shost);
	return 0;
}

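/*
 * The pseudo bus matches unconditionally, so every adapter device
 * registered by sdebug_add_adapter() binds to this driver and goes
 * through sdebug_driver_probe()/sdebug_driver_remove().
 */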
static int pseudo_lld_bus_match(struct device *dev,
				struct device_driver *dev_driver)
{
	return 1;
}

static struct bus_type pseudo_lld_bus = {
	.name = "pseudo",
	.match = pseudo_lld_bus_match,
	.probe = sdebug_driver_probe,
	.remove = sdebug_driver_remove,
};