drivers/scsi/scsi_debug.c
1 /*
2 * vvvvvvvvvvvvvvvvvvvvvvv Original vvvvvvvvvvvvvvvvvvvvvvvvvvvvvvv
3 * Copyright (C) 1992 Eric Youngdale
4 * Simulate a host adapter with 2 disks attached. Do a lot of checking
5 * to make sure that we are not getting blocks mixed up, and PANIC if
6 * anything out of the ordinary is seen.
7 * ^^^^^^^^^^^^^^^^^^^^^^^ Original ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
8 *
9 * This version is more generic, simulating a variable number of disks
10 * (or disk-like devices) sharing a common amount of RAM. To be more
11 * realistic, the simulated devices have the transport attributes of
12 * SAS disks.
13 *
14 *
15 * For documentation see http://sg.danny.cz/sg/sdebug26.html
16 *
17 * D. Gilbert (dpg) work for Magneto-Optical device test [20010421]
18 * dpg: work for devfs large number of disks [20010809]
19 * forked for lk 2.5 series [20011216, 20020101]
20 * use vmalloc() more inquiry+mode_sense [20020302]
21 * add timers for delayed responses [20020721]
22 * Patrick Mansfield <patmans@us.ibm.com> max_luns+scsi_level [20021031]
23 * Mike Anderson <andmike@us.ibm.com> sysfs work [20021118]
24 * dpg: change style of boot options to "scsi_debug.num_tgts=2" and
25 * module options to "modprobe scsi_debug num_tgts=2" [20021221]
26 */
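/*
 * Example invocation (values are illustrative only):
 *   modprobe scsi_debug num_tgts=2 max_luns=4
 * or on the kernel command line:
 *   scsi_debug.num_tgts=2
 */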
27
28 #include <linux/module.h>
29
30 #include <linux/kernel.h>
31 #include <linux/errno.h>
32 #include <linux/timer.h>
33 #include <linux/slab.h>
34 #include <linux/types.h>
35 #include <linux/string.h>
36 #include <linux/genhd.h>
37 #include <linux/fs.h>
38 #include <linux/init.h>
39 #include <linux/proc_fs.h>
40 #include <linux/vmalloc.h>
41 #include <linux/moduleparam.h>
42 #include <linux/scatterlist.h>
43 #include <linux/blkdev.h>
44 #include <linux/crc-t10dif.h>
45
46 #include <net/checksum.h>
47
48 #include <asm/unaligned.h>
49
50 #include <scsi/scsi.h>
51 #include <scsi/scsi_cmnd.h>
52 #include <scsi/scsi_device.h>
53 #include <scsi/scsi_host.h>
54 #include <scsi/scsicam.h>
55 #include <scsi/scsi_eh.h>
56 #include <scsi/scsi_dbg.h>
57
58 #include "sd.h"
59 #include "scsi_logging.h"
60
61 #define SCSI_DEBUG_VERSION "1.82"
62 static const char * scsi_debug_version_date = "20100324";
63
64 /* Additional Sense Code (ASC) */
65 #define NO_ADDITIONAL_SENSE 0x0
66 #define LOGICAL_UNIT_NOT_READY 0x4
67 #define UNRECOVERED_READ_ERR 0x11
68 #define PARAMETER_LIST_LENGTH_ERR 0x1a
69 #define INVALID_OPCODE 0x20
70 #define ADDR_OUT_OF_RANGE 0x21
71 #define INVALID_COMMAND_OPCODE 0x20
72 #define INVALID_FIELD_IN_CDB 0x24
73 #define INVALID_FIELD_IN_PARAM_LIST 0x26
74 #define POWERON_RESET 0x29
75 #define SAVING_PARAMS_UNSUP 0x39
76 #define TRANSPORT_PROBLEM 0x4b
77 #define THRESHOLD_EXCEEDED 0x5d
78 #define LOW_POWER_COND_ON 0x5e
79
80 /* Additional Sense Code Qualifier (ASCQ) */
81 #define ACK_NAK_TO 0x3
82
83 #define SDEBUG_TAGGED_QUEUING 0 /* 0 | MSG_SIMPLE_TAG | MSG_ORDERED_TAG */
84
85 /* Default values for driver parameters */
86 #define DEF_NUM_HOST 1
87 #define DEF_NUM_TGTS 1
88 #define DEF_MAX_LUNS 1
89 /* With these defaults, this driver will make 1 host with 1 target
90 * (id 0) containing 1 logical unit (lun 0). That is 1 device.
91 */
92 #define DEF_ATO 1
93 #define DEF_DELAY 1
94 #define DEF_DEV_SIZE_MB 8
95 #define DEF_DIF 0
96 #define DEF_DIX 0
97 #define DEF_D_SENSE 0
98 #define DEF_EVERY_NTH 0
99 #define DEF_FAKE_RW 0
100 #define DEF_GUARD 0
101 #define DEF_LBPU 0
102 #define DEF_LBPWS 0
103 #define DEF_LBPWS10 0
104 #define DEF_LBPRZ 1
105 #define DEF_LOWEST_ALIGNED 0
106 #define DEF_NO_LUN_0 0
107 #define DEF_NUM_PARTS 0
108 #define DEF_OPTS 0
109 #define DEF_OPT_BLKS 64
110 #define DEF_PHYSBLK_EXP 0
111 #define DEF_PTYPE 0
112 #define DEF_REMOVABLE false
113 #define DEF_SCSI_LEVEL 5 /* INQUIRY, byte2 [5->SPC-3] */
114 #define DEF_SECTOR_SIZE 512
115 #define DEF_UNMAP_ALIGNMENT 0
116 #define DEF_UNMAP_GRANULARITY 1
117 #define DEF_UNMAP_MAX_BLOCKS 0xFFFFFFFF
118 #define DEF_UNMAP_MAX_DESC 256
119 #define DEF_VIRTUAL_GB 0
120 #define DEF_VPD_USE_HOSTNO 1
121 #define DEF_WRITESAME_LENGTH 0xFFFF
122
123 /* bit mask values for scsi_debug_opts */
124 #define SCSI_DEBUG_OPT_NOISE 1
125 #define SCSI_DEBUG_OPT_MEDIUM_ERR 2
126 #define SCSI_DEBUG_OPT_TIMEOUT 4
127 #define SCSI_DEBUG_OPT_RECOVERED_ERR 8
128 #define SCSI_DEBUG_OPT_TRANSPORT_ERR 16
129 #define SCSI_DEBUG_OPT_DIF_ERR 32
130 #define SCSI_DEBUG_OPT_DIX_ERR 64
131 #define SCSI_DEBUG_OPT_MAC_TIMEOUT 128
132 /* When "every_nth" > 0 then modulo "every_nth" commands:
133 * - no response is simulated if SCSI_DEBUG_OPT_TIMEOUT is set
134 * - a RECOVERED_ERROR is simulated on successful read and write
135 * commands if SCSI_DEBUG_OPT_RECOVERED_ERR is set.
136 * - a TRANSPORT_ERROR is simulated on successful read and write
137 * commands if SCSI_DEBUG_OPT_TRANSPORT_ERR is set.
138 *
139 * When "every_nth" < 0 then after "- every_nth" commands:
140 * - no response is simulated if SCSI_DEBUG_OPT_TIMEOUT is set
141 * - a RECOVERED_ERROR is simulated on successful read and write
142 * commands if SCSI_DEBUG_OPT_RECOVERED_ERR is set.
143 * - a TRANSPORT_ERROR is simulated on successful read and write
144 * commands if SCSI_DEBUG_OPT_TRANSPORT_ERR is set.
145 * This will continue until some other action occurs (e.g. the user
146 * writing a new value (other than -1 or 1) to every_nth via sysfs).
147 */
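/* Example (illustrative values): with every_nth=100 and SCSI_DEBUG_OPT_TIMEOUT
 * (4) set in scsi_debug_opts, every 100th command gets no response. */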
148
149 /* when the SCSI_DEBUG_OPT_MEDIUM_ERR bit is set in scsi_debug_opts, a medium
150 * error is simulated at this sector on read commands: */
151 #define OPT_MEDIUM_ERR_ADDR 0x1234 /* that's sector 4660 in decimal */
152 #define OPT_MEDIUM_ERR_NUM 10 /* number of consecutive medium errs */
153
154 /* If REPORT LUNS has luns >= 256 it can choose "flat space" (value 1)
155 * or "peripheral device" addressing (value 0) */
156 #define SAM2_LUN_ADDRESS_METHOD 0
157 #define SAM2_WLUN_REPORT_LUNS 0xc101
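/* (for reference: in SAM-2 flat space format, e.g., LUN 260 is reported
 * as 0x4104, i.e. 0x4000 | lun) */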
158
159 /* Can queue up to this number of commands. Typically commands that
160 * have a non-zero delay are queued. */
161 #define SCSI_DEBUG_CANQUEUE 255
162
163 static int scsi_debug_add_host = DEF_NUM_HOST;
164 static int scsi_debug_ato = DEF_ATO;
165 static int scsi_debug_delay = DEF_DELAY;
166 static int scsi_debug_dev_size_mb = DEF_DEV_SIZE_MB;
167 static int scsi_debug_dif = DEF_DIF;
168 static int scsi_debug_dix = DEF_DIX;
169 static int scsi_debug_dsense = DEF_D_SENSE;
170 static int scsi_debug_every_nth = DEF_EVERY_NTH;
171 static int scsi_debug_fake_rw = DEF_FAKE_RW;
172 static int scsi_debug_guard = DEF_GUARD;
173 static int scsi_debug_lowest_aligned = DEF_LOWEST_ALIGNED;
174 static int scsi_debug_max_luns = DEF_MAX_LUNS;
175 static int scsi_debug_max_queue = SCSI_DEBUG_CANQUEUE;
176 static int scsi_debug_no_lun_0 = DEF_NO_LUN_0;
177 static int scsi_debug_no_uld = 0;
178 static int scsi_debug_num_parts = DEF_NUM_PARTS;
179 static int scsi_debug_num_tgts = DEF_NUM_TGTS; /* targets per host */
180 static int scsi_debug_opt_blks = DEF_OPT_BLKS;
181 static int scsi_debug_opts = DEF_OPTS;
182 static int scsi_debug_physblk_exp = DEF_PHYSBLK_EXP;
183 static int scsi_debug_ptype = DEF_PTYPE; /* SCSI peripheral type (0==disk) */
184 static int scsi_debug_scsi_level = DEF_SCSI_LEVEL;
185 static int scsi_debug_sector_size = DEF_SECTOR_SIZE;
186 static int scsi_debug_virtual_gb = DEF_VIRTUAL_GB;
187 static int scsi_debug_vpd_use_hostno = DEF_VPD_USE_HOSTNO;
188 static unsigned int scsi_debug_lbpu = DEF_LBPU;
189 static unsigned int scsi_debug_lbpws = DEF_LBPWS;
190 static unsigned int scsi_debug_lbpws10 = DEF_LBPWS10;
191 static unsigned int scsi_debug_lbprz = DEF_LBPRZ;
192 static unsigned int scsi_debug_unmap_alignment = DEF_UNMAP_ALIGNMENT;
193 static unsigned int scsi_debug_unmap_granularity = DEF_UNMAP_GRANULARITY;
194 static unsigned int scsi_debug_unmap_max_blocks = DEF_UNMAP_MAX_BLOCKS;
195 static unsigned int scsi_debug_unmap_max_desc = DEF_UNMAP_MAX_DESC;
196 static unsigned int scsi_debug_write_same_length = DEF_WRITESAME_LENGTH;
197 static bool scsi_debug_removable = DEF_REMOVABLE;
198
199 static int scsi_debug_cmnd_count = 0;
200
201 #define DEV_READONLY(TGT) (0)
202
203 static unsigned int sdebug_store_sectors;
204 static sector_t sdebug_capacity; /* in sectors */
205
206 /* old BIOS stuff, kernel may get rid of them but some mode sense pages
207 may still need them */
208 static int sdebug_heads; /* heads per disk */
209 static int sdebug_cylinders_per; /* cylinders per surface */
210 static int sdebug_sectors_per; /* sectors per cylinder */
211
212 #define SDEBUG_MAX_PARTS 4
213
214 #define SDEBUG_SENSE_LEN 32
215
216 #define SCSI_DEBUG_MAX_CMD_LEN 32
217
218 static unsigned int scsi_debug_lbp(void)
219 {
220 return scsi_debug_lbpu | scsi_debug_lbpws | scsi_debug_lbpws10;
221 }
222
223 struct sdebug_dev_info {
224 struct list_head dev_list;
225 unsigned char sense_buff[SDEBUG_SENSE_LEN]; /* weak nexus */
226 unsigned int channel;
227 unsigned int target;
228 unsigned int lun;
229 struct sdebug_host_info *sdbg_host;
230 unsigned int wlun;
231 char reset;
232 char stopped;
233 char used;
234 };
235
236 struct sdebug_host_info {
237 struct list_head host_list;
238 struct Scsi_Host *shost;
239 struct device dev;
240 struct list_head dev_info_list;
241 };
242
243 #define to_sdebug_host(d) \
244 container_of(d, struct sdebug_host_info, dev)
245
246 static LIST_HEAD(sdebug_host_list);
247 static DEFINE_SPINLOCK(sdebug_host_list_lock);
248
249 typedef void (* done_funct_t) (struct scsi_cmnd *);
250
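/* commands given a non-zero delay are parked in queued_arr until their
 * response timer fires */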
251 struct sdebug_queued_cmd {
252 int in_use;
253 struct timer_list cmnd_timer;
254 done_funct_t done_funct;
255 struct scsi_cmnd * a_cmnd;
256 int scsi_result;
257 };
258 static struct sdebug_queued_cmd queued_arr[SCSI_DEBUG_CANQUEUE];
259
260 static unsigned char * fake_storep; /* ramdisk storage */
261 static unsigned char *dif_storep; /* protection info */
262 static void *map_storep; /* provisioning map */
263
264 static unsigned long map_size;
265 static int num_aborts = 0;
266 static int num_dev_resets = 0;
267 static int num_bus_resets = 0;
268 static int num_host_resets = 0;
269 static int dix_writes;
270 static int dix_reads;
271 static int dif_errors;
272
273 static DEFINE_SPINLOCK(queued_arr_lock);
274 static DEFINE_RWLOCK(atomic_rw);
275
276 static char sdebug_proc_name[] = "scsi_debug";
277
278 static struct bus_type pseudo_lld_bus;
279
280 static inline sector_t dif_offset(sector_t sector)
281 {
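/* each sector has an 8-byte DIF tuple, so the byte offset into dif_storep is sector * 8 */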
282 return sector << 3;
283 }
284
285 static struct device_driver sdebug_driverfs_driver = {
286 .name = sdebug_proc_name,
287 .bus = &pseudo_lld_bus,
288 };
289
290 static const int check_condition_result =
291 (DRIVER_SENSE << 24) | SAM_STAT_CHECK_CONDITION;
292
293 static const int illegal_condition_result =
294 (DRIVER_SENSE << 24) | (DID_ABORT << 16) | SAM_STAT_CHECK_CONDITION;
295
296 static unsigned char ctrl_m_pg[] = {0xa, 10, 2, 0, 0, 0, 0, 0,
297 0, 0, 0x2, 0x4b};
298 static unsigned char iec_m_pg[] = {0x1c, 0xa, 0x08, 0, 0, 0, 0, 0,
299 0, 0, 0x0, 0x0};
300
301 static int sdebug_add_adapter(void);
302 static void sdebug_remove_adapter(void);
303
304 static void sdebug_max_tgts_luns(void)
305 {
306 struct sdebug_host_info *sdbg_host;
307 struct Scsi_Host *hpnt;
308
309 spin_lock(&sdebug_host_list_lock);
310 list_for_each_entry(sdbg_host, &sdebug_host_list, host_list) {
311 hpnt = sdbg_host->shost;
312 if ((hpnt->this_id >= 0) &&
313 (scsi_debug_num_tgts > hpnt->this_id))
314 hpnt->max_id = scsi_debug_num_tgts + 1;
315 else
316 hpnt->max_id = scsi_debug_num_tgts;
317 /* scsi_debug_max_luns; */
318 hpnt->max_lun = SAM2_WLUN_REPORT_LUNS;
319 }
320 spin_unlock(&sdebug_host_list_lock);
321 }
322
323 static void mk_sense_buffer(struct sdebug_dev_info *devip, int key,
324 int asc, int asq)
325 {
326 unsigned char *sbuff;
327
328 sbuff = devip->sense_buff;
329 memset(sbuff, 0, SDEBUG_SENSE_LEN);
330
331 scsi_build_sense_buffer(scsi_debug_dsense, sbuff, key, asc, asq);
332
333 if (SCSI_DEBUG_OPT_NOISE & scsi_debug_opts)
334 printk(KERN_INFO "scsi_debug: [sense_key,asc,ascq]: "
335 "[0x%x,0x%x,0x%x]\n", key, asc, asq);
336 }
337
338 static void get_data_transfer_info(unsigned char *cmd,
339 unsigned long long *lba, unsigned int *num,
340 u32 *ei_lba)
341 {
342 *ei_lba = 0;
343
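/* decode the starting LBA, block count and (for 32-byte CDBs) the expected
 * initial LBA from the big-endian CDB fields */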
344 switch (*cmd) {
345 case VARIABLE_LENGTH_CMD:
346 *lba = (u64)cmd[19] | (u64)cmd[18] << 8 |
347 (u64)cmd[17] << 16 | (u64)cmd[16] << 24 |
348 (u64)cmd[15] << 32 | (u64)cmd[14] << 40 |
349 (u64)cmd[13] << 48 | (u64)cmd[12] << 56;
350
351 *ei_lba = (u32)cmd[23] | (u32)cmd[22] << 8 |
352 (u32)cmd[21] << 16 | (u32)cmd[20] << 24;
353
354 *num = (u32)cmd[31] | (u32)cmd[30] << 8 | (u32)cmd[29] << 16 |
355 (u32)cmd[28] << 24;
356 break;
357
358 case WRITE_SAME_16:
359 case WRITE_16:
360 case READ_16:
361 *lba = (u64)cmd[9] | (u64)cmd[8] << 8 |
362 (u64)cmd[7] << 16 | (u64)cmd[6] << 24 |
363 (u64)cmd[5] << 32 | (u64)cmd[4] << 40 |
364 (u64)cmd[3] << 48 | (u64)cmd[2] << 56;
365
366 *num = (u32)cmd[13] | (u32)cmd[12] << 8 | (u32)cmd[11] << 16 |
367 (u32)cmd[10] << 24;
368 break;
369 case WRITE_12:
370 case READ_12:
371 *lba = (u32)cmd[5] | (u32)cmd[4] << 8 | (u32)cmd[3] << 16 |
372 (u32)cmd[2] << 24;
373
374 *num = (u32)cmd[9] | (u32)cmd[8] << 8 | (u32)cmd[7] << 16 |
375 (u32)cmd[6] << 24;
376 break;
377 case WRITE_SAME:
378 case WRITE_10:
379 case READ_10:
380 case XDWRITEREAD_10:
381 *lba = (u32)cmd[5] | (u32)cmd[4] << 8 | (u32)cmd[3] << 16 |
382 (u32)cmd[2] << 24;
383
384 *num = (u32)cmd[8] | (u32)cmd[7] << 8;
385 break;
386 case WRITE_6:
387 case READ_6:
388 *lba = (u32)cmd[3] | (u32)cmd[2] << 8 |
389 (u32)(cmd[1] & 0x1f) << 16;
390 *num = (0 == cmd[4]) ? 256 : cmd[4];
391 break;
392 default:
393 break;
394 }
395 }
396
397 static int scsi_debug_ioctl(struct scsi_device *dev, int cmd, void __user *arg)
398 {
399 if (SCSI_DEBUG_OPT_NOISE & scsi_debug_opts) {
400 printk(KERN_INFO "scsi_debug: ioctl: cmd=0x%x\n", cmd);
401 }
402 return -EINVAL;
403 /* return -ENOTTY; // correct return but upsets fdisk */
404 }
405
406 static int check_readiness(struct scsi_cmnd * SCpnt, int reset_only,
407 struct sdebug_dev_info * devip)
408 {
409 if (devip->reset) {
410 if (SCSI_DEBUG_OPT_NOISE & scsi_debug_opts)
411 printk(KERN_INFO "scsi_debug: Reporting Unit "
412 "attention: power on reset\n");
413 devip->reset = 0;
414 mk_sense_buffer(devip, UNIT_ATTENTION, POWERON_RESET, 0);
415 return check_condition_result;
416 }
417 if ((0 == reset_only) && devip->stopped) {
418 if (SCSI_DEBUG_OPT_NOISE & scsi_debug_opts)
419 printk(KERN_INFO "scsi_debug: Reporting Not "
420 "ready: initializing command required\n");
421 mk_sense_buffer(devip, NOT_READY, LOGICAL_UNIT_NOT_READY,
422 0x2);
423 return check_condition_result;
424 }
425 return 0;
426 }
427
428 /* Returns 0 if ok else (DID_ERROR << 16). Sets scp->resid . */
429 static int fill_from_dev_buffer(struct scsi_cmnd *scp, unsigned char *arr,
430 int arr_len)
431 {
432 int act_len;
433 struct scsi_data_buffer *sdb = scsi_in(scp);
434
435 if (!sdb->length)
436 return 0;
437 if (!(scsi_bidi_cmnd(scp) || scp->sc_data_direction == DMA_FROM_DEVICE))
438 return (DID_ERROR << 16);
439
440 act_len = sg_copy_from_buffer(sdb->table.sgl, sdb->table.nents,
441 arr, arr_len);
442 if (sdb->resid)
443 sdb->resid -= act_len;
444 else
445 sdb->resid = scsi_bufflen(scp) - act_len;
446
447 return 0;
448 }
449
450 /* Returns number of bytes fetched into 'arr' or -1 if error. */
451 static int fetch_to_dev_buffer(struct scsi_cmnd *scp, unsigned char *arr,
452 int arr_len)
453 {
454 if (!scsi_bufflen(scp))
455 return 0;
456 if (!(scsi_bidi_cmnd(scp) || scp->sc_data_direction == DMA_TO_DEVICE))
457 return -1;
458
459 return scsi_sg_copy_to_buffer(scp, arr, arr_len);
460 }
461
462
463 static const char * inq_vendor_id = "Linux ";
464 static const char * inq_product_id = "scsi_debug ";
465 static const char * inq_product_rev = "0004";
466
467 static int inquiry_evpd_83(unsigned char * arr, int port_group_id,
468 int target_dev_id, int dev_id_num,
469 const char * dev_id_str,
470 int dev_id_str_len)
471 {
472 int num, port_a;
473 char b[32];
474
475 port_a = target_dev_id + 1;
476 /* T10 vendor identifier field format (faked) */
477 arr[0] = 0x2; /* ASCII */
478 arr[1] = 0x1;
479 arr[2] = 0x0;
480 memcpy(&arr[4], inq_vendor_id, 8);
481 memcpy(&arr[12], inq_product_id, 16);
482 memcpy(&arr[28], dev_id_str, dev_id_str_len);
483 num = 8 + 16 + dev_id_str_len;
484 arr[3] = num;
485 num += 4;
486 if (dev_id_num >= 0) {
487 /* NAA-5, Logical unit identifier (binary) */
488 arr[num++] = 0x1; /* binary (not necessarily sas) */
489 arr[num++] = 0x3; /* PIV=0, lu, naa */
490 arr[num++] = 0x0;
491 arr[num++] = 0x8;
492 arr[num++] = 0x53; /* naa-5 ieee company id=0x333333 (fake) */
493 arr[num++] = 0x33;
494 arr[num++] = 0x33;
495 arr[num++] = 0x30;
496 arr[num++] = (dev_id_num >> 24);
497 arr[num++] = (dev_id_num >> 16) & 0xff;
498 arr[num++] = (dev_id_num >> 8) & 0xff;
499 arr[num++] = dev_id_num & 0xff;
500 /* Target relative port number */
501 arr[num++] = 0x61; /* proto=sas, binary */
502 arr[num++] = 0x94; /* PIV=1, target port, rel port */
503 arr[num++] = 0x0; /* reserved */
504 arr[num++] = 0x4; /* length */
505 arr[num++] = 0x0; /* reserved */
506 arr[num++] = 0x0; /* reserved */
507 arr[num++] = 0x0;
508 arr[num++] = 0x1; /* relative port A */
509 }
510 /* NAA-5, Target port identifier */
511 arr[num++] = 0x61; /* proto=sas, binary */
512 arr[num++] = 0x93; /* piv=1, target port, naa */
513 arr[num++] = 0x0;
514 arr[num++] = 0x8;
515 arr[num++] = 0x52; /* naa-5, company id=0x222222 (fake) */
516 arr[num++] = 0x22;
517 arr[num++] = 0x22;
518 arr[num++] = 0x20;
519 arr[num++] = (port_a >> 24);
520 arr[num++] = (port_a >> 16) & 0xff;
521 arr[num++] = (port_a >> 8) & 0xff;
522 arr[num++] = port_a & 0xff;
523 /* NAA-5, Target port group identifier */
524 arr[num++] = 0x61; /* proto=sas, binary */
525 arr[num++] = 0x95; /* piv=1, target port group id */
526 arr[num++] = 0x0;
527 arr[num++] = 0x4;
528 arr[num++] = 0;
529 arr[num++] = 0;
530 arr[num++] = (port_group_id >> 8) & 0xff;
531 arr[num++] = port_group_id & 0xff;
532 /* NAA-5, Target device identifier */
533 arr[num++] = 0x61; /* proto=sas, binary */
534 arr[num++] = 0xa3; /* piv=1, target device, naa */
535 arr[num++] = 0x0;
536 arr[num++] = 0x8;
537 arr[num++] = 0x52; /* naa-5, company id=0x222222 (fake) */
538 arr[num++] = 0x22;
539 arr[num++] = 0x22;
540 arr[num++] = 0x20;
541 arr[num++] = (target_dev_id >> 24);
542 arr[num++] = (target_dev_id >> 16) & 0xff;
543 arr[num++] = (target_dev_id >> 8) & 0xff;
544 arr[num++] = target_dev_id & 0xff;
545 /* SCSI name string: Target device identifier */
546 arr[num++] = 0x63; /* proto=sas, UTF-8 */
547 arr[num++] = 0xa8; /* piv=1, target device, SCSI name string */
548 arr[num++] = 0x0;
549 arr[num++] = 24;
550 memcpy(arr + num, "naa.52222220", 12);
551 num += 12;
552 snprintf(b, sizeof(b), "%08X", target_dev_id);
553 memcpy(arr + num, b, 8);
554 num += 8;
555 memset(arr + num, 0, 4);
556 num += 4;
557 return num;
558 }
559
560
561 static unsigned char vpd84_data[] = {
562 /* from 4th byte */ 0x22,0x22,0x22,0x0,0xbb,0x0,
563 0x22,0x22,0x22,0x0,0xbb,0x1,
564 0x22,0x22,0x22,0x0,0xbb,0x2,
565 };
566
567 static int inquiry_evpd_84(unsigned char * arr)
568 {
569 memcpy(arr, vpd84_data, sizeof(vpd84_data));
570 return sizeof(vpd84_data);
571 }
572
573 static int inquiry_evpd_85(unsigned char * arr)
574 {
575 int num = 0;
576 const char * na1 = "https://www.kernel.org/config";
577 const char * na2 = "http://www.kernel.org/log";
578 int plen, olen;
579
580 arr[num++] = 0x1; /* lu, storage config */
581 arr[num++] = 0x0; /* reserved */
582 arr[num++] = 0x0;
583 olen = strlen(na1);
584 plen = olen + 1;
585 if (plen % 4)
586 plen = ((plen / 4) + 1) * 4;
587 arr[num++] = plen; /* length, null terminated, padded */
588 memcpy(arr + num, na1, olen);
589 memset(arr + num + olen, 0, plen - olen);
590 num += plen;
591
592 arr[num++] = 0x4; /* lu, logging */
593 arr[num++] = 0x0; /* reserved */
594 arr[num++] = 0x0;
595 olen = strlen(na2);
596 plen = olen + 1;
597 if (plen % 4)
598 plen = ((plen / 4) + 1) * 4;
599 arr[num++] = plen; /* length, null terminated, padded */
600 memcpy(arr + num, na2, olen);
601 memset(arr + num + olen, 0, plen - olen);
602 num += plen;
603
604 return num;
605 }
606
607 /* SCSI ports VPD page */
608 static int inquiry_evpd_88(unsigned char * arr, int target_dev_id)
609 {
610 int num = 0;
611 int port_a, port_b;
612
613 port_a = target_dev_id + 1;
614 port_b = port_a + 1;
615 arr[num++] = 0x0; /* reserved */
616 arr[num++] = 0x0; /* reserved */
617 arr[num++] = 0x0;
618 arr[num++] = 0x1; /* relative port 1 (primary) */
619 memset(arr + num, 0, 6);
620 num += 6;
621 arr[num++] = 0x0;
622 arr[num++] = 12; /* length tp descriptor */
623 /* naa-5 target port identifier (A) */
624 arr[num++] = 0x61; /* proto=sas, binary */
625 arr[num++] = 0x93; /* PIV=1, target port, NAA */
626 arr[num++] = 0x0; /* reserved */
627 arr[num++] = 0x8; /* length */
628 arr[num++] = 0x52; /* NAA-5, company_id=0x222222 (fake) */
629 arr[num++] = 0x22;
630 arr[num++] = 0x22;
631 arr[num++] = 0x20;
632 arr[num++] = (port_a >> 24);
633 arr[num++] = (port_a >> 16) & 0xff;
634 arr[num++] = (port_a >> 8) & 0xff;
635 arr[num++] = port_a & 0xff;
636
637 arr[num++] = 0x0; /* reserved */
638 arr[num++] = 0x0; /* reserved */
639 arr[num++] = 0x0;
640 arr[num++] = 0x2; /* relative port 2 (secondary) */
641 memset(arr + num, 0, 6);
642 num += 6;
643 arr[num++] = 0x0;
644 arr[num++] = 12; /* length tp descriptor */
645 /* naa-5 target port identifier (B) */
646 arr[num++] = 0x61; /* proto=sas, binary */
647 arr[num++] = 0x93; /* PIV=1, target port, NAA */
648 arr[num++] = 0x0; /* reserved */
649 arr[num++] = 0x8; /* length */
650 arr[num++] = 0x52; /* NAA-5, company_id=0x222222 (fake) */
651 arr[num++] = 0x22;
652 arr[num++] = 0x22;
653 arr[num++] = 0x20;
654 arr[num++] = (port_b >> 24);
655 arr[num++] = (port_b >> 16) & 0xff;
656 arr[num++] = (port_b >> 8) & 0xff;
657 arr[num++] = port_b & 0xff;
658
659 return num;
660 }
661
662
663 static unsigned char vpd89_data[] = {
664 /* from 4th byte */ 0,0,0,0,
665 'l','i','n','u','x',' ',' ',' ',
666 'S','A','T',' ','s','c','s','i','_','d','e','b','u','g',' ',' ',
667 '1','2','3','4',
668 0x34,0,0,0,1,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,
669 0xec,0,0,0,
670 0x5a,0xc,0xff,0x3f,0x37,0xc8,0x10,0,0,0,0,0,0x3f,0,0,0,
671 0,0,0,0,0x58,0x58,0x58,0x58,0x58,0x58,0x58,0x58,0x20,0x20,0x20,0x20,
672 0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0,0,0,0x40,0x4,0,0x2e,0x33,
673 0x38,0x31,0x20,0x20,0x20,0x20,0x54,0x53,0x38,0x33,0x30,0x30,0x33,0x31,
674 0x53,0x41,
675 0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,
676 0x20,0x20,
677 0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,
678 0x10,0x80,
679 0,0,0,0x2f,0,0,0,0x2,0,0x2,0x7,0,0xff,0xff,0x1,0,
680 0x3f,0,0xc1,0xff,0x3e,0,0x10,0x1,0xb0,0xf8,0x50,0x9,0,0,0x7,0,
681 0x3,0,0x78,0,0x78,0,0xf0,0,0x78,0,0,0,0,0,0,0,
682 0,0,0,0,0,0,0,0,0x2,0,0,0,0,0,0,0,
683 0x7e,0,0x1b,0,0x6b,0x34,0x1,0x7d,0x3,0x40,0x69,0x34,0x1,0x3c,0x3,0x40,
684 0x7f,0x40,0,0,0,0,0xfe,0xfe,0,0,0,0,0,0xfe,0,0,
685 0,0,0,0,0,0,0,0,0xb0,0xf8,0x50,0x9,0,0,0,0,
686 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
687 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
688 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
689 0x1,0,0xb0,0xf8,0x50,0x9,0xb0,0xf8,0x50,0x9,0x20,0x20,0x2,0,0xb6,0x42,
690 0,0x80,0x8a,0,0x6,0x3c,0xa,0x3c,0xff,0xff,0xc6,0x7,0,0x1,0,0x8,
691 0xf0,0xf,0,0x10,0x2,0,0x30,0,0,0,0,0,0,0,0x6,0xfe,
692 0,0,0x2,0,0x50,0,0x8a,0,0x4f,0x95,0,0,0x21,0,0xb,0,
693 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
694 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
695 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
696 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
697 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
698 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
699 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
700 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
701 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
702 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
703 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
704 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0xa5,0x51,
705 };
706
707 static int inquiry_evpd_89(unsigned char * arr)
708 {
709 memcpy(arr, vpd89_data, sizeof(vpd89_data));
710 return sizeof(vpd89_data);
711 }
712
713
714 /* Block limits VPD page (SBC-3) */
715 static unsigned char vpdb0_data[] = {
716 /* from 4th byte */ 0,0,0,4, 0,0,0x4,0, 0,0,0,64,
717 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
718 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
719 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
720 };
721
722 static int inquiry_evpd_b0(unsigned char * arr)
723 {
724 unsigned int gran;
725
726 memcpy(arr, vpdb0_data, sizeof(vpdb0_data));
727
728 /* Optimal transfer length granularity */
729 gran = 1 << scsi_debug_physblk_exp;
730 arr[2] = (gran >> 8) & 0xff;
731 arr[3] = gran & 0xff;
732
733 /* Maximum Transfer Length */
734 if (sdebug_store_sectors > 0x400) {
735 arr[4] = (sdebug_store_sectors >> 24) & 0xff;
736 arr[5] = (sdebug_store_sectors >> 16) & 0xff;
737 arr[6] = (sdebug_store_sectors >> 8) & 0xff;
738 arr[7] = sdebug_store_sectors & 0xff;
739 }
740
741 /* Optimal Transfer Length */
742 put_unaligned_be32(scsi_debug_opt_blks, &arr[8]);
743
744 if (scsi_debug_lbpu) {
745 /* Maximum Unmap LBA Count */
746 put_unaligned_be32(scsi_debug_unmap_max_blocks, &arr[16]);
747
748 /* Maximum Unmap Block Descriptor Count */
749 put_unaligned_be32(scsi_debug_unmap_max_desc, &arr[20]);
750 }
751
752 /* Unmap Granularity Alignment */
753 if (scsi_debug_unmap_alignment) {
754 put_unaligned_be32(scsi_debug_unmap_alignment, &arr[28]);
755 arr[28] |= 0x80; /* UGAVALID */
756 }
757
758 /* Optimal Unmap Granularity */
759 put_unaligned_be32(scsi_debug_unmap_granularity, &arr[24]);
760
761 /* Maximum WRITE SAME Length */
762 put_unaligned_be64(scsi_debug_write_same_length, &arr[32]);
763
764 return 0x3c; /* Mandatory page length for Logical Block Provisioning */
767 }
768
769 /* Block device characteristics VPD page (SBC-3) */
770 static int inquiry_evpd_b1(unsigned char *arr)
771 {
772 memset(arr, 0, 0x3c);
773 arr[0] = 0;
774 arr[1] = 1; /* non rotating medium (e.g. solid state) */
775 arr[2] = 0;
776 arr[3] = 5; /* less than 1.8" */
777
778 return 0x3c;
779 }
780
781 /* Logical block provisioning VPD page (SBC-3) */
782 static int inquiry_evpd_b2(unsigned char *arr)
783 {
784 memset(arr, 0, 0x4);
785 arr[0] = 0; /* threshold exponent */
786
787 if (scsi_debug_lbpu)
788 arr[1] = 1 << 7;
789
790 if (scsi_debug_lbpws)
791 arr[1] |= 1 << 6;
792
793 if (scsi_debug_lbpws10)
794 arr[1] |= 1 << 5;
795
796 if (scsi_debug_lbprz)
797 arr[1] |= 1 << 2;
798
799 return 0x4;
800 }
801
802 #define SDEBUG_LONG_INQ_SZ 96
803 #define SDEBUG_MAX_INQ_ARR_SZ 584
804
805 static int resp_inquiry(struct scsi_cmnd * scp, int target,
806 struct sdebug_dev_info * devip)
807 {
808 unsigned char pq_pdt;
809 unsigned char * arr;
810 unsigned char *cmd = (unsigned char *)scp->cmnd;
811 int alloc_len, n, ret;
812
813 alloc_len = (cmd[3] << 8) + cmd[4];
814 arr = kzalloc(SDEBUG_MAX_INQ_ARR_SZ, GFP_ATOMIC);
815 if (! arr)
816 return DID_REQUEUE << 16;
817 if (devip->wlun)
818 pq_pdt = 0x1e; /* present, wlun */
819 else if (scsi_debug_no_lun_0 && (0 == devip->lun))
820 pq_pdt = 0x7f; /* not present, no device type */
821 else
822 pq_pdt = (scsi_debug_ptype & 0x1f);
823 arr[0] = pq_pdt;
824 if (0x2 & cmd[1]) { /* CMDDT bit set */
825 mk_sense_buffer(devip, ILLEGAL_REQUEST, INVALID_FIELD_IN_CDB,
826 0);
827 kfree(arr);
828 return check_condition_result;
829 } else if (0x1 & cmd[1]) { /* EVPD bit set */
830 int lu_id_num, port_group_id, target_dev_id, len;
831 char lu_id_str[6];
832 int host_no = devip->sdbg_host->shost->host_no;
833
834 port_group_id = (((host_no + 1) & 0x7f) << 8) +
835 (devip->channel & 0x7f);
836 if (0 == scsi_debug_vpd_use_hostno)
837 host_no = 0;
838 lu_id_num = devip->wlun ? -1 : (((host_no + 1) * 2000) +
839 (devip->target * 1000) + devip->lun);
840 target_dev_id = ((host_no + 1) * 2000) +
841 (devip->target * 1000) - 3;
842 len = scnprintf(lu_id_str, 6, "%d", lu_id_num);
843 if (0 == cmd[2]) { /* supported vital product data pages */
844 arr[1] = cmd[2]; /*sanity */
845 n = 4;
846 arr[n++] = 0x0; /* this page */
847 arr[n++] = 0x80; /* unit serial number */
848 arr[n++] = 0x83; /* device identification */
849 arr[n++] = 0x84; /* software interface ident. */
850 arr[n++] = 0x85; /* management network addresses */
851 arr[n++] = 0x86; /* extended inquiry */
852 arr[n++] = 0x87; /* mode page policy */
853 arr[n++] = 0x88; /* SCSI ports */
854 arr[n++] = 0x89; /* ATA information */
855 arr[n++] = 0xb0; /* Block limits (SBC) */
856 arr[n++] = 0xb1; /* Block characteristics (SBC) */
857 if (scsi_debug_lbp()) /* Logical Block Prov. (SBC) */
858 arr[n++] = 0xb2;
859 arr[3] = n - 4; /* number of supported VPD pages */
860 } else if (0x80 == cmd[2]) { /* unit serial number */
861 arr[1] = cmd[2]; /*sanity */
862 arr[3] = len;
863 memcpy(&arr[4], lu_id_str, len);
864 } else if (0x83 == cmd[2]) { /* device identification */
865 arr[1] = cmd[2]; /*sanity */
866 arr[3] = inquiry_evpd_83(&arr[4], port_group_id,
867 target_dev_id, lu_id_num,
868 lu_id_str, len);
869 } else if (0x84 == cmd[2]) { /* Software interface ident. */
870 arr[1] = cmd[2]; /*sanity */
871 arr[3] = inquiry_evpd_84(&arr[4]);
872 } else if (0x85 == cmd[2]) { /* Management network addresses */
873 arr[1] = cmd[2]; /*sanity */
874 arr[3] = inquiry_evpd_85(&arr[4]);
875 } else if (0x86 == cmd[2]) { /* extended inquiry */
876 arr[1] = cmd[2]; /*sanity */
877 arr[3] = 0x3c; /* number of following entries */
878 if (scsi_debug_dif == SD_DIF_TYPE3_PROTECTION)
879 arr[4] = 0x4; /* SPT: GRD_CHK:1 */
880 else if (scsi_debug_dif)
881 arr[4] = 0x5; /* SPT: GRD_CHK:1, REF_CHK:1 */
882 else
883 arr[4] = 0x0; /* no protection stuff */
884 arr[5] = 0x7; /* head of q, ordered + simple q's */
885 } else if (0x87 == cmd[2]) { /* mode page policy */
886 arr[1] = cmd[2]; /*sanity */
887 arr[3] = 0x8; /* number of following entries */
888 arr[4] = 0x2; /* disconnect-reconnect mp */
889 arr[6] = 0x80; /* mlus, shared */
890 arr[8] = 0x18; /* protocol specific lu */
891 arr[10] = 0x82; /* mlus, per initiator port */
892 } else if (0x88 == cmd[2]) { /* SCSI Ports */
893 arr[1] = cmd[2]; /*sanity */
894 arr[3] = inquiry_evpd_88(&arr[4], target_dev_id);
895 } else if (0x89 == cmd[2]) { /* ATA information */
896 arr[1] = cmd[2]; /*sanity */
897 n = inquiry_evpd_89(&arr[4]);
898 arr[2] = (n >> 8);
899 arr[3] = (n & 0xff);
900 } else if (0xb0 == cmd[2]) { /* Block limits (SBC) */
901 arr[1] = cmd[2]; /*sanity */
902 arr[3] = inquiry_evpd_b0(&arr[4]);
903 } else if (0xb1 == cmd[2]) { /* Block characteristics (SBC) */
904 arr[1] = cmd[2]; /*sanity */
905 arr[3] = inquiry_evpd_b1(&arr[4]);
906 } else if (0xb2 == cmd[2]) { /* Logical Block Prov. (SBC) */
907 arr[1] = cmd[2]; /*sanity */
908 arr[3] = inquiry_evpd_b2(&arr[4]);
909 } else {
910 /* Illegal request, invalid field in cdb */
911 mk_sense_buffer(devip, ILLEGAL_REQUEST,
912 INVALID_FIELD_IN_CDB, 0);
913 kfree(arr);
914 return check_condition_result;
915 }
916 len = min(((arr[2] << 8) + arr[3]) + 4, alloc_len);
917 ret = fill_from_dev_buffer(scp, arr,
918 min(len, SDEBUG_MAX_INQ_ARR_SZ));
919 kfree(arr);
920 return ret;
921 }
922 /* drops through here for a standard inquiry */
923 arr[1] = scsi_debug_removable ? 0x80 : 0; /* Removable disk */
924 arr[2] = scsi_debug_scsi_level;
925 arr[3] = 2; /* response_data_format==2 */
926 arr[4] = SDEBUG_LONG_INQ_SZ - 5;
927 arr[5] = scsi_debug_dif ? 1 : 0; /* PROTECT bit */
928 if (0 == scsi_debug_vpd_use_hostno)
929 arr[5] = 0x10; /* claim: implicit TGPS */
930 arr[6] = 0x10; /* claim: MultiP */
931 /* arr[6] |= 0x40; ... claim: EncServ (enclosure services) */
932 arr[7] = 0xa; /* claim: LINKED + CMDQUE */
933 memcpy(&arr[8], inq_vendor_id, 8);
934 memcpy(&arr[16], inq_product_id, 16);
935 memcpy(&arr[32], inq_product_rev, 4);
936 /* version descriptors (2 bytes each) follow */
937 arr[58] = 0x0; arr[59] = 0x77; /* SAM-3 ANSI */
938 arr[60] = 0x3; arr[61] = 0x14; /* SPC-3 ANSI */
939 n = 62;
940 if (scsi_debug_ptype == 0) {
941 arr[n++] = 0x3; arr[n++] = 0x3d; /* SBC-2 ANSI */
942 } else if (scsi_debug_ptype == 1) {
943 arr[n++] = 0x3; arr[n++] = 0x60; /* SSC-2 no version */
944 }
945 arr[n++] = 0xc; arr[n++] = 0xf; /* SAS-1.1 rev 10 */
946 ret = fill_from_dev_buffer(scp, arr,
947 min(alloc_len, SDEBUG_LONG_INQ_SZ));
948 kfree(arr);
949 return ret;
950 }
951
952 static int resp_requests(struct scsi_cmnd * scp,
953 struct sdebug_dev_info * devip)
954 {
955 unsigned char * sbuff;
956 unsigned char *cmd = (unsigned char *)scp->cmnd;
957 unsigned char arr[SDEBUG_SENSE_LEN];
958 int want_dsense;
959 int len = 18;
960
961 memset(arr, 0, sizeof(arr));
962 if (devip->reset == 1)
963 mk_sense_buffer(devip, 0, NO_ADDITIONAL_SENSE, 0);
964 want_dsense = !!(cmd[1] & 1) || scsi_debug_dsense;
965 sbuff = devip->sense_buff;
966 if ((iec_m_pg[2] & 0x4) && (6 == (iec_m_pg[3] & 0xf))) {
967 if (want_dsense) {
968 arr[0] = 0x72;
969 arr[1] = 0x0; /* NO_SENSE in sense_key */
970 arr[2] = THRESHOLD_EXCEEDED;
971 arr[3] = 0xff; /* TEST set and MRIE==6 */
972 } else {
973 arr[0] = 0x70;
974 arr[2] = 0x0; /* NO_SENSE in sense_key */
975 arr[7] = 0xa; /* 18 byte sense buffer */
976 arr[12] = THRESHOLD_EXCEEDED;
977 arr[13] = 0xff; /* TEST set and MRIE==6 */
978 }
979 } else {
980 memcpy(arr, sbuff, SDEBUG_SENSE_LEN);
981 if ((cmd[1] & 1) && (! scsi_debug_dsense)) {
982 /* DESC bit set and sense_buff in fixed format */
983 memset(arr, 0, sizeof(arr));
984 arr[0] = 0x72;
985 arr[1] = sbuff[2]; /* sense key */
986 arr[2] = sbuff[12]; /* asc */
987 arr[3] = sbuff[13]; /* ascq */
988 len = 8;
989 }
990 }
991 mk_sense_buffer(devip, 0, NO_ADDITIONAL_SENSE, 0);
992 return fill_from_dev_buffer(scp, arr, len);
993 }
994
995 static int resp_start_stop(struct scsi_cmnd * scp,
996 struct sdebug_dev_info * devip)
997 {
998 unsigned char *cmd = (unsigned char *)scp->cmnd;
999 int power_cond, errsts, start;
1000
1001 if ((errsts = check_readiness(scp, 1, devip)))
1002 return errsts;
1003 power_cond = (cmd[4] & 0xf0) >> 4;
1004 if (power_cond) {
1005 mk_sense_buffer(devip, ILLEGAL_REQUEST, INVALID_FIELD_IN_CDB,
1006 0);
1007 return check_condition_result;
1008 }
1009 start = cmd[4] & 1;
1010 if (start == devip->stopped)
1011 devip->stopped = !start;
1012 return 0;
1013 }
1014
1015 static sector_t get_sdebug_capacity(void)
1016 {
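/* virtual_gb lets the advertised capacity exceed the RAM backing store;
 * do_device_access() wraps accesses modulo sdebug_store_sectors */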
1017 if (scsi_debug_virtual_gb > 0)
1018 return (sector_t)scsi_debug_virtual_gb *
1019 (1073741824 / scsi_debug_sector_size);
1020 else
1021 return sdebug_store_sectors;
1022 }
1023
1024 #define SDEBUG_READCAP_ARR_SZ 8
1025 static int resp_readcap(struct scsi_cmnd * scp,
1026 struct sdebug_dev_info * devip)
1027 {
1028 unsigned char arr[SDEBUG_READCAP_ARR_SZ];
1029 unsigned int capac;
1030 int errsts;
1031
1032 if ((errsts = check_readiness(scp, 1, devip)))
1033 return errsts;
1034 /* following just in case virtual_gb changed */
1035 sdebug_capacity = get_sdebug_capacity();
1036 memset(arr, 0, SDEBUG_READCAP_ARR_SZ);
1037 if (sdebug_capacity < 0xffffffff) {
1038 capac = (unsigned int)sdebug_capacity - 1;
1039 arr[0] = (capac >> 24);
1040 arr[1] = (capac >> 16) & 0xff;
1041 arr[2] = (capac >> 8) & 0xff;
1042 arr[3] = capac & 0xff;
1043 } else {
1044 arr[0] = 0xff;
1045 arr[1] = 0xff;
1046 arr[2] = 0xff;
1047 arr[3] = 0xff;
1048 }
1049 arr[6] = (scsi_debug_sector_size >> 8) & 0xff;
1050 arr[7] = scsi_debug_sector_size & 0xff;
1051 return fill_from_dev_buffer(scp, arr, SDEBUG_READCAP_ARR_SZ);
1052 }
1053
1054 #define SDEBUG_READCAP16_ARR_SZ 32
1055 static int resp_readcap16(struct scsi_cmnd * scp,
1056 struct sdebug_dev_info * devip)
1057 {
1058 unsigned char *cmd = (unsigned char *)scp->cmnd;
1059 unsigned char arr[SDEBUG_READCAP16_ARR_SZ];
1060 unsigned long long capac;
1061 int errsts, k, alloc_len;
1062
1063 if ((errsts = check_readiness(scp, 1, devip)))
1064 return errsts;
1065 alloc_len = ((cmd[10] << 24) + (cmd[11] << 16) + (cmd[12] << 8)
1066 + cmd[13]);
1067 /* following just in case virtual_gb changed */
1068 sdebug_capacity = get_sdebug_capacity();
1069 memset(arr, 0, SDEBUG_READCAP16_ARR_SZ);
1070 capac = sdebug_capacity - 1;
1071 for (k = 0; k < 8; ++k, capac >>= 8)
1072 arr[7 - k] = capac & 0xff;
1073 arr[8] = (scsi_debug_sector_size >> 24) & 0xff;
1074 arr[9] = (scsi_debug_sector_size >> 16) & 0xff;
1075 arr[10] = (scsi_debug_sector_size >> 8) & 0xff;
1076 arr[11] = scsi_debug_sector_size & 0xff;
1077 arr[13] = scsi_debug_physblk_exp & 0xf;
1078 arr[14] = (scsi_debug_lowest_aligned >> 8) & 0x3f;
1079
1080 if (scsi_debug_lbp()) {
1081 arr[14] |= 0x80; /* LBPME */
1082 if (scsi_debug_lbprz)
1083 arr[14] |= 0x40; /* LBPRZ */
1084 }
1085
1086 arr[15] = scsi_debug_lowest_aligned & 0xff;
1087
1088 if (scsi_debug_dif) {
1089 arr[12] = (scsi_debug_dif - 1) << 1; /* P_TYPE */
1090 arr[12] |= 1; /* PROT_EN */
1091 }
1092
1093 return fill_from_dev_buffer(scp, arr,
1094 min(alloc_len, SDEBUG_READCAP16_ARR_SZ));
1095 }
1096
1097 #define SDEBUG_MAX_TGTPGS_ARR_SZ 1412
1098
1099 static int resp_report_tgtpgs(struct scsi_cmnd * scp,
1100 struct sdebug_dev_info * devip)
1101 {
1102 unsigned char *cmd = (unsigned char *)scp->cmnd;
1103 unsigned char * arr;
1104 int host_no = devip->sdbg_host->shost->host_no;
1105 int n, ret, alen, rlen;
1106 int port_group_a, port_group_b, port_a, port_b;
1107
1108 alen = ((cmd[6] << 24) + (cmd[7] << 16) + (cmd[8] << 8)
1109 + cmd[9]);
1110
1111 arr = kzalloc(SDEBUG_MAX_TGTPGS_ARR_SZ, GFP_ATOMIC);
1112 if (! arr)
1113 return DID_REQUEUE << 16;
1114 /*
1115 * EVPD page 0x88 states we have two ports, one
1116 * real and a fake port with no device connected.
1117 * So we create two port groups with one port each
1118 * and set the group with port B to unavailable.
1119 */
1120 port_a = 0x1; /* relative port A */
1121 port_b = 0x2; /* relative port B */
1122 port_group_a = (((host_no + 1) & 0x7f) << 8) +
1123 (devip->channel & 0x7f);
1124 port_group_b = (((host_no + 1) & 0x7f) << 8) +
1125 (devip->channel & 0x7f) + 0x80;
1126
1127 /*
1128 * The asymmetric access state is cycled according to the host_id.
1129 */
1130 n = 4;
1131 if (0 == scsi_debug_vpd_use_hostno) {
1132 arr[n++] = host_no % 3; /* Asymm access state */
1133 arr[n++] = 0x0F; /* claim: all states are supported */
1134 } else {
1135 arr[n++] = 0x0; /* Active/Optimized path */
1136 arr[n++] = 0x01; /* claim: only active/optimized paths are supported */
1137 }
1138 arr[n++] = (port_group_a >> 8) & 0xff;
1139 arr[n++] = port_group_a & 0xff;
1140 arr[n++] = 0; /* Reserved */
1141 arr[n++] = 0; /* Status code */
1142 arr[n++] = 0; /* Vendor unique */
1143 arr[n++] = 0x1; /* One port per group */
1144 arr[n++] = 0; /* Reserved */
1145 arr[n++] = 0; /* Reserved */
1146 arr[n++] = (port_a >> 8) & 0xff;
1147 arr[n++] = port_a & 0xff;
1148 arr[n++] = 3; /* Port unavailable */
1149 arr[n++] = 0x08; /* claim: only unavailable paths are supported */
1150 arr[n++] = (port_group_b >> 8) & 0xff;
1151 arr[n++] = port_group_b & 0xff;
1152 arr[n++] = 0; /* Reserved */
1153 arr[n++] = 0; /* Status code */
1154 arr[n++] = 0; /* Vendor unique */
1155 arr[n++] = 0x1; /* One port per group */
1156 arr[n++] = 0; /* Reserved */
1157 arr[n++] = 0; /* Reserved */
1158 arr[n++] = (port_b >> 8) & 0xff;
1159 arr[n++] = port_b & 0xff;
1160
1161 rlen = n - 4;
1162 arr[0] = (rlen >> 24) & 0xff;
1163 arr[1] = (rlen >> 16) & 0xff;
1164 arr[2] = (rlen >> 8) & 0xff;
1165 arr[3] = rlen & 0xff;
1166
1167 /*
1168 * Return the smallest of:
1169 * - the allocation length
1170 * - the constructed response length
1171 * - the maximum array size
1172 */
1173 rlen = min(alen,n);
1174 ret = fill_from_dev_buffer(scp, arr,
1175 min(rlen, SDEBUG_MAX_TGTPGS_ARR_SZ));
1176 kfree(arr);
1177 return ret;
1178 }
1179
1180 /* <<Following mode page info copied from ST318451LW>> */
1181
1182 static int resp_err_recov_pg(unsigned char * p, int pcontrol, int target)
1183 { /* Read-Write Error Recovery page for mode_sense */
1184 unsigned char err_recov_pg[] = {0x1, 0xa, 0xc0, 11, 240, 0, 0, 0,
1185 5, 0, 0xff, 0xff};
1186
1187 memcpy(p, err_recov_pg, sizeof(err_recov_pg));
1188 if (1 == pcontrol)
1189 memset(p + 2, 0, sizeof(err_recov_pg) - 2);
1190 return sizeof(err_recov_pg);
1191 }
1192
1193 static int resp_disconnect_pg(unsigned char * p, int pcontrol, int target)
1194 { /* Disconnect-Reconnect page for mode_sense */
1195 unsigned char disconnect_pg[] = {0x2, 0xe, 128, 128, 0, 10, 0, 0,
1196 0, 0, 0, 0, 0, 0, 0, 0};
1197
1198 memcpy(p, disconnect_pg, sizeof(disconnect_pg));
1199 if (1 == pcontrol)
1200 memset(p + 2, 0, sizeof(disconnect_pg) - 2);
1201 return sizeof(disconnect_pg);
1202 }
1203
1204 static int resp_format_pg(unsigned char * p, int pcontrol, int target)
1205 { /* Format device page for mode_sense */
1206 unsigned char format_pg[] = {0x3, 0x16, 0, 0, 0, 0, 0, 0,
1207 0, 0, 0, 0, 0, 0, 0, 0,
1208 0, 0, 0, 0, 0x40, 0, 0, 0};
1209
1210 memcpy(p, format_pg, sizeof(format_pg));
1211 p[10] = (sdebug_sectors_per >> 8) & 0xff;
1212 p[11] = sdebug_sectors_per & 0xff;
1213 p[12] = (scsi_debug_sector_size >> 8) & 0xff;
1214 p[13] = scsi_debug_sector_size & 0xff;
1215 if (scsi_debug_removable)
1216 p[20] |= 0x20; /* should agree with INQUIRY */
1217 if (1 == pcontrol)
1218 memset(p + 2, 0, sizeof(format_pg) - 2);
1219 return sizeof(format_pg);
1220 }
1221
1222 static int resp_caching_pg(unsigned char * p, int pcontrol, int target)
1223 { /* Caching page for mode_sense */
1224 unsigned char caching_pg[] = {0x8, 18, 0x14, 0, 0xff, 0xff, 0, 0,
1225 0xff, 0xff, 0xff, 0xff, 0x80, 0x14, 0, 0, 0, 0, 0, 0};
1226
1227 memcpy(p, caching_pg, sizeof(caching_pg));
1228 if (1 == pcontrol)
1229 memset(p + 2, 0, sizeof(caching_pg) - 2);
1230 return sizeof(caching_pg);
1231 }
1232
1233 static int resp_ctrl_m_pg(unsigned char * p, int pcontrol, int target)
1234 { /* Control mode page for mode_sense */
1235 unsigned char ch_ctrl_m_pg[] = {/* 0xa, 10, */ 0x6, 0, 0, 0, 0, 0,
1236 0, 0, 0, 0};
1237 unsigned char d_ctrl_m_pg[] = {0xa, 10, 2, 0, 0, 0, 0, 0,
1238 0, 0, 0x2, 0x4b};
1239
1240 if (scsi_debug_dsense)
1241 ctrl_m_pg[2] |= 0x4;
1242 else
1243 ctrl_m_pg[2] &= ~0x4;
1244
1245 if (scsi_debug_ato)
1246 ctrl_m_pg[5] |= 0x80; /* ATO=1 */
1247
1248 memcpy(p, ctrl_m_pg, sizeof(ctrl_m_pg));
1249 if (1 == pcontrol)
1250 memcpy(p + 2, ch_ctrl_m_pg, sizeof(ch_ctrl_m_pg));
1251 else if (2 == pcontrol)
1252 memcpy(p, d_ctrl_m_pg, sizeof(d_ctrl_m_pg));
1253 return sizeof(ctrl_m_pg);
1254 }
1255
1256
1257 static int resp_iec_m_pg(unsigned char * p, int pcontrol, int target)
1258 { /* Informational Exceptions control mode page for mode_sense */
1259 unsigned char ch_iec_m_pg[] = {/* 0x1c, 0xa, */ 0x4, 0xf, 0, 0, 0, 0,
1260 0, 0, 0x0, 0x0};
1261 unsigned char d_iec_m_pg[] = {0x1c, 0xa, 0x08, 0, 0, 0, 0, 0,
1262 0, 0, 0x0, 0x0};
1263
1264 memcpy(p, iec_m_pg, sizeof(iec_m_pg));
1265 if (1 == pcontrol)
1266 memcpy(p + 2, ch_iec_m_pg, sizeof(ch_iec_m_pg));
1267 else if (2 == pcontrol)
1268 memcpy(p, d_iec_m_pg, sizeof(d_iec_m_pg));
1269 return sizeof(iec_m_pg);
1270 }
1271
1272 static int resp_sas_sf_m_pg(unsigned char * p, int pcontrol, int target)
1273 { /* SAS SSP mode page - short format for mode_sense */
1274 unsigned char sas_sf_m_pg[] = {0x19, 0x6,
1275 0x6, 0x0, 0x7, 0xd0, 0x0, 0x0};
1276
1277 memcpy(p, sas_sf_m_pg, sizeof(sas_sf_m_pg));
1278 if (1 == pcontrol)
1279 memset(p + 2, 0, sizeof(sas_sf_m_pg) - 2);
1280 return sizeof(sas_sf_m_pg);
1281 }
1282
1283
1284 static int resp_sas_pcd_m_spg(unsigned char * p, int pcontrol, int target,
1285 int target_dev_id)
1286 { /* SAS phy control and discover mode page for mode_sense */
1287 unsigned char sas_pcd_m_pg[] = {0x59, 0x1, 0, 0x64, 0, 0x6, 0, 2,
1288 0, 0, 0, 0, 0x10, 0x9, 0x8, 0x0,
1289 0x52, 0x22, 0x22, 0x20, 0x0, 0x0, 0x0, 0x0,
1290 0x51, 0x11, 0x11, 0x10, 0x0, 0x0, 0x0, 0x1,
1291 0x2, 0, 0, 0, 0, 0, 0, 0,
1292 0x88, 0x99, 0, 0, 0, 0, 0, 0,
1293 0, 0, 0, 0, 0, 0, 0, 0,
1294 0, 1, 0, 0, 0x10, 0x9, 0x8, 0x0,
1295 0x52, 0x22, 0x22, 0x20, 0x0, 0x0, 0x0, 0x0,
1296 0x51, 0x11, 0x11, 0x10, 0x0, 0x0, 0x0, 0x1,
1297 0x3, 0, 0, 0, 0, 0, 0, 0,
1298 0x88, 0x99, 0, 0, 0, 0, 0, 0,
1299 0, 0, 0, 0, 0, 0, 0, 0,
1300 };
1301 int port_a, port_b;
1302
1303 port_a = target_dev_id + 1;
1304 port_b = port_a + 1;
1305 memcpy(p, sas_pcd_m_pg, sizeof(sas_pcd_m_pg));
1306 p[20] = (port_a >> 24);
1307 p[21] = (port_a >> 16) & 0xff;
1308 p[22] = (port_a >> 8) & 0xff;
1309 p[23] = port_a & 0xff;
1310 p[48 + 20] = (port_b >> 24);
1311 p[48 + 21] = (port_b >> 16) & 0xff;
1312 p[48 + 22] = (port_b >> 8) & 0xff;
1313 p[48 + 23] = port_b & 0xff;
1314 if (1 == pcontrol)
1315 memset(p + 4, 0, sizeof(sas_pcd_m_pg) - 4);
1316 return sizeof(sas_pcd_m_pg);
1317 }
1318
1319 static int resp_sas_sha_m_spg(unsigned char * p, int pcontrol)
1320 { /* SAS SSP shared protocol specific port mode subpage */
1321 unsigned char sas_sha_m_pg[] = {0x59, 0x2, 0, 0xc, 0, 0x6, 0x10, 0,
1322 0, 0, 0, 0, 0, 0, 0, 0,
1323 };
1324
1325 memcpy(p, sas_sha_m_pg, sizeof(sas_sha_m_pg));
1326 if (1 == pcontrol)
1327 memset(p + 4, 0, sizeof(sas_sha_m_pg) - 4);
1328 return sizeof(sas_sha_m_pg);
1329 }
1330
1331 #define SDEBUG_MAX_MSENSE_SZ 256
1332
1333 static int resp_mode_sense(struct scsi_cmnd * scp, int target,
1334 struct sdebug_dev_info * devip)
1335 {
1336 unsigned char dbd, llbaa;
1337 int pcontrol, pcode, subpcode, bd_len;
1338 unsigned char dev_spec;
1339 int k, alloc_len, msense_6, offset, len, errsts, target_dev_id;
1340 unsigned char * ap;
1341 unsigned char arr[SDEBUG_MAX_MSENSE_SZ];
1342 unsigned char *cmd = (unsigned char *)scp->cmnd;
1343
1344 if ((errsts = check_readiness(scp, 1, devip)))
1345 return errsts;
1346 dbd = !!(cmd[1] & 0x8);
1347 pcontrol = (cmd[2] & 0xc0) >> 6;
1348 pcode = cmd[2] & 0x3f;
1349 subpcode = cmd[3];
1350 msense_6 = (MODE_SENSE == cmd[0]);
1351 llbaa = msense_6 ? 0 : !!(cmd[1] & 0x10);
1352 if ((0 == scsi_debug_ptype) && (0 == dbd))
1353 bd_len = llbaa ? 16 : 8;
1354 else
1355 bd_len = 0;
1356 alloc_len = msense_6 ? cmd[4] : ((cmd[7] << 8) | cmd[8]);
1357 memset(arr, 0, SDEBUG_MAX_MSENSE_SZ);
1358 if (0x3 == pcontrol) { /* Saving values not supported */
1359 mk_sense_buffer(devip, ILLEGAL_REQUEST, SAVING_PARAMS_UNSUP,
1360 0);
1361 return check_condition_result;
1362 }
1363 target_dev_id = ((devip->sdbg_host->shost->host_no + 1) * 2000) +
1364 (devip->target * 1000) - 3;
1365 /* set DPOFUA bit for disks */
1366 if (0 == scsi_debug_ptype)
1367 dev_spec = (DEV_READONLY(target) ? 0x80 : 0x0) | 0x10;
1368 else
1369 dev_spec = 0x0;
1370 if (msense_6) {
1371 arr[2] = dev_spec;
1372 arr[3] = bd_len;
1373 offset = 4;
1374 } else {
1375 arr[3] = dev_spec;
1376 if (16 == bd_len)
1377 arr[4] = 0x1; /* set LONGLBA bit */
1378 arr[7] = bd_len; /* assume 255 or less */
1379 offset = 8;
1380 }
1381 ap = arr + offset;
1382 if ((bd_len > 0) && (!sdebug_capacity))
1383 sdebug_capacity = get_sdebug_capacity();
1384
1385 if (8 == bd_len) {
1386 if (sdebug_capacity > 0xfffffffe) {
1387 ap[0] = 0xff;
1388 ap[1] = 0xff;
1389 ap[2] = 0xff;
1390 ap[3] = 0xff;
1391 } else {
1392 ap[0] = (sdebug_capacity >> 24) & 0xff;
1393 ap[1] = (sdebug_capacity >> 16) & 0xff;
1394 ap[2] = (sdebug_capacity >> 8) & 0xff;
1395 ap[3] = sdebug_capacity & 0xff;
1396 }
1397 ap[6] = (scsi_debug_sector_size >> 8) & 0xff;
1398 ap[7] = scsi_debug_sector_size & 0xff;
1399 offset += bd_len;
1400 ap = arr + offset;
1401 } else if (16 == bd_len) {
1402 unsigned long long capac = sdebug_capacity;
1403
1404 for (k = 0; k < 8; ++k, capac >>= 8)
1405 ap[7 - k] = capac & 0xff;
1406 ap[12] = (scsi_debug_sector_size >> 24) & 0xff;
1407 ap[13] = (scsi_debug_sector_size >> 16) & 0xff;
1408 ap[14] = (scsi_debug_sector_size >> 8) & 0xff;
1409 ap[15] = scsi_debug_sector_size & 0xff;
1410 offset += bd_len;
1411 ap = arr + offset;
1412 }
1413
1414 if ((subpcode > 0x0) && (subpcode < 0xff) && (0x19 != pcode)) {
1415 /* TODO: Control Extension page */
1416 mk_sense_buffer(devip, ILLEGAL_REQUEST, INVALID_FIELD_IN_CDB,
1417 0);
1418 return check_condition_result;
1419 }
1420 switch (pcode) {
1421 case 0x1: /* Read-Write error recovery page, direct access */
1422 len = resp_err_recov_pg(ap, pcontrol, target);
1423 offset += len;
1424 break;
1425 case 0x2: /* Disconnect-Reconnect page, all devices */
1426 len = resp_disconnect_pg(ap, pcontrol, target);
1427 offset += len;
1428 break;
1429 case 0x3: /* Format device page, direct access */
1430 len = resp_format_pg(ap, pcontrol, target);
1431 offset += len;
1432 break;
1433 case 0x8: /* Caching page, direct access */
1434 len = resp_caching_pg(ap, pcontrol, target);
1435 offset += len;
1436 break;
1437 case 0xa: /* Control Mode page, all devices */
1438 len = resp_ctrl_m_pg(ap, pcontrol, target);
1439 offset += len;
1440 break;
1441 case 0x19: /* if spc==1 then sas phy, control+discover */
1442 if ((subpcode > 0x2) && (subpcode < 0xff)) {
1443 mk_sense_buffer(devip, ILLEGAL_REQUEST,
1444 INVALID_FIELD_IN_CDB, 0);
1445 return check_condition_result;
1446 }
1447 len = 0;
1448 if ((0x0 == subpcode) || (0xff == subpcode))
1449 len += resp_sas_sf_m_pg(ap + len, pcontrol, target);
1450 if ((0x1 == subpcode) || (0xff == subpcode))
1451 len += resp_sas_pcd_m_spg(ap + len, pcontrol, target,
1452 target_dev_id);
1453 if ((0x2 == subpcode) || (0xff == subpcode))
1454 len += resp_sas_sha_m_spg(ap + len, pcontrol);
1455 offset += len;
1456 break;
1457 case 0x1c: /* Informational Exceptions Mode page, all devices */
1458 len = resp_iec_m_pg(ap, pcontrol, target);
1459 offset += len;
1460 break;
1461 case 0x3f: /* Read all Mode pages */
1462 if ((0 == subpcode) || (0xff == subpcode)) {
1463 len = resp_err_recov_pg(ap, pcontrol, target);
1464 len += resp_disconnect_pg(ap + len, pcontrol, target);
1465 len += resp_format_pg(ap + len, pcontrol, target);
1466 len += resp_caching_pg(ap + len, pcontrol, target);
1467 len += resp_ctrl_m_pg(ap + len, pcontrol, target);
1468 len += resp_sas_sf_m_pg(ap + len, pcontrol, target);
1469 if (0xff == subpcode) {
1470 len += resp_sas_pcd_m_spg(ap + len, pcontrol,
1471 target, target_dev_id);
1472 len += resp_sas_sha_m_spg(ap + len, pcontrol);
1473 }
1474 len += resp_iec_m_pg(ap + len, pcontrol, target);
1475 } else {
1476 mk_sense_buffer(devip, ILLEGAL_REQUEST,
1477 INVALID_FIELD_IN_CDB, 0);
1478 return check_condition_result;
1479 }
1480 offset += len;
1481 break;
1482 default:
1483 mk_sense_buffer(devip, ILLEGAL_REQUEST, INVALID_FIELD_IN_CDB,
1484 0);
1485 return check_condition_result;
1486 }
1487 if (msense_6)
1488 arr[0] = offset - 1;
1489 else {
1490 arr[0] = ((offset - 2) >> 8) & 0xff;
1491 arr[1] = (offset - 2) & 0xff;
1492 }
1493 return fill_from_dev_buffer(scp, arr, min(alloc_len, offset));
1494 }
1495
1496 #define SDEBUG_MAX_MSELECT_SZ 512
1497
1498 static int resp_mode_select(struct scsi_cmnd * scp, int mselect6,
1499 struct sdebug_dev_info * devip)
1500 {
1501 int pf, sp, ps, md_len, bd_len, off, spf, pg_len;
1502 int param_len, res, errsts, mpage;
1503 unsigned char arr[SDEBUG_MAX_MSELECT_SZ];
1504 unsigned char *cmd = (unsigned char *)scp->cmnd;
1505
1506 if ((errsts = check_readiness(scp, 1, devip)))
1507 return errsts;
1508 memset(arr, 0, sizeof(arr));
1509 pf = cmd[1] & 0x10;
1510 sp = cmd[1] & 0x1;
1511 param_len = mselect6 ? cmd[4] : ((cmd[7] << 8) + cmd[8]);
1512 if ((0 == pf) || sp || (param_len > SDEBUG_MAX_MSELECT_SZ)) {
1513 mk_sense_buffer(devip, ILLEGAL_REQUEST,
1514 INVALID_FIELD_IN_CDB, 0);
1515 return check_condition_result;
1516 }
1517 res = fetch_to_dev_buffer(scp, arr, param_len);
1518 if (-1 == res)
1519 return (DID_ERROR << 16);
1520 else if ((res < param_len) &&
1521 (SCSI_DEBUG_OPT_NOISE & scsi_debug_opts))
1522 printk(KERN_INFO "scsi_debug: mode_select: cdb indicated=%d, "
1523 " IO sent=%d bytes\n", param_len, res);
1524 md_len = mselect6 ? (arr[0] + 1) : ((arr[0] << 8) + arr[1] + 2);
1525 bd_len = mselect6 ? arr[3] : ((arr[6] << 8) + arr[7]);
1526 if (md_len > 2) {
1527 mk_sense_buffer(devip, ILLEGAL_REQUEST,
1528 INVALID_FIELD_IN_PARAM_LIST, 0);
1529 return check_condition_result;
1530 }
1531 off = bd_len + (mselect6 ? 4 : 8);
1532 mpage = arr[off] & 0x3f;
1533 ps = !!(arr[off] & 0x80);
1534 if (ps) {
1535 mk_sense_buffer(devip, ILLEGAL_REQUEST,
1536 INVALID_FIELD_IN_PARAM_LIST, 0);
1537 return check_condition_result;
1538 }
1539 spf = !!(arr[off] & 0x40);
1540 pg_len = spf ? ((arr[off + 2] << 8) + arr[off + 3] + 4) :
1541 (arr[off + 1] + 2);
1542 if ((pg_len + off) > param_len) {
1543 mk_sense_buffer(devip, ILLEGAL_REQUEST,
1544 PARAMETER_LIST_LENGTH_ERR, 0);
1545 return check_condition_result;
1546 }
1547 switch (mpage) {
1548 case 0xa: /* Control Mode page */
1549 if (ctrl_m_pg[1] == arr[off + 1]) {
1550 memcpy(ctrl_m_pg + 2, arr + off + 2,
1551 sizeof(ctrl_m_pg) - 2);
1552 scsi_debug_dsense = !!(ctrl_m_pg[2] & 0x4);
1553 return 0;
1554 }
1555 break;
1556 case 0x1c: /* Informational Exceptions Mode page */
1557 if (iec_m_pg[1] == arr[off + 1]) {
1558 memcpy(iec_m_pg + 2, arr + off + 2,
1559 sizeof(iec_m_pg) - 2);
1560 return 0;
1561 }
1562 break;
1563 default:
1564 break;
1565 }
1566 mk_sense_buffer(devip, ILLEGAL_REQUEST,
1567 INVALID_FIELD_IN_PARAM_LIST, 0);
1568 return check_condition_result;
1569 }
1570
1571 static int resp_temp_l_pg(unsigned char * arr)
1572 {
1573 unsigned char temp_l_pg[] = {0x0, 0x0, 0x3, 0x2, 0x0, 38,
1574 0x0, 0x1, 0x3, 0x2, 0x0, 65,
1575 };
1576
1577 memcpy(arr, temp_l_pg, sizeof(temp_l_pg));
1578 return sizeof(temp_l_pg);
1579 }
1580
1581 static int resp_ie_l_pg(unsigned char * arr)
1582 {
1583 unsigned char ie_l_pg[] = {0x0, 0x0, 0x3, 0x3, 0x0, 0x0, 38,
1584 };
1585
1586 memcpy(arr, ie_l_pg, sizeof(ie_l_pg));
1587 if (iec_m_pg[2] & 0x4) { /* TEST bit set */
1588 arr[4] = THRESHOLD_EXCEEDED;
1589 arr[5] = 0xff;
1590 }
1591 return sizeof(ie_l_pg);
1592 }
1593
1594 #define SDEBUG_MAX_LSENSE_SZ 512
1595
1596 static int resp_log_sense(struct scsi_cmnd * scp,
1597 struct sdebug_dev_info * devip)
1598 {
1599 int ppc, sp, pcontrol, pcode, subpcode, alloc_len, errsts, len, n;
1600 unsigned char arr[SDEBUG_MAX_LSENSE_SZ];
1601 unsigned char *cmd = (unsigned char *)scp->cmnd;
1602
1603 if ((errsts = check_readiness(scp, 1, devip)))
1604 return errsts;
1605 memset(arr, 0, sizeof(arr));
1606 ppc = cmd[1] & 0x2;
1607 sp = cmd[1] & 0x1;
1608 if (ppc || sp) {
1609 mk_sense_buffer(devip, ILLEGAL_REQUEST,
1610 INVALID_FIELD_IN_CDB, 0);
1611 return check_condition_result;
1612 }
1613 pcontrol = (cmd[2] & 0xc0) >> 6;
1614 pcode = cmd[2] & 0x3f;
1615 subpcode = cmd[3] & 0xff;
1616 alloc_len = (cmd[7] << 8) + cmd[8];
1617 arr[0] = pcode;
1618 if (0 == subpcode) {
1619 switch (pcode) {
1620 case 0x0: /* Supported log pages log page */
1621 n = 4;
1622 arr[n++] = 0x0; /* this page */
1623 arr[n++] = 0xd; /* Temperature */
1624 arr[n++] = 0x2f; /* Informational exceptions */
1625 arr[3] = n - 4;
1626 break;
1627 case 0xd: /* Temperature log page */
1628 arr[3] = resp_temp_l_pg(arr + 4);
1629 break;
1630 case 0x2f: /* Informational exceptions log page */
1631 arr[3] = resp_ie_l_pg(arr + 4);
1632 break;
1633 default:
1634 mk_sense_buffer(devip, ILLEGAL_REQUEST,
1635 INVALID_FIELD_IN_CDB, 0);
1636 return check_condition_result;
1637 }
1638 } else if (0xff == subpcode) {
1639 arr[0] |= 0x40;
1640 arr[1] = subpcode;
1641 switch (pcode) {
1642 case 0x0: /* Supported log pages and subpages log page */
1643 n = 4;
1644 arr[n++] = 0x0;
1645 arr[n++] = 0x0; /* 0,0 page */
1646 arr[n++] = 0x0;
1647 arr[n++] = 0xff; /* this page */
1648 arr[n++] = 0xd;
1649 arr[n++] = 0x0; /* Temperature */
1650 arr[n++] = 0x2f;
1651 arr[n++] = 0x0; /* Informational exceptions */
1652 arr[3] = n - 4;
1653 break;
1654 case 0xd: /* Temperature subpages */
1655 n = 4;
1656 arr[n++] = 0xd;
1657 arr[n++] = 0x0; /* Temperature */
1658 arr[3] = n - 4;
1659 break;
1660 case 0x2f: /* Informational exceptions subpages */
1661 n = 4;
1662 arr[n++] = 0x2f;
1663 arr[n++] = 0x0; /* Informational exceptions */
1664 arr[3] = n - 4;
1665 break;
1666 default:
1667 mk_sense_buffer(devip, ILLEGAL_REQUEST,
1668 INVALID_FIELD_IN_CDB, 0);
1669 return check_condition_result;
1670 }
1671 } else {
1672 mk_sense_buffer(devip, ILLEGAL_REQUEST,
1673 INVALID_FIELD_IN_CDB, 0);
1674 return check_condition_result;
1675 }
1676 len = min(((arr[2] << 8) + arr[3]) + 4, alloc_len);
1677 return fill_from_dev_buffer(scp, arr,
1678 min(len, SDEBUG_MAX_LSENSE_SZ));
1679 }
1680
1681 static int check_device_access_params(struct sdebug_dev_info *devi,
1682 unsigned long long lba, unsigned int num)
1683 {
1684 if (lba + num > sdebug_capacity) {
1685 mk_sense_buffer(devi, ILLEGAL_REQUEST, ADDR_OUT_OF_RANGE, 0);
1686 return check_condition_result;
1687 }
1688 /* transfer length excessive (tie in to block limits VPD page) */
1689 if (num > sdebug_store_sectors) {
1690 mk_sense_buffer(devi, ILLEGAL_REQUEST, INVALID_FIELD_IN_CDB, 0);
1691 return check_condition_result;
1692 }
1693 return 0;
1694 }
1695
1696 static int do_device_access(struct scsi_cmnd *scmd,
1697 struct sdebug_dev_info *devi,
1698 unsigned long long lba, unsigned int num, int write)
1699 {
1700 int ret;
1701 unsigned long long block, rest = 0;
1702 int (*func)(struct scsi_cmnd *, unsigned char *, int);
1703
1704 func = write ? fetch_to_dev_buffer : fill_from_dev_buffer;
1705
1706 block = do_div(lba, sdebug_store_sectors);
1707 if (block + num > sdebug_store_sectors)
1708 rest = block + num - sdebug_store_sectors;
1709
1710 ret = func(scmd, fake_storep + (block * scsi_debug_sector_size),
1711 (num - rest) * scsi_debug_sector_size);
1712 if (!ret && rest)
1713 ret = func(scmd, fake_storep, rest * scsi_debug_sector_size);
1714
1715 return ret;
1716 }
1717
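/* DIF verification for reads: recompute each sector's guard tag
 * (crc_t10dif or IP checksum, per scsi_debug_guard), compare guard and
 * reference tags against the stored tuples (skipping the 0xffff app tag
 * escape), then copy the protection data into the command's protection
 * scatter-gather list. Returns 0, or an ASCQ-style code on mismatch. */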
1718 static int prot_verify_read(struct scsi_cmnd *SCpnt, sector_t start_sec,
1719 unsigned int sectors, u32 ei_lba)
1720 {
1721 unsigned int i, resid;
1722 struct scatterlist *psgl;
1723 struct sd_dif_tuple *sdt;
1724 sector_t sector;
1725 sector_t tmp_sec = start_sec;
1726 void *paddr;
1727
1728 start_sec = do_div(tmp_sec, sdebug_store_sectors);
1729
1730 sdt = (struct sd_dif_tuple *)(dif_storep + dif_offset(start_sec));
1731
1732 for (i = 0 ; i < sectors ; i++) {
1733 u16 csum;
1734
1735 if (sdt[i].app_tag == 0xffff)
1736 continue;
1737
1738 sector = start_sec + i;
1739
1740 switch (scsi_debug_guard) {
1741 case 1:
1742 csum = ip_compute_csum(fake_storep +
1743 sector * scsi_debug_sector_size,
1744 scsi_debug_sector_size);
1745 break;
1746 case 0:
1747 csum = crc_t10dif(fake_storep +
1748 sector * scsi_debug_sector_size,
1749 scsi_debug_sector_size);
1750 csum = cpu_to_be16(csum);
1751 break;
1752 default:
1753 BUG();
1754 }
1755
1756 if (sdt[i].guard_tag != csum) {
1757 printk(KERN_ERR "%s: GUARD check failed on sector %lu" \
1758 			       " rcvd 0x%04x, calculated 0x%04x\n", __func__,
1759 (unsigned long)sector,
1760 be16_to_cpu(sdt[i].guard_tag),
1761 be16_to_cpu(csum));
1762 dif_errors++;
1763 return 0x01;
1764 }
1765
1766 if (scsi_debug_dif == SD_DIF_TYPE1_PROTECTION &&
1767 be32_to_cpu(sdt[i].ref_tag) != (sector & 0xffffffff)) {
1768 printk(KERN_ERR "%s: REF check failed on sector %lu\n",
1769 __func__, (unsigned long)sector);
1770 dif_errors++;
1771 return 0x03;
1772 }
1773
1774 if (scsi_debug_dif == SD_DIF_TYPE2_PROTECTION &&
1775 be32_to_cpu(sdt[i].ref_tag) != ei_lba) {
1776 printk(KERN_ERR "%s: REF check failed on sector %lu\n",
1777 __func__, (unsigned long)sector);
1778 dif_errors++;
1779 return 0x03;
1780 }
1781
1782 ei_lba++;
1783 }
1784
1785 resid = sectors * 8; /* Bytes of protection data to copy into sgl */
1786 sector = start_sec;
1787
1788 scsi_for_each_prot_sg(SCpnt, psgl, scsi_prot_sg_count(SCpnt), i) {
1789 int len = min(psgl->length, resid);
1790
1791 paddr = kmap_atomic(sg_page(psgl)) + psgl->offset;
1792 memcpy(paddr, dif_storep + dif_offset(sector), len);
1793
1794 sector += len >> 3;
1795 if (sector >= sdebug_store_sectors) {
1796 /* Force wrap */
1797 tmp_sec = sector;
1798 sector = do_div(tmp_sec, sdebug_store_sectors);
1799 }
1800 resid -= len;
1801 kunmap_atomic(paddr);
1802 }
1803
1804 dix_reads++;
1805
1806 return 0;
1807 }
1808
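/* READ: optionally fake an unrecoverable MEDIUM ERROR when the
 * SCSI_DEBUG_OPT_MEDIUM_ERR option is set and the range covers the magic
 * LBA, verify protection information if DIX is active, then copy from the
 * ramdisk under the read lock. */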
1809 static int resp_read(struct scsi_cmnd *SCpnt, unsigned long long lba,
1810 unsigned int num, struct sdebug_dev_info *devip,
1811 u32 ei_lba)
1812 {
1813 unsigned long iflags;
1814 int ret;
1815
1816 ret = check_device_access_params(devip, lba, num);
1817 if (ret)
1818 return ret;
1819
1820 if ((SCSI_DEBUG_OPT_MEDIUM_ERR & scsi_debug_opts) &&
1821 (lba <= (OPT_MEDIUM_ERR_ADDR + OPT_MEDIUM_ERR_NUM - 1)) &&
1822 ((lba + num) > OPT_MEDIUM_ERR_ADDR)) {
1823 /* claim unrecoverable read error */
1824 mk_sense_buffer(devip, MEDIUM_ERROR, UNRECOVERED_READ_ERR, 0);
1825 /* set info field and valid bit for fixed descriptor */
1826 if (0x70 == (devip->sense_buff[0] & 0x7f)) {
1827 devip->sense_buff[0] |= 0x80; /* Valid bit */
1828 ret = (lba < OPT_MEDIUM_ERR_ADDR)
1829 ? OPT_MEDIUM_ERR_ADDR : (int)lba;
1830 devip->sense_buff[3] = (ret >> 24) & 0xff;
1831 devip->sense_buff[4] = (ret >> 16) & 0xff;
1832 devip->sense_buff[5] = (ret >> 8) & 0xff;
1833 devip->sense_buff[6] = ret & 0xff;
1834 }
1835 scsi_set_resid(SCpnt, scsi_bufflen(SCpnt));
1836 return check_condition_result;
1837 }
1838
1839 /* DIX + T10 DIF */
1840 if (scsi_debug_dix && scsi_prot_sg_count(SCpnt)) {
1841 int prot_ret = prot_verify_read(SCpnt, lba, num, ei_lba);
1842
1843 if (prot_ret) {
1844 mk_sense_buffer(devip, ABORTED_COMMAND, 0x10, prot_ret);
1845 return illegal_condition_result;
1846 }
1847 }
1848
1849 read_lock_irqsave(&atomic_rw, iflags);
1850 ret = do_device_access(SCpnt, devip, lba, num, 0);
1851 read_unlock_irqrestore(&atomic_rw, iflags);
1852 return ret;
1853 }
1854
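/* Hex/ASCII dump of a sector, 16 bytes per line; used when a protection
 * check fails on a write. */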
1855 void dump_sector(unsigned char *buf, int len)
1856 {
1857 int i, j;
1858
1859 printk(KERN_ERR ">>> Sector Dump <<<\n");
1860
1861 for (i = 0 ; i < len ; i += 16) {
1862 printk(KERN_ERR "%04d: ", i);
1863
1864 for (j = 0 ; j < 16 ; j++) {
1865 unsigned char c = buf[i+j];
1866 if (c >= 0x20 && c < 0x7e)
1867 printk(" %c ", buf[i+j]);
1868 else
1869 printk("%02x ", buf[i+j]);
1870 }
1871
1872 printk("\n");
1873 }
1874 }
1875
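/* DIF verification for writes: walk the data and protection scatter-gather
 * lists in step, check each sector's guard and reference tags, and only
 * copy the tuples into dif_storep once they have been verified. */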
1876 static int prot_verify_write(struct scsi_cmnd *SCpnt, sector_t start_sec,
1877 unsigned int sectors, u32 ei_lba)
1878 {
1879 int i, j, ret;
1880 struct sd_dif_tuple *sdt;
1881 struct scatterlist *dsgl = scsi_sglist(SCpnt);
1882 struct scatterlist *psgl = scsi_prot_sglist(SCpnt);
1883 void *daddr, *paddr;
1884 sector_t tmp_sec = start_sec;
1885 sector_t sector;
1886 int ppage_offset;
1887 unsigned short csum;
1888
1889 sector = do_div(tmp_sec, sdebug_store_sectors);
1890
1891 BUG_ON(scsi_sg_count(SCpnt) == 0);
1892 BUG_ON(scsi_prot_sg_count(SCpnt) == 0);
1893
1894 paddr = kmap_atomic(sg_page(psgl)) + psgl->offset;
1895 ppage_offset = 0;
1896
1897 /* For each data page */
1898 scsi_for_each_sg(SCpnt, dsgl, scsi_sg_count(SCpnt), i) {
1899 daddr = kmap_atomic(sg_page(dsgl)) + dsgl->offset;
1900
1901 /* For each sector-sized chunk in data page */
1902 for (j = 0 ; j < dsgl->length ; j += scsi_debug_sector_size) {
1903
1904 /* If we're at the end of the current
1905 * protection page advance to the next one
1906 */
1907 if (ppage_offset >= psgl->length) {
1908 kunmap_atomic(paddr);
1909 psgl = sg_next(psgl);
1910 BUG_ON(psgl == NULL);
1911 paddr = kmap_atomic(sg_page(psgl))
1912 + psgl->offset;
1913 ppage_offset = 0;
1914 }
1915
1916 sdt = paddr + ppage_offset;
1917
1918 switch (scsi_debug_guard) {
1919 case 1:
1920 csum = ip_compute_csum(daddr,
1921 scsi_debug_sector_size);
1922 break;
1923 case 0:
1924 csum = cpu_to_be16(crc_t10dif(daddr,
1925 scsi_debug_sector_size));
1926 break;
1927 default:
1928 BUG();
1929 ret = 0;
1930 goto out;
1931 }
1932
1933 if (sdt->guard_tag != csum) {
1934 printk(KERN_ERR
1935 "%s: GUARD check failed on sector %lu " \
1936 "rcvd 0x%04x, calculated 0x%04x\n",
1937 __func__, (unsigned long)sector,
1938 be16_to_cpu(sdt->guard_tag),
1939 be16_to_cpu(csum));
1940 ret = 0x01;
1941 dump_sector(daddr, scsi_debug_sector_size);
1942 goto out;
1943 }
1944
1945 if (scsi_debug_dif == SD_DIF_TYPE1_PROTECTION &&
1946 be32_to_cpu(sdt->ref_tag)
1947 != (start_sec & 0xffffffff)) {
1948 printk(KERN_ERR
1949 "%s: REF check failed on sector %lu\n",
1950 __func__, (unsigned long)sector);
1951 ret = 0x03;
1952 dump_sector(daddr, scsi_debug_sector_size);
1953 goto out;
1954 }
1955
1956 if (scsi_debug_dif == SD_DIF_TYPE2_PROTECTION &&
1957 be32_to_cpu(sdt->ref_tag) != ei_lba) {
1958 printk(KERN_ERR
1959 "%s: REF check failed on sector %lu\n",
1960 __func__, (unsigned long)sector);
1961 ret = 0x03;
1962 dump_sector(daddr, scsi_debug_sector_size);
1963 goto out;
1964 }
1965
1966 /* Would be great to copy this in bigger
1967 * chunks. However, for the sake of
1968 * correctness we need to verify each sector
1969 * before writing it to "stable" storage
1970 */
1971 memcpy(dif_storep + dif_offset(sector), sdt, 8);
1972
1973 sector++;
1974
1975 if (sector == sdebug_store_sectors)
1976 sector = 0; /* Force wrap */
1977
1978 start_sec++;
1979 ei_lba++;
1980 daddr += scsi_debug_sector_size;
1981 ppage_offset += sizeof(struct sd_dif_tuple);
1982 }
1983
1984 kunmap_atomic(daddr);
1985 }
1986
1987 kunmap_atomic(paddr);
1988
1989 dix_writes++;
1990
1991 return 0;
1992
1993 out:
1994 dif_errors++;
1995 kunmap_atomic(daddr);
1996 kunmap_atomic(paddr);
1997 return ret;
1998 }
1999
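/* Logical block provisioning: map_storep holds one bit per provisioning
 * block of scsi_debug_unmap_granularity logical blocks. map_state()
 * returns whether 'lba' is mapped and, via *num, the length of the run of
 * blocks in the same state. */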
2000 static unsigned int map_state(sector_t lba, unsigned int *num)
2001 {
2002 unsigned int granularity, alignment, mapped;
2003 sector_t block, next, end;
2004
2005 granularity = scsi_debug_unmap_granularity;
2006 alignment = granularity - scsi_debug_unmap_alignment;
2007 block = lba + alignment;
2008 do_div(block, granularity);
2009
2010 mapped = test_bit(block, map_storep);
2011
2012 if (mapped)
2013 next = find_next_zero_bit(map_storep, map_size, block);
2014 else
2015 next = find_next_bit(map_storep, map_size, block);
2016
2017 end = next * granularity - scsi_debug_unmap_alignment;
2018 *num = end - lba;
2019
2020 return mapped;
2021 }
2022
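/* Mark every provisioning block touched by [lba, lba + len) as mapped. */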
2023 static void map_region(sector_t lba, unsigned int len)
2024 {
2025 unsigned int granularity, alignment;
2026 sector_t end = lba + len;
2027
2028 granularity = scsi_debug_unmap_granularity;
2029 alignment = granularity - scsi_debug_unmap_alignment;
2030
2031 while (lba < end) {
2032 sector_t block, rem;
2033
2034 block = lba + alignment;
2035 rem = do_div(block, granularity);
2036
2037 if (block < map_size)
2038 set_bit(block, map_storep);
2039
2040 lba += granularity - rem;
2041 }
2042 }
2043
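/* Clear the mapped bit for aligned provisioning blocks wholly contained in
 * [lba, lba + len); when lbprz is set the backing storage is zeroed too. */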
2044 static void unmap_region(sector_t lba, unsigned int len)
2045 {
2046 unsigned int granularity, alignment;
2047 sector_t end = lba + len;
2048
2049 granularity = scsi_debug_unmap_granularity;
2050 alignment = granularity - scsi_debug_unmap_alignment;
2051
2052 while (lba < end) {
2053 sector_t block, rem;
2054
2055 block = lba + alignment;
2056 rem = do_div(block, granularity);
2057
2058 if (rem == 0 && lba + granularity < end && block < map_size) {
2059 clear_bit(block, map_storep);
2060 if (scsi_debug_lbprz)
2061 memset(fake_storep +
2062 block * scsi_debug_sector_size, 0,
2063 scsi_debug_sector_size);
2064 }
2065 lba += granularity - rem;
2066 }
2067 }
2068
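/* WRITE: verify protection information if DIX is active, then copy the
 * data to the ramdisk under the write lock and update the provisioning
 * map. */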
2069 static int resp_write(struct scsi_cmnd *SCpnt, unsigned long long lba,
2070 unsigned int num, struct sdebug_dev_info *devip,
2071 u32 ei_lba)
2072 {
2073 unsigned long iflags;
2074 int ret;
2075
2076 ret = check_device_access_params(devip, lba, num);
2077 if (ret)
2078 return ret;
2079
2080 /* DIX + T10 DIF */
2081 if (scsi_debug_dix && scsi_prot_sg_count(SCpnt)) {
2082 int prot_ret = prot_verify_write(SCpnt, lba, num, ei_lba);
2083
2084 if (prot_ret) {
2085 mk_sense_buffer(devip, ILLEGAL_REQUEST, 0x10, prot_ret);
2086 return illegal_condition_result;
2087 }
2088 }
2089
2090 write_lock_irqsave(&atomic_rw, iflags);
2091 ret = do_device_access(SCpnt, devip, lba, num, 1);
2092 if (scsi_debug_unmap_granularity)
2093 map_region(lba, num);
2094 write_unlock_irqrestore(&atomic_rw, iflags);
2095 if (-1 == ret)
2096 return (DID_ERROR << 16);
2097 else if ((ret < (num * scsi_debug_sector_size)) &&
2098 (SCSI_DEBUG_OPT_NOISE & scsi_debug_opts))
2099 printk(KERN_INFO "scsi_debug: write: cdb indicated=%u, "
2100 " IO sent=%d bytes\n", num * scsi_debug_sector_size, ret);
2101
2102 return 0;
2103 }
2104
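/* WRITE SAME: with the UNMAP bit set (and provisioning enabled) the range
 * is simply unmapped; otherwise the single block supplied by the initiator
 * is fetched and replicated across the whole range. */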
2105 static int resp_write_same(struct scsi_cmnd *scmd, unsigned long long lba,
2106 unsigned int num, struct sdebug_dev_info *devip,
2107 u32 ei_lba, unsigned int unmap)
2108 {
2109 unsigned long iflags;
2110 unsigned long long i;
2111 int ret;
2112
2113 ret = check_device_access_params(devip, lba, num);
2114 if (ret)
2115 return ret;
2116
2117 if (num > scsi_debug_write_same_length) {
2118 mk_sense_buffer(devip, ILLEGAL_REQUEST, INVALID_FIELD_IN_CDB,
2119 0);
2120 return check_condition_result;
2121 }
2122
2123 write_lock_irqsave(&atomic_rw, iflags);
2124
2125 if (unmap && scsi_debug_unmap_granularity) {
2126 unmap_region(lba, num);
2127 goto out;
2128 }
2129
2130 /* Else fetch one logical block */
2131 ret = fetch_to_dev_buffer(scmd,
2132 fake_storep + (lba * scsi_debug_sector_size),
2133 scsi_debug_sector_size);
2134
2135 if (-1 == ret) {
2136 write_unlock_irqrestore(&atomic_rw, iflags);
2137 return (DID_ERROR << 16);
2138 } else if ((ret < (num * scsi_debug_sector_size)) &&
2139 (SCSI_DEBUG_OPT_NOISE & scsi_debug_opts))
2140 printk(KERN_INFO "scsi_debug: write same: cdb indicated=%u, "
2141 " IO sent=%d bytes\n", num * scsi_debug_sector_size, ret);
2142
2143 /* Copy first sector to remaining blocks */
2144 for (i = 1 ; i < num ; i++)
2145 memcpy(fake_storep + ((lba + i) * scsi_debug_sector_size),
2146 fake_storep + (lba * scsi_debug_sector_size),
2147 scsi_debug_sector_size);
2148
2149 if (scsi_debug_unmap_granularity)
2150 map_region(lba, num);
2151 out:
2152 write_unlock_irqrestore(&atomic_rw, iflags);
2153
2154 return 0;
2155 }
2156
2157 struct unmap_block_desc {
2158 __be64 lba;
2159 __be32 blocks;
2160 __be32 __reserved;
2161 };
2162
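/* UNMAP: copy the parameter list from the data-out buffer, then validate
 * and unmap each LBA range descriptor in turn. */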
2163 static int resp_unmap(struct scsi_cmnd * scmd, struct sdebug_dev_info * devip)
2164 {
2165 unsigned char *buf;
2166 struct unmap_block_desc *desc;
2167 unsigned int i, payload_len, descriptors;
2168 int ret;
2169
2170 ret = check_readiness(scmd, 1, devip);
2171 if (ret)
2172 return ret;
2173
2174 payload_len = get_unaligned_be16(&scmd->cmnd[7]);
2175 BUG_ON(scsi_bufflen(scmd) != payload_len);
2176
2177 descriptors = (payload_len - 8) / 16;
2178
2179 buf = kmalloc(scsi_bufflen(scmd), GFP_ATOMIC);
2180 if (!buf)
2181 return check_condition_result;
2182
2183 scsi_sg_copy_to_buffer(scmd, buf, scsi_bufflen(scmd));
2184
2185 BUG_ON(get_unaligned_be16(&buf[0]) != payload_len - 2);
2186 BUG_ON(get_unaligned_be16(&buf[2]) != descriptors * 16);
2187
2188 desc = (void *)&buf[8];
2189
2190 for (i = 0 ; i < descriptors ; i++) {
2191 unsigned long long lba = get_unaligned_be64(&desc[i].lba);
2192 unsigned int num = get_unaligned_be32(&desc[i].blocks);
2193
2194 ret = check_device_access_params(devip, lba, num);
2195 if (ret)
2196 goto out;
2197
2198 unmap_region(lba, num);
2199 }
2200
2201 ret = 0;
2202
2203 out:
2204 kfree(buf);
2205
2206 return ret;
2207 }
2208
2209 #define SDEBUG_GET_LBA_STATUS_LEN 32
2210
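/* GET LBA STATUS: report, via a single descriptor, whether the requested
 * LBA is mapped and how many following blocks share that state. */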
2211 static int resp_get_lba_status(struct scsi_cmnd * scmd,
2212 struct sdebug_dev_info * devip)
2213 {
2214 unsigned long long lba;
2215 unsigned int alloc_len, mapped, num;
2216 unsigned char arr[SDEBUG_GET_LBA_STATUS_LEN];
2217 int ret;
2218
2219 ret = check_readiness(scmd, 1, devip);
2220 if (ret)
2221 return ret;
2222
2223 lba = get_unaligned_be64(&scmd->cmnd[2]);
2224 alloc_len = get_unaligned_be32(&scmd->cmnd[10]);
2225
2226 if (alloc_len < 24)
2227 return 0;
2228
2229 ret = check_device_access_params(devip, lba, 1);
2230 if (ret)
2231 return ret;
2232
2233 mapped = map_state(lba, &num);
2234
2235 memset(arr, 0, SDEBUG_GET_LBA_STATUS_LEN);
2236 put_unaligned_be32(20, &arr[0]); /* Parameter Data Length */
2237 put_unaligned_be64(lba, &arr[8]); /* LBA */
2238 put_unaligned_be32(num, &arr[16]); /* Number of blocks */
2239 arr[20] = !mapped; /* mapped = 0, unmapped = 1 */
2240
2241 return fill_from_dev_buffer(scmd, arr, SDEBUG_GET_LBA_STATUS_LEN);
2242 }
2243
2244 #define SDEBUG_RLUN_ARR_SZ 256
2245
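/* REPORT LUNS: list up to max_luns LUNs per target, optionally omitting
 * LUN 0 and optionally appending the REPORT LUNS well known LUN. */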
2246 static int resp_report_luns(struct scsi_cmnd * scp,
2247 struct sdebug_dev_info * devip)
2248 {
2249 unsigned int alloc_len;
2250 int lun_cnt, i, upper, num, n, wlun, lun;
2251 unsigned char *cmd = (unsigned char *)scp->cmnd;
2252 int select_report = (int)cmd[2];
2253 struct scsi_lun *one_lun;
2254 unsigned char arr[SDEBUG_RLUN_ARR_SZ];
2255 unsigned char * max_addr;
2256
2257 alloc_len = cmd[9] + (cmd[8] << 8) + (cmd[7] << 16) + (cmd[6] << 24);
2258 if ((alloc_len < 4) || (select_report > 2)) {
2259 mk_sense_buffer(devip, ILLEGAL_REQUEST, INVALID_FIELD_IN_CDB,
2260 0);
2261 return check_condition_result;
2262 }
2263 /* can produce response with up to 16k luns (lun 0 to lun 16383) */
2264 memset(arr, 0, SDEBUG_RLUN_ARR_SZ);
2265 lun_cnt = scsi_debug_max_luns;
2266 if (1 == select_report)
2267 lun_cnt = 0;
2268 else if (scsi_debug_no_lun_0 && (lun_cnt > 0))
2269 --lun_cnt;
2270 wlun = (select_report > 0) ? 1 : 0;
2271 num = lun_cnt + wlun;
2272 arr[2] = ((sizeof(struct scsi_lun) * num) >> 8) & 0xff;
2273 arr[3] = (sizeof(struct scsi_lun) * num) & 0xff;
2274 n = min((int)((SDEBUG_RLUN_ARR_SZ - 8) /
2275 sizeof(struct scsi_lun)), num);
2276 if (n < num) {
2277 wlun = 0;
2278 lun_cnt = n;
2279 }
2280 one_lun = (struct scsi_lun *) &arr[8];
2281 max_addr = arr + SDEBUG_RLUN_ARR_SZ;
2282 for (i = 0, lun = (scsi_debug_no_lun_0 ? 1 : 0);
2283 ((i < lun_cnt) && ((unsigned char *)(one_lun + i) < max_addr));
2284 i++, lun++) {
2285 upper = (lun >> 8) & 0x3f;
2286 if (upper)
2287 one_lun[i].scsi_lun[0] =
2288 (upper | (SAM2_LUN_ADDRESS_METHOD << 6));
2289 one_lun[i].scsi_lun[1] = lun & 0xff;
2290 }
2291 if (wlun) {
2292 one_lun[i].scsi_lun[0] = (SAM2_WLUN_REPORT_LUNS >> 8) & 0xff;
2293 one_lun[i].scsi_lun[1] = SAM2_WLUN_REPORT_LUNS & 0xff;
2294 i++;
2295 }
2296 alloc_len = (unsigned char *)(one_lun + i) - arr;
2297 return fill_from_dev_buffer(scp, arr,
2298 min((int)alloc_len, SDEBUG_RLUN_ARR_SZ));
2299 }
2300
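/* XDWRITEREAD: XOR the data-out buffer supplied by the initiator into the
 * command's bidirectional data-in buffer. */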
2301 static int resp_xdwriteread(struct scsi_cmnd *scp, unsigned long long lba,
2302 unsigned int num, struct sdebug_dev_info *devip)
2303 {
2304 int i, j, ret = -1;
2305 unsigned char *kaddr, *buf;
2306 unsigned int offset;
2307 struct scatterlist *sg;
2308 struct scsi_data_buffer *sdb = scsi_in(scp);
2309
2310 	/* It would be better not to use a temporary buffer here. */
2311 buf = kmalloc(scsi_bufflen(scp), GFP_ATOMIC);
2312 if (!buf)
2313 return ret;
2314
2315 scsi_sg_copy_to_buffer(scp, buf, scsi_bufflen(scp));
2316
2317 offset = 0;
2318 for_each_sg(sdb->table.sgl, sg, sdb->table.nents, i) {
2319 kaddr = (unsigned char *)kmap_atomic(sg_page(sg));
2320 if (!kaddr)
2321 goto out;
2322
2323 for (j = 0; j < sg->length; j++)
2324 *(kaddr + sg->offset + j) ^= *(buf + offset + j);
2325
2326 offset += sg->length;
2327 kunmap_atomic(kaddr);
2328 }
2329 ret = 0;
2330 out:
2331 kfree(buf);
2332
2333 return ret;
2334 }
2335
2336 /* Called when a queued command's delay timer expires. */
2337 static void timer_intr_handler(unsigned long indx)
2338 {
2339 struct sdebug_queued_cmd * sqcp;
2340 unsigned long iflags;
2341
2342 if (indx >= scsi_debug_max_queue) {
2343 printk(KERN_ERR "scsi_debug:timer_intr_handler: indx too "
2344 "large\n");
2345 return;
2346 }
2347 spin_lock_irqsave(&queued_arr_lock, iflags);
2348 sqcp = &queued_arr[(int)indx];
2349 if (! sqcp->in_use) {
2350 printk(KERN_ERR "scsi_debug:timer_intr_handler: Unexpected "
2351 "interrupt\n");
2352 spin_unlock_irqrestore(&queued_arr_lock, iflags);
2353 return;
2354 }
2355 sqcp->in_use = 0;
2356 if (sqcp->done_funct) {
2357 sqcp->a_cmnd->result = sqcp->scsi_result;
2358 sqcp->done_funct(sqcp->a_cmnd); /* callback to mid level */
2359 }
2360 sqcp->done_funct = NULL;
2361 spin_unlock_irqrestore(&queued_arr_lock, iflags);
2362 }
2363
2364
2365 static struct sdebug_dev_info *
2366 sdebug_device_create(struct sdebug_host_info *sdbg_host, gfp_t flags)
2367 {
2368 struct sdebug_dev_info *devip;
2369
2370 devip = kzalloc(sizeof(*devip), flags);
2371 if (devip) {
2372 devip->sdbg_host = sdbg_host;
2373 list_add_tail(&devip->dev_list, &sdbg_host->dev_info_list);
2374 }
2375 return devip;
2376 }
2377
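/* Find the sdebug_dev_info for a scsi_device, reusing a free slot or
 * allocating a new one if necessary, and initialise its sense buffer in
 * fixed or descriptor format according to scsi_debug_dsense. */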
2378 static struct sdebug_dev_info * devInfoReg(struct scsi_device * sdev)
2379 {
2380 struct sdebug_host_info * sdbg_host;
2381 struct sdebug_dev_info * open_devip = NULL;
2382 struct sdebug_dev_info * devip =
2383 (struct sdebug_dev_info *)sdev->hostdata;
2384
2385 if (devip)
2386 return devip;
2387 sdbg_host = *(struct sdebug_host_info **)shost_priv(sdev->host);
2388 if (!sdbg_host) {
2389 printk(KERN_ERR "Host info NULL\n");
2390 return NULL;
2391 }
2392 list_for_each_entry(devip, &sdbg_host->dev_info_list, dev_list) {
2393 if ((devip->used) && (devip->channel == sdev->channel) &&
2394 (devip->target == sdev->id) &&
2395 (devip->lun == sdev->lun))
2396 return devip;
2397 else {
2398 if ((!devip->used) && (!open_devip))
2399 open_devip = devip;
2400 }
2401 }
2402 if (!open_devip) { /* try and make a new one */
2403 open_devip = sdebug_device_create(sdbg_host, GFP_ATOMIC);
2404 if (!open_devip) {
2405 printk(KERN_ERR "%s: out of memory at line %d\n",
2406 __func__, __LINE__);
2407 return NULL;
2408 }
2409 }
2410
2411 open_devip->channel = sdev->channel;
2412 open_devip->target = sdev->id;
2413 open_devip->lun = sdev->lun;
2414 open_devip->sdbg_host = sdbg_host;
2415 open_devip->reset = 1;
2416 open_devip->used = 1;
2417 memset(open_devip->sense_buff, 0, SDEBUG_SENSE_LEN);
2418 if (scsi_debug_dsense)
2419 open_devip->sense_buff[0] = 0x72;
2420 else {
2421 open_devip->sense_buff[0] = 0x70;
2422 open_devip->sense_buff[7] = 0xa;
2423 }
2424 if (sdev->lun == SAM2_WLUN_REPORT_LUNS)
2425 open_devip->wlun = SAM2_WLUN_REPORT_LUNS & 0xff;
2426
2427 return open_devip;
2428 }
2429
2430 static int scsi_debug_slave_alloc(struct scsi_device *sdp)
2431 {
2432 if (SCSI_DEBUG_OPT_NOISE & scsi_debug_opts)
2433 printk(KERN_INFO "scsi_debug: slave_alloc <%u %u %u %u>\n",
2434 sdp->host->host_no, sdp->channel, sdp->id, sdp->lun);
2435 queue_flag_set_unlocked(QUEUE_FLAG_BIDI, sdp->request_queue);
2436 return 0;
2437 }
2438
2439 static int scsi_debug_slave_configure(struct scsi_device *sdp)
2440 {
2441 struct sdebug_dev_info *devip;
2442
2443 if (SCSI_DEBUG_OPT_NOISE & scsi_debug_opts)
2444 printk(KERN_INFO "scsi_debug: slave_configure <%u %u %u %u>\n",
2445 sdp->host->host_no, sdp->channel, sdp->id, sdp->lun);
2446 if (sdp->host->max_cmd_len != SCSI_DEBUG_MAX_CMD_LEN)
2447 sdp->host->max_cmd_len = SCSI_DEBUG_MAX_CMD_LEN;
2448 devip = devInfoReg(sdp);
2449 if (NULL == devip)
2450 return 1; /* no resources, will be marked offline */
2451 sdp->hostdata = devip;
2452 if (sdp->host->cmd_per_lun)
2453 scsi_adjust_queue_depth(sdp, SDEBUG_TAGGED_QUEUING,
2454 sdp->host->cmd_per_lun);
2455 blk_queue_max_segment_size(sdp->request_queue, 256 * 1024);
2456 if (scsi_debug_no_uld)
2457 sdp->no_uld_attach = 1;
2458 return 0;
2459 }
2460
2461 static void scsi_debug_slave_destroy(struct scsi_device *sdp)
2462 {
2463 struct sdebug_dev_info *devip =
2464 (struct sdebug_dev_info *)sdp->hostdata;
2465
2466 if (SCSI_DEBUG_OPT_NOISE & scsi_debug_opts)
2467 printk(KERN_INFO "scsi_debug: slave_destroy <%u %u %u %u>\n",
2468 sdp->host->host_no, sdp->channel, sdp->id, sdp->lun);
2469 if (devip) {
2470 /* make this slot available for re-use */
2471 devip->used = 0;
2472 sdp->hostdata = NULL;
2473 }
2474 }
2475
2476 /* Returns 1 if 'cmnd' was found and its timer deleted; otherwise returns 0 */
2477 static int stop_queued_cmnd(struct scsi_cmnd *cmnd)
2478 {
2479 unsigned long iflags;
2480 int k;
2481 struct sdebug_queued_cmd *sqcp;
2482
2483 spin_lock_irqsave(&queued_arr_lock, iflags);
2484 for (k = 0; k < scsi_debug_max_queue; ++k) {
2485 sqcp = &queued_arr[k];
2486 if (sqcp->in_use && (cmnd == sqcp->a_cmnd)) {
2487 del_timer_sync(&sqcp->cmnd_timer);
2488 sqcp->in_use = 0;
2489 sqcp->a_cmnd = NULL;
2490 break;
2491 }
2492 }
2493 spin_unlock_irqrestore(&queued_arr_lock, iflags);
2494 return (k < scsi_debug_max_queue) ? 1 : 0;
2495 }
2496
2497 /* Deletes (stops) timers of all queued commands */
2498 static void stop_all_queued(void)
2499 {
2500 unsigned long iflags;
2501 int k;
2502 struct sdebug_queued_cmd *sqcp;
2503
2504 spin_lock_irqsave(&queued_arr_lock, iflags);
2505 for (k = 0; k < scsi_debug_max_queue; ++k) {
2506 sqcp = &queued_arr[k];
2507 if (sqcp->in_use && sqcp->a_cmnd) {
2508 del_timer_sync(&sqcp->cmnd_timer);
2509 sqcp->in_use = 0;
2510 sqcp->a_cmnd = NULL;
2511 }
2512 }
2513 spin_unlock_irqrestore(&queued_arr_lock, iflags);
2514 }
2515
2516 static int scsi_debug_abort(struct scsi_cmnd * SCpnt)
2517 {
2518 if (SCSI_DEBUG_OPT_NOISE & scsi_debug_opts)
2519 printk(KERN_INFO "scsi_debug: abort\n");
2520 ++num_aborts;
2521 stop_queued_cmnd(SCpnt);
2522 return SUCCESS;
2523 }
2524
2525 static int scsi_debug_biosparam(struct scsi_device *sdev,
2526 struct block_device * bdev, sector_t capacity, int *info)
2527 {
2528 int res;
2529 unsigned char *buf;
2530
2531 if (SCSI_DEBUG_OPT_NOISE & scsi_debug_opts)
2532 printk(KERN_INFO "scsi_debug: biosparam\n");
2533 buf = scsi_bios_ptable(bdev);
2534 if (buf) {
2535 res = scsi_partsize(buf, capacity,
2536 &info[2], &info[0], &info[1]);
2537 kfree(buf);
2538 if (! res)
2539 return res;
2540 }
2541 info[0] = sdebug_heads;
2542 info[1] = sdebug_sectors_per;
2543 info[2] = sdebug_cylinders_per;
2544 return 0;
2545 }
2546
2547 static int scsi_debug_device_reset(struct scsi_cmnd * SCpnt)
2548 {
2549 struct sdebug_dev_info * devip;
2550
2551 if (SCSI_DEBUG_OPT_NOISE & scsi_debug_opts)
2552 printk(KERN_INFO "scsi_debug: device_reset\n");
2553 ++num_dev_resets;
2554 if (SCpnt) {
2555 devip = devInfoReg(SCpnt->device);
2556 if (devip)
2557 devip->reset = 1;
2558 }
2559 return SUCCESS;
2560 }
2561
2562 static int scsi_debug_bus_reset(struct scsi_cmnd * SCpnt)
2563 {
2564 struct sdebug_host_info *sdbg_host;
2565 struct sdebug_dev_info * dev_info;
2566 struct scsi_device * sdp;
2567 struct Scsi_Host * hp;
2568
2569 if (SCSI_DEBUG_OPT_NOISE & scsi_debug_opts)
2570 printk(KERN_INFO "scsi_debug: bus_reset\n");
2571 ++num_bus_resets;
2572 if (SCpnt && ((sdp = SCpnt->device)) && ((hp = sdp->host))) {
2573 sdbg_host = *(struct sdebug_host_info **)shost_priv(hp);
2574 if (sdbg_host) {
2575 list_for_each_entry(dev_info,
2576 &sdbg_host->dev_info_list,
2577 dev_list)
2578 dev_info->reset = 1;
2579 }
2580 }
2581 return SUCCESS;
2582 }
2583
2584 static int scsi_debug_host_reset(struct scsi_cmnd * SCpnt)
2585 {
2586 struct sdebug_host_info * sdbg_host;
2587 struct sdebug_dev_info * dev_info;
2588
2589 if (SCSI_DEBUG_OPT_NOISE & scsi_debug_opts)
2590 printk(KERN_INFO "scsi_debug: host_reset\n");
2591 ++num_host_resets;
2592 spin_lock(&sdebug_host_list_lock);
2593 list_for_each_entry(sdbg_host, &sdebug_host_list, host_list) {
2594 list_for_each_entry(dev_info, &sdbg_host->dev_info_list,
2595 dev_list)
2596 dev_info->reset = 1;
2597 }
2598 spin_unlock(&sdebug_host_list_lock);
2599 stop_all_queued();
2600 return SUCCESS;
2601 }
2602
2603 /* Initializes timers in queued array */
2604 static void __init init_all_queued(void)
2605 {
2606 unsigned long iflags;
2607 int k;
2608 struct sdebug_queued_cmd * sqcp;
2609
2610 spin_lock_irqsave(&queued_arr_lock, iflags);
2611 for (k = 0; k < scsi_debug_max_queue; ++k) {
2612 sqcp = &queued_arr[k];
2613 init_timer(&sqcp->cmnd_timer);
2614 sqcp->in_use = 0;
2615 sqcp->a_cmnd = NULL;
2616 }
2617 spin_unlock_irqrestore(&queued_arr_lock, iflags);
2618 }
2619
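/* Write a simple MBR-style partition table (signature plus num_parts
 * primary Linux partitions) at the start of the ramdisk. */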
2620 static void __init sdebug_build_parts(unsigned char *ramp,
2621 unsigned long store_size)
2622 {
2623 struct partition * pp;
2624 int starts[SDEBUG_MAX_PARTS + 2];
2625 int sectors_per_part, num_sectors, k;
2626 int heads_by_sects, start_sec, end_sec;
2627
2628 /* assume partition table already zeroed */
2629 if ((scsi_debug_num_parts < 1) || (store_size < 1048576))
2630 return;
2631 if (scsi_debug_num_parts > SDEBUG_MAX_PARTS) {
2632 scsi_debug_num_parts = SDEBUG_MAX_PARTS;
2633 printk(KERN_WARNING "scsi_debug:build_parts: reducing "
2634 "partitions to %d\n", SDEBUG_MAX_PARTS);
2635 }
2636 num_sectors = (int)sdebug_store_sectors;
2637 sectors_per_part = (num_sectors - sdebug_sectors_per)
2638 / scsi_debug_num_parts;
2639 heads_by_sects = sdebug_heads * sdebug_sectors_per;
2640 starts[0] = sdebug_sectors_per;
2641 for (k = 1; k < scsi_debug_num_parts; ++k)
2642 starts[k] = ((k * sectors_per_part) / heads_by_sects)
2643 * heads_by_sects;
2644 starts[scsi_debug_num_parts] = num_sectors;
2645 starts[scsi_debug_num_parts + 1] = 0;
2646
2647 ramp[510] = 0x55; /* magic partition markings */
2648 ramp[511] = 0xAA;
2649 pp = (struct partition *)(ramp + 0x1be);
2650 for (k = 0; starts[k + 1]; ++k, ++pp) {
2651 start_sec = starts[k];
2652 end_sec = starts[k + 1] - 1;
2653 pp->boot_ind = 0;
2654
2655 pp->cyl = start_sec / heads_by_sects;
2656 pp->head = (start_sec - (pp->cyl * heads_by_sects))
2657 / sdebug_sectors_per;
2658 pp->sector = (start_sec % sdebug_sectors_per) + 1;
2659
2660 pp->end_cyl = end_sec / heads_by_sects;
2661 pp->end_head = (end_sec - (pp->end_cyl * heads_by_sects))
2662 / sdebug_sectors_per;
2663 pp->end_sector = (end_sec % sdebug_sectors_per) + 1;
2664
2665 pp->start_sect = start_sec;
2666 pp->nr_sects = end_sec - start_sec + 1;
2667 pp->sys_ind = 0x83; /* plain Linux partition */
2668 }
2669 }
2670
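/* Complete a command either immediately (delta_jiff <= 0) or by arming a
 * one-shot timer in the queued command array. Autosense is simulated by
 * copying the per-device sense buffer on CHECK CONDITION. Returns non-zero
 * when the queue is full so the mid level sees the host as busy. */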
2671 static int schedule_resp(struct scsi_cmnd * cmnd,
2672 struct sdebug_dev_info * devip,
2673 done_funct_t done, int scsi_result, int delta_jiff)
2674 {
2675 if ((SCSI_DEBUG_OPT_NOISE & scsi_debug_opts) && cmnd) {
2676 if (scsi_result) {
2677 struct scsi_device * sdp = cmnd->device;
2678
2679 printk(KERN_INFO "scsi_debug: <%u %u %u %u> "
2680 "non-zero result=0x%x\n", sdp->host->host_no,
2681 sdp->channel, sdp->id, sdp->lun, scsi_result);
2682 }
2683 }
2684 if (cmnd && devip) {
2685 /* simulate autosense by this driver */
2686 if (SAM_STAT_CHECK_CONDITION == (scsi_result & 0xff))
2687 memcpy(cmnd->sense_buffer, devip->sense_buff,
2688 (SCSI_SENSE_BUFFERSIZE > SDEBUG_SENSE_LEN) ?
2689 SDEBUG_SENSE_LEN : SCSI_SENSE_BUFFERSIZE);
2690 }
2691 if (delta_jiff <= 0) {
2692 if (cmnd)
2693 cmnd->result = scsi_result;
2694 if (done)
2695 done(cmnd);
2696 return 0;
2697 } else {
2698 unsigned long iflags;
2699 int k;
2700 struct sdebug_queued_cmd * sqcp = NULL;
2701
2702 spin_lock_irqsave(&queued_arr_lock, iflags);
2703 for (k = 0; k < scsi_debug_max_queue; ++k) {
2704 sqcp = &queued_arr[k];
2705 if (! sqcp->in_use)
2706 break;
2707 }
2708 if (k >= scsi_debug_max_queue) {
2709 spin_unlock_irqrestore(&queued_arr_lock, iflags);
2710 printk(KERN_WARNING "scsi_debug: can_queue exceeded\n");
2711 return 1; /* report busy to mid level */
2712 }
2713 sqcp->in_use = 1;
2714 sqcp->a_cmnd = cmnd;
2715 sqcp->scsi_result = scsi_result;
2716 sqcp->done_funct = done;
2717 sqcp->cmnd_timer.function = timer_intr_handler;
2718 sqcp->cmnd_timer.data = k;
2719 sqcp->cmnd_timer.expires = jiffies + delta_jiff;
2720 add_timer(&sqcp->cmnd_timer);
2721 spin_unlock_irqrestore(&queued_arr_lock, iflags);
2722 if (cmnd)
2723 cmnd->result = 0;
2724 return 0;
2725 }
2726 }
2727 /* Note: The following macros create attribute files in the
2728    /sys/module/scsi_debug/parameters directory. Unfortunately the
2729    driver is not notified when one of those files is changed, so it
2730    cannot trigger auxiliary actions the way it can when the corresponding
2731    attribute in the /sys/bus/pseudo/drivers/scsi_debug directory is changed.
2732 */
2733 module_param_named(add_host, scsi_debug_add_host, int, S_IRUGO | S_IWUSR);
2734 module_param_named(ato, scsi_debug_ato, int, S_IRUGO);
2735 module_param_named(delay, scsi_debug_delay, int, S_IRUGO | S_IWUSR);
2736 module_param_named(dev_size_mb, scsi_debug_dev_size_mb, int, S_IRUGO);
2737 module_param_named(dif, scsi_debug_dif, int, S_IRUGO);
2738 module_param_named(dix, scsi_debug_dix, int, S_IRUGO);
2739 module_param_named(dsense, scsi_debug_dsense, int, S_IRUGO | S_IWUSR);
2740 module_param_named(every_nth, scsi_debug_every_nth, int, S_IRUGO | S_IWUSR);
2741 module_param_named(fake_rw, scsi_debug_fake_rw, int, S_IRUGO | S_IWUSR);
2742 module_param_named(guard, scsi_debug_guard, int, S_IRUGO);
2743 module_param_named(lbpu, scsi_debug_lbpu, int, S_IRUGO);
2744 module_param_named(lbpws, scsi_debug_lbpws, int, S_IRUGO);
2745 module_param_named(lbpws10, scsi_debug_lbpws10, int, S_IRUGO);
2746 module_param_named(lbprz, scsi_debug_lbprz, int, S_IRUGO);
2747 module_param_named(lowest_aligned, scsi_debug_lowest_aligned, int, S_IRUGO);
2748 module_param_named(max_luns, scsi_debug_max_luns, int, S_IRUGO | S_IWUSR);
2749 module_param_named(max_queue, scsi_debug_max_queue, int, S_IRUGO | S_IWUSR);
2750 module_param_named(no_lun_0, scsi_debug_no_lun_0, int, S_IRUGO | S_IWUSR);
2751 module_param_named(no_uld, scsi_debug_no_uld, int, S_IRUGO);
2752 module_param_named(num_parts, scsi_debug_num_parts, int, S_IRUGO);
2753 module_param_named(num_tgts, scsi_debug_num_tgts, int, S_IRUGO | S_IWUSR);
2754 module_param_named(opt_blks, scsi_debug_opt_blks, int, S_IRUGO);
2755 module_param_named(opts, scsi_debug_opts, int, S_IRUGO | S_IWUSR);
2756 module_param_named(physblk_exp, scsi_debug_physblk_exp, int, S_IRUGO);
2757 module_param_named(ptype, scsi_debug_ptype, int, S_IRUGO | S_IWUSR);
2758 module_param_named(removable, scsi_debug_removable, bool, S_IRUGO | S_IWUSR);
2759 module_param_named(scsi_level, scsi_debug_scsi_level, int, S_IRUGO);
2760 module_param_named(sector_size, scsi_debug_sector_size, int, S_IRUGO);
2761 module_param_named(unmap_alignment, scsi_debug_unmap_alignment, int, S_IRUGO);
2762 module_param_named(unmap_granularity, scsi_debug_unmap_granularity, int, S_IRUGO);
2763 module_param_named(unmap_max_blocks, scsi_debug_unmap_max_blocks, int, S_IRUGO);
2764 module_param_named(unmap_max_desc, scsi_debug_unmap_max_desc, int, S_IRUGO);
2765 module_param_named(virtual_gb, scsi_debug_virtual_gb, int, S_IRUGO | S_IWUSR);
2766 module_param_named(vpd_use_hostno, scsi_debug_vpd_use_hostno, int,
2767 S_IRUGO | S_IWUSR);
2768 module_param_named(write_same_length, scsi_debug_write_same_length, int,
2769 S_IRUGO | S_IWUSR);
2770
2771 MODULE_AUTHOR("Eric Youngdale + Douglas Gilbert");
2772 MODULE_DESCRIPTION("SCSI debug adapter driver");
2773 MODULE_LICENSE("GPL");
2774 MODULE_VERSION(SCSI_DEBUG_VERSION);
2775
2776 MODULE_PARM_DESC(add_host, "0..127 hosts allowed(def=1)");
2777 MODULE_PARM_DESC(ato, "application tag ownership: 0=disk 1=host (def=1)");
2778 MODULE_PARM_DESC(delay, "# of jiffies to delay response(def=1)");
2779 MODULE_PARM_DESC(dev_size_mb, "size in MB of ram shared by devs(def=8)");
2780 MODULE_PARM_DESC(dif, "data integrity field type: 0-3 (def=0)");
2781 MODULE_PARM_DESC(dix, "data integrity extensions mask (def=0)");
2782 MODULE_PARM_DESC(dsense, "use descriptor sense format(def=0 -> fixed)");
2783 MODULE_PARM_DESC(every_nth, "timeout every nth command(def=0)");
2784 MODULE_PARM_DESC(fake_rw, "fake reads/writes instead of copying (def=0)");
2785 MODULE_PARM_DESC(guard, "protection checksum: 0=crc, 1=ip (def=0)");
2786 MODULE_PARM_DESC(lbpu, "enable LBP, support UNMAP command (def=0)");
2787 MODULE_PARM_DESC(lbpws, "enable LBP, support WRITE SAME(16) with UNMAP bit (def=0)");
2788 MODULE_PARM_DESC(lbpws10, "enable LBP, support WRITE SAME(10) with UNMAP bit (def=0)");
2789 MODULE_PARM_DESC(lbprz, "unmapped blocks return 0 on read (def=1)");
2790 MODULE_PARM_DESC(lowest_aligned, "lowest aligned lba (def=0)");
2791 MODULE_PARM_DESC(max_luns, "number of LUNs per target to simulate(def=1)");
2792 MODULE_PARM_DESC(max_queue, "max number of queued commands (1 to 255(def))");
2793 MODULE_PARM_DESC(no_lun_0, "no LU number 0 (def=0 -> have lun 0)");
2794 MODULE_PARM_DESC(no_uld, "stop ULD (e.g. sd driver) attaching (def=0)");
2795 MODULE_PARM_DESC(num_parts, "number of partitions(def=0)");
2796 MODULE_PARM_DESC(num_tgts, "number of targets per host to simulate(def=1)");
2797 MODULE_PARM_DESC(opt_blks, "optimal transfer length in block (def=64)");
2798 MODULE_PARM_DESC(opts, "1->noise, 2->medium_err, 4->timeout, 8->recovered_err... (def=0)");
2799 MODULE_PARM_DESC(physblk_exp, "physical block exponent (def=0)");
2800 MODULE_PARM_DESC(ptype, "SCSI peripheral type(def=0[disk])");
2801 MODULE_PARM_DESC(removable, "claim to have removable media (def=0)");
2802 MODULE_PARM_DESC(scsi_level, "SCSI level to simulate(def=5[SPC-3])");
2803 MODULE_PARM_DESC(sector_size, "logical block size in bytes (def=512)");
2804 MODULE_PARM_DESC(unmap_alignment, "lowest aligned thin provisioning lba (def=0)");
2805 MODULE_PARM_DESC(unmap_granularity, "thin provisioning granularity in blocks (def=1)");
2806 MODULE_PARM_DESC(unmap_max_blocks, "max # of blocks can be unmapped in one cmd (def=0xffffffff)");
2807 MODULE_PARM_DESC(unmap_max_desc, "max # of ranges that can be unmapped in one cmd (def=256)");
2808 MODULE_PARM_DESC(virtual_gb, "virtual gigabyte size (def=0 -> use dev_size_mb)");
2809 MODULE_PARM_DESC(vpd_use_hostno, "0 -> dev ids ignore hostno (def=1 -> unique dev ids)");
2810 MODULE_PARM_DESC(write_same_length, "Maximum blocks per WRITE SAME cmd (def=0xffff)");
2811
2812 static char sdebug_info[256];
2813
2814 static const char * scsi_debug_info(struct Scsi_Host * shp)
2815 {
2816 sprintf(sdebug_info, "scsi_debug, version %s [%s], "
2817 "dev_size_mb=%d, opts=0x%x", SCSI_DEBUG_VERSION,
2818 scsi_debug_version_date, scsi_debug_dev_size_mb,
2819 scsi_debug_opts);
2820 return sdebug_info;
2821 }
2822
2823 /* scsi_debug_proc_info
2824  * Used because the driver currently has no /proc/scsi support of its own
2825 */
2826 static int scsi_debug_proc_info(struct Scsi_Host *host, char *buffer, char **start, off_t offset,
2827 int length, int inout)
2828 {
2829 int len, pos, begin;
2830 int orig_length;
2831
2832 orig_length = length;
2833
2834 if (inout == 1) {
2835 char arr[16];
2836 int minLen = length > 15 ? 15 : length;
2837
2838 if (!capable(CAP_SYS_ADMIN) || !capable(CAP_SYS_RAWIO))
2839 return -EACCES;
2840 memcpy(arr, buffer, minLen);
2841 arr[minLen] = '\0';
2842 if (1 != sscanf(arr, "%d", &pos))
2843 return -EINVAL;
2844 scsi_debug_opts = pos;
2845 if (scsi_debug_every_nth != 0)
2846 scsi_debug_cmnd_count = 0;
2847 return length;
2848 }
2849 begin = 0;
2850 pos = len = sprintf(buffer, "scsi_debug adapter driver, version "
2851 "%s [%s]\n"
2852 "num_tgts=%d, shared (ram) size=%d MB, opts=0x%x, "
2853 "every_nth=%d(curr:%d)\n"
2854 "delay=%d, max_luns=%d, scsi_level=%d\n"
2855 "sector_size=%d bytes, cylinders=%d, heads=%d, sectors=%d\n"
2856 "number of aborts=%d, device_reset=%d, bus_resets=%d, "
2857 "host_resets=%d\ndix_reads=%d dix_writes=%d dif_errors=%d\n",
2858 SCSI_DEBUG_VERSION, scsi_debug_version_date, scsi_debug_num_tgts,
2859 scsi_debug_dev_size_mb, scsi_debug_opts, scsi_debug_every_nth,
2860 scsi_debug_cmnd_count, scsi_debug_delay,
2861 scsi_debug_max_luns, scsi_debug_scsi_level,
2862 scsi_debug_sector_size, sdebug_cylinders_per, sdebug_heads,
2863 sdebug_sectors_per, num_aborts, num_dev_resets, num_bus_resets,
2864 num_host_resets, dix_reads, dix_writes, dif_errors);
2865 if (pos < offset) {
2866 len = 0;
2867 begin = pos;
2868 }
2869 *start = buffer + (offset - begin); /* Start of wanted data */
2870 len -= (offset - begin);
2871 if (len > length)
2872 len = length;
2873 return len;
2874 }
2875
2876 static ssize_t sdebug_delay_show(struct device_driver * ddp, char * buf)
2877 {
2878 return scnprintf(buf, PAGE_SIZE, "%d\n", scsi_debug_delay);
2879 }
2880
2881 static ssize_t sdebug_delay_store(struct device_driver * ddp,
2882 const char * buf, size_t count)
2883 {
2884 int delay;
2885 char work[20];
2886
2887 if (1 == sscanf(buf, "%10s", work)) {
2888 if ((1 == sscanf(work, "%d", &delay)) && (delay >= 0)) {
2889 scsi_debug_delay = delay;
2890 return count;
2891 }
2892 }
2893 return -EINVAL;
2894 }
2895 DRIVER_ATTR(delay, S_IRUGO | S_IWUSR, sdebug_delay_show,
2896 sdebug_delay_store);
2897
2898 static ssize_t sdebug_opts_show(struct device_driver * ddp, char * buf)
2899 {
2900 return scnprintf(buf, PAGE_SIZE, "0x%x\n", scsi_debug_opts);
2901 }
2902
2903 static ssize_t sdebug_opts_store(struct device_driver * ddp,
2904 const char * buf, size_t count)
2905 {
2906 int opts;
2907 char work[20];
2908
2909 if (1 == sscanf(buf, "%10s", work)) {
2910 if (0 == strnicmp(work,"0x", 2)) {
2911 if (1 == sscanf(&work[2], "%x", &opts))
2912 goto opts_done;
2913 } else {
2914 if (1 == sscanf(work, "%d", &opts))
2915 goto opts_done;
2916 }
2917 }
2918 return -EINVAL;
2919 opts_done:
2920 scsi_debug_opts = opts;
2921 scsi_debug_cmnd_count = 0;
2922 return count;
2923 }
2924 DRIVER_ATTR(opts, S_IRUGO | S_IWUSR, sdebug_opts_show,
2925 sdebug_opts_store);
2926
2927 static ssize_t sdebug_ptype_show(struct device_driver * ddp, char * buf)
2928 {
2929 return scnprintf(buf, PAGE_SIZE, "%d\n", scsi_debug_ptype);
2930 }
2931 static ssize_t sdebug_ptype_store(struct device_driver * ddp,
2932 const char * buf, size_t count)
2933 {
2934 int n;
2935
2936 if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
2937 scsi_debug_ptype = n;
2938 return count;
2939 }
2940 return -EINVAL;
2941 }
2942 DRIVER_ATTR(ptype, S_IRUGO | S_IWUSR, sdebug_ptype_show, sdebug_ptype_store);
2943
2944 static ssize_t sdebug_dsense_show(struct device_driver * ddp, char * buf)
2945 {
2946 return scnprintf(buf, PAGE_SIZE, "%d\n", scsi_debug_dsense);
2947 }
2948 static ssize_t sdebug_dsense_store(struct device_driver * ddp,
2949 const char * buf, size_t count)
2950 {
2951 int n;
2952
2953 if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
2954 scsi_debug_dsense = n;
2955 return count;
2956 }
2957 return -EINVAL;
2958 }
2959 DRIVER_ATTR(dsense, S_IRUGO | S_IWUSR, sdebug_dsense_show,
2960 sdebug_dsense_store);
2961
2962 static ssize_t sdebug_fake_rw_show(struct device_driver * ddp, char * buf)
2963 {
2964 return scnprintf(buf, PAGE_SIZE, "%d\n", scsi_debug_fake_rw);
2965 }
2966 static ssize_t sdebug_fake_rw_store(struct device_driver * ddp,
2967 const char * buf, size_t count)
2968 {
2969 int n;
2970
2971 if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
2972 scsi_debug_fake_rw = n;
2973 return count;
2974 }
2975 return -EINVAL;
2976 }
2977 DRIVER_ATTR(fake_rw, S_IRUGO | S_IWUSR, sdebug_fake_rw_show,
2978 sdebug_fake_rw_store);
2979
2980 static ssize_t sdebug_no_lun_0_show(struct device_driver * ddp, char * buf)
2981 {
2982 return scnprintf(buf, PAGE_SIZE, "%d\n", scsi_debug_no_lun_0);
2983 }
2984 static ssize_t sdebug_no_lun_0_store(struct device_driver * ddp,
2985 const char * buf, size_t count)
2986 {
2987 int n;
2988
2989 if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
2990 scsi_debug_no_lun_0 = n;
2991 return count;
2992 }
2993 return -EINVAL;
2994 }
2995 DRIVER_ATTR(no_lun_0, S_IRUGO | S_IWUSR, sdebug_no_lun_0_show,
2996 sdebug_no_lun_0_store);
2997
2998 static ssize_t sdebug_num_tgts_show(struct device_driver * ddp, char * buf)
2999 {
3000 return scnprintf(buf, PAGE_SIZE, "%d\n", scsi_debug_num_tgts);
3001 }
3002 static ssize_t sdebug_num_tgts_store(struct device_driver * ddp,
3003 const char * buf, size_t count)
3004 {
3005 int n;
3006
3007 if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
3008 scsi_debug_num_tgts = n;
3009 sdebug_max_tgts_luns();
3010 return count;
3011 }
3012 return -EINVAL;
3013 }
3014 DRIVER_ATTR(num_tgts, S_IRUGO | S_IWUSR, sdebug_num_tgts_show,
3015 sdebug_num_tgts_store);
3016
3017 static ssize_t sdebug_dev_size_mb_show(struct device_driver * ddp, char * buf)
3018 {
3019 return scnprintf(buf, PAGE_SIZE, "%d\n", scsi_debug_dev_size_mb);
3020 }
3021 DRIVER_ATTR(dev_size_mb, S_IRUGO, sdebug_dev_size_mb_show, NULL);
3022
3023 static ssize_t sdebug_num_parts_show(struct device_driver * ddp, char * buf)
3024 {
3025 return scnprintf(buf, PAGE_SIZE, "%d\n", scsi_debug_num_parts);
3026 }
3027 DRIVER_ATTR(num_parts, S_IRUGO, sdebug_num_parts_show, NULL);
3028
3029 static ssize_t sdebug_every_nth_show(struct device_driver * ddp, char * buf)
3030 {
3031 return scnprintf(buf, PAGE_SIZE, "%d\n", scsi_debug_every_nth);
3032 }
3033 static ssize_t sdebug_every_nth_store(struct device_driver * ddp,
3034 const char * buf, size_t count)
3035 {
3036 int nth;
3037
3038 if ((count > 0) && (1 == sscanf(buf, "%d", &nth))) {
3039 scsi_debug_every_nth = nth;
3040 scsi_debug_cmnd_count = 0;
3041 return count;
3042 }
3043 return -EINVAL;
3044 }
3045 DRIVER_ATTR(every_nth, S_IRUGO | S_IWUSR, sdebug_every_nth_show,
3046 sdebug_every_nth_store);
3047
3048 static ssize_t sdebug_max_luns_show(struct device_driver * ddp, char * buf)
3049 {
3050 return scnprintf(buf, PAGE_SIZE, "%d\n", scsi_debug_max_luns);
3051 }
3052 static ssize_t sdebug_max_luns_store(struct device_driver * ddp,
3053 const char * buf, size_t count)
3054 {
3055 int n;
3056
3057 if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
3058 scsi_debug_max_luns = n;
3059 sdebug_max_tgts_luns();
3060 return count;
3061 }
3062 return -EINVAL;
3063 }
3064 DRIVER_ATTR(max_luns, S_IRUGO | S_IWUSR, sdebug_max_luns_show,
3065 sdebug_max_luns_store);
3066
3067 static ssize_t sdebug_max_queue_show(struct device_driver * ddp, char * buf)
3068 {
3069 return scnprintf(buf, PAGE_SIZE, "%d\n", scsi_debug_max_queue);
3070 }
3071 static ssize_t sdebug_max_queue_store(struct device_driver * ddp,
3072 const char * buf, size_t count)
3073 {
3074 int n;
3075
3076 if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n > 0) &&
3077 (n <= SCSI_DEBUG_CANQUEUE)) {
3078 scsi_debug_max_queue = n;
3079 return count;
3080 }
3081 return -EINVAL;
3082 }
3083 DRIVER_ATTR(max_queue, S_IRUGO | S_IWUSR, sdebug_max_queue_show,
3084 sdebug_max_queue_store);
3085
3086 static ssize_t sdebug_no_uld_show(struct device_driver * ddp, char * buf)
3087 {
3088 return scnprintf(buf, PAGE_SIZE, "%d\n", scsi_debug_no_uld);
3089 }
3090 DRIVER_ATTR(no_uld, S_IRUGO, sdebug_no_uld_show, NULL);
3091
3092 static ssize_t sdebug_scsi_level_show(struct device_driver * ddp, char * buf)
3093 {
3094 return scnprintf(buf, PAGE_SIZE, "%d\n", scsi_debug_scsi_level);
3095 }
3096 DRIVER_ATTR(scsi_level, S_IRUGO, sdebug_scsi_level_show, NULL);
3097
3098 static ssize_t sdebug_virtual_gb_show(struct device_driver * ddp, char * buf)
3099 {
3100 return scnprintf(buf, PAGE_SIZE, "%d\n", scsi_debug_virtual_gb);
3101 }
3102 static ssize_t sdebug_virtual_gb_store(struct device_driver * ddp,
3103 const char * buf, size_t count)
3104 {
3105 int n;
3106
3107 if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
3108 scsi_debug_virtual_gb = n;
3109
3110 sdebug_capacity = get_sdebug_capacity();
3111
3112 return count;
3113 }
3114 return -EINVAL;
3115 }
3116 DRIVER_ATTR(virtual_gb, S_IRUGO | S_IWUSR, sdebug_virtual_gb_show,
3117 sdebug_virtual_gb_store);
3118
3119 static ssize_t sdebug_add_host_show(struct device_driver * ddp, char * buf)
3120 {
3121 return scnprintf(buf, PAGE_SIZE, "%d\n", scsi_debug_add_host);
3122 }
3123
3124 static ssize_t sdebug_add_host_store(struct device_driver * ddp,
3125 const char * buf, size_t count)
3126 {
3127 int delta_hosts;
3128
3129 if (sscanf(buf, "%d", &delta_hosts) != 1)
3130 return -EINVAL;
3131 if (delta_hosts > 0) {
3132 do {
3133 sdebug_add_adapter();
3134 } while (--delta_hosts);
3135 } else if (delta_hosts < 0) {
3136 do {
3137 sdebug_remove_adapter();
3138 } while (++delta_hosts);
3139 }
3140 return count;
3141 }
3142 DRIVER_ATTR(add_host, S_IRUGO | S_IWUSR, sdebug_add_host_show,
3143 sdebug_add_host_store);
3144
3145 static ssize_t sdebug_vpd_use_hostno_show(struct device_driver * ddp,
3146 char * buf)
3147 {
3148 return scnprintf(buf, PAGE_SIZE, "%d\n", scsi_debug_vpd_use_hostno);
3149 }
3150 static ssize_t sdebug_vpd_use_hostno_store(struct device_driver * ddp,
3151 const char * buf, size_t count)
3152 {
3153 int n;
3154
3155 if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
3156 scsi_debug_vpd_use_hostno = n;
3157 return count;
3158 }
3159 return -EINVAL;
3160 }
3161 DRIVER_ATTR(vpd_use_hostno, S_IRUGO | S_IWUSR, sdebug_vpd_use_hostno_show,
3162 sdebug_vpd_use_hostno_store);
3163
3164 static ssize_t sdebug_sector_size_show(struct device_driver * ddp, char * buf)
3165 {
3166 return scnprintf(buf, PAGE_SIZE, "%u\n", scsi_debug_sector_size);
3167 }
3168 DRIVER_ATTR(sector_size, S_IRUGO, sdebug_sector_size_show, NULL);
3169
3170 static ssize_t sdebug_dix_show(struct device_driver *ddp, char *buf)
3171 {
3172 return scnprintf(buf, PAGE_SIZE, "%d\n", scsi_debug_dix);
3173 }
3174 DRIVER_ATTR(dix, S_IRUGO, sdebug_dix_show, NULL);
3175
3176 static ssize_t sdebug_dif_show(struct device_driver *ddp, char *buf)
3177 {
3178 return scnprintf(buf, PAGE_SIZE, "%d\n", scsi_debug_dif);
3179 }
3180 DRIVER_ATTR(dif, S_IRUGO, sdebug_dif_show, NULL);
3181
3182 static ssize_t sdebug_guard_show(struct device_driver *ddp, char *buf)
3183 {
3184 return scnprintf(buf, PAGE_SIZE, "%d\n", scsi_debug_guard);
3185 }
3186 DRIVER_ATTR(guard, S_IRUGO, sdebug_guard_show, NULL);
3187
3188 static ssize_t sdebug_ato_show(struct device_driver *ddp, char *buf)
3189 {
3190 return scnprintf(buf, PAGE_SIZE, "%d\n", scsi_debug_ato);
3191 }
3192 DRIVER_ATTR(ato, S_IRUGO, sdebug_ato_show, NULL);
3193
3194 static ssize_t sdebug_map_show(struct device_driver *ddp, char *buf)
3195 {
3196 ssize_t count;
3197
3198 if (!scsi_debug_lbp())
3199 return scnprintf(buf, PAGE_SIZE, "0-%u\n",
3200 sdebug_store_sectors);
3201
3202 count = bitmap_scnlistprintf(buf, PAGE_SIZE, map_storep, map_size);
3203
3204 buf[count++] = '\n';
3205 buf[count++] = 0;
3206
3207 return count;
3208 }
3209 DRIVER_ATTR(map, S_IRUGO, sdebug_map_show, NULL);
3210
3211 static ssize_t sdebug_removable_show(struct device_driver *ddp,
3212 char *buf)
3213 {
3214 return scnprintf(buf, PAGE_SIZE, "%d\n", scsi_debug_removable ? 1 : 0);
3215 }
3216 static ssize_t sdebug_removable_store(struct device_driver *ddp,
3217 const char *buf, size_t count)
3218 {
3219 int n;
3220
3221 if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
3222 scsi_debug_removable = (n > 0);
3223 return count;
3224 }
3225 return -EINVAL;
3226 }
3227 DRIVER_ATTR(removable, S_IRUGO | S_IWUSR, sdebug_removable_show,
3228 sdebug_removable_store);
3229
3230
3231 /* Note: The following function creates attribute files in the
3232 /sys/bus/pseudo/drivers/scsi_debug directory. The advantage of these
3233 files (over those found in the /sys/module/scsi_debug/parameters
3234 directory) is that auxiliary actions can be triggered when an attribute
3235 is changed. For example see: sdebug_add_host_store() above.
3236 */
3237 static int do_create_driverfs_files(void)
3238 {
3239 int ret;
3240
3241 ret = driver_create_file(&sdebug_driverfs_driver, &driver_attr_add_host);
3242 ret |= driver_create_file(&sdebug_driverfs_driver, &driver_attr_delay);
3243 ret |= driver_create_file(&sdebug_driverfs_driver, &driver_attr_dev_size_mb);
3244 ret |= driver_create_file(&sdebug_driverfs_driver, &driver_attr_dsense);
3245 ret |= driver_create_file(&sdebug_driverfs_driver, &driver_attr_every_nth);
3246 ret |= driver_create_file(&sdebug_driverfs_driver, &driver_attr_fake_rw);
3247 ret |= driver_create_file(&sdebug_driverfs_driver, &driver_attr_max_luns);
3248 ret |= driver_create_file(&sdebug_driverfs_driver, &driver_attr_max_queue);
3249 ret |= driver_create_file(&sdebug_driverfs_driver, &driver_attr_no_lun_0);
3250 ret |= driver_create_file(&sdebug_driverfs_driver, &driver_attr_no_uld);
3251 ret |= driver_create_file(&sdebug_driverfs_driver, &driver_attr_num_parts);
3252 ret |= driver_create_file(&sdebug_driverfs_driver, &driver_attr_num_tgts);
3253 ret |= driver_create_file(&sdebug_driverfs_driver, &driver_attr_ptype);
3254 ret |= driver_create_file(&sdebug_driverfs_driver, &driver_attr_opts);
3255 ret |= driver_create_file(&sdebug_driverfs_driver, &driver_attr_removable);
3256 ret |= driver_create_file(&sdebug_driverfs_driver, &driver_attr_scsi_level);
3257 ret |= driver_create_file(&sdebug_driverfs_driver, &driver_attr_virtual_gb);
3258 ret |= driver_create_file(&sdebug_driverfs_driver, &driver_attr_vpd_use_hostno);
3259 ret |= driver_create_file(&sdebug_driverfs_driver, &driver_attr_sector_size);
3260 ret |= driver_create_file(&sdebug_driverfs_driver, &driver_attr_dix);
3261 ret |= driver_create_file(&sdebug_driverfs_driver, &driver_attr_dif);
3262 ret |= driver_create_file(&sdebug_driverfs_driver, &driver_attr_guard);
3263 ret |= driver_create_file(&sdebug_driverfs_driver, &driver_attr_ato);
3264 ret |= driver_create_file(&sdebug_driverfs_driver, &driver_attr_map);
3265 return ret;
3266 }
3267
3268 static void do_remove_driverfs_files(void)
3269 {
3270 driver_remove_file(&sdebug_driverfs_driver, &driver_attr_map);
3271 driver_remove_file(&sdebug_driverfs_driver, &driver_attr_ato);
3272 driver_remove_file(&sdebug_driverfs_driver, &driver_attr_guard);
3273 driver_remove_file(&sdebug_driverfs_driver, &driver_attr_dif);
3274 driver_remove_file(&sdebug_driverfs_driver, &driver_attr_dix);
3275 driver_remove_file(&sdebug_driverfs_driver, &driver_attr_sector_size);
3276 driver_remove_file(&sdebug_driverfs_driver, &driver_attr_vpd_use_hostno);
3277 driver_remove_file(&sdebug_driverfs_driver, &driver_attr_virtual_gb);
3278 driver_remove_file(&sdebug_driverfs_driver, &driver_attr_scsi_level);
3279 driver_remove_file(&sdebug_driverfs_driver, &driver_attr_opts);
3280 driver_remove_file(&sdebug_driverfs_driver, &driver_attr_ptype);
3281 driver_remove_file(&sdebug_driverfs_driver, &driver_attr_removable);
3282 driver_remove_file(&sdebug_driverfs_driver, &driver_attr_num_tgts);
3283 driver_remove_file(&sdebug_driverfs_driver, &driver_attr_num_parts);
3284 driver_remove_file(&sdebug_driverfs_driver, &driver_attr_no_uld);
3285 driver_remove_file(&sdebug_driverfs_driver, &driver_attr_no_lun_0);
3286 driver_remove_file(&sdebug_driverfs_driver, &driver_attr_max_queue);
3287 driver_remove_file(&sdebug_driverfs_driver, &driver_attr_max_luns);
3288 driver_remove_file(&sdebug_driverfs_driver, &driver_attr_fake_rw);
3289 driver_remove_file(&sdebug_driverfs_driver, &driver_attr_every_nth);
3290 driver_remove_file(&sdebug_driverfs_driver, &driver_attr_dsense);
3291 driver_remove_file(&sdebug_driverfs_driver, &driver_attr_dev_size_mb);
3292 driver_remove_file(&sdebug_driverfs_driver, &driver_attr_delay);
3293 driver_remove_file(&sdebug_driverfs_driver, &driver_attr_add_host);
3294 }
3295
3296 struct device *pseudo_primary;
3297
3298 static int __init scsi_debug_init(void)
3299 {
3300 unsigned long sz;
3301 int host_to_add;
3302 int k;
3303 int ret;
3304
3305 switch (scsi_debug_sector_size) {
3306 case 512:
3307 case 1024:
3308 case 2048:
3309 case 4096:
3310 break;
3311 default:
3312 printk(KERN_ERR "scsi_debug_init: invalid sector_size %d\n",
3313 scsi_debug_sector_size);
3314 return -EINVAL;
3315 }
3316
3317 switch (scsi_debug_dif) {
3318
3319 case SD_DIF_TYPE0_PROTECTION:
3320 case SD_DIF_TYPE1_PROTECTION:
3321 case SD_DIF_TYPE2_PROTECTION:
3322 case SD_DIF_TYPE3_PROTECTION:
3323 break;
3324
3325 default:
3326 printk(KERN_ERR "scsi_debug_init: dif must be 0, 1, 2 or 3\n");
3327 return -EINVAL;
3328 }
3329
3330 if (scsi_debug_guard > 1) {
3331 printk(KERN_ERR "scsi_debug_init: guard must be 0 or 1\n");
3332 return -EINVAL;
3333 }
3334
3335 if (scsi_debug_ato > 1) {
3336 printk(KERN_ERR "scsi_debug_init: ato must be 0 or 1\n");
3337 return -EINVAL;
3338 }
3339
3340 if (scsi_debug_physblk_exp > 15) {
3341 printk(KERN_ERR "scsi_debug_init: invalid physblk_exp %u\n",
3342 scsi_debug_physblk_exp);
3343 return -EINVAL;
3344 }
3345
3346 if (scsi_debug_lowest_aligned > 0x3fff) {
3347 printk(KERN_ERR "scsi_debug_init: lowest_aligned too big: %u\n",
3348 scsi_debug_lowest_aligned);
3349 return -EINVAL;
3350 }
3351
3352 if (scsi_debug_dev_size_mb < 1)
3353 scsi_debug_dev_size_mb = 1; /* force minimum 1 MB ramdisk */
3354 sz = (unsigned long)scsi_debug_dev_size_mb * 1048576;
3355 sdebug_store_sectors = sz / scsi_debug_sector_size;
3356 sdebug_capacity = get_sdebug_capacity();
3357
3358 /* play around with geometry, don't waste too much on track 0 */
3359 sdebug_heads = 8;
3360 sdebug_sectors_per = 32;
3361 	if (scsi_debug_dev_size_mb >= 256)
3362 		sdebug_heads = 64;
3363 	else if (scsi_debug_dev_size_mb >= 16)
3364 		sdebug_heads = 32;
3365 sdebug_cylinders_per = (unsigned long)sdebug_capacity /
3366 (sdebug_sectors_per * sdebug_heads);
3367 if (sdebug_cylinders_per >= 1024) {
3368 /* other LLDs do this; implies >= 1GB ram disk ... */
3369 sdebug_heads = 255;
3370 sdebug_sectors_per = 63;
3371 sdebug_cylinders_per = (unsigned long)sdebug_capacity /
3372 (sdebug_sectors_per * sdebug_heads);
3373 }
3374
3375 fake_storep = vmalloc(sz);
3376 if (NULL == fake_storep) {
3377 printk(KERN_ERR "scsi_debug_init: out of memory, 1\n");
3378 return -ENOMEM;
3379 }
3380 memset(fake_storep, 0, sz);
3381 if (scsi_debug_num_parts > 0)
3382 sdebug_build_parts(fake_storep, sz);
3383
3384 if (scsi_debug_dif) {
3385 int dif_size;
3386
3387 dif_size = sdebug_store_sectors * sizeof(struct sd_dif_tuple);
3388 dif_storep = vmalloc(dif_size);
3389
3390 printk(KERN_ERR "scsi_debug_init: dif_storep %u bytes @ %p\n",
3391 dif_size, dif_storep);
3392
3393 if (dif_storep == NULL) {
3394 printk(KERN_ERR "scsi_debug_init: out of mem. (DIX)\n");
3395 ret = -ENOMEM;
3396 goto free_vm;
3397 }
3398
3399 memset(dif_storep, 0xff, dif_size);
3400 }
3401
3402 /* Logical Block Provisioning */
3403 if (scsi_debug_lbp()) {
3404 unsigned int map_bytes;
3405
3406 scsi_debug_unmap_max_blocks =
3407 clamp(scsi_debug_unmap_max_blocks, 0U, 0xffffffffU);
3408
3409 scsi_debug_unmap_max_desc =
3410 clamp(scsi_debug_unmap_max_desc, 0U, 256U);
3411
3412 scsi_debug_unmap_granularity =
3413 clamp(scsi_debug_unmap_granularity, 1U, 0xffffffffU);
3414
3415 if (scsi_debug_unmap_alignment &&
3416 scsi_debug_unmap_granularity < scsi_debug_unmap_alignment) {
3417 printk(KERN_ERR
3418 "%s: ERR: unmap_granularity < unmap_alignment\n",
3419 __func__);
3420 return -EINVAL;
3421 }
3422
3423 map_size = (sdebug_store_sectors / scsi_debug_unmap_granularity);
3424 map_bytes = map_size >> 3;
3425 map_storep = vmalloc(map_bytes);
3426
3427 printk(KERN_INFO "scsi_debug_init: %lu provisioning blocks\n",
3428 map_size);
3429
3430 if (map_storep == NULL) {
3431 printk(KERN_ERR "scsi_debug_init: out of mem. (MAP)\n");
3432 ret = -ENOMEM;
3433 goto free_vm;
3434 }
3435
3436 memset(map_storep, 0x0, map_bytes);
3437
3438 /* Map first 1KB for partition table */
3439 if (scsi_debug_num_parts)
3440 map_region(0, 2);
3441 }
3442
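	/* Register a pseudo root device, bus and driver; each simulated
	 * adapter added below becomes a child of pseudo_primary on the
	 * pseudo_lld_bus. */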
3443 pseudo_primary = root_device_register("pseudo_0");
3444 if (IS_ERR(pseudo_primary)) {
3445 printk(KERN_WARNING "scsi_debug: root_device_register() error\n");
3446 ret = PTR_ERR(pseudo_primary);
3447 goto free_vm;
3448 }
3449 ret = bus_register(&pseudo_lld_bus);
3450 if (ret < 0) {
3451 printk(KERN_WARNING "scsi_debug: bus_register error: %d\n",
3452 ret);
3453 goto dev_unreg;
3454 }
3455 ret = driver_register(&sdebug_driverfs_driver);
3456 if (ret < 0) {
3457 printk(KERN_WARNING "scsi_debug: driver_register error: %d\n",
3458 ret);
3459 goto bus_unreg;
3460 }
3461 ret = do_create_driverfs_files();
3462 if (ret < 0) {
3463 printk(KERN_WARNING "scsi_debug: driver_create_file error: %d\n",
3464 ret);
3465 goto del_files;
3466 }
3467
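	/* Driver-model registration succeeded: initialise the queued-command
	 * array, then create the requested number of simulated adapters.
	 * scsi_debug_add_host is rebuilt as a count of hosts actually added. */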
3468 init_all_queued();
3469
3470 host_to_add = scsi_debug_add_host;
3471 scsi_debug_add_host = 0;
3472
3473 for (k = 0; k < host_to_add; k++) {
3474 if (sdebug_add_adapter()) {
3475 printk(KERN_ERR "scsi_debug_init: "
3476 "sdebug_add_adapter failed k=%d\n", k);
3477 break;
3478 }
3479 }
3480
3481 if (SCSI_DEBUG_OPT_NOISE & scsi_debug_opts) {
3482 printk(KERN_INFO "scsi_debug_init: built %d host(s)\n",
3483 scsi_debug_add_host);
3484 }
3485 return 0;
3486
3487 del_files:
3488 do_remove_driverfs_files();
3489 driver_unregister(&sdebug_driverfs_driver);
3490 bus_unreg:
3491 bus_unregister(&pseudo_lld_bus);
3492 dev_unreg:
3493 root_device_unregister(pseudo_primary);
3494 free_vm:
3495 if (map_storep)
3496 vfree(map_storep);
3497 if (dif_storep)
3498 vfree(dif_storep);
3499 vfree(fake_storep);
3500
3501 return ret;
3502 }
3503
3504 static void __exit scsi_debug_exit(void)
3505 {
3506 int k = scsi_debug_add_host;
3507
3508 stop_all_queued();
3509 for (; k; k--)
3510 sdebug_remove_adapter();
3511 do_remove_driverfs_files();
3512 driver_unregister(&sdebug_driverfs_driver);
3513 bus_unregister(&pseudo_lld_bus);
3514 root_device_unregister(pseudo_primary);
3515
3516 	if (dif_storep)
3517 		vfree(dif_storep);
	if (map_storep)
		vfree(map_storep);
3518 
3519 	vfree(fake_storep);
3520 }
3521
3522 device_initcall(scsi_debug_init);
3523 module_exit(scsi_debug_exit);
3524
3525 static void sdebug_release_adapter(struct device *dev)
3526 {
3527 struct sdebug_host_info *sdbg_host;
3528
3529 sdbg_host = to_sdebug_host(dev);
3530 kfree(sdbg_host);
3531 }
3532
3533 static int sdebug_add_adapter(void)
3534 {
3535 int k, devs_per_host;
3536 int error = 0;
3537 struct sdebug_host_info *sdbg_host;
3538 struct sdebug_dev_info *sdbg_devinfo, *tmp;
3539
3540 	sdbg_host = kzalloc(sizeof(*sdbg_host), GFP_KERNEL);
3541 if (NULL == sdbg_host) {
3542 printk(KERN_ERR "%s: out of memory at line %d\n",
3543 __func__, __LINE__);
3544 return -ENOMEM;
3545 }
3546
3547 INIT_LIST_HEAD(&sdbg_host->dev_info_list);
3548
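	/* Pre-allocate a device-info entry for every target/LUN pair this
	 * adapter will expose. */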
3549 devs_per_host = scsi_debug_num_tgts * scsi_debug_max_luns;
3550 for (k = 0; k < devs_per_host; k++) {
3551 sdbg_devinfo = sdebug_device_create(sdbg_host, GFP_KERNEL);
3552 if (!sdbg_devinfo) {
3553 printk(KERN_ERR "%s: out of memory at line %d\n",
3554 __func__, __LINE__);
3555 error = -ENOMEM;
3556 goto clean;
3557 }
3558 }
3559
3560 spin_lock(&sdebug_host_list_lock);
3561 list_add_tail(&sdbg_host->host_list, &sdebug_host_list);
3562 spin_unlock(&sdebug_host_list_lock);
3563
3564 sdbg_host->dev.bus = &pseudo_lld_bus;
3565 sdbg_host->dev.parent = pseudo_primary;
3566 sdbg_host->dev.release = &sdebug_release_adapter;
3567 dev_set_name(&sdbg_host->dev, "adapter%d", scsi_debug_add_host);
3568
3569 error = device_register(&sdbg_host->dev);
3570
3571 if (error)
3572 goto clean;
3573
3574 ++scsi_debug_add_host;
3575 return error;
3576
3577 clean:
3578 list_for_each_entry_safe(sdbg_devinfo, tmp, &sdbg_host->dev_info_list,
3579 dev_list) {
3580 list_del(&sdbg_devinfo->dev_list);
3581 kfree(sdbg_devinfo);
3582 }
3583
3584 kfree(sdbg_host);
3585 return error;
3586 }
3587
3588 static void sdebug_remove_adapter(void)
3589 {
3590 	struct sdebug_host_info *sdbg_host = NULL;
3591
3592 spin_lock(&sdebug_host_list_lock);
3593 if (!list_empty(&sdebug_host_list)) {
3594 sdbg_host = list_entry(sdebug_host_list.prev,
3595 struct sdebug_host_info, host_list);
3596 list_del(&sdbg_host->host_list);
3597 }
3598 spin_unlock(&sdebug_host_list_lock);
3599
3600 if (!sdbg_host)
3601 return;
3602
3603 device_unregister(&sdbg_host->dev);
3604 --scsi_debug_add_host;
3605 }
3606
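/* Decode the CDB, emulate the opcode against the shared ramdisk and hand the
 * result to schedule_resp(), which completes the command immediately or after
 * scsi_debug_delay. */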
3607 static
3608 int scsi_debug_queuecommand_lck(struct scsi_cmnd *SCpnt, done_funct_t done)
3609 {
3610 unsigned char *cmd = (unsigned char *) SCpnt->cmnd;
3611 int len, k;
3612 unsigned int num;
3613 unsigned long long lba;
3614 u32 ei_lba;
3615 int errsts = 0;
3616 int target = SCpnt->device->id;
3617 struct sdebug_dev_info *devip = NULL;
3618 int inj_recovered = 0;
3619 int inj_transport = 0;
3620 int inj_dif = 0;
3621 int inj_dix = 0;
3622 int delay_override = 0;
3623 int unmap = 0;
3624
3625 scsi_set_resid(SCpnt, 0);
3626 if ((SCSI_DEBUG_OPT_NOISE & scsi_debug_opts) && cmd) {
3627 printk(KERN_INFO "scsi_debug: cmd ");
3628 for (k = 0, len = SCpnt->cmd_len; k < len; ++k)
3629 printk("%02x ", (int)cmd[k]);
3630 printk("\n");
3631 }
3632
3633 if (target == SCpnt->device->host->hostt->this_id) {
3634 printk(KERN_INFO "scsi_debug: initiator's id used as "
3635 "target!\n");
3636 return schedule_resp(SCpnt, NULL, done,
3637 DID_NO_CONNECT << 16, 0);
3638 }
3639
3640 if ((SCpnt->device->lun >= scsi_debug_max_luns) &&
3641 (SCpnt->device->lun != SAM2_WLUN_REPORT_LUNS))
3642 return schedule_resp(SCpnt, NULL, done,
3643 DID_NO_CONNECT << 16, 0);
3644 devip = devInfoReg(SCpnt->device);
3645 if (NULL == devip)
3646 return schedule_resp(SCpnt, NULL, done,
3647 DID_NO_CONNECT << 16, 0);
3648
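	/* Optional fault injection: every |scsi_debug_every_nth| commands
	 * either swallow the command to provoke a timeout, or arm a
	 * recovered/transport/DIF/DIX error that the READ/WRITE handling
	 * below will report. */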
3649 if ((scsi_debug_every_nth != 0) &&
3650 (++scsi_debug_cmnd_count >= abs(scsi_debug_every_nth))) {
3651 scsi_debug_cmnd_count = 0;
3652 if (scsi_debug_every_nth < -1)
3653 scsi_debug_every_nth = -1;
3654 if (SCSI_DEBUG_OPT_TIMEOUT & scsi_debug_opts)
3655 return 0; /* ignore command causing timeout */
3656 else if (SCSI_DEBUG_OPT_MAC_TIMEOUT & scsi_debug_opts &&
3657 scsi_medium_access_command(SCpnt))
3658 return 0; /* time out reads and writes */
3659 else if (SCSI_DEBUG_OPT_RECOVERED_ERR & scsi_debug_opts)
3660 inj_recovered = 1; /* to reads and writes below */
3661 else if (SCSI_DEBUG_OPT_TRANSPORT_ERR & scsi_debug_opts)
3662 inj_transport = 1; /* to reads and writes below */
3663 else if (SCSI_DEBUG_OPT_DIF_ERR & scsi_debug_opts)
3664 inj_dif = 1; /* to reads and writes below */
3665 else if (SCSI_DEBUG_OPT_DIX_ERR & scsi_debug_opts)
3666 inj_dix = 1; /* to reads and writes below */
3667 }
3668
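	/* The well known REPORT LUNS LU only services INQUIRY, REQUEST SENSE,
	 * TEST UNIT READY and REPORT LUNS. */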
3669 if (devip->wlun) {
3670 switch (*cmd) {
3671 case INQUIRY:
3672 case REQUEST_SENSE:
3673 case TEST_UNIT_READY:
3674 case REPORT_LUNS:
3675 break; /* only allowable wlun commands */
3676 default:
3677 if (SCSI_DEBUG_OPT_NOISE & scsi_debug_opts)
3678 printk(KERN_INFO "scsi_debug: Opcode: 0x%x "
3679 "not supported for wlun\n", *cmd);
3680 mk_sense_buffer(devip, ILLEGAL_REQUEST,
3681 INVALID_OPCODE, 0);
3682 errsts = check_condition_result;
3683 return schedule_resp(SCpnt, devip, done, errsts,
3684 0);
3685 }
3686 }
3687
3688 switch (*cmd) {
3689 case INQUIRY: /* mandatory, ignore unit attention */
3690 delay_override = 1;
3691 errsts = resp_inquiry(SCpnt, target, devip);
3692 break;
3693 case REQUEST_SENSE: /* mandatory, ignore unit attention */
3694 delay_override = 1;
3695 errsts = resp_requests(SCpnt, devip);
3696 break;
3697 case REZERO_UNIT: /* actually this is REWIND for SSC */
3698 case START_STOP:
3699 errsts = resp_start_stop(SCpnt, devip);
3700 break;
3701 case ALLOW_MEDIUM_REMOVAL:
3702 errsts = check_readiness(SCpnt, 1, devip);
3703 if (errsts)
3704 break;
3705 if (SCSI_DEBUG_OPT_NOISE & scsi_debug_opts)
3706 printk(KERN_INFO "scsi_debug: Medium removal %s\n",
3707 cmd[4] ? "inhibited" : "enabled");
3708 break;
3709 case SEND_DIAGNOSTIC: /* mandatory */
3710 errsts = check_readiness(SCpnt, 1, devip);
3711 break;
3712 case TEST_UNIT_READY: /* mandatory */
3713 delay_override = 1;
3714 errsts = check_readiness(SCpnt, 0, devip);
3715 break;
3716 case RESERVE:
3717 errsts = check_readiness(SCpnt, 1, devip);
3718 break;
3719 case RESERVE_10:
3720 errsts = check_readiness(SCpnt, 1, devip);
3721 break;
3722 case RELEASE:
3723 errsts = check_readiness(SCpnt, 1, devip);
3724 break;
3725 case RELEASE_10:
3726 errsts = check_readiness(SCpnt, 1, devip);
3727 break;
3728 case READ_CAPACITY:
3729 errsts = resp_readcap(SCpnt, devip);
3730 break;
3731 case SERVICE_ACTION_IN:
3732 if (cmd[1] == SAI_READ_CAPACITY_16)
3733 errsts = resp_readcap16(SCpnt, devip);
3734 else if (cmd[1] == SAI_GET_LBA_STATUS) {
3735
3736 if (scsi_debug_lbp() == 0) {
3737 mk_sense_buffer(devip, ILLEGAL_REQUEST,
3738 INVALID_COMMAND_OPCODE, 0);
3739 errsts = check_condition_result;
3740 } else
3741 errsts = resp_get_lba_status(SCpnt, devip);
3742 } else {
3743 mk_sense_buffer(devip, ILLEGAL_REQUEST,
3744 INVALID_OPCODE, 0);
3745 errsts = check_condition_result;
3746 }
3747 break;
3748 case MAINTENANCE_IN:
3749 if (MI_REPORT_TARGET_PGS != cmd[1]) {
3750 mk_sense_buffer(devip, ILLEGAL_REQUEST,
3751 INVALID_OPCODE, 0);
3752 errsts = check_condition_result;
3753 break;
3754 }
3755 errsts = resp_report_tgtpgs(SCpnt, devip);
3756 break;
3757 case READ_16:
3758 case READ_12:
3759 case READ_10:
3760 /* READ{10,12,16} and DIF Type 2 are natural enemies */
3761 if (scsi_debug_dif == SD_DIF_TYPE2_PROTECTION &&
3762 cmd[1] & 0xe0) {
3763 mk_sense_buffer(devip, ILLEGAL_REQUEST,
3764 INVALID_COMMAND_OPCODE, 0);
3765 errsts = check_condition_result;
3766 break;
3767 }
3768
3769 if ((scsi_debug_dif == SD_DIF_TYPE1_PROTECTION ||
3770 scsi_debug_dif == SD_DIF_TYPE3_PROTECTION) &&
3771 (cmd[1] & 0xe0) == 0)
3772 printk(KERN_ERR "Unprotected RD/WR to DIF device\n");
3773
3774 /* fall through */
3775 case READ_6:
3776 read:
3777 errsts = check_readiness(SCpnt, 0, devip);
3778 if (errsts)
3779 break;
3780 if (scsi_debug_fake_rw)
3781 break;
3782 get_data_transfer_info(cmd, &lba, &num, &ei_lba);
3783 errsts = resp_read(SCpnt, lba, num, devip, ei_lba);
3784 if (inj_recovered && (0 == errsts)) {
3785 mk_sense_buffer(devip, RECOVERED_ERROR,
3786 THRESHOLD_EXCEEDED, 0);
3787 errsts = check_condition_result;
3788 } else if (inj_transport && (0 == errsts)) {
3789 mk_sense_buffer(devip, ABORTED_COMMAND,
3790 TRANSPORT_PROBLEM, ACK_NAK_TO);
3791 errsts = check_condition_result;
3792 } else if (inj_dif && (0 == errsts)) {
3793 mk_sense_buffer(devip, ABORTED_COMMAND, 0x10, 1);
3794 errsts = illegal_condition_result;
3795 } else if (inj_dix && (0 == errsts)) {
3796 mk_sense_buffer(devip, ILLEGAL_REQUEST, 0x10, 1);
3797 errsts = illegal_condition_result;
3798 }
3799 break;
3800 case REPORT_LUNS: /* mandatory, ignore unit attention */
3801 delay_override = 1;
3802 errsts = resp_report_luns(SCpnt, devip);
3803 break;
3804 case VERIFY: /* 10 byte SBC-2 command */
3805 errsts = check_readiness(SCpnt, 0, devip);
3806 break;
3807 case WRITE_16:
3808 case WRITE_12:
3809 case WRITE_10:
3810 /* WRITE{10,12,16} and DIF Type 2 are natural enemies */
3811 if (scsi_debug_dif == SD_DIF_TYPE2_PROTECTION &&
3812 cmd[1] & 0xe0) {
3813 mk_sense_buffer(devip, ILLEGAL_REQUEST,
3814 INVALID_COMMAND_OPCODE, 0);
3815 errsts = check_condition_result;
3816 break;
3817 }
3818
3819 if ((scsi_debug_dif == SD_DIF_TYPE1_PROTECTION ||
3820 scsi_debug_dif == SD_DIF_TYPE3_PROTECTION) &&
3821 (cmd[1] & 0xe0) == 0)
3822 printk(KERN_ERR "Unprotected RD/WR to DIF device\n");
3823
3824 /* fall through */
3825 case WRITE_6:
3826 write:
3827 errsts = check_readiness(SCpnt, 0, devip);
3828 if (errsts)
3829 break;
3830 if (scsi_debug_fake_rw)
3831 break;
3832 get_data_transfer_info(cmd, &lba, &num, &ei_lba);
3833 errsts = resp_write(SCpnt, lba, num, devip, ei_lba);
3834 if (inj_recovered && (0 == errsts)) {
3835 mk_sense_buffer(devip, RECOVERED_ERROR,
3836 THRESHOLD_EXCEEDED, 0);
3837 errsts = check_condition_result;
3838 } else if (inj_dif && (0 == errsts)) {
3839 mk_sense_buffer(devip, ABORTED_COMMAND, 0x10, 1);
3840 errsts = illegal_condition_result;
3841 } else if (inj_dix && (0 == errsts)) {
3842 mk_sense_buffer(devip, ILLEGAL_REQUEST, 0x10, 1);
3843 errsts = illegal_condition_result;
3844 }
3845 break;
3846 case WRITE_SAME_16:
3847 case WRITE_SAME:
3848 if (cmd[1] & 0x8) {
3849 if ((*cmd == WRITE_SAME_16 && scsi_debug_lbpws == 0) ||
3850 (*cmd == WRITE_SAME && scsi_debug_lbpws10 == 0)) {
3851 mk_sense_buffer(devip, ILLEGAL_REQUEST,
3852 INVALID_FIELD_IN_CDB, 0);
3853 errsts = check_condition_result;
3854 } else
3855 unmap = 1;
3856 }
3857 if (errsts)
3858 break;
3859 errsts = check_readiness(SCpnt, 0, devip);
3860 if (errsts)
3861 break;
3862 get_data_transfer_info(cmd, &lba, &num, &ei_lba);
3863 errsts = resp_write_same(SCpnt, lba, num, devip, ei_lba, unmap);
3864 break;
3865 case UNMAP:
3866 errsts = check_readiness(SCpnt, 0, devip);
3867 if (errsts)
3868 break;
3869
3870 if (scsi_debug_unmap_max_desc == 0 || scsi_debug_lbpu == 0) {
3871 mk_sense_buffer(devip, ILLEGAL_REQUEST,
3872 INVALID_COMMAND_OPCODE, 0);
3873 errsts = check_condition_result;
3874 } else
3875 errsts = resp_unmap(SCpnt, devip);
3876 break;
3877 case MODE_SENSE:
3878 case MODE_SENSE_10:
3879 errsts = resp_mode_sense(SCpnt, target, devip);
3880 break;
3881 case MODE_SELECT:
3882 errsts = resp_mode_select(SCpnt, 1, devip);
3883 break;
3884 case MODE_SELECT_10:
3885 errsts = resp_mode_select(SCpnt, 0, devip);
3886 break;
3887 case LOG_SENSE:
3888 errsts = resp_log_sense(SCpnt, devip);
3889 break;
3890 case SYNCHRONIZE_CACHE:
3891 delay_override = 1;
3892 errsts = check_readiness(SCpnt, 0, devip);
3893 break;
3894 case WRITE_BUFFER:
3895 errsts = check_readiness(SCpnt, 1, devip);
3896 break;
3897 case XDWRITEREAD_10:
3898 if (!scsi_bidi_cmnd(SCpnt)) {
3899 mk_sense_buffer(devip, ILLEGAL_REQUEST,
3900 INVALID_FIELD_IN_CDB, 0);
3901 errsts = check_condition_result;
3902 break;
3903 }
3904
3905 errsts = check_readiness(SCpnt, 0, devip);
3906 if (errsts)
3907 break;
3908 if (scsi_debug_fake_rw)
3909 break;
3910 get_data_transfer_info(cmd, &lba, &num, &ei_lba);
3911 errsts = resp_read(SCpnt, lba, num, devip, ei_lba);
3912 if (errsts)
3913 break;
3914 errsts = resp_write(SCpnt, lba, num, devip, ei_lba);
3915 if (errsts)
3916 break;
3917 errsts = resp_xdwriteread(SCpnt, lba, num, devip);
3918 break;
3919 case VARIABLE_LENGTH_CMD:
3920 if (scsi_debug_dif == SD_DIF_TYPE2_PROTECTION) {
3921
3922 if ((cmd[10] & 0xe0) == 0)
3923 printk(KERN_ERR
3924 "Unprotected RD/WR to DIF device\n");
3925
3926 if (cmd[9] == READ_32) {
3927 BUG_ON(SCpnt->cmd_len < 32);
3928 goto read;
3929 }
3930
3931 if (cmd[9] == WRITE_32) {
3932 BUG_ON(SCpnt->cmd_len < 32);
3933 goto write;
3934 }
3935 }
3936
3937 mk_sense_buffer(devip, ILLEGAL_REQUEST,
3938 INVALID_FIELD_IN_CDB, 0);
3939 errsts = check_condition_result;
3940 break;
3941
3942 default:
3943 if (SCSI_DEBUG_OPT_NOISE & scsi_debug_opts)
3944 printk(KERN_INFO "scsi_debug: Opcode: 0x%x not "
3945 "supported\n", *cmd);
3946 errsts = check_readiness(SCpnt, 1, devip);
3947 if (errsts)
3948 break; /* Unit attention takes precedence */
3949 mk_sense_buffer(devip, ILLEGAL_REQUEST, INVALID_OPCODE, 0);
3950 errsts = check_condition_result;
3951 break;
3952 }
3953 return schedule_resp(SCpnt, devip, done, errsts,
3954 (delay_override ? 0 : scsi_debug_delay));
3955 }
3956
3957 static DEF_SCSI_QCMD(scsi_debug_queuecommand)
3958
3959 static struct scsi_host_template sdebug_driver_template = {
3960 .proc_info = scsi_debug_proc_info,
3961 .proc_name = sdebug_proc_name,
3962 .name = "SCSI DEBUG",
3963 .info = scsi_debug_info,
3964 .slave_alloc = scsi_debug_slave_alloc,
3965 .slave_configure = scsi_debug_slave_configure,
3966 .slave_destroy = scsi_debug_slave_destroy,
3967 .ioctl = scsi_debug_ioctl,
3968 .queuecommand = scsi_debug_queuecommand,
3969 .eh_abort_handler = scsi_debug_abort,
3970 .eh_bus_reset_handler = scsi_debug_bus_reset,
3971 .eh_device_reset_handler = scsi_debug_device_reset,
3972 .eh_host_reset_handler = scsi_debug_host_reset,
3973 .bios_param = scsi_debug_biosparam,
3974 .can_queue = SCSI_DEBUG_CANQUEUE,
3975 .this_id = 7,
3976 .sg_tablesize = 256,
3977 .cmd_per_lun = 16,
3978 .max_sectors = 0xffff,
3979 .use_clustering = DISABLE_CLUSTERING,
3980 .module = THIS_MODULE,
3981 };
3982
3983 static int sdebug_driver_probe(struct device *dev)
3984 {
3985 int error = 0;
3986 struct sdebug_host_info *sdbg_host;
3987 struct Scsi_Host *hpnt;
3988 int host_prot;
3989
3990 sdbg_host = to_sdebug_host(dev);
3991
3992 sdebug_driver_template.can_queue = scsi_debug_max_queue;
3993 hpnt = scsi_host_alloc(&sdebug_driver_template, sizeof(sdbg_host));
3994 if (NULL == hpnt) {
3995 		printk(KERN_ERR "%s: scsi_host_alloc failed\n", __func__);
3996 error = -ENODEV;
3997 return error;
3998 }
3999
4000 sdbg_host->shost = hpnt;
4001 *((struct sdebug_host_info **)hpnt->hostdata) = sdbg_host;
4002 if ((hpnt->this_id >= 0) && (scsi_debug_num_tgts > hpnt->this_id))
4003 hpnt->max_id = scsi_debug_num_tgts + 1;
4004 else
4005 hpnt->max_id = scsi_debug_num_tgts;
4006 hpnt->max_lun = SAM2_WLUN_REPORT_LUNS; /* = scsi_debug_max_luns; */
4007
4008 host_prot = 0;
4009
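	/* Map the dif/dix module parameters onto the SHOST_DIF_/SHOST_DIX_
	 * capability flags advertised by this host. */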
4010 switch (scsi_debug_dif) {
4011
4012 case SD_DIF_TYPE1_PROTECTION:
4013 host_prot = SHOST_DIF_TYPE1_PROTECTION;
4014 if (scsi_debug_dix)
4015 host_prot |= SHOST_DIX_TYPE1_PROTECTION;
4016 break;
4017
4018 case SD_DIF_TYPE2_PROTECTION:
4019 host_prot = SHOST_DIF_TYPE2_PROTECTION;
4020 if (scsi_debug_dix)
4021 host_prot |= SHOST_DIX_TYPE2_PROTECTION;
4022 break;
4023
4024 case SD_DIF_TYPE3_PROTECTION:
4025 host_prot = SHOST_DIF_TYPE3_PROTECTION;
4026 if (scsi_debug_dix)
4027 host_prot |= SHOST_DIX_TYPE3_PROTECTION;
4028 break;
4029
4030 default:
4031 if (scsi_debug_dix)
4032 host_prot |= SHOST_DIX_TYPE0_PROTECTION;
4033 break;
4034 }
4035
4036 scsi_host_set_prot(hpnt, host_prot);
4037
4038 printk(KERN_INFO "scsi_debug: host protection%s%s%s%s%s%s%s\n",
4039 (host_prot & SHOST_DIF_TYPE1_PROTECTION) ? " DIF1" : "",
4040 (host_prot & SHOST_DIF_TYPE2_PROTECTION) ? " DIF2" : "",
4041 (host_prot & SHOST_DIF_TYPE3_PROTECTION) ? " DIF3" : "",
4042 (host_prot & SHOST_DIX_TYPE0_PROTECTION) ? " DIX0" : "",
4043 (host_prot & SHOST_DIX_TYPE1_PROTECTION) ? " DIX1" : "",
4044 (host_prot & SHOST_DIX_TYPE2_PROTECTION) ? " DIX2" : "",
4045 (host_prot & SHOST_DIX_TYPE3_PROTECTION) ? " DIX3" : "");
4046
4047 if (scsi_debug_guard == 1)
4048 scsi_host_set_guard(hpnt, SHOST_DIX_GUARD_IP);
4049 else
4050 scsi_host_set_guard(hpnt, SHOST_DIX_GUARD_CRC);
4051
4052 error = scsi_add_host(hpnt, &sdbg_host->dev);
4053 if (error) {
4054 printk(KERN_ERR "%s: scsi_add_host failed\n", __func__);
4055 error = -ENODEV;
4056 scsi_host_put(hpnt);
4057 } else
4058 scsi_scan_host(hpnt);
4059
4060
4061 return error;
4062 }
4063
4064 static int sdebug_driver_remove(struct device *dev)
4065 {
4066 struct sdebug_host_info *sdbg_host;
4067 struct sdebug_dev_info *sdbg_devinfo, *tmp;
4068
4069 sdbg_host = to_sdebug_host(dev);
4070
4071 if (!sdbg_host) {
4072 printk(KERN_ERR "%s: Unable to locate host info\n",
4073 __func__);
4074 return -ENODEV;
4075 }
4076
4077 scsi_remove_host(sdbg_host->shost);
4078
4079 list_for_each_entry_safe(sdbg_devinfo, tmp, &sdbg_host->dev_info_list,
4080 dev_list) {
4081 list_del(&sdbg_devinfo->dev_list);
4082 kfree(sdbg_devinfo);
4083 }
4084
4085 scsi_host_put(sdbg_host->shost);
4086 return 0;
4087 }
4088
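/* Every device on the pseudo bus matches the single pseudo driver. */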
4089 static int pseudo_lld_bus_match(struct device *dev,
4090 struct device_driver *dev_driver)
4091 {
4092 return 1;
4093 }
4094
4095 static struct bus_type pseudo_lld_bus = {
4096 .name = "pseudo",
4097 .match = pseudo_lld_bus_match,
4098 .probe = sdebug_driver_probe,
4099 .remove = sdebug_driver_remove,
4100 };