drivers/scsi/scsi_sysfs.c
/*
 * scsi_sysfs.c
 *
 * SCSI sysfs interface routines.
 *
 * Created to pull SCSI mid layer sysfs routines into one file.
 */

#include <linux/module.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/blkdev.h>
#include <linux/device.h>
#include <linux/pm_runtime.h>

#include <scsi/scsi.h>
#include <scsi/scsi_device.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_tcq.h>
#include <scsi/scsi_transport.h>
#include <scsi/scsi_driver.h>

#include "scsi_priv.h"
#include "scsi_logging.h"

static struct device_type scsi_dev_type;

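/*
 * Mapping of scsi_device_state values to the names reported and accepted
 * by the per-device "state" sysfs attribute.
 */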
static const struct {
	enum scsi_device_state value;
	char *name;
} sdev_states[] = {
	{ SDEV_CREATED, "created" },
	{ SDEV_RUNNING, "running" },
	{ SDEV_CANCEL, "cancel" },
	{ SDEV_DEL, "deleted" },
	{ SDEV_QUIESCE, "quiesce" },
	{ SDEV_OFFLINE, "offline" },
	{ SDEV_BLOCK, "blocked" },
	{ SDEV_CREATED_BLOCK, "created-blocked" },
};

const char *scsi_device_state_name(enum scsi_device_state state)
{
	int i;
	char *name = NULL;

	for (i = 0; i < ARRAY_SIZE(sdev_states); i++) {
		if (sdev_states[i].value == state) {
			name = sdev_states[i].name;
			break;
		}
	}
	return name;
}

static const struct {
	enum scsi_host_state value;
	char *name;
} shost_states[] = {
	{ SHOST_CREATED, "created" },
	{ SHOST_RUNNING, "running" },
	{ SHOST_CANCEL, "cancel" },
	{ SHOST_DEL, "deleted" },
	{ SHOST_RECOVERY, "recovery" },
	{ SHOST_CANCEL_RECOVERY, "cancel/recovery" },
	{ SHOST_DEL_RECOVERY, "deleted/recovery", },
};
const char *scsi_host_state_name(enum scsi_host_state state)
{
	int i;
	char *name = NULL;

	for (i = 0; i < ARRAY_SIZE(shost_states); i++) {
		if (shost_states[i].value == state) {
			name = shost_states[i].name;
			break;
		}
	}
	return name;
}

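/*
 * Parse one token of a "channel id lun" scan string: "-" selects
 * SCAN_WILD_CARD, anything else must be a plain unsigned integer.
 * Returns 0 on success, 1 if the token has trailing garbage.
 */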
static int check_set(unsigned int *val, char *src)
{
	char *last;

	if (strncmp(src, "-", 20) == 0) {
		*val = SCAN_WILD_CARD;
	} else {
		/*
		 * Doesn't check for int overflow
		 */
		*val = simple_strtoul(src, &last, 0);
		if (*last != '\0')
			return 1;
	}
	return 0;
}

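/*
 * Handle a write to the host "scan" attribute: split the buffer into a
 * "channel id lun" triple and start a scan, preferring the transport's
 * user_scan hook when the transport class provides one.
 */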
static int scsi_scan(struct Scsi_Host *shost, const char *str)
{
	char s1[15], s2[15], s3[15], junk;
	unsigned int channel, id, lun;
	int res;

	res = sscanf(str, "%10s %10s %10s %c", s1, s2, s3, &junk);
	if (res != 3)
		return -EINVAL;
	if (check_set(&channel, s1))
		return -EINVAL;
	if (check_set(&id, s2))
		return -EINVAL;
	if (check_set(&lun, s3))
		return -EINVAL;
	if (shost->transportt->user_scan)
		res = shost->transportt->user_scan(shost, channel, id, lun);
	else
		res = scsi_scan_host_selected(shost, channel, id, lun, 1);
	return res;
}

/*
 * shost_show_function: macro to create an attr function that can be used to
 * show a non-bit field.
 */
#define shost_show_function(name, field, format_string) \
static ssize_t \
show_##name (struct device *dev, struct device_attribute *attr, \
	     char *buf) \
{ \
	struct Scsi_Host *shost = class_to_shost(dev); \
	return snprintf (buf, 20, format_string, shost->field); \
}

/*
 * shost_rd_attr: macro to create a function and attribute variable for a
 * read only field.
 */
#define shost_rd_attr2(name, field, format_string) \
	shost_show_function(name, field, format_string) \
static DEVICE_ATTR(name, S_IRUGO, show_##name, NULL);

#define shost_rd_attr(field, format_string) \
	shost_rd_attr2(field, field, format_string)

/*
 * Create the actual show/store functions and data structures.
 */

static ssize_t
store_scan(struct device *dev, struct device_attribute *attr,
	   const char *buf, size_t count)
{
	struct Scsi_Host *shost = class_to_shost(dev);
	int res;

	res = scsi_scan(shost, buf);
	if (res == 0)
		res = count;
	return res;
};
static DEVICE_ATTR(scan, S_IWUSR, NULL, store_scan);

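/*
 * Host "state" attribute: writing a state name from shost_states[] calls
 * scsi_host_set_state(), reading prints the current state's name.
 */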
static ssize_t
store_shost_state(struct device *dev, struct device_attribute *attr,
		  const char *buf, size_t count)
{
	int i;
	struct Scsi_Host *shost = class_to_shost(dev);
	enum scsi_host_state state = 0;

	for (i = 0; i < ARRAY_SIZE(shost_states); i++) {
		const int len = strlen(shost_states[i].name);
		if (strncmp(shost_states[i].name, buf, len) == 0 &&
		    buf[len] == '\n') {
			state = shost_states[i].value;
			break;
		}
	}
	if (!state)
		return -EINVAL;

	if (scsi_host_set_state(shost, state))
		return -EINVAL;
	return count;
}

static ssize_t
show_shost_state(struct device *dev, struct device_attribute *attr, char *buf)
{
	struct Scsi_Host *shost = class_to_shost(dev);
	const char *name = scsi_host_state_name(shost->shost_state);

	if (!name)
		return -EINVAL;

	return snprintf(buf, 20, "%s\n", name);
}

/* DEVICE_ATTR(state) clashes with dev_attr_state for sdev */
struct device_attribute dev_attr_hstate =
	__ATTR(state, S_IRUGO | S_IWUSR, show_shost_state, store_shost_state);

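/*
 * Helpers for the "supported_mode" and "active_mode" attributes: print the
 * MODE_INITIATOR / MODE_TARGET bits as a comma-separated list of names.
 */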
static ssize_t
show_shost_mode(unsigned int mode, char *buf)
{
	ssize_t len = 0;

	if (mode & MODE_INITIATOR)
		len = sprintf(buf, "%s", "Initiator");

	if (mode & MODE_TARGET)
		len += sprintf(buf + len, "%s%s", len ? ", " : "", "Target");

	len += sprintf(buf + len, "\n");

	return len;
}

static ssize_t
show_shost_supported_mode(struct device *dev, struct device_attribute *attr,
			  char *buf)
{
	struct Scsi_Host *shost = class_to_shost(dev);
	unsigned int supported_mode = shost->hostt->supported_mode;

	if (supported_mode == MODE_UNKNOWN)
		/* by default this should be initiator */
		supported_mode = MODE_INITIATOR;

	return show_shost_mode(supported_mode, buf);
}

static DEVICE_ATTR(supported_mode, S_IRUGO | S_IWUSR, show_shost_supported_mode, NULL);

static ssize_t
show_shost_active_mode(struct device *dev,
		       struct device_attribute *attr, char *buf)
{
	struct Scsi_Host *shost = class_to_shost(dev);

	if (shost->active_mode == MODE_UNKNOWN)
		return snprintf(buf, 20, "unknown\n");
	else
		return show_shost_mode(shost->active_mode, buf);
}

static DEVICE_ATTR(active_mode, S_IRUGO | S_IWUSR, show_shost_active_mode, NULL);

shost_rd_attr(unique_id, "%u\n");
shost_rd_attr(host_busy, "%hu\n");
shost_rd_attr(cmd_per_lun, "%hd\n");
shost_rd_attr(can_queue, "%hd\n");
shost_rd_attr(sg_tablesize, "%hu\n");
shost_rd_attr(sg_prot_tablesize, "%hu\n");
shost_rd_attr(unchecked_isa_dma, "%d\n");
shost_rd_attr(prot_capabilities, "%u\n");
shost_rd_attr(prot_guard_type, "%hd\n");
shost_rd_attr2(proc_name, hostt->proc_name, "%s\n");

static struct attribute *scsi_sysfs_shost_attrs[] = {
	&dev_attr_unique_id.attr,
	&dev_attr_host_busy.attr,
	&dev_attr_cmd_per_lun.attr,
	&dev_attr_can_queue.attr,
	&dev_attr_sg_tablesize.attr,
	&dev_attr_sg_prot_tablesize.attr,
	&dev_attr_unchecked_isa_dma.attr,
	&dev_attr_proc_name.attr,
	&dev_attr_scan.attr,
	&dev_attr_hstate.attr,
	&dev_attr_supported_mode.attr,
	&dev_attr_active_mode.attr,
	&dev_attr_prot_capabilities.attr,
	&dev_attr_prot_guard_type.attr,
	NULL
};

struct attribute_group scsi_shost_attr_group = {
	.attrs =	scsi_sysfs_shost_attrs,
};

const struct attribute_group *scsi_sysfs_shost_attr_groups[] = {
	&scsi_shost_attr_group,
	NULL
};

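/*
 * Release callbacks for the two struct device objects embedded in a
 * scsi_device: the class device just drops its reference on sdev_gendev,
 * while the gendev release defers the real teardown to process context.
 */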
static void scsi_device_cls_release(struct device *class_dev)
{
	struct scsi_device *sdev;

	sdev = class_to_sdev(class_dev);
	put_device(&sdev->sdev_gendev);
}

static void scsi_device_dev_release_usercontext(struct work_struct *work)
{
	struct scsi_device *sdev;
	struct device *parent;
	struct scsi_target *starget;
	struct list_head *this, *tmp;
	unsigned long flags;

	sdev = container_of(work, struct scsi_device, ew.work);

	parent = sdev->sdev_gendev.parent;
	starget = to_scsi_target(parent);

	spin_lock_irqsave(sdev->host->host_lock, flags);
	starget->reap_ref++;
	list_del(&sdev->siblings);
	list_del(&sdev->same_target_siblings);
	list_del(&sdev->starved_entry);
	spin_unlock_irqrestore(sdev->host->host_lock, flags);

	cancel_work_sync(&sdev->event_work);

	list_for_each_safe(this, tmp, &sdev->event_list) {
		struct scsi_event *evt;

		evt = list_entry(this, struct scsi_event, node);
		list_del(&evt->node);
		kfree(evt);
	}

	if (sdev->request_queue) {
		sdev->request_queue->queuedata = NULL;
		/* user context needed to free queue */
		scsi_free_queue(sdev->request_queue);
		/* temporary expedient, try to catch use of queue lock
		 * after free of sdev */
		sdev->request_queue = NULL;
	}

	scsi_target_reap(scsi_target(sdev));

	kfree(sdev->inquiry);
	kfree(sdev);

	if (parent)
		put_device(parent);
}

static void scsi_device_dev_release(struct device *dev)
{
	struct scsi_device *sdp = to_scsi_device(dev);
	execute_in_process_context(scsi_device_dev_release_usercontext,
				   &sdp->ew);
}

static struct class sdev_class = {
	.name		= "scsi_device",
	.dev_release	= scsi_device_cls_release,
};

/* all probing is done in the individual ->probe routines */
static int scsi_bus_match(struct device *dev, struct device_driver *gendrv)
{
	struct scsi_device *sdp;

	if (dev->type != &scsi_dev_type)
		return 0;

	sdp = to_scsi_device(dev);
	if (sdp->no_uld_attach)
		return 0;
	return (sdp->inq_periph_qual == SCSI_INQ_PQ_CON)? 1: 0;
}

static int scsi_bus_uevent(struct device *dev, struct kobj_uevent_env *env)
{
	struct scsi_device *sdev;

	if (dev->type != &scsi_dev_type)
		return 0;

	sdev = to_scsi_device(dev);

	add_uevent_var(env, "MODALIAS=" SCSI_DEVICE_MODALIAS_FMT, sdev->type);
	return 0;
}

struct bus_type scsi_bus_type = {
	.name		= "scsi",
	.match		= scsi_bus_match,
	.uevent		= scsi_bus_uevent,
#ifdef CONFIG_PM_OPS
	.pm		= &scsi_bus_pm_ops,
#endif
};
EXPORT_SYMBOL_GPL(scsi_bus_type);

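/*
 * Register the scsi bus type and the scsi_device class at init time;
 * scsi_sysfs_unregister() undoes both.
 */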
int scsi_sysfs_register(void)
{
	int error;

	error = bus_register(&scsi_bus_type);
	if (!error) {
		error = class_register(&sdev_class);
		if (error)
			bus_unregister(&scsi_bus_type);
	}

	return error;
}

void scsi_sysfs_unregister(void)
{
	class_unregister(&sdev_class);
	bus_unregister(&scsi_bus_type);
}

/*
 * sdev_show_function: macro to create an attr function that can be used to
 * show a non-bit field.
 */
#define sdev_show_function(field, format_string) \
static ssize_t \
sdev_show_##field (struct device *dev, struct device_attribute *attr, \
		   char *buf) \
{ \
	struct scsi_device *sdev; \
	sdev = to_scsi_device(dev); \
	return snprintf (buf, 20, format_string, sdev->field); \
} \

/*
 * sdev_rd_attr: macro to create a function and attribute variable for a
 * read only field.
 */
#define sdev_rd_attr(field, format_string) \
	sdev_show_function(field, format_string) \
static DEVICE_ATTR(field, S_IRUGO, sdev_show_##field, NULL);


/*
 * sdev_rw_attr: create a function and attribute variable for a
 * read/write field.
 */
#define sdev_rw_attr(field, format_string) \
	sdev_show_function(field, format_string) \
 \
static ssize_t \
sdev_store_##field (struct device *dev, struct device_attribute *attr, \
		    const char *buf, size_t count) \
{ \
	struct scsi_device *sdev; \
	sdev = to_scsi_device(dev); \
	sscanf (buf, format_string, &sdev->field); \
	return count; \
} \
static DEVICE_ATTR(field, S_IRUGO | S_IWUSR, sdev_show_##field, sdev_store_##field);

/* Currently we don't export bit fields, but we might in future,
 * so leave this code in */
#if 0
/*
 * sdev_rd_attr: create a function and attribute variable for a
 * read/write bit field.
 */
#define sdev_rw_attr_bit(field) \
	sdev_show_function(field, "%d\n") \
 \
static ssize_t \
sdev_store_##field (struct device *dev, struct device_attribute *attr, \
		    const char *buf, size_t count) \
{ \
	int ret; \
	struct scsi_device *sdev; \
	ret = scsi_sdev_check_buf_bit(buf); \
	if (ret >= 0) { \
		sdev = to_scsi_device(dev); \
		sdev->field = ret; \
		ret = count; \
	} \
	return ret; \
} \
static DEVICE_ATTR(field, S_IRUGO | S_IWUSR, sdev_show_##field, sdev_store_##field);

/*
 * scsi_sdev_check_buf_bit: return 0 if buf is "0", return 1 if buf is "1",
 * else return -EINVAL.
 */
static int scsi_sdev_check_buf_bit(const char *buf)
{
	if ((buf[1] == '\0') || ((buf[1] == '\n') && (buf[2] == '\0'))) {
		if (buf[0] == '1')
			return 1;
		else if (buf[0] == '0')
			return 0;
		else
			return -EINVAL;
	} else
		return -EINVAL;
}
#endif
/*
 * Create the actual show/store functions and data structures.
 */
sdev_rd_attr (device_blocked, "%d\n");
sdev_rd_attr (queue_depth, "%d\n");
sdev_rd_attr (type, "%d\n");
sdev_rd_attr (scsi_level, "%d\n");
sdev_rd_attr (vendor, "%.8s\n");
sdev_rd_attr (model, "%.16s\n");
sdev_rd_attr (rev, "%.4s\n");

/*
 * TODO: can we make these symlinks to the block layer ones?
 */
static ssize_t
sdev_show_timeout (struct device *dev, struct device_attribute *attr, char *buf)
{
	struct scsi_device *sdev;
	sdev = to_scsi_device(dev);
	return snprintf(buf, 20, "%d\n", sdev->request_queue->rq_timeout / HZ);
}

static ssize_t
sdev_store_timeout (struct device *dev, struct device_attribute *attr,
		    const char *buf, size_t count)
{
	struct scsi_device *sdev;
	int timeout;
	sdev = to_scsi_device(dev);
	sscanf (buf, "%d\n", &timeout);
	blk_queue_rq_timeout(sdev->request_queue, timeout * HZ);
	return count;
}
static DEVICE_ATTR(timeout, S_IRUGO | S_IWUSR, sdev_show_timeout, sdev_store_timeout);

static ssize_t
store_rescan_field (struct device *dev, struct device_attribute *attr,
		    const char *buf, size_t count)
{
	scsi_rescan_device(dev);
	return count;
}
static DEVICE_ATTR(rescan, S_IWUSR, NULL, store_rescan_field);

static void sdev_store_delete_callback(struct device *dev)
{
	scsi_remove_device(to_scsi_device(dev));
}

static ssize_t
sdev_store_delete(struct device *dev, struct device_attribute *attr,
		  const char *buf, size_t count)
{
	int rc;

	/* An attribute cannot be unregistered by one of its own methods,
	 * so we have to use this roundabout approach.
	 */
	rc = device_schedule_callback(dev, sdev_store_delete_callback);
	if (rc)
		count = rc;
	return count;
};
static DEVICE_ATTR(delete, S_IWUSR, NULL, sdev_store_delete);

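/*
 * Per-device "state" attribute, the sdev counterpart of the host version
 * above: store matches the buffer against sdev_states[] and calls
 * scsi_device_set_state(), show prints the current state name.
 */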
static ssize_t
store_state_field(struct device *dev, struct device_attribute *attr,
		  const char *buf, size_t count)
{
	int i;
	struct scsi_device *sdev = to_scsi_device(dev);
	enum scsi_device_state state = 0;

	for (i = 0; i < ARRAY_SIZE(sdev_states); i++) {
		const int len = strlen(sdev_states[i].name);
		if (strncmp(sdev_states[i].name, buf, len) == 0 &&
		    buf[len] == '\n') {
			state = sdev_states[i].value;
			break;
		}
	}
	if (!state)
		return -EINVAL;

	if (scsi_device_set_state(sdev, state))
		return -EINVAL;
	return count;
}

static ssize_t
show_state_field(struct device *dev, struct device_attribute *attr, char *buf)
{
	struct scsi_device *sdev = to_scsi_device(dev);
	const char *name = scsi_device_state_name(sdev->sdev_state);

	if (!name)
		return -EINVAL;

	return snprintf(buf, 20, "%s\n", name);
}

static DEVICE_ATTR(state, S_IRUGO | S_IWUSR, show_state_field, store_state_field);

static ssize_t
show_queue_type_field(struct device *dev, struct device_attribute *attr,
		      char *buf)
{
	struct scsi_device *sdev = to_scsi_device(dev);
	const char *name = "none";

	if (sdev->ordered_tags)
		name = "ordered";
	else if (sdev->simple_tags)
		name = "simple";

	return snprintf(buf, 20, "%s\n", name);
}

static DEVICE_ATTR(queue_type, S_IRUGO, show_queue_type_field, NULL);

static ssize_t
show_iostat_counterbits(struct device *dev, struct device_attribute *attr, char *buf)
{
	return snprintf(buf, 20, "%d\n", (int)sizeof(atomic_t) * 8);
}

static DEVICE_ATTR(iocounterbits, S_IRUGO, show_iostat_counterbits, NULL);

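/*
 * Read-only I/O statistics: each counter is an atomic_t in struct
 * scsi_device and is reported in hexadecimal.
 */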
#define show_sdev_iostat(field) \
static ssize_t \
show_iostat_##field(struct device *dev, struct device_attribute *attr, \
		    char *buf) \
{ \
	struct scsi_device *sdev = to_scsi_device(dev); \
	unsigned long long count = atomic_read(&sdev->field); \
	return snprintf(buf, 20, "0x%llx\n", count); \
} \
static DEVICE_ATTR(field, S_IRUGO, show_iostat_##field, NULL)

show_sdev_iostat(iorequest_cnt);
show_sdev_iostat(iodone_cnt);
show_sdev_iostat(ioerr_cnt);

static ssize_t
sdev_show_modalias(struct device *dev, struct device_attribute *attr, char *buf)
{
	struct scsi_device *sdev;
	sdev = to_scsi_device(dev);
	return snprintf (buf, 20, SCSI_DEVICE_MODALIAS_FMT "\n", sdev->type);
}
static DEVICE_ATTR(modalias, S_IRUGO, sdev_show_modalias, NULL);

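/*
 * evt_* attributes: writing 1 or 0 sets or clears the corresponding bit in
 * sdev->supported_events; reading reports whether the event is enabled.
 */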
#define DECLARE_EVT_SHOW(name, Cap_name) \
static ssize_t \
sdev_show_evt_##name(struct device *dev, struct device_attribute *attr, \
		     char *buf) \
{ \
	struct scsi_device *sdev = to_scsi_device(dev); \
	int val = test_bit(SDEV_EVT_##Cap_name, sdev->supported_events); \
	return snprintf(buf, 20, "%d\n", val); \
}

#define DECLARE_EVT_STORE(name, Cap_name) \
static ssize_t \
sdev_store_evt_##name(struct device *dev, struct device_attribute *attr, \
		      const char *buf, size_t count) \
{ \
	struct scsi_device *sdev = to_scsi_device(dev); \
	int val = simple_strtoul(buf, NULL, 0); \
	if (val == 0) \
		clear_bit(SDEV_EVT_##Cap_name, sdev->supported_events); \
	else if (val == 1) \
		set_bit(SDEV_EVT_##Cap_name, sdev->supported_events); \
	else \
		return -EINVAL; \
	return count; \
}

#define DECLARE_EVT(name, Cap_name) \
	DECLARE_EVT_SHOW(name, Cap_name) \
	DECLARE_EVT_STORE(name, Cap_name) \
	static DEVICE_ATTR(evt_##name, S_IRUGO, sdev_show_evt_##name, \
			   sdev_store_evt_##name);
#define REF_EVT(name) &dev_attr_evt_##name.attr

DECLARE_EVT(media_change, MEDIA_CHANGE)

/* Default template for device attributes. May NOT be modified */
static struct attribute *scsi_sdev_attrs[] = {
	&dev_attr_device_blocked.attr,
	&dev_attr_type.attr,
	&dev_attr_scsi_level.attr,
	&dev_attr_vendor.attr,
	&dev_attr_model.attr,
	&dev_attr_rev.attr,
	&dev_attr_rescan.attr,
	&dev_attr_delete.attr,
	&dev_attr_state.attr,
	&dev_attr_timeout.attr,
	&dev_attr_iocounterbits.attr,
	&dev_attr_iorequest_cnt.attr,
	&dev_attr_iodone_cnt.attr,
	&dev_attr_ioerr_cnt.attr,
	&dev_attr_modalias.attr,
	REF_EVT(media_change),
	NULL
};

static struct attribute_group scsi_sdev_attr_group = {
	.attrs =	scsi_sdev_attrs,
};

static const struct attribute_group *scsi_sdev_attr_groups[] = {
	&scsi_sdev_attr_group,
	NULL
};

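/*
 * Writable queue_depth attribute, installed by scsi_sysfs_add_sdev() in
 * place of the read-only one when the host template provides a
 * change_queue_depth() method.
 */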
static ssize_t
sdev_store_queue_depth_rw(struct device *dev, struct device_attribute *attr,
			  const char *buf, size_t count)
{
	int depth, retval;
	struct scsi_device *sdev = to_scsi_device(dev);
	struct scsi_host_template *sht = sdev->host->hostt;

	if (!sht->change_queue_depth)
		return -EINVAL;

	depth = simple_strtoul(buf, NULL, 0);

	if (depth < 1)
		return -EINVAL;

	retval = sht->change_queue_depth(sdev, depth,
					 SCSI_QDEPTH_DEFAULT);
	if (retval < 0)
		return retval;

	sdev->max_queue_depth = sdev->queue_depth;

	return count;
}

static struct device_attribute sdev_attr_queue_depth_rw =
	__ATTR(queue_depth, S_IRUGO | S_IWUSR, sdev_show_queue_depth,
	       sdev_store_queue_depth_rw);

static ssize_t
sdev_show_queue_ramp_up_period(struct device *dev,
			       struct device_attribute *attr,
			       char *buf)
{
	struct scsi_device *sdev;
	sdev = to_scsi_device(dev);
	return snprintf(buf, 20, "%u\n",
			jiffies_to_msecs(sdev->queue_ramp_up_period));
}

static ssize_t
sdev_store_queue_ramp_up_period(struct device *dev,
				struct device_attribute *attr,
				const char *buf, size_t count)
{
	struct scsi_device *sdev = to_scsi_device(dev);
	unsigned long period;

	if (strict_strtoul(buf, 10, &period))
		return -EINVAL;

	sdev->queue_ramp_up_period = msecs_to_jiffies(period);
	return period;
}

static struct device_attribute sdev_attr_queue_ramp_up_period =
	__ATTR(queue_ramp_up_period, S_IRUGO | S_IWUSR,
	       sdev_show_queue_ramp_up_period,
	       sdev_store_queue_ramp_up_period);

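/*
 * Writable queue_type attribute: maps "simple", "ordered" or "none" to a
 * tag message type and hands it to the host's change_queue_type() method.
 */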
static ssize_t
sdev_store_queue_type_rw(struct device *dev, struct device_attribute *attr,
			 const char *buf, size_t count)
{
	struct scsi_device *sdev = to_scsi_device(dev);
	struct scsi_host_template *sht = sdev->host->hostt;
	int tag_type = 0, retval;
	int prev_tag_type = scsi_get_tag_type(sdev);

	if (!sdev->tagged_supported || !sht->change_queue_type)
		return -EINVAL;

	if (strncmp(buf, "ordered", 7) == 0)
		tag_type = MSG_ORDERED_TAG;
	else if (strncmp(buf, "simple", 6) == 0)
		tag_type = MSG_SIMPLE_TAG;
	else if (strncmp(buf, "none", 4) != 0)
		return -EINVAL;

	if (tag_type == prev_tag_type)
		return count;

	retval = sht->change_queue_type(sdev, tag_type);
	if (retval < 0)
		return retval;

	return count;
}

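/*
 * Make a newly created target visible: add its struct device and transport
 * class objects, mark it running and enable runtime PM for it.
 */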
static int scsi_target_add(struct scsi_target *starget)
{
	int error;

	if (starget->state != STARGET_CREATED)
		return 0;

	error = device_add(&starget->dev);
	if (error) {
		dev_err(&starget->dev, "target device_add failed, error %d\n", error);
		return error;
	}
	transport_add_device(&starget->dev);
	starget->state = STARGET_RUNNING;

	pm_runtime_set_active(&starget->dev);
	pm_runtime_enable(&starget->dev);
	device_enable_async_suspend(&starget->dev);

	return 0;
}

static struct device_attribute sdev_attr_queue_type_rw =
	__ATTR(queue_type, S_IRUGO | S_IWUSR, show_queue_type_field,
	       sdev_store_queue_type_rw);

/**
 * scsi_sysfs_add_sdev - add scsi device to sysfs
 * @sdev:	scsi_device to add
 *
 * Return value:
 * 	0 on Success / non-zero on Failure
 **/
int scsi_sysfs_add_sdev(struct scsi_device *sdev)
{
	int error, i;
	struct request_queue *rq = sdev->request_queue;
	struct scsi_target *starget = sdev->sdev_target;

	error = scsi_device_set_state(sdev, SDEV_RUNNING);
	if (error)
		return error;

	error = scsi_target_add(starget);
	if (error)
		return error;

	transport_configure_device(&starget->dev);

	device_enable_async_suspend(&sdev->sdev_gendev);
	scsi_autopm_get_target(starget);
	pm_runtime_set_active(&sdev->sdev_gendev);
	pm_runtime_forbid(&sdev->sdev_gendev);
	pm_runtime_enable(&sdev->sdev_gendev);
	scsi_autopm_put_target(starget);

	/* The following call will keep sdev active indefinitely, until
	 * its driver does a corresponding scsi_autopm_pm_device(). Only
	 * drivers supporting autosuspend will do this.
	 */
	scsi_autopm_get_device(sdev);

	error = device_add(&sdev->sdev_gendev);
	if (error) {
		printk(KERN_INFO "error 1\n");
		return error;
	}
	device_enable_async_suspend(&sdev->sdev_dev);
	error = device_add(&sdev->sdev_dev);
	if (error) {
		printk(KERN_INFO "error 2\n");
		device_del(&sdev->sdev_gendev);
		return error;
	}
	transport_add_device(&sdev->sdev_gendev);
	sdev->is_visible = 1;

	/* create queue files, which may be writable, depending on the host */
	if (sdev->host->hostt->change_queue_depth) {
		error = device_create_file(&sdev->sdev_gendev,
					   &sdev_attr_queue_depth_rw);
		error = device_create_file(&sdev->sdev_gendev,
					   &sdev_attr_queue_ramp_up_period);
	}
	else
		error = device_create_file(&sdev->sdev_gendev, &dev_attr_queue_depth);
	if (error)
		return error;

	if (sdev->host->hostt->change_queue_type)
		error = device_create_file(&sdev->sdev_gendev, &sdev_attr_queue_type_rw);
	else
		error = device_create_file(&sdev->sdev_gendev, &dev_attr_queue_type);
	if (error)
		return error;

	error = bsg_register_queue(rq, &sdev->sdev_gendev, NULL, NULL);

	if (error)
		/* we're treating error on bsg register as non-fatal,
		 * so pretend nothing went wrong */
		sdev_printk(KERN_INFO, sdev,
			    "Failed to register bsg queue, errno=%d\n", error);

	/* add additional host specific attributes */
	if (sdev->host->hostt->sdev_attrs) {
		for (i = 0; sdev->host->hostt->sdev_attrs[i]; i++) {
			error = device_create_file(&sdev->sdev_gendev,
					sdev->host->hostt->sdev_attrs[i]);
			if (error)
				return error;
		}
	}

	return error;
}

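/*
 * Tear down a device that has been made visible in sysfs: move it to
 * SDEV_CANCEL, unregister its bsg queue, class device and transport
 * objects, set SDEV_DEL and drop the final reference.  scsi_remove_device()
 * below is the exported wrapper that takes the host's scan_mutex first.
 */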
void __scsi_remove_device(struct scsi_device *sdev)
{
	struct device *dev = &sdev->sdev_gendev;

	if (sdev->is_visible) {
		if (scsi_device_set_state(sdev, SDEV_CANCEL) != 0)
			return;

		bsg_unregister_queue(sdev->request_queue);
		device_unregister(&sdev->sdev_dev);
		transport_remove_device(dev);
		device_del(dev);
	} else
		put_device(&sdev->sdev_dev);
	scsi_device_set_state(sdev, SDEV_DEL);
	if (sdev->host->hostt->slave_destroy)
		sdev->host->hostt->slave_destroy(sdev);
	transport_destroy_device(dev);
	put_device(dev);
}

/**
 * scsi_remove_device - unregister a device from the scsi bus
 * @sdev:	scsi_device to unregister
 **/
void scsi_remove_device(struct scsi_device *sdev)
{
	struct Scsi_Host *shost = sdev->host;

	mutex_lock(&shost->scan_mutex);
	__scsi_remove_device(sdev);
	mutex_unlock(&shost->scan_mutex);
}
EXPORT_SYMBOL(scsi_remove_device);

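/*
 * Remove every device belonging to a target.  The list walk restarts from
 * the top each time the host lock is dropped to call scsi_remove_device();
 * the reap reference taken here is released by scsi_target_reap() at the end.
 */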
static void __scsi_remove_target(struct scsi_target *starget)
{
	struct Scsi_Host *shost = dev_to_shost(starget->dev.parent);
	unsigned long flags;
	struct scsi_device *sdev;

	spin_lock_irqsave(shost->host_lock, flags);
	starget->reap_ref++;
 restart:
	list_for_each_entry(sdev, &shost->__devices, siblings) {
		if (sdev->channel != starget->channel ||
		    sdev->id != starget->id ||
		    sdev->sdev_state == SDEV_DEL)
			continue;
		spin_unlock_irqrestore(shost->host_lock, flags);
		scsi_remove_device(sdev);
		spin_lock_irqsave(shost->host_lock, flags);
		goto restart;
	}
	spin_unlock_irqrestore(shost->host_lock, flags);
	scsi_target_reap(starget);
}

static int __remove_child (struct device * dev, void * data)
{
	if (scsi_is_target_device(dev))
		__scsi_remove_target(to_scsi_target(dev));
	return 0;
}

/**
 * scsi_remove_target - try to remove a target and all its devices
 * @dev: generic starget or parent of generic stargets to be removed
 *
 * Note: This is slightly racy.  It is possible that if the user
 * requests the addition of another device then the target won't be
 * removed.
 */
void scsi_remove_target(struct device *dev)
{
	struct device *rdev;

	if (scsi_is_target_device(dev)) {
		__scsi_remove_target(to_scsi_target(dev));
		return;
	}

	rdev = get_device(dev);
	device_for_each_child(dev, NULL, __remove_child);
	put_device(rdev);
}
EXPORT_SYMBOL(scsi_remove_target);

int scsi_register_driver(struct device_driver *drv)
{
	drv->bus = &scsi_bus_type;

	return driver_register(drv);
}
EXPORT_SYMBOL(scsi_register_driver);

int scsi_register_interface(struct class_interface *intf)
{
	intf->class = &sdev_class;

	return class_interface_register(intf);
}
EXPORT_SYMBOL(scsi_register_interface);

/**
 * scsi_sysfs_add_host - add scsi host to subsystem
 * @shost:	scsi host struct to add to subsystem
 **/
int scsi_sysfs_add_host(struct Scsi_Host *shost)
{
	int error, i;

	/* add host specific attributes */
	if (shost->hostt->shost_attrs) {
		for (i = 0; shost->hostt->shost_attrs[i]; i++) {
			error = device_create_file(&shost->shost_dev,
					shost->hostt->shost_attrs[i]);
			if (error)
				return error;
		}
	}

	transport_register_device(&shost->shost_gendev);
	transport_configure_device(&shost->shost_gendev);
	return 0;
}

static struct device_type scsi_dev_type = {
	.name =		"scsi_device",
	.release =	scsi_device_dev_release,
	.groups =	scsi_sdev_attr_groups,
};

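/*
 * Initialize the two devices embedded in a new scsi_device: sdev_gendev on
 * the scsi bus and sdev_dev in the scsi_device class, both named
 * host:channel:id:lun, then link the device onto its target's and host's
 * lists under the host lock.
 */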
void scsi_sysfs_device_initialize(struct scsi_device *sdev)
{
	unsigned long flags;
	struct Scsi_Host *shost = sdev->host;
	struct scsi_target *starget = sdev->sdev_target;

	device_initialize(&sdev->sdev_gendev);
	sdev->sdev_gendev.bus = &scsi_bus_type;
	sdev->sdev_gendev.type = &scsi_dev_type;
	dev_set_name(&sdev->sdev_gendev, "%d:%d:%d:%d",
		     sdev->host->host_no, sdev->channel, sdev->id, sdev->lun);

	device_initialize(&sdev->sdev_dev);
	sdev->sdev_dev.parent = get_device(&sdev->sdev_gendev);
	sdev->sdev_dev.class = &sdev_class;
	dev_set_name(&sdev->sdev_dev, "%d:%d:%d:%d",
		     sdev->host->host_no, sdev->channel, sdev->id, sdev->lun);
	sdev->scsi_level = starget->scsi_level;
	transport_setup_device(&sdev->sdev_gendev);
	spin_lock_irqsave(shost->host_lock, flags);
	list_add_tail(&sdev->same_target_siblings, &starget->devices);
	list_add_tail(&sdev->siblings, &shost->__devices);
	spin_unlock_irqrestore(shost->host_lock, flags);
}

int scsi_is_sdev_device(const struct device *dev)
{
	return dev->type == &scsi_dev_type;
}
EXPORT_SYMBOL(scsi_is_sdev_device);

/* A blank transport template that is used in drivers that don't
 * yet implement Transport Attributes */
struct scsi_transport_template blank_transport_template = { { { {NULL, }, }, }, };