[SCSI] fix scsi_setup_command_freelist failure path race
[GitHub/moto-9609/android_kernel_motorola_exynos9610.git] / drivers / scsi / scsi.c
1 /*
2 * scsi.c Copyright (C) 1992 Drew Eckhardt
3 * Copyright (C) 1993, 1994, 1995, 1999 Eric Youngdale
4 * Copyright (C) 2002, 2003 Christoph Hellwig
5 *
6 * generic mid-level SCSI driver
7 * Initial versions: Drew Eckhardt
8 * Subsequent revisions: Eric Youngdale
9 *
10 * <drew@colorado.edu>
11 *
12 * Bug correction thanks go to :
13 * Rik Faith <faith@cs.unc.edu>
14 * Tommy Thorn <tthorn>
15 * Thomas Wuensche <tw@fgb1.fgb.mw.tu-muenchen.de>
16 *
17 * Modified by Eric Youngdale eric@andante.org or ericy@gnu.ai.mit.edu to
18 * add scatter-gather, multiple outstanding request, and other
19 * enhancements.
20 *
21 * Native multichannel, wide scsi, /proc/scsi and hot plugging
22 * support added by Michael Neuffer <mike@i-connect.net>
23 *
24 * Added request_module("scsi_hostadapter") for kerneld:
25 * (Put an "alias scsi_hostadapter your_hostadapter" in /etc/modprobe.conf)
26 * Bjorn Ekwall <bj0rn@blox.se>
27 * (changed to kmod)
28 *
29 * Major improvements to the timeout, abort, and reset processing,
30 * as well as performance modifications for large queue depths by
31 * Leonard N. Zubkoff <lnz@dandelion.com>
32 *
33 * Converted cli() code to spinlocks, Ingo Molnar
34 *
35 * Jiffies wrap fixes (host->resetting), 3 Dec 1998 Andrea Arcangeli
36 *
37 * out_of_space hacks, D. Gilbert (dpg) 990608
38 */
39
40 #include <linux/module.h>
41 #include <linux/moduleparam.h>
42 #include <linux/kernel.h>
43 #include <linux/timer.h>
44 #include <linux/string.h>
45 #include <linux/slab.h>
46 #include <linux/blkdev.h>
47 #include <linux/delay.h>
48 #include <linux/init.h>
49 #include <linux/completion.h>
50 #include <linux/unistd.h>
51 #include <linux/spinlock.h>
52 #include <linux/kmod.h>
53 #include <linux/interrupt.h>
54 #include <linux/notifier.h>
55 #include <linux/cpu.h>
56 #include <linux/mutex.h>
57
58 #include <scsi/scsi.h>
59 #include <scsi/scsi_cmnd.h>
60 #include <scsi/scsi_dbg.h>
61 #include <scsi/scsi_device.h>
62 #include <scsi/scsi_driver.h>
63 #include <scsi/scsi_eh.h>
64 #include <scsi/scsi_host.h>
65 #include <scsi/scsi_tcq.h>
66
67 #include "scsi_priv.h"
68 #include "scsi_logging.h"
69
70 static void scsi_done(struct scsi_cmnd *cmd);
71
72 /*
73 * Definitions and constants.
74 */
75
76 #define MIN_RESET_DELAY (2*HZ)
77
78 /* Do not call reset on error if we just did a reset within 15 sec. */
79 #define MIN_RESET_PERIOD (15*HZ)
80
81 /*
82 * Macro to determine the size of SCSI command. This macro takes vendor
83 * unique commands into account. SCSI commands in groups 6 and 7 are
84 * vendor unique and we will depend upon the command length being
85 * supplied correctly in cmd_len.
86 */
87 #define CDB_SIZE(cmd) (((((cmd)->cmnd[0] >> 5) & 7) < 6) ? \
88 COMMAND_SIZE((cmd)->cmnd[0]) : (cmd)->cmd_len)
89
90 /*
91 * Note - the initial logging level can be set here to log events at boot time.
92 * After the system is up, you may enable logging via the /proc interface.
93 */
94 unsigned int scsi_logging_level;
95 #if defined(CONFIG_SCSI_LOGGING)
96 EXPORT_SYMBOL(scsi_logging_level);
97 #endif
98
99 /* NB: These are exposed through /proc/scsi/scsi and form part of the ABI.
100 * You may not alter any existing entry (although adding new ones is
101 * encouraged once assigned by ANSI/INCITS T10
102 */
static const char *const scsi_device_types[] = {
	"Direct-Access ",
	"Sequential-Access",
	"Printer ",
	"Processor ",
	"WORM ",
	"CD-ROM ",
	"Scanner ",
	"Optical Device ",
	"Medium Changer ",
	"Communications ",
	"ASC IT8 ",
	"ASC IT8 ",
	"RAID ",
	"Enclosure ",
	"Direct-Access-RBC",
	"Optical card ",
	"Bridge controller",
	"Object storage ",
	"Automation/Drive ",
};

/**
 * scsi_device_type - Return a printable name for a peripheral device type.
 * @type: type number to look up
 *
 * The two reserved codes (0x1e well-known LUN, 0x1f no device) are
 * handled before the table; any code past the end of the table maps
 * to "Unknown ".
 */
const char *scsi_device_type(unsigned type)
{
	switch (type) {
	case 0x1e:
		return "Well-known LUN ";
	case 0x1f:
		return "No Device ";
	default:
		if (type >= sizeof(scsi_device_types) / sizeof(scsi_device_types[0]))
			return "Unknown ";
		return scsi_device_types[type];
	}
}
140
141 EXPORT_SYMBOL(scsi_device_type);
142
/*
 * Per-allocation-type command slab descriptor.  A pool is shared by all
 * hosts with the same DMA constraints; @users counts attached hosts so
 * the slab can be destroyed only when the last host detaches.  Creation,
 * teardown and @users are serialized by host_cmd_pool_mutex below.
 */
struct scsi_host_cmd_pool {
	struct kmem_cache *slab;	/* backing slab for struct scsi_cmnd */
	unsigned int users;		/* number of hosts attached to this pool */
	char *name;			/* slab cache name passed to kmem_cache_create() */
	unsigned int slab_flags;	/* flags for kmem_cache_create() */
	gfp_t gfp_mask;			/* extra gfp bits ORed into every allocation */
};

/* Pool for hosts without ISA DMA restrictions. */
static struct scsi_host_cmd_pool scsi_cmd_pool = {
	.name = "scsi_cmd_cache",
	.slab_flags = SLAB_HWCACHE_ALIGN,
};

/* Pool for unchecked_isa_dma hosts: commands must come from DMA-able memory. */
static struct scsi_host_cmd_pool scsi_cmd_dma_pool = {
	.name = "scsi_cmd_cache(DMA)",
	.slab_flags = SLAB_HWCACHE_ALIGN|SLAB_CACHE_DMA,
	.gfp_mask = __GFP_DMA,
};

/* Protects pool creation/destruction and the users counts above. */
static DEFINE_MUTEX(host_cmd_pool_mutex);
163
164 /**
165 * __scsi_get_command - Allocate a struct scsi_cmnd
166 * @shost: host to transmit command
167 * @gfp_mask: allocation mask
168 *
169 * Description: allocate a struct scsi_cmd from host's slab, recycling from the
170 * host's free_list if necessary.
171 */
172 struct scsi_cmnd *__scsi_get_command(struct Scsi_Host *shost, gfp_t gfp_mask)
173 {
174 struct scsi_cmnd *cmd;
175
176 cmd = kmem_cache_alloc(shost->cmd_pool->slab,
177 gfp_mask | shost->cmd_pool->gfp_mask);
178
179 if (unlikely(!cmd)) {
180 unsigned long flags;
181
182 spin_lock_irqsave(&shost->free_list_lock, flags);
183 if (likely(!list_empty(&shost->free_list))) {
184 cmd = list_entry(shost->free_list.next,
185 struct scsi_cmnd, list);
186 list_del_init(&cmd->list);
187 }
188 spin_unlock_irqrestore(&shost->free_list_lock, flags);
189 }
190
191 return cmd;
192 }
193 EXPORT_SYMBOL_GPL(__scsi_get_command);
194
/**
 * scsi_get_command - Allocate and setup a scsi command block
 * @dev: parent scsi device
 * @gfp_mask: allocator flags
 *
 * Takes a reference on @dev's gendev for the lifetime of the command,
 * allocates via __scsi_get_command(), zeroes the command and links it
 * onto the device's cmd_list.
 *
 * Returns: The allocated scsi command structure, or NULL if either the
 * device reference or the allocation could not be obtained.
 */
struct scsi_cmnd *scsi_get_command(struct scsi_device *dev, gfp_t gfp_mask)
{
	struct scsi_cmnd *cmd;

	/* Bail if we can't get a reference to the device */
	if (!get_device(&dev->sdev_gendev))
		return NULL;

	cmd = __scsi_get_command(dev->host, gfp_mask);

	if (likely(cmd != NULL)) {
		unsigned long flags;

		memset(cmd, 0, sizeof(*cmd));
		cmd->device = dev;
		init_timer(&cmd->eh_timeout);
		INIT_LIST_HEAD(&cmd->list);
		/* track every outstanding command on the owning device */
		spin_lock_irqsave(&dev->list_lock, flags);
		list_add_tail(&cmd->list, &dev->cmd_list);
		spin_unlock_irqrestore(&dev->list_lock, flags);
		cmd->jiffies_at_alloc = jiffies;
	} else
		/* allocation failed: drop the reference taken above */
		put_device(&dev->sdev_gendev);

	return cmd;
}
EXPORT_SYMBOL(scsi_get_command);
229
/**
 * __scsi_put_command - Free a struct scsi_cmnd
 * @shost: dev->host
 * @cmd: Command to free
 * @dev: parent scsi device
 *
 * If the host's reserved free_list is empty, @cmd is parked there to
 * replenish the backup command; otherwise it is returned to the slab.
 * The device reference (@dev) is dropped in either case.
 */
void __scsi_put_command(struct Scsi_Host *shost, struct scsi_cmnd *cmd,
			struct device *dev)
{
	unsigned long flags;

	/* if the reserve list is depleted, keep this command as the backup */
	spin_lock_irqsave(&shost->free_list_lock, flags);
	if (unlikely(list_empty(&shost->free_list))) {
		list_add(&cmd->list, &shost->free_list);
		cmd = NULL;	/* ownership transferred to free_list */
	}
	spin_unlock_irqrestore(&shost->free_list_lock, flags);

	if (likely(cmd != NULL))
		kmem_cache_free(shost->cmd_pool->slab, cmd);

	put_device(dev);
}
EXPORT_SYMBOL(__scsi_put_command);
255
/**
 * scsi_put_command - Free a scsi command block
 * @cmd: command block to free
 *
 * Returns: Nothing.
 *
 * Notes: The command must still be linked on its device's cmd_list
 * (scsi_get_command() put it there); being off-list is a bug.
 * __scsi_put_command() also drops the device reference taken at
 * allocation time.
 */
void scsi_put_command(struct scsi_cmnd *cmd)
{
	struct scsi_device *sdev = cmd->device;
	unsigned long flags;

	/* serious error if the command hasn't come from a device list */
	spin_lock_irqsave(&cmd->device->list_lock, flags);
	BUG_ON(list_empty(&cmd->list));
	list_del_init(&cmd->list);
	spin_unlock_irqrestore(&cmd->device->list_lock, flags);

	__scsi_put_command(cmd->device->host, cmd, &sdev->sdev_gendev);
}
EXPORT_SYMBOL(scsi_put_command);
278
/**
 * scsi_setup_command_freelist - Setup the command freelist for a scsi host.
 * @shost: host to allocate the freelist for.
 *
 * Description: The command freelist protects against system-wide out of memory
 * deadlock by preallocating one SCSI command structure for each host, so the
 * system can always write to a swap file on a device associated with that host.
 *
 * Returns: 0 on success, -ENOMEM if the slab or the reserved command
 * could not be allocated.
 */
int scsi_setup_command_freelist(struct Scsi_Host *shost)
{
	struct scsi_host_cmd_pool *pool;
	struct scsi_cmnd *cmd;

	spin_lock_init(&shost->free_list_lock);
	INIT_LIST_HEAD(&shost->free_list);

	/*
	 * Select a command slab for this host and create it if not
	 * yet existent.
	 */
	mutex_lock(&host_cmd_pool_mutex);
	pool = (shost->unchecked_isa_dma ? &scsi_cmd_dma_pool : &scsi_cmd_pool);
	if (!pool->users) {
		pool->slab = kmem_cache_create(pool->name,
				sizeof(struct scsi_cmnd), 0,
				pool->slab_flags, NULL);
		if (!pool->slab)
			goto fail;
	}

	pool->users++;
	shost->cmd_pool = pool;
	mutex_unlock(&host_cmd_pool_mutex);

	/*
	 * Get one backup command for this host.
	 */
	cmd = kmem_cache_alloc(shost->cmd_pool->slab,
			GFP_KERNEL | shost->cmd_pool->gfp_mask);
	if (!cmd)
		goto fail2;
	list_add(&cmd->list, &shost->free_list);
	return 0;

 fail2:
	/*
	 * Retake the mutex before touching pool->users: another host may
	 * have attached to the pool while we allocated (we dropped the
	 * mutex above), in which case the slab must survive.
	 */
	mutex_lock(&host_cmd_pool_mutex);
	if (!--pool->users)
		kmem_cache_destroy(pool->slab);
 fail:
	mutex_unlock(&host_cmd_pool_mutex);
	return -ENOMEM;
}
333
/**
 * scsi_destroy_command_freelist - Release the command freelist for a scsi host.
 * @shost: host whose freelist is going to be destroyed
 *
 * Counterpart of scsi_setup_command_freelist(): returns every reserved
 * command on the host's free_list to the pool slab, then detaches from
 * the shared pool, destroying the slab when the last user is gone.
 */
void scsi_destroy_command_freelist(struct Scsi_Host *shost)
{
	while (!list_empty(&shost->free_list)) {
		struct scsi_cmnd *cmd;

		cmd = list_entry(shost->free_list.next, struct scsi_cmnd, list);
		list_del_init(&cmd->list);
		kmem_cache_free(shost->cmd_pool->slab, cmd);
	}

	/* drop our pool reference; destroy the slab if we were the last user */
	mutex_lock(&host_cmd_pool_mutex);
	if (!--shost->cmd_pool->users)
		kmem_cache_destroy(shost->cmd_pool->slab);
	mutex_unlock(&host_cmd_pool_mutex);
}
353
354 #ifdef CONFIG_SCSI_LOGGING
355 void scsi_log_send(struct scsi_cmnd *cmd)
356 {
357 unsigned int level;
358
359 /*
360 * If ML QUEUE log level is greater than or equal to:
361 *
362 * 1: nothing (match completion)
363 *
364 * 2: log opcode + command of all commands
365 *
366 * 3: same as 2 plus dump cmd address
367 *
368 * 4: same as 3 plus dump extra junk
369 */
370 if (unlikely(scsi_logging_level)) {
371 level = SCSI_LOG_LEVEL(SCSI_LOG_MLQUEUE_SHIFT,
372 SCSI_LOG_MLQUEUE_BITS);
373 if (level > 1) {
374 scmd_printk(KERN_INFO, cmd, "Send: ");
375 if (level > 2)
376 printk("0x%p ", cmd);
377 printk("\n");
378 scsi_print_command(cmd);
379 if (level > 3) {
380 printk(KERN_INFO "buffer = 0x%p, bufflen = %d,"
381 " queuecommand 0x%p\n",
382 scsi_sglist(cmd), scsi_bufflen(cmd),
383 cmd->device->host->hostt->queuecommand);
384
385 }
386 }
387 }
388 }
389
/* Log a command at completion time, gated by the ML COMPLETE level. */
void scsi_log_completion(struct scsi_cmnd *cmd, int disposition)
{
	unsigned int level;

	/*
	 * If ML COMPLETE log level is greater than or equal to:
	 *
	 * 1: log disposition, result, opcode + command, and conditionally
	 * sense data for failures or non SUCCESS dispositions.
	 *
	 * 2: same as 1 but for all command completions.
	 *
	 * 3: same as 2 plus dump cmd address
	 *
	 * 4: same as 3 plus dump extra junk
	 */
	if (unlikely(scsi_logging_level)) {
		level = SCSI_LOG_LEVEL(SCSI_LOG_MLCOMPLETE_SHIFT,
				       SCSI_LOG_MLCOMPLETE_BITS);
		/* level 1 logs only errors / non-SUCCESS; level 2+ logs all */
		if (((level > 0) && (cmd->result || disposition != SUCCESS)) ||
		    (level > 1)) {
			scmd_printk(KERN_INFO, cmd, "Done: ");
			if (level > 2)
				printk("0x%p ", cmd);
			/*
			 * Dump truncated values, so we usually fit within
			 * 80 chars.
			 */
			switch (disposition) {
			case SUCCESS:
				printk("SUCCESS\n");
				break;
			case NEEDS_RETRY:
				printk("RETRY\n");
				break;
			case ADD_TO_MLQUEUE:
				printk("MLQUEUE\n");
				break;
			case FAILED:
				printk("FAILED\n");
				break;
			case TIMEOUT_ERROR:
				/*
				 * If called via scsi_times_out.
				 */
				printk("TIMEOUT\n");
				break;
			default:
				printk("UNKNOWN\n");
			}
			scsi_print_result(cmd);
			scsi_print_command(cmd);
			/* print sense data when the target reported a check condition */
			if (status_byte(cmd->result) & CHECK_CONDITION)
				scsi_print_sense("", cmd);
			if (level > 3)
				scmd_printk(KERN_INFO, cmd,
					    "scsi host busy %d failed %d\n",
					    cmd->device->host->host_busy,
					    cmd->device->host->host_failed);
		}
	}
}
452 #endif
453
454 /**
455 * scsi_cmd_get_serial - Assign a serial number to a command
456 * @host: the scsi host
457 * @cmd: command to assign serial number to
458 *
459 * Description: a serial number identifies a request for error recovery
460 * and debugging purposes. Protected by the Host_Lock of host.
461 */
462 static inline void scsi_cmd_get_serial(struct Scsi_Host *host, struct scsi_cmnd *cmd)
463 {
464 cmd->serial_number = host->cmd_serial_number++;
465 if (cmd->serial_number == 0)
466 cmd->serial_number = host->cmd_serial_number++;
467 }
468
/**
 * scsi_dispatch_command - Dispatch a command to the low-level driver.
 * @cmd: command block we are dispatching.
 *
 * Return: nonzero return request was rejected and device's queue needs to be
 * plugged; 0 means the command was handed to the LLDD or completed with
 * an error internally.
 */
int scsi_dispatch_cmd(struct scsi_cmnd *cmd)
{
	struct Scsi_Host *host = cmd->device->host;
	unsigned long flags = 0;
	unsigned long timeout;
	int rtn = 0;

	/* check if the device is still usable */
	if (unlikely(cmd->device->sdev_state == SDEV_DEL)) {
		/* in SDEV_DEL we error all commands. DID_NO_CONNECT
		 * returns an immediate error upwards, and signals
		 * that the device is no longer present */
		cmd->result = DID_NO_CONNECT << 16;
		atomic_inc(&cmd->device->iorequest_cnt);
		__scsi_done(cmd);
		/* return 0 (because the command has been processed) */
		goto out;
	}

	/* Check to see if the scsi lld put this device into state SDEV_BLOCK. */
	if (unlikely(cmd->device->sdev_state == SDEV_BLOCK)) {
		/*
		 * in SDEV_BLOCK, the command is just put back on the device
		 * queue.  The suspend state has already blocked the queue so
		 * future requests should not occur until the device
		 * transitions out of the suspend state.
		 */
		scsi_queue_insert(cmd, SCSI_MLQUEUE_DEVICE_BUSY);

		SCSI_LOG_MLQUEUE(3, printk("queuecommand : device blocked \n"));

		/*
		 * NOTE: rtn is still zero here because we don't need the
		 * queue to be plugged on return (it's already stopped)
		 */
		goto out;
	}

	/*
	 * If SCSI-2 or lower, store the LUN value in cmnd.
	 */
	if (cmd->device->scsi_level <= SCSI_2 &&
	    cmd->device->scsi_level != SCSI_UNKNOWN) {
		cmd->cmnd[1] = (cmd->cmnd[1] & 0x1f) |
			       (cmd->device->lun << 5 & 0xe0);
	}

	/*
	 * We will wait MIN_RESET_DELAY clock ticks after the last reset so
	 * we can avoid the drive not being ready.
	 */
	timeout = host->last_reset + MIN_RESET_DELAY;

	if (host->resetting && time_before(jiffies, timeout)) {
		int ticks_remaining = timeout - jiffies;
		/*
		 * NOTE: This may be executed from within an interrupt
		 * handler!  This is bad, but for now, it'll do.  The irq
		 * level of the interrupt handler has been masked out by the
		 * platform dependent interrupt handling code already, so the
		 * sti() here will not cause another call to the SCSI host's
		 * interrupt handler (assuming there is one irq-level per
		 * host).
		 */
		while (--ticks_remaining >= 0)
			mdelay(1 + 999 / HZ);	/* busy-wait roughly one tick per loop */
		host->resetting = 0;
	}

	/*
	 * AK: unlikely race here: for some reason the timer could
	 * expire before the serial number is set up below.
	 */
	scsi_add_timer(cmd, cmd->timeout_per_command, scsi_times_out);

	scsi_log_send(cmd);

	/*
	 * We will use a queued command if possible, otherwise we will
	 * emulate the queuing and calling of completion function ourselves.
	 */
	atomic_inc(&cmd->device->iorequest_cnt);

	/*
	 * Before we queue this command, check if the command
	 * length exceeds what the host adapter can handle.
	 */
	if (CDB_SIZE(cmd) > cmd->device->host->max_cmd_len) {
		SCSI_LOG_MLQUEUE(3,
				printk("queuecommand : command too long.\n"));
		cmd->result = (DID_ABORT << 16);

		scsi_done(cmd);
		goto out;
	}

	/* host_lock protects the serial counter and the shost_state check */
	spin_lock_irqsave(host->host_lock, flags);
	scsi_cmd_get_serial(host, cmd);

	if (unlikely(host->shost_state == SHOST_DEL)) {
		cmd->result = (DID_NO_CONNECT << 16);
		scsi_done(cmd);
	} else {
		rtn = host->hostt->queuecommand(cmd, scsi_done);
	}
	spin_unlock_irqrestore(host->host_lock, flags);
	if (rtn) {
		/* LLDD rejected the command: if the timer has not already
		 * fired, remove it and requeue the command for retry */
		if (scsi_delete_timer(cmd)) {
			atomic_inc(&cmd->device->iodone_cnt);
			scsi_queue_insert(cmd,
					  (rtn == SCSI_MLQUEUE_DEVICE_BUSY) ?
					  rtn : SCSI_MLQUEUE_HOST_BUSY);
		}
		SCSI_LOG_MLQUEUE(3,
				 printk("queuecommand : request rejected\n"));
	}

 out:
	SCSI_LOG_MLQUEUE(3, printk("leaving scsi_dispatch_cmnd()\n"));
	return rtn;
}
597
/**
 * scsi_req_abort_cmd -- Request command recovery for the specified command
 * @cmd: pointer to the SCSI command of interest
 *
 * Kick @cmd straight into error handling: if its timer can still be
 * deleted (i.e. it has not fired yet), run the timeout path immediately.
 * It can be called by either LLDDs or SCSI Core.  LLDDs who implement
 * their own error recovery MAY ignore the timeout event if they
 * generated scsi_req_abort_cmd.
 */
void scsi_req_abort_cmd(struct scsi_cmnd *cmd)
{
	if (scsi_delete_timer(cmd))
		scsi_times_out(cmd);
}
EXPORT_SYMBOL(scsi_req_abort_cmd);
615
/**
 * scsi_done - Enqueue the finished SCSI command into the done queue.
 * @cmd: The SCSI Command for which a low-level device driver (LLDD) gives
 * ownership back to SCSI Core -- i.e. the LLDD has finished with it.
 *
 * Description: This is the mid-level's (SCSI Core) interrupt routine, which
 * regains ownership of the SCSI command (de facto) from a LLDD, and enqueues
 * it on the done queue for further processing.  Interrupt context safe.
 */
static void scsi_done(struct scsi_cmnd *cmd)
{
	/*
	 * If the timer cannot be deleted it has already fired: the timeout
	 * handler owns completion now (it may be running on another CPU),
	 * so do nothing here.
	 */
	if (scsi_delete_timer(cmd))
		__scsi_done(cmd);
}
643
/* Private entry to scsi_done() to complete a command when the timer
 * isn't running --- used by scsi_times_out */
void __scsi_done(struct scsi_cmnd *cmd)
{
	struct request *rq = cmd->request;

	/*
	 * Set the serial numbers back to zero
	 */
	cmd->serial_number = 0;

	/* completion statistics: count every completion, and errors
	 * (any nonzero result) separately */
	atomic_inc(&cmd->device->iodone_cnt);
	if (cmd->result)
		atomic_inc(&cmd->device->ioerr_cnt);

	BUG_ON(!rq);

	/*
	 * The uptodate/nbytes values don't matter, as we allow partial
	 * completes and thus will check this in the softirq callback
	 */
	rq->completion_data = cmd;
	blk_complete_request(rq);
}
668
669 /* Move this to a header if it becomes more generally useful */
670 static struct scsi_driver *scsi_cmd_to_driver(struct scsi_cmnd *cmd)
671 {
672 return *(struct scsi_driver **)cmd->request->rq_disk->private_data;
673 }
674
/**
 * scsi_finish_command - cleanup and pass command back to upper layer
 * @cmd: the command
 *
 * Description: Pass command off to upper layer for finishing of I/O
 * request, waking processes that are waiting on results,
 * etc.
 */
void scsi_finish_command(struct scsi_cmnd *cmd)
{
	struct scsi_device *sdev = cmd->device;
	struct Scsi_Host *shost = sdev->host;
	struct scsi_driver *drv;
	unsigned int good_bytes;

	scsi_device_unbusy(sdev);

	/*
	 * Clear the flags which say that the device/host is no longer
	 * capable of accepting new commands.  These are set in scsi_queue.c
	 * for both the queue full condition on a device, and for a
	 * host full condition on the host.
	 *
	 * XXX(hch): What about locking?
	 */
	shost->host_blocked = 0;
	sdev->device_blocked = 0;

	/*
	 * If we have valid sense information, then some kind of recovery
	 * must have taken place.  Make a note of this.
	 */
	if (SCSI_SENSE_VALID(cmd))
		cmd->result |= (DRIVER_SENSE << 24);

	SCSI_LOG_MLCOMPLETE(4, sdev_printk(KERN_INFO, sdev,
				"Notifying upper driver of completion "
				"(result %x)\n", cmd->result));

	/* for filesystem requests, let the upper-level driver's done()
	 * hook adjust the number of successfully transferred bytes */
	good_bytes = cmd->request_bufflen;
	if (cmd->request->cmd_type != REQ_TYPE_BLOCK_PC) {
		drv = scsi_cmd_to_driver(cmd);
		if (drv->done)
			good_bytes = drv->done(cmd);
	}
	scsi_io_completion(cmd, good_bytes);
}
EXPORT_SYMBOL(scsi_finish_command);
723
/**
 * scsi_adjust_queue_depth - Let low level drivers change a device's queue depth
 * @sdev: SCSI Device in question
 * @tagged: Do we use tagged queueing (non-0) or do we treat
 * this device as an untagged device (0)
 * @tags: Number of tags allowed if tagged queueing enabled,
 * or number of commands the low level driver can
 * queue up in non-tagged mode (as per cmd_per_lun).
 *
 * Returns: Nothing
 *
 * Lock Status: None held on entry
 *
 * Notes: Low level drivers may call this at any time and we will do
 * the right thing depending on whether or not the device is
 * currently active and whether or not it even has the
 * command blocks built yet.
 */
void scsi_adjust_queue_depth(struct scsi_device *sdev, int tagged, int tags)
{
	unsigned long flags;

	/*
	 * refuse to set tagged depth to an unworkable size
	 */
	if (tags <= 0)
		return;

	spin_lock_irqsave(sdev->request_queue->queue_lock, flags);

	/* Check to see if the queue is managed by the block layer.
	 * If it is, and we fail to adjust the depth, exit. */
	if (blk_queue_tagged(sdev->request_queue) &&
	    blk_queue_resize_tags(sdev->request_queue, tags) != 0)
		goto out;

	sdev->queue_depth = tags;
	switch (tagged) {
	case MSG_ORDERED_TAG:
		sdev->ordered_tags = 1;
		sdev->simple_tags = 1;
		break;
	case MSG_SIMPLE_TAG:
		sdev->ordered_tags = 0;
		sdev->simple_tags = 1;
		break;
	default:
		sdev_printk(KERN_WARNING, sdev,
			    "scsi_adjust_queue_depth, bad queue type, "
			    "disabled\n");
		/* fall through: treat unknown queue types as untagged */
	case 0:
		sdev->ordered_tags = sdev->simple_tags = 0;
		sdev->queue_depth = tags;
		break;
	}
 out:
	spin_unlock_irqrestore(sdev->request_queue->queue_lock, flags);
}
EXPORT_SYMBOL(scsi_adjust_queue_depth);
783
/**
 * scsi_track_queue_full - track QUEUE_FULL events to adjust queue depth
 * @sdev: SCSI Device in question
 * @depth: Current number of outstanding SCSI commands on this device,
 * not counting the one returned as QUEUE_FULL.
 *
 * Description: This function will track successive QUEUE_FULL events on a
 * specific SCSI device to determine if and when there is a
 * need to adjust the queue depth on the device.
 *
 * Returns: 0 - No change needed, >0 - Adjust queue depth to this new depth,
 * -1 - Drop back to untagged operation using host->cmd_per_lun
 * as the untagged command depth
 *
 * Lock Status: None held on entry
 *
 * Notes: Low level drivers may call this at any time and we will do
 * "The Right Thing."  We are interrupt context safe.
 */
int scsi_track_queue_full(struct scsi_device *sdev, int depth)
{
	/* rate-limit: at most one adjustment per 16-jiffy window */
	if ((jiffies >> 4) == sdev->last_queue_full_time)
		return 0;

	sdev->last_queue_full_time = (jiffies >> 4);
	if (sdev->last_queue_full_depth != depth) {
		/* depth changed since the last event: restart the count */
		sdev->last_queue_full_count = 1;
		sdev->last_queue_full_depth = depth;
	} else {
		sdev->last_queue_full_count++;
	}

	/* act only after more than 10 QUEUE_FULLs at the same depth */
	if (sdev->last_queue_full_count <= 10)
		return 0;
	if (sdev->last_queue_full_depth < 8) {
		/* Drop back to untagged */
		scsi_adjust_queue_depth(sdev, 0, sdev->host->cmd_per_lun);
		return -1;
	}

	if (sdev->ordered_tags)
		scsi_adjust_queue_depth(sdev, MSG_ORDERED_TAG, depth);
	else
		scsi_adjust_queue_depth(sdev, MSG_SIMPLE_TAG, depth);
	return depth;
}
EXPORT_SYMBOL(scsi_track_queue_full);
831
/**
 * scsi_device_get - get an additional reference to a scsi_device
 * @sdev: device to get a reference to
 *
 * Description: Gets a reference to the scsi_device and increments the use count
 * of the underlying LLDD module.  You must hold host_lock of the
 * parent Scsi_Host or already have a reference when calling this.
 *
 * Returns: 0 on success, -ENXIO if the device is being deleted or its
 * gendev reference cannot be taken.
 */
int scsi_device_get(struct scsi_device *sdev)
{
	if (sdev->sdev_state == SDEV_DEL)
		return -ENXIO;
	if (!get_device(&sdev->sdev_gendev))
		return -ENXIO;
	/* We can fail this if we're doing SCSI operations
	 * from module exit (like cache flush) */
	/* NOTE: the try_module_get() result is deliberately ignored;
	 * scsi_device_put() compensates by checking module_refcount() */
	try_module_get(sdev->host->hostt->module);

	return 0;
}
EXPORT_SYMBOL(scsi_device_get);
853
/**
 * scsi_device_put - release a reference to a scsi_device
 * @sdev: device to release a reference on.
 *
 * Description: Release a reference to the scsi_device and decrements the use
 * count of the underlying LLDD module.  The device is freed once the last
 * user vanishes.  Pairs with scsi_device_get().
 */
void scsi_device_put(struct scsi_device *sdev)
{
#ifdef CONFIG_MODULE_UNLOAD
	struct module *module = sdev->host->hostt->module;

	/* The module refcount will be zero if scsi_device_get()
	 * was called from a module removal routine */
	if (module && module_refcount(module) != 0)
		module_put(module);
#endif
	put_device(&sdev->sdev_gendev);
}
EXPORT_SYMBOL(scsi_device_put);
875
/* helper for shost_for_each_device, see that for documentation */
struct scsi_device *__scsi_iterate_devices(struct Scsi_Host *shost,
					   struct scsi_device *prev)
{
	struct list_head *list = (prev ? &prev->siblings : &shost->__devices);
	struct scsi_device *next = NULL;
	unsigned long flags;

	spin_lock_irqsave(shost->host_lock, flags);
	while (list->next != &shost->__devices) {
		next = list_entry(list->next, struct scsi_device, siblings);
		/* skip devices that we can't get a reference to */
		/* (scsi_device_get() returns 0 on success, so breaking here
		 * means we now hold a reference on @next) */
		if (!scsi_device_get(next))
			break;
		next = NULL;
		list = list->next;
	}
	spin_unlock_irqrestore(shost->host_lock, flags);

	/* release the reference held on the previous iteration's device */
	if (prev)
		scsi_device_put(prev);
	return next;
}
EXPORT_SYMBOL(__scsi_iterate_devices);
900
901 /**
902 * starget_for_each_device - helper to walk all devices of a target
903 * @starget: target whose devices we want to iterate over.
904 * @data: Opaque passed to each function call.
905 * @fn: Function to call on each device
906 *
907 * This traverses over each device of @starget. The devices have
908 * a reference that must be released by scsi_host_put when breaking
909 * out of the loop.
910 */
911 void starget_for_each_device(struct scsi_target *starget, void *data,
912 void (*fn)(struct scsi_device *, void *))
913 {
914 struct Scsi_Host *shost = dev_to_shost(starget->dev.parent);
915 struct scsi_device *sdev;
916
917 shost_for_each_device(sdev, shost) {
918 if ((sdev->channel == starget->channel) &&
919 (sdev->id == starget->id))
920 fn(sdev, data);
921 }
922 }
923 EXPORT_SYMBOL(starget_for_each_device);
924
/**
 * __starget_for_each_device - helper to walk all devices of a target
 * (UNLOCKED)
 * @starget: target whose devices we want to iterate over.
 * @data: opaque argument passed through to @fn
 * @fn: callback invoked for each matching device
 *
 * This traverses over each device of @starget.  It does _not_
 * take a reference on the scsi_device, so the whole loop must be
 * protected by shost->host_lock.
 *
 * Note: The only reason why drivers would want to use this is because
 * they need to access the device list in irq context.  Otherwise you
 * really want to use starget_for_each_device instead.
 **/
void __starget_for_each_device(struct scsi_target *starget, void *data,
			       void (*fn)(struct scsi_device *, void *))
{
	struct Scsi_Host *shost = dev_to_shost(starget->dev.parent);
	struct scsi_device *sdev;

	/* caller holds host_lock -- no references taken on the devices */
	__shost_for_each_device(sdev, shost) {
		if ((sdev->channel == starget->channel) &&
		    (sdev->id == starget->id))
			fn(sdev, data);
	}
}
EXPORT_SYMBOL(__starget_for_each_device);
951
952 /**
953 * __scsi_device_lookup_by_target - find a device given the target (UNLOCKED)
954 * @starget: SCSI target pointer
955 * @lun: SCSI Logical Unit Number
956 *
957 * Description: Looks up the scsi_device with the specified @lun for a given
958 * @starget. The returned scsi_device does not have an additional
959 * reference. You must hold the host's host_lock over this call and
960 * any access to the returned scsi_device.
961 *
962 * Note: The only reason why drivers should use this is because
963 * they need to access the device list in irq context. Otherwise you
964 * really want to use scsi_device_lookup_by_target instead.
965 **/
966 struct scsi_device *__scsi_device_lookup_by_target(struct scsi_target *starget,
967 uint lun)
968 {
969 struct scsi_device *sdev;
970
971 list_for_each_entry(sdev, &starget->devices, same_target_siblings) {
972 if (sdev->lun ==lun)
973 return sdev;
974 }
975
976 return NULL;
977 }
978 EXPORT_SYMBOL(__scsi_device_lookup_by_target);
979
/**
 * scsi_device_lookup_by_target - find a device given the target
 * @starget: SCSI target pointer
 * @lun: SCSI Logical Unit Number
 *
 * Description: Looks up the scsi_device with the specified @lun for the
 * given @starget.  The returned scsi_device has an additional reference that
 * needs to be released with scsi_device_put once you're done with it.
 **/
struct scsi_device *scsi_device_lookup_by_target(struct scsi_target *starget,
						 uint lun)
{
	struct scsi_device *sdev;
	struct Scsi_Host *shost = dev_to_shost(starget->dev.parent);
	unsigned long flags;

	spin_lock_irqsave(shost->host_lock, flags);
	sdev = __scsi_device_lookup_by_target(starget, lun);
	/* scsi_device_get() returns nonzero on failure to take a reference */
	if (sdev && scsi_device_get(sdev))
		sdev = NULL;
	spin_unlock_irqrestore(shost->host_lock, flags);

	return sdev;
}
EXPORT_SYMBOL(scsi_device_lookup_by_target);
1005
1006 /**
1007 * __scsi_device_lookup - find a device given the host (UNLOCKED)
1008 * @shost: SCSI host pointer
1009 * @channel: SCSI channel (zero if only one channel)
1010 * @id: SCSI target number (physical unit number)
1011 * @lun: SCSI Logical Unit Number
1012 *
1013 * Description: Looks up the scsi_device with the specified @channel, @id, @lun
1014 * for a given host. The returned scsi_device does not have an additional
1015 * reference. You must hold the host's host_lock over this call and any access
1016 * to the returned scsi_device.
1017 *
1018 * Note: The only reason why drivers would want to use this is because
1019 * they need to access the device list in irq context. Otherwise you
1020 * really want to use scsi_device_lookup instead.
1021 **/
1022 struct scsi_device *__scsi_device_lookup(struct Scsi_Host *shost,
1023 uint channel, uint id, uint lun)
1024 {
1025 struct scsi_device *sdev;
1026
1027 list_for_each_entry(sdev, &shost->__devices, siblings) {
1028 if (sdev->channel == channel && sdev->id == id &&
1029 sdev->lun ==lun)
1030 return sdev;
1031 }
1032
1033 return NULL;
1034 }
1035 EXPORT_SYMBOL(__scsi_device_lookup);
1036
/**
 * scsi_device_lookup - find a device given the host
 * @shost: SCSI host pointer
 * @channel: SCSI channel (zero if only one channel)
 * @id: SCSI target number (physical unit number)
 * @lun: SCSI Logical Unit Number
 *
 * Description: Looks up the scsi_device with the specified @channel, @id, @lun
 * for a given host.  The returned scsi_device has an additional reference that
 * needs to be released with scsi_device_put once you're done with it.
 **/
struct scsi_device *scsi_device_lookup(struct Scsi_Host *shost,
				       uint channel, uint id, uint lun)
{
	struct scsi_device *sdev;
	unsigned long flags;

	spin_lock_irqsave(shost->host_lock, flags);
	sdev = __scsi_device_lookup(shost, channel, id, lun);
	/* scsi_device_get() returns nonzero on failure to take a reference */
	if (sdev && scsi_device_get(sdev))
		sdev = NULL;
	spin_unlock_irqrestore(shost->host_lock, flags);

	return sdev;
}
EXPORT_SYMBOL(scsi_device_lookup);
1063
MODULE_DESCRIPTION("SCSI core");
MODULE_LICENSE("GPL");

/* Logging bitmask is runtime-tunable as a module parameter (see the
 * comment above scsi_logging_level for the /proc interface). */
module_param(scsi_logging_level, int, S_IRUGO|S_IWUSR);
MODULE_PARM_DESC(scsi_logging_level, "a bit mask of logging levels");
1069
/*
 * init_scsi - bring up all SCSI core subsystems in order; on failure,
 * unwind the already-initialized ones in reverse via the goto chain.
 */
static int __init init_scsi(void)
{
	int error;

	error = scsi_init_queue();
	if (error)
		return error;
	error = scsi_init_procfs();
	if (error)
		goto cleanup_queue;
	error = scsi_init_devinfo();
	if (error)
		goto cleanup_procfs;
	error = scsi_init_hosts();
	if (error)
		goto cleanup_devlist;
	error = scsi_init_sysctl();
	if (error)
		goto cleanup_hosts;
	error = scsi_sysfs_register();
	if (error)
		goto cleanup_sysctl;

	/* return value intentionally not checked -- NOTE(review): presumably
	 * netlink support is best-effort; confirm against scsi_netlink_init() */
	scsi_netlink_init();

	printk(KERN_NOTICE "SCSI subsystem initialized\n");
	return 0;

cleanup_sysctl:
	scsi_exit_sysctl();
cleanup_hosts:
	scsi_exit_hosts();
cleanup_devlist:
	scsi_exit_devinfo();
cleanup_procfs:
	scsi_exit_procfs();
cleanup_queue:
	scsi_exit_queue();
	printk(KERN_ERR "SCSI subsystem failed to initialize, error = %d\n",
	       -error);
	return error;
}
1112
/* Tear down SCSI core subsystems in reverse order of init_scsi(). */
static void __exit exit_scsi(void)
{
	scsi_netlink_exit();
	scsi_sysfs_unregister();
	scsi_exit_sysctl();
	scsi_exit_hosts();
	scsi_exit_devinfo();
	scsi_exit_procfs();
	scsi_exit_queue();
}
1123
1124 subsys_initcall(init_scsi);
1125 module_exit(exit_scsi);