/*
 * scsi_lib.c Copyright (C) 1999 Eric Youngdale
 *
 * SCSI queueing library.
 *      Initial versions: Eric Youngdale (eric@andante.org).
 *      Based upon conversations with large numbers
 *      of people at Linux Expo.
 */
#include <linux/bio.h>
#include <linux/bitops.h>
#include <linux/blkdev.h>
#include <linux/completion.h>
#include <linux/kernel.h>
#include <linux/export.h>
#include <linux/mempool.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/pci.h>
#include <linux/delay.h>
#include <linux/hardirq.h>
#include <linux/scatterlist.h>

#include <scsi/scsi.h>
#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_dbg.h>
#include <scsi/scsi_device.h>
#include <scsi/scsi_driver.h>
#include <scsi/scsi_eh.h>
#include <scsi/scsi_host.h>

#include "scsi_priv.h"
#include "scsi_logging.h"
#define SG_MEMPOOL_NR		ARRAY_SIZE(scsi_sg_pools)
#define SG_MEMPOOL_SIZE		2

struct scsi_host_sg_pool {
	size_t		size;
	char		*name;
	struct kmem_cache	*slab;
	mempool_t	*pool;
};

#define SP(x) { x, "sgpool-" __stringify(x) }
#if (SCSI_MAX_SG_SEGMENTS < 32)
#error SCSI_MAX_SG_SEGMENTS is too small (must be 32 or greater)
#endif

static struct scsi_host_sg_pool scsi_sg_pools[] = {
	SP(8),
	SP(16),
#if (SCSI_MAX_SG_SEGMENTS > 32)
	SP(32),
#if (SCSI_MAX_SG_SEGMENTS > 64)
	SP(64),
#if (SCSI_MAX_SG_SEGMENTS > 128)
	SP(128),
#if (SCSI_MAX_SG_SEGMENTS > 256)
#error SCSI_MAX_SG_SEGMENTS is too large (256 MAX)
#endif
#endif
#endif
#endif
	SP(SCSI_MAX_SG_SEGMENTS)
};
#undef SP
struct kmem_cache *scsi_sdb_cache;
#ifdef CONFIG_ACPI
#include <acpi/acpi_bus.h>

int scsi_register_acpi_bus_type(struct acpi_bus_type *bus)
{
	bus->bus = &scsi_bus_type;
	return register_acpi_bus_type(bus);
}
EXPORT_SYMBOL_GPL(scsi_register_acpi_bus_type);

void scsi_unregister_acpi_bus_type(struct acpi_bus_type *bus)
{
	unregister_acpi_bus_type(bus);
}
EXPORT_SYMBOL_GPL(scsi_unregister_acpi_bus_type);
#endif
/*
 * When to reinvoke queueing after a resource shortage. It's 3 msecs to
 * not change behaviour from the previous unplug mechanism, experimentation
 * may prove this needs changing.
 */
#define SCSI_QUEUE_DELAY	3
/*
 * Function:	scsi_unprep_request()
 *
 * Purpose:	Remove all preparation done for a request, including its
 *		associated scsi_cmnd, so that it can be requeued.
 *
 * Arguments:	req	- request to unprepare
 *
 * Lock status:	Assumed that no locks are held upon entry.
 */
static void scsi_unprep_request(struct request *req)
{
	struct scsi_cmnd *cmd = req->special;

	blk_unprep_request(req);
	req->special = NULL;

	scsi_put_command(cmd);
}
/**
 * __scsi_queue_insert - private queue insertion
 * @cmd: The SCSI command being requeued
 * @reason:  The reason for the requeue
 * @unbusy: Whether the queue should be unbusied
 *
 * This is a private queue insertion.  The public interface
 * scsi_queue_insert() always assumes the queue should be unbusied
 * because it's always called before the completion.  This function is
 * for a requeue after completion, which should only occur in this
 * file.
 */
static void __scsi_queue_insert(struct scsi_cmnd *cmd, int reason, int unbusy)
{
	struct Scsi_Host *host = cmd->device->host;
	struct scsi_device *device = cmd->device;
	struct scsi_target *starget = scsi_target(device);
	struct request_queue *q = device->request_queue;
	unsigned long flags;

	SCSI_LOG_MLQUEUE(1,
		 printk("Inserting command %p into mlqueue\n", cmd));
	/*
	 * Set the appropriate busy bit for the device/host.
	 *
	 * If the host/device isn't busy, assume that something actually
	 * completed, and that we should be able to queue a command now.
	 *
	 * Note that the prior mid-layer assumption that any host could
	 * always queue at least one command is now broken.  The mid-layer
	 * will implement a user specifiable stall (see
	 * scsi_host.max_host_blocked and scsi_device.max_device_blocked)
	 * if a command is requeued with no other commands outstanding
	 * either for the device or for the host.
	 */
	switch (reason) {
	case SCSI_MLQUEUE_HOST_BUSY:
		host->host_blocked = host->max_host_blocked;
		break;
	case SCSI_MLQUEUE_DEVICE_BUSY:
	case SCSI_MLQUEUE_EH_RETRY:
		device->device_blocked = device->max_device_blocked;
		break;
	case SCSI_MLQUEUE_TARGET_BUSY:
		starget->target_blocked = starget->max_target_blocked;
		break;
	}
	/*
	 * Decrement the counters, since these commands are no longer
	 * active on the host/device.
	 */
	if (unbusy)
		scsi_device_unbusy(device);
	/*
	 * Requeue this command.  It will go before all other commands
	 * that are already in the queue. Schedule requeue work under
	 * lock such that the kblockd_schedule_work() call happens
	 * before blk_cleanup_queue() finishes.
	 */
	spin_lock_irqsave(q->queue_lock, flags);
	blk_requeue_request(q, cmd->request);
	kblockd_schedule_work(q, &device->requeue_work);
	spin_unlock_irqrestore(q->queue_lock, flags);
}
/*
 * Function:    scsi_queue_insert()
 *
 * Purpose:     Insert a command in the midlevel queue.
 *
 * Arguments:   cmd    - command that we are adding to queue.
 *              reason - why we are inserting command to queue.
 *
 * Lock status: Assumed that lock is not held upon entry.
 *
 * Notes:       We do this for one of two cases.  Either the host is busy
 *              and it cannot accept any more commands for the time being,
 *              or the device returned QUEUE_FULL and can accept no more
 *              commands.
 * Notes:       This could be called either from an interrupt context or a
 *              normal process context.
 */
void scsi_queue_insert(struct scsi_cmnd *cmd, int reason)
{
	__scsi_queue_insert(cmd, reason, 1);
}
/**
 * scsi_execute - insert request and wait for the result
 * @sdev:	scsi device
 * @cmd:	scsi command
 * @data_direction: data direction
 * @buffer:	data buffer
 * @bufflen:	len of buffer
 * @sense:	optional sense buffer
 * @timeout:	request timeout in seconds
 * @retries:	number of times to retry request
 * @flags:	or into request flags;
 * @resid:	optional residual length
 *
 * returns the req->errors value which is the scsi_cmnd result
 * field.
 */
int scsi_execute(struct scsi_device *sdev, const unsigned char *cmd,
		 int data_direction, void *buffer, unsigned bufflen,
		 unsigned char *sense, int timeout, int retries, int flags,
		 int *resid)
{
	struct request *req;
	int write = (data_direction == DMA_TO_DEVICE);
	int ret = DRIVER_ERROR << 24;

	req = blk_get_request(sdev->request_queue, write, __GFP_WAIT);
	if (!req)
		return ret;

	if (bufflen &&	blk_rq_map_kern(sdev->request_queue, req,
					buffer, bufflen, __GFP_WAIT))
		goto out;

	req->cmd_len = COMMAND_SIZE(cmd[0]);
	memcpy(req->cmd, cmd, req->cmd_len);
	req->sense = sense;
	req->sense_len = 0;
	req->retries = retries;
	req->timeout = timeout;
	req->cmd_type = REQ_TYPE_BLOCK_PC;
	req->cmd_flags |= flags | REQ_QUIET | REQ_PREEMPT;

	/*
	 * head injection *required* here otherwise quiesce won't work
	 */
	blk_execute_rq(req->q, NULL, req, 1);

	/*
	 * Some devices (USB mass-storage in particular) may transfer
	 * garbage data together with a residue indicating that the data
	 * is invalid.  Prevent the garbage from being misinterpreted
	 * and prevent security leaks by zeroing out the excess data.
	 */
	if (unlikely(req->resid_len > 0 && req->resid_len <= bufflen))
		memset(buffer + (bufflen - req->resid_len), 0, req->resid_len);

	if (resid)
		*resid = req->resid_len;
	ret = req->errors;
 out:
	blk_put_request(req);

	return ret;
}
EXPORT_SYMBOL(scsi_execute);
int scsi_execute_req(struct scsi_device *sdev, const unsigned char *cmd,
		     int data_direction, void *buffer, unsigned bufflen,
		     struct scsi_sense_hdr *sshdr, int timeout, int retries,
		     int *resid)
{
	char *sense = NULL;
	int result;

	if (sshdr) {
		sense = kzalloc(SCSI_SENSE_BUFFERSIZE, GFP_NOIO);
		if (!sense)
			return DRIVER_ERROR << 24;
	}
	result = scsi_execute(sdev, cmd, data_direction, buffer, bufflen,
			      sense, timeout, retries, 0, resid);
	if (sshdr)
		scsi_normalize_sense(sense, SCSI_SENSE_BUFFERSIZE, sshdr);

	kfree(sense);
	return result;
}
EXPORT_SYMBOL(scsi_execute_req);
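/*
 * Illustrative usage sketch (not part of this file): how a caller might
 * issue a standard INQUIRY through scsi_execute_req() above.  The 36-byte
 * response length and the timeout/retry values are assumptions chosen for
 * the example, not values taken from this file.
 */
static int example_inquiry(struct scsi_device *sdev, unsigned char *buf)
{
	unsigned char cmd[6] = { INQUIRY, 0, 0, 0, 36, 0 };
	struct scsi_sense_hdr sshdr;

	/* DMA_FROM_DEVICE: the device returns data into @buf (36 bytes) */
	return scsi_execute_req(sdev, cmd, DMA_FROM_DEVICE, buf, 36,
				&sshdr, 30 * HZ, 3, NULL);
}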
/*
 * Function:    scsi_init_cmd_errh()
 *
 * Purpose:     Initialize cmd fields related to error handling.
 *
 * Arguments:   cmd	- command that is ready to be queued.
 *
 * Notes:       This function has the job of initializing a number of
 *              fields related to error handling.   Typically this will
 *              be called once for each command, as required.
 */
static void scsi_init_cmd_errh(struct scsi_cmnd *cmd)
{
	cmd->serial_number = 0;
	scsi_set_resid(cmd, 0);
	memset(cmd->sense_buffer, 0, SCSI_SENSE_BUFFERSIZE);
	if (cmd->cmd_len == 0)
		cmd->cmd_len = scsi_command_size(cmd->cmnd);
}
void scsi_device_unbusy(struct scsi_device *sdev)
{
	struct Scsi_Host *shost = sdev->host;
	struct scsi_target *starget = scsi_target(sdev);
	unsigned long flags;

	spin_lock_irqsave(shost->host_lock, flags);
	shost->host_busy--;
	starget->target_busy--;
	if (unlikely(scsi_host_in_recovery(shost) &&
		     (shost->host_failed || shost->host_eh_scheduled)))
		scsi_eh_wakeup(shost);
	spin_unlock(shost->host_lock);
	spin_lock(sdev->request_queue->queue_lock);
	sdev->device_busy--;
	spin_unlock_irqrestore(sdev->request_queue->queue_lock, flags);
}
/*
 * Called for single_lun devices on IO completion. Clear starget_sdev_user,
 * and call blk_run_queue for all the scsi_devices on the target -
 * including current_sdev first.
 *
 * Called with *no* scsi locks held.
 */
static void scsi_single_lun_run(struct scsi_device *current_sdev)
{
	struct Scsi_Host *shost = current_sdev->host;
	struct scsi_device *sdev, *tmp;
	struct scsi_target *starget = scsi_target(current_sdev);
	unsigned long flags;

	spin_lock_irqsave(shost->host_lock, flags);
	starget->starget_sdev_user = NULL;
	spin_unlock_irqrestore(shost->host_lock, flags);

	/*
	 * Call blk_run_queue for all LUNs on the target, starting with
	 * current_sdev. We race with others (to set starget_sdev_user),
	 * but in most cases, we will be first. Ideally, each LU on the
	 * target would get some limited time or requests on the target.
	 */
	blk_run_queue(current_sdev->request_queue);

	spin_lock_irqsave(shost->host_lock, flags);
	if (starget->starget_sdev_user)
		goto out;
	list_for_each_entry_safe(sdev, tmp, &starget->devices,
			same_target_siblings) {
		if (sdev == current_sdev)
			continue;
		if (scsi_device_get(sdev))
			continue;

		spin_unlock_irqrestore(shost->host_lock, flags);
		blk_run_queue(sdev->request_queue);
		spin_lock_irqsave(shost->host_lock, flags);

		scsi_device_put(sdev);
	}
 out:
	spin_unlock_irqrestore(shost->host_lock, flags);
}
static inline int scsi_device_is_busy(struct scsi_device *sdev)
{
	if (sdev->device_busy >= sdev->queue_depth || sdev->device_blocked)
		return 1;

	return 0;
}
static inline int scsi_target_is_busy(struct scsi_target *starget)
{
	return ((starget->can_queue > 0 &&
		 starget->target_busy >= starget->can_queue) ||
		 starget->target_blocked);
}
static inline int scsi_host_is_busy(struct Scsi_Host *shost)
{
	if ((shost->can_queue > 0 && shost->host_busy >= shost->can_queue) ||
	    shost->host_blocked || shost->host_self_blocked)
		return 1;

	return 0;
}
/*
 * Function:	scsi_run_queue()
 *
 * Purpose:	Select a proper request queue to serve next
 *
 * Arguments:	q	- last request's queue
 *
 * Notes:	The previous command was completely finished, start
 *		a new one if possible.
 */
static void scsi_run_queue(struct request_queue *q)
{
	struct scsi_device *sdev = q->queuedata;
	struct Scsi_Host *shost;
	LIST_HEAD(starved_list);
	unsigned long flags;

	shost = sdev->host;
	if (scsi_target(sdev)->single_lun)
		scsi_single_lun_run(sdev);

	spin_lock_irqsave(shost->host_lock, flags);
	list_splice_init(&shost->starved_list, &starved_list);

	while (!list_empty(&starved_list)) {
		/*
		 * As long as shost is accepting commands and we have
		 * starved queues, call blk_run_queue. scsi_request_fn
		 * drops the queue_lock and can add us back to the
		 * starved_list.
		 *
		 * host_lock protects the starved_list and starved_entry.
		 * scsi_request_fn must get the host_lock before checking
		 * or modifying starved_list or starved_entry.
		 */
		if (scsi_host_is_busy(shost))
			break;

		sdev = list_entry(starved_list.next,
				  struct scsi_device, starved_entry);
		list_del_init(&sdev->starved_entry);
		if (scsi_target_is_busy(scsi_target(sdev))) {
			list_move_tail(&sdev->starved_entry,
				       &shost->starved_list);
			continue;
		}

		spin_unlock(shost->host_lock);
		spin_lock(sdev->request_queue->queue_lock);
		__blk_run_queue(sdev->request_queue);
		spin_unlock(sdev->request_queue->queue_lock);
		spin_lock(shost->host_lock);
	}
	/* put any unprocessed entries back */
	list_splice(&starved_list, &shost->starved_list);
	spin_unlock_irqrestore(shost->host_lock, flags);

	blk_run_queue(q);
}
void scsi_requeue_run_queue(struct work_struct *work)
{
	struct scsi_device *sdev;
	struct request_queue *q;

	sdev = container_of(work, struct scsi_device, requeue_work);
	q = sdev->request_queue;
	scsi_run_queue(q);
}
/*
 * Function:	scsi_requeue_command()
 *
 * Purpose:	Handle post-processing of completed commands.
 *
 * Arguments:	q	- queue to operate on
 *		cmd	- command that may need to be requeued.
 *
 * Notes:	After command completion, there may be blocks left
 *		over which weren't finished by the previous command
 *		this can be for a number of reasons - the main one is
 *		I/O errors in the middle of the request, in which case
 *		we need to request the blocks that come after the bad
 *		sector.
 * Notes:	Upon return, cmd is a stale pointer.
 */
static void scsi_requeue_command(struct request_queue *q, struct scsi_cmnd *cmd)
{
	struct scsi_device *sdev = cmd->device;
	struct request *req = cmd->request;
	unsigned long flags;

	/*
	 * We need to hold a reference on the device to avoid the queue being
	 * killed after the unlock and before scsi_run_queue is invoked which
	 * may happen because scsi_unprep_request() puts the command which
	 * releases its reference on the device.
	 */
	get_device(&sdev->sdev_gendev);

	spin_lock_irqsave(q->queue_lock, flags);
	scsi_unprep_request(req);
	blk_requeue_request(q, req);
	spin_unlock_irqrestore(q->queue_lock, flags);

	scsi_run_queue(q);

	put_device(&sdev->sdev_gendev);
}
void scsi_next_command(struct scsi_cmnd *cmd)
{
	struct scsi_device *sdev = cmd->device;
	struct request_queue *q = sdev->request_queue;

	/* need to hold a reference on the device before we let go of the cmd */
	get_device(&sdev->sdev_gendev);

	scsi_put_command(cmd);
	scsi_run_queue(q);

	/* ok to remove device now */
	put_device(&sdev->sdev_gendev);
}
void scsi_run_host_queues(struct Scsi_Host *shost)
{
	struct scsi_device *sdev;

	shost_for_each_device(sdev, shost)
		scsi_run_queue(sdev->request_queue);
}
static void __scsi_release_buffers(struct scsi_cmnd *, int);
/*
 * Function:    scsi_end_request()
 *
 * Purpose:     Post-processing of completed commands (usually invoked at end
 *		of upper level post-processing and scsi_io_completion).
 *
 * Arguments:   cmd	 - command that is complete.
 *              error    - 0 if I/O indicates success, < 0 for I/O error.
 *              bytes    - number of bytes of completed I/O
 *		requeue  - indicates whether we should requeue leftovers.
 *
 * Lock status: Assumed that lock is not held upon entry.
 *
 * Returns:     cmd if requeue required, NULL otherwise.
 *
 * Notes:       This is called for block device requests in order to
 *              mark some number of sectors as complete.
 *
 *		We are guaranteeing that the request queue will be goosed
 *		at some point during this call.
 * Notes:	If cmd was requeued, upon return it will be a stale pointer.
 */
static struct scsi_cmnd *scsi_end_request(struct scsi_cmnd *cmd, int error,
					  int bytes, int requeue)
{
	struct request_queue *q = cmd->device->request_queue;
	struct request *req = cmd->request;

	/*
	 * If there are blocks left over at the end, set up the command
	 * to queue the remainder of them.
	 */
	if (blk_end_request(req, error, bytes)) {
		/* kill remainder if no retrys */
		if (error && scsi_noretry_cmd(cmd))
			blk_end_request_all(req, error);
		else {
			if (requeue) {
				/*
				 * Bleah.  Leftovers again.  Stick the
				 * leftovers in the front of the
				 * queue, and goose the queue again.
				 */
				scsi_release_buffers(cmd);
				scsi_requeue_command(q, cmd);
				cmd = NULL;
			}
			return cmd;
		}
	}

	/*
	 * This will goose the queue request function at the end, so we don't
	 * need to worry about launching another command.
	 */
	__scsi_release_buffers(cmd, 0);
	scsi_next_command(cmd);
	return NULL;
}
static inline unsigned int scsi_sgtable_index(unsigned short nents)
{
	unsigned int index;

	BUG_ON(nents > SCSI_MAX_SG_SEGMENTS);

	if (nents <= 8)
		index = 0;
	else
		index = get_count_order(nents) - 3;

	return index;
}
static void scsi_sg_free(struct scatterlist *sgl, unsigned int nents)
{
	struct scsi_host_sg_pool *sgp;

	sgp = scsi_sg_pools + scsi_sgtable_index(nents);
	mempool_free(sgl, sgp->pool);
}
static struct scatterlist *scsi_sg_alloc(unsigned int nents, gfp_t gfp_mask)
{
	struct scsi_host_sg_pool *sgp;

	sgp = scsi_sg_pools + scsi_sgtable_index(nents);
	return mempool_alloc(sgp->pool, gfp_mask);
}
static int scsi_alloc_sgtable(struct scsi_data_buffer *sdb, int nents,
			      gfp_t gfp_mask)
{
	int ret;

	BUG_ON(!nents);

	ret = __sg_alloc_table(&sdb->table, nents, SCSI_MAX_SG_SEGMENTS,
			       gfp_mask, scsi_sg_alloc);
	if (unlikely(ret))
		__sg_free_table(&sdb->table, SCSI_MAX_SG_SEGMENTS,
				scsi_sg_free);

	return ret;
}
static void scsi_free_sgtable(struct scsi_data_buffer *sdb)
{
	__sg_free_table(&sdb->table, SCSI_MAX_SG_SEGMENTS, scsi_sg_free);
}
static void __scsi_release_buffers(struct scsi_cmnd *cmd, int do_bidi_check)
{

	if (cmd->sdb.table.nents)
		scsi_free_sgtable(&cmd->sdb);

	memset(&cmd->sdb, 0, sizeof(cmd->sdb));

	if (do_bidi_check && scsi_bidi_cmnd(cmd)) {
		struct scsi_data_buffer *bidi_sdb =
			cmd->request->next_rq->special;
		scsi_free_sgtable(bidi_sdb);
		kmem_cache_free(scsi_sdb_cache, bidi_sdb);
		cmd->request->next_rq->special = NULL;
	}

	if (scsi_prot_sg_count(cmd))
		scsi_free_sgtable(cmd->prot_sdb);
}
/*
 * Function:    scsi_release_buffers()
 *
 * Purpose:     Completion processing for block device I/O requests.
 *
 * Arguments:   cmd	- command that we are bailing.
 *
 * Lock status: Assumed that no lock is held upon entry.
 *
 * Notes:       In the event that an upper level driver rejects a
 *		command, we must release resources allocated during
 *		the __init_io() function.  Primarily this would involve
 *		the scatter-gather table, and potentially any bounce
 *		buffers.
 */
void scsi_release_buffers(struct scsi_cmnd *cmd)
{
	__scsi_release_buffers(cmd, 1);
}
EXPORT_SYMBOL(scsi_release_buffers);
static int __scsi_error_from_host_byte(struct scsi_cmnd *cmd, int result)
{
	int error = 0;

	switch(host_byte(result)) {
	case DID_TRANSPORT_FAILFAST:
		error = -ENOLINK;
		break;
	case DID_TARGET_FAILURE:
		set_host_byte(cmd, DID_OK);
		error = -EREMOTEIO;
		break;
	case DID_NEXUS_FAILURE:
		set_host_byte(cmd, DID_OK);
		error = -EBADE;
		break;
	default:
		error = -EIO;
		break;
	}

	return error;
}
/*
 * Function:    scsi_io_completion()
 *
 * Purpose:     Completion processing for block device I/O requests.
 *
 * Arguments:   cmd   - command that is finished.
 *
 * Lock status: Assumed that no lock is held upon entry.
 *
 * Notes:       This function is matched in terms of capabilities to
 *              the function that created the scatter-gather list.
 *              In other words, if there are no bounce buffers
 *              (the normal case for most drivers), we don't need
 *              the logic to deal with cleaning up afterwards.
 *
 *		We must call scsi_end_request().  This will finish off
 *		the specified number of sectors.  If we are done, the
 *		command block will be released and the queue function
 *		will be goosed.  If we are not done then we have to
 *		figure out what to do next:
 *
 *		a) We can call scsi_requeue_command().  The request
 *		   will be unprepared and put back on the queue.  Then
 *		   a new command will be created for it.  This should
 *		   be used if we made forward progress, or if we want
 *		   to switch from READ(10) to READ(6) for example.
 *
 *		b) We can call scsi_queue_insert().  The request will
 *		   be put back on the queue and retried using the same
 *		   command as before, possibly after a delay.
 *
 *		c) We can call blk_end_request() with -EIO to fail
 *		   the remainder of the request.
 */
void scsi_io_completion(struct scsi_cmnd *cmd, unsigned int good_bytes)
{
	int result = cmd->result;
	struct request_queue *q = cmd->device->request_queue;
	struct request *req = cmd->request;
	int error = 0;
	struct scsi_sense_hdr sshdr;
	int sense_valid = 0;
	int sense_deferred = 0;
	enum {ACTION_FAIL, ACTION_REPREP, ACTION_RETRY,
	      ACTION_DELAYED_RETRY} action;
	char *description = NULL;

	if (result) {
		sense_valid = scsi_command_normalize_sense(cmd, &sshdr);
		if (sense_valid)
			sense_deferred = scsi_sense_is_deferred(&sshdr);
	}

	if (req->cmd_type == REQ_TYPE_BLOCK_PC) { /* SG_IO ioctl from block level */
		if (result) {
			if (sense_valid && req->sense) {
				/*
				 * SG_IO wants current and deferred errors
				 */
				int len = 8 + cmd->sense_buffer[7];

				if (len > SCSI_SENSE_BUFFERSIZE)
					len = SCSI_SENSE_BUFFERSIZE;
				memcpy(req->sense, cmd->sense_buffer, len);
				req->sense_len = len;
			}
			if (!sense_deferred)
				error = __scsi_error_from_host_byte(cmd, result);
		}
		/*
		 * __scsi_error_from_host_byte may have reset the host_byte
		 */
		req->errors = cmd->result;

		req->resid_len = scsi_get_resid(cmd);

		if (scsi_bidi_cmnd(cmd)) {
			/*
			 * Bidi commands Must be complete as a whole,
			 * both sides at once.
			 */
			req->next_rq->resid_len = scsi_in(cmd)->resid;

			scsi_release_buffers(cmd);
			blk_end_request_all(req, 0);

			scsi_next_command(cmd);
			return;
		}
	}

	/* no bidi support for !REQ_TYPE_BLOCK_PC yet */
	BUG_ON(blk_bidi_rq(req));
	/*
	 * Next deal with any sectors which we were able to correctly
	 * handle.
	 */
	SCSI_LOG_HLCOMPLETE(1, printk("%u sectors total, "
				      "%d bytes done.\n",
				      blk_rq_sectors(req), good_bytes));
	/*
	 * Recovered errors need reporting, but they're always treated
	 * as success, so fiddle the result code here.  For BLOCK_PC
	 * we already took a copy of the original into rq->errors which
	 * is what gets returned to the user
	 */
	if (sense_valid && (sshdr.sense_key == RECOVERED_ERROR)) {
		/* if ATA PASS-THROUGH INFORMATION AVAILABLE skip
		 * print since caller wants ATA registers. Only occurs on
		 * SCSI ATA PASS_THROUGH commands when CK_COND=1
		 */
		if ((sshdr.asc == 0x0) && (sshdr.ascq == 0x1d))
			;
		else if (!(req->cmd_flags & REQ_QUIET))
			scsi_print_sense("", cmd);
		result = 0;
		/* BLOCK_PC may have set error */
		error = 0;
	}
	/*
	 * A number of bytes were successfully read.  If there
	 * are leftovers and there is some kind of error
	 * (result != 0), retry the rest.
	 */
	if (scsi_end_request(cmd, error, good_bytes, result == 0) == NULL)
		return;

	error = __scsi_error_from_host_byte(cmd, result);
	if (host_byte(result) == DID_RESET) {
		/* Third party bus reset or reset for error recovery
		 * reasons.  Just retry the command and see what
		 * happens.
		 */
		action = ACTION_RETRY;
	} else if (sense_valid && !sense_deferred) {
		switch (sshdr.sense_key) {
		case UNIT_ATTENTION:
			if (cmd->device->removable) {
				/* Detected disc change.  Set a bit
				 * and quietly refuse further access.
				 */
				cmd->device->changed = 1;
				description = "Media Changed";
				action = ACTION_FAIL;
			} else {
				/* Must have been a power glitch, or a
				 * bus reset.  Could not have been a
				 * media change, so we just retry the
				 * command and see what happens.
				 */
				action = ACTION_RETRY;
			}
			break;
		case ILLEGAL_REQUEST:
			/* If we had an ILLEGAL REQUEST returned, then
			 * we may have performed an unsupported
			 * command.  The only thing this should be
			 * would be a ten byte read where only a six
			 * byte read was supported.  Also, on a system
			 * where READ CAPACITY failed, we may have
			 * read past the end of the disk.
			 */
			if ((cmd->device->use_10_for_rw &&
			    sshdr.asc == 0x20 && sshdr.ascq == 0x00) &&
			    (cmd->cmnd[0] == READ_10 ||
			     cmd->cmnd[0] == WRITE_10)) {
				/* This will issue a new 6-byte command. */
				cmd->device->use_10_for_rw = 0;
				action = ACTION_REPREP;
			} else if (sshdr.asc == 0x10) /* DIX */ {
				description = "Host Data Integrity Failure";
				action = ACTION_FAIL;
				error = -EILSEQ;
			/* INVALID COMMAND OPCODE or INVALID FIELD IN CDB */
			} else if ((sshdr.asc == 0x20 || sshdr.asc == 0x24) &&
				   (cmd->cmnd[0] == UNMAP ||
				    cmd->cmnd[0] == WRITE_SAME_16 ||
				    cmd->cmnd[0] == WRITE_SAME)) {
				description = "Discard failure";
				action = ACTION_FAIL;
				error = -EREMOTEIO;
			} else
				action = ACTION_FAIL;
			break;
		case ABORTED_COMMAND:
			action = ACTION_FAIL;
			if (sshdr.asc == 0x10) { /* DIF */
				description = "Target Data Integrity Failure";
				error = -EILSEQ;
			}
			break;
		case NOT_READY:
			/* If the device is in the process of becoming
			 * ready, or has a temporary blockage, retry.
			 */
			if (sshdr.asc == 0x04) {
				switch (sshdr.ascq) {
				case 0x01: /* becoming ready */
				case 0x04: /* format in progress */
				case 0x05: /* rebuild in progress */
				case 0x06: /* recalculation in progress */
				case 0x07: /* operation in progress */
				case 0x08: /* Long write in progress */
				case 0x09: /* self test in progress */
				case 0x14: /* space allocation in progress */
					action = ACTION_DELAYED_RETRY;
					break;
				default:
					description = "Device not ready";
					action = ACTION_FAIL;
					break;
				}
			} else {
				description = "Device not ready";
				action = ACTION_FAIL;
			}
			break;
		case VOLUME_OVERFLOW:
			/* See SSC3rXX or current. */
			action = ACTION_FAIL;
			break;
		default:
			description = "Unhandled sense code";
			action = ACTION_FAIL;
			break;
		}
	} else {
		description = "Unhandled error code";
		action = ACTION_FAIL;
	}
	switch (action) {
	case ACTION_FAIL:
		/* Give up and fail the remainder of the request */
		scsi_release_buffers(cmd);
		if (!(req->cmd_flags & REQ_QUIET)) {
			if (description)
				scmd_printk(KERN_INFO, cmd, "%s\n",
					    description);
			scsi_print_result(cmd);
			if (driver_byte(result) & DRIVER_SENSE)
				scsi_print_sense("", cmd);
			scsi_print_command(cmd);
		}
		if (blk_end_request_err(req, error))
			scsi_requeue_command(q, cmd);
		else
			scsi_next_command(cmd);
		break;
	case ACTION_REPREP:
		/* Unprep the request and put it back at the head of the queue.
		 * A new command will be prepared and issued.
		 */
		scsi_release_buffers(cmd);
		scsi_requeue_command(q, cmd);
		break;
	case ACTION_RETRY:
		/* Retry the same command immediately */
		__scsi_queue_insert(cmd, SCSI_MLQUEUE_EH_RETRY, 0);
		break;
	case ACTION_DELAYED_RETRY:
		/* Retry the same command after a delay */
		__scsi_queue_insert(cmd, SCSI_MLQUEUE_DEVICE_BUSY, 0);
		break;
	}
}
static int scsi_init_sgtable(struct request *req, struct scsi_data_buffer *sdb,
			     gfp_t gfp_mask)
{
	int count;

	/*
	 * If sg table allocation fails, requeue request later.
	 */
	if (unlikely(scsi_alloc_sgtable(sdb, req->nr_phys_segments,
					gfp_mask))) {
		return BLKPREP_DEFER;
	}

	req->buffer = NULL;

	/*
	 * Next, walk the list, and fill in the addresses and sizes of
	 * each segment.
	 */
	count = blk_rq_map_sg(req->q, req, sdb->table.sgl);
	BUG_ON(count > sdb->table.nents);
	sdb->table.nents = count;
	sdb->length = blk_rq_bytes(req);
	return BLKPREP_OK;
}
/*
 * Function:    scsi_init_io()
 *
 * Purpose:     SCSI I/O initialize function.
 *
 * Arguments:   cmd   - Command descriptor we wish to initialize
 *
 * Returns:     0 on success
 *		BLKPREP_DEFER if the failure is retryable
 *		BLKPREP_KILL if the failure is fatal
 */
int scsi_init_io(struct scsi_cmnd *cmd, gfp_t gfp_mask)
{
	struct request *rq = cmd->request;

	int error = scsi_init_sgtable(rq, &cmd->sdb, gfp_mask);
	if (error)
		goto err_exit;

	if (blk_bidi_rq(rq)) {
		struct scsi_data_buffer *bidi_sdb = kmem_cache_zalloc(
			scsi_sdb_cache, GFP_ATOMIC);
		if (!bidi_sdb) {
			error = BLKPREP_DEFER;
			goto err_exit;
		}

		rq->next_rq->special = bidi_sdb;
		error = scsi_init_sgtable(rq->next_rq, bidi_sdb, GFP_ATOMIC);
		if (error)
			goto err_exit;
	}

	if (blk_integrity_rq(rq)) {
		struct scsi_data_buffer *prot_sdb = cmd->prot_sdb;
		int ivecs, count;

		BUG_ON(prot_sdb == NULL);
		ivecs = blk_rq_count_integrity_sg(rq->q, rq->bio);

		if (scsi_alloc_sgtable(prot_sdb, ivecs, gfp_mask)) {
			error = BLKPREP_DEFER;
			goto err_exit;
		}

		count = blk_rq_map_integrity_sg(rq->q, rq->bio,
						prot_sdb->table.sgl);
		BUG_ON(unlikely(count > ivecs));
		BUG_ON(unlikely(count > queue_max_integrity_segments(rq->q)));

		cmd->prot_sdb = prot_sdb;
		cmd->prot_sdb->table.nents = count;
	}

	return BLKPREP_OK;

err_exit:
	scsi_release_buffers(cmd);
	cmd->request->special = NULL;
	scsi_put_command(cmd);
	return error;
}
EXPORT_SYMBOL(scsi_init_io);
static struct scsi_cmnd *scsi_get_cmd_from_req(struct scsi_device *sdev,
		struct request *req)
{
	struct scsi_cmnd *cmd;

	if (!req->special) {
		cmd = scsi_get_command(sdev, GFP_ATOMIC);
		if (unlikely(!cmd))
			return NULL;
		req->special = cmd;
	} else {
		cmd = req->special;
	}

	/* pull a tag out of the request if we have one */
	cmd->tag = req->tag;
	cmd->request = req;

	cmd->cmnd = req->cmd;
	cmd->prot_op = SCSI_PROT_NORMAL;

	return cmd;
}
int scsi_setup_blk_pc_cmnd(struct scsi_device *sdev, struct request *req)
{
	struct scsi_cmnd *cmd;
	int ret = scsi_prep_state_check(sdev, req);

	if (ret != BLKPREP_OK)
		return ret;

	cmd = scsi_get_cmd_from_req(sdev, req);
	if (unlikely(!cmd))
		return BLKPREP_DEFER;

	/*
	 * BLOCK_PC requests may transfer data, in which case they must
	 * have a bio attached to them.  Or they might contain a SCSI command
	 * that does not transfer data, in which case they may optionally
	 * submit a request without an attached bio.
	 */
	if (req->bio) {
		int ret;

		BUG_ON(!req->nr_phys_segments);

		ret = scsi_init_io(cmd, GFP_ATOMIC);
		if (unlikely(ret))
			return ret;
	} else {
		BUG_ON(blk_rq_bytes(req));

		memset(&cmd->sdb, 0, sizeof(cmd->sdb));
		req->buffer = NULL;
	}

	cmd->cmd_len = req->cmd_len;
	if (!blk_rq_bytes(req))
		cmd->sc_data_direction = DMA_NONE;
	else if (rq_data_dir(req) == WRITE)
		cmd->sc_data_direction = DMA_TO_DEVICE;
	else
		cmd->sc_data_direction = DMA_FROM_DEVICE;

	cmd->transfersize = blk_rq_bytes(req);
	cmd->allowed = req->retries;
	return BLKPREP_OK;
}
EXPORT_SYMBOL(scsi_setup_blk_pc_cmnd);
/*
 * Setup a REQ_TYPE_FS command.  These are simple read/write request
 * from filesystems that still need to be translated to SCSI CDBs from
 * the ULD.
 */
*sdev
, struct request
*req
)
1164 struct scsi_cmnd
*cmd
;
1165 int ret
= scsi_prep_state_check(sdev
, req
);
1167 if (ret
!= BLKPREP_OK
)
1170 if (unlikely(sdev
->scsi_dh_data
&& sdev
->scsi_dh_data
->scsi_dh
1171 && sdev
->scsi_dh_data
->scsi_dh
->prep_fn
)) {
1172 ret
= sdev
->scsi_dh_data
->scsi_dh
->prep_fn(sdev
, req
);
1173 if (ret
!= BLKPREP_OK
)
1178 * Filesystem requests must transfer data.
1180 BUG_ON(!req
->nr_phys_segments
);
1182 cmd
= scsi_get_cmd_from_req(sdev
, req
);
1184 return BLKPREP_DEFER
;
1186 memset(cmd
->cmnd
, 0, BLK_MAX_CDB
);
1187 return scsi_init_io(cmd
, GFP_ATOMIC
);
1189 EXPORT_SYMBOL(scsi_setup_fs_cmnd
);
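/*
 * Illustrative sketch (not part of this file): the general shape of an
 * upper-level driver's prep_fn, which lets scsi_setup_fs_cmnd() above
 * allocate the command and map the scatterlist, then fills in the CDB
 * itself.  The READ(10) construction is deliberately simplified and
 * assumes 512-byte logical blocks; a real driver such as sd.c converts
 * sectors to logical blocks and handles many more cases.
 */
static int example_prep_fn(struct scsi_device *sdev, struct request *rq)
{
	struct scsi_cmnd *cmd;
	int ret = scsi_setup_fs_cmnd(sdev, rq);

	if (ret != BLKPREP_OK)
		return ret;

	cmd = rq->special;
	cmd->cmnd[0] = READ_10;
	put_unaligned_be32(blk_rq_pos(rq), &cmd->cmnd[2]);	/* LBA */
	put_unaligned_be16(blk_rq_sectors(rq), &cmd->cmnd[7]);	/* length */
	cmd->cmd_len = 10;
	cmd->sc_data_direction = DMA_FROM_DEVICE;
	cmd->transfersize = 512;	/* assumption: 512-byte blocks */
	cmd->allowed = 3;		/* assumption: retry budget */
	return BLKPREP_OK;
}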
int scsi_prep_state_check(struct scsi_device *sdev, struct request *req)
{
	int ret = BLKPREP_OK;

	/*
	 * If the device is not in running state we will reject some
	 * or all commands.
	 */
	if (unlikely(sdev->sdev_state != SDEV_RUNNING)) {
		switch (sdev->sdev_state) {
		case SDEV_OFFLINE:
		case SDEV_TRANSPORT_OFFLINE:
			/*
			 * If the device is offline we refuse to process any
			 * commands.  The device must be brought online
			 * before trying any recovery commands.
			 */
			sdev_printk(KERN_ERR, sdev,
				    "rejecting I/O to offline device\n");
			ret = BLKPREP_KILL;
			break;
		case SDEV_DEL:
			/*
			 * If the device is fully deleted, we refuse to
			 * process any commands as well.
			 */
			sdev_printk(KERN_ERR, sdev,
				    "rejecting I/O to dead device\n");
			ret = BLKPREP_KILL;
			break;
		case SDEV_QUIESCE:
		case SDEV_BLOCK:
		case SDEV_CREATED_BLOCK:
			/*
			 * If the devices is blocked we defer normal commands.
			 */
			if (!(req->cmd_flags & REQ_PREEMPT))
				ret = BLKPREP_DEFER;
			break;
		default:
			/*
			 * For any other not fully online state we only allow
			 * special commands.  In particular any user initiated
			 * command is not allowed.
			 */
			if (!(req->cmd_flags & REQ_PREEMPT))
				ret = BLKPREP_KILL;
			break;
		}
	}
	return ret;
}
EXPORT_SYMBOL(scsi_prep_state_check);
int scsi_prep_return(struct request_queue *q, struct request *req, int ret)
{
	struct scsi_device *sdev = q->queuedata;

	switch (ret) {
	case BLKPREP_KILL:
		req->errors = DID_NO_CONNECT << 16;
		/* release the command and kill it */
		if (req->special) {
			struct scsi_cmnd *cmd = req->special;
			scsi_release_buffers(cmd);
			scsi_put_command(cmd);
			req->special = NULL;
		}
		break;
	case BLKPREP_DEFER:
		/*
		 * If we defer, the blk_peek_request() returns NULL, but the
		 * queue must be restarted, so we schedule a callback to happen
		 * shortly.
		 */
		if (sdev->device_busy == 0)
			blk_delay_queue(q, SCSI_QUEUE_DELAY);
		break;
	default:
		req->cmd_flags |= REQ_DONTPREP;
	}

	return ret;
}
EXPORT_SYMBOL(scsi_prep_return);
int scsi_prep_fn(struct request_queue *q, struct request *req)
{
	struct scsi_device *sdev = q->queuedata;
	int ret = BLKPREP_KILL;

	if (req->cmd_type == REQ_TYPE_BLOCK_PC)
		ret = scsi_setup_blk_pc_cmnd(sdev, req);
	return scsi_prep_return(q, req, ret);
}
EXPORT_SYMBOL(scsi_prep_fn);
/*
 * scsi_dev_queue_ready: if we can send requests to sdev, return 1 else
 * return 0.
 *
 * Called with the queue_lock held.
 */
static inline int scsi_dev_queue_ready(struct request_queue *q,
				  struct scsi_device *sdev)
{
	if (sdev->device_busy == 0 && sdev->device_blocked) {
		/*
		 * unblock after device_blocked iterates to zero
		 */
		if (--sdev->device_blocked == 0) {
			SCSI_LOG_MLQUEUE(3,
				   sdev_printk(KERN_INFO, sdev,
				   "unblocking device at zero depth\n"));
		} else {
			blk_delay_queue(q, SCSI_QUEUE_DELAY);
			return 0;
		}
	}
	if (scsi_device_is_busy(sdev))
		return 0;

	return 1;
}
/*
 * scsi_target_queue_ready: checks if there we can send commands to target
 * @sdev: scsi device on starget to check.
 *
 * Called with the host lock held.
 */
static inline int scsi_target_queue_ready(struct Scsi_Host *shost,
					   struct scsi_device *sdev)
{
	struct scsi_target *starget = scsi_target(sdev);

	if (starget->single_lun) {
		if (starget->starget_sdev_user &&
		    starget->starget_sdev_user != sdev)
			return 0;
		starget->starget_sdev_user = sdev;
	}

	if (starget->target_busy == 0 && starget->target_blocked) {
		/*
		 * unblock after target_blocked iterates to zero
		 */
		if (--starget->target_blocked == 0) {
			SCSI_LOG_MLQUEUE(3, starget_printk(KERN_INFO, starget,
					 "unblocking target at zero depth\n"));
		} else
			return 0;
	}

	if (scsi_target_is_busy(starget)) {
		list_move_tail(&sdev->starved_entry, &shost->starved_list);
		return 0;
	}

	return 1;
}
/*
 * scsi_host_queue_ready: if we can send requests to shost, return 1 else
 * return 0. We must end up running the queue again whenever 0 is
 * returned, else IO can hang.
 *
 * Called with host_lock held.
 */
static inline int scsi_host_queue_ready(struct request_queue *q,
				   struct Scsi_Host *shost,
				   struct scsi_device *sdev)
{
	if (scsi_host_in_recovery(shost))
		return 0;
	if (shost->host_busy == 0 && shost->host_blocked) {
		/*
		 * unblock after host_blocked iterates to zero
		 */
		if (--shost->host_blocked == 0) {
			SCSI_LOG_MLQUEUE(3,
				printk("scsi%d unblocking host at zero depth\n",
					shost->host_no));
		} else {
			return 0;
		}
	}
	if (scsi_host_is_busy(shost)) {
		if (list_empty(&sdev->starved_entry))
			list_add_tail(&sdev->starved_entry, &shost->starved_list);
		return 0;
	}

	/* We're OK to process the command, so we can't be starved */
	if (!list_empty(&sdev->starved_entry))
		list_del_init(&sdev->starved_entry);

	return 1;
}
/*
 * Busy state exporting function for request stacking drivers.
 *
 * For efficiency, no lock is taken to check the busy state of
 * shost/starget/sdev, since the returned value is not guaranteed and
 * may be changed after request stacking drivers call the function,
 * regardless of taking lock or not.
 *
 * When scsi can't dispatch I/Os anymore and needs to kill I/Os scsi
 * needs to return 'not busy'. Otherwise, request stacking drivers
 * may hold requests forever.
 */
static int scsi_lld_busy(struct request_queue *q)
{
	struct scsi_device *sdev = q->queuedata;
	struct Scsi_Host *shost;

	if (blk_queue_dead(q))
		return 0;

	shost = sdev->host;

	/*
	 * Ignore host/starget busy state.
	 * Since block layer does not have a concept of fairness across
	 * multiple queues, congestion of host/starget needs to be handled
	 * in SCSI layer.
	 */
	if (scsi_host_in_recovery(shost) || scsi_device_is_busy(sdev))
		return 1;

	return 0;
}
/*
 * Kill a request for a dead device
 */
static void scsi_kill_request(struct request *req, struct request_queue *q)
{
	struct scsi_cmnd *cmd = req->special;
	struct scsi_device *sdev;
	struct scsi_target *starget;
	struct Scsi_Host *shost;

	blk_start_request(req);

	scmd_printk(KERN_INFO, cmd, "killing request\n");

	sdev = cmd->device;
	starget = scsi_target(sdev);
	shost = sdev->host;
	scsi_init_cmd_errh(cmd);
	cmd->result = DID_NO_CONNECT << 16;
	atomic_inc(&cmd->device->iorequest_cnt);

	/*
	 * SCSI request completion path will do scsi_device_unbusy(),
	 * bump busy counts.  To bump the counters, we need to dance
	 * with the locks as normal issue path does.
	 */
	sdev->device_busy++;
	spin_unlock(sdev->request_queue->queue_lock);
	spin_lock(shost->host_lock);
	shost->host_busy++;
	starget->target_busy++;
	spin_unlock(shost->host_lock);
	spin_lock(sdev->request_queue->queue_lock);

	blk_complete_request(req);
}
static void scsi_softirq_done(struct request *rq)
{
	struct scsi_cmnd *cmd = rq->special;
	unsigned long wait_for = (cmd->allowed + 1) * rq->timeout;
	int disposition;

	INIT_LIST_HEAD(&cmd->eh_entry);

	atomic_inc(&cmd->device->iodone_cnt);
	if (cmd->result)
		atomic_inc(&cmd->device->ioerr_cnt);

	disposition = scsi_decide_disposition(cmd);
	if (disposition != SUCCESS &&
	    time_before(cmd->jiffies_at_alloc + wait_for, jiffies)) {
		sdev_printk(KERN_ERR, cmd->device,
			    "timing out command, waited %lus\n",
			    wait_for/HZ);
		disposition = SUCCESS;
	}

	scsi_log_completion(cmd, disposition);

	switch (disposition) {
		case SUCCESS:
			scsi_finish_command(cmd);
			break;
		case NEEDS_RETRY:
			scsi_queue_insert(cmd, SCSI_MLQUEUE_EH_RETRY);
			break;
		case ADD_TO_MLQUEUE:
			scsi_queue_insert(cmd, SCSI_MLQUEUE_DEVICE_BUSY);
			break;
		default:
			if (!scsi_eh_scmd_add(cmd, 0))
				scsi_finish_command(cmd);
	}
}
/*
 * Function:    scsi_request_fn()
 *
 * Purpose:     Main strategy routine for SCSI.
 *
 * Arguments:   q       - Pointer to actual queue.
 *
 * Lock status: IO request lock assumed to be held when called.
 */
static void scsi_request_fn(struct request_queue *q)
{
	struct scsi_device *sdev = q->queuedata;
	struct Scsi_Host *shost;
	struct scsi_cmnd *cmd;
	struct request *req;

	if(!get_device(&sdev->sdev_gendev))
		/* We must be tearing the block queue down already */
		return;

	/*
	 * To start with, we keep looping until the queue is empty, or until
	 * the host is no longer able to accept any more requests.
	 */
	shost = sdev->host;
	for (;;) {
		int rtn;
		/*
		 * get next queueable request.  We do this early to make sure
		 * that the request is fully prepared even if we cannot
		 * accept it.
		 */
		req = blk_peek_request(q);
		if (!req || !scsi_dev_queue_ready(q, sdev))
			break;

		if (unlikely(!scsi_device_online(sdev))) {
			sdev_printk(KERN_ERR, sdev,
				    "rejecting I/O to offline device\n");
			scsi_kill_request(req, q);
			continue;
		}

		/*
		 * Remove the request from the request list.
		 */
		if (!(blk_queue_tagged(q) && !blk_queue_start_tag(q, req)))
			blk_start_request(req);
		sdev->device_busy++;

		spin_unlock(q->queue_lock);
		cmd = req->special;
		if (unlikely(cmd == NULL)) {
			printk(KERN_CRIT "impossible request in %s.\n"
					 "please mail a stack trace to "
					 "linux-scsi@vger.kernel.org\n",
					 __func__);
			blk_dump_rq_flags(req, "foo");
			BUG();
		}
		spin_lock(shost->host_lock);

		/*
		 * We hit this when the driver is using a host wide
		 * tag map. For device level tag maps the queue_depth check
		 * in the device ready fn would prevent us from trying
		 * to allocate a tag. Since the map is a shared host resource
		 * we add the dev to the starved list so it eventually gets
		 * a run when a tag is freed.
		 */
		if (blk_queue_tagged(q) && !blk_rq_tagged(req)) {
			if (list_empty(&sdev->starved_entry))
				list_add_tail(&sdev->starved_entry,
					      &shost->starved_list);
			goto not_ready;
		}

		if (!scsi_target_queue_ready(shost, sdev))
			goto not_ready;

		if (!scsi_host_queue_ready(q, shost, sdev))
			goto not_ready;

		scsi_target(sdev)->target_busy++;
		shost->host_busy++;

		/*
		 * XXX(hch): This is rather suboptimal, scsi_dispatch_cmd will
		 *		take the lock again.
		 */
		spin_unlock_irq(shost->host_lock);

		/*
		 * Finally, initialize any error handling parameters, and set up
		 * the timers for timeouts.
		 */
		scsi_init_cmd_errh(cmd);

		/*
		 * Dispatch the command to the low-level driver.
		 */
		rtn = scsi_dispatch_cmd(cmd);
		spin_lock_irq(q->queue_lock);
		if (rtn)
			goto out_delay;
	}

	goto out;

 not_ready:
	spin_unlock_irq(shost->host_lock);

	/*
	 * lock q, handle tag, requeue req, and decrement device_busy. We
	 * must return with queue_lock held.
	 *
	 * Decrementing device_busy without checking it is OK, as all such
	 * cases (host limits or settings) should run the queue at some
	 * later time.
	 */
	spin_lock_irq(q->queue_lock);
	blk_requeue_request(q, req);
	sdev->device_busy--;
out_delay:
	if (sdev->device_busy == 0)
		blk_delay_queue(q, SCSI_QUEUE_DELAY);
out:
	/* must be careful here...if we trigger the ->remove() function
	 * we cannot be holding the q lock */
	spin_unlock_irq(q->queue_lock);
	put_device(&sdev->sdev_gendev);
	spin_lock_irq(q->queue_lock);
}
scsi_calculate_bounce_limit(struct Scsi_Host
*shost
)
1641 struct device
*host_dev
;
1642 u64 bounce_limit
= 0xffffffff;
1644 if (shost
->unchecked_isa_dma
)
1645 return BLK_BOUNCE_ISA
;
1647 * Platforms with virtual-DMA translation
1648 * hardware have no practical limit.
1650 if (!PCI_DMA_BUS_IS_PHYS
)
1651 return BLK_BOUNCE_ANY
;
1653 host_dev
= scsi_get_device(shost
);
1654 if (host_dev
&& host_dev
->dma_mask
)
1655 bounce_limit
= *host_dev
->dma_mask
;
1657 return bounce_limit
;
1659 EXPORT_SYMBOL(scsi_calculate_bounce_limit
);
struct request_queue *__scsi_alloc_queue(struct Scsi_Host *shost,
					 request_fn_proc *request_fn)
{
	struct request_queue *q;
	struct device *dev = shost->dma_dev;

	q = blk_init_queue(request_fn, NULL);
	if (!q)
		return NULL;

	/*
	 * this limit is imposed by hardware restrictions
	 */
	blk_queue_max_segments(q, min_t(unsigned short, shost->sg_tablesize,
					SCSI_MAX_SG_CHAIN_SEGMENTS));

	if (scsi_host_prot_dma(shost)) {
		shost->sg_prot_tablesize =
			min_not_zero(shost->sg_prot_tablesize,
				     (unsigned short)SCSI_MAX_PROT_SG_SEGMENTS);
		BUG_ON(shost->sg_prot_tablesize < shost->sg_tablesize);
		blk_queue_max_integrity_segments(q, shost->sg_prot_tablesize);
	}

	blk_queue_max_hw_sectors(q, shost->max_sectors);
	blk_queue_bounce_limit(q, scsi_calculate_bounce_limit(shost));
	blk_queue_segment_boundary(q, shost->dma_boundary);
	dma_set_seg_boundary(dev, shost->dma_boundary);

	blk_queue_max_segment_size(q, dma_get_max_seg_size(dev));

	if (!shost->use_clustering)
		q->limits.cluster = 0;

	/*
	 * set a reasonable default alignment on word boundaries: the
	 * host and device may alter it using
	 * blk_queue_update_dma_alignment() later.
	 */
	blk_queue_dma_alignment(q, 0x03);

	return q;
}
EXPORT_SYMBOL(__scsi_alloc_queue);
struct request_queue *scsi_alloc_queue(struct scsi_device *sdev)
{
	struct request_queue *q;

	q = __scsi_alloc_queue(sdev->host, scsi_request_fn);
	if (!q)
		return NULL;

	blk_queue_prep_rq(q, scsi_prep_fn);
	blk_queue_softirq_done(q, scsi_softirq_done);
	blk_queue_rq_timed_out(q, scsi_times_out);
	blk_queue_lld_busy(q, scsi_lld_busy);
	return q;
}
/*
 * Function:    scsi_block_requests()
 *
 * Purpose:     Utility function used by low-level drivers to prevent further
 *		commands from being queued to the device.
 *
 * Arguments:   shost       - Host in question
 *
 * Lock status: No locks are assumed held.
 *
 * Notes:       There is no timer nor any other means by which the requests
 *		get unblocked other than the low-level driver calling
 *		scsi_unblock_requests().
 */
void scsi_block_requests(struct Scsi_Host *shost)
{
	shost->host_self_blocked = 1;
}
EXPORT_SYMBOL(scsi_block_requests);
/*
 * Function:    scsi_unblock_requests()
 *
 * Purpose:     Utility function used by low-level drivers to allow further
 *		commands from being queued to the device.
 *
 * Arguments:   shost       - Host in question
 *
 * Lock status: No locks are assumed held.
 *
 * Notes:       There is no timer nor any other means by which the requests
 *		get unblocked other than the low-level driver calling
 *		scsi_unblock_requests().
 *
 *		This is done as an API function so that changes to the
 *		internals of the scsi mid-layer won't require wholesale
 *		changes to drivers that use this feature.
 */
void scsi_unblock_requests(struct Scsi_Host *shost)
{
	shost->host_self_blocked = 0;
	scsi_run_host_queues(shost);
}
EXPORT_SYMBOL(scsi_unblock_requests);
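/*
 * Illustrative sketch (not part of this file): a low-level driver using
 * the two helpers above to pause command submission around an internal
 * adapter reset.  The reset step itself is hypothetical.
 */
static void example_adapter_reset(struct Scsi_Host *shost)
{
	scsi_block_requests(shost);
	/* ... adapter-specific reset would go here (hypothetical) ... */
	scsi_unblock_requests(shost);
}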
int __init scsi_init_queue(void)
{
	int i;

	scsi_sdb_cache = kmem_cache_create("scsi_data_buffer",
					   sizeof(struct scsi_data_buffer),
					   0, 0, NULL);
	if (!scsi_sdb_cache) {
		printk(KERN_ERR "SCSI: can't init scsi sdb cache\n");
		return -ENOMEM;
	}

	for (i = 0; i < SG_MEMPOOL_NR; i++) {
		struct scsi_host_sg_pool *sgp = scsi_sg_pools + i;
		int size = sgp->size * sizeof(struct scatterlist);

		sgp->slab = kmem_cache_create(sgp->name, size, 0,
				SLAB_HWCACHE_ALIGN, NULL);
		if (!sgp->slab) {
			printk(KERN_ERR "SCSI: can't init sg slab %s\n",
					sgp->name);
			goto cleanup_sdb;
		}

		sgp->pool = mempool_create_slab_pool(SG_MEMPOOL_SIZE,
						     sgp->slab);
		if (!sgp->pool) {
			printk(KERN_ERR "SCSI: can't init sg mempool %s\n",
					sgp->name);
			goto cleanup_sdb;
		}
	}

	return 0;

cleanup_sdb:
	for (i = 0; i < SG_MEMPOOL_NR; i++) {
		struct scsi_host_sg_pool *sgp = scsi_sg_pools + i;
		if (sgp->pool)
			mempool_destroy(sgp->pool);
		if (sgp->slab)
			kmem_cache_destroy(sgp->slab);
	}
	kmem_cache_destroy(scsi_sdb_cache);

	return -ENOMEM;
}
void scsi_exit_queue(void)
{
	int i;

	kmem_cache_destroy(scsi_sdb_cache);

	for (i = 0; i < SG_MEMPOOL_NR; i++) {
		struct scsi_host_sg_pool *sgp = scsi_sg_pools + i;
		mempool_destroy(sgp->pool);
		kmem_cache_destroy(sgp->slab);
	}
}
/**
 *	scsi_mode_select - issue a mode select
 *	@sdev:	SCSI device to be queried
 *	@pf:	Page format bit (1 == standard, 0 == vendor specific)
 *	@sp:	Save page bit (0 == don't save, 1 == save)
 *	@modepage: mode page being requested
 *	@buffer: request buffer (may not be smaller than eight bytes)
 *	@len:	length of request buffer.
 *	@timeout: command timeout
 *	@retries: number of retries before failing
 *	@data: returns a structure abstracting the mode header data
 *	@sshdr: place to put sense data (or NULL if no sense to be collected).
 *		must be SCSI_SENSE_BUFFERSIZE big.
 *
 *	Returns zero if successful; negative error number or scsi
 *	status on error
 *
 */
int
scsi_mode_select(struct scsi_device *sdev, int pf, int sp, int modepage,
		 unsigned char *buffer, int len, int timeout, int retries,
		 struct scsi_mode_data *data, struct scsi_sense_hdr *sshdr)
{
	unsigned char cmd[10];
	unsigned char *real_buffer;
	int ret;

	memset(cmd, 0, sizeof(cmd));
	cmd[1] = (pf ? 0x10 : 0) | (sp ? 0x01 : 0);

	if (sdev->use_10_for_ms) {
		if (len > 65535)
			return -EINVAL;
		real_buffer = kmalloc(8 + len, GFP_KERNEL);
		if (!real_buffer)
			return -ENOMEM;
		memcpy(real_buffer + 8, buffer, len);
		len += 8;
		real_buffer[0] = 0;
		real_buffer[1] = 0;
		real_buffer[2] = data->medium_type;
		real_buffer[3] = data->device_specific;
		real_buffer[4] = data->longlba ? 0x01 : 0;
		real_buffer[5] = 0;
		real_buffer[6] = data->block_descriptor_length >> 8;
		real_buffer[7] = data->block_descriptor_length;

		cmd[0] = MODE_SELECT_10;
		cmd[7] = len >> 8;
		cmd[8] = len;
	} else {
		if (len > 255 || data->block_descriptor_length > 255 ||
		    data->longlba)
			return -EINVAL;

		real_buffer = kmalloc(4 + len, GFP_KERNEL);
		if (!real_buffer)
			return -ENOMEM;
		memcpy(real_buffer + 4, buffer, len);
		len += 4;
		real_buffer[0] = 0;
		real_buffer[1] = data->medium_type;
		real_buffer[2] = data->device_specific;
		real_buffer[3] = data->block_descriptor_length;


		cmd[0] = MODE_SELECT;
		cmd[4] = len;
	}

	ret = scsi_execute_req(sdev, cmd, DMA_TO_DEVICE, real_buffer, len,
			       sshdr, timeout, retries, NULL);
	kfree(real_buffer);
	return ret;
}
EXPORT_SYMBOL_GPL(scsi_mode_select);
/**
 *	scsi_mode_sense - issue a mode sense, falling back from 10 to six bytes if necessary.
 *	@sdev:	SCSI device to be queried
 *	@dbd:	set if mode sense will allow block descriptors to be returned
 *	@modepage: mode page being requested
 *	@buffer: request buffer (may not be smaller than eight bytes)
 *	@len:	length of request buffer.
 *	@timeout: command timeout
 *	@retries: number of retries before failing
 *	@data: returns a structure abstracting the mode header data
 *	@sshdr: place to put sense data (or NULL if no sense to be collected).
 *		must be SCSI_SENSE_BUFFERSIZE big.
 *
 *	Returns zero if unsuccessful, or the header offset (either 4
 *	or 8 depending on whether a six or ten byte command was
 *	issued) if successful.
 */
int
scsi_mode_sense(struct scsi_device *sdev, int dbd, int modepage,
		  unsigned char *buffer, int len, int timeout, int retries,
		  struct scsi_mode_data *data, struct scsi_sense_hdr *sshdr)
{
	unsigned char cmd[12];
	int use_10_for_ms;
	int header_length;
	int result;
	struct scsi_sense_hdr my_sshdr;

	memset(data, 0, sizeof(*data));
	memset(&cmd[0], 0, 12);
	cmd[1] = dbd & 0x18;	/* allows DBD and LLBA bits */
	cmd[2] = modepage;

	/* caller might not be interested in sense, but we need it */
	if (!sshdr)
		sshdr = &my_sshdr;

 retry:
	use_10_for_ms = sdev->use_10_for_ms;

	if (use_10_for_ms) {
		if (len < 8)
			len = 8;

		cmd[0] = MODE_SENSE_10;
		cmd[8] = len;
		header_length = 8;
	} else {
		if (len < 4)
			len = 4;

		cmd[0] = MODE_SENSE;
		cmd[4] = len;
		header_length = 4;
	}

	memset(buffer, 0, len);

	result = scsi_execute_req(sdev, cmd, DMA_FROM_DEVICE, buffer, len,
				  sshdr, timeout, retries, NULL);

	/* This code looks awful: what it's doing is making sure an
	 * ILLEGAL REQUEST sense return identifies the actual command
	 * byte as the problem.  MODE_SENSE commands can return
	 * ILLEGAL REQUEST if the code page isn't supported */

	if (use_10_for_ms && !scsi_status_is_good(result) &&
	    (driver_byte(result) & DRIVER_SENSE)) {
		if (scsi_sense_valid(sshdr)) {
			if ((sshdr->sense_key == ILLEGAL_REQUEST) &&
			    (sshdr->asc == 0x20) && (sshdr->ascq == 0)) {
				/*
				 * Invalid command operation code
				 */
				sdev->use_10_for_ms = 0;
				goto retry;
			}
		}
	}

	if(scsi_status_is_good(result)) {
		if (unlikely(buffer[0] == 0x86 && buffer[1] == 0x0b &&
			     (modepage == 6 || modepage == 8))) {
			/* Initio breakage? */
			header_length = 0;
			data->length = 13;
			data->medium_type = 0;
			data->device_specific = 0;
			data->longlba = 0;
			data->block_descriptor_length = 0;
		} else if(use_10_for_ms) {
			data->length = buffer[0]*256 + buffer[1] + 2;
			data->medium_type = buffer[2];
			data->device_specific = buffer[3];
			data->longlba = buffer[4] & 0x01;
			data->block_descriptor_length = buffer[6]*256
				+ buffer[7];
		} else {
			data->length = buffer[0] + 1;
			data->medium_type = buffer[1];
			data->device_specific = buffer[2];
			data->block_descriptor_length = buffer[3];
		}
		data->header_length = header_length;
	}

	return result;
}
EXPORT_SYMBOL(scsi_mode_sense);
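/*
 * Illustrative sketch (not part of this file): reading the Caching mode
 * page (0x08) with scsi_mode_sense() above and locating the WCE bit.
 * Buffer size, timeout and retry count are assumptions for the example.
 */
static int example_wce_enabled(struct scsi_device *sdev)
{
	unsigned char buffer[64];
	struct scsi_mode_data data;
	struct scsi_sense_hdr sshdr;
	int res, offset;

	res = scsi_mode_sense(sdev, 0 /* dbd */, 0x08 /* caching page */,
			      buffer, sizeof(buffer), 30 * HZ, 3,
			      &data, &sshdr);
	if (!scsi_status_is_good(res))
		return -EIO;

	/* the mode page starts after the header and any block descriptors */
	offset = data.header_length + data.block_descriptor_length;
	return (buffer[offset + 2] & 0x04) ? 1 : 0;	/* WCE bit */
}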
/**
 *	scsi_test_unit_ready - test if unit is ready
 *	@sdev:	scsi device to change the state of.
 *	@timeout: command timeout
 *	@retries: number of retries before failing
 *	@sshdr_external: Optional pointer to struct scsi_sense_hdr for
 *		returning sense. Make sure that this is cleared before passing
 *		in.
 *
 *	Returns zero if unsuccessful or an error if TUR failed.  For
 *	removable media, UNIT_ATTENTION sets ->changed flag.
 **/
int
scsi_test_unit_ready(struct scsi_device *sdev, int timeout, int retries,
		     struct scsi_sense_hdr *sshdr_external)
{
	char cmd[] = {
		TEST_UNIT_READY, 0, 0, 0, 0, 0,
	};
	struct scsi_sense_hdr *sshdr;
	int result;

	if (!sshdr_external)
		sshdr = kzalloc(sizeof(*sshdr), GFP_KERNEL);
	else
		sshdr = sshdr_external;

	/* try to eat the UNIT_ATTENTION if there are enough retries */
	do {
		result = scsi_execute_req(sdev, cmd, DMA_NONE, NULL, 0, sshdr,
					  timeout, retries, NULL);
		if (sdev->removable && scsi_sense_valid(sshdr) &&
		    sshdr->sense_key == UNIT_ATTENTION)
			sdev->changed = 1;
	} while (scsi_sense_valid(sshdr) &&
		 sshdr->sense_key == UNIT_ATTENTION && --retries);

	if (!sshdr_external)
		kfree(sshdr);
	return result;
}
EXPORT_SYMBOL(scsi_test_unit_ready);
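/*
 * Illustrative sketch (not part of this file): polling a removable device
 * with scsi_test_unit_ready() above.  Timeout and retry values are
 * assumptions; a zero result means the unit reported ready.
 */
static int example_unit_ready(struct scsi_device *sdev)
{
	struct scsi_sense_hdr sshdr;

	return scsi_test_unit_ready(sdev, 30 * HZ, 3, &sshdr) == 0;
}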
/**
 *	scsi_device_set_state - Take the given device through the device state model.
 *	@sdev:	scsi device to change the state of.
 *	@state:	state to change to.
 *
 *	Returns zero if unsuccessful or an error if the requested
 *	transition is illegal.
 **/
int
scsi_device_set_state(struct scsi_device *sdev, enum scsi_device_state state)
{
	enum scsi_device_state oldstate = sdev->sdev_state;

	if (state == oldstate)
		return 0;

	switch (state) {
	case SDEV_CREATED:
		switch (oldstate) {
		case SDEV_CREATED_BLOCK:
			break;
		default:
			goto illegal;
		}
		break;

	case SDEV_RUNNING:
		switch (oldstate) {
		case SDEV_CREATED:
		case SDEV_OFFLINE:
		case SDEV_TRANSPORT_OFFLINE:
		case SDEV_QUIESCE:
		case SDEV_BLOCK:
			break;
		default:
			goto illegal;
		}
		break;

	case SDEV_QUIESCE:
		switch (oldstate) {
		case SDEV_RUNNING:
		case SDEV_OFFLINE:
		case SDEV_TRANSPORT_OFFLINE:
			break;
		default:
			goto illegal;
		}
		break;

	case SDEV_OFFLINE:
	case SDEV_TRANSPORT_OFFLINE:
		switch (oldstate) {
		case SDEV_CREATED:
		case SDEV_RUNNING:
		case SDEV_QUIESCE:
		case SDEV_BLOCK:
			break;
		default:
			goto illegal;
		}
		break;

	case SDEV_BLOCK:
		switch (oldstate) {
		case SDEV_RUNNING:
		case SDEV_CREATED_BLOCK:
			break;
		default:
			goto illegal;
		}
		break;

	case SDEV_CREATED_BLOCK:
		switch (oldstate) {
		case SDEV_CREATED:
			break;
		default:
			goto illegal;
		}
		break;

	case SDEV_CANCEL:
		switch (oldstate) {
		case SDEV_CREATED:
		case SDEV_RUNNING:
		case SDEV_QUIESCE:
		case SDEV_OFFLINE:
		case SDEV_TRANSPORT_OFFLINE:
		case SDEV_BLOCK:
			break;
		default:
			goto illegal;
		}
		break;

	case SDEV_DEL:
		switch (oldstate) {
		case SDEV_CREATED:
		case SDEV_RUNNING:
		case SDEV_OFFLINE:
		case SDEV_TRANSPORT_OFFLINE:
		case SDEV_CANCEL:
			break;
		default:
			goto illegal;
		}
		break;

	}
	sdev->sdev_state = state;
	return 0;

 illegal:
	SCSI_LOG_ERROR_RECOVERY(1,
				sdev_printk(KERN_ERR, sdev,
					    "Illegal state transition %s->%s\n",
					    scsi_device_state_name(oldstate),
					    scsi_device_state_name(state))
				);
	return -EINVAL;
}
EXPORT_SYMBOL(scsi_device_set_state);
/**
 * 	sdev_evt_emit - emit a single SCSI device uevent
 *	@sdev: associated SCSI device
 *	@evt: event to emit
 *
 *	Send a single uevent (scsi_event) to the associated scsi_device.
 **/
static void scsi_evt_emit(struct scsi_device *sdev, struct scsi_event *evt)
{
	int idx = 0;
	char *envp[3];

	switch (evt->evt_type) {
	case SDEV_EVT_MEDIA_CHANGE:
		envp[idx++] = "SDEV_MEDIA_CHANGE=1";
		break;

	default:
		/* do nothing */
		break;
	}

	envp[idx++] = NULL;

	kobject_uevent_env(&sdev->sdev_gendev.kobj, KOBJ_CHANGE, envp);
}
/**
 * 	sdev_evt_thread - send a uevent for each scsi event
 *	@work: work struct for scsi_device
 *
 *	Dispatch queued events to their associated scsi_device kobjects
 *	as uevents.
 */
void scsi_evt_thread(struct work_struct *work)
{
	struct scsi_device *sdev;
	LIST_HEAD(event_list);

	sdev = container_of(work, struct scsi_device, event_work);

	while (1) {
		struct scsi_event *evt;
		struct list_head *this, *tmp;
		unsigned long flags;

		spin_lock_irqsave(&sdev->list_lock, flags);
		list_splice_init(&sdev->event_list, &event_list);
		spin_unlock_irqrestore(&sdev->list_lock, flags);

		if (list_empty(&event_list))
			break;

		list_for_each_safe(this, tmp, &event_list) {
			evt = list_entry(this, struct scsi_event, node);
			list_del(&evt->node);
			scsi_evt_emit(sdev, evt);
			kfree(evt);
		}
	}
}
/**
 * 	sdev_evt_send - send asserted event to uevent thread
 *	@sdev: scsi_device event occurred on
 *	@evt: event to send
 *
 *	Assert scsi device event asynchronously.
 */
void sdev_evt_send(struct scsi_device *sdev, struct scsi_event *evt)
{
	unsigned long flags;

#if 0
	/* FIXME: currently this check eliminates all media change events
	 * for polled devices.  Need to update to discriminate between AN
	 * and polled events */
	if (!test_bit(evt->evt_type, sdev->supported_events)) {
		kfree(evt);
		return;
	}
#endif

	spin_lock_irqsave(&sdev->list_lock, flags);
	list_add_tail(&evt->node, &sdev->event_list);
	schedule_work(&sdev->event_work);
	spin_unlock_irqrestore(&sdev->list_lock, flags);
}
EXPORT_SYMBOL_GPL(sdev_evt_send);
/**
 * 	sdev_evt_alloc - allocate a new scsi event
 *	@evt_type: type of event to allocate
 *	@gfpflags: GFP flags for allocation
 *
 *	Allocates and returns a new scsi_event.
 */
struct scsi_event *sdev_evt_alloc(enum scsi_device_event evt_type,
				  gfp_t gfpflags)
{
	struct scsi_event *evt = kzalloc(sizeof(struct scsi_event), gfpflags);
	if (!evt)
		return NULL;

	evt->evt_type = evt_type;
	INIT_LIST_HEAD(&evt->node);

	/* evt_type-specific initialization, if any */
	switch (evt_type) {
	case SDEV_EVT_MEDIA_CHANGE:
	default:
		/* do nothing */
		break;
	}

	return evt;
}
EXPORT_SYMBOL_GPL(sdev_evt_alloc);
/**
 * 	sdev_evt_send_simple - send asserted event to uevent thread
 *	@sdev: scsi_device event occurred on
 *	@evt_type: type of event to send
 *	@gfpflags: GFP flags for allocation
 *
 *	Assert scsi device event asynchronously, given an event type.
 */
void sdev_evt_send_simple(struct scsi_device *sdev,
			  enum scsi_device_event evt_type, gfp_t gfpflags)
{
	struct scsi_event *evt = sdev_evt_alloc(evt_type, gfpflags);
	if (!evt) {
		sdev_printk(KERN_ERR, sdev, "event %d eaten due to OOM\n",
			    evt_type);
		return;
	}

	sdev_evt_send(sdev, evt);
}
EXPORT_SYMBOL_GPL(sdev_evt_send_simple);
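/*
 * Illustrative sketch (not part of this file): a driver or transport class
 * asserting a media-change event asynchronously via the helper above.
 * GFP_ATOMIC is an assumption for a completion-context caller.
 */
static void example_report_media_change(struct scsi_device *sdev)
{
	sdev_evt_send_simple(sdev, SDEV_EVT_MEDIA_CHANGE, GFP_ATOMIC);
}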
/**
 *	scsi_device_quiesce - Block user issued commands.
 *	@sdev:	scsi device to quiesce.
 *
 *	This works by trying to transition to the SDEV_QUIESCE state
 *	(which must be a legal transition).  When the device is in this
 *	state, only special requests will be accepted, all others will
 *	be deferred.  Since special requests may also be requeued requests,
 *	a successful return doesn't guarantee the device will be
 *	totally quiescent.
 *
 *	Must be called with user context, may sleep.
 *
 *	Returns zero if unsuccessful or an error if not.
 **/
2339 * Returns zero if unsuccessful or an error if not.
2342 scsi_device_quiesce(struct scsi_device
*sdev
)
2344 int err
= scsi_device_set_state(sdev
, SDEV_QUIESCE
);
2348 scsi_run_queue(sdev
->request_queue
);
2349 while (sdev
->device_busy
) {
2350 msleep_interruptible(200);
2351 scsi_run_queue(sdev
->request_queue
);
2355 EXPORT_SYMBOL(scsi_device_quiesce
);
/**
 *	scsi_device_resume - Restart user issued commands to a quiesced device.
 *	@sdev:	scsi device to resume.
 *
 *	Moves the device from quiesced back to running and restarts the
 *	queues.
 *
 *	Must be called with user context, may sleep.
 **/
void scsi_device_resume(struct scsi_device *sdev)
{
	/* check if the device state was mutated prior to resume, and if
	 * so assume the state is being managed elsewhere (for example
	 * device deleted during suspend)
	 */
	if (sdev->sdev_state != SDEV_QUIESCE ||
	    scsi_device_set_state(sdev, SDEV_RUNNING))
		return;
	scsi_run_queue(sdev->request_queue);
}
EXPORT_SYMBOL(scsi_device_resume);
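/*
 * Illustrative sketch (not part of this file): pairing the quiesce and
 * resume helpers above, the way power-management style callers do.
 * Error handling is intentionally minimal.
 */
static int example_quiesce_then_resume(struct scsi_device *sdev)
{
	int err = scsi_device_quiesce(sdev);

	if (err)
		return err;
	/* ... only REQ_PREEMPT (special) requests are processed here ... */
	scsi_device_resume(sdev);
	return 0;
}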
static void
device_quiesce_fn(struct scsi_device *sdev, void *data)
{
	scsi_device_quiesce(sdev);
}

void
scsi_target_quiesce(struct scsi_target *starget)
{
	starget_for_each_device(starget, NULL, device_quiesce_fn);
}
EXPORT_SYMBOL(scsi_target_quiesce);
static void
device_resume_fn(struct scsi_device *sdev, void *data)
{
	scsi_device_resume(sdev);
}

void
scsi_target_resume(struct scsi_target *starget)
{
	starget_for_each_device(starget, NULL, device_resume_fn);
}
EXPORT_SYMBOL(scsi_target_resume);
/**
 * scsi_internal_device_block - internal function to put a device temporarily into the SDEV_BLOCK state
 * @sdev:	device to block
 *
 *	Block request made by scsi lld's to temporarily stop all
 *	scsi commands on the specified device.  Called from interrupt
 *	or normal process context.
 *
 *	Returns zero if successful or error if not
 *
 *	Notes:
 *	This routine transitions the device to the SDEV_BLOCK state
 *	(which must be a legal transition).  When the device is in this
 *	state, all commands are deferred until the scsi lld reenables
 *	the device with scsi_device_unblock or device_block_tmo fires.
 */
int
scsi_internal_device_block(struct scsi_device *sdev)
{
	struct request_queue *q = sdev->request_queue;
	unsigned long flags;
	int err = 0;

	err = scsi_device_set_state(sdev, SDEV_BLOCK);
	if (err) {
		err = scsi_device_set_state(sdev, SDEV_CREATED_BLOCK);

		if (err)
			return err;
	}

	/*
	 * The device has transitioned to SDEV_BLOCK.  Stop the
	 * block layer from calling the midlayer with this device's
	 * request queue.
	 */
	spin_lock_irqsave(q->queue_lock, flags);
	blk_stop_queue(q);
	spin_unlock_irqrestore(q->queue_lock, flags);

	return 0;
}
EXPORT_SYMBOL_GPL(scsi_internal_device_block);
/**
 * scsi_internal_device_unblock - resume a device after a block request
 * @sdev:	device to resume
 * @new_state:	state to set devices to after unblocking
 *
 *	Called by scsi lld's or the midlayer to restart the device queue
 *	for the previously suspended scsi device.  Called from interrupt or
 *	normal process context.
 *
 *	Returns zero if successful or error if not.
 *
 *	Notes:
 *	This routine transitions the device to the SDEV_RUNNING state
 *	or to one of the offline states (which must be a legal transition)
 *	allowing the midlayer to goose the queue for this device.
 */
int
scsi_internal_device_unblock(struct scsi_device *sdev,
			     enum scsi_device_state new_state)
{
	struct request_queue *q = sdev->request_queue;
	unsigned long flags;

	/*
	 * Try to transition the scsi device to SDEV_RUNNING or one of the
	 * offlined states and goose the device queue if successful.
	 */
	if (sdev->sdev_state == SDEV_BLOCK)
		sdev->sdev_state = new_state;
	else if (sdev->sdev_state == SDEV_CREATED_BLOCK) {
		if (new_state == SDEV_TRANSPORT_OFFLINE ||
		    new_state == SDEV_OFFLINE)
			sdev->sdev_state = new_state;
		else
			sdev->sdev_state = SDEV_CREATED;
	} else if (sdev->sdev_state != SDEV_CANCEL &&
		 sdev->sdev_state != SDEV_OFFLINE)
		return -EINVAL;

	spin_lock_irqsave(q->queue_lock, flags);
	blk_start_queue(q);
	spin_unlock_irqrestore(q->queue_lock, flags);

	return 0;
}
EXPORT_SYMBOL_GPL(scsi_internal_device_unblock);
static void
device_block(struct scsi_device *sdev, void *data)
{
	scsi_internal_device_block(sdev);
}

static int
target_block(struct device *dev, void *data)
{
	if (scsi_is_target_device(dev))
		starget_for_each_device(to_scsi_target(dev), NULL,
					device_block);
	return 0;
}

void
scsi_target_block(struct device *dev)
{
	if (scsi_is_target_device(dev))
		starget_for_each_device(to_scsi_target(dev), NULL,
					device_block);
	else
		device_for_each_child(dev, NULL, target_block);
}
EXPORT_SYMBOL_GPL(scsi_target_block);
static void
device_unblock(struct scsi_device *sdev, void *data)
{
	scsi_internal_device_unblock(sdev, *(enum scsi_device_state *)data);
}

static int
target_unblock(struct device *dev, void *data)
{
	if (scsi_is_target_device(dev))
		starget_for_each_device(to_scsi_target(dev), data,
					device_unblock);
	return 0;
}

void
scsi_target_unblock(struct device *dev, enum scsi_device_state new_state)
{
	if (scsi_is_target_device(dev))
		starget_for_each_device(to_scsi_target(dev), &new_state,
					device_unblock);
	else
		device_for_each_child(dev, &new_state, target_unblock);
}
EXPORT_SYMBOL_GPL(scsi_target_unblock);
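/*
 * Illustrative sketch (not part of this file): a transport class blocking
 * a target while its link is down and later unblocking it, or marking it
 * transport-offline if the link never comes back, using the helpers above.
 */
static void example_link_down(struct device *target_dev)
{
	scsi_target_block(target_dev);
}

static void example_link_event(struct device *target_dev, bool link_up)
{
	scsi_target_unblock(target_dev,
			    link_up ? SDEV_RUNNING : SDEV_TRANSPORT_OFFLINE);
}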
/**
 * scsi_kmap_atomic_sg - find and atomically map an sg-elemnt
 * @sgl:	scatter-gather list
 * @sg_count:	number of segments in sg
 * @offset:	offset in bytes into sg, on return offset into the mapped area
 * @len:	bytes to map, on return number of bytes mapped
 *
 * Returns virtual address of the start of the mapped page
 */
void *scsi_kmap_atomic_sg(struct scatterlist *sgl, int sg_count,
			  size_t *offset, size_t *len)
{
	int i;
	size_t sg_len = 0, len_complete = 0;
	struct scatterlist *sg;
	struct page *page;

	WARN_ON(!irqs_disabled());

	for_each_sg(sgl, sg, sg_count, i) {
		len_complete = sg_len; /* Complete sg-entries */
		sg_len += sg->length;
		if (sg_len > *offset)
			break;
	}

	if (unlikely(i == sg_count)) {
		printk(KERN_ERR "%s: Bytes in sg: %zu, requested offset %zu, "
			"elements %d\n",
		       __func__, sg_len, *offset, sg_count);
		WARN_ON(1);
		return NULL;
	}

	/* Offset starting from the beginning of first page in this sg-entry */
	*offset = *offset - len_complete + sg->offset;

	/* Assumption: contiguous pages can be accessed as "page + i" */
	page = nth_page(sg_page(sg), (*offset >> PAGE_SHIFT));
	*offset &= ~PAGE_MASK;

	/* Bytes in this sg-entry from *offset to the end of the page */
	sg_len = PAGE_SIZE - *offset;
	if (*len > sg_len)
		*len = sg_len;

	return kmap_atomic(page);
}
EXPORT_SYMBOL(scsi_kmap_atomic_sg);
/**
 * scsi_kunmap_atomic_sg - atomically unmap a virtual address, previously mapped with scsi_kmap_atomic_sg
 * @virt:	virtual address to be unmapped
 */
void scsi_kunmap_atomic_sg(void *virt)
{
	kunmap_atomic(virt);
}
EXPORT_SYMBOL(scsi_kunmap_atomic_sg);
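/*
 * Illustrative sketch (not part of this file): copying a few bytes out of
 * a command's data scatterlist with the atomic-kmap helpers above.  The
 * function name and fixed copy size are assumptions for the example.
 */
static void example_peek_sg(struct scsi_cmnd *cmd, void *to, size_t count)
{
	size_t offset = 0, len = count;
	void *vaddr;

	vaddr = scsi_kmap_atomic_sg(scsi_sglist(cmd), scsi_sg_count(cmd),
				    &offset, &len);
	if (!vaddr)
		return;

	/* len may have been clamped to the end of the mapped page */
	memcpy(to, vaddr + offset, min(len, count));
	scsi_kunmap_atomic_sg(vaddr);
}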