        unsigned long flags;

        atomic_dec(&shost->host_busy);
-       atomic_dec(&starget->target_busy);
+       if (starget->can_queue > 0)
+               atomic_dec(&starget->target_busy);

        if (unlikely(scsi_host_in_recovery(shost) &&
                     (shost->host_failed || shost->host_eh_scheduled))) {
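
scsi_target_is_busy() folds the target_blocked check under the same can_queue test, so a target that never set a queue limit is never reported busy:
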
static inline bool scsi_target_is_busy(struct scsi_target *starget)
{
-       if (starget->can_queue > 0 &&
-           atomic_read(&starget->target_busy) >= starget->can_queue)
-               return true;
-       if (atomic_read(&starget->target_blocked) > 0)
-               return true;
+       if (starget->can_queue > 0) {
+               if (atomic_read(&starget->target_busy) >= starget->can_queue)
+                       return true;
+               if (atomic_read(&starget->target_blocked) > 0)
+                       return true;
+       }
        return false;
}
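
The limit itself comes from the LLD. As a rough sketch (the callback name and the depth of 32 are invented for illustration; only the can_queue field and the target_alloc hook are real), a driver opts in by setting can_queue when the target is allocated, for example from its scsi_host_template target_alloc callback:

        /* Hypothetical driver callback, not part of this patch. */
        static int example_target_alloc(struct scsi_target *starget)
        {
                /* Invented depth: at most 32 commands in flight per
                 * target.  Leaving can_queue at 0 now means "no limit"
                 * and skips the target_busy accounting entirely. */
                starget->can_queue = 32;
                return 0;
        }

With that in mind, scsi_target_queue_ready() can return early for unlimited targets, before ever touching target_busy, which in turn lets the depth check below drop its own can_queue > 0 guard:
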
                spin_unlock_irq(shost->host_lock);
        }

+       if (starget->can_queue <= 0)
+               return 1;
+
        busy = atomic_inc_return(&starget->target_busy) - 1;
        if (atomic_read(&starget->target_blocked) > 0) {
                if (busy)
                        goto starved;

                /*
                 * unblock after target_blocked iterates to zero
                 */
                if (atomic_dec_return(&starget->target_blocked) > 0)
                        goto out_dec;

                SCSI_LOG_MLQUEUE(3, starget_printk(KERN_INFO, starget,
                                 "unblocking target at zero depth\n"));
        }

-       if (starget->can_queue > 0 && busy >= starget->can_queue)
+       if (busy >= starget->can_queue)
                goto starved;

        return 1;
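
On the starved path the device is parked on the host's starved list, and out_dec undoes the increment, so that decrement must be skipped for unlimited targets as well:
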
starved:
        spin_lock_irq(shost->host_lock);
        list_move_tail(&sdev->starved_entry, &shost->starved_list);
        spin_unlock_irq(shost->host_lock);
out_dec:
-       atomic_dec(&starget->target_busy);
+       if (starget->can_queue > 0)
+               atomic_dec(&starget->target_busy);
        return 0;
}
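
Taken together, the increment in scsi_target_queue_ready() and the decrements in scsi_device_unbusy() and the failure paths form a small get/put protocol that is now a no-op when no limit is set. A standalone sketch of that protocol (helper names invented; target_blocked handling omitted for brevity):

        #include <linux/atomic.h>
        #include <scsi/scsi_device.h>   /* struct scsi_target */

        /* Sketch, not from the patch: take one slot of target budget. */
        static bool example_target_get_budget(struct scsi_target *starget)
        {
                if (starget->can_queue <= 0)
                        return true;    /* unlimited: no accounting */
                if (atomic_inc_return(&starget->target_busy) >
                    starget->can_queue) {
                        atomic_dec(&starget->target_busy);      /* undo */
                        return false;   /* over the limit: try again later */
                }
                return true;
        }

        /* Sketch: release the slot, mirroring the conditional decrements. */
        static void example_target_put_budget(struct scsi_target *starget)
        {
                if (starget->can_queue > 0)
                        atomic_dec(&starget->target_busy);
        }

The same balance is why scsi_kill_request() bumps the counters before completing the request (the completion path will call scsi_device_unbusy()), so its increment grows the same guard:
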
         */
        atomic_inc(&sdev->device_busy);
        atomic_inc(&shost->host_busy);
-       atomic_inc(&starget->target_busy);
+       if (starget->can_queue > 0)
+               atomic_inc(&starget->target_busy);

        blk_complete_request(req);
}
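
Finally, the host_not_ready unwind in scsi_request_fn() gets the identical treatment:
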
        return;

 host_not_ready:
-       atomic_dec(&scsi_target(sdev)->target_busy);
+       if (scsi_target(sdev)->can_queue > 0)
+               atomic_dec(&scsi_target(sdev)->target_busy);
 not_ready:
        /*
         * lock q, handle tag, requeue req, and decrement device_busy. We