.tf_read = ahci_tf_read,
+ .qc_defer = ata_std_qc_defer,
.qc_prep = ahci_qc_prep,
.qc_issue = ahci_qc_issue,
.tf_read = ahci_tf_read,
+ .qc_defer = ata_std_qc_defer,
.qc_prep = ahci_qc_prep,
.qc_issue = ahci_qc_issue,
return 0;
}
+/**
+ * ata_std_qc_defer - Check whether a qc needs to be deferred
+ * @qc: ATA command in question
+ *
+ * Non-NCQ commands cannot run with any other command, NCQ or
+ * not. As the upper layer only knows the queue depth, we are
+ * responsible for maintaining exclusion. This function checks
+ * whether a new command @qc can be issued.
+ *
+ * LOCKING:
+ * spin_lock_irqsave(host lock)
+ *
+ * RETURNS:
+ * ATA_DEFER_* if deferring is needed, 0 otherwise.
+ */
+int ata_std_qc_defer(struct ata_queued_cmd *qc)
+{
+ struct ata_link *link = qc->dev->link;
+
+ if (qc->tf.protocol == ATA_PROT_NCQ) {
+ /* NCQ can run alongside other NCQ commands; defer only
+ * while a non-NCQ command (active_tag) is in flight.
+ */
+ if (!ata_tag_valid(link->active_tag))
+ return 0;
+ } else {
+ /* a non-NCQ command needs the link completely idle: no
+ * active_tag and no NCQ commands (sactive) outstanding.
+ */
+ if (!ata_tag_valid(link->active_tag) && !link->sactive)
+ return 0;
+ }
+
+ return ATA_DEFER_LINK;
+}
+
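As a usage sketch (not part of this patch): a driver whose hardware also serializes on a port-wide resource could layer its own ->qc_defer() on top of ata_std_qc_defer() and return ATA_DEFER_PORT while that resource is held. The foo_* names and the excl_busy flag below are hypothetical.

/* Hypothetical driver-private qc_defer: fall back to the standard
 * per-link exclusion, but defer the whole port while a made-up
 * port-wide resource is busy.  foo_port_priv and excl_busy do not
 * exist in this patch; they only illustrate the ATA_DEFER_PORT case.
 */
static int foo_qc_defer(struct ata_queued_cmd *qc)
{
	struct foo_port_priv *pp = qc->ap->private_data;

	if (pp->excl_busy)
		return ATA_DEFER_PORT;	/* plug the whole host queue */

	return ata_std_qc_defer(qc);	/* normal per-link NCQ exclusion */
}

Returning ATA_DEFER_PORT makes ata_scsi_translate() below return SCSI_MLQUEUE_HOST_BUSY, so the whole host queue is plugged rather than just the one device.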
/**
* ata_qc_prep - Prepare taskfile for submission
* @qc: Metadata associated with taskfile to be prepared
EXPORT_SYMBOL_GPL(ata_do_set_mode);
EXPORT_SYMBOL_GPL(ata_data_xfer);
EXPORT_SYMBOL_GPL(ata_data_xfer_noirq);
+EXPORT_SYMBOL_GPL(ata_std_qc_defer);
EXPORT_SYMBOL_GPL(ata_qc_prep);
EXPORT_SYMBOL_GPL(ata_dumb_qc_prep);
EXPORT_SYMBOL_GPL(ata_noop_qc_prep);
{
sdev->use_10_for_rw = 1;
sdev->use_10_for_ms = 1;
+
+ /* Scheduling policy is determined by the ->qc_defer() callback,
+ * which needs to see every deferred qc. Set max_device_blocked
+ * to 1 to keep the SCSI midlayer from deferring requests on its
+ * own.
+ */
+ sdev->max_device_blocked = 1;
}
static void ata_scsi_dev_config(struct scsi_device *sdev,
ata_qc_free(qc);
}
-/**
- * ata_scmd_need_defer - Check whether we need to defer scmd
- * @dev: ATA device to which the command is addressed
- * @is_io: Is the command IO (and thus possibly NCQ)?
- *
- * NCQ and non-NCQ commands cannot run together. As upper layer
- * only knows the queue depth, we are responsible for maintaining
- * exclusion. This function checks whether a new command can be
- * issued to @dev.
- *
- * LOCKING:
- * spin_lock_irqsave(host lock)
- *
- * RETURNS:
- * 1 if deferring is needed, 0 otherwise.
- */
-static int ata_scmd_need_defer(struct ata_device *dev, int is_io)
-{
- struct ata_link *link = dev->link;
- int is_ncq = is_io && ata_ncq_enabled(dev);
-
- if (is_ncq) {
- if (!ata_tag_valid(link->active_tag))
- return 0;
- } else {
- if (!ata_tag_valid(link->active_tag) && !link->sactive)
- return 0;
- }
- return 1;
-}
-
/**
* ata_scsi_translate - Translate then issue SCSI command to ATA device
* @dev: ATA device to which the command is addressed
void (*done)(struct scsi_cmnd *),
ata_xlat_func_t xlat_func)
{
+ struct ata_port *ap = dev->link->ap;
struct ata_queued_cmd *qc;
- int is_io = xlat_func == ata_scsi_rw_xlat;
+ int rc;
VPRINTK("ENTER\n");
- if (unlikely(ata_scmd_need_defer(dev, is_io)))
- goto defer;
-
qc = ata_scsi_qc_new(dev, cmd, done);
if (!qc)
goto err_mem;
if (xlat_func(qc))
goto early_finish;
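+ /* ->qc_defer() inspects the translated taskfile (for example
+ * qc->tf.protocol to tell NCQ from non-NCQ), so it can only be
+ * called after xlat_func() has filled in the qc.
+ */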
+ if (ap->ops->qc_defer) {
+ rc = ap->ops->qc_defer(qc);
+ if (rc)
+ goto defer;
+ }
+
/* select device, send command to hardware */
ata_qc_issue(qc);
return 0;
defer:
+ ata_qc_free(qc);
DPRINTK("EXIT - defer\n");
- return SCSI_MLQUEUE_DEVICE_BUSY;
+ if (rc == ATA_DEFER_LINK)
+ return SCSI_MLQUEUE_DEVICE_BUSY;
+ return SCSI_MLQUEUE_HOST_BUSY;
}
/**
shost->max_channel = 1;
shost->max_cmd_len = 16;
+ /* Scheduling policy is determined by the ->qc_defer()
+ * callback, which needs to see every deferred qc. Set
+ * max_host_blocked to 1 to keep the SCSI midlayer from
+ * deferring requests on its own.
+ */
+ shost->max_host_blocked = 1;
+
rc = scsi_add_host(ap->scsi_host, ap->host->dev);
if (rc)
goto err_add;
.bmdma_start = ata_bmdma_start,
.bmdma_stop = ata_bmdma_stop,
.bmdma_status = ata_bmdma_status,
+ .qc_defer = ata_std_qc_defer,
.qc_prep = nv_adma_qc_prep,
.qc_issue = nv_adma_qc_issue,
.freeze = nv_adma_freeze,
.tf_read = sil24_tf_read,
+ .qc_defer = ata_std_qc_defer,
.qc_prep = sil24_qc_prep,
.qc_issue = sil24_qc_issue,
/* ering size */
ATA_ERING_SIZE = 32,
+ /* return values for ->qc_defer */
+ ATA_DEFER_LINK = 1, /* link busy, requeue against the device */
+ ATA_DEFER_PORT = 2, /* port busy, requeue against the host */
+
/* desc_len for ata_eh_info and context */
ATA_EH_DESC_LEN = 80,
void (*data_xfer) (struct ata_device *, unsigned char *, unsigned int, int);
+ int (*qc_defer) (struct ata_queued_cmd *qc);
void (*qc_prep) (struct ata_queued_cmd *qc);
unsigned int (*qc_issue) (struct ata_queued_cmd *qc);
unsigned int buflen, int write_data);
extern void ata_data_xfer_noirq(struct ata_device *adev, unsigned char *buf,
unsigned int buflen, int write_data);
+extern int ata_std_qc_defer(struct ata_queued_cmd *qc);
extern void ata_dumb_qc_prep(struct ata_queued_cmd *qc);
extern void ata_qc_prep(struct ata_queued_cmd *qc);
extern void ata_noop_qc_prep(struct ata_queued_cmd *qc);