	ap->ops->tf_read(ap, &qc->result_tf);
}
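+/*
+ * A regular (non-internal) command that actually moved data completed
+ * successfully, so the device's configured transfer mode has been
+ * exercised and ATA_DFLAG_DUBIOUS_XFER can be cleared.  Completion of
+ * a PIO command is not taken as proof for a DMA-capable device.
+ */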
+static void ata_verify_xfer(struct ata_queued_cmd *qc)
+{
+	struct ata_device *dev = qc->dev;
+
+	if (ata_tag_internal(qc->tag))
+		return;
+
+	if (ata_is_nodata(qc->tf.protocol))
+		return;
+
+	if ((dev->mwdma_mask || dev->udma_mask) && ata_is_pio(qc->tf.protocol))
+		return;
+
+	dev->flags &= ~ATA_DFLAG_DUBIOUS_XFER;
+}
+
/**
 * ata_qc_complete - Complete an active ATA command
 * @qc: Command to complete
			break;
		}
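+		/* a successful data transfer verifies the currently
+		 * configured (and possibly dubious) transfer mode
+		 */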
+		if (unlikely(qc->dev->flags & ATA_DFLAG_DUBIOUS_XFER))
+			ata_verify_xfer(qc);
+
		__ata_qc_complete(qc);
	} else {
		if (qc->flags & ATA_QCFLAG_EH_SCHEDULED)
		spin_lock_irqsave(ap->lock, flags);

		__ata_port_for_each_link(link, ap) {
+			struct ata_eh_context *ehc = &link->eh_context;
+			struct ata_device *dev;
+
			memset(&link->eh_context, 0, sizeof(link->eh_context));
			link->eh_context.i = link->eh_info;
			memset(&link->eh_info, 0, sizeof(link->eh_info));
+
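+			/* snapshot each device's transfer mode and NCQ state
+			 * so that ata_set_mode() can detect whether EH ended
+			 * up reconfiguring them
+			 */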
+			ata_link_for_each_dev(dev, link) {
+				int devno = dev->devno;
+
+				ehc->saved_xfer_mode[devno] = dev->xfer_mode;
+				if (ata_ncq_enabled(dev))
+					ehc->saved_ncq_enabled |= 1 << devno;
+			}
		}

		ap->pflags |= ATA_PFLAG_EH_IN_PROGRESS;
int ata_set_mode(struct ata_link *link, struct ata_device **r_failed_dev)
{
	struct ata_port *ap = link->ap;
+	struct ata_device *dev;
+	int rc;

	/* has private set_mode? */
	if (ap->ops->set_mode)
-		return ap->ops->set_mode(link, r_failed_dev);
-	return ata_do_set_mode(link, r_failed_dev);
+		rc = ap->ops->set_mode(link, r_failed_dev);
+	else
+		rc = ata_do_set_mode(link, r_failed_dev);
+
+	/* if transfer mode has changed, set DUBIOUS_XFER on device */
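+	/* note: a reprobed device arrives here with saved_xfer_mode == 0
+	 * and its saved NCQ bit cleared, so it always takes this branch
+	 * once a mode has been configured
+	 */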
+	ata_link_for_each_dev(dev, link) {
+		struct ata_eh_context *ehc = &link->eh_context;
+		u8 saved_xfer_mode = ehc->saved_xfer_mode[dev->devno];
+		u8 saved_ncq = !!(ehc->saved_ncq_enabled & (1 << dev->devno));
+
+		if (dev->xfer_mode != saved_xfer_mode ||
+		    ata_ncq_enabled(dev) != saved_ncq)
+			dev->flags |= ATA_DFLAG_DUBIOUS_XFER;
+	}
+
+	return rc;
}
static int ata_link_nr_enabled(struct ata_link *link)
	ata_dev_init(dev);
	ehc->did_probe_mask |= (1 << dev->devno);
	ehc->i.action |= ATA_EH_SOFTRESET;
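+	/* the device is being reprobed: clear the saved state so the
+	 * mode chosen after reset is treated as a change by ata_set_mode()
+	 * and DUBIOUS_XFER gets set
+	 */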
+	ehc->saved_xfer_mode[dev->devno] = 0;
+	ehc->saved_ncq_enabled &= ~(1 << dev->devno);

	return 1;
}
	ATA_DFLAG_NCQ_OFF	= (1 << 13), /* device limited to non-NCQ mode */
	ATA_DFLAG_SPUNDOWN	= (1 << 14), /* XXX: for spindown_compat */
	ATA_DFLAG_SLEEPING	= (1 << 15), /* device is sleeping */
+	ATA_DFLAG_DUBIOUS_XFER	= (1 << 16), /* data transfer not verified */
	ATA_DFLAG_INIT_MASK	= (1 << 24) - 1,

	ATA_DFLAG_DETACH	= (1 << 24),
	int			tries[ATA_MAX_DEVICES];
	unsigned int		classes[ATA_MAX_DEVICES];
	unsigned int		did_probe_mask;
+	unsigned int		saved_ncq_enabled; /* NCQ bits at EH entry */
+	u8			saved_xfer_mode[ATA_MAX_DEVICES]; /* modes at EH entry */
};
struct ata_acpi_drive