ata_port_printk(ap, KERN_WARNING,
"failed to reset engine (errno=%d)", rc);
- ata_tf_init(ap->device, &tf);
+ ata_tf_init(ap->link.device, &tf);
/* issue the first D2H Register FIS */
msecs = 0;
ahci_stop_engine(ap);
/* clear D2H reception area to properly wait for D2H FIS */
- ata_tf_init(ap->device, &tf);
+ ata_tf_init(ap->link.device, &tf);
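/* 0x80 is ATA_BUSY: the reception area reads busy until the device's real D2H FIS overwrites it */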
tf.command = 0x80;
ata_tf_to_fis(&tf, 0, 0, d2h_fis);
ahci_stop_engine(ap);
- rc = sata_port_hardreset(ap, sata_ehc_deb_timing(&ap->eh_context),
+ rc = sata_port_hardreset(ap, sata_ehc_deb_timing(&ap->link.eh_context),
deadline);
/* vt8251 needs SError cleared for the port to operate */
static void ahci_error_intr(struct ata_port *ap, u32 irq_stat)
{
struct ahci_port_priv *pp = ap->private_data;
- struct ata_eh_info *ehi = &ap->eh_info;
+ struct ata_eh_info *ehi = &ap->link.eh_info;
unsigned int err_mask = 0, action = 0;
struct ata_queued_cmd *qc;
u32 serror;
ehi->serror |= serror;
ehi->action |= action;
- qc = ata_qc_from_tag(ap, ap->active_tag);
+ qc = ata_qc_from_tag(ap, ap->link.active_tag);
if (qc)
qc->err_mask |= err_mask;
else
static void ahci_port_intr(struct ata_port *ap)
{
void __iomem *port_mmio = ap->ioaddr.cmd_addr;
- struct ata_eh_info *ehi = &ap->eh_info;
+ struct ata_eh_info *ehi = &ap->link.eh_info;
struct ahci_port_priv *pp = ap->private_data;
u32 status, qc_active;
int rc, known_irq = 0;
return;
}
- if (ap->sactive)
+ if (ap->link.sactive)
qc_active = readl(port_mmio + PORT_SCR_ACT);
else
qc_active = readl(port_mmio + PORT_CMD_ISSUE);
/* if !NCQ, ignore. No modern ATA device has broken HSM
* implementation for non-NCQ commands.
*/
- if (!ap->sactive)
+ if (!ap->link.sactive)
return;
if (status & PORT_IRQ_D2H_REG_FIS) {
if (!known_irq)
ata_port_printk(ap, KERN_INFO, "spurious interrupt "
"(irq_stat 0x%x active_tag 0x%x sactive 0x%x)\n",
- status, ap->active_tag, ap->sactive);
+ status, ap->link.active_tag, ap->link.sactive);
}
static void ahci_irq_clear(struct ata_port *ap)
dma_enabled = ioread8(ap->ioaddr.bmdma_addr + ATA_DMA_STATUS);
for (i = 0; i < ATA_MAX_DEVICES; i++) {
- struct ata_device *dev = &ap->device[i];
+ struct ata_device *dev = &ap->link.device[i];
if (ata_dev_enabled(dev)) {
/* We don't really care */
dev->pio_mode = XFER_PIO_0;
{
acpi_integer adr = SATA_ADR(ap->port_no, NO_PORT_MULT);
- ap->device->acpi_handle = acpi_get_child(ap->host->acpi_handle, adr);
+ ap->link.device->acpi_handle =
+ acpi_get_child(ap->host->acpi_handle, adr);
}
static void ata_acpi_associate_ide_port(struct ata_port *ap)
max_devices++;
for (i = 0; i < max_devices; i++) {
- struct ata_device *dev = &ap->device[i];
+ struct ata_device *dev = &ap->link.device[i];
dev->acpi_handle = acpi_get_child(ap->acpi_handle, i);
}
/* Buffers for id may need byteswapping? */
in_params[1].type = ACPI_TYPE_BUFFER;
in_params[1].buffer.length = 512;
- in_params[1].buffer.pointer = (u8 *)ap->device[0].id;
+ in_params[1].buffer.pointer = (u8 *)ap->link.device[0].id;
in_params[2].type = ACPI_TYPE_BUFFER;
in_params[2].buffer.length = 512;
- in_params[2].buffer.pointer = (u8 *)ap->device[1].id;
+ in_params[2].buffer.pointer = (u8 *)ap->link.device[1].id;
input.count = 3;
input.pointer = in_params;
static int ata_dev_get_GTF(struct ata_device *dev, struct ata_acpi_gtf **gtf,
void **ptr_to_free)
{
- struct ata_port *ap = dev->ap;
+ struct ata_port *ap = dev->link->ap;
acpi_status status;
struct acpi_buffer output;
union acpi_object *out_obj;
static int taskfile_load_raw(struct ata_device *dev,
const struct ata_acpi_gtf *gtf)
{
- struct ata_port *ap = dev->ap;
+ struct ata_port *ap = dev->link->ap;
struct ata_taskfile tf, rtf;
unsigned int err_mask;
*/
static int ata_acpi_push_id(struct ata_device *dev)
{
- struct ata_port *ap = dev->ap;
+ struct ata_port *ap = dev->link->ap;
int err;
acpi_status status;
struct acpi_object_list input;
/* schedule _GTF */
for (i = 0; i < ATA_MAX_DEVICES; i++)
- ap->device[i].flags |= ATA_DFLAG_ACPI_PENDING;
+ ap->link.device[i].flags |= ATA_DFLAG_ACPI_PENDING;
}
/**
*/
int ata_acpi_on_devcfg(struct ata_device *dev)
{
- struct ata_port *ap = dev->ap;
- struct ata_eh_context *ehc = &ap->eh_context;
+ struct ata_port *ap = dev->link->ap;
+ struct ata_eh_context *ehc = &ap->link.eh_context;
int acpi_sata = ap->flags & ATA_FLAG_ACPI_SATA;
int rc;
if (dev->flags & ATA_DFLAG_PIO) {
tf->protocol = ATA_PROT_PIO;
index = dev->multi_count ? 0 : 8;
- } else if (lba48 && (dev->ap->flags & ATA_FLAG_PIO_LBA48)) {
+ } else if (lba48 && (dev->link->ap->flags & ATA_FLAG_PIO_LBA48)) {
/* Unable to use DMA due to host limitation */
tf->protocol = ATA_PROT_PIO;
index = dev->multi_count ? 0 : 8;
void ata_dev_disable(struct ata_device *dev)
{
if (ata_dev_enabled(dev)) {
- if (ata_msg_drv(dev->ap))
+ if (ata_msg_drv(dev->link->ap))
ata_dev_printk(dev, KERN_WARNING, "disabled\n");
ata_down_xfermask_limit(dev, ATA_DNXFER_FORCE_PIO0 |
ATA_DNXFER_QUIET);
/* see if device passed diags: if master then continue and warn later */
if (err == 0 && device == 0)
/* diagnostic fail : do nothing _YET_ */
- ap->device[device].horkage |= ATA_HORKAGE_DIAGNOSTIC;
+ ap->link.device[device].horkage |= ATA_HORKAGE_DIAGNOSTIC;
else if (err == 1)
/* do nothing */ ;
else if ((device == 0) && (err == 0x81))
ap->ops->dev_select(ap, device);
if (wait) {
- if (can_sleep && ap->device[device].class == ATA_DEV_ATAPI)
+ if (can_sleep && ap->link.device[device].class == ATA_DEV_ATAPI)
msleep(150);
ata_wait_idle(ap);
}
int dma_dir, struct scatterlist *sg,
unsigned int n_elem)
{
- struct ata_port *ap = dev->ap;
+ struct ata_link *link = dev->link;
+ struct ata_port *ap = link->ap;
u8 command = tf->command;
struct ata_queued_cmd *qc;
unsigned int tag, preempted_tag;
qc->dev = dev;
ata_qc_reinit(qc);
- preempted_tag = ap->active_tag;
- preempted_sactive = ap->sactive;
+ preempted_tag = link->active_tag;
+ preempted_sactive = link->sactive;
preempted_qc_active = ap->qc_active;
- ap->active_tag = ATA_TAG_POISON;
- ap->sactive = 0;
+ link->active_tag = ATA_TAG_POISON;
+ link->sactive = 0;
ap->qc_active = 0;
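/* the internal command temporarily owns the link; the saved tags are restored below */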
/* prepare & issue qc */
err_mask = qc->err_mask;
ata_qc_free(qc);
- ap->active_tag = preempted_tag;
- ap->sactive = preempted_sactive;
+ link->active_tag = preempted_tag;
+ link->sactive = preempted_sactive;
ap->qc_active = preempted_qc_active;
/* XXX - Some LLDDs (sata_mv) disable port on command failure.
{
/* Controller doesn't support IORDY. Probably a pointless check
as the caller should know this */
- if (adev->ap->flags & ATA_FLAG_NO_IORDY)
+ if (adev->link->ap->flags & ATA_FLAG_NO_IORDY)
return 0;
/* PIO3 and higher it is mandatory */
if (adev->pio_mode > XFER_PIO_2)
int ata_dev_read_id(struct ata_device *dev, unsigned int *p_class,
unsigned int flags, u16 *id)
{
- struct ata_port *ap = dev->ap;
+ struct ata_port *ap = dev->link->ap;
unsigned int class = *p_class;
struct ata_taskfile tf;
unsigned int err_mask = 0;
static inline u8 ata_dev_knobble(struct ata_device *dev)
{
- return ((dev->ap->cbl == ATA_CBL_SATA) && (!ata_id_is_sata(dev->id)));
+ struct ata_port *ap = dev->link->ap;
+ return ((ap->cbl == ATA_CBL_SATA) && (!ata_id_is_sata(dev->id)));
}
static void ata_dev_config_ncq(struct ata_device *dev,
char *desc, size_t desc_sz)
{
- struct ata_port *ap = dev->ap;
+ struct ata_port *ap = dev->link->ap;
int hdepth = 0, ddepth = ata_id_queue_depth(dev->id);
if (!ata_id_has_ncq(dev->id)) {
*/
int ata_dev_configure(struct ata_device *dev)
{
- struct ata_port *ap = dev->ap;
- struct ata_eh_context *ehc = &ap->eh_context;
+ struct ata_port *ap = dev->link->ap;
+ struct ata_eh_context *ehc = &dev->link->eh_context;
int print_info = ehc->i.flags & ATA_EHI_PRINTINFO;
const u16 *id = dev->id;
unsigned int xfer_mask;
ap->ops->phy_reset(ap);
for (i = 0; i < ATA_MAX_DEVICES; i++) {
- dev = &ap->device[i];
+ dev = &ap->link.device[i];
if (!(ap->flags & ATA_FLAG_DISABLED) &&
dev->class != ATA_DEV_UNKNOWN)
state is undefined. Record the mode */
for (i = 0; i < ATA_MAX_DEVICES; i++)
- ap->device[i].pio_mode = XFER_PIO_0;
+ ap->link.device[i].pio_mode = XFER_PIO_0;
/* read IDENTIFY page and configure devices. We have to do the identify
specific sequence bass-ackwards so that PDIAG- is released by
the slave device */
for (i = ATA_MAX_DEVICES - 1; i >= 0; i--) {
- dev = &ap->device[i];
+ dev = &ap->link.device[i];
if (tries[i])
dev->class = classes[i];
this in the normal order so that the user doesn't get confused */
for(i = 0; i < ATA_MAX_DEVICES; i++) {
- dev = &ap->device[i];
+ dev = &ap->link.device[i];
if (!ata_dev_enabled(dev))
continue;
- ap->eh_context.i.flags |= ATA_EHI_PRINTINFO;
+ ap->link.eh_context.i.flags |= ATA_EHI_PRINTINFO;
rc = ata_dev_configure(dev);
- ap->eh_context.i.flags &= ~ATA_EHI_PRINTINFO;
+ ap->link.eh_context.i.flags &= ~ATA_EHI_PRINTINFO;
if (rc)
goto fail;
}
goto fail;
for (i = 0; i < ATA_MAX_DEVICES; i++)
- if (ata_dev_enabled(&ap->device[i]))
+ if (ata_dev_enabled(&ap->link.device[i]))
return 0;
/* no device present, disable port */
struct ata_device *ata_dev_pair(struct ata_device *adev)
{
- struct ata_port *ap = adev->ap;
- struct ata_device *pair = &ap->device[1 - adev->devno];
+ struct ata_link *link = adev->link;
+ struct ata_device *pair = &link->device[1 - adev->devno];
if (!ata_dev_enabled(pair))
return NULL;
return pair;
void ata_port_disable(struct ata_port *ap)
{
- ap->device[0].class = ATA_DEV_NONE;
- ap->device[1].class = ATA_DEV_NONE;
+ ap->link.device[0].class = ATA_DEV_NONE;
+ ap->link.device[1].class = ATA_DEV_NONE;
ap->flags |= ATA_FLAG_DISABLED;
}
if (rc == 0)
spd = (sstatus >> 4) & 0xf;
else
- spd = ap->sata_spd;
+ spd = ap->link.sata_spd;
- mask = ap->sata_spd_limit;
+ mask = ap->link.sata_spd_limit;
if (mask <= 1)
return -EINVAL;
if (!mask)
return -EINVAL;
- ap->sata_spd_limit = mask;
+ ap->link.sata_spd_limit = mask;
ata_port_printk(ap, KERN_WARNING, "limiting SATA link speed to %s\n",
sata_spd_string(fls(mask)));
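/* e.g. mask 0x3 -> fls(mask) == 2 -> reported as "3.0 Gbps" */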
{
u32 spd, limit;
- if (ap->sata_spd_limit == UINT_MAX)
+ if (ap->link.sata_spd_limit == UINT_MAX)
limit = 0;
else
- limit = fls(ap->sata_spd_limit);
+ limit = fls(ap->link.sata_spd_limit);
spd = (*scontrol >> 4) & 0xf;
*scontrol = (*scontrol & ~0xf0) | ((limit & 0xf) << 4);
* @ap: Port in question
*
* Test whether the spd limit in SControl matches
- * @ap->sata_spd_limit. This function is used to determine
+ * @ap->link.sata_spd_limit. This function is used to determine
* whether hardreset is necessary to apply SATA spd
* configuration.
*
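/*
 * A minimal sketch (assumed helper, not part of this patch) of the check
 * described above, reusing the limit encoding from the SControl code
 * earlier in this section:
 */
static int spd_limit_matches(struct ata_link *link, u32 scontrol)
{
	u32 limit;

	if (link->sata_spd_limit == UINT_MAX)
		limit = 0;
	else
		limit = fls(link->sata_spd_limit);

	return ((scontrol >> 4) & 0xf) == limit;
}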
static int ata_dev_set_mode(struct ata_device *dev)
{
- struct ata_eh_context *ehc = &dev->ap->eh_context;
+ struct ata_eh_context *ehc = &dev->link->eh_context;
unsigned int err_mask;
int rc;
for (i = 0; i < ATA_MAX_DEVICES; i++) {
unsigned int pio_mask, dma_mask;
- dev = &ap->device[i];
+ dev = &ap->link.device[i];
if (!ata_dev_enabled(dev))
continue;
/* step 2: always set host PIO timings */
for (i = 0; i < ATA_MAX_DEVICES; i++) {
- dev = &ap->device[i];
+ dev = &ap->link.device[i];
if (!ata_dev_enabled(dev))
continue;
/* step 3: set host DMA timings */
for (i = 0; i < ATA_MAX_DEVICES; i++) {
- dev = &ap->device[i];
+ dev = &ap->link.device[i];
if (!ata_dev_enabled(dev) || !dev->dma_mode)
continue;
/* step 4: update devices' xfer mode */
for (i = 0; i < ATA_MAX_DEVICES; i++) {
- dev = &ap->device[i];
+ dev = &ap->link.device[i];
/* don't update suspended devices' xfer mode */
if (!ata_dev_enabled(dev))
void ata_bus_reset(struct ata_port *ap)
{
+ struct ata_device *device = ap->link.device;
struct ata_ioports *ioaddr = &ap->ioaddr;
unsigned int slave_possible = ap->flags & ATA_FLAG_SLAVE_POSS;
u8 err;
/*
* determine by signature whether we have ATA or ATAPI devices
*/
- ap->device[0].class = ata_dev_try_classify(ap, 0, &err);
+ device[0].class = ata_dev_try_classify(ap, 0, &err);
if ((slave_possible) && (err != 0x81))
- ap->device[1].class = ata_dev_try_classify(ap, 1, &err);
+ device[1].class = ata_dev_try_classify(ap, 1, &err);
/* is double-select really necessary? */
- if (ap->device[1].class != ATA_DEV_NONE)
+ if (device[1].class != ATA_DEV_NONE)
ap->ops->dev_select(ap, 1);
- if (ap->device[0].class != ATA_DEV_NONE)
+ if (device[0].class != ATA_DEV_NONE)
ap->ops->dev_select(ap, 0);
/* if no devices were detected, disable this port */
- if ((ap->device[0].class == ATA_DEV_NONE) &&
- (ap->device[1].class == ATA_DEV_NONE))
+ if ((device[0].class == ATA_DEV_NONE) &&
+ (device[1].class == ATA_DEV_NONE))
goto err_out;
if (ap->flags & (ATA_FLAG_SATA_RESET | ATA_FLAG_SRST)) {
*/
int ata_std_prereset(struct ata_port *ap, unsigned long deadline)
{
- struct ata_eh_context *ehc = &ap->eh_context;
+ struct ata_eh_context *ehc = &ap->link.eh_context;
const unsigned long *timing = sata_ehc_deb_timing(ehc);
int rc;
int sata_std_hardreset(struct ata_port *ap, unsigned int *class,
unsigned long deadline)
{
- const unsigned long *timing = sata_ehc_deb_timing(&ap->eh_context);
+ const unsigned long *timing = sata_ehc_deb_timing(&ap->link.eh_context);
int rc;
DPRINTK("ENTER\n");
int ata_dev_reread_id(struct ata_device *dev, unsigned int readid_flags)
{
unsigned int class = dev->class;
- u16 *id = (void *)dev->ap->sector_buf;
+ u16 *id = (void *)dev->link->ap->sector_buf;
int rc;
/* read ID data */
* DMA blacklist those ATAPI devices with CDB-intr (and use PIO)
* if the LLDD handles only interrupts in the HSM_ST_LAST state.
*/
- if ((dev->ap->flags & ATA_FLAG_PIO_POLLING) &&
+ if ((dev->link->ap->flags & ATA_FLAG_PIO_POLLING) &&
(dev->flags & ATA_DFLAG_CDB_INTR))
return 1;
return (dev->horkage & ATA_HORKAGE_NODMA) ? 1 : 0;
*/
static void ata_dev_xfermask(struct ata_device *dev)
{
- struct ata_port *ap = dev->ap;
+ struct ata_link *link = dev->link;
+ struct ata_port *ap = link->ap;
struct ata_host *host = ap->host;
unsigned long xfer_mask;
void ata_data_xfer(struct ata_device *adev, unsigned char *buf,
unsigned int buflen, int write_data)
{
- struct ata_port *ap = adev->ap;
+ struct ata_port *ap = adev->link->ap;
unsigned int words = buflen >> 1;
/* Transfer multiple of 2 bytes */
struct ata_queued_cmd *ata_qc_new_init(struct ata_device *dev)
{
- struct ata_port *ap = dev->ap;
+ struct ata_port *ap = dev->link->ap;
struct ata_queued_cmd *qc;
qc = ata_qc_new(ap);
void __ata_qc_complete(struct ata_queued_cmd *qc)
{
struct ata_port *ap = qc->ap;
+ struct ata_link *link = qc->dev->link;
WARN_ON(qc == NULL); /* ata_qc_from_tag _might_ return NULL */
WARN_ON(!(qc->flags & ATA_QCFLAG_ACTIVE));
/* command should be marked inactive atomically with qc completion */
if (qc->tf.protocol == ATA_PROT_NCQ)
- ap->sactive &= ~(1 << qc->tag);
+ link->sactive &= ~(1 << qc->tag);
else
- ap->active_tag = ATA_TAG_POISON;
+ link->active_tag = ATA_TAG_POISON;
/* atapi: mark qc as inactive to prevent the interrupt handler
* from completing the command twice later, before the error handler
void ata_qc_issue(struct ata_queued_cmd *qc)
{
struct ata_port *ap = qc->ap;
+ struct ata_link *link = qc->dev->link;
/* Make sure only one non-NCQ command is outstanding. The
* check is skipped for old EH because it reuses active qc to
* request ATAPI sense.
*/
- WARN_ON(ap->ops->error_handler && ata_tag_valid(ap->active_tag));
+ WARN_ON(ap->ops->error_handler && ata_tag_valid(link->active_tag));
if (qc->tf.protocol == ATA_PROT_NCQ) {
- WARN_ON(ap->sactive & (1 << qc->tag));
- ap->sactive |= 1 << qc->tag;
+ WARN_ON(link->sactive & (1 << qc->tag));
+ link->sactive |= 1 << qc->tag;
} else {
- WARN_ON(ap->sactive);
- ap->active_tag = qc->tag;
+ WARN_ON(link->sactive);
+ link->active_tag = qc->tag;
}
qc->flags |= ATA_QCFLAG_ACTIVE;
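/*
 * Completion-side sketch under the same bookkeeping (single host link per
 * port, as in this patch): NCQ completions are reconciled against
 * link->sactive, non-NCQ completions against link->active_tag, e.g.
 *
 *	if (link->sactive)
 *		qc_active = hw_read_ncq_status(ap);	(hypothetical read)
 *	else
 *		qc = ata_qc_from_tag(ap, link->active_tag);
 */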
inline unsigned int ata_host_intr (struct ata_port *ap,
struct ata_queued_cmd *qc)
{
- struct ata_eh_info *ehi = &ap->eh_info;
+ struct ata_eh_info *ehi = &ap->link.eh_info;
u8 status, host_stat = 0;
VPRINTK("ata%u: protocol %d task_state %d\n",
!(ap->flags & ATA_FLAG_DISABLED)) {
struct ata_queued_cmd *qc;
- qc = ata_qc_from_tag(ap, ap->active_tag);
+ qc = ata_qc_from_tag(ap, ap->link.active_tag);
if (qc && (!(qc->tf.flags & ATA_TFLAG_POLLING)) &&
(qc->flags & ATA_QCFLAG_ACTIVE))
handled |= ata_host_intr(ap, qc);
}
ap->pflags |= ATA_PFLAG_PM_PENDING;
- ap->eh_info.action |= action;
- ap->eh_info.flags |= ehi_flags;
+ ap->link.eh_info.action |= action;
+ ap->link.eh_info.flags |= ehi_flags;
ata_port_schedule_eh(ap);
*/
void ata_dev_init(struct ata_device *dev)
{
- struct ata_port *ap = dev->ap;
+ struct ata_link *link = dev->link;
+ struct ata_port *ap = link->ap;
unsigned long flags;
/* SATA spd limit is bound to the first device */
- ap->sata_spd_limit = ap->hw_sata_spd_limit;
- ap->sata_spd = 0;
+ link->sata_spd_limit = link->hw_sata_spd_limit;
+ link->sata_spd = 0;
/* High bits of dev->flags are used to record warm plug
* requests which occur asynchronously. Synchronize using
ap->host = host;
ap->dev = host->dev;
- ap->hw_sata_spd_limit = UINT_MAX;
- ap->active_tag = ATA_TAG_POISON;
+ ap->link.hw_sata_spd_limit = UINT_MAX;
+ ap->link.active_tag = ATA_TAG_POISON;
ap->last_ctl = 0xFF;
#if defined(ATA_VERBOSE_DEBUG)
ap->cbl = ATA_CBL_NONE;
+ ap->link.ap = ap;
+
for (i = 0; i < ATA_MAX_DEVICES; i++) {
- struct ata_device *dev = &ap->device[i];
- dev->ap = ap;
+ struct ata_device *dev = &ap->link.device[i];
+ dev->link = &ap->link;
dev->devno = i;
ata_dev_init(dev);
}
if (sata_scr_read(ap, SCR_CONTROL, &scontrol) == 0) {
int spd = (scontrol >> 4) & 0xf;
if (spd)
- ap->hw_sata_spd_limit &= (1 << spd) - 1;
+ ap->link.hw_sata_spd_limit &= (1 << spd) - 1;
}
- ap->sata_spd_limit = ap->hw_sata_spd_limit;
+ ap->link.sata_spd_limit = ap->link.hw_sata_spd_limit;
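/* e.g. an SControl SPD field of 2 (3.0 Gbps cap) gives mask 0x3, allowing gen1 and gen2 */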
/* report the secondary IRQ for second channel legacy */
irq_line = host->irq;
/* probe */
if (ap->ops->error_handler) {
- struct ata_eh_info *ehi = &ap->eh_info;
+ struct ata_eh_info *ehi = &ap->link.eh_info;
unsigned long flags;
ata_port_probe(ap);
spin_lock_irqsave(ap->lock, flags);
for (i = 0; i < ATA_MAX_DEVICES; i++)
- ata_dev_disable(&ap->device[i]);
+ ata_dev_disable(&ap->link.device[i]);
spin_unlock_irqrestore(ap->lock, flags);
static unsigned int ata_eh_dev_action(struct ata_device *dev)
{
- struct ata_eh_context *ehc = &dev->ap->eh_context;
+ struct ata_eh_context *ehc = &dev->link->eh_context;
return ehc->i.action | ehc->i.dev_action[dev->devno];
}
ret = EH_HANDLED;
spin_lock_irqsave(ap->lock, flags);
- qc = ata_qc_from_tag(ap, ap->active_tag);
+ qc = ata_qc_from_tag(ap, ap->link.active_tag);
if (qc) {
WARN_ON(qc->scsicmd != cmd);
qc->flags |= ATA_QCFLAG_EH_SCHEDULED;
/* fetch & clear EH info */
spin_lock_irqsave(ap->lock, flags);
- memset(&ap->eh_context, 0, sizeof(ap->eh_context));
- ap->eh_context.i = ap->eh_info;
- memset(&ap->eh_info, 0, sizeof(ap->eh_info));
+ memset(&ap->link.eh_context, 0, sizeof(ap->link.eh_context));
+ ap->link.eh_context.i = ap->link.eh_info;
+ memset(&ap->link.eh_info, 0, sizeof(ap->link.eh_info));
ap->pflags |= ATA_PFLAG_EH_IN_PROGRESS;
ap->pflags &= ~ATA_PFLAG_EH_PENDING;
}
/* this run is complete, make sure EH info is clear */
- memset(&ap->eh_info, 0, sizeof(ap->eh_info));
+ memset(&ap->link.eh_info, 0, sizeof(ap->link.eh_info));
/* Clear host_eh_scheduled while holding ap->lock such
* that if exception occurs after this point but
spin_unlock_irqrestore(ap->lock, flags);
} else {
- WARN_ON(ata_qc_from_tag(ap, ap->active_tag) == NULL);
+ WARN_ON(ata_qc_from_tag(ap, ap->link.active_tag) == NULL);
ap->ops->eng_timeout(ap);
}
{
DPRINTK("ENTER\n");
- ata_qc_timeout(ata_qc_from_tag(ap, ap->active_tag));
+ ata_qc_timeout(ata_qc_from_tag(ap, ap->link.active_tag));
DPRINTK("EXIT\n");
}
*/
static void ata_eh_detach_dev(struct ata_device *dev)
{
- struct ata_port *ap = dev->ap;
+ struct ata_port *ap = dev->link->ap;
unsigned long flags;
ata_dev_disable(dev);
}
/* clear per-dev EH actions */
- ata_eh_clear_action(dev, &ap->eh_info, ATA_EH_PERDEV_MASK);
- ata_eh_clear_action(dev, &ap->eh_context.i, ATA_EH_PERDEV_MASK);
+ ata_eh_clear_action(dev, &dev->link->eh_info, ATA_EH_PERDEV_MASK);
+ ata_eh_clear_action(dev, &dev->link->eh_context.i, ATA_EH_PERDEV_MASK);
spin_unlock_irqrestore(ap->lock, flags);
}
* @action: action about to be performed
*
* Called just before performing EH actions to clear related bits
- * in @ap->eh_info such that eh actions are not unnecessarily
- * repeated.
+ * in @ap->link.eh_info such that eh actions are not
+ * unnecessarily repeated.
*
* LOCKING:
* None.
unsigned int action)
{
unsigned long flags;
- struct ata_eh_info *ehi = &ap->eh_info;
- struct ata_eh_context *ehc = &ap->eh_context;
+ struct ata_eh_info *ehi = &ap->link.eh_info;
+ struct ata_eh_context *ehc = &ap->link.eh_context;
spin_lock_irqsave(ap->lock, flags);
* @action: action just completed
*
* Called right after performing EH actions to clear related bits
- * in @ap->eh_context.
+ * in @ap->link.eh_context.
*
* LOCKING:
* None.
static void ata_eh_done(struct ata_port *ap, struct ata_device *dev,
unsigned int action)
{
+ struct ata_eh_context *ehc = &ap->link.eh_context;
+
/* if reset is complete, clear all reset actions & reset modifier */
if (action & ATA_EH_RESET_MASK) {
action |= ATA_EH_RESET_MASK;
- ap->eh_context.i.flags &= ~ATA_EHI_RESET_MODIFIER_MASK;
+ ehc->i.flags &= ~ATA_EHI_RESET_MODIFIER_MASK;
}
- ata_eh_clear_action(dev, &ap->eh_context.i, action);
+ ata_eh_clear_action(dev, &ehc->i, action);
}
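/*
 * Illustrative bracketing (call sites hypothetical, sketch only): an EH
 * action is wrapped by the two helpers so that bits recorded before the
 * action ran are not acted on again:
 *
 *	ata_eh_about_to_do(ap, NULL, ATA_EH_RESET_MASK);
 *	rc = reset(ap, classes, deadline);	(whichever reset EH chose)
 *	ata_eh_done(ap, NULL, ATA_EH_RESET_MASK);
 */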
/**
static int ata_eh_read_log_10h(struct ata_device *dev,
int *tag, struct ata_taskfile *tf)
{
- u8 *buf = dev->ap->sector_buf;
+ u8 *buf = dev->link->ap->sector_buf;
unsigned int err_mask;
u8 csum;
int i;
{
struct ata_device *dev = qc->dev;
unsigned char *sense_buf = qc->scsicmd->sense_buffer;
- struct ata_port *ap = dev->ap;
+ struct ata_port *ap = dev->link->ap;
struct ata_taskfile tf;
u8 cdb[ATAPI_CDB_LEN];
*/
static void ata_eh_analyze_serror(struct ata_port *ap)
{
- struct ata_eh_context *ehc = &ap->eh_context;
+ struct ata_eh_context *ehc = &ap->link.eh_context;
u32 serror = ehc->i.serror;
unsigned int err_mask = 0, action = 0;
*/
static void ata_eh_analyze_ncq_error(struct ata_port *ap)
{
- struct ata_eh_context *ehc = &ap->eh_context;
- struct ata_device *dev = ap->device;
+ struct ata_eh_context *ehc = &ap->link.eh_context;
+ struct ata_device *dev = ap->link.device;
struct ata_queued_cmd *qc;
struct ata_taskfile tf;
int tag, rc;
return;
/* is it NCQ device error? */
- if (!ap->sactive || !(ehc->i.err_mask & AC_ERR_DEV))
+ if (!ap->link.sactive || !(ehc->i.err_mask & AC_ERR_DEV))
return;
/* has LLDD analyzed already? */
return;
}
- if (!(ap->sactive & (1 << tag))) {
+ if (!(ap->link.sactive & (1 << tag))) {
ata_port_printk(ap, KERN_ERR, "log page 10h reported "
"inactive tag %d\n", tag);
return;
/* speed down? */
if (verdict & ATA_EH_SPDN_SPEED_DOWN) {
/* speed down SATA link speed if possible */
- if (sata_down_spd_limit(dev->ap) == 0) {
+ if (sata_down_spd_limit(dev->link->ap) == 0) {
action |= ATA_EH_HARDRESET;
goto done;
}
* SATA. Consider it only for PATA.
*/
if ((verdict & ATA_EH_SPDN_FALLBACK_TO_PIO) && (dev->spdn_cnt >= 2) &&
- (dev->ap->cbl != ATA_CBL_SATA) &&
+ (dev->link->ap->cbl != ATA_CBL_SATA) &&
(dev->xfer_shift != ATA_SHIFT_PIO)) {
if (ata_down_xfermask_limit(dev, ATA_DNXFER_FORCE_PIO) == 0) {
dev->spdn_cnt = 0;
*/
static void ata_eh_autopsy(struct ata_port *ap)
{
- struct ata_eh_context *ehc = &ap->eh_context;
+ struct ata_eh_context *ehc = &ap->link.eh_context;
unsigned int all_err_mask = 0;
int tag, is_io = 0;
u32 serror;
*/
static void ata_eh_report(struct ata_port *ap)
{
- struct ata_eh_context *ehc = &ap->eh_context;
+ struct ata_eh_context *ehc = &ap->link.eh_context;
const char *frozen, *desc;
int tag, nr_failed = 0;
if (ehc->i.dev) {
ata_dev_printk(ehc->i.dev, KERN_ERR, "exception Emask 0x%x "
"SAct 0x%x SErr 0x%x action 0x%x%s\n",
- ehc->i.err_mask, ap->sactive, ehc->i.serror,
- ehc->i.action, frozen);
+ ehc->i.err_mask, ap->link.sactive,
+ ehc->i.serror, ehc->i.action, frozen);
if (desc)
ata_dev_printk(ehc->i.dev, KERN_ERR, "%s\n", desc);
} else {
ata_port_printk(ap, KERN_ERR, "exception Emask 0x%x "
"SAct 0x%x SErr 0x%x action 0x%x%s\n",
- ehc->i.err_mask, ap->sactive, ehc->i.serror,
- ehc->i.action, frozen);
+ ehc->i.err_mask, ap->link.sactive,
+ ehc->i.serror, ehc->i.action, frozen);
if (desc)
ata_port_printk(ap, KERN_ERR, "%s\n", desc);
}
ata_prereset_fn_t prereset, ata_reset_fn_t softreset,
ata_reset_fn_t hardreset, ata_postreset_fn_t postreset)
{
- struct ata_eh_context *ehc = &ap->eh_context;
+ struct ata_eh_context *ehc = &ap->link.eh_context;
unsigned int *classes = ehc->classes;
int verbose = !(ehc->i.flags & ATA_EHI_QUIET);
int try = 0;
if (rc == -ENOENT) {
ata_port_printk(ap, KERN_DEBUG,
"port disabled. ignoring.\n");
- ap->eh_context.i.action &= ~ATA_EH_RESET_MASK;
+ ehc->i.action &= ~ATA_EH_RESET_MASK;
for (i = 0; i < ATA_MAX_DEVICES; i++)
classes[i] = ATA_DEV_NONE;
* controller state is undefined. Record the mode.
*/
for (i = 0; i < ATA_MAX_DEVICES; i++)
- ap->device[i].pio_mode = XFER_PIO_0;
+ ap->link.device[i].pio_mode = XFER_PIO_0;
/* record current link speed */
if (sata_scr_read(ap, SCR_STATUS, &sstatus) == 0)
- ap->sata_spd = (sstatus >> 4) & 0xf;
+ ap->link.sata_spd = (sstatus >> 4) & 0xf;
if (postreset)
postreset(ap, classes);
static int ata_eh_revalidate_and_attach(struct ata_port *ap,
struct ata_device **r_failed_dev)
{
- struct ata_eh_context *ehc = &ap->eh_context;
+ struct ata_eh_context *ehc = &ap->link.eh_context;
struct ata_device *dev;
unsigned int new_mask = 0;
unsigned long flags;
for (i = ATA_MAX_DEVICES - 1; i >= 0; i--) {
unsigned int action, readid_flags = 0;
- dev = &ap->device[i];
+ dev = &ap->link.device[i];
action = ata_eh_dev_action(dev);
if (ehc->i.flags & ATA_EHI_DID_RESET)
* device detection messages backwards.
*/
for (i = 0; i < ATA_MAX_DEVICES; i++) {
- dev = &ap->device[i];
+ dev = &ap->link.device[i];
if (!(new_mask & (1 << i)))
continue;
int i, cnt = 0;
for (i = 0; i < ATA_MAX_DEVICES; i++)
- if (ata_dev_enabled(&ap->device[i]))
+ if (ata_dev_enabled(&ap->link.device[i]))
cnt++;
return cnt;
}
int i, cnt = 0;
for (i = 0; i < ATA_MAX_DEVICES; i++)
- if (ap->device[i].class == ATA_DEV_UNKNOWN)
+ if (ap->link.device[i].class == ATA_DEV_UNKNOWN)
cnt++;
return cnt;
}
static int ata_eh_skip_recovery(struct ata_port *ap)
{
- struct ata_eh_context *ehc = &ap->eh_context;
+ struct ata_eh_context *ehc = &ap->link.eh_context;
int i;
/* thaw frozen port, resume link and recover failed devices */
/* skip if class codes for all vacant slots are ATA_DEV_NONE */
for (i = 0; i < ATA_MAX_DEVICES; i++) {
- struct ata_device *dev = &ap->device[i];
+ struct ata_device *dev = &ap->link.device[i];
if (dev->class == ATA_DEV_UNKNOWN &&
ehc->classes[dev->devno] != ATA_DEV_NONE)
static void ata_eh_handle_dev_fail(struct ata_device *dev, int err)
{
- struct ata_port *ap = dev->ap;
- struct ata_eh_context *ehc = &ap->eh_context;
+ struct ata_port *ap = dev->link->ap;
+ struct ata_eh_context *ehc = &dev->link->eh_context;
ehc->tries[dev->devno]--;
ata_reset_fn_t softreset, ata_reset_fn_t hardreset,
ata_postreset_fn_t postreset)
{
- struct ata_eh_context *ehc = &ap->eh_context;
+ struct ata_eh_context *ehc = &ap->link.eh_context;
struct ata_device *dev;
int i, rc;
/* prep for recovery */
for (i = 0; i < ATA_MAX_DEVICES; i++) {
- dev = &ap->device[i];
+ dev = &ap->link.device[i];
ehc->tries[dev->devno] = ATA_EH_DEV_TRIES;
out:
if (rc) {
for (i = 0; i < ATA_MAX_DEVICES; i++)
- ata_dev_disable(&ap->device[i]);
+ ata_dev_disable(&ap->link.device[i]);
}
DPRINTK("EXIT, rc=%d\n", rc);
case ATA_CMD_SET_FEATURES:
if ((qc->tf.feature == SETFEATURES_WC_ON) ||
(qc->tf.feature == SETFEATURES_WC_OFF)) {
- ap->eh_info.action |= ATA_EH_REVALIDATE;
+ ap->link.eh_info.action |= ATA_EH_REVALIDATE;
ata_port_schedule_eh(ap);
}
break;
case ATA_CMD_INIT_DEV_PARAMS: /* CHS translation changed */
case ATA_CMD_SET_MULTI: /* multi_count changed */
- ap->eh_info.action |= ATA_EH_REVALIDATE;
+ ap->link.eh_info.action |= ATA_EH_REVALIDATE;
ata_port_schedule_eh(ap);
break;
}
*/
static int ata_scmd_need_defer(struct ata_device *dev, int is_io)
{
- struct ata_port *ap = dev->ap;
+ struct ata_link *link = dev->link;
int is_ncq = is_io && ata_ncq_enabled(dev);
if (is_ncq) {
- if (!ata_tag_valid(ap->active_tag))
+ if (!ata_tag_valid(link->active_tag))
return 0;
} else {
- if (!ata_tag_valid(ap->active_tag) && !ap->sactive)
+ if (!ata_tag_valid(link->active_tag) && !link->sactive)
return 0;
}
return 1;
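/*
 * In short: an NCQ command may queue alongside other NCQ commands but must
 * defer while a non-NCQ command (valid active_tag) is in flight; a non-NCQ
 * command must defer until the link is fully idle.
 */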
static struct ata_device * ata_find_dev(struct ata_port *ap, int id)
{
if (likely(id < ATA_MAX_DEVICES))
- return &ap->device[id];
+ return &ap->link.device[id];
return NULL;
}
if (unlikely(!ata_dev_enabled(dev)))
return 0;
- if (!atapi_enabled || (dev->ap->flags & ATA_FLAG_NO_ATAPI)) {
+ if (!atapi_enabled || (dev->link->ap->flags & ATA_FLAG_NO_ATAPI)) {
if (unlikely(dev->class == ATA_DEV_ATAPI)) {
ata_dev_printk(dev, KERN_WARNING,
"WARNING: ATAPI is %s, device ignored.\n",
for (i = 0; i < ATA_MAX_DEVICES; i++) {
struct scsi_device *sdev;
- dev = &ap->device[i];
+ dev = &ap->link.device[i];
if (!ata_dev_enabled(dev) || dev->sdev)
continue;
* whether all devices are attached.
*/
for (i = 0; i < ATA_MAX_DEVICES; i++) {
- dev = &ap->device[i];
+ dev = &ap->link.device[i];
if (ata_dev_enabled(dev) && !dev->sdev)
break;
}
*/
static void ata_scsi_remove_dev(struct ata_device *dev)
{
- struct ata_port *ap = dev->ap;
+ struct ata_port *ap = dev->link->ap;
struct scsi_device *sdev;
unsigned long flags;
/* unplug detached devices */
for (i = 0; i < ATA_MAX_DEVICES; i++) {
- struct ata_device *dev = &ap->device[i];
+ struct ata_device *dev = &ap->link.device[i];
unsigned long flags;
if (!(dev->flags & ATA_DFLAG_DETACHED))
unsigned int id, unsigned int lun)
{
struct ata_port *ap = ata_shost_to_port(shost);
+ struct ata_eh_info *ehi = &ap->link.eh_info;
unsigned long flags;
int rc = 0;
spin_lock_irqsave(ap->lock, flags);
if (id == SCAN_WILD_CARD) {
- ap->eh_info.probe_mask |= (1 << ATA_MAX_DEVICES) - 1;
- ap->eh_info.action |= ATA_EH_SOFTRESET;
+ ehi->probe_mask |= (1 << ATA_MAX_DEVICES) - 1;
+ ehi->action |= ATA_EH_SOFTRESET;
} else {
struct ata_device *dev = ata_find_dev(ap, id);
if (dev) {
- ap->eh_info.probe_mask |= 1 << dev->devno;
- ap->eh_info.action |= ATA_EH_SOFTRESET;
- ap->eh_info.flags |= ATA_EHI_RESUME_LINK;
+ ehi->probe_mask |= 1 << dev->devno;
+ ehi->action |= ATA_EH_SOFTRESET;
+ ehi->flags |= ATA_EHI_RESUME_LINK;
} else
rc = -EINVAL;
}
spin_lock_irqsave(ap->lock, flags);
for (i = 0; i < ATA_MAX_DEVICES; i++) {
- struct ata_device *dev = &ap->device[i];
+ struct ata_device *dev = &ap->link.device[i];
struct scsi_device *sdev = dev->sdev;
if (!ata_dev_enabled(dev) || !sdev)
int ata_sas_slave_configure(struct scsi_device *sdev, struct ata_port *ap)
{
ata_scsi_sdev_config(sdev);
- ata_scsi_dev_config(sdev, ap->device);
+ ata_scsi_dev_config(sdev, ap->link.device);
return 0;
}
EXPORT_SYMBOL_GPL(ata_sas_slave_configure);
ata_scsi_dump_cdb(ap, cmd);
- if (likely(ata_scsi_dev_enabled(ap->device)))
- rc = __ata_scsi_queuecmd(cmd, done, ap->device);
+ if (likely(ata_scsi_dev_enabled(ap->link.device)))
+ rc = __ata_scsi_queuecmd(cmd, done, ap->link.device);
else {
cmd->result = (DID_BAD_TARGET << 16);
done(cmd);
unsigned long flags;
int thaw = 0;
- qc = __ata_qc_from_tag(ap, ap->active_tag);
+ qc = __ata_qc_from_tag(ap, ap->link.active_tag);
if (qc && !(qc->flags & ATA_QCFLAG_FAILED))
qc = NULL;
/* Filter out DMA modes if the device has been configured by
the BIOS as PIO only */
- if (adev->ap->ioaddr.bmdma_addr == 0)
+ if (adev->link->ap->ioaddr.bmdma_addr == 0)
xfer_mask &= ~(ATA_MASK_MWDMA | ATA_MASK_UDMA);
return xfer_mask;
}
{
struct it821x_dev *itdev = ap->private_data;
if (itdev && device != itdev->last_device) {
- struct ata_device *adev = &ap->device[device];
+ struct ata_device *adev = &ap->link.device[device];
it821x_program(ap, adev, itdev->pio[adev->devno]);
itdev->last_device = device;
}
int i;
for (i = 0; i < ATA_MAX_DEVICES; i++) {
- struct ata_device *dev = &ap->device[i];
+ struct ata_device *dev = &ap->link.device[i];
if (ata_dev_enabled(dev)) {
/* We don't really care */
dev->pio_mode = XFER_PIO_0;
int i;
for (i = 0; i < ATA_MAX_DEVICES; i++) {
- struct ata_device *dev = &ap->device[i];
+ struct ata_device *dev = &ap->link.device[i];
if (ata_dev_enabled(dev)) {
ata_dev_printk(dev, KERN_INFO, "configured for PIO0\n");
dev->pio_mode = XFER_PIO_0;
unsigned int i;
unsigned int words = buflen >> 1;
u16 *buf16 = (u16 *) buf;
- struct ata_port *ap = adev->ap;
+ struct ata_port *ap = adev->link->ap;
void __iomem *mmio = ap->ioaddr.data_addr;
struct ixp4xx_pata_data *data = ap->host->dev->platform_data;
int i;
for (i = 0; i < ATA_MAX_DEVICES; i++) {
- struct ata_device *dev = &ap->device[i];
+ struct ata_device *dev = &ap->link.device[i];
if (ata_dev_enabled(dev)) {
ata_dev_printk(dev, KERN_INFO, "configured for PIO\n");
dev->pio_mode = XFER_PIO_0;
static void pdc_data_xfer_vlb(struct ata_device *adev, unsigned char *buf, unsigned int buflen, int write_data)
{
- struct ata_port *ap = adev->ap;
+ struct ata_port *ap = adev->link->ap;
int slop = buflen & 3;
unsigned long flags;
pci_read_config_byte(pdev, 0x43, &r);
r &= (0x0F << nybble);
- r |= (optidma_make_bits43(&ap->device[0]) +
- (optidma_make_bits43(&ap->device[0]) << 2)) << nybble;
+ r |= (optidma_make_bits43(&ap->link.device[0]) +
+ (optidma_make_bits43(&ap->link.device[0]) << 2)) << nybble;
pci_write_config_byte(pdev, 0x43, r);
}
return rc;
static int pcmcia_set_mode(struct ata_port *ap, struct ata_device **r_failed_dev)
{
- struct ata_device *master = &ap->device[0];
- struct ata_device *slave = &ap->device[1];
+ struct ata_device *master = &ap->link.device[0];
+ struct ata_device *slave = &ap->link.device[1];
if (!ata_dev_enabled(master) || !ata_dev_enabled(slave))
return ata_do_set_mode(ap, r_failed_dev);
return i;
for (i = 0; i < ATA_MAX_DEVICES; i++) {
- struct ata_device *dev = &ap->device[i];
+ struct ata_device *dev = &ap->link.device[i];
if (ata_dev_enabled(dev)) {
int i;
for (i = 0; i < ATA_MAX_DEVICES; i++) {
- struct ata_device *dev = &ap->device[i];
+ struct ata_device *dev = &ap->link.device[i];
if (ata_dev_enabled(dev)) {
/* We don't really care */
static void qdi_data_xfer(struct ata_device *adev, unsigned char *buf, unsigned int buflen, int write_data)
{
- struct ata_port *ap = adev->ap;
+ struct ata_port *ap = adev->link->ap;
int slop = buflen & 3;
if (ata_id_has_dword_io(adev->id)) {
int i;
for (i = 0; i < ATA_MAX_DEVICES; i++) {
- struct ata_device *dev = &ap->device[i];
+ struct ata_device *dev = &ap->link.device[i];
if (ata_dev_enabled(dev)) {
/* We don't really care */
dev->pio_mode = XFER_PIO_0;
static void scc_data_xfer (struct ata_device *adev, unsigned char *buf,
unsigned int buflen, int write_data)
{
- struct ata_port *ap = adev->ap;
+ struct ata_port *ap = adev->link->ap;
unsigned int words = buflen >> 1;
unsigned int i;
u16 *buf16 = (u16 *) buf;
static int sis_old_port_base(struct ata_device *adev)
{
- return 0x40 + (4 * adev->ap->port_no) + (2 * adev->devno);
+ return 0x40 + (4 * adev->link->ap->port_no) + (2 * adev->devno);
}
/**
static void winbond_data_xfer(struct ata_device *adev, unsigned char *buf, unsigned int buflen, int write_data)
{
- struct ata_port *ap = adev->ap;
+ struct ata_port *ap = adev->link->ap;
int slop = buflen & 3;
if (ata_id_has_dword_io(adev->id)) {
pp = ap->private_data;
if (!pp || pp->state != adma_state_pkt)
continue;
- qc = ata_qc_from_tag(ap, ap->active_tag);
+ qc = ata_qc_from_tag(ap, ap->link.active_tag);
if (qc && (!(qc->tf.flags & ATA_TFLAG_POLLING))) {
if (status & aPERR)
qc->err_mask |= AC_ERR_HOST_BUS;
if (!qc->err_mask)
ata_qc_complete(qc);
else {
- struct ata_eh_info *ehi = &ap->eh_info;
+ struct ata_eh_info *ehi = &ap->link.eh_info;
ata_ehi_clear_desc(ehi);
ata_ehi_push_desc(ehi,
"ADMA-status 0x%02X", status);
struct adma_port_priv *pp = ap->private_data;
if (!pp || pp->state != adma_state_mmio)
continue;
- qc = ata_qc_from_tag(ap, ap->active_tag);
+ qc = ata_qc_from_tag(ap, ap->link.active_tag);
if (qc && (!(qc->tf.flags & ATA_TFLAG_POLLING))) {
/* check main status, clearing INTRQ */
if (!qc->err_mask)
ata_qc_complete(qc);
else {
- struct ata_eh_info *ehi = &ap->eh_info;
+ struct ata_eh_info *ehi =
+ &ap->link.eh_info;
ata_ehi_clear_desc(ehi);
ata_ehi_push_desc(ehi,
"status 0x%02X", status);
static void inic_host_intr(struct ata_port *ap)
{
void __iomem *port_base = inic_port_base(ap);
- struct ata_eh_info *ehi = &ap->eh_info;
+ struct ata_eh_info *ehi = &ap->link.eh_info;
u8 irq_stat;
/* fetch and clear irq */
writeb(irq_stat, port_base + PORT_IRQ_STAT);
if (likely(!(irq_stat & PIRQ_ERR))) {
- struct ata_queued_cmd *qc = ata_qc_from_tag(ap, ap->active_tag);
+ struct ata_queued_cmd *qc =
+ ata_qc_from_tag(ap, ap->link.active_tag);
if (unlikely(!qc || (qc->tf.flags & ATA_TFLAG_POLLING))) {
ata_chk_status(ap); /* clear ATA interrupt */
{
void __iomem *port_base = inic_port_base(ap);
void __iomem *idma_ctl = port_base + PORT_IDMA_CTL;
- const unsigned long *timing = sata_ehc_deb_timing(&ap->eh_context);
+ const unsigned long *timing = sata_ehc_deb_timing(&ap->link.eh_context);
u16 val;
int rc;
struct mv_host_priv *hpriv = ap->host->private_data;
unsigned int edma_enabled = (pp->pp_flags & MV_PP_FLAG_EDMA_EN);
unsigned int action = 0, err_mask = 0;
- struct ata_eh_info *ehi = &ap->eh_info;
+ struct ata_eh_info *ehi = &ap->link.eh_info;
ata_ehi_clear_desc(ehi);
return;
/* get active ATA command */
- qc = ata_qc_from_tag(ap, ap->active_tag);
+ qc = ata_qc_from_tag(ap, ap->link.active_tag);
if (unlikely(!qc)) /* no active tag */
return;
if (qc->tf.flags & ATA_TFLAG_POLLING) /* polling; we don't own qc */
/* 50xx: get active ATA command */
if (IS_GEN_I(hpriv))
- tag = ap->active_tag;
+ tag = ap->link.active_tag;
/* Gen II/IIE: get active ATA command via tag, to enable
* support for queueing. this works transparently for
if (unlikely(have_err_bits)) {
struct ata_queued_cmd *qc;
- qc = ata_qc_from_tag(ap, ap->active_tag);
+ qc = ata_qc_from_tag(ap, ap->link.active_tag);
if (qc && (qc->tf.flags & ATA_TFLAG_POLLING))
continue;
for (i = 0; i < host->n_ports; i++) {
ap = host->ports[i];
if (!ata_port_offline(ap)) {
- ehi = &ap->eh_info;
+ ehi = &ap->link.eh_info;
ata_ehi_clear_desc(ehi);
if (!printed++)
ata_ehi_push_desc(ehi,
"PCI err cause 0x%08x", err_cause);
err_mask = AC_ERR_HOST_BUS;
ehi->action = ATA_EH_HARDRESET;
- qc = ata_qc_from_tag(ap, ap->active_tag);
+ qc = ata_qc_from_tag(ap, ap->link.active_tag);
if (qc)
qc->err_mask |= err_mask;
else
static int mv_prereset(struct ata_port *ap, unsigned long deadline)
{
struct mv_port_priv *pp = ap->private_data;
- struct ata_eh_context *ehc = &ap->eh_context;
+ struct ata_eh_context *ehc = &ap->link.eh_context;
int rc;
rc = mv_stop_dma(ap);
/* Not a proper libata device, ignore */
return rc;
- if (ap->device[sdev->id].class == ATA_DEV_ATAPI) {
+ if (ap->link.device[sdev->id].class == ATA_DEV_ATAPI) {
/*
* NVIDIA reports that ADMA mode does not support ATAPI commands.
* Therefore ATAPI commands are sent through the legacy interface.
flags & (NV_CPB_RESP_ATA_ERR |
NV_CPB_RESP_CMD_ERR |
NV_CPB_RESP_CPB_ERR)))) {
- struct ata_eh_info *ehi = &ap->eh_info;
+ struct ata_eh_info *ehi = &ap->link.eh_info;
int freeze = 0;
ata_ehi_clear_desc(ehi);
DPRINTK("Completing qc from tag %d\n",cpb_num);
ata_qc_complete(qc);
} else {
- struct ata_eh_info *ehi = &ap->eh_info;
+ struct ata_eh_info *ehi = &ap->link.eh_info;
/* Notifier bits set without a command may indicate the drive
is misbehaving. Raise host state machine violation on this
condition. */
static int nv_host_intr(struct ata_port *ap, u8 irq_stat)
{
- struct ata_queued_cmd *qc = ata_qc_from_tag(ap, ap->active_tag);
+ struct ata_queued_cmd *qc = ata_qc_from_tag(ap, ap->link.active_tag);
/* freeze if hotplugged */
if (unlikely(irq_stat & (NV_INT_ADDED | NV_INT_REMOVED))) {
if (pp->flags & NV_ADMA_PORT_REGISTER_MODE) {
u8 irq_stat = readb(host->iomap[NV_MMIO_BAR] + NV_INT_STATUS_CK804)
>> (NV_INT_PORT_SHIFT * i);
- if(ata_tag_valid(ap->active_tag))
+ if(ata_tag_valid(ap->link.active_tag))
/** NV_INT_DEV indication seems unreliable at times
at least in ADMA mode. Force it on always when a
command is active, to prevent losing interrupts. */
NV_ADMA_STAT_HOTUNPLUG |
NV_ADMA_STAT_TIMEOUT |
NV_ADMA_STAT_SERROR))) {
- struct ata_eh_info *ehi = &ap->eh_info;
+ struct ata_eh_info *ehi = &ap->link.eh_info;
ata_ehi_clear_desc(ehi);
__ata_ehi_push_desc(ehi, "ADMA status 0x%08x: ", status );
u32 check_commands;
int pos, error = 0;
- if(ata_tag_valid(ap->active_tag))
- check_commands = 1 << ap->active_tag;
+ if(ata_tag_valid(ap->link.active_tag))
+ check_commands = 1 << ap->link.active_tag;
else
- check_commands = ap->sactive;
+ check_commands = ap->link.sactive;
/** Check CPBs for completed commands */
while ((pos = ffs(check_commands)) && !error) {
!(ap->flags & ATA_FLAG_DISABLED)) {
struct ata_queued_cmd *qc;
- qc = ata_qc_from_tag(ap, ap->active_tag);
+ qc = ata_qc_from_tag(ap, ap->link.active_tag);
if (qc && (!(qc->tf.flags & ATA_TFLAG_POLLING)))
handled += ata_host_intr(ap, qc);
else
int i;
u16 tmp;
- if(ata_tag_valid(ap->active_tag) || ap->sactive) {
+ if(ata_tag_valid(ap->link.active_tag) || ap->link.sactive) {
u32 notifier = readl(mmio + NV_ADMA_NOTIFIER);
u32 notifier_error = readl(mmio + NV_ADMA_NOTIFIER_ERROR);
u32 gen_ctl = readl(pp->gen_block + NV_ADMA_GEN_CTL);
for( i=0;i<NV_ADMA_MAX_CPBS;i++) {
struct nv_adma_cpb *cpb = &pp->cpb[i];
- if( (ata_tag_valid(ap->active_tag) && i == ap->active_tag) ||
- ap->sactive & (1 << i) )
+ if( (ata_tag_valid(ap->link.active_tag) && i == ap->link.active_tag) ||
+ ap->link.sactive & (1 << i) )
ata_port_printk(ap, KERN_ERR,
"CPB %d: ctl_flags 0x%x, resp_flags 0x%x\n",
i, cpb->ctl_flags, cpb->resp_flags);
static void pdc_error_intr(struct ata_port *ap, struct ata_queued_cmd *qc,
u32 port_status, u32 err_mask)
{
- struct ata_eh_info *ehi = &ap->eh_info;
+ struct ata_eh_info *ehi = &ap->link.eh_info;
unsigned int ac_err_mask = 0;
ata_ehi_clear_desc(ehi);
tmp = hotplug_status & (0x11 << ata_no);
if (tmp && ap &&
!(ap->flags & ATA_FLAG_DISABLED)) {
- struct ata_eh_info *ehi = &ap->eh_info;
+ struct ata_eh_info *ehi = &ap->link.eh_info;
ata_ehi_clear_desc(ehi);
ata_ehi_hotplugged(ehi);
ata_ehi_push_desc(ehi, "hotplug_status %#x", tmp);
!(ap->flags & ATA_FLAG_DISABLED)) {
struct ata_queued_cmd *qc;
- qc = ata_qc_from_tag(ap, ap->active_tag);
+ qc = ata_qc_from_tag(ap, ap->link.active_tag);
if (qc && (!(qc->tf.flags & ATA_TFLAG_POLLING)))
handled += pdc_host_intr(ap, qc);
}
struct qs_port_priv *pp = ap->private_data;
if (!pp || pp->state != qs_state_pkt)
continue;
- qc = ata_qc_from_tag(ap, ap->active_tag);
+ qc = ata_qc_from_tag(ap, ap->link.active_tag);
if (qc && (!(qc->tf.flags & ATA_TFLAG_POLLING))) {
switch (sHST) {
case 0: /* successful CPB */
struct qs_port_priv *pp = ap->private_data;
if (!pp || pp->state != qs_state_mmio)
continue;
- qc = ata_qc_from_tag(ap, ap->active_tag);
+ qc = ata_qc_from_tag(ap, ap->link.active_tag);
if (qc && (!(qc->tf.flags & ATA_TFLAG_POLLING))) {
/* check main status, clearing INTRQ */
return rc;
for (i = 0; i < 2; i++) {
- dev = &ap->device[i];
+ dev = &ap->link.device[i];
if (!ata_dev_enabled(dev))
dev_mode[i] = 0; /* PIO0/1/2 */
else if (dev->flags & ATA_DFLAG_PIO)
static void sil_host_intr(struct ata_port *ap, u32 bmdma2)
{
- struct ata_eh_info *ehi = &ap->eh_info;
- struct ata_queued_cmd *qc = ata_qc_from_tag(ap, ap->active_tag);
+ struct ata_eh_info *ehi = &ap->link.eh_info;
+ struct ata_queued_cmd *qc = ata_qc_from_tag(ap, ap->link.active_tag);
u8 status;
if (unlikely(bmdma2 & SIL_DMA_SATA_IRQ)) {
* repeat probing needlessly.
*/
if (!(ap->pflags & ATA_PFLAG_FROZEN)) {
- ata_ehi_hotplugged(&ap->eh_info);
- ap->eh_info.serror |= serror;
+ ata_ehi_hotplugged(&ap->link.eh_info);
+ ap->link.eh_info.serror |= serror;
}
goto freeze;
*/
static void sil_dev_config(struct ata_device *dev)
{
- struct ata_port *ap = dev->ap;
- int print_info = ap->eh_context.i.flags & ATA_EHI_PRINTINFO;
+ struct ata_port *ap = dev->link->ap;
+ int print_info = ap->link.eh_context.i.flags & ATA_EHI_PRINTINFO;
unsigned int n, quirks = 0;
unsigned char model_num[ATA_ID_PROD_LEN + 1];
static void sil24_dev_config(struct ata_device *dev)
{
- void __iomem *port = dev->ap->ioaddr.cmd_addr;
+ void __iomem *port = dev->link->ap->ioaddr.cmd_addr;
if (dev->cdb_len == 16)
writel(PORT_CS_CDB16, port + PORT_CTRL_STAT);
if (time_after(deadline, jiffies))
timeout_msec = jiffies_to_msecs(deadline - jiffies);
- ata_tf_init(ap->device, &tf); /* doesn't really matter */
+ ata_tf_init(ap->link.device, &tf); /* doesn't really matter */
rc = sil24_exec_polled_cmd(ap, pmp, &tf, 0, PRB_CTRL_SRST,
timeout_msec);
if (rc == -EBUSY) {
{
void __iomem *port = ap->ioaddr.cmd_addr;
struct sil24_port_priv *pp = ap->private_data;
- struct ata_eh_info *ehi = &ap->eh_info;
+ struct ata_eh_info *ehi = &ap->link.eh_info;
int freeze = 0;
u32 irq_stat;
}
/* record error info */
- qc = ata_qc_from_tag(ap, ap->active_tag);
+ qc = ata_qc_from_tag(ap, ap->link.active_tag);
if (qc) {
sil24_read_tf(ap, qc->tag, &pp->tf);
qc->err_mask |= err_mask;
if (rc > 0)
return;
if (rc < 0) {
- struct ata_eh_info *ehi = &ap->eh_info;
+ struct ata_eh_info *ehi = &ap->link.eh_info;
ehi->err_mask |= AC_ERR_HSM;
ehi->action |= ATA_EH_SOFTRESET;
ata_port_freeze(ap);
if (!(ap->flags & SIL24_FLAG_PCIX_IRQ_WOC) && ata_ratelimit())
ata_port_printk(ap, KERN_INFO, "spurious interrupt "
"(slot_stat 0x%x active_tag %d sactive 0x%x)\n",
- slot_stat, ap->active_tag, ap->sactive);
+ slot_stat, ap->link.active_tag, ap->link.sactive);
}
static irqreturn_t sil24_interrupt(int irq, void *dev_instance)
static void sil24_error_handler(struct ata_port *ap)
{
- struct ata_eh_context *ehc = &ap->eh_context;
+ struct ata_eh_context *ehc = &ap->link.eh_context;
if (sil24_init_port(ap)) {
ata_eh_freeze_port(ap);
!(ap->flags & ATA_FLAG_DISABLED)) {
struct ata_queued_cmd *qc;
- qc = ata_qc_from_tag(ap, ap->active_tag);
+ qc = ata_qc_from_tag(ap, ap->link.active_tag);
if (qc && (!(qc->tf.flags & ATA_TFLAG_POLLING)))
handled += pdc20621_host_intr(ap, qc, (i > 4),
mmio_base);
spin_lock_irqsave(&host->lock, flags);
- qc = ata_qc_from_tag(ap, ap->active_tag);
+ qc = ata_qc_from_tag(ap, ap->link.active_tag);
switch (qc->tf.protocol) {
case ATA_PROT_DMA:
*/
static int vt6420_prereset(struct ata_port *ap, unsigned long deadline)
{
- struct ata_eh_context *ehc = &ap->eh_context;
+ struct ata_eh_context *ehc = &ap->link.eh_context;
unsigned long timeout = jiffies + (HZ * 5);
u32 sstatus, scontrol;
int online;
return;
}
- qc = ata_qc_from_tag(ap, ap->active_tag);
+ qc = ata_qc_from_tag(ap, ap->link.active_tag);
if (qc && likely(!(qc->tf.flags & ATA_TFLAG_POLLING)))
handled = ata_host_intr(ap, qc);
switch(res->cfgte.proto) {
case IPR_PROTO_SATA:
case IPR_PROTO_SAS_STP:
- ap->device[0].class = ATA_DEV_ATA;
+ ap->link.device[0].class = ATA_DEV_ATA;
break;
case IPR_PROTO_SATA_ATAPI:
case IPR_PROTO_SAS_STP_ATAPI:
- ap->device[0].class = ATA_DEV_ATAPI;
+ ap->link.device[0].class = ATA_DEV_ATAPI;
break;
default:
- ap->device[0].class = ATA_DEV_UNKNOWN;
+ ap->link.device[0].class = ATA_DEV_UNKNOWN;
ap->ops->port_disable(ap);
break;
};
switch (dev->sata_dev.command_set) {
case ATA_COMMAND_SET:
SAS_DPRINTK("%s: Found ATA device.\n", __FUNCTION__);
- ap->device[0].class = ATA_DEV_ATA;
+ ap->link.device[0].class = ATA_DEV_ATA;
break;
case ATAPI_COMMAND_SET:
SAS_DPRINTK("%s: Found ATAPI device.\n", __FUNCTION__);
- ap->device[0].class = ATA_DEV_ATAPI;
+ ap->link.device[0].class = ATA_DEV_ATAPI;
break;
default:
SAS_DPRINTK("%s: Unknown SATA command set: %d.\n",
__FUNCTION__,
dev->sata_dev.command_set);
- ap->device[0].class = ATA_DEV_UNKNOWN;
+ ap->link.device[0].class = ATA_DEV_UNKNOWN;
break;
}
dev->sata_dev.serror = val;
break;
case SCR_ACTIVE:
- dev->sata_dev.ap->sactive = val;
+ dev->sata_dev.ap->link.sactive = val;
break;
default:
return -EINVAL;
*val = dev->sata_dev.serror;
return 0;
case SCR_ACTIVE:
- *val = dev->sata_dev.ap->sactive;
+ *val = dev->sata_dev.ap->link.sactive;
return 0;
default:
return -EINVAL;
};
struct ata_device {
- struct ata_port *ap;
+ struct ata_link *link;
unsigned int devno; /* 0 or 1 */
unsigned long flags; /* ATA_DFLAG_xxx */
unsigned int horkage; /* List of broken features */
u32 flags;
} __packed;
+struct ata_link {
+ struct ata_port *ap;
+
+ unsigned int active_tag; /* active tag on this link */
+ u32 sactive; /* active NCQ commands */
+
+ unsigned int hw_sata_spd_limit;
+ unsigned int sata_spd_limit; /* SATA PHY speed limit */
+ unsigned int sata_spd; /* current SATA PHY speed */
+
+ /* record runtime error info, protected by host lock */
+ struct ata_eh_info eh_info;
+ /* EH context owned by EH */
+ struct ata_eh_context eh_context;
+
+ struct ata_device device[ATA_MAX_DEVICES];
+};
+
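/*
 * Sketch (illustrative, not part of the patch): with ata_link interposed,
 * device code reaches the port through its link and port code reaches
 * devices through the embedded host default link:
 */
static inline struct ata_port *ata_dev_to_port(struct ata_device *dev)
{
	return dev->link->ap;		/* replaces the old dev->ap */
}

static inline struct ata_device *ata_port_first_dev(struct ata_port *ap)
{
	return &ap->link.device[0];	/* replaces the old ap->device */
}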
struct ata_port {
struct Scsi_Host *scsi_host; /* our co-allocated scsi host */
const struct ata_port_operations *ops;
unsigned int mwdma_mask;
unsigned int udma_mask;
unsigned int cbl; /* cable type; ATA_CBL_xxx */
- unsigned int hw_sata_spd_limit;
- unsigned int sata_spd_limit; /* SATA PHY speed limit */
- unsigned int sata_spd; /* current SATA PHY speed */
-
- /* record runtime error info, protected by host lock */
- struct ata_eh_info eh_info;
- /* EH context owned by EH */
- struct ata_eh_context eh_context;
-
- struct ata_device device[ATA_MAX_DEVICES];
struct ata_queued_cmd qcmd[ATA_MAX_QUEUE];
unsigned long qc_allocated;
unsigned int qc_active;
- unsigned int active_tag;
- u32 sactive;
+ struct ata_link link; /* host default link */
struct ata_port_stats stats;
struct ata_host *host;
#define ata_port_printk(ap, lv, fmt, args...) \
printk(lv"ata%u: "fmt, (ap)->print_id , ##args)
+#define ata_link_printk(link, lv, fmt, args...) \
+ printk(lv"ata%u: "fmt, (link)->ap->print_id , ##args)
+
#define ata_dev_printk(dev, lv, fmt, args...) \
- printk(lv"ata%u.%02u: "fmt, (dev)->ap->print_id, (dev)->devno , ##args)
+ printk(lv"ata%u.%02u: "fmt, (dev)->link->ap->print_id, (dev)->devno , ##args)
/*
* ata_eh_info helpers
{
memset(tf, 0, sizeof(*tf));
- tf->ctl = dev->ap->ctl;
+ tf->ctl = dev->link->ap->ctl;
if (dev->devno == 0)
tf->device = ATA_DEVICE_OBS;
else