Merge branch 'fixes' of git://git.kernel.org/pub/scm/linux/kernel/git/djbw/async_tx
[GitHub/mt8127/android_kernel_alcatel_ttab.git] / drivers / ata / libata-eh.c
CommitLineData
ece1d636
TH
1/*
2 * libata-eh.c - libata error handling
3 *
4 * Maintained by: Jeff Garzik <jgarzik@pobox.com>
5 * Please ALWAYS copy linux-ide@vger.kernel.org
6 * on emails.
7 *
8 * Copyright 2006 Tejun Heo <htejun@gmail.com>
9 *
10 *
11 * This program is free software; you can redistribute it and/or
12 * modify it under the terms of the GNU General Public License as
13 * published by the Free Software Foundation; either version 2, or
14 * (at your option) any later version.
15 *
16 * This program is distributed in the hope that it will be useful,
17 * but WITHOUT ANY WARRANTY; without even the implied warranty of
18 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
19 * General Public License for more details.
20 *
21 * You should have received a copy of the GNU General Public License
22 * along with this program; see the file COPYING. If not, write to
23 * the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139,
24 * USA.
25 *
26 *
27 * libata documentation is available via 'make {ps|pdf}docs',
28 * as Documentation/DocBook/libata.*
29 *
30 * Hardware documentation available from http://www.t13.org/ and
31 * http://www.sata-io.org/
32 *
33 */
34
ece1d636 35#include <linux/kernel.h>
242f9dcb 36#include <linux/blkdev.h>
2855568b 37#include <linux/pci.h>
ece1d636
TH
38#include <scsi/scsi.h>
39#include <scsi/scsi_host.h>
40#include <scsi/scsi_eh.h>
41#include <scsi/scsi_device.h>
42#include <scsi/scsi_cmnd.h>
6521148c 43#include <scsi/scsi_dbg.h>
c6fd2807 44#include "../scsi/scsi_transport_api.h"
ece1d636
TH
45
46#include <linux/libata.h>
47
48#include "libata.h"
49
enum {
        /* speed down verdicts — returned by the speed-down decision logic
         * to tell EH how to degrade a misbehaving device.
         */
        ATA_EH_SPDN_NCQ_OFF             = (1 << 0),
        ATA_EH_SPDN_SPEED_DOWN          = (1 << 1),
        ATA_EH_SPDN_FALLBACK_TO_PIO     = (1 << 2),
        ATA_EH_SPDN_KEEP_ERRORS         = (1 << 3),

        /* error flags recorded with each error-ring entry */
        ATA_EFLAG_IS_IO                 = (1 << 0),
        ATA_EFLAG_DUBIOUS_XFER          = (1 << 1),
        /* marks ring entries from before the last ata_ering_clear() so
         * they are ignored by later analysis
         */
        ATA_EFLAG_OLD_ER                = (1 << 31),

        /* error categories — indices into per-category error counters;
         * the DUBIOUS_* variants mirror the first four for errors that
         * occurred on a not-yet-verified transfer mode
         */
        ATA_ECAT_NONE                   = 0,
        ATA_ECAT_ATA_BUS                = 1,
        ATA_ECAT_TOUT_HSM               = 2,
        ATA_ECAT_UNK_DEV                = 3,
        ATA_ECAT_DUBIOUS_NONE           = 4,
        ATA_ECAT_DUBIOUS_ATA_BUS        = 5,
        ATA_ECAT_DUBIOUS_TOUT_HSM       = 6,
        ATA_ECAT_DUBIOUS_UNK_DEV        = 7,
        ATA_ECAT_NR                     = 8,

        /* default timeout (ms) for internal commands not found in
         * ata_eh_cmd_timeout_table
         */
        ATA_EH_CMD_DFL_TIMEOUT          = 5000,

        /* always put at least this amount of time between resets */
        ATA_EH_RESET_COOL_DOWN          = 5000,

        /* Waiting in ->prereset can never be reliable.  It's
         * sometimes nice to wait there but it can't be depended upon;
         * otherwise, we wouldn't be resetting.  Just give it enough
         * time for most drives to spin up.
         */
        ATA_EH_PRERESET_TIMEOUT         = 10000,
        ATA_EH_FASTDRAIN_INTERVAL       = 3000,

        ATA_EH_UA_TRIES                 = 5,

        /* probe speed down parameters, see ata_eh_schedule_probe() */
        ATA_EH_PROBE_TRIAL_INTERVAL     = 60000, /* 1 min */
        ATA_EH_PROBE_TRIALS             = 2,
};
92
/* The following table determines how we sequence resets.  Each entry
 * represents timeout for that try.  The first try can be soft or
 * hardreset.  All others are hardreset if available.  In most cases
 * the first reset w/ 10sec timeout should succeed.  Following entries
 * are mostly for error handling, hotplug and retarded devices.
 */
static const unsigned long ata_eh_reset_timeouts[] = {
        10000,  /* most drives spin up by 10sec */
        10000,  /* > 99% working drives spin up before 20sec */
        35000,  /* give > 30 secs of idleness for retarded devices */
        5000,   /* and sweet one last chance */
        ULONG_MAX, /* > 1 min has elapsed, give up */
};
106
87fbc5a0
TH
/* Timeouts (ms) for IDENTIFY (DEVICE); ULONG_MAX terminates the table. */
static const unsigned long ata_eh_identify_timeouts[] = {
        5000,   /* covers > 99% of successes and not too boring on failures */
        10000,  /* combined time till here is enough even for media access */
        30000,  /* for true idiots */
        ULONG_MAX,
};
113
6013efd8
TH
/* Timeouts (ms) for FLUSH CACHE; flushes may legitimately take long. */
static const unsigned long ata_eh_flush_timeouts[] = {
        15000,  /* be generous with flush */
        15000,  /* ditto */
        30000,  /* and even more generous */
        ULONG_MAX,
};
120
87fbc5a0
TH
/* Timeouts (ms) for the remaining internal command classes. */
static const unsigned long ata_eh_other_timeouts[] = {
        5000,   /* same rationale as identify timeout */
        10000,  /* ditto */
        /* but no merciful 30sec for other commands, it just isn't worth it */
        ULONG_MAX,
};
127
/* One command class: a zero-terminated list of ATA opcodes and the
 * escalating timeout table that applies to all of them.
 */
struct ata_eh_cmd_timeout_ent {
        const u8 *commands;             /* zero-terminated opcode list */
        const unsigned long *timeouts;  /* ULONG_MAX-terminated, in ms */
};
132
/* The following table determines timeouts to use for EH internal
 * commands.  Each table entry is a command class and matches the
 * commands the entry applies to and the timeout table to use.
 *
 * On the retry after a command timed out, the next timeout value from
 * the table is used.  If the table doesn't contain further entries,
 * the last value is used.
 *
 * ehc->cmd_timeout_idx keeps track of which timeout to use per
 * command class, so if SET_FEATURES times out on the first try, the
 * next try will use the second timeout value only for that class.
 */
#define CMDS(cmds...)   (const u8 []){ cmds, 0 }
static const struct ata_eh_cmd_timeout_ent
ata_eh_cmd_timeout_table[ATA_EH_CMD_TIMEOUT_TABLE_SIZE] = {
        { .commands = CMDS(ATA_CMD_ID_ATA, ATA_CMD_ID_ATAPI),
          .timeouts = ata_eh_identify_timeouts, },
        { .commands = CMDS(ATA_CMD_READ_NATIVE_MAX, ATA_CMD_READ_NATIVE_MAX_EXT),
          .timeouts = ata_eh_other_timeouts, },
        { .commands = CMDS(ATA_CMD_SET_MAX, ATA_CMD_SET_MAX_EXT),
          .timeouts = ata_eh_other_timeouts, },
        { .commands = CMDS(ATA_CMD_SET_FEATURES),
          .timeouts = ata_eh_other_timeouts, },
        { .commands = CMDS(ATA_CMD_INIT_DEV_PARAMS),
          .timeouts = ata_eh_other_timeouts, },
        { .commands = CMDS(ATA_CMD_FLUSH, ATA_CMD_FLUSH_EXT),
          .timeouts = ata_eh_flush_timeouts },
};
#undef CMDS
162
static void __ata_port_freeze(struct ata_port *ap);
#ifdef CONFIG_PM
static void ata_eh_handle_port_suspend(struct ata_port *ap);
static void ata_eh_handle_port_resume(struct ata_port *ap);
#else /* CONFIG_PM */
/* Without power management support the suspend/resume hooks called from
 * ata_scsi_error() are empty stubs.
 */
static void ata_eh_handle_port_suspend(struct ata_port *ap)
{ }

static void ata_eh_handle_port_resume(struct ata_port *ap)
{ }
#endif /* CONFIG_PM */
ad9e2762 174
b64bbc39
TH
/* Append vsnprintf-formatted text to @ehi->desc.  vscnprintf() returns
 * the number of characters actually stored, so desc_len can never run
 * past ATA_EH_DESC_LEN even when the output is truncated.
 */
static void __ata_ehi_pushv_desc(struct ata_eh_info *ehi, const char *fmt,
                                 va_list args)
{
        ehi->desc_len += vscnprintf(ehi->desc + ehi->desc_len,
                                    ATA_EH_DESC_LEN - ehi->desc_len,
                                    fmt, args);
}
182
/**
 *	__ata_ehi_push_desc - push error description without adding separator
 *	@ehi: target EHI
 *	@fmt: printf format string
 *
 *	Format string according to @fmt and append it to @ehi->desc.
 *	No separator is inserted before the new text.
 *
 *	LOCKING:
 *	spin_lock_irqsave(host lock)
 */
void __ata_ehi_push_desc(struct ata_eh_info *ehi, const char *fmt, ...)
{
        va_list vargs;

        /* forward the variadic arguments to the common helper */
        va_start(vargs, fmt);
        __ata_ehi_pushv_desc(ehi, fmt, vargs);
        va_end(vargs);
}
201
202/**
203 * ata_ehi_push_desc - push error description with separator
204 * @ehi: target EHI
205 * @fmt: printf format string
206 *
207 * Format string according to @fmt and append it to @ehi->desc.
208 * If @ehi->desc is not empty, ", " is added in-between.
209 *
210 * LOCKING:
211 * spin_lock_irqsave(host lock)
212 */
213void ata_ehi_push_desc(struct ata_eh_info *ehi, const char *fmt, ...)
214{
215 va_list args;
216
217 if (ehi->desc_len)
218 __ata_ehi_push_desc(ehi, ", ");
219
220 va_start(args, fmt);
221 __ata_ehi_pushv_desc(ehi, fmt, args);
222 va_end(args);
223}
224
225/**
226 * ata_ehi_clear_desc - clean error description
227 * @ehi: target EHI
228 *
229 * Clear @ehi->desc.
230 *
231 * LOCKING:
232 * spin_lock_irqsave(host lock)
233 */
234void ata_ehi_clear_desc(struct ata_eh_info *ehi)
235{
236 ehi->desc[0] = '\0';
237 ehi->desc_len = 0;
238}
239
cbcdd875
TH
240/**
241 * ata_port_desc - append port description
242 * @ap: target ATA port
243 * @fmt: printf format string
244 *
245 * Format string according to @fmt and append it to port
246 * description. If port description is not empty, " " is added
247 * in-between. This function is to be used while initializing
248 * ata_host. The description is printed on host registration.
249 *
250 * LOCKING:
251 * None.
252 */
253void ata_port_desc(struct ata_port *ap, const char *fmt, ...)
254{
255 va_list args;
256
257 WARN_ON(!(ap->pflags & ATA_PFLAG_INITIALIZING));
258
259 if (ap->link.eh_info.desc_len)
260 __ata_ehi_push_desc(&ap->link.eh_info, " ");
261
262 va_start(args, fmt);
263 __ata_ehi_pushv_desc(&ap->link.eh_info, fmt, args);
264 va_end(args);
265}
266
267#ifdef CONFIG_PCI
268
269/**
270 * ata_port_pbar_desc - append PCI BAR description
271 * @ap: target ATA port
272 * @bar: target PCI BAR
273 * @offset: offset into PCI BAR
274 * @name: name of the area
275 *
276 * If @offset is negative, this function formats a string which
277 * contains the name, address, size and type of the BAR and
278 * appends it to the port description. If @offset is zero or
279 * positive, only name and offsetted address is appended.
280 *
281 * LOCKING:
282 * None.
283 */
284void ata_port_pbar_desc(struct ata_port *ap, int bar, ssize_t offset,
285 const char *name)
286{
287 struct pci_dev *pdev = to_pci_dev(ap->host->dev);
288 char *type = "";
289 unsigned long long start, len;
290
291 if (pci_resource_flags(pdev, bar) & IORESOURCE_MEM)
292 type = "m";
293 else if (pci_resource_flags(pdev, bar) & IORESOURCE_IO)
294 type = "i";
295
296 start = (unsigned long long)pci_resource_start(pdev, bar);
297 len = (unsigned long long)pci_resource_len(pdev, bar);
298
299 if (offset < 0)
300 ata_port_desc(ap, "%s %s%llu@0x%llx", name, type, len, start);
301 else
e6a73ab1
AM
302 ata_port_desc(ap, "%s 0x%llx", name,
303 start + (unsigned long long)offset);
cbcdd875
TH
304}
305
306#endif /* CONFIG_PCI */
307
87fbc5a0
TH
308static int ata_lookup_timeout_table(u8 cmd)
309{
310 int i;
311
312 for (i = 0; i < ATA_EH_CMD_TIMEOUT_TABLE_SIZE; i++) {
313 const u8 *cur;
314
315 for (cur = ata_eh_cmd_timeout_table[i].commands; *cur; cur++)
316 if (*cur == cmd)
317 return i;
318 }
319
320 return -1;
321}
322
323/**
324 * ata_internal_cmd_timeout - determine timeout for an internal command
325 * @dev: target device
326 * @cmd: internal command to be issued
327 *
328 * Determine timeout for internal command @cmd for @dev.
329 *
330 * LOCKING:
331 * EH context.
332 *
333 * RETURNS:
334 * Determined timeout.
335 */
336unsigned long ata_internal_cmd_timeout(struct ata_device *dev, u8 cmd)
337{
338 struct ata_eh_context *ehc = &dev->link->eh_context;
339 int ent = ata_lookup_timeout_table(cmd);
340 int idx;
341
342 if (ent < 0)
343 return ATA_EH_CMD_DFL_TIMEOUT;
344
345 idx = ehc->cmd_timeout_idx[dev->devno][ent];
346 return ata_eh_cmd_timeout_table[ent].timeouts[idx];
347}
348
349/**
350 * ata_internal_cmd_timed_out - notification for internal command timeout
351 * @dev: target device
352 * @cmd: internal command which timed out
353 *
354 * Notify EH that internal command @cmd for @dev timed out. This
355 * function should be called only for commands whose timeouts are
356 * determined using ata_internal_cmd_timeout().
357 *
358 * LOCKING:
359 * EH context.
360 */
361void ata_internal_cmd_timed_out(struct ata_device *dev, u8 cmd)
362{
363 struct ata_eh_context *ehc = &dev->link->eh_context;
364 int ent = ata_lookup_timeout_table(cmd);
365 int idx;
366
367 if (ent < 0)
368 return;
369
370 idx = ehc->cmd_timeout_idx[dev->devno][ent];
371 if (ata_eh_cmd_timeout_table[ent].timeouts[idx + 1] != ULONG_MAX)
372 ehc->cmd_timeout_idx[dev->devno][ent]++;
373}
374
3884f7b0 375static void ata_ering_record(struct ata_ering *ering, unsigned int eflags,
0c247c55
TH
376 unsigned int err_mask)
377{
378 struct ata_ering_entry *ent;
379
380 WARN_ON(!err_mask);
381
382 ering->cursor++;
383 ering->cursor %= ATA_ERING_SIZE;
384
385 ent = &ering->ring[ering->cursor];
3884f7b0 386 ent->eflags = eflags;
0c247c55
TH
387 ent->err_mask = err_mask;
388 ent->timestamp = get_jiffies_64();
389}
390
76326ac1
TH
391static struct ata_ering_entry *ata_ering_top(struct ata_ering *ering)
392{
393 struct ata_ering_entry *ent = &ering->ring[ering->cursor];
394
395 if (ent->err_mask)
396 return ent;
397 return NULL;
398}
399
d9027470
GG
400int ata_ering_map(struct ata_ering *ering,
401 int (*map_fn)(struct ata_ering_entry *, void *),
402 void *arg)
0c247c55
TH
403{
404 int idx, rc = 0;
405 struct ata_ering_entry *ent;
406
407 idx = ering->cursor;
408 do {
409 ent = &ering->ring[idx];
410 if (!ent->err_mask)
411 break;
412 rc = map_fn(ent, arg);
413 if (rc)
414 break;
415 idx = (idx - 1 + ATA_ERING_SIZE) % ATA_ERING_SIZE;
416 } while (idx != ering->cursor);
417
418 return rc;
419}
420
d9027470
GG
421int ata_ering_clear_cb(struct ata_ering_entry *ent, void *void_arg)
422{
423 ent->eflags |= ATA_EFLAG_OLD_ER;
424 return 0;
425}
426
427static void ata_ering_clear(struct ata_ering *ering)
428{
429 ata_ering_map(ering, ata_ering_clear_cb, NULL);
430}
431
64f65ca6
TH
432static unsigned int ata_eh_dev_action(struct ata_device *dev)
433{
9af5c9c9 434 struct ata_eh_context *ehc = &dev->link->eh_context;
64f65ca6
TH
435
436 return ehc->i.action | ehc->i.dev_action[dev->devno];
437}
438
f58229f8 439static void ata_eh_clear_action(struct ata_link *link, struct ata_device *dev,
af181c2d
TH
440 struct ata_eh_info *ehi, unsigned int action)
441{
f58229f8 442 struct ata_device *tdev;
af181c2d
TH
443
444 if (!dev) {
445 ehi->action &= ~action;
1eca4365 446 ata_for_each_dev(tdev, link, ALL)
f58229f8 447 ehi->dev_action[tdev->devno] &= ~action;
af181c2d
TH
448 } else {
449 /* doesn't make sense for port-wide EH actions */
450 WARN_ON(!(action & ATA_EH_PERDEV_MASK));
451
452 /* break ehi->action into ehi->dev_action */
453 if (ehi->action & action) {
1eca4365 454 ata_for_each_dev(tdev, link, ALL)
f58229f8
TH
455 ehi->dev_action[tdev->devno] |=
456 ehi->action & action;
af181c2d
TH
457 ehi->action &= ~action;
458 }
459
460 /* turn off the specified per-dev action */
461 ehi->dev_action[dev->devno] &= ~action;
462 }
463}
464
c0c362b6
TH
465/**
466 * ata_eh_acquire - acquire EH ownership
467 * @ap: ATA port to acquire EH ownership for
468 *
469 * Acquire EH ownership for @ap. This is the basic exclusion
470 * mechanism for ports sharing a host. Only one port hanging off
471 * the same host can claim the ownership of EH.
472 *
473 * LOCKING:
474 * EH context.
475 */
476void ata_eh_acquire(struct ata_port *ap)
477{
478 mutex_lock(&ap->host->eh_mutex);
479 WARN_ON_ONCE(ap->host->eh_owner);
480 ap->host->eh_owner = current;
481}
482
483/**
484 * ata_eh_release - release EH ownership
485 * @ap: ATA port to release EH ownership for
486 *
487 * Release EH ownership for @ap if the caller. The caller must
488 * have acquired EH ownership using ata_eh_acquire() previously.
489 *
490 * LOCKING:
491 * EH context.
492 */
493void ata_eh_release(struct ata_port *ap)
494{
495 WARN_ON_ONCE(ap->host->eh_owner != current);
496 ap->host->eh_owner = NULL;
497 mutex_unlock(&ap->host->eh_mutex);
498}
499
ece1d636
TH
/**
 *	ata_scsi_timed_out - SCSI layer time out callback
 *	@cmd: timed out SCSI command
 *
 *	Handles SCSI layer timeout.  We race with normal completion of
 *	the qc for @cmd.  If the qc is already gone, we lose and let
 *	the scsi command finish (EH_HANDLED).  Otherwise, the qc has
 *	timed out and EH should be invoked.  Prevent ata_qc_complete()
 *	from finishing it by setting EH_SCHEDULED and return
 *	EH_NOT_HANDLED.
 *
 *	TODO: kill this function once old EH is gone.
 *
 *	LOCKING:
 *	Called from timer context
 *
 *	RETURNS:
 *	EH_HANDLED or EH_NOT_HANDLED
 */
enum blk_eh_timer_return ata_scsi_timed_out(struct scsi_cmnd *cmd)
{
	struct Scsi_Host *host = cmd->device->host;
	struct ata_port *ap = ata_shost_to_port(host);
	unsigned long flags;
	struct ata_queued_cmd *qc;
	enum blk_eh_timer_return ret;

	DPRINTK("ENTER\n");

	/* New-style EH handles timeouts in ata_scsi_error(); just let
	 * the block layer invoke EH.
	 */
	if (ap->ops->error_handler) {
		ret = BLK_EH_NOT_HANDLED;
		goto out;
	}

	/* Old EH: inspect the active qc under ap->lock to settle the
	 * race against normal completion.
	 */
	ret = BLK_EH_HANDLED;
	spin_lock_irqsave(ap->lock, flags);
	qc = ata_qc_from_tag(ap, ap->link.active_tag);
	if (qc) {
		WARN_ON(qc->scsicmd != cmd);
		/* stop ata_qc_complete() from finishing this qc */
		qc->flags |= ATA_QCFLAG_EH_SCHEDULED;
		qc->err_mask |= AC_ERR_TIMEOUT;
		ret = BLK_EH_NOT_HANDLED;
	}
	spin_unlock_irqrestore(ap->lock, flags);

 out:
	DPRINTK("EXIT, ret=%d\n", ret);
	return ret;
}
549
ece180d1
TH
/* Prepare @ap for driver unload: restore SControl for the next driver,
 * disable all attached devices, then permanently freeze the port and
 * mark it UNLOADED.  Called from ata_scsi_error() when unloading.
 */
static void ata_eh_unload(struct ata_port *ap)
{
	struct ata_link *link;
	struct ata_device *dev;
	unsigned long flags;

	/* Restore SControl IPM and SPD for the next driver and
	 * disable attached devices.
	 */
	ata_for_each_link(link, ap, PMP_FIRST) {
		sata_scr_write(link, SCR_CONTROL, link->saved_scontrol & 0xff0);
		ata_for_each_dev(dev, link, ALL)
			ata_dev_disable(dev);
	}

	/* freeze and set UNLOADED; done under ap->lock so the pflags
	 * update is atomic w.r.t. the freeze
	 */
	spin_lock_irqsave(ap->lock, flags);

	ata_port_freeze(ap);			/* won't be thawed */
	ap->pflags &= ~ATA_PFLAG_EH_PENDING;	/* clear pending from freeze */
	ap->pflags |= ATA_PFLAG_UNLOADED;

	spin_unlock_irqrestore(ap->lock, flags);
}
574
ece1d636
TH
/**
 *	ata_scsi_error - SCSI layer error handler callback
 *	@host: SCSI host on which error occurred
 *
 *	Handles SCSI-layer-thrown error events.  This is the main entry
 *	point of libata EH: it sorts out timed-out commands, snapshots
 *	and clears the EH info, invokes the driver's error handler
 *	(repeating while new exceptions arrive), and finally cleans up
 *	and wakes ata_port_wait_eh() waiters.
 *
 *	LOCKING:
 *	Inherited from SCSI layer (none, can sleep)
 *
 *	RETURNS:
 *	Zero.
 */
void ata_scsi_error(struct Scsi_Host *host)
{
	struct ata_port *ap = ata_shost_to_port(host);
	int i;
	unsigned long flags;

	DPRINTK("ENTER\n");

	/* make sure sff pio task is not running */
	ata_sff_flush_pio_task(ap);

	/* synchronize with host lock and sort out timeouts */

	/* For new EH, all qcs are finished in one of three ways -
	 * normal completion, error completion, and SCSI timeout.
	 * Both completions can race against SCSI timeout.  When normal
	 * completion wins, the qc never reaches EH.  When error
	 * completion wins, the qc has ATA_QCFLAG_FAILED set.
	 *
	 * When SCSI timeout wins, things are a bit more complex.
	 * Normal or error completion can occur after the timeout but
	 * before this point.  In such cases, both types of
	 * completions are honored.  A scmd is determined to have
	 * timed out iff its associated qc is active and not failed.
	 */
	if (ap->ops->error_handler) {
		struct scsi_cmnd *scmd, *tmp;
		int nr_timedout = 0;

		spin_lock_irqsave(ap->lock, flags);

		/* This must occur under the ap->lock as we don't want
		   a polled recovery to race the real interrupt handler

		   The lost_interrupt handler checks for any completed but
		   non-notified command and completes much like an IRQ handler.

		   We then fall into the error recovery code which will treat
		   this as if normal completion won the race */

		if (ap->ops->lost_interrupt)
			ap->ops->lost_interrupt(ap);

		/* match each scmd queued for EH against an active qc */
		list_for_each_entry_safe(scmd, tmp, &host->eh_cmd_q, eh_entry) {
			struct ata_queued_cmd *qc;

			for (i = 0; i < ATA_MAX_QUEUE; i++) {
				qc = __ata_qc_from_tag(ap, i);
				if (qc->flags & ATA_QCFLAG_ACTIVE &&
				    qc->scsicmd == scmd)
					break;
			}

			if (i < ATA_MAX_QUEUE) {
				/* the scmd has an associated qc */
				if (!(qc->flags & ATA_QCFLAG_FAILED)) {
					/* which hasn't failed yet, timeout */
					qc->err_mask |= AC_ERR_TIMEOUT;
					qc->flags |= ATA_QCFLAG_FAILED;
					nr_timedout++;
				}
			} else {
				/* Normal completion occurred after
				 * SCSI timeout but before this point.
				 * Successfully complete it.
				 */
				scmd->retries = scmd->allowed;
				scsi_eh_finish_cmd(scmd, &ap->eh_done_q);
			}
		}

		/* If we have timed out qcs.  They belong to EH from
		 * this point but the state of the controller is
		 * unknown.  Freeze the port to make sure the IRQ
		 * handler doesn't diddle with those qcs.  This must
		 * be done atomically w.r.t. setting QCFLAG_FAILED.
		 */
		if (nr_timedout)
			__ata_port_freeze(ap);

		spin_unlock_irqrestore(ap->lock, flags);

		/* initialize eh_tries */
		ap->eh_tries = ATA_EH_MAX_TRIES;
	} else
		spin_unlock_wait(ap->lock);

	/* If we timed raced normal completion and there is nothing to
	   recover nr_timedout == 0 why exactly are we doing error recovery ? */

	/* invoke error handler */
	if (ap->ops->error_handler) {
		struct ata_link *link;

		/* acquire EH ownership */
		ata_eh_acquire(ap);
 repeat:
		/* kill fast drain timer */
		del_timer_sync(&ap->fastdrain_timer);

		/* process port resume request */
		ata_eh_handle_port_resume(ap);

		/* fetch & clear EH info */
		spin_lock_irqsave(ap->lock, flags);

		/* snapshot eh_info into eh_context and record each
		 * device's transfer mode / NCQ state for recovery
		 */
		ata_for_each_link(link, ap, HOST_FIRST) {
			struct ata_eh_context *ehc = &link->eh_context;
			struct ata_device *dev;

			memset(&link->eh_context, 0, sizeof(link->eh_context));
			link->eh_context.i = link->eh_info;
			memset(&link->eh_info, 0, sizeof(link->eh_info));

			ata_for_each_dev(dev, link, ENABLED) {
				int devno = dev->devno;

				ehc->saved_xfer_mode[devno] = dev->xfer_mode;
				if (ata_ncq_enabled(dev))
					ehc->saved_ncq_enabled |= 1 << devno;
			}
		}

		ap->pflags |= ATA_PFLAG_EH_IN_PROGRESS;
		ap->pflags &= ~ATA_PFLAG_EH_PENDING;
		ap->excl_link = NULL;	/* don't maintain exclusion over EH */

		spin_unlock_irqrestore(ap->lock, flags);

		/* invoke EH, skip if unloading or suspended */
		if (!(ap->pflags & (ATA_PFLAG_UNLOADING | ATA_PFLAG_SUSPENDED)))
			ap->ops->error_handler(ap);
		else {
			/* if unloading, commence suicide */
			if ((ap->pflags & ATA_PFLAG_UNLOADING) &&
			    !(ap->pflags & ATA_PFLAG_UNLOADED))
				ata_eh_unload(ap);
			ata_eh_finish(ap);
		}

		/* process port suspend request */
		ata_eh_handle_port_suspend(ap);

		/* Exception might have happend after ->error_handler
		 * recovered the port but before this point.  Repeat
		 * EH in such case.
		 */
		spin_lock_irqsave(ap->lock, flags);

		if (ap->pflags & ATA_PFLAG_EH_PENDING) {
			if (--ap->eh_tries) {
				spin_unlock_irqrestore(ap->lock, flags);
				goto repeat;
			}
			ata_port_printk(ap, KERN_ERR, "EH pending after %d "
					"tries, giving up\n", ATA_EH_MAX_TRIES);
			ap->pflags &= ~ATA_PFLAG_EH_PENDING;
		}

		/* this run is complete, make sure EH info is clear */
		ata_for_each_link(link, ap, HOST_FIRST)
			memset(&link->eh_info, 0, sizeof(link->eh_info));

		/* Clear host_eh_scheduled while holding ap->lock such
		 * that if exception occurs after this point but
		 * before EH completion, SCSI midlayer will
		 * re-initiate EH.
		 */
		host->host_eh_scheduled = 0;

		spin_unlock_irqrestore(ap->lock, flags);
		ata_eh_release(ap);
	} else {
		/* old EH: delegate to the driver's eng_timeout hook */
		WARN_ON(ata_qc_from_tag(ap, ap->link.active_tag) == NULL);
		ap->ops->eng_timeout(ap);
	}

	/* finish or retry handled scmd's and clean up */
	WARN_ON(host->host_failed || !list_empty(&host->eh_cmd_q));

	scsi_eh_flush_done_q(&ap->eh_done_q);

	/* clean up */
	spin_lock_irqsave(ap->lock, flags);

	if (ap->pflags & ATA_PFLAG_LOADING)
		ap->pflags &= ~ATA_PFLAG_LOADING;
	else if (ap->pflags & ATA_PFLAG_SCSI_HOTPLUG)
		schedule_delayed_work(&ap->hotplug_task, 0);

	if (ap->pflags & ATA_PFLAG_RECOVERED)
		ata_port_printk(ap, KERN_INFO, "EH complete\n");

	ap->pflags &= ~(ATA_PFLAG_SCSI_HOTPLUG | ATA_PFLAG_RECOVERED);

	/* tell wait_eh that we're done */
	ap->pflags &= ~ATA_PFLAG_EH_IN_PROGRESS;
	wake_up_all(&ap->eh_wait_q);

	spin_unlock_irqrestore(ap->lock, flags);

	DPRINTK("EXIT\n");
}
790
c6cf9e99
TH
/**
 *	ata_port_wait_eh - Wait for the currently pending EH to complete
 *	@ap: Port to wait EH for
 *
 *	Wait until the currently pending EH is complete.  Sleeps on
 *	ap->eh_wait_q while either EH_PENDING or EH_IN_PROGRESS is set,
 *	then additionally waits for SCSI-layer EH to finish.
 *
 *	LOCKING:
 *	Kernel thread context (may sleep).
 */
void ata_port_wait_eh(struct ata_port *ap)
{
	unsigned long flags;
	DEFINE_WAIT(wait);

 retry:
	spin_lock_irqsave(ap->lock, flags);

	/* prepare_to_wait() before dropping the lock closes the race
	 * with the wake_up_all() in ata_scsi_error()
	 */
	while (ap->pflags & (ATA_PFLAG_EH_PENDING | ATA_PFLAG_EH_IN_PROGRESS)) {
		prepare_to_wait(&ap->eh_wait_q, &wait, TASK_UNINTERRUPTIBLE);
		spin_unlock_irqrestore(ap->lock, flags);
		schedule();
		spin_lock_irqsave(ap->lock, flags);
	}
	finish_wait(&ap->eh_wait_q, &wait);

	spin_unlock_irqrestore(ap->lock, flags);

	/* make sure SCSI EH is complete */
	if (scsi_host_in_recovery(ap->scsi_host)) {
		ata_msleep(ap, 10);
		goto retry;
	}
}
824
5ddf24c5
TH
825static int ata_eh_nr_in_flight(struct ata_port *ap)
826{
827 unsigned int tag;
828 int nr = 0;
829
830 /* count only non-internal commands */
831 for (tag = 0; tag < ATA_MAX_QUEUE - 1; tag++)
832 if (ata_qc_from_tag(ap, tag))
833 nr++;
834
835 return nr;
836}
837
/* Fast-drain timer callback.  While EH is pending, checks whether
 * in-flight qcs are making progress; if none completed during the last
 * interval, all of them are marked timed out and the port is frozen so
 * EH can take over.  Otherwise the timer is re-armed.
 */
void ata_eh_fastdrain_timerfn(unsigned long arg)
{
	struct ata_port *ap = (void *)arg;
	unsigned long flags;
	int cnt;

	spin_lock_irqsave(ap->lock, flags);

	cnt = ata_eh_nr_in_flight(ap);

	/* are we done? */
	if (!cnt)
		goto out_unlock;

	if (cnt == ap->fastdrain_cnt) {
		unsigned int tag;

		/* No progress during the last interval, tag all
		 * in-flight qcs as timed out and freeze the port.
		 */
		for (tag = 0; tag < ATA_MAX_QUEUE - 1; tag++) {
			struct ata_queued_cmd *qc = ata_qc_from_tag(ap, tag);
			if (qc)
				qc->err_mask |= AC_ERR_TIMEOUT;
		}

		ata_port_freeze(ap);
	} else {
		/* some qcs have finished, give it another chance */
		ap->fastdrain_cnt = cnt;
		ap->fastdrain_timer.expires =
			ata_deadline(jiffies, ATA_EH_FASTDRAIN_INTERVAL);
		add_timer(&ap->fastdrain_timer);
	}

 out_unlock:
	spin_unlock_irqrestore(ap->lock, flags);
}
876
877/**
878 * ata_eh_set_pending - set ATA_PFLAG_EH_PENDING and activate fast drain
879 * @ap: target ATA port
880 * @fastdrain: activate fast drain
881 *
882 * Set ATA_PFLAG_EH_PENDING and activate fast drain if @fastdrain
883 * is non-zero and EH wasn't pending before. Fast drain ensures
884 * that EH kicks in in timely manner.
885 *
886 * LOCKING:
887 * spin_lock_irqsave(host lock)
888 */
889static void ata_eh_set_pending(struct ata_port *ap, int fastdrain)
890{
891 int cnt;
892
893 /* already scheduled? */
894 if (ap->pflags & ATA_PFLAG_EH_PENDING)
895 return;
896
897 ap->pflags |= ATA_PFLAG_EH_PENDING;
898
899 if (!fastdrain)
900 return;
901
902 /* do we have in-flight qcs? */
903 cnt = ata_eh_nr_in_flight(ap);
904 if (!cnt)
905 return;
906
907 /* activate fast drain */
908 ap->fastdrain_cnt = cnt;
341c2c95
TH
909 ap->fastdrain_timer.expires =
910 ata_deadline(jiffies, ATA_EH_FASTDRAIN_INTERVAL);
5ddf24c5
TH
911 add_timer(&ap->fastdrain_timer);
912}
913
f686bcb8
TH
/**
 *	ata_qc_schedule_eh - schedule qc for error handling
 *	@qc: command to schedule error handling for
 *
 *	Schedule error handling for @qc.  EH will kick in as soon as
 *	other commands are drained.  Marks the qc FAILED and aborts its
 *	block request so the SCSI midlayer invokes EH.
 *
 *	LOCKING:
 *	spin_lock_irqsave(host lock)
 */
void ata_qc_schedule_eh(struct ata_queued_cmd *qc)
{
	struct ata_port *ap = qc->ap;
	struct request_queue *q = qc->scsicmd->device->request_queue;
	unsigned long flags;

	WARN_ON(!ap->ops->error_handler);

	qc->flags |= ATA_QCFLAG_FAILED;
	ata_eh_set_pending(ap, 1);

	/* The following will fail if timeout has already expired.
	 * ata_scsi_error() takes care of such scmds on EH entry.
	 * Note that ATA_QCFLAG_FAILED is unconditionally set after
	 * this function completes.
	 */
	/* blk_abort_request() requires the request queue's lock */
	spin_lock_irqsave(q->queue_lock, flags);
	blk_abort_request(qc->scsicmd->request);
	spin_unlock_irqrestore(q->queue_lock, flags);
}
944
7b70fc03
TH
945/**
946 * ata_port_schedule_eh - schedule error handling without a qc
947 * @ap: ATA port to schedule EH for
948 *
949 * Schedule error handling for @ap. EH will kick in as soon as
950 * all commands are drained.
951 *
952 * LOCKING:
cca3974e 953 * spin_lock_irqsave(host lock)
7b70fc03
TH
954 */
955void ata_port_schedule_eh(struct ata_port *ap)
956{
957 WARN_ON(!ap->ops->error_handler);
958
f4d6d004
TH
959 if (ap->pflags & ATA_PFLAG_INITIALIZING)
960 return;
961
5ddf24c5 962 ata_eh_set_pending(ap, 1);
cca3974e 963 scsi_schedule_eh(ap->scsi_host);
7b70fc03
TH
964
965 DPRINTK("port EH scheduled\n");
966}
967
dbd82616 968static int ata_do_link_abort(struct ata_port *ap, struct ata_link *link)
7b70fc03
TH
969{
970 int tag, nr_aborted = 0;
971
972 WARN_ON(!ap->ops->error_handler);
973
5ddf24c5
TH
974 /* we're gonna abort all commands, no need for fast drain */
975 ata_eh_set_pending(ap, 0);
976
7b70fc03
TH
977 for (tag = 0; tag < ATA_MAX_QUEUE; tag++) {
978 struct ata_queued_cmd *qc = ata_qc_from_tag(ap, tag);
979
dbd82616 980 if (qc && (!link || qc->dev->link == link)) {
7b70fc03
TH
981 qc->flags |= ATA_QCFLAG_FAILED;
982 ata_qc_complete(qc);
983 nr_aborted++;
984 }
985 }
986
987 if (!nr_aborted)
988 ata_port_schedule_eh(ap);
989
990 return nr_aborted;
991}
992
dbd82616
TH
993/**
994 * ata_link_abort - abort all qc's on the link
995 * @link: ATA link to abort qc's for
996 *
997 * Abort all active qc's active on @link and schedule EH.
998 *
999 * LOCKING:
1000 * spin_lock_irqsave(host lock)
1001 *
1002 * RETURNS:
1003 * Number of aborted qc's.
1004 */
1005int ata_link_abort(struct ata_link *link)
1006{
1007 return ata_do_link_abort(link->ap, link);
1008}
1009
1010/**
1011 * ata_port_abort - abort all qc's on the port
1012 * @ap: ATA port to abort qc's for
1013 *
1014 * Abort all active qc's of @ap and schedule EH.
1015 *
1016 * LOCKING:
1017 * spin_lock_irqsave(host_set lock)
1018 *
1019 * RETURNS:
1020 * Number of aborted qc's.
1021 */
1022int ata_port_abort(struct ata_port *ap)
1023{
1024 return ata_do_link_abort(ap, NULL);
1025}
1026
/**
 * __ata_port_freeze - freeze port
 * @ap: ATA port to freeze
 *
 * This function is called when HSM violation or some other
 * condition disrupts normal operation of the port.  Frozen port
 * is not allowed to perform any operation until the port is
 * thawed, which usually follows a successful reset.
 *
 * ap->ops->freeze() callback can be used for freezing the port
 * hardware-wise (e.g. mask interrupt and stop DMA engine).  If a
 * port cannot be frozen hardware-wise, the interrupt handler
 * must ack and clear interrupts unconditionally while the port
 * is frozen.
 *
 * LOCKING:
 * spin_lock_irqsave(host lock)
 */
static void __ata_port_freeze(struct ata_port *ap)
{
	WARN_ON(!ap->ops->error_handler);

	/* let the LLD mask interrupts / stop DMA if it supports it */
	if (ap->ops->freeze)
		ap->ops->freeze(ap);

	/* mark frozen so the rest of libata treats the port accordingly */
	ap->pflags |= ATA_PFLAG_FROZEN;

	DPRINTK("ata%u port frozen\n", ap->print_id);
}
1056
1057/**
1058 * ata_port_freeze - abort & freeze port
1059 * @ap: ATA port to freeze
1060 *
54c38444
JG
1061 * Abort and freeze @ap. The freeze operation must be called
1062 * first, because some hardware requires special operations
1063 * before the taskfile registers are accessible.
e3180499
TH
1064 *
1065 * LOCKING:
cca3974e 1066 * spin_lock_irqsave(host lock)
e3180499
TH
1067 *
1068 * RETURNS:
1069 * Number of aborted commands.
1070 */
1071int ata_port_freeze(struct ata_port *ap)
1072{
1073 int nr_aborted;
1074
1075 WARN_ON(!ap->ops->error_handler);
1076
e3180499 1077 __ata_port_freeze(ap);
54c38444 1078 nr_aborted = ata_port_abort(ap);
e3180499
TH
1079
1080 return nr_aborted;
1081}
1082
/**
 * sata_async_notification - SATA async notification handler
 * @ap: ATA port where async notification is received
 *
 * Handler to be called when async notification via SDB FIS is
 * received.  This function schedules EH if necessary.
 *
 * LOCKING:
 * spin_lock_irqsave(host lock)
 *
 * RETURNS:
 * 1 if EH is scheduled, 0 otherwise.
 */
int sata_async_notification(struct ata_port *ap)
{
	u32 sntf;
	int rc;

	/* ports which don't support AN have nothing to do */
	if (!(ap->flags & ATA_FLAG_AN))
		return 0;

	/* read SNTF and, if readable, write it back to clear the bits */
	rc = sata_scr_read(&ap->link, SCR_NOTIFICATION, &sntf);
	if (rc == 0)
		sata_scr_write(&ap->link, SCR_NOTIFICATION, sntf);

	if (!sata_pmp_attached(ap) || rc) {
		/* PMP is not attached or SNTF is not available */
		if (!sata_pmp_attached(ap)) {
			/* PMP is not attached.  Check whether ATAPI
			 * AN is configured.  If so, notify media
			 * change.
			 */
			struct ata_device *dev = ap->link.device;

			if ((dev->class == ATA_DEV_ATAPI) &&
			    (dev->flags & ATA_DFLAG_AN))
				ata_scsi_media_change_notify(dev);
			return 0;
		} else {
			/* PMP is attached but SNTF is not available.
			 * ATAPI async media change notification is
			 * not used.  The PMP must be reporting PHY
			 * status change, schedule EH.
			 */
			ata_port_schedule_eh(ap);
			return 1;
		}
	} else {
		/* PMP is attached and SNTF is available */
		struct ata_link *link;

		/* check and notify ATAPI AN; each SNTF bit maps to the
		 * downstream link with the matching PMP port number
		 */
		ata_for_each_link(link, ap, EDGE) {
			if (!(sntf & (1 << link->pmp)))
				continue;

			if ((link->device->class == ATA_DEV_ATAPI) &&
			    (link->device->flags & ATA_DFLAG_AN))
				ata_scsi_media_change_notify(link->device);
		}

		/* If PMP is reporting that PHY status of some
		 * downstream ports has changed, schedule EH.
		 */
		if (sntf & (1 << SATA_PMP_CTRL_PORT)) {
			ata_port_schedule_eh(ap);
			return 1;
		}

		return 0;
	}
}
1155
/**
 * ata_eh_freeze_port - EH helper to freeze port
 * @ap: ATA port to freeze
 *
 * Freeze @ap.  Unlike __ata_port_freeze(), this takes the host
 * lock itself, so it may be called without the lock held.
 *
 * LOCKING:
 * None.
 */
void ata_eh_freeze_port(struct ata_port *ap)
{
	unsigned long flags;

	/* nothing to do on ports driven by the old (non-EH) code path */
	if (!ap->ops->error_handler)
		return;

	spin_lock_irqsave(ap->lock, flags);
	__ata_port_freeze(ap);
	spin_unlock_irqrestore(ap->lock, flags);
}
1176
/**
 * ata_eh_thaw_port - EH helper to thaw port
 * @ap: ATA port to thaw
 *
 * Thaw frozen port @ap.
 *
 * LOCKING:
 * None.
 */
void ata_eh_thaw_port(struct ata_port *ap)
{
	unsigned long flags;

	/* nothing to do on ports driven by the old (non-EH) code path */
	if (!ap->ops->error_handler)
		return;

	spin_lock_irqsave(ap->lock, flags);

	ap->pflags &= ~ATA_PFLAG_FROZEN;

	/* let the LLD re-enable interrupts / restart engines if needed */
	if (ap->ops->thaw)
		ap->ops->thaw(ap);

	spin_unlock_irqrestore(ap->lock, flags);

	DPRINTK("ata%u port thawed\n", ap->print_id);
}
1204
/* no-op scsi_cmnd completion callback; installed by
 * __ata_eh_qc_complete() because the command is finished via
 * scsi_eh_finish_cmd() instead of the normal done path
 */
static void ata_eh_scsidone(struct scsi_cmnd *scmd)
{
	/* nada */
}
1209
/* Complete @qc from EH context: complete the qc with a no-op SCSI done
 * callback under the host lock, then hand the scsi_cmnd to the SCSI EH
 * finish queue.
 */
static void __ata_eh_qc_complete(struct ata_queued_cmd *qc)
{
	struct ata_port *ap = qc->ap;
	struct scsi_cmnd *scmd = qc->scsicmd;
	unsigned long flags;

	spin_lock_irqsave(ap->lock, flags);
	/* divert the normal completion path; SCSI side is finished below */
	qc->scsidone = ata_eh_scsidone;
	__ata_qc_complete(qc);
	/* qc must have been freed by the completion above */
	WARN_ON(ata_tag_valid(qc->tag));
	spin_unlock_irqrestore(ap->lock, flags);

	scsi_eh_finish_cmd(scmd, &ap->eh_done_q);
}
1224
1225/**
1226 * ata_eh_qc_complete - Complete an active ATA command from EH
1227 * @qc: Command to complete
1228 *
1229 * Indicate to the mid and upper layers that an ATA command has
1230 * completed. To be used from EH.
1231 */
1232void ata_eh_qc_complete(struct ata_queued_cmd *qc)
1233{
1234 struct scsi_cmnd *scmd = qc->scsicmd;
1235 scmd->retries = scmd->allowed;
1236 __ata_eh_qc_complete(qc);
1237}
1238
1239/**
1240 * ata_eh_qc_retry - Tell midlayer to retry an ATA command after EH
1241 * @qc: Command to retry
1242 *
1243 * Indicate to the mid and upper layers that an ATA command
1244 * should be retried. To be used from EH.
1245 *
1246 * SCSI midlayer limits the number of retries to scmd->allowed.
1247 * scmd->retries is decremented for commands which get retried
1248 * due to unrelated failures (qc->err_mask is zero).
1249 */
1250void ata_eh_qc_retry(struct ata_queued_cmd *qc)
1251{
1252 struct scsi_cmnd *scmd = qc->scsicmd;
1253 if (!qc->err_mask && scmd->retries)
1254 scmd->retries--;
1255 __ata_eh_qc_complete(qc);
1256}
022bdb07 1257
/**
 * ata_dev_disable - disable ATA device
 * @dev: ATA device to disable
 *
 * Disable @dev.
 *
 * Locking:
 * EH context.
 */
void ata_dev_disable(struct ata_device *dev)
{
	if (!ata_dev_enabled(dev))
		return;

	if (ata_msg_drv(dev->link->ap))
		ata_dev_printk(dev, KERN_WARNING, "disabled\n");
	ata_acpi_on_disable(dev);
	ata_down_xfermask_limit(dev, ATA_DNXFER_FORCE_PIO0 | ATA_DNXFER_QUIET);
	/* bump class to the next value — presumably the matching
	 * *_UNSUP class per the ATA_DEV_* enum layout; confirm against
	 * the enum in <linux/libata.h>
	 */
	dev->class++;

	/* From now till the next successful probe, ering is used to
	 * track probe failures.  Clear accumulated device error info.
	 */
	ata_ering_clear(&dev->ering);
}
1283
/**
 * ata_eh_detach_dev - detach ATA device
 * @dev: ATA device to detach
 *
 * Detach @dev.
 *
 * LOCKING:
 * None.
 */
void ata_eh_detach_dev(struct ata_device *dev)
{
	struct ata_link *link = dev->link;
	struct ata_port *ap = link->ap;
	struct ata_eh_context *ehc = &link->eh_context;
	unsigned long flags;

	ata_dev_disable(dev);

	spin_lock_irqsave(ap->lock, flags);

	dev->flags &= ~ATA_DFLAG_DETACH;

	/* if the SCSI device could be taken offline, request hotplug
	 * processing so the SCSI side gets detached too
	 */
	if (ata_scsi_offline_dev(dev)) {
		dev->flags |= ATA_DFLAG_DETACHED;
		ap->pflags |= ATA_PFLAG_SCSI_HOTPLUG;
	}

	/* clear per-dev EH info */
	ata_eh_clear_action(link, dev, &link->eh_info, ATA_EH_PERDEV_MASK);
	ata_eh_clear_action(link, dev, &link->eh_context.i, ATA_EH_PERDEV_MASK);
	/* forget saved transfer mode / NCQ state for this devno */
	ehc->saved_xfer_mode[dev->devno] = 0;
	ehc->saved_ncq_enabled &= ~(1 << dev->devno);

	spin_unlock_irqrestore(ap->lock, flags);
}
1319
/**
 * ata_eh_about_to_do - about to perform eh_action
 * @link: target ATA link
 * @dev: target ATA dev for per-dev action (can be NULL)
 * @action: action about to be performed
 *
 * Called just before performing EH actions to clear related bits
 * in @link->eh_info such that eh actions are not unnecessarily
 * repeated.
 *
 * LOCKING:
 * None.
 */
void ata_eh_about_to_do(struct ata_link *link, struct ata_device *dev,
			unsigned int action)
{
	struct ata_port *ap = link->ap;
	struct ata_eh_info *ehi = &link->eh_info;
	struct ata_eh_context *ehc = &link->eh_context;
	unsigned long flags;

	spin_lock_irqsave(ap->lock, flags);

	ata_eh_clear_action(link, dev, ehi, action);

	/* About to take EH action, set RECOVERED.  Ignore actions on
	 * slave links as master will do them again.
	 */
	if (!(ehc->i.flags & ATA_EHI_QUIET) && link != ap->slave_link)
		ap->pflags |= ATA_PFLAG_RECOVERED;

	spin_unlock_irqrestore(ap->lock, flags);
}
1353
/**
 * ata_eh_done - EH action complete
 * @link: target ATA link
 * @dev: target ATA dev for per-dev action (can be NULL)
 * @action: action just completed
 *
 * Called right after performing EH actions to clear related bits
 * in @link->eh_context.
 *
 * LOCKING:
 * None.
 */
void ata_eh_done(struct ata_link *link, struct ata_device *dev,
		 unsigned int action)
{
	struct ata_eh_context *ehc = &link->eh_context;

	ata_eh_clear_action(link, dev, &ehc->i, action);
}
1373
022bdb07
TH
1374/**
1375 * ata_err_string - convert err_mask to descriptive string
1376 * @err_mask: error mask to convert to string
1377 *
1378 * Convert @err_mask to descriptive string. Errors are
1379 * prioritized according to severity and only the most severe
1380 * error is reported.
1381 *
1382 * LOCKING:
1383 * None.
1384 *
1385 * RETURNS:
1386 * Descriptive string for @err_mask
1387 */
2dcb407e 1388static const char *ata_err_string(unsigned int err_mask)
022bdb07
TH
1389{
1390 if (err_mask & AC_ERR_HOST_BUS)
1391 return "host bus error";
1392 if (err_mask & AC_ERR_ATA_BUS)
1393 return "ATA bus error";
1394 if (err_mask & AC_ERR_TIMEOUT)
1395 return "timeout";
1396 if (err_mask & AC_ERR_HSM)
1397 return "HSM violation";
1398 if (err_mask & AC_ERR_SYSTEM)
1399 return "internal error";
1400 if (err_mask & AC_ERR_MEDIA)
1401 return "media error";
1402 if (err_mask & AC_ERR_INVALID)
1403 return "invalid argument";
1404 if (err_mask & AC_ERR_DEV)
1405 return "device error";
1406 return "unknown error";
1407}
1408
/**
 * ata_read_log_page - read a specific log page
 * @dev: target device
 * @page: page to read
 * @buf: buffer to store read page
 * @sectors: number of sectors to read
 *
 * Read log page using READ_LOG_EXT command.
 *
 * LOCKING:
 * Kernel thread context (may sleep).
 *
 * RETURNS:
 * 0 on success, AC_ERR_* mask otherwise.
 */
static unsigned int ata_read_log_page(struct ata_device *dev,
				      u8 page, void *buf, unsigned int sectors)
{
	struct ata_taskfile tf;
	unsigned int err_mask;

	DPRINTK("read log page - page %d\n", page);

	ata_tf_init(dev, &tf);
	tf.command = ATA_CMD_READ_LOG_EXT;
	tf.lbal = page;
	/* sector count is 16 bits, split across nsect/hob_nsect */
	tf.nsect = sectors;
	tf.hob_nsect = sectors >> 8;
	tf.flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_LBA48 | ATA_TFLAG_DEVICE;
	tf.protocol = ATA_PROT_PIO;

	err_mask = ata_exec_internal(dev, &tf, NULL, DMA_FROM_DEVICE,
				     buf, sectors * ATA_SECT_SIZE, 0);

	DPRINTK("EXIT, err_mask=%x\n", err_mask);
	return err_mask;
}
1446
/**
 * ata_eh_read_log_10h - Read log page 10h for NCQ error details
 * @dev: Device to read log page 10h from
 * @tag: Resulting tag of the failed command
 * @tf: Resulting taskfile registers of the failed command
 *
 * Read log page 10h to obtain NCQ error details and clear error
 * condition.
 *
 * LOCKING:
 * Kernel thread context (may sleep).
 *
 * RETURNS:
 * 0 on success, -errno otherwise.
 */
static int ata_eh_read_log_10h(struct ata_device *dev,
			       int *tag, struct ata_taskfile *tf)
{
	u8 *buf = dev->link->ap->sector_buf;
	unsigned int err_mask;
	u8 csum;
	int i;

	err_mask = ata_read_log_page(dev, ATA_LOG_SATA_NCQ, buf, 1);
	if (err_mask)
		return -EIO;

	/* whole sector should sum to zero; warn but proceed on mismatch */
	csum = 0;
	for (i = 0; i < ATA_SECT_SIZE; i++)
		csum += buf[i];
	if (csum)
		ata_dev_printk(dev, KERN_WARNING,
			"invalid checksum 0x%x on log page 10h\n", csum);

	/* NQ bit set means the error is not tied to a queued command */
	if (buf[0] & 0x80)
		return -ENOENT;

	*tag = buf[0] & 0x1f;

	/* unpack the failed command's taskfile image from the log page */
	tf->command = buf[2];
	tf->feature = buf[3];
	tf->lbal = buf[4];
	tf->lbam = buf[5];
	tf->lbah = buf[6];
	tf->device = buf[7];
	tf->hob_lbal = buf[8];
	tf->hob_lbam = buf[9];
	tf->hob_lbah = buf[10];
	tf->nsect = buf[12];
	tf->hob_nsect = buf[13];

	return 0;
}
1500
/**
 * atapi_eh_tur - perform ATAPI TEST_UNIT_READY
 * @dev: target ATAPI device
 * @r_sense_key: out parameter for sense_key
 *
 * Perform ATAPI TEST_UNIT_READY.
 *
 * LOCKING:
 * EH context (may sleep).
 *
 * RETURNS:
 * 0 on success, AC_ERR_* mask on failure.
 */
static unsigned int atapi_eh_tur(struct ata_device *dev, u8 *r_sense_key)
{
	u8 cdb[ATAPI_CDB_LEN] = { TEST_UNIT_READY, 0, 0, 0, 0, 0 };
	struct ata_taskfile tf;
	unsigned int err_mask;

	ata_tf_init(dev, &tf);

	tf.flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE;
	tf.command = ATA_CMD_PACKET;
	tf.protocol = ATAPI_PROT_NODATA;

	err_mask = ata_exec_internal(dev, &tf, cdb, DMA_NONE, NULL, 0, 0);
	/* on device error the sense key is in the upper nibble of the
	 * error (feature) register
	 */
	if (err_mask == AC_ERR_DEV)
		*r_sense_key = tf.feature >> 4;
	return err_mask;
}
1531
/**
 * atapi_eh_request_sense - perform ATAPI REQUEST_SENSE
 * @dev: device to perform REQUEST_SENSE to
 * @sense_buf: result sense data buffer (SCSI_SENSE_BUFFERSIZE bytes long)
 * @dfl_sense_key: default sense key to use
 *
 * Perform ATAPI REQUEST_SENSE after the device reported CHECK
 * SENSE.  This function is EH helper.
 *
 * LOCKING:
 * Kernel thread context (may sleep).
 *
 * RETURNS:
 * 0 on success, AC_ERR_* mask on failure
 */
static unsigned int atapi_eh_request_sense(struct ata_device *dev,
					   u8 *sense_buf, u8 dfl_sense_key)
{
	u8 cdb[ATAPI_CDB_LEN] =
		{ REQUEST_SENSE, 0, 0, 0, SCSI_SENSE_BUFFERSIZE, 0 };
	struct ata_port *ap = dev->link->ap;
	struct ata_taskfile tf;

	DPRINTK("ATAPI request sense\n");

	/* FIXME: is this needed? */
	memset(sense_buf, 0, SCSI_SENSE_BUFFERSIZE);

	/* initialize sense_buf with the error register,
	 * for the case where they are -not- overwritten
	 */
	sense_buf[0] = 0x70;		/* fixed-format sense response code */
	sense_buf[2] = dfl_sense_key;

	/* some devices time out if garbage left in tf */
	ata_tf_init(dev, &tf);

	tf.flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE;
	tf.command = ATA_CMD_PACKET;

	/* is it pointless to prefer PIO for "safety reasons"? */
	if (ap->flags & ATA_FLAG_PIO_DMA) {
		tf.protocol = ATAPI_PROT_DMA;
		tf.feature |= ATAPI_PKT_DMA;
	} else {
		tf.protocol = ATAPI_PROT_PIO;
		/* byte count limit goes in lbam/lbah for PIO transfers */
		tf.lbam = SCSI_SENSE_BUFFERSIZE;
		tf.lbah = 0;
	}

	return ata_exec_internal(dev, &tf, cdb, DMA_FROM_DEVICE,
				 sense_buf, SCSI_SENSE_BUFFERSIZE, 0);
}
1585
/**
 * ata_eh_analyze_serror - analyze SError for a failed port
 * @link: ATA link to analyze SError for
 *
 * Analyze SError if available and further determine cause of
 * failure.
 *
 * LOCKING:
 * None.
 */
static void ata_eh_analyze_serror(struct ata_link *link)
{
	struct ata_eh_context *ehc = &link->eh_context;
	u32 serror = ehc->i.serror;
	unsigned int err_mask = 0, action = 0;
	u32 hotplug_mask;

	/* map SError bits to error classes; all of these warrant a reset */
	if (serror & (SERR_PERSISTENT | SERR_DATA)) {
		err_mask |= AC_ERR_ATA_BUS;
		action |= ATA_EH_RESET;
	}
	if (serror & SERR_PROTOCOL) {
		err_mask |= AC_ERR_HSM;
		action |= ATA_EH_RESET;
	}
	if (serror & SERR_INTERNAL) {
		err_mask |= AC_ERR_SYSTEM;
		action |= ATA_EH_RESET;
	}

	/* Determine whether a hotplug event has occurred.  Both
	 * SError.N/X are considered hotplug events for enabled or
	 * host links.  For disabled PMP links, only N bit is
	 * considered as X bit is left at 1 for link plugging.
	 */
	if (link->lpm_policy != ATA_LPM_MAX_POWER)
		hotplug_mask = 0;	/* hotplug doesn't work w/ LPM */
	else if (!(link->flags & ATA_LFLAG_DISABLED) || ata_is_host_link(link))
		hotplug_mask = SERR_PHYRDY_CHG | SERR_DEV_XCHG;
	else
		hotplug_mask = SERR_PHYRDY_CHG;

	if (serror & hotplug_mask)
		ata_ehi_hotplugged(&ehc->i);

	ehc->i.err_mask |= err_mask;
	ehc->i.action |= action;
}
1634
/**
 * ata_eh_analyze_ncq_error - analyze NCQ error
 * @link: ATA link to analyze NCQ error for
 *
 * Read log page 10h, determine the offending qc and acquire
 * error status TF.  For NCQ device errors, all LLDDs have to do
 * is setting AC_ERR_DEV in ehi->err_mask.  This function takes
 * care of the rest.
 *
 * LOCKING:
 * Kernel thread context (may sleep).
 */
void ata_eh_analyze_ncq_error(struct ata_link *link)
{
	struct ata_port *ap = link->ap;
	struct ata_eh_context *ehc = &link->eh_context;
	struct ata_device *dev = link->device;
	struct ata_queued_cmd *qc;
	struct ata_taskfile tf;
	int tag, rc;

	/* if frozen, we can't do much */
	if (ap->pflags & ATA_PFLAG_FROZEN)
		return;

	/* is it NCQ device error? */
	if (!link->sactive || !(ehc->i.err_mask & AC_ERR_DEV))
		return;

	/* has LLDD analyzed already?  any failed qc with a non-zero
	 * err_mask means the LLDD already pinned down the culprit
	 */
	for (tag = 0; tag < ATA_MAX_QUEUE; tag++) {
		qc = __ata_qc_from_tag(ap, tag);

		if (!(qc->flags & ATA_QCFLAG_FAILED))
			continue;

		if (qc->err_mask)
			return;
	}

	/* okay, this error is ours */
	memset(&tf, 0, sizeof(tf));
	rc = ata_eh_read_log_10h(dev, &tag, &tf);
	if (rc) {
		ata_link_printk(link, KERN_ERR, "failed to read log page 10h "
				"(errno=%d)\n", rc);
		return;
	}

	/* the reported tag must correspond to an outstanding NCQ command */
	if (!(link->sactive & (1 << tag))) {
		ata_link_printk(link, KERN_ERR, "log page 10h reported "
				"inactive tag %d\n", tag);
		return;
	}

	/* we've got the perpetrator, condemn it */
	qc = __ata_qc_from_tag(ap, tag);
	memcpy(&qc->result_tf, &tf, sizeof(tf));
	qc->result_tf.flags = ATA_TFLAG_ISADDR | ATA_TFLAG_LBA | ATA_TFLAG_LBA48;
	qc->err_mask |= AC_ERR_DEV | AC_ERR_NCQ;
	/* the device error now belongs to the qc, not the link */
	ehc->i.err_mask &= ~AC_ERR_DEV;
}
1697
/**
 * ata_eh_analyze_tf - analyze taskfile of a failed qc
 * @qc: qc to analyze
 * @tf: Taskfile registers to analyze
 *
 * Analyze taskfile of @qc and further determine cause of
 * failure.  This function also requests ATAPI sense data if
 * available.
 *
 * LOCKING:
 * Kernel thread context (may sleep).
 *
 * RETURNS:
 * Determined recovery action
 */
static unsigned int ata_eh_analyze_tf(struct ata_queued_cmd *qc,
				      const struct ata_taskfile *tf)
{
	unsigned int tmp, action = 0;
	u8 stat = tf->command, err = tf->feature;

	/* anything other than a clean DRDY status is an HSM violation */
	if ((stat & (ATA_BUSY | ATA_DRQ | ATA_DRDY)) != ATA_DRDY) {
		qc->err_mask |= AC_ERR_HSM;
		return ATA_EH_RESET;
	}

	if (stat & (ATA_ERR | ATA_DF))
		qc->err_mask |= AC_ERR_DEV;
	else
		return 0;

	switch (qc->dev->class) {
	case ATA_DEV_ATA:
		/* decode the error register bit by bit */
		if (err & ATA_ICRC)
			qc->err_mask |= AC_ERR_ATA_BUS;
		if (err & ATA_UNC)
			qc->err_mask |= AC_ERR_MEDIA;
		if (err & ATA_IDNF)
			qc->err_mask |= AC_ERR_INVALID;
		break;

	case ATA_DEV_ATAPI:
		/* can't issue REQUEST SENSE on a frozen port */
		if (!(qc->ap->pflags & ATA_PFLAG_FROZEN)) {
			tmp = atapi_eh_request_sense(qc->dev,
						qc->scsicmd->sense_buffer,
						qc->result_tf.feature >> 4);
			if (!tmp) {
				/* ATA_QCFLAG_SENSE_VALID is used to
				 * tell atapi_qc_complete() that sense
				 * data is already valid.
				 *
				 * TODO: interpret sense data and set
				 * appropriate err_mask.
				 */
				qc->flags |= ATA_QCFLAG_SENSE_VALID;
			} else
				qc->err_mask |= tmp;
		}
	}

	if (qc->err_mask & (AC_ERR_HSM | AC_ERR_TIMEOUT | AC_ERR_ATA_BUS))
		action |= ATA_EH_RESET;

	return action;
}
1763
/* Categorize @err_mask into an ATA_ECAT_* error category.
 *
 * @xfer_ok latches to 1 once an entry without ATA_EFLAG_DUBIOUS_XFER is
 * seen; until then, categories are offset into the DUBIOUS_* range.
 * Returns 0 (no category) for errors that don't match any class.
 */
static int ata_eh_categorize_error(unsigned int eflags, unsigned int err_mask,
				   int *xfer_ok)
{
	int base = 0;

	if (!(eflags & ATA_EFLAG_DUBIOUS_XFER))
		*xfer_ok = 1;

	if (!*xfer_ok)
		base = ATA_ECAT_DUBIOUS_NONE;

	if (err_mask & AC_ERR_ATA_BUS)
		return base + ATA_ECAT_ATA_BUS;

	if (err_mask & AC_ERR_TIMEOUT)
		return base + ATA_ECAT_TOUT_HSM;

	if (eflags & ATA_EFLAG_IS_IO) {
		if (err_mask & AC_ERR_HSM)
			return base + ATA_ECAT_TOUT_HSM;
		/* pure device error (no media/invalid bits) on IO */
		if ((err_mask &
		     (AC_ERR_DEV|AC_ERR_MEDIA|AC_ERR_INVALID)) == AC_ERR_DEV)
			return base + ATA_ECAT_UNK_DEV;
	}

	return 0;
}
1791
/* accumulator passed through ata_ering_map() when counting errors */
struct speed_down_verdict_arg {
	u64 since;			/* skip entries older than this */
	int xfer_ok;			/* latched by categorize_error */
	int nr_errors[ATA_ECAT_NR];	/* per-category error counts */
};

/* ering iteration callback: count @ent into the matching category;
 * returning -1 stops the walk at entries that are too old or stale
 */
static int speed_down_verdict_cb(struct ata_ering_entry *ent, void *void_arg)
{
	struct speed_down_verdict_arg *arg = void_arg;
	int cat;

	if ((ent->eflags & ATA_EFLAG_OLD_ER) || (ent->timestamp < arg->since))
		return -1;

	cat = ata_eh_categorize_error(ent->eflags, ent->err_mask,
				      &arg->xfer_ok);
	arg->nr_errors[cat]++;

	return 0;
}
1812
/**
 * ata_eh_speed_down_verdict - Determine speed down verdict
 * @dev: Device of interest
 *
 * This function examines error ring of @dev and determines
 * whether NCQ needs to be turned off, transfer speed should be
 * stepped down, or falling back to PIO is necessary.
 *
 * ECAT_ATA_BUS	: ATA_BUS error for any command
 *
 * ECAT_TOUT_HSM : TIMEOUT for any command or HSM violation for
 *		  IO commands
 *
 * ECAT_UNK_DEV	: Unknown DEV error for IO commands
 *
 * ECAT_DUBIOUS_* : Identical to above three but occurred while
 *		  data transfer hasn't been verified.
 *
 * Verdicts are
 *
 * NCQ_OFF	: Turn off NCQ.
 *
 * SPEED_DOWN	: Speed down transfer speed but don't fall back
 *		  to PIO.
 *
 * FALLBACK_TO_PIO : Fall back to PIO.
 *
 * Even if multiple verdicts are returned, only one action is
 * taken per error.  An action triggered by non-DUBIOUS errors
 * clears ering, while one triggered by DUBIOUS_* errors doesn't.
 * This is to expedite speed down decisions right after device is
 * initially configured.
 *
 * The followings are speed down rules.  #1 and #2 deal with
 * DUBIOUS errors.
 *
 * 1. If more than one DUBIOUS_ATA_BUS or DUBIOUS_TOUT_HSM errors
 *    occurred during last 5 mins, SPEED_DOWN and FALLBACK_TO_PIO.
 *
 * 2. If more than one DUBIOUS_TOUT_HSM or DUBIOUS_UNK_DEV errors
 *    occurred during last 5 mins, NCQ_OFF.
 *
 * 3. If more than 8 ATA_BUS, TOUT_HSM or UNK_DEV errors
 *    occurred during last 5 mins, FALLBACK_TO_PIO
 *
 * 4. If more than 3 TOUT_HSM or UNK_DEV errors occurred
 *    during last 10 mins, NCQ_OFF.
 *
 * 5. If more than 3 ATA_BUS or TOUT_HSM errors, or more than 6
 *    UNK_DEV errors occurred during last 10 mins, SPEED_DOWN.
 *
 * LOCKING:
 * Inherited from caller.
 *
 * RETURNS:
 * OR of ATA_EH_SPDN_* flags.
 */
static unsigned int ata_eh_speed_down_verdict(struct ata_device *dev)
{
	const u64 j5mins = 5LLU * 60 * HZ, j10mins = 10LLU * 60 * HZ;
	u64 j64 = get_jiffies_64();
	struct speed_down_verdict_arg arg;
	unsigned int verdict = 0;

	/* scan past 5 mins of error history */
	memset(&arg, 0, sizeof(arg));
	arg.since = j64 - min(j64, j5mins);
	ata_ering_map(&dev->ering, speed_down_verdict_cb, &arg);

	if (arg.nr_errors[ATA_ECAT_DUBIOUS_ATA_BUS] +
	    arg.nr_errors[ATA_ECAT_DUBIOUS_TOUT_HSM] > 1)
		verdict |= ATA_EH_SPDN_SPEED_DOWN |
			ATA_EH_SPDN_FALLBACK_TO_PIO | ATA_EH_SPDN_KEEP_ERRORS;

	if (arg.nr_errors[ATA_ECAT_DUBIOUS_TOUT_HSM] +
	    arg.nr_errors[ATA_ECAT_DUBIOUS_UNK_DEV] > 1)
		verdict |= ATA_EH_SPDN_NCQ_OFF | ATA_EH_SPDN_KEEP_ERRORS;

	if (arg.nr_errors[ATA_ECAT_ATA_BUS] +
	    arg.nr_errors[ATA_ECAT_TOUT_HSM] +
	    arg.nr_errors[ATA_ECAT_UNK_DEV] > 6)
		verdict |= ATA_EH_SPDN_FALLBACK_TO_PIO;

	/* scan past 10 mins of error history */
	memset(&arg, 0, sizeof(arg));
	arg.since = j64 - min(j64, j10mins);
	ata_ering_map(&dev->ering, speed_down_verdict_cb, &arg);

	if (arg.nr_errors[ATA_ECAT_TOUT_HSM] +
	    arg.nr_errors[ATA_ECAT_UNK_DEV] > 3)
		verdict |= ATA_EH_SPDN_NCQ_OFF;

	if (arg.nr_errors[ATA_ECAT_ATA_BUS] +
	    arg.nr_errors[ATA_ECAT_TOUT_HSM] > 3 ||
	    arg.nr_errors[ATA_ECAT_UNK_DEV] > 6)
		verdict |= ATA_EH_SPDN_SPEED_DOWN;

	return verdict;
}
1912
/**
 * ata_eh_speed_down - record error and speed down if necessary
 * @dev: Failed device
 * @eflags: mask of ATA_EFLAG_* flags
 * @err_mask: err_mask of the error
 *
 * Record error and examine error history to determine whether
 * adjusting transmission speed is necessary.  It also sets
 * transmission limits appropriately if such adjustment is
 * necessary.
 *
 * LOCKING:
 * Kernel thread context (may sleep).
 *
 * RETURNS:
 * Determined recovery action.
 */
static unsigned int ata_eh_speed_down(struct ata_device *dev,
				      unsigned int eflags, unsigned int err_mask)
{
	struct ata_link *link = ata_dev_phys_link(dev);
	int xfer_ok = 0;
	unsigned int verdict;
	unsigned int action = 0;

	/* don't bother if Cat-0 error */
	if (ata_eh_categorize_error(eflags, err_mask, &xfer_ok) == 0)
		return 0;

	/* record error and determine whether speed down is necessary */
	ata_ering_record(&dev->ering, eflags, err_mask);
	verdict = ata_eh_speed_down_verdict(dev);

	/* turn off NCQ? — only when NCQ is on and not already forced off */
	if ((verdict & ATA_EH_SPDN_NCQ_OFF) &&
	    (dev->flags & (ATA_DFLAG_PIO | ATA_DFLAG_NCQ |
			   ATA_DFLAG_NCQ_OFF)) == ATA_DFLAG_NCQ) {
		dev->flags |= ATA_DFLAG_NCQ_OFF;
		ata_dev_printk(dev, KERN_WARNING,
			       "NCQ disabled due to excessive errors\n");
		goto done;
	}

	/* speed down? */
	if (verdict & ATA_EH_SPDN_SPEED_DOWN) {
		/* speed down SATA link speed if possible */
		if (sata_down_spd_limit(link, 0) == 0) {
			action |= ATA_EH_RESET;
			goto done;
		}

		/* lower transfer mode: first step is a moderate limit,
		 * second step is the aggressive one
		 */
		if (dev->spdn_cnt < 2) {
			static const int dma_dnxfer_sel[] =
				{ ATA_DNXFER_DMA, ATA_DNXFER_40C };
			static const int pio_dnxfer_sel[] =
				{ ATA_DNXFER_PIO, ATA_DNXFER_FORCE_PIO0 };
			int sel;

			if (dev->xfer_shift != ATA_SHIFT_PIO)
				sel = dma_dnxfer_sel[dev->spdn_cnt];
			else
				sel = pio_dnxfer_sel[dev->spdn_cnt];

			dev->spdn_cnt++;

			if (ata_down_xfermask_limit(dev, sel) == 0) {
				action |= ATA_EH_RESET;
				goto done;
			}
		}
	}

	/* Fall back to PIO?  Slowing down to PIO is meaningless for
	 * SATA ATA devices.  Consider it only for PATA and SATAPI.
	 */
	if ((verdict & ATA_EH_SPDN_FALLBACK_TO_PIO) && (dev->spdn_cnt >= 2) &&
	    (link->ap->cbl != ATA_CBL_SATA || dev->class == ATA_DEV_ATAPI) &&
	    (dev->xfer_shift != ATA_SHIFT_PIO)) {
		if (ata_down_xfermask_limit(dev, ATA_DNXFER_FORCE_PIO) == 0) {
			dev->spdn_cnt = 0;
			action |= ATA_EH_RESET;
			goto done;
		}
	}

	return 0;
 done:
	/* device has been slowed down, blow error history */
	if (!(verdict & ATA_EH_SPDN_KEEP_ERRORS))
		ata_ering_clear(&dev->ering);
	return action;
}
2006
2007/**
9b1e2658
TH
2008 * ata_eh_link_autopsy - analyze error and determine recovery action
2009 * @link: host link to perform autopsy on
022bdb07 2010 *
0260731f
TH
2011 * Analyze why @link failed and determine which recovery actions
2012 * are needed. This function also sets more detailed AC_ERR_*
2013 * values and fills sense data for ATAPI CHECK SENSE.
022bdb07
TH
2014 *
2015 * LOCKING:
2016 * Kernel thread context (may sleep).
2017 */
9b1e2658 2018static void ata_eh_link_autopsy(struct ata_link *link)
022bdb07 2019{
0260731f 2020 struct ata_port *ap = link->ap;
936fd732 2021 struct ata_eh_context *ehc = &link->eh_context;
dfcc173d 2022 struct ata_device *dev;
3884f7b0
TH
2023 unsigned int all_err_mask = 0, eflags = 0;
2024 int tag;
022bdb07
TH
2025 u32 serror;
2026 int rc;
2027
2028 DPRINTK("ENTER\n");
2029
1cdaf534
TH
2030 if (ehc->i.flags & ATA_EHI_NO_AUTOPSY)
2031 return;
2032
022bdb07 2033 /* obtain and analyze SError */
936fd732 2034 rc = sata_scr_read(link, SCR_ERROR, &serror);
022bdb07
TH
2035 if (rc == 0) {
2036 ehc->i.serror |= serror;
0260731f 2037 ata_eh_analyze_serror(link);
4e57c517 2038 } else if (rc != -EOPNOTSUPP) {
cf480626 2039 /* SError read failed, force reset and probing */
b558eddd 2040 ehc->i.probe_mask |= ATA_ALL_DEVICES;
cf480626 2041 ehc->i.action |= ATA_EH_RESET;
4e57c517
TH
2042 ehc->i.err_mask |= AC_ERR_OTHER;
2043 }
022bdb07 2044
e8ee8451 2045 /* analyze NCQ failure */
0260731f 2046 ata_eh_analyze_ncq_error(link);
e8ee8451 2047
022bdb07
TH
2048 /* any real error trumps AC_ERR_OTHER */
2049 if (ehc->i.err_mask & ~AC_ERR_OTHER)
2050 ehc->i.err_mask &= ~AC_ERR_OTHER;
2051
2052 all_err_mask |= ehc->i.err_mask;
2053
2054 for (tag = 0; tag < ATA_MAX_QUEUE; tag++) {
2055 struct ata_queued_cmd *qc = __ata_qc_from_tag(ap, tag);
2056
b1c72916
TH
2057 if (!(qc->flags & ATA_QCFLAG_FAILED) ||
2058 ata_dev_phys_link(qc->dev) != link)
022bdb07
TH
2059 continue;
2060
2061 /* inherit upper level err_mask */
2062 qc->err_mask |= ehc->i.err_mask;
2063
022bdb07 2064 /* analyze TF */
4528e4da 2065 ehc->i.action |= ata_eh_analyze_tf(qc, &qc->result_tf);
022bdb07
TH
2066
2067 /* DEV errors are probably spurious in case of ATA_BUS error */
2068 if (qc->err_mask & AC_ERR_ATA_BUS)
2069 qc->err_mask &= ~(AC_ERR_DEV | AC_ERR_MEDIA |
2070 AC_ERR_INVALID);
2071
2072 /* any real error trumps unknown error */
2073 if (qc->err_mask & ~AC_ERR_OTHER)
2074 qc->err_mask &= ~AC_ERR_OTHER;
2075
2076 /* SENSE_VALID trumps dev/unknown error and revalidation */
f90f0828 2077 if (qc->flags & ATA_QCFLAG_SENSE_VALID)
022bdb07 2078 qc->err_mask &= ~(AC_ERR_DEV | AC_ERR_OTHER);
022bdb07 2079
03faab78 2080 /* determine whether the command is worth retrying */
534ead70
TH
2081 if (qc->flags & ATA_QCFLAG_IO ||
2082 (!(qc->err_mask & AC_ERR_INVALID) &&
2083 qc->err_mask != AC_ERR_DEV))
03faab78
TH
2084 qc->flags |= ATA_QCFLAG_RETRY;
2085
022bdb07 2086 /* accumulate error info */
4528e4da 2087 ehc->i.dev = qc->dev;
022bdb07
TH
2088 all_err_mask |= qc->err_mask;
2089 if (qc->flags & ATA_QCFLAG_IO)
3884f7b0 2090 eflags |= ATA_EFLAG_IS_IO;
022bdb07
TH
2091 }
2092
a20f33ff 2093 /* enforce default EH actions */
b51e9e5d 2094 if (ap->pflags & ATA_PFLAG_FROZEN ||
a20f33ff 2095 all_err_mask & (AC_ERR_HSM | AC_ERR_TIMEOUT))
cf480626 2096 ehc->i.action |= ATA_EH_RESET;
3884f7b0
TH
2097 else if (((eflags & ATA_EFLAG_IS_IO) && all_err_mask) ||
2098 (!(eflags & ATA_EFLAG_IS_IO) && (all_err_mask & ~AC_ERR_DEV)))
4528e4da 2099 ehc->i.action |= ATA_EH_REVALIDATE;
022bdb07 2100
dfcc173d
TH
2101 /* If we have offending qcs and the associated failed device,
2102 * perform per-dev EH action only on the offending device.
2103 */
4528e4da 2104 if (ehc->i.dev) {
4528e4da
TH
2105 ehc->i.dev_action[ehc->i.dev->devno] |=
2106 ehc->i.action & ATA_EH_PERDEV_MASK;
2107 ehc->i.action &= ~ATA_EH_PERDEV_MASK;
47005f25
TH
2108 }
2109
2695e366
TH
2110 /* propagate timeout to host link */
2111 if ((all_err_mask & AC_ERR_TIMEOUT) && !ata_is_host_link(link))
2112 ap->link.eh_context.i.err_mask |= AC_ERR_TIMEOUT;
2113
2114 /* record error and consider speeding down */
dfcc173d 2115 dev = ehc->i.dev;
2695e366
TH
2116 if (!dev && ((ata_link_max_devices(link) == 1 &&
2117 ata_dev_enabled(link->device))))
2118 dev = link->device;
dfcc173d 2119
76326ac1
TH
2120 if (dev) {
2121 if (dev->flags & ATA_DFLAG_DUBIOUS_XFER)
2122 eflags |= ATA_EFLAG_DUBIOUS_XFER;
3884f7b0 2123 ehc->i.action |= ata_eh_speed_down(dev, eflags, all_err_mask);
76326ac1 2124 }
dfcc173d 2125
022bdb07
TH
2126 DPRINTK("EXIT\n");
2127}
2128
2129/**
9b1e2658
TH
2130 * ata_eh_autopsy - analyze error and determine recovery action
2131 * @ap: host port to perform autopsy on
2132 *
2133 * Analyze all links of @ap and determine why they failed and
2134 * which recovery actions are needed.
2135 *
2136 * LOCKING:
2137 * Kernel thread context (may sleep).
2138 */
fb7fd614 2139void ata_eh_autopsy(struct ata_port *ap)
9b1e2658
TH
2140{
2141 struct ata_link *link;
2142
1eca4365 2143 ata_for_each_link(link, ap, EDGE)
9b1e2658 2144 ata_eh_link_autopsy(link);
2695e366 2145
b1c72916
TH
2146 /* Handle the frigging slave link. Autopsy is done similarly
2147 * but actions and flags are transferred over to the master
2148 * link and handled from there.
2149 */
2150 if (ap->slave_link) {
2151 struct ata_eh_context *mehc = &ap->link.eh_context;
2152 struct ata_eh_context *sehc = &ap->slave_link->eh_context;
2153
848e4c68
TH
2154 /* transfer control flags from master to slave */
2155 sehc->i.flags |= mehc->i.flags & ATA_EHI_TO_SLAVE_MASK;
2156
2157 /* perform autopsy on the slave link */
b1c72916
TH
2158 ata_eh_link_autopsy(ap->slave_link);
2159
848e4c68 2160 /* transfer actions from slave to master and clear slave */
b1c72916
TH
2161 ata_eh_about_to_do(ap->slave_link, NULL, ATA_EH_ALL_ACTIONS);
2162 mehc->i.action |= sehc->i.action;
2163 mehc->i.dev_action[1] |= sehc->i.dev_action[1];
2164 mehc->i.flags |= sehc->i.flags;
2165 ata_eh_done(ap->slave_link, NULL, ATA_EH_ALL_ACTIONS);
2166 }
2167
2695e366
TH
2168 /* Autopsy of fanout ports can affect host link autopsy.
2169 * Perform host link autopsy last.
2170 */
071f44b1 2171 if (sata_pmp_attached(ap))
2695e366 2172 ata_eh_link_autopsy(&ap->link);
9b1e2658
TH
2173}
2174
6521148c
RH
2175/**
2176 * ata_get_cmd_descript - get description for ATA command
2177 * @command: ATA command code to get description for
2178 *
2179 * Return a textual description of the given command, or NULL if the
2180 * command is not known.
2181 *
2182 * LOCKING:
2183 * None
2184 */
2185const char *ata_get_cmd_descript(u8 command)
2186{
2187#ifdef CONFIG_ATA_VERBOSE_ERROR
2188 static const struct
2189 {
2190 u8 command;
2191 const char *text;
2192 } cmd_descr[] = {
2193 { ATA_CMD_DEV_RESET, "DEVICE RESET" },
2194 { ATA_CMD_CHK_POWER, "CHECK POWER MODE" },
2195 { ATA_CMD_STANDBY, "STANDBY" },
2196 { ATA_CMD_IDLE, "IDLE" },
2197 { ATA_CMD_EDD, "EXECUTE DEVICE DIAGNOSTIC" },
2198 { ATA_CMD_DOWNLOAD_MICRO, "DOWNLOAD MICROCODE" },
2199 { ATA_CMD_NOP, "NOP" },
2200 { ATA_CMD_FLUSH, "FLUSH CACHE" },
2201 { ATA_CMD_FLUSH_EXT, "FLUSH CACHE EXT" },
2202 { ATA_CMD_ID_ATA, "IDENTIFY DEVICE" },
2203 { ATA_CMD_ID_ATAPI, "IDENTIFY PACKET DEVICE" },
2204 { ATA_CMD_SERVICE, "SERVICE" },
2205 { ATA_CMD_READ, "READ DMA" },
2206 { ATA_CMD_READ_EXT, "READ DMA EXT" },
2207 { ATA_CMD_READ_QUEUED, "READ DMA QUEUED" },
2208 { ATA_CMD_READ_STREAM_EXT, "READ STREAM EXT" },
2209 { ATA_CMD_READ_STREAM_DMA_EXT, "READ STREAM DMA EXT" },
2210 { ATA_CMD_WRITE, "WRITE DMA" },
2211 { ATA_CMD_WRITE_EXT, "WRITE DMA EXT" },
2212 { ATA_CMD_WRITE_QUEUED, "WRITE DMA QUEUED EXT" },
2213 { ATA_CMD_WRITE_STREAM_EXT, "WRITE STREAM EXT" },
2214 { ATA_CMD_WRITE_STREAM_DMA_EXT, "WRITE STREAM DMA EXT" },
2215 { ATA_CMD_WRITE_FUA_EXT, "WRITE DMA FUA EXT" },
2216 { ATA_CMD_WRITE_QUEUED_FUA_EXT, "WRITE DMA QUEUED FUA EXT" },
2217 { ATA_CMD_FPDMA_READ, "READ FPDMA QUEUED" },
2218 { ATA_CMD_FPDMA_WRITE, "WRITE FPDMA QUEUED" },
2219 { ATA_CMD_PIO_READ, "READ SECTOR(S)" },
2220 { ATA_CMD_PIO_READ_EXT, "READ SECTOR(S) EXT" },
2221 { ATA_CMD_PIO_WRITE, "WRITE SECTOR(S)" },
2222 { ATA_CMD_PIO_WRITE_EXT, "WRITE SECTOR(S) EXT" },
2223 { ATA_CMD_READ_MULTI, "READ MULTIPLE" },
2224 { ATA_CMD_READ_MULTI_EXT, "READ MULTIPLE EXT" },
2225 { ATA_CMD_WRITE_MULTI, "WRITE MULTIPLE" },
2226 { ATA_CMD_WRITE_MULTI_EXT, "WRITE MULTIPLE EXT" },
2227 { ATA_CMD_WRITE_MULTI_FUA_EXT, "WRITE MULTIPLE FUA EXT" },
2228 { ATA_CMD_SET_FEATURES, "SET FEATURES" },
2229 { ATA_CMD_SET_MULTI, "SET MULTIPLE MODE" },
2230 { ATA_CMD_VERIFY, "READ VERIFY SECTOR(S)" },
2231 { ATA_CMD_VERIFY_EXT, "READ VERIFY SECTOR(S) EXT" },
2232 { ATA_CMD_WRITE_UNCORR_EXT, "WRITE UNCORRECTABLE EXT" },
2233 { ATA_CMD_STANDBYNOW1, "STANDBY IMMEDIATE" },
2234 { ATA_CMD_IDLEIMMEDIATE, "IDLE IMMEDIATE" },
2235 { ATA_CMD_SLEEP, "SLEEP" },
2236 { ATA_CMD_INIT_DEV_PARAMS, "INITIALIZE DEVICE PARAMETERS" },
2237 { ATA_CMD_READ_NATIVE_MAX, "READ NATIVE MAX ADDRESS" },
2238 { ATA_CMD_READ_NATIVE_MAX_EXT, "READ NATIVE MAX ADDRESS EXT" },
2239 { ATA_CMD_SET_MAX, "SET MAX ADDRESS" },
2240 { ATA_CMD_SET_MAX_EXT, "SET MAX ADDRESS EXT" },
2241 { ATA_CMD_READ_LOG_EXT, "READ LOG EXT" },
2242 { ATA_CMD_WRITE_LOG_EXT, "WRITE LOG EXT" },
2243 { ATA_CMD_READ_LOG_DMA_EXT, "READ LOG DMA EXT" },
2244 { ATA_CMD_WRITE_LOG_DMA_EXT, "WRITE LOG DMA EXT" },
2245 { ATA_CMD_TRUSTED_RCV, "TRUSTED RECEIVE" },
2246 { ATA_CMD_TRUSTED_RCV_DMA, "TRUSTED RECEIVE DMA" },
2247 { ATA_CMD_TRUSTED_SND, "TRUSTED SEND" },
2248 { ATA_CMD_TRUSTED_SND_DMA, "TRUSTED SEND DMA" },
2249 { ATA_CMD_PMP_READ, "READ BUFFER" },
2250 { ATA_CMD_PMP_WRITE, "WRITE BUFFER" },
2251 { ATA_CMD_CONF_OVERLAY, "DEVICE CONFIGURATION OVERLAY" },
2252 { ATA_CMD_SEC_SET_PASS, "SECURITY SET PASSWORD" },
2253 { ATA_CMD_SEC_UNLOCK, "SECURITY UNLOCK" },
2254 { ATA_CMD_SEC_ERASE_PREP, "SECURITY ERASE PREPARE" },
2255 { ATA_CMD_SEC_ERASE_UNIT, "SECURITY ERASE UNIT" },
2256 { ATA_CMD_SEC_FREEZE_LOCK, "SECURITY FREEZE LOCK" },
2257 { ATA_CMD_SEC_DISABLE_PASS, "SECURITY DISABLE PASSWORD" },
2258 { ATA_CMD_CONFIG_STREAM, "CONFIGURE STREAM" },
2259 { ATA_CMD_SMART, "SMART" },
2260 { ATA_CMD_MEDIA_LOCK, "DOOR LOCK" },
2261 { ATA_CMD_MEDIA_UNLOCK, "DOOR UNLOCK" },
acad7627 2262 { ATA_CMD_DSM, "DATA SET MANAGEMENT" },
6521148c
RH
2263 { ATA_CMD_CHK_MED_CRD_TYP, "CHECK MEDIA CARD TYPE" },
2264 { ATA_CMD_CFA_REQ_EXT_ERR, "CFA REQUEST EXTENDED ERROR" },
2265 { ATA_CMD_CFA_WRITE_NE, "CFA WRITE SECTORS WITHOUT ERASE" },
2266 { ATA_CMD_CFA_TRANS_SECT, "CFA TRANSLATE SECTOR" },
2267 { ATA_CMD_CFA_ERASE, "CFA ERASE SECTORS" },
2268 { ATA_CMD_CFA_WRITE_MULT_NE, "CFA WRITE MULTIPLE WITHOUT ERASE" },
2269 { ATA_CMD_READ_LONG, "READ LONG (with retries)" },
2270 { ATA_CMD_READ_LONG_ONCE, "READ LONG (without retries)" },
2271 { ATA_CMD_WRITE_LONG, "WRITE LONG (with retries)" },
2272 { ATA_CMD_WRITE_LONG_ONCE, "WRITE LONG (without retries)" },
2273 { ATA_CMD_RESTORE, "RECALIBRATE" },
2274 { 0, NULL } /* terminate list */
2275 };
2276
2277 unsigned int i;
2278 for (i = 0; cmd_descr[i].text; i++)
2279 if (cmd_descr[i].command == command)
2280 return cmd_descr[i].text;
2281#endif
2282
2283 return NULL;
2284}
2285
9b1e2658
TH
2286/**
2287 * ata_eh_link_report - report error handling to user
0260731f 2288 * @link: ATA link EH is going on
022bdb07
TH
2289 *
2290 * Report EH to user.
2291 *
2292 * LOCKING:
2293 * None.
2294 */
9b1e2658 2295static void ata_eh_link_report(struct ata_link *link)
022bdb07 2296{
0260731f
TH
2297 struct ata_port *ap = link->ap;
2298 struct ata_eh_context *ehc = &link->eh_context;
022bdb07 2299 const char *frozen, *desc;
a1e10f7e 2300 char tries_buf[6];
022bdb07
TH
2301 int tag, nr_failed = 0;
2302
94ff3d54
TH
2303 if (ehc->i.flags & ATA_EHI_QUIET)
2304 return;
2305
022bdb07
TH
2306 desc = NULL;
2307 if (ehc->i.desc[0] != '\0')
2308 desc = ehc->i.desc;
2309
2310 for (tag = 0; tag < ATA_MAX_QUEUE; tag++) {
2311 struct ata_queued_cmd *qc = __ata_qc_from_tag(ap, tag);
2312
b1c72916
TH
2313 if (!(qc->flags & ATA_QCFLAG_FAILED) ||
2314 ata_dev_phys_link(qc->dev) != link ||
e027bd36
TH
2315 ((qc->flags & ATA_QCFLAG_QUIET) &&
2316 qc->err_mask == AC_ERR_DEV))
022bdb07
TH
2317 continue;
2318 if (qc->flags & ATA_QCFLAG_SENSE_VALID && !qc->err_mask)
2319 continue;
2320
2321 nr_failed++;
2322 }
2323
2324 if (!nr_failed && !ehc->i.err_mask)
2325 return;
2326
2327 frozen = "";
b51e9e5d 2328 if (ap->pflags & ATA_PFLAG_FROZEN)
022bdb07
TH
2329 frozen = " frozen";
2330
a1e10f7e
TH
2331 memset(tries_buf, 0, sizeof(tries_buf));
2332 if (ap->eh_tries < ATA_EH_MAX_TRIES)
2333 snprintf(tries_buf, sizeof(tries_buf) - 1, " t%d",
2334 ap->eh_tries);
2335
022bdb07 2336 if (ehc->i.dev) {
e8ee8451 2337 ata_dev_printk(ehc->i.dev, KERN_ERR, "exception Emask 0x%x "
a1e10f7e
TH
2338 "SAct 0x%x SErr 0x%x action 0x%x%s%s\n",
2339 ehc->i.err_mask, link->sactive, ehc->i.serror,
2340 ehc->i.action, frozen, tries_buf);
022bdb07 2341 if (desc)
b64bbc39 2342 ata_dev_printk(ehc->i.dev, KERN_ERR, "%s\n", desc);
022bdb07 2343 } else {
0260731f 2344 ata_link_printk(link, KERN_ERR, "exception Emask 0x%x "
a1e10f7e
TH
2345 "SAct 0x%x SErr 0x%x action 0x%x%s%s\n",
2346 ehc->i.err_mask, link->sactive, ehc->i.serror,
2347 ehc->i.action, frozen, tries_buf);
022bdb07 2348 if (desc)
0260731f 2349 ata_link_printk(link, KERN_ERR, "%s\n", desc);
022bdb07
TH
2350 }
2351
6521148c 2352#ifdef CONFIG_ATA_VERBOSE_ERROR
1333e194 2353 if (ehc->i.serror)
da0e21d3 2354 ata_link_printk(link, KERN_ERR,
1333e194
RH
2355 "SError: { %s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s}\n",
2356 ehc->i.serror & SERR_DATA_RECOVERED ? "RecovData " : "",
2357 ehc->i.serror & SERR_COMM_RECOVERED ? "RecovComm " : "",
2358 ehc->i.serror & SERR_DATA ? "UnrecovData " : "",
2359 ehc->i.serror & SERR_PERSISTENT ? "Persist " : "",
2360 ehc->i.serror & SERR_PROTOCOL ? "Proto " : "",
2361 ehc->i.serror & SERR_INTERNAL ? "HostInt " : "",
2362 ehc->i.serror & SERR_PHYRDY_CHG ? "PHYRdyChg " : "",
2363 ehc->i.serror & SERR_PHY_INT_ERR ? "PHYInt " : "",
2364 ehc->i.serror & SERR_COMM_WAKE ? "CommWake " : "",
2365 ehc->i.serror & SERR_10B_8B_ERR ? "10B8B " : "",
2366 ehc->i.serror & SERR_DISPARITY ? "Dispar " : "",
2367 ehc->i.serror & SERR_CRC ? "BadCRC " : "",
2368 ehc->i.serror & SERR_HANDSHAKE ? "Handshk " : "",
2369 ehc->i.serror & SERR_LINK_SEQ_ERR ? "LinkSeq " : "",
2370 ehc->i.serror & SERR_TRANS_ST_ERROR ? "TrStaTrns " : "",
2371 ehc->i.serror & SERR_UNRECOG_FIS ? "UnrecFIS " : "",
2dcb407e 2372 ehc->i.serror & SERR_DEV_XCHG ? "DevExch " : "");
6521148c 2373#endif
1333e194 2374
022bdb07
TH
2375 for (tag = 0; tag < ATA_MAX_QUEUE; tag++) {
2376 struct ata_queued_cmd *qc = __ata_qc_from_tag(ap, tag);
8a937581 2377 struct ata_taskfile *cmd = &qc->tf, *res = &qc->result_tf;
abb6a889
TH
2378 const u8 *cdb = qc->cdb;
2379 char data_buf[20] = "";
2380 char cdb_buf[70] = "";
022bdb07 2381
0260731f 2382 if (!(qc->flags & ATA_QCFLAG_FAILED) ||
b1c72916 2383 ata_dev_phys_link(qc->dev) != link || !qc->err_mask)
022bdb07
TH
2384 continue;
2385
abb6a889
TH
2386 if (qc->dma_dir != DMA_NONE) {
2387 static const char *dma_str[] = {
2388 [DMA_BIDIRECTIONAL] = "bidi",
2389 [DMA_TO_DEVICE] = "out",
2390 [DMA_FROM_DEVICE] = "in",
2391 };
2392 static const char *prot_str[] = {
2393 [ATA_PROT_PIO] = "pio",
2394 [ATA_PROT_DMA] = "dma",
2395 [ATA_PROT_NCQ] = "ncq",
0dc36888
TH
2396 [ATAPI_PROT_PIO] = "pio",
2397 [ATAPI_PROT_DMA] = "dma",
abb6a889
TH
2398 };
2399
2400 snprintf(data_buf, sizeof(data_buf), " %s %u %s",
2401 prot_str[qc->tf.protocol], qc->nbytes,
2402 dma_str[qc->dma_dir]);
2403 }
2404
6521148c
RH
2405 if (ata_is_atapi(qc->tf.protocol)) {
2406 if (qc->scsicmd)
2407 scsi_print_command(qc->scsicmd);
2408 else
2409 snprintf(cdb_buf, sizeof(cdb_buf),
abb6a889
TH
2410 "cdb %02x %02x %02x %02x %02x %02x %02x %02x "
2411 "%02x %02x %02x %02x %02x %02x %02x %02x\n ",
2412 cdb[0], cdb[1], cdb[2], cdb[3],
2413 cdb[4], cdb[5], cdb[6], cdb[7],
2414 cdb[8], cdb[9], cdb[10], cdb[11],
2415 cdb[12], cdb[13], cdb[14], cdb[15]);
6521148c
RH
2416 } else {
2417 const char *descr = ata_get_cmd_descript(cmd->command);
2418 if (descr)
2419 ata_dev_printk(qc->dev, KERN_ERR,
2420 "failed command: %s\n", descr);
2421 }
abb6a889 2422
8a937581
TH
2423 ata_dev_printk(qc->dev, KERN_ERR,
2424 "cmd %02x/%02x:%02x:%02x:%02x:%02x/%02x:%02x:%02x:%02x:%02x/%02x "
abb6a889 2425 "tag %d%s\n %s"
8a937581 2426 "res %02x/%02x:%02x:%02x:%02x:%02x/%02x:%02x:%02x:%02x:%02x/%02x "
5335b729 2427 "Emask 0x%x (%s)%s\n",
8a937581
TH
2428 cmd->command, cmd->feature, cmd->nsect,
2429 cmd->lbal, cmd->lbam, cmd->lbah,
2430 cmd->hob_feature, cmd->hob_nsect,
2431 cmd->hob_lbal, cmd->hob_lbam, cmd->hob_lbah,
abb6a889 2432 cmd->device, qc->tag, data_buf, cdb_buf,
8a937581
TH
2433 res->command, res->feature, res->nsect,
2434 res->lbal, res->lbam, res->lbah,
2435 res->hob_feature, res->hob_nsect,
2436 res->hob_lbal, res->hob_lbam, res->hob_lbah,
5335b729
TH
2437 res->device, qc->err_mask, ata_err_string(qc->err_mask),
2438 qc->err_mask & AC_ERR_NCQ ? " <F>" : "");
1333e194 2439
6521148c 2440#ifdef CONFIG_ATA_VERBOSE_ERROR
1333e194 2441 if (res->command & (ATA_BUSY | ATA_DRDY | ATA_DF | ATA_DRQ |
2dcb407e 2442 ATA_ERR)) {
1333e194
RH
2443 if (res->command & ATA_BUSY)
2444 ata_dev_printk(qc->dev, KERN_ERR,
2dcb407e 2445 "status: { Busy }\n");
1333e194
RH
2446 else
2447 ata_dev_printk(qc->dev, KERN_ERR,
2448 "status: { %s%s%s%s}\n",
2449 res->command & ATA_DRDY ? "DRDY " : "",
2450 res->command & ATA_DF ? "DF " : "",
2451 res->command & ATA_DRQ ? "DRQ " : "",
2dcb407e 2452 res->command & ATA_ERR ? "ERR " : "");
1333e194
RH
2453 }
2454
2455 if (cmd->command != ATA_CMD_PACKET &&
2456 (res->feature & (ATA_ICRC | ATA_UNC | ATA_IDNF |
2457 ATA_ABORTED)))
2458 ata_dev_printk(qc->dev, KERN_ERR,
2459 "error: { %s%s%s%s}\n",
2460 res->feature & ATA_ICRC ? "ICRC " : "",
2461 res->feature & ATA_UNC ? "UNC " : "",
2462 res->feature & ATA_IDNF ? "IDNF " : "",
2dcb407e 2463 res->feature & ATA_ABORTED ? "ABRT " : "");
6521148c 2464#endif
022bdb07
TH
2465 }
2466}
2467
9b1e2658
TH
2468/**
2469 * ata_eh_report - report error handling to user
2470 * @ap: ATA port to report EH about
2471 *
2472 * Report EH to user.
2473 *
2474 * LOCKING:
2475 * None.
2476 */
fb7fd614 2477void ata_eh_report(struct ata_port *ap)
9b1e2658
TH
2478{
2479 struct ata_link *link;
2480
1eca4365 2481 ata_for_each_link(link, ap, HOST_FIRST)
9b1e2658
TH
2482 ata_eh_link_report(link);
2483}
2484
cc0680a5 2485static int ata_do_reset(struct ata_link *link, ata_reset_fn_t reset,
b1c72916
TH
2486 unsigned int *classes, unsigned long deadline,
2487 bool clear_classes)
d87fa38e 2488{
f58229f8 2489 struct ata_device *dev;
d87fa38e 2490
b1c72916 2491 if (clear_classes)
1eca4365 2492 ata_for_each_dev(dev, link, ALL)
b1c72916 2493 classes[dev->devno] = ATA_DEV_UNKNOWN;
d87fa38e 2494
f046519f 2495 return reset(link, classes, deadline);
d87fa38e
TH
2496}
2497
ae791c05 2498static int ata_eh_followup_srst_needed(struct ata_link *link,
5dbfc9cb 2499 int rc, const unsigned int *classes)
664faf09 2500{
45db2f6c 2501 if ((link->flags & ATA_LFLAG_NO_SRST) || ata_link_offline(link))
ae791c05 2502 return 0;
5dbfc9cb
TH
2503 if (rc == -EAGAIN)
2504 return 1;
071f44b1 2505 if (sata_pmp_supported(link->ap) && ata_is_host_link(link))
3495de73 2506 return 1;
664faf09
TH
2507 return 0;
2508}
2509
fb7fd614
TH
2510int ata_eh_reset(struct ata_link *link, int classify,
2511 ata_prereset_fn_t prereset, ata_reset_fn_t softreset,
2512 ata_reset_fn_t hardreset, ata_postreset_fn_t postreset)
022bdb07 2513{
afaa5c37 2514 struct ata_port *ap = link->ap;
b1c72916 2515 struct ata_link *slave = ap->slave_link;
936fd732 2516 struct ata_eh_context *ehc = &link->eh_context;
705d2014 2517 struct ata_eh_context *sehc = slave ? &slave->eh_context : NULL;
664faf09 2518 unsigned int *classes = ehc->classes;
416dc9ed 2519 unsigned int lflags = link->flags;
1cdaf534 2520 int verbose = !(ehc->i.flags & ATA_EHI_QUIET);
d8af0eb6 2521 int max_tries = 0, try = 0;
b1c72916 2522 struct ata_link *failed_link;
f58229f8 2523 struct ata_device *dev;
416dc9ed 2524 unsigned long deadline, now;
022bdb07 2525 ata_reset_fn_t reset;
afaa5c37 2526 unsigned long flags;
416dc9ed 2527 u32 sstatus;
b1c72916 2528 int nr_unknown, rc;
022bdb07 2529
932648b0
TH
2530 /*
2531 * Prepare to reset
2532 */
d8af0eb6
TH
2533 while (ata_eh_reset_timeouts[max_tries] != ULONG_MAX)
2534 max_tries++;
05944bdf
TH
2535 if (link->flags & ATA_LFLAG_NO_HRST)
2536 hardreset = NULL;
2537 if (link->flags & ATA_LFLAG_NO_SRST)
2538 softreset = NULL;
d8af0eb6 2539
19b72321
TH
2540 /* make sure each reset attemp is at least COOL_DOWN apart */
2541 if (ehc->i.flags & ATA_EHI_DID_RESET) {
2542 now = jiffies;
2543 WARN_ON(time_after(ehc->last_reset, now));
2544 deadline = ata_deadline(ehc->last_reset,
2545 ATA_EH_RESET_COOL_DOWN);
2546 if (time_before(now, deadline))
2547 schedule_timeout_uninterruptible(deadline - now);
2548 }
0a2c0f56 2549
afaa5c37
TH
2550 spin_lock_irqsave(ap->lock, flags);
2551 ap->pflags |= ATA_PFLAG_RESETTING;
2552 spin_unlock_irqrestore(ap->lock, flags);
2553
cf480626 2554 ata_eh_about_to_do(link, NULL, ATA_EH_RESET);
13abf50d 2555
1eca4365 2556 ata_for_each_dev(dev, link, ALL) {
cdeab114
TH
2557 /* If we issue an SRST then an ATA drive (not ATAPI)
2558 * may change configuration and be in PIO0 timing. If
2559 * we do a hard reset (or are coming from power on)
2560 * this is true for ATA or ATAPI. Until we've set a
2561 * suitable controller mode we should not touch the
2562 * bus as we may be talking too fast.
2563 */
2564 dev->pio_mode = XFER_PIO_0;
2565
2566 /* If the controller has a pio mode setup function
2567 * then use it to set the chipset to rights. Don't
2568 * touch the DMA setup as that will be dealt with when
2569 * configuring devices.
2570 */
2571 if (ap->ops->set_piomode)
2572 ap->ops->set_piomode(ap, dev);
2573 }
2574
cf480626 2575 /* prefer hardreset */
932648b0 2576 reset = NULL;
cf480626
TH
2577 ehc->i.action &= ~ATA_EH_RESET;
2578 if (hardreset) {
2579 reset = hardreset;
a674050e 2580 ehc->i.action |= ATA_EH_HARDRESET;
4f7faa3f 2581 } else if (softreset) {
cf480626 2582 reset = softreset;
a674050e 2583 ehc->i.action |= ATA_EH_SOFTRESET;
cf480626 2584 }
f5914a46
TH
2585
2586 if (prereset) {
b1c72916
TH
2587 unsigned long deadline = ata_deadline(jiffies,
2588 ATA_EH_PRERESET_TIMEOUT);
2589
2590 if (slave) {
2591 sehc->i.action &= ~ATA_EH_RESET;
2592 sehc->i.action |= ehc->i.action;
2593 }
2594
2595 rc = prereset(link, deadline);
2596
2597 /* If present, do prereset on slave link too. Reset
2598 * is skipped iff both master and slave links report
2599 * -ENOENT or clear ATA_EH_RESET.
2600 */
2601 if (slave && (rc == 0 || rc == -ENOENT)) {
2602 int tmp;
2603
2604 tmp = prereset(slave, deadline);
2605 if (tmp != -ENOENT)
2606 rc = tmp;
2607
2608 ehc->i.action |= sehc->i.action;
2609 }
2610
f5914a46 2611 if (rc) {
c961922b 2612 if (rc == -ENOENT) {
cc0680a5 2613 ata_link_printk(link, KERN_DEBUG,
4aa9ab67 2614 "port disabled. ignoring.\n");
cf480626 2615 ehc->i.action &= ~ATA_EH_RESET;
4aa9ab67 2616
1eca4365 2617 ata_for_each_dev(dev, link, ALL)
f58229f8 2618 classes[dev->devno] = ATA_DEV_NONE;
4aa9ab67
TH
2619
2620 rc = 0;
c961922b 2621 } else
cc0680a5 2622 ata_link_printk(link, KERN_ERR,
f5914a46 2623 "prereset failed (errno=%d)\n", rc);
fccb6ea5 2624 goto out;
f5914a46 2625 }
f5914a46 2626
932648b0 2627 /* prereset() might have cleared ATA_EH_RESET. If so,
d6515e6f 2628 * bang classes, thaw and return.
932648b0
TH
2629 */
2630 if (reset && !(ehc->i.action & ATA_EH_RESET)) {
1eca4365 2631 ata_for_each_dev(dev, link, ALL)
932648b0 2632 classes[dev->devno] = ATA_DEV_NONE;
d6515e6f
TH
2633 if ((ap->pflags & ATA_PFLAG_FROZEN) &&
2634 ata_is_host_link(link))
2635 ata_eh_thaw_port(ap);
932648b0
TH
2636 rc = 0;
2637 goto out;
2638 }
f5914a46
TH
2639 }
2640
022bdb07 2641 retry:
932648b0
TH
2642 /*
2643 * Perform reset
2644 */
dc98c32c
TH
2645 if (ata_is_host_link(link))
2646 ata_eh_freeze_port(ap);
2647
341c2c95 2648 deadline = ata_deadline(jiffies, ata_eh_reset_timeouts[try++]);
31daabda 2649
932648b0
TH
2650 if (reset) {
2651 if (verbose)
2652 ata_link_printk(link, KERN_INFO, "%s resetting link\n",
2653 reset == softreset ? "soft" : "hard");
2654
2655 /* mark that this EH session started with reset */
19b72321 2656 ehc->last_reset = jiffies;
932648b0
TH
2657 if (reset == hardreset)
2658 ehc->i.flags |= ATA_EHI_DID_HARDRESET;
2659 else
2660 ehc->i.flags |= ATA_EHI_DID_SOFTRESET;
022bdb07 2661
b1c72916
TH
2662 rc = ata_do_reset(link, reset, classes, deadline, true);
2663 if (rc && rc != -EAGAIN) {
2664 failed_link = link;
5dbfc9cb 2665 goto fail;
b1c72916
TH
2666 }
2667
2668 /* hardreset slave link if existent */
2669 if (slave && reset == hardreset) {
2670 int tmp;
2671
2672 if (verbose)
2673 ata_link_printk(slave, KERN_INFO,
2674 "hard resetting link\n");
2675
2676 ata_eh_about_to_do(slave, NULL, ATA_EH_RESET);
2677 tmp = ata_do_reset(slave, reset, classes, deadline,
2678 false);
2679 switch (tmp) {
2680 case -EAGAIN:
2681 rc = -EAGAIN;
2682 case 0:
2683 break;
2684 default:
2685 failed_link = slave;
2686 rc = tmp;
2687 goto fail;
2688 }
2689 }
022bdb07 2690
b1c72916 2691 /* perform follow-up SRST if necessary */
932648b0 2692 if (reset == hardreset &&
5dbfc9cb 2693 ata_eh_followup_srst_needed(link, rc, classes)) {
932648b0 2694 reset = softreset;
022bdb07 2695
932648b0
TH
2696 if (!reset) {
2697 ata_link_printk(link, KERN_ERR,
2698 "follow-up softreset required "
2699 "but no softreset avaliable\n");
b1c72916 2700 failed_link = link;
932648b0
TH
2701 rc = -EINVAL;
2702 goto fail;
2703 }
664faf09 2704
932648b0 2705 ata_eh_about_to_do(link, NULL, ATA_EH_RESET);
b1c72916 2706 rc = ata_do_reset(link, reset, classes, deadline, true);
fe2c4d01
TH
2707 if (rc) {
2708 failed_link = link;
2709 goto fail;
2710 }
664faf09 2711 }
932648b0
TH
2712 } else {
2713 if (verbose)
2714 ata_link_printk(link, KERN_INFO, "no reset method "
2715 "available, skipping reset\n");
2716 if (!(lflags & ATA_LFLAG_ASSUME_CLASS))
2717 lflags |= ATA_LFLAG_ASSUME_ATA;
664faf09
TH
2718 }
2719
932648b0
TH
2720 /*
2721 * Post-reset processing
2722 */
1eca4365 2723 ata_for_each_dev(dev, link, ALL) {
416dc9ed
TH
2724 /* After the reset, the device state is PIO 0 and the
2725 * controller state is undefined. Reset also wakes up
2726 * drives from sleeping mode.
2727 */
2728 dev->pio_mode = XFER_PIO_0;
2729 dev->flags &= ~ATA_DFLAG_SLEEPING;
31daabda 2730
3b761d3d
TH
2731 if (ata_phys_link_offline(ata_dev_phys_link(dev)))
2732 continue;
2733
2734 /* apply class override */
2735 if (lflags & ATA_LFLAG_ASSUME_ATA)
2736 classes[dev->devno] = ATA_DEV_ATA;
2737 else if (lflags & ATA_LFLAG_ASSUME_SEMB)
2738 classes[dev->devno] = ATA_DEV_SEMB_UNSUP;
022bdb07
TH
2739 }
2740
416dc9ed
TH
2741 /* record current link speed */
2742 if (sata_scr_read(link, SCR_STATUS, &sstatus) == 0)
2743 link->sata_spd = (sstatus >> 4) & 0xf;
b1c72916
TH
2744 if (slave && sata_scr_read(slave, SCR_STATUS, &sstatus) == 0)
2745 slave->sata_spd = (sstatus >> 4) & 0xf;
008a7896 2746
dc98c32c
TH
2747 /* thaw the port */
2748 if (ata_is_host_link(link))
2749 ata_eh_thaw_port(ap);
2750
f046519f
TH
2751 /* postreset() should clear hardware SError. Although SError
2752 * is cleared during link resume, clearing SError here is
2753 * necessary as some PHYs raise hotplug events after SRST.
2754 * This introduces race condition where hotplug occurs between
2755 * reset and here. This race is mediated by cross checking
2756 * link onlineness and classification result later.
2757 */
b1c72916 2758 if (postreset) {
416dc9ed 2759 postreset(link, classes);
b1c72916
TH
2760 if (slave)
2761 postreset(slave, classes);
2762 }
20952b69 2763
1e641060
TH
2764 /*
2765 * Some controllers can't be frozen very well and may set
2766 * spuruious error conditions during reset. Clear accumulated
2767 * error information. As reset is the final recovery action,
2768 * nothing is lost by doing this.
2769 */
f046519f 2770 spin_lock_irqsave(link->ap->lock, flags);
1e641060 2771 memset(&link->eh_info, 0, sizeof(link->eh_info));
b1c72916 2772 if (slave)
1e641060
TH
2773 memset(&slave->eh_info, 0, sizeof(link->eh_info));
2774 ap->pflags &= ~ATA_PFLAG_EH_PENDING;
f046519f
TH
2775 spin_unlock_irqrestore(link->ap->lock, flags);
2776
3b761d3d
TH
2777 /*
2778 * Make sure onlineness and classification result correspond.
f046519f
TH
2779 * Hotplug could have happened during reset and some
2780 * controllers fail to wait while a drive is spinning up after
2781 * being hotplugged causing misdetection. By cross checking
3b761d3d
TH
2782 * link on/offlineness and classification result, those
2783 * conditions can be reliably detected and retried.
f046519f 2784 */
b1c72916 2785 nr_unknown = 0;
1eca4365 2786 ata_for_each_dev(dev, link, ALL) {
3b761d3d
TH
2787 if (ata_phys_link_online(ata_dev_phys_link(dev))) {
2788 if (classes[dev->devno] == ATA_DEV_UNKNOWN) {
2789 ata_dev_printk(dev, KERN_DEBUG, "link online "
2790 "but device misclassifed\n");
2791 classes[dev->devno] = ATA_DEV_NONE;
b1c72916 2792 nr_unknown++;
3b761d3d
TH
2793 }
2794 } else if (ata_phys_link_offline(ata_dev_phys_link(dev))) {
2795 if (ata_class_enabled(classes[dev->devno]))
2796 ata_dev_printk(dev, KERN_DEBUG, "link offline, "
2797 "clearing class %d to NONE\n",
2798 classes[dev->devno]);
2799 classes[dev->devno] = ATA_DEV_NONE;
2800 } else if (classes[dev->devno] == ATA_DEV_UNKNOWN) {
2801 ata_dev_printk(dev, KERN_DEBUG, "link status unknown, "
2802 "clearing UNKNOWN to NONE\n");
2803 classes[dev->devno] = ATA_DEV_NONE;
b1c72916 2804 }
f046519f
TH
2805 }
2806
b1c72916 2807 if (classify && nr_unknown) {
f046519f
TH
2808 if (try < max_tries) {
2809 ata_link_printk(link, KERN_WARNING, "link online but "
3b761d3d
TH
2810 "%d devices misclassified, retrying\n",
2811 nr_unknown);
b1c72916 2812 failed_link = link;
f046519f
TH
2813 rc = -EAGAIN;
2814 goto fail;
2815 }
2816 ata_link_printk(link, KERN_WARNING,
3b761d3d
TH
2817 "link online but %d devices misclassified, "
2818 "device detection might fail\n", nr_unknown);
f046519f
TH
2819 }
2820
416dc9ed 2821 /* reset successful, schedule revalidation */
cf480626 2822 ata_eh_done(link, NULL, ATA_EH_RESET);
b1c72916
TH
2823 if (slave)
2824 ata_eh_done(slave, NULL, ATA_EH_RESET);
6b7ae954 2825 ehc->last_reset = jiffies; /* update to completion time */
416dc9ed 2826 ehc->i.action |= ATA_EH_REVALIDATE;
6b7ae954 2827 link->lpm_policy = ATA_LPM_UNKNOWN; /* reset LPM state */
ae791c05 2828
416dc9ed 2829 rc = 0;
fccb6ea5
TH
2830 out:
2831 /* clear hotplug flag */
2832 ehc->i.flags &= ~ATA_EHI_HOTPLUGGED;
b1c72916
TH
2833 if (slave)
2834 sehc->i.flags &= ~ATA_EHI_HOTPLUGGED;
afaa5c37
TH
2835
2836 spin_lock_irqsave(ap->lock, flags);
2837 ap->pflags &= ~ATA_PFLAG_RESETTING;
2838 spin_unlock_irqrestore(ap->lock, flags);
2839
022bdb07 2840 return rc;
416dc9ed
TH
2841
2842 fail:
5958e302
TH
2843 /* if SCR isn't accessible on a fan-out port, PMP needs to be reset */
2844 if (!ata_is_host_link(link) &&
2845 sata_scr_read(link, SCR_STATUS, &sstatus))
2846 rc = -ERESTART;
2847
416dc9ed
TH
2848 if (rc == -ERESTART || try >= max_tries)
2849 goto out;
2850
2851 now = jiffies;
2852 if (time_before(now, deadline)) {
2853 unsigned long delta = deadline - now;
2854
b1c72916 2855 ata_link_printk(failed_link, KERN_WARNING,
0a2c0f56
TH
2856 "reset failed (errno=%d), retrying in %u secs\n",
2857 rc, DIV_ROUND_UP(jiffies_to_msecs(delta), 1000));
416dc9ed 2858
c0c362b6 2859 ata_eh_release(ap);
416dc9ed
TH
2860 while (delta)
2861 delta = schedule_timeout_uninterruptible(delta);
c0c362b6 2862 ata_eh_acquire(ap);
416dc9ed
TH
2863 }
2864
b1c72916 2865 if (try == max_tries - 1) {
a07d499b 2866 sata_down_spd_limit(link, 0);
b1c72916 2867 if (slave)
a07d499b 2868 sata_down_spd_limit(slave, 0);
b1c72916 2869 } else if (rc == -EPIPE)
a07d499b 2870 sata_down_spd_limit(failed_link, 0);
b1c72916 2871
416dc9ed
TH
2872 if (hardreset)
2873 reset = hardreset;
2874 goto retry;
022bdb07
TH
2875}
2876
45fabbb7
EO
/* Pull pending ATA_EH_PARK requests from eh_info into eh_context for
 * every device on @ap, under the host lock.  Called once per pass of
 * the park polling loop in ata_eh_recover().
 */
static inline void ata_eh_pull_park_action(struct ata_port *ap)
{
	struct ata_link *link;
	struct ata_device *dev;
	unsigned long flags;

	/*
	 * This function can be thought of as an extended version of
	 * ata_eh_about_to_do() specially crafted to accommodate the
	 * requirements of ATA_EH_PARK handling. Since the EH thread
	 * does not leave the do {} while () loop in ata_eh_recover as
	 * long as the timeout for a park request to *one* device on
	 * the port has not expired, and since we still want to pick
	 * up park requests to other devices on the same port or
	 * timeout updates for the same device, we have to pull
	 * ATA_EH_PARK actions from eh_info into eh_context.i
	 * ourselves at the beginning of each pass over the loop.
	 *
	 * Additionally, all write accesses to &ap->park_req_pending
	 * through INIT_COMPLETION() (see below) or complete_all()
	 * (see ata_scsi_park_store()) are protected by the host lock.
	 * As a result we have that park_req_pending.done is zero on
	 * exit from this function, i.e. when ATA_EH_PARK actions for
	 * *all* devices on port ap have been pulled into the
	 * respective eh_context structs. If, and only if,
	 * park_req_pending.done is non-zero by the time we reach
	 * wait_for_completion_timeout(), another ATA_EH_PARK action
	 * has been scheduled for at least one of the devices on port
	 * ap and we have to cycle over the do {} while () loop in
	 * ata_eh_recover() again.
	 */

	spin_lock_irqsave(ap->lock, flags);
	/* re-arm the completion so a new park request is detectable */
	INIT_COMPLETION(ap->park_req_pending);
	ata_for_each_link(link, ap, EDGE) {
		ata_for_each_dev(dev, link, ALL) {
			struct ata_eh_info *ehi = &link->eh_info;

			/* move the PARK bit from eh_info to eh_context */
			link->eh_context.i.dev_action[dev->devno] |=
				ehi->dev_action[dev->devno] & ATA_EH_PARK;
			ata_eh_clear_action(link, dev, ehi, ATA_EH_PARK);
		}
	}
	spin_unlock_irqrestore(ap->lock, flags);
}
2922
2923static void ata_eh_park_issue_cmd(struct ata_device *dev, int park)
2924{
2925 struct ata_eh_context *ehc = &dev->link->eh_context;
2926 struct ata_taskfile tf;
2927 unsigned int err_mask;
2928
2929 ata_tf_init(dev, &tf);
2930 if (park) {
2931 ehc->unloaded_mask |= 1 << dev->devno;
2932 tf.command = ATA_CMD_IDLEIMMEDIATE;
2933 tf.feature = 0x44;
2934 tf.lbal = 0x4c;
2935 tf.lbam = 0x4e;
2936 tf.lbah = 0x55;
2937 } else {
2938 ehc->unloaded_mask &= ~(1 << dev->devno);
2939 tf.command = ATA_CMD_CHK_POWER;
2940 }
2941
2942 tf.flags |= ATA_TFLAG_DEVICE | ATA_TFLAG_ISADDR;
2943 tf.protocol |= ATA_PROT_NODATA;
2944 err_mask = ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0, 0);
2945 if (park && (err_mask || tf.lbal != 0xc4)) {
2946 ata_dev_printk(dev, KERN_ERR, "head unload failed!\n");
2947 ehc->unloaded_mask &= ~(1 << dev->devno);
2948 }
2949}
2950
/* Revalidate enabled devices on @link and attach newly discovered
 * ones.  On failure the offending device is returned through
 * @r_failed_dev and a negative errno is returned.  Identification is
 * done in reverse device order, configuration in forward order — see
 * the comments below for why.
 */
static int ata_eh_revalidate_and_attach(struct ata_link *link,
					struct ata_device **r_failed_dev)
{
	struct ata_port *ap = link->ap;
	struct ata_eh_context *ehc = &link->eh_context;
	struct ata_device *dev;
	unsigned int new_mask = 0;	/* devno bits of freshly identified devices */
	unsigned long flags;
	int rc = 0;

	DPRINTK("ENTER\n");

	/* For PATA drive side cable detection to work, IDENTIFY must
	 * be done backwards such that PDIAG- is released by the slave
	 * device before the master device is identified.
	 */
	ata_for_each_dev(dev, link, ALL_REVERSE) {
		unsigned int action = ata_eh_dev_action(dev);
		unsigned int readid_flags = 0;

		if (ehc->i.flags & ATA_EHI_DID_RESET)
			readid_flags |= ATA_READID_POSTRESET;

		if ((action & ATA_EH_REVALIDATE) && ata_dev_enabled(dev)) {
			WARN_ON(dev->class == ATA_DEV_PMP);

			/* can't revalidate a device whose phys link is down */
			if (ata_phys_link_offline(ata_dev_phys_link(dev))) {
				rc = -EIO;
				goto err;
			}

			ata_eh_about_to_do(link, dev, ATA_EH_REVALIDATE);
			rc = ata_dev_revalidate(dev, ehc->classes[dev->devno],
						readid_flags);
			if (rc)
				goto err;

			ata_eh_done(link, dev, ATA_EH_REVALIDATE);

			/* Configuration may have changed, reconfigure
			 * transfer mode.
			 */
			ehc->i.flags |= ATA_EHI_SETMODE;

			/* schedule the scsi_rescan_device() here */
			schedule_work(&(ap->scsi_rescan_task));
		} else if (dev->class == ATA_DEV_UNKNOWN &&
			   ehc->tries[dev->devno] &&
			   ata_class_enabled(ehc->classes[dev->devno])) {
			/* Temporarily set dev->class, it will be
			 * permanently set once all configurations are
			 * complete.  This is necessary because new
			 * device configuration is done in two
			 * separate loops.
			 */
			dev->class = ehc->classes[dev->devno];

			if (dev->class == ATA_DEV_PMP)
				rc = sata_pmp_attach(dev);
			else
				rc = ata_dev_read_id(dev, &dev->class,
						     readid_flags, dev->id);

			/* read_id might have changed class, store and reset */
			ehc->classes[dev->devno] = dev->class;
			dev->class = ATA_DEV_UNKNOWN;

			switch (rc) {
			case 0:
				/* clear error info accumulated during probe */
				ata_ering_clear(&dev->ering);
				new_mask |= 1 << dev->devno;
				break;
			case -ENOENT:
				/* IDENTIFY was issued to non-existent
				 * device.  No need to reset.  Just
				 * thaw and ignore the device.
				 */
				ata_eh_thaw_port(ap);
				break;
			default:
				goto err;
			}
		}
	}

	/* PDIAG- should have been released, ask cable type if post-reset */
	if ((ehc->i.flags & ATA_EHI_DID_RESET) && ata_is_host_link(link)) {
		if (ap->ops->cable_detect)
			ap->cbl = ap->ops->cable_detect(ap);
		ata_force_cbl(ap);
	}

	/* Configure new devices forward such that user doesn't see
	 * device detection messages backwards.
	 */
	ata_for_each_dev(dev, link, ALL) {
		if (!(new_mask & (1 << dev->devno)))
			continue;

		/* now make the temporary class assignment permanent */
		dev->class = ehc->classes[dev->devno];

		/* PMPs are configured by sata_pmp_attach() above */
		if (dev->class == ATA_DEV_PMP)
			continue;

		ehc->i.flags |= ATA_EHI_PRINTINFO;
		rc = ata_dev_configure(dev);
		ehc->i.flags &= ~ATA_EHI_PRINTINFO;
		if (rc) {
			dev->class = ATA_DEV_UNKNOWN;
			goto err;
		}

		/* tell SCSI layer a new device needs to be attached */
		spin_lock_irqsave(ap->lock, flags);
		ap->pflags |= ATA_PFLAG_SCSI_HOTPLUG;
		spin_unlock_irqrestore(ap->lock, flags);

		/* new device discovered, configure xfermode */
		ehc->i.flags |= ATA_EHI_SETMODE;
	}

	return 0;

 err:
	*r_failed_dev = dev;
	DPRINTK("EXIT rc=%d\n", rc);
	return rc;
}
3079
6f1d1e3a
TH
3080/**
3081 * ata_set_mode - Program timings and issue SET FEATURES - XFER
3082 * @link: link on which timings will be programmed
98a1708d 3083 * @r_failed_dev: out parameter for failed device
6f1d1e3a
TH
3084 *
3085 * Set ATA device disk transfer mode (PIO3, UDMA6, etc.). If
3086 * ata_set_mode() fails, pointer to the failing device is
3087 * returned in @r_failed_dev.
3088 *
3089 * LOCKING:
3090 * PCI/etc. bus probe sem.
3091 *
3092 * RETURNS:
3093 * 0 on success, negative errno otherwise
3094 */
int ata_set_mode(struct ata_link *link, struct ata_device **r_failed_dev)
{
	struct ata_port *ap = link->ap;
	struct ata_device *dev;
	int rc;

	/* if data transfer is verified, clear DUBIOUS_XFER on ering top */
	ata_for_each_dev(dev, link, ENABLED) {
		if (!(dev->flags & ATA_DFLAG_DUBIOUS_XFER)) {
			struct ata_ering_entry *ent;

			/* only the most recent error record carries the flag */
			ent = ata_ering_top(&dev->ering);
			if (ent)
				ent->eflags &= ~ATA_EFLAG_DUBIOUS_XFER;
		}
	}

	/* has private set_mode? */
	if (ap->ops->set_mode)
		rc = ap->ops->set_mode(link, r_failed_dev);
	else
		rc = ata_do_set_mode(link, r_failed_dev);

	/* if transfer mode has changed, set DUBIOUS_XFER on device */
	ata_for_each_dev(dev, link, ENABLED) {
		struct ata_eh_context *ehc = &link->eh_context;
		u8 saved_xfer_mode = ehc->saved_xfer_mode[dev->devno];
		u8 saved_ncq = !!(ehc->saved_ncq_enabled & (1 << dev->devno));

		/* a mode or NCQ change means the new setting is unproven
		 * until a transfer succeeds with it
		 */
		if (dev->xfer_mode != saved_xfer_mode ||
		    ata_ncq_enabled(dev) != saved_ncq)
			dev->flags |= ATA_DFLAG_DUBIOUS_XFER;
	}

	return rc;
}
3131
11fc33da
TH
3132/**
3133 * atapi_eh_clear_ua - Clear ATAPI UNIT ATTENTION after reset
3134 * @dev: ATAPI device to clear UA for
3135 *
3136 * Resets and other operations can make an ATAPI device raise
3137 * UNIT ATTENTION which causes the next operation to fail. This
3138 * function clears UA.
3139 *
3140 * LOCKING:
3141 * EH context (may sleep).
3142 *
3143 * RETURNS:
3144 * 0 on success, -errno on failure.
3145 */
3146static int atapi_eh_clear_ua(struct ata_device *dev)
3147{
3148 int i;
3149
3150 for (i = 0; i < ATA_EH_UA_TRIES; i++) {
b5357081 3151 u8 *sense_buffer = dev->link->ap->sector_buf;
11fc33da
TH
3152 u8 sense_key = 0;
3153 unsigned int err_mask;
3154
3155 err_mask = atapi_eh_tur(dev, &sense_key);
3156 if (err_mask != 0 && err_mask != AC_ERR_DEV) {
3157 ata_dev_printk(dev, KERN_WARNING, "TEST_UNIT_READY "
3158 "failed (err_mask=0x%x)\n", err_mask);
3159 return -EIO;
3160 }
3161
3162 if (!err_mask || sense_key != UNIT_ATTENTION)
3163 return 0;
3164
3165 err_mask = atapi_eh_request_sense(dev, sense_buffer, sense_key);
3166 if (err_mask) {
3167 ata_dev_printk(dev, KERN_WARNING, "failed to clear "
3168 "UNIT ATTENTION (err_mask=0x%x)\n", err_mask);
3169 return -EIO;
3170 }
3171 }
3172
3173 ata_dev_printk(dev, KERN_WARNING,
3174 "UNIT ATTENTION persists after %d tries\n", ATA_EH_UA_TRIES);
3175
3176 return 0;
3177}
3178
6013efd8
TH
3179/**
3180 * ata_eh_maybe_retry_flush - Retry FLUSH if necessary
3181 * @dev: ATA device which may need FLUSH retry
3182 *
3183 * If @dev failed FLUSH, it needs to be reported upper layer
3184 * immediately as it means that @dev failed to remap and already
3185 * lost at least a sector and further FLUSH retrials won't make
3186 * any difference to the lost sector. However, if FLUSH failed
3187 * for other reasons, for example transmission error, FLUSH needs
3188 * to be retried.
3189 *
3190 * This function determines whether FLUSH failure retry is
3191 * necessary and performs it if so.
3192 *
3193 * RETURNS:
3194 * 0 if EH can continue, -errno if EH needs to be repeated.
3195 */
static int ata_eh_maybe_retry_flush(struct ata_device *dev)
{
	struct ata_link *link = dev->link;
	struct ata_port *ap = link->ap;
	struct ata_queued_cmd *qc;
	struct ata_taskfile tf;
	unsigned int err_mask;
	int rc = 0;

	/* did flush fail for this device? */
	if (!ata_tag_valid(link->active_tag))
		return 0;

	qc = __ata_qc_from_tag(ap, link->active_tag);
	if (qc->dev != dev || (qc->tf.command != ATA_CMD_FLUSH_EXT &&
			       qc->tf.command != ATA_CMD_FLUSH))
		return 0;

	/* if the device failed it, it should be reported to upper layers */
	if (qc->err_mask & AC_ERR_DEV)
		return 0;

	/* flush failed for some other reason, give it another shot */
	ata_tf_init(dev, &tf);

	/* reuse the same FLUSH/FLUSH EXT opcode the failed qc carried */
	tf.command = qc->tf.command;
	tf.flags |= ATA_TFLAG_DEVICE;
	tf.protocol = ATA_PROT_NODATA;

	ata_dev_printk(dev, KERN_WARNING, "retrying FLUSH 0x%x Emask 0x%x\n",
		       tf.command, qc->err_mask);

	err_mask = ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0, 0);
	if (!err_mask) {
		/*
		 * FLUSH is complete but there's no way to
		 * successfully complete a failed command from EH.
		 * Making sure retry is allowed at least once and
		 * retrying it should do the trick - whatever was in
		 * the cache is already on the platter and this won't
		 * cause infinite loop.
		 */
		qc->scsicmd->allowed = max(qc->scsicmd->allowed, 1);
	} else {
		ata_dev_printk(dev, KERN_WARNING, "FLUSH failed Emask 0x%x\n",
			       err_mask);
		rc = -EIO;

		/* if device failed it, report it to upper layers */
		if (err_mask & AC_ERR_DEV) {
			qc->err_mask |= AC_ERR_DEV;
			qc->result_tf = tf;
			/* a frozen port still needs another EH round */
			if (!(ap->pflags & ATA_PFLAG_FROZEN))
				rc = 0;
		}
	}
	return rc;
}
3254
6b7ae954
TH
3255/**
3256 * ata_eh_set_lpm - configure SATA interface power management
3257 * @link: link to configure power management
3258 * @policy: the link power management policy
3259 * @r_failed_dev: out parameter for failed device
3260 *
3261 * Enable SATA Interface power management. This will enable
3262 * Device Interface Power Management (DIPM) for min_power
3263 * policy, and then call driver specific callbacks for
3264 * enabling Host Initiated Power management.
3265 *
3266 * LOCKING:
3267 * EH context.
3268 *
3269 * RETURNS:
3270 * 0 on success, -errno on failure.
3271 */
static int ata_eh_set_lpm(struct ata_link *link, enum ata_lpm_policy policy,
			  struct ata_device **r_failed_dev)
{
	/* ap is NULL when @link hangs off a port multiplier */
	struct ata_port *ap = ata_is_host_link(link) ? link->ap : NULL;
	struct ata_eh_context *ehc = &link->eh_context;
	struct ata_device *dev, *link_dev = NULL, *lpm_dev = NULL;
	enum ata_lpm_policy old_policy = link->lpm_policy;
	unsigned int hints = ATA_LPM_EMPTY | ATA_LPM_HIPM;
	unsigned int err_mask;
	int rc;

	/* if the link or host doesn't do LPM, noop */
	if ((link->flags & ATA_LFLAG_NO_LPM) || (ap && !ap->ops->set_lpm))
		return 0;

	/*
	 * DIPM is enabled only for MIN_POWER as some devices
	 * misbehave when the host NACKs transition to SLUMBER.  Order
	 * device and link configurations such that the host always
	 * allows DIPM requests.
	 */
	ata_for_each_dev(dev, link, ENABLED) {
		bool hipm = ata_id_has_hipm(dev->id);
		bool dipm = ata_id_has_dipm(dev->id);

		/* find the first enabled and LPM enabled devices */
		if (!link_dev)
			link_dev = dev;

		if (!lpm_dev && (hipm || dipm))
			lpm_dev = dev;

		/* at least one device exists; drop the EMPTY hint */
		hints &= ~ATA_LPM_EMPTY;
		if (!hipm)
			hints &= ~ATA_LPM_HIPM;

		/* disable DIPM before changing link config */
		if (policy != ATA_LPM_MIN_POWER && dipm) {
			err_mask = ata_dev_set_feature(dev,
					SETFEATURES_SATA_DISABLE, SATA_DIPM);
			if (err_mask && err_mask != AC_ERR_DEV) {
				ata_dev_printk(dev, KERN_WARNING,
					"failed to disable DIPM, Emask 0x%x\n",
					err_mask);
				rc = -EIO;
				goto fail;
			}
		}
	}

	if (ap) {
		rc = ap->ops->set_lpm(link, policy, hints);
		if (!rc && ap->slave_link)
			rc = ap->ops->set_lpm(ap->slave_link, policy, hints);
	} else
		rc = sata_pmp_set_lpm(link, policy, hints);

	/*
	 * Attribute link config failure to the first (LPM) enabled
	 * device on the link.
	 */
	if (rc) {
		if (rc == -EOPNOTSUPP) {
			link->flags |= ATA_LFLAG_NO_LPM;
			return 0;
		}
		dev = lpm_dev ? lpm_dev : link_dev;
		goto fail;
	}

	/*
	 * Low level driver acked the transition.  Issue DIPM command
	 * with the new policy set.
	 */
	link->lpm_policy = policy;
	if (ap && ap->slave_link)
		ap->slave_link->lpm_policy = policy;

	/* host config updated, enable DIPM if transitioning to MIN_POWER */
	ata_for_each_dev(dev, link, ENABLED) {
		if (policy == ATA_LPM_MIN_POWER && ata_id_has_dipm(dev->id)) {
			err_mask = ata_dev_set_feature(dev,
					SETFEATURES_SATA_ENABLE, SATA_DIPM);
			if (err_mask && err_mask != AC_ERR_DEV) {
				ata_dev_printk(dev, KERN_WARNING,
					"failed to enable DIPM, Emask 0x%x\n",
					err_mask);
				rc = -EIO;
				goto fail;
			}
		}
	}

	return 0;

fail:
	/* restore the old policy */
	link->lpm_policy = old_policy;
	if (ap && ap->slave_link)
		ap->slave_link->lpm_policy = old_policy;

	/* if no device or only one more chance is left, disable LPM */
	if (!dev || ehc->tries[dev->devno] <= 2) {
		ata_link_printk(link, KERN_WARNING,
				"disabling LPM on the link\n");
		link->flags |= ATA_LFLAG_NO_LPM;
	}
	if (r_failed_dev)
		*r_failed_dev = dev;
	return rc;
}
3383
0260731f 3384static int ata_link_nr_enabled(struct ata_link *link)
022bdb07 3385{
f58229f8
TH
3386 struct ata_device *dev;
3387 int cnt = 0;
022bdb07 3388
1eca4365
TH
3389 ata_for_each_dev(dev, link, ENABLED)
3390 cnt++;
022bdb07
TH
3391 return cnt;
3392}
3393
0260731f 3394static int ata_link_nr_vacant(struct ata_link *link)
084fe639 3395{
f58229f8
TH
3396 struct ata_device *dev;
3397 int cnt = 0;
084fe639 3398
1eca4365 3399 ata_for_each_dev(dev, link, ALL)
f58229f8 3400 if (dev->class == ATA_DEV_UNKNOWN)
084fe639
TH
3401 cnt++;
3402 return cnt;
3403}
3404
0260731f 3405static int ata_eh_skip_recovery(struct ata_link *link)
084fe639 3406{
672b2d65 3407 struct ata_port *ap = link->ap;
0260731f 3408 struct ata_eh_context *ehc = &link->eh_context;
f58229f8 3409 struct ata_device *dev;
084fe639 3410
f9df58cb
TH
3411 /* skip disabled links */
3412 if (link->flags & ATA_LFLAG_DISABLED)
3413 return 1;
3414
e2f3d75f
TH
3415 /* skip if explicitly requested */
3416 if (ehc->i.flags & ATA_EHI_NO_RECOVERY)
3417 return 1;
3418
672b2d65
TH
3419 /* thaw frozen port and recover failed devices */
3420 if ((ap->pflags & ATA_PFLAG_FROZEN) || ata_link_nr_enabled(link))
3421 return 0;
3422
3423 /* reset at least once if reset is requested */
3424 if ((ehc->i.action & ATA_EH_RESET) &&
3425 !(ehc->i.flags & ATA_EHI_DID_RESET))
084fe639
TH
3426 return 0;
3427
3428 /* skip if class codes for all vacant slots are ATA_DEV_NONE */
1eca4365 3429 ata_for_each_dev(dev, link, ALL) {
084fe639
TH
3430 if (dev->class == ATA_DEV_UNKNOWN &&
3431 ehc->classes[dev->devno] != ATA_DEV_NONE)
3432 return 0;
3433 }
3434
3435 return 1;
3436}
3437
c2c7a89c
TH
3438static int ata_count_probe_trials_cb(struct ata_ering_entry *ent, void *void_arg)
3439{
3440 u64 interval = msecs_to_jiffies(ATA_EH_PROBE_TRIAL_INTERVAL);
3441 u64 now = get_jiffies_64();
3442 int *trials = void_arg;
3443
3444 if (ent->timestamp < now - min(now, interval))
3445 return -1;
3446
3447 (*trials)++;
3448 return 0;
3449}
3450
02c05a27
TH
/* Schedule a fresh probe of @dev if one is requested and hasn't been
 * attempted yet this EH run.  Returns 1 when a probe was scheduled,
 * 0 otherwise.
 */
static int ata_eh_schedule_probe(struct ata_device *dev)
{
	struct ata_eh_context *ehc = &dev->link->eh_context;
	struct ata_link *link = ata_dev_phys_link(dev);
	int trials = 0;

	/* bail out unless a probe is requested and not yet attempted */
	if (!(ehc->i.probe_mask & (1 << dev->devno)) ||
	    (ehc->did_probe_mask & (1 << dev->devno)))
		return 0;

	/* start from a clean device state and force a reset */
	ata_eh_detach_dev(dev);
	ata_dev_init(dev);
	ehc->did_probe_mask |= (1 << dev->devno);
	ehc->i.action |= ATA_EH_RESET;
	ehc->saved_xfer_mode[dev->devno] = 0;
	ehc->saved_ncq_enabled &= ~(1 << dev->devno);

	/* the link maybe in a deep sleep, wake it up */
	if (link->lpm_policy > ATA_LPM_MAX_POWER) {
		if (ata_is_host_link(link))
			link->ap->ops->set_lpm(link, ATA_LPM_MAX_POWER,
					       ATA_LPM_EMPTY);
		else
			sata_pmp_set_lpm(link, ATA_LPM_MAX_POWER,
					 ATA_LPM_EMPTY);
	}

	/* Record and count probe trials on the ering.  The specific
	 * error mask used is irrelevant.  Because a successful device
	 * detection clears the ering, this count accumulates only if
	 * there are consecutive failed probes.
	 *
	 * If the count is equal to or higher than ATA_EH_PROBE_TRIALS
	 * in the last ATA_EH_PROBE_TRIAL_INTERVAL, link speed is
	 * forced to 1.5Gbps.
	 *
	 * This is to work around cases where failed link speed
	 * negotiation results in device misdetection leading to
	 * infinite DEVXCHG or PHRDY CHG events.
	 */
	ata_ering_record(&dev->ering, 0, AC_ERR_OTHER);
	ata_ering_map(&dev->ering, ata_count_probe_trials_cb, &trials);

	if (trials > ATA_EH_PROBE_TRIALS)
		sata_down_spd_limit(link, 1);

	return 1;
}
3499
/* Account a recovery failure of @dev with error @err.  Returns 1 if
 * the device was given up on (disabled/probe rescheduled), 0 if
 * another recovery attempt should be made.
 */
static int ata_eh_handle_dev_fail(struct ata_device *dev, int err)
{
	struct ata_eh_context *ehc = &dev->link->eh_context;

	/* -EAGAIN from EH routine indicates retry without prejudice.
	 * The requester is responsible for ensuring forward progress.
	 */
	if (err != -EAGAIN)
		ehc->tries[dev->devno]--;

	switch (err) {
	case -ENODEV:
		/* device missing or wrong IDENTIFY data, schedule probing */
		ehc->i.probe_mask |= (1 << dev->devno);
		/* fall through */
	case -EINVAL:
		/* give it just one more chance */
		ehc->tries[dev->devno] = min(ehc->tries[dev->devno], 1);
		/* fall through */
	case -EIO:
		if (ehc->tries[dev->devno] == 1) {
			/* This is the last chance, better to slow
			 * down than lose it.
			 */
			sata_down_spd_limit(ata_dev_phys_link(dev), 0);
			if (dev->pio_mode > XFER_PIO_0)
				ata_down_xfermask_limit(dev, ATA_DNXFER_PIO);
		}
	}

	if (ata_dev_enabled(dev) && !ehc->tries[dev->devno]) {
		/* disable device if it has used up all its chances */
		ata_dev_disable(dev);

		/* detach if offline */
		if (ata_phys_link_offline(ata_dev_phys_link(dev)))
			ata_eh_detach_dev(dev);

		/* schedule probe if necessary */
		if (ata_eh_schedule_probe(dev)) {
			ehc->tries[dev->devno] = ATA_EH_DEV_TRIES;
			/* probes start with fresh command timeout history */
			memset(ehc->cmd_timeout_idx[dev->devno], 0,
			       sizeof(ehc->cmd_timeout_idx[dev->devno]));
		}

		return 1;
	} else {
		ehc->i.action |= ATA_EH_RESET;
		return 0;
	}
}
3549
022bdb07
TH
3550/**
3551 * ata_eh_recover - recover host port after error
3552 * @ap: host port to recover
f5914a46 3553 * @prereset: prereset method (can be NULL)
022bdb07
TH
3554 * @softreset: softreset method (can be NULL)
3555 * @hardreset: hardreset method (can be NULL)
3556 * @postreset: postreset method (can be NULL)
9b1e2658 3557 * @r_failed_link: out parameter for failed link
022bdb07
TH
3558 *
3559 * This is the alpha and omega, eum and yang, heart and soul of
3560 * libata exception handling. On entry, actions required to
9b1e2658
TH
3561 * recover each link and hotplug requests are recorded in the
3562 * link's eh_context. This function executes all the operations
3563 * with appropriate retrials and fallbacks to resurrect failed
084fe639 3564 * devices, detach goners and greet newcomers.
022bdb07
TH
3565 *
3566 * LOCKING:
3567 * Kernel thread context (may sleep).
3568 *
3569 * RETURNS:
3570 * 0 on success, -errno on failure.
3571 */
fb7fd614
TH
int ata_eh_recover(struct ata_port *ap, ata_prereset_fn_t prereset,
		   ata_reset_fn_t softreset, ata_reset_fn_t hardreset,
		   ata_postreset_fn_t postreset,
		   struct ata_link **r_failed_link)
{
	struct ata_link *link;
	struct ata_device *dev;
	int rc, nr_fails;
	unsigned long flags, deadline;

	DPRINTK("ENTER\n");

	/* prep for recovery */
	ata_for_each_link(link, ap, EDGE) {
		struct ata_eh_context *ehc = &link->eh_context;

		/* re-enable link? */
		if (ehc->i.action & ATA_EH_ENABLE_LINK) {
			ata_eh_about_to_do(link, NULL, ATA_EH_ENABLE_LINK);
			spin_lock_irqsave(ap->lock, flags);
			link->flags &= ~ATA_LFLAG_DISABLED;
			spin_unlock_irqrestore(ap->lock, flags);
			ata_eh_done(link, NULL, ATA_EH_ENABLE_LINK);
		}

		ata_for_each_dev(dev, link, ALL) {
			if (link->flags & ATA_LFLAG_NO_RETRY)
				ehc->tries[dev->devno] = 1;
			else
				ehc->tries[dev->devno] = ATA_EH_DEV_TRIES;

			/* collect port action mask recorded in dev actions */
			ehc->i.action |= ehc->i.dev_action[dev->devno] &
					 ~ATA_EH_PERDEV_MASK;
			ehc->i.dev_action[dev->devno] &= ATA_EH_PERDEV_MASK;

			/* process hotplug request */
			if (dev->flags & ATA_DFLAG_DETACH)
				ata_eh_detach_dev(dev);

			/* schedule probe if necessary */
			if (!ata_dev_enabled(dev))
				ata_eh_schedule_probe(dev);
		}
	}

 retry:
	rc = 0;

	/* if UNLOADING, finish immediately */
	if (ap->pflags & ATA_PFLAG_UNLOADING)
		goto out;

	/* prep for EH */
	ata_for_each_link(link, ap, EDGE) {
		struct ata_eh_context *ehc = &link->eh_context;

		/* skip EH if possible. */
		if (ata_eh_skip_recovery(link))
			ehc->i.action = 0;

		ata_for_each_dev(dev, link, ALL)
			ehc->classes[dev->devno] = ATA_DEV_UNKNOWN;
	}

	/* reset */
	ata_for_each_link(link, ap, EDGE) {
		struct ata_eh_context *ehc = &link->eh_context;

		if (!(ehc->i.action & ATA_EH_RESET))
			continue;

		rc = ata_eh_reset(link, ata_link_nr_vacant(link),
				  prereset, softreset, hardreset, postreset);
		if (rc) {
			ata_link_printk(link, KERN_ERR,
					"reset failed, giving up\n");
			goto out;
		}
	}

	/* park/unpark loop: keep polling until all park timeouts on
	 * this port have expired and no new park request arrived
	 */
	do {
		unsigned long now;

		/*
		 * clears ATA_EH_PARK in eh_info and resets
		 * ap->park_req_pending
		 */
		ata_eh_pull_park_action(ap);

		deadline = jiffies;
		ata_for_each_link(link, ap, EDGE) {
			ata_for_each_dev(dev, link, ALL) {
				struct ata_eh_context *ehc = &link->eh_context;
				unsigned long tmp;

				if (dev->class != ATA_DEV_ATA)
					continue;
				if (!(ehc->i.dev_action[dev->devno] &
				      ATA_EH_PARK))
					continue;
				tmp = dev->unpark_deadline;
				if (time_before(deadline, tmp))
					deadline = tmp;
				else if (time_before_eq(tmp, jiffies))
					continue;
				if (ehc->unloaded_mask & (1 << dev->devno))
					continue;

				ata_eh_park_issue_cmd(dev, 1);
			}
		}

		now = jiffies;
		if (time_before_eq(deadline, now))
			break;

		/* drop EH ownership while sleeping so others may run */
		ata_eh_release(ap);
		deadline = wait_for_completion_timeout(&ap->park_req_pending,
						       deadline - now);
		ata_eh_acquire(ap);
	} while (deadline);
	ata_for_each_link(link, ap, EDGE) {
		ata_for_each_dev(dev, link, ALL) {
			if (!(link->eh_context.unloaded_mask &
			      (1 << dev->devno)))
				continue;

			ata_eh_park_issue_cmd(dev, 0);
			ata_eh_done(link, dev, ATA_EH_PARK);
		}
	}

	/* the rest */
	nr_fails = 0;
	ata_for_each_link(link, ap, PMP_FIRST) {
		struct ata_eh_context *ehc = &link->eh_context;

		/* with a PMP attached, the host link only needs LPM config */
		if (sata_pmp_attached(ap) && ata_is_host_link(link))
			goto config_lpm;

		/* revalidate existing devices and attach new ones */
		rc = ata_eh_revalidate_and_attach(link, &dev);
		if (rc)
			goto rest_fail;

		/* if PMP got attached, return, pmp EH will take care of it */
		if (link->device->class == ATA_DEV_PMP) {
			ehc->i.action = 0;
			return 0;
		}

		/* configure transfer mode if necessary */
		if (ehc->i.flags & ATA_EHI_SETMODE) {
			rc = ata_set_mode(link, &dev);
			if (rc)
				goto rest_fail;
			ehc->i.flags &= ~ATA_EHI_SETMODE;
		}

		/* If reset has been issued, clear UA to avoid
		 * disrupting the current users of the device.
		 */
		if (ehc->i.flags & ATA_EHI_DID_RESET) {
			ata_for_each_dev(dev, link, ALL) {
				if (dev->class != ATA_DEV_ATAPI)
					continue;
				rc = atapi_eh_clear_ua(dev);
				if (rc)
					goto rest_fail;
			}
		}

		/* retry flush if necessary */
		ata_for_each_dev(dev, link, ALL) {
			if (dev->class != ATA_DEV_ATA)
				continue;
			rc = ata_eh_maybe_retry_flush(dev);
			if (rc)
				goto rest_fail;
		}

	config_lpm:
		/* configure link power saving */
		if (link->lpm_policy != ap->target_lpm_policy) {
			rc = ata_eh_set_lpm(link, ap->target_lpm_policy, &dev);
			if (rc)
				goto rest_fail;
		}

		/* this link is okay now */
		ehc->i.flags = 0;
		continue;

	rest_fail:
		nr_fails++;
		if (dev)
			ata_eh_handle_dev_fail(dev, rc);

		if (ap->pflags & ATA_PFLAG_FROZEN) {
			/* PMP reset requires working host port.
			 * Can't retry if it's frozen.
			 */
			if (sata_pmp_attached(ap))
				goto out;
			break;
		}
	}

	/* any per-link failure sends us back for another full pass */
	if (nr_fails)
		goto retry;

 out:
	if (rc && r_failed_link)
		*r_failed_link = link;

	DPRINTK("EXIT, rc=%d\n", rc);
	return rc;
}
3791
3792/**
3793 * ata_eh_finish - finish up EH
3794 * @ap: host port to finish EH for
3795 *
3796 * Recovery is complete. Clean up EH states and retry or finish
3797 * failed qcs.
3798 *
3799 * LOCKING:
3800 * None.
3801 */
fb7fd614 3802void ata_eh_finish(struct ata_port *ap)
022bdb07
TH
3803{
3804 int tag;
3805
3806 /* retry or finish qcs */
3807 for (tag = 0; tag < ATA_MAX_QUEUE; tag++) {
3808 struct ata_queued_cmd *qc = __ata_qc_from_tag(ap, tag);
3809
3810 if (!(qc->flags & ATA_QCFLAG_FAILED))
3811 continue;
3812
3813 if (qc->err_mask) {
3814 /* FIXME: Once EH migration is complete,
3815 * generate sense data in this function,
3816 * considering both err_mask and tf.
3817 */
03faab78 3818 if (qc->flags & ATA_QCFLAG_RETRY)
022bdb07 3819 ata_eh_qc_retry(qc);
03faab78
TH
3820 else
3821 ata_eh_qc_complete(qc);
022bdb07
TH
3822 } else {
3823 if (qc->flags & ATA_QCFLAG_SENSE_VALID) {
3824 ata_eh_qc_complete(qc);
3825 } else {
3826 /* feed zero TF to sense generation */
3827 memset(&qc->result_tf, 0, sizeof(qc->result_tf));
3828 ata_eh_qc_retry(qc);
3829 }
3830 }
3831 }
da917d69
TH
3832
3833 /* make sure nr_active_links is zero after EH */
3834 WARN_ON(ap->nr_active_links);
3835 ap->nr_active_links = 0;
022bdb07
TH
3836}
3837
3838/**
3839 * ata_do_eh - do standard error handling
3840 * @ap: host port to handle error for
a1efdaba 3841 *
f5914a46 3842 * @prereset: prereset method (can be NULL)
022bdb07
TH
3843 * @softreset: softreset method (can be NULL)
3844 * @hardreset: hardreset method (can be NULL)
3845 * @postreset: postreset method (can be NULL)
3846 *
3847 * Perform standard error handling sequence.
3848 *
3849 * LOCKING:
3850 * Kernel thread context (may sleep).
3851 */
f5914a46
TH
3852void ata_do_eh(struct ata_port *ap, ata_prereset_fn_t prereset,
3853 ata_reset_fn_t softreset, ata_reset_fn_t hardreset,
3854 ata_postreset_fn_t postreset)
022bdb07 3855{
9b1e2658
TH
3856 struct ata_device *dev;
3857 int rc;
3858
3859 ata_eh_autopsy(ap);
3860 ata_eh_report(ap);
3861
3862 rc = ata_eh_recover(ap, prereset, softreset, hardreset, postreset,
3863 NULL);
3864 if (rc) {
1eca4365 3865 ata_for_each_dev(dev, &ap->link, ALL)
9b1e2658
TH
3866 ata_dev_disable(dev);
3867 }
3868
022bdb07
TH
3869 ata_eh_finish(ap);
3870}
500530f6 3871
a1efdaba
TH
3872/**
3873 * ata_std_error_handler - standard error handler
3874 * @ap: host port to handle error for
3875 *
3876 * Standard error handler
3877 *
3878 * LOCKING:
3879 * Kernel thread context (may sleep).
3880 */
3881void ata_std_error_handler(struct ata_port *ap)
3882{
3883 struct ata_port_operations *ops = ap->ops;
3884 ata_reset_fn_t hardreset = ops->hardreset;
3885
57c9efdf 3886 /* ignore built-in hardreset if SCR access is not available */
fe06e5f9 3887 if (hardreset == sata_std_hardreset && !sata_scr_valid(&ap->link))
a1efdaba
TH
3888 hardreset = NULL;
3889
3890 ata_do_eh(ap, ops->prereset, ops->softreset, hardreset, ops->postreset);
3891}
3892
6ffa01d8 3893#ifdef CONFIG_PM
500530f6
TH
3894/**
3895 * ata_eh_handle_port_suspend - perform port suspend operation
3896 * @ap: port to suspend
3897 *
3898 * Suspend @ap.
3899 *
3900 * LOCKING:
3901 * Kernel thread context (may sleep).
3902 */
static void ata_eh_handle_port_suspend(struct ata_port *ap)
{
	unsigned long flags;
	int rc = 0;

	/* are we suspending? */
	spin_lock_irqsave(ap->lock, flags);
	if (!(ap->pflags & ATA_PFLAG_PM_PENDING) ||
	    ap->pm_mesg.event == PM_EVENT_ON) {
		spin_unlock_irqrestore(ap->lock, flags);
		return;
	}
	spin_unlock_irqrestore(ap->lock, flags);

	/* a port must not be suspended twice without resuming */
	WARN_ON(ap->pflags & ATA_PFLAG_SUSPENDED);

	/* tell ACPI we're suspending */
	rc = ata_acpi_on_suspend(ap);
	if (rc)
		goto out;

	/* suspend */
	ata_eh_freeze_port(ap);

	if (ap->ops->port_suspend)
		rc = ap->ops->port_suspend(ap, ap->pm_mesg);

	ata_acpi_set_state(ap, PMSG_SUSPEND);
 out:
	/* report result */
	spin_lock_irqsave(ap->lock, flags);

	ap->pflags &= ~ATA_PFLAG_PM_PENDING;
	if (rc == 0)
		ap->pflags |= ATA_PFLAG_SUSPENDED;
	else if (ap->pflags & ATA_PFLAG_FROZEN)
		/* suspend failed with port frozen - let EH deal with it */
		ata_port_schedule_eh(ap);

	/* wake up the waiter, if any, with the result */
	if (ap->pm_result) {
		*ap->pm_result = rc;
		ap->pm_result = NULL;
	}

	spin_unlock_irqrestore(ap->lock, flags);

	return;
}
3950
3951/**
3952 * ata_eh_handle_port_resume - perform port resume operation
3953 * @ap: port to resume
3954 *
3955 * Resume @ap.
3956 *
500530f6
TH
3957 * LOCKING:
3958 * Kernel thread context (may sleep).
3959 */
3960static void ata_eh_handle_port_resume(struct ata_port *ap)
3961{
6f9c1ea2
TH
3962 struct ata_link *link;
3963 struct ata_device *dev;
500530f6 3964 unsigned long flags;
9666f400 3965 int rc = 0;
500530f6
TH
3966
3967 /* are we resuming? */
3968 spin_lock_irqsave(ap->lock, flags);
3969 if (!(ap->pflags & ATA_PFLAG_PM_PENDING) ||
3970 ap->pm_mesg.event != PM_EVENT_ON) {
3971 spin_unlock_irqrestore(ap->lock, flags);
3972 return;
3973 }
3974 spin_unlock_irqrestore(ap->lock, flags);
3975
9666f400 3976 WARN_ON(!(ap->pflags & ATA_PFLAG_SUSPENDED));
500530f6 3977
6f9c1ea2
TH
3978 /*
3979 * Error timestamps are in jiffies which doesn't run while
3980 * suspended and PHY events during resume isn't too uncommon.
3981 * When the two are combined, it can lead to unnecessary speed
3982 * downs if the machine is suspended and resumed repeatedly.
3983 * Clear error history.
3984 */
3985 ata_for_each_link(link, ap, HOST_FIRST)
3986 ata_for_each_dev(dev, link, ALL)
3987 ata_ering_clear(&dev->ering);
3988
bd3adca5
SL
3989 ata_acpi_set_state(ap, PMSG_ON);
3990
500530f6
TH
3991 if (ap->ops->port_resume)
3992 rc = ap->ops->port_resume(ap);
3993
6746544c
TH
3994 /* tell ACPI that we're resuming */
3995 ata_acpi_on_resume(ap);
3996
9666f400 3997 /* report result */
500530f6
TH
3998 spin_lock_irqsave(ap->lock, flags);
3999 ap->pflags &= ~(ATA_PFLAG_PM_PENDING | ATA_PFLAG_SUSPENDED);
4000 if (ap->pm_result) {
4001 *ap->pm_result = rc;
4002 ap->pm_result = NULL;
4003 }
4004 spin_unlock_irqrestore(ap->lock, flags);
4005}
6ffa01d8 4006#endif /* CONFIG_PM */