/*
 *  libata-eh.c - libata error handling
 *
 *  Maintained by:  Jeff Garzik <jgarzik@pobox.com>
 *                  Please ALWAYS copy linux-ide@vger.kernel.org
 *                  on emails.
 *
 *  Copyright 2006 Tejun Heo <htejun@gmail.com>
 *
 *
 *  This program is free software; you can redistribute it and/or
 *  modify it under the terms of the GNU General Public License as
 *  published by the Free Software Foundation; either version 2, or
 *  (at your option) any later version.
 *
 *  This program is distributed in the hope that it will be useful,
 *  but WITHOUT ANY WARRANTY; without even the implied warranty of
 *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 *  General Public License for more details.
 *
 *  You should have received a copy of the GNU General Public License
 *  along with this program; see the file COPYING.  If not, write to
 *  the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139,
 *  USA.
 *
 *
 *  libata documentation is available via 'make {ps|pdf}docs',
 *  as Documentation/DocBook/libata.*
 *
 *  Hardware documentation available from http://www.t13.org/ and
 *  http://www.sata-io.org/
 *
 */

#include <linux/kernel.h>
#include <linux/blkdev.h>
#include <linux/pci.h>
#include <scsi/scsi.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_eh.h>
#include <scsi/scsi_device.h>
#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_dbg.h>
#include "../scsi/scsi_transport_api.h"

#include <linux/libata.h>

#include "libata.h"

enum {
        /* speed down verdicts */
        ATA_EH_SPDN_NCQ_OFF             = (1 << 0),
        ATA_EH_SPDN_SPEED_DOWN          = (1 << 1),
        ATA_EH_SPDN_FALLBACK_TO_PIO     = (1 << 2),
        ATA_EH_SPDN_KEEP_ERRORS         = (1 << 3),

        /* error flags */
        ATA_EFLAG_IS_IO                 = (1 << 0),
        ATA_EFLAG_DUBIOUS_XFER          = (1 << 1),

        /* error categories */
        ATA_ECAT_NONE                   = 0,
        ATA_ECAT_ATA_BUS                = 1,
        ATA_ECAT_TOUT_HSM               = 2,
        ATA_ECAT_UNK_DEV                = 3,
        ATA_ECAT_DUBIOUS_NONE           = 4,
        ATA_ECAT_DUBIOUS_ATA_BUS        = 5,
        ATA_ECAT_DUBIOUS_TOUT_HSM       = 6,
        ATA_ECAT_DUBIOUS_UNK_DEV        = 7,
        ATA_ECAT_NR                     = 8,

        ATA_EH_CMD_DFL_TIMEOUT          = 5000,

        /* always put at least this amount of time between resets */
        ATA_EH_RESET_COOL_DOWN          = 5000,

        /* Waiting in ->prereset can never be reliable.  It's
         * sometimes nice to wait there but it can't be depended upon;
         * otherwise, we wouldn't be resetting.  Just give it enough
         * time for most drives to spin up.
         */
        ATA_EH_PRERESET_TIMEOUT         = 10000,
        ATA_EH_FASTDRAIN_INTERVAL       = 3000,

        /* probe speed down parameters, see ata_eh_schedule_probe() */
        ATA_EH_PROBE_TRIAL_INTERVAL     = 60000,        /* 1 min */
        ATA_EH_PROBE_TRIALS             = 2,
};

/* The following table determines how we sequence resets.  Each entry
 * represents timeout for that try.  The first try can be soft or
 * hardreset.  All others are hardreset if available.  In most cases
 * the first reset w/ 10sec timeout should succeed.  Following entries
 * are mostly for error handling, hotplug and retarded devices.
 */
static const unsigned long ata_eh_reset_timeouts[] = {
        10000,  /* most drives spin up by 10sec */
        10000,  /* > 99% working drives spin up before 20sec */
        35000,  /* give > 30 secs of idleness for retarded devices */
         5000,  /* and sweet one last chance */
        ULONG_MAX, /* > 1 min has elapsed, give up */
};

static const unsigned long ata_eh_identify_timeouts[] = {
         5000,  /* covers > 99% of successes and not too boring on failures */
        10000,  /* combined time till here is enough even for media access */
        30000,  /* for true idiots */
        ULONG_MAX,
};

static const unsigned long ata_eh_flush_timeouts[] = {
        15000,  /* be generous with flush */
        15000,  /* ditto */
        30000,  /* and even more generous */
        ULONG_MAX,
};

static const unsigned long ata_eh_other_timeouts[] = {
         5000,  /* same rationale as identify timeout */
        10000,  /* ditto */
        /* but no merciful 30sec for other commands, it just isn't worth it */
        ULONG_MAX,
};

struct ata_eh_cmd_timeout_ent {
        const u8                *commands;
        const unsigned long     *timeouts;
};

/* The following table determines timeouts to use for EH internal
 * commands.  Each table entry is a command class and matches the
 * commands the entry applies to and the timeout table to use.
 *
 * On the retry after a command timed out, the next timeout value from
 * the table is used.  If the table doesn't contain further entries,
 * the last value is used.
 *
 * ehc->cmd_timeout_idx keeps track of which timeout to use per
 * command class, so if SET_FEATURES times out on the first try, the
 * next try will use the second timeout value only for that class.
 */
#define CMDS(cmds...)   (const u8 []){ cmds, 0 }
static const struct ata_eh_cmd_timeout_ent
ata_eh_cmd_timeout_table[ATA_EH_CMD_TIMEOUT_TABLE_SIZE] = {
        { .commands = CMDS(ATA_CMD_ID_ATA, ATA_CMD_ID_ATAPI),
          .timeouts = ata_eh_identify_timeouts, },
        { .commands = CMDS(ATA_CMD_READ_NATIVE_MAX, ATA_CMD_READ_NATIVE_MAX_EXT),
          .timeouts = ata_eh_other_timeouts, },
        { .commands = CMDS(ATA_CMD_SET_MAX, ATA_CMD_SET_MAX_EXT),
          .timeouts = ata_eh_other_timeouts, },
        { .commands = CMDS(ATA_CMD_SET_FEATURES),
          .timeouts = ata_eh_other_timeouts, },
        { .commands = CMDS(ATA_CMD_INIT_DEV_PARAMS),
          .timeouts = ata_eh_other_timeouts, },
        { .commands = CMDS(ATA_CMD_FLUSH, ATA_CMD_FLUSH_EXT),
          .timeouts = ata_eh_flush_timeouts },
};
#undef CMDS

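/* For illustration (comment added for clarity, not from the original
 * source): CMDS() builds a zero-terminated C99 compound literal, so
 * each entry above carries its own command list, e.g.
 *
 *      CMDS(ATA_CMD_FLUSH, ATA_CMD_FLUSH_EXT)
 *      ==> (const u8 []){ ATA_CMD_FLUSH, ATA_CMD_FLUSH_EXT, 0 }
 *
 * which is why ata_lookup_timeout_table() below can walk each
 * ->commands array until it hits the 0 terminator.
 */
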
static void __ata_port_freeze(struct ata_port *ap);
static void ata_eh_finish(struct ata_port *ap);
#ifdef CONFIG_PM
static void ata_eh_handle_port_suspend(struct ata_port *ap);
static void ata_eh_handle_port_resume(struct ata_port *ap);
#else /* CONFIG_PM */
static void ata_eh_handle_port_suspend(struct ata_port *ap)
{ }

static void ata_eh_handle_port_resume(struct ata_port *ap)
{ }
#endif /* CONFIG_PM */

static void __ata_ehi_pushv_desc(struct ata_eh_info *ehi, const char *fmt,
                                 va_list args)
{
        ehi->desc_len += vscnprintf(ehi->desc + ehi->desc_len,
                                    ATA_EH_DESC_LEN - ehi->desc_len,
                                    fmt, args);
}

/**
 * __ata_ehi_push_desc - push error description without adding separator
 * @ehi: target EHI
 * @fmt: printf format string
 *
 * Format string according to @fmt and append it to @ehi->desc.
 *
 * LOCKING:
 * spin_lock_irqsave(host lock)
 */
void __ata_ehi_push_desc(struct ata_eh_info *ehi, const char *fmt, ...)
{
        va_list args;

        va_start(args, fmt);
        __ata_ehi_pushv_desc(ehi, fmt, args);
        va_end(args);
}

/**
 * ata_ehi_push_desc - push error description with separator
 * @ehi: target EHI
 * @fmt: printf format string
 *
 * Format string according to @fmt and append it to @ehi->desc.
 * If @ehi->desc is not empty, ", " is added in-between.
 *
 * LOCKING:
 * spin_lock_irqsave(host lock)
 */
void ata_ehi_push_desc(struct ata_eh_info *ehi, const char *fmt, ...)
{
        va_list args;

        if (ehi->desc_len)
                __ata_ehi_push_desc(ehi, ", ");

        va_start(args, fmt);
        __ata_ehi_pushv_desc(ehi, fmt, args);
        va_end(args);
}

/**
 * ata_ehi_clear_desc - clean error description
 * @ehi: target EHI
 *
 * Clear @ehi->desc.
 *
 * LOCKING:
 * spin_lock_irqsave(host lock)
 */
void ata_ehi_clear_desc(struct ata_eh_info *ehi)
{
        ehi->desc[0] = '\0';
        ehi->desc_len = 0;
}

/**
 * ata_port_desc - append port description
 * @ap: target ATA port
 * @fmt: printf format string
 *
 * Format string according to @fmt and append it to port
 * description.  If port description is not empty, " " is added
 * in-between.  This function is to be used while initializing
 * ata_host.  The description is printed on host registration.
 *
 * LOCKING:
 * None.
 */
void ata_port_desc(struct ata_port *ap, const char *fmt, ...)
{
        va_list args;

        WARN_ON(!(ap->pflags & ATA_PFLAG_INITIALIZING));

        if (ap->link.eh_info.desc_len)
                __ata_ehi_push_desc(&ap->link.eh_info, " ");

        va_start(args, fmt);
        __ata_ehi_pushv_desc(&ap->link.eh_info, fmt, args);
        va_end(args);
}

#ifdef CONFIG_PCI

/**
 * ata_port_pbar_desc - append PCI BAR description
 * @ap: target ATA port
 * @bar: target PCI BAR
 * @offset: offset into PCI BAR
 * @name: name of the area
 *
 * If @offset is negative, this function formats a string which
 * contains the name, address, size and type of the BAR and
 * appends it to the port description.  If @offset is zero or
 * positive, only the name and the offsetted address are appended.
 *
 * LOCKING:
 * None.
 */
void ata_port_pbar_desc(struct ata_port *ap, int bar, ssize_t offset,
                        const char *name)
{
        struct pci_dev *pdev = to_pci_dev(ap->host->dev);
        char *type = "";
        unsigned long long start, len;

        if (pci_resource_flags(pdev, bar) & IORESOURCE_MEM)
                type = "m";
        else if (pci_resource_flags(pdev, bar) & IORESOURCE_IO)
                type = "i";

        start = (unsigned long long)pci_resource_start(pdev, bar);
        len = (unsigned long long)pci_resource_len(pdev, bar);

        if (offset < 0)
                ata_port_desc(ap, "%s %s%llu@0x%llx", name, type, len, start);
        else
                ata_port_desc(ap, "%s 0x%llx", name,
                                start + (unsigned long long)offset);
}

#endif /* CONFIG_PCI */

static int ata_lookup_timeout_table(u8 cmd)
{
        int i;

        for (i = 0; i < ATA_EH_CMD_TIMEOUT_TABLE_SIZE; i++) {
                const u8 *cur;

                for (cur = ata_eh_cmd_timeout_table[i].commands; *cur; cur++)
                        if (*cur == cmd)
                                return i;
        }

        return -1;
}

/**
 * ata_internal_cmd_timeout - determine timeout for an internal command
 * @dev: target device
 * @cmd: internal command to be issued
 *
 * Determine timeout for internal command @cmd for @dev.
 *
 * LOCKING:
 * EH context.
 *
 * RETURNS:
 * Determined timeout.
 */
unsigned long ata_internal_cmd_timeout(struct ata_device *dev, u8 cmd)
{
        struct ata_eh_context *ehc = &dev->link->eh_context;
        int ent = ata_lookup_timeout_table(cmd);
        int idx;

        if (ent < 0)
                return ATA_EH_CMD_DFL_TIMEOUT;

        idx = ehc->cmd_timeout_idx[dev->devno][ent];
        return ata_eh_cmd_timeout_table[ent].timeouts[idx];
}

/**
 * ata_internal_cmd_timed_out - notification for internal command timeout
 * @dev: target device
 * @cmd: internal command which timed out
 *
 * Notify EH that internal command @cmd for @dev timed out.  This
 * function should be called only for commands whose timeouts are
 * determined using ata_internal_cmd_timeout().
 *
 * LOCKING:
 * EH context.
 */
void ata_internal_cmd_timed_out(struct ata_device *dev, u8 cmd)
{
        struct ata_eh_context *ehc = &dev->link->eh_context;
        int ent = ata_lookup_timeout_table(cmd);
        int idx;

        if (ent < 0)
                return;

        idx = ehc->cmd_timeout_idx[dev->devno][ent];
        if (ata_eh_cmd_timeout_table[ent].timeouts[idx + 1] != ULONG_MAX)
                ehc->cmd_timeout_idx[dev->devno][ent]++;
}

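/* For illustration (comment added for clarity, not from the original
 * source): how the two helpers above cooperate.  The first EH-internal
 * IDENTIFY for a device uses ata_eh_identify_timeouts[0]; every
 * timeout bumps cmd_timeout_idx for that command class, and the index
 * stops advancing once the next entry is ULONG_MAX.
 *
 *      unsigned long t = ata_internal_cmd_timeout(dev, ATA_CMD_ID_ATA);
 *      // t == 5000 (ms) on the first try
 *      ata_internal_cmd_timed_out(dev, ATA_CMD_ID_ATA);
 *      t = ata_internal_cmd_timeout(dev, ATA_CMD_ID_ATA);
 *      // t == 10000 on the retry, 30000 after one more timeout
 */
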
static void ata_ering_record(struct ata_ering *ering, unsigned int eflags,
                             unsigned int err_mask)
{
        struct ata_ering_entry *ent;

        WARN_ON(!err_mask);

        ering->cursor++;
        ering->cursor %= ATA_ERING_SIZE;

        ent = &ering->ring[ering->cursor];
        ent->eflags = eflags;
        ent->err_mask = err_mask;
        ent->timestamp = get_jiffies_64();
}

static struct ata_ering_entry *ata_ering_top(struct ata_ering *ering)
{
        struct ata_ering_entry *ent = &ering->ring[ering->cursor];

        if (ent->err_mask)
                return ent;
        return NULL;
}

static void ata_ering_clear(struct ata_ering *ering)
{
        memset(ering, 0, sizeof(*ering));
}

static int ata_ering_map(struct ata_ering *ering,
                         int (*map_fn)(struct ata_ering_entry *, void *),
                         void *arg)
{
        int idx, rc = 0;
        struct ata_ering_entry *ent;

        idx = ering->cursor;
        do {
                ent = &ering->ring[idx];
                if (!ent->err_mask)
                        break;
                rc = map_fn(ent, arg);
                if (rc)
                        break;
                idx = (idx - 1 + ATA_ERING_SIZE) % ATA_ERING_SIZE;
        } while (idx != ering->cursor);

        return rc;
}

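/* A minimal map_fn sketch for ata_ering_map() (illustrative, not in
 * the original file): entries are visited newest-first and a non-zero
 * return stops the walk early.
 *
 *      static int count_timeouts(struct ata_ering_entry *ent, void *arg)
 *      {
 *              int *cnt = arg;
 *
 *              if (ent->err_mask & AC_ERR_TIMEOUT)
 *                      (*cnt)++;
 *              return 0;       // 0 == keep walking
 *      }
 *
 * speed_down_verdict_cb() further down is the in-file user of this
 * pattern.
 */
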
static unsigned int ata_eh_dev_action(struct ata_device *dev)
{
        struct ata_eh_context *ehc = &dev->link->eh_context;

        return ehc->i.action | ehc->i.dev_action[dev->devno];
}

static void ata_eh_clear_action(struct ata_link *link, struct ata_device *dev,
                                struct ata_eh_info *ehi, unsigned int action)
{
        struct ata_device *tdev;

        if (!dev) {
                ehi->action &= ~action;
                ata_for_each_dev(tdev, link, ALL)
                        ehi->dev_action[tdev->devno] &= ~action;
        } else {
                /* doesn't make sense for port-wide EH actions */
                WARN_ON(!(action & ATA_EH_PERDEV_MASK));

                /* break ehi->action into ehi->dev_action */
                if (ehi->action & action) {
                        ata_for_each_dev(tdev, link, ALL)
                                ehi->dev_action[tdev->devno] |=
                                        ehi->action & action;
                        ehi->action &= ~action;
                }

                /* turn off the specified per-dev action */
                ehi->dev_action[dev->devno] &= ~action;
        }
}

/**
 * ata_scsi_timed_out - SCSI layer time out callback
 * @cmd: timed out SCSI command
 *
 * Handles SCSI layer timeout.  We race with normal completion of
 * the qc for @cmd.  If the qc is already gone, we lose and let
 * the scsi command finish (EH_HANDLED).  Otherwise, the qc has
 * timed out and EH should be invoked.  Prevent ata_qc_complete()
 * from finishing it by setting EH_SCHEDULED and return
 * EH_NOT_HANDLED.
 *
 * TODO: kill this function once old EH is gone.
 *
 * LOCKING:
 * Called from timer context
 *
 * RETURNS:
 * EH_HANDLED or EH_NOT_HANDLED
 */
enum blk_eh_timer_return ata_scsi_timed_out(struct scsi_cmnd *cmd)
{
        struct Scsi_Host *host = cmd->device->host;
        struct ata_port *ap = ata_shost_to_port(host);
        unsigned long flags;
        struct ata_queued_cmd *qc;
        enum blk_eh_timer_return ret;

        DPRINTK("ENTER\n");

        if (ap->ops->error_handler) {
                ret = BLK_EH_NOT_HANDLED;
                goto out;
        }

        ret = BLK_EH_HANDLED;
        spin_lock_irqsave(ap->lock, flags);
        qc = ata_qc_from_tag(ap, ap->link.active_tag);
        if (qc) {
                WARN_ON(qc->scsicmd != cmd);
                qc->flags |= ATA_QCFLAG_EH_SCHEDULED;
                qc->err_mask |= AC_ERR_TIMEOUT;
                ret = BLK_EH_NOT_HANDLED;
        }
        spin_unlock_irqrestore(ap->lock, flags);

 out:
        DPRINTK("EXIT, ret=%d\n", ret);
        return ret;
}

static void ata_eh_unload(struct ata_port *ap)
{
        struct ata_link *link;
        struct ata_device *dev;
        unsigned long flags;

        /* Restore SControl IPM and SPD for the next driver and
         * disable attached devices.
         */
        ata_for_each_link(link, ap, PMP_FIRST) {
                sata_scr_write(link, SCR_CONTROL, link->saved_scontrol & 0xff0);
                ata_for_each_dev(dev, link, ALL)
                        ata_dev_disable(dev);
        }

        /* freeze and set UNLOADED */
        spin_lock_irqsave(ap->lock, flags);

        ata_port_freeze(ap);                    /* won't be thawed */
        ap->pflags &= ~ATA_PFLAG_EH_PENDING;    /* clear pending from freeze */
        ap->pflags |= ATA_PFLAG_UNLOADED;

        spin_unlock_irqrestore(ap->lock, flags);
}

/**
 * ata_scsi_error - SCSI layer error handler callback
 * @host: SCSI host on which error occurred
 *
 * Handles SCSI-layer-thrown error events.
 *
 * LOCKING:
 * Inherited from SCSI layer (none, can sleep)
 */
void ata_scsi_error(struct Scsi_Host *host)
{
        struct ata_port *ap = ata_shost_to_port(host);
        int i;
        unsigned long flags;

        DPRINTK("ENTER\n");

        /* make sure sff pio task is not running */
        ata_sff_flush_pio_task(ap);

        /* synchronize with host lock and sort out timeouts */

        /* For new EH, all qcs are finished in one of three ways -
         * normal completion, error completion, and SCSI timeout.
         * Both completions can race against SCSI timeout.  When normal
         * completion wins, the qc never reaches EH.  When error
         * completion wins, the qc has ATA_QCFLAG_FAILED set.
         *
         * When SCSI timeout wins, things are a bit more complex.
         * Normal or error completion can occur after the timeout but
         * before this point.  In such cases, both types of
         * completions are honored.  A scmd is determined to have
         * timed out iff its associated qc is active and not failed.
         */
        if (ap->ops->error_handler) {
                struct scsi_cmnd *scmd, *tmp;
                int nr_timedout = 0;

                spin_lock_irqsave(ap->lock, flags);

                /* This must occur under the ap->lock as we don't want
                   a polled recovery to race the real interrupt handler

                   The lost_interrupt handler checks for any completed but
                   non-notified command and completes much like an IRQ handler.

                   We then fall into the error recovery code which will treat
                   this as if normal completion won the race */

                if (ap->ops->lost_interrupt)
                        ap->ops->lost_interrupt(ap);

                list_for_each_entry_safe(scmd, tmp, &host->eh_cmd_q, eh_entry) {
                        struct ata_queued_cmd *qc;

                        for (i = 0; i < ATA_MAX_QUEUE; i++) {
                                qc = __ata_qc_from_tag(ap, i);
                                if (qc->flags & ATA_QCFLAG_ACTIVE &&
                                    qc->scsicmd == scmd)
                                        break;
                        }

                        if (i < ATA_MAX_QUEUE) {
                                /* the scmd has an associated qc */
                                if (!(qc->flags & ATA_QCFLAG_FAILED)) {
                                        /* which hasn't failed yet, timeout */
                                        qc->err_mask |= AC_ERR_TIMEOUT;
                                        qc->flags |= ATA_QCFLAG_FAILED;
                                        nr_timedout++;
                                }
                        } else {
                                /* Normal completion occurred after
                                 * SCSI timeout but before this point.
                                 * Successfully complete it.
                                 */
                                scmd->retries = scmd->allowed;
                                scsi_eh_finish_cmd(scmd, &ap->eh_done_q);
                        }
                }

                /* If we have timed out qcs, they belong to EH from
                 * this point but the state of the controller is
                 * unknown.  Freeze the port to make sure the IRQ
                 * handler doesn't diddle with those qcs.  This must
                 * be done atomically w.r.t. setting QCFLAG_FAILED.
                 */
                if (nr_timedout)
                        __ata_port_freeze(ap);

                spin_unlock_irqrestore(ap->lock, flags);

                /* initialize eh_tries */
                ap->eh_tries = ATA_EH_MAX_TRIES;
        } else
                spin_unlock_wait(ap->lock);

        /* If we raced with normal completion and there is nothing to
           recover (nr_timedout == 0), why exactly are we doing error
           recovery? */

 repeat:
        /* invoke error handler */
        if (ap->ops->error_handler) {
                struct ata_link *link;

                /* kill fast drain timer */
                del_timer_sync(&ap->fastdrain_timer);

                /* process port resume request */
                ata_eh_handle_port_resume(ap);

                /* fetch & clear EH info */
                spin_lock_irqsave(ap->lock, flags);

                ata_for_each_link(link, ap, HOST_FIRST) {
                        struct ata_eh_context *ehc = &link->eh_context;
                        struct ata_device *dev;

                        memset(&link->eh_context, 0, sizeof(link->eh_context));
                        link->eh_context.i = link->eh_info;
                        memset(&link->eh_info, 0, sizeof(link->eh_info));

                        ata_for_each_dev(dev, link, ENABLED) {
                                int devno = dev->devno;

                                ehc->saved_xfer_mode[devno] = dev->xfer_mode;
                                if (ata_ncq_enabled(dev))
                                        ehc->saved_ncq_enabled |= 1 << devno;
                        }
                }

                ap->pflags |= ATA_PFLAG_EH_IN_PROGRESS;
                ap->pflags &= ~ATA_PFLAG_EH_PENDING;
                ap->excl_link = NULL;   /* don't maintain exclusion over EH */

                spin_unlock_irqrestore(ap->lock, flags);

                /* invoke EH, skip if unloading or suspended */
                if (!(ap->pflags & (ATA_PFLAG_UNLOADING | ATA_PFLAG_SUSPENDED)))
                        ap->ops->error_handler(ap);
                else {
                        /* if unloading, commence suicide */
                        if ((ap->pflags & ATA_PFLAG_UNLOADING) &&
                            !(ap->pflags & ATA_PFLAG_UNLOADED))
                                ata_eh_unload(ap);
                        ata_eh_finish(ap);
                }

                /* process port suspend request */
                ata_eh_handle_port_suspend(ap);

                /* Exception might have happened after ->error_handler
                 * recovered the port but before this point.  Repeat
                 * EH in such case.
                 */
                spin_lock_irqsave(ap->lock, flags);

                if (ap->pflags & ATA_PFLAG_EH_PENDING) {
                        if (--ap->eh_tries) {
                                spin_unlock_irqrestore(ap->lock, flags);
                                goto repeat;
                        }
                        ata_port_printk(ap, KERN_ERR, "EH pending after %d "
                                        "tries, giving up\n", ATA_EH_MAX_TRIES);
                        ap->pflags &= ~ATA_PFLAG_EH_PENDING;
                }

                /* this run is complete, make sure EH info is clear */
                ata_for_each_link(link, ap, HOST_FIRST)
                        memset(&link->eh_info, 0, sizeof(link->eh_info));

                /* Clear host_eh_scheduled while holding ap->lock such
                 * that if exception occurs after this point but
                 * before EH completion, SCSI midlayer will
                 * re-initiate EH.
                 */
                host->host_eh_scheduled = 0;

                spin_unlock_irqrestore(ap->lock, flags);
        } else {
                WARN_ON(ata_qc_from_tag(ap, ap->link.active_tag) == NULL);
                ap->ops->eng_timeout(ap);
        }

        /* finish or retry handled scmd's and clean up */
        WARN_ON(host->host_failed || !list_empty(&host->eh_cmd_q));

        scsi_eh_flush_done_q(&ap->eh_done_q);

        /* clean up */
        spin_lock_irqsave(ap->lock, flags);

        if (ap->pflags & ATA_PFLAG_LOADING)
                ap->pflags &= ~ATA_PFLAG_LOADING;
        else if (ap->pflags & ATA_PFLAG_SCSI_HOTPLUG)
                schedule_delayed_work(&ap->hotplug_task, 0);

        if (ap->pflags & ATA_PFLAG_RECOVERED)
                ata_port_printk(ap, KERN_INFO, "EH complete\n");

        ap->pflags &= ~(ATA_PFLAG_SCSI_HOTPLUG | ATA_PFLAG_RECOVERED);

        /* tell wait_eh that we're done */
        ap->pflags &= ~ATA_PFLAG_EH_IN_PROGRESS;
        wake_up_all(&ap->eh_wait_q);

        spin_unlock_irqrestore(ap->lock, flags);

        DPRINTK("EXIT\n");
}

/**
 * ata_port_wait_eh - Wait for the currently pending EH to complete
 * @ap: Port to wait EH for
 *
 * Wait until the currently pending EH is complete.
 *
 * LOCKING:
 * Kernel thread context (may sleep).
 */
void ata_port_wait_eh(struct ata_port *ap)
{
        unsigned long flags;
        DEFINE_WAIT(wait);

 retry:
        spin_lock_irqsave(ap->lock, flags);

        while (ap->pflags & (ATA_PFLAG_EH_PENDING | ATA_PFLAG_EH_IN_PROGRESS)) {
                prepare_to_wait(&ap->eh_wait_q, &wait, TASK_UNINTERRUPTIBLE);
                spin_unlock_irqrestore(ap->lock, flags);
                schedule();
                spin_lock_irqsave(ap->lock, flags);
        }
        finish_wait(&ap->eh_wait_q, &wait);

        spin_unlock_irqrestore(ap->lock, flags);

        /* make sure SCSI EH is complete */
        if (scsi_host_in_recovery(ap->scsi_host)) {
                msleep(10);
                goto retry;
        }
}

static int ata_eh_nr_in_flight(struct ata_port *ap)
{
        unsigned int tag;
        int nr = 0;

        /* count only non-internal commands */
        for (tag = 0; tag < ATA_MAX_QUEUE - 1; tag++)
                if (ata_qc_from_tag(ap, tag))
                        nr++;

        return nr;
}

void ata_eh_fastdrain_timerfn(unsigned long arg)
{
        struct ata_port *ap = (void *)arg;
        unsigned long flags;
        int cnt;

        spin_lock_irqsave(ap->lock, flags);

        cnt = ata_eh_nr_in_flight(ap);

        /* are we done? */
        if (!cnt)
                goto out_unlock;

        if (cnt == ap->fastdrain_cnt) {
                unsigned int tag;

                /* No progress during the last interval, tag all
                 * in-flight qcs as timed out and freeze the port.
                 */
                for (tag = 0; tag < ATA_MAX_QUEUE - 1; tag++) {
                        struct ata_queued_cmd *qc = ata_qc_from_tag(ap, tag);
                        if (qc)
                                qc->err_mask |= AC_ERR_TIMEOUT;
                }

                ata_port_freeze(ap);
        } else {
                /* some qcs have finished, give it another chance */
                ap->fastdrain_cnt = cnt;
                ap->fastdrain_timer.expires =
                        ata_deadline(jiffies, ATA_EH_FASTDRAIN_INTERVAL);
                add_timer(&ap->fastdrain_timer);
        }

 out_unlock:
        spin_unlock_irqrestore(ap->lock, flags);
}

/**
 * ata_eh_set_pending - set ATA_PFLAG_EH_PENDING and activate fast drain
 * @ap: target ATA port
 * @fastdrain: activate fast drain
 *
 * Set ATA_PFLAG_EH_PENDING and activate fast drain if @fastdrain
 * is non-zero and EH wasn't pending before.  Fast drain ensures
 * that EH kicks in in a timely manner.
 *
 * LOCKING:
 * spin_lock_irqsave(host lock)
 */
static void ata_eh_set_pending(struct ata_port *ap, int fastdrain)
{
        int cnt;

        /* already scheduled? */
        if (ap->pflags & ATA_PFLAG_EH_PENDING)
                return;

        ap->pflags |= ATA_PFLAG_EH_PENDING;

        if (!fastdrain)
                return;

        /* do we have in-flight qcs? */
        cnt = ata_eh_nr_in_flight(ap);
        if (!cnt)
                return;

        /* activate fast drain */
        ap->fastdrain_cnt = cnt;
        ap->fastdrain_timer.expires =
                ata_deadline(jiffies, ATA_EH_FASTDRAIN_INTERVAL);
        add_timer(&ap->fastdrain_timer);
}

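/* Fast drain timeline, for illustration (comment added for clarity,
 * not from the original source): once EH is pending with fastdrain
 * enabled, ata_eh_fastdrain_timerfn() above re-arms itself every
 * ATA_EH_FASTDRAIN_INTERVAL (3s) as long as the in-flight count keeps
 * shrinking; the first interval with no progress times out all
 * remaining qcs and freezes the port, so EH entry is bounded instead
 * of waiting out each straggler's full block-layer timeout.
 */
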
/**
 * ata_qc_schedule_eh - schedule qc for error handling
 * @qc: command to schedule error handling for
 *
 * Schedule error handling for @qc.  EH will kick in as soon as
 * other commands are drained.
 *
 * LOCKING:
 * spin_lock_irqsave(host lock)
 */
void ata_qc_schedule_eh(struct ata_queued_cmd *qc)
{
        struct ata_port *ap = qc->ap;
        struct request_queue *q = qc->scsicmd->device->request_queue;
        unsigned long flags;

        WARN_ON(!ap->ops->error_handler);

        qc->flags |= ATA_QCFLAG_FAILED;
        ata_eh_set_pending(ap, 1);

        /* The following will fail if timeout has already expired.
         * ata_scsi_error() takes care of such scmds on EH entry.
         * Note that ATA_QCFLAG_FAILED is unconditionally set after
         * this function completes.
         */
        spin_lock_irqsave(q->queue_lock, flags);
        blk_abort_request(qc->scsicmd->request);
        spin_unlock_irqrestore(q->queue_lock, flags);
}

/**
 * ata_port_schedule_eh - schedule error handling without a qc
 * @ap: ATA port to schedule EH for
 *
 * Schedule error handling for @ap.  EH will kick in as soon as
 * all commands are drained.
 *
 * LOCKING:
 * spin_lock_irqsave(host lock)
 */
void ata_port_schedule_eh(struct ata_port *ap)
{
        WARN_ON(!ap->ops->error_handler);

        if (ap->pflags & ATA_PFLAG_INITIALIZING)
                return;

        ata_eh_set_pending(ap, 1);
        scsi_schedule_eh(ap->scsi_host);

        DPRINTK("port EH scheduled\n");
}

static int ata_do_link_abort(struct ata_port *ap, struct ata_link *link)
{
        int tag, nr_aborted = 0;

        WARN_ON(!ap->ops->error_handler);

        /* we're gonna abort all commands, no need for fast drain */
        ata_eh_set_pending(ap, 0);

        for (tag = 0; tag < ATA_MAX_QUEUE; tag++) {
                struct ata_queued_cmd *qc = ata_qc_from_tag(ap, tag);

                if (qc && (!link || qc->dev->link == link)) {
                        qc->flags |= ATA_QCFLAG_FAILED;
                        ata_qc_complete(qc);
                        nr_aborted++;
                }
        }

        if (!nr_aborted)
                ata_port_schedule_eh(ap);

        return nr_aborted;
}

/**
 * ata_link_abort - abort all qc's on the link
 * @link: ATA link to abort qc's for
 *
 * Abort all active qc's on @link and schedule EH.
 *
 * LOCKING:
 * spin_lock_irqsave(host lock)
 *
 * RETURNS:
 * Number of aborted qc's.
 */
int ata_link_abort(struct ata_link *link)
{
        return ata_do_link_abort(link->ap, link);
}

/**
 * ata_port_abort - abort all qc's on the port
 * @ap: ATA port to abort qc's for
 *
 * Abort all active qc's of @ap and schedule EH.
 *
 * LOCKING:
 * spin_lock_irqsave(host lock)
 *
 * RETURNS:
 * Number of aborted qc's.
 */
int ata_port_abort(struct ata_port *ap)
{
        return ata_do_link_abort(ap, NULL);
}

/**
 * __ata_port_freeze - freeze port
 * @ap: ATA port to freeze
 *
 * This function is called when HSM violation or some other
 * condition disrupts normal operation of the port.  Frozen port
 * is not allowed to perform any operation until the port is
 * thawed, which usually follows a successful reset.
 *
 * ap->ops->freeze() callback can be used for freezing the port
 * hardware-wise (e.g. mask interrupt and stop DMA engine).  If a
 * port cannot be frozen hardware-wise, the interrupt handler
 * must ack and clear interrupts unconditionally while the port
 * is frozen.
 *
 * LOCKING:
 * spin_lock_irqsave(host lock)
 */
static void __ata_port_freeze(struct ata_port *ap)
{
        WARN_ON(!ap->ops->error_handler);

        if (ap->ops->freeze)
                ap->ops->freeze(ap);

        ap->pflags |= ATA_PFLAG_FROZEN;

        DPRINTK("ata%u port frozen\n", ap->print_id);
}

/**
 * ata_port_freeze - abort & freeze port
 * @ap: ATA port to freeze
 *
 * Abort and freeze @ap.  The freeze operation must be called
 * first, because some hardware requires special operations
 * before the taskfile registers are accessible.
 *
 * LOCKING:
 * spin_lock_irqsave(host lock)
 *
 * RETURNS:
 * Number of aborted commands.
 */
int ata_port_freeze(struct ata_port *ap)
{
        int nr_aborted;

        WARN_ON(!ap->ops->error_handler);

        __ata_port_freeze(ap);
        nr_aborted = ata_port_abort(ap);

        return nr_aborted;
}

/**
 * sata_async_notification - SATA async notification handler
 * @ap: ATA port where async notification is received
 *
 * Handler to be called when async notification via SDB FIS is
 * received.  This function schedules EH if necessary.
 *
 * LOCKING:
 * spin_lock_irqsave(host lock)
 *
 * RETURNS:
 * 1 if EH is scheduled, 0 otherwise.
 */
int sata_async_notification(struct ata_port *ap)
{
        u32 sntf;
        int rc;

        if (!(ap->flags & ATA_FLAG_AN))
                return 0;

        rc = sata_scr_read(&ap->link, SCR_NOTIFICATION, &sntf);
        if (rc == 0)
                sata_scr_write(&ap->link, SCR_NOTIFICATION, sntf);

        if (!sata_pmp_attached(ap) || rc) {
                /* PMP is not attached or SNTF is not available */
                if (!sata_pmp_attached(ap)) {
                        /* PMP is not attached.  Check whether ATAPI
                         * AN is configured.  If so, notify media
                         * change.
                         */
                        struct ata_device *dev = ap->link.device;

                        if ((dev->class == ATA_DEV_ATAPI) &&
                            (dev->flags & ATA_DFLAG_AN))
                                ata_scsi_media_change_notify(dev);
                        return 0;
                } else {
                        /* PMP is attached but SNTF is not available.
                         * ATAPI async media change notification is
                         * not used.  The PMP must be reporting PHY
                         * status change, schedule EH.
                         */
                        ata_port_schedule_eh(ap);
                        return 1;
                }
        } else {
                /* PMP is attached and SNTF is available */
                struct ata_link *link;

                /* check and notify ATAPI AN */
                ata_for_each_link(link, ap, EDGE) {
                        if (!(sntf & (1 << link->pmp)))
                                continue;

                        if ((link->device->class == ATA_DEV_ATAPI) &&
                            (link->device->flags & ATA_DFLAG_AN))
                                ata_scsi_media_change_notify(link->device);
                }

                /* If PMP is reporting that PHY status of some
                 * downstream ports has changed, schedule EH.
                 */
                if (sntf & (1 << SATA_PMP_CTRL_PORT)) {
                        ata_port_schedule_eh(ap);
                        return 1;
                }

                return 0;
        }
}

/**
 * ata_eh_freeze_port - EH helper to freeze port
 * @ap: ATA port to freeze
 *
 * Freeze @ap.
 *
 * LOCKING:
 * None.
 */
void ata_eh_freeze_port(struct ata_port *ap)
{
        unsigned long flags;

        if (!ap->ops->error_handler)
                return;

        spin_lock_irqsave(ap->lock, flags);
        __ata_port_freeze(ap);
        spin_unlock_irqrestore(ap->lock, flags);
}

/**
 * ata_eh_thaw_port - EH helper to thaw port
 * @ap: ATA port to thaw
 *
 * Thaw frozen port @ap.
 *
 * LOCKING:
 * None.
 */
void ata_eh_thaw_port(struct ata_port *ap)
{
        unsigned long flags;

        if (!ap->ops->error_handler)
                return;

        spin_lock_irqsave(ap->lock, flags);

        ap->pflags &= ~ATA_PFLAG_FROZEN;

        if (ap->ops->thaw)
                ap->ops->thaw(ap);

        spin_unlock_irqrestore(ap->lock, flags);

        DPRINTK("ata%u port thawed\n", ap->print_id);
}

static void ata_eh_scsidone(struct scsi_cmnd *scmd)
{
        /* nada */
}

static void __ata_eh_qc_complete(struct ata_queued_cmd *qc)
{
        struct ata_port *ap = qc->ap;
        struct scsi_cmnd *scmd = qc->scsicmd;
        unsigned long flags;

        spin_lock_irqsave(ap->lock, flags);
        qc->scsidone = ata_eh_scsidone;
        __ata_qc_complete(qc);
        WARN_ON(ata_tag_valid(qc->tag));
        spin_unlock_irqrestore(ap->lock, flags);

        scsi_eh_finish_cmd(scmd, &ap->eh_done_q);
}

/**
 * ata_eh_qc_complete - Complete an active ATA command from EH
 * @qc: Command to complete
 *
 * Indicate to the mid and upper layers that an ATA command has
 * completed.  To be used from EH.
 */
void ata_eh_qc_complete(struct ata_queued_cmd *qc)
{
        struct scsi_cmnd *scmd = qc->scsicmd;
        scmd->retries = scmd->allowed;
        __ata_eh_qc_complete(qc);
}

/**
 * ata_eh_qc_retry - Tell midlayer to retry an ATA command after EH
 * @qc: Command to retry
 *
 * Indicate to the mid and upper layers that an ATA command
 * should be retried.  To be used from EH.
 *
 * SCSI midlayer limits the number of retries to scmd->allowed.
 * scmd->retries is decremented for commands which get retried
 * due to unrelated failures (qc->err_mask is zero).
 */
void ata_eh_qc_retry(struct ata_queued_cmd *qc)
{
        struct scsi_cmnd *scmd = qc->scsicmd;
        if (!qc->err_mask && scmd->retries)
                scmd->retries--;
        __ata_eh_qc_complete(qc);
}

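/* Retry accounting, for illustration (comment added for clarity, not
 * from the original source): scmd->retries counts retries already
 * consumed, so the scmd->retries-- above refunds one retry to a qc
 * that failed through no fault of its own (qc->err_mask == 0, e.g.
 * aborted as collateral damage), keeping such retries from being
 * charged against scmd->allowed.
 */
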
/**
 * ata_dev_disable - disable ATA device
 * @dev: ATA device to disable
 *
 * Disable @dev.
 *
 * Locking:
 * EH context.
 */
void ata_dev_disable(struct ata_device *dev)
{
        if (!ata_dev_enabled(dev))
                return;

        if (ata_msg_drv(dev->link->ap))
                ata_dev_printk(dev, KERN_WARNING, "disabled\n");
        ata_acpi_on_disable(dev);
        ata_down_xfermask_limit(dev, ATA_DNXFER_FORCE_PIO0 | ATA_DNXFER_QUIET);
        dev->class++;

        /* From now till the next successful probe, ering is used to
         * track probe failures.  Clear accumulated device error info.
         */
        ata_ering_clear(&dev->ering);
}

/**
 * ata_eh_detach_dev - detach ATA device
 * @dev: ATA device to detach
 *
 * Detach @dev.
 *
 * LOCKING:
 * None.
 */
void ata_eh_detach_dev(struct ata_device *dev)
{
        struct ata_link *link = dev->link;
        struct ata_port *ap = link->ap;
        struct ata_eh_context *ehc = &link->eh_context;
        unsigned long flags;

        ata_dev_disable(dev);

        spin_lock_irqsave(ap->lock, flags);

        dev->flags &= ~ATA_DFLAG_DETACH;

        if (ata_scsi_offline_dev(dev)) {
                dev->flags |= ATA_DFLAG_DETACHED;
                ap->pflags |= ATA_PFLAG_SCSI_HOTPLUG;
        }

        /* clear per-dev EH info */
        ata_eh_clear_action(link, dev, &link->eh_info, ATA_EH_PERDEV_MASK);
        ata_eh_clear_action(link, dev, &link->eh_context.i, ATA_EH_PERDEV_MASK);
        ehc->saved_xfer_mode[dev->devno] = 0;
        ehc->saved_ncq_enabled &= ~(1 << dev->devno);

        spin_unlock_irqrestore(ap->lock, flags);
}

/**
 * ata_eh_about_to_do - about to perform eh_action
 * @link: target ATA link
 * @dev: target ATA dev for per-dev action (can be NULL)
 * @action: action about to be performed
 *
 * Called just before performing EH actions to clear related bits
 * in @link->eh_info such that eh actions are not unnecessarily
 * repeated.
 *
 * LOCKING:
 * None.
 */
void ata_eh_about_to_do(struct ata_link *link, struct ata_device *dev,
                        unsigned int action)
{
        struct ata_port *ap = link->ap;
        struct ata_eh_info *ehi = &link->eh_info;
        struct ata_eh_context *ehc = &link->eh_context;
        unsigned long flags;

        spin_lock_irqsave(ap->lock, flags);

        ata_eh_clear_action(link, dev, ehi, action);

        /* About to take EH action, set RECOVERED.  Ignore actions on
         * slave links as master will do them again.
         */
        if (!(ehc->i.flags & ATA_EHI_QUIET) && link != ap->slave_link)
                ap->pflags |= ATA_PFLAG_RECOVERED;

        spin_unlock_irqrestore(ap->lock, flags);
}

/**
 * ata_eh_done - EH action complete
 * @link: ATA link for which EH actions are complete
 * @dev: target ATA dev for per-dev action (can be NULL)
 * @action: action just completed
 *
 * Called right after performing EH actions to clear related bits
 * in @link->eh_context.
 *
 * LOCKING:
 * None.
 */
void ata_eh_done(struct ata_link *link, struct ata_device *dev,
                 unsigned int action)
{
        struct ata_eh_context *ehc = &link->eh_context;

        ata_eh_clear_action(link, dev, &ehc->i, action);
}

/**
 * ata_err_string - convert err_mask to descriptive string
 * @err_mask: error mask to convert to string
 *
 * Convert @err_mask to descriptive string.  Errors are
 * prioritized according to severity and only the most severe
 * error is reported.
 *
 * LOCKING:
 * None.
 *
 * RETURNS:
 * Descriptive string for @err_mask
 */
static const char *ata_err_string(unsigned int err_mask)
{
        if (err_mask & AC_ERR_HOST_BUS)
                return "host bus error";
        if (err_mask & AC_ERR_ATA_BUS)
                return "ATA bus error";
        if (err_mask & AC_ERR_TIMEOUT)
                return "timeout";
        if (err_mask & AC_ERR_HSM)
                return "HSM violation";
        if (err_mask & AC_ERR_SYSTEM)
                return "internal error";
        if (err_mask & AC_ERR_MEDIA)
                return "media error";
        if (err_mask & AC_ERR_INVALID)
                return "invalid argument";
        if (err_mask & AC_ERR_DEV)
                return "device error";
        return "unknown error";
}

/**
 * ata_read_log_page - read a specific log page
 * @dev: target device
 * @page: page to read
 * @buf: buffer to store read page
 * @sectors: number of sectors to read
 *
 * Read log page using READ_LOG_EXT command.
 *
 * LOCKING:
 * Kernel thread context (may sleep).
 *
 * RETURNS:
 * 0 on success, AC_ERR_* mask otherwise.
 */
static unsigned int ata_read_log_page(struct ata_device *dev,
                                      u8 page, void *buf, unsigned int sectors)
{
        struct ata_taskfile tf;
        unsigned int err_mask;

        DPRINTK("read log page - page %d\n", page);

        ata_tf_init(dev, &tf);
        tf.command = ATA_CMD_READ_LOG_EXT;
        tf.lbal = page;
        tf.nsect = sectors;
        tf.hob_nsect = sectors >> 8;
        tf.flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_LBA48 | ATA_TFLAG_DEVICE;
        tf.protocol = ATA_PROT_PIO;

        err_mask = ata_exec_internal(dev, &tf, NULL, DMA_FROM_DEVICE,
                                     buf, sectors * ATA_SECT_SIZE, 0);

        DPRINTK("EXIT, err_mask=%x\n", err_mask);
        return err_mask;
}

/**
 * ata_eh_read_log_10h - Read log page 10h for NCQ error details
 * @dev: Device to read log page 10h from
 * @tag: Resulting tag of the failed command
 * @tf: Resulting taskfile registers of the failed command
 *
 * Read log page 10h to obtain NCQ error details and clear error
 * condition.
 *
 * LOCKING:
 * Kernel thread context (may sleep).
 *
 * RETURNS:
 * 0 on success, -errno otherwise.
 */
static int ata_eh_read_log_10h(struct ata_device *dev,
                               int *tag, struct ata_taskfile *tf)
{
        u8 *buf = dev->link->ap->sector_buf;
        unsigned int err_mask;
        u8 csum;
        int i;

        err_mask = ata_read_log_page(dev, ATA_LOG_SATA_NCQ, buf, 1);
        if (err_mask)
                return -EIO;

        csum = 0;
        for (i = 0; i < ATA_SECT_SIZE; i++)
                csum += buf[i];
        if (csum)
                ata_dev_printk(dev, KERN_WARNING,
                               "invalid checksum 0x%x on log page 10h\n", csum);

        if (buf[0] & 0x80)
                return -ENOENT;

        *tag = buf[0] & 0x1f;

        tf->command = buf[2];
        tf->feature = buf[3];
        tf->lbal = buf[4];
        tf->lbam = buf[5];
        tf->lbah = buf[6];
        tf->device = buf[7];
        tf->hob_lbal = buf[8];
        tf->hob_lbam = buf[9];
        tf->hob_lbah = buf[10];
        tf->nsect = buf[12];
        tf->hob_nsect = buf[13];

        return 0;
}

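/* Byte layout consumed above, for reference (summary added for
 * clarity, derived from the assignments in ata_eh_read_log_10h()):
 * buf[0] bit 7 set means there is no queued error to report (-ENOENT)
 * and bits 4:0 hold the failed tag; buf[2]/buf[3] carry status/error,
 * buf[4..10] the LBA and device registers, and buf[12]/buf[13] the
 * sector count pair.
 */
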
/**
 * atapi_eh_tur - perform ATAPI TEST_UNIT_READY
 * @dev: target ATAPI device
 * @r_sense_key: out parameter for sense_key
 *
 * Perform ATAPI TEST_UNIT_READY.
 *
 * LOCKING:
 * EH context (may sleep).
 *
 * RETURNS:
 * 0 on success, AC_ERR_* mask on failure.
 */
static unsigned int atapi_eh_tur(struct ata_device *dev, u8 *r_sense_key)
{
        u8 cdb[ATAPI_CDB_LEN] = { TEST_UNIT_READY, 0, 0, 0, 0, 0 };
        struct ata_taskfile tf;
        unsigned int err_mask;

        ata_tf_init(dev, &tf);

        tf.flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE;
        tf.command = ATA_CMD_PACKET;
        tf.protocol = ATAPI_PROT_NODATA;

        err_mask = ata_exec_internal(dev, &tf, cdb, DMA_NONE, NULL, 0, 0);
        if (err_mask == AC_ERR_DEV)
                *r_sense_key = tf.feature >> 4;
        return err_mask;
}

/**
 * atapi_eh_request_sense - perform ATAPI REQUEST_SENSE
 * @dev: device to perform REQUEST_SENSE to
 * @sense_buf: result sense data buffer (SCSI_SENSE_BUFFERSIZE bytes long)
 * @dfl_sense_key: default sense key to use
 *
 * Perform ATAPI REQUEST_SENSE after the device reported CHECK
 * SENSE.  This function is an EH helper.
 *
 * LOCKING:
 * Kernel thread context (may sleep).
 *
 * RETURNS:
 * 0 on success, AC_ERR_* mask on failure
 */
static unsigned int atapi_eh_request_sense(struct ata_device *dev,
                                           u8 *sense_buf, u8 dfl_sense_key)
{
        u8 cdb[ATAPI_CDB_LEN] =
                { REQUEST_SENSE, 0, 0, 0, SCSI_SENSE_BUFFERSIZE, 0 };
        struct ata_port *ap = dev->link->ap;
        struct ata_taskfile tf;

        DPRINTK("ATAPI request sense\n");

        /* FIXME: is this needed? */
        memset(sense_buf, 0, SCSI_SENSE_BUFFERSIZE);

        /* initialize sense_buf with the error register,
         * for the case where they are -not- overwritten
         */
        sense_buf[0] = 0x70;
        sense_buf[2] = dfl_sense_key;

        /* some devices time out if garbage left in tf */
        ata_tf_init(dev, &tf);

        tf.flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE;
        tf.command = ATA_CMD_PACKET;

        /* is it pointless to prefer PIO for "safety reasons"? */
        if (ap->flags & ATA_FLAG_PIO_DMA) {
                tf.protocol = ATAPI_PROT_DMA;
                tf.feature |= ATAPI_PKT_DMA;
        } else {
                tf.protocol = ATAPI_PROT_PIO;
                tf.lbam = SCSI_SENSE_BUFFERSIZE;
                tf.lbah = 0;
        }

        return ata_exec_internal(dev, &tf, cdb, DMA_FROM_DEVICE,
                                 sense_buf, SCSI_SENSE_BUFFERSIZE, 0);
}

/**
 * ata_eh_analyze_serror - analyze SError for a failed port
 * @link: ATA link to analyze SError for
 *
 * Analyze SError if available and further determine cause of
 * failure.
 *
 * LOCKING:
 * None.
 */
static void ata_eh_analyze_serror(struct ata_link *link)
{
        struct ata_eh_context *ehc = &link->eh_context;
        u32 serror = ehc->i.serror;
        unsigned int err_mask = 0, action = 0;
        u32 hotplug_mask;

        if (serror & (SERR_PERSISTENT | SERR_DATA)) {
                err_mask |= AC_ERR_ATA_BUS;
                action |= ATA_EH_RESET;
        }
        if (serror & SERR_PROTOCOL) {
                err_mask |= AC_ERR_HSM;
                action |= ATA_EH_RESET;
        }
        if (serror & SERR_INTERNAL) {
                err_mask |= AC_ERR_SYSTEM;
                action |= ATA_EH_RESET;
        }

        /* Determine whether a hotplug event has occurred.  Both
         * SError.N/X are considered hotplug events for enabled or
         * host links.  For disabled PMP links, only N bit is
         * considered as X bit is left at 1 for link plugging.
         */
        if (!(link->flags & ATA_LFLAG_DISABLED) || ata_is_host_link(link))
                hotplug_mask = SERR_PHYRDY_CHG | SERR_DEV_XCHG;
        else
                hotplug_mask = SERR_PHYRDY_CHG;

        if (serror & hotplug_mask)
                ata_ehi_hotplugged(&ehc->i);

        ehc->i.err_mask |= err_mask;
        ehc->i.action |= action;
}

/**
 * ata_eh_analyze_ncq_error - analyze NCQ error
 * @link: ATA link to analyze NCQ error for
 *
 * Read log page 10h, determine the offending qc and acquire
 * error status TF.  For NCQ device errors, all an LLDD has to do
 * is set AC_ERR_DEV in ehi->err_mask.  This function takes
 * care of the rest.
 *
 * LOCKING:
 * Kernel thread context (may sleep).
 */
void ata_eh_analyze_ncq_error(struct ata_link *link)
{
        struct ata_port *ap = link->ap;
        struct ata_eh_context *ehc = &link->eh_context;
        struct ata_device *dev = link->device;
        struct ata_queued_cmd *qc;
        struct ata_taskfile tf;
        int tag, rc;

        /* if frozen, we can't do much */
        if (ap->pflags & ATA_PFLAG_FROZEN)
                return;

        /* is it NCQ device error? */
        if (!link->sactive || !(ehc->i.err_mask & AC_ERR_DEV))
                return;

        /* has LLDD analyzed already? */
        for (tag = 0; tag < ATA_MAX_QUEUE; tag++) {
                qc = __ata_qc_from_tag(ap, tag);

                if (!(qc->flags & ATA_QCFLAG_FAILED))
                        continue;

                if (qc->err_mask)
                        return;
        }

        /* okay, this error is ours */
        memset(&tf, 0, sizeof(tf));
        rc = ata_eh_read_log_10h(dev, &tag, &tf);
        if (rc) {
                ata_link_printk(link, KERN_ERR, "failed to read log page 10h "
                                "(errno=%d)\n", rc);
                return;
        }

        if (!(link->sactive & (1 << tag))) {
                ata_link_printk(link, KERN_ERR, "log page 10h reported "
                                "inactive tag %d\n", tag);
                return;
        }

        /* we've got the perpetrator, condemn it */
        qc = __ata_qc_from_tag(ap, tag);
        memcpy(&qc->result_tf, &tf, sizeof(tf));
        qc->result_tf.flags = ATA_TFLAG_ISADDR | ATA_TFLAG_LBA | ATA_TFLAG_LBA48;
        qc->err_mask |= AC_ERR_DEV | AC_ERR_NCQ;
        ehc->i.err_mask &= ~AC_ERR_DEV;
}

/**
 * ata_eh_analyze_tf - analyze taskfile of a failed qc
 * @qc: qc to analyze
 * @tf: Taskfile registers to analyze
 *
 * Analyze taskfile of @qc and further determine cause of
 * failure.  This function also requests ATAPI sense data if
 * available.
 *
 * LOCKING:
 * Kernel thread context (may sleep).
 *
 * RETURNS:
 * Determined recovery action
 */
static unsigned int ata_eh_analyze_tf(struct ata_queued_cmd *qc,
                                      const struct ata_taskfile *tf)
{
        unsigned int tmp, action = 0;
        u8 stat = tf->command, err = tf->feature;

        if ((stat & (ATA_BUSY | ATA_DRQ | ATA_DRDY)) != ATA_DRDY) {
                qc->err_mask |= AC_ERR_HSM;
                return ATA_EH_RESET;
        }

        if (stat & (ATA_ERR | ATA_DF))
                qc->err_mask |= AC_ERR_DEV;
        else
                return 0;

        switch (qc->dev->class) {
        case ATA_DEV_ATA:
                if (err & ATA_ICRC)
                        qc->err_mask |= AC_ERR_ATA_BUS;
                if (err & ATA_UNC)
                        qc->err_mask |= AC_ERR_MEDIA;
                if (err & ATA_IDNF)
                        qc->err_mask |= AC_ERR_INVALID;
                break;

        case ATA_DEV_ATAPI:
                if (!(qc->ap->pflags & ATA_PFLAG_FROZEN)) {
                        tmp = atapi_eh_request_sense(qc->dev,
                                                qc->scsicmd->sense_buffer,
                                                qc->result_tf.feature >> 4);
                        if (!tmp) {
                                /* ATA_QCFLAG_SENSE_VALID is used to
                                 * tell atapi_qc_complete() that sense
                                 * data is already valid.
                                 *
                                 * TODO: interpret sense data and set
                                 * appropriate err_mask.
                                 */
                                qc->flags |= ATA_QCFLAG_SENSE_VALID;
                        } else
                                qc->err_mask |= tmp;
                }
        }

        if (qc->err_mask & (AC_ERR_HSM | AC_ERR_TIMEOUT | AC_ERR_ATA_BUS))
                action |= ATA_EH_RESET;

        return action;
}

static int ata_eh_categorize_error(unsigned int eflags, unsigned int err_mask,
                                   int *xfer_ok)
{
        int base = 0;

        if (!(eflags & ATA_EFLAG_DUBIOUS_XFER))
                *xfer_ok = 1;

        if (!*xfer_ok)
                base = ATA_ECAT_DUBIOUS_NONE;

        if (err_mask & AC_ERR_ATA_BUS)
                return base + ATA_ECAT_ATA_BUS;

        if (err_mask & AC_ERR_TIMEOUT)
                return base + ATA_ECAT_TOUT_HSM;

        if (eflags & ATA_EFLAG_IS_IO) {
                if (err_mask & AC_ERR_HSM)
                        return base + ATA_ECAT_TOUT_HSM;
                if ((err_mask &
                     (AC_ERR_DEV|AC_ERR_MEDIA|AC_ERR_INVALID)) == AC_ERR_DEV)
                        return base + ATA_ECAT_UNK_DEV;
        }

        return 0;
}

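/* Categorization at a glance, for illustration (comment added for
 * clarity, not from the original source): a timeout on any command
 * lands in ATA_ECAT_TOUT_HSM; a bare AC_ERR_DEV (no media or
 * invalid-argument bits) on an IO command lands in ATA_ECAT_UNK_DEV;
 * and if the transfer mode hasn't been verified yet (*xfer_ok still
 * zero), the same errors shift up by ATA_ECAT_DUBIOUS_NONE into the
 * DUBIOUS_* categories.
 */
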
struct speed_down_verdict_arg {
        u64 since;
        int xfer_ok;
        int nr_errors[ATA_ECAT_NR];
};

static int speed_down_verdict_cb(struct ata_ering_entry *ent, void *void_arg)
{
        struct speed_down_verdict_arg *arg = void_arg;
        int cat;

        if (ent->timestamp < arg->since)
                return -1;

        cat = ata_eh_categorize_error(ent->eflags, ent->err_mask,
                                      &arg->xfer_ok);
        arg->nr_errors[cat]++;

        return 0;
}

/**
 * ata_eh_speed_down_verdict - Determine speed down verdict
 * @dev: Device of interest
 *
 * This function examines the error ring of @dev and determines
 * whether NCQ needs to be turned off, transfer speed should be
 * stepped down, or falling back to PIO is necessary.
 *
 * ECAT_ATA_BUS        : ATA_BUS error for any command
 *
 * ECAT_TOUT_HSM       : TIMEOUT for any command or HSM violation for
 *                       IO commands
 *
 * ECAT_UNK_DEV        : Unknown DEV error for IO commands
 *
 * ECAT_DUBIOUS_*      : Identical to above three but occurred while
 *                       data transfer hasn't been verified.
 *
 * Verdicts are
 *
 * NCQ_OFF             : Turn off NCQ.
 *
 * SPEED_DOWN          : Speed down transfer speed but don't fall back
 *                       to PIO.
 *
 * FALLBACK_TO_PIO     : Fall back to PIO.
 *
 * Even if multiple verdicts are returned, only one action is
 * taken per error.  An action triggered by non-DUBIOUS errors
 * clears ering, while one triggered by DUBIOUS_* errors doesn't.
 * This is to expedite speed down decisions right after device is
 * initially configured.
 *
 * The following are the speed down rules.  #1 and #2 deal with
 * DUBIOUS errors.
 *
 * 1. If more than one DUBIOUS_ATA_BUS or DUBIOUS_TOUT_HSM errors
 *    occurred during last 5 mins, SPEED_DOWN and FALLBACK_TO_PIO.
 *
 * 2. If more than one DUBIOUS_TOUT_HSM or DUBIOUS_UNK_DEV errors
 *    occurred during last 5 mins, NCQ_OFF.
 *
 * 3. If more than 6 ATA_BUS, TOUT_HSM or UNK_DEV errors
 *    occurred during last 5 mins, FALLBACK_TO_PIO.
 *
 * 4. If more than 3 TOUT_HSM or UNK_DEV errors occurred
 *    during last 10 mins, NCQ_OFF.
 *
 * 5. If more than 3 ATA_BUS or TOUT_HSM errors, or more than 6
 *    UNK_DEV errors occurred during last 10 mins, SPEED_DOWN.
 *
 * LOCKING:
 * Inherited from caller.
 *
 * RETURNS:
 * OR of ATA_EH_SPDN_* flags.
 */
static unsigned int ata_eh_speed_down_verdict(struct ata_device *dev)
{
        const u64 j5mins = 5LLU * 60 * HZ, j10mins = 10LLU * 60 * HZ;
        u64 j64 = get_jiffies_64();
        struct speed_down_verdict_arg arg;
        unsigned int verdict = 0;

        /* scan past 5 mins of error history */
        memset(&arg, 0, sizeof(arg));
        arg.since = j64 - min(j64, j5mins);
        ata_ering_map(&dev->ering, speed_down_verdict_cb, &arg);

        if (arg.nr_errors[ATA_ECAT_DUBIOUS_ATA_BUS] +
            arg.nr_errors[ATA_ECAT_DUBIOUS_TOUT_HSM] > 1)
                verdict |= ATA_EH_SPDN_SPEED_DOWN |
                        ATA_EH_SPDN_FALLBACK_TO_PIO | ATA_EH_SPDN_KEEP_ERRORS;

        if (arg.nr_errors[ATA_ECAT_DUBIOUS_TOUT_HSM] +
            arg.nr_errors[ATA_ECAT_DUBIOUS_UNK_DEV] > 1)
                verdict |= ATA_EH_SPDN_NCQ_OFF | ATA_EH_SPDN_KEEP_ERRORS;

        if (arg.nr_errors[ATA_ECAT_ATA_BUS] +
            arg.nr_errors[ATA_ECAT_TOUT_HSM] +
            arg.nr_errors[ATA_ECAT_UNK_DEV] > 6)
                verdict |= ATA_EH_SPDN_FALLBACK_TO_PIO;

        /* scan past 10 mins of error history */
        memset(&arg, 0, sizeof(arg));
        arg.since = j64 - min(j64, j10mins);
        ata_ering_map(&dev->ering, speed_down_verdict_cb, &arg);

        if (arg.nr_errors[ATA_ECAT_TOUT_HSM] +
            arg.nr_errors[ATA_ECAT_UNK_DEV] > 3)
                verdict |= ATA_EH_SPDN_NCQ_OFF;

        if (arg.nr_errors[ATA_ECAT_ATA_BUS] +
            arg.nr_errors[ATA_ECAT_TOUT_HSM] > 3 ||
            arg.nr_errors[ATA_ECAT_UNK_DEV] > 6)
                verdict |= ATA_EH_SPDN_SPEED_DOWN;

        return verdict;
}

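/* Worked example, for illustration (comment added for clarity, not
 * from the original source): two DUBIOUS_TOUT_HSM errors within five
 * minutes satisfy both rule #1 and rule #2 above, so the verdict comes
 * back as SPEED_DOWN | FALLBACK_TO_PIO | NCQ_OFF with KEEP_ERRORS set,
 * and ata_eh_speed_down() below acts on it without clearing the error
 * ring.
 */
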
/**
 * ata_eh_speed_down - record error and speed down if necessary
 * @dev: Failed device
 * @eflags: mask of ATA_EFLAG_* flags
 * @err_mask: err_mask of the error
 *
 * Record error and examine error history to determine whether
 * adjusting transmission speed is necessary.  It also sets
 * transmission limits appropriately if such adjustment is
 * necessary.
 *
 * LOCKING:
 * Kernel thread context (may sleep).
 *
 * RETURNS:
 * Determined recovery action.
 */
static unsigned int ata_eh_speed_down(struct ata_device *dev,
                                unsigned int eflags, unsigned int err_mask)
{
        struct ata_link *link = ata_dev_phys_link(dev);
        int xfer_ok = 0;
        unsigned int verdict;
        unsigned int action = 0;

        /* don't bother if Cat-0 error */
        if (ata_eh_categorize_error(eflags, err_mask, &xfer_ok) == 0)
                return 0;

        /* record error and determine whether speed down is necessary */
        ata_ering_record(&dev->ering, eflags, err_mask);
        verdict = ata_eh_speed_down_verdict(dev);

        /* turn off NCQ? */
        if ((verdict & ATA_EH_SPDN_NCQ_OFF) &&
            (dev->flags & (ATA_DFLAG_PIO | ATA_DFLAG_NCQ |
                           ATA_DFLAG_NCQ_OFF)) == ATA_DFLAG_NCQ) {
                dev->flags |= ATA_DFLAG_NCQ_OFF;
                ata_dev_printk(dev, KERN_WARNING,
                               "NCQ disabled due to excessive errors\n");
                goto done;
        }

        /* speed down? */
        if (verdict & ATA_EH_SPDN_SPEED_DOWN) {
                /* speed down SATA link speed if possible */
                if (sata_down_spd_limit(link, 0) == 0) {
                        action |= ATA_EH_RESET;
                        goto done;
                }

                /* lower transfer mode */
                if (dev->spdn_cnt < 2) {
                        static const int dma_dnxfer_sel[] =
                                { ATA_DNXFER_DMA, ATA_DNXFER_40C };
                        static const int pio_dnxfer_sel[] =
                                { ATA_DNXFER_PIO, ATA_DNXFER_FORCE_PIO0 };
                        int sel;

                        if (dev->xfer_shift != ATA_SHIFT_PIO)
                                sel = dma_dnxfer_sel[dev->spdn_cnt];
                        else
                                sel = pio_dnxfer_sel[dev->spdn_cnt];

                        dev->spdn_cnt++;

                        if (ata_down_xfermask_limit(dev, sel) == 0) {
                                action |= ATA_EH_RESET;
                                goto done;
                        }
                }
        }

        /* Fall back to PIO?  Slowing down to PIO is meaningless for
         * SATA ATA devices.  Consider it only for PATA and SATAPI.
         */
        if ((verdict & ATA_EH_SPDN_FALLBACK_TO_PIO) && (dev->spdn_cnt >= 2) &&
            (link->ap->cbl != ATA_CBL_SATA || dev->class == ATA_DEV_ATAPI) &&
            (dev->xfer_shift != ATA_SHIFT_PIO)) {
                if (ata_down_xfermask_limit(dev, ATA_DNXFER_FORCE_PIO) == 0) {
                        dev->spdn_cnt = 0;
                        action |= ATA_EH_RESET;
                        goto done;
                }
        }

        return 0;
 done:
        /* device has been slowed down, blow error history */
        if (!(verdict & ATA_EH_SPDN_KEEP_ERRORS))
                ata_ering_clear(&dev->ering);
        return action;
}

/**
 * ata_eh_link_autopsy - analyze error and determine recovery action
 * @link: host link to perform autopsy on
 *
 * Analyze why @link failed and determine which recovery actions
 * are needed.  This function also sets more detailed AC_ERR_*
 * values and fills sense data for ATAPI CHECK SENSE.
 *
 * LOCKING:
 * Kernel thread context (may sleep).
 */
static void ata_eh_link_autopsy(struct ata_link *link)
{
        struct ata_port *ap = link->ap;
        struct ata_eh_context *ehc = &link->eh_context;
        struct ata_device *dev;
        unsigned int all_err_mask = 0, eflags = 0;
        int tag;
        u32 serror;
        int rc;

        DPRINTK("ENTER\n");

        if (ehc->i.flags & ATA_EHI_NO_AUTOPSY)
                return;

        /* obtain and analyze SError */
        rc = sata_scr_read(link, SCR_ERROR, &serror);
        if (rc == 0) {
                ehc->i.serror |= serror;
                ata_eh_analyze_serror(link);
        } else if (rc != -EOPNOTSUPP) {
                /* SError read failed, force reset and probing */
                ehc->i.probe_mask |= ATA_ALL_DEVICES;
                ehc->i.action |= ATA_EH_RESET;
                ehc->i.err_mask |= AC_ERR_OTHER;
        }

        /* analyze NCQ failure */
        ata_eh_analyze_ncq_error(link);

        /* any real error trumps AC_ERR_OTHER */
        if (ehc->i.err_mask & ~AC_ERR_OTHER)
                ehc->i.err_mask &= ~AC_ERR_OTHER;

        all_err_mask |= ehc->i.err_mask;

        for (tag = 0; tag < ATA_MAX_QUEUE; tag++) {
                struct ata_queued_cmd *qc = __ata_qc_from_tag(ap, tag);

                if (!(qc->flags & ATA_QCFLAG_FAILED) ||
                    ata_dev_phys_link(qc->dev) != link)
                        continue;

                /* inherit upper level err_mask */
                qc->err_mask |= ehc->i.err_mask;

                /* analyze TF */
                ehc->i.action |= ata_eh_analyze_tf(qc, &qc->result_tf);

                /* DEV errors are probably spurious in case of ATA_BUS error */
                if (qc->err_mask & AC_ERR_ATA_BUS)
                        qc->err_mask &= ~(AC_ERR_DEV | AC_ERR_MEDIA |
                                          AC_ERR_INVALID);

                /* any real error trumps unknown error */
                if (qc->err_mask & ~AC_ERR_OTHER)
                        qc->err_mask &= ~AC_ERR_OTHER;

                /* SENSE_VALID trumps dev/unknown error and revalidation */
                if (qc->flags & ATA_QCFLAG_SENSE_VALID)
                        qc->err_mask &= ~(AC_ERR_DEV | AC_ERR_OTHER);

                /* determine whether the command is worth retrying */
                if (qc->flags & ATA_QCFLAG_IO ||
                    (!(qc->err_mask & AC_ERR_INVALID) &&
                     qc->err_mask != AC_ERR_DEV))
                        qc->flags |= ATA_QCFLAG_RETRY;

                /* accumulate error info */
                ehc->i.dev = qc->dev;
                all_err_mask |= qc->err_mask;
                if (qc->flags & ATA_QCFLAG_IO)
                        eflags |= ATA_EFLAG_IS_IO;
        }

        /* enforce default EH actions */
        if (ap->pflags & ATA_PFLAG_FROZEN ||
            all_err_mask & (AC_ERR_HSM | AC_ERR_TIMEOUT))
                ehc->i.action |= ATA_EH_RESET;
        else if (((eflags & ATA_EFLAG_IS_IO) && all_err_mask) ||
                 (!(eflags & ATA_EFLAG_IS_IO) && (all_err_mask & ~AC_ERR_DEV)))
                ehc->i.action |= ATA_EH_REVALIDATE;

        /* If we have offending qcs and the associated failed device,
         * perform per-dev EH action only on the offending device.
         */
        if (ehc->i.dev) {
                ehc->i.dev_action[ehc->i.dev->devno] |=
                        ehc->i.action & ATA_EH_PERDEV_MASK;
                ehc->i.action &= ~ATA_EH_PERDEV_MASK;
        }

        /* propagate timeout to host link */
        if ((all_err_mask & AC_ERR_TIMEOUT) && !ata_is_host_link(link))
                ap->link.eh_context.i.err_mask |= AC_ERR_TIMEOUT;

        /* record error and consider speeding down */
        dev = ehc->i.dev;
        if (!dev && ((ata_link_max_devices(link) == 1 &&
                      ata_dev_enabled(link->device))))
                dev = link->device;

        if (dev) {
                if (dev->flags & ATA_DFLAG_DUBIOUS_XFER)
                        eflags |= ATA_EFLAG_DUBIOUS_XFER;
                ehc->i.action |= ata_eh_speed_down(dev, eflags, all_err_mask);
        }

        DPRINTK("EXIT\n");
}

/**
 * ata_eh_autopsy - analyze error and determine recovery action
 * @ap: host port to perform autopsy on
 *
 * Analyze all links of @ap and determine why they failed and
 * which recovery actions are needed.
 *
 * LOCKING:
 * Kernel thread context (may sleep).
 */
void ata_eh_autopsy(struct ata_port *ap)
{
        struct ata_link *link;

        ata_for_each_link(link, ap, EDGE)
                ata_eh_link_autopsy(link);

        /* Handle the frigging slave link.  Autopsy is done similarly
         * but actions and flags are transferred over to the master
         * link and handled from there.
         */
        if (ap->slave_link) {
                struct ata_eh_context *mehc = &ap->link.eh_context;
                struct ata_eh_context *sehc = &ap->slave_link->eh_context;

                /* transfer control flags from master to slave */
                sehc->i.flags |= mehc->i.flags & ATA_EHI_TO_SLAVE_MASK;

                /* perform autopsy on the slave link */
                ata_eh_link_autopsy(ap->slave_link);

                /* transfer actions from slave to master and clear slave */
                ata_eh_about_to_do(ap->slave_link, NULL, ATA_EH_ALL_ACTIONS);
                mehc->i.action          |= sehc->i.action;
                mehc->i.dev_action[1]   |= sehc->i.dev_action[1];
                mehc->i.flags           |= sehc->i.flags;
                ata_eh_done(ap->slave_link, NULL, ATA_EH_ALL_ACTIONS);
        }

        /* Autopsy of fanout ports can affect host link autopsy.
         * Perform host link autopsy last.
         */
        if (sata_pmp_attached(ap))
                ata_eh_link_autopsy(&ap->link);
}

2131 * ata_get_cmd_descript - get description for ATA command
2132 * @command: ATA command code to get description for
/**
 *	ata_get_cmd_descript - get description for ATA command
 *	@command: ATA command code to get description for
 *
 *	Return a textual description of the given command, or NULL if the
 *	command is not known.
 *
 *	LOCKING:
 *	None
 */
const char *ata_get_cmd_descript(u8 command)
{
#ifdef CONFIG_ATA_VERBOSE_ERROR
	static const struct
	{
		u8 command;
		const char *text;
	} cmd_descr[] = {
		{ ATA_CMD_DEV_RESET,		"DEVICE RESET" },
		{ ATA_CMD_CHK_POWER,		"CHECK POWER MODE" },
		{ ATA_CMD_STANDBY,		"STANDBY" },
		{ ATA_CMD_IDLE,			"IDLE" },
		{ ATA_CMD_EDD,			"EXECUTE DEVICE DIAGNOSTIC" },
		{ ATA_CMD_DOWNLOAD_MICRO,	"DOWNLOAD MICROCODE" },
		{ ATA_CMD_NOP,			"NOP" },
		{ ATA_CMD_FLUSH,		"FLUSH CACHE" },
		{ ATA_CMD_FLUSH_EXT,		"FLUSH CACHE EXT" },
		{ ATA_CMD_ID_ATA,		"IDENTIFY DEVICE" },
		{ ATA_CMD_ID_ATAPI,		"IDENTIFY PACKET DEVICE" },
		{ ATA_CMD_SERVICE,		"SERVICE" },
		{ ATA_CMD_READ,			"READ DMA" },
		{ ATA_CMD_READ_EXT,		"READ DMA EXT" },
		{ ATA_CMD_READ_QUEUED,		"READ DMA QUEUED" },
		{ ATA_CMD_READ_STREAM_EXT,	"READ STREAM EXT" },
		{ ATA_CMD_READ_STREAM_DMA_EXT,	"READ STREAM DMA EXT" },
		{ ATA_CMD_WRITE,		"WRITE DMA" },
		{ ATA_CMD_WRITE_EXT,		"WRITE DMA EXT" },
		{ ATA_CMD_WRITE_QUEUED,		"WRITE DMA QUEUED EXT" },
		{ ATA_CMD_WRITE_STREAM_EXT,	"WRITE STREAM EXT" },
		{ ATA_CMD_WRITE_STREAM_DMA_EXT,	"WRITE STREAM DMA EXT" },
		{ ATA_CMD_WRITE_FUA_EXT,	"WRITE DMA FUA EXT" },
		{ ATA_CMD_WRITE_QUEUED_FUA_EXT,	"WRITE DMA QUEUED FUA EXT" },
		{ ATA_CMD_FPDMA_READ,		"READ FPDMA QUEUED" },
		{ ATA_CMD_FPDMA_WRITE,		"WRITE FPDMA QUEUED" },
		{ ATA_CMD_PIO_READ,		"READ SECTOR(S)" },
		{ ATA_CMD_PIO_READ_EXT,		"READ SECTOR(S) EXT" },
		{ ATA_CMD_PIO_WRITE,		"WRITE SECTOR(S)" },
		{ ATA_CMD_PIO_WRITE_EXT,	"WRITE SECTOR(S) EXT" },
		{ ATA_CMD_READ_MULTI,		"READ MULTIPLE" },
		{ ATA_CMD_READ_MULTI_EXT,	"READ MULTIPLE EXT" },
		{ ATA_CMD_WRITE_MULTI,		"WRITE MULTIPLE" },
		{ ATA_CMD_WRITE_MULTI_EXT,	"WRITE MULTIPLE EXT" },
		{ ATA_CMD_WRITE_MULTI_FUA_EXT,	"WRITE MULTIPLE FUA EXT" },
		{ ATA_CMD_SET_FEATURES,		"SET FEATURES" },
		{ ATA_CMD_SET_MULTI,		"SET MULTIPLE MODE" },
		{ ATA_CMD_VERIFY,		"READ VERIFY SECTOR(S)" },
		{ ATA_CMD_VERIFY_EXT,		"READ VERIFY SECTOR(S) EXT" },
		{ ATA_CMD_WRITE_UNCORR_EXT,	"WRITE UNCORRECTABLE EXT" },
		{ ATA_CMD_STANDBYNOW1,		"STANDBY IMMEDIATE" },
		{ ATA_CMD_IDLEIMMEDIATE,	"IDLE IMMEDIATE" },
		{ ATA_CMD_SLEEP,		"SLEEP" },
		{ ATA_CMD_INIT_DEV_PARAMS,	"INITIALIZE DEVICE PARAMETERS" },
		{ ATA_CMD_READ_NATIVE_MAX,	"READ NATIVE MAX ADDRESS" },
		{ ATA_CMD_READ_NATIVE_MAX_EXT,	"READ NATIVE MAX ADDRESS EXT" },
		{ ATA_CMD_SET_MAX,		"SET MAX ADDRESS" },
		{ ATA_CMD_SET_MAX_EXT,		"SET MAX ADDRESS EXT" },
		{ ATA_CMD_READ_LOG_EXT,		"READ LOG EXT" },
		{ ATA_CMD_WRITE_LOG_EXT,	"WRITE LOG EXT" },
		{ ATA_CMD_READ_LOG_DMA_EXT,	"READ LOG DMA EXT" },
		{ ATA_CMD_WRITE_LOG_DMA_EXT,	"WRITE LOG DMA EXT" },
		{ ATA_CMD_TRUSTED_RCV,		"TRUSTED RECEIVE" },
		{ ATA_CMD_TRUSTED_RCV_DMA,	"TRUSTED RECEIVE DMA" },
		{ ATA_CMD_TRUSTED_SND,		"TRUSTED SEND" },
		{ ATA_CMD_TRUSTED_SND_DMA,	"TRUSTED SEND DMA" },
		{ ATA_CMD_PMP_READ,		"READ BUFFER" },
		{ ATA_CMD_PMP_WRITE,		"WRITE BUFFER" },
		{ ATA_CMD_CONF_OVERLAY,		"DEVICE CONFIGURATION OVERLAY" },
		{ ATA_CMD_SEC_SET_PASS,		"SECURITY SET PASSWORD" },
		{ ATA_CMD_SEC_UNLOCK,		"SECURITY UNLOCK" },
		{ ATA_CMD_SEC_ERASE_PREP,	"SECURITY ERASE PREPARE" },
		{ ATA_CMD_SEC_ERASE_UNIT,	"SECURITY ERASE UNIT" },
		{ ATA_CMD_SEC_FREEZE_LOCK,	"SECURITY FREEZE LOCK" },
		{ ATA_CMD_SEC_DISABLE_PASS,	"SECURITY DISABLE PASSWORD" },
		{ ATA_CMD_CONFIG_STREAM,	"CONFIGURE STREAM" },
		{ ATA_CMD_SMART,		"SMART" },
		{ ATA_CMD_MEDIA_LOCK,		"DOOR LOCK" },
		{ ATA_CMD_MEDIA_UNLOCK,		"DOOR UNLOCK" },
		{ ATA_CMD_DSM,			"DATA SET MANAGEMENT" },
		{ ATA_CMD_CHK_MED_CRD_TYP,	"CHECK MEDIA CARD TYPE" },
		{ ATA_CMD_CFA_REQ_EXT_ERR,	"CFA REQUEST EXTENDED ERROR" },
		{ ATA_CMD_CFA_WRITE_NE,		"CFA WRITE SECTORS WITHOUT ERASE" },
		{ ATA_CMD_CFA_TRANS_SECT,	"CFA TRANSLATE SECTOR" },
		{ ATA_CMD_CFA_ERASE,		"CFA ERASE SECTORS" },
		{ ATA_CMD_CFA_WRITE_MULT_NE,	"CFA WRITE MULTIPLE WITHOUT ERASE" },
		{ ATA_CMD_READ_LONG,		"READ LONG (with retries)" },
		{ ATA_CMD_READ_LONG_ONCE,	"READ LONG (without retries)" },
		{ ATA_CMD_WRITE_LONG,		"WRITE LONG (with retries)" },
		{ ATA_CMD_WRITE_LONG_ONCE,	"WRITE LONG (without retries)" },
		{ ATA_CMD_RESTORE,		"RECALIBRATE" },
		{ 0,				NULL } /* terminate list */
	};

	unsigned int i;
	for (i = 0; cmd_descr[i].text; i++)
		if (cmd_descr[i].command == command)
			return cmd_descr[i].text;
#endif

	return NULL;
}
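/*
 * Usage sketch (illustrative only, not part of libata): a hypothetical
 * debug helper that translates a raw taskfile command byte before
 * logging it.  ata_dev_printk() and struct ata_taskfile are the real
 * interfaces used throughout this file; only the helper itself is made
 * up for illustration.
 */
static inline void example_log_tf_command(struct ata_device *dev,
					  const struct ata_taskfile *tf)
{
	const char *descr = ata_get_cmd_descript(tf->command);

	/* fall back to "unknown" for command codes not in cmd_descr[] */
	ata_dev_printk(dev, KERN_DEBUG, "issuing 0x%02x (%s)\n",
		       tf->command, descr ? descr : "unknown");
}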
/**
 *	ata_eh_link_report - report error handling to user
 *	@link: ATA link EH is going on
 *
 *	Report EH to user.
 *
 *	LOCKING:
 *	None.
 */
static void ata_eh_link_report(struct ata_link *link)
{
	struct ata_port *ap = link->ap;
	struct ata_eh_context *ehc = &link->eh_context;
	const char *frozen, *desc;
	char tries_buf[6];
	int tag, nr_failed = 0;

	if (ehc->i.flags & ATA_EHI_QUIET)
		return;

	desc = NULL;
	if (ehc->i.desc[0] != '\0')
		desc = ehc->i.desc;

	for (tag = 0; tag < ATA_MAX_QUEUE; tag++) {
		struct ata_queued_cmd *qc = __ata_qc_from_tag(ap, tag);

		if (!(qc->flags & ATA_QCFLAG_FAILED) ||
		    ata_dev_phys_link(qc->dev) != link ||
		    ((qc->flags & ATA_QCFLAG_QUIET) &&
		     qc->err_mask == AC_ERR_DEV))
			continue;
		if (qc->flags & ATA_QCFLAG_SENSE_VALID && !qc->err_mask)
			continue;

		nr_failed++;
	}

	if (!nr_failed && !ehc->i.err_mask)
		return;

	frozen = "";
	if (ap->pflags & ATA_PFLAG_FROZEN)
		frozen = " frozen";

	memset(tries_buf, 0, sizeof(tries_buf));
	if (ap->eh_tries < ATA_EH_MAX_TRIES)
		snprintf(tries_buf, sizeof(tries_buf) - 1, " t%d",
			 ap->eh_tries);

	if (ehc->i.dev) {
		ata_dev_printk(ehc->i.dev, KERN_ERR, "exception Emask 0x%x "
			       "SAct 0x%x SErr 0x%x action 0x%x%s%s\n",
			       ehc->i.err_mask, link->sactive, ehc->i.serror,
			       ehc->i.action, frozen, tries_buf);
		if (desc)
			ata_dev_printk(ehc->i.dev, KERN_ERR, "%s\n", desc);
	} else {
		ata_link_printk(link, KERN_ERR, "exception Emask 0x%x "
				"SAct 0x%x SErr 0x%x action 0x%x%s%s\n",
				ehc->i.err_mask, link->sactive, ehc->i.serror,
				ehc->i.action, frozen, tries_buf);
		if (desc)
			ata_link_printk(link, KERN_ERR, "%s\n", desc);
	}

#ifdef CONFIG_ATA_VERBOSE_ERROR
	if (ehc->i.serror)
		ata_link_printk(link, KERN_ERR,
		  "SError: { %s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s}\n",
		  ehc->i.serror & SERR_DATA_RECOVERED ? "RecovData " : "",
		  ehc->i.serror & SERR_COMM_RECOVERED ? "RecovComm " : "",
		  ehc->i.serror & SERR_DATA ? "UnrecovData " : "",
		  ehc->i.serror & SERR_PERSISTENT ? "Persist " : "",
		  ehc->i.serror & SERR_PROTOCOL ? "Proto " : "",
		  ehc->i.serror & SERR_INTERNAL ? "HostInt " : "",
		  ehc->i.serror & SERR_PHYRDY_CHG ? "PHYRdyChg " : "",
		  ehc->i.serror & SERR_PHY_INT_ERR ? "PHYInt " : "",
		  ehc->i.serror & SERR_COMM_WAKE ? "CommWake " : "",
		  ehc->i.serror & SERR_10B_8B_ERR ? "10B8B " : "",
		  ehc->i.serror & SERR_DISPARITY ? "Dispar " : "",
		  ehc->i.serror & SERR_CRC ? "BadCRC " : "",
		  ehc->i.serror & SERR_HANDSHAKE ? "Handshk " : "",
		  ehc->i.serror & SERR_LINK_SEQ_ERR ? "LinkSeq " : "",
		  ehc->i.serror & SERR_TRANS_ST_ERROR ? "TrStaTrns " : "",
		  ehc->i.serror & SERR_UNRECOG_FIS ? "UnrecFIS " : "",
		  ehc->i.serror & SERR_DEV_XCHG ? "DevExch " : "");
#endif

	for (tag = 0; tag < ATA_MAX_QUEUE; tag++) {
		struct ata_queued_cmd *qc = __ata_qc_from_tag(ap, tag);
		struct ata_taskfile *cmd = &qc->tf, *res = &qc->result_tf;
		const u8 *cdb = qc->cdb;
		char data_buf[20] = "";
		char cdb_buf[70] = "";

		if (!(qc->flags & ATA_QCFLAG_FAILED) ||
		    ata_dev_phys_link(qc->dev) != link || !qc->err_mask)
			continue;

		if (qc->dma_dir != DMA_NONE) {
			static const char *dma_str[] = {
				[DMA_BIDIRECTIONAL]	= "bidi",
				[DMA_TO_DEVICE]		= "out",
				[DMA_FROM_DEVICE]	= "in",
			};
			static const char *prot_str[] = {
				[ATA_PROT_PIO]		= "pio",
				[ATA_PROT_DMA]		= "dma",
				[ATA_PROT_NCQ]		= "ncq",
				[ATAPI_PROT_PIO]	= "pio",
				[ATAPI_PROT_DMA]	= "dma",
			};

			snprintf(data_buf, sizeof(data_buf), " %s %u %s",
				 prot_str[qc->tf.protocol], qc->nbytes,
				 dma_str[qc->dma_dir]);
		}

		if (ata_is_atapi(qc->tf.protocol)) {
			if (qc->scsicmd)
				scsi_print_command(qc->scsicmd);
			else
				snprintf(cdb_buf, sizeof(cdb_buf),
				 "cdb %02x %02x %02x %02x %02x %02x %02x %02x "
				 "%02x %02x %02x %02x %02x %02x %02x %02x\n         ",
				 cdb[0], cdb[1], cdb[2], cdb[3],
				 cdb[4], cdb[5], cdb[6], cdb[7],
				 cdb[8], cdb[9], cdb[10], cdb[11],
				 cdb[12], cdb[13], cdb[14], cdb[15]);
		} else {
			const char *descr = ata_get_cmd_descript(cmd->command);
			if (descr)
				ata_dev_printk(qc->dev, KERN_ERR,
					       "failed command: %s\n", descr);
		}

		ata_dev_printk(qc->dev, KERN_ERR,
			"cmd %02x/%02x:%02x:%02x:%02x:%02x/%02x:%02x:%02x:%02x:%02x/%02x "
			"tag %d%s%s\n         "
			"res %02x/%02x:%02x:%02x:%02x:%02x/%02x:%02x:%02x:%02x:%02x/%02x "
			"Emask 0x%x (%s)%s\n",
			cmd->command, cmd->feature, cmd->nsect,
			cmd->lbal, cmd->lbam, cmd->lbah,
			cmd->hob_feature, cmd->hob_nsect,
			cmd->hob_lbal, cmd->hob_lbam, cmd->hob_lbah,
			cmd->device, qc->tag, data_buf, cdb_buf,
			res->command, res->feature, res->nsect,
			res->lbal, res->lbam, res->lbah,
			res->hob_feature, res->hob_nsect,
			res->hob_lbal, res->hob_lbam, res->hob_lbah,
			res->device, qc->err_mask, ata_err_string(qc->err_mask),
			qc->err_mask & AC_ERR_NCQ ? " <F>" : "");

#ifdef CONFIG_ATA_VERBOSE_ERROR
		if (res->command & (ATA_BUSY | ATA_DRDY | ATA_DF | ATA_DRQ |
				    ATA_ERR)) {
			if (res->command & ATA_BUSY)
				ata_dev_printk(qc->dev, KERN_ERR,
					       "status: { Busy }\n");
			else
				ata_dev_printk(qc->dev, KERN_ERR,
					       "status: { %s%s%s%s}\n",
					       res->command & ATA_DRDY ? "DRDY " : "",
					       res->command & ATA_DF ? "DF " : "",
					       res->command & ATA_DRQ ? "DRQ " : "",
					       res->command & ATA_ERR ? "ERR " : "");
		}

		if (cmd->command != ATA_CMD_PACKET &&
		    (res->feature & (ATA_ICRC | ATA_UNC | ATA_IDNF |
				     ATA_ABORTED)))
			ata_dev_printk(qc->dev, KERN_ERR,
				       "error: { %s%s%s%s}\n",
				       res->feature & ATA_ICRC ? "ICRC " : "",
				       res->feature & ATA_UNC ? "UNC " : "",
				       res->feature & ATA_IDNF ? "IDNF " : "",
				       res->feature & ATA_ABORTED ? "ABRT " : "");
#endif
	}
}
/**
 *	ata_eh_report - report error handling to user
 *	@ap: ATA port to report EH about
 *
 *	Report EH to user.
 *
 *	LOCKING:
 *	None.
 */
void ata_eh_report(struct ata_port *ap)
{
	struct ata_link *link;

	ata_for_each_link(link, ap, HOST_FIRST)
		ata_eh_link_report(link);
}
static int ata_do_reset(struct ata_link *link, ata_reset_fn_t reset,
			unsigned int *classes, unsigned long deadline,
			bool clear_classes)
{
	struct ata_device *dev;

	if (clear_classes)
		ata_for_each_dev(dev, link, ALL)
			classes[dev->devno] = ATA_DEV_UNKNOWN;

	return reset(link, classes, deadline);
}

static int ata_eh_followup_srst_needed(struct ata_link *link,
				       int rc, const unsigned int *classes)
{
	if ((link->flags & ATA_LFLAG_NO_SRST) || ata_link_offline(link))
		return 0;
	if (rc == -EAGAIN)
		return 1;
	if (sata_pmp_supported(link->ap) && ata_is_host_link(link))
		return 1;
	return 0;
}
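/*
 * Illustrative sketch (hypothetical, not part of this file): the shape
 * of an ata_reset_fn_t as invoked through ata_do_reset() above.  A real
 * LLDD would usually just point its reset method at sata_std_hardreset()
 * or wrap it with controller-specific pre/post work, as done here.
 */
static int example_hardreset(struct ata_link *link, unsigned int *class,
			     unsigned long deadline)
{
	/* delegate to the stock SATA hardreset under the EH deadline */
	return sata_std_hardreset(link, class, deadline);
}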
int ata_eh_reset(struct ata_link *link, int classify,
		 ata_prereset_fn_t prereset, ata_reset_fn_t softreset,
		 ata_reset_fn_t hardreset, ata_postreset_fn_t postreset)
{
	struct ata_port *ap = link->ap;
	struct ata_link *slave = ap->slave_link;
	struct ata_eh_context *ehc = &link->eh_context;
	struct ata_eh_context *sehc = slave ? &slave->eh_context : NULL;
	unsigned int *classes = ehc->classes;
	unsigned int lflags = link->flags;
	int verbose = !(ehc->i.flags & ATA_EHI_QUIET);
	int max_tries = 0, try = 0;
	struct ata_link *failed_link;
	struct ata_device *dev;
	unsigned long deadline, now;
	ata_reset_fn_t reset;
	unsigned long flags;
	u32 sstatus;
	int nr_unknown, rc;

	/*
	 * Prepare to reset
	 */
	while (ata_eh_reset_timeouts[max_tries] != ULONG_MAX)
		max_tries++;
	if (link->flags & ATA_LFLAG_NO_HRST)
		hardreset = NULL;
	if (link->flags & ATA_LFLAG_NO_SRST)
		softreset = NULL;

	/* make sure each reset attempt is at least COOL_DOWN apart */
	if (ehc->i.flags & ATA_EHI_DID_RESET) {
		now = jiffies;
		WARN_ON(time_after(ehc->last_reset, now));
		deadline = ata_deadline(ehc->last_reset,
					ATA_EH_RESET_COOL_DOWN);
		if (time_before(now, deadline))
			schedule_timeout_uninterruptible(deadline - now);
	}

	spin_lock_irqsave(ap->lock, flags);
	ap->pflags |= ATA_PFLAG_RESETTING;
	spin_unlock_irqrestore(ap->lock, flags);

	ata_eh_about_to_do(link, NULL, ATA_EH_RESET);

	ata_for_each_dev(dev, link, ALL) {
		/* If we issue an SRST then an ATA drive (not ATAPI)
		 * may change configuration and be in PIO0 timing. If
		 * we do a hard reset (or are coming from power on)
		 * this is true for ATA or ATAPI. Until we've set a
		 * suitable controller mode we should not touch the
		 * bus as we may be talking too fast.
		 */
		dev->pio_mode = XFER_PIO_0;

		/* If the controller has a pio mode setup function
		 * then use it to set the chipset to rights. Don't
		 * touch the DMA setup as that will be dealt with when
		 * configuring devices.
		 */
		if (ap->ops->set_piomode)
			ap->ops->set_piomode(ap, dev);
	}

	/* prefer hardreset */
	reset = NULL;
	ehc->i.action &= ~ATA_EH_RESET;
	if (hardreset) {
		reset = hardreset;
		ehc->i.action |= ATA_EH_HARDRESET;
	} else if (softreset) {
		reset = softreset;
		ehc->i.action |= ATA_EH_SOFTRESET;
	}

	if (prereset) {
		unsigned long deadline = ata_deadline(jiffies,
						      ATA_EH_PRERESET_TIMEOUT);

		if (slave) {
			sehc->i.action &= ~ATA_EH_RESET;
			sehc->i.action |= ehc->i.action;
		}

		rc = prereset(link, deadline);

		/* If present, do prereset on slave link too. Reset
		 * is skipped iff both master and slave links report
		 * -ENOENT or clear ATA_EH_RESET.
		 */
		if (slave && (rc == 0 || rc == -ENOENT)) {
			int tmp;

			tmp = prereset(slave, deadline);
			if (tmp != -ENOENT)
				rc = tmp;

			ehc->i.action |= sehc->i.action;
		}

		if (rc) {
			if (rc == -ENOENT) {
				ata_link_printk(link, KERN_DEBUG,
						"port disabled. ignoring.\n");
				ehc->i.action &= ~ATA_EH_RESET;

				ata_for_each_dev(dev, link, ALL)
					classes[dev->devno] = ATA_DEV_NONE;

				rc = 0;
			} else
				ata_link_printk(link, KERN_ERR,
					"prereset failed (errno=%d)\n", rc);
			goto out;
		}
	}

	/* prereset() might have cleared ATA_EH_RESET. If so,
	 * bang classes, thaw and return.
	 */
	if (reset && !(ehc->i.action & ATA_EH_RESET)) {
		ata_for_each_dev(dev, link, ALL)
			classes[dev->devno] = ATA_DEV_NONE;
		if ((ap->pflags & ATA_PFLAG_FROZEN) &&
		    ata_is_host_link(link))
			ata_eh_thaw_port(ap);
		rc = 0;
		goto out;
	}

 retry:
	/*
	 * Perform reset
	 */
	if (ata_is_host_link(link))
		ata_eh_freeze_port(ap);

	deadline = ata_deadline(jiffies, ata_eh_reset_timeouts[try++]);

	if (reset) {
		if (verbose)
			ata_link_printk(link, KERN_INFO, "%s resetting link\n",
					reset == softreset ? "soft" : "hard");

		/* mark that this EH session started with reset */
		ehc->last_reset = jiffies;
		if (reset == hardreset)
			ehc->i.flags |= ATA_EHI_DID_HARDRESET;
		else
			ehc->i.flags |= ATA_EHI_DID_SOFTRESET;

		rc = ata_do_reset(link, reset, classes, deadline, true);
		if (rc && rc != -EAGAIN) {
			failed_link = link;
			goto fail;
		}

		/* hardreset slave link if existent */
		if (slave && reset == hardreset) {
			int tmp;

			if (verbose)
				ata_link_printk(slave, KERN_INFO,
						"hard resetting link\n");

			ata_eh_about_to_do(slave, NULL, ATA_EH_RESET);
			tmp = ata_do_reset(slave, reset, classes, deadline,
					   false);
			switch (tmp) {
			case -EAGAIN:
				rc = -EAGAIN;
			case 0:
				break;
			default:
				failed_link = slave;
				rc = tmp;
				goto fail;
			}
		}

		/* perform follow-up SRST if necessary */
		if (reset == hardreset &&
		    ata_eh_followup_srst_needed(link, rc, classes)) {
			reset = softreset;

			if (!reset) {
				ata_link_printk(link, KERN_ERR,
						"follow-up softreset required "
						"but no softreset available\n");
				failed_link = link;
				rc = -EINVAL;
				goto fail;
			}

			ata_eh_about_to_do(link, NULL, ATA_EH_RESET);
			rc = ata_do_reset(link, reset, classes, deadline, true);
			if (rc) {
				failed_link = link;
				goto fail;
			}
		}
	} else {
		if (verbose)
			ata_link_printk(link, KERN_INFO, "no reset method "
					"available, skipping reset\n");
		if (!(lflags & ATA_LFLAG_ASSUME_CLASS))
			lflags |= ATA_LFLAG_ASSUME_ATA;
	}

	/*
	 * Post-reset processing
	 */
	ata_for_each_dev(dev, link, ALL) {
		/* After the reset, the device state is PIO 0 and the
		 * controller state is undefined. Reset also wakes up
		 * drives from sleeping mode.
		 */
		dev->pio_mode = XFER_PIO_0;
		dev->flags &= ~ATA_DFLAG_SLEEPING;

		if (ata_phys_link_offline(ata_dev_phys_link(dev)))
			continue;

		/* apply class override */
		if (lflags & ATA_LFLAG_ASSUME_ATA)
			classes[dev->devno] = ATA_DEV_ATA;
		else if (lflags & ATA_LFLAG_ASSUME_SEMB)
			classes[dev->devno] = ATA_DEV_SEMB_UNSUP;
	}

	/* record current link speed */
	if (sata_scr_read(link, SCR_STATUS, &sstatus) == 0)
		link->sata_spd = (sstatus >> 4) & 0xf;
	if (slave && sata_scr_read(slave, SCR_STATUS, &sstatus) == 0)
		slave->sata_spd = (sstatus >> 4) & 0xf;

	/* thaw the port */
	if (ata_is_host_link(link))
		ata_eh_thaw_port(ap);

	/* postreset() should clear hardware SError. Although SError
	 * is cleared during link resume, clearing SError here is
	 * necessary as some PHYs raise hotplug events after SRST.
	 * This introduces race condition where hotplug occurs between
	 * reset and here. This race is mediated by cross checking
	 * link onlineness and classification result later.
	 */
	if (postreset) {
		postreset(link, classes);
		if (slave)
			postreset(slave, classes);
	}

	/*
	 * Some controllers can't be frozen very well and may set
	 * spurious error conditions during reset. Clear accumulated
	 * error information. As reset is the final recovery action,
	 * nothing is lost by doing this.
	 */
	spin_lock_irqsave(link->ap->lock, flags);
	memset(&link->eh_info, 0, sizeof(link->eh_info));
	if (slave)
		memset(&slave->eh_info, 0, sizeof(link->eh_info));
	ap->pflags &= ~ATA_PFLAG_EH_PENDING;
	spin_unlock_irqrestore(link->ap->lock, flags);

	/*
	 * Make sure onlineness and classification result correspond.
	 * Hotplug could have happened during reset and some
	 * controllers fail to wait while a drive is spinning up after
	 * being hotplugged causing misdetection. By cross checking
	 * link on/offlineness and classification result, those
	 * conditions can be reliably detected and retried.
	 */
	nr_unknown = 0;
	ata_for_each_dev(dev, link, ALL) {
		if (ata_phys_link_online(ata_dev_phys_link(dev))) {
			if (classes[dev->devno] == ATA_DEV_UNKNOWN) {
				ata_dev_printk(dev, KERN_DEBUG, "link online "
					       "but device misclassified\n");
				classes[dev->devno] = ATA_DEV_NONE;
				nr_unknown++;
			}
		} else if (ata_phys_link_offline(ata_dev_phys_link(dev))) {
			if (ata_class_enabled(classes[dev->devno]))
				ata_dev_printk(dev, KERN_DEBUG, "link offline, "
					       "clearing class %d to NONE\n",
					       classes[dev->devno]);
			classes[dev->devno] = ATA_DEV_NONE;
		} else if (classes[dev->devno] == ATA_DEV_UNKNOWN) {
			ata_dev_printk(dev, KERN_DEBUG, "link status unknown, "
				       "clearing UNKNOWN to NONE\n");
			classes[dev->devno] = ATA_DEV_NONE;
		}
	}

	if (classify && nr_unknown) {
		if (try < max_tries) {
			ata_link_printk(link, KERN_WARNING, "link online but "
					"%d devices misclassified, retrying\n",
					nr_unknown);
			failed_link = link;
			rc = -EAGAIN;
			goto fail;
		}
		ata_link_printk(link, KERN_WARNING,
				"link online but %d devices misclassified, "
				"device detection might fail\n", nr_unknown);
	}

	/* reset successful, schedule revalidation */
	ata_eh_done(link, NULL, ATA_EH_RESET);
	if (slave)
		ata_eh_done(slave, NULL, ATA_EH_RESET);
	ehc->last_reset = jiffies;	/* update to completion time */
	ehc->i.action |= ATA_EH_REVALIDATE;

	rc = 0;
 out:
	/* clear hotplug flag */
	ehc->i.flags &= ~ATA_EHI_HOTPLUGGED;
	if (slave)
		sehc->i.flags &= ~ATA_EHI_HOTPLUGGED;

	spin_lock_irqsave(ap->lock, flags);
	ap->pflags &= ~ATA_PFLAG_RESETTING;
	spin_unlock_irqrestore(ap->lock, flags);

	return rc;

 fail:
	/* if SCR isn't accessible on a fan-out port, PMP needs to be reset */
	if (!ata_is_host_link(link) &&
	    sata_scr_read(link, SCR_STATUS, &sstatus))
		rc = -ERESTART;

	if (rc == -ERESTART || try >= max_tries)
		goto out;

	now = jiffies;
	if (time_before(now, deadline)) {
		unsigned long delta = deadline - now;

		ata_link_printk(failed_link, KERN_WARNING,
			"reset failed (errno=%d), retrying in %u secs\n",
			rc, DIV_ROUND_UP(jiffies_to_msecs(delta), 1000));

		while (delta)
			delta = schedule_timeout_uninterruptible(delta);
	}

	if (try == max_tries - 1) {
		sata_down_spd_limit(link, 0);
		if (slave)
			sata_down_spd_limit(slave, 0);
	} else if (rc == -EPIPE)
		sata_down_spd_limit(failed_link, 0);

	if (hardreset)
		reset = hardreset;
	goto retry;
}
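/*
 * Illustrative sketch (hypothetical): an ata_prereset_fn_t as called by
 * ata_eh_reset() above.  Returning -ENOENT tells EH the port is disabled
 * and should be ignored; ata_std_prereset() is the stock implementation.
 * example_skip_reset() is a made-up predicate, not a real libata helper.
 */
static int example_prereset(struct ata_link *link, unsigned long deadline)
{
	if (example_skip_reset(link->ap))	/* hypothetical predicate */
		return -ENOENT;

	/* otherwise wait for the link to settle within the EH deadline */
	return ata_std_prereset(link, deadline);
}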
static inline void ata_eh_pull_park_action(struct ata_port *ap)
{
	struct ata_link *link;
	struct ata_device *dev;
	unsigned long flags;

	/*
	 * This function can be thought of as an extended version of
	 * ata_eh_about_to_do() specially crafted to accommodate the
	 * requirements of ATA_EH_PARK handling. Since the EH thread
	 * does not leave the do {} while () loop in ata_eh_recover as
	 * long as the timeout for a park request to *one* device on
	 * the port has not expired, and since we still want to pick
	 * up park requests to other devices on the same port or
	 * timeout updates for the same device, we have to pull
	 * ATA_EH_PARK actions from eh_info into eh_context.i
	 * ourselves at the beginning of each pass over the loop.
	 *
	 * Additionally, all write accesses to &ap->park_req_pending
	 * through INIT_COMPLETION() (see below) or complete_all()
	 * (see ata_scsi_park_store()) are protected by the host lock.
	 * As a result we have that park_req_pending.done is zero on
	 * exit from this function, i.e. when ATA_EH_PARK actions for
	 * *all* devices on port ap have been pulled into the
	 * respective eh_context structs. If, and only if,
	 * park_req_pending.done is non-zero by the time we reach
	 * wait_for_completion_timeout(), another ATA_EH_PARK action
	 * has been scheduled for at least one of the devices on port
	 * ap and we have to cycle over the do {} while () loop in
	 * ata_eh_recover() again.
	 */

	spin_lock_irqsave(ap->lock, flags);
	INIT_COMPLETION(ap->park_req_pending);
	ata_for_each_link(link, ap, EDGE) {
		ata_for_each_dev(dev, link, ALL) {
			struct ata_eh_info *ehi = &link->eh_info;

			link->eh_context.i.dev_action[dev->devno] |=
				ehi->dev_action[dev->devno] & ATA_EH_PARK;
			ata_eh_clear_action(link, dev, ehi, ATA_EH_PARK);
		}
	}
	spin_unlock_irqrestore(ap->lock, flags);
}
static void ata_eh_park_issue_cmd(struct ata_device *dev, int park)
{
	struct ata_eh_context *ehc = &dev->link->eh_context;
	struct ata_taskfile tf;
	unsigned int err_mask;

	ata_tf_init(dev, &tf);
	if (park) {
		ehc->unloaded_mask |= 1 << dev->devno;
		tf.command = ATA_CMD_IDLEIMMEDIATE;
		tf.feature = 0x44;
		tf.lbal = 0x4c;
		tf.lbam = 0x4e;
		tf.lbah = 0x55;
	} else {
		ehc->unloaded_mask &= ~(1 << dev->devno);
		tf.command = ATA_CMD_CHK_POWER;
	}

	tf.flags |= ATA_TFLAG_DEVICE | ATA_TFLAG_ISADDR;
	tf.protocol |= ATA_PROT_NODATA;
	err_mask = ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0, 0);
	if (park && (err_mask || tf.lbal != 0xc4)) {
		ata_dev_printk(dev, KERN_ERR, "head unload failed!\n");
		ehc->unloaded_mask &= ~(1 << dev->devno);
	}
}
static int ata_eh_revalidate_and_attach(struct ata_link *link,
					struct ata_device **r_failed_dev)
{
	struct ata_port *ap = link->ap;
	struct ata_eh_context *ehc = &link->eh_context;
	struct ata_device *dev;
	unsigned int new_mask = 0;
	unsigned long flags;
	int rc = 0;

	DPRINTK("ENTER\n");

	/* For PATA drive side cable detection to work, IDENTIFY must
	 * be done backwards such that PDIAG- is released by the slave
	 * device before the master device is identified.
	 */
	ata_for_each_dev(dev, link, ALL_REVERSE) {
		unsigned int action = ata_eh_dev_action(dev);
		unsigned int readid_flags = 0;

		if (ehc->i.flags & ATA_EHI_DID_RESET)
			readid_flags |= ATA_READID_POSTRESET;

		if ((action & ATA_EH_REVALIDATE) && ata_dev_enabled(dev)) {
			WARN_ON(dev->class == ATA_DEV_PMP);

			if (ata_phys_link_offline(ata_dev_phys_link(dev))) {
				rc = -EIO;
				goto err;
			}

			ata_eh_about_to_do(link, dev, ATA_EH_REVALIDATE);
			rc = ata_dev_revalidate(dev, ehc->classes[dev->devno],
						readid_flags);
			if (rc)
				goto err;

			ata_eh_done(link, dev, ATA_EH_REVALIDATE);

			/* Configuration may have changed, reconfigure
			 * transfer mode.
			 */
			ehc->i.flags |= ATA_EHI_SETMODE;

			/* schedule the scsi_rescan_device() here */
			schedule_work(&(ap->scsi_rescan_task));
		} else if (dev->class == ATA_DEV_UNKNOWN &&
			   ehc->tries[dev->devno] &&
			   ata_class_enabled(ehc->classes[dev->devno])) {
			/* Temporarily set dev->class, it will be
			 * permanently set once all configurations are
			 * complete. This is necessary because new
			 * device configuration is done in two
			 * separate loops.
			 */
			dev->class = ehc->classes[dev->devno];

			if (dev->class == ATA_DEV_PMP)
				rc = sata_pmp_attach(dev);
			else
				rc = ata_dev_read_id(dev, &dev->class,
						     readid_flags, dev->id);

			/* read_id might have changed class, store and reset */
			ehc->classes[dev->devno] = dev->class;
			dev->class = ATA_DEV_UNKNOWN;

			switch (rc) {
			case 0:
				/* clear error info accumulated during probe */
				ata_ering_clear(&dev->ering);
				new_mask |= 1 << dev->devno;
				break;
			case -ENOENT:
				/* IDENTIFY was issued to non-existent
				 * device. No need to reset. Just
				 * thaw and ignore the device.
				 */
				ata_eh_thaw_port(ap);
				break;
			default:
				goto err;
			}
		}
	}

	/* PDIAG- should have been released, ask cable type if post-reset */
	if ((ehc->i.flags & ATA_EHI_DID_RESET) && ata_is_host_link(link)) {
		if (ap->ops->cable_detect)
			ap->cbl = ap->ops->cable_detect(ap);
		ata_force_cbl(ap);
	}

	/* Configure new devices forward such that user doesn't see
	 * device detection messages backwards.
	 */
	ata_for_each_dev(dev, link, ALL) {
		if (!(new_mask & (1 << dev->devno)))
			continue;

		dev->class = ehc->classes[dev->devno];

		if (dev->class == ATA_DEV_PMP)
			continue;

		ehc->i.flags |= ATA_EHI_PRINTINFO;
		rc = ata_dev_configure(dev);
		ehc->i.flags &= ~ATA_EHI_PRINTINFO;
		if (rc) {
			dev->class = ATA_DEV_UNKNOWN;
			goto err;
		}

		spin_lock_irqsave(ap->lock, flags);
		ap->pflags |= ATA_PFLAG_SCSI_HOTPLUG;
		spin_unlock_irqrestore(ap->lock, flags);

		/* new device discovered, configure xfermode */
		ehc->i.flags |= ATA_EHI_SETMODE;
	}

	return 0;

 err:
	*r_failed_dev = dev;
	DPRINTK("EXIT rc=%d\n", rc);
	return rc;
}
/**
 *	ata_set_mode - Program timings and issue SET FEATURES - XFER
 *	@link: link on which timings will be programmed
 *	@r_failed_dev: out parameter for failed device
 *
 *	Set ATA device disk transfer mode (PIO3, UDMA6, etc.). If
 *	ata_set_mode() fails, pointer to the failing device is
 *	returned in @r_failed_dev.
 *
 *	LOCKING:
 *	PCI/etc. bus probe sem.
 *
 *	RETURNS:
 *	0 on success, negative errno otherwise
 */
int ata_set_mode(struct ata_link *link, struct ata_device **r_failed_dev)
{
	struct ata_port *ap = link->ap;
	struct ata_device *dev;
	int rc;

	/* if data transfer is verified, clear DUBIOUS_XFER on ering top */
	ata_for_each_dev(dev, link, ENABLED) {
		if (!(dev->flags & ATA_DFLAG_DUBIOUS_XFER)) {
			struct ata_ering_entry *ent;

			ent = ata_ering_top(&dev->ering);
			if (ent)
				ent->eflags &= ~ATA_EFLAG_DUBIOUS_XFER;
		}
	}

	/* has private set_mode? */
	if (ap->ops->set_mode)
		rc = ap->ops->set_mode(link, r_failed_dev);
	else
		rc = ata_do_set_mode(link, r_failed_dev);

	/* if transfer mode has changed, set DUBIOUS_XFER on device */
	ata_for_each_dev(dev, link, ENABLED) {
		struct ata_eh_context *ehc = &link->eh_context;
		u8 saved_xfer_mode = ehc->saved_xfer_mode[dev->devno];
		u8 saved_ncq = !!(ehc->saved_ncq_enabled & (1 << dev->devno));

		if (dev->xfer_mode != saved_xfer_mode ||
		    ata_ncq_enabled(dev) != saved_ncq)
			dev->flags |= ATA_DFLAG_DUBIOUS_XFER;
	}

	return rc;
}
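/*
 * Illustrative sketch (hypothetical): a controller-private ->set_mode
 * hook as dispatched by ata_set_mode() above.  Most drivers have no
 * such hook and fall through to ata_do_set_mode(); one that must apply
 * timing fixups afterwards could wrap it like this.
 */
static int example_set_mode(struct ata_link *link,
			    struct ata_device **r_failed_dev)
{
	/* do the generic xfermode negotiation first */
	int rc = ata_do_set_mode(link, r_failed_dev);

	/* controller-specific timing registers would be programmed here */
	return rc;
}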
/**
 *	atapi_eh_clear_ua - Clear ATAPI UNIT ATTENTION after reset
 *	@dev: ATAPI device to clear UA for
 *
 *	Resets and other operations can make an ATAPI device raise
 *	UNIT ATTENTION which causes the next operation to fail. This
 *	function clears UA.
 *
 *	LOCKING:
 *	EH context (may sleep).
 *
 *	RETURNS:
 *	0 on success, -errno on failure.
 */
static int atapi_eh_clear_ua(struct ata_device *dev)
{
	int i;

	for (i = 0; i < ATA_EH_UA_TRIES; i++) {
		u8 *sense_buffer = dev->link->ap->sector_buf;
		u8 sense_key = 0;
		unsigned int err_mask;

		err_mask = atapi_eh_tur(dev, &sense_key);
		if (err_mask != 0 && err_mask != AC_ERR_DEV) {
			ata_dev_printk(dev, KERN_WARNING, "TEST_UNIT_READY "
				       "failed (err_mask=0x%x)\n", err_mask);
			return -EIO;
		}

		if (!err_mask || sense_key != UNIT_ATTENTION)
			return 0;

		err_mask = atapi_eh_request_sense(dev, sense_buffer, sense_key);
		if (err_mask) {
			ata_dev_printk(dev, KERN_WARNING, "failed to clear "
				       "UNIT ATTENTION (err_mask=0x%x)\n",
				       err_mask);
			return -EIO;
		}
	}

	ata_dev_printk(dev, KERN_WARNING,
		       "UNIT ATTENTION persists after %d tries\n",
		       ATA_EH_UA_TRIES);

	return 0;
}
/**
 *	ata_eh_maybe_retry_flush - Retry FLUSH if necessary
 *	@dev: ATA device which may need FLUSH retry
 *
 *	If @dev failed FLUSH, it needs to be reported to the upper
 *	layer immediately as it means that @dev failed to remap and
 *	already lost at least a sector and further FLUSH retrials
 *	won't make any difference to the lost sector. However, if
 *	FLUSH failed for other reasons, for example transmission
 *	error, FLUSH needs to be retried.
 *
 *	This function determines whether FLUSH failure retry is
 *	necessary and performs it if so.
 *
 *	RETURNS:
 *	0 if EH can continue, -errno if EH needs to be repeated.
 */
static int ata_eh_maybe_retry_flush(struct ata_device *dev)
{
	struct ata_link *link = dev->link;
	struct ata_port *ap = link->ap;
	struct ata_queued_cmd *qc;
	struct ata_taskfile tf;
	unsigned int err_mask;
	int rc = 0;

	/* did flush fail for this device? */
	if (!ata_tag_valid(link->active_tag))
		return 0;

	qc = __ata_qc_from_tag(ap, link->active_tag);
	if (qc->dev != dev || (qc->tf.command != ATA_CMD_FLUSH_EXT &&
			       qc->tf.command != ATA_CMD_FLUSH))
		return 0;

	/* if the device failed it, it should be reported to upper layers */
	if (qc->err_mask & AC_ERR_DEV)
		return 0;

	/* flush failed for some other reason, give it another shot */
	ata_tf_init(dev, &tf);

	tf.command = qc->tf.command;
	tf.flags |= ATA_TFLAG_DEVICE;
	tf.protocol = ATA_PROT_NODATA;

	ata_dev_printk(dev, KERN_WARNING, "retrying FLUSH 0x%x Emask 0x%x\n",
		       tf.command, qc->err_mask);

	err_mask = ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0, 0);
	if (!err_mask) {
		/*
		 * FLUSH is complete but there's no way to
		 * successfully complete a failed command from EH.
		 * Making sure retry is allowed at least once and
		 * retrying it should do the trick - whatever was in
		 * the cache is already on the platter and this won't
		 * cause infinite loop.
		 */
		qc->scsicmd->allowed = max(qc->scsicmd->allowed, 1);
	} else {
		ata_dev_printk(dev, KERN_WARNING, "FLUSH failed Emask 0x%x\n",
			       err_mask);
		rc = -EIO;

		/* if device failed it, report it to upper layers */
		if (err_mask & AC_ERR_DEV) {
			qc->err_mask |= AC_ERR_DEV;
			qc->result_tf = tf;
			if (!(ap->pflags & ATA_PFLAG_FROZEN))
				rc = 0;
		}
	}
	return rc;
}
static int ata_link_nr_enabled(struct ata_link *link)
{
	struct ata_device *dev;
	int cnt = 0;

	ata_for_each_dev(dev, link, ENABLED)
		cnt++;
	return cnt;
}

static int ata_link_nr_vacant(struct ata_link *link)
{
	struct ata_device *dev;
	int cnt = 0;

	ata_for_each_dev(dev, link, ALL)
		if (dev->class == ATA_DEV_UNKNOWN)
			cnt++;
	return cnt;
}
static int ata_eh_skip_recovery(struct ata_link *link)
{
	struct ata_port *ap = link->ap;
	struct ata_eh_context *ehc = &link->eh_context;
	struct ata_device *dev;

	/* skip disabled links */
	if (link->flags & ATA_LFLAG_DISABLED)
		return 1;

	/* thaw frozen port and recover failed devices */
	if ((ap->pflags & ATA_PFLAG_FROZEN) || ata_link_nr_enabled(link))
		return 0;

	/* reset at least once if reset is requested */
	if ((ehc->i.action & ATA_EH_RESET) &&
	    !(ehc->i.flags & ATA_EHI_DID_RESET))
		return 0;

	/* skip if class codes for all vacant slots are ATA_DEV_NONE */
	ata_for_each_dev(dev, link, ALL) {
		if (dev->class == ATA_DEV_UNKNOWN &&
		    ehc->classes[dev->devno] != ATA_DEV_NONE)
			return 0;
	}

	return 1;
}
static int ata_count_probe_trials_cb(struct ata_ering_entry *ent, void *void_arg)
{
	u64 interval = msecs_to_jiffies(ATA_EH_PROBE_TRIAL_INTERVAL);
	u64 now = get_jiffies_64();
	int *trials = void_arg;

	if (ent->timestamp < now - min(now, interval))
		return -1;

	(*trials)++;
	return 0;
}
static int ata_eh_schedule_probe(struct ata_device *dev)
{
	struct ata_eh_context *ehc = &dev->link->eh_context;
	struct ata_link *link = ata_dev_phys_link(dev);
	int trials = 0;

	if (!(ehc->i.probe_mask & (1 << dev->devno)) ||
	    (ehc->did_probe_mask & (1 << dev->devno)))
		return 0;

	ata_eh_detach_dev(dev);
	ata_dev_init(dev);
	ehc->did_probe_mask |= (1 << dev->devno);
	ehc->i.action |= ATA_EH_RESET;
	ehc->saved_xfer_mode[dev->devno] = 0;
	ehc->saved_ncq_enabled &= ~(1 << dev->devno);

	/* Record and count probe trials on the ering. The specific
	 * error mask used is irrelevant. Because a successful device
	 * detection clears the ering, this count accumulates only if
	 * there are consecutive failed probes.
	 *
	 * If the count is equal to or higher than ATA_EH_PROBE_TRIALS
	 * in the last ATA_EH_PROBE_TRIAL_INTERVAL, link speed is
	 * forced to 1.5Gbps.
	 *
	 * This is to work around cases where failed link speed
	 * negotiation results in device misdetection leading to
	 * infinite DEVXCHG or PHRDY CHG events.
	 */
	ata_ering_record(&dev->ering, 0, AC_ERR_OTHER);
	ata_ering_map(&dev->ering, ata_count_probe_trials_cb, &trials);

	if (trials > ATA_EH_PROBE_TRIALS)
		sata_down_spd_limit(link, 1);

	return 1;
}
static int ata_eh_handle_dev_fail(struct ata_device *dev, int err)
{
	struct ata_eh_context *ehc = &dev->link->eh_context;

	/* -EAGAIN from EH routine indicates retry without prejudice.
	 * The requester is responsible for ensuring forward progress.
	 */
	if (err != -EAGAIN)
		ehc->tries[dev->devno]--;

	switch (err) {
	case -ENODEV:
		/* device missing or wrong IDENTIFY data, schedule probing */
		ehc->i.probe_mask |= (1 << dev->devno);
		/* fall through */
	case -EINVAL:
		/* give it just one more chance */
		ehc->tries[dev->devno] = min(ehc->tries[dev->devno], 1);
		/* fall through */
	case -EIO:
		if (ehc->tries[dev->devno] == 1) {
			/* This is the last chance, better to slow
			 * down than lose it.
			 */
			sata_down_spd_limit(ata_dev_phys_link(dev), 0);
			if (dev->pio_mode > XFER_PIO_0)
				ata_down_xfermask_limit(dev, ATA_DNXFER_PIO);
		}
	}

	if (ata_dev_enabled(dev) && !ehc->tries[dev->devno]) {
		/* disable device if it has used up all its chances */
		ata_dev_disable(dev);

		/* detach if offline */
		if (ata_phys_link_offline(ata_dev_phys_link(dev)))
			ata_eh_detach_dev(dev);

		/* schedule probe if necessary */
		if (ata_eh_schedule_probe(dev)) {
			ehc->tries[dev->devno] = ATA_EH_DEV_TRIES;
			memset(ehc->cmd_timeout_idx[dev->devno], 0,
			       sizeof(ehc->cmd_timeout_idx[dev->devno]));
		}

		return 1;
	} else {
		ehc->i.action |= ATA_EH_RESET;
		return 0;
	}
}
/**
 *	ata_eh_recover - recover host port after error
 *	@ap: host port to recover
 *	@prereset: prereset method (can be NULL)
 *	@softreset: softreset method (can be NULL)
 *	@hardreset: hardreset method (can be NULL)
 *	@postreset: postreset method (can be NULL)
 *	@r_failed_link: out parameter for failed link
 *
 *	This is the alpha and omega, yin and yang, heart and soul of
 *	libata exception handling. On entry, actions required to
 *	recover each link and hotplug requests are recorded in the
 *	link's eh_context. This function executes all the operations
 *	with appropriate retrials and fallbacks to resurrect failed
 *	devices, detach goners and greet newcomers.
 *
 *	LOCKING:
 *	Kernel thread context (may sleep).
 *
 *	RETURNS:
 *	0 on success, -errno on failure.
 */
int ata_eh_recover(struct ata_port *ap, ata_prereset_fn_t prereset,
		   ata_reset_fn_t softreset, ata_reset_fn_t hardreset,
		   ata_postreset_fn_t postreset,
		   struct ata_link **r_failed_link)
{
	struct ata_link *link;
	struct ata_device *dev;
	int nr_failed_devs;
	int rc;
	unsigned long flags, deadline;

	DPRINTK("ENTER\n");

	/* prep for recovery */
	ata_for_each_link(link, ap, EDGE) {
		struct ata_eh_context *ehc = &link->eh_context;

		/* re-enable link? */
		if (ehc->i.action & ATA_EH_ENABLE_LINK) {
			ata_eh_about_to_do(link, NULL, ATA_EH_ENABLE_LINK);
			spin_lock_irqsave(ap->lock, flags);
			link->flags &= ~ATA_LFLAG_DISABLED;
			spin_unlock_irqrestore(ap->lock, flags);
			ata_eh_done(link, NULL, ATA_EH_ENABLE_LINK);
		}

		ata_for_each_dev(dev, link, ALL) {
			if (link->flags & ATA_LFLAG_NO_RETRY)
				ehc->tries[dev->devno] = 1;
			else
				ehc->tries[dev->devno] = ATA_EH_DEV_TRIES;

			/* collect port action mask recorded in dev actions */
			ehc->i.action |= ehc->i.dev_action[dev->devno] &
					 ~ATA_EH_PERDEV_MASK;
			ehc->i.dev_action[dev->devno] &= ATA_EH_PERDEV_MASK;

			/* process hotplug request */
			if (dev->flags & ATA_DFLAG_DETACH)
				ata_eh_detach_dev(dev);

			/* schedule probe if necessary */
			if (!ata_dev_enabled(dev))
				ata_eh_schedule_probe(dev);
		}
	}

 retry:
	rc = 0;
	nr_failed_devs = 0;

	/* if UNLOADING, finish immediately */
	if (ap->pflags & ATA_PFLAG_UNLOADING)
		goto out;

	/* prep for EH */
	ata_for_each_link(link, ap, EDGE) {
		struct ata_eh_context *ehc = &link->eh_context;

		/* skip EH if possible. */
		if (ata_eh_skip_recovery(link))
			ehc->i.action = 0;

		ata_for_each_dev(dev, link, ALL)
			ehc->classes[dev->devno] = ATA_DEV_UNKNOWN;
	}

	/* reset */
	ata_for_each_link(link, ap, EDGE) {
		struct ata_eh_context *ehc = &link->eh_context;

		if (!(ehc->i.action & ATA_EH_RESET))
			continue;

		rc = ata_eh_reset(link, ata_link_nr_vacant(link),
				  prereset, softreset, hardreset, postreset);
		if (rc) {
			ata_link_printk(link, KERN_ERR,
					"reset failed, giving up\n");
			goto out;
		}
	}

	do {
		unsigned long now;

		/*
		 * clears ATA_EH_PARK in eh_info and resets
		 * ap->park_req_pending
		 */
		ata_eh_pull_park_action(ap);

		deadline = jiffies;
		ata_for_each_link(link, ap, EDGE) {
			ata_for_each_dev(dev, link, ALL) {
				struct ata_eh_context *ehc = &link->eh_context;
				unsigned long tmp;

				if (dev->class != ATA_DEV_ATA)
					continue;
				if (!(ehc->i.dev_action[dev->devno] &
				      ATA_EH_PARK))
					continue;
				tmp = dev->unpark_deadline;
				if (time_before(deadline, tmp))
					deadline = tmp;
				else if (time_before_eq(tmp, jiffies))
					continue;
				if (ehc->unloaded_mask & (1 << dev->devno))
					continue;

				ata_eh_park_issue_cmd(dev, 1);
			}
		}

		now = jiffies;
		if (time_before_eq(deadline, now))
			break;

		deadline = wait_for_completion_timeout(&ap->park_req_pending,
						       deadline - now);
	} while (deadline);
	ata_for_each_link(link, ap, EDGE) {
		ata_for_each_dev(dev, link, ALL) {
			if (!(link->eh_context.unloaded_mask &
			      (1 << dev->devno)))
				continue;

			ata_eh_park_issue_cmd(dev, 0);
			ata_eh_done(link, dev, ATA_EH_PARK);
		}
	}

	/* the rest */
	ata_for_each_link(link, ap, EDGE) {
		struct ata_eh_context *ehc = &link->eh_context;

		/* revalidate existing devices and attach new ones */
		rc = ata_eh_revalidate_and_attach(link, &dev);
		if (rc)
			goto dev_fail;

		/* if PMP got attached, return, pmp EH will take care of it */
		if (link->device->class == ATA_DEV_PMP) {
			ehc->i.action = 0;
			return 0;
		}

		/* configure transfer mode if necessary */
		if (ehc->i.flags & ATA_EHI_SETMODE) {
			rc = ata_set_mode(link, &dev);
			if (rc)
				goto dev_fail;
			ehc->i.flags &= ~ATA_EHI_SETMODE;
		}

		/* If reset has been issued, clear UA to avoid
		 * disrupting the current users of the device.
		 */
		if (ehc->i.flags & ATA_EHI_DID_RESET) {
			ata_for_each_dev(dev, link, ALL) {
				if (dev->class != ATA_DEV_ATAPI)
					continue;
				rc = atapi_eh_clear_ua(dev);
				if (rc)
					goto dev_fail;
			}
		}

		/* retry flush if necessary */
		ata_for_each_dev(dev, link, ALL) {
			if (dev->class != ATA_DEV_ATA)
				continue;
			rc = ata_eh_maybe_retry_flush(dev);
			if (rc)
				goto dev_fail;
		}

		/* configure link power saving */
		if (ehc->i.action & ATA_EH_LPM)
			ata_for_each_dev(dev, link, ALL)
				ata_dev_enable_pm(dev, ap->pm_policy);

		/* this link is okay now */
		ehc->i.flags = 0;
		continue;

dev_fail:
		nr_failed_devs++;
		ata_eh_handle_dev_fail(dev, rc);

		if (ap->pflags & ATA_PFLAG_FROZEN) {
			/* PMP reset requires working host port.
			 * Can't retry if it's frozen.
			 */
			if (sata_pmp_attached(ap))
				goto out;
			break;
		}
	}

	if (nr_failed_devs)
		goto retry;

 out:
	if (rc && r_failed_link)
		*r_failed_link = link;

	DPRINTK("EXIT, rc=%d\n", rc);
	return rc;
}
/**
 *	ata_eh_finish - finish up EH
 *	@ap: host port to finish EH for
 *
 *	Recovery is complete. Clean up EH states and retry or finish
 *	failed qcs.
 *
 *	LOCKING:
 *	None.
 */
void ata_eh_finish(struct ata_port *ap)
{
	int tag;

	/* retry or finish qcs */
	for (tag = 0; tag < ATA_MAX_QUEUE; tag++) {
		struct ata_queued_cmd *qc = __ata_qc_from_tag(ap, tag);

		if (!(qc->flags & ATA_QCFLAG_FAILED))
			continue;

		if (qc->err_mask) {
			/* FIXME: Once EH migration is complete,
			 * generate sense data in this function,
			 * considering both err_mask and tf.
			 */
			if (qc->flags & ATA_QCFLAG_RETRY)
				ata_eh_qc_retry(qc);
			else
				ata_eh_qc_complete(qc);
		} else {
			if (qc->flags & ATA_QCFLAG_SENSE_VALID) {
				ata_eh_qc_complete(qc);
			} else {
				/* feed zero TF to sense generation */
				memset(&qc->result_tf, 0,
				       sizeof(qc->result_tf));
				ata_eh_qc_retry(qc);
			}
		}
	}

	/* make sure nr_active_links is zero after EH */
	WARN_ON(ap->nr_active_links);
	ap->nr_active_links = 0;
}
/**
 *	ata_do_eh - do standard error handling
 *	@ap: host port to handle error for
 *	@prereset: prereset method (can be NULL)
 *	@softreset: softreset method (can be NULL)
 *	@hardreset: hardreset method (can be NULL)
 *	@postreset: postreset method (can be NULL)
 *
 *	Perform standard error handling sequence.
 *
 *	LOCKING:
 *	Kernel thread context (may sleep).
 */
void ata_do_eh(struct ata_port *ap, ata_prereset_fn_t prereset,
	       ata_reset_fn_t softreset, ata_reset_fn_t hardreset,
	       ata_postreset_fn_t postreset)
{
	struct ata_device *dev;
	int rc;

	ata_eh_autopsy(ap);
	ata_eh_report(ap);

	rc = ata_eh_recover(ap, prereset, softreset, hardreset, postreset,
			    NULL);
	if (rc) {
		ata_for_each_dev(dev, &ap->link, ALL)
			ata_dev_disable(dev);
	}

	ata_eh_finish(ap);
}
/**
 *	ata_std_error_handler - standard error handler
 *	@ap: host port to handle error for
 *
 *	Standard error handler
 *
 *	LOCKING:
 *	Kernel thread context (may sleep).
 */
void ata_std_error_handler(struct ata_port *ap)
{
	struct ata_port_operations *ops = ap->ops;
	ata_reset_fn_t hardreset = ops->hardreset;

	/* ignore built-in hardreset if SCR access is not available */
	if (hardreset == sata_std_hardreset && !sata_scr_valid(&ap->link))
		hardreset = NULL;

	ata_do_eh(ap, ops->prereset, ops->softreset, hardreset, ops->postreset);
}
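/*
 * Usage sketch (hypothetical driver code, not part of this file): a
 * low-level driver publishes ata_std_error_handler() through its
 * ata_port_operations, which is how the standard prereset/softreset/
 * hardreset/postreset methods end up driving ata_do_eh() above.
 * &sata_port_ops is the real base ops table exported by libata-core.
 */
static struct ata_port_operations example_port_ops = {
	.inherits	= &sata_port_ops,
	.error_handler	= ata_std_error_handler,
};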
#ifdef CONFIG_PM
/**
 *	ata_eh_handle_port_suspend - perform port suspend operation
 *	@ap: port to suspend
 *
 *	Suspend @ap.
 *
 *	LOCKING:
 *	Kernel thread context (may sleep).
 */
static void ata_eh_handle_port_suspend(struct ata_port *ap)
{
	unsigned long flags;
	int rc = 0;

	/* are we suspending? */
	spin_lock_irqsave(ap->lock, flags);
	if (!(ap->pflags & ATA_PFLAG_PM_PENDING) ||
	    ap->pm_mesg.event == PM_EVENT_ON) {
		spin_unlock_irqrestore(ap->lock, flags);
		return;
	}
	spin_unlock_irqrestore(ap->lock, flags);

	WARN_ON(ap->pflags & ATA_PFLAG_SUSPENDED);

	/* tell ACPI we're suspending */
	rc = ata_acpi_on_suspend(ap);
	if (rc)
		goto out;

	/* suspend */
	ata_eh_freeze_port(ap);

	if (ap->ops->port_suspend)
		rc = ap->ops->port_suspend(ap, ap->pm_mesg);

	ata_acpi_set_state(ap, PMSG_SUSPEND);
 out:
	/* report result */
	spin_lock_irqsave(ap->lock, flags);

	ap->pflags &= ~ATA_PFLAG_PM_PENDING;
	if (rc == 0)
		ap->pflags |= ATA_PFLAG_SUSPENDED;
	else if (ap->pflags & ATA_PFLAG_FROZEN)
		ata_port_schedule_eh(ap);

	if (ap->pm_result) {
		*ap->pm_result = rc;
		ap->pm_result = NULL;
	}

	spin_unlock_irqrestore(ap->lock, flags);
}
/**
 *	ata_eh_handle_port_resume - perform port resume operation
 *	@ap: port to resume
 *
 *	Resume @ap.
 *
 *	LOCKING:
 *	Kernel thread context (may sleep).
 */
static void ata_eh_handle_port_resume(struct ata_port *ap)
{
	struct ata_link *link;
	struct ata_device *dev;
	unsigned long flags;
	int rc = 0;

	/* are we resuming? */
	spin_lock_irqsave(ap->lock, flags);
	if (!(ap->pflags & ATA_PFLAG_PM_PENDING) ||
	    ap->pm_mesg.event != PM_EVENT_ON) {
		spin_unlock_irqrestore(ap->lock, flags);
		return;
	}
	spin_unlock_irqrestore(ap->lock, flags);

	WARN_ON(!(ap->pflags & ATA_PFLAG_SUSPENDED));

	/*
	 * Error timestamps are in jiffies which doesn't run while
	 * suspended, and PHY events during resume aren't too uncommon.
	 * When the two are combined, it can lead to unnecessary speed
	 * downs if the machine is suspended and resumed repeatedly.
	 * Clear error history.
	 */
	ata_for_each_link(link, ap, HOST_FIRST)
		ata_for_each_dev(dev, link, ALL)
			ata_ering_clear(&dev->ering);

	ata_acpi_set_state(ap, PMSG_ON);

	if (ap->ops->port_resume)
		rc = ap->ops->port_resume(ap);

	/* tell ACPI that we're resuming */
	ata_acpi_on_resume(ap);

	/* report result */
	spin_lock_irqsave(ap->lock, flags);
	ap->pflags &= ~(ATA_PFLAG_PM_PENDING | ATA_PFLAG_SUSPENDED);
	if (ap->pm_result) {
		*ap->pm_result = rc;
		ap->pm_result = NULL;
	}
	spin_unlock_irqrestore(ap->lock, flags);
}
#endif /* CONFIG_PM */