/*
 *  libata-eh.c - libata error handling
 *
 *  Maintained by:  Tejun Heo <tj@kernel.org>
 *                  Please ALWAYS copy linux-ide@vger.kernel.org
 *                  on emails.
 *
 *  Copyright 2006 Tejun Heo <htejun@gmail.com>
 *
 *
 *  This program is free software; you can redistribute it and/or
 *  modify it under the terms of the GNU General Public License as
 *  published by the Free Software Foundation; either version 2, or
 *  (at your option) any later version.
 *
 *  This program is distributed in the hope that it will be useful,
 *  but WITHOUT ANY WARRANTY; without even the implied warranty of
 *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 *  General Public License for more details.
 *
 *  You should have received a copy of the GNU General Public License
 *  along with this program; see the file COPYING.  If not, write to
 *  the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139,
 *  USA.
 *
 *
 *  libata documentation is available via 'make {ps|pdf}docs',
 *  as Documentation/DocBook/libata.*
 *
 *  Hardware documentation available from http://www.t13.org/ and
 *  http://www.sata-io.org/
 *
 */

#include <linux/kernel.h>
#include <linux/blkdev.h>
#include <linux/export.h>
#include <linux/pci.h>
#include <scsi/scsi.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_eh.h>
#include <scsi/scsi_device.h>
#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_dbg.h>
#include "../scsi/scsi_transport_api.h"

#include <linux/libata.h>

#include <trace/events/libata.h>

#include "libata.h"

enum {
        /* speed down verdicts */
        ATA_EH_SPDN_NCQ_OFF             = (1 << 0),
        ATA_EH_SPDN_SPEED_DOWN          = (1 << 1),
        ATA_EH_SPDN_FALLBACK_TO_PIO     = (1 << 2),
        ATA_EH_SPDN_KEEP_ERRORS         = (1 << 3),

        /* error flags */
        ATA_EFLAG_IS_IO                 = (1 << 0),
        ATA_EFLAG_DUBIOUS_XFER          = (1 << 1),
        ATA_EFLAG_OLD_ER                = (1 << 31),

        /* error categories */
        ATA_ECAT_NONE                   = 0,
        ATA_ECAT_ATA_BUS                = 1,
        ATA_ECAT_TOUT_HSM               = 2,
        ATA_ECAT_UNK_DEV                = 3,
        ATA_ECAT_DUBIOUS_NONE           = 4,
        ATA_ECAT_DUBIOUS_ATA_BUS        = 5,
        ATA_ECAT_DUBIOUS_TOUT_HSM       = 6,
        ATA_ECAT_DUBIOUS_UNK_DEV        = 7,
        ATA_ECAT_NR                     = 8,

        ATA_EH_CMD_DFL_TIMEOUT          =  5000,

        /* always put at least this amount of time between resets */
        ATA_EH_RESET_COOL_DOWN          =  5000,

        /* Waiting in ->prereset can never be reliable.  It's
         * sometimes nice to wait there but it can't be depended upon;
         * otherwise, we wouldn't be resetting.  Just give it enough
         * time for most drives to spin up.
         */
        ATA_EH_PRERESET_TIMEOUT         = 10000,
        ATA_EH_FASTDRAIN_INTERVAL       =  3000,

        /* probe speed down parameters, see ata_eh_schedule_probe() */
        ATA_EH_PROBE_TRIAL_INTERVAL     = 60000,        /* 1 min */
        ATA_EH_PROBE_TRIALS             = 2,
};

/* The following table determines how we sequence resets.  Each entry
 * represents timeout for that try.  The first try can be soft or
 * hardreset.  All others are hardreset if available.  In most cases
 * the first reset w/ 10sec timeout should succeed.  Following entries
 * are mostly for error handling, hotplug and those outlier devices that
 * take an exceptionally long time to recover from reset.
 */
static const unsigned long ata_eh_reset_timeouts[] = {
        10000,  /* most drives spin up by 10sec */
        10000,  /* > 99% working drives spin up before 20sec */
        35000,  /* give > 30 secs of idleness for outlier devices */
         5000,  /* and sweet one last chance */
        ULONG_MAX, /* > 1 min has elapsed, give up */
};

static const unsigned long ata_eh_identify_timeouts[] = {
         5000,  /* covers > 99% of successes and not too boring on failures */
        10000,  /* combined time till here is enough even for media access */
        30000,  /* for true idiots */
        ULONG_MAX,
};

static const unsigned long ata_eh_flush_timeouts[] = {
        15000,  /* be generous with flush */
        15000,  /* ditto */
        30000,  /* and even more generous */
        ULONG_MAX,
};

static const unsigned long ata_eh_other_timeouts[] = {
         5000,  /* same rationale as identify timeout */
        10000,  /* ditto */
        /* but no merciful 30sec for other commands, it just isn't worth it */
        ULONG_MAX,
};

struct ata_eh_cmd_timeout_ent {
        const u8                *commands;
        const unsigned long     *timeouts;
};

/* The following table determines timeouts to use for EH internal
 * commands.  Each table entry is a command class and matches the
 * commands the entry applies to and the timeout table to use.
 *
 * On the retry after a command timed out, the next timeout value from
 * the table is used.  If the table doesn't contain further entries,
 * the last value is used.
 *
 * ehc->cmd_timeout_idx keeps track of which timeout to use per
 * command class, so if SET_FEATURES times out on the first try, the
 * next try will use the second timeout value only for that class.
 */
#define CMDS(cmds...)   (const u8 []){ cmds, 0 }
static const struct ata_eh_cmd_timeout_ent
ata_eh_cmd_timeout_table[ATA_EH_CMD_TIMEOUT_TABLE_SIZE] = {
        { .commands = CMDS(ATA_CMD_ID_ATA, ATA_CMD_ID_ATAPI),
          .timeouts = ata_eh_identify_timeouts, },
        { .commands = CMDS(ATA_CMD_READ_NATIVE_MAX, ATA_CMD_READ_NATIVE_MAX_EXT),
          .timeouts = ata_eh_other_timeouts, },
        { .commands = CMDS(ATA_CMD_SET_MAX, ATA_CMD_SET_MAX_EXT),
          .timeouts = ata_eh_other_timeouts, },
        { .commands = CMDS(ATA_CMD_SET_FEATURES),
          .timeouts = ata_eh_other_timeouts, },
        { .commands = CMDS(ATA_CMD_INIT_DEV_PARAMS),
          .timeouts = ata_eh_other_timeouts, },
        { .commands = CMDS(ATA_CMD_FLUSH, ATA_CMD_FLUSH_EXT),
          .timeouts = ata_eh_flush_timeouts },
};
#undef CMDS

static void __ata_port_freeze(struct ata_port *ap);
#ifdef CONFIG_PM
static void ata_eh_handle_port_suspend(struct ata_port *ap);
static void ata_eh_handle_port_resume(struct ata_port *ap);
#else /* CONFIG_PM */
static void ata_eh_handle_port_suspend(struct ata_port *ap)
{ }

static void ata_eh_handle_port_resume(struct ata_port *ap)
{ }
#endif /* CONFIG_PM */

static void __ata_ehi_pushv_desc(struct ata_eh_info *ehi, const char *fmt,
                                 va_list args)
{
        ehi->desc_len += vscnprintf(ehi->desc + ehi->desc_len,
                                    ATA_EH_DESC_LEN - ehi->desc_len,
                                    fmt, args);
}

/**
 *      __ata_ehi_push_desc - push error description without adding separator
 *      @ehi: target EHI
 *      @fmt: printf format string
 *
 *      Format string according to @fmt and append it to @ehi->desc.
 *
 *      LOCKING:
 *      spin_lock_irqsave(host lock)
 */
void __ata_ehi_push_desc(struct ata_eh_info *ehi, const char *fmt, ...)
{
        va_list args;

        va_start(args, fmt);
        __ata_ehi_pushv_desc(ehi, fmt, args);
        va_end(args);
}

/**
 *      ata_ehi_push_desc - push error description with separator
 *      @ehi: target EHI
 *      @fmt: printf format string
 *
 *      Format string according to @fmt and append it to @ehi->desc.
 *      If @ehi->desc is not empty, ", " is added in-between.
 *
 *      LOCKING:
 *      spin_lock_irqsave(host lock)
 */
void ata_ehi_push_desc(struct ata_eh_info *ehi, const char *fmt, ...)
{
        va_list args;

        if (ehi->desc_len)
                __ata_ehi_push_desc(ehi, ", ");

        va_start(args, fmt);
        __ata_ehi_pushv_desc(ehi, fmt, args);
        va_end(args);
}

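/*
 * Editor's illustrative sketch (not part of the original file): a
 * hypothetical driver error path could build a description
 * incrementally; ata_ehi_push_desc() inserts ", " between fragments
 * while __ata_ehi_push_desc() appends verbatim.  Here irq_stat is a
 * made-up variable standing in for controller status:
 *
 *      struct ata_eh_info *ehi = &ap->link.eh_info;
 *
 *      ata_ehi_clear_desc(ehi);
 *      ata_ehi_push_desc(ehi, "irq_stat 0x%08x", irq_stat);
 *      ata_ehi_push_desc(ehi, "connection status changed");
 *      // ehi->desc: "irq_stat 0x12345678, connection status changed"
 */
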
/**
 *      ata_ehi_clear_desc - clean error description
 *      @ehi: target EHI
 *
 *      Clear @ehi->desc.
 *
 *      LOCKING:
 *      spin_lock_irqsave(host lock)
 */
void ata_ehi_clear_desc(struct ata_eh_info *ehi)
{
        ehi->desc[0] = '\0';
        ehi->desc_len = 0;
}

/**
 *      ata_port_desc - append port description
 *      @ap: target ATA port
 *      @fmt: printf format string
 *
 *      Format string according to @fmt and append it to port
 *      description.  If port description is not empty, " " is added
 *      in-between.  This function is to be used while initializing
 *      ata_host.  The description is printed on host registration.
 *
 *      LOCKING:
 *      None.
 */
void ata_port_desc(struct ata_port *ap, const char *fmt, ...)
{
        va_list args;

        WARN_ON(!(ap->pflags & ATA_PFLAG_INITIALIZING));

        if (ap->link.eh_info.desc_len)
                __ata_ehi_push_desc(&ap->link.eh_info, " ");

        va_start(args, fmt);
        __ata_ehi_pushv_desc(&ap->link.eh_info, fmt, args);
        va_end(args);
}

#ifdef CONFIG_PCI

/**
 *      ata_port_pbar_desc - append PCI BAR description
 *      @ap: target ATA port
 *      @bar: target PCI BAR
 *      @offset: offset into PCI BAR
 *      @name: name of the area
 *
 *      If @offset is negative, this function formats a string which
 *      contains the name, address, size and type of the BAR and
 *      appends it to the port description.  If @offset is zero or
 *      positive, only name and offsetted address is appended.
 *
 *      LOCKING:
 *      None.
 */
void ata_port_pbar_desc(struct ata_port *ap, int bar, ssize_t offset,
                        const char *name)
{
        struct pci_dev *pdev = to_pci_dev(ap->host->dev);
        char *type = "";
        unsigned long long start, len;

        if (pci_resource_flags(pdev, bar) & IORESOURCE_MEM)
                type = "m";
        else if (pci_resource_flags(pdev, bar) & IORESOURCE_IO)
                type = "i";

        start = (unsigned long long)pci_resource_start(pdev, bar);
        len = (unsigned long long)pci_resource_len(pdev, bar);

        if (offset < 0)
                ata_port_desc(ap, "%s %s%llu@0x%llx", name, type, len, start);
        else
                ata_port_desc(ap, "%s 0x%llx", name,
                                start + (unsigned long long)offset);
}

#endif /* CONFIG_PCI */

static int ata_lookup_timeout_table(u8 cmd)
{
        int i;

        for (i = 0; i < ATA_EH_CMD_TIMEOUT_TABLE_SIZE; i++) {
                const u8 *cur;

                for (cur = ata_eh_cmd_timeout_table[i].commands; *cur; cur++)
                        if (*cur == cmd)
                                return i;
        }

        return -1;
}

/**
 *      ata_internal_cmd_timeout - determine timeout for an internal command
 *      @dev: target device
 *      @cmd: internal command to be issued
 *
 *      Determine timeout for internal command @cmd for @dev.
 *
 *      LOCKING:
 *      EH context.
 *
 *      RETURNS:
 *      Determined timeout.
 */
unsigned long ata_internal_cmd_timeout(struct ata_device *dev, u8 cmd)
{
        struct ata_eh_context *ehc = &dev->link->eh_context;
        int ent = ata_lookup_timeout_table(cmd);
        int idx;

        if (ent < 0)
                return ATA_EH_CMD_DFL_TIMEOUT;

        idx = ehc->cmd_timeout_idx[dev->devno][ent];
        return ata_eh_cmd_timeout_table[ent].timeouts[idx];
}

/**
 *      ata_internal_cmd_timed_out - notification for internal command timeout
 *      @dev: target device
 *      @cmd: internal command which timed out
 *
 *      Notify EH that internal command @cmd for @dev timed out.  This
 *      function should be called only for commands whose timeouts are
 *      determined using ata_internal_cmd_timeout().
 *
 *      LOCKING:
 *      EH context.
 */
void ata_internal_cmd_timed_out(struct ata_device *dev, u8 cmd)
{
        struct ata_eh_context *ehc = &dev->link->eh_context;
        int ent = ata_lookup_timeout_table(cmd);
        int idx;

        if (ent < 0)
                return;

        idx = ehc->cmd_timeout_idx[dev->devno][ent];
        if (ata_eh_cmd_timeout_table[ent].timeouts[idx + 1] != ULONG_MAX)
                ehc->cmd_timeout_idx[dev->devno][ent]++;
}

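/*
 * Illustrative sketch (editor's addition, not in the original file):
 * the two helpers above are meant to be used as a pair around an EH
 * internal command.  On the first try ehc->cmd_timeout_idx is 0, so
 * IDENTIFY gets the 5s entry of ata_eh_identify_timeouts; reporting a
 * timeout bumps the index so a retry waits 10s, then 30s:
 *
 *      unsigned long timeout;
 *
 *      timeout = ata_internal_cmd_timeout(dev, ATA_CMD_ID_ATA);
 *      // ... issue the command with this timeout; if it times out:
 *      ata_internal_cmd_timed_out(dev, ATA_CMD_ID_ATA);
 *      // the next ata_internal_cmd_timeout() call returns the next,
 *      // longer entry for the whole IDENTIFY command class
 */
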
static void ata_ering_record(struct ata_ering *ering, unsigned int eflags,
                             unsigned int err_mask)
{
        struct ata_ering_entry *ent;

        WARN_ON(!err_mask);

        ering->cursor++;
        ering->cursor %= ATA_ERING_SIZE;

        ent = &ering->ring[ering->cursor];
        ent->eflags = eflags;
        ent->err_mask = err_mask;
        ent->timestamp = get_jiffies_64();
}

static struct ata_ering_entry *ata_ering_top(struct ata_ering *ering)
{
        struct ata_ering_entry *ent = &ering->ring[ering->cursor];

        if (ent->err_mask)
                return ent;
        return NULL;
}

int ata_ering_map(struct ata_ering *ering,
                  int (*map_fn)(struct ata_ering_entry *, void *),
                  void *arg)
{
        int idx, rc = 0;
        struct ata_ering_entry *ent;

        idx = ering->cursor;
        do {
                ent = &ering->ring[idx];
                if (!ent->err_mask)
                        break;
                rc = map_fn(ent, arg);
                if (rc)
                        break;
                idx = (idx - 1 + ATA_ERING_SIZE) % ATA_ERING_SIZE;
        } while (idx != ering->cursor);

        return rc;
}

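/*
 * Illustrative note (editor's addition): ata_ering_map() walks the
 * ring from the most recent entry backwards, stopping at the first
 * empty slot or when map_fn returns non-zero.  A minimal sketch of a
 * callback counting recorded timeouts (hypothetical, for illustration
 * only):
 *
 *      static int count_timeouts_cb(struct ata_ering_entry *ent, void *arg)
 *      {
 *              int *cnt = arg;
 *
 *              if (ent->err_mask & AC_ERR_TIMEOUT)
 *                      (*cnt)++;
 *              return 0;       // keep walking
 *      }
 *
 *      int cnt = 0;
 *      ata_ering_map(&dev->ering, count_timeouts_cb, &cnt);
 */
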
static int ata_ering_clear_cb(struct ata_ering_entry *ent, void *void_arg)
{
        ent->eflags |= ATA_EFLAG_OLD_ER;
        return 0;
}

static void ata_ering_clear(struct ata_ering *ering)
{
        ata_ering_map(ering, ata_ering_clear_cb, NULL);
}

static unsigned int ata_eh_dev_action(struct ata_device *dev)
{
        struct ata_eh_context *ehc = &dev->link->eh_context;

        return ehc->i.action | ehc->i.dev_action[dev->devno];
}

static void ata_eh_clear_action(struct ata_link *link, struct ata_device *dev,
                                struct ata_eh_info *ehi, unsigned int action)
{
        struct ata_device *tdev;

        if (!dev) {
                ehi->action &= ~action;
                ata_for_each_dev(tdev, link, ALL)
                        ehi->dev_action[tdev->devno] &= ~action;
        } else {
                /* doesn't make sense for port-wide EH actions */
                WARN_ON(!(action & ATA_EH_PERDEV_MASK));

                /* break ehi->action into ehi->dev_action */
                if (ehi->action & action) {
                        ata_for_each_dev(tdev, link, ALL)
                                ehi->dev_action[tdev->devno] |=
                                        ehi->action & action;
                        ehi->action &= ~action;
                }

                /* turn off the specified per-dev action */
                ehi->dev_action[dev->devno] &= ~action;
        }
}

/**
 *      ata_eh_acquire - acquire EH ownership
 *      @ap: ATA port to acquire EH ownership for
 *
 *      Acquire EH ownership for @ap.  This is the basic exclusion
 *      mechanism for ports sharing a host.  Only one port hanging off
 *      the same host can claim the ownership of EH.
 *
 *      LOCKING:
 *      EH context.
 */
void ata_eh_acquire(struct ata_port *ap)
{
        mutex_lock(&ap->host->eh_mutex);
        WARN_ON_ONCE(ap->host->eh_owner);
        ap->host->eh_owner = current;
}

/**
 *      ata_eh_release - release EH ownership
 *      @ap: ATA port to release EH ownership for
 *
 *      Release EH ownership for @ap.  The caller must have acquired
 *      EH ownership using ata_eh_acquire() previously.
 *
 *      LOCKING:
 *      EH context.
 */
void ata_eh_release(struct ata_port *ap)
{
        WARN_ON_ONCE(ap->host->eh_owner != current);
        ap->host->eh_owner = NULL;
        mutex_unlock(&ap->host->eh_mutex);
}

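/*
 * Illustrative sketch (editor's addition): EH code that sleeps for a
 * long time releases ownership so sibling ports on the same host can
 * run their EH, then re-acquires it before touching shared state.
 * The two calls always bracket the sleep:
 *
 *      ata_eh_release(ap);                     // let sibling ports run EH
 *      schedule_timeout_uninterruptible(delta); // delta: jiffies to wait
 *      ata_eh_acquire(ap);                     // we own this host's EH again
 */
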
/**
 *      ata_scsi_timed_out - SCSI layer time out callback
 *      @cmd: timed out SCSI command
 *
 *      Handles SCSI layer timeout.  We race with normal completion of
 *      the qc for @cmd.  If the qc is already gone, we lose and let
 *      the scsi command finish (EH_HANDLED).  Otherwise, the qc has
 *      timed out and EH should be invoked.  Prevent ata_qc_complete()
 *      from finishing it by setting EH_SCHEDULED and return
 *      EH_NOT_HANDLED.
 *
 *      TODO: kill this function once old EH is gone.
 *
 *      LOCKING:
 *      Called from timer context
 *
 *      RETURNS:
 *      EH_HANDLED or EH_NOT_HANDLED
 */
enum blk_eh_timer_return ata_scsi_timed_out(struct scsi_cmnd *cmd)
{
        struct Scsi_Host *host = cmd->device->host;
        struct ata_port *ap = ata_shost_to_port(host);
        unsigned long flags;
        struct ata_queued_cmd *qc;
        enum blk_eh_timer_return ret;

        DPRINTK("ENTER\n");

        if (ap->ops->error_handler) {
                ret = BLK_EH_NOT_HANDLED;
                goto out;
        }

        ret = BLK_EH_HANDLED;
        spin_lock_irqsave(ap->lock, flags);
        qc = ata_qc_from_tag(ap, ap->link.active_tag);
        if (qc) {
                WARN_ON(qc->scsicmd != cmd);
                qc->flags |= ATA_QCFLAG_EH_SCHEDULED;
                qc->err_mask |= AC_ERR_TIMEOUT;
                ret = BLK_EH_NOT_HANDLED;
        }
        spin_unlock_irqrestore(ap->lock, flags);

 out:
        DPRINTK("EXIT, ret=%d\n", ret);
        return ret;
}

static void ata_eh_unload(struct ata_port *ap)
{
        struct ata_link *link;
        struct ata_device *dev;
        unsigned long flags;

        /* Restore SControl IPM and SPD for the next driver and
         * disable attached devices.
         */
        ata_for_each_link(link, ap, PMP_FIRST) {
                sata_scr_write(link, SCR_CONTROL, link->saved_scontrol & 0xff0);
                ata_for_each_dev(dev, link, ALL)
                        ata_dev_disable(dev);
        }

        /* freeze and set UNLOADED */
        spin_lock_irqsave(ap->lock, flags);

        ata_port_freeze(ap);                    /* won't be thawed */
        ap->pflags &= ~ATA_PFLAG_EH_PENDING;    /* clear pending from freeze */
        ap->pflags |= ATA_PFLAG_UNLOADED;

        spin_unlock_irqrestore(ap->lock, flags);
}

/**
 *      ata_scsi_error - SCSI layer error handler callback
 *      @host: SCSI host on which error occurred
 *
 *      Handles SCSI-layer-thrown error events.
 *
 *      LOCKING:
 *      Inherited from SCSI layer (none, can sleep)
 *
 *      RETURNS:
 *      Zero.
 */
void ata_scsi_error(struct Scsi_Host *host)
{
        struct ata_port *ap = ata_shost_to_port(host);
        unsigned long flags;
        LIST_HEAD(eh_work_q);

        DPRINTK("ENTER\n");

        spin_lock_irqsave(host->host_lock, flags);
        list_splice_init(&host->eh_cmd_q, &eh_work_q);
        spin_unlock_irqrestore(host->host_lock, flags);

        ata_scsi_cmd_error_handler(host, ap, &eh_work_q);

        /* If the timeout raced normal completion and there is nothing
           to recover (nr_timedout == 0), why exactly are we doing error
           recovery? */
        ata_scsi_port_error_handler(host, ap);

        /* finish or retry handled scmd's and clean up */
        WARN_ON(!list_empty(&eh_work_q));

        DPRINTK("EXIT\n");
}

/**
 * ata_scsi_cmd_error_handler - error callback for a list of commands
 * @host:       scsi host containing the port
 * @ap:         ATA port within the host
 * @eh_work_q:  list of commands to process
 *
 * Process the given list of commands and return those finished to the
 * ap->eh_done_q.  This function is the first part of the libata error
 * handler which processes a given list of failed commands.
 */
void ata_scsi_cmd_error_handler(struct Scsi_Host *host, struct ata_port *ap,
                                struct list_head *eh_work_q)
{
        int i;
        unsigned long flags;

        /* make sure sff pio task is not running */
        ata_sff_flush_pio_task(ap);

        /* synchronize with host lock and sort out timeouts */

        /* For new EH, all qcs are finished in one of three ways -
         * normal completion, error completion, and SCSI timeout.
         * Both completions can race against SCSI timeout.  When normal
         * completion wins, the qc never reaches EH.  When error
         * completion wins, the qc has ATA_QCFLAG_FAILED set.
         *
         * When SCSI timeout wins, things are a bit more complex.
         * Normal or error completion can occur after the timeout but
         * before this point.  In such cases, both types of
         * completions are honored.  A scmd is determined to have
         * timed out iff its associated qc is active and not failed.
         */
        if (ap->ops->error_handler) {
                struct scsi_cmnd *scmd, *tmp;
                int nr_timedout = 0;

                spin_lock_irqsave(ap->lock, flags);

                /* This must occur under the ap->lock as we don't want
                   a polled recovery to race the real interrupt handler

                   The lost_interrupt handler checks for any completed but
                   non-notified command and completes much like an IRQ handler.

                   We then fall into the error recovery code which will treat
                   this as if normal completion won the race */

                if (ap->ops->lost_interrupt)
                        ap->ops->lost_interrupt(ap);

                list_for_each_entry_safe(scmd, tmp, eh_work_q, eh_entry) {
                        struct ata_queued_cmd *qc;

                        for (i = 0; i < ATA_MAX_QUEUE; i++) {
                                qc = __ata_qc_from_tag(ap, i);
                                if (qc->flags & ATA_QCFLAG_ACTIVE &&
                                    qc->scsicmd == scmd)
                                        break;
                        }

                        if (i < ATA_MAX_QUEUE) {
                                /* the scmd has an associated qc */
                                if (!(qc->flags & ATA_QCFLAG_FAILED)) {
                                        /* which hasn't failed yet, timeout */
                                        qc->err_mask |= AC_ERR_TIMEOUT;
                                        qc->flags |= ATA_QCFLAG_FAILED;
                                        nr_timedout++;
                                }
                        } else {
                                /* Normal completion occurred after
                                 * SCSI timeout but before this point.
                                 * Successfully complete it.
                                 */
                                scmd->retries = scmd->allowed;
                                scsi_eh_finish_cmd(scmd, &ap->eh_done_q);
                        }
                }

                /* If we have timed out qcs, they belong to EH from
                 * this point on, but the state of the controller is
                 * unknown.  Freeze the port to make sure the IRQ
                 * handler doesn't diddle with those qcs.  This must
                 * be done atomically w.r.t. setting QCFLAG_FAILED.
                 */
                if (nr_timedout)
                        __ata_port_freeze(ap);

                spin_unlock_irqrestore(ap->lock, flags);

                /* initialize eh_tries */
                ap->eh_tries = ATA_EH_MAX_TRIES;
        } else
                spin_unlock_wait(ap->lock);

}
EXPORT_SYMBOL(ata_scsi_cmd_error_handler);

/**
 * ata_scsi_port_error_handler - recover the port after the commands
 * @host:       SCSI host containing the port
 * @ap:         the ATA port
 *
 * Handle the recovery of the port @ap after all the commands
 * have been recovered.
 */
void ata_scsi_port_error_handler(struct Scsi_Host *host, struct ata_port *ap)
{
        unsigned long flags;

        /* invoke error handler */
        if (ap->ops->error_handler) {
                struct ata_link *link;

                /* acquire EH ownership */
                ata_eh_acquire(ap);
 repeat:
                /* kill fast drain timer */
                del_timer_sync(&ap->fastdrain_timer);

                /* process port resume request */
                ata_eh_handle_port_resume(ap);

                /* fetch & clear EH info */
                spin_lock_irqsave(ap->lock, flags);

                ata_for_each_link(link, ap, HOST_FIRST) {
                        struct ata_eh_context *ehc = &link->eh_context;
                        struct ata_device *dev;

                        memset(&link->eh_context, 0, sizeof(link->eh_context));
                        link->eh_context.i = link->eh_info;
                        memset(&link->eh_info, 0, sizeof(link->eh_info));

                        ata_for_each_dev(dev, link, ENABLED) {
                                int devno = dev->devno;

                                ehc->saved_xfer_mode[devno] = dev->xfer_mode;
                                if (ata_ncq_enabled(dev))
                                        ehc->saved_ncq_enabled |= 1 << devno;
                        }
                }

                ap->pflags |= ATA_PFLAG_EH_IN_PROGRESS;
                ap->pflags &= ~ATA_PFLAG_EH_PENDING;
                ap->excl_link = NULL;   /* don't maintain exclusion over EH */

                spin_unlock_irqrestore(ap->lock, flags);

                /* invoke EH, skip if unloading or suspended */
                if (!(ap->pflags & (ATA_PFLAG_UNLOADING | ATA_PFLAG_SUSPENDED)))
                        ap->ops->error_handler(ap);
                else {
                        /* if unloading, commence suicide */
                        if ((ap->pflags & ATA_PFLAG_UNLOADING) &&
                            !(ap->pflags & ATA_PFLAG_UNLOADED))
                                ata_eh_unload(ap);
                        ata_eh_finish(ap);
                }

                /* process port suspend request */
                ata_eh_handle_port_suspend(ap);

                /* Exception might have happened after ->error_handler
                 * recovered the port but before this point.  Repeat
                 * EH in such case.
                 */
                spin_lock_irqsave(ap->lock, flags);

                if (ap->pflags & ATA_PFLAG_EH_PENDING) {
                        if (--ap->eh_tries) {
                                spin_unlock_irqrestore(ap->lock, flags);
                                goto repeat;
                        }
                        ata_port_err(ap,
                                     "EH pending after %d tries, giving up\n",
                                     ATA_EH_MAX_TRIES);
                        ap->pflags &= ~ATA_PFLAG_EH_PENDING;
                }

                /* this run is complete, make sure EH info is clear */
                ata_for_each_link(link, ap, HOST_FIRST)
                        memset(&link->eh_info, 0, sizeof(link->eh_info));

                /* end eh (clear host_eh_scheduled) while holding
                 * ap->lock such that if exception occurs after this
                 * point but before EH completion, SCSI midlayer will
                 * re-initiate EH.
                 */
                ap->ops->end_eh(ap);

                spin_unlock_irqrestore(ap->lock, flags);
                ata_eh_release(ap);
        } else {
                WARN_ON(ata_qc_from_tag(ap, ap->link.active_tag) == NULL);
                ap->ops->eng_timeout(ap);
        }

        scsi_eh_flush_done_q(&ap->eh_done_q);

        /* clean up */
        spin_lock_irqsave(ap->lock, flags);

        if (ap->pflags & ATA_PFLAG_LOADING)
                ap->pflags &= ~ATA_PFLAG_LOADING;
        else if (ap->pflags & ATA_PFLAG_SCSI_HOTPLUG)
                schedule_delayed_work(&ap->hotplug_task, 0);

        if (ap->pflags & ATA_PFLAG_RECOVERED)
                ata_port_info(ap, "EH complete\n");

        ap->pflags &= ~(ATA_PFLAG_SCSI_HOTPLUG | ATA_PFLAG_RECOVERED);

        /* tell wait_eh that we're done */
        ap->pflags &= ~ATA_PFLAG_EH_IN_PROGRESS;
        wake_up_all(&ap->eh_wait_q);

        spin_unlock_irqrestore(ap->lock, flags);
}
EXPORT_SYMBOL_GPL(ata_scsi_port_error_handler);

/**
 *      ata_port_wait_eh - Wait for the currently pending EH to complete
 *      @ap: Port to wait EH for
 *
 *      Wait until the currently pending EH is complete.
 *
 *      LOCKING:
 *      Kernel thread context (may sleep).
 */
void ata_port_wait_eh(struct ata_port *ap)
{
        unsigned long flags;
        DEFINE_WAIT(wait);

 retry:
        spin_lock_irqsave(ap->lock, flags);

        while (ap->pflags & (ATA_PFLAG_EH_PENDING | ATA_PFLAG_EH_IN_PROGRESS)) {
                prepare_to_wait(&ap->eh_wait_q, &wait, TASK_UNINTERRUPTIBLE);
                spin_unlock_irqrestore(ap->lock, flags);
                schedule();
                spin_lock_irqsave(ap->lock, flags);
        }
        finish_wait(&ap->eh_wait_q, &wait);

        spin_unlock_irqrestore(ap->lock, flags);

        /* make sure SCSI EH is complete */
        if (scsi_host_in_recovery(ap->scsi_host)) {
                ata_msleep(ap, 10);
                goto retry;
        }
}
EXPORT_SYMBOL_GPL(ata_port_wait_eh);

static int ata_eh_nr_in_flight(struct ata_port *ap)
{
        unsigned int tag;
        int nr = 0;

        /* count only non-internal commands */
        for (tag = 0; tag < ATA_MAX_QUEUE - 1; tag++)
                if (ata_qc_from_tag(ap, tag))
                        nr++;

        return nr;
}

void ata_eh_fastdrain_timerfn(unsigned long arg)
{
        struct ata_port *ap = (void *)arg;
        unsigned long flags;
        int cnt;

        spin_lock_irqsave(ap->lock, flags);

        cnt = ata_eh_nr_in_flight(ap);

        /* are we done? */
        if (!cnt)
                goto out_unlock;

        if (cnt == ap->fastdrain_cnt) {
                unsigned int tag;

                /* No progress during the last interval, tag all
                 * in-flight qcs as timed out and freeze the port.
                 */
                for (tag = 0; tag < ATA_MAX_QUEUE - 1; tag++) {
                        struct ata_queued_cmd *qc = ata_qc_from_tag(ap, tag);
                        if (qc)
                                qc->err_mask |= AC_ERR_TIMEOUT;
                }

                ata_port_freeze(ap);
        } else {
                /* some qcs have finished, give it another chance */
                ap->fastdrain_cnt = cnt;
                ap->fastdrain_timer.expires =
                        ata_deadline(jiffies, ATA_EH_FASTDRAIN_INTERVAL);
                add_timer(&ap->fastdrain_timer);
        }

 out_unlock:
        spin_unlock_irqrestore(ap->lock, flags);
}

/**
 *      ata_eh_set_pending - set ATA_PFLAG_EH_PENDING and activate fast drain
 *      @ap: target ATA port
 *      @fastdrain: activate fast drain
 *
 *      Set ATA_PFLAG_EH_PENDING and activate fast drain if @fastdrain
 *      is non-zero and EH wasn't pending before.  Fast drain ensures
 *      that EH kicks in in a timely manner.
 *
 *      LOCKING:
 *      spin_lock_irqsave(host lock)
 */
static void ata_eh_set_pending(struct ata_port *ap, int fastdrain)
{
        int cnt;

        /* already scheduled? */
        if (ap->pflags & ATA_PFLAG_EH_PENDING)
                return;

        ap->pflags |= ATA_PFLAG_EH_PENDING;

        if (!fastdrain)
                return;

        /* do we have in-flight qcs? */
        cnt = ata_eh_nr_in_flight(ap);
        if (!cnt)
                return;

        /* activate fast drain */
        ap->fastdrain_cnt = cnt;
        ap->fastdrain_timer.expires =
                ata_deadline(jiffies, ATA_EH_FASTDRAIN_INTERVAL);
        add_timer(&ap->fastdrain_timer);
}

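/*
 * Worked example (editor's addition): suppose EH is scheduled with
 * fastdrain while 4 commands are in flight.  fastdrain_cnt is set to 4
 * and the timer fires after ATA_EH_FASTDRAIN_INTERVAL (3s).  If 4
 * commands are still in flight, no progress was made: every qc is
 * marked AC_ERR_TIMEOUT and the port is frozen so EH can start
 * immediately.  If, say, only 2 remain, the timer re-arms for another
 * 3s window with fastdrain_cnt = 2.
 */
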
/**
 *      ata_qc_schedule_eh - schedule qc for error handling
 *      @qc: command to schedule error handling for
 *
 *      Schedule error handling for @qc.  EH will kick in as soon as
 *      other commands are drained.
 *
 *      LOCKING:
 *      spin_lock_irqsave(host lock)
 */
void ata_qc_schedule_eh(struct ata_queued_cmd *qc)
{
        struct ata_port *ap = qc->ap;
        struct request_queue *q = qc->scsicmd->device->request_queue;
        unsigned long flags;

        WARN_ON(!ap->ops->error_handler);

        qc->flags |= ATA_QCFLAG_FAILED;
        ata_eh_set_pending(ap, 1);

        /* The following will fail if timeout has already expired.
         * ata_scsi_error() takes care of such scmds on EH entry.
         * Note that ATA_QCFLAG_FAILED is unconditionally set after
         * this function completes.
         */
        spin_lock_irqsave(q->queue_lock, flags);
        blk_abort_request(qc->scsicmd->request);
        spin_unlock_irqrestore(q->queue_lock, flags);
}

/**
 * ata_std_sched_eh - non-libsas ata_ports issue eh with this common routine
 * @ap: ATA port to schedule EH for
 *
 *      LOCKING: inherited from ata_port_schedule_eh
 *      spin_lock_irqsave(host lock)
 */
void ata_std_sched_eh(struct ata_port *ap)
{
        WARN_ON(!ap->ops->error_handler);

        if (ap->pflags & ATA_PFLAG_INITIALIZING)
                return;

        ata_eh_set_pending(ap, 1);
        scsi_schedule_eh(ap->scsi_host);

        DPRINTK("port EH scheduled\n");
}
EXPORT_SYMBOL_GPL(ata_std_sched_eh);

/**
 * ata_std_end_eh - non-libsas ata_ports complete eh with this common routine
 * @ap: ATA port to end EH for
 *
 * In the libata object model there is a 1:1 mapping of ata_port to
 * shost, so host fields can be directly manipulated under ap->lock, in
 * the libsas case we need to hold a lock at the ha->level to coordinate
 * these events.
 *
 *      LOCKING:
 *      spin_lock_irqsave(host lock)
 */
void ata_std_end_eh(struct ata_port *ap)
{
        struct Scsi_Host *host = ap->scsi_host;

        host->host_eh_scheduled = 0;
}
EXPORT_SYMBOL(ata_std_end_eh);

/**
 *      ata_port_schedule_eh - schedule error handling without a qc
 *      @ap: ATA port to schedule EH for
 *
 *      Schedule error handling for @ap.  EH will kick in as soon as
 *      all commands are drained.
 *
 *      LOCKING:
 *      spin_lock_irqsave(host lock)
 */
void ata_port_schedule_eh(struct ata_port *ap)
{
        /* see: ata_std_sched_eh, unless you know better */
        ap->ops->sched_eh(ap);
}

static int ata_do_link_abort(struct ata_port *ap, struct ata_link *link)
{
        int tag, nr_aborted = 0;

        WARN_ON(!ap->ops->error_handler);

        /* we're gonna abort all commands, no need for fast drain */
        ata_eh_set_pending(ap, 0);

        for (tag = 0; tag < ATA_MAX_QUEUE; tag++) {
                struct ata_queued_cmd *qc = ata_qc_from_tag(ap, tag);

                if (qc && (!link || qc->dev->link == link)) {
                        qc->flags |= ATA_QCFLAG_FAILED;
                        ata_qc_complete(qc);
                        nr_aborted++;
                }
        }

        if (!nr_aborted)
                ata_port_schedule_eh(ap);

        return nr_aborted;
}

/**
 *      ata_link_abort - abort all qc's on the link
 *      @link: ATA link to abort qc's for
 *
 *      Abort all active qc's on @link and schedule EH.
 *
 *      LOCKING:
 *      spin_lock_irqsave(host lock)
 *
 *      RETURNS:
 *      Number of aborted qc's.
 */
int ata_link_abort(struct ata_link *link)
{
        return ata_do_link_abort(link->ap, link);
}

/**
 *      ata_port_abort - abort all qc's on the port
 *      @ap: ATA port to abort qc's for
 *
 *      Abort all active qc's of @ap and schedule EH.
 *
 *      LOCKING:
 *      spin_lock_irqsave(host_set lock)
 *
 *      RETURNS:
 *      Number of aborted qc's.
 */
int ata_port_abort(struct ata_port *ap)
{
        return ata_do_link_abort(ap, NULL);
}

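/*
 * Illustrative sketch (editor's addition): a hypothetical LLDD
 * interrupt handler typically uses these helpers when the hardware
 * reports a fatal, port-wide condition.  MY_CTRL_FATAL_ERR is a
 * made-up status bit; the calls run under ap->lock:
 *
 *      if (irq_stat & MY_CTRL_FATAL_ERR) {
 *              ata_ehi_push_desc(&ap->link.eh_info, "fatal ctrl error");
 *              ata_port_abort(ap);     // fail all qcs and schedule EH
 *      }
 */
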
/**
 *      __ata_port_freeze - freeze port
 *      @ap: ATA port to freeze
 *
 *      This function is called when HSM violation or some other
 *      condition disrupts normal operation of the port.  Frozen port
 *      is not allowed to perform any operation until the port is
 *      thawed, which usually follows a successful reset.
 *
 *      ap->ops->freeze() callback can be used for freezing the port
 *      hardware-wise (e.g. mask interrupt and stop DMA engine).  If a
 *      port cannot be frozen hardware-wise, the interrupt handler
 *      must ack and clear interrupts unconditionally while the port
 *      is frozen.
 *
 *      LOCKING:
 *      spin_lock_irqsave(host lock)
 */
static void __ata_port_freeze(struct ata_port *ap)
{
        WARN_ON(!ap->ops->error_handler);

        if (ap->ops->freeze)
                ap->ops->freeze(ap);

        ap->pflags |= ATA_PFLAG_FROZEN;

        DPRINTK("ata%u port frozen\n", ap->print_id);
}

/**
 *      ata_port_freeze - abort & freeze port
 *      @ap: ATA port to freeze
 *
 *      Abort and freeze @ap.  The freeze operation must be called
 *      first, because some hardware requires special operations
 *      before the taskfile registers are accessible.
 *
 *      LOCKING:
 *      spin_lock_irqsave(host lock)
 *
 *      RETURNS:
 *      Number of aborted commands.
 */
int ata_port_freeze(struct ata_port *ap)
{
        int nr_aborted;

        WARN_ON(!ap->ops->error_handler);

        __ata_port_freeze(ap);
        nr_aborted = ata_port_abort(ap);

        return nr_aborted;
}

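/*
 * Illustrative sketch (editor's addition): on errors where the
 * controller state becomes unknown (e.g. an HSM violation), a driver
 * freezes rather than just aborts, so nothing touches the hardware
 * until EH resets the port.  Hypothetical handler fragment:
 *
 *      qc->err_mask |= AC_ERR_HSM;
 *      ata_port_freeze(qc->ap);        // aborts all qcs, freezes the port
 */
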
/**
 *      sata_async_notification - SATA async notification handler
 *      @ap: ATA port where async notification is received
 *
 *      Handler to be called when async notification via SDB FIS is
 *      received.  This function schedules EH if necessary.
 *
 *      LOCKING:
 *      spin_lock_irqsave(host lock)
 *
 *      RETURNS:
 *      1 if EH is scheduled, 0 otherwise.
 */
int sata_async_notification(struct ata_port *ap)
{
        u32 sntf;
        int rc;

        if (!(ap->flags & ATA_FLAG_AN))
                return 0;

        rc = sata_scr_read(&ap->link, SCR_NOTIFICATION, &sntf);
        if (rc == 0)
                sata_scr_write(&ap->link, SCR_NOTIFICATION, sntf);

        if (!sata_pmp_attached(ap) || rc) {
                /* PMP is not attached or SNTF is not available */
                if (!sata_pmp_attached(ap)) {
                        /* PMP is not attached.  Check whether ATAPI
                         * AN is configured.  If so, notify media
                         * change.
                         */
                        struct ata_device *dev = ap->link.device;

                        if ((dev->class == ATA_DEV_ATAPI) &&
                            (dev->flags & ATA_DFLAG_AN))
                                ata_scsi_media_change_notify(dev);
                        return 0;
                } else {
                        /* PMP is attached but SNTF is not available.
                         * ATAPI async media change notification is
                         * not used.  The PMP must be reporting PHY
                         * status change, schedule EH.
                         */
                        ata_port_schedule_eh(ap);
                        return 1;
                }
        } else {
                /* PMP is attached and SNTF is available */
                struct ata_link *link;

                /* check and notify ATAPI AN */
                ata_for_each_link(link, ap, EDGE) {
                        if (!(sntf & (1 << link->pmp)))
                                continue;

                        if ((link->device->class == ATA_DEV_ATAPI) &&
                            (link->device->flags & ATA_DFLAG_AN))
                                ata_scsi_media_change_notify(link->device);
                }

                /* If PMP is reporting that PHY status of some
                 * downstream ports has changed, schedule EH.
                 */
                if (sntf & (1 << SATA_PMP_CTRL_PORT)) {
                        ata_port_schedule_eh(ap);
                        return 1;
                }

                return 0;
        }
}

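/*
 * Usage note (editor's addition): an LLDD calls
 * sata_async_notification() from its interrupt handler, under the
 * host lock, when it observes a Set Device Bits FIS with the
 * notification bit set.  The return value tells the driver whether EH
 * was scheduled.  Sketch, with a hypothetical condition:
 *
 *      if (sdb_fis_has_n_bit)          // made-up flag for illustration
 *              handled |= sata_async_notification(ap);
 */
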
/**
 *      ata_eh_freeze_port - EH helper to freeze port
 *      @ap: ATA port to freeze
 *
 *      Freeze @ap.
 *
 *      LOCKING:
 *      None.
 */
void ata_eh_freeze_port(struct ata_port *ap)
{
        unsigned long flags;

        if (!ap->ops->error_handler)
                return;

        spin_lock_irqsave(ap->lock, flags);
        __ata_port_freeze(ap);
        spin_unlock_irqrestore(ap->lock, flags);
}

/**
 *      ata_eh_thaw_port - EH helper to thaw port
 *      @ap: ATA port to thaw
 *
 *      Thaw frozen port @ap.
 *
 *      LOCKING:
 *      None.
 */
void ata_eh_thaw_port(struct ata_port *ap)
{
        unsigned long flags;

        if (!ap->ops->error_handler)
                return;

        spin_lock_irqsave(ap->lock, flags);

        ap->pflags &= ~ATA_PFLAG_FROZEN;

        if (ap->ops->thaw)
                ap->ops->thaw(ap);

        spin_unlock_irqrestore(ap->lock, flags);

        DPRINTK("ata%u port thawed\n", ap->print_id);
}

static void ata_eh_scsidone(struct scsi_cmnd *scmd)
{
        /* nada */
}

static void __ata_eh_qc_complete(struct ata_queued_cmd *qc)
{
        struct ata_port *ap = qc->ap;
        struct scsi_cmnd *scmd = qc->scsicmd;
        unsigned long flags;

        spin_lock_irqsave(ap->lock, flags);
        qc->scsidone = ata_eh_scsidone;
        __ata_qc_complete(qc);
        WARN_ON(ata_tag_valid(qc->tag));
        spin_unlock_irqrestore(ap->lock, flags);

        scsi_eh_finish_cmd(scmd, &ap->eh_done_q);
}

/**
 * ata_eh_qc_complete - Complete an active ATA command from EH
 * @qc: Command to complete
 *
 * Indicate to the mid and upper layers that an ATA command has
 * completed.  To be used from EH.
 */
void ata_eh_qc_complete(struct ata_queued_cmd *qc)
{
        struct scsi_cmnd *scmd = qc->scsicmd;
        scmd->retries = scmd->allowed;
        __ata_eh_qc_complete(qc);
}

/**
 * ata_eh_qc_retry - Tell midlayer to retry an ATA command after EH
 * @qc: Command to retry
 *
 * Indicate to the mid and upper layers that an ATA command
 * should be retried.  To be used from EH.
 *
 * SCSI midlayer limits the number of retries to scmd->allowed.
 * scmd->allowed is incremented for commands which get retried
 * due to unrelated failures (qc->err_mask is zero).
 */
void ata_eh_qc_retry(struct ata_queued_cmd *qc)
{
        struct scsi_cmnd *scmd = qc->scsicmd;
        if (!qc->err_mask)
                scmd->allowed++;
        __ata_eh_qc_complete(qc);
}

/**
 *      ata_dev_disable - disable ATA device
 *      @dev: ATA device to disable
 *
 *      Disable @dev.
 *
 *      Locking:
 *      EH context.
 */
void ata_dev_disable(struct ata_device *dev)
{
        if (!ata_dev_enabled(dev))
                return;

        if (ata_msg_drv(dev->link->ap))
                ata_dev_warn(dev, "disabled\n");
        ata_acpi_on_disable(dev);
        ata_down_xfermask_limit(dev, ATA_DNXFER_FORCE_PIO0 | ATA_DNXFER_QUIET);
        dev->class++;

        /* From now till the next successful probe, ering is used to
         * track probe failures.  Clear accumulated device error info.
         */
        ata_ering_clear(&dev->ering);
}

/**
 *      ata_eh_detach_dev - detach ATA device
 *      @dev: ATA device to detach
 *
 *      Detach @dev.
 *
 *      LOCKING:
 *      None.
 */
void ata_eh_detach_dev(struct ata_device *dev)
{
        struct ata_link *link = dev->link;
        struct ata_port *ap = link->ap;
        struct ata_eh_context *ehc = &link->eh_context;
        unsigned long flags;

        ata_dev_disable(dev);

        spin_lock_irqsave(ap->lock, flags);

        dev->flags &= ~ATA_DFLAG_DETACH;

        if (ata_scsi_offline_dev(dev)) {
                dev->flags |= ATA_DFLAG_DETACHED;
                ap->pflags |= ATA_PFLAG_SCSI_HOTPLUG;
        }

        /* clear per-dev EH info */
        ata_eh_clear_action(link, dev, &link->eh_info, ATA_EH_PERDEV_MASK);
        ata_eh_clear_action(link, dev, &link->eh_context.i, ATA_EH_PERDEV_MASK);
        ehc->saved_xfer_mode[dev->devno] = 0;
        ehc->saved_ncq_enabled &= ~(1 << dev->devno);

        spin_unlock_irqrestore(ap->lock, flags);
}

/**
 *      ata_eh_about_to_do - about to perform eh_action
 *      @link: target ATA link
 *      @dev: target ATA dev for per-dev action (can be NULL)
 *      @action: action about to be performed
 *
 *      Called just before performing EH actions to clear related bits
 *      in @link->eh_info such that eh actions are not unnecessarily
 *      repeated.
 *
 *      LOCKING:
 *      None.
 */
void ata_eh_about_to_do(struct ata_link *link, struct ata_device *dev,
                        unsigned int action)
{
        struct ata_port *ap = link->ap;
        struct ata_eh_info *ehi = &link->eh_info;
        struct ata_eh_context *ehc = &link->eh_context;
        unsigned long flags;

        spin_lock_irqsave(ap->lock, flags);

        ata_eh_clear_action(link, dev, ehi, action);

        /* About to take EH action, set RECOVERED.  Ignore actions on
         * slave links as master will do them again.
         */
        if (!(ehc->i.flags & ATA_EHI_QUIET) && link != ap->slave_link)
                ap->pflags |= ATA_PFLAG_RECOVERED;

        spin_unlock_irqrestore(ap->lock, flags);
}

/**
 *      ata_eh_done - EH action complete
 *      @link: ATA link for which EH actions are complete
 *      @dev: target ATA dev for per-dev action (can be NULL)
 *      @action: action just completed
 *
 *      Called right after performing EH actions to clear related bits
 *      in @link->eh_context.
 *
 *      LOCKING:
 *      None.
 */
void ata_eh_done(struct ata_link *link, struct ata_device *dev,
                 unsigned int action)
{
        struct ata_eh_context *ehc = &link->eh_context;

        ata_eh_clear_action(link, dev, &ehc->i, action);
}

/**
 *      ata_err_string - convert err_mask to descriptive string
 *      @err_mask: error mask to convert to string
 *
 *      Convert @err_mask to descriptive string.  Errors are
 *      prioritized according to severity and only the most severe
 *      error is reported.
 *
 *      LOCKING:
 *      None.
 *
 *      RETURNS:
 *      Descriptive string for @err_mask
 */
static const char *ata_err_string(unsigned int err_mask)
{
        if (err_mask & AC_ERR_HOST_BUS)
                return "host bus error";
        if (err_mask & AC_ERR_ATA_BUS)
                return "ATA bus error";
        if (err_mask & AC_ERR_TIMEOUT)
                return "timeout";
        if (err_mask & AC_ERR_HSM)
                return "HSM violation";
        if (err_mask & AC_ERR_SYSTEM)
                return "internal error";
        if (err_mask & AC_ERR_MEDIA)
                return "media error";
        if (err_mask & AC_ERR_INVALID)
                return "invalid argument";
        if (err_mask & AC_ERR_DEV)
                return "device error";
        return "unknown error";
}

/**
 *      ata_read_log_page - read a specific log page
 *      @dev: target device
 *      @log: log to read
 *      @page: page to read
 *      @buf: buffer to store read page
 *      @sectors: number of sectors to read
 *
 *      Read log page using READ_LOG_EXT command.
 *
 *      LOCKING:
 *      Kernel thread context (may sleep).
 *
 *      RETURNS:
 *      0 on success, AC_ERR_* mask otherwise.
 */
unsigned int ata_read_log_page(struct ata_device *dev, u8 log,
                               u8 page, void *buf, unsigned int sectors)
{
        unsigned long ap_flags = dev->link->ap->flags;
        struct ata_taskfile tf;
        unsigned int err_mask;
        bool dma = false;

        DPRINTK("read log page - log 0x%x, page 0x%x\n", log, page);

        /*
         * Return error without actually issuing the command on controllers
         * which e.g. lockup on a read log page.
         */
        if (ap_flags & ATA_FLAG_NO_LOG_PAGE)
                return AC_ERR_DEV;

retry:
        ata_tf_init(dev, &tf);
        if (dev->dma_mode && ata_id_has_read_log_dma_ext(dev->id) &&
            !(dev->horkage & ATA_HORKAGE_NO_NCQ_LOG)) {
                tf.command = ATA_CMD_READ_LOG_DMA_EXT;
                tf.protocol = ATA_PROT_DMA;
                dma = true;
        } else {
                tf.command = ATA_CMD_READ_LOG_EXT;
                tf.protocol = ATA_PROT_PIO;
                dma = false;
        }
        tf.lbal = log;
        tf.lbam = page;
        tf.nsect = sectors;
        tf.hob_nsect = sectors >> 8;
        tf.flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_LBA48 | ATA_TFLAG_DEVICE;

        err_mask = ata_exec_internal(dev, &tf, NULL, DMA_FROM_DEVICE,
                                     buf, sectors * ATA_SECT_SIZE, 0);

        if (err_mask && dma) {
                dev->horkage |= ATA_HORKAGE_NO_NCQ_LOG;
                ata_dev_warn(dev, "READ LOG DMA EXT failed, trying unqueued\n");
                goto retry;
        }

        DPRINTK("EXIT, err_mask=%x\n", err_mask);
        return err_mask;
}

/**
 *      ata_eh_read_log_10h - Read log page 10h for NCQ error details
 *      @dev: Device to read log page 10h from
 *      @tag: Resulting tag of the failed command
 *      @tf: Resulting taskfile registers of the failed command
 *
 *      Read log page 10h to obtain NCQ error details and clear error
 *      condition.
 *
 *      LOCKING:
 *      Kernel thread context (may sleep).
 *
 *      RETURNS:
 *      0 on success, -errno otherwise.
 */
static int ata_eh_read_log_10h(struct ata_device *dev,
                               int *tag, struct ata_taskfile *tf)
{
        u8 *buf = dev->link->ap->sector_buf;
        unsigned int err_mask;
        u8 csum;
        int i;

        err_mask = ata_read_log_page(dev, ATA_LOG_SATA_NCQ, 0, buf, 1);
        if (err_mask)
                return -EIO;

        csum = 0;
        for (i = 0; i < ATA_SECT_SIZE; i++)
                csum += buf[i];
        if (csum)
                ata_dev_warn(dev, "invalid checksum 0x%x on log page 10h\n",
                             csum);

        if (buf[0] & 0x80)
                return -ENOENT;

        *tag = buf[0] & 0x1f;

        tf->command = buf[2];
        tf->feature = buf[3];
        tf->lbal = buf[4];
        tf->lbam = buf[5];
        tf->lbah = buf[6];
        tf->device = buf[7];
        tf->hob_lbal = buf[8];
        tf->hob_lbam = buf[9];
        tf->hob_lbah = buf[10];
        tf->nsect = buf[12];
        tf->hob_nsect = buf[13];
        if (ata_id_has_ncq_autosense(dev->id))
                tf->auxiliary = buf[14] << 16 | buf[15] << 8 | buf[16];

        return 0;
}

/**
 *      atapi_eh_tur - perform ATAPI TEST_UNIT_READY
 *      @dev: target ATAPI device
 *      @r_sense_key: out parameter for sense_key
 *
 *      Perform ATAPI TEST_UNIT_READY.
 *
 *      LOCKING:
 *      EH context (may sleep).
 *
 *      RETURNS:
 *      0 on success, AC_ERR_* mask on failure.
 */
unsigned int atapi_eh_tur(struct ata_device *dev, u8 *r_sense_key)
{
        u8 cdb[ATAPI_CDB_LEN] = { TEST_UNIT_READY, 0, 0, 0, 0, 0 };
        struct ata_taskfile tf;
        unsigned int err_mask;

        ata_tf_init(dev, &tf);

        tf.flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE;
        tf.command = ATA_CMD_PACKET;
        tf.protocol = ATAPI_PROT_NODATA;

        err_mask = ata_exec_internal(dev, &tf, cdb, DMA_NONE, NULL, 0, 0);
        if (err_mask == AC_ERR_DEV)
                *r_sense_key = tf.feature >> 4;
        return err_mask;
}

/**
 *      ata_eh_request_sense - perform REQUEST SENSE DATA EXT
 *      @qc: qc to perform REQUEST SENSE DATA EXT for
 *      @cmd: scsi command for which the sense code should be set
 *
 *      Perform REQUEST SENSE DATA EXT after the device reported CHECK
 *      SENSE.  This function is an EH helper.
 *
 *      LOCKING:
 *      Kernel thread context (may sleep).
 */
static void ata_eh_request_sense(struct ata_queued_cmd *qc,
                                 struct scsi_cmnd *cmd)
{
        struct ata_device *dev = qc->dev;
        struct ata_taskfile tf;
        unsigned int err_mask;

        if (qc->ap->pflags & ATA_PFLAG_FROZEN) {
                ata_dev_warn(dev, "sense data available but port frozen\n");
                return;
        }

        if (!cmd || qc->flags & ATA_QCFLAG_SENSE_VALID)
                return;

        if (!ata_id_sense_reporting_enabled(dev->id)) {
                ata_dev_warn(qc->dev, "sense data reporting disabled\n");
                return;
        }

        DPRINTK("ATA request sense\n");

        ata_tf_init(dev, &tf);
        tf.flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE;
        tf.flags |= ATA_TFLAG_LBA | ATA_TFLAG_LBA48;
        tf.command = ATA_CMD_REQ_SENSE_DATA;
        tf.protocol = ATA_PROT_NODATA;

        err_mask = ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0, 0);
        /* Ignore err_mask; ATA_ERR might be set */
        if (tf.command & ATA_SENSE) {
                ata_scsi_set_sense(dev, cmd, tf.lbah, tf.lbam, tf.lbal);
                qc->flags |= ATA_QCFLAG_SENSE_VALID;
        } else {
                ata_dev_warn(dev, "request sense failed stat %02x emask %x\n",
                             tf.command, err_mask);
        }
}

/**
 *      atapi_eh_request_sense - perform ATAPI REQUEST_SENSE
 *      @dev: device to perform REQUEST_SENSE to
 *      @sense_buf: result sense data buffer (SCSI_SENSE_BUFFERSIZE bytes long)
 *      @dfl_sense_key: default sense key to use
 *
 *      Perform ATAPI REQUEST_SENSE after the device reported CHECK
 *      SENSE.  This function is an EH helper.
 *
 *      LOCKING:
 *      Kernel thread context (may sleep).
 *
 *      RETURNS:
 *      0 on success, AC_ERR_* mask on failure
 */
unsigned int atapi_eh_request_sense(struct ata_device *dev,
                                    u8 *sense_buf, u8 dfl_sense_key)
{
        u8 cdb[ATAPI_CDB_LEN] =
                { REQUEST_SENSE, 0, 0, 0, SCSI_SENSE_BUFFERSIZE, 0 };
        struct ata_port *ap = dev->link->ap;
        struct ata_taskfile tf;

        DPRINTK("ATAPI request sense\n");

        memset(sense_buf, 0, SCSI_SENSE_BUFFERSIZE);

        /* initialize sense_buf with the error register,
         * for the case where they are -not- overwritten
         */
        sense_buf[0] = 0x70;
        sense_buf[2] = dfl_sense_key;

        /* some devices time out if garbage left in tf */
        ata_tf_init(dev, &tf);

        tf.flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE;
        tf.command = ATA_CMD_PACKET;

        /* is it pointless to prefer PIO for "safety reasons"? */
        if (ap->flags & ATA_FLAG_PIO_DMA) {
                tf.protocol = ATAPI_PROT_DMA;
                tf.feature |= ATAPI_PKT_DMA;
        } else {
                tf.protocol = ATAPI_PROT_PIO;
                tf.lbam = SCSI_SENSE_BUFFERSIZE;
                tf.lbah = 0;
        }

        return ata_exec_internal(dev, &tf, cdb, DMA_FROM_DEVICE,
                                 sense_buf, SCSI_SENSE_BUFFERSIZE, 0);
}

/**
 *      ata_eh_analyze_serror - analyze SError for a failed port
 *      @link: ATA link to analyze SError for
 *
 *      Analyze SError if available and further determine cause of
 *      failure.
 *
 *      LOCKING:
 *      None.
 */
static void ata_eh_analyze_serror(struct ata_link *link)
{
        struct ata_eh_context *ehc = &link->eh_context;
        u32 serror = ehc->i.serror;
        unsigned int err_mask = 0, action = 0;
        u32 hotplug_mask;

        if (serror & (SERR_PERSISTENT | SERR_DATA)) {
                err_mask |= AC_ERR_ATA_BUS;
                action |= ATA_EH_RESET;
        }
        if (serror & SERR_PROTOCOL) {
                err_mask |= AC_ERR_HSM;
                action |= ATA_EH_RESET;
        }
        if (serror & SERR_INTERNAL) {
                err_mask |= AC_ERR_SYSTEM;
                action |= ATA_EH_RESET;
        }

        /* Determine whether a hotplug event has occurred.  Both
         * SError.N/X are considered hotplug events for enabled or
         * host links.  For disabled PMP links, only N bit is
         * considered as X bit is left at 1 for link plugging.
         */
        if (link->lpm_policy > ATA_LPM_MAX_POWER)
                hotplug_mask = 0;       /* hotplug doesn't work w/ LPM */
        else if (!(link->flags & ATA_LFLAG_DISABLED) || ata_is_host_link(link))
                hotplug_mask = SERR_PHYRDY_CHG | SERR_DEV_XCHG;
        else
                hotplug_mask = SERR_PHYRDY_CHG;

        if (serror & hotplug_mask)
                ata_ehi_hotplugged(&ehc->i);

        ehc->i.err_mask |= err_mask;
        ehc->i.action |= action;
}

/**
 *      ata_eh_analyze_ncq_error - analyze NCQ error
 *      @link: ATA link to analyze NCQ error for
 *
 *      Read log page 10h, determine the offending qc and acquire
 *      error status TF.  For NCQ device errors, all LLDDs have to do
 *      is setting AC_ERR_DEV in ehi->err_mask.  This function takes
 *      care of the rest.
 *
 *      LOCKING:
 *      Kernel thread context (may sleep).
 */
void ata_eh_analyze_ncq_error(struct ata_link *link)
{
        struct ata_port *ap = link->ap;
        struct ata_eh_context *ehc = &link->eh_context;
        struct ata_device *dev = link->device;
        struct ata_queued_cmd *qc;
        struct ata_taskfile tf;
        int tag, rc;

        /* if frozen, we can't do much */
        if (ap->pflags & ATA_PFLAG_FROZEN)
                return;

        /* is it NCQ device error? */
        if (!link->sactive || !(ehc->i.err_mask & AC_ERR_DEV))
                return;

        /* has LLDD analyzed already? */
        for (tag = 0; tag < ATA_MAX_QUEUE; tag++) {
                qc = __ata_qc_from_tag(ap, tag);

                if (!(qc->flags & ATA_QCFLAG_FAILED))
                        continue;

                if (qc->err_mask)
                        return;
        }

        /* okay, this error is ours */
        memset(&tf, 0, sizeof(tf));
        rc = ata_eh_read_log_10h(dev, &tag, &tf);
        if (rc) {
                ata_link_err(link, "failed to read log page 10h (errno=%d)\n",
                             rc);
                return;
        }

        if (!(link->sactive & (1 << tag))) {
                ata_link_err(link, "log page 10h reported inactive tag %d\n",
                             tag);
                return;
        }

        /* we've got the perpetrator, condemn it */
        qc = __ata_qc_from_tag(ap, tag);
        memcpy(&qc->result_tf, &tf, sizeof(tf));
        qc->result_tf.flags = ATA_TFLAG_ISADDR | ATA_TFLAG_LBA | ATA_TFLAG_LBA48;
        qc->err_mask |= AC_ERR_DEV | AC_ERR_NCQ;
        if ((qc->result_tf.command & ATA_SENSE) || qc->result_tf.auxiliary) {
                char sense_key, asc, ascq;

                sense_key = (qc->result_tf.auxiliary >> 16) & 0xff;
                asc = (qc->result_tf.auxiliary >> 8) & 0xff;
                ascq = qc->result_tf.auxiliary & 0xff;
                ata_scsi_set_sense(dev, qc->scsicmd, sense_key, asc, ascq);
                ata_scsi_set_sense_information(dev, qc->scsicmd,
                                               &qc->result_tf);
                qc->flags |= ATA_QCFLAG_SENSE_VALID;
        }

        ehc->i.err_mask &= ~AC_ERR_DEV;
}

/**
 *      ata_eh_analyze_tf - analyze taskfile of a failed qc
 *      @qc: qc to analyze
 *      @tf: Taskfile registers to analyze
 *
 *      Analyze taskfile of @qc and further determine cause of
 *      failure.  This function also requests ATAPI sense data if
 *      available.
 *
 *      LOCKING:
 *      Kernel thread context (may sleep).
 *
 *      RETURNS:
 *      Determined recovery action
 */
static unsigned int ata_eh_analyze_tf(struct ata_queued_cmd *qc,
                                      const struct ata_taskfile *tf)
{
        unsigned int tmp, action = 0;
        u8 stat = tf->command, err = tf->feature;

        if ((stat & (ATA_BUSY | ATA_DRQ | ATA_DRDY)) != ATA_DRDY) {
                qc->err_mask |= AC_ERR_HSM;
                return ATA_EH_RESET;
        }

        if (stat & (ATA_ERR | ATA_DF)) {
                qc->err_mask |= AC_ERR_DEV;
                /*
                 * Sense data reporting does not work if the
                 * device fault bit is set.
                 */
                if (stat & ATA_DF)
                        stat &= ~ATA_SENSE;
        } else {
                return 0;
        }

        switch (qc->dev->class) {
        case ATA_DEV_ATA:
        case ATA_DEV_ZAC:
                if (stat & ATA_SENSE)
                        ata_eh_request_sense(qc, qc->scsicmd);
                if (err & ATA_ICRC)
                        qc->err_mask |= AC_ERR_ATA_BUS;
                if (err & (ATA_UNC | ATA_AMNF))
                        qc->err_mask |= AC_ERR_MEDIA;
                if (err & ATA_IDNF)
                        qc->err_mask |= AC_ERR_INVALID;
                break;

        case ATA_DEV_ATAPI:
                if (!(qc->ap->pflags & ATA_PFLAG_FROZEN)) {
                        tmp = atapi_eh_request_sense(qc->dev,
                                                qc->scsicmd->sense_buffer,
                                                qc->result_tf.feature >> 4);
                        if (!tmp)
                                qc->flags |= ATA_QCFLAG_SENSE_VALID;
                        else
                                qc->err_mask |= tmp;
                }
        }

        if (qc->flags & ATA_QCFLAG_SENSE_VALID) {
                int ret = scsi_check_sense(qc->scsicmd);
                /*
                 * SUCCESS here means that the sense code could be
                 * evaluated and should be passed to the upper layers
                 * for correct evaluation.
                 * FAILED means the sense code could not be interpreted
                 * and the device would need to be reset.
                 * NEEDS_RETRY and ADD_TO_MLQUEUE means that the
                 * command would need to be retried.
                 */
                if (ret == NEEDS_RETRY || ret == ADD_TO_MLQUEUE) {
                        qc->flags |= ATA_QCFLAG_RETRY;
                        qc->err_mask |= AC_ERR_OTHER;
                } else if (ret != SUCCESS) {
                        qc->err_mask |= AC_ERR_HSM;
                }
        }
        if (qc->err_mask & (AC_ERR_HSM | AC_ERR_TIMEOUT | AC_ERR_ATA_BUS))
                action |= ATA_EH_RESET;

        return action;
}

static int ata_eh_categorize_error(unsigned int eflags, unsigned int err_mask,
                                   int *xfer_ok)
{
        int base = 0;

        if (!(eflags & ATA_EFLAG_DUBIOUS_XFER))
                *xfer_ok = 1;

        if (!*xfer_ok)
                base = ATA_ECAT_DUBIOUS_NONE;

        if (err_mask & AC_ERR_ATA_BUS)
                return base + ATA_ECAT_ATA_BUS;

        if (err_mask & AC_ERR_TIMEOUT)
                return base + ATA_ECAT_TOUT_HSM;

        if (eflags & ATA_EFLAG_IS_IO) {
                if (err_mask & AC_ERR_HSM)
                        return base + ATA_ECAT_TOUT_HSM;
                if ((err_mask &
                     (AC_ERR_DEV|AC_ERR_MEDIA|AC_ERR_INVALID)) == AC_ERR_DEV)
                        return base + ATA_ECAT_UNK_DEV;
        }

        return 0;
}

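/*
 * Worked example (editor's addition): a media error (AC_ERR_MEDIA |
 * AC_ERR_DEV) on an IO command matches neither AC_ERR_ATA_BUS nor
 * AC_ERR_TIMEOUT, and the masked check above fails because more than
 * AC_ERR_DEV is set, so it falls through to category 0 ("don't count
 * it").  A bare AC_ERR_DEV on an IO command while the transfer mode is
 * still unverified (*xfer_ok still 0) lands in ATA_ECAT_DUBIOUS_UNK_DEV
 * (base 4 + ATA_ECAT_UNK_DEV 3 = 7).
 */
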
struct speed_down_verdict_arg {
        u64 since;
        int xfer_ok;
        int nr_errors[ATA_ECAT_NR];
};

static int speed_down_verdict_cb(struct ata_ering_entry *ent, void *void_arg)
{
        struct speed_down_verdict_arg *arg = void_arg;
        int cat;

        if ((ent->eflags & ATA_EFLAG_OLD_ER) || (ent->timestamp < arg->since))
                return -1;

        cat = ata_eh_categorize_error(ent->eflags, ent->err_mask,
                                      &arg->xfer_ok);
        arg->nr_errors[cat]++;

        return 0;
}

/**
 *      ata_eh_speed_down_verdict - Determine speed down verdict
 *      @dev: Device of interest
 *
 *      This function examines error ring of @dev and determines
 *      whether NCQ needs to be turned off, transfer speed should be
 *      stepped down, or falling back to PIO is necessary.
 *
 *      ECAT_ATA_BUS    : ATA_BUS error for any command
 *
 *      ECAT_TOUT_HSM   : TIMEOUT for any command or HSM violation for
 *                        IO commands
 *
 *      ECAT_UNK_DEV    : Unknown DEV error for IO commands
 *
 *      ECAT_DUBIOUS_*  : Identical to above three but occurred while
 *                        data transfer hasn't been verified.
 *
 *      Verdicts are
 *
 *      NCQ_OFF         : Turn off NCQ.
 *
 *      SPEED_DOWN      : Speed down transfer speed but don't fall back
 *                        to PIO.
 *
 *      FALLBACK_TO_PIO : Fall back to PIO.
 *
 *      Even if multiple verdicts are returned, only one action is
 *      taken per error.  An action triggered by non-DUBIOUS errors
 *      clears ering, while one triggered by DUBIOUS_* errors doesn't.
 *      This is to expedite speed down decisions right after device is
 *      initially configured.
 *
 *      The following are the speed down rules.  #1 and #2 deal with
 *      DUBIOUS errors.
 *
 *      1. If more than one DUBIOUS_ATA_BUS or DUBIOUS_TOUT_HSM errors
 *         occurred during last 5 mins, SPEED_DOWN and FALLBACK_TO_PIO.
 *
 *      2. If more than one DUBIOUS_TOUT_HSM or DUBIOUS_UNK_DEV errors
 *         occurred during last 5 mins, NCQ_OFF.
 *
 *      3. If more than 8 ATA_BUS, TOUT_HSM or UNK_DEV errors
 *         occurred during last 5 mins, FALLBACK_TO_PIO
 *
 *      4. If more than 3 TOUT_HSM or UNK_DEV errors occurred
 *         during last 10 mins, NCQ_OFF.
 *
 *      5. If more than 3 ATA_BUS or TOUT_HSM errors, or more than 6
 *         UNK_DEV errors occurred during last 10 mins, SPEED_DOWN.
 *
 *      LOCKING:
 *      Inherited from caller.
 *
 *      RETURNS:
 *      OR of ATA_EH_SPDN_* flags.
 */
static unsigned int ata_eh_speed_down_verdict(struct ata_device *dev)
{
        const u64 j5mins = 5LLU * 60 * HZ, j10mins = 10LLU * 60 * HZ;
        u64 j64 = get_jiffies_64();
        struct speed_down_verdict_arg arg;
        unsigned int verdict = 0;

        /* scan past 5 mins of error history */
        memset(&arg, 0, sizeof(arg));
        arg.since = j64 - min(j64, j5mins);
        ata_ering_map(&dev->ering, speed_down_verdict_cb, &arg);

        if (arg.nr_errors[ATA_ECAT_DUBIOUS_ATA_BUS] +
            arg.nr_errors[ATA_ECAT_DUBIOUS_TOUT_HSM] > 1)
                verdict |= ATA_EH_SPDN_SPEED_DOWN |
                        ATA_EH_SPDN_FALLBACK_TO_PIO | ATA_EH_SPDN_KEEP_ERRORS;

        if (arg.nr_errors[ATA_ECAT_DUBIOUS_TOUT_HSM] +
            arg.nr_errors[ATA_ECAT_DUBIOUS_UNK_DEV] > 1)
                verdict |= ATA_EH_SPDN_NCQ_OFF | ATA_EH_SPDN_KEEP_ERRORS;

        if (arg.nr_errors[ATA_ECAT_ATA_BUS] +
            arg.nr_errors[ATA_ECAT_TOUT_HSM] +
            arg.nr_errors[ATA_ECAT_UNK_DEV] > 6)
                verdict |= ATA_EH_SPDN_FALLBACK_TO_PIO;

        /* scan past 10 mins of error history */
        memset(&arg, 0, sizeof(arg));
        arg.since = j64 - min(j64, j10mins);
        ata_ering_map(&dev->ering, speed_down_verdict_cb, &arg);

        if (arg.nr_errors[ATA_ECAT_TOUT_HSM] +
            arg.nr_errors[ATA_ECAT_UNK_DEV] > 3)
                verdict |= ATA_EH_SPDN_NCQ_OFF;

        if (arg.nr_errors[ATA_ECAT_ATA_BUS] +
            arg.nr_errors[ATA_ECAT_TOUT_HSM] > 3 ||
            arg.nr_errors[ATA_ECAT_UNK_DEV] > 6)
                verdict |= ATA_EH_SPDN_SPEED_DOWN;

        return verdict;
}

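/*
 * Worked example (editor's addition): two AC_ERR_TIMEOUT errors
 * recorded within the last 5 minutes while the transfer mode was still
 * unverified both land in ATA_ECAT_DUBIOUS_TOUT_HSM, so rules #1 and
 * #2 above both trigger: the verdict is SPEED_DOWN | FALLBACK_TO_PIO |
 * NCQ_OFF | KEEP_ERRORS, and because of KEEP_ERRORS the error ring is
 * preserved for the next decision.
 */
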
/**
 *      ata_eh_speed_down - record error and speed down if necessary
 *      @dev: Failed device
 *      @eflags: mask of ATA_EFLAG_* flags
 *      @err_mask: err_mask of the error
 *
 *      Record error and examine error history to determine whether
 *      adjusting transmission speed is necessary.  It also sets
 *      transmission limits appropriately if such adjustment is
 *      necessary.
 *
 *      LOCKING:
 *      Kernel thread context (may sleep).
 *
 *      RETURNS:
 *      Determined recovery action.
 */
static unsigned int ata_eh_speed_down(struct ata_device *dev,
                                unsigned int eflags, unsigned int err_mask)
{
        struct ata_link *link = ata_dev_phys_link(dev);
        int xfer_ok = 0;
        unsigned int verdict;
        unsigned int action = 0;

        /* don't bother if Cat-0 error */
        if (ata_eh_categorize_error(eflags, err_mask, &xfer_ok) == 0)
                return 0;

        /* record error and determine whether speed down is necessary */
        ata_ering_record(&dev->ering, eflags, err_mask);
        verdict = ata_eh_speed_down_verdict(dev);

        /* turn off NCQ? */
        if ((verdict & ATA_EH_SPDN_NCQ_OFF) &&
            (dev->flags & (ATA_DFLAG_PIO | ATA_DFLAG_NCQ |
                           ATA_DFLAG_NCQ_OFF)) == ATA_DFLAG_NCQ) {
                dev->flags |= ATA_DFLAG_NCQ_OFF;
                ata_dev_warn(dev, "NCQ disabled due to excessive errors\n");
                goto done;
        }

        /* speed down? */
        if (verdict & ATA_EH_SPDN_SPEED_DOWN) {
                /* speed down SATA link speed if possible */
                if (sata_down_spd_limit(link, 0) == 0) {
                        action |= ATA_EH_RESET;
                        goto done;
                }

                /* lower transfer mode */
                if (dev->spdn_cnt < 2) {
                        static const int dma_dnxfer_sel[] =
                                { ATA_DNXFER_DMA, ATA_DNXFER_40C };
                        static const int pio_dnxfer_sel[] =
                                { ATA_DNXFER_PIO, ATA_DNXFER_FORCE_PIO0 };
                        int sel;

                        if (dev->xfer_shift != ATA_SHIFT_PIO)
                                sel = dma_dnxfer_sel[dev->spdn_cnt];
                        else
                                sel = pio_dnxfer_sel[dev->spdn_cnt];

                        dev->spdn_cnt++;

                        if (ata_down_xfermask_limit(dev, sel) == 0) {
                                action |= ATA_EH_RESET;
                                goto done;
                        }
                }
        }

        /* Fall back to PIO?  Slowing down to PIO is meaningless for
         * SATA ATA devices.  Consider it only for PATA and SATAPI.
         */
        if ((verdict & ATA_EH_SPDN_FALLBACK_TO_PIO) && (dev->spdn_cnt >= 2) &&
            (link->ap->cbl != ATA_CBL_SATA || dev->class == ATA_DEV_ATAPI) &&
            (dev->xfer_shift != ATA_SHIFT_PIO)) {
                if (ata_down_xfermask_limit(dev, ATA_DNXFER_FORCE_PIO) == 0) {
                        dev->spdn_cnt = 0;
                        action |= ATA_EH_RESET;
                        goto done;
                }
        }

        return 0;
 done:
        /* device has been slowed down, blow error history */
        if (!(verdict & ATA_EH_SPDN_KEEP_ERRORS))
                ata_ering_clear(&dev->ering);
        return action;
}

/**
 *      ata_eh_worth_retry - analyze error and decide whether to retry
 *      @qc: qc to possibly retry
 *
 *      Look at the cause of the error and decide if a retry
 *      might be useful or not.  We don't want to retry media errors
 *      because the drive itself has probably already taken 10-30 seconds
 *      doing its own internal retries before reporting the failure.
 */
static inline int ata_eh_worth_retry(struct ata_queued_cmd *qc)
{
        if (qc->err_mask & AC_ERR_MEDIA)
                return 0;       /* don't retry media errors */
        if (qc->flags & ATA_QCFLAG_IO)
                return 1;       /* otherwise retry anything from fs stack */
        if (qc->err_mask & AC_ERR_INVALID)
                return 0;       /* don't retry these */
        return qc->err_mask != AC_ERR_DEV;  /* retry if not dev error */
}

/**
 *	ata_eh_link_autopsy - analyze error and determine recovery action
 *	@link: host link to perform autopsy on
 *
 *	Analyze why @link failed and determine which recovery actions
 *	are needed.  This function also sets more detailed AC_ERR_*
 *	values and fills sense data for ATAPI CHECK SENSE.
 *
 *	LOCKING:
 *	Kernel thread context (may sleep).
 */
static void ata_eh_link_autopsy(struct ata_link *link)
{
	struct ata_port *ap = link->ap;
	struct ata_eh_context *ehc = &link->eh_context;
	struct ata_device *dev;
	unsigned int all_err_mask = 0, eflags = 0;
	int tag;
	u32 serror;
	int rc;

	if (ehc->i.flags & ATA_EHI_NO_AUTOPSY)
		return;

	/* obtain and analyze SError */
	rc = sata_scr_read(link, SCR_ERROR, &serror);
	if (rc == 0) {
		ehc->i.serror |= serror;
		ata_eh_analyze_serror(link);
	} else if (rc != -EOPNOTSUPP) {
		/* SError read failed, force reset and probing */
		ehc->i.probe_mask |= ATA_ALL_DEVICES;
		ehc->i.action |= ATA_EH_RESET;
		ehc->i.err_mask |= AC_ERR_OTHER;
	}

	/* analyze NCQ failure */
	ata_eh_analyze_ncq_error(link);

	/* any real error trumps AC_ERR_OTHER */
	if (ehc->i.err_mask & ~AC_ERR_OTHER)
		ehc->i.err_mask &= ~AC_ERR_OTHER;

	all_err_mask |= ehc->i.err_mask;

	for (tag = 0; tag < ATA_MAX_QUEUE; tag++) {
		struct ata_queued_cmd *qc = __ata_qc_from_tag(ap, tag);

		if (!(qc->flags & ATA_QCFLAG_FAILED) ||
		    ata_dev_phys_link(qc->dev) != link)
			continue;

		/* inherit upper level err_mask */
		qc->err_mask |= ehc->i.err_mask;

		/* analyze TF */
		ehc->i.action |= ata_eh_analyze_tf(qc, &qc->result_tf);

		/* DEV errors are probably spurious in case of ATA_BUS error */
		if (qc->err_mask & AC_ERR_ATA_BUS)
			qc->err_mask &= ~(AC_ERR_DEV | AC_ERR_MEDIA |
					  AC_ERR_INVALID);

		/* any real error trumps unknown error */
		if (qc->err_mask & ~AC_ERR_OTHER)
			qc->err_mask &= ~AC_ERR_OTHER;

		/* SENSE_VALID trumps dev/unknown error and revalidation */
		if (qc->flags & ATA_QCFLAG_SENSE_VALID)
			qc->err_mask &= ~(AC_ERR_DEV | AC_ERR_OTHER);

		/* determine whether the command is worth retrying */
		if (ata_eh_worth_retry(qc))
			qc->flags |= ATA_QCFLAG_RETRY;

		/* accumulate error info */
		ehc->i.dev = qc->dev;
		all_err_mask |= qc->err_mask;
		if (qc->flags & ATA_QCFLAG_IO)
			eflags |= ATA_EFLAG_IS_IO;
		trace_ata_eh_link_autopsy_qc(qc);
	}

	/* enforce default EH actions */
	if (ap->pflags & ATA_PFLAG_FROZEN ||
	    all_err_mask & (AC_ERR_HSM | AC_ERR_TIMEOUT))
		ehc->i.action |= ATA_EH_RESET;
	else if (((eflags & ATA_EFLAG_IS_IO) && all_err_mask) ||
		 (!(eflags & ATA_EFLAG_IS_IO) && (all_err_mask & ~AC_ERR_DEV)))
		ehc->i.action |= ATA_EH_REVALIDATE;

	/* If we have offending qcs and the associated failed device,
	 * perform per-dev EH action only on the offending device.
	 */
	if (ehc->i.dev) {
		ehc->i.dev_action[ehc->i.dev->devno] |=
			ehc->i.action & ATA_EH_PERDEV_MASK;
		ehc->i.action &= ~ATA_EH_PERDEV_MASK;
	}

	/* propagate timeout to host link */
	if ((all_err_mask & AC_ERR_TIMEOUT) && !ata_is_host_link(link))
		ap->link.eh_context.i.err_mask |= AC_ERR_TIMEOUT;

	/* record error and consider speeding down */
	dev = ehc->i.dev;
	if (!dev && ((ata_link_max_devices(link) == 1 &&
		      ata_dev_enabled(link->device))))
		dev = link->device;

	if (dev) {
		if (dev->flags & ATA_DFLAG_DUBIOUS_XFER)
			eflags |= ATA_EFLAG_DUBIOUS_XFER;
		ehc->i.action |= ata_eh_speed_down(dev, eflags, all_err_mask);
		trace_ata_eh_link_autopsy(dev, ehc->i.action, all_err_mask);
	}
}
/**
 *	ata_eh_autopsy - analyze error and determine recovery action
 *	@ap: host port to perform autopsy on
 *
 *	Analyze all links of @ap and determine why they failed and
 *	which recovery actions are needed.
 *
 *	LOCKING:
 *	Kernel thread context (may sleep).
 */
void ata_eh_autopsy(struct ata_port *ap)
{
	struct ata_link *link;

	ata_for_each_link(link, ap, EDGE)
		ata_eh_link_autopsy(link);

	/* Handle the frigging slave link.  Autopsy is done similarly
	 * but actions and flags are transferred over to the master
	 * link and handled from there.
	 */
	if (ap->slave_link) {
		struct ata_eh_context *mehc = &ap->link.eh_context;
		struct ata_eh_context *sehc = &ap->slave_link->eh_context;

		/* transfer control flags from master to slave */
		sehc->i.flags |= mehc->i.flags & ATA_EHI_TO_SLAVE_MASK;

		/* perform autopsy on the slave link */
		ata_eh_link_autopsy(ap->slave_link);

		/* transfer actions from slave to master and clear slave */
		ata_eh_about_to_do(ap->slave_link, NULL, ATA_EH_ALL_ACTIONS);
		mehc->i.action		|= sehc->i.action;
		mehc->i.dev_action[1]	|= sehc->i.dev_action[1];
		mehc->i.flags		|= sehc->i.flags;
		ata_eh_done(ap->slave_link, NULL, ATA_EH_ALL_ACTIONS);
	}

	/* Autopsy of fanout ports can affect host link autopsy.
	 * Perform host link autopsy last.
	 */
	if (sata_pmp_attached(ap))
		ata_eh_link_autopsy(&ap->link);
}
/**
 *	ata_get_cmd_descript - get description for ATA command
 *	@command: ATA command code to get description for
 *
 *	Return a textual description of the given command, or NULL if the
 *	command is not known.
 *
 *	LOCKING:
 *	None
 */
const char *ata_get_cmd_descript(u8 command)
{
#ifdef CONFIG_ATA_VERBOSE_ERROR
	static const struct
	{
		u8 command;
		const char *text;
	} cmd_descr[] = {
		{ ATA_CMD_DEV_RESET,		"DEVICE RESET" },
		{ ATA_CMD_CHK_POWER,		"CHECK POWER MODE" },
		{ ATA_CMD_STANDBY,		"STANDBY" },
		{ ATA_CMD_IDLE,			"IDLE" },
		{ ATA_CMD_EDD,			"EXECUTE DEVICE DIAGNOSTIC" },
		{ ATA_CMD_DOWNLOAD_MICRO,	"DOWNLOAD MICROCODE" },
		{ ATA_CMD_DOWNLOAD_MICRO_DMA,	"DOWNLOAD MICROCODE DMA" },
		{ ATA_CMD_NOP,			"NOP" },
		{ ATA_CMD_FLUSH,		"FLUSH CACHE" },
		{ ATA_CMD_FLUSH_EXT,		"FLUSH CACHE EXT" },
		{ ATA_CMD_ID_ATA,		"IDENTIFY DEVICE" },
		{ ATA_CMD_ID_ATAPI,		"IDENTIFY PACKET DEVICE" },
		{ ATA_CMD_SERVICE,		"SERVICE" },
		{ ATA_CMD_READ,			"READ DMA" },
		{ ATA_CMD_READ_EXT,		"READ DMA EXT" },
		{ ATA_CMD_READ_QUEUED,		"READ DMA QUEUED" },
		{ ATA_CMD_READ_STREAM_EXT,	"READ STREAM EXT" },
		{ ATA_CMD_READ_STREAM_DMA_EXT,	"READ STREAM DMA EXT" },
		{ ATA_CMD_WRITE,		"WRITE DMA" },
		{ ATA_CMD_WRITE_EXT,		"WRITE DMA EXT" },
		{ ATA_CMD_WRITE_QUEUED,		"WRITE DMA QUEUED EXT" },
		{ ATA_CMD_WRITE_STREAM_EXT,	"WRITE STREAM EXT" },
		{ ATA_CMD_WRITE_STREAM_DMA_EXT,	"WRITE STREAM DMA EXT" },
		{ ATA_CMD_WRITE_FUA_EXT,	"WRITE DMA FUA EXT" },
		{ ATA_CMD_WRITE_QUEUED_FUA_EXT,	"WRITE DMA QUEUED FUA EXT" },
		{ ATA_CMD_FPDMA_READ,		"READ FPDMA QUEUED" },
		{ ATA_CMD_FPDMA_WRITE,		"WRITE FPDMA QUEUED" },
		{ ATA_CMD_FPDMA_SEND,		"SEND FPDMA QUEUED" },
		{ ATA_CMD_FPDMA_RECV,		"RECEIVE FPDMA QUEUED" },
		{ ATA_CMD_PIO_READ,		"READ SECTOR(S)" },
		{ ATA_CMD_PIO_READ_EXT,		"READ SECTOR(S) EXT" },
		{ ATA_CMD_PIO_WRITE,		"WRITE SECTOR(S)" },
		{ ATA_CMD_PIO_WRITE_EXT,	"WRITE SECTOR(S) EXT" },
		{ ATA_CMD_READ_MULTI,		"READ MULTIPLE" },
		{ ATA_CMD_READ_MULTI_EXT,	"READ MULTIPLE EXT" },
		{ ATA_CMD_WRITE_MULTI,		"WRITE MULTIPLE" },
		{ ATA_CMD_WRITE_MULTI_EXT,	"WRITE MULTIPLE EXT" },
		{ ATA_CMD_WRITE_MULTI_FUA_EXT,	"WRITE MULTIPLE FUA EXT" },
		{ ATA_CMD_SET_FEATURES,		"SET FEATURES" },
		{ ATA_CMD_SET_MULTI,		"SET MULTIPLE MODE" },
		{ ATA_CMD_VERIFY,		"READ VERIFY SECTOR(S)" },
		{ ATA_CMD_VERIFY_EXT,		"READ VERIFY SECTOR(S) EXT" },
		{ ATA_CMD_WRITE_UNCORR_EXT,	"WRITE UNCORRECTABLE EXT" },
		{ ATA_CMD_STANDBYNOW1,		"STANDBY IMMEDIATE" },
		{ ATA_CMD_IDLEIMMEDIATE,	"IDLE IMMEDIATE" },
		{ ATA_CMD_SLEEP,		"SLEEP" },
		{ ATA_CMD_INIT_DEV_PARAMS,	"INITIALIZE DEVICE PARAMETERS" },
		{ ATA_CMD_READ_NATIVE_MAX,	"READ NATIVE MAX ADDRESS" },
		{ ATA_CMD_READ_NATIVE_MAX_EXT,	"READ NATIVE MAX ADDRESS EXT" },
		{ ATA_CMD_SET_MAX,		"SET MAX ADDRESS" },
		{ ATA_CMD_SET_MAX_EXT,		"SET MAX ADDRESS EXT" },
		{ ATA_CMD_READ_LOG_EXT,		"READ LOG EXT" },
		{ ATA_CMD_WRITE_LOG_EXT,	"WRITE LOG EXT" },
		{ ATA_CMD_READ_LOG_DMA_EXT,	"READ LOG DMA EXT" },
		{ ATA_CMD_WRITE_LOG_DMA_EXT,	"WRITE LOG DMA EXT" },
		{ ATA_CMD_TRUSTED_NONDATA,	"TRUSTED NON-DATA" },
		{ ATA_CMD_TRUSTED_RCV,		"TRUSTED RECEIVE" },
		{ ATA_CMD_TRUSTED_RCV_DMA,	"TRUSTED RECEIVE DMA" },
		{ ATA_CMD_TRUSTED_SND,		"TRUSTED SEND" },
		{ ATA_CMD_TRUSTED_SND_DMA,	"TRUSTED SEND DMA" },
		{ ATA_CMD_PMP_READ,		"READ BUFFER" },
		{ ATA_CMD_PMP_READ_DMA,		"READ BUFFER DMA" },
		{ ATA_CMD_PMP_WRITE,		"WRITE BUFFER" },
		{ ATA_CMD_PMP_WRITE_DMA,	"WRITE BUFFER DMA" },
		{ ATA_CMD_CONF_OVERLAY,		"DEVICE CONFIGURATION OVERLAY" },
		{ ATA_CMD_SEC_SET_PASS,		"SECURITY SET PASSWORD" },
		{ ATA_CMD_SEC_UNLOCK,		"SECURITY UNLOCK" },
		{ ATA_CMD_SEC_ERASE_PREP,	"SECURITY ERASE PREPARE" },
		{ ATA_CMD_SEC_ERASE_UNIT,	"SECURITY ERASE UNIT" },
		{ ATA_CMD_SEC_FREEZE_LOCK,	"SECURITY FREEZE LOCK" },
		{ ATA_CMD_SEC_DISABLE_PASS,	"SECURITY DISABLE PASSWORD" },
		{ ATA_CMD_CONFIG_STREAM,	"CONFIGURE STREAM" },
		{ ATA_CMD_SMART,		"SMART" },
		{ ATA_CMD_MEDIA_LOCK,		"DOOR LOCK" },
		{ ATA_CMD_MEDIA_UNLOCK,		"DOOR UNLOCK" },
		{ ATA_CMD_DSM,			"DATA SET MANAGEMENT" },
		{ ATA_CMD_CHK_MED_CRD_TYP,	"CHECK MEDIA CARD TYPE" },
		{ ATA_CMD_CFA_REQ_EXT_ERR,	"CFA REQUEST EXTENDED ERROR" },
		{ ATA_CMD_CFA_WRITE_NE,		"CFA WRITE SECTORS WITHOUT ERASE" },
		{ ATA_CMD_CFA_TRANS_SECT,	"CFA TRANSLATE SECTOR" },
		{ ATA_CMD_CFA_ERASE,		"CFA ERASE SECTORS" },
		{ ATA_CMD_CFA_WRITE_MULT_NE,	"CFA WRITE MULTIPLE WITHOUT ERASE" },
		{ ATA_CMD_REQ_SENSE_DATA,	"REQUEST SENSE DATA EXT" },
		{ ATA_CMD_SANITIZE_DEVICE,	"SANITIZE DEVICE" },
		{ ATA_CMD_ZAC_MGMT_IN,		"ZAC MANAGEMENT IN" },
		{ ATA_CMD_ZAC_MGMT_OUT,		"ZAC MANAGEMENT OUT" },
		{ ATA_CMD_READ_LONG,		"READ LONG (with retries)" },
		{ ATA_CMD_READ_LONG_ONCE,	"READ LONG (without retries)" },
		{ ATA_CMD_WRITE_LONG,		"WRITE LONG (with retries)" },
		{ ATA_CMD_WRITE_LONG_ONCE,	"WRITE LONG (without retries)" },
		{ ATA_CMD_RESTORE,		"RECALIBRATE" },
		{ 0,				NULL } /* terminate list */
	};

	unsigned int i;
	for (i = 0; cmd_descr[i].text; i++)
		if (cmd_descr[i].command == command)
			return cmd_descr[i].text;
#endif

	return NULL;
}
EXPORT_SYMBOL_GPL(ata_get_cmd_descript);
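/*
 * Usage note: callers pass a raw taskfile command byte, so e.g.
 * ata_get_cmd_descript(ATA_CMD_FLUSH) yields "FLUSH CACHE".  With
 * CONFIG_ATA_VERBOSE_ERROR disabled the table is compiled out and every
 * lookup returns NULL, so callers must always handle a NULL result.
 */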
/**
 *	ata_eh_link_report - report error handling to user
 *	@link: ATA link EH is going on
 *
 *	Report EH to user.
 *
 *	LOCKING:
 *	None.
 */
static void ata_eh_link_report(struct ata_link *link)
{
	struct ata_port *ap = link->ap;
	struct ata_eh_context *ehc = &link->eh_context;
	const char *frozen, *desc;
	char tries_buf[6] = "";
	int tag, nr_failed = 0;

	if (ehc->i.flags & ATA_EHI_QUIET)
		return;

	desc = NULL;
	if (ehc->i.desc[0] != '\0')
		desc = ehc->i.desc;

	for (tag = 0; tag < ATA_MAX_QUEUE; tag++) {
		struct ata_queued_cmd *qc = __ata_qc_from_tag(ap, tag);

		if (!(qc->flags & ATA_QCFLAG_FAILED) ||
		    ata_dev_phys_link(qc->dev) != link ||
		    ((qc->flags & ATA_QCFLAG_QUIET) &&
		     qc->err_mask == AC_ERR_DEV))
			continue;
		if (qc->flags & ATA_QCFLAG_SENSE_VALID && !qc->err_mask)
			continue;

		nr_failed++;
	}

	if (!nr_failed && !ehc->i.err_mask)
		return;

	frozen = "";
	if (ap->pflags & ATA_PFLAG_FROZEN)
		frozen = " frozen";

	if (ap->eh_tries < ATA_EH_MAX_TRIES)
		snprintf(tries_buf, sizeof(tries_buf), " t%d",
			 ap->eh_tries);

	if (ehc->i.dev) {
		ata_dev_err(ehc->i.dev, "exception Emask 0x%x "
			    "SAct 0x%x SErr 0x%x action 0x%x%s%s\n",
			    ehc->i.err_mask, link->sactive, ehc->i.serror,
			    ehc->i.action, frozen, tries_buf);
		if (desc)
			ata_dev_err(ehc->i.dev, "%s\n", desc);
	} else {
		ata_link_err(link, "exception Emask 0x%x "
			     "SAct 0x%x SErr 0x%x action 0x%x%s%s\n",
			     ehc->i.err_mask, link->sactive, ehc->i.serror,
			     ehc->i.action, frozen, tries_buf);
		if (desc)
			ata_link_err(link, "%s\n", desc);
	}

#ifdef CONFIG_ATA_VERBOSE_ERROR
	if (ehc->i.serror)
		ata_link_err(link,
		  "SError: { %s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s}\n",
		  ehc->i.serror & SERR_DATA_RECOVERED ? "RecovData " : "",
		  ehc->i.serror & SERR_COMM_RECOVERED ? "RecovComm " : "",
		  ehc->i.serror & SERR_DATA ? "UnrecovData " : "",
		  ehc->i.serror & SERR_PERSISTENT ? "Persist " : "",
		  ehc->i.serror & SERR_PROTOCOL ? "Proto " : "",
		  ehc->i.serror & SERR_INTERNAL ? "HostInt " : "",
		  ehc->i.serror & SERR_PHYRDY_CHG ? "PHYRdyChg " : "",
		  ehc->i.serror & SERR_PHY_INT_ERR ? "PHYInt " : "",
		  ehc->i.serror & SERR_COMM_WAKE ? "CommWake " : "",
		  ehc->i.serror & SERR_10B_8B_ERR ? "10B8B " : "",
		  ehc->i.serror & SERR_DISPARITY ? "Dispar " : "",
		  ehc->i.serror & SERR_CRC ? "BadCRC " : "",
		  ehc->i.serror & SERR_HANDSHAKE ? "Handshk " : "",
		  ehc->i.serror & SERR_LINK_SEQ_ERR ? "LinkSeq " : "",
		  ehc->i.serror & SERR_TRANS_ST_ERROR ? "TrStaTrns " : "",
		  ehc->i.serror & SERR_UNRECOG_FIS ? "UnrecFIS " : "",
		  ehc->i.serror & SERR_DEV_XCHG ? "DevExch " : "");
#endif

	for (tag = 0; tag < ATA_MAX_QUEUE; tag++) {
		struct ata_queued_cmd *qc = __ata_qc_from_tag(ap, tag);
		struct ata_taskfile *cmd = &qc->tf, *res = &qc->result_tf;
		char data_buf[20] = "";
		char cdb_buf[70] = "";

		if (!(qc->flags & ATA_QCFLAG_FAILED) ||
		    ata_dev_phys_link(qc->dev) != link || !qc->err_mask)
			continue;

		if (qc->dma_dir != DMA_NONE) {
			static const char *dma_str[] = {
				[DMA_BIDIRECTIONAL]	= "bidi",
				[DMA_TO_DEVICE]		= "out",
				[DMA_FROM_DEVICE]	= "in",
			};
			static const char *prot_str[] = {
				[ATA_PROT_UNKNOWN]	= "unknown",
				[ATA_PROT_NODATA]	= "nodata",
				[ATA_PROT_PIO]		= "pio",
				[ATA_PROT_DMA]		= "dma",
				[ATA_PROT_NCQ]		= "ncq dma",
				[ATA_PROT_NCQ_NODATA]	= "ncq nodata",
				[ATAPI_PROT_NODATA]	= "nodata",
				[ATAPI_PROT_PIO]	= "pio",
				[ATAPI_PROT_DMA]	= "dma",
			};

			snprintf(data_buf, sizeof(data_buf), " %s %u %s",
				 prot_str[qc->tf.protocol], qc->nbytes,
				 dma_str[qc->dma_dir]);
		}

		if (ata_is_atapi(qc->tf.protocol)) {
			const u8 *cdb = qc->cdb;
			size_t cdb_len = qc->dev->cdb_len;

			if (qc->scsicmd) {
				cdb = qc->scsicmd->cmnd;
				cdb_len = qc->scsicmd->cmd_len;
			}
			__scsi_format_command(cdb_buf, sizeof(cdb_buf),
					      cdb, cdb_len);
		} else {
			const char *descr = ata_get_cmd_descript(cmd->command);
			if (descr)
				ata_dev_err(qc->dev, "failed command: %s\n",
					    descr);
		}

		ata_dev_err(qc->dev,
			"cmd %02x/%02x:%02x:%02x:%02x:%02x/%02x:%02x:%02x:%02x:%02x/%02x "
			"tag %d%s%s\n         "
			"res %02x/%02x:%02x:%02x:%02x:%02x/%02x:%02x:%02x:%02x:%02x/%02x "
			"Emask 0x%x (%s)%s\n",
			cmd->command, cmd->feature, cmd->nsect,
			cmd->lbal, cmd->lbam, cmd->lbah,
			cmd->hob_feature, cmd->hob_nsect,
			cmd->hob_lbal, cmd->hob_lbam, cmd->hob_lbah,
			cmd->device, qc->tag, data_buf, cdb_buf,
			res->command, res->feature, res->nsect,
			res->lbal, res->lbam, res->lbah,
			res->hob_feature, res->hob_nsect,
			res->hob_lbal, res->hob_lbam, res->hob_lbah,
			res->device, qc->err_mask, ata_err_string(qc->err_mask),
			qc->err_mask & AC_ERR_NCQ ? " <F>" : "");

#ifdef CONFIG_ATA_VERBOSE_ERROR
		if (res->command & (ATA_BUSY | ATA_DRDY | ATA_DF | ATA_DRQ |
				    ATA_SENSE | ATA_ERR)) {
			if (res->command & ATA_BUSY)
				ata_dev_err(qc->dev, "status: { Busy }\n");
			else
				ata_dev_err(qc->dev, "status: { %s%s%s%s%s}\n",
				  res->command & ATA_DRDY ? "DRDY " : "",
				  res->command & ATA_DF ? "DF " : "",
				  res->command & ATA_DRQ ? "DRQ " : "",
				  res->command & ATA_SENSE ? "SENSE " : "",
				  res->command & ATA_ERR ? "ERR " : "");
		}

		if (cmd->command != ATA_CMD_PACKET &&
		    (res->feature & (ATA_ICRC | ATA_UNC | ATA_AMNF |
				     ATA_IDNF | ATA_ABORTED)))
			ata_dev_err(qc->dev, "error: { %s%s%s%s%s}\n",
			  res->feature & ATA_ICRC ? "ICRC " : "",
			  res->feature & ATA_UNC ? "UNC " : "",
			  res->feature & ATA_AMNF ? "AMNF " : "",
			  res->feature & ATA_IDNF ? "IDNF " : "",
			  res->feature & ATA_ABORTED ? "ABRT " : "");
#endif
	}
}
/**
 *	ata_eh_report - report error handling to user
 *	@ap: ATA port to report EH about
 *
 *	Report EH to user.
 *
 *	LOCKING:
 *	None.
 */
void ata_eh_report(struct ata_port *ap)
{
	struct ata_link *link;

	ata_for_each_link(link, ap, HOST_FIRST)
		ata_eh_link_report(link);
}
static int ata_do_reset(struct ata_link *link, ata_reset_fn_t reset,
			unsigned int *classes, unsigned long deadline,
			bool clear_classes)
{
	struct ata_device *dev;

	if (clear_classes)
		ata_for_each_dev(dev, link, ALL)
			classes[dev->devno] = ATA_DEV_UNKNOWN;

	return reset(link, classes, deadline);
}
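/*
 * @clear_classes is true for the primary link so stale classification
 * from a previous try never leaks into this reset attempt; it is false
 * when hardresetting the slave link, which shares the classes[] array
 * with the master (see the slave-reset call in ata_eh_reset()).
 */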
static int ata_eh_followup_srst_needed(struct ata_link *link, int rc)
{
	if ((link->flags & ATA_LFLAG_NO_SRST) || ata_link_offline(link))
		return 0;
	if (rc == -EAGAIN)
		return 1;
	if (sata_pmp_supported(link->ap) && ata_is_host_link(link))
		return 1;
	return 0;
}
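/*
 * A follow-up softreset is needed when the hardreset explicitly asked
 * for one (-EAGAIN, typically because device classification is still
 * required) or when a port multiplier may sit on the host link, since
 * SRST is what classifies devices behind a PMP.  Offline links and
 * links flagged ATA_LFLAG_NO_SRST are exempt.
 */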
int ata_eh_reset(struct ata_link *link, int classify,
		 ata_prereset_fn_t prereset, ata_reset_fn_t softreset,
		 ata_reset_fn_t hardreset, ata_postreset_fn_t postreset)
{
	struct ata_port *ap = link->ap;
	struct ata_link *slave = ap->slave_link;
	struct ata_eh_context *ehc = &link->eh_context;
	struct ata_eh_context *sehc = slave ? &slave->eh_context : NULL;
	unsigned int *classes = ehc->classes;
	unsigned int lflags = link->flags;
	int verbose = !(ehc->i.flags & ATA_EHI_QUIET);
	int max_tries = 0, try = 0;
	struct ata_link *failed_link;
	struct ata_device *dev;
	unsigned long deadline, now;
	ata_reset_fn_t reset;
	unsigned long flags;
	u32 sstatus;
	int nr_unknown, rc;

	/*
	 * Prepare to reset
	 */
	while (ata_eh_reset_timeouts[max_tries] != ULONG_MAX)
		max_tries++;
	if (link->flags & ATA_LFLAG_RST_ONCE)
		max_tries = 1;
	if (link->flags & ATA_LFLAG_NO_HRST)
		hardreset = NULL;
	if (link->flags & ATA_LFLAG_NO_SRST)
		softreset = NULL;

	/* make sure each reset attempt is at least COOL_DOWN apart */
	if (ehc->i.flags & ATA_EHI_DID_RESET) {
		now = jiffies;
		WARN_ON(time_after(ehc->last_reset, now));
		deadline = ata_deadline(ehc->last_reset,
					ATA_EH_RESET_COOL_DOWN);
		if (time_before(now, deadline))
			schedule_timeout_uninterruptible(deadline - now);
	}

	spin_lock_irqsave(ap->lock, flags);
	ap->pflags |= ATA_PFLAG_RESETTING;
	spin_unlock_irqrestore(ap->lock, flags);

	ata_eh_about_to_do(link, NULL, ATA_EH_RESET);

	ata_for_each_dev(dev, link, ALL) {
		/* If we issue an SRST then an ATA drive (not ATAPI)
		 * may change configuration and be in PIO0 timing. If
		 * we do a hard reset (or are coming from power on)
		 * this is true for ATA or ATAPI. Until we've set a
		 * suitable controller mode we should not touch the
		 * bus as we may be talking too fast.
		 */
		dev->pio_mode = XFER_PIO_0;
		dev->dma_mode = 0xff;

		/* If the controller has a pio mode setup function
		 * then use it to set the chipset to rights. Don't
		 * touch the DMA setup as that will be dealt with when
		 * configuring devices.
		 */
		if (ap->ops->set_piomode)
			ap->ops->set_piomode(ap, dev);
	}

	/* prefer hardreset */
	reset = NULL;
	ehc->i.action &= ~ATA_EH_RESET;
	if (hardreset) {
		reset = hardreset;
		ehc->i.action |= ATA_EH_HARDRESET;
	} else if (softreset) {
		reset = softreset;
		ehc->i.action |= ATA_EH_SOFTRESET;
	}

	if (prereset) {
		unsigned long deadline = ata_deadline(jiffies,
						      ATA_EH_PRERESET_TIMEOUT);

		if (slave) {
			sehc->i.action &= ~ATA_EH_RESET;
			sehc->i.action |= ehc->i.action;
		}

		rc = prereset(link, deadline);

		/* If present, do prereset on slave link too.  Reset
		 * is skipped iff both master and slave links report
		 * -ENOENT or clear ATA_EH_RESET.
		 */
		if (slave && (rc == 0 || rc == -ENOENT)) {
			int tmp;

			tmp = prereset(slave, deadline);
			if (tmp != -ENOENT)
				rc = tmp;

			ehc->i.action |= sehc->i.action;
		}

		if (rc) {
			if (rc == -ENOENT) {
				ata_link_dbg(link, "port disabled--ignoring\n");
				ehc->i.action &= ~ATA_EH_RESET;

				ata_for_each_dev(dev, link, ALL)
					classes[dev->devno] = ATA_DEV_NONE;

				rc = 0;
			} else
				ata_link_err(link,
					     "prereset failed (errno=%d)\n",
					     rc);
			goto out;
		}
	}

	/* prereset() might have cleared ATA_EH_RESET.  If so,
	 * bang classes, thaw and return.
	 */
	if (reset && !(ehc->i.action & ATA_EH_RESET)) {
		ata_for_each_dev(dev, link, ALL)
			classes[dev->devno] = ATA_DEV_NONE;
		if ((ap->pflags & ATA_PFLAG_FROZEN) &&
		    ata_is_host_link(link))
			ata_eh_thaw_port(ap);
		rc = 0;
		goto out;
	}

 retry:
	/*
	 * Perform reset
	 */
	if (ata_is_host_link(link))
		ata_eh_freeze_port(ap);

	deadline = ata_deadline(jiffies, ata_eh_reset_timeouts[try++]);

	if (reset) {
		if (verbose)
			ata_link_info(link, "%s resetting link\n",
				      reset == softreset ? "soft" : "hard");

		/* mark that this EH session started with reset */
		ehc->last_reset = jiffies;
		if (reset == hardreset)
			ehc->i.flags |= ATA_EHI_DID_HARDRESET;
		else
			ehc->i.flags |= ATA_EHI_DID_SOFTRESET;

		rc = ata_do_reset(link, reset, classes, deadline, true);
		if (rc && rc != -EAGAIN) {
			failed_link = link;
			goto fail;
		}

		/* hardreset slave link if existent */
		if (slave && reset == hardreset) {
			int tmp;

			if (verbose)
				ata_link_info(slave, "hard resetting link\n");

			ata_eh_about_to_do(slave, NULL, ATA_EH_RESET);
			tmp = ata_do_reset(slave, reset, classes, deadline,
					   false);
			switch (tmp) {
			case -EAGAIN:
				rc = -EAGAIN;
			case 0:
				break;
			default:
				failed_link = slave;
				rc = tmp;
				goto fail;
			}
		}

		/* perform follow-up SRST if necessary */
		if (reset == hardreset &&
		    ata_eh_followup_srst_needed(link, rc)) {
			reset = softreset;

			if (!reset) {
				ata_link_err(link,
	     "follow-up softreset required but no softreset available\n");
				failed_link = link;
				rc = -EINVAL;
				goto fail;
			}

			ata_eh_about_to_do(link, NULL, ATA_EH_RESET);
			rc = ata_do_reset(link, reset, classes, deadline, true);
			if (rc) {
				failed_link = link;
				goto fail;
			}
		}
	} else {
		if (verbose)
			ata_link_info(link,
				      "no reset method available, skipping reset\n");
		if (!(lflags & ATA_LFLAG_ASSUME_CLASS))
			lflags |= ATA_LFLAG_ASSUME_ATA;
	}

	/*
	 * Post-reset processing
	 */
	ata_for_each_dev(dev, link, ALL) {
		/* After the reset, the device state is PIO 0 and the
		 * controller state is undefined.  Reset also wakes up
		 * drives from sleeping mode.
		 */
		dev->pio_mode = XFER_PIO_0;
		dev->flags &= ~ATA_DFLAG_SLEEPING;

		if (ata_phys_link_offline(ata_dev_phys_link(dev)))
			continue;

		/* apply class override */
		if (lflags & ATA_LFLAG_ASSUME_ATA)
			classes[dev->devno] = ATA_DEV_ATA;
		else if (lflags & ATA_LFLAG_ASSUME_SEMB)
			classes[dev->devno] = ATA_DEV_SEMB_UNSUP;
	}

	/* record current link speed */
	if (sata_scr_read(link, SCR_STATUS, &sstatus) == 0)
		link->sata_spd = (sstatus >> 4) & 0xf;
	if (slave && sata_scr_read(slave, SCR_STATUS, &sstatus) == 0)
		slave->sata_spd = (sstatus >> 4) & 0xf;

	/* thaw the port */
	if (ata_is_host_link(link))
		ata_eh_thaw_port(ap);

	/* postreset() should clear hardware SError.  Although SError
	 * is cleared during link resume, clearing SError here is
	 * necessary as some PHYs raise hotplug events after SRST.
	 * This introduces race condition where hotplug occurs between
	 * reset and here.  This race is mediated by cross checking
	 * link onlineness and classification result later.
	 */
	if (postreset) {
		postreset(link, classes);
		if (slave)
			postreset(slave, classes);
	}

	/*
	 * Some controllers can't be frozen very well and may set spurious
	 * error conditions during reset.  Clear accumulated error
	 * information and re-thaw the port if frozen.  As reset is the
	 * final recovery action and we cross check link onlineness against
	 * device classification later, no hotplug event is lost by this.
	 */
	spin_lock_irqsave(link->ap->lock, flags);
	memset(&link->eh_info, 0, sizeof(link->eh_info));
	if (slave)
		memset(&slave->eh_info, 0, sizeof(link->eh_info));
	ap->pflags &= ~ATA_PFLAG_EH_PENDING;
	spin_unlock_irqrestore(link->ap->lock, flags);

	if (ap->pflags & ATA_PFLAG_FROZEN)
		ata_eh_thaw_port(ap);

	/*
	 * Make sure onlineness and classification result correspond.
	 * Hotplug could have happened during reset and some
	 * controllers fail to wait while a drive is spinning up after
	 * being hotplugged causing misdetection.  By cross checking
	 * link on/offlineness and classification result, those
	 * conditions can be reliably detected and retried.
	 */
	nr_unknown = 0;
	ata_for_each_dev(dev, link, ALL) {
		if (ata_phys_link_online(ata_dev_phys_link(dev))) {
			if (classes[dev->devno] == ATA_DEV_UNKNOWN) {
				ata_dev_dbg(dev, "link online but device misclassified\n");
				classes[dev->devno] = ATA_DEV_NONE;
				nr_unknown++;
			}
		} else if (ata_phys_link_offline(ata_dev_phys_link(dev))) {
			if (ata_class_enabled(classes[dev->devno]))
				ata_dev_dbg(dev,
					    "link offline, clearing class %d to NONE\n",
					    classes[dev->devno]);
			classes[dev->devno] = ATA_DEV_NONE;
		} else if (classes[dev->devno] == ATA_DEV_UNKNOWN) {
			ata_dev_dbg(dev,
				    "link status unknown, clearing UNKNOWN to NONE\n");
			classes[dev->devno] = ATA_DEV_NONE;
		}
	}

	if (classify && nr_unknown) {
		if (try < max_tries) {
			ata_link_warn(link,
				      "link online but %d devices misclassified, retrying\n",
				      nr_unknown);
			failed_link = link;
			rc = -EAGAIN;
			goto fail;
		}
		ata_link_warn(link,
			      "link online but %d devices misclassified, "
			      "device detection might fail\n", nr_unknown);
	}

	/* reset successful, schedule revalidation */
	ata_eh_done(link, NULL, ATA_EH_RESET);
	if (slave)
		ata_eh_done(slave, NULL, ATA_EH_RESET);
	ehc->last_reset = jiffies;		/* update to completion time */
	ehc->i.action |= ATA_EH_REVALIDATE;
	link->lpm_policy = ATA_LPM_UNKNOWN;	/* reset LPM state */

	rc = 0;
 out:
	/* clear hotplug flag */
	ehc->i.flags &= ~ATA_EHI_HOTPLUGGED;
	if (slave)
		sehc->i.flags &= ~ATA_EHI_HOTPLUGGED;

	spin_lock_irqsave(ap->lock, flags);
	ap->pflags &= ~ATA_PFLAG_RESETTING;
	spin_unlock_irqrestore(ap->lock, flags);

	return rc;

 fail:
	/* if SCR isn't accessible on a fan-out port, PMP needs to be reset */
	if (!ata_is_host_link(link) &&
	    sata_scr_read(link, SCR_STATUS, &sstatus))
		rc = -ERESTART;

	if (try >= max_tries) {
		/*
		 * Thaw host port even if reset failed, so that the port
		 * can be retried on the next phy event.  This risks
		 * repeated EH runs but seems to be a better tradeoff than
		 * shutting down a port after a botched hotplug attempt.
		 */
		if (ata_is_host_link(link))
			ata_eh_thaw_port(ap);
		goto out;
	}

	now = jiffies;
	if (time_before(now, deadline)) {
		unsigned long delta = deadline - now;

		ata_link_warn(failed_link,
			"reset failed (errno=%d), retrying in %u secs\n",
			rc, DIV_ROUND_UP(jiffies_to_msecs(delta), 1000));

		ata_eh_release(ap);
		while (delta)
			delta = schedule_timeout_uninterruptible(delta);
		ata_eh_acquire(ap);
	}

	/*
	 * While disks spinup behind PMP, some controllers fail sending SRST.
	 * They need to be reset - as well as the PMP - before retrying.
	 */
	if (rc == -ERESTART) {
		if (ata_is_host_link(link))
			ata_eh_thaw_port(ap);
		goto retry;
	}

	if (try == max_tries - 1) {
		sata_down_spd_limit(link, 0);
		if (slave)
			sata_down_spd_limit(slave, 0);
	} else if (rc == -EPIPE)
		sata_down_spd_limit(failed_link, 0);

	if (hardreset)
		reset = hardreset;
	goto retry;
}
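/*
 * The fail path above walks ata_eh_reset_timeouts[]: each failed
 * attempt jumps back to the retry label with the next, longer timeout
 * (sleeping out the remainder of the current deadline first) and only
 * gives up once the table is exhausted.  -EPIPE and the final attempt
 * additionally lower the SATA link speed before retrying.
 */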
static inline void ata_eh_pull_park_action(struct ata_port *ap)
{
	struct ata_link *link;
	struct ata_device *dev;
	unsigned long flags;

	/*
	 * This function can be thought of as an extended version of
	 * ata_eh_about_to_do() specially crafted to accommodate the
	 * requirements of ATA_EH_PARK handling. Since the EH thread
	 * does not leave the do {} while () loop in ata_eh_recover as
	 * long as the timeout for a park request to *one* device on
	 * the port has not expired, and since we still want to pick
	 * up park requests to other devices on the same port or
	 * timeout updates for the same device, we have to pull
	 * ATA_EH_PARK actions from eh_info into eh_context.i
	 * ourselves at the beginning of each pass over the loop.
	 *
	 * Additionally, all write accesses to &ap->park_req_pending
	 * through reinit_completion() (see below) or complete_all()
	 * (see ata_scsi_park_store()) are protected by the host lock.
	 * As a result we have that park_req_pending.done is zero on
	 * exit from this function, i.e. when ATA_EH_PARK actions for
	 * *all* devices on port ap have been pulled into the
	 * respective eh_context structs. If, and only if,
	 * park_req_pending.done is non-zero by the time we reach
	 * wait_for_completion_timeout(), another ATA_EH_PARK action
	 * has been scheduled for at least one of the devices on port
	 * ap and we have to cycle over the do {} while () loop in
	 * ata_eh_recover() again.
	 */

	spin_lock_irqsave(ap->lock, flags);
	reinit_completion(&ap->park_req_pending);
	ata_for_each_link(link, ap, EDGE) {
		ata_for_each_dev(dev, link, ALL) {
			struct ata_eh_info *ehi = &link->eh_info;

			link->eh_context.i.dev_action[dev->devno] |=
				ehi->dev_action[dev->devno] & ATA_EH_PARK;
			ata_eh_clear_action(link, dev, ehi, ATA_EH_PARK);
		}
	}
	spin_unlock_irqrestore(ap->lock, flags);
}
static void ata_eh_park_issue_cmd(struct ata_device *dev, int park)
{
	struct ata_eh_context *ehc = &dev->link->eh_context;
	struct ata_taskfile tf;
	unsigned int err_mask;

	ata_tf_init(dev, &tf);
	if (park) {
		ehc->unloaded_mask |= 1 << dev->devno;
		tf.command = ATA_CMD_IDLEIMMEDIATE;
		tf.feature = 0x44;
		tf.lbal = 0x4c;
		tf.lbam = 0x4e;
		tf.lbah = 0x55;
	} else {
		ehc->unloaded_mask &= ~(1 << dev->devno);
		tf.command = ATA_CMD_CHK_POWER;
	}

	tf.flags |= ATA_TFLAG_DEVICE | ATA_TFLAG_ISADDR;
	tf.protocol = ATA_PROT_NODATA;
	err_mask = ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0, 0);
	if (park && (err_mask || tf.lbal != 0xc4)) {
		ata_dev_err(dev, "head unload failed!\n");
		ehc->unloaded_mask &= ~(1 << dev->devno);
	}
}
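/*
 * The parking taskfile built above is IDLE IMMEDIATE with the unload
 * feature: FEATURES=0x44 plus the 0x55/0x4e/0x4c LBA signature requests
 * a head unload, and per the ATA spec a drive that actually parked its
 * heads answers with LBAL=0xc4, hence the check above.  Unparking just
 * issues a harmless CHECK POWER MODE, since receiving any new command
 * makes the drive reload its heads.
 */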
static int ata_eh_revalidate_and_attach(struct ata_link *link,
					struct ata_device **r_failed_dev)
{
	struct ata_port *ap = link->ap;
	struct ata_eh_context *ehc = &link->eh_context;
	struct ata_device *dev;
	unsigned int new_mask = 0;
	unsigned long flags;
	int rc = 0;

	DPRINTK("ENTER\n");

	/* For PATA drive side cable detection to work, IDENTIFY must
	 * be done backwards such that PDIAG- is released by the slave
	 * device before the master device is identified.
	 */
	ata_for_each_dev(dev, link, ALL_REVERSE) {
		unsigned int action = ata_eh_dev_action(dev);
		unsigned int readid_flags = 0;

		if (ehc->i.flags & ATA_EHI_DID_RESET)
			readid_flags |= ATA_READID_POSTRESET;

		if ((action & ATA_EH_REVALIDATE) && ata_dev_enabled(dev)) {
			WARN_ON(dev->class == ATA_DEV_PMP);

			if (ata_phys_link_offline(ata_dev_phys_link(dev))) {
				rc = -EIO;
				goto err;
			}

			ata_eh_about_to_do(link, dev, ATA_EH_REVALIDATE);
			rc = ata_dev_revalidate(dev, ehc->classes[dev->devno],
						readid_flags);
			if (rc)
				goto err;

			ata_eh_done(link, dev, ATA_EH_REVALIDATE);

			/* Configuration may have changed, reconfigure
			 * transfer mode.
			 */
			ehc->i.flags |= ATA_EHI_SETMODE;

			/* schedule the scsi_rescan_device() here */
			schedule_work(&(ap->scsi_rescan_task));
		} else if (dev->class == ATA_DEV_UNKNOWN &&
			   ehc->tries[dev->devno] &&
			   ata_class_enabled(ehc->classes[dev->devno])) {
			/* Temporarily set dev->class, it will be
			 * permanently set once all configurations are
			 * complete.  This is necessary because new
			 * device configuration is done in two
			 * separate loops.
			 */
			dev->class = ehc->classes[dev->devno];

			if (dev->class == ATA_DEV_PMP)
				rc = sata_pmp_attach(dev);
			else
				rc = ata_dev_read_id(dev, &dev->class,
						     readid_flags, dev->id);

			/* read_id might have changed class, store and reset */
			ehc->classes[dev->devno] = dev->class;
			dev->class = ATA_DEV_UNKNOWN;

			switch (rc) {
			case 0:
				/* clear error info accumulated during probe */
				ata_ering_clear(&dev->ering);
				new_mask |= 1 << dev->devno;
				break;
			case -ENOENT:
				/* IDENTIFY was issued to non-existent
				 * device.  No need to reset.  Just
				 * thaw and ignore the device.
				 */
				ata_eh_thaw_port(ap);
				break;
			default:
				goto err;
			}
		}
	}

	/* PDIAG- should have been released, ask cable type if post-reset */
	if ((ehc->i.flags & ATA_EHI_DID_RESET) && ata_is_host_link(link)) {
		if (ap->ops->cable_detect)
			ap->cbl = ap->ops->cable_detect(ap);
		ata_force_cbl(ap);
	}

	/* Configure new devices forward such that user doesn't see
	 * device detection messages backwards.
	 */
	ata_for_each_dev(dev, link, ALL) {
		if (!(new_mask & (1 << dev->devno)))
			continue;

		dev->class = ehc->classes[dev->devno];

		if (dev->class == ATA_DEV_PMP)
			continue;

		ehc->i.flags |= ATA_EHI_PRINTINFO;
		rc = ata_dev_configure(dev);
		ehc->i.flags &= ~ATA_EHI_PRINTINFO;
		if (rc) {
			dev->class = ATA_DEV_UNKNOWN;
			goto err;
		}

		spin_lock_irqsave(ap->lock, flags);
		ap->pflags |= ATA_PFLAG_SCSI_HOTPLUG;
		spin_unlock_irqrestore(ap->lock, flags);

		/* new device discovered, configure xfermode */
		ehc->i.flags |= ATA_EHI_SETMODE;
	}

	return 0;

 err:
	*r_failed_dev = dev;
	DPRINTK("EXIT rc=%d\n", rc);
	return rc;
}
/**
 *	ata_set_mode - Program timings and issue SET FEATURES - XFER
 *	@link: link on which timings will be programmed
 *	@r_failed_dev: out parameter for failed device
 *
 *	Set ATA device disk transfer mode (PIO3, UDMA6, etc.).  If
 *	ata_set_mode() fails, pointer to the failing device is
 *	returned in @r_failed_dev.
 *
 *	LOCKING:
 *	PCI/etc. bus probe sem.
 *
 *	RETURNS:
 *	0 on success, negative errno otherwise
 */
int ata_set_mode(struct ata_link *link, struct ata_device **r_failed_dev)
{
	struct ata_port *ap = link->ap;
	struct ata_device *dev;
	int rc;

	/* if data transfer is verified, clear DUBIOUS_XFER on ering top */
	ata_for_each_dev(dev, link, ENABLED) {
		if (!(dev->flags & ATA_DFLAG_DUBIOUS_XFER)) {
			struct ata_ering_entry *ent;

			ent = ata_ering_top(&dev->ering);
			if (ent)
				ent->eflags &= ~ATA_EFLAG_DUBIOUS_XFER;
		}
	}

	/* has private set_mode? */
	if (ap->ops->set_mode)
		rc = ap->ops->set_mode(link, r_failed_dev);
	else
		rc = ata_do_set_mode(link, r_failed_dev);

	/* if transfer mode has changed, set DUBIOUS_XFER on device */
	ata_for_each_dev(dev, link, ENABLED) {
		struct ata_eh_context *ehc = &link->eh_context;
		u8 saved_xfer_mode = ehc->saved_xfer_mode[dev->devno];
		u8 saved_ncq = !!(ehc->saved_ncq_enabled & (1 << dev->devno));

		if (dev->xfer_mode != saved_xfer_mode ||
		    ata_ncq_enabled(dev) != saved_ncq)
			dev->flags |= ATA_DFLAG_DUBIOUS_XFER;
	}

	return rc;
}
/**
 *	atapi_eh_clear_ua - Clear ATAPI UNIT ATTENTION after reset
 *	@dev: ATAPI device to clear UA for
 *
 *	Resets and other operations can make an ATAPI device raise
 *	UNIT ATTENTION which causes the next operation to fail.  This
 *	function clears UA.
 *
 *	LOCKING:
 *	EH context (may sleep).
 *
 *	RETURNS:
 *	0 on success, -errno on failure.
 */
static int atapi_eh_clear_ua(struct ata_device *dev)
{
	int i;

	for (i = 0; i < ATA_EH_UA_TRIES; i++) {
		u8 *sense_buffer = dev->link->ap->sector_buf;
		u8 sense_key = 0;
		unsigned int err_mask;

		err_mask = atapi_eh_tur(dev, &sense_key);
		if (err_mask != 0 && err_mask != AC_ERR_DEV) {
			ata_dev_warn(dev,
				     "TEST_UNIT_READY failed (err_mask=0x%x)\n",
				     err_mask);
			return -EIO;
		}

		if (!err_mask || sense_key != UNIT_ATTENTION)
			return 0;

		err_mask = atapi_eh_request_sense(dev, sense_buffer, sense_key);
		if (err_mask) {
			ata_dev_warn(dev, "failed to clear "
				"UNIT ATTENTION (err_mask=0x%x)\n", err_mask);
			return -EIO;
		}
	}

	ata_dev_warn(dev, "UNIT ATTENTION persists after %d tries\n",
		     ATA_EH_UA_TRIES);

	return 0;
}
/**
 *	ata_eh_maybe_retry_flush - Retry FLUSH if necessary
 *	@dev: ATA device which may need FLUSH retry
 *
 *	If @dev failed FLUSH, it needs to be reported upper layer
 *	immediately as it means that @dev failed to remap and already
 *	lost at least a sector and further FLUSH retrials won't make
 *	any difference to the lost sector.  However, if FLUSH failed
 *	for other reasons, for example transmission error, FLUSH needs
 *	to be retried.
 *
 *	This function determines whether FLUSH failure retry is
 *	necessary and performs it if so.
 *
 *	RETURNS:
 *	0 if EH can continue, -errno if EH needs to be repeated.
 */
static int ata_eh_maybe_retry_flush(struct ata_device *dev)
{
	struct ata_link *link = dev->link;
	struct ata_port *ap = link->ap;
	struct ata_queued_cmd *qc;
	struct ata_taskfile tf;
	unsigned int err_mask;
	int rc = 0;

	/* did flush fail for this device? */
	if (!ata_tag_valid(link->active_tag))
		return 0;

	qc = __ata_qc_from_tag(ap, link->active_tag);
	if (qc->dev != dev || (qc->tf.command != ATA_CMD_FLUSH_EXT &&
			       qc->tf.command != ATA_CMD_FLUSH))
		return 0;

	/* if the device failed it, it should be reported to upper layers */
	if (qc->err_mask & AC_ERR_DEV)
		return 0;

	/* flush failed for some other reason, give it another shot */
	ata_tf_init(dev, &tf);

	tf.command = qc->tf.command;
	tf.flags |= ATA_TFLAG_DEVICE;
	tf.protocol = ATA_PROT_NODATA;

	ata_dev_warn(dev, "retrying FLUSH 0x%x Emask 0x%x\n",
		       tf.command, qc->err_mask);

	err_mask = ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0, 0);
	if (!err_mask) {
		/*
		 * FLUSH is complete but there's no way to
		 * successfully complete a failed command from EH.
		 * Making sure retry is allowed at least once and
		 * retrying it should do the trick - whatever was in
		 * the cache is already on the platter and this won't
		 * cause infinite loop.
		 */
		qc->scsicmd->allowed = max(qc->scsicmd->allowed, 1);
	} else {
		ata_dev_warn(dev, "FLUSH failed Emask 0x%x\n",
			       err_mask);
		rc = -EIO;

		/* if device failed it, report it to upper layers */
		if (err_mask & AC_ERR_DEV) {
			qc->err_mask |= AC_ERR_DEV;
			qc->result_tf = tf;
			if (!(ap->pflags & ATA_PFLAG_FROZEN))
				rc = 0;
		}
	}
	return rc;
}
/**
 *	ata_eh_set_lpm - configure SATA interface power management
 *	@link: link to configure power management
 *	@policy: the link power management policy
 *	@r_failed_dev: out parameter for failed device
 *
 *	Enable SATA Interface power management.  This will enable
 *	Device Interface Power Management (DIPM) for min_power
 *	policy, and then call driver specific callbacks for
 *	enabling Host Initiated Power management.
 *
 *	LOCKING:
 *	EH context.
 *
 *	RETURNS:
 *	0 on success, -errno on failure.
 */
static int ata_eh_set_lpm(struct ata_link *link, enum ata_lpm_policy policy,
			  struct ata_device **r_failed_dev)
{
	struct ata_port *ap = ata_is_host_link(link) ? link->ap : NULL;
	struct ata_eh_context *ehc = &link->eh_context;
	struct ata_device *dev, *link_dev = NULL, *lpm_dev = NULL;
	enum ata_lpm_policy old_policy = link->lpm_policy;
	bool no_dipm = link->ap->flags & ATA_FLAG_NO_DIPM;
	unsigned int hints = ATA_LPM_EMPTY | ATA_LPM_HIPM;
	unsigned int err_mask;
	int rc;

	/* if the link or host doesn't do LPM, noop */
	if ((link->flags & ATA_LFLAG_NO_LPM) || (ap && !ap->ops->set_lpm))
		return 0;

	/*
	 * DIPM is enabled only for MIN_POWER as some devices
	 * misbehave when the host NACKs transition to SLUMBER.  Order
	 * device and link configurations such that the host always
	 * allows DIPM requests.
	 */
	ata_for_each_dev(dev, link, ENABLED) {
		bool hipm = ata_id_has_hipm(dev->id);
		bool dipm = ata_id_has_dipm(dev->id) && !no_dipm;

		/* find the first enabled and LPM enabled devices */
		if (!link_dev)
			link_dev = dev;

		if (!lpm_dev && (hipm || dipm))
			lpm_dev = dev;

		hints &= ~ATA_LPM_EMPTY;
		if (!hipm)
			hints &= ~ATA_LPM_HIPM;

		/* disable DIPM before changing link config */
		if (policy != ATA_LPM_MIN_POWER && dipm) {
			err_mask = ata_dev_set_feature(dev,
					SETFEATURES_SATA_DISABLE, SATA_DIPM);
			if (err_mask && err_mask != AC_ERR_DEV) {
				ata_dev_warn(dev,
					     "failed to disable DIPM, Emask 0x%x\n",
					     err_mask);
				rc = -EIO;
				goto fail;
			}
		}
	}

	if (ap) {
		rc = ap->ops->set_lpm(link, policy, hints);
		if (!rc && ap->slave_link)
			rc = ap->ops->set_lpm(ap->slave_link, policy, hints);
	} else
		rc = sata_pmp_set_lpm(link, policy, hints);

	/*
	 * Attribute link config failure to the first (LPM) enabled
	 * device on the link.
	 */
	if (rc) {
		if (rc == -EOPNOTSUPP) {
			link->flags |= ATA_LFLAG_NO_LPM;
			return 0;
		}
		dev = lpm_dev ? lpm_dev : link_dev;
		goto fail;
	}

	/*
	 * Low level driver acked the transition.  Issue DIPM command
	 * with the new policy set.
	 */
	link->lpm_policy = policy;
	if (ap && ap->slave_link)
		ap->slave_link->lpm_policy = policy;

	/* host config updated, enable DIPM if transitioning to MIN_POWER */
	ata_for_each_dev(dev, link, ENABLED) {
		if (policy == ATA_LPM_MIN_POWER && !no_dipm &&
		    ata_id_has_dipm(dev->id)) {
			err_mask = ata_dev_set_feature(dev,
					SETFEATURES_SATA_ENABLE, SATA_DIPM);
			if (err_mask && err_mask != AC_ERR_DEV) {
				ata_dev_warn(dev,
					"failed to enable DIPM, Emask 0x%x\n",
					err_mask);
				rc = -EIO;
				goto fail;
			}
		}
	}

	link->last_lpm_change = jiffies;
	link->flags |= ATA_LFLAG_CHANGED;

	return 0;

fail:
	/* restore the old policy */
	link->lpm_policy = old_policy;
	if (ap && ap->slave_link)
		ap->slave_link->lpm_policy = old_policy;

	/* if no device or only one more chance is left, disable LPM */
	if (!dev || ehc->tries[dev->devno] <= 2) {
		ata_link_warn(link, "disabling LPM on the link\n");
		link->flags |= ATA_LFLAG_NO_LPM;
	}
	if (r_failed_dev)
		*r_failed_dev = dev;
	return rc;
}
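/*
 * Ordering matters above: when leaving MIN_POWER, DIPM is disabled on
 * every device *before* the controller callback reconfigures the link,
 * and when entering MIN_POWER it is enabled only *after* the controller
 * has accepted the new policy.  Either way the host side is always
 * configured to grant any DIPM request a device may issue.
 */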
int ata_link_nr_enabled(struct ata_link *link)
{
	struct ata_device *dev;
	int cnt = 0;

	ata_for_each_dev(dev, link, ENABLED)
		cnt++;
	return cnt;
}

static int ata_link_nr_vacant(struct ata_link *link)
{
	struct ata_device *dev;
	int cnt = 0;

	ata_for_each_dev(dev, link, ALL)
		if (dev->class == ATA_DEV_UNKNOWN)
			cnt++;
	return cnt;
}
static int ata_eh_skip_recovery(struct ata_link *link)
{
	struct ata_port *ap = link->ap;
	struct ata_eh_context *ehc = &link->eh_context;
	struct ata_device *dev;

	/* skip disabled links */
	if (link->flags & ATA_LFLAG_DISABLED)
		return 1;

	/* skip if explicitly requested */
	if (ehc->i.flags & ATA_EHI_NO_RECOVERY)
		return 1;

	/* thaw frozen port and recover failed devices */
	if ((ap->pflags & ATA_PFLAG_FROZEN) || ata_link_nr_enabled(link))
		return 0;

	/* reset at least once if reset is requested */
	if ((ehc->i.action & ATA_EH_RESET) &&
	    !(ehc->i.flags & ATA_EHI_DID_RESET))
		return 0;

	/* skip if class codes for all vacant slots are ATA_DEV_NONE */
	ata_for_each_dev(dev, link, ALL) {
		if (dev->class == ATA_DEV_UNKNOWN &&
		    ehc->classes[dev->devno] != ATA_DEV_NONE)
			return 0;
	}

	return 1;
}
static int ata_count_probe_trials_cb(struct ata_ering_entry *ent, void *void_arg)
{
	u64 interval = msecs_to_jiffies(ATA_EH_PROBE_TRIAL_INTERVAL);
	u64 now = get_jiffies_64();
	int *trials = void_arg;

	if ((ent->eflags & ATA_EFLAG_OLD_ER) ||
	    (ent->timestamp < now - min(now, interval)))
		return -1;

	(*trials)++;
	return 0;
}
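/*
 * Note on the callback contract: ata_ering_map() stops iterating as
 * soon as the callback returns non-zero, so the -1 above cuts the walk
 * off at the first entry that is too old or that a previous
 * ata_ering_clear() marked stale (ATA_EFLAG_OLD_ER); only recent,
 * consecutive probe failures end up counted in *trials.
 */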
static int ata_eh_schedule_probe(struct ata_device *dev)
{
	struct ata_eh_context *ehc = &dev->link->eh_context;
	struct ata_link *link = ata_dev_phys_link(dev);
	int trials = 0;

	if (!(ehc->i.probe_mask & (1 << dev->devno)) ||
	    (ehc->did_probe_mask & (1 << dev->devno)))
		return 0;

	ata_eh_detach_dev(dev);
	ata_dev_init(dev);
	ehc->did_probe_mask |= (1 << dev->devno);
	ehc->i.action |= ATA_EH_RESET;
	ehc->saved_xfer_mode[dev->devno] = 0;
	ehc->saved_ncq_enabled &= ~(1 << dev->devno);

	/* the link maybe in a deep sleep, wake it up */
	if (link->lpm_policy > ATA_LPM_MAX_POWER) {
		if (ata_is_host_link(link))
			link->ap->ops->set_lpm(link, ATA_LPM_MAX_POWER,
					       ATA_LPM_EMPTY);
		else
			sata_pmp_set_lpm(link, ATA_LPM_MAX_POWER,
					 ATA_LPM_EMPTY);
	}

	/* Record and count probe trials on the ering.  The specific
	 * error mask used is irrelevant.  Because a successful device
	 * detection clears the ering, this count accumulates only if
	 * there are consecutive failed probes.
	 *
	 * If the count is equal to or higher than ATA_EH_PROBE_TRIALS
	 * in the last ATA_EH_PROBE_TRIAL_INTERVAL, link speed is
	 * forced to 1.5Gbps.
	 *
	 * This is to work around cases where failed link speed
	 * negotiation results in device misdetection leading to
	 * infinite DEVXCHG or PHRDY CHG events.
	 */
	ata_ering_record(&dev->ering, 0, AC_ERR_OTHER);
	ata_ering_map(&dev->ering, ata_count_probe_trials_cb, &trials);

	if (trials > ATA_EH_PROBE_TRIALS)
		sata_down_spd_limit(link, 1);

	return 1;
}
static int ata_eh_handle_dev_fail(struct ata_device *dev, int err)
{
	struct ata_eh_context *ehc = &dev->link->eh_context;

	/* -EAGAIN from EH routine indicates retry without prejudice.
	 * The requester is responsible for ensuring forward progress.
	 */
	if (err != -EAGAIN)
		ehc->tries[dev->devno]--;

	switch (err) {
	case -ENODEV:
		/* device missing or wrong IDENTIFY data, schedule probing */
		ehc->i.probe_mask |= (1 << dev->devno);
	case -EINVAL:
		/* give it just one more chance */
		ehc->tries[dev->devno] = min(ehc->tries[dev->devno], 1);
	case -EIO:
		if (ehc->tries[dev->devno] == 1) {
			/* This is the last chance, better to slow
			 * down than lose it.
			 */
			sata_down_spd_limit(ata_dev_phys_link(dev), 0);
			if (dev->pio_mode > XFER_PIO_0)
				ata_down_xfermask_limit(dev, ATA_DNXFER_PIO);
		}
	}

	if (ata_dev_enabled(dev) && !ehc->tries[dev->devno]) {
		/* disable device if it has used up all its chances */
		ata_dev_disable(dev);

		/* detach if offline */
		if (ata_phys_link_offline(ata_dev_phys_link(dev)))
			ata_eh_detach_dev(dev);

		/* schedule probe if necessary */
		if (ata_eh_schedule_probe(dev)) {
			ehc->tries[dev->devno] = ATA_EH_DEV_TRIES;
			memset(ehc->cmd_timeout_idx[dev->devno], 0,
			       sizeof(ehc->cmd_timeout_idx[dev->devno]));
		}

		return 1;
	} else {
		ehc->i.action |= ATA_EH_RESET;
		return 0;
	}
}
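/*
 * The switch above is a deliberate fall-through ladder: -ENODEV
 * schedules probing and then, like -EINVAL, leaves the device just one
 * more try; all three errnos share the -EIO handling that drops the
 * link speed (and the PIO mode) when that last try comes up.
 */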
/**
 *	ata_eh_recover - recover host port after error
 *	@ap: host port to recover
 *	@prereset: prereset method (can be NULL)
 *	@softreset: softreset method (can be NULL)
 *	@hardreset: hardreset method (can be NULL)
 *	@postreset: postreset method (can be NULL)
 *	@r_failed_link: out parameter for failed link
 *
 *	This is the alpha and omega, eum and yang, heart and soul of
 *	libata exception handling.  On entry, actions required to
 *	recover each link and hotplug requests are recorded in the
 *	link's eh_context.  This function executes all the operations
 *	with appropriate retrials and fallbacks to resurrect failed
 *	devices, detach goners and greet newcomers.
 *
 *	LOCKING:
 *	Kernel thread context (may sleep).
 *
 *	RETURNS:
 *	0 on success, -errno on failure.
 */
int ata_eh_recover(struct ata_port *ap, ata_prereset_fn_t prereset,
		   ata_reset_fn_t softreset, ata_reset_fn_t hardreset,
		   ata_postreset_fn_t postreset,
		   struct ata_link **r_failed_link)
{
	struct ata_link *link;
	struct ata_device *dev;
	int rc, nr_fails;
	unsigned long flags, deadline;

	DPRINTK("ENTER\n");

	/* prep for recovery */
	ata_for_each_link(link, ap, EDGE) {
		struct ata_eh_context *ehc = &link->eh_context;

		/* re-enable link? */
		if (ehc->i.action & ATA_EH_ENABLE_LINK) {
			ata_eh_about_to_do(link, NULL, ATA_EH_ENABLE_LINK);
			spin_lock_irqsave(ap->lock, flags);
			link->flags &= ~ATA_LFLAG_DISABLED;
			spin_unlock_irqrestore(ap->lock, flags);
			ata_eh_done(link, NULL, ATA_EH_ENABLE_LINK);
		}

		ata_for_each_dev(dev, link, ALL) {
			if (link->flags & ATA_LFLAG_NO_RETRY)
				ehc->tries[dev->devno] = 1;
			else
				ehc->tries[dev->devno] = ATA_EH_DEV_TRIES;

			/* collect port action mask recorded in dev actions */
			ehc->i.action |= ehc->i.dev_action[dev->devno] &
					 ~ATA_EH_PERDEV_MASK;
			ehc->i.dev_action[dev->devno] &= ATA_EH_PERDEV_MASK;

			/* process hotplug request */
			if (dev->flags & ATA_DFLAG_DETACH)
				ata_eh_detach_dev(dev);

			/* schedule probe if necessary */
			if (!ata_dev_enabled(dev))
				ata_eh_schedule_probe(dev);
		}
	}

 retry:
	rc = 0;

	/* if UNLOADING, finish immediately */
	if (ap->pflags & ATA_PFLAG_UNLOADING)
		goto out;

	/* prep for EH */
	ata_for_each_link(link, ap, EDGE) {
		struct ata_eh_context *ehc = &link->eh_context;

		/* skip EH if possible. */
		if (ata_eh_skip_recovery(link))
			ehc->i.action = 0;

		ata_for_each_dev(dev, link, ALL)
			ehc->classes[dev->devno] = ATA_DEV_UNKNOWN;
	}

	/* reset */
	ata_for_each_link(link, ap, EDGE) {
		struct ata_eh_context *ehc = &link->eh_context;

		if (!(ehc->i.action & ATA_EH_RESET))
			continue;

		rc = ata_eh_reset(link, ata_link_nr_vacant(link),
				  prereset, softreset, hardreset, postreset);
		if (rc) {
			ata_link_err(link, "reset failed, giving up\n");
			goto out;
		}
	}

	do {
		unsigned long now;

		/*
		 * clears ATA_EH_PARK in eh_info and resets
		 * ap->park_req_pending
		 */
		ata_eh_pull_park_action(ap);

		deadline = jiffies;
		ata_for_each_link(link, ap, EDGE) {
			ata_for_each_dev(dev, link, ALL) {
				struct ata_eh_context *ehc = &link->eh_context;
				unsigned long tmp;

				if (dev->class != ATA_DEV_ATA &&
				    dev->class != ATA_DEV_ZAC)
					continue;
				if (!(ehc->i.dev_action[dev->devno] &
				      ATA_EH_PARK))
					continue;
				tmp = dev->unpark_deadline;
				if (time_before(deadline, tmp))
					deadline = tmp;
				else if (time_before_eq(tmp, jiffies))
					continue;
				if (ehc->unloaded_mask & (1 << dev->devno))
					continue;

				ata_eh_park_issue_cmd(dev, 1);
			}
		}

		now = jiffies;
		if (time_before_eq(deadline, now))
			break;

		ata_eh_release(ap);
		deadline = wait_for_completion_timeout(&ap->park_req_pending,
						       deadline - now);
		ata_eh_acquire(ap);
	} while (deadline);
	ata_for_each_link(link, ap, EDGE) {
		ata_for_each_dev(dev, link, ALL) {
			if (!(link->eh_context.unloaded_mask &
			      (1 << dev->devno)))
				continue;

			ata_eh_park_issue_cmd(dev, 0);
			ata_eh_done(link, dev, ATA_EH_PARK);
		}
	}

	/* the rest */
	nr_fails = 0;
	ata_for_each_link(link, ap, PMP_FIRST) {
		struct ata_eh_context *ehc = &link->eh_context;

		if (sata_pmp_attached(ap) && ata_is_host_link(link))
			goto config_lpm;

		/* revalidate existing devices and attach new ones */
		rc = ata_eh_revalidate_and_attach(link, &dev);
		if (rc)
			goto rest_fail;

		/* if PMP got attached, return, pmp EH will take care of it */
		if (link->device->class == ATA_DEV_PMP) {
			ehc->i.action = 0;
			return 0;
		}

		/* configure transfer mode if necessary */
		if (ehc->i.flags & ATA_EHI_SETMODE) {
			rc = ata_set_mode(link, &dev);
			if (rc)
				goto rest_fail;
			ehc->i.flags &= ~ATA_EHI_SETMODE;
		}

		/* If reset has been issued, clear UA to avoid
		 * disrupting the current users of the device.
		 */
		if (ehc->i.flags & ATA_EHI_DID_RESET) {
			ata_for_each_dev(dev, link, ALL) {
				if (dev->class != ATA_DEV_ATAPI)
					continue;
				rc = atapi_eh_clear_ua(dev);
				if (rc)
					goto rest_fail;
				if (zpodd_dev_enabled(dev))
					zpodd_post_poweron(dev);
			}
		}

		/* retry flush if necessary */
		ata_for_each_dev(dev, link, ALL) {
			if (dev->class != ATA_DEV_ATA &&
			    dev->class != ATA_DEV_ZAC)
				continue;
			rc = ata_eh_maybe_retry_flush(dev);
			if (rc)
				goto rest_fail;
		}

	config_lpm:
		/* configure link power saving */
		if (link->lpm_policy != ap->target_lpm_policy) {
			rc = ata_eh_set_lpm(link, ap->target_lpm_policy, &dev);
			if (rc)
				goto rest_fail;
		}

		/* this link is okay now */
		ehc->i.flags = 0;
		continue;

	rest_fail:
		nr_fails++;
		if (dev)
			ata_eh_handle_dev_fail(dev, rc);

		if (ap->pflags & ATA_PFLAG_FROZEN) {
			/* PMP reset requires working host port.
			 * Can't retry if it's frozen.
			 */
			if (sata_pmp_attached(ap))
				goto out;
			break;
		}
	}

	if (nr_fails)
		goto retry;

 out:
	if (rc && r_failed_link)
		*r_failed_link = link;

	DPRINTK("EXIT, rc=%d\n", rc);
	return rc;
}
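/*
 * Recovery proceeds in fixed phases: prep (tries, hotplug, probe
 * scheduling), reset, head parking, then per-link revalidate/attach,
 * transfer mode, UA clearing, FLUSH retry and LPM configuration.  Any
 * per-device failure funnels through ata_eh_handle_dev_fail() and,
 * unless the port is frozen, loops back to the retry label for
 * another pass.
 */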
/**
 *	ata_eh_finish - finish up EH
 *	@ap: host port to finish EH for
 *
 *	Recovery is complete.  Clean up EH states and retry or finish
 *	failed qcs.
 *
 *	LOCKING:
 *	None.
 */
void ata_eh_finish(struct ata_port *ap)
{
	int tag;

	/* retry or finish qcs */
	for (tag = 0; tag < ATA_MAX_QUEUE; tag++) {
		struct ata_queued_cmd *qc = __ata_qc_from_tag(ap, tag);

		if (!(qc->flags & ATA_QCFLAG_FAILED))
			continue;

		if (qc->err_mask) {
			/* FIXME: Once EH migration is complete,
			 * generate sense data in this function,
			 * considering both err_mask and tf.
			 */
			if (qc->flags & ATA_QCFLAG_RETRY)
				ata_eh_qc_retry(qc);
			else
				ata_eh_qc_complete(qc);
		} else {
			if (qc->flags & ATA_QCFLAG_SENSE_VALID) {
				ata_eh_qc_complete(qc);
			} else {
				/* feed zero TF to sense generation */
				memset(&qc->result_tf, 0, sizeof(qc->result_tf));
				ata_eh_qc_retry(qc);
			}
		}
	}

	/* make sure nr_active_links is zero after EH */
	WARN_ON(ap->nr_active_links);
	ap->nr_active_links = 0;
}
/**
 *	ata_do_eh - do standard error handling
 *	@ap: host port to handle error for
 *
 *	@prereset: prereset method (can be NULL)
 *	@softreset: softreset method (can be NULL)
 *	@hardreset: hardreset method (can be NULL)
 *	@postreset: postreset method (can be NULL)
 *
 *	Perform standard error handling sequence.
 *
 *	LOCKING:
 *	Kernel thread context (may sleep).
 */
void ata_do_eh(struct ata_port *ap, ata_prereset_fn_t prereset,
	       ata_reset_fn_t softreset, ata_reset_fn_t hardreset,
	       ata_postreset_fn_t postreset)
{
	struct ata_device *dev;
	int rc;

	ata_eh_autopsy(ap);
	ata_eh_report(ap);

	rc = ata_eh_recover(ap, prereset, softreset, hardreset, postreset,
			    NULL);
	if (rc) {
		ata_for_each_dev(dev, &ap->link, ALL)
			ata_dev_disable(dev);
	}

	ata_eh_finish(ap);
}
/**
 *	ata_std_error_handler - standard error handler
 *	@ap: host port to handle error for
 *
 *	Standard error handler
 *
 *	LOCKING:
 *	Kernel thread context (may sleep).
 */
void ata_std_error_handler(struct ata_port *ap)
{
	struct ata_port_operations *ops = ap->ops;
	ata_reset_fn_t hardreset = ops->hardreset;

	/* ignore built-in hardreset if SCR access is not available */
	if (hardreset == sata_std_hardreset && !sata_scr_valid(&ap->link))
		hardreset = NULL;

	ata_do_eh(ap, ops->prereset, ops->softreset, hardreset, ops->postreset);
}
#ifdef CONFIG_PM
/**
 *	ata_eh_handle_port_suspend - perform port suspend operation
 *	@ap: port to suspend
 *
 *	Suspend @ap.
 *
 *	LOCKING:
 *	Kernel thread context (may sleep).
 */
static void ata_eh_handle_port_suspend(struct ata_port *ap)
{
	unsigned long flags;
	int rc = 0;
	struct ata_device *dev;

	/* are we suspending? */
	spin_lock_irqsave(ap->lock, flags);
	if (!(ap->pflags & ATA_PFLAG_PM_PENDING) ||
	    ap->pm_mesg.event & PM_EVENT_RESUME) {
		spin_unlock_irqrestore(ap->lock, flags);
		return;
	}
	spin_unlock_irqrestore(ap->lock, flags);

	WARN_ON(ap->pflags & ATA_PFLAG_SUSPENDED);

	/*
	 * If we have a ZPODD attached, check its zero
	 * power ready status before the port is frozen.
	 * Only needed for runtime suspend.
	 */
	if (PMSG_IS_AUTO(ap->pm_mesg)) {
		ata_for_each_dev(dev, &ap->link, ENABLED) {
			if (zpodd_dev_enabled(dev))
				zpodd_on_suspend(dev);
		}
	}

	/* tell ACPI we're suspending */
	rc = ata_acpi_on_suspend(ap);
	if (rc)
		goto out;

	/* suspend */
	ata_eh_freeze_port(ap);

	if (ap->ops->port_suspend)
		rc = ap->ops->port_suspend(ap, ap->pm_mesg);

	ata_acpi_set_state(ap, ap->pm_mesg);
 out:
	/* update the flags */
	spin_lock_irqsave(ap->lock, flags);

	ap->pflags &= ~ATA_PFLAG_PM_PENDING;
	if (rc == 0)
		ap->pflags |= ATA_PFLAG_SUSPENDED;
	else if (ap->pflags & ATA_PFLAG_FROZEN)
		ata_port_schedule_eh(ap);

	spin_unlock_irqrestore(ap->lock, flags);
}
/**
 *	ata_eh_handle_port_resume - perform port resume operation
 *	@ap: port to resume
 *
 *	Resume @ap.
 *
 *	LOCKING:
 *	Kernel thread context (may sleep).
 */
static void ata_eh_handle_port_resume(struct ata_port *ap)
{
	struct ata_link *link;
	struct ata_device *dev;
	unsigned long flags;
	int rc = 0;

	/* are we resuming? */
	spin_lock_irqsave(ap->lock, flags);
	if (!(ap->pflags & ATA_PFLAG_PM_PENDING) ||
	    !(ap->pm_mesg.event & PM_EVENT_RESUME)) {
		spin_unlock_irqrestore(ap->lock, flags);
		return;
	}
	spin_unlock_irqrestore(ap->lock, flags);

	WARN_ON(!(ap->pflags & ATA_PFLAG_SUSPENDED));

	/*
	 * Error timestamps are in jiffies which doesn't run while
	 * suspended and PHY events during resume isn't too uncommon.
	 * When the two are combined, it can lead to unnecessary speed
	 * downs if the machine is suspended and resumed repeatedly.
	 * Clear error history.
	 */
	ata_for_each_link(link, ap, HOST_FIRST)
		ata_for_each_dev(dev, link, ALL)
			ata_ering_clear(&dev->ering);

	ata_acpi_set_state(ap, ap->pm_mesg);

	if (ap->ops->port_resume)
		rc = ap->ops->port_resume(ap);

	/* tell ACPI that we're resuming */
	ata_acpi_on_resume(ap);

	/* update the flags */
	spin_lock_irqsave(ap->lock, flags);
	ap->pflags &= ~(ATA_PFLAG_PM_PENDING | ATA_PFLAG_SUSPENDED);
	spin_unlock_irqrestore(ap->lock, flags);
}
#endif /* CONFIG_PM */