/*
 *  libata-eh.c - libata error handling
 *
 *  Maintained by:  Jeff Garzik <jgarzik@pobox.com>
 *                  Please ALWAYS copy linux-ide@vger.kernel.org
 *                  on emails.
 *
 *  Copyright 2006 Tejun Heo <htejun@gmail.com>
 *
 *
 *  This program is free software; you can redistribute it and/or
 *  modify it under the terms of the GNU General Public License as
 *  published by the Free Software Foundation; either version 2, or
 *  (at your option) any later version.
 *
 *  This program is distributed in the hope that it will be useful,
 *  but WITHOUT ANY WARRANTY; without even the implied warranty of
 *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 *  General Public License for more details.
 *
 *  You should have received a copy of the GNU General Public License
 *  along with this program; see the file COPYING.  If not, write to
 *  the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
 *
 *
 *  libata documentation is available via 'make {ps|pdf}docs',
 *  as Documentation/DocBook/libata.*
 *
 *  Hardware documentation available from http://www.t13.org/ and
 *  http://www.sata-io.org/
 *
 */
#include <linux/kernel.h>
#include <scsi/scsi.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_eh.h>
#include <scsi/scsi_device.h>
#include <scsi/scsi_cmnd.h>
#include "../scsi/scsi_transport_api.h"

#include <linux/libata.h>

#include "libata.h"
static void __ata_port_freeze(struct ata_port *ap);
static void ata_eh_finish(struct ata_port *ap);
static void ata_eh_handle_port_suspend(struct ata_port *ap);
static void ata_eh_handle_port_resume(struct ata_port *ap);
static void ata_ering_record(struct ata_ering *ering, int is_io,
                             unsigned int err_mask)
{
        struct ata_ering_entry *ent;

        WARN_ON(!err_mask);

        ering->cursor++;
        ering->cursor %= ATA_ERING_SIZE;

        ent = &ering->ring[ering->cursor];
        ent->is_io = is_io;
        ent->err_mask = err_mask;
        ent->timestamp = get_jiffies_64();
}
static struct ata_ering_entry *ata_ering_top(struct ata_ering *ering)
{
        struct ata_ering_entry *ent = &ering->ring[ering->cursor];

        if (!ent->err_mask)
                return NULL;
        return ent;
}
static int ata_ering_map(struct ata_ering *ering,
                         int (*map_fn)(struct ata_ering_entry *, void *),
                         void *arg)
{
        int idx, rc = 0;
        struct ata_ering_entry *ent;

        idx = ering->cursor;
        do {
                ent = &ering->ring[idx];
                if (!ent->err_mask)
                        break;
                rc = map_fn(ent, arg);
                if (rc)
                        break;
                idx = (idx - 1 + ATA_ERING_SIZE) % ATA_ERING_SIZE;
        } while (idx != ering->cursor);

        return rc;
}
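/*
 * Illustrative sketch, not part of the original file: a map_fn callback
 * as consumed by ata_ering_map() above.  The walk starts at the most
 * recent entry and goes backwards; a non-zero return value stops it.
 * The callback name count_timeouts and the int counter are made up for
 * the example.
 *
 *      static int count_timeouts(struct ata_ering_entry *ent, void *arg)
 *      {
 *              int *nr = arg;
 *
 *              if (ent->err_mask & AC_ERR_TIMEOUT)
 *                      (*nr)++;
 *              return 0;
 *      }
 *
 *      int nr = 0;
 *      ata_ering_map(&dev->ering, count_timeouts, &nr);
 */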
static unsigned int ata_eh_dev_action(struct ata_device *dev)
{
        struct ata_eh_context *ehc = &dev->ap->eh_context;

        return ehc->i.action | ehc->i.dev_action[dev->devno];
}
static void ata_eh_clear_action(struct ata_device *dev,
                                struct ata_eh_info *ehi, unsigned int action)
{
        int i;

        if (!dev) {
                ehi->action &= ~action;
                for (i = 0; i < ATA_MAX_DEVICES; i++)
                        ehi->dev_action[i] &= ~action;
        } else {
                /* doesn't make sense for port-wide EH actions */
                WARN_ON(!(action & ATA_EH_PERDEV_MASK));

                /* break ehi->action into ehi->dev_action */
                if (ehi->action & action) {
                        for (i = 0; i < ATA_MAX_DEVICES; i++)
                                ehi->dev_action[i] |= ehi->action & action;
                        ehi->action &= ~action;
                }

                /* turn off the specified per-dev action */
                ehi->dev_action[dev->devno] &= ~action;
        }
}
/**
 *  ata_scsi_timed_out - SCSI layer time out callback
 *  @cmd: timed out SCSI command
 *
 *  Handles SCSI layer timeout.  We race with normal completion of
 *  the qc for @cmd.  If the qc is already gone, we lose and let
 *  the scsi command finish (EH_HANDLED).  Otherwise, the qc has
 *  timed out and EH should be invoked.  Prevent ata_qc_complete()
 *  from finishing it by setting EH_SCHEDULED and return
 *  EH_NOT_HANDLED.
 *
 *  TODO: kill this function once old EH is gone.
 *
 *  LOCKING:
 *  Called from timer context
 *
 *  RETURNS:
 *  EH_HANDLED or EH_NOT_HANDLED
 */
enum scsi_eh_timer_return ata_scsi_timed_out(struct scsi_cmnd *cmd)
{
        struct Scsi_Host *host = cmd->device->host;
        struct ata_port *ap = ata_shost_to_port(host);
        unsigned long flags;
        struct ata_queued_cmd *qc;
        enum scsi_eh_timer_return ret;

        DPRINTK("ENTER\n");

        if (ap->ops->error_handler) {
                ret = EH_NOT_HANDLED;
                goto out;
        }

        ret = EH_HANDLED;
        spin_lock_irqsave(ap->lock, flags);
        qc = ata_qc_from_tag(ap, ap->active_tag);
        if (qc) {
                WARN_ON(qc->scsicmd != cmd);
                qc->flags |= ATA_QCFLAG_EH_SCHEDULED;
                qc->err_mask |= AC_ERR_TIMEOUT;
                ret = EH_NOT_HANDLED;
        }
        spin_unlock_irqrestore(ap->lock, flags);

 out:
        DPRINTK("EXIT, ret=%d\n", ret);
        return ret;
}
/**
 *  ata_scsi_error - SCSI layer error handler callback
 *  @host: SCSI host on which error occurred
 *
 *  Handles SCSI-layer-thrown error events.
 *
 *  LOCKING:
 *  Inherited from SCSI layer (none, can sleep)
 */
void ata_scsi_error(struct Scsi_Host *host)
{
        struct ata_port *ap = ata_shost_to_port(host);
        int i, repeat_cnt = ATA_EH_MAX_REPEAT;
        unsigned long flags;

        DPRINTK("ENTER\n");

        /* synchronize with port task */
        ata_port_flush_task(ap);

        /* synchronize with host lock and sort out timeouts */

        /* For new EH, all qcs are finished in one of three ways -
         * normal completion, error completion, and SCSI timeout.
         * Both completions can race against SCSI timeout.  When normal
         * completion wins, the qc never reaches EH.  When error
         * completion wins, the qc has ATA_QCFLAG_FAILED set.
         *
         * When SCSI timeout wins, things are a bit more complex.
         * Normal or error completion can occur after the timeout but
         * before this point.  In such cases, both types of
         * completions are honored.  A scmd is determined to have
         * timed out iff its associated qc is active and not failed.
         */
        if (ap->ops->error_handler) {
                struct scsi_cmnd *scmd, *tmp;
                int nr_timedout = 0;

                spin_lock_irqsave(ap->lock, flags);

                list_for_each_entry_safe(scmd, tmp, &host->eh_cmd_q, eh_entry) {
                        struct ata_queued_cmd *qc;

                        for (i = 0; i < ATA_MAX_QUEUE; i++) {
                                qc = __ata_qc_from_tag(ap, i);
                                if (qc->flags & ATA_QCFLAG_ACTIVE &&
                                    qc->scsicmd == scmd)
                                        break;
                        }

                        if (i < ATA_MAX_QUEUE) {
                                /* the scmd has an associated qc */
                                if (!(qc->flags & ATA_QCFLAG_FAILED)) {
                                        /* which hasn't failed yet, timeout */
                                        qc->err_mask |= AC_ERR_TIMEOUT;
                                        qc->flags |= ATA_QCFLAG_FAILED;
                                        nr_timedout++;
                                }
                        } else {
                                /* Normal completion occurred after
                                 * SCSI timeout but before this point.
                                 * Successfully complete it.
                                 */
                                scmd->retries = scmd->allowed;
                                scsi_eh_finish_cmd(scmd, &ap->eh_done_q);
                        }
                }

                /* If we have timed out qcs, they belong to EH from
                 * this point on, but the state of the controller is
                 * unknown.  Freeze the port to make sure the IRQ
                 * handler doesn't diddle with those qcs.  This must
                 * be done atomically w.r.t. setting QCFLAG_FAILED.
                 */
                if (nr_timedout)
                        __ata_port_freeze(ap);

                spin_unlock_irqrestore(ap->lock, flags);
        } else
                spin_unlock_wait(ap->lock);

 repeat:
        /* invoke error handler */
        if (ap->ops->error_handler) {
                /* process port resume request */
                ata_eh_handle_port_resume(ap);

                /* fetch & clear EH info */
                spin_lock_irqsave(ap->lock, flags);

                memset(&ap->eh_context, 0, sizeof(ap->eh_context));
                ap->eh_context.i = ap->eh_info;
                memset(&ap->eh_info, 0, sizeof(ap->eh_info));

                ap->pflags |= ATA_PFLAG_EH_IN_PROGRESS;
                ap->pflags &= ~ATA_PFLAG_EH_PENDING;

                spin_unlock_irqrestore(ap->lock, flags);

                /* invoke EH, skip if unloading or suspended */
                if (!(ap->pflags & (ATA_PFLAG_UNLOADING | ATA_PFLAG_SUSPENDED)))
                        ap->ops->error_handler(ap);
                else
                        ata_eh_finish(ap);

                /* process port suspend request */
                ata_eh_handle_port_suspend(ap);

                /* Exception might have happened after ->error_handler
                 * recovered the port but before this point.  Repeat
                 * EH in such case.
                 */
                spin_lock_irqsave(ap->lock, flags);

                if (ap->pflags & ATA_PFLAG_EH_PENDING) {
                        if (--repeat_cnt) {
                                ata_port_printk(ap, KERN_INFO,
                                        "EH pending after completion, "
                                        "repeating EH (cnt=%d)\n", repeat_cnt);
                                spin_unlock_irqrestore(ap->lock, flags);
                                goto repeat;
                        }
                        ata_port_printk(ap, KERN_ERR, "EH pending after %d "
                                        "tries, giving up\n", ATA_EH_MAX_REPEAT);
                }

                /* this run is complete, make sure EH info is clear */
                memset(&ap->eh_info, 0, sizeof(ap->eh_info));

                /* Clear host_eh_scheduled while holding ap->lock such
                 * that if exception occurs after this point but
                 * before EH completion, SCSI midlayer will
                 * re-initiate EH.
                 */
                host->host_eh_scheduled = 0;

                spin_unlock_irqrestore(ap->lock, flags);
        } else {
                WARN_ON(ata_qc_from_tag(ap, ap->active_tag) == NULL);
                ap->ops->eng_timeout(ap);
        }

        /* finish or retry handled scmd's and clean up */
        WARN_ON(host->host_failed || !list_empty(&host->eh_cmd_q));

        scsi_eh_flush_done_q(&ap->eh_done_q);

        /* clean up */
        spin_lock_irqsave(ap->lock, flags);

        if (ap->pflags & ATA_PFLAG_LOADING)
                ap->pflags &= ~ATA_PFLAG_LOADING;
        else if (ap->pflags & ATA_PFLAG_SCSI_HOTPLUG)
                queue_work(ata_aux_wq, &ap->hotplug_task);

        if (ap->pflags & ATA_PFLAG_RECOVERED)
                ata_port_printk(ap, KERN_INFO, "EH complete\n");

        ap->pflags &= ~(ATA_PFLAG_SCSI_HOTPLUG | ATA_PFLAG_RECOVERED);

        /* tell wait_eh that we're done */
        ap->pflags &= ~ATA_PFLAG_EH_IN_PROGRESS;
        wake_up_all(&ap->eh_wait_q);

        spin_unlock_irqrestore(ap->lock, flags);

        DPRINTK("EXIT\n");
}
/**
 *  ata_port_wait_eh - Wait for the currently pending EH to complete
 *  @ap: Port to wait EH for
 *
 *  Wait until the currently pending EH is complete.
 *
 *  LOCKING:
 *  Kernel thread context (may sleep).
 */
void ata_port_wait_eh(struct ata_port *ap)
{
        unsigned long flags;
        DEFINE_WAIT(wait);

 retry:
        spin_lock_irqsave(ap->lock, flags);

        while (ap->pflags & (ATA_PFLAG_EH_PENDING | ATA_PFLAG_EH_IN_PROGRESS)) {
                prepare_to_wait(&ap->eh_wait_q, &wait, TASK_UNINTERRUPTIBLE);
                spin_unlock_irqrestore(ap->lock, flags);
                schedule();
                spin_lock_irqsave(ap->lock, flags);
        }
        finish_wait(&ap->eh_wait_q, &wait);

        spin_unlock_irqrestore(ap->lock, flags);

        /* make sure SCSI EH is complete */
        if (scsi_host_in_recovery(ap->scsi_host)) {
                msleep(10);
                goto retry;
        }
}
/**
 *  ata_qc_timeout - Handle timeout of queued command
 *  @qc: Command that timed out
 *
 *  Some part of the kernel (currently, only the SCSI layer)
 *  has noticed that the active command on port @ap has not
 *  completed after a specified length of time.  Handle this
 *  condition by disabling DMA (if necessary) and completing
 *  transactions, with error if necessary.
 *
 *  This also handles the case of the "lost interrupt", where
 *  for some reason (possibly hardware bug, possibly driver bug)
 *  an interrupt was not delivered to the driver, even though the
 *  transaction completed successfully.
 *
 *  TODO: kill this function once old EH is gone.
 *
 *  LOCKING:
 *  Inherited from SCSI layer (none, can sleep)
 */
static void ata_qc_timeout(struct ata_queued_cmd *qc)
{
        struct ata_port *ap = qc->ap;
        u8 host_stat = 0, drv_stat;
        unsigned long flags;

        DPRINTK("ENTER\n");

        ap->hsm_task_state = HSM_ST_IDLE;

        spin_lock_irqsave(ap->lock, flags);

        switch (qc->tf.protocol) {

        case ATA_PROT_DMA:
        case ATA_PROT_ATAPI_DMA:
                host_stat = ap->ops->bmdma_status(ap);

                /* before we do anything else, clear DMA-Start bit */
                ap->ops->bmdma_stop(qc);

                /* fall through */

        default:
                ata_altstatus(ap);
                drv_stat = ata_chk_status(ap);

                /* ack bmdma irq events */
                ap->ops->irq_clear(ap);

                ata_dev_printk(qc->dev, KERN_ERR, "command 0x%x timeout, "
                               "stat 0x%x host_stat 0x%x\n",
                               qc->tf.command, drv_stat, host_stat);

                /* complete taskfile transaction */
                qc->err_mask |= AC_ERR_TIMEOUT;
                break;
        }

        spin_unlock_irqrestore(ap->lock, flags);

        ata_eh_qc_complete(qc);

        DPRINTK("EXIT\n");
}
/**
 *  ata_eng_timeout - Handle timeout of queued command
 *  @ap: Port on which timed-out command is active
 *
 *  Some part of the kernel (currently, only the SCSI layer)
 *  has noticed that the active command on port @ap has not
 *  completed after a specified length of time.  Handle this
 *  condition by disabling DMA (if necessary) and completing
 *  transactions, with error if necessary.
 *
 *  This also handles the case of the "lost interrupt", where
 *  for some reason (possibly hardware bug, possibly driver bug)
 *  an interrupt was not delivered to the driver, even though the
 *  transaction completed successfully.
 *
 *  TODO: kill this function once old EH is gone.
 *
 *  LOCKING:
 *  Inherited from SCSI layer (none, can sleep)
 */
void ata_eng_timeout(struct ata_port *ap)
{
        DPRINTK("ENTER\n");

        ata_qc_timeout(ata_qc_from_tag(ap, ap->active_tag));

        DPRINTK("EXIT\n");
}
/**
 *  ata_qc_schedule_eh - schedule qc for error handling
 *  @qc: command to schedule error handling for
 *
 *  Schedule error handling for @qc.  EH will kick in as soon as
 *  other commands are drained.
 *
 *  LOCKING:
 *  spin_lock_irqsave(host lock)
 */
void ata_qc_schedule_eh(struct ata_queued_cmd *qc)
{
        struct ata_port *ap = qc->ap;

        WARN_ON(!ap->ops->error_handler);

        qc->flags |= ATA_QCFLAG_FAILED;
        qc->ap->pflags |= ATA_PFLAG_EH_PENDING;

        /* The following will fail if timeout has already expired.
         * ata_scsi_error() takes care of such scmds on EH entry.
         * Note that ATA_QCFLAG_FAILED is unconditionally set after
         * this function completes.
         */
        scsi_req_abort_cmd(qc->scsicmd);
}
/**
 *  ata_port_schedule_eh - schedule error handling without a qc
 *  @ap: ATA port to schedule EH for
 *
 *  Schedule error handling for @ap.  EH will kick in as soon as
 *  all commands are drained.
 *
 *  LOCKING:
 *  spin_lock_irqsave(host lock)
 */
void ata_port_schedule_eh(struct ata_port *ap)
{
        WARN_ON(!ap->ops->error_handler);

        ap->pflags |= ATA_PFLAG_EH_PENDING;
        scsi_schedule_eh(ap->scsi_host);

        DPRINTK("port EH scheduled\n");
}
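/*
 * Illustrative sketch, not part of the original file: a typical caller
 * holds the host lock while scheduling EH and may later wait for it to
 * finish with ata_port_wait_eh().  The flags variable is assumed to be
 * a local unsigned long.
 *
 *      spin_lock_irqsave(ap->lock, flags);
 *      ata_port_schedule_eh(ap);
 *      spin_unlock_irqrestore(ap->lock, flags);
 *
 *      ata_port_wait_eh(ap);   (sleeps until EH_PENDING/EH_IN_PROGRESS clear)
 */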
/**
 *  ata_port_abort - abort all qc's on the port
 *  @ap: ATA port to abort qc's for
 *
 *  Abort all active qc's of @ap and schedule EH.
 *
 *  LOCKING:
 *  spin_lock_irqsave(host lock)
 *
 *  RETURNS:
 *  Number of aborted qc's.
 */
int ata_port_abort(struct ata_port *ap)
{
        int tag, nr_aborted = 0;

        WARN_ON(!ap->ops->error_handler);

        for (tag = 0; tag < ATA_MAX_QUEUE; tag++) {
                struct ata_queued_cmd *qc = ata_qc_from_tag(ap, tag);

                if (qc) {
                        qc->flags |= ATA_QCFLAG_FAILED;
                        ata_qc_complete(qc);
                        nr_aborted++;
                }
        }

        if (!nr_aborted)
                ata_port_schedule_eh(ap);

        return nr_aborted;
}
/**
 *  __ata_port_freeze - freeze port
 *  @ap: ATA port to freeze
 *
 *  This function is called when HSM violation or some other
 *  condition disrupts normal operation of the port.  Frozen port
 *  is not allowed to perform any operation until the port is
 *  thawed, which usually follows a successful reset.
 *
 *  ap->ops->freeze() callback can be used for freezing the port
 *  hardware-wise (e.g. mask interrupt and stop DMA engine).  If a
 *  port cannot be frozen hardware-wise, the interrupt handler
 *  must ack and clear interrupts unconditionally while the port
 *  is frozen.
 *
 *  LOCKING:
 *  spin_lock_irqsave(host lock)
 */
static void __ata_port_freeze(struct ata_port *ap)
{
        WARN_ON(!ap->ops->error_handler);

        if (ap->ops->freeze)
                ap->ops->freeze(ap);

        ap->pflags |= ATA_PFLAG_FROZEN;

        DPRINTK("ata%u port frozen\n", ap->id);
}
/**
 *  ata_port_freeze - abort & freeze port
 *  @ap: ATA port to freeze
 *
 *  Abort and freeze @ap.
 *
 *  LOCKING:
 *  spin_lock_irqsave(host lock)
 *
 *  RETURNS:
 *  Number of aborted commands.
 */
int ata_port_freeze(struct ata_port *ap)
{
        int nr_aborted;

        WARN_ON(!ap->ops->error_handler);

        nr_aborted = ata_port_abort(ap);
        __ata_port_freeze(ap);

        return nr_aborted;
}
/**
 *  ata_eh_freeze_port - EH helper to freeze port
 *  @ap: ATA port to freeze
 *
 *  Freeze @ap.
 *
 *  LOCKING:
 *  None.
 */
void ata_eh_freeze_port(struct ata_port *ap)
{
        unsigned long flags;

        if (!ap->ops->error_handler)
                return;

        spin_lock_irqsave(ap->lock, flags);
        __ata_port_freeze(ap);
        spin_unlock_irqrestore(ap->lock, flags);
}
/**
 *  ata_eh_thaw_port - EH helper to thaw port
 *  @ap: ATA port to thaw
 *
 *  Thaw frozen port @ap.
 *
 *  LOCKING:
 *  None.
 */
void ata_eh_thaw_port(struct ata_port *ap)
{
        unsigned long flags;

        if (!ap->ops->error_handler)
                return;

        spin_lock_irqsave(ap->lock, flags);

        ap->pflags &= ~ATA_PFLAG_FROZEN;

        if (ap->ops->thaw)
                ap->ops->thaw(ap);

        spin_unlock_irqrestore(ap->lock, flags);

        DPRINTK("ata%u port thawed\n", ap->id);
}
static void ata_eh_scsidone(struct scsi_cmnd *scmd)
{
        /* nada */
}
static void __ata_eh_qc_complete(struct ata_queued_cmd *qc)
{
        struct ata_port *ap = qc->ap;
        struct scsi_cmnd *scmd = qc->scsicmd;
        unsigned long flags;

        spin_lock_irqsave(ap->lock, flags);
        qc->scsidone = ata_eh_scsidone;
        __ata_qc_complete(qc);
        WARN_ON(ata_tag_valid(qc->tag));
        spin_unlock_irqrestore(ap->lock, flags);

        scsi_eh_finish_cmd(scmd, &ap->eh_done_q);
}
/**
 *  ata_eh_qc_complete - Complete an active ATA command from EH
 *  @qc: Command to complete
 *
 *  Indicate to the mid and upper layers that an ATA command has
 *  completed.  To be used from EH.
 */
void ata_eh_qc_complete(struct ata_queued_cmd *qc)
{
        struct scsi_cmnd *scmd = qc->scsicmd;
        scmd->retries = scmd->allowed;
        __ata_eh_qc_complete(qc);
}
/**
 *  ata_eh_qc_retry - Tell midlayer to retry an ATA command after EH
 *  @qc: Command to retry
 *
 *  Indicate to the mid and upper layers that an ATA command
 *  should be retried.  To be used from EH.
 *
 *  SCSI midlayer limits the number of retries to scmd->allowed.
 *  scmd->retries is decremented for commands which get retried
 *  due to unrelated failures (qc->err_mask is zero).
 */
void ata_eh_qc_retry(struct ata_queued_cmd *qc)
{
        struct scsi_cmnd *scmd = qc->scsicmd;
        if (!qc->err_mask && scmd->retries)
                scmd->retries--;
        __ata_eh_qc_complete(qc);
}
/**
 *  ata_eh_detach_dev - detach ATA device
 *  @dev: ATA device to detach
 *
 *  Detach @dev.
 *
 *  LOCKING:
 *  None.
 */
static void ata_eh_detach_dev(struct ata_device *dev)
{
        struct ata_port *ap = dev->ap;
        unsigned long flags;

        ata_dev_disable(dev);

        spin_lock_irqsave(ap->lock, flags);

        dev->flags &= ~ATA_DFLAG_DETACH;

        if (ata_scsi_offline_dev(dev)) {
                dev->flags |= ATA_DFLAG_DETACHED;
                ap->pflags |= ATA_PFLAG_SCSI_HOTPLUG;
        }

        /* clear per-dev EH actions */
        ata_eh_clear_action(dev, &ap->eh_info, ATA_EH_PERDEV_MASK);
        ata_eh_clear_action(dev, &ap->eh_context.i, ATA_EH_PERDEV_MASK);

        spin_unlock_irqrestore(ap->lock, flags);
}
/**
 *  ata_eh_about_to_do - about to perform eh_action
 *  @ap: target ATA port
 *  @dev: target ATA dev for per-dev action (can be NULL)
 *  @action: action about to be performed
 *
 *  Called just before performing EH actions to clear related bits
 *  in @ap->eh_info such that eh actions are not unnecessarily
 *  repeated.
 *
 *  LOCKING:
 *  None.
 */
static void ata_eh_about_to_do(struct ata_port *ap, struct ata_device *dev,
                               unsigned int action)
{
        unsigned long flags;
        struct ata_eh_info *ehi = &ap->eh_info;
        struct ata_eh_context *ehc = &ap->eh_context;

        spin_lock_irqsave(ap->lock, flags);

        /* Reset is represented by combination of actions and EHI
         * flags.  Suck in all related bits before clearing eh_info to
         * avoid losing requested action.
         */
        if (action & ATA_EH_RESET_MASK) {
                ehc->i.action |= ehi->action & ATA_EH_RESET_MASK;
                ehc->i.flags |= ehi->flags & ATA_EHI_RESET_MODIFIER_MASK;

                /* make sure all reset actions are cleared & clear EHI flags */
                action |= ATA_EH_RESET_MASK;
                ehi->flags &= ~ATA_EHI_RESET_MODIFIER_MASK;
        }

        ata_eh_clear_action(dev, ehi, action);

        if (!(ehc->i.flags & ATA_EHI_QUIET))
                ap->pflags |= ATA_PFLAG_RECOVERED;

        spin_unlock_irqrestore(ap->lock, flags);
}
/**
 *  ata_eh_done - EH action complete
 *  @ap: target ATA port
 *  @dev: target ATA dev for per-dev action (can be NULL)
 *  @action: action just completed
 *
 *  Called right after performing EH actions to clear related bits
 *  in @ap->eh_context.
 *
 *  LOCKING:
 *  None.
 */
static void ata_eh_done(struct ata_port *ap, struct ata_device *dev,
                        unsigned int action)
{
        /* if reset is complete, clear all reset actions & reset modifier */
        if (action & ATA_EH_RESET_MASK) {
                action |= ATA_EH_RESET_MASK;
                ap->eh_context.i.flags &= ~ATA_EHI_RESET_MODIFIER_MASK;
        }

        ata_eh_clear_action(dev, &ap->eh_context.i, action);
}
/**
 *  ata_err_string - convert err_mask to descriptive string
 *  @err_mask: error mask to convert to string
 *
 *  Convert @err_mask to descriptive string.  Errors are
 *  prioritized according to severity and only the most severe
 *  error is reported.
 *
 *  LOCKING:
 *  None.
 *
 *  RETURNS:
 *  Descriptive string for @err_mask
 */
static const char *ata_err_string(unsigned int err_mask)
{
        if (err_mask & AC_ERR_HOST_BUS)
                return "host bus error";
        if (err_mask & AC_ERR_ATA_BUS)
                return "ATA bus error";
        if (err_mask & AC_ERR_TIMEOUT)
                return "timeout";
        if (err_mask & AC_ERR_HSM)
                return "HSM violation";
        if (err_mask & AC_ERR_SYSTEM)
                return "internal error";
        if (err_mask & AC_ERR_MEDIA)
                return "media error";
        if (err_mask & AC_ERR_INVALID)
                return "invalid argument";
        if (err_mask & AC_ERR_DEV)
                return "device error";
        return "unknown error";
}
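/*
 * Illustrative note, not part of the original file: because the checks
 * above are ordered from most to least severe, a combined mask reports
 * only its most severe bit, e.g.
 *
 *      ata_err_string(AC_ERR_TIMEOUT | AC_ERR_DEV);    => "timeout"
 *
 * since AC_ERR_TIMEOUT is tested before AC_ERR_DEV.
 */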
/**
 *  ata_read_log_page - read a specific log page
 *  @dev: target device
 *  @page: page to read
 *  @buf: buffer to store read page
 *  @sectors: number of sectors to read
 *
 *  Read log page using READ_LOG_EXT command.
 *
 *  LOCKING:
 *  Kernel thread context (may sleep).
 *
 *  RETURNS:
 *  0 on success, AC_ERR_* mask otherwise.
 */
static unsigned int ata_read_log_page(struct ata_device *dev,
                                      u8 page, void *buf, unsigned int sectors)
{
        struct ata_taskfile tf;
        unsigned int err_mask;

        DPRINTK("read log page - page %d\n", page);

        ata_tf_init(dev, &tf);
        tf.command = ATA_CMD_READ_LOG_EXT;
        tf.lbal = page;
        tf.nsect = sectors;
        tf.hob_nsect = sectors >> 8;
        tf.flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_LBA48 | ATA_TFLAG_DEVICE;
        tf.protocol = ATA_PROT_PIO;

        err_mask = ata_exec_internal(dev, &tf, NULL, DMA_FROM_DEVICE,
                                     buf, sectors * ATA_SECT_SIZE);

        DPRINTK("EXIT, err_mask=%x\n", err_mask);
        return err_mask;
}
/**
 *  ata_eh_read_log_10h - Read log page 10h for NCQ error details
 *  @dev: Device to read log page 10h from
 *  @tag: Resulting tag of the failed command
 *  @tf: Resulting taskfile registers of the failed command
 *
 *  Read log page 10h to obtain NCQ error details and clear error
 *  condition.
 *
 *  LOCKING:
 *  Kernel thread context (may sleep).
 *
 *  RETURNS:
 *  0 on success, -errno otherwise.
 */
static int ata_eh_read_log_10h(struct ata_device *dev,
                               int *tag, struct ata_taskfile *tf)
{
        u8 *buf = dev->ap->sector_buf;
        unsigned int err_mask;
        u8 csum;
        int i;

        err_mask = ata_read_log_page(dev, ATA_LOG_SATA_NCQ, buf, 1);
        if (err_mask)
                return -EIO;

        csum = 0;
        for (i = 0; i < ATA_SECT_SIZE; i++)
                csum += buf[i];
        if (csum)
                ata_dev_printk(dev, KERN_WARNING,
                        "invalid checksum 0x%x on log page 10h\n", csum);

        if (buf[0] & 0x80)
                return -ENOENT;

        *tag = buf[0] & 0x1f;

        tf->command = buf[2];
        tf->feature = buf[3];
        tf->lbal = buf[4];
        tf->lbam = buf[5];
        tf->lbah = buf[6];
        tf->device = buf[7];
        tf->hob_lbal = buf[8];
        tf->hob_lbam = buf[9];
        tf->hob_lbah = buf[10];
        tf->nsect = buf[12];
        tf->hob_nsect = buf[13];

        return 0;
}
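/*
 * Informal summary, not part of the original file, of the log page 10h
 * bytes consumed above (offsets into the 512-byte sector_buf; the bit-7
 * interpretation follows the SATA spec's NQ flag for non-queued errors):
 *
 *      buf[0]      bit 7 = error for a non-queued command, bits 4:0 = tag
 *      buf[2..3]   status / error   (stored as tf->command / tf->feature)
 *      buf[4..7]   LBA low/mid/high and device
 *      buf[8..10]  LBA low/mid/high (exp)
 *      buf[12..13] sector count and sector count (exp)
 */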
/**
 *  atapi_eh_request_sense - perform ATAPI REQUEST_SENSE
 *  @dev: device to perform REQUEST_SENSE to
 *  @sense_buf: result sense data buffer (SCSI_SENSE_BUFFERSIZE bytes long)
 *
 *  Perform ATAPI REQUEST_SENSE after the device reported CHECK
 *  SENSE.  This function is EH helper.
 *
 *  LOCKING:
 *  Kernel thread context (may sleep).
 *
 *  RETURNS:
 *  0 on success, AC_ERR_* mask on failure
 */
static unsigned int atapi_eh_request_sense(struct ata_device *dev,
                                           unsigned char *sense_buf)
{
        struct ata_port *ap = dev->ap;
        struct ata_taskfile tf;
        u8 cdb[ATAPI_CDB_LEN];

        DPRINTK("ATAPI request sense\n");

        ata_tf_init(dev, &tf);

        /* FIXME: is this needed? */
        memset(sense_buf, 0, SCSI_SENSE_BUFFERSIZE);

        /* XXX: why tf_read here? */
        ap->ops->tf_read(ap, &tf);

        /* fill these in, for the case where they are -not- overwritten */
        sense_buf[0] = 0x70;
        sense_buf[2] = tf.feature >> 4;

        memset(cdb, 0, ATAPI_CDB_LEN);
        cdb[0] = REQUEST_SENSE;
        cdb[4] = SCSI_SENSE_BUFFERSIZE;

        tf.flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE;
        tf.command = ATA_CMD_PACKET;

        /* is it pointless to prefer PIO for "safety reasons"? */
        if (ap->flags & ATA_FLAG_PIO_DMA) {
                tf.protocol = ATA_PROT_ATAPI_DMA;
                tf.feature |= ATAPI_PKT_DMA;
        } else {
                tf.protocol = ATA_PROT_ATAPI;
                tf.lbam = (8 * 1024) & 0xff;
                tf.lbah = (8 * 1024) >> 8;
        }

        return ata_exec_internal(dev, &tf, cdb, DMA_FROM_DEVICE,
                                 sense_buf, SCSI_SENSE_BUFFERSIZE);
}
/**
 *  ata_eh_analyze_serror - analyze SError for a failed port
 *  @ap: ATA port to analyze SError for
 *
 *  Analyze SError if available and further determine cause of
 *  failure.
 *
 *  LOCKING:
 *  None.
 */
static void ata_eh_analyze_serror(struct ata_port *ap)
{
        struct ata_eh_context *ehc = &ap->eh_context;
        u32 serror = ehc->i.serror;
        unsigned int err_mask = 0, action = 0;

        if (serror & SERR_PERSISTENT) {
                err_mask |= AC_ERR_ATA_BUS;
                action |= ATA_EH_HARDRESET;
        }
        if (serror &
            (SERR_DATA_RECOVERED | SERR_COMM_RECOVERED | SERR_DATA)) {
                err_mask |= AC_ERR_ATA_BUS;
                action |= ATA_EH_SOFTRESET;
        }
        if (serror & SERR_PROTOCOL) {
                err_mask |= AC_ERR_HSM;
                action |= ATA_EH_SOFTRESET;
        }
        if (serror & SERR_INTERNAL) {
                err_mask |= AC_ERR_SYSTEM;
                action |= ATA_EH_SOFTRESET;
        }
        if (serror & (SERR_PHYRDY_CHG | SERR_DEV_XCHG))
                ata_ehi_hotplugged(&ehc->i);

        ehc->i.err_mask |= err_mask;
        ehc->i.action |= action;
}
/**
 *  ata_eh_analyze_ncq_error - analyze NCQ error
 *  @ap: ATA port to analyze NCQ error for
 *
 *  Read log page 10h, determine the offending qc and acquire
 *  error status TF.  For NCQ device errors, all an LLDD has to do
 *  is set AC_ERR_DEV in ehi->err_mask.  This function takes
 *  care of the rest.
 *
 *  LOCKING:
 *  Kernel thread context (may sleep).
 */
static void ata_eh_analyze_ncq_error(struct ata_port *ap)
{
        struct ata_eh_context *ehc = &ap->eh_context;
        struct ata_device *dev = ap->device;
        struct ata_queued_cmd *qc;
        struct ata_taskfile tf;
        int tag, rc;

        /* if frozen, we can't do much */
        if (ap->pflags & ATA_PFLAG_FROZEN)
                return;

        /* is it NCQ device error? */
        if (!ap->sactive || !(ehc->i.err_mask & AC_ERR_DEV))
                return;

        /* has LLDD analyzed already? */
        for (tag = 0; tag < ATA_MAX_QUEUE; tag++) {
                qc = __ata_qc_from_tag(ap, tag);

                if (!(qc->flags & ATA_QCFLAG_FAILED))
                        continue;

                if (qc->err_mask)
                        return;
        }

        /* okay, this error is ours */
        rc = ata_eh_read_log_10h(dev, &tag, &tf);
        if (rc) {
                ata_port_printk(ap, KERN_ERR, "failed to read log page 10h "
                                "(errno=%d)\n", rc);
                return;
        }

        if (!(ap->sactive & (1 << tag))) {
                ata_port_printk(ap, KERN_ERR, "log page 10h reported "
                                "inactive tag %d\n", tag);
                return;
        }

        /* we've got the perpetrator, condemn it */
        qc = __ata_qc_from_tag(ap, tag);
        memcpy(&qc->result_tf, &tf, sizeof(tf));
        qc->err_mask |= AC_ERR_DEV;
        ehc->i.err_mask &= ~AC_ERR_DEV;
}
/**
 *  ata_eh_analyze_tf - analyze taskfile of a failed qc
 *  @qc: qc to analyze
 *  @tf: Taskfile registers to analyze
 *
 *  Analyze taskfile of @qc and further determine cause of
 *  failure.  This function also requests ATAPI sense data if
 *  available.
 *
 *  LOCKING:
 *  Kernel thread context (may sleep).
 *
 *  RETURNS:
 *  Determined recovery action
 */
static unsigned int ata_eh_analyze_tf(struct ata_queued_cmd *qc,
                                      const struct ata_taskfile *tf)
{
        unsigned int tmp, action = 0;
        u8 stat = tf->command, err = tf->feature;

        if ((stat & (ATA_BUSY | ATA_DRQ | ATA_DRDY)) != ATA_DRDY) {
                qc->err_mask |= AC_ERR_HSM;
                return ATA_EH_SOFTRESET;
        }

        if (!(qc->err_mask & AC_ERR_DEV))
                return 0;

        switch (qc->dev->class) {
        case ATA_DEV_ATA:
                if (err & ATA_ICRC)
                        qc->err_mask |= AC_ERR_ATA_BUS;
                if (err & ATA_UNC)
                        qc->err_mask |= AC_ERR_MEDIA;
                if (err & ATA_IDNF)
                        qc->err_mask |= AC_ERR_INVALID;
                break;

        case ATA_DEV_ATAPI:
                tmp = atapi_eh_request_sense(qc->dev,
                                             qc->scsicmd->sense_buffer);
                if (!tmp) {
                        /* ATA_QCFLAG_SENSE_VALID is used to tell
                         * atapi_qc_complete() that sense data is
                         * already valid.
                         *
                         * TODO: interpret sense data and set
                         * appropriate err_mask.
                         */
                        qc->flags |= ATA_QCFLAG_SENSE_VALID;
                } else
                        qc->err_mask |= tmp;
        }

        if (qc->err_mask & (AC_ERR_HSM | AC_ERR_TIMEOUT | AC_ERR_ATA_BUS))
                action |= ATA_EH_SOFTRESET;

        return action;
}
static int ata_eh_categorize_ering_entry(struct ata_ering_entry *ent)
{
        if (ent->err_mask & (AC_ERR_ATA_BUS | AC_ERR_TIMEOUT))
                return 1;

        if (ent->is_io) {
                if (ent->err_mask & AC_ERR_HSM)
                        return 1;
                if ((ent->err_mask &
                     (AC_ERR_DEV|AC_ERR_MEDIA|AC_ERR_INVALID)) == AC_ERR_DEV)
                        return 2;
        }

        return 0;
}
struct speed_down_needed_arg {
        u64 since;
        int nr_errors[3];
};
static int speed_down_needed_cb(struct ata_ering_entry *ent, void *void_arg)
{
        struct speed_down_needed_arg *arg = void_arg;

        if (ent->timestamp < arg->since)
                return -1;

        arg->nr_errors[ata_eh_categorize_ering_entry(ent)]++;
        return 0;
}
/**
 *  ata_eh_speed_down_needed - Determine whether speed down is necessary
 *  @dev: Device of interest
 *
 *  This function examines error ring of @dev and determines
 *  whether speed down is necessary.  Speed down is necessary if
 *  there have been more than 3 Cat-1 errors or 10 Cat-2 errors
 *  during the last 15 minutes.
 *
 *  Cat-1 errors are ATA_BUS, TIMEOUT for any command and HSM
 *  violation for known supported commands.
 *
 *  Cat-2 errors are unclassified DEV error for known supported
 *  commands.
 *
 *  LOCKING:
 *  Inherited from caller.
 *
 *  RETURNS:
 *  1 if speed down is necessary, 0 otherwise
 */
static int ata_eh_speed_down_needed(struct ata_device *dev)
{
        const u64 interval = 15LLU * 60 * HZ;
        static const int err_limits[3] = { -1, 3, 10 };
        struct speed_down_needed_arg arg;
        struct ata_ering_entry *ent;
        int err_cat;
        u64 j64;

        ent = ata_ering_top(&dev->ering);
        if (!ent)
                return 0;

        err_cat = ata_eh_categorize_ering_entry(ent);
        if (err_cat == 0)
                return 0;

        memset(&arg, 0, sizeof(arg));

        j64 = get_jiffies_64();
        if (j64 >= interval)
                arg.since = j64 - interval;
        else
                arg.since = 0;

        ata_ering_map(&dev->ering, speed_down_needed_cb, &arg);

        return arg.nr_errors[err_cat] > err_limits[err_cat];
}
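/*
 * Illustrative note, not part of the original file: with
 * err_limits[] = { -1, 3, 10 }, a device whose most recent error is
 * Cat-1 is slowed down once more than 3 Cat-1 errors have been recorded
 * within the last 15 minutes (15LLU * 60 * HZ jiffies); a Cat-2 entry
 * needs more than 10.  A most recent entry of category 0 never triggers
 * a speed down because the function returns early.
 */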
/**
 *  ata_eh_speed_down - record error and speed down if necessary
 *  @dev: Failed device
 *  @is_io: Did the device fail during normal IO?
 *  @err_mask: err_mask of the error
 *
 *  Record error and examine error history to determine whether
 *  adjusting transmission speed is necessary.  It also sets
 *  transmission limits appropriately if such adjustment is
 *  necessary.
 *
 *  LOCKING:
 *  Kernel thread context (may sleep).
 *
 *  RETURNS:
 *  Determined recovery action (0 if no further action is needed).
 */
static int ata_eh_speed_down(struct ata_device *dev, int is_io,
                             unsigned int err_mask)
{
        if (!err_mask)
                return 0;

        /* record error and determine whether speed down is necessary */
        ata_ering_record(&dev->ering, is_io, err_mask);

        if (!ata_eh_speed_down_needed(dev))
                return 0;

        /* speed down SATA link speed if possible */
        if (sata_down_spd_limit(dev->ap) == 0)
                return ATA_EH_HARDRESET;

        /* lower transfer mode */
        if (ata_down_xfermask_limit(dev, 0) == 0)
                return ATA_EH_SOFTRESET;

        ata_dev_printk(dev, KERN_ERR,
                       "speed down requested but no transfer mode left\n");
        return 0;
}
/**
 *  ata_eh_autopsy - analyze error and determine recovery action
 *  @ap: ATA port to perform autopsy on
 *
 *  Analyze why @ap failed and determine which recovery action is
 *  needed.  This function also sets more detailed AC_ERR_* values
 *  and fills sense data for ATAPI CHECK SENSE.
 *
 *  LOCKING:
 *  Kernel thread context (may sleep).
 */
static void ata_eh_autopsy(struct ata_port *ap)
{
        struct ata_eh_context *ehc = &ap->eh_context;
        unsigned int all_err_mask = 0;
        int tag, is_io = 0;
        u32 serror;
        int rc;

        DPRINTK("ENTER\n");

        if (ehc->i.flags & ATA_EHI_NO_AUTOPSY)
                return;

        /* obtain and analyze SError */
        rc = sata_scr_read(ap, SCR_ERROR, &serror);
        if (rc == 0) {
                ehc->i.serror |= serror;
                ata_eh_analyze_serror(ap);
        } else if (rc != -EOPNOTSUPP)
                ehc->i.action |= ATA_EH_HARDRESET;

        /* analyze NCQ failure */
        ata_eh_analyze_ncq_error(ap);

        /* any real error trumps AC_ERR_OTHER */
        if (ehc->i.err_mask & ~AC_ERR_OTHER)
                ehc->i.err_mask &= ~AC_ERR_OTHER;

        all_err_mask |= ehc->i.err_mask;

        for (tag = 0; tag < ATA_MAX_QUEUE; tag++) {
                struct ata_queued_cmd *qc = __ata_qc_from_tag(ap, tag);

                if (!(qc->flags & ATA_QCFLAG_FAILED))
                        continue;

                /* inherit upper level err_mask */
                qc->err_mask |= ehc->i.err_mask;

                /* analyze TF */
                ehc->i.action |= ata_eh_analyze_tf(qc, &qc->result_tf);

                /* DEV errors are probably spurious in case of ATA_BUS error */
                if (qc->err_mask & AC_ERR_ATA_BUS)
                        qc->err_mask &= ~(AC_ERR_DEV | AC_ERR_MEDIA |
                                          AC_ERR_INVALID);

                /* any real error trumps unknown error */
                if (qc->err_mask & ~AC_ERR_OTHER)
                        qc->err_mask &= ~AC_ERR_OTHER;

                /* SENSE_VALID trumps dev/unknown error and revalidation */
                if (qc->flags & ATA_QCFLAG_SENSE_VALID) {
                        qc->err_mask &= ~(AC_ERR_DEV | AC_ERR_OTHER);
                        ehc->i.action &= ~ATA_EH_REVALIDATE;
                }

                /* accumulate error info */
                ehc->i.dev = qc->dev;
                all_err_mask |= qc->err_mask;
                if (qc->flags & ATA_QCFLAG_IO)
                        is_io = 1;
        }

        /* enforce default EH actions */
        if (ap->pflags & ATA_PFLAG_FROZEN ||
            all_err_mask & (AC_ERR_HSM | AC_ERR_TIMEOUT))
                ehc->i.action |= ATA_EH_SOFTRESET;
        else if (all_err_mask)
                ehc->i.action |= ATA_EH_REVALIDATE;

        /* if we have offending qcs and the associated failed device */
        if (ehc->i.dev) {
                /* speed down */
                ehc->i.action |= ata_eh_speed_down(ehc->i.dev, is_io,
                                                   all_err_mask);

                /* perform per-dev EH action only on the offending device */
                ehc->i.dev_action[ehc->i.dev->devno] |=
                        ehc->i.action & ATA_EH_PERDEV_MASK;
                ehc->i.action &= ~ATA_EH_PERDEV_MASK;
        }

        DPRINTK("EXIT\n");
}
/**
 *  ata_eh_report - report error handling to user
 *  @ap: ATA port EH is going on
 *
 *  Report EH to user.
 *
 *  LOCKING:
 *  None.
 */
static void ata_eh_report(struct ata_port *ap)
{
        struct ata_eh_context *ehc = &ap->eh_context;
        const char *frozen, *desc;
        int tag, nr_failed = 0;

        desc = NULL;
        if (ehc->i.desc[0] != '\0')
                desc = ehc->i.desc;

        for (tag = 0; tag < ATA_MAX_QUEUE; tag++) {
                struct ata_queued_cmd *qc = __ata_qc_from_tag(ap, tag);

                if (!(qc->flags & ATA_QCFLAG_FAILED))
                        continue;
                if (qc->flags & ATA_QCFLAG_SENSE_VALID && !qc->err_mask)
                        continue;

                nr_failed++;
        }

        if (!nr_failed && !ehc->i.err_mask)
                return;

        frozen = "";
        if (ap->pflags & ATA_PFLAG_FROZEN)
                frozen = " frozen";

        if (ehc->i.dev) {
                ata_dev_printk(ehc->i.dev, KERN_ERR, "exception Emask 0x%x "
                               "SAct 0x%x SErr 0x%x action 0x%x%s\n",
                               ehc->i.err_mask, ap->sactive, ehc->i.serror,
                               ehc->i.action, frozen);
                if (desc)
                        ata_dev_printk(ehc->i.dev, KERN_ERR, "(%s)\n", desc);
        } else {
                ata_port_printk(ap, KERN_ERR, "exception Emask 0x%x "
                                "SAct 0x%x SErr 0x%x action 0x%x%s\n",
                                ehc->i.err_mask, ap->sactive, ehc->i.serror,
                                ehc->i.action, frozen);
                if (desc)
                        ata_port_printk(ap, KERN_ERR, "(%s)\n", desc);
        }

        for (tag = 0; tag < ATA_MAX_QUEUE; tag++) {
                struct ata_queued_cmd *qc = __ata_qc_from_tag(ap, tag);

                if (!(qc->flags & ATA_QCFLAG_FAILED) || !qc->err_mask)
                        continue;

                ata_dev_printk(qc->dev, KERN_ERR, "tag %d cmd 0x%x "
                               "Emask 0x%x stat 0x%x err 0x%x (%s)\n",
                               qc->tag, qc->tf.command, qc->err_mask,
                               qc->result_tf.command, qc->result_tf.feature,
                               ata_err_string(qc->err_mask));
        }
}
static int ata_do_reset(struct ata_port *ap, ata_reset_fn_t reset,
                        unsigned int *classes)
{
        int i, rc;

        for (i = 0; i < ATA_MAX_DEVICES; i++)
                classes[i] = ATA_DEV_UNKNOWN;

        rc = reset(ap, classes);
        if (rc)
                return rc;

        /* If any class isn't ATA_DEV_UNKNOWN, consider classification
         * is complete and convert all ATA_DEV_UNKNOWN to
         * ATA_DEV_NONE.
         */
        for (i = 0; i < ATA_MAX_DEVICES; i++)
                if (classes[i] != ATA_DEV_UNKNOWN)
                        break;

        if (i < ATA_MAX_DEVICES)
                for (i = 0; i < ATA_MAX_DEVICES; i++)
                        if (classes[i] == ATA_DEV_UNKNOWN)
                                classes[i] = ATA_DEV_NONE;

        return 0;
}
static int ata_eh_followup_srst_needed(int rc, int classify,
                                       const unsigned int *classes)
{
        if (rc == -EAGAIN)
                return 1;
        if (rc != 0)
                return 0;
        if (classify && classes[0] == ATA_DEV_UNKNOWN)
                return 1;
        return 0;
}
static int ata_eh_reset(struct ata_port *ap, int classify,
                        ata_prereset_fn_t prereset, ata_reset_fn_t softreset,
                        ata_reset_fn_t hardreset, ata_postreset_fn_t postreset)
{
        struct ata_eh_context *ehc = &ap->eh_context;
        unsigned int *classes = ehc->classes;
        int tries = ATA_EH_RESET_TRIES;
        int verbose = !(ehc->i.flags & ATA_EHI_QUIET);
        unsigned int action;
        ata_reset_fn_t reset;
        int i, did_followup_srst, rc;

        /* about to reset */
        ata_eh_about_to_do(ap, NULL, ehc->i.action & ATA_EH_RESET_MASK);

        /* Determine which reset to use and record in ehc->i.action.
         * prereset() may examine and modify it.
         */
        action = ehc->i.action;
        ehc->i.action &= ~ATA_EH_RESET_MASK;
        if (softreset && (!hardreset || (!sata_set_spd_needed(ap) &&
                                         !(action & ATA_EH_HARDRESET))))
                ehc->i.action |= ATA_EH_SOFTRESET;
        else
                ehc->i.action |= ATA_EH_HARDRESET;

        if (prereset) {
                rc = prereset(ap);
                if (rc) {
                        ata_port_printk(ap, KERN_ERR,
                                        "prereset failed (errno=%d)\n", rc);
                        return rc;
                }
        }

        /* prereset() might have modified ehc->i.action */
        if (ehc->i.action & ATA_EH_HARDRESET)
                reset = hardreset;
        else if (ehc->i.action & ATA_EH_SOFTRESET)
                reset = softreset;
        else {
                /* prereset told us not to reset, bang classes and return */
                for (i = 0; i < ATA_MAX_DEVICES; i++)
                        classes[i] = ATA_DEV_NONE;
                return 0;
        }

        /* did prereset() screw up?  if so, fix up to avoid oopsing */
        if (!reset) {
                ata_port_printk(ap, KERN_ERR, "BUG: prereset() requested "
                                "invalid reset type\n");
                if (softreset)
                        reset = softreset;
                else
                        reset = hardreset;
        }

 retry:
        /* shut up during boot probing */
        if (verbose)
                ata_port_printk(ap, KERN_INFO, "%s resetting port\n",
                                reset == softreset ? "soft" : "hard");

        /* mark that this EH session started with reset */
        ehc->i.flags |= ATA_EHI_DID_RESET;

        rc = ata_do_reset(ap, reset, classes);

        did_followup_srst = 0;
        if (reset == hardreset &&
            ata_eh_followup_srst_needed(rc, classify, classes)) {
                /* okay, let's do follow-up softreset */
                did_followup_srst = 1;
                reset = softreset;

                if (!reset) {
                        ata_port_printk(ap, KERN_ERR,
                                        "follow-up softreset required "
                                        "but no softreset available\n");
                        return -EINVAL;
                }

                ata_eh_about_to_do(ap, NULL, ATA_EH_RESET_MASK);
                rc = ata_do_reset(ap, reset, classes);

                if (rc == 0 && classify &&
                    classes[0] == ATA_DEV_UNKNOWN) {
                        ata_port_printk(ap, KERN_ERR,
                                        "classification failed\n");
                        return -EINVAL;
                }
        }

        if (rc && --tries) {
                const char *type;

                if (reset == softreset) {
                        if (did_followup_srst)
                                type = "follow-up soft";
                        else
                                type = "soft";
                } else
                        type = "hard";

                ata_port_printk(ap, KERN_WARNING,
                                "%sreset failed, retrying in 5 secs\n", type);
                ssleep(5);

                if (reset == hardreset)
                        sata_down_spd_limit(ap);
                if (hardreset)
                        reset = hardreset;
                goto retry;
        }

        if (rc == 0) {
                /* After the reset, the device state is PIO 0 and the
                 * controller state is undefined.  Record the mode.
                 */
                for (i = 0; i < ATA_MAX_DEVICES; i++)
                        ap->device[i].pio_mode = XFER_PIO_0;

                if (postreset)
                        postreset(ap, classes);

                /* reset successful, schedule revalidation */
                ata_eh_done(ap, NULL, ehc->i.action & ATA_EH_RESET_MASK);
                ehc->i.action |= ATA_EH_REVALIDATE;
        }

        return rc;
}
static int ata_eh_revalidate_and_attach(struct ata_port *ap,
                                        struct ata_device **r_failed_dev)
{
        struct ata_eh_context *ehc = &ap->eh_context;
        struct ata_device *dev;
        unsigned long flags;
        int i, rc = 0;

        DPRINTK("ENTER\n");

        for (i = 0; i < ATA_MAX_DEVICES; i++) {
                unsigned int action;

                dev = &ap->device[i];
                action = ata_eh_dev_action(dev);

                if (action & ATA_EH_REVALIDATE && ata_dev_ready(dev)) {
                        if (ata_port_offline(ap)) {
                                rc = -EIO;
                                break;
                        }

                        ata_eh_about_to_do(ap, dev, ATA_EH_REVALIDATE);
                        rc = ata_dev_revalidate(dev,
                                        ehc->i.flags & ATA_EHI_DID_RESET);
                        if (rc)
                                break;

                        ata_eh_done(ap, dev, ATA_EH_REVALIDATE);

                        /* schedule the scsi_rescan_device() here */
                        queue_work(ata_aux_wq, &(ap->scsi_rescan_task));
                } else if (dev->class == ATA_DEV_UNKNOWN &&
                           ehc->tries[dev->devno] &&
                           ata_class_enabled(ehc->classes[dev->devno])) {
                        dev->class = ehc->classes[dev->devno];

                        rc = ata_dev_read_id(dev, &dev->class, 1, dev->id);
                        if (rc == 0)
                                rc = ata_dev_configure(dev, 1);

                        if (rc) {
                                dev->class = ATA_DEV_UNKNOWN;
                                break;
                        }

                        spin_lock_irqsave(ap->lock, flags);
                        ap->pflags |= ATA_PFLAG_SCSI_HOTPLUG;
                        spin_unlock_irqrestore(ap->lock, flags);
                }
        }

        if (rc)
                *r_failed_dev = dev;

        DPRINTK("EXIT\n");
        return rc;
}
/**
 *  ata_eh_suspend - handle suspend EH action
 *  @ap: target host port
 *  @r_failed_dev: result parameter to indicate failing device
 *
 *  Handle suspend EH action.  Disk devices are spun down and
 *  other types of devices are just marked suspended.  Once
 *  suspended, no EH action to the device is allowed until it is
 *  resumed.
 *
 *  LOCKING:
 *  Kernel thread context (may sleep).
 *
 *  RETURNS:
 *  0 on success, -errno otherwise
 */
static int ata_eh_suspend(struct ata_port *ap, struct ata_device **r_failed_dev)
{
        struct ata_device *dev;
        int i, rc = 0;

        DPRINTK("ENTER\n");

        for (i = 0; i < ATA_MAX_DEVICES; i++) {
                unsigned long flags;
                unsigned int action, err_mask;

                dev = &ap->device[i];
                action = ata_eh_dev_action(dev);

                if (!ata_dev_enabled(dev) || !(action & ATA_EH_SUSPEND))
                        continue;

                WARN_ON(dev->flags & ATA_DFLAG_SUSPENDED);

                ata_eh_about_to_do(ap, dev, ATA_EH_SUSPEND);

                if (dev->class == ATA_DEV_ATA && !(action & ATA_EH_PM_FREEZE)) {
                        /* flush cache */
                        rc = ata_flush_cache(dev);
                        if (rc)
                                break;

                        /* spin down */
                        err_mask = ata_do_simple_cmd(dev, ATA_CMD_STANDBYNOW1);
                        if (err_mask) {
                                ata_dev_printk(dev, KERN_ERR, "failed to "
                                               "spin down (err_mask=0x%x)\n",
                                               err_mask);
                                rc = -EIO;
                                break;
                        }
                }

                spin_lock_irqsave(ap->lock, flags);
                dev->flags |= ATA_DFLAG_SUSPENDED;
                spin_unlock_irqrestore(ap->lock, flags);

                ata_eh_done(ap, dev, ATA_EH_SUSPEND);
        }

        if (rc)
                *r_failed_dev = dev;

        DPRINTK("EXIT\n");
        return rc;
}
/**
 *  ata_eh_prep_resume - prep for resume EH action
 *  @ap: target host port
 *
 *  Clear SUSPENDED in preparation for scheduled resume actions.
 *  This allows other parts of EH to access the devices being
 *  resumed.
 *
 *  LOCKING:
 *  Kernel thread context (may sleep).
 */
static void ata_eh_prep_resume(struct ata_port *ap)
{
        struct ata_device *dev;
        unsigned long flags;
        int i;

        DPRINTK("ENTER\n");

        for (i = 0; i < ATA_MAX_DEVICES; i++) {
                unsigned int action;

                dev = &ap->device[i];
                action = ata_eh_dev_action(dev);

                if (!ata_dev_enabled(dev) || !(action & ATA_EH_RESUME))
                        continue;

                spin_lock_irqsave(ap->lock, flags);
                dev->flags &= ~ATA_DFLAG_SUSPENDED;
                spin_unlock_irqrestore(ap->lock, flags);
        }

        DPRINTK("EXIT\n");
}
/**
 *  ata_eh_resume - handle resume EH action
 *  @ap: target host port
 *  @r_failed_dev: result parameter to indicate failing device
 *
 *  Handle resume EH action.  Target devices are already reset and
 *  revalidated.  Spinning up is the only operation left.
 *
 *  LOCKING:
 *  Kernel thread context (may sleep).
 *
 *  RETURNS:
 *  0 on success, -errno otherwise
 */
static int ata_eh_resume(struct ata_port *ap, struct ata_device **r_failed_dev)
{
        struct ata_device *dev;
        int i, rc = 0;

        DPRINTK("ENTER\n");

        for (i = 0; i < ATA_MAX_DEVICES; i++) {
                unsigned int action, err_mask;

                dev = &ap->device[i];
                action = ata_eh_dev_action(dev);

                if (!ata_dev_enabled(dev) || !(action & ATA_EH_RESUME))
                        continue;

                ata_eh_about_to_do(ap, dev, ATA_EH_RESUME);

                if (dev->class == ATA_DEV_ATA && !(action & ATA_EH_PM_FREEZE)) {
                        err_mask = ata_do_simple_cmd(dev,
                                                     ATA_CMD_IDLEIMMEDIATE);
                        if (err_mask) {
                                ata_dev_printk(dev, KERN_ERR, "failed to "
                                               "spin up (err_mask=0x%x)\n",
                                               err_mask);
                                rc = -EIO;
                                break;
                        }
                }

                ata_eh_done(ap, dev, ATA_EH_RESUME);
        }

        if (rc)
                *r_failed_dev = dev;

        DPRINTK("EXIT\n");
        return rc;
}
static int ata_port_nr_enabled(struct ata_port *ap)
{
        int i, cnt = 0;

        for (i = 0; i < ATA_MAX_DEVICES; i++)
                if (ata_dev_enabled(&ap->device[i]))
                        cnt++;
        return cnt;
}
static int ata_port_nr_vacant(struct ata_port *ap)
{
        int i, cnt = 0;

        for (i = 0; i < ATA_MAX_DEVICES; i++)
                if (ap->device[i].class == ATA_DEV_UNKNOWN)
                        cnt++;
        return cnt;
}
static int ata_eh_skip_recovery(struct ata_port *ap)
{
        struct ata_eh_context *ehc = &ap->eh_context;
        int i;

        /* skip if all possible devices are suspended */
        for (i = 0; i < ata_port_max_devices(ap); i++) {
                struct ata_device *dev = &ap->device[i];

                if (!(dev->flags & ATA_DFLAG_SUSPENDED))
                        break;
        }

        if (i == ata_port_max_devices(ap))
                return 1;

        /* thaw frozen port, resume link and recover failed devices */
        if ((ap->pflags & ATA_PFLAG_FROZEN) ||
            (ehc->i.flags & ATA_EHI_RESUME_LINK) || ata_port_nr_enabled(ap))
                return 0;

        /* skip if class codes for all vacant slots are ATA_DEV_NONE */
        for (i = 0; i < ATA_MAX_DEVICES; i++) {
                struct ata_device *dev = &ap->device[i];

                if (dev->class == ATA_DEV_UNKNOWN &&
                    ehc->classes[dev->devno] != ATA_DEV_NONE)
                        return 0;
        }

        return 1;
}
/**
 *  ata_eh_recover - recover host port after error
 *  @ap: host port to recover
 *  @prereset: prereset method (can be NULL)
 *  @softreset: softreset method (can be NULL)
 *  @hardreset: hardreset method (can be NULL)
 *  @postreset: postreset method (can be NULL)
 *
 *  This is the alpha and omega, yin and yang, heart and soul of
 *  libata exception handling.  On entry, actions required to
 *  recover the port and hotplug requests are recorded in
 *  eh_context.  This function executes all the operations with
 *  appropriate retries and fallbacks to resurrect failed
 *  devices, detach goners and greet newcomers.
 *
 *  LOCKING:
 *  Kernel thread context (may sleep).
 *
 *  RETURNS:
 *  0 on success, -errno on failure.
 */
static int ata_eh_recover(struct ata_port *ap, ata_prereset_fn_t prereset,
                          ata_reset_fn_t softreset, ata_reset_fn_t hardreset,
                          ata_postreset_fn_t postreset)
{
        struct ata_eh_context *ehc = &ap->eh_context;
        struct ata_device *dev;
        int down_xfermask, i, rc;

        DPRINTK("ENTER\n");

        /* prep for recovery */
        for (i = 0; i < ATA_MAX_DEVICES; i++) {
                dev = &ap->device[i];

                ehc->tries[dev->devno] = ATA_EH_DEV_TRIES;

                /* process hotplug request */
                if (dev->flags & ATA_DFLAG_DETACH)
                        ata_eh_detach_dev(dev);

                if (!ata_dev_enabled(dev) &&
                    ((ehc->i.probe_mask & (1 << dev->devno)) &&
                     !(ehc->did_probe_mask & (1 << dev->devno)))) {
                        ata_eh_detach_dev(dev);
                        ata_dev_init(dev);
                        ehc->did_probe_mask |= (1 << dev->devno);
                        ehc->i.action |= ATA_EH_SOFTRESET;
                }
        }

 retry:
        down_xfermask = 0;
        rc = 0;

        /* if UNLOADING, finish immediately */
        if (ap->pflags & ATA_PFLAG_UNLOADING)
                goto out;

        /* prep for resume */
        ata_eh_prep_resume(ap);

        /* skip EH if possible. */
        if (ata_eh_skip_recovery(ap))
                ehc->i.action = 0;

        for (i = 0; i < ATA_MAX_DEVICES; i++)
                ehc->classes[i] = ATA_DEV_UNKNOWN;

        /* reset */
        if (ehc->i.action & ATA_EH_RESET_MASK) {
                ata_eh_freeze_port(ap);

                rc = ata_eh_reset(ap, ata_port_nr_vacant(ap), prereset,
                                  softreset, hardreset, postreset);
                if (rc) {
                        ata_port_printk(ap, KERN_ERR,
                                        "reset failed, giving up\n");
                        goto out;
                }

                ata_eh_thaw_port(ap);
        }

        /* revalidate existing devices and attach new ones */
        rc = ata_eh_revalidate_and_attach(ap, &dev);
        if (rc)
                goto dev_fail;

        /* resume devices */
        rc = ata_eh_resume(ap, &dev);
        if (rc)
                goto dev_fail;

        /* configure transfer mode if the port has been reset */
        if (ehc->i.flags & ATA_EHI_DID_RESET) {
                rc = ata_set_mode(ap, &dev);
                if (rc) {
                        down_xfermask = 1;
                        goto dev_fail;
                }
        }

        /* suspend devices */
        rc = ata_eh_suspend(ap, &dev);
        if (rc)
                goto dev_fail;

        goto out;

 dev_fail:
        switch (rc) {
        case -ENODEV:
                /* device missing, schedule probing */
                ehc->i.probe_mask |= (1 << dev->devno);
        case -EINVAL:
                ehc->tries[dev->devno] = 0;
                break;
        case -EIO:
                sata_down_spd_limit(ap);
        default:
                ehc->tries[dev->devno]--;
                if (down_xfermask &&
                    ata_down_xfermask_limit(dev, ehc->tries[dev->devno] == 1))
                        ehc->tries[dev->devno] = 0;
        }

        if (ata_dev_enabled(dev) && !ehc->tries[dev->devno]) {
                /* disable device if it has used up all its chances */
                ata_dev_disable(dev);

                /* detach if offline */
                if (ata_port_offline(ap))
                        ata_eh_detach_dev(dev);

                /* probe if requested */
                if ((ehc->i.probe_mask & (1 << dev->devno)) &&
                    !(ehc->did_probe_mask & (1 << dev->devno))) {
                        ata_eh_detach_dev(dev);
                        ata_dev_init(dev);

                        ehc->tries[dev->devno] = ATA_EH_DEV_TRIES;
                        ehc->did_probe_mask |= (1 << dev->devno);
                        ehc->i.action |= ATA_EH_SOFTRESET;
                }
        } else {
                /* soft didn't work?  be haaaaard */
                if (ehc->i.flags & ATA_EHI_DID_RESET)
                        ehc->i.action |= ATA_EH_HARDRESET;
                else
                        ehc->i.action |= ATA_EH_SOFTRESET;
        }

        if (ata_port_nr_enabled(ap)) {
                ata_port_printk(ap, KERN_WARNING, "failed to recover some "
                                "devices, retrying in 5 secs\n");
                ssleep(5);
        } else {
                /* no device left, repeat fast */
                msleep(500);
        }

        goto retry;

 out:
        if (rc) {
                for (i = 0; i < ATA_MAX_DEVICES; i++)
                        ata_dev_disable(&ap->device[i]);
        }

        DPRINTK("EXIT, rc=%d\n", rc);
        return rc;
}
/**
 *  ata_eh_finish - finish up EH
 *  @ap: host port to finish EH for
 *
 *  Recovery is complete.  Clean up EH states and retry or finish
 *  failed qcs.
 *
 *  LOCKING:
 *  None.
 */
static void ata_eh_finish(struct ata_port *ap)
{
        int tag;

        /* retry or finish qcs */
        for (tag = 0; tag < ATA_MAX_QUEUE; tag++) {
                struct ata_queued_cmd *qc = __ata_qc_from_tag(ap, tag);

                if (!(qc->flags & ATA_QCFLAG_FAILED))
                        continue;

                if (qc->err_mask) {
                        /* FIXME: Once EH migration is complete,
                         * generate sense data in this function,
                         * considering both err_mask and tf.
                         */
                        if (qc->err_mask & AC_ERR_INVALID)
                                ata_eh_qc_complete(qc);
                        else
                                ata_eh_qc_retry(qc);
                } else {
                        if (qc->flags & ATA_QCFLAG_SENSE_VALID) {
                                ata_eh_qc_complete(qc);
                        } else {
                                /* feed zero TF to sense generation */
                                memset(&qc->result_tf, 0, sizeof(qc->result_tf));
                                ata_eh_qc_retry(qc);
                        }
                }
        }
}
/**
 *  ata_do_eh - do standard error handling
 *  @ap: host port to handle error for
 *  @prereset: prereset method (can be NULL)
 *  @softreset: softreset method (can be NULL)
 *  @hardreset: hardreset method (can be NULL)
 *  @postreset: postreset method (can be NULL)
 *
 *  Perform standard error handling sequence.
 *
 *  LOCKING:
 *  Kernel thread context (may sleep).
 */
void ata_do_eh(struct ata_port *ap, ata_prereset_fn_t prereset,
               ata_reset_fn_t softreset, ata_reset_fn_t hardreset,
               ata_postreset_fn_t postreset)
{
        ata_eh_autopsy(ap);
        ata_eh_report(ap);
        ata_eh_recover(ap, prereset, softreset, hardreset, postreset);
        ata_eh_finish(ap);
}
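/*
 * Illustrative sketch, not part of the original file: a typical LLDD
 * wires its ->error_handler to ata_do_eh() with the standard libata
 * reset methods.  The function name my_error_handler is hypothetical;
 * the ata_std_*/sata_std_* helpers are the standard libata callbacks.
 *
 *      static void my_error_handler(struct ata_port *ap)
 *      {
 *              ata_do_eh(ap, ata_std_prereset, ata_std_softreset,
 *                        sata_std_hardreset, ata_std_postreset);
 *      }
 */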
/**
 *  ata_eh_handle_port_suspend - perform port suspend operation
 *  @ap: port to suspend
 *
 *  Suspend @ap.
 *
 *  LOCKING:
 *  Kernel thread context (may sleep).
 */
static void ata_eh_handle_port_suspend(struct ata_port *ap)
{
        unsigned long flags;
        int rc = 0;

        /* are we suspending? */
        spin_lock_irqsave(ap->lock, flags);
        if (!(ap->pflags & ATA_PFLAG_PM_PENDING) ||
            ap->pm_mesg.event == PM_EVENT_ON) {
                spin_unlock_irqrestore(ap->lock, flags);
                return;
        }
        spin_unlock_irqrestore(ap->lock, flags);

        WARN_ON(ap->pflags & ATA_PFLAG_SUSPENDED);

        /* suspend */
        ata_eh_freeze_port(ap);

        if (ap->ops->port_suspend)
                rc = ap->ops->port_suspend(ap, ap->pm_mesg);

        /* report result */
        spin_lock_irqsave(ap->lock, flags);

        ap->pflags &= ~ATA_PFLAG_PM_PENDING;
        if (rc == 0)
                ap->pflags |= ATA_PFLAG_SUSPENDED;
        else
                ata_port_schedule_eh(ap);

        if (ap->pm_result) {
                *ap->pm_result = rc;
                ap->pm_result = NULL;
        }

        spin_unlock_irqrestore(ap->lock, flags);
}
/**
 *  ata_eh_handle_port_resume - perform port resume operation
 *  @ap: port to resume
 *
 *  Resume @ap.
 *
 *  This function also waits up to one second until all devices
 *  hanging off this port request resume EH action.  This is to
 *  prevent invoking EH and thus reset multiple times on resume.
 *
 *  On DPM resume, where some of the devices might not be resumed
 *  together, this may delay port resume up to one second, but such
 *  DPM resumes are rare and a one-second delay isn't too bad.
 *
 *  LOCKING:
 *  Kernel thread context (may sleep).
 */
static void ata_eh_handle_port_resume(struct ata_port *ap)
{
        unsigned long timeout;
        unsigned long flags;
        int i, rc = 0;

        /* are we resuming? */
        spin_lock_irqsave(ap->lock, flags);
        if (!(ap->pflags & ATA_PFLAG_PM_PENDING) ||
            ap->pm_mesg.event != PM_EVENT_ON) {
                spin_unlock_irqrestore(ap->lock, flags);
                return;
        }
        spin_unlock_irqrestore(ap->lock, flags);

        /* spurious? */
        if (!(ap->pflags & ATA_PFLAG_SUSPENDED))
                goto done;

        if (ap->ops->port_resume)
                rc = ap->ops->port_resume(ap);

        /* give devices time to request EH */
        timeout = jiffies + HZ; /* 1s max */
        while (1) {
                for (i = 0; i < ATA_MAX_DEVICES; i++) {
                        struct ata_device *dev = &ap->device[i];
                        unsigned int action = ata_eh_dev_action(dev);

                        if ((dev->flags & ATA_DFLAG_SUSPENDED) &&
                            !(action & ATA_EH_RESUME))
                                break;
                }

                if (i == ATA_MAX_DEVICES || time_after(jiffies, timeout))
                        break;
                msleep(10);
        }

 done:
        spin_lock_irqsave(ap->lock, flags);
        ap->pflags &= ~(ATA_PFLAG_PM_PENDING | ATA_PFLAG_SUSPENDED);
        if (ap->pm_result) {
                *ap->pm_result = rc;
                ap->pm_result = NULL;
        }
        spin_unlock_irqrestore(ap->lock, flags);
}