/*
 * libata-eh.c - libata error handling
 *
 * Maintained by: Jeff Garzik <jgarzik@pobox.com>
 * Please ALWAYS copy linux-ide@vger.kernel.org
 * on emails.
 *
 * Copyright 2006 Tejun Heo <htejun@gmail.com>
 *
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as
 * published by the Free Software Foundation; either version 2, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; see the file COPYING.  If not, write to
 * the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139,
 * USA.
 *
 *
 * libata documentation is available via 'make {ps|pdf}docs',
 * as Documentation/DocBook/libata.*
 *
 * Hardware documentation available from http://www.t13.org/ and
 * http://www.sata-io.org/
 *
 */

#include <linux/kernel.h>
#include <linux/blkdev.h>
#include <linux/pci.h>
#include <scsi/scsi.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_eh.h>
#include <scsi/scsi_device.h>
#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_dbg.h>
#include "../scsi/scsi_transport_api.h"

#include <linux/libata.h>

#include "libata.h"

enum {
    /* speed down verdicts */
    ATA_EH_SPDN_NCQ_OFF = (1 << 0),
    ATA_EH_SPDN_SPEED_DOWN = (1 << 1),
    ATA_EH_SPDN_FALLBACK_TO_PIO = (1 << 2),
    ATA_EH_SPDN_KEEP_ERRORS = (1 << 3),

    /* error flags */
    ATA_EFLAG_IS_IO = (1 << 0),
    ATA_EFLAG_DUBIOUS_XFER = (1 << 1),

    /* error categories */
    ATA_ECAT_NONE = 0,
    ATA_ECAT_ATA_BUS = 1,
    ATA_ECAT_TOUT_HSM = 2,
    ATA_ECAT_UNK_DEV = 3,
    ATA_ECAT_DUBIOUS_NONE = 4,
    ATA_ECAT_DUBIOUS_ATA_BUS = 5,
    ATA_ECAT_DUBIOUS_TOUT_HSM = 6,
    ATA_ECAT_DUBIOUS_UNK_DEV = 7,
    ATA_ECAT_NR = 8,

    ATA_EH_CMD_DFL_TIMEOUT = 5000,

    /* always put at least this amount of time between resets */
    ATA_EH_RESET_COOL_DOWN = 5000,

    /* Waiting in ->prereset can never be reliable.  It's
     * sometimes nice to wait there but it can't be depended upon;
     * otherwise, we wouldn't be resetting.  Just give it enough
     * time for most drives to spin up.
     */
    ATA_EH_PRERESET_TIMEOUT = 10000,
    ATA_EH_FASTDRAIN_INTERVAL = 3000,

    ATA_EH_UA_TRIES = 5,

    /* probe speed down parameters, see ata_eh_schedule_probe() */
    ATA_EH_PROBE_TRIAL_INTERVAL = 60000, /* 1 min */
    ATA_EH_PROBE_TRIALS = 2,
};

/* The following table determines how we sequence resets.  Each entry
 * represents timeout for that try.  The first try can be soft or
 * hardreset.  All others are hardreset if available.  In most cases
 * the first reset w/ 10sec timeout should succeed.  Following entries
 * are mostly for error handling, hotplug and retarded devices.
 */
static const unsigned long ata_eh_reset_timeouts[] = {
    10000,     /* most drives spin up by 10sec */
    10000,     /* > 99% working drives spin up before 20sec */
    35000,     /* give > 30 secs of idleness for retarded devices */
    5000,      /* and sweet one last chance */
    ULONG_MAX, /* > 1 min has elapsed, give up */
};

static const unsigned long ata_eh_identify_timeouts[] = {
    5000,      /* covers > 99% of successes and not too boring on failures */
    10000,     /* combined time till here is enough even for media access */
    30000,     /* for true idiots */
    ULONG_MAX,
};

static const unsigned long ata_eh_flush_timeouts[] = {
    15000,     /* be generous with flush */
    15000,     /* ditto */
    30000,     /* and even more generous */
    ULONG_MAX,
};

static const unsigned long ata_eh_other_timeouts[] = {
    5000,      /* same rationale as identify timeout */
    10000,     /* ditto */
    /* but no merciful 30sec for other commands, it just isn't worth it */
    ULONG_MAX,
};

struct ata_eh_cmd_timeout_ent {
    const u8 *commands;
    const unsigned long *timeouts;
};

/* The following table determines timeouts to use for EH internal
 * commands.  Each table entry is a command class and matches the
 * commands the entry applies to and the timeout table to use.
 *
 * On the retry after a command timed out, the next timeout value from
 * the table is used.  If the table doesn't contain further entries,
 * the last value is used.
 *
 * ehc->cmd_timeout_idx keeps track of which timeout to use per
 * command class, so if SET_FEATURES times out on the first try, the
143 */
144 #define CMDS(cmds...) (const u8 []){ cmds, 0 }
static const struct ata_eh_cmd_timeout_ent
ata_eh_cmd_timeout_table[ATA_EH_CMD_TIMEOUT_TABLE_SIZE] = {
    { .commands = CMDS(ATA_CMD_ID_ATA, ATA_CMD_ID_ATAPI),
      .timeouts = ata_eh_identify_timeouts, },
    { .commands = CMDS(ATA_CMD_READ_NATIVE_MAX, ATA_CMD_READ_NATIVE_MAX_EXT),
      .timeouts = ata_eh_other_timeouts, },
    { .commands = CMDS(ATA_CMD_SET_MAX, ATA_CMD_SET_MAX_EXT),
      .timeouts = ata_eh_other_timeouts, },
    { .commands = CMDS(ATA_CMD_SET_FEATURES),
      .timeouts = ata_eh_other_timeouts, },
    { .commands = CMDS(ATA_CMD_INIT_DEV_PARAMS),
      .timeouts = ata_eh_other_timeouts, },
    { .commands = CMDS(ATA_CMD_FLUSH, ATA_CMD_FLUSH_EXT),
      .timeouts = ata_eh_flush_timeouts },
};
#undef CMDS
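
/* Worked example (derived from the tables above): the first EH-internal
 * IDENTIFY (ATA_CMD_ID_ATA) issued to a device runs with the first
 * entry of ata_eh_identify_timeouts, 5000ms.  If it times out,
 * ata_internal_cmd_timed_out() below bumps cmd_timeout_idx for the
 * IDENTIFY class of that device, so the retry gets 10000ms and the one
 * after that 30000ms.  The ULONG_MAX sentinel is never handed out; the
 * index stops advancing just before it, so the last finite value keeps
 * being used for any further retries.
 */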

static void __ata_port_freeze(struct ata_port *ap);
#ifdef CONFIG_PM
static void ata_eh_handle_port_suspend(struct ata_port *ap);
static void ata_eh_handle_port_resume(struct ata_port *ap);
#else /* CONFIG_PM */
static void ata_eh_handle_port_suspend(struct ata_port *ap)
{ }

static void ata_eh_handle_port_resume(struct ata_port *ap)
{ }
#endif /* CONFIG_PM */

static void __ata_ehi_pushv_desc(struct ata_eh_info *ehi, const char *fmt,
                                 va_list args)
{
    ehi->desc_len += vscnprintf(ehi->desc + ehi->desc_len,
                                ATA_EH_DESC_LEN - ehi->desc_len,
                                fmt, args);
}

/**
 * __ata_ehi_push_desc - push error description without adding separator
 * @ehi: target EHI
 * @fmt: printf format string
 *
 * Format string according to @fmt and append it to @ehi->desc.
 *
 * LOCKING:
 * spin_lock_irqsave(host lock)
 */
void __ata_ehi_push_desc(struct ata_eh_info *ehi, const char *fmt, ...)
{
    va_list args;

    va_start(args, fmt);
    __ata_ehi_pushv_desc(ehi, fmt, args);
    va_end(args);
}

/**
 * ata_ehi_push_desc - push error description with separator
 * @ehi: target EHI
 * @fmt: printf format string
 *
 * Format string according to @fmt and append it to @ehi->desc.
 * If @ehi->desc is not empty, ", " is added in-between.
 *
 * LOCKING:
 * spin_lock_irqsave(host lock)
 */
void ata_ehi_push_desc(struct ata_eh_info *ehi, const char *fmt, ...)
{
    va_list args;

    if (ehi->desc_len)
        __ata_ehi_push_desc(ehi, ", ");

    va_start(args, fmt);
    __ata_ehi_pushv_desc(ehi, fmt, args);
    va_end(args);
}

/**
 * ata_ehi_clear_desc - clean error description
 * @ehi: target EHI
 *
 * Clear @ehi->desc.
 *
 * LOCKING:
 * spin_lock_irqsave(host lock)
 */
void ata_ehi_clear_desc(struct ata_eh_info *ehi)
{
    ehi->desc[0] = '\0';
    ehi->desc_len = 0;
}

/**
 * ata_port_desc - append port description
 * @ap: target ATA port
 * @fmt: printf format string
 *
 * Format string according to @fmt and append it to port
 * description.  If port description is not empty, " " is added
 * in-between.  This function is to be used while initializing
 * ata_host.  The description is printed on host registration.
 *
 * LOCKING:
 * None.
 */
void ata_port_desc(struct ata_port *ap, const char *fmt, ...)
{
    va_list args;

    WARN_ON(!(ap->pflags & ATA_PFLAG_INITIALIZING));

    if (ap->link.eh_info.desc_len)
        __ata_ehi_push_desc(&ap->link.eh_info, " ");

    va_start(args, fmt);
    __ata_ehi_pushv_desc(&ap->link.eh_info, fmt, args);
    va_end(args);
}

#ifdef CONFIG_PCI

/**
 * ata_port_pbar_desc - append PCI BAR description
 * @ap: target ATA port
 * @bar: target PCI BAR
 * @offset: offset into PCI BAR
 * @name: name of the area
 *
 * If @offset is negative, this function formats a string which
 * contains the name, address, size and type of the BAR and
 * appends it to the port description.  If @offset is zero or
 * positive, only the name and the offset address are appended.
 *
 * LOCKING:
 * None.
 */
void ata_port_pbar_desc(struct ata_port *ap, int bar, ssize_t offset,
                        const char *name)
{
    struct pci_dev *pdev = to_pci_dev(ap->host->dev);
    char *type = "";
    unsigned long long start, len;

    if (pci_resource_flags(pdev, bar) & IORESOURCE_MEM)
        type = "m";
    else if (pci_resource_flags(pdev, bar) & IORESOURCE_IO)
        type = "i";

    start = (unsigned long long)pci_resource_start(pdev, bar);
    len = (unsigned long long)pci_resource_len(pdev, bar);

    if (offset < 0)
        ata_port_desc(ap, "%s %s%llu@0x%llx", name, type, len, start);
    else
        ata_port_desc(ap, "%s 0x%llx", name,
                      start + (unsigned long long)offset);
}

#endif /* CONFIG_PCI */

static int ata_lookup_timeout_table(u8 cmd)
{
    int i;

    for (i = 0; i < ATA_EH_CMD_TIMEOUT_TABLE_SIZE; i++) {
        const u8 *cur;

        for (cur = ata_eh_cmd_timeout_table[i].commands; *cur; cur++)
            if (*cur == cmd)
                return i;
    }

    return -1;
}

/**
 * ata_internal_cmd_timeout - determine timeout for an internal command
 * @dev: target device
 * @cmd: internal command to be issued
 *
 * Determine timeout for internal command @cmd for @dev.
 *
 * LOCKING:
 * EH context.
 *
 * RETURNS:
 * Determined timeout.
 */
unsigned long ata_internal_cmd_timeout(struct ata_device *dev, u8 cmd)
{
    struct ata_eh_context *ehc = &dev->link->eh_context;
    int ent = ata_lookup_timeout_table(cmd);
    int idx;

    if (ent < 0)
        return ATA_EH_CMD_DFL_TIMEOUT;

    idx = ehc->cmd_timeout_idx[dev->devno][ent];
    return ata_eh_cmd_timeout_table[ent].timeouts[idx];
}

/**
 * ata_internal_cmd_timed_out - notification for internal command timeout
 * @dev: target device
 * @cmd: internal command which timed out
 *
 * Notify EH that internal command @cmd for @dev timed out.  This
 * function should be called only for commands whose timeouts are
 * determined using ata_internal_cmd_timeout().
 *
 * LOCKING:
 * EH context.
 */
void ata_internal_cmd_timed_out(struct ata_device *dev, u8 cmd)
{
    struct ata_eh_context *ehc = &dev->link->eh_context;
    int ent = ata_lookup_timeout_table(cmd);
    int idx;

    if (ent < 0)
        return;

    idx = ehc->cmd_timeout_idx[dev->devno][ent];
    if (ata_eh_cmd_timeout_table[ent].timeouts[idx + 1] != ULONG_MAX)
        ehc->cmd_timeout_idx[dev->devno][ent]++;
}
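
/* Illustrative pairing (a sketch, not code from this file): a caller
 * issuing an EH-internal command is expected to size the timeout with
 * ata_internal_cmd_timeout() and, on AC_ERR_TIMEOUT, report back via
 * ata_internal_cmd_timed_out() so that the next attempt escalates:
 *
 *  timeout = ata_internal_cmd_timeout(dev, tf.command);
 *  err_mask = ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0,
 *                               timeout);
 *  if (err_mask & AC_ERR_TIMEOUT)
 *      ata_internal_cmd_timed_out(dev, tf.command);
 */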

static void ata_ering_record(struct ata_ering *ering, unsigned int eflags,
                             unsigned int err_mask)
{
    struct ata_ering_entry *ent;

    WARN_ON(!err_mask);

    ering->cursor++;
    ering->cursor %= ATA_ERING_SIZE;

    ent = &ering->ring[ering->cursor];
    ent->eflags = eflags;
    ent->err_mask = err_mask;
    ent->timestamp = get_jiffies_64();
}

static struct ata_ering_entry *ata_ering_top(struct ata_ering *ering)
{
    struct ata_ering_entry *ent = &ering->ring[ering->cursor];

    if (ent->err_mask)
        return ent;
    return NULL;
}

static void ata_ering_clear(struct ata_ering *ering)
{
    memset(ering, 0, sizeof(*ering));
}

static int ata_ering_map(struct ata_ering *ering,
                         int (*map_fn)(struct ata_ering_entry *, void *),
                         void *arg)
{
    int idx, rc = 0;
    struct ata_ering_entry *ent;

    idx = ering->cursor;
    do {
        ent = &ering->ring[idx];
        if (!ent->err_mask)
            break;
        rc = map_fn(ent, arg);
        if (rc)
            break;
        idx = (idx - 1 + ATA_ERING_SIZE) % ATA_ERING_SIZE;
    } while (idx != ering->cursor);

    return rc;
}
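
/* Note on traversal order: ata_ering_map() walks the ring from the
 * newest entry (the cursor) backwards to the oldest, stopping early at
 * an unused slot (err_mask == 0) or when @map_fn returns non-zero.
 * speed_down_verdict_cb() further below relies on this to cut the walk
 * off at the first entry older than its time window.
 */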

static unsigned int ata_eh_dev_action(struct ata_device *dev)
{
    struct ata_eh_context *ehc = &dev->link->eh_context;

    return ehc->i.action | ehc->i.dev_action[dev->devno];
}

static void ata_eh_clear_action(struct ata_link *link, struct ata_device *dev,
                                struct ata_eh_info *ehi, unsigned int action)
{
    struct ata_device *tdev;

    if (!dev) {
        ehi->action &= ~action;
        ata_for_each_dev(tdev, link, ALL)
            ehi->dev_action[tdev->devno] &= ~action;
    } else {
        /* doesn't make sense for port-wide EH actions */
        WARN_ON(!(action & ATA_EH_PERDEV_MASK));

        /* break ehi->action into ehi->dev_action */
        if (ehi->action & action) {
            ata_for_each_dev(tdev, link, ALL)
                ehi->dev_action[tdev->devno] |=
                    ehi->action & action;
            ehi->action &= ~action;
        }

        /* turn off the specified per-dev action */
        ehi->dev_action[dev->devno] &= ~action;
    }
}

/**
 * ata_scsi_timed_out - SCSI layer time out callback
 * @cmd: timed out SCSI command
 *
 * Handles SCSI layer timeout.  We race with normal completion of
 * the qc for @cmd.  If the qc is already gone, we lose and let
 * the scsi command finish (EH_HANDLED).  Otherwise, the qc has
 * timed out and EH should be invoked.  Prevent ata_qc_complete()
 * from finishing it by setting EH_SCHEDULED and return
 * EH_NOT_HANDLED.
 *
 * TODO: kill this function once old EH is gone.
 *
 * LOCKING:
 * Called from timer context
 *
 * RETURNS:
 * EH_HANDLED or EH_NOT_HANDLED
 */
enum blk_eh_timer_return ata_scsi_timed_out(struct scsi_cmnd *cmd)
{
    struct Scsi_Host *host = cmd->device->host;
    struct ata_port *ap = ata_shost_to_port(host);
    unsigned long flags;
    struct ata_queued_cmd *qc;
    enum blk_eh_timer_return ret;

    DPRINTK("ENTER\n");

    if (ap->ops->error_handler) {
        ret = BLK_EH_NOT_HANDLED;
        goto out;
    }

    ret = BLK_EH_HANDLED;
    spin_lock_irqsave(ap->lock, flags);
    qc = ata_qc_from_tag(ap, ap->link.active_tag);
    if (qc) {
        WARN_ON(qc->scsicmd != cmd);
        qc->flags |= ATA_QCFLAG_EH_SCHEDULED;
        qc->err_mask |= AC_ERR_TIMEOUT;
        ret = BLK_EH_NOT_HANDLED;
    }
    spin_unlock_irqrestore(ap->lock, flags);

out:
    DPRINTK("EXIT, ret=%d\n", ret);
    return ret;
}

static void ata_eh_unload(struct ata_port *ap)
{
    struct ata_link *link;
    struct ata_device *dev;
    unsigned long flags;

    /* Restore SControl IPM and SPD for the next driver and
     * disable attached devices.
     */
    ata_for_each_link(link, ap, PMP_FIRST) {
        sata_scr_write(link, SCR_CONTROL, link->saved_scontrol & 0xff0);
        ata_for_each_dev(dev, link, ALL)
            ata_dev_disable(dev);
    }

    /* freeze and set UNLOADED */
    spin_lock_irqsave(ap->lock, flags);

    ata_port_freeze(ap);                 /* won't be thawed */
    ap->pflags &= ~ATA_PFLAG_EH_PENDING; /* clear pending from freeze */
    ap->pflags |= ATA_PFLAG_UNLOADED;

    spin_unlock_irqrestore(ap->lock, flags);
}

/**
 * ata_scsi_error - SCSI layer error handler callback
 * @host: SCSI host on which error occurred
 *
 * Handles SCSI-layer-thrown error events.
 *
 * LOCKING:
 * Inherited from SCSI layer (none, can sleep)
 *
 * RETURNS:
 * Zero.
 */
void ata_scsi_error(struct Scsi_Host *host)
{
    struct ata_port *ap = ata_shost_to_port(host);
    int i;
    unsigned long flags;

    DPRINTK("ENTER\n");

    /* make sure sff pio task is not running */
    ata_sff_flush_pio_task(ap);

    /* synchronize with host lock and sort out timeouts */

    /* For new EH, all qcs are finished in one of three ways -
     * normal completion, error completion, and SCSI timeout.
     * Both completions can race against SCSI timeout.  When normal
     * completion wins, the qc never reaches EH.  When error
     * completion wins, the qc has ATA_QCFLAG_FAILED set.
     *
     * When SCSI timeout wins, things are a bit more complex.
     * Normal or error completion can occur after the timeout but
     * before this point.  In such cases, both types of
     * completions are honored.  A scmd is determined to have
     * timed out iff its associated qc is active and not failed.
     */
    if (ap->ops->error_handler) {
        struct scsi_cmnd *scmd, *tmp;
        int nr_timedout = 0;

        spin_lock_irqsave(ap->lock, flags);

        /* This must occur under the ap->lock as we don't want
           a polled recovery to race the real interrupt handler

           The lost_interrupt handler checks for any completed but
           non-notified command and completes much like an IRQ handler.

           We then fall into the error recovery code which will treat
           this as if normal completion won the race */

        if (ap->ops->lost_interrupt)
            ap->ops->lost_interrupt(ap);

        list_for_each_entry_safe(scmd, tmp, &host->eh_cmd_q, eh_entry) {
            struct ata_queued_cmd *qc;

            for (i = 0; i < ATA_MAX_QUEUE; i++) {
                qc = __ata_qc_from_tag(ap, i);
                if (qc->flags & ATA_QCFLAG_ACTIVE &&
                    qc->scsicmd == scmd)
                    break;
            }

            if (i < ATA_MAX_QUEUE) {
                /* the scmd has an associated qc */
                if (!(qc->flags & ATA_QCFLAG_FAILED)) {
                    /* which hasn't failed yet, timeout */
                    qc->err_mask |= AC_ERR_TIMEOUT;
                    qc->flags |= ATA_QCFLAG_FAILED;
                    nr_timedout++;
                }
            } else {
                /* Normal completion occurred after
                 * SCSI timeout but before this point.
                 * Successfully complete it.
                 */
                scmd->retries = scmd->allowed;
                scsi_eh_finish_cmd(scmd, &ap->eh_done_q);
            }
        }

        /* If we have timed out qcs, they belong to EH from
         * this point but the state of the controller is
         * unknown.  Freeze the port to make sure the IRQ
         * handler doesn't diddle with those qcs.  This must
         * be done atomically w.r.t. setting QCFLAG_FAILED.
         */
        if (nr_timedout)
            __ata_port_freeze(ap);

        spin_unlock_irqrestore(ap->lock, flags);

        /* initialize eh_tries */
        ap->eh_tries = ATA_EH_MAX_TRIES;
    } else
        spin_unlock_wait(ap->lock);

    /* If we raced normal completion and there is nothing to
       recover (nr_timedout == 0), why exactly are we doing error
       recovery here? */

repeat:
    /* invoke error handler */
    if (ap->ops->error_handler) {
        struct ata_link *link;

        /* kill fast drain timer */
        del_timer_sync(&ap->fastdrain_timer);

        /* process port resume request */
        ata_eh_handle_port_resume(ap);

        /* fetch & clear EH info */
        spin_lock_irqsave(ap->lock, flags);

        ata_for_each_link(link, ap, HOST_FIRST) {
            struct ata_eh_context *ehc = &link->eh_context;
            struct ata_device *dev;

            memset(&link->eh_context, 0, sizeof(link->eh_context));
            link->eh_context.i = link->eh_info;
            memset(&link->eh_info, 0, sizeof(link->eh_info));

            ata_for_each_dev(dev, link, ENABLED) {
                int devno = dev->devno;

                ehc->saved_xfer_mode[devno] = dev->xfer_mode;
                if (ata_ncq_enabled(dev))
                    ehc->saved_ncq_enabled |= 1 << devno;
            }
        }

        ap->pflags |= ATA_PFLAG_EH_IN_PROGRESS;
        ap->pflags &= ~ATA_PFLAG_EH_PENDING;
        ap->excl_link = NULL; /* don't maintain exclusion over EH */

        spin_unlock_irqrestore(ap->lock, flags);

        /* invoke EH, skip if unloading or suspended */
        if (!(ap->pflags & (ATA_PFLAG_UNLOADING | ATA_PFLAG_SUSPENDED)))
            ap->ops->error_handler(ap);
        else {
            /* if unloading, commence suicide */
            if ((ap->pflags & ATA_PFLAG_UNLOADING) &&
                !(ap->pflags & ATA_PFLAG_UNLOADED))
                ata_eh_unload(ap);
            ata_eh_finish(ap);
        }

        /* process port suspend request */
        ata_eh_handle_port_suspend(ap);

        /* An exception might have happened after ->error_handler
         * recovered the port but before this point.  Repeat
         * EH in such a case.
         */
        spin_lock_irqsave(ap->lock, flags);

        if (ap->pflags & ATA_PFLAG_EH_PENDING) {
            if (--ap->eh_tries) {
                spin_unlock_irqrestore(ap->lock, flags);
                goto repeat;
            }
            ata_port_printk(ap, KERN_ERR, "EH pending after %d "
                            "tries, giving up\n", ATA_EH_MAX_TRIES);
            ap->pflags &= ~ATA_PFLAG_EH_PENDING;
        }

        /* this run is complete, make sure EH info is clear */
        ata_for_each_link(link, ap, HOST_FIRST)
            memset(&link->eh_info, 0, sizeof(link->eh_info));

        /* Clear host_eh_scheduled while holding ap->lock such
         * that if exception occurs after this point but
         * before EH completion, SCSI midlayer will
         * re-initiate EH.
         */
        host->host_eh_scheduled = 0;

        spin_unlock_irqrestore(ap->lock, flags);
    } else {
        WARN_ON(ata_qc_from_tag(ap, ap->link.active_tag) == NULL);
        ap->ops->eng_timeout(ap);
    }

    /* finish or retry handled scmd's and clean up */
    WARN_ON(host->host_failed || !list_empty(&host->eh_cmd_q));

    scsi_eh_flush_done_q(&ap->eh_done_q);

    /* clean up */
    spin_lock_irqsave(ap->lock, flags);

    if (ap->pflags & ATA_PFLAG_LOADING)
        ap->pflags &= ~ATA_PFLAG_LOADING;
    else if (ap->pflags & ATA_PFLAG_SCSI_HOTPLUG)
        schedule_delayed_work(&ap->hotplug_task, 0);

    if (ap->pflags & ATA_PFLAG_RECOVERED)
        ata_port_printk(ap, KERN_INFO, "EH complete\n");

    ap->pflags &= ~(ATA_PFLAG_SCSI_HOTPLUG | ATA_PFLAG_RECOVERED);

    /* tell wait_eh that we're done */
    ap->pflags &= ~ATA_PFLAG_EH_IN_PROGRESS;
    wake_up_all(&ap->eh_wait_q);

    spin_unlock_irqrestore(ap->lock, flags);

    DPRINTK("EXIT\n");
}

/**
 * ata_port_wait_eh - Wait for the currently pending EH to complete
 * @ap: Port to wait EH for
 *
 * Wait until the currently pending EH is complete.
 *
 * LOCKING:
 * Kernel thread context (may sleep).
 */
void ata_port_wait_eh(struct ata_port *ap)
{
    unsigned long flags;
    DEFINE_WAIT(wait);

retry:
    spin_lock_irqsave(ap->lock, flags);

    while (ap->pflags & (ATA_PFLAG_EH_PENDING | ATA_PFLAG_EH_IN_PROGRESS)) {
        prepare_to_wait(&ap->eh_wait_q, &wait, TASK_UNINTERRUPTIBLE);
        spin_unlock_irqrestore(ap->lock, flags);
        schedule();
        spin_lock_irqsave(ap->lock, flags);
    }
    finish_wait(&ap->eh_wait_q, &wait);

    spin_unlock_irqrestore(ap->lock, flags);

    /* make sure SCSI EH is complete */
    if (scsi_host_in_recovery(ap->scsi_host)) {
        msleep(10);
        goto retry;
    }
}

static int ata_eh_nr_in_flight(struct ata_port *ap)
{
    unsigned int tag;
    int nr = 0;

    /* count only non-internal commands */
    for (tag = 0; tag < ATA_MAX_QUEUE - 1; tag++)
        if (ata_qc_from_tag(ap, tag))
            nr++;

    return nr;
}

void ata_eh_fastdrain_timerfn(unsigned long arg)
{
    struct ata_port *ap = (void *)arg;
    unsigned long flags;
    int cnt;

    spin_lock_irqsave(ap->lock, flags);

    cnt = ata_eh_nr_in_flight(ap);

    /* are we done? */
    if (!cnt)
        goto out_unlock;

    if (cnt == ap->fastdrain_cnt) {
        unsigned int tag;

        /* No progress during the last interval, tag all
         * in-flight qcs as timed out and freeze the port.
         */
        for (tag = 0; tag < ATA_MAX_QUEUE - 1; tag++) {
            struct ata_queued_cmd *qc = ata_qc_from_tag(ap, tag);
            if (qc)
                qc->err_mask |= AC_ERR_TIMEOUT;
        }

        ata_port_freeze(ap);
    } else {
        /* some qcs have finished, give it another chance */
        ap->fastdrain_cnt = cnt;
        ap->fastdrain_timer.expires =
            ata_deadline(jiffies, ATA_EH_FASTDRAIN_INTERVAL);
        add_timer(&ap->fastdrain_timer);
    }

out_unlock:
    spin_unlock_irqrestore(ap->lock, flags);
}
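
/* Worked example of fast drain: suppose EH is scheduled while 4 qcs are
 * in flight.  ata_eh_set_pending() below arms the timer for 3 seconds
 * (ATA_EH_FASTDRAIN_INTERVAL).  If the timer fires and, say, only 2 qcs
 * remain, progress was made and the timer is re-armed for another
 * interval.  If it fires with the count unchanged, the remaining qcs
 * are marked AC_ERR_TIMEOUT and the port is frozen, so EH takes over
 * immediately instead of waiting for each command to time out on its
 * own.
 */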

/**
 * ata_eh_set_pending - set ATA_PFLAG_EH_PENDING and activate fast drain
 * @ap: target ATA port
 * @fastdrain: activate fast drain
 *
 * Set ATA_PFLAG_EH_PENDING and activate fast drain if @fastdrain
 * is non-zero and EH wasn't pending before.  Fast drain ensures
 * that EH kicks in in a timely manner.
 *
 * LOCKING:
 * spin_lock_irqsave(host lock)
 */
static void ata_eh_set_pending(struct ata_port *ap, int fastdrain)
{
    int cnt;

    /* already scheduled? */
    if (ap->pflags & ATA_PFLAG_EH_PENDING)
        return;

    ap->pflags |= ATA_PFLAG_EH_PENDING;

    if (!fastdrain)
        return;

    /* do we have in-flight qcs? */
    cnt = ata_eh_nr_in_flight(ap);
    if (!cnt)
        return;

    /* activate fast drain */
    ap->fastdrain_cnt = cnt;
    ap->fastdrain_timer.expires =
        ata_deadline(jiffies, ATA_EH_FASTDRAIN_INTERVAL);
    add_timer(&ap->fastdrain_timer);
}

/**
 * ata_qc_schedule_eh - schedule qc for error handling
 * @qc: command to schedule error handling for
 *
 * Schedule error handling for @qc.  EH will kick in as soon as
 * other commands are drained.
 *
 * LOCKING:
 * spin_lock_irqsave(host lock)
 */
void ata_qc_schedule_eh(struct ata_queued_cmd *qc)
{
    struct ata_port *ap = qc->ap;
    struct request_queue *q = qc->scsicmd->device->request_queue;
    unsigned long flags;

    WARN_ON(!ap->ops->error_handler);

    qc->flags |= ATA_QCFLAG_FAILED;
    ata_eh_set_pending(ap, 1);

    /* The following will fail if timeout has already expired.
     * ata_scsi_error() takes care of such scmds on EH entry.
     * Note that ATA_QCFLAG_FAILED is unconditionally set after
     * this function completes.
     */
    spin_lock_irqsave(q->queue_lock, flags);
    blk_abort_request(qc->scsicmd->request);
    spin_unlock_irqrestore(q->queue_lock, flags);
}

/**
 * ata_port_schedule_eh - schedule error handling without a qc
 * @ap: ATA port to schedule EH for
 *
 * Schedule error handling for @ap.  EH will kick in as soon as
 * all commands are drained.
 *
 * LOCKING:
 * spin_lock_irqsave(host lock)
 */
void ata_port_schedule_eh(struct ata_port *ap)
{
    WARN_ON(!ap->ops->error_handler);

    if (ap->pflags & ATA_PFLAG_INITIALIZING)
        return;

    ata_eh_set_pending(ap, 1);
    scsi_schedule_eh(ap->scsi_host);

    DPRINTK("port EH scheduled\n");
}

static int ata_do_link_abort(struct ata_port *ap, struct ata_link *link)
{
    int tag, nr_aborted = 0;

    WARN_ON(!ap->ops->error_handler);

    /* we're gonna abort all commands, no need for fast drain */
    ata_eh_set_pending(ap, 0);

    for (tag = 0; tag < ATA_MAX_QUEUE; tag++) {
        struct ata_queued_cmd *qc = ata_qc_from_tag(ap, tag);

        if (qc && (!link || qc->dev->link == link)) {
            qc->flags |= ATA_QCFLAG_FAILED;
            ata_qc_complete(qc);
            nr_aborted++;
        }
    }

    if (!nr_aborted)
        ata_port_schedule_eh(ap);

    return nr_aborted;
}

/**
 * ata_link_abort - abort all qc's on the link
 * @link: ATA link to abort qc's for
 *
 * Abort all qc's active on @link and schedule EH.
 *
 * LOCKING:
 * spin_lock_irqsave(host lock)
 *
 * RETURNS:
 * Number of aborted qc's.
 */
int ata_link_abort(struct ata_link *link)
{
    return ata_do_link_abort(link->ap, link);
}

/**
 * ata_port_abort - abort all qc's on the port
 * @ap: ATA port to abort qc's for
 *
 * Abort all active qc's of @ap and schedule EH.
 *
 * LOCKING:
 * spin_lock_irqsave(host lock)
 *
 * RETURNS:
 * Number of aborted qc's.
 */
int ata_port_abort(struct ata_port *ap)
{
    return ata_do_link_abort(ap, NULL);
}

/**
 * __ata_port_freeze - freeze port
 * @ap: ATA port to freeze
 *
 * This function is called when HSM violation or some other
 * condition disrupts normal operation of the port.  Frozen port
 * is not allowed to perform any operation until the port is
 * thawed, which usually follows a successful reset.
 *
 * ap->ops->freeze() callback can be used for freezing the port
 * hardware-wise (e.g. mask interrupt and stop DMA engine).  If a
 * port cannot be frozen hardware-wise, the interrupt handler
 * must ack and clear interrupts unconditionally while the port
 * is frozen.
 *
 * LOCKING:
 * spin_lock_irqsave(host lock)
 */
static void __ata_port_freeze(struct ata_port *ap)
{
    WARN_ON(!ap->ops->error_handler);

    if (ap->ops->freeze)
        ap->ops->freeze(ap);

    ap->pflags |= ATA_PFLAG_FROZEN;

    DPRINTK("ata%u port frozen\n", ap->print_id);
}

/**
 * ata_port_freeze - abort & freeze port
 * @ap: ATA port to freeze
 *
 * Abort and freeze @ap.  The freeze operation must be called
 * first, because some hardware requires special operations
 * before the taskfile registers are accessible.
 *
 * LOCKING:
 * spin_lock_irqsave(host lock)
 *
 * RETURNS:
 * Number of aborted commands.
 */
int ata_port_freeze(struct ata_port *ap)
{
    int nr_aborted;

    WARN_ON(!ap->ops->error_handler);

    __ata_port_freeze(ap);
    nr_aborted = ata_port_abort(ap);

    return nr_aborted;
}
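
/* Typical LLDD usage (an illustrative sketch, not code from this file;
 * irq_stat and fatal_error are hypothetical driver variables): an
 * interrupt handler that detects a fatal condition while holding the
 * host lock can hand everything over to EH in one call:
 *
 *  if (fatal_error) {
 *      ata_ehi_push_desc(&ap->link.eh_info, "irq_stat 0x%08x", irq_stat);
 *      ata_port_freeze(ap);  // aborts in-flight qcs and schedules EH
 *      return;
 *  }
 */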

/**
 * sata_async_notification - SATA async notification handler
 * @ap: ATA port where async notification is received
 *
 * Handler to be called when async notification via SDB FIS is
 * received.  This function schedules EH if necessary.
 *
 * LOCKING:
 * spin_lock_irqsave(host lock)
 *
 * RETURNS:
 * 1 if EH is scheduled, 0 otherwise.
 */
int sata_async_notification(struct ata_port *ap)
{
    u32 sntf;
    int rc;

    if (!(ap->flags & ATA_FLAG_AN))
        return 0;

    rc = sata_scr_read(&ap->link, SCR_NOTIFICATION, &sntf);
    if (rc == 0)
        sata_scr_write(&ap->link, SCR_NOTIFICATION, sntf);

    if (!sata_pmp_attached(ap) || rc) {
        /* PMP is not attached or SNTF is not available */
        if (!sata_pmp_attached(ap)) {
            /* PMP is not attached.  Check whether ATAPI
             * AN is configured.  If so, notify media
             * change.
             */
            struct ata_device *dev = ap->link.device;

            if ((dev->class == ATA_DEV_ATAPI) &&
                (dev->flags & ATA_DFLAG_AN))
                ata_scsi_media_change_notify(dev);
            return 0;
        } else {
            /* PMP is attached but SNTF is not available.
             * ATAPI async media change notification is
             * not used.  The PMP must be reporting PHY
             * status change, schedule EH.
             */
            ata_port_schedule_eh(ap);
            return 1;
        }
    } else {
        /* PMP is attached and SNTF is available */
        struct ata_link *link;

        /* check and notify ATAPI AN */
        ata_for_each_link(link, ap, EDGE) {
            if (!(sntf & (1 << link->pmp)))
                continue;

            if ((link->device->class == ATA_DEV_ATAPI) &&
                (link->device->flags & ATA_DFLAG_AN))
                ata_scsi_media_change_notify(link->device);
        }

        /* If PMP is reporting that PHY status of some
         * downstream ports has changed, schedule EH.
         */
        if (sntf & (1 << SATA_PMP_CTRL_PORT)) {
            ata_port_schedule_eh(ap);
            return 1;
        }

        return 0;
    }
}

/**
 * ata_eh_freeze_port - EH helper to freeze port
 * @ap: ATA port to freeze
 *
 * Freeze @ap.
 *
 * LOCKING:
 * None.
 */
void ata_eh_freeze_port(struct ata_port *ap)
{
    unsigned long flags;

    if (!ap->ops->error_handler)
        return;

    spin_lock_irqsave(ap->lock, flags);
    __ata_port_freeze(ap);
    spin_unlock_irqrestore(ap->lock, flags);
}

/**
 * ata_eh_thaw_port - EH helper to thaw port
 * @ap: ATA port to thaw
 *
 * Thaw frozen port @ap.
 *
 * LOCKING:
 * None.
 */
void ata_eh_thaw_port(struct ata_port *ap)
{
    unsigned long flags;

    if (!ap->ops->error_handler)
        return;

    spin_lock_irqsave(ap->lock, flags);

    ap->pflags &= ~ATA_PFLAG_FROZEN;

    if (ap->ops->thaw)
        ap->ops->thaw(ap);

    spin_unlock_irqrestore(ap->lock, flags);

    DPRINTK("ata%u port thawed\n", ap->print_id);
}

static void ata_eh_scsidone(struct scsi_cmnd *scmd)
{
    /* nada */
}

static void __ata_eh_qc_complete(struct ata_queued_cmd *qc)
{
    struct ata_port *ap = qc->ap;
    struct scsi_cmnd *scmd = qc->scsicmd;
    unsigned long flags;

    spin_lock_irqsave(ap->lock, flags);
    qc->scsidone = ata_eh_scsidone;
    __ata_qc_complete(qc);
    WARN_ON(ata_tag_valid(qc->tag));
    spin_unlock_irqrestore(ap->lock, flags);

    scsi_eh_finish_cmd(scmd, &ap->eh_done_q);
}

/**
 * ata_eh_qc_complete - Complete an active ATA command from EH
 * @qc: Command to complete
 *
 * Indicate to the mid and upper layers that an ATA command has
 * completed.  To be used from EH.
 */
void ata_eh_qc_complete(struct ata_queued_cmd *qc)
{
    struct scsi_cmnd *scmd = qc->scsicmd;
    scmd->retries = scmd->allowed;
    __ata_eh_qc_complete(qc);
}

/**
 * ata_eh_qc_retry - Tell midlayer to retry an ATA command after EH
 * @qc: Command to retry
 *
 * Indicate to the mid and upper layers that an ATA command
 * should be retried.  To be used from EH.
 *
 * SCSI midlayer limits the number of retries to scmd->allowed.
 * scmd->retries is decremented for commands which get retried
 * due to unrelated failures (qc->err_mask is zero).
 */
void ata_eh_qc_retry(struct ata_queued_cmd *qc)
{
    struct scsi_cmnd *scmd = qc->scsicmd;
    if (!qc->err_mask && scmd->retries)
        scmd->retries--;
    __ata_eh_qc_complete(qc);
}
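
/* Retry accounting example: the SCSI midlayer gives up on a command
 * once scmd->retries reaches scmd->allowed.  A qc that failed through
 * no fault of its own (qc->err_mask == 0, e.g. it was drained because
 * an unrelated command dragged the port into EH) gets scmd->retries
 * decremented above, so the innocent command doesn't burn a slot of
 * its retry budget.
 */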

/**
 * ata_dev_disable - disable ATA device
 * @dev: ATA device to disable
 *
 * Disable @dev.
 *
 * LOCKING:
 * EH context.
 */
void ata_dev_disable(struct ata_device *dev)
{
    if (!ata_dev_enabled(dev))
        return;

    if (ata_msg_drv(dev->link->ap))
        ata_dev_printk(dev, KERN_WARNING, "disabled\n");
    ata_acpi_on_disable(dev);
    ata_down_xfermask_limit(dev, ATA_DNXFER_FORCE_PIO0 | ATA_DNXFER_QUIET);
    dev->class++;

    /* From now till the next successful probe, ering is used to
     * track probe failures.  Clear accumulated device error info.
     */
    ata_ering_clear(&dev->ering);
}

/**
 * ata_eh_detach_dev - detach ATA device
 * @dev: ATA device to detach
 *
 * Detach @dev.
 *
 * LOCKING:
 * None.
 */
void ata_eh_detach_dev(struct ata_device *dev)
{
    struct ata_link *link = dev->link;
    struct ata_port *ap = link->ap;
    struct ata_eh_context *ehc = &link->eh_context;
    unsigned long flags;

    ata_dev_disable(dev);

    spin_lock_irqsave(ap->lock, flags);

    dev->flags &= ~ATA_DFLAG_DETACH;

    if (ata_scsi_offline_dev(dev)) {
        dev->flags |= ATA_DFLAG_DETACHED;
        ap->pflags |= ATA_PFLAG_SCSI_HOTPLUG;
    }

    /* clear per-dev EH info */
    ata_eh_clear_action(link, dev, &link->eh_info, ATA_EH_PERDEV_MASK);
    ata_eh_clear_action(link, dev, &link->eh_context.i, ATA_EH_PERDEV_MASK);
    ehc->saved_xfer_mode[dev->devno] = 0;
    ehc->saved_ncq_enabled &= ~(1 << dev->devno);

    spin_unlock_irqrestore(ap->lock, flags);
}

/**
 * ata_eh_about_to_do - about to perform eh_action
 * @link: target ATA link
 * @dev: target ATA dev for per-dev action (can be NULL)
 * @action: action about to be performed
 *
 * Called just before performing EH actions to clear related bits
 * in @link->eh_info such that eh actions are not unnecessarily
 * repeated.
 *
 * LOCKING:
 * None.
 */
void ata_eh_about_to_do(struct ata_link *link, struct ata_device *dev,
                        unsigned int action)
{
    struct ata_port *ap = link->ap;
    struct ata_eh_info *ehi = &link->eh_info;
    struct ata_eh_context *ehc = &link->eh_context;
    unsigned long flags;

    spin_lock_irqsave(ap->lock, flags);

    ata_eh_clear_action(link, dev, ehi, action);

    /* About to take EH action, set RECOVERED.  Ignore actions on
     * slave links as master will do them again.
     */
    if (!(ehc->i.flags & ATA_EHI_QUIET) && link != ap->slave_link)
        ap->pflags |= ATA_PFLAG_RECOVERED;

    spin_unlock_irqrestore(ap->lock, flags);
}

/**
 * ata_eh_done - EH action complete
 * @link: target ATA link
 * @dev: target ATA dev for per-dev action (can be NULL)
 * @action: action just completed
 *
 * Called right after performing EH actions to clear related bits
 * in @link->eh_context.
 *
 * LOCKING:
 * None.
 */
void ata_eh_done(struct ata_link *link, struct ata_device *dev,
                 unsigned int action)
{
    struct ata_eh_context *ehc = &link->eh_context;

    ata_eh_clear_action(link, dev, &ehc->i, action);
}

/**
 * ata_err_string - convert err_mask to descriptive string
 * @err_mask: error mask to convert to string
 *
 * Convert @err_mask to descriptive string.  Errors are
 * prioritized according to severity and only the most severe
 * error is reported.
 *
 * LOCKING:
 * None.
 *
 * RETURNS:
 * Descriptive string for @err_mask
 */
static const char *ata_err_string(unsigned int err_mask)
{
    if (err_mask & AC_ERR_HOST_BUS)
        return "host bus error";
    if (err_mask & AC_ERR_ATA_BUS)
        return "ATA bus error";
    if (err_mask & AC_ERR_TIMEOUT)
        return "timeout";
    if (err_mask & AC_ERR_HSM)
        return "HSM violation";
    if (err_mask & AC_ERR_SYSTEM)
        return "internal error";
    if (err_mask & AC_ERR_MEDIA)
        return "media error";
    if (err_mask & AC_ERR_INVALID)
        return "invalid argument";
    if (err_mask & AC_ERR_DEV)
        return "device error";
    return "unknown error";
}

/**
 * ata_read_log_page - read a specific log page
 * @dev: target device
 * @page: page to read
 * @buf: buffer to store read page
 * @sectors: number of sectors to read
 *
 * Read log page using READ_LOG_EXT command.
 *
 * LOCKING:
 * Kernel thread context (may sleep).
 *
 * RETURNS:
 * 0 on success, AC_ERR_* mask otherwise.
 */
static unsigned int ata_read_log_page(struct ata_device *dev,
                                      u8 page, void *buf, unsigned int sectors)
{
    struct ata_taskfile tf;
    unsigned int err_mask;

    DPRINTK("read log page - page %d\n", page);

    ata_tf_init(dev, &tf);
    tf.command = ATA_CMD_READ_LOG_EXT;
    tf.lbal = page;
    tf.nsect = sectors;
    tf.hob_nsect = sectors >> 8;
    tf.flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_LBA48 | ATA_TFLAG_DEVICE;
    tf.protocol = ATA_PROT_PIO;

    err_mask = ata_exec_internal(dev, &tf, NULL, DMA_FROM_DEVICE,
                                 buf, sectors * ATA_SECT_SIZE, 0);

    DPRINTK("EXIT, err_mask=%x\n", err_mask);
    return err_mask;
}

/**
 * ata_eh_read_log_10h - Read log page 10h for NCQ error details
 * @dev: Device to read log page 10h from
 * @tag: Resulting tag of the failed command
 * @tf: Resulting taskfile registers of the failed command
 *
 * Read log page 10h to obtain NCQ error details and clear error
 * condition.
 *
 * LOCKING:
 * Kernel thread context (may sleep).
 *
 * RETURNS:
 * 0 on success, -errno otherwise.
 */
static int ata_eh_read_log_10h(struct ata_device *dev,
                               int *tag, struct ata_taskfile *tf)
{
    u8 *buf = dev->link->ap->sector_buf;
    unsigned int err_mask;
    u8 csum;
    int i;

    err_mask = ata_read_log_page(dev, ATA_LOG_SATA_NCQ, buf, 1);
    if (err_mask)
        return -EIO;

    csum = 0;
    for (i = 0; i < ATA_SECT_SIZE; i++)
        csum += buf[i];
    if (csum)
        ata_dev_printk(dev, KERN_WARNING,
                       "invalid checksum 0x%x on log page 10h\n", csum);

    if (buf[0] & 0x80)
        return -ENOENT;

    *tag = buf[0] & 0x1f;

    tf->command = buf[2];
    tf->feature = buf[3];
    tf->lbal = buf[4];
    tf->lbam = buf[5];
    tf->lbah = buf[6];
    tf->device = buf[7];
    tf->hob_lbal = buf[8];
    tf->hob_lbam = buf[9];
    tf->hob_lbah = buf[10];
    tf->nsect = buf[12];
    tf->hob_nsect = buf[13];

    return 0;
}

/**
 * atapi_eh_tur - perform ATAPI TEST_UNIT_READY
 * @dev: target ATAPI device
 * @r_sense_key: out parameter for sense_key
 *
 * Perform ATAPI TEST_UNIT_READY.
 *
 * LOCKING:
 * EH context (may sleep).
 *
 * RETURNS:
 * 0 on success, AC_ERR_* mask on failure.
 */
static unsigned int atapi_eh_tur(struct ata_device *dev, u8 *r_sense_key)
{
    u8 cdb[ATAPI_CDB_LEN] = { TEST_UNIT_READY, 0, 0, 0, 0, 0 };
    struct ata_taskfile tf;
    unsigned int err_mask;

    ata_tf_init(dev, &tf);

    tf.flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE;
    tf.command = ATA_CMD_PACKET;
    tf.protocol = ATAPI_PROT_NODATA;

    err_mask = ata_exec_internal(dev, &tf, cdb, DMA_NONE, NULL, 0, 0);
    if (err_mask == AC_ERR_DEV)
        *r_sense_key = tf.feature >> 4;
    return err_mask;
}

/**
 * atapi_eh_request_sense - perform ATAPI REQUEST_SENSE
 * @dev: device to perform REQUEST_SENSE to
 * @sense_buf: result sense data buffer (SCSI_SENSE_BUFFERSIZE bytes long)
 * @dfl_sense_key: default sense key to use
 *
 * Perform ATAPI REQUEST_SENSE after the device reported CHECK
 * SENSE.  This function is an EH helper.
 *
 * LOCKING:
 * Kernel thread context (may sleep).
 *
 * RETURNS:
 * 0 on success, AC_ERR_* mask on failure
 */
static unsigned int atapi_eh_request_sense(struct ata_device *dev,
                                           u8 *sense_buf, u8 dfl_sense_key)
{
    u8 cdb[ATAPI_CDB_LEN] =
        { REQUEST_SENSE, 0, 0, 0, SCSI_SENSE_BUFFERSIZE, 0 };
    struct ata_port *ap = dev->link->ap;
    struct ata_taskfile tf;

    DPRINTK("ATAPI request sense\n");

    /* FIXME: is this needed? */
    memset(sense_buf, 0, SCSI_SENSE_BUFFERSIZE);

    /* initialize sense_buf with the error register,
     * for the case where they are -not- overwritten
     */
    sense_buf[0] = 0x70;
    sense_buf[2] = dfl_sense_key;

    /* some devices time out if garbage left in tf */
    ata_tf_init(dev, &tf);

    tf.flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE;
    tf.command = ATA_CMD_PACKET;

    /* is it pointless to prefer PIO for "safety reasons"? */
    if (ap->flags & ATA_FLAG_PIO_DMA) {
        tf.protocol = ATAPI_PROT_DMA;
        tf.feature |= ATAPI_PKT_DMA;
    } else {
        tf.protocol = ATAPI_PROT_PIO;
        tf.lbam = SCSI_SENSE_BUFFERSIZE;
        tf.lbah = 0;
    }

    return ata_exec_internal(dev, &tf, cdb, DMA_FROM_DEVICE,
                             sense_buf, SCSI_SENSE_BUFFERSIZE, 0);
}

/**
 * ata_eh_analyze_serror - analyze SError for a failed port
 * @link: ATA link to analyze SError for
 *
 * Analyze SError if available and further determine cause of
 * failure.
 *
 * LOCKING:
 * None.
 */
static void ata_eh_analyze_serror(struct ata_link *link)
{
    struct ata_eh_context *ehc = &link->eh_context;
    u32 serror = ehc->i.serror;
    unsigned int err_mask = 0, action = 0;
    u32 hotplug_mask;

    if (serror & (SERR_PERSISTENT | SERR_DATA)) {
        err_mask |= AC_ERR_ATA_BUS;
        action |= ATA_EH_RESET;
    }
    if (serror & SERR_PROTOCOL) {
        err_mask |= AC_ERR_HSM;
        action |= ATA_EH_RESET;
    }
    if (serror & SERR_INTERNAL) {
        err_mask |= AC_ERR_SYSTEM;
        action |= ATA_EH_RESET;
    }

    /* Determine whether a hotplug event has occurred.  Both
     * SError.N/X are considered hotplug events for enabled or
     * host links.  For disabled PMP links, only N bit is
     * considered as X bit is left at 1 for link plugging.
     */
    hotplug_mask = 0;

    if (!(link->flags & ATA_LFLAG_DISABLED) || ata_is_host_link(link))
        hotplug_mask = SERR_PHYRDY_CHG | SERR_DEV_XCHG;
    else
        hotplug_mask = SERR_PHYRDY_CHG;

    if (serror & hotplug_mask)
        ata_ehi_hotplugged(&ehc->i);

    ehc->i.err_mask |= err_mask;
    ehc->i.action |= action;
}

/**
 * ata_eh_analyze_ncq_error - analyze NCQ error
 * @link: ATA link to analyze NCQ error for
 *
 * Read log page 10h, determine the offending qc and acquire
 * error status TF.  For NCQ device errors, all LLDDs have to do
 * is set AC_ERR_DEV in ehi->err_mask.  This function takes
 * care of the rest.
 *
 * LOCKING:
 * Kernel thread context (may sleep).
 */
void ata_eh_analyze_ncq_error(struct ata_link *link)
{
    struct ata_port *ap = link->ap;
    struct ata_eh_context *ehc = &link->eh_context;
    struct ata_device *dev = link->device;
    struct ata_queued_cmd *qc;
    struct ata_taskfile tf;
    int tag, rc;

    /* if frozen, we can't do much */
    if (ap->pflags & ATA_PFLAG_FROZEN)
        return;

    /* is it NCQ device error? */
    if (!link->sactive || !(ehc->i.err_mask & AC_ERR_DEV))
        return;

    /* has LLDD analyzed already? */
    for (tag = 0; tag < ATA_MAX_QUEUE; tag++) {
        qc = __ata_qc_from_tag(ap, tag);

        if (!(qc->flags & ATA_QCFLAG_FAILED))
            continue;

        if (qc->err_mask)
            return;
    }

    /* okay, this error is ours */
    memset(&tf, 0, sizeof(tf));
    rc = ata_eh_read_log_10h(dev, &tag, &tf);
    if (rc) {
        ata_link_printk(link, KERN_ERR, "failed to read log page 10h "
                        "(errno=%d)\n", rc);
        return;
    }

    if (!(link->sactive & (1 << tag))) {
        ata_link_printk(link, KERN_ERR, "log page 10h reported "
                        "inactive tag %d\n", tag);
        return;
    }

    /* we've got the perpetrator, condemn it */
    qc = __ata_qc_from_tag(ap, tag);
    memcpy(&qc->result_tf, &tf, sizeof(tf));
    qc->result_tf.flags = ATA_TFLAG_ISADDR | ATA_TFLAG_LBA | ATA_TFLAG_LBA48;
    qc->err_mask |= AC_ERR_DEV | AC_ERR_NCQ;
    ehc->i.err_mask &= ~AC_ERR_DEV;
}

/**
 * ata_eh_analyze_tf - analyze taskfile of a failed qc
 * @qc: qc to analyze
 * @tf: Taskfile registers to analyze
 *
 * Analyze taskfile of @qc and further determine cause of
 * failure.  This function also requests ATAPI sense data if
 * available.
 *
 * LOCKING:
 * Kernel thread context (may sleep).
 *
 * RETURNS:
 * Determined recovery action
 */
static unsigned int ata_eh_analyze_tf(struct ata_queued_cmd *qc,
                                      const struct ata_taskfile *tf)
{
    unsigned int tmp, action = 0;
    u8 stat = tf->command, err = tf->feature;

    if ((stat & (ATA_BUSY | ATA_DRQ | ATA_DRDY)) != ATA_DRDY) {
        qc->err_mask |= AC_ERR_HSM;
        return ATA_EH_RESET;
    }

    if (stat & (ATA_ERR | ATA_DF))
        qc->err_mask |= AC_ERR_DEV;
    else
        return 0;

    switch (qc->dev->class) {
    case ATA_DEV_ATA:
        if (err & ATA_ICRC)
            qc->err_mask |= AC_ERR_ATA_BUS;
        if (err & ATA_UNC)
            qc->err_mask |= AC_ERR_MEDIA;
        if (err & ATA_IDNF)
            qc->err_mask |= AC_ERR_INVALID;
        break;

    case ATA_DEV_ATAPI:
        if (!(qc->ap->pflags & ATA_PFLAG_FROZEN)) {
            tmp = atapi_eh_request_sense(qc->dev,
                                         qc->scsicmd->sense_buffer,
                                         qc->result_tf.feature >> 4);
            if (!tmp) {
                /* ATA_QCFLAG_SENSE_VALID is used to
                 * tell atapi_qc_complete() that sense
                 * data is already valid.
                 *
                 * TODO: interpret sense data and set
                 * appropriate err_mask.
                 */
                qc->flags |= ATA_QCFLAG_SENSE_VALID;
            } else
                qc->err_mask |= tmp;
        }
    }

    if (qc->err_mask & (AC_ERR_HSM | AC_ERR_TIMEOUT | AC_ERR_ATA_BUS))
        action |= ATA_EH_RESET;

    return action;
}

static int ata_eh_categorize_error(unsigned int eflags, unsigned int err_mask,
                                   int *xfer_ok)
{
    int base = 0;

    if (!(eflags & ATA_EFLAG_DUBIOUS_XFER))
        *xfer_ok = 1;

    if (!*xfer_ok)
        base = ATA_ECAT_DUBIOUS_NONE;

    if (err_mask & AC_ERR_ATA_BUS)
        return base + ATA_ECAT_ATA_BUS;

    if (err_mask & AC_ERR_TIMEOUT)
        return base + ATA_ECAT_TOUT_HSM;

    if (eflags & ATA_EFLAG_IS_IO) {
        if (err_mask & AC_ERR_HSM)
            return base + ATA_ECAT_TOUT_HSM;
        if ((err_mask &
             (AC_ERR_DEV|AC_ERR_MEDIA|AC_ERR_INVALID)) == AC_ERR_DEV)
            return base + ATA_ECAT_UNK_DEV;
    }

    return 0;
}
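
/* Categorization example: an IO command (ATA_EFLAG_IS_IO) that failed
 * with a bare AC_ERR_DEV before any transfer in the current mode had
 * been verified (ATA_EFLAG_DUBIOUS_XFER) lands in
 * ATA_ECAT_DUBIOUS_UNK_DEV: base ATA_ECAT_DUBIOUS_NONE (4) plus
 * ATA_ECAT_UNK_DEV (3).  Also note that *xfer_ok latches to 1 at the
 * first non-DUBIOUS entry and stays set for the rest of an
 * ata_ering_map() walk, since entries are visited newest first.
 */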

struct speed_down_verdict_arg {
    u64 since;
    int xfer_ok;
    int nr_errors[ATA_ECAT_NR];
};

static int speed_down_verdict_cb(struct ata_ering_entry *ent, void *void_arg)
{
    struct speed_down_verdict_arg *arg = void_arg;
    int cat;

    if (ent->timestamp < arg->since)
        return -1;

    cat = ata_eh_categorize_error(ent->eflags, ent->err_mask,
                                  &arg->xfer_ok);
    arg->nr_errors[cat]++;

    return 0;
}

/**
 * ata_eh_speed_down_verdict - Determine speed down verdict
 * @dev: Device of interest
 *
 * This function examines error ring of @dev and determines
 * whether NCQ needs to be turned off, transfer speed should be
 * stepped down, or falling back to PIO is necessary.
 *
 * ECAT_ATA_BUS : ATA_BUS error for any command
 *
 * ECAT_TOUT_HSM : TIMEOUT for any command or HSM violation for
 * IO commands
 *
 * ECAT_UNK_DEV : Unknown DEV error for IO commands
 *
 * ECAT_DUBIOUS_* : Identical to above three but occurred while
 * data transfer hasn't been verified.
 *
 * Verdicts are
 *
 * NCQ_OFF : Turn off NCQ.
 *
 * SPEED_DOWN : Speed down transfer speed but don't fall back
 * to PIO.
 *
 * FALLBACK_TO_PIO : Fall back to PIO.
 *
 * Even if multiple verdicts are returned, only one action is
 * taken per error.  An action triggered by non-DUBIOUS errors
 * clears ering, while one triggered by DUBIOUS_* errors doesn't.
 * This is to expedite speed down decisions right after device is
 * initially configured.
 *
 * The following are the speed down rules.  #1 and #2 deal with
 * DUBIOUS errors.
 *
 * 1. If more than one DUBIOUS_ATA_BUS or DUBIOUS_TOUT_HSM errors
 *    occurred during last 5 mins, SPEED_DOWN and FALLBACK_TO_PIO.
 *
 * 2. If more than one DUBIOUS_TOUT_HSM or DUBIOUS_UNK_DEV errors
 *    occurred during last 5 mins, NCQ_OFF.
 *
 * 3. If more than 6 ATA_BUS, TOUT_HSM or UNK_DEV errors
 *    occurred during last 5 mins, FALLBACK_TO_PIO.
 *
 * 4. If more than 3 TOUT_HSM or UNK_DEV errors occurred
 *    during last 10 mins, NCQ_OFF.
 *
 * 5. If more than 3 ATA_BUS or TOUT_HSM errors, or more than 6
 *    UNK_DEV errors occurred during last 10 mins, SPEED_DOWN.
 *
 * LOCKING:
 * Inherited from caller.
 *
 * RETURNS:
 * OR of ATA_EH_SPDN_* flags.
 */
static unsigned int ata_eh_speed_down_verdict(struct ata_device *dev)
{
    const u64 j5mins = 5LLU * 60 * HZ, j10mins = 10LLU * 60 * HZ;
    u64 j64 = get_jiffies_64();
    struct speed_down_verdict_arg arg;
    unsigned int verdict = 0;

    /* scan past 5 mins of error history */
    memset(&arg, 0, sizeof(arg));
    arg.since = j64 - min(j64, j5mins);
    ata_ering_map(&dev->ering, speed_down_verdict_cb, &arg);

    if (arg.nr_errors[ATA_ECAT_DUBIOUS_ATA_BUS] +
        arg.nr_errors[ATA_ECAT_DUBIOUS_TOUT_HSM] > 1)
        verdict |= ATA_EH_SPDN_SPEED_DOWN |
            ATA_EH_SPDN_FALLBACK_TO_PIO | ATA_EH_SPDN_KEEP_ERRORS;

    if (arg.nr_errors[ATA_ECAT_DUBIOUS_TOUT_HSM] +
        arg.nr_errors[ATA_ECAT_DUBIOUS_UNK_DEV] > 1)
        verdict |= ATA_EH_SPDN_NCQ_OFF | ATA_EH_SPDN_KEEP_ERRORS;

    if (arg.nr_errors[ATA_ECAT_ATA_BUS] +
        arg.nr_errors[ATA_ECAT_TOUT_HSM] +
        arg.nr_errors[ATA_ECAT_UNK_DEV] > 6)
        verdict |= ATA_EH_SPDN_FALLBACK_TO_PIO;

    /* scan past 10 mins of error history */
    memset(&arg, 0, sizeof(arg));
    arg.since = j64 - min(j64, j10mins);
    ata_ering_map(&dev->ering, speed_down_verdict_cb, &arg);

    if (arg.nr_errors[ATA_ECAT_TOUT_HSM] +
        arg.nr_errors[ATA_ECAT_UNK_DEV] > 3)
        verdict |= ATA_EH_SPDN_NCQ_OFF;

    if (arg.nr_errors[ATA_ECAT_ATA_BUS] +
        arg.nr_errors[ATA_ECAT_TOUT_HSM] > 3 ||
        arg.nr_errors[ATA_ECAT_UNK_DEV] > 6)
        verdict |= ATA_EH_SPDN_SPEED_DOWN;

    return verdict;
}
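
/* Example of rule #2 above: two unknown device errors on IO commands
 * within 5 minutes, both recorded before the current transfer mode had
 * been verified (categorized ATA_ECAT_DUBIOUS_UNK_DEV), push the sum
 * past 1, so the verdict includes NCQ_OFF | KEEP_ERRORS.  KEEP_ERRORS
 * means ata_eh_speed_down() will not clear the ering when it acts on
 * the verdict, so further dubious errors keep accumulating instead of
 * starting the count from scratch.
 */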

/**
 * ata_eh_speed_down - record error and speed down if necessary
 * @dev: Failed device
 * @eflags: mask of ATA_EFLAG_* flags
 * @err_mask: err_mask of the error
 *
 * Record error and examine error history to determine whether
 * adjusting transmission speed is necessary.  It also sets
 * transmission limits appropriately if such adjustment is
 * necessary.
 *
 * LOCKING:
 * Kernel thread context (may sleep).
 *
 * RETURNS:
 * Determined recovery action.
 */
static unsigned int ata_eh_speed_down(struct ata_device *dev,
                                      unsigned int eflags, unsigned int err_mask)
{
    struct ata_link *link = ata_dev_phys_link(dev);
    int xfer_ok = 0;
    unsigned int verdict;
    unsigned int action = 0;

    /* don't bother if Cat-0 error */
    if (ata_eh_categorize_error(eflags, err_mask, &xfer_ok) == 0)
        return 0;

    /* record error and determine whether speed down is necessary */
    ata_ering_record(&dev->ering, eflags, err_mask);
    verdict = ata_eh_speed_down_verdict(dev);

    /* turn off NCQ? */
    if ((verdict & ATA_EH_SPDN_NCQ_OFF) &&
        (dev->flags & (ATA_DFLAG_PIO | ATA_DFLAG_NCQ |
                       ATA_DFLAG_NCQ_OFF)) == ATA_DFLAG_NCQ) {
        dev->flags |= ATA_DFLAG_NCQ_OFF;
        ata_dev_printk(dev, KERN_WARNING,
                       "NCQ disabled due to excessive errors\n");
        goto done;
    }

    /* speed down? */
    if (verdict & ATA_EH_SPDN_SPEED_DOWN) {
        /* speed down SATA link speed if possible */
        if (sata_down_spd_limit(link, 0) == 0) {
            action |= ATA_EH_RESET;
            goto done;
        }

        /* lower transfer mode */
        if (dev->spdn_cnt < 2) {
            static const int dma_dnxfer_sel[] =
                { ATA_DNXFER_DMA, ATA_DNXFER_40C };
            static const int pio_dnxfer_sel[] =
                { ATA_DNXFER_PIO, ATA_DNXFER_FORCE_PIO0 };
            int sel;

            if (dev->xfer_shift != ATA_SHIFT_PIO)
                sel = dma_dnxfer_sel[dev->spdn_cnt];
            else
                sel = pio_dnxfer_sel[dev->spdn_cnt];

            dev->spdn_cnt++;

            if (ata_down_xfermask_limit(dev, sel) == 0) {
                action |= ATA_EH_RESET;
                goto done;
            }
        }
    }

    /* Fall back to PIO?  Slowing down to PIO is meaningless for
     * SATA ATA devices.  Consider it only for PATA and SATAPI.
     */
    if ((verdict & ATA_EH_SPDN_FALLBACK_TO_PIO) && (dev->spdn_cnt >= 2) &&
        (link->ap->cbl != ATA_CBL_SATA || dev->class == ATA_DEV_ATAPI) &&
        (dev->xfer_shift != ATA_SHIFT_PIO)) {
        if (ata_down_xfermask_limit(dev, ATA_DNXFER_FORCE_PIO) == 0) {
            dev->spdn_cnt = 0;
            action |= ATA_EH_RESET;
            goto done;
        }
    }

    return 0;
done:
    /* device has been slowed down, blow error history */
    if (!(verdict & ATA_EH_SPDN_KEEP_ERRORS))
        ata_ering_clear(&dev->ering);
    return action;
}

/**
 * ata_eh_link_autopsy - analyze error and determine recovery action
 * @link: host link to perform autopsy on
 *
 * Analyze why @link failed and determine which recovery actions
 * are needed.  This function also sets more detailed AC_ERR_*
 * values and fills sense data for ATAPI CHECK SENSE.
 *
 * LOCKING:
 * Kernel thread context (may sleep).
 */
static void ata_eh_link_autopsy(struct ata_link *link)
{
    struct ata_port *ap = link->ap;
    struct ata_eh_context *ehc = &link->eh_context;
    struct ata_device *dev;
    unsigned int all_err_mask = 0, eflags = 0;
    int tag;
    u32 serror;
    int rc;

    DPRINTK("ENTER\n");

    if (ehc->i.flags & ATA_EHI_NO_AUTOPSY)
        return;

    /* obtain and analyze SError */
    rc = sata_scr_read(link, SCR_ERROR, &serror);
    if (rc == 0) {
        ehc->i.serror |= serror;
        ata_eh_analyze_serror(link);
    } else if (rc != -EOPNOTSUPP) {
        /* SError read failed, force reset and probing */
        ehc->i.probe_mask |= ATA_ALL_DEVICES;
        ehc->i.action |= ATA_EH_RESET;
        ehc->i.err_mask |= AC_ERR_OTHER;
    }

    /* analyze NCQ failure */
    ata_eh_analyze_ncq_error(link);

    /* any real error trumps AC_ERR_OTHER */
    if (ehc->i.err_mask & ~AC_ERR_OTHER)
        ehc->i.err_mask &= ~AC_ERR_OTHER;

    all_err_mask |= ehc->i.err_mask;

    for (tag = 0; tag < ATA_MAX_QUEUE; tag++) {
        struct ata_queued_cmd *qc = __ata_qc_from_tag(ap, tag);

        if (!(qc->flags & ATA_QCFLAG_FAILED) ||
            ata_dev_phys_link(qc->dev) != link)
            continue;

        /* inherit upper level err_mask */
        qc->err_mask |= ehc->i.err_mask;

        /* analyze TF */
        ehc->i.action |= ata_eh_analyze_tf(qc, &qc->result_tf);

        /* DEV errors are probably spurious in case of ATA_BUS error */
        if (qc->err_mask & AC_ERR_ATA_BUS)
            qc->err_mask &= ~(AC_ERR_DEV | AC_ERR_MEDIA |
                              AC_ERR_INVALID);

        /* any real error trumps unknown error */
        if (qc->err_mask & ~AC_ERR_OTHER)
            qc->err_mask &= ~AC_ERR_OTHER;

        /* SENSE_VALID trumps dev/unknown error and revalidation */
        if (qc->flags & ATA_QCFLAG_SENSE_VALID)
            qc->err_mask &= ~(AC_ERR_DEV | AC_ERR_OTHER);

        /* determine whether the command is worth retrying */
        if (qc->flags & ATA_QCFLAG_IO ||
            (!(qc->err_mask & AC_ERR_INVALID) &&
             qc->err_mask != AC_ERR_DEV))
            qc->flags |= ATA_QCFLAG_RETRY;

        /* accumulate error info */
        ehc->i.dev = qc->dev;
        all_err_mask |= qc->err_mask;
        if (qc->flags & ATA_QCFLAG_IO)
            eflags |= ATA_EFLAG_IS_IO;
    }

    /* enforce default EH actions */
    if (ap->pflags & ATA_PFLAG_FROZEN ||
        all_err_mask & (AC_ERR_HSM | AC_ERR_TIMEOUT))
        ehc->i.action |= ATA_EH_RESET;
    else if (((eflags & ATA_EFLAG_IS_IO) && all_err_mask) ||
             (!(eflags & ATA_EFLAG_IS_IO) && (all_err_mask & ~AC_ERR_DEV)))
        ehc->i.action |= ATA_EH_REVALIDATE;

    /* If we have offending qcs and the associated failed device,
     * perform per-dev EH action only on the offending device.
     */
    if (ehc->i.dev) {
        ehc->i.dev_action[ehc->i.dev->devno] |=
            ehc->i.action & ATA_EH_PERDEV_MASK;
        ehc->i.action &= ~ATA_EH_PERDEV_MASK;
    }

    /* propagate timeout to host link */
    if ((all_err_mask & AC_ERR_TIMEOUT) && !ata_is_host_link(link))
        ap->link.eh_context.i.err_mask |= AC_ERR_TIMEOUT;

    /* record error and consider speeding down */
    dev = ehc->i.dev;
    if (!dev && ((ata_link_max_devices(link) == 1 &&
                  ata_dev_enabled(link->device))))
        dev = link->device;

    if (dev) {
        if (dev->flags & ATA_DFLAG_DUBIOUS_XFER)
            eflags |= ATA_EFLAG_DUBIOUS_XFER;
        ehc->i.action |= ata_eh_speed_down(dev, eflags, all_err_mask);
    }

    DPRINTK("EXIT\n");
}

/**
 * ata_eh_autopsy - analyze error and determine recovery action
 * @ap: host port to perform autopsy on
 *
 * Analyze all links of @ap and determine why they failed and
 * which recovery actions are needed.
 *
 * LOCKING:
 * Kernel thread context (may sleep).
 */
void ata_eh_autopsy(struct ata_port *ap)
{
    struct ata_link *link;

    ata_for_each_link(link, ap, EDGE)
2099 ata_eh_link_autopsy(link);
2100
2101 /* Handle the frigging slave link. Autopsy is done similarly
2102 * but actions and flags are transferred over to the master
2103 * link and handled from there.
2104 */
2105 if (ap->slave_link) {
2106 struct ata_eh_context *mehc = &ap->link.eh_context;
2107 struct ata_eh_context *sehc = &ap->slave_link->eh_context;
2108
2109 /* transfer control flags from master to slave */
2110 sehc->i.flags |= mehc->i.flags & ATA_EHI_TO_SLAVE_MASK;
2111
2112 /* perform autopsy on the slave link */
2113 ata_eh_link_autopsy(ap->slave_link);
2114
2115 /* transfer actions from slave to master and clear slave */
2116 ata_eh_about_to_do(ap->slave_link, NULL, ATA_EH_ALL_ACTIONS);
2117 mehc->i.action |= sehc->i.action;
2118 mehc->i.dev_action[1] |= sehc->i.dev_action[1];
2119 mehc->i.flags |= sehc->i.flags;
2120 ata_eh_done(ap->slave_link, NULL, ATA_EH_ALL_ACTIONS);
2121 }
2122
2123 /* Autopsy of fanout ports can affect host link autopsy.
2124 * Perform host link autopsy last.
2125 */
2126 if (sata_pmp_attached(ap))
2127 ata_eh_link_autopsy(&ap->link);
2128 }
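/* Sketch (an assumption, not code from this file): a low-level driver
 * feeds the autopsy above from its interrupt handler by recording what
 * it knows in eh_info and freezing the port; "irq_stat" is a
 * hypothetical controller register value.
 *
 *	struct ata_eh_info *ehi = &ap->link.eh_info;
 *
 *	ata_ehi_clear_desc(ehi);
 *	ata_ehi_push_desc(ehi, "irq_stat 0x%08x", irq_stat);
 *	ehi->err_mask |= AC_ERR_HOST_BUS;
 *	ehi->action |= ATA_EH_RESET;
 *	ata_port_freeze(ap);	/* aborts in-flight qcs and schedules EH */
 */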
2129
2130 /**
2131 * ata_get_cmd_descript - get description for ATA command
2132 * @command: ATA command code to get description for
2133 *
2134 * Return a textual description of the given command, or NULL if the
2135 * command is not known.
2136 *
2137 * LOCKING:
2138 * None
2139 */
2140 const char *ata_get_cmd_descript(u8 command)
2141 {
2142 #ifdef CONFIG_ATA_VERBOSE_ERROR
2143 static const struct
2144 {
2145 u8 command;
2146 const char *text;
2147 } cmd_descr[] = {
2148 { ATA_CMD_DEV_RESET, "DEVICE RESET" },
2149 { ATA_CMD_CHK_POWER, "CHECK POWER MODE" },
2150 { ATA_CMD_STANDBY, "STANDBY" },
2151 { ATA_CMD_IDLE, "IDLE" },
2152 { ATA_CMD_EDD, "EXECUTE DEVICE DIAGNOSTIC" },
2153 { ATA_CMD_DOWNLOAD_MICRO, "DOWNLOAD MICROCODE" },
2154 { ATA_CMD_NOP, "NOP" },
2155 { ATA_CMD_FLUSH, "FLUSH CACHE" },
2156 { ATA_CMD_FLUSH_EXT, "FLUSH CACHE EXT" },
2157 { ATA_CMD_ID_ATA, "IDENTIFY DEVICE" },
2158 { ATA_CMD_ID_ATAPI, "IDENTIFY PACKET DEVICE" },
2159 { ATA_CMD_SERVICE, "SERVICE" },
2160 { ATA_CMD_READ, "READ DMA" },
2161 { ATA_CMD_READ_EXT, "READ DMA EXT" },
2162 { ATA_CMD_READ_QUEUED, "READ DMA QUEUED" },
2163 { ATA_CMD_READ_STREAM_EXT, "READ STREAM EXT" },
2164 { ATA_CMD_READ_STREAM_DMA_EXT, "READ STREAM DMA EXT" },
2165 { ATA_CMD_WRITE, "WRITE DMA" },
2166 { ATA_CMD_WRITE_EXT, "WRITE DMA EXT" },
2167 { ATA_CMD_WRITE_QUEUED, "WRITE DMA QUEUED EXT" },
2168 { ATA_CMD_WRITE_STREAM_EXT, "WRITE STREAM EXT" },
2169 { ATA_CMD_WRITE_STREAM_DMA_EXT, "WRITE STREAM DMA EXT" },
2170 { ATA_CMD_WRITE_FUA_EXT, "WRITE DMA FUA EXT" },
2171 { ATA_CMD_WRITE_QUEUED_FUA_EXT, "WRITE DMA QUEUED FUA EXT" },
2172 { ATA_CMD_FPDMA_READ, "READ FPDMA QUEUED" },
2173 { ATA_CMD_FPDMA_WRITE, "WRITE FPDMA QUEUED" },
2174 { ATA_CMD_PIO_READ, "READ SECTOR(S)" },
2175 { ATA_CMD_PIO_READ_EXT, "READ SECTOR(S) EXT" },
2176 { ATA_CMD_PIO_WRITE, "WRITE SECTOR(S)" },
2177 { ATA_CMD_PIO_WRITE_EXT, "WRITE SECTOR(S) EXT" },
2178 { ATA_CMD_READ_MULTI, "READ MULTIPLE" },
2179 { ATA_CMD_READ_MULTI_EXT, "READ MULTIPLE EXT" },
2180 { ATA_CMD_WRITE_MULTI, "WRITE MULTIPLE" },
2181 { ATA_CMD_WRITE_MULTI_EXT, "WRITE MULTIPLE EXT" },
2182 { ATA_CMD_WRITE_MULTI_FUA_EXT, "WRITE MULTIPLE FUA EXT" },
2183 { ATA_CMD_SET_FEATURES, "SET FEATURES" },
2184 { ATA_CMD_SET_MULTI, "SET MULTIPLE MODE" },
2185 { ATA_CMD_VERIFY, "READ VERIFY SECTOR(S)" },
2186 { ATA_CMD_VERIFY_EXT, "READ VERIFY SECTOR(S) EXT" },
2187 { ATA_CMD_WRITE_UNCORR_EXT, "WRITE UNCORRECTABLE EXT" },
2188 { ATA_CMD_STANDBYNOW1, "STANDBY IMMEDIATE" },
2189 { ATA_CMD_IDLEIMMEDIATE, "IDLE IMMEDIATE" },
2190 { ATA_CMD_SLEEP, "SLEEP" },
2191 { ATA_CMD_INIT_DEV_PARAMS, "INITIALIZE DEVICE PARAMETERS" },
2192 { ATA_CMD_READ_NATIVE_MAX, "READ NATIVE MAX ADDRESS" },
2193 { ATA_CMD_READ_NATIVE_MAX_EXT, "READ NATIVE MAX ADDRESS EXT" },
2194 { ATA_CMD_SET_MAX, "SET MAX ADDRESS" },
2195 { ATA_CMD_SET_MAX_EXT, "SET MAX ADDRESS EXT" },
2196 { ATA_CMD_READ_LOG_EXT, "READ LOG EXT" },
2197 { ATA_CMD_WRITE_LOG_EXT, "WRITE LOG EXT" },
2198 { ATA_CMD_READ_LOG_DMA_EXT, "READ LOG DMA EXT" },
2199 { ATA_CMD_WRITE_LOG_DMA_EXT, "WRITE LOG DMA EXT" },
2200 { ATA_CMD_TRUSTED_RCV, "TRUSTED RECEIVE" },
2201 { ATA_CMD_TRUSTED_RCV_DMA, "TRUSTED RECEIVE DMA" },
2202 { ATA_CMD_TRUSTED_SND, "TRUSTED SEND" },
2203 { ATA_CMD_TRUSTED_SND_DMA, "TRUSTED SEND DMA" },
2204 { ATA_CMD_PMP_READ, "READ BUFFER" },
2205 { ATA_CMD_PMP_WRITE, "WRITE BUFFER" },
2206 { ATA_CMD_CONF_OVERLAY, "DEVICE CONFIGURATION OVERLAY" },
2207 { ATA_CMD_SEC_SET_PASS, "SECURITY SET PASSWORD" },
2208 { ATA_CMD_SEC_UNLOCK, "SECURITY UNLOCK" },
2209 { ATA_CMD_SEC_ERASE_PREP, "SECURITY ERASE PREPARE" },
2210 { ATA_CMD_SEC_ERASE_UNIT, "SECURITY ERASE UNIT" },
2211 { ATA_CMD_SEC_FREEZE_LOCK, "SECURITY FREEZE LOCK" },
2212 { ATA_CMD_SEC_DISABLE_PASS, "SECURITY DISABLE PASSWORD" },
2213 { ATA_CMD_CONFIG_STREAM, "CONFIGURE STREAM" },
2214 { ATA_CMD_SMART, "SMART" },
2215 { ATA_CMD_MEDIA_LOCK, "DOOR LOCK" },
2216 { ATA_CMD_MEDIA_UNLOCK, "DOOR UNLOCK" },
2217 { ATA_CMD_CHK_MED_CRD_TYP, "CHECK MEDIA CARD TYPE" },
2218 { ATA_CMD_CFA_REQ_EXT_ERR, "CFA REQUEST EXTENDED ERROR" },
2219 { ATA_CMD_CFA_WRITE_NE, "CFA WRITE SECTORS WITHOUT ERASE" },
2220 { ATA_CMD_CFA_TRANS_SECT, "CFA TRANSLATE SECTOR" },
2221 { ATA_CMD_CFA_ERASE, "CFA ERASE SECTORS" },
2222 { ATA_CMD_CFA_WRITE_MULT_NE, "CFA WRITE MULTIPLE WITHOUT ERASE" },
2223 { ATA_CMD_READ_LONG, "READ LONG (with retries)" },
2224 { ATA_CMD_READ_LONG_ONCE, "READ LONG (without retries)" },
2225 { ATA_CMD_WRITE_LONG, "WRITE LONG (with retries)" },
2226 { ATA_CMD_WRITE_LONG_ONCE, "WRITE LONG (without retries)" },
2227 { ATA_CMD_RESTORE, "RECALIBRATE" },
2228 { 0, NULL } /* terminate list */
2229 };
2230
2231 unsigned int i;
2232 for (i = 0; cmd_descr[i].text; i++)
2233 if (cmd_descr[i].command == command)
2234 return cmd_descr[i].text;
2235 #endif
2236
2237 return NULL;
2238 }
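/* Example use (this mirrors the caller in ata_eh_link_report() below):
 * the return value must be checked because the table is compiled out
 * when CONFIG_ATA_VERBOSE_ERROR is not set.
 *
 *	const char *descr = ata_get_cmd_descript(qc->tf.command);
 *	if (descr)
 *		ata_dev_printk(qc->dev, KERN_ERR,
 *			       "failed command: %s\n", descr);
 */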
2239
2240 /**
2241 * ata_eh_link_report - report error handling to user
2242 * @link: ATA link EH is going on
2243 *
2244 * Report EH to user.
2245 *
2246 * LOCKING:
2247 * None.
2248 */
2249 static void ata_eh_link_report(struct ata_link *link)
2250 {
2251 struct ata_port *ap = link->ap;
2252 struct ata_eh_context *ehc = &link->eh_context;
2253 const char *frozen, *desc;
2254 char tries_buf[6];
2255 int tag, nr_failed = 0;
2256
2257 if (ehc->i.flags & ATA_EHI_QUIET)
2258 return;
2259
2260 desc = NULL;
2261 if (ehc->i.desc[0] != '\0')
2262 desc = ehc->i.desc;
2263
2264 for (tag = 0; tag < ATA_MAX_QUEUE; tag++) {
2265 struct ata_queued_cmd *qc = __ata_qc_from_tag(ap, tag);
2266
2267 if (!(qc->flags & ATA_QCFLAG_FAILED) ||
2268 ata_dev_phys_link(qc->dev) != link ||
2269 ((qc->flags & ATA_QCFLAG_QUIET) &&
2270 qc->err_mask == AC_ERR_DEV))
2271 continue;
2272 if (qc->flags & ATA_QCFLAG_SENSE_VALID && !qc->err_mask)
2273 continue;
2274
2275 nr_failed++;
2276 }
2277
2278 if (!nr_failed && !ehc->i.err_mask)
2279 return;
2280
2281 frozen = "";
2282 if (ap->pflags & ATA_PFLAG_FROZEN)
2283 frozen = " frozen";
2284
2285 memset(tries_buf, 0, sizeof(tries_buf));
2286 if (ap->eh_tries < ATA_EH_MAX_TRIES)
2287 snprintf(tries_buf, sizeof(tries_buf) - 1, " t%d",
2288 ap->eh_tries);
2289
2290 if (ehc->i.dev) {
2291 ata_dev_printk(ehc->i.dev, KERN_ERR, "exception Emask 0x%x "
2292 "SAct 0x%x SErr 0x%x action 0x%x%s%s\n",
2293 ehc->i.err_mask, link->sactive, ehc->i.serror,
2294 ehc->i.action, frozen, tries_buf);
2295 if (desc)
2296 ata_dev_printk(ehc->i.dev, KERN_ERR, "%s\n", desc);
2297 } else {
2298 ata_link_printk(link, KERN_ERR, "exception Emask 0x%x "
2299 "SAct 0x%x SErr 0x%x action 0x%x%s%s\n",
2300 ehc->i.err_mask, link->sactive, ehc->i.serror,
2301 ehc->i.action, frozen, tries_buf);
2302 if (desc)
2303 ata_link_printk(link, KERN_ERR, "%s\n", desc);
2304 }
2305
2306 #ifdef CONFIG_ATA_VERBOSE_ERROR
2307 if (ehc->i.serror)
2308 ata_link_printk(link, KERN_ERR,
2309 "SError: { %s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s}\n",
2310 ehc->i.serror & SERR_DATA_RECOVERED ? "RecovData " : "",
2311 ehc->i.serror & SERR_COMM_RECOVERED ? "RecovComm " : "",
2312 ehc->i.serror & SERR_DATA ? "UnrecovData " : "",
2313 ehc->i.serror & SERR_PERSISTENT ? "Persist " : "",
2314 ehc->i.serror & SERR_PROTOCOL ? "Proto " : "",
2315 ehc->i.serror & SERR_INTERNAL ? "HostInt " : "",
2316 ehc->i.serror & SERR_PHYRDY_CHG ? "PHYRdyChg " : "",
2317 ehc->i.serror & SERR_PHY_INT_ERR ? "PHYInt " : "",
2318 ehc->i.serror & SERR_COMM_WAKE ? "CommWake " : "",
2319 ehc->i.serror & SERR_10B_8B_ERR ? "10B8B " : "",
2320 ehc->i.serror & SERR_DISPARITY ? "Dispar " : "",
2321 ehc->i.serror & SERR_CRC ? "BadCRC " : "",
2322 ehc->i.serror & SERR_HANDSHAKE ? "Handshk " : "",
2323 ehc->i.serror & SERR_LINK_SEQ_ERR ? "LinkSeq " : "",
2324 ehc->i.serror & SERR_TRANS_ST_ERROR ? "TrStaTrns " : "",
2325 ehc->i.serror & SERR_UNRECOG_FIS ? "UnrecFIS " : "",
2326 ehc->i.serror & SERR_DEV_XCHG ? "DevExch " : "");
2327 #endif
2328
2329 for (tag = 0; tag < ATA_MAX_QUEUE; tag++) {
2330 struct ata_queued_cmd *qc = __ata_qc_from_tag(ap, tag);
2331 struct ata_taskfile *cmd = &qc->tf, *res = &qc->result_tf;
2332 const u8 *cdb = qc->cdb;
2333 char data_buf[20] = "";
2334 char cdb_buf[70] = "";
2335
2336 if (!(qc->flags & ATA_QCFLAG_FAILED) ||
2337 ata_dev_phys_link(qc->dev) != link || !qc->err_mask)
2338 continue;
2339
2340 if (qc->dma_dir != DMA_NONE) {
2341 static const char *dma_str[] = {
2342 [DMA_BIDIRECTIONAL] = "bidi",
2343 [DMA_TO_DEVICE] = "out",
2344 [DMA_FROM_DEVICE] = "in",
2345 };
2346 static const char *prot_str[] = {
2347 [ATA_PROT_PIO] = "pio",
2348 [ATA_PROT_DMA] = "dma",
2349 [ATA_PROT_NCQ] = "ncq",
2350 [ATAPI_PROT_PIO] = "pio",
2351 [ATAPI_PROT_DMA] = "dma",
2352 };
2353
2354 snprintf(data_buf, sizeof(data_buf), " %s %u %s",
2355 prot_str[qc->tf.protocol], qc->nbytes,
2356 dma_str[qc->dma_dir]);
2357 }
2358
2359 if (ata_is_atapi(qc->tf.protocol)) {
2360 if (qc->scsicmd)
2361 scsi_print_command(qc->scsicmd);
2362 else
2363 snprintf(cdb_buf, sizeof(cdb_buf),
2364 "cdb %02x %02x %02x %02x %02x %02x %02x %02x "
2365 "%02x %02x %02x %02x %02x %02x %02x %02x\n ",
2366 cdb[0], cdb[1], cdb[2], cdb[3],
2367 cdb[4], cdb[5], cdb[6], cdb[7],
2368 cdb[8], cdb[9], cdb[10], cdb[11],
2369 cdb[12], cdb[13], cdb[14], cdb[15]);
2370 } else {
2371 const char *descr = ata_get_cmd_descript(cmd->command);
2372 if (descr)
2373 ata_dev_printk(qc->dev, KERN_ERR,
2374 "failed command: %s\n", descr);
2375 }
2376
2377 ata_dev_printk(qc->dev, KERN_ERR,
2378 "cmd %02x/%02x:%02x:%02x:%02x:%02x/%02x:%02x:%02x:%02x:%02x/%02x "
2379 "tag %d%s\n %s"
2380 "res %02x/%02x:%02x:%02x:%02x:%02x/%02x:%02x:%02x:%02x:%02x/%02x "
2381 "Emask 0x%x (%s)%s\n",
2382 cmd->command, cmd->feature, cmd->nsect,
2383 cmd->lbal, cmd->lbam, cmd->lbah,
2384 cmd->hob_feature, cmd->hob_nsect,
2385 cmd->hob_lbal, cmd->hob_lbam, cmd->hob_lbah,
2386 cmd->device, qc->tag, data_buf, cdb_buf,
2387 res->command, res->feature, res->nsect,
2388 res->lbal, res->lbam, res->lbah,
2389 res->hob_feature, res->hob_nsect,
2390 res->hob_lbal, res->hob_lbam, res->hob_lbah,
2391 res->device, qc->err_mask, ata_err_string(qc->err_mask),
2392 qc->err_mask & AC_ERR_NCQ ? " <F>" : "");
2393
2394 #ifdef CONFIG_ATA_VERBOSE_ERROR
2395 if (res->command & (ATA_BUSY | ATA_DRDY | ATA_DF | ATA_DRQ |
2396 ATA_ERR)) {
2397 if (res->command & ATA_BUSY)
2398 ata_dev_printk(qc->dev, KERN_ERR,
2399 "status: { Busy }\n");
2400 else
2401 ata_dev_printk(qc->dev, KERN_ERR,
2402 "status: { %s%s%s%s}\n",
2403 res->command & ATA_DRDY ? "DRDY " : "",
2404 res->command & ATA_DF ? "DF " : "",
2405 res->command & ATA_DRQ ? "DRQ " : "",
2406 res->command & ATA_ERR ? "ERR " : "");
2407 }
2408
2409 if (cmd->command != ATA_CMD_PACKET &&
2410 (res->feature & (ATA_ICRC | ATA_UNC | ATA_IDNF |
2411 ATA_ABORTED)))
2412 ata_dev_printk(qc->dev, KERN_ERR,
2413 "error: { %s%s%s%s}\n",
2414 res->feature & ATA_ICRC ? "ICRC " : "",
2415 res->feature & ATA_UNC ? "UNC " : "",
2416 res->feature & ATA_IDNF ? "IDNF " : "",
2417 res->feature & ATA_ABORTED ? "ABRT " : "");
2418 #endif
2419 }
2420 }
2421
2422 /**
2423 * ata_eh_report - report error handling to user
2424 * @ap: ATA port to report EH about
2425 *
2426 * Report EH to user.
2427 *
2428 * LOCKING:
2429 * None.
2430 */
2431 void ata_eh_report(struct ata_port *ap)
2432 {
2433 struct ata_link *link;
2434
2435 ata_for_each_link(link, ap, HOST_FIRST)
2436 ata_eh_link_report(link);
2437 }
2438
2439 static int ata_do_reset(struct ata_link *link, ata_reset_fn_t reset,
2440 unsigned int *classes, unsigned long deadline,
2441 bool clear_classes)
2442 {
2443 struct ata_device *dev;
2444
2445 if (clear_classes)
2446 ata_for_each_dev(dev, link, ALL)
2447 classes[dev->devno] = ATA_DEV_UNKNOWN;
2448
2449 return reset(link, classes, deadline);
2450 }
2451
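/* A follow-up softreset is needed when hardreset could not classify
 * the attached devices (rc == -EAGAIN) or when softreset must probe
 * for a port multiplier behind a PMP-capable host link.
 */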
2452 static int ata_eh_followup_srst_needed(struct ata_link *link,
2453 int rc, const unsigned int *classes)
2454 {
2455 if ((link->flags & ATA_LFLAG_NO_SRST) || ata_link_offline(link))
2456 return 0;
2457 if (rc == -EAGAIN)
2458 return 1;
2459 if (sata_pmp_supported(link->ap) && ata_is_host_link(link))
2460 return 1;
2461 return 0;
2462 }
2463
2464 int ata_eh_reset(struct ata_link *link, int classify,
2465 ata_prereset_fn_t prereset, ata_reset_fn_t softreset,
2466 ata_reset_fn_t hardreset, ata_postreset_fn_t postreset)
2467 {
2468 struct ata_port *ap = link->ap;
2469 struct ata_link *slave = ap->slave_link;
2470 struct ata_eh_context *ehc = &link->eh_context;
2471 struct ata_eh_context *sehc = slave ? &slave->eh_context : NULL;
2472 unsigned int *classes = ehc->classes;
2473 unsigned int lflags = link->flags;
2474 int verbose = !(ehc->i.flags & ATA_EHI_QUIET);
2475 int max_tries = 0, try = 0;
2476 struct ata_link *failed_link;
2477 struct ata_device *dev;
2478 unsigned long deadline, now;
2479 ata_reset_fn_t reset;
2480 unsigned long flags;
2481 u32 sstatus;
2482 int nr_unknown, rc;
2483
2484 /*
2485 * Prepare to reset
2486 */
2487 while (ata_eh_reset_timeouts[max_tries] != ULONG_MAX)
2488 max_tries++;
2489 if (link->flags & ATA_LFLAG_NO_HRST)
2490 hardreset = NULL;
2491 if (link->flags & ATA_LFLAG_NO_SRST)
2492 softreset = NULL;
2493
2494 	/* make sure each reset attempt is at least COOL_DOWN apart */
2495 if (ehc->i.flags & ATA_EHI_DID_RESET) {
2496 now = jiffies;
2497 WARN_ON(time_after(ehc->last_reset, now));
2498 deadline = ata_deadline(ehc->last_reset,
2499 ATA_EH_RESET_COOL_DOWN);
2500 if (time_before(now, deadline))
2501 schedule_timeout_uninterruptible(deadline - now);
2502 }
2503
2504 spin_lock_irqsave(ap->lock, flags);
2505 ap->pflags |= ATA_PFLAG_RESETTING;
2506 spin_unlock_irqrestore(ap->lock, flags);
2507
2508 ata_eh_about_to_do(link, NULL, ATA_EH_RESET);
2509
2510 ata_for_each_dev(dev, link, ALL) {
2511 /* If we issue an SRST then an ATA drive (not ATAPI)
2512 * may change configuration and be in PIO0 timing. If
2513 * we do a hard reset (or are coming from power on)
2514 * this is true for ATA or ATAPI. Until we've set a
2515 * suitable controller mode we should not touch the
2516 * bus as we may be talking too fast.
2517 */
2518 dev->pio_mode = XFER_PIO_0;
2519
2520 /* If the controller has a pio mode setup function
2521 * then use it to set the chipset to rights. Don't
2522 * touch the DMA setup as that will be dealt with when
2523 * configuring devices.
2524 */
2525 if (ap->ops->set_piomode)
2526 ap->ops->set_piomode(ap, dev);
2527 }
2528
2529 /* prefer hardreset */
2530 reset = NULL;
2531 ehc->i.action &= ~ATA_EH_RESET;
2532 if (hardreset) {
2533 reset = hardreset;
2534 ehc->i.action |= ATA_EH_HARDRESET;
2535 } else if (softreset) {
2536 reset = softreset;
2537 ehc->i.action |= ATA_EH_SOFTRESET;
2538 }
2539
2540 if (prereset) {
2541 unsigned long deadline = ata_deadline(jiffies,
2542 ATA_EH_PRERESET_TIMEOUT);
2543
2544 if (slave) {
2545 sehc->i.action &= ~ATA_EH_RESET;
2546 sehc->i.action |= ehc->i.action;
2547 }
2548
2549 rc = prereset(link, deadline);
2550
2551 /* If present, do prereset on slave link too. Reset
2552 * is skipped iff both master and slave links report
2553 * -ENOENT or clear ATA_EH_RESET.
2554 */
2555 if (slave && (rc == 0 || rc == -ENOENT)) {
2556 int tmp;
2557
2558 tmp = prereset(slave, deadline);
2559 if (tmp != -ENOENT)
2560 rc = tmp;
2561
2562 ehc->i.action |= sehc->i.action;
2563 }
2564
2565 if (rc) {
2566 if (rc == -ENOENT) {
2567 ata_link_printk(link, KERN_DEBUG,
2568 "port disabled. ignoring.\n");
2569 ehc->i.action &= ~ATA_EH_RESET;
2570
2571 ata_for_each_dev(dev, link, ALL)
2572 classes[dev->devno] = ATA_DEV_NONE;
2573
2574 rc = 0;
2575 } else
2576 ata_link_printk(link, KERN_ERR,
2577 "prereset failed (errno=%d)\n", rc);
2578 goto out;
2579 }
2580
2581 /* prereset() might have cleared ATA_EH_RESET. If so,
2582 * bang classes, thaw and return.
2583 */
2584 if (reset && !(ehc->i.action & ATA_EH_RESET)) {
2585 ata_for_each_dev(dev, link, ALL)
2586 classes[dev->devno] = ATA_DEV_NONE;
2587 if ((ap->pflags & ATA_PFLAG_FROZEN) &&
2588 ata_is_host_link(link))
2589 ata_eh_thaw_port(ap);
2590 rc = 0;
2591 goto out;
2592 }
2593 }
2594
2595 retry:
2596 /*
2597 * Perform reset
2598 */
2599 if (ata_is_host_link(link))
2600 ata_eh_freeze_port(ap);
2601
2602 deadline = ata_deadline(jiffies, ata_eh_reset_timeouts[try++]);
2603
2604 if (reset) {
2605 if (verbose)
2606 ata_link_printk(link, KERN_INFO, "%s resetting link\n",
2607 reset == softreset ? "soft" : "hard");
2608
2609 /* mark that this EH session started with reset */
2610 ehc->last_reset = jiffies;
2611 if (reset == hardreset)
2612 ehc->i.flags |= ATA_EHI_DID_HARDRESET;
2613 else
2614 ehc->i.flags |= ATA_EHI_DID_SOFTRESET;
2615
2616 rc = ata_do_reset(link, reset, classes, deadline, true);
2617 if (rc && rc != -EAGAIN) {
2618 failed_link = link;
2619 goto fail;
2620 }
2621
2622 /* hardreset slave link if existent */
2623 if (slave && reset == hardreset) {
2624 int tmp;
2625
2626 if (verbose)
2627 ata_link_printk(slave, KERN_INFO,
2628 "hard resetting link\n");
2629
2630 ata_eh_about_to_do(slave, NULL, ATA_EH_RESET);
2631 tmp = ata_do_reset(slave, reset, classes, deadline,
2632 false);
2633 switch (tmp) {
2634 case -EAGAIN:
2635 				rc = -EAGAIN;	/* fall through */
2636 case 0:
2637 break;
2638 default:
2639 failed_link = slave;
2640 rc = tmp;
2641 goto fail;
2642 }
2643 }
2644
2645 /* perform follow-up SRST if necessary */
2646 if (reset == hardreset &&
2647 ata_eh_followup_srst_needed(link, rc, classes)) {
2648 reset = softreset;
2649
2650 if (!reset) {
2651 ata_link_printk(link, KERN_ERR,
2652 "follow-up softreset required "
2653 "but no softreset avaliable\n");
2654 failed_link = link;
2655 rc = -EINVAL;
2656 goto fail;
2657 }
2658
2659 ata_eh_about_to_do(link, NULL, ATA_EH_RESET);
2660 rc = ata_do_reset(link, reset, classes, deadline, true);
2661 if (rc) {
2662 failed_link = link;
2663 goto fail;
2664 }
2665 }
2666 } else {
2667 if (verbose)
2668 ata_link_printk(link, KERN_INFO, "no reset method "
2669 "available, skipping reset\n");
2670 if (!(lflags & ATA_LFLAG_ASSUME_CLASS))
2671 lflags |= ATA_LFLAG_ASSUME_ATA;
2672 }
2673
2674 /*
2675 * Post-reset processing
2676 */
2677 ata_for_each_dev(dev, link, ALL) {
2678 /* After the reset, the device state is PIO 0 and the
2679 * controller state is undefined. Reset also wakes up
2680 * drives from sleeping mode.
2681 */
2682 dev->pio_mode = XFER_PIO_0;
2683 dev->flags &= ~ATA_DFLAG_SLEEPING;
2684
2685 if (ata_phys_link_offline(ata_dev_phys_link(dev)))
2686 continue;
2687
2688 /* apply class override */
2689 if (lflags & ATA_LFLAG_ASSUME_ATA)
2690 classes[dev->devno] = ATA_DEV_ATA;
2691 else if (lflags & ATA_LFLAG_ASSUME_SEMB)
2692 classes[dev->devno] = ATA_DEV_SEMB_UNSUP;
2693 }
2694
2695 /* record current link speed */
2696 if (sata_scr_read(link, SCR_STATUS, &sstatus) == 0)
2697 link->sata_spd = (sstatus >> 4) & 0xf;
2698 if (slave && sata_scr_read(slave, SCR_STATUS, &sstatus) == 0)
2699 slave->sata_spd = (sstatus >> 4) & 0xf;
2700
2701 /* thaw the port */
2702 if (ata_is_host_link(link))
2703 ata_eh_thaw_port(ap);
2704
2705 /* postreset() should clear hardware SError. Although SError
2706 * is cleared during link resume, clearing SError here is
2707 * necessary as some PHYs raise hotplug events after SRST.
2708 	 * This introduces a race condition where hotplug occurs between
2709 	 * reset and here.  This race is mitigated by cross-checking
2710 * link onlineness and classification result later.
2711 */
2712 if (postreset) {
2713 postreset(link, classes);
2714 if (slave)
2715 postreset(slave, classes);
2716 }
2717
2718 /*
2719 * Some controllers can't be frozen very well and may set
2720 	 * spurious error conditions during reset.  Clear accumulated
2721 * error information. As reset is the final recovery action,
2722 * nothing is lost by doing this.
2723 */
2724 spin_lock_irqsave(link->ap->lock, flags);
2725 memset(&link->eh_info, 0, sizeof(link->eh_info));
2726 if (slave)
2727 		memset(&slave->eh_info, 0, sizeof(slave->eh_info));
2728 ap->pflags &= ~ATA_PFLAG_EH_PENDING;
2729 spin_unlock_irqrestore(link->ap->lock, flags);
2730
2731 /*
2732 * Make sure onlineness and classification result correspond.
2733 * Hotplug could have happened during reset and some
2734 * controllers fail to wait while a drive is spinning up after
2735 * being hotplugged causing misdetection. By cross checking
2736 * link on/offlineness and classification result, those
2737 * conditions can be reliably detected and retried.
2738 */
2739 nr_unknown = 0;
2740 ata_for_each_dev(dev, link, ALL) {
2741 if (ata_phys_link_online(ata_dev_phys_link(dev))) {
2742 if (classes[dev->devno] == ATA_DEV_UNKNOWN) {
2743 ata_dev_printk(dev, KERN_DEBUG, "link online "
2744 "but device misclassifed\n");
2745 classes[dev->devno] = ATA_DEV_NONE;
2746 nr_unknown++;
2747 }
2748 } else if (ata_phys_link_offline(ata_dev_phys_link(dev))) {
2749 if (ata_class_enabled(classes[dev->devno]))
2750 ata_dev_printk(dev, KERN_DEBUG, "link offline, "
2751 "clearing class %d to NONE\n",
2752 classes[dev->devno]);
2753 classes[dev->devno] = ATA_DEV_NONE;
2754 } else if (classes[dev->devno] == ATA_DEV_UNKNOWN) {
2755 ata_dev_printk(dev, KERN_DEBUG, "link status unknown, "
2756 "clearing UNKNOWN to NONE\n");
2757 classes[dev->devno] = ATA_DEV_NONE;
2758 }
2759 }
2760
2761 if (classify && nr_unknown) {
2762 if (try < max_tries) {
2763 ata_link_printk(link, KERN_WARNING, "link online but "
2764 "%d devices misclassified, retrying\n",
2765 nr_unknown);
2766 failed_link = link;
2767 rc = -EAGAIN;
2768 goto fail;
2769 }
2770 ata_link_printk(link, KERN_WARNING,
2771 "link online but %d devices misclassified, "
2772 "device detection might fail\n", nr_unknown);
2773 }
2774
2775 /* reset successful, schedule revalidation */
2776 ata_eh_done(link, NULL, ATA_EH_RESET);
2777 if (slave)
2778 ata_eh_done(slave, NULL, ATA_EH_RESET);
2779 ehc->last_reset = jiffies; /* update to completion time */
2780 ehc->i.action |= ATA_EH_REVALIDATE;
2781
2782 rc = 0;
2783 out:
2784 /* clear hotplug flag */
2785 ehc->i.flags &= ~ATA_EHI_HOTPLUGGED;
2786 if (slave)
2787 sehc->i.flags &= ~ATA_EHI_HOTPLUGGED;
2788
2789 spin_lock_irqsave(ap->lock, flags);
2790 ap->pflags &= ~ATA_PFLAG_RESETTING;
2791 spin_unlock_irqrestore(ap->lock, flags);
2792
2793 return rc;
2794
2795 fail:
2796 /* if SCR isn't accessible on a fan-out port, PMP needs to be reset */
2797 if (!ata_is_host_link(link) &&
2798 sata_scr_read(link, SCR_STATUS, &sstatus))
2799 rc = -ERESTART;
2800
2801 if (rc == -ERESTART || try >= max_tries)
2802 goto out;
2803
2804 now = jiffies;
2805 if (time_before(now, deadline)) {
2806 unsigned long delta = deadline - now;
2807
2808 ata_link_printk(failed_link, KERN_WARNING,
2809 "reset failed (errno=%d), retrying in %u secs\n",
2810 rc, DIV_ROUND_UP(jiffies_to_msecs(delta), 1000));
2811
2812 while (delta)
2813 delta = schedule_timeout_uninterruptible(delta);
2814 }
2815
2816 if (try == max_tries - 1) {
2817 sata_down_spd_limit(link, 0);
2818 if (slave)
2819 sata_down_spd_limit(slave, 0);
2820 } else if (rc == -EPIPE)
2821 sata_down_spd_limit(failed_link, 0);
2822
2823 if (hardreset)
2824 reset = hardreset;
2825 goto retry;
2826 }
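/* Minimal sketch (assumption; "my_hardreset" is a hypothetical name) of
 * a driver-supplied hardreset suitable for passing to ata_eh_reset()
 * via ata_do_eh() or ata_std_error_handler():
 *
 *	static int my_hardreset(struct ata_link *link, unsigned int *class,
 *				unsigned long deadline)
 *	{
 *		const unsigned long *timing =
 *			sata_ehc_deb_timing(&link->eh_context);
 *		bool online;
 *
 *		return sata_link_hardreset(link, timing, deadline,
 *					   &online, NULL);
 *	}
 *
 * Returning -EAGAIN would trigger the follow-up softreset logic above.
 */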
2827
2828 static inline void ata_eh_pull_park_action(struct ata_port *ap)
2829 {
2830 struct ata_link *link;
2831 struct ata_device *dev;
2832 unsigned long flags;
2833
2834 /*
2835 * This function can be thought of as an extended version of
2836 * ata_eh_about_to_do() specially crafted to accommodate the
2837 * requirements of ATA_EH_PARK handling. Since the EH thread
2838 * does not leave the do {} while () loop in ata_eh_recover as
2839 * long as the timeout for a park request to *one* device on
2840 * the port has not expired, and since we still want to pick
2841 * up park requests to other devices on the same port or
2842 * timeout updates for the same device, we have to pull
2843 * ATA_EH_PARK actions from eh_info into eh_context.i
2844 * ourselves at the beginning of each pass over the loop.
2845 *
2846 * Additionally, all write accesses to &ap->park_req_pending
2847 * through INIT_COMPLETION() (see below) or complete_all()
2848 * (see ata_scsi_park_store()) are protected by the host lock.
2849 * As a result we have that park_req_pending.done is zero on
2850 * exit from this function, i.e. when ATA_EH_PARK actions for
2851 * *all* devices on port ap have been pulled into the
2852 * respective eh_context structs. If, and only if,
2853 * park_req_pending.done is non-zero by the time we reach
2854 * wait_for_completion_timeout(), another ATA_EH_PARK action
2855 * has been scheduled for at least one of the devices on port
2856 * ap and we have to cycle over the do {} while () loop in
2857 * ata_eh_recover() again.
2858 */
2859
2860 spin_lock_irqsave(ap->lock, flags);
2861 INIT_COMPLETION(ap->park_req_pending);
2862 ata_for_each_link(link, ap, EDGE) {
2863 ata_for_each_dev(dev, link, ALL) {
2864 struct ata_eh_info *ehi = &link->eh_info;
2865
2866 link->eh_context.i.dev_action[dev->devno] |=
2867 ehi->dev_action[dev->devno] & ATA_EH_PARK;
2868 ata_eh_clear_action(link, dev, ehi, ATA_EH_PARK);
2869 }
2870 }
2871 spin_unlock_irqrestore(ap->lock, flags);
2872 }
2873
2874 static void ata_eh_park_issue_cmd(struct ata_device *dev, int park)
2875 {
2876 struct ata_eh_context *ehc = &dev->link->eh_context;
2877 struct ata_taskfile tf;
2878 unsigned int err_mask;
2879
2880 ata_tf_init(dev, &tf);
2881 if (park) {
2882 ehc->unloaded_mask |= 1 << dev->devno;
2883 		tf.command = ATA_CMD_IDLEIMMEDIATE;
2884 		tf.feature = 0x44;	/* IDLE IMMEDIATE with UNLOAD FEATURE */
2885 		tf.lbal = 0x4c;		/* LBA 0x554e4c; success returns 0xc4 in lbal */
2886 		tf.lbam = 0x4e;
2887 		tf.lbah = 0x55;
2888 } else {
2889 ehc->unloaded_mask &= ~(1 << dev->devno);
2890 tf.command = ATA_CMD_CHK_POWER;
2891 }
2892
2893 tf.flags |= ATA_TFLAG_DEVICE | ATA_TFLAG_ISADDR;
2894 tf.protocol |= ATA_PROT_NODATA;
2895 err_mask = ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0, 0);
2896 if (park && (err_mask || tf.lbal != 0xc4)) {
2897 ata_dev_printk(dev, KERN_ERR, "head unload failed!\n");
2898 ehc->unloaded_mask &= ~(1 << dev->devno);
2899 }
2900 }
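/* Sketch of the request path (drawn from the comments in
 * ata_eh_pull_park_action() above): userspace arms head parking via the
 * per-device sysfs attribute, e.g.
 *
 *	echo 5000 > /sys/block/sda/device/unload_heads	(milliseconds)
 *
 * ata_scsi_park_store() records ATA_EH_PARK and schedules EH;
 * ata_eh_recover() then calls this helper with park=1, and again with
 * park=0 once dev->unpark_deadline has passed.
 */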
2901
2902 static int ata_eh_revalidate_and_attach(struct ata_link *link,
2903 struct ata_device **r_failed_dev)
2904 {
2905 struct ata_port *ap = link->ap;
2906 struct ata_eh_context *ehc = &link->eh_context;
2907 struct ata_device *dev;
2908 unsigned int new_mask = 0;
2909 unsigned long flags;
2910 int rc = 0;
2911
2912 DPRINTK("ENTER\n");
2913
2914 /* For PATA drive side cable detection to work, IDENTIFY must
2915 * be done backwards such that PDIAG- is released by the slave
2916 * device before the master device is identified.
2917 */
2918 ata_for_each_dev(dev, link, ALL_REVERSE) {
2919 unsigned int action = ata_eh_dev_action(dev);
2920 unsigned int readid_flags = 0;
2921
2922 if (ehc->i.flags & ATA_EHI_DID_RESET)
2923 readid_flags |= ATA_READID_POSTRESET;
2924
2925 if ((action & ATA_EH_REVALIDATE) && ata_dev_enabled(dev)) {
2926 WARN_ON(dev->class == ATA_DEV_PMP);
2927
2928 if (ata_phys_link_offline(ata_dev_phys_link(dev))) {
2929 rc = -EIO;
2930 goto err;
2931 }
2932
2933 ata_eh_about_to_do(link, dev, ATA_EH_REVALIDATE);
2934 rc = ata_dev_revalidate(dev, ehc->classes[dev->devno],
2935 readid_flags);
2936 if (rc)
2937 goto err;
2938
2939 ata_eh_done(link, dev, ATA_EH_REVALIDATE);
2940
2941 /* Configuration may have changed, reconfigure
2942 * transfer mode.
2943 */
2944 ehc->i.flags |= ATA_EHI_SETMODE;
2945
2946 /* schedule the scsi_rescan_device() here */
2947 schedule_work(&(ap->scsi_rescan_task));
2948 } else if (dev->class == ATA_DEV_UNKNOWN &&
2949 ehc->tries[dev->devno] &&
2950 ata_class_enabled(ehc->classes[dev->devno])) {
2951 /* Temporarily set dev->class, it will be
2952 * permanently set once all configurations are
2953 * complete. This is necessary because new
2954 * device configuration is done in two
2955 * separate loops.
2956 */
2957 dev->class = ehc->classes[dev->devno];
2958
2959 if (dev->class == ATA_DEV_PMP)
2960 rc = sata_pmp_attach(dev);
2961 else
2962 rc = ata_dev_read_id(dev, &dev->class,
2963 readid_flags, dev->id);
2964
2965 /* read_id might have changed class, store and reset */
2966 ehc->classes[dev->devno] = dev->class;
2967 dev->class = ATA_DEV_UNKNOWN;
2968
2969 switch (rc) {
2970 case 0:
2971 /* clear error info accumulated during probe */
2972 ata_ering_clear(&dev->ering);
2973 new_mask |= 1 << dev->devno;
2974 break;
2975 case -ENOENT:
2976 /* IDENTIFY was issued to non-existent
2977 * device. No need to reset. Just
2978 * thaw and ignore the device.
2979 */
2980 ata_eh_thaw_port(ap);
2981 break;
2982 default:
2983 goto err;
2984 }
2985 }
2986 }
2987
2988 /* PDIAG- should have been released, ask cable type if post-reset */
2989 if ((ehc->i.flags & ATA_EHI_DID_RESET) && ata_is_host_link(link)) {
2990 if (ap->ops->cable_detect)
2991 ap->cbl = ap->ops->cable_detect(ap);
2992 ata_force_cbl(ap);
2993 }
2994
2995 /* Configure new devices forward such that user doesn't see
2996 * device detection messages backwards.
2997 */
2998 ata_for_each_dev(dev, link, ALL) {
2999 if (!(new_mask & (1 << dev->devno)))
3000 continue;
3001
3002 dev->class = ehc->classes[dev->devno];
3003
3004 if (dev->class == ATA_DEV_PMP)
3005 continue;
3006
3007 ehc->i.flags |= ATA_EHI_PRINTINFO;
3008 rc = ata_dev_configure(dev);
3009 ehc->i.flags &= ~ATA_EHI_PRINTINFO;
3010 if (rc) {
3011 dev->class = ATA_DEV_UNKNOWN;
3012 goto err;
3013 }
3014
3015 spin_lock_irqsave(ap->lock, flags);
3016 ap->pflags |= ATA_PFLAG_SCSI_HOTPLUG;
3017 spin_unlock_irqrestore(ap->lock, flags);
3018
3019 /* new device discovered, configure xfermode */
3020 ehc->i.flags |= ATA_EHI_SETMODE;
3021 }
3022
3023 return 0;
3024
3025 err:
3026 *r_failed_dev = dev;
3027 DPRINTK("EXIT rc=%d\n", rc);
3028 return rc;
3029 }
3030
3031 /**
3032 * ata_set_mode - Program timings and issue SET FEATURES - XFER
3033 * @link: link on which timings will be programmed
3034 * @r_failed_dev: out parameter for failed device
3035 *
3036 * Set ATA device disk transfer mode (PIO3, UDMA6, etc.). If
3037 * ata_set_mode() fails, pointer to the failing device is
3038 * returned in @r_failed_dev.
3039 *
3040 * LOCKING:
3041 * PCI/etc. bus probe sem.
3042 *
3043 * RETURNS:
3044 * 0 on success, negative errno otherwise
3045 */
3046 int ata_set_mode(struct ata_link *link, struct ata_device **r_failed_dev)
3047 {
3048 struct ata_port *ap = link->ap;
3049 struct ata_device *dev;
3050 int rc;
3051
3052 /* if data transfer is verified, clear DUBIOUS_XFER on ering top */
3053 ata_for_each_dev(dev, link, ENABLED) {
3054 if (!(dev->flags & ATA_DFLAG_DUBIOUS_XFER)) {
3055 struct ata_ering_entry *ent;
3056
3057 ent = ata_ering_top(&dev->ering);
3058 if (ent)
3059 ent->eflags &= ~ATA_EFLAG_DUBIOUS_XFER;
3060 }
3061 }
3062
3063 /* has private set_mode? */
3064 if (ap->ops->set_mode)
3065 rc = ap->ops->set_mode(link, r_failed_dev);
3066 else
3067 rc = ata_do_set_mode(link, r_failed_dev);
3068
3069 /* if transfer mode has changed, set DUBIOUS_XFER on device */
3070 ata_for_each_dev(dev, link, ENABLED) {
3071 struct ata_eh_context *ehc = &link->eh_context;
3072 u8 saved_xfer_mode = ehc->saved_xfer_mode[dev->devno];
3073 u8 saved_ncq = !!(ehc->saved_ncq_enabled & (1 << dev->devno));
3074
3075 if (dev->xfer_mode != saved_xfer_mode ||
3076 ata_ncq_enabled(dev) != saved_ncq)
3077 dev->flags |= ATA_DFLAG_DUBIOUS_XFER;
3078 }
3079
3080 return rc;
3081 }
3082
3083 /**
3084 * atapi_eh_clear_ua - Clear ATAPI UNIT ATTENTION after reset
3085 * @dev: ATAPI device to clear UA for
3086 *
3087 * Resets and other operations can make an ATAPI device raise
3088 * UNIT ATTENTION which causes the next operation to fail. This
3089 * function clears UA.
3090 *
3091 * LOCKING:
3092 * EH context (may sleep).
3093 *
3094 * RETURNS:
3095 * 0 on success, -errno on failure.
3096 */
3097 static int atapi_eh_clear_ua(struct ata_device *dev)
3098 {
3099 int i;
3100
3101 for (i = 0; i < ATA_EH_UA_TRIES; i++) {
3102 u8 *sense_buffer = dev->link->ap->sector_buf;
3103 u8 sense_key = 0;
3104 unsigned int err_mask;
3105
3106 err_mask = atapi_eh_tur(dev, &sense_key);
3107 if (err_mask != 0 && err_mask != AC_ERR_DEV) {
3108 ata_dev_printk(dev, KERN_WARNING, "TEST_UNIT_READY "
3109 "failed (err_mask=0x%x)\n", err_mask);
3110 return -EIO;
3111 }
3112
3113 if (!err_mask || sense_key != UNIT_ATTENTION)
3114 return 0;
3115
3116 err_mask = atapi_eh_request_sense(dev, sense_buffer, sense_key);
3117 if (err_mask) {
3118 ata_dev_printk(dev, KERN_WARNING, "failed to clear "
3119 "UNIT ATTENTION (err_mask=0x%x)\n", err_mask);
3120 return -EIO;
3121 }
3122 }
3123
3124 ata_dev_printk(dev, KERN_WARNING,
3125 "UNIT ATTENTION persists after %d tries\n", ATA_EH_UA_TRIES);
3126
3127 return 0;
3128 }
3129
3130 /**
3131 * ata_eh_maybe_retry_flush - Retry FLUSH if necessary
3132 * @dev: ATA device which may need FLUSH retry
3133 *
3134 * If @dev failed FLUSH, it needs to be reported to the upper layer
3135 * immediately as it means that @dev failed to remap and already
3136 * lost at least a sector and further FLUSH retries won't make
3137 * any difference to the lost sector. However, if FLUSH failed
3138 * for other reasons, for example transmission error, FLUSH needs
3139 * to be retried.
3140 *
3141 * This function determines whether FLUSH failure retry is
3142 * necessary and performs it if so.
3143 *
3144 * RETURNS:
3145 * 0 if EH can continue, -errno if EH needs to be repeated.
3146 */
3147 static int ata_eh_maybe_retry_flush(struct ata_device *dev)
3148 {
3149 struct ata_link *link = dev->link;
3150 struct ata_port *ap = link->ap;
3151 struct ata_queued_cmd *qc;
3152 struct ata_taskfile tf;
3153 unsigned int err_mask;
3154 int rc = 0;
3155
3156 /* did flush fail for this device? */
3157 if (!ata_tag_valid(link->active_tag))
3158 return 0;
3159
3160 qc = __ata_qc_from_tag(ap, link->active_tag);
3161 if (qc->dev != dev || (qc->tf.command != ATA_CMD_FLUSH_EXT &&
3162 qc->tf.command != ATA_CMD_FLUSH))
3163 return 0;
3164
3165 /* if the device failed it, it should be reported to upper layers */
3166 if (qc->err_mask & AC_ERR_DEV)
3167 return 0;
3168
3169 /* flush failed for some other reason, give it another shot */
3170 ata_tf_init(dev, &tf);
3171
3172 tf.command = qc->tf.command;
3173 tf.flags |= ATA_TFLAG_DEVICE;
3174 tf.protocol = ATA_PROT_NODATA;
3175
3176 ata_dev_printk(dev, KERN_WARNING, "retrying FLUSH 0x%x Emask 0x%x\n",
3177 tf.command, qc->err_mask);
3178
3179 err_mask = ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0, 0);
3180 if (!err_mask) {
3181 /*
3182 * FLUSH is complete but there's no way to
3183 * successfully complete a failed command from EH.
3184 * Making sure retry is allowed at least once and
3185 * retrying it should do the trick - whatever was in
3186 * the cache is already on the platter and this won't
3187 		 * cause an infinite loop.
3188 */
3189 qc->scsicmd->allowed = max(qc->scsicmd->allowed, 1);
3190 } else {
3191 ata_dev_printk(dev, KERN_WARNING, "FLUSH failed Emask 0x%x\n",
3192 err_mask);
3193 rc = -EIO;
3194
3195 /* if device failed it, report it to upper layers */
3196 if (err_mask & AC_ERR_DEV) {
3197 qc->err_mask |= AC_ERR_DEV;
3198 qc->result_tf = tf;
3199 if (!(ap->pflags & ATA_PFLAG_FROZEN))
3200 rc = 0;
3201 }
3202 }
3203 return rc;
3204 }
3205
3206 static int ata_link_nr_enabled(struct ata_link *link)
3207 {
3208 struct ata_device *dev;
3209 int cnt = 0;
3210
3211 ata_for_each_dev(dev, link, ENABLED)
3212 cnt++;
3213 return cnt;
3214 }
3215
3216 static int ata_link_nr_vacant(struct ata_link *link)
3217 {
3218 struct ata_device *dev;
3219 int cnt = 0;
3220
3221 ata_for_each_dev(dev, link, ALL)
3222 if (dev->class == ATA_DEV_UNKNOWN)
3223 cnt++;
3224 return cnt;
3225 }
3226
3227 static int ata_eh_skip_recovery(struct ata_link *link)
3228 {
3229 struct ata_port *ap = link->ap;
3230 struct ata_eh_context *ehc = &link->eh_context;
3231 struct ata_device *dev;
3232
3233 /* skip disabled links */
3234 if (link->flags & ATA_LFLAG_DISABLED)
3235 return 1;
3236
3237 /* thaw frozen port and recover failed devices */
3238 if ((ap->pflags & ATA_PFLAG_FROZEN) || ata_link_nr_enabled(link))
3239 return 0;
3240
3241 /* reset at least once if reset is requested */
3242 if ((ehc->i.action & ATA_EH_RESET) &&
3243 !(ehc->i.flags & ATA_EHI_DID_RESET))
3244 return 0;
3245
3246 /* skip if class codes for all vacant slots are ATA_DEV_NONE */
3247 ata_for_each_dev(dev, link, ALL) {
3248 if (dev->class == ATA_DEV_UNKNOWN &&
3249 ehc->classes[dev->devno] != ATA_DEV_NONE)
3250 return 0;
3251 }
3252
3253 return 1;
3254 }
3255
3256 static int ata_count_probe_trials_cb(struct ata_ering_entry *ent, void *void_arg)
3257 {
3258 u64 interval = msecs_to_jiffies(ATA_EH_PROBE_TRIAL_INTERVAL);
3259 u64 now = get_jiffies_64();
3260 int *trials = void_arg;
3261
3262 if (ent->timestamp < now - min(now, interval))
3263 		return -1;	/* non-zero return stops ata_ering_map() */
3264
3265 (*trials)++;
3266 return 0;
3267 }
3268
3269 static int ata_eh_schedule_probe(struct ata_device *dev)
3270 {
3271 struct ata_eh_context *ehc = &dev->link->eh_context;
3272 struct ata_link *link = ata_dev_phys_link(dev);
3273 int trials = 0;
3274
3275 if (!(ehc->i.probe_mask & (1 << dev->devno)) ||
3276 (ehc->did_probe_mask & (1 << dev->devno)))
3277 return 0;
3278
3279 ata_eh_detach_dev(dev);
3280 ata_dev_init(dev);
3281 ehc->did_probe_mask |= (1 << dev->devno);
3282 ehc->i.action |= ATA_EH_RESET;
3283 ehc->saved_xfer_mode[dev->devno] = 0;
3284 ehc->saved_ncq_enabled &= ~(1 << dev->devno);
3285
3286 /* Record and count probe trials on the ering. The specific
3287 * error mask used is irrelevant. Because a successful device
3288 * detection clears the ering, this count accumulates only if
3289 * there are consecutive failed probes.
3290 *
3291 	 * If the count exceeds ATA_EH_PROBE_TRIALS in the last
3292 	 * ATA_EH_PROBE_TRIAL_INTERVAL, link speed is
3293 * forced to 1.5Gbps.
3294 *
3295 * This is to work around cases where failed link speed
3296 * negotiation results in device misdetection leading to
3297 * infinite DEVXCHG or PHRDY CHG events.
3298 */
3299 ata_ering_record(&dev->ering, 0, AC_ERR_OTHER);
3300 ata_ering_map(&dev->ering, ata_count_probe_trials_cb, &trials);
3301
3302 if (trials > ATA_EH_PROBE_TRIALS)
3303 sata_down_spd_limit(link, 1);
3304
3305 return 1;
3306 }
3307
3308 static int ata_eh_handle_dev_fail(struct ata_device *dev, int err)
3309 {
3310 struct ata_eh_context *ehc = &dev->link->eh_context;
3311
3312 /* -EAGAIN from EH routine indicates retry without prejudice.
3313 * The requester is responsible for ensuring forward progress.
3314 */
3315 if (err != -EAGAIN)
3316 ehc->tries[dev->devno]--;
3317
3318 switch (err) {
3319 case -ENODEV:
3320 /* device missing or wrong IDENTIFY data, schedule probing */
3321 		ehc->i.probe_mask |= (1 << dev->devno);	/* fall through */
3322 case -EINVAL:
3323 /* give it just one more chance */
3324 		ehc->tries[dev->devno] = min(ehc->tries[dev->devno], 1);	/* fall through */
3325 case -EIO:
3326 if (ehc->tries[dev->devno] == 1) {
3327 /* This is the last chance, better to slow
3328 * down than lose it.
3329 */
3330 sata_down_spd_limit(ata_dev_phys_link(dev), 0);
3331 if (dev->pio_mode > XFER_PIO_0)
3332 ata_down_xfermask_limit(dev, ATA_DNXFER_PIO);
3333 }
3334 }
3335
3336 if (ata_dev_enabled(dev) && !ehc->tries[dev->devno]) {
3337 /* disable device if it has used up all its chances */
3338 ata_dev_disable(dev);
3339
3340 /* detach if offline */
3341 if (ata_phys_link_offline(ata_dev_phys_link(dev)))
3342 ata_eh_detach_dev(dev);
3343
3344 /* schedule probe if necessary */
3345 if (ata_eh_schedule_probe(dev)) {
3346 ehc->tries[dev->devno] = ATA_EH_DEV_TRIES;
3347 memset(ehc->cmd_timeout_idx[dev->devno], 0,
3348 sizeof(ehc->cmd_timeout_idx[dev->devno]));
3349 }
3350
3351 return 1;
3352 } else {
3353 ehc->i.action |= ATA_EH_RESET;
3354 return 0;
3355 }
3356 }
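/* Worked example (assuming ATA_EH_DEV_TRIES == 3): the first -EIO
 * failure drops ehc->tries from 3 to 2, the second from 2 to 1 and
 * triggers the speed-down/PIO clamp above, and the third zeroes the
 * count, disabling the device and possibly scheduling a fresh probe
 * through ata_eh_schedule_probe().
 */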
3357
3358 /**
3359 * ata_eh_recover - recover host port after error
3360 * @ap: host port to recover
3361 * @prereset: prereset method (can be NULL)
3362 * @softreset: softreset method (can be NULL)
3363 * @hardreset: hardreset method (can be NULL)
3364 * @postreset: postreset method (can be NULL)
3365 * @r_failed_link: out parameter for failed link
3366 *
3367 * This is the alpha and omega, yin and yang, heart and soul of
3368 * libata exception handling. On entry, actions required to
3369 * recover each link and hotplug requests are recorded in the
3370 * link's eh_context. This function executes all the operations
3371 * with appropriate retrials and fallbacks to resurrect failed
3372 * devices, detach goners and greet newcomers.
3373 *
3374 * LOCKING:
3375 * Kernel thread context (may sleep).
3376 *
3377 * RETURNS:
3378 * 0 on success, -errno on failure.
3379 */
3380 int ata_eh_recover(struct ata_port *ap, ata_prereset_fn_t prereset,
3381 ata_reset_fn_t softreset, ata_reset_fn_t hardreset,
3382 ata_postreset_fn_t postreset,
3383 struct ata_link **r_failed_link)
3384 {
3385 struct ata_link *link;
3386 struct ata_device *dev;
3387 int nr_failed_devs;
3388 int rc;
3389 unsigned long flags, deadline;
3390
3391 DPRINTK("ENTER\n");
3392
3393 /* prep for recovery */
3394 ata_for_each_link(link, ap, EDGE) {
3395 struct ata_eh_context *ehc = &link->eh_context;
3396
3397 /* re-enable link? */
3398 if (ehc->i.action & ATA_EH_ENABLE_LINK) {
3399 ata_eh_about_to_do(link, NULL, ATA_EH_ENABLE_LINK);
3400 spin_lock_irqsave(ap->lock, flags);
3401 link->flags &= ~ATA_LFLAG_DISABLED;
3402 spin_unlock_irqrestore(ap->lock, flags);
3403 ata_eh_done(link, NULL, ATA_EH_ENABLE_LINK);
3404 }
3405
3406 ata_for_each_dev(dev, link, ALL) {
3407 if (link->flags & ATA_LFLAG_NO_RETRY)
3408 ehc->tries[dev->devno] = 1;
3409 else
3410 ehc->tries[dev->devno] = ATA_EH_DEV_TRIES;
3411
3412 /* collect port action mask recorded in dev actions */
3413 ehc->i.action |= ehc->i.dev_action[dev->devno] &
3414 ~ATA_EH_PERDEV_MASK;
3415 ehc->i.dev_action[dev->devno] &= ATA_EH_PERDEV_MASK;
3416
3417 /* process hotplug request */
3418 if (dev->flags & ATA_DFLAG_DETACH)
3419 ata_eh_detach_dev(dev);
3420
3421 /* schedule probe if necessary */
3422 if (!ata_dev_enabled(dev))
3423 ata_eh_schedule_probe(dev);
3424 }
3425 }
3426
3427 retry:
3428 rc = 0;
3429 nr_failed_devs = 0;
3430
3431 /* if UNLOADING, finish immediately */
3432 if (ap->pflags & ATA_PFLAG_UNLOADING)
3433 goto out;
3434
3435 /* prep for EH */
3436 ata_for_each_link(link, ap, EDGE) {
3437 struct ata_eh_context *ehc = &link->eh_context;
3438
3439 /* skip EH if possible. */
3440 if (ata_eh_skip_recovery(link))
3441 ehc->i.action = 0;
3442
3443 ata_for_each_dev(dev, link, ALL)
3444 ehc->classes[dev->devno] = ATA_DEV_UNKNOWN;
3445 }
3446
3447 /* reset */
3448 ata_for_each_link(link, ap, EDGE) {
3449 struct ata_eh_context *ehc = &link->eh_context;
3450
3451 if (!(ehc->i.action & ATA_EH_RESET))
3452 continue;
3453
3454 rc = ata_eh_reset(link, ata_link_nr_vacant(link),
3455 prereset, softreset, hardreset, postreset);
3456 if (rc) {
3457 ata_link_printk(link, KERN_ERR,
3458 "reset failed, giving up\n");
3459 goto out;
3460 }
3461 }
3462
3463 do {
3464 unsigned long now;
3465
3466 /*
3467 * clears ATA_EH_PARK in eh_info and resets
3468 * ap->park_req_pending
3469 */
3470 ata_eh_pull_park_action(ap);
3471
3472 deadline = jiffies;
3473 ata_for_each_link(link, ap, EDGE) {
3474 ata_for_each_dev(dev, link, ALL) {
3475 struct ata_eh_context *ehc = &link->eh_context;
3476 unsigned long tmp;
3477
3478 if (dev->class != ATA_DEV_ATA)
3479 continue;
3480 if (!(ehc->i.dev_action[dev->devno] &
3481 ATA_EH_PARK))
3482 continue;
3483 tmp = dev->unpark_deadline;
3484 if (time_before(deadline, tmp))
3485 deadline = tmp;
3486 else if (time_before_eq(tmp, jiffies))
3487 continue;
3488 if (ehc->unloaded_mask & (1 << dev->devno))
3489 continue;
3490
3491 ata_eh_park_issue_cmd(dev, 1);
3492 }
3493 }
3494
3495 now = jiffies;
3496 if (time_before_eq(deadline, now))
3497 break;
3498
3499 deadline = wait_for_completion_timeout(&ap->park_req_pending,
3500 deadline - now);
3501 } while (deadline);
3502 ata_for_each_link(link, ap, EDGE) {
3503 ata_for_each_dev(dev, link, ALL) {
3504 if (!(link->eh_context.unloaded_mask &
3505 (1 << dev->devno)))
3506 continue;
3507
3508 ata_eh_park_issue_cmd(dev, 0);
3509 ata_eh_done(link, dev, ATA_EH_PARK);
3510 }
3511 }
3512
3513 /* the rest */
3514 ata_for_each_link(link, ap, EDGE) {
3515 struct ata_eh_context *ehc = &link->eh_context;
3516
3517 /* revalidate existing devices and attach new ones */
3518 rc = ata_eh_revalidate_and_attach(link, &dev);
3519 if (rc)
3520 goto dev_fail;
3521
3522 /* if PMP got attached, return, pmp EH will take care of it */
3523 if (link->device->class == ATA_DEV_PMP) {
3524 ehc->i.action = 0;
3525 return 0;
3526 }
3527
3528 /* configure transfer mode if necessary */
3529 if (ehc->i.flags & ATA_EHI_SETMODE) {
3530 rc = ata_set_mode(link, &dev);
3531 if (rc)
3532 goto dev_fail;
3533 ehc->i.flags &= ~ATA_EHI_SETMODE;
3534 }
3535
3536 /* If reset has been issued, clear UA to avoid
3537 * disrupting the current users of the device.
3538 */
3539 if (ehc->i.flags & ATA_EHI_DID_RESET) {
3540 ata_for_each_dev(dev, link, ALL) {
3541 if (dev->class != ATA_DEV_ATAPI)
3542 continue;
3543 rc = atapi_eh_clear_ua(dev);
3544 if (rc)
3545 goto dev_fail;
3546 }
3547 }
3548
3549 /* retry flush if necessary */
3550 ata_for_each_dev(dev, link, ALL) {
3551 if (dev->class != ATA_DEV_ATA)
3552 continue;
3553 rc = ata_eh_maybe_retry_flush(dev);
3554 if (rc)
3555 goto dev_fail;
3556 }
3557
3558 /* configure link power saving */
3559 if (ehc->i.action & ATA_EH_LPM)
3560 ata_for_each_dev(dev, link, ALL)
3561 ata_dev_enable_pm(dev, ap->pm_policy);
3562
3563 /* this link is okay now */
3564 ehc->i.flags = 0;
3565 continue;
3566
3567 dev_fail:
3568 nr_failed_devs++;
3569 ata_eh_handle_dev_fail(dev, rc);
3570
3571 if (ap->pflags & ATA_PFLAG_FROZEN) {
3572 /* PMP reset requires working host port.
3573 * Can't retry if it's frozen.
3574 */
3575 if (sata_pmp_attached(ap))
3576 goto out;
3577 break;
3578 }
3579 }
3580
3581 if (nr_failed_devs)
3582 goto retry;
3583
3584 out:
3585 if (rc && r_failed_link)
3586 *r_failed_link = link;
3587
3588 DPRINTK("EXIT, rc=%d\n", rc);
3589 return rc;
3590 }
3591
3592 /**
3593 * ata_eh_finish - finish up EH
3594 * @ap: host port to finish EH for
3595 *
3596 * Recovery is complete. Clean up EH states and retry or finish
3597 * failed qcs.
3598 *
3599 * LOCKING:
3600 * None.
3601 */
3602 void ata_eh_finish(struct ata_port *ap)
3603 {
3604 int tag;
3605
3606 /* retry or finish qcs */
3607 for (tag = 0; tag < ATA_MAX_QUEUE; tag++) {
3608 struct ata_queued_cmd *qc = __ata_qc_from_tag(ap, tag);
3609
3610 if (!(qc->flags & ATA_QCFLAG_FAILED))
3611 continue;
3612
3613 if (qc->err_mask) {
3614 /* FIXME: Once EH migration is complete,
3615 * generate sense data in this function,
3616 * considering both err_mask and tf.
3617 */
3618 if (qc->flags & ATA_QCFLAG_RETRY)
3619 ata_eh_qc_retry(qc);
3620 else
3621 ata_eh_qc_complete(qc);
3622 } else {
3623 if (qc->flags & ATA_QCFLAG_SENSE_VALID) {
3624 ata_eh_qc_complete(qc);
3625 } else {
3626 /* feed zero TF to sense generation */
3627 memset(&qc->result_tf, 0, sizeof(qc->result_tf));
3628 ata_eh_qc_retry(qc);
3629 }
3630 }
3631 }
3632
3633 /* make sure nr_active_links is zero after EH */
3634 WARN_ON(ap->nr_active_links);
3635 ap->nr_active_links = 0;
3636 }
3637
3638 /**
3639 * ata_do_eh - do standard error handling
3640 * @ap: host port to handle error for
3641 * @prereset: prereset method (can be NULL)
3643 * @softreset: softreset method (can be NULL)
3644 * @hardreset: hardreset method (can be NULL)
3645 * @postreset: postreset method (can be NULL)
3646 *
3647 * Perform standard error handling sequence.
3648 *
3649 * LOCKING:
3650 * Kernel thread context (may sleep).
3651 */
3652 void ata_do_eh(struct ata_port *ap, ata_prereset_fn_t prereset,
3653 ata_reset_fn_t softreset, ata_reset_fn_t hardreset,
3654 ata_postreset_fn_t postreset)
3655 {
3656 struct ata_device *dev;
3657 int rc;
3658
3659 ata_eh_autopsy(ap);
3660 ata_eh_report(ap);
3661
3662 rc = ata_eh_recover(ap, prereset, softreset, hardreset, postreset,
3663 NULL);
3664 if (rc) {
3665 ata_for_each_dev(dev, &ap->link, ALL)
3666 ata_dev_disable(dev);
3667 }
3668
3669 ata_eh_finish(ap);
3670 }
3671
3672 /**
3673 * ata_std_error_handler - standard error handler
3674 * @ap: host port to handle error for
3675 *
3676 * Standard error handler
3677 *
3678 * LOCKING:
3679 * Kernel thread context (may sleep).
3680 */
3681 void ata_std_error_handler(struct ata_port *ap)
3682 {
3683 struct ata_port_operations *ops = ap->ops;
3684 ata_reset_fn_t hardreset = ops->hardreset;
3685
3686 /* ignore built-in hardreset if SCR access is not available */
3687 if (hardreset == sata_std_hardreset && !sata_scr_valid(&ap->link))
3688 hardreset = NULL;
3689
3690 ata_do_eh(ap, ops->prereset, ops->softreset, hardreset, ops->postreset);
3691 }
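/* Wiring sketch (assumption; "my_" names are hypothetical): a driver
 * usually reaches the handler above through its port operations:
 *
 *	static struct ata_port_operations my_port_ops = {
 *		.inherits	= &sata_port_ops,
 *		.hardreset	= my_hardreset,
 *		.error_handler	= ata_std_error_handler,
 *	};
 */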
3692
3693 #ifdef CONFIG_PM
3694 /**
3695 * ata_eh_handle_port_suspend - perform port suspend operation
3696 * @ap: port to suspend
3697 *
3698 * Suspend @ap.
3699 *
3700 * LOCKING:
3701 * Kernel thread context (may sleep).
3702 */
3703 static void ata_eh_handle_port_suspend(struct ata_port *ap)
3704 {
3705 unsigned long flags;
3706 int rc = 0;
3707
3708 /* are we suspending? */
3709 spin_lock_irqsave(ap->lock, flags);
3710 if (!(ap->pflags & ATA_PFLAG_PM_PENDING) ||
3711 ap->pm_mesg.event == PM_EVENT_ON) {
3712 spin_unlock_irqrestore(ap->lock, flags);
3713 return;
3714 }
3715 spin_unlock_irqrestore(ap->lock, flags);
3716
3717 WARN_ON(ap->pflags & ATA_PFLAG_SUSPENDED);
3718
3719 /* tell ACPI we're suspending */
3720 rc = ata_acpi_on_suspend(ap);
3721 if (rc)
3722 goto out;
3723
3724 /* suspend */
3725 ata_eh_freeze_port(ap);
3726
3727 if (ap->ops->port_suspend)
3728 rc = ap->ops->port_suspend(ap, ap->pm_mesg);
3729
3730 ata_acpi_set_state(ap, PMSG_SUSPEND);
3731 out:
3732 /* report result */
3733 spin_lock_irqsave(ap->lock, flags);
3734
3735 ap->pflags &= ~ATA_PFLAG_PM_PENDING;
3736 if (rc == 0)
3737 ap->pflags |= ATA_PFLAG_SUSPENDED;
3738 else if (ap->pflags & ATA_PFLAG_FROZEN)
3739 ata_port_schedule_eh(ap);
3740
3741 if (ap->pm_result) {
3742 *ap->pm_result = rc;
3743 ap->pm_result = NULL;
3744 }
3745
3746 spin_unlock_irqrestore(ap->lock, flags);
3747
3748 return;
3749 }
3750
3751 /**
3752 * ata_eh_handle_port_resume - perform port resume operation
3753 * @ap: port to resume
3754 *
3755 * Resume @ap.
3756 *
3757 * LOCKING:
3758 * Kernel thread context (may sleep).
3759 */
3760 static void ata_eh_handle_port_resume(struct ata_port *ap)
3761 {
3762 struct ata_link *link;
3763 struct ata_device *dev;
3764 unsigned long flags;
3765 int rc = 0;
3766
3767 /* are we resuming? */
3768 spin_lock_irqsave(ap->lock, flags);
3769 if (!(ap->pflags & ATA_PFLAG_PM_PENDING) ||
3770 ap->pm_mesg.event != PM_EVENT_ON) {
3771 spin_unlock_irqrestore(ap->lock, flags);
3772 return;
3773 }
3774 spin_unlock_irqrestore(ap->lock, flags);
3775
3776 WARN_ON(!(ap->pflags & ATA_PFLAG_SUSPENDED));
3777
3778 /*
3779 	 * Error timestamps are in jiffies, which doesn't advance while
3780 	 * suspended, and PHY events during resume aren't uncommon.
3781 * When the two are combined, it can lead to unnecessary speed
3782 * downs if the machine is suspended and resumed repeatedly.
3783 * Clear error history.
3784 */
3785 ata_for_each_link(link, ap, HOST_FIRST)
3786 ata_for_each_dev(dev, link, ALL)
3787 ata_ering_clear(&dev->ering);
3788
3789 ata_acpi_set_state(ap, PMSG_ON);
3790
3791 if (ap->ops->port_resume)
3792 rc = ap->ops->port_resume(ap);
3793
3794 /* tell ACPI that we're resuming */
3795 ata_acpi_on_resume(ap);
3796
3797 /* report result */
3798 spin_lock_irqsave(ap->lock, flags);
3799 ap->pflags &= ~(ATA_PFLAG_PM_PENDING | ATA_PFLAG_SUSPENDED);
3800 if (ap->pm_result) {
3801 *ap->pm_result = rc;
3802 ap->pm_result = NULL;
3803 }
3804 spin_unlock_irqrestore(ap->lock, flags);
3805 }
3806 #endif /* CONFIG_PM */