drivers/scsi/libsas/sas_ata.c
/*
 * Support for SATA devices on Serial Attached SCSI (SAS) controllers
 *
 * Copyright (C) 2006 IBM Corporation
 *
 * Written by: Darrick J. Wong <djwong@us.ibm.com>, IBM Corporation
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as
 * published by the Free Software Foundation; either version 2 of the
 * License, or (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307
 * USA
 */

#include <linux/scatterlist.h>
#include <linux/slab.h>
#include <linux/async.h>
#include <linux/export.h>

#include <scsi/sas_ata.h>
#include "sas_internal.h"
#include <scsi/scsi_host.h>
#include <scsi/scsi_device.h>
#include <scsi/scsi_tcq.h>
#include <scsi/scsi.h>
#include <scsi/scsi_transport.h>
#include <scsi/scsi_transport_sas.h>
#include "../scsi_sas_internal.h"
#include "../scsi_transport_api.h"
#include <scsi/scsi_eh.h>

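/*
 * Map a libsas task_status_struct onto the ata_completion_errors codes
 * that libata understands.  The mapping is intentionally coarse: anything
 * the switch cannot classify falls through to 0 so the ending FIS (or a
 * later check) determines the outcome.
 */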
static enum ata_completion_errors sas_to_ata_err(struct task_status_struct *ts)
{
	/* Cheesy attempt to translate SAS errors into ATA. Hah! */

	/* transport error */
	if (ts->resp == SAS_TASK_UNDELIVERED)
		return AC_ERR_ATA_BUS;

	/* ts->resp == SAS_TASK_COMPLETE */
	/* task delivered, what happened afterwards? */
	switch (ts->stat) {
	case SAS_DEV_NO_RESPONSE:
		return AC_ERR_TIMEOUT;

	case SAS_INTERRUPTED:
	case SAS_PHY_DOWN:
	case SAS_NAK_R_ERR:
		return AC_ERR_ATA_BUS;


	case SAS_DATA_UNDERRUN:
		/*
		 * Some programs that use the taskfile interface
		 * (smartctl in particular) can cause underrun
		 * problems.  Ignore these errors, perhaps at our
		 * peril.
		 */
		return 0;

	case SAS_DATA_OVERRUN:
	case SAS_QUEUE_FULL:
	case SAS_DEVICE_UNKNOWN:
	case SAS_SG_ERR:
		return AC_ERR_INVALID;

	case SAS_OPEN_TO:
	case SAS_OPEN_REJECT:
		SAS_DPRINTK("%s: Saw error %d. What to do?\n",
			    __func__, ts->stat);
		return AC_ERR_OTHER;

	case SAM_STAT_CHECK_CONDITION:
	case SAS_ABORTED_TASK:
		return AC_ERR_DEV;

	case SAS_PROTO_RESPONSE:
		/* This means the ending_fis has the error
		 * value; return 0 here to collect it */
		return 0;
	default:
		return 0;
	}
}

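/*
 * Completion callback attached to every STP/SATA sas_task.  It stashes
 * the ending FIS in the domain_device, converts any SAS-level error via
 * sas_to_ata_err(), and completes the libata qc, unless libsas error
 * handling or an internal-command abort has already taken ownership of
 * the task.
 */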
static void sas_ata_task_done(struct sas_task *task)
{
	struct ata_queued_cmd *qc = task->uldd_task;
	struct domain_device *dev = task->dev;
	struct task_status_struct *stat = &task->task_status;
	struct ata_task_resp *resp = (struct ata_task_resp *)stat->buf;
	struct sas_ha_struct *sas_ha = dev->port->ha;
	enum ata_completion_errors ac;
	unsigned long flags;
	struct ata_link *link;
	struct ata_port *ap;

	spin_lock_irqsave(&dev->done_lock, flags);
	if (test_bit(SAS_HA_FROZEN, &sas_ha->state))
		task = NULL;
	else if (qc && qc->scsicmd)
		ASSIGN_SAS_TASK(qc->scsicmd, NULL);
	spin_unlock_irqrestore(&dev->done_lock, flags);

	/* check if libsas-eh got to the task before us */
	if (unlikely(!task))
		return;

	if (!qc)
		goto qc_already_gone;

	ap = qc->ap;
	link = &ap->link;

	spin_lock_irqsave(ap->lock, flags);
	/* check if we lost the race with libata/sas_ata_post_internal() */
	if (unlikely(ap->pflags & ATA_PFLAG_FROZEN)) {
		spin_unlock_irqrestore(ap->lock, flags);
		if (qc->scsicmd)
			goto qc_already_gone;
		else {
			/* if eh is not involved and the port is frozen then the
			 * ata internal abort process has taken responsibility
			 * for this sas_task
			 */
			return;
		}
	}

	if (stat->stat == SAS_PROTO_RESPONSE || stat->stat == SAM_STAT_GOOD ||
	    ((stat->stat == SAM_STAT_CHECK_CONDITION &&
	      dev->sata_dev.class == ATA_DEV_ATAPI))) {
		memcpy(dev->sata_dev.fis, resp->ending_fis, ATA_RESP_FIS_SIZE);

		if (!link->sactive) {
			qc->err_mask |= ac_err_mask(dev->sata_dev.fis[2]);
		} else {
			link->eh_info.err_mask |= ac_err_mask(dev->sata_dev.fis[2]);
			if (unlikely(link->eh_info.err_mask))
				qc->flags |= ATA_QCFLAG_FAILED;
		}
	} else {
		ac = sas_to_ata_err(stat);
		if (ac) {
			SAS_DPRINTK("%s: SAS error %x\n", __func__,
				    stat->stat);
			/* We saw a SAS error. Send a vague error. */
			if (!link->sactive) {
				qc->err_mask = ac;
			} else {
				link->eh_info.err_mask |= AC_ERR_DEV;
				qc->flags |= ATA_QCFLAG_FAILED;
			}

			dev->sata_dev.fis[3] = 0x04; /* status err */
			dev->sata_dev.fis[2] = ATA_ERR;
		}
	}

	qc->lldd_task = NULL;
	ata_qc_complete(qc);
	spin_unlock_irqrestore(ap->lock, flags);

qc_already_gone:
	sas_free_task(task);
}

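/*
 * ->qc_issue hook: translate a queued libata command into a
 * SAS_PROTOCOL_STP sas_task, build the host-to-device FIS from the
 * taskfile, attach the scatterlist, and hand the task to the LLDD via
 * lldd_execute_task().  libata calls this with ap->lock held; the lock is
 * dropped around the submission and re-taken before returning.
 */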
static unsigned int sas_ata_qc_issue(struct ata_queued_cmd *qc)
{
	unsigned long flags;
	struct sas_task *task;
	struct scatterlist *sg;
	int ret = AC_ERR_SYSTEM;
	unsigned int si, xfer = 0;
	struct ata_port *ap = qc->ap;
	struct domain_device *dev = ap->private_data;
	struct sas_ha_struct *sas_ha = dev->port->ha;
	struct Scsi_Host *host = sas_ha->core.shost;
	struct sas_internal *i = to_sas_internal(host->transportt);

	/* TODO: audit callers to ensure they are ready for qc_issue to
	 * unconditionally re-enable interrupts
	 */
	local_irq_save(flags);
	spin_unlock(ap->lock);

	/* If the device fell off, no sense in issuing commands */
	if (test_bit(SAS_DEV_GONE, &dev->state))
		goto out;

	task = sas_alloc_task(GFP_ATOMIC);
	if (!task)
		goto out;
	task->dev = dev;
	task->task_proto = SAS_PROTOCOL_STP;
	task->task_done = sas_ata_task_done;

	if (qc->tf.command == ATA_CMD_FPDMA_WRITE ||
	    qc->tf.command == ATA_CMD_FPDMA_READ) {
		/* Need to zero out the tag libata assigned us */
		qc->tf.nsect = 0;
	}

	ata_tf_to_fis(&qc->tf, qc->dev->link->pmp, 1, (u8 *)&task->ata_task.fis);
	task->uldd_task = qc;
	if (ata_is_atapi(qc->tf.protocol)) {
		memcpy(task->ata_task.atapi_packet, qc->cdb, qc->dev->cdb_len);
		task->total_xfer_len = qc->nbytes;
		task->num_scatter = qc->n_elem;
	} else {
		for_each_sg(qc->sg, sg, qc->n_elem, si)
			xfer += sg->length;

		task->total_xfer_len = xfer;
		task->num_scatter = si;
	}

	task->data_dir = qc->dma_dir;
	task->scatter = qc->sg;
	task->ata_task.retry_count = 1;
	task->task_state_flags = SAS_TASK_STATE_PENDING;
	qc->lldd_task = task;

	switch (qc->tf.protocol) {
	case ATA_PROT_NCQ:
		task->ata_task.use_ncq = 1;
		/* fall through */
	case ATAPI_PROT_DMA:
	case ATA_PROT_DMA:
		task->ata_task.dma_xfer = 1;
		break;
	}

	if (qc->scsicmd)
		ASSIGN_SAS_TASK(qc->scsicmd, task);

	ret = i->dft->lldd_execute_task(task, GFP_ATOMIC);
	if (ret) {
		SAS_DPRINTK("lldd_execute_task returned: %d\n", ret);

		if (qc->scsicmd)
			ASSIGN_SAS_TASK(qc->scsicmd, NULL);
		sas_free_task(task);
		ret = AC_ERR_SYSTEM;
	}

 out:
	spin_lock(ap->lock);
	local_irq_restore(flags);
	return ret;
}

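/*
 * ->qc_fill_rtf hook: rebuild the result taskfile from the ending
 * device-to-host FIS that sas_ata_task_done() stashed in
 * dev->sata_dev.fis.
 */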
static bool sas_ata_qc_fill_rtf(struct ata_queued_cmd *qc)
{
	struct domain_device *dev = qc->ap->private_data;

	ata_tf_from_fis(dev->sata_dev.fis, &qc->result_tf);
	return true;
}

static struct sas_internal *dev_to_sas_internal(struct domain_device *dev)
{
	return to_sas_internal(dev->port->ha->core.shost->transportt);
}

static int sas_get_ata_command_set(struct domain_device *dev);

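/*
 * Gather the SATA-specific attributes of a discovered device: copy the
 * protocol bits from the attached expander phy, and for devices that are
 * not in the SAS_SATA_PENDING state fetch the signature FIS via REPORT
 * PHY SATA so the ATA command set can be classified.
 */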
int sas_get_ata_info(struct domain_device *dev, struct ex_phy *phy)
{
	if (phy->attached_tproto & SAS_PROTOCOL_STP)
		dev->tproto = phy->attached_tproto;
	if (phy->attached_sata_dev)
		dev->tproto |= SAS_SATA_DEV;

	if (phy->attached_dev_type == SAS_SATA_PENDING)
		dev->dev_type = SAS_SATA_PENDING;
	else {
		int res;

		dev->dev_type = SAS_SATA_DEV;
		res = sas_get_report_phy_sata(dev->parent, phy->phy_id,
					      &dev->sata_dev.rps_resp);
		if (res) {
			SAS_DPRINTK("report phy sata to %016llx:0x%x returned "
				    "0x%x\n", SAS_ADDR(dev->parent->sas_addr),
				    phy->phy_id, res);
			return res;
		}
		memcpy(dev->frame_rcvd, &dev->sata_dev.rps_resp.rps.fis,
		       sizeof(struct dev_to_host_fis));
		dev->sata_dev.class = sas_get_ata_command_set(dev);
	}
	return 0;
}

static int sas_ata_clear_pending(struct domain_device *dev, struct ex_phy *phy)
{
	int res;

	/* we weren't pending, so successfully end the reset sequence now */
	if (dev->dev_type != SAS_SATA_PENDING)
		return 1;

	/* hmmm, if this succeeds do we need to repost the domain_device to the
	 * lldd so it can pick up new parameters?
	 */
	res = sas_get_ata_info(dev, phy);
	if (res)
		return 0; /* retry */
	else
		return 1;
}

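/*
 * "Link ready" check used while waiting out a hard reset of an
 * expander-attached SATA device: rediscover the expander phy over SMP and
 * report ready once a SATA end device (no longer SAS_SATA_PENDING) is
 * seen.  The local-phy variant below simply defers to the LLDD, if it
 * provides lldd_ata_check_ready.
 */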
static int smp_ata_check_ready(struct ata_link *link)
{
	int res;
	struct ata_port *ap = link->ap;
	struct domain_device *dev = ap->private_data;
	struct domain_device *ex_dev = dev->parent;
	struct sas_phy *phy = sas_get_local_phy(dev);
	struct ex_phy *ex_phy = &ex_dev->ex_dev.ex_phy[phy->number];

	res = sas_ex_phy_discover(ex_dev, phy->number);
	sas_put_local_phy(phy);

	/* break the wait early if the expander is unreachable,
	 * otherwise keep polling
	 */
	if (res == -ECOMM)
		return res;
	if (res != SMP_RESP_FUNC_ACC)
		return 0;

	switch (ex_phy->attached_dev_type) {
	case SAS_SATA_PENDING:
		return 0;
	case SAS_END_DEVICE:
		if (ex_phy->attached_sata_dev)
			return sas_ata_clear_pending(dev, ex_phy);
	default:
		return -ENODEV;
	}
}

static int local_ata_check_ready(struct ata_link *link)
{
	struct ata_port *ap = link->ap;
	struct domain_device *dev = ap->private_data;
	struct sas_internal *i = dev_to_sas_internal(dev);

	if (i->dft->lldd_ata_check_ready)
		return i->dft->lldd_ata_check_ready(dev);
	else {
		/* lldd's that don't implement 'ready' checking get the
		 * old default behavior of not coordinating reset
		 * recovery with libata
		 */
		return 1;
	}
}

static int sas_ata_printk(const char *level, const struct domain_device *ddev,
			  const char *fmt, ...)
{
	struct ata_port *ap = ddev->sata_dev.ap;
	struct device *dev = &ddev->rphy->dev;
	struct va_format vaf;
	va_list args;
	int r;

	va_start(args, fmt);

	vaf.fmt = fmt;
	vaf.va = &args;

	r = printk("%ssas: ata%u: %s: %pV",
		   level, ap->print_id, dev_name(dev), &vaf);

	va_end(args);

	return r;
}

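/*
 * ->hardreset hook: ask the LLDD for an I_T nexus reset, then wait for
 * the link to become ready again, using the SMP-based check for
 * expander-attached devices and the LLDD's own check for direct-attached
 * ones.
 */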
static int sas_ata_hard_reset(struct ata_link *link, unsigned int *class,
			      unsigned long deadline)
{
	int ret = 0, res;
	struct sas_phy *phy;
	struct ata_port *ap = link->ap;
	int (*check_ready)(struct ata_link *link);
	struct domain_device *dev = ap->private_data;
	struct sas_internal *i = dev_to_sas_internal(dev);

	res = i->dft->lldd_I_T_nexus_reset(dev);
	if (res == -ENODEV)
		return res;

	if (res != TMF_RESP_FUNC_COMPLETE)
		sas_ata_printk(KERN_DEBUG, dev, "Unable to reset ata device?\n");

	phy = sas_get_local_phy(dev);
	if (scsi_is_sas_phy_local(phy))
		check_ready = local_ata_check_ready;
	else
		check_ready = smp_ata_check_ready;
	sas_put_local_phy(phy);

	ret = ata_wait_after_reset(link, deadline, check_ready);
	if (ret && ret != -EAGAIN)
		sas_ata_printk(KERN_ERR, dev, "reset failed (errno=%d)\n", ret);

	*class = dev->sata_dev.class;

	ap->cbl = ATA_CBL_SATA;
	return ret;
}

/*
 * notify the lldd to forget the sas_task for this internal ata command
 * that bypasses scsi-eh
 */
static void sas_ata_internal_abort(struct sas_task *task)
{
	struct sas_internal *si = dev_to_sas_internal(task->dev);
	unsigned long flags;
	int res;

	spin_lock_irqsave(&task->task_state_lock, flags);
	if (task->task_state_flags & SAS_TASK_STATE_ABORTED ||
	    task->task_state_flags & SAS_TASK_STATE_DONE) {
		spin_unlock_irqrestore(&task->task_state_lock, flags);
		SAS_DPRINTK("%s: Task %p already finished.\n", __func__,
			    task);
		goto out;
	}
	task->task_state_flags |= SAS_TASK_STATE_ABORTED;
	spin_unlock_irqrestore(&task->task_state_lock, flags);

	res = si->dft->lldd_abort_task(task);

	spin_lock_irqsave(&task->task_state_lock, flags);
	if (task->task_state_flags & SAS_TASK_STATE_DONE ||
	    res == TMF_RESP_FUNC_COMPLETE) {
		spin_unlock_irqrestore(&task->task_state_lock, flags);
		goto out;
	}

	/* XXX we are not prepared to deal with ->lldd_abort_task()
	 * failures.  TODO: lldds need to unconditionally forget about
	 * aborted ata tasks, otherwise we (likely) leak the sas task
	 * here
	 */
	SAS_DPRINTK("%s: Task %p leaked.\n", __func__, task);

	if (!(task->task_state_flags & SAS_TASK_STATE_DONE))
		task->task_state_flags &= ~SAS_TASK_STATE_ABORTED;
	spin_unlock_irqrestore(&task->task_state_lock, flags);

	return;
 out:
	sas_free_task(task);
}

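/*
 * ->post_internal_cmd hook: when libata gives up on an internal command
 * (the port is frozen and the qc has failed), reclaim the associated
 * sas_task through sas_ata_internal_abort(), since the normal completion
 * path no longer frees the task once the port is frozen.
 */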
static void sas_ata_post_internal(struct ata_queued_cmd *qc)
{
	if (qc->flags & ATA_QCFLAG_FAILED)
		qc->err_mask |= AC_ERR_OTHER;

	if (qc->err_mask) {
		/*
		 * Find the sas_task and kill it.  By this point, libata
		 * has decided to kill the qc and has frozen the port.
		 * In this state sas_ata_task_done() will no longer free
		 * the sas_task, so we need to notify the lldd (via
		 * ->lldd_abort_task) that the task is dead and free it
		 * ourselves.
		 */
		struct sas_task *task = qc->lldd_task;

		qc->lldd_task = NULL;
		if (!task)
			return;
		task->uldd_task = NULL;
		sas_ata_internal_abort(task);
	}
}


static void sas_ata_set_dmamode(struct ata_port *ap, struct ata_device *ata_dev)
{
	struct domain_device *dev = ap->private_data;
	struct sas_internal *i = dev_to_sas_internal(dev);

	if (i->dft->lldd_ata_set_dmamode)
		i->dft->lldd_ata_set_dmamode(dev);
}

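/*
 * Bracket libata error handling for this device: sched_eh marks the
 * device SAS_DEV_EH_PENDING and bumps ha->eh_active, end_eh clears the
 * bit and drops the count again, so libsas can track how many devices
 * still have ATA error handling outstanding.
 */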
static void sas_ata_sched_eh(struct ata_port *ap)
{
	struct domain_device *dev = ap->private_data;
	struct sas_ha_struct *ha = dev->port->ha;
	unsigned long flags;

	spin_lock_irqsave(&ha->lock, flags);
	if (!test_and_set_bit(SAS_DEV_EH_PENDING, &dev->state))
		ha->eh_active++;
	ata_std_sched_eh(ap);
	spin_unlock_irqrestore(&ha->lock, flags);
}

void sas_ata_end_eh(struct ata_port *ap)
{
	struct domain_device *dev = ap->private_data;
	struct sas_ha_struct *ha = dev->port->ha;
	unsigned long flags;

	spin_lock_irqsave(&ha->lock, flags);
	if (test_and_clear_bit(SAS_DEV_EH_PENDING, &dev->state))
		ha->eh_active--;
	spin_unlock_irqrestore(&ha->lock, flags);
}

static struct ata_port_operations sas_sata_ops = {
	.prereset		= ata_std_prereset,
	.hardreset		= sas_ata_hard_reset,
	.postreset		= ata_std_postreset,
	.error_handler		= ata_std_error_handler,
	.post_internal_cmd	= sas_ata_post_internal,
	.qc_defer		= ata_std_qc_defer,
	.qc_prep		= ata_noop_qc_prep,
	.qc_issue		= sas_ata_qc_issue,
	.qc_fill_rtf		= sas_ata_qc_fill_rtf,
	.port_start		= ata_sas_port_start,
	.port_stop		= ata_sas_port_stop,
	.set_dmamode		= sas_ata_set_dmamode,
	.sched_eh		= sas_ata_sched_eh,
	.end_eh			= sas_ata_end_eh,
};

static struct ata_port_info sata_port_info = {
	.flags = ATA_FLAG_SATA | ATA_FLAG_PIO_DMA | ATA_FLAG_NCQ,
	.pio_mask = ATA_PIO4,
	.mwdma_mask = ATA_MWDMA2,
	.udma_mask = ATA_UDMA6,
	.port_ops = &sas_sata_ops
};

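/*
 * Create and initialize the libata host/port pair that backs a discovered
 * SATA domain_device, wiring the port's private_data back to the
 * domain_device so the operations above can find it.
 */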
int sas_ata_init(struct domain_device *found_dev)
{
	struct sas_ha_struct *ha = found_dev->port->ha;
	struct Scsi_Host *shost = ha->core.shost;
	struct ata_port *ap;
	int rc;

	ata_host_init(&found_dev->sata_dev.ata_host, ha->dev, &sas_sata_ops);
	ap = ata_sas_port_alloc(&found_dev->sata_dev.ata_host,
				&sata_port_info,
				shost);
	if (!ap) {
		SAS_DPRINTK("ata_sas_port_alloc failed.\n");
		return -ENODEV;
	}

	ap->private_data = found_dev;
	ap->cbl = ATA_CBL_SATA;
	ap->scsi_host = shost;
	rc = ata_sas_port_init(ap);
	if (rc) {
		ata_sas_port_destroy(ap);
		return rc;
	}
	found_dev->sata_dev.ap = ap;

	return 0;
}

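/*
 * Abort the ATA command behind a timed-out sas_task.  SCSI-initiated
 * commands are bounced to the SCSI error handler via blk_abort_request();
 * libata-internal commands are failed with AC_ERR_TIMEOUT and their
 * waiter is completed directly.
 */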
void sas_ata_task_abort(struct sas_task *task)
{
	struct ata_queued_cmd *qc = task->uldd_task;
	struct completion *waiting;

	/* Bounce SCSI-initiated commands to the SCSI EH */
	if (qc->scsicmd) {
		struct request_queue *q = qc->scsicmd->device->request_queue;
		unsigned long flags;

		spin_lock_irqsave(q->queue_lock, flags);
		blk_abort_request(qc->scsicmd->request);
		spin_unlock_irqrestore(q->queue_lock, flags);
		return;
	}

	/* Internal command, fake a timeout and complete. */
	qc->flags &= ~ATA_QCFLAG_ACTIVE;
	qc->flags |= ATA_QCFLAG_FAILED;
	qc->err_mask |= AC_ERR_TIMEOUT;
	waiting = qc->private_data;
	complete(waiting);
}

static int sas_get_ata_command_set(struct domain_device *dev)
{
	struct dev_to_host_fis *fis =
		(struct dev_to_host_fis *) dev->frame_rcvd;
	struct ata_taskfile tf;

	if (dev->dev_type == SAS_SATA_PENDING)
		return ATA_DEV_UNKNOWN;

	ata_tf_from_fis((const u8 *)fis, &tf);

	return ata_dev_classify(&tf);
}

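/*
 * Probe the SATA devices queued on a port's disco_list: kick off
 * asynchronous libata probing for each, then wait for error handling to
 * settle and drop any device whose link libata could not bring up.
 */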
void sas_probe_sata(struct asd_sas_port *port)
{
	struct domain_device *dev, *n;

	mutex_lock(&port->ha->disco_mutex);
	list_for_each_entry(dev, &port->disco_list, disco_list_node) {
		if (!dev_is_sata(dev))
			continue;

		ata_sas_async_probe(dev->sata_dev.ap);
	}
	mutex_unlock(&port->ha->disco_mutex);

	list_for_each_entry_safe(dev, n, &port->disco_list, disco_list_node) {
		if (!dev_is_sata(dev))
			continue;

		sas_ata_wait_eh(dev);

		/* if libata could not bring the link up, don't surface
		 * the device
		 */
		if (ata_dev_disabled(sas_to_ata_dev(dev)))
			sas_fail_probe(dev, __func__, -ENODEV);
	}

}

static void sas_ata_flush_pm_eh(struct asd_sas_port *port, const char *func)
{
	struct domain_device *dev, *n;

	list_for_each_entry_safe(dev, n, &port->dev_list, dev_list_node) {
		if (!dev_is_sata(dev))
			continue;

		sas_ata_wait_eh(dev);

		/* if libata failed to power manage the device, tear it down */
		if (ata_dev_disabled(sas_to_ata_dev(dev)))
			sas_fail_probe(dev, func, -ENODEV);
	}
}

void sas_suspend_sata(struct asd_sas_port *port)
{
	struct domain_device *dev;

	mutex_lock(&port->ha->disco_mutex);
	list_for_each_entry(dev, &port->dev_list, dev_list_node) {
		struct sata_device *sata;

		if (!dev_is_sata(dev))
			continue;

		sata = &dev->sata_dev;
		if (sata->ap->pm_mesg.event == PM_EVENT_SUSPEND)
			continue;

		ata_sas_port_suspend(sata->ap);
	}
	mutex_unlock(&port->ha->disco_mutex);

	sas_ata_flush_pm_eh(port, __func__);
}

void sas_resume_sata(struct asd_sas_port *port)
{
	struct domain_device *dev;

	mutex_lock(&port->ha->disco_mutex);
	list_for_each_entry(dev, &port->dev_list, dev_list_node) {
		struct sata_device *sata;

		if (!dev_is_sata(dev))
			continue;

		sata = &dev->sata_dev;
		if (sata->ap->pm_mesg.event == PM_EVENT_ON)
			continue;

		ata_sas_port_resume(sata->ap);
	}
	mutex_unlock(&port->ha->disco_mutex);

	sas_ata_flush_pm_eh(port, __func__);
}

/**
 * sas_discover_sata - discover an STP/SATA domain device
 * @dev: pointer to struct domain_device of interest
 *
 * Devices directly attached to a HA port have no parents.  All other
 * devices do, and should have their "parent" pointer set appropriately
 * before calling this function.
 */
int sas_discover_sata(struct domain_device *dev)
{
	int res;

	if (dev->dev_type == SAS_SATA_PM)
		return -ENODEV;

	dev->sata_dev.class = sas_get_ata_command_set(dev);
	sas_fill_in_rphy(dev, dev->rphy);

	res = sas_notify_lldd_dev_found(dev);
	if (res)
		return res;

	sas_discover_event(dev->port, DISCE_PROBE);
	return 0;
}

static void async_sas_ata_eh(void *data, async_cookie_t cookie)
{
	struct domain_device *dev = data;
	struct ata_port *ap = dev->sata_dev.ap;
	struct sas_ha_struct *ha = dev->port->ha;

	sas_ata_printk(KERN_DEBUG, dev, "dev error handler\n");
	ata_scsi_port_error_handler(ha->core.shost, ap);
	sas_put_device(dev);
}

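/*
 * Strategy handler run from the SCSI EH thread: with revalidation
 * deferred, schedule the per-device libata port error handler for every
 * SATA domain_device on every port, run them in parallel through the
 * async domain, and wait for all of them to finish.
 */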
void sas_ata_strategy_handler(struct Scsi_Host *shost)
{
	struct sas_ha_struct *sas_ha = SHOST_TO_SAS_HA(shost);
	ASYNC_DOMAIN_EXCLUSIVE(async);
	int i;

	/* it's ok to defer revalidation events during ata eh, these
	 * disks are in one of three states:
	 * 1/ present for initial domain discovery, and these
	 *    resets will cause bcn flutters
	 * 2/ hot removed, we'll discover that after eh fails
	 * 3/ hot added after initial discovery, lost the race, and need
	 *    to catch the next train.
	 */
	sas_disable_revalidation(sas_ha);

	spin_lock_irq(&sas_ha->phy_port_lock);
	for (i = 0; i < sas_ha->num_phys; i++) {
		struct asd_sas_port *port = sas_ha->sas_port[i];
		struct domain_device *dev;

		spin_lock(&port->dev_list_lock);
		list_for_each_entry(dev, &port->dev_list, dev_list_node) {
			if (!dev_is_sata(dev))
				continue;

			/* hold a reference over eh since we may be
			 * racing with final remove once all commands
			 * are completed
			 */
			kref_get(&dev->kref);

			async_schedule_domain(async_sas_ata_eh, dev, &async);
		}
		spin_unlock(&port->dev_list_lock);
	}
	spin_unlock_irq(&sas_ha->phy_port_lock);

	async_synchronize_full_domain(&async);

	sas_enable_revalidation(sas_ha);
}

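/*
 * Command-level ATA error handling: peel the SATA commands off the SCSI
 * EH work list one device at a time and feed each batch to
 * ata_scsi_cmd_error_handler() for that device's ata_port.
 */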
void sas_ata_eh(struct Scsi_Host *shost, struct list_head *work_q,
		struct list_head *done_q)
{
	struct scsi_cmnd *cmd, *n;
	struct domain_device *eh_dev;

	do {
		LIST_HEAD(sata_q);
		eh_dev = NULL;

		list_for_each_entry_safe(cmd, n, work_q, eh_entry) {
			struct domain_device *ddev = cmd_to_domain_dev(cmd);

			if (!dev_is_sata(ddev) || TO_SAS_TASK(cmd))
				continue;
			if (eh_dev && eh_dev != ddev)
				continue;
			eh_dev = ddev;
			list_move(&cmd->eh_entry, &sata_q);
		}

		if (!list_empty(&sata_q)) {
			struct ata_port *ap = eh_dev->sata_dev.ap;

			sas_ata_printk(KERN_DEBUG, eh_dev, "cmd error handler\n");
			ata_scsi_cmd_error_handler(shost, ap, &sata_q);
			/*
			 * ata's error handler may leave the cmd on the list
			 * so make sure they don't remain on a stack list
			 * about to go out of scope.
			 *
			 * This looks strange, since the commands are
			 * now part of no list, but the next error
			 * action will be ata_port_error_handler()
			 * which takes no list and sweeps them up
			 * anyway from the ata tag array.
			 */
			while (!list_empty(&sata_q))
				list_del_init(sata_q.next);
		}
	} while (eh_dev);
}

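/*
 * Request a libata-driven reset of a SATA domain_device by flagging a
 * timeout on its link and scheduling port error handling; the companion
 * sas_ata_wait_eh() below blocks until that error handling completes.
 */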
void sas_ata_schedule_reset(struct domain_device *dev)
{
	struct ata_eh_info *ehi;
	struct ata_port *ap;
	unsigned long flags;

	if (!dev_is_sata(dev))
		return;

	ap = dev->sata_dev.ap;
	ehi = &ap->link.eh_info;

	spin_lock_irqsave(ap->lock, flags);
	ehi->err_mask |= AC_ERR_TIMEOUT;
	ehi->action |= ATA_EH_RESET;
	ata_port_schedule_eh(ap);
	spin_unlock_irqrestore(ap->lock, flags);
}
EXPORT_SYMBOL_GPL(sas_ata_schedule_reset);

void sas_ata_wait_eh(struct domain_device *dev)
{
	struct ata_port *ap;

	if (!dev_is_sata(dev))
		return;

	ap = dev->sata_dev.ap;
	ata_port_wait_eh(ap);
}