drivers/scsi/libsas/sas_scsi_host.c
/*
 * Serial Attached SCSI (SAS) class SCSI Host glue.
 *
 * Copyright (C) 2005 Adaptec, Inc. All rights reserved.
 * Copyright (C) 2005 Luben Tuikov <luben_tuikov@adaptec.com>
 *
 * This file is licensed under GPLv2.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as
 * published by the Free Software Foundation; either version 2 of the
 * License, or (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307
 * USA
 *
 */

#include <linux/kthread.h>

#include "sas_internal.h"

#include <scsi/scsi_host.h>
#include <scsi/scsi_device.h>
#include <scsi/scsi_tcq.h>
#include <scsi/scsi.h>
#include <scsi/scsi_eh.h>
#include <scsi/scsi_transport.h>
#include <scsi/scsi_transport_sas.h>
#include "../scsi_sas_internal.h"
#include "../scsi_transport_api.h"
#include "../scsi_priv.h"

#include <linux/err.h>
#include <linux/blkdev.h>
#include <linux/scatterlist.h>

/* ---------- SCSI Host glue ---------- */

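/*
 * A command's sas_task is stashed in the midlayer-provided host_scribble
 * field of the scsi_cmnd, so either side can get back to the other.
 */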
#define TO_SAS_TASK(_scsi_cmd) ((void *)(_scsi_cmd)->host_scribble)
#define ASSIGN_SAS_TASK(_sc, _t) do { (_sc)->host_scribble = (void *) _t; } while (0)

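/*
 * Completion callback invoked by the LLDD when a sas_task finishes.
 * It translates the SAS/SSP status into a SCSI midlayer result, frees
 * the task and completes the command (or hands it to the error handler
 * if the task had been aborted).
 */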
static void sas_scsi_task_done(struct sas_task *task)
{
	struct task_status_struct *ts = &task->task_status;
	struct scsi_cmnd *sc = task->uldd_task;
	struct sas_ha_struct *sas_ha;
	unsigned ts_flags = task->task_state_flags;
	int hs = 0, stat = 0;

	if (unlikely(!sc)) {
		SAS_DPRINTK("task_done called with nonexistent SCSI cmnd!\n");
		list_del_init(&task->list);
		sas_free_task(task);
		return;
	}
	sas_ha = SHOST_TO_SAS_HA(sc->device->host);

	if (ts->resp == SAS_TASK_UNDELIVERED) {
		/* transport error */
		hs = DID_NO_CONNECT;
	} else { /* ts->resp == SAS_TASK_COMPLETE */
		/* task delivered, what happened afterwards? */
		switch (ts->stat) {
		case SAS_DEV_NO_RESPONSE:
		case SAS_INTERRUPTED:
		case SAS_PHY_DOWN:
		case SAS_NAK_R_ERR:
		case SAS_OPEN_TO:
			hs = DID_NO_CONNECT;
			break;
		case SAS_DATA_UNDERRUN:
			scsi_set_resid(sc, ts->residual);
			if (scsi_bufflen(sc) - scsi_get_resid(sc) < sc->underflow)
				hs = DID_ERROR;
			break;
		case SAS_DATA_OVERRUN:
			hs = DID_ERROR;
			break;
		case SAS_QUEUE_FULL:
			hs = DID_SOFT_ERROR; /* retry */
			break;
		case SAS_DEVICE_UNKNOWN:
			hs = DID_BAD_TARGET;
			break;
		case SAS_SG_ERR:
			hs = DID_PARITY;
			break;
		case SAS_OPEN_REJECT:
			if (ts->open_rej_reason == SAS_OREJ_RSVD_RETRY)
				hs = DID_SOFT_ERROR; /* retry */
			else
				hs = DID_ERROR;
			break;
		case SAS_PROTO_RESPONSE:
			SAS_DPRINTK("LLDD:%s sent SAS_PROTO_RESP for an SSP "
				    "task; please report this\n",
				    task->dev->port->ha->sas_ha_name);
			break;
		case SAS_ABORTED_TASK:
			hs = DID_ABORT;
			break;
		case SAM_CHECK_COND:
			memcpy(sc->sense_buffer, ts->buf,
			       min(SCSI_SENSE_BUFFERSIZE, ts->buf_valid_size));
			stat = SAM_CHECK_COND;
			break;
		default:
			stat = ts->stat;
			break;
		}
	}
	ASSIGN_SAS_TASK(sc, NULL);
	sc->result = (hs << 16) | stat;
	list_del_init(&task->list);
	sas_free_task(task);
	/* This is very ugly but this is how SCSI Core works. */
	if (ts_flags & SAS_TASK_STATE_ABORTED)
		scsi_eh_finish_cmd(sc, &sas_ha->eh_done_q);
	else
		sc->scsi_done(sc);
}

static enum task_attribute sas_scsi_get_task_attr(struct scsi_cmnd *cmd)
{
	enum task_attribute ta = TASK_ATTR_SIMPLE;
	if (cmd->request && blk_rq_tagged(cmd->request)) {
		if (cmd->device->ordered_tags &&
		    (cmd->request->cmd_flags & REQ_HARDBARRIER))
			ta = TASK_ATTR_ORDERED;
	}
	return ta;
}

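/*
 * Build a sas_task (SSP) for a midlayer scsi_cmnd: copy the LUN and CDB,
 * attach the scatterlist, and point task_done at our completion handler.
 */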
static struct sas_task *sas_create_task(struct scsi_cmnd *cmd,
					struct domain_device *dev,
					gfp_t gfp_flags)
{
	struct sas_task *task = sas_alloc_task(gfp_flags);
	struct scsi_lun lun;

	if (!task)
		return NULL;

	*(u32 *)cmd->sense_buffer = 0;
	task->uldd_task = cmd;
	ASSIGN_SAS_TASK(cmd, task);

	task->dev = dev;
	task->task_proto = task->dev->tproto; /* BUG_ON(!SSP) */

	task->ssp_task.retry_count = 1;
	int_to_scsilun(cmd->device->lun, &lun);
	memcpy(task->ssp_task.LUN, &lun.scsi_lun, 8);
	task->ssp_task.task_attr = sas_scsi_get_task_attr(cmd);
	memcpy(task->ssp_task.cdb, cmd->cmnd, 16);

	task->scatter = scsi_sglist(cmd);
	task->num_scatter = scsi_sg_count(cmd);
	task->total_xfer_len = scsi_bufflen(cmd);
	task->data_dir = cmd->sc_data_direction;

	task->task_done = sas_scsi_task_done;

	return task;
}

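/*
 * Task Collector Mode: instead of being sent to the LLDD immediately,
 * the task is appended to a host-wide queue and the collector thread is
 * woken to batch it to lldd_execute_task() later.
 */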
static int sas_queue_up(struct sas_task *task)
{
	struct sas_ha_struct *sas_ha = task->dev->port->ha;
	struct scsi_core *core = &sas_ha->core;
	unsigned long flags;
	LIST_HEAD(list);

	spin_lock_irqsave(&core->task_queue_lock, flags);
	if (sas_ha->lldd_queue_size < core->task_queue_size + 1) {
		spin_unlock_irqrestore(&core->task_queue_lock, flags);
		return -SAS_QUEUE_FULL;
	}
	list_add_tail(&task->list, &core->task_queue);
	core->task_queue_size += 1;
	spin_unlock_irqrestore(&core->task_queue_lock, flags);
	wake_up_process(core->queue_thread);

	return 0;
}

/**
 * sas_queuecommand -- Enqueue a command for processing
 * @parameters: See SCSI Core documentation
 *
 * Note: XXX: Remove the host unlock/lock pair when SCSI Core can
 * call us without holding an IRQ spinlock...
 */
int sas_queuecommand(struct scsi_cmnd *cmd,
		     void (*scsi_done)(struct scsi_cmnd *))
{
	int res = 0;
	struct domain_device *dev = cmd_to_domain_dev(cmd);
	struct Scsi_Host *host = cmd->device->host;
	struct sas_internal *i = to_sas_internal(host->transportt);

	spin_unlock_irq(host->host_lock);

	{
		struct sas_ha_struct *sas_ha = dev->port->ha;
		struct sas_task *task;

		res = -ENOMEM;
		task = sas_create_task(cmd, dev, GFP_ATOMIC);
		if (!task)
			goto out;

		cmd->scsi_done = scsi_done;
		/* Queue up, Direct Mode or Task Collector Mode. */
		if (sas_ha->lldd_max_execute_num < 2)
			res = i->dft->lldd_execute_task(task, 1, GFP_ATOMIC);
		else
			res = sas_queue_up(task);

		/* Examine */
		if (res) {
			SAS_DPRINTK("lldd_execute_task returned: %d\n", res);
			ASSIGN_SAS_TASK(cmd, NULL);
			sas_free_task(task);
			if (res == -SAS_QUEUE_FULL) {
				cmd->result = DID_SOFT_ERROR << 16; /* retry */
				res = 0;
				scsi_done(cmd);
			}
			goto out;
		}
	}
out:
	spin_lock_irq(host->host_lock);
	return res;
}

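/*
 * The three helpers below prune commands from the error-handler work
 * queue at increasing scope: a single command, every command for an
 * I_T nexus (device), and every command on a port.
 */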
static void sas_scsi_clear_queue_lu(struct list_head *error_q, struct scsi_cmnd *my_cmd)
{
	struct scsi_cmnd *cmd, *n;

	list_for_each_entry_safe(cmd, n, error_q, eh_entry) {
		if (cmd == my_cmd)
			list_del_init(&cmd->eh_entry);
	}
}

static void sas_scsi_clear_queue_I_T(struct list_head *error_q,
				     struct domain_device *dev)
{
	struct scsi_cmnd *cmd, *n;

	list_for_each_entry_safe(cmd, n, error_q, eh_entry) {
		struct domain_device *x = cmd_to_domain_dev(cmd);

		if (x == dev)
			list_del_init(&cmd->eh_entry);
	}
}

static void sas_scsi_clear_queue_port(struct list_head *error_q,
				      struct asd_sas_port *port)
{
	struct scsi_cmnd *cmd, *n;

	list_for_each_entry_safe(cmd, n, error_q, eh_entry) {
		struct domain_device *dev = cmd_to_domain_dev(cmd);
		struct asd_sas_port *x = dev->port;

		if (x == port)
			list_del_init(&cmd->eh_entry);
	}
}

enum task_disposition {
	TASK_IS_DONE,
	TASK_IS_ABORTED,
	TASK_IS_AT_LU,
	TASK_IS_NOT_AT_LU,
	TASK_ABORT_FAILED,
};

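/*
 * Figure out what happened to a failed task: pull it from the collector
 * queue if it never left the host, otherwise ask the LLDD to abort it
 * (retrying a few times) and, failing that, query where the task
 * currently is.
 */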
static enum task_disposition sas_scsi_find_task(struct sas_task *task)
{
	struct sas_ha_struct *ha = task->dev->port->ha;
	unsigned long flags;
	int i, res;
	struct sas_internal *si =
		to_sas_internal(task->dev->port->ha->core.shost->transportt);

	if (ha->lldd_max_execute_num > 1) {
		struct scsi_core *core = &ha->core;
		struct sas_task *t, *n;

		spin_lock_irqsave(&core->task_queue_lock, flags);
		list_for_each_entry_safe(t, n, &core->task_queue, list) {
			if (task == t) {
				list_del_init(&t->list);
				spin_unlock_irqrestore(&core->task_queue_lock,
						       flags);
				SAS_DPRINTK("%s: task 0x%p aborted from "
					    "task_queue\n",
					    __FUNCTION__, task);
				return TASK_IS_ABORTED;
			}
		}
		spin_unlock_irqrestore(&core->task_queue_lock, flags);
	}

	for (i = 0; i < 5; i++) {
		SAS_DPRINTK("%s: aborting task 0x%p\n", __FUNCTION__, task);
		res = si->dft->lldd_abort_task(task);

		spin_lock_irqsave(&task->task_state_lock, flags);
		if (task->task_state_flags & SAS_TASK_STATE_DONE) {
			spin_unlock_irqrestore(&task->task_state_lock, flags);
			SAS_DPRINTK("%s: task 0x%p is done\n", __FUNCTION__,
				    task);
			return TASK_IS_DONE;
		}
		spin_unlock_irqrestore(&task->task_state_lock, flags);

		if (res == TMF_RESP_FUNC_COMPLETE) {
			SAS_DPRINTK("%s: task 0x%p is aborted\n",
				    __FUNCTION__, task);
			return TASK_IS_ABORTED;
		} else if (si->dft->lldd_query_task) {
			SAS_DPRINTK("%s: querying task 0x%p\n",
				    __FUNCTION__, task);
			res = si->dft->lldd_query_task(task);
			switch (res) {
			case TMF_RESP_FUNC_SUCC:
				SAS_DPRINTK("%s: task 0x%p at LU\n",
					    __FUNCTION__, task);
				return TASK_IS_AT_LU;
			case TMF_RESP_FUNC_COMPLETE:
				SAS_DPRINTK("%s: task 0x%p not at LU\n",
					    __FUNCTION__, task);
				return TASK_IS_NOT_AT_LU;
			case TMF_RESP_FUNC_FAILED:
				SAS_DPRINTK("%s: task 0x%p failed to abort\n",
					    __FUNCTION__, task);
				return TASK_ABORT_FAILED;
			}

		}
	}
	return res;
}

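/*
 * LU-level recovery, tried in escalating order: ABORT TASK SET, then
 * CLEAR TASK SET, then a LU reset, stopping at the first step the LLDD
 * reports as complete.
 */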
static int sas_recover_lu(struct domain_device *dev, struct scsi_cmnd *cmd)
{
	int res = TMF_RESP_FUNC_FAILED;
	struct scsi_lun lun;
	struct sas_internal *i =
		to_sas_internal(dev->port->ha->core.shost->transportt);

	int_to_scsilun(cmd->device->lun, &lun);

	SAS_DPRINTK("eh: device %llx LUN %x has the task\n",
		    SAS_ADDR(dev->sas_addr),
		    cmd->device->lun);

	if (i->dft->lldd_abort_task_set)
		res = i->dft->lldd_abort_task_set(dev, lun.scsi_lun);

	if (res == TMF_RESP_FUNC_FAILED) {
		if (i->dft->lldd_clear_task_set)
			res = i->dft->lldd_clear_task_set(dev, lun.scsi_lun);
	}

	if (res == TMF_RESP_FUNC_FAILED) {
		if (i->dft->lldd_lu_reset)
			res = i->dft->lldd_lu_reset(dev, lun.scsi_lun);
	}

	return res;
}

static int sas_recover_I_T(struct domain_device *dev)
{
	int res = TMF_RESP_FUNC_FAILED;
	struct sas_internal *i =
		to_sas_internal(dev->port->ha->core.shost->transportt);

	SAS_DPRINTK("I_T nexus reset for dev %016llx\n",
		    SAS_ADDR(dev->sas_addr));

	if (i->dft->lldd_I_T_nexus_reset)
		res = i->dft->lldd_I_T_nexus_reset(dev);

	return res;
}

/* Find the sas_phy that's attached to this device */
struct sas_phy *find_local_sas_phy(struct domain_device *dev)
{
	struct domain_device *pdev = dev->parent;
	struct ex_phy *exphy = NULL;
	int i;

	/* Directly attached device */
	if (!pdev)
		return dev->port->phy;

	/* Otherwise look in the expander */
	for (i = 0; i < pdev->ex_dev.num_phys; i++)
		if (!memcmp(dev->sas_addr,
			    pdev->ex_dev.ex_phy[i].attached_sas_addr,
			    SAS_ADDR_SIZE)) {
			exphy = &pdev->ex_dev.ex_phy[i];
			break;
		}

	BUG_ON(!exphy);
	return exphy->phy;
}

/* Attempt to send a LUN reset message to a device */
int sas_eh_device_reset_handler(struct scsi_cmnd *cmd)
{
	struct domain_device *dev = cmd_to_domain_dev(cmd);
	struct sas_internal *i =
		to_sas_internal(dev->port->ha->core.shost->transportt);
	struct scsi_lun lun;
	int res;

	int_to_scsilun(cmd->device->lun, &lun);

	if (!i->dft->lldd_lu_reset)
		return FAILED;

	res = i->dft->lldd_lu_reset(dev, lun.scsi_lun);
	if (res == TMF_RESP_FUNC_SUCC || res == TMF_RESP_FUNC_COMPLETE)
		return SUCCESS;

	return FAILED;
}

/* Attempt to send a phy (bus) reset */
int sas_eh_bus_reset_handler(struct scsi_cmnd *cmd)
{
	struct domain_device *dev = cmd_to_domain_dev(cmd);
	struct sas_phy *phy = find_local_sas_phy(dev);
	int res;

	res = sas_phy_reset(phy, 1);
	if (res)
		SAS_DPRINTK("Bus reset of %s failed 0x%x\n",
			    phy->dev.kobj.k_name,
			    res);
	if (res == TMF_RESP_FUNC_SUCC || res == TMF_RESP_FUNC_COMPLETE)
		return SUCCESS;

	return FAILED;
}

/* Try to reset a device */
static int try_to_reset_cmd_device(struct Scsi_Host *shost,
				   struct scsi_cmnd *cmd)
{
	int res;

	if (!shost->hostt->eh_device_reset_handler)
		goto try_bus_reset;

	res = shost->hostt->eh_device_reset_handler(cmd);
	if (res == SUCCESS)
		return res;

try_bus_reset:
	if (shost->hostt->eh_bus_reset_handler)
		return shost->hostt->eh_bus_reset_handler(cmd);

	return FAILED;
}

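/*
 * Main SAS-aware pass of the error handler. For every failed command
 * that still owns a sas_task, escalate: abort the task, then LU
 * recovery, then I_T nexus recovery, then clear-nexus on the port and
 * finally on the whole HA, pruning the work queue as each wider scope
 * succeeds.
 */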
static int sas_eh_handle_sas_errors(struct Scsi_Host *shost,
				    struct list_head *work_q,
				    struct list_head *done_q)
{
	struct scsi_cmnd *cmd, *n;
	enum task_disposition res = TASK_IS_DONE;
	int tmf_resp, need_reset;
	struct sas_internal *i = to_sas_internal(shost->transportt);
	unsigned long flags;
	struct sas_ha_struct *ha = SHOST_TO_SAS_HA(shost);

Again:
	list_for_each_entry_safe(cmd, n, work_q, eh_entry) {
		struct sas_task *task = TO_SAS_TASK(cmd);

		if (!task)
			continue;

		list_del_init(&cmd->eh_entry);

		spin_lock_irqsave(&task->task_state_lock, flags);
		need_reset = task->task_state_flags & SAS_TASK_NEED_DEV_RESET;
		spin_unlock_irqrestore(&task->task_state_lock, flags);

		SAS_DPRINTK("trying to find task 0x%p\n", task);
		res = sas_scsi_find_task(task);

		cmd->eh_eflags = 0;

		switch (res) {
		case TASK_IS_DONE:
			SAS_DPRINTK("%s: task 0x%p is done\n", __FUNCTION__,
				    task);
			task->task_done(task);
			if (need_reset)
				try_to_reset_cmd_device(shost, cmd);
			continue;
		case TASK_IS_ABORTED:
			SAS_DPRINTK("%s: task 0x%p is aborted\n",
				    __FUNCTION__, task);
			task->task_done(task);
			if (need_reset)
				try_to_reset_cmd_device(shost, cmd);
			continue;
		case TASK_IS_AT_LU:
			SAS_DPRINTK("task 0x%p is at LU: lu recover\n", task);
			tmf_resp = sas_recover_lu(task->dev, cmd);
			if (tmf_resp == TMF_RESP_FUNC_COMPLETE) {
				SAS_DPRINTK("dev %016llx LU %x is "
					    "recovered\n",
					    SAS_ADDR(task->dev->sas_addr),
					    cmd->device->lun);
				task->task_done(task);
				if (need_reset)
					try_to_reset_cmd_device(shost, cmd);
				sas_scsi_clear_queue_lu(work_q, cmd);
				goto Again;
			}
			/* fallthrough */
		case TASK_IS_NOT_AT_LU:
		case TASK_ABORT_FAILED:
			SAS_DPRINTK("task 0x%p is not at LU: I_T recover\n",
				    task);
			tmf_resp = sas_recover_I_T(task->dev);
			if (tmf_resp == TMF_RESP_FUNC_COMPLETE) {
				SAS_DPRINTK("I_T %016llx recovered\n",
					    SAS_ADDR(task->dev->sas_addr));
				task->task_done(task);
				if (need_reset)
					try_to_reset_cmd_device(shost, cmd);
				sas_scsi_clear_queue_I_T(work_q, task->dev);
				goto Again;
			}
			/* Hammer time :-) */
			if (i->dft->lldd_clear_nexus_port) {
				struct asd_sas_port *port = task->dev->port;
				SAS_DPRINTK("clearing nexus for port:%d\n",
					    port->id);
				res = i->dft->lldd_clear_nexus_port(port);
				if (res == TMF_RESP_FUNC_COMPLETE) {
					SAS_DPRINTK("clear nexus port:%d "
						    "succeeded\n", port->id);
					task->task_done(task);
					if (need_reset)
						try_to_reset_cmd_device(shost, cmd);
					sas_scsi_clear_queue_port(work_q,
								  port);
					goto Again;
				}
			}
			if (i->dft->lldd_clear_nexus_ha) {
				SAS_DPRINTK("clear nexus ha\n");
				res = i->dft->lldd_clear_nexus_ha(ha);
				if (res == TMF_RESP_FUNC_COMPLETE) {
					SAS_DPRINTK("clear nexus ha "
						    "succeeded\n");
					task->task_done(task);
					if (need_reset)
						try_to_reset_cmd_device(shost, cmd);
					goto out;
				}
			}
			/* If we are here -- this means that no amount
			 * of effort could recover from errors. Quite
			 * possibly the HA just disappeared.
			 */
			SAS_DPRINTK("error from device %llx, LUN %x "
				    "couldn't be recovered in any way\n",
				    SAS_ADDR(task->dev->sas_addr),
				    cmd->device->lun);

			task->task_done(task);
			if (need_reset)
				try_to_reset_cmd_device(shost, cmd);
			goto clear_q;
		}
	}
out:
	return list_empty(work_q);
clear_q:
	SAS_DPRINTK("--- Exit %s -- clear_q\n", __FUNCTION__);
	list_for_each_entry_safe(cmd, n, work_q, eh_entry) {
		struct sas_task *task = TO_SAS_TASK(cmd);
		list_del_init(&cmd->eh_entry);
		task->task_done(task);
	}
	return list_empty(work_q);
}

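/*
 * Host-level error recovery: splice off the host's failed-command
 * queue, handle commands that still have a sas_task, then run the
 * generic SCSI EH helpers for whatever remains, and finally flush the
 * done queue.
 */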
void sas_scsi_recover_host(struct Scsi_Host *shost)
{
	struct sas_ha_struct *ha = SHOST_TO_SAS_HA(shost);
	unsigned long flags;
	LIST_HEAD(eh_work_q);

	spin_lock_irqsave(shost->host_lock, flags);
	list_splice_init(&shost->eh_cmd_q, &eh_work_q);
	spin_unlock_irqrestore(shost->host_lock, flags);

	SAS_DPRINTK("Enter %s\n", __FUNCTION__);
	/*
	 * Deal with commands that still have SAS tasks (i.e. they didn't
	 * complete via the normal sas_task completion mechanism)
	 */
	if (sas_eh_handle_sas_errors(shost, &eh_work_q, &ha->eh_done_q))
		goto out;

	/*
	 * Now deal with SCSI commands that completed ok but have an error
	 * code (and hopefully sense data) attached. This is roughly what
	 * scsi_unjam_host does, but we skip scsi_eh_abort_cmds because any
	 * command we see here has no sas_task and is thus unknown to the HA.
	 */
	if (!scsi_eh_get_sense(&eh_work_q, &ha->eh_done_q))
		scsi_eh_ready_devs(shost, &eh_work_q, &ha->eh_done_q);

out:
	scsi_eh_flush_done_q(&ha->eh_done_q);
	SAS_DPRINTK("--- Exit %s\n", __FUNCTION__);
	return;
}

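/*
 * Timeout hook called by the SCSI midlayer. If the task is already done
 * we let EH complete it; if it has left the initiator (is with the
 * target) we extend the timer; otherwise mark it aborted and hand it to
 * the error handler.
 */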
enum scsi_eh_timer_return sas_scsi_timed_out(struct scsi_cmnd *cmd)
{
	struct sas_task *task = TO_SAS_TASK(cmd);
	unsigned long flags;

	if (!task) {
		cmd->timeout_per_command /= 2;
		SAS_DPRINTK("command 0x%p, task 0x%p, gone: %s\n",
			    cmd, task, (cmd->timeout_per_command ?
			    "EH_RESET_TIMER" : "EH_NOT_HANDLED"));
		if (!cmd->timeout_per_command)
			return EH_NOT_HANDLED;
		return EH_RESET_TIMER;
	}

	spin_lock_irqsave(&task->task_state_lock, flags);
	BUG_ON(task->task_state_flags & SAS_TASK_STATE_ABORTED);
	if (task->task_state_flags & SAS_TASK_STATE_DONE) {
		spin_unlock_irqrestore(&task->task_state_lock, flags);
		SAS_DPRINTK("command 0x%p, task 0x%p, timed out: EH_HANDLED\n",
			    cmd, task);
		return EH_HANDLED;
	}
	if (!(task->task_state_flags & SAS_TASK_AT_INITIATOR)) {
		spin_unlock_irqrestore(&task->task_state_lock, flags);
		SAS_DPRINTK("command 0x%p, task 0x%p, not at initiator: "
			    "EH_RESET_TIMER\n",
			    cmd, task);
		return EH_RESET_TIMER;
	}
	task->task_state_flags |= SAS_TASK_STATE_ABORTED;
	spin_unlock_irqrestore(&task->task_state_lock, flags);

	SAS_DPRINTK("command 0x%p, task 0x%p, timed out: EH_NOT_HANDLED\n",
		    cmd, task);

	return EH_NOT_HANDLED;
}

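/*
 * Map a sas_rphy from the SAS transport class back to the libsas
 * domain_device it represents by walking every port's device list.
 */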
struct domain_device *sas_find_dev_by_rphy(struct sas_rphy *rphy)
{
	struct Scsi_Host *shost = dev_to_shost(rphy->dev.parent);
	struct sas_ha_struct *ha = SHOST_TO_SAS_HA(shost);
	struct domain_device *found_dev = NULL;
	int i;
	unsigned long flags;

	spin_lock_irqsave(&ha->phy_port_lock, flags);
	for (i = 0; i < ha->num_phys; i++) {
		struct asd_sas_port *port = ha->sas_port[i];
		struct domain_device *dev;

		spin_lock(&port->dev_list_lock);
		list_for_each_entry(dev, &port->dev_list, dev_list_node) {
			if (rphy == dev->rphy) {
				found_dev = dev;
				spin_unlock(&port->dev_list_lock);
				goto found;
			}
		}
		spin_unlock(&port->dev_list_lock);
	}
found:
	spin_unlock_irqrestore(&ha->phy_port_lock, flags);

	return found_dev;
}

static inline struct domain_device *sas_find_target(struct scsi_target *starget)
{
	struct sas_rphy *rphy = dev_to_rphy(starget->dev.parent);

	return sas_find_dev_by_rphy(rphy);
}

int sas_target_alloc(struct scsi_target *starget)
{
	struct domain_device *found_dev = sas_find_target(starget);

	if (!found_dev)
		return -ENODEV;

	starget->hostdata = found_dev;
	return 0;
}

#define SAS_DEF_QD 32
#define SAS_MAX_QD 64

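/*
 * Called once the midlayer knows about the device: read the port mode
 * page and, if the device supports it, enable tagged queueing with the
 * default queue depth.
 */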
int sas_slave_configure(struct scsi_device *scsi_dev)
{
	struct domain_device *dev = sdev_to_domain_dev(scsi_dev);
	struct sas_ha_struct *sas_ha;

	BUG_ON(dev->rphy->identify.device_type != SAS_END_DEVICE);

	sas_ha = dev->port->ha;

	sas_read_port_mode_page(scsi_dev);

	if (scsi_dev->tagged_supported) {
		scsi_set_tag_type(scsi_dev, MSG_SIMPLE_TAG);
		scsi_activate_tcq(scsi_dev, SAS_DEF_QD);
	} else {
		SAS_DPRINTK("device %llx, LUN %x doesn't support "
			    "TCQ\n", SAS_ADDR(dev->sas_addr),
			    scsi_dev->lun);
		scsi_dev->tagged_supported = 0;
		scsi_set_tag_type(scsi_dev, 0);
		scsi_deactivate_tcq(scsi_dev, 1);
	}

	scsi_dev->allow_restart = 1;

	return 0;
}

void sas_slave_destroy(struct scsi_device *scsi_dev)
{
}

int sas_change_queue_depth(struct scsi_device *scsi_dev, int new_depth)
{
	int res = min(new_depth, SAS_MAX_QD);

	if (scsi_dev->tagged_supported)
		scsi_adjust_queue_depth(scsi_dev, scsi_get_tag_type(scsi_dev),
					res);
	else {
		struct domain_device *dev = sdev_to_domain_dev(scsi_dev);
		sas_printk("device %llx LUN %x queue depth changed to 1\n",
			   SAS_ADDR(dev->sas_addr),
			   scsi_dev->lun);
		scsi_adjust_queue_depth(scsi_dev, 0, 1);
		res = 1;
	}

	return res;
}

int sas_change_queue_type(struct scsi_device *scsi_dev, int qt)
{
	if (!scsi_dev->tagged_supported)
		return 0;

	scsi_deactivate_tcq(scsi_dev, 1);

	scsi_set_tag_type(scsi_dev, qt);
	scsi_activate_tcq(scsi_dev, scsi_dev->queue_depth);

	return qt;
}

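/*
 * Report a synthetic geometry (255 heads, 63 sectors/track) so the
 * midlayer can derive a cylinder count from the capacity.
 */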
int sas_bios_param(struct scsi_device *scsi_dev,
		   struct block_device *bdev,
		   sector_t capacity, int *hsc)
{
	hsc[0] = 255;
	hsc[1] = 63;
	sector_div(capacity, 255*63);
	hsc[2] = capacity;

	return 0;
}

/* ---------- Task Collector Thread implementation ---------- */

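/*
 * Drain the host-wide task queue: batch up to lldd_queue_size tasks and
 * hand them to the LLDD in a single lldd_execute_task() call,
 * re-queueing them at the head if the LLDD rejects the batch.
 */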
static void sas_queue(struct sas_ha_struct *sas_ha)
{
	struct scsi_core *core = &sas_ha->core;
	unsigned long flags;
	LIST_HEAD(q);
	int can_queue;
	int res;
	struct sas_internal *i = to_sas_internal(core->shost->transportt);

	spin_lock_irqsave(&core->task_queue_lock, flags);
	while (!kthread_should_stop() &&
	       !list_empty(&core->task_queue)) {

		can_queue = sas_ha->lldd_queue_size - core->task_queue_size;
		if (can_queue >= 0) {
			can_queue = core->task_queue_size;
			list_splice_init(&core->task_queue, &q);
		} else {
			struct list_head *a, *n;

			can_queue = sas_ha->lldd_queue_size;
			list_for_each_safe(a, n, &core->task_queue) {
				list_move_tail(a, &q);
				if (--can_queue == 0)
					break;
			}
			can_queue = sas_ha->lldd_queue_size;
		}
		core->task_queue_size -= can_queue;
		spin_unlock_irqrestore(&core->task_queue_lock, flags);
		{
			struct sas_task *task = list_entry(q.next,
							   struct sas_task,
							   list);
			list_del_init(&q);
			res = i->dft->lldd_execute_task(task, can_queue,
							GFP_KERNEL);
			if (unlikely(res))
				__list_add(&q, task->list.prev, &task->list);
		}
		spin_lock_irqsave(&core->task_queue_lock, flags);
		if (res) {
			list_splice_init(&q, &core->task_queue); /*at head*/
			core->task_queue_size += can_queue;
		}
	}
	spin_unlock_irqrestore(&core->task_queue_lock, flags);
}

/**
 * sas_queue_thread -- The Task Collector thread
 * @_sas_ha: pointer to struct sas_ha_struct
 */
static int sas_queue_thread(void *_sas_ha)
{
	struct sas_ha_struct *sas_ha = _sas_ha;

	current->flags |= PF_NOFREEZE;

	while (1) {
		set_current_state(TASK_INTERRUPTIBLE);
		schedule();
		sas_queue(sas_ha);
		if (kthread_should_stop())
			break;
	}

	return 0;
}

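/*
 * Start the Task Collector kthread for this host; it is only of use to
 * LLDDs that operate in Task Collector Mode.
 */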
int sas_init_queue(struct sas_ha_struct *sas_ha)
{
	struct scsi_core *core = &sas_ha->core;

	spin_lock_init(&core->task_queue_lock);
	core->task_queue_size = 0;
	INIT_LIST_HEAD(&core->task_queue);

	core->queue_thread = kthread_run(sas_queue_thread, sas_ha,
					 "sas_queue_%d", core->shost->host_no);
	if (IS_ERR(core->queue_thread))
		return PTR_ERR(core->queue_thread);
	return 0;
}

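/*
 * Stop the collector thread and fail any tasks still sitting on the
 * queue: their commands are completed with DID_ABORT.
 */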
void sas_shutdown_queue(struct sas_ha_struct *sas_ha)
{
	unsigned long flags;
	struct scsi_core *core = &sas_ha->core;
	struct sas_task *task, *n;

	kthread_stop(core->queue_thread);

	if (!list_empty(&core->task_queue))
		SAS_DPRINTK("HA: %llx: scsi core task queue is NOT empty!?\n",
			    SAS_ADDR(sas_ha->sas_addr));

	spin_lock_irqsave(&core->task_queue_lock, flags);
	list_for_each_entry_safe(task, n, &core->task_queue, list) {
		struct scsi_cmnd *cmd = task->uldd_task;

		list_del_init(&task->list);

		ASSIGN_SAS_TASK(cmd, NULL);
		sas_free_task(task);
		cmd->result = DID_ABORT << 16;
		cmd->scsi_done(cmd);
	}
	spin_unlock_irqrestore(&core->task_queue_lock, flags);
}

/*
 * Call the LLDD task abort routine directly. This function is intended for
 * use by upper layers that need to tell the LLDD to abort a task.
 */
int __sas_task_abort(struct sas_task *task)
{
	struct sas_internal *si =
		to_sas_internal(task->dev->port->ha->core.shost->transportt);
	unsigned long flags;
	int res;

	spin_lock_irqsave(&task->task_state_lock, flags);
	if (task->task_state_flags & SAS_TASK_STATE_ABORTED ||
	    task->task_state_flags & SAS_TASK_STATE_DONE) {
		spin_unlock_irqrestore(&task->task_state_lock, flags);
		SAS_DPRINTK("%s: Task %p already finished.\n", __FUNCTION__,
			    task);
		return 0;
	}
	task->task_state_flags |= SAS_TASK_STATE_ABORTED;
	spin_unlock_irqrestore(&task->task_state_lock, flags);

	if (!si->dft->lldd_abort_task)
		return -ENODEV;

	res = si->dft->lldd_abort_task(task);

	spin_lock_irqsave(&task->task_state_lock, flags);
	if ((task->task_state_flags & SAS_TASK_STATE_DONE) ||
	    (res == TMF_RESP_FUNC_COMPLETE)) {
		spin_unlock_irqrestore(&task->task_state_lock, flags);
		task->task_done(task);
		return 0;
	}

	if (!(task->task_state_flags & SAS_TASK_STATE_DONE))
		task->task_state_flags &= ~SAS_TASK_STATE_ABORTED;
	spin_unlock_irqrestore(&task->task_state_lock, flags);

	return -EAGAIN;
}

/*
 * Tell an upper layer that it needs to initiate an abort for a given task.
 * This should only ever be called by an LLDD.
 */
void sas_task_abort(struct sas_task *task)
{
	struct scsi_cmnd *sc = task->uldd_task;

	/* Escape for libsas internal commands */
	if (!sc) {
		if (!del_timer(&task->timer))
			return;
		task->timer.function(task->timer.data);
		return;
	}

	scsi_req_abort_cmd(sc);
	scsi_schedule_eh(sc->device->host);
}

EXPORT_SYMBOL_GPL(sas_queuecommand);
EXPORT_SYMBOL_GPL(sas_target_alloc);
EXPORT_SYMBOL_GPL(sas_slave_configure);
EXPORT_SYMBOL_GPL(sas_slave_destroy);
EXPORT_SYMBOL_GPL(sas_change_queue_depth);
EXPORT_SYMBOL_GPL(sas_change_queue_type);
EXPORT_SYMBOL_GPL(sas_bios_param);
EXPORT_SYMBOL_GPL(__sas_task_abort);
EXPORT_SYMBOL_GPL(sas_task_abort);
EXPORT_SYMBOL_GPL(sas_phy_reset);
EXPORT_SYMBOL_GPL(sas_phy_enable);
EXPORT_SYMBOL_GPL(sas_eh_device_reset_handler);
EXPORT_SYMBOL_GPL(sas_eh_bus_reset_handler);