isci: namespacecheck cleanups
drivers/scsi/isci/task.c
1 /*
2 * This file is provided under a dual BSD/GPLv2 license. When using or
3 * redistributing this file, you may do so under either license.
4 *
5 * GPL LICENSE SUMMARY
6 *
7 * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved.
8 *
9 * This program is free software; you can redistribute it and/or modify
10 * it under the terms of version 2 of the GNU General Public License as
11 * published by the Free Software Foundation.
12 *
13 * This program is distributed in the hope that it will be useful, but
14 * WITHOUT ANY WARRANTY; without even the implied warranty of
15 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
16 * General Public License for more details.
17 *
18 * You should have received a copy of the GNU General Public License
19 * along with this program; if not, write to the Free Software
20 * Foundation, Inc., 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
21 * The full GNU General Public License is included in this distribution
22 * in the file called LICENSE.GPL.
23 *
24 * BSD LICENSE
25 *
26 * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved.
27 * All rights reserved.
28 *
29 * Redistribution and use in source and binary forms, with or without
30 * modification, are permitted provided that the following conditions
31 * are met:
32 *
33 * * Redistributions of source code must retain the above copyright
34 * notice, this list of conditions and the following disclaimer.
35 * * Redistributions in binary form must reproduce the above copyright
36 * notice, this list of conditions and the following disclaimer in
37 * the documentation and/or other materials provided with the
38 * distribution.
39 * * Neither the name of Intel Corporation nor the names of its
40 * contributors may be used to endorse or promote products derived
41 * from this software without specific prior written permission.
42 *
43 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
44 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
45 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
46 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
47 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
48 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
49 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
50 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
51 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
52 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
53 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
54 */
55
56 #include <linux/completion.h>
57 #include <linux/irqflags.h>
58 #include <scsi/sas_ata.h>
59 #include "scic_task_request.h"
60 #include "scic_remote_device.h"
61 #include "scic_io_request.h"
62 #include "scic_sds_remote_device.h"
63 #include "scic_sds_remote_node_context.h"
64 #include "isci.h"
65 #include "request.h"
66 #include "sata.h"
67 #include "task.h"
68
69 /**
70 * isci_task_refuse() - complete the request to the upper layer driver in
71 * the case where an I/O needs to be completed back in the submit path.
72 * @ihost: host on which the request was queued
73 * @task: request to complete
74 * @response: response code for the completed task.
75 * @status: status code for the completed task.
76 *
77 */
78 static void isci_task_refuse(struct isci_host *ihost, struct sas_task *task,
79 enum service_response response,
80 enum exec_status status)
81
82 {
83 enum isci_completion_selection disposition;
84
85 disposition = isci_perform_normal_io_completion;
86 disposition = isci_task_set_completion_status(task, response, status,
87 disposition);
88
89 /* Tasks aborted specifically by a call to the lldd_abort_task
90 * function should not be completed to the host in the regular path.
91 */
92 switch (disposition) {
93 case isci_perform_normal_io_completion:
94 /* Normal notification (task_done) */
95 dev_dbg(&ihost->pdev->dev,
96 "%s: Normal - task = %p, response=%d, status=%d\n",
97 __func__, task, response, status);
98
99 task->lldd_task = NULL;
100 if (dev_is_sata(task->dev)) {
101 /* Since we are still in the submit path, and since
102 * libsas takes the host lock on behalf of SATA
103 * devices before I/O starts, we need to unlock
104 * before we can call back and report the I/O
105 * submission error.
106 */
107 unsigned long flags;
108
109 raw_local_irq_save(flags);
110 spin_unlock(ihost->shost->host_lock);
111 task->task_done(task);
112 spin_lock(ihost->shost->host_lock);
113 raw_local_irq_restore(flags);
114 } else
115 task->task_done(task);
116 break;
117
118 case isci_perform_aborted_io_completion:
119 /* No notification because this request is already in the
120 * abort path.
121 */
122 dev_warn(&ihost->pdev->dev,
123 "%s: Aborted - task = %p, response=%d, status=%d\n",
124 __func__, task, response, status);
125 break;
126
127 case isci_perform_error_io_completion:
128 /* Use sas_task_abort */
129 dev_warn(&ihost->pdev->dev,
130 "%s: Error - task = %p, response=%d, status=%d\n",
131 __func__, task, response, status);
132 sas_task_abort(task);
133 break;
134
135 default:
136 dev_warn(&ihost->pdev->dev,
137 "%s: isci task notification default case!",
138 __func__);
139 sas_task_abort(task);
140 break;
141 }
142 }
143
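/* Iterate over the "num" sas_tasks that libsas has linked together for this
 * submission; "task" is advanced to the next entry in the list on each pass.
 */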
144 #define for_each_sas_task(num, task) \
145 for (; num > 0; num--,\
146 task = list_entry(task->list.next, struct sas_task, list))
147
148 /**
149 * isci_task_execute_task() - This function is one of the SAS Domain Template
150 * functions. This function is called by libsas to send a task down to
151 * hardware.
152 * @task: This parameter specifies the SAS task to send.
153 * @num: This parameter specifies the number of tasks to queue.
154 * @gfp_flags: This parameter specifies the context of this call.
155 *
156 * status, zero indicates success.
157 */
158 int isci_task_execute_task(struct sas_task *task, int num, gfp_t gfp_flags)
159 {
160 struct isci_host *ihost = task->dev->port->ha->lldd_ha;
161 struct isci_request *request = NULL;
162 struct isci_remote_device *device;
163 unsigned long flags;
164 int ret;
165 enum sci_status status;
166 enum isci_status device_status;
167
168 dev_dbg(&ihost->pdev->dev, "%s: num=%d\n", __func__, num);
169
170 /* Check if we have room for more tasks */
171 ret = isci_host_can_queue(ihost, num);
172
173 if (ret) {
174 dev_warn(&ihost->pdev->dev, "%s: queue full\n", __func__);
175 return ret;
176 }
177
178 for_each_sas_task(num, task) {
179 dev_dbg(&ihost->pdev->dev,
180 "task = %p, num = %d; dev = %p; cmd = %p\n",
181 task, num, task->dev, task->uldd_task);
182
183 device = isci_dev_from_domain_dev(task->dev);
184
185 if (device)
186 device_status = device->status;
187 else
188 device_status = isci_freed;
189
190 /* From this point onward, any process that needs to guarantee
191 * that there is no kernel I/O being started will have to wait
192 * for the quiesce spinlock.
193 */
194
195 if (device_status != isci_ready_for_io) {
196
197 /* Forces a retry from scsi mid layer. */
198 dev_warn(&ihost->pdev->dev,
199 "%s: task %p: isci_host->status = %d, "
200 "device = %p; device_status = 0x%x\n\n",
201 __func__,
202 task,
203 isci_host_get_state(ihost),
204 device, device_status);
205
206 if (device_status == isci_ready) {
207 /* Indicate QUEUE_FULL so that the scsi midlayer
208 * retries.
209 */
210 isci_task_refuse(ihost, task,
211 SAS_TASK_COMPLETE,
212 SAS_QUEUE_FULL);
213 } else {
214 /* Else, the device is going down. */
215 isci_task_refuse(ihost, task,
216 SAS_TASK_UNDELIVERED,
217 SAS_DEVICE_UNKNOWN);
218 }
219 isci_host_can_dequeue(ihost, 1);
220 } else {
221 /* There is a device and it's ready for I/O. */
222 spin_lock_irqsave(&task->task_state_lock, flags);
223
224 if (task->task_state_flags & SAS_TASK_STATE_ABORTED) {
225
226 spin_unlock_irqrestore(&task->task_state_lock,
227 flags);
228
229 isci_task_refuse(ihost, task,
230 SAS_TASK_UNDELIVERED,
231 SAM_STAT_TASK_ABORTED);
232
233 /* The I/O was aborted. */
234
235 } else {
236 task->task_state_flags |= SAS_TASK_AT_INITIATOR;
237 spin_unlock_irqrestore(&task->task_state_lock, flags);
238
239 /* build and send the request. */
240 status = isci_request_execute(ihost, task, &request,
241 gfp_flags);
242
243 if (status != SCI_SUCCESS) {
244
245 spin_lock_irqsave(&task->task_state_lock, flags);
246 /* Did not really start this command. */
247 task->task_state_flags &= ~SAS_TASK_AT_INITIATOR;
248 spin_unlock_irqrestore(&task->task_state_lock, flags);
249
250 /* Indicate QUEUE_FULL so that the scsi
251 * midlayer retries. if the request
252 * failed for remote device reasons,
253 * it gets returned as
254 * SAS_TASK_UNDELIVERED next time
255 * through.
256 */
257 isci_task_refuse(ihost, task,
258 SAS_TASK_COMPLETE,
259 SAS_QUEUE_FULL);
260 isci_host_can_dequeue(ihost, 1);
261 }
262 }
263 }
264 }
265 return 0;
266 }
267
268
269
270 /**
271 * isci_task_request_build() - This function builds the task request object.
272 * @isci_host: This parameter specifies the ISCI host object
273 * @isci_request: This parameter points to the isci_request object allocated in the
274 * request construct function.
275 * @isci_tmf: This parameter is the task management struct to be built
276 *
277 * SCI_SUCCESS on successful completion, or specific failure code.
278 */
279 static enum sci_status isci_task_request_build(
280 struct isci_host *isci_host,
281 struct isci_request **isci_request,
282 struct isci_tmf *isci_tmf)
283 {
284 struct scic_sds_remote_device *sci_device;
285 enum sci_status status = SCI_FAILURE;
286 struct isci_request *request;
287 struct isci_remote_device *isci_device;
288 /* struct sci_sas_identify_address_frame_protocols dev_protocols; */
289 struct smp_discover_response_protocols dev_protocols;
290
291
292 dev_dbg(&isci_host->pdev->dev,
293 "%s: isci_tmf = %p\n", __func__, isci_tmf);
294
295 isci_device = isci_tmf->device;
296 sci_device = to_sci_dev(isci_device);
297
298 /* do common allocation and init of request object. */
299 status = isci_request_alloc_tmf(
300 isci_host,
301 isci_tmf,
302 &request,
303 isci_device,
304 GFP_ATOMIC
305 );
306
307 if (status != SCI_SUCCESS)
308 goto out;
309
310 /* let the core do its construct. */
311 status = scic_task_request_construct(
312 isci_host->core_controller,
313 sci_device,
314 SCI_CONTROLLER_INVALID_IO_TAG,
315 request,
316 request->sci_request_mem_ptr,
317 &request->sci_request_handle
318 );
319
320 if (status != SCI_SUCCESS) {
321 dev_warn(&isci_host->pdev->dev,
322 "%s: scic_task_request_construct failed - "
323 "status = 0x%x\n",
324 __func__,
325 status);
326 goto errout;
327 }
328
329 sci_object_set_association(
330 request->sci_request_handle,
331 request
332 );
333
334 scic_remote_device_get_protocols(
335 sci_device,
336 &dev_protocols
337 );
338
339 /* let the core do its protocol
340 * specific construction.
341 */
342 if (dev_protocols.u.bits.attached_ssp_target) {
343
344 isci_tmf->proto = SAS_PROTOCOL_SSP;
345 status = scic_task_request_construct_ssp(
346 request->sci_request_handle
347 );
348 if (status != SCI_SUCCESS)
349 goto errout;
350 }
351
352 if (dev_protocols.u.bits.attached_stp_target) {
353
354 isci_tmf->proto = SAS_PROTOCOL_SATA;
355 status = isci_sata_management_task_request_build(request);
356
357 if (status != SCI_SUCCESS)
358 goto errout;
359 }
360
361 goto out;
362
363 errout:
364
365 /* release the dma memory if we fail. */
366 isci_request_free(isci_host, request);
367 request = NULL;
368
369 out:
370 *isci_request = request;
371 return status;
372 }
373
374 /**
375 * isci_tmf_timeout_cb() - This function is called as a kernel callback when
376 * the timeout period for the TMF has expired.
377 *
378 *
379 */
380 static void isci_tmf_timeout_cb(void *tmf_request_arg)
381 {
382 struct isci_request *request = (struct isci_request *)tmf_request_arg;
383 struct isci_tmf *tmf = isci_request_access_tmf(request);
384 enum sci_status status;
385
386 BUG_ON(request->ttype != tmf_task);
387
388 /* This task management request has timed-out. Terminate the request
389 * so that the request eventually completes to the requestor in the
390 * request completion callback path.
391 */
392 /* Note - the timer callback function itself has provided spinlock
393 * exclusion from the start and completion paths. No need to take
394 * the request->isci_host->scic_lock here.
395 */
396
397 if (tmf->timeout_timer != NULL) {
398 /* Call the user's callback, if any. */
399 if (tmf->cb_state_func != NULL)
400 tmf->cb_state_func(isci_tmf_timed_out, tmf,
401 tmf->cb_data);
402
403 /* Terminate the TMF transmit request. */
404 status = scic_controller_terminate_request(
405 request->isci_host->core_controller,
406 to_sci_dev(request->isci_device),
407 request->sci_request_handle
408 );
409
410 dev_dbg(&request->isci_host->pdev->dev,
411 "%s: tmf_request = %p; tmf = %p; status = %d\n",
412 __func__, request, tmf, status);
413 } else
414 dev_dbg(&request->isci_host->pdev->dev,
415 "%s: timer already canceled! "
416 "tmf_request = %p; tmf = %p\n",
417 __func__, request, tmf);
418
419 /* No need to unlock since the caller to this callback is doing it for
420 * us.
421 * request->isci_host->scic_lock
422 */
423 }
424
425 /**
426 * isci_task_execute_tmf() - This function builds and sends a task request,
427 * then waits for the completion.
428 * @isci_host: This parameter specifies the ISCI host object
429 * @tmf: This parameter is the pointer to the task management structure for
430 * this request.
431 * @timeout_ms: This parameter specifies the timeout period for the task
432 * management request.
433 *
434 * TMF_RESP_FUNC_COMPLETE on successful completion of the TMF (this includes
435 * error conditions reported in the IU status), or TMF_RESP_FUNC_FAILED.
436 */
437 int isci_task_execute_tmf(
438 struct isci_host *isci_host,
439 struct isci_tmf *tmf,
440 unsigned long timeout_ms)
441 {
442 DECLARE_COMPLETION_ONSTACK(completion);
443 enum sci_status status = SCI_FAILURE;
444 struct scic_sds_remote_device *sci_device;
445 struct isci_remote_device *isci_device = tmf->device;
446 struct isci_request *request;
447 int ret = TMF_RESP_FUNC_FAILED;
448 unsigned long flags;
449
450 /* sanity check, return TMF_RESP_FUNC_FAILED
451 * if the device is not there and ready.
452 */
453 if (!isci_device || isci_device->status != isci_ready_for_io) {
454 dev_dbg(&isci_host->pdev->dev,
455 "%s: isci_device = %p not ready (%d)\n",
456 __func__, isci_device,
457 isci_device ? isci_device->status : isci_freed);
458 return TMF_RESP_FUNC_FAILED;
459 } else
460 dev_dbg(&isci_host->pdev->dev,
461 "%s: isci_device = %p\n",
462 __func__, isci_device);
463
464 sci_device = to_sci_dev(isci_device);
465
466 /* Assign the pointer to the TMF's completion kernel wait structure. */
467 tmf->complete = &completion;
468
469 isci_task_request_build(
470 isci_host,
471 &request,
472 tmf
473 );
474
475 if (!request) {
476 dev_warn(&isci_host->pdev->dev,
477 "%s: isci_task_request_build failed\n",
478 __func__);
479 return TMF_RESP_FUNC_FAILED;
480 }
481
482 /* Allocate the TMF timeout timer. */
483 spin_lock_irqsave(&isci_host->scic_lock, flags);
484 tmf->timeout_timer = isci_timer_create(isci_host, request, isci_tmf_timeout_cb);
485
486 /* Start the timer. */
487 if (tmf->timeout_timer)
488 isci_timer_start(tmf->timeout_timer, timeout_ms);
489 else
490 dev_warn(&isci_host->pdev->dev,
491 "%s: isci_timer_create failed!!!!\n",
492 __func__);
493
494 /* start the TMF io. */
495 status = scic_controller_start_task(
496 isci_host->core_controller,
497 sci_device,
498 request->sci_request_handle,
499 SCI_CONTROLLER_INVALID_IO_TAG
500 );
501
502 if (status != SCI_SUCCESS) {
503 dev_warn(&isci_host->pdev->dev,
504 "%s: start_io failed - status = 0x%x, request = %p\n",
505 __func__,
506 status,
507 request);
508 goto cleanup_request;
509 }
510
511 /* Call the user's callback, if any. */
512 if (tmf->cb_state_func != NULL)
513 tmf->cb_state_func(isci_tmf_started, tmf, tmf->cb_data);
514
515 /* Change the state of the TMF-bearing request to "started". */
516 isci_request_change_state(request, started);
517
518 /* add the request to the remote device request list. */
519 list_add(&request->dev_node, &isci_device->reqs_in_process);
520
521 spin_unlock_irqrestore(&isci_host->scic_lock, flags);
522
523 /* Wait for the TMF to complete, or a timeout. */
524 wait_for_completion(&completion);
525
526 isci_print_tmf(tmf);
527
528 if (tmf->status == SCI_SUCCESS)
529 ret = TMF_RESP_FUNC_COMPLETE;
530 else if (tmf->status == SCI_FAILURE_IO_RESPONSE_VALID) {
531 dev_dbg(&isci_host->pdev->dev,
532 "%s: tmf.status == "
533 "SCI_FAILURE_IO_RESPONSE_VALID\n",
534 __func__);
535 ret = TMF_RESP_FUNC_COMPLETE;
536 }
537 /* Else - leave the default "failed" status alone. */
538
539 dev_dbg(&isci_host->pdev->dev,
540 "%s: completed request = %p\n",
541 __func__,
542 request);
543
544 if (request->io_request_completion != NULL) {
545
546 /* The fact that this is non-NULL for a TMF request
547 * means there is a thread waiting for this TMF to
548 * finish.
549 */
550 complete(request->io_request_completion);
551 }
552
553 spin_lock_irqsave(&isci_host->scic_lock, flags);
554
555 cleanup_request:
556
557 /* Clean up the timer if needed. */
558 if (tmf->timeout_timer) {
559 isci_del_timer(isci_host, tmf->timeout_timer);
560 tmf->timeout_timer = NULL;
561 }
562
563 spin_unlock_irqrestore(&isci_host->scic_lock, flags);
564
565 isci_request_free(isci_host, request);
566
567 return ret;
568 }
569
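/**
 * isci_task_build_tmf() - This function initializes a task management
 * structure for the given remote device and function code, and records
 * the optional state-change callback and its data.
 * @tmf: This parameter is the task management struct to be built.
 * @isci_device: This parameter specifies the device for this TMF.
 * @code: This parameter specifies the task management function code.
 * @tmf_sent_cb: This parameter specifies the callback, if any, to invoke
 * on TMF state changes.
 * @cb_data: This parameter specifies the data passed to the callback.
 *
 */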
570 void isci_task_build_tmf(
571 struct isci_tmf *tmf,
572 struct isci_remote_device *isci_device,
573 enum isci_tmf_function_codes code,
574 void (*tmf_sent_cb)(enum isci_tmf_cb_state,
575 struct isci_tmf *,
576 void *),
577 void *cb_data)
578 {
579 dev_dbg(&isci_device->isci_port->isci_host->pdev->dev,
580 "%s: isci_device = %p\n", __func__, isci_device);
581
582 memset(tmf, 0, sizeof(*tmf));
583
584 tmf->device = isci_device;
585 tmf->tmf_code = code;
586 tmf->timeout_timer = NULL;
587 tmf->cb_state_func = tmf_sent_cb;
588 tmf->cb_data = cb_data;
589 }
590
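/* Build an abort-task TMF: the request being aborted is carried as the
 * callback data, and its I/O tag is copied so the task to be aborted can
 * be identified.
 */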
591 static void isci_task_build_abort_task_tmf(
592 struct isci_tmf *tmf,
593 struct isci_remote_device *isci_device,
594 enum isci_tmf_function_codes code,
595 void (*tmf_sent_cb)(enum isci_tmf_cb_state,
596 struct isci_tmf *,
597 void *),
598 struct isci_request *old_request)
599 {
600 isci_task_build_tmf(tmf, isci_device, code, tmf_sent_cb,
601 (void *)old_request);
602 tmf->io_tag = old_request->io_tag;
603 }
604
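/* Look up the lldd request attached to a sas_task under the task_state_lock.
 * The host and device pointers are only filled in when the task has not
 * completed and is still at the initiator.
 */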
605 static struct isci_request *isci_task_get_request_from_task(
606 struct sas_task *task,
607 struct isci_host **isci_host,
608 struct isci_remote_device **isci_device)
609 {
610
611 struct isci_request *request = NULL;
612 unsigned long flags;
613
614 spin_lock_irqsave(&task->task_state_lock, flags);
615
616 request = task->lldd_task;
617
618 /* If task is already done, the request isn't valid */
619 if (!(task->task_state_flags & SAS_TASK_STATE_DONE) &&
620 (task->task_state_flags & SAS_TASK_AT_INITIATOR) &&
621 (request != NULL)) {
622
623 if (isci_host != NULL)
624 *isci_host = request->isci_host;
625
626 if (isci_device != NULL)
627 *isci_device = request->isci_device;
628 }
629
630 spin_unlock_irqrestore(&task->task_state_lock, flags);
631
632 return request;
633 }
634
635 /**
636 * isci_task_validate_request_to_abort() - This function checks the given I/O
637 * against the "started" state. If the request is still "started", its
638 * state is changed to aborted. NOTE: isci_host->scic_lock MUST BE HELD
639 * BEFORE CALLING THIS FUNCTION.
640 * @isci_request: This parameter specifies the request object to control.
641 * @isci_host: This parameter specifies the ISCI host object
642 * @isci_device: This is the device to which the request is pending.
643 * @aborted_io_completion: This is a completion structure that will be added to
644 * the request in case it is changed to aborting; this completion is
645 * triggered when the request is fully completed.
646 *
647 * Either "started" on successful change of the task status to "aborted", or
648 * "unallocated" if the task cannot be controlled.
649 */
650 static enum isci_request_status isci_task_validate_request_to_abort(
651 struct isci_request *isci_request,
652 struct isci_host *isci_host,
653 struct isci_remote_device *isci_device,
654 struct completion *aborted_io_completion)
655 {
656 enum isci_request_status old_state = unallocated;
657
658 /* Only abort the task if it's in the
659 * device's reqs_in_process list
660 */
661 if (isci_request && !list_empty(&isci_request->dev_node)) {
662 old_state = isci_request_change_started_to_aborted(
663 isci_request, aborted_io_completion);
664
665 }
666
667 return old_state;
668 }
669
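/* Detach a completed request from the device's list, break its link to the
 * sas_task (calling task_done if the task is not in the abort path), and
 * free the request.
 */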
670 static void isci_request_cleanup_completed_loiterer(
671 struct isci_host *isci_host,
672 struct isci_remote_device *isci_device,
673 struct isci_request *isci_request)
674 {
675 struct sas_task *task;
676 unsigned long flags;
677
678 task = (isci_request->ttype == io_task)
679 ? isci_request_access_task(isci_request)
680 : NULL;
681
682 dev_dbg(&isci_host->pdev->dev,
683 "%s: isci_device=%p, request=%p, task=%p\n",
684 __func__, isci_device, isci_request, task);
685
686 spin_lock_irqsave(&isci_host->scic_lock, flags);
687 list_del_init(&isci_request->dev_node);
688 spin_unlock_irqrestore(&isci_host->scic_lock, flags);
689
690 if (task != NULL) {
691
692 spin_lock_irqsave(&task->task_state_lock, flags);
693 task->lldd_task = NULL;
694
695 isci_set_task_doneflags(task);
696
697 /* If this task is not in the abort path, call task_done. */
698 if (!(task->task_state_flags & SAS_TASK_STATE_ABORTED)) {
699
700 spin_unlock_irqrestore(&task->task_state_lock, flags);
701 task->task_done(task);
702 } else
703 spin_unlock_irqrestore(&task->task_state_lock, flags);
704 }
705 isci_request_free(isci_host, isci_request);
706 }
707
708 /**
709 * isci_termination_timed_out() - This function handles a request for
710 * which the wait for termination has timed-out.
711 *
712 * @host: This SCU.
713 * @request: The I/O request being terminated.
714 */
715 static void
716 isci_termination_timed_out(
717 struct isci_host * host,
718 struct isci_request * request
719 )
720 {
721 unsigned long state_flags;
722
723 dev_warn(&host->pdev->dev,
724 "%s: host = %p; request = %p\n",
725 __func__, host, request);
726
727 /* At this point, the request to terminate
728 * has timed out. The best we can do is to
729 * have the request die a silent death
730 * if it ever completes.
731 */
732 spin_lock_irqsave(&request->state_lock, state_flags);
733
734 if (request->status == started) {
735
736 /* Set the request state to "dead",
737 * and clear the task pointer so that an actual
738 * completion event callback doesn't do
739 * anything.
740 */
741 request->status = dead;
742
743 /* Clear the timeout completion event pointer.*/
744 request->io_request_completion = NULL;
745
746 if (request->ttype == io_task) {
747
748 /* Break links with the sas_task. */
749 if (request->ttype_ptr.io_task_ptr != NULL) {
750
751 request->ttype_ptr.io_task_ptr->lldd_task = NULL;
752 request->ttype_ptr.io_task_ptr = NULL;
753 }
754 }
755 }
756 spin_unlock_irqrestore(&request->state_lock, state_flags);
757 }
758
759
760 /**
761 * isci_terminate_request_core() - This function will terminate the given
762 * request, and wait for it to complete. This function must only be called
763 * from a thread that can wait. Note that the request is terminated and
764 * completed (back to the host, if started there).
765 * @isci_host: This SCU.
766 * @isci_device: The target.
767 * @isci_request: The I/O request to be terminated.
768 *
769 *
770 */
771 static void isci_terminate_request_core(
772 struct isci_host *isci_host,
773 struct isci_remote_device *isci_device,
774 struct isci_request *isci_request)
775 {
776 enum sci_status status = SCI_SUCCESS;
777 bool was_terminated = false;
778 bool needs_cleanup_handling = false;
779 enum isci_request_status request_status;
780 unsigned long flags;
781 unsigned long timeout_remaining;
782
783
784 dev_dbg(&isci_host->pdev->dev,
785 "%s: device = %p; request = %p\n",
786 __func__, isci_device, isci_request);
787
788 spin_lock_irqsave(&isci_host->scic_lock, flags);
789
790 /* Note that we are not going to control
791 * the target to abort the request.
792 */
793 isci_request->complete_in_target = true;
794
795 /* Make sure the request wasn't just sitting around signalling
796 * device condition (if the request handle is NULL, then the
797 * request completed but needed additional handling here).
798 */
799 if (isci_request->sci_request_handle != NULL) {
800 was_terminated = true;
801 needs_cleanup_handling = true;
802 status = scic_controller_terminate_request(
803 isci_host->core_controller,
804 to_sci_dev(isci_device),
805 isci_request->sci_request_handle
806 );
807 }
808 spin_unlock_irqrestore(&isci_host->scic_lock, flags);
809
810 /*
811 * The only time the request to terminate will
812 * fail is when the io request is completed and
813 * being aborted.
814 */
815 if (status != SCI_SUCCESS) {
816 dev_err(&isci_host->pdev->dev,
817 "%s: scic_controller_terminate_request"
818 " returned = 0x%x\n",
819 __func__,
820 status);
821 /* Clear the completion pointer from the request. */
822 isci_request->io_request_completion = NULL;
823
824 } else {
825 if (was_terminated) {
826 dev_dbg(&isci_host->pdev->dev,
827 "%s: before completion wait (%p)\n",
828 __func__,
829 isci_request->io_request_completion);
830
831 /* Wait here for the request to complete. */
832 #define TERMINATION_TIMEOUT_MSEC 50
833 timeout_remaining
834 = wait_for_completion_timeout(
835 isci_request->io_request_completion,
836 msecs_to_jiffies(TERMINATION_TIMEOUT_MSEC));
837
838 if (!timeout_remaining) {
839
840 isci_termination_timed_out(isci_host,
841 isci_request);
842
843 dev_err(&isci_host->pdev->dev,
844 "%s: *** Timeout waiting for "
845 "termination(%p/%p)\n",
846 __func__,
847 isci_request->io_request_completion,
848 isci_request);
849
850 } else
851 dev_dbg(&isci_host->pdev->dev,
852 "%s: after completion wait (%p)\n",
853 __func__,
854 isci_request->io_request_completion);
855 }
856 /* Clear the completion pointer from the request. */
857 isci_request->io_request_completion = NULL;
858
859 /* Peek at the status of the request. This will tell
860 * us if there was special handling on the request such that it
861 * needs to be detached and freed here.
862 */
863 spin_lock_irqsave(&isci_request->state_lock, flags);
864 request_status = isci_request_get_state(isci_request);
865
866 if ((isci_request->ttype == io_task) /* TMFs are in their own thread */
867 && ((request_status == aborted)
868 || (request_status == aborting)
869 || (request_status == terminating)
870 || (request_status == completed)
871 || (request_status == dead)
872 )
873 ) {
874
875 /* The completion routine won't free a request in
876 * the aborted/aborting/etc. states, so we do
877 * it here.
878 */
879 needs_cleanup_handling = true;
880 }
881 spin_unlock_irqrestore(&isci_request->state_lock, flags);
882
883 if (needs_cleanup_handling)
884 isci_request_cleanup_completed_loiterer(
885 isci_host, isci_device, isci_request
886 );
887 }
888 }
889
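/* Change the request state from "started" to the given new state and, if it
 * was "started" or already "completed", terminate and clean it up through
 * isci_terminate_request_core().
 */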
890 static void isci_terminate_request(
891 struct isci_host *isci_host,
892 struct isci_remote_device *isci_device,
893 struct isci_request *isci_request,
894 enum isci_request_status new_request_state)
895 {
896 enum isci_request_status old_state;
897 DECLARE_COMPLETION_ONSTACK(request_completion);
898
899 /* Change state to "new_request_state" if it is currently "started" */
900 old_state = isci_request_change_started_to_newstate(
901 isci_request,
902 &request_completion,
903 new_request_state
904 );
905
906 if ((old_state == started) || (old_state == completed)) {
907
908 /* If the old_state is started:
909 * This request was not already being aborted. If it had been,
910 * then the aborting I/O (ie. the TMF request) would not be in
911 * the aborting state, and thus would be terminated here. Note
912 * that since the TMF completion's call to the kernel function
913 * "complete()" does not happen until the pending I/O request
914 * terminate fully completes, we do not have to implement a
915 * special wait here for already aborting requests - the
916 * termination of the TMF request will force the request
917 * to finish its already started terminate.
918 *
919 * If old_state == completed:
920 * This request completed from the SCU hardware perspective
921 * and now just needs cleaning up in terms of freeing the
922 * request and potentially calling up to libsas.
923 */
924 isci_terminate_request_core(isci_host, isci_device,
925 isci_request);
926 }
927 }
928
929 /**
930 * isci_terminate_pending_requests() - This function will change all of the
931 * requests on the given device's state to "aborting", will terminate the
932 * requests, and wait for them to complete. This function must only be
933 * called from a thread that can wait. Note that the requests are all
934 * terminated and completed (back to the host, if started there).
935 * @isci_host: This parameter specifies SCU.
936 * @isci_device: This parameter specifies the target.
937 *
938 *
939 */
940 void isci_terminate_pending_requests(
941 struct isci_host *isci_host,
942 struct isci_remote_device *isci_device,
943 enum isci_request_status new_request_state)
944 {
945 struct isci_request *request;
946 struct isci_request *next_request;
947 unsigned long flags;
948 struct list_head aborted_request_list;
949
950 INIT_LIST_HEAD(&aborted_request_list);
951
952 dev_dbg(&isci_host->pdev->dev,
953 "%s: isci_device = %p (new request state = %d)\n",
954 __func__, isci_device, new_request_state);
955
956 spin_lock_irqsave(&isci_host->scic_lock, flags);
957
958 /* Move all of the pending requests off of the device list. */
959 list_splice_init(&isci_device->reqs_in_process,
960 &aborted_request_list);
961
962 spin_unlock_irqrestore(&isci_host->scic_lock, flags);
963
964 /* Iterate through the now-local list. */
965 list_for_each_entry_safe(request, next_request,
966 &aborted_request_list, dev_node) {
967
968 dev_warn(&isci_host->pdev->dev,
969 "%s: isci_device=%p request=%p; task=%p\n",
970 __func__,
971 isci_device, request,
972 ((request->ttype == io_task)
973 ? isci_request_access_task(request)
974 : NULL));
975
976 /* Mark all still pending I/O with the selected next
977 * state, terminate and free it.
978 */
979 isci_terminate_request(isci_host, isci_device,
980 request, new_request_state
981 );
982 }
983 }
984
985 /**
986 * isci_task_send_lu_reset_sas() - This function is called by one of the SAS
987 * Domain Template functions (isci_task_lu_reset) to send a LUN reset.
988 * @lun: This parameter specifies the lun to be reset.
989 *
990 * status, zero indicates success.
991 */
992 static int isci_task_send_lu_reset_sas(
993 struct isci_host *isci_host,
994 struct isci_remote_device *isci_device,
995 u8 *lun)
996 {
997 struct isci_tmf tmf;
998 int ret = TMF_RESP_FUNC_FAILED;
999
1000 dev_dbg(&isci_host->pdev->dev,
1001 "%s: isci_host = %p, isci_device = %p\n",
1002 __func__, isci_host, isci_device);
1003 /* Send the LUN reset to the target. By the time the call returns,
1004 * the TMF has fully executed in the target (in which case the return
1005 * value is "TMF_RESP_FUNC_COMPLETE"), or the request timed-out or
1006 * was otherwise unable to be executed ("TMF_RESP_FUNC_FAILED").
1007 */
1008 isci_task_build_tmf(&tmf, isci_device, isci_tmf_ssp_lun_reset, NULL,
1009 NULL);
1010
1011 #define ISCI_LU_RESET_TIMEOUT_MS 2000 /* 2 second timeout. */
1012 ret = isci_task_execute_tmf(isci_host, &tmf, ISCI_LU_RESET_TIMEOUT_MS);
1013
1014 if (ret == TMF_RESP_FUNC_COMPLETE)
1015 dev_dbg(&isci_host->pdev->dev,
1016 "%s: %p: TMF_LU_RESET passed\n",
1017 __func__, isci_device);
1018 else
1019 dev_dbg(&isci_host->pdev->dev,
1020 "%s: %p: TMF_LU_RESET failed (%x)\n",
1021 __func__, isci_device, ret);
1022
1023 return ret;
1024 }
1025
1026 /**
1027 * isci_task_lu_reset() - This function is one of the SAS Domain Template
1028 * functions. This is one of the Task Management functions called by libsas,
1029 * to reset the given lun. Note the assumption that while this call is
1030 * executing, no I/O will be sent by the host to the device.
1031 * @lun: This parameter specifies the lun to be reset.
1032 *
1033 * status, zero indicates success.
1034 */
1035 int isci_task_lu_reset(
1036 struct domain_device *domain_device,
1037 u8 *lun)
1038 {
1039 struct isci_host *isci_host = NULL;
1040 struct isci_remote_device *isci_device = NULL;
1041 int ret;
1042 bool device_stopping = false;
1043
1044 if (domain_device == NULL) {
1045 pr_warn("%s: domain_device == NULL\n", __func__);
1046 return TMF_RESP_FUNC_FAILED;
1047 }
1048
1049 isci_device = isci_dev_from_domain_dev(domain_device);
1050
1051 if (domain_device->port != NULL)
1052 isci_host = isci_host_from_sas_ha(domain_device->port->ha);
1053
1054 pr_debug("%s: domain_device=%p, isci_host=%p; isci_device=%p\n",
1055 __func__, domain_device, isci_host, isci_device);
1056
1057 if (isci_device != NULL)
1058 device_stopping = (isci_device->status == isci_stopping)
1059 || (isci_device->status == isci_stopped);
1060
1061 /* If there is a device reset pending on any request in the
1062 * device's list, fail this LUN reset request in order to
1063 * escalate to the device reset.
1064 */
1065 if ((isci_device == NULL) ||
1066 (isci_host == NULL) ||
1067 ((isci_host != NULL) &&
1068 (isci_device != NULL) &&
1069 (device_stopping ||
1070 (isci_device_is_reset_pending(isci_host, isci_device))))) {
1071 pr_warn("%s: No dev (%p), no host (%p), or "
1072 "RESET PENDING: domain_device=%p\n",
1073 __func__, isci_device,
1074 isci_host, domain_device);
1075 return TMF_RESP_FUNC_FAILED;
1076 }
1077
1078 /* Send the task management part of the reset. */
1079 if (sas_protocol_ata(domain_device->tproto)) {
1080 ret = isci_task_send_lu_reset_sata(
1081 isci_host, isci_device, lun
1082 );
1083 } else
1084 ret = isci_task_send_lu_reset_sas(isci_host, isci_device, lun);
1085
1086 /* If the LUN reset worked, all the I/O can now be terminated. */
1087 if (ret == TMF_RESP_FUNC_COMPLETE)
1088 /* Terminate all I/O now. */
1089 isci_terminate_pending_requests(isci_host,
1090 isci_device,
1091 terminating);
1092
1093 return ret;
1094 }
1095
1096
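/* The nexus-level task management functions below are not implemented by
 * this driver; they simply report failure back to libsas.
 */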
1097 /* int (*lldd_clear_nexus_port)(struct asd_sas_port *); */
1098 int isci_task_clear_nexus_port(struct asd_sas_port *port)
1099 {
1100 return TMF_RESP_FUNC_FAILED;
1101 }
1102
1103
1104
1105 int isci_task_clear_nexus_ha(struct sas_ha_struct *ha)
1106 {
1107 return TMF_RESP_FUNC_FAILED;
1108 }
1109
1110 int isci_task_I_T_nexus_reset(struct domain_device *dev)
1111 {
1112 return TMF_RESP_FUNC_FAILED;
1113 }
1114
1115
1116 /* Task Management Functions. Must be called from process context. */
1117
1118 /**
1119 * isci_abort_task_process_cb() - This is a helper function for the abort task
1120 * TMF command. It manages the request state with respect to the successful
1121 * transmission / completion of the abort task request.
1122 * @cb_state: This parameter specifies when this function was called - after
1123 * the TMF request has been started or after it has timed-out.
1124 * @tmf: This parameter specifies the TMF in progress.
1125 *
1126 *
1127 */
1128 static void isci_abort_task_process_cb(
1129 enum isci_tmf_cb_state cb_state,
1130 struct isci_tmf *tmf,
1131 void *cb_data)
1132 {
1133 struct isci_request *old_request;
1134
1135 old_request = (struct isci_request *)cb_data;
1136
1137 dev_dbg(&old_request->isci_host->pdev->dev,
1138 "%s: tmf=%p, old_request=%p\n",
1139 __func__, tmf, old_request);
1140
1141 switch (cb_state) {
1142
1143 case isci_tmf_started:
1144 /* The TMF has been started. Nothing to do here, since the
1145 * request state was already set to "aborted" by the abort
1146 * task function.
1147 */
1148 BUG_ON((old_request->status != aborted)
1149 && (old_request->status != completed));
1150 break;
1151
1152 case isci_tmf_timed_out:
1153
1154 /* Set the task's state to "aborting", since the abort task
1155 * function thread set it to "aborted" (above) in anticipation
1156 * of the task management request working correctly. Since the
1157 * timeout has now fired, the TMF request failed. We set the
1158 * state such that the request completion will indicate the
1159 * device is no longer present.
1160 */
1161 isci_request_change_state(old_request, aborting);
1162 break;
1163
1164 default:
1165 dev_err(&old_request->isci_host->pdev->dev,
1166 "%s: Bad cb_state (%d): tmf=%p, old_request=%p\n",
1167 __func__, cb_state, tmf, old_request);
1168 break;
1169 }
1170 }
1171
1172 /**
1173 * isci_task_abort_task() - This function is one of the SAS Domain Template
1174 * functions. This function is called by libsas to abort a specified task.
1175 * @task: This parameter specifies the SAS task to abort.
1176 *
1177 * status, zero indicates success.
1178 */
1179 int isci_task_abort_task(struct sas_task *task)
1180 {
1181 DECLARE_COMPLETION_ONSTACK(aborted_io_completion);
1182 struct isci_request *old_request = NULL;
1183 enum isci_request_status old_state;
1184 struct isci_remote_device *isci_device = NULL;
1185 struct isci_host *isci_host = NULL;
1186 struct isci_tmf tmf;
1187 int ret = TMF_RESP_FUNC_FAILED;
1188 unsigned long flags;
1189 bool any_dev_reset = false;
1190 bool device_stopping;
1191
1192 /* Get the isci_request reference from the task. Note that
1193 * this check does not depend on the pending request list
1194 * in the device, because tasks driving resets may land here
1195 * after completion in the core.
1196 */
1197 old_request = isci_task_get_request_from_task(task, &isci_host,
1198 &isci_device);
1199
1200 dev_dbg(&isci_host->pdev->dev,
1201 "%s: task = %p\n", __func__, task);
1202
1203 /* Check if the device has been / is currently being removed.
1204 * If so, no task management will be done, and the I/O will
1205 * be terminated.
1206 */
1207 device_stopping = (isci_device->status == isci_stopping)
1208 || (isci_device->status == isci_stopped);
1209
1210 /* This version of the driver will fail abort requests for
1211 * SATA/STP. Failing the abort request this way will cause the
1212 * SCSI error handler thread to escalate to LUN reset
1213 */
1214 if (sas_protocol_ata(task->task_proto) && !device_stopping) {
1215 dev_warn(&isci_host->pdev->dev,
1216 " task %p is for a STP/SATA device;"
1217 " returning TMF_RESP_FUNC_FAILED\n"
1218 " to cause a LUN reset...\n", task);
1219 return TMF_RESP_FUNC_FAILED;
1220 }
1221
1222 dev_dbg(&isci_host->pdev->dev,
1223 "%s: old_request == %p\n", __func__, old_request);
1224
1225 if (!device_stopping)
1226 any_dev_reset = isci_device_is_reset_pending(isci_host, isci_device);
1227
1228 spin_lock_irqsave(&task->task_state_lock, flags);
1229
1230 /* Don't do resets to stopping devices. */
1231 if (device_stopping) {
1232
1233 task->task_state_flags &= ~SAS_TASK_NEED_DEV_RESET;
1234 any_dev_reset = false;
1235
1236 } else /* See if there is a pending device reset for this device. */
1237 any_dev_reset = any_dev_reset
1238 || (task->task_state_flags & SAS_TASK_NEED_DEV_RESET);
1239
1240 /* If the extraction of the request reference from the task
1241 * failed, then the request has been completed (or if there is a
1242 * pending reset then this abort request function must be failed
1243 * in order to escalate to the target reset).
1244 */
1245 if ((old_request == NULL) || any_dev_reset) {
1246
1247 /* If the device reset task flag is set, fail the task
1248 * management request. Otherwise, the original request
1249 * has completed.
1250 */
1251 if (any_dev_reset) {
1252
1253 /* Turn off the task's DONE to make sure this
1254 * task is escalated to a target reset.
1255 */
1256 task->task_state_flags &= ~SAS_TASK_STATE_DONE;
1257
1258 /* Make the reset happen as soon as possible. */
1259 task->task_state_flags |= SAS_TASK_NEED_DEV_RESET;
1260
1261 spin_unlock_irqrestore(&task->task_state_lock, flags);
1262
1263 /* Fail the task management request in order to
1264 * escalate to the target reset.
1265 */
1266 ret = TMF_RESP_FUNC_FAILED;
1267
1268 dev_dbg(&isci_host->pdev->dev,
1269 "%s: Failing task abort in order to "
1270 "escalate to target reset because\n"
1271 "SAS_TASK_NEED_DEV_RESET is set for "
1272 "task %p on dev %p\n",
1273 __func__, task, isci_device);
1274
1275
1276 } else {
1277 /* The request has already completed and there
1278 * is nothing to do here other than to set the task
1279 * done bit, and indicate that the task abort function
1280 * was successful.
1281 */
1282 isci_set_task_doneflags(task);
1283
1284 spin_unlock_irqrestore(&task->task_state_lock, flags);
1285
1286 ret = TMF_RESP_FUNC_COMPLETE;
1287
1288 dev_dbg(&isci_host->pdev->dev,
1289 "%s: abort task not needed for %p\n",
1290 __func__, task);
1291 }
1292
1293 return ret;
1294 }
1295 else
1296 spin_unlock_irqrestore(&task->task_state_lock, flags);
1297
1298 spin_lock_irqsave(&isci_host->scic_lock, flags);
1299
1300 /* Check the request status and change to "aborted" if currently
1301 * "started"; if so then set the I/O kernel completion
1302 * struct that will be triggered when the request completes.
1303 */
1304 old_state = isci_task_validate_request_to_abort(
1305 old_request, isci_host, isci_device,
1306 &aborted_io_completion);
1307 if ((old_state != started) && (old_state != completed)) {
1308
1309 spin_unlock_irqrestore(&isci_host->scic_lock, flags);
1310
1311 /* The request was already being handled by someone else (because
1312 * they got to set the state away from started).
1313 */
1314 dev_dbg(&isci_host->pdev->dev,
1315 "%s: device = %p; old_request %p already being aborted\n",
1316 __func__,
1317 isci_device, old_request);
1318
1319 return TMF_RESP_FUNC_COMPLETE;
1320 }
1321 if ((task->task_proto == SAS_PROTOCOL_SMP)
1322 || device_stopping
1323 || old_request->complete_in_target
1324 ) {
1325
1326 spin_unlock_irqrestore(&isci_host->scic_lock, flags);
1327
1328 dev_dbg(&isci_host->pdev->dev,
1329 "%s: SMP request (%d)"
1330 " or device is stopping (%d)"
1331 " or complete_in_target (%d), thus no TMF\n",
1332 __func__, (task->task_proto == SAS_PROTOCOL_SMP),
1333 device_stopping, old_request->complete_in_target);
1334
1335 /* Set the state on the task. */
1336 isci_task_all_done(task);
1337
1338 ret = TMF_RESP_FUNC_COMPLETE;
1339
1340 /* Stopping and SMP devices are not sent a TMF, and are not
1341 * reset, but the outstanding I/O request is terminated below.
1342 */
1343 } else {
1344 /* Fill in the tmf structure */
1345 isci_task_build_abort_task_tmf(&tmf, isci_device,
1346 isci_tmf_ssp_task_abort,
1347 isci_abort_task_process_cb,
1348 old_request);
1349
1350 spin_unlock_irqrestore(&isci_host->scic_lock, flags);
1351
1352 #define ISCI_ABORT_TASK_TIMEOUT_MS 500 /* half second timeout. */
1353 ret = isci_task_execute_tmf(isci_host, &tmf,
1354 ISCI_ABORT_TASK_TIMEOUT_MS);
1355
1356 if (ret != TMF_RESP_FUNC_COMPLETE)
1357 dev_err(&isci_host->pdev->dev,
1358 "%s: isci_task_send_tmf failed\n",
1359 __func__);
1360 }
1361 if (ret == TMF_RESP_FUNC_COMPLETE) {
1362 old_request->complete_in_target = true;
1363
1364 /* Clean up the request on our side, and wait for the aborted I/O to
1365 * complete.
1366 */
1367 isci_terminate_request_core(isci_host, isci_device, old_request);
1368 }
1369
1370 /* Make sure we do not leave a reference to aborted_io_completion */
1371 old_request->io_request_completion = NULL;
1372 return ret;
1373 }
1374
1375 /**
1376 * isci_task_abort_task_set() - This function is one of the SAS Domain Template
1377 * functions. This is one of the Task Management functions called by libsas,
1378 * to abort all tasks for the given lun.
1379 * @d_device: This parameter specifies the domain device associated with this
1380 * request.
1381 * @lun: This parameter specifies the lun associated with this request.
1382 *
1383 * status, zero indicates success.
1384 */
1385 int isci_task_abort_task_set(
1386 struct domain_device *d_device,
1387 u8 *lun)
1388 {
1389 return TMF_RESP_FUNC_FAILED;
1390 }
1391
1392
1393 /**
1394 * isci_task_clear_aca() - This function is one of the SAS Domain Template
1395 * functions. This is one of the Task Management functions called by libsas.
1396 * @d_device: This parameter specifies the domain device associated with this
1397 * request.
1398 * @lun: This parameter specifies the lun associated with this request.
1399 *
1400 * status, zero indicates success.
1401 */
1402 int isci_task_clear_aca(
1403 struct domain_device *d_device,
1404 u8 *lun)
1405 {
1406 return TMF_RESP_FUNC_FAILED;
1407 }
1408
1409
1410
1411 /**
1412 * isci_task_clear_task_set() - This function is one of the SAS Domain Template
1413 * functions. This is one of the Task Management functions called by libsas.
1414 * @d_device: This parameter specifies the domain device associated with this
1415 * request.
1416 * @lun: This parameter specifies the lun associated with this request.
1417 *
1418 * status, zero indicates success.
1419 */
1420 int isci_task_clear_task_set(
1421 struct domain_device *d_device,
1422 u8 *lun)
1423 {
1424 return TMF_RESP_FUNC_FAILED;
1425 }
1426
1427
1428 /**
1429 * isci_task_query_task() - This function is implemented to cause libsas to
1430 * correctly escalate the failed abort to a LUN or target reset (this is
1431 * because sas_scsi_find_task libsas function does not correctly interpret
1432 * all return codes from the abort task call). When TMF_RESP_FUNC_SUCC is
1433 * returned, libsas turns this into a LUN reset; when FUNC_FAILED is
1434 * returned, libsas will turn this into a target reset
1435 * @task: This parameter specifies the sas task being queried.
1436 * @lun: This parameter specifies the lun associated with this request.
1437 *
1438 * status, zero indicates success.
1439 */
1440 int isci_task_query_task(
1441 struct sas_task *task)
1442 {
1443 /* See if there is a pending device reset for this device. */
1444 if (task->task_state_flags & SAS_TASK_NEED_DEV_RESET)
1445 return TMF_RESP_FUNC_FAILED;
1446 else
1447 return TMF_RESP_FUNC_SUCC;
1448 }
1449
1450 /**
1451 * isci_task_request_complete() - This function is called by the sci core when
1452 * a task request completes.
1453 * @isci_host: This parameter specifies the ISCI host object
1454 * @request: This parameter is the completed isci_request object.
1455 * @completion_status: This parameter specifies the completion status from the
1456 * sci core.
1457 *
1458 * none.
1459 */
1460 void isci_task_request_complete(
1461 struct isci_host *isci_host,
1462 struct isci_request *request,
1463 enum sci_task_status completion_status)
1464 {
1465 struct isci_remote_device *isci_device = request->isci_device;
1466 enum isci_request_status old_state;
1467 struct isci_tmf *tmf = isci_request_access_tmf(request);
1468 struct completion *tmf_complete;
1469
1470 dev_dbg(&isci_host->pdev->dev,
1471 "%s: request = %p, status=%d\n",
1472 __func__, request, completion_status);
1473
1474 old_state = isci_request_change_state(request, completed);
1475
1476 tmf->status = completion_status;
1477 request->complete_in_target = true;
1478
1479 if (SAS_PROTOCOL_SSP == tmf->proto) {
1480
1481 memcpy(&tmf->resp.resp_iu,
1482 scic_io_request_get_response_iu_address(
1483 request->sci_request_handle
1484 ),
1485 sizeof(struct sci_ssp_response_iu));
1486
1487 } else if (SAS_PROTOCOL_SATA == tmf->proto) {
1488
1489 memcpy(&tmf->resp.d2h_fis,
1490 scic_stp_io_request_get_d2h_reg_address(
1491 request->sci_request_handle
1492 ),
1493 sizeof(struct sata_fis_reg_d2h)
1494 );
1495 }
1496
1497 /* Manage the timer if it is still running. */
1498 if (tmf->timeout_timer) {
1499 isci_del_timer(isci_host, tmf->timeout_timer);
1500 tmf->timeout_timer = NULL;
1501 }
1502
1503 /* PRINT_TMF( ((struct isci_tmf *)request->task)); */
1504 tmf_complete = tmf->complete;
1505
1506 scic_controller_complete_task(
1507 isci_host->core_controller,
1508 to_sci_dev(isci_device),
1509 request->sci_request_handle
1510 );
1511 /* NULL the request handle to make sure it cannot be terminated
1512 * or completed again.
1513 */
1514 request->sci_request_handle = NULL;
1515
1516 isci_request_change_state(request, unallocated);
1517 list_del_init(&request->dev_node);
1518
1519 /* The task management part completes last. */
1520 complete(tmf_complete);
1521 }
1522
1523
1524 /**
1525 * isci_task_ssp_request_get_lun() - This function is called by the sci core to
1526 * retrieve the lun for a given task request.
1527 * @request: This parameter is the isci_request object.
1528 *
1529 * lun for specified task request.
1530 */
1531
1532 /**
1533 * isci_task_ssp_request_get_function() - This function is called by the sci
1534 * core to retrieve the function for a given task request.
1535 * @request: This parameter is the isci_request object.
1536 *
1537 * function code for specified task request.
1538 */
1539 u8 isci_task_ssp_request_get_function(struct isci_request *request)
1540 {
1541 struct isci_tmf *isci_tmf = isci_request_access_tmf(request);
1542
1543 dev_dbg(&request->isci_host->pdev->dev,
1544 "%s: func = %d\n", __func__, isci_tmf->tmf_code);
1545
1546 return isci_tmf->tmf_code;
1547 }
1548
1549 /**
1550 * isci_task_ssp_request_get_io_tag_to_manage() - This function is called by
1551 * the sci core to retrieve the io tag for a given task request.
1552 * @request: This parameter is the isci_request object.
1553 *
1554 * io tag for specified task request.
1555 */
1556 u16 isci_task_ssp_request_get_io_tag_to_manage(struct isci_request *request)
1557 {
1558 u16 io_tag = SCI_CONTROLLER_INVALID_IO_TAG;
1559
1560 if (tmf_task == request->ttype) {
1561 struct isci_tmf *tmf = isci_request_access_tmf(request);
1562 io_tag = tmf->io_tag;
1563 }
1564
1565 dev_dbg(&request->isci_host->pdev->dev,
1566 "%s: request = %p, io_tag = %d\n",
1567 __func__, request, io_tag);
1568
1569 return io_tag;
1570 }
1571
1572 /**
1573 * isci_task_ssp_request_get_response_data_address() - This function is called
1574 * by the sci core to retrieve the response data address for a given task
1575 * request.
1576 * @request: This parameter is the isci_request object.
1577 *
1578 * response data address for specified task request.
1579 */
1580 void *isci_task_ssp_request_get_response_data_address(
1581 struct isci_request *request)
1582 {
1583 struct isci_tmf *isci_tmf = isci_request_access_tmf(request);
1584
1585 return &isci_tmf->resp.resp_iu;
1586 }
1587
1588 /**
1589 * isci_task_ssp_request_get_response_data_length() - This function is called
1590 * by the sci core to retrieve the response data length for a given task
1591 * request.
1592 * @request: This parameter is the isci_request object.
1593 *
1594 * response data length for specified task request.
1595 */
1596 u32 isci_task_ssp_request_get_response_data_length(
1597 struct isci_request *request)
1598 {
1599 struct isci_tmf *isci_tmf = isci_request_access_tmf(request);
1600
1601 return sizeof(isci_tmf->resp.resp_iu);
1602 }
1603
1604 /**
1605 * isci_bus_reset_handler() - This function performs a target reset of the
1606 * device referenced by "cmd". This function is exported through the
1607 * "struct scsi_host_template" structure such that it is called when an I/O
1608 * recovery process has escalated to a target reset. Note that this function
1609 * is called from the scsi error handler event thread, so may block on calls.
1610 * @scsi_cmd: This parameter specifies the target to be reset.
1611 *
1612 * SUCCESS if the reset process was successful, else FAILED.
1613 */
1614 int isci_bus_reset_handler(struct scsi_cmnd *cmd)
1615 {
1616 unsigned long flags = 0;
1617 struct isci_host *isci_host = NULL;
1618 enum sci_status status;
1619 int base_status;
1620 struct isci_remote_device *isci_dev
1621 = isci_dev_from_domain_dev(
1622 sdev_to_domain_dev(cmd->device));
1623
1624 dev_dbg(&cmd->device->sdev_gendev,
1625 "%s: cmd %p, isci_dev %p\n",
1626 __func__, cmd, isci_dev);
1627
1628 if (!isci_dev) {
1629 dev_warn(&cmd->device->sdev_gendev,
1630 "%s: isci_dev is GONE!\n",
1631 __func__);
1632
1633 return TMF_RESP_FUNC_COMPLETE; /* Nothing to reset. */
1634 }
1635
1636 if (isci_dev->isci_port != NULL)
1637 isci_host = isci_dev->isci_port->isci_host;
1638
1639 if (isci_host != NULL)
1640 spin_lock_irqsave(&isci_host->scic_lock, flags);
1641
1642 status = scic_remote_device_reset(to_sci_dev(isci_dev));
1643 if (status != SCI_SUCCESS) {
1644
1645 if (isci_host != NULL)
1646 spin_unlock_irqrestore(&isci_host->scic_lock, flags);
1647
1648 scmd_printk(KERN_WARNING, cmd,
1649 "%s: scic_remote_device_reset(%p) returned %d!\n",
1650 __func__, isci_dev, status);
1651
1652 return TMF_RESP_FUNC_FAILED;
1653 }
1654 if (isci_host != NULL)
1655 spin_unlock_irqrestore(&isci_host->scic_lock, flags);
1656
1657 /* Make sure all pending requests are able to be fully terminated. */
1658 isci_device_clear_reset_pending(isci_dev);
1659
1660 /* Terminate in-progress I/O now. */
1661 isci_remote_device_nuke_requests(isci_dev);
1662
1663 /* Call into the libsas default handler (which calls sas_phy_reset). */
1664 base_status = sas_eh_bus_reset_handler(cmd);
1665
1666 if (base_status != SUCCESS) {
1667
1668 /* There can be cases where the resets to individual devices
1669 * behind an expander will fail because of an unplug of the
1670 * expander itself.
1671 */
1672 scmd_printk(KERN_WARNING, cmd,
1673 "%s: sas_eh_bus_reset_handler(%p) returned %d!\n",
1674 __func__, cmd, base_status);
1675 }
1676
1677 /* WHAT TO DO HERE IF sas_phy_reset FAILS? */
1678
1679 if (isci_host != NULL)
1680 spin_lock_irqsave(&isci_host->scic_lock, flags);
1681 status = scic_remote_device_reset_complete(to_sci_dev(isci_dev));
1682
1683 if (isci_host != NULL)
1684 spin_unlock_irqrestore(&isci_host->scic_lock, flags);
1685
1686 if (status != SCI_SUCCESS) {
1687 scmd_printk(KERN_WARNING, cmd,
1688 "%s: scic_remote_device_reset_complete(%p) "
1689 "returned %d!\n",
1690 __func__, isci_dev, status);
1691 }
1692 /* WHAT TO DO HERE IF scic_remote_device_reset_complete FAILS? */
1693
1694 dev_dbg(&cmd->device->sdev_gendev,
1695 "%s: cmd %p, isci_dev %p complete.\n",
1696 __func__, cmd, isci_dev);
1697
1698 return TMF_RESP_FUNC_COMPLETE;
1699 }