/*
 * This file is provided under a dual BSD/GPLv2 license.  When using or
 * redistributing this file, you may do so under either license.
 *
 * GPL LICENSE SUMMARY
 *
 * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of version 2 of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
 * The full GNU General Public License is included in this distribution
 * in the file called LICENSE.GPL.
 *
 * BSD LICENSE
 *
 * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 *   * Redistributions of source code must retain the above copyright
 *     notice, this list of conditions and the following disclaimer.
 *   * Redistributions in binary form must reproduce the above copyright
 *     notice, this list of conditions and the following disclaimer in
 *     the documentation and/or other materials provided with the
 *     distribution.
 *   * Neither the name of Intel Corporation nor the names of its
 *     contributors may be used to endorse or promote products derived
 *     from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include "isci.h"
#include "task.h"
#include "request.h"
#include "sata.h"
#include "scu_completion_codes.h"
#include "sas.h"

/**
 * This method returns the sgl element pair for the specified sgl_pair index.
 * @sci_req: This parameter specifies the IO request for which to retrieve
 *    the Scatter-Gather List element pair.
 * @sgl_pair_index: This parameter specifies the index into the SGL element
 *    pair to be retrieved.
 *
 * This method returns a pointer to a struct scu_sgl_element_pair.
 */
static struct scu_sgl_element_pair *scic_sds_request_get_sgl_element_pair(
	struct scic_sds_request *sci_req,
	u32 sgl_pair_index
	) {
	struct scu_task_context *task_context;

	task_context = (struct scu_task_context *)sci_req->task_context_buffer;

	if (sgl_pair_index == 0) {
		return &task_context->sgl_pair_ab;
	} else if (sgl_pair_index == 1) {
		return &task_context->sgl_pair_cd;
	}

	return &sci_req->sg_table[sgl_pair_index - 2];
}
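
/*
 * Note on the pair indexing above: indices 0 and 1 resolve to the two SGL
 * element pairs embedded in the task context itself (sgl_pair_ab and
 * sgl_pair_cd); any higher index falls through to the request's external
 * sg_table, hence the "- 2" offset.
 */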

/**
 * This function will build the SGL list for an IO request.
 * @sds_request: This parameter specifies the IO request for which to build
 *    the Scatter-Gather List.
 *
 */
void scic_sds_request_build_sgl(struct scic_sds_request *sds_request)
{
	struct isci_request *isci_request = sci_req_to_ireq(sds_request);
	struct isci_host *isci_host = isci_request->isci_host;
	struct sas_task *task = isci_request_access_task(isci_request);
	struct scatterlist *sg = NULL;
	dma_addr_t dma_addr;
	u32 sg_idx = 0;
	struct scu_sgl_element_pair *scu_sg = NULL;
	struct scu_sgl_element_pair *prev_sg = NULL;

	if (task->num_scatter > 0) {
		sg = task->scatter;

		while (sg) {
			scu_sg = scic_sds_request_get_sgl_element_pair(
					sds_request,
					sg_idx);

			SCU_SGL_COPY(scu_sg->A, sg);

			sg = sg_next(sg);

			if (sg) {
				SCU_SGL_COPY(scu_sg->B, sg);
				sg = sg_next(sg);
			} else
				SCU_SGL_ZERO(scu_sg->B);

			if (prev_sg) {
				dma_addr =
					scic_io_request_get_dma_addr(
							sds_request,
							scu_sg);

				prev_sg->next_pair_upper =
					upper_32_bits(dma_addr);
				prev_sg->next_pair_lower =
					lower_32_bits(dma_addr);
			}

			prev_sg = scu_sg;
			sg_idx++;
		}
	} else {	/* handle when no sg */
		scu_sg = scic_sds_request_get_sgl_element_pair(sds_request,
							       sg_idx);

		dma_addr = dma_map_single(&isci_host->pdev->dev,
					  task->scatter,
					  task->total_xfer_len,
					  task->data_dir);

		isci_request->zero_scatter_daddr = dma_addr;

		scu_sg->A.length = task->total_xfer_len;
		scu_sg->A.address_upper = upper_32_bits(dma_addr);
		scu_sg->A.address_lower = lower_32_bits(dma_addr);
	}

	if (scu_sg) {
		scu_sg->next_pair_upper = 0;
		scu_sg->next_pair_lower = 0;
	}
}
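
/*
 * As an illustration of the chain built above, a three-entry scatterlist
 * produces:
 *
 *   pair 0 (task context, sgl_pair_ab): A = sg[0], B = sg[1],
 *     next_pair_upper/lower = DMA address of pair 1
 *   pair 1 (task context, sgl_pair_cd): A = sg[2], B = zeroed,
 *     next_pair_upper/lower = 0 (end of chain)
 */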

static void scic_sds_io_request_build_ssp_command_iu(struct scic_sds_request *sci_req)
{
	struct ssp_cmd_iu *cmd_iu;
	struct isci_request *ireq = sci_req_to_ireq(sci_req);
	struct sas_task *task = isci_request_access_task(ireq);

	cmd_iu = &sci_req->ssp.cmd;

	memcpy(cmd_iu->LUN, task->ssp_task.LUN, 8);
	cmd_iu->add_cdb_len = 0;
	cmd_iu->_r_a = 0;
	cmd_iu->_r_b = 0;
	cmd_iu->en_fburst = 0; /* unsupported */
	cmd_iu->task_prio = task->ssp_task.task_prio;
	cmd_iu->task_attr = task->ssp_task.task_attr;
	cmd_iu->_r_c = 0;

	sci_swab32_cpy(&cmd_iu->cdb, task->ssp_task.cdb,
		       sizeof(task->ssp_task.cdb) / sizeof(u32));
}

static void scic_sds_task_request_build_ssp_task_iu(struct scic_sds_request *sci_req)
{
	struct ssp_task_iu *task_iu;
	struct isci_request *ireq = sci_req_to_ireq(sci_req);
	struct sas_task *task = isci_request_access_task(ireq);
	struct isci_tmf *isci_tmf = isci_request_access_tmf(ireq);

	task_iu = &sci_req->ssp.tmf;

	memset(task_iu, 0, sizeof(struct ssp_task_iu));

	memcpy(task_iu->LUN, task->ssp_task.LUN, 8);

	task_iu->task_func = isci_tmf->tmf_code;
	task_iu->task_tag =
		(ireq->ttype == tmf_task) ?
		isci_tmf->io_tag :
		SCI_CONTROLLER_INVALID_IO_TAG;
}

/**
 * This method will fill in the SCU Task Context for any type of SSP request.
 * @sds_request:
 * @task_context:
 *
 */
static void scu_ssp_request_construct_task_context(
	struct scic_sds_request *sds_request,
	struct scu_task_context *task_context)
{
	dma_addr_t dma_addr;
	struct scic_sds_controller *controller;
	struct scic_sds_remote_device *target_device;
	struct scic_sds_port *target_port;

	controller = scic_sds_request_get_controller(sds_request);
	target_device = scic_sds_request_get_device(sds_request);
	target_port = scic_sds_request_get_port(sds_request);

	/* Fill in the TC with its required data */
	task_context->abort = 0;
	task_context->priority = 0;
	task_context->initiator_request = 1;
	task_context->connection_rate = target_device->connection_rate;
	task_context->protocol_engine_index =
		scic_sds_controller_get_protocol_engine_group(controller);
	task_context->logical_port_index =
		scic_sds_port_get_index(target_port);
	task_context->protocol_type = SCU_TASK_CONTEXT_PROTOCOL_SSP;
	task_context->valid = SCU_TASK_CONTEXT_VALID;
	task_context->context_type = SCU_TASK_CONTEXT_TYPE;

	task_context->remote_node_index =
		scic_sds_remote_device_get_index(sds_request->target_device);
	task_context->command_code = 0;

	task_context->link_layer_control = 0;
	task_context->do_not_dma_ssp_good_response = 1;
	task_context->strict_ordering = 0;
	task_context->control_frame = 0;
	task_context->timeout_enable = 0;
	task_context->block_guard_enable = 0;

	task_context->address_modifier = 0;

	/* task_context->type.ssp.tag = sci_req->io_tag; */
	task_context->task_phase = 0x01;

	if (sds_request->was_tag_assigned_by_user) {
		/*
		 * Build the task context now since we have already read
		 * the data
		 */
		sds_request->post_context =
			(SCU_CONTEXT_COMMAND_REQUEST_TYPE_POST_TC |
			 (scic_sds_controller_get_protocol_engine_group(
							controller) <<
			  SCU_CONTEXT_COMMAND_PROTOCOL_ENGINE_GROUP_SHIFT) |
			 (scic_sds_port_get_index(target_port) <<
			  SCU_CONTEXT_COMMAND_LOGICAL_PORT_SHIFT) |
			 scic_sds_io_tag_get_index(sds_request->io_tag));
	} else {
		/*
		 * Build the task context now since we have already read
		 * the data
		 *
		 * I/O tag index is not assigned because we have to wait
		 * until we get a TCi
		 */
		sds_request->post_context =
			(SCU_CONTEXT_COMMAND_REQUEST_TYPE_POST_TC |
			 (scic_sds_controller_get_protocol_engine_group(
							controller) <<
			  SCU_CONTEXT_COMMAND_PROTOCOL_ENGINE_GROUP_SHIFT) |
			 (scic_sds_port_get_index(target_port) <<
			  SCU_CONTEXT_COMMAND_LOGICAL_PORT_SHIFT));
	}

	/*
	 * Copy the physical address for the command buffer to the
	 * SCU Task Context
	 */
	dma_addr = scic_io_request_get_dma_addr(sds_request,
						&sds_request->ssp.cmd);

	task_context->command_iu_upper = upper_32_bits(dma_addr);
	task_context->command_iu_lower = lower_32_bits(dma_addr);

	/*
	 * Copy the physical address for the response buffer to the
	 * SCU Task Context
	 */
	dma_addr = scic_io_request_get_dma_addr(sds_request,
						&sds_request->ssp.rsp);

	task_context->response_iu_upper = upper_32_bits(dma_addr);
	task_context->response_iu_lower = lower_32_bits(dma_addr);
}
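
/*
 * The post_context value built above is what is eventually posted to the
 * SCU: the POST_TC request type plus the protocol engine group and logical
 * port in their respective bit fields.  When the tag was not assigned by
 * the user, the TCi bits are OR-ed in later by
 * scic_sds_request_constructed_state_start_handler() once a tag has been
 * allocated.
 */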

/**
 * This method will fill in the SCU Task Context for an SSP IO request.
 * @sci_req:
 *
 */
static void scu_ssp_io_request_construct_task_context(
	struct scic_sds_request *sci_req,
	enum dma_data_direction dir,
	u32 len)
{
	struct scu_task_context *task_context;

	task_context = scic_sds_request_get_task_context(sci_req);

	scu_ssp_request_construct_task_context(sci_req, task_context);

	task_context->ssp_command_iu_length =
		sizeof(struct ssp_cmd_iu) / sizeof(u32);
	task_context->type.ssp.frame_type = SSP_COMMAND;

	switch (dir) {
	case DMA_FROM_DEVICE:
	case DMA_NONE:
	default:
		task_context->task_type = SCU_TASK_TYPE_IOREAD;
		break;
	case DMA_TO_DEVICE:
		task_context->task_type = SCU_TASK_TYPE_IOWRITE;
		break;
	}

	task_context->transfer_length_bytes = len;

	if (task_context->transfer_length_bytes > 0)
		scic_sds_request_build_sgl(sci_req);
}

/**
 * This method will fill in the SCU Task Context for an SSP Task request.
 * The following important settings are utilized:
 *
 *   -# priority == SCU_TASK_PRIORITY_HIGH.  This ensures that the task
 *      request is issued ahead of other tasks destined for the same
 *      Remote Node.
 *   -# task_type == SCU_TASK_TYPE_RAW_FRAME.  The task IU is transmitted
 *      as a raw frame rather than as a normal IO request.
 *   -# control_frame == 1.  This ensures that the proper endianness is set
 *      so that the bytes are transmitted in the right order for a task
 *      frame.
 * @sci_req: This parameter specifies the task request object being
 *    constructed.
 *
 */
static void scu_ssp_task_request_construct_task_context(
	struct scic_sds_request *sci_req)
{
	struct scu_task_context *task_context;

	task_context = scic_sds_request_get_task_context(sci_req);

	scu_ssp_request_construct_task_context(sci_req, task_context);

	task_context->control_frame = 1;
	task_context->priority = SCU_TASK_PRIORITY_HIGH;
	task_context->task_type = SCU_TASK_TYPE_RAW_FRAME;
	task_context->transfer_length_bytes = 0;
	task_context->type.ssp.frame_type = SSP_TASK;
	task_context->ssp_command_iu_length =
		sizeof(struct ssp_task_iu) / sizeof(u32);
}

/**
 * This method constructs the SATA request object.
 * @sci_req: This parameter specifies the request object to be constructed.
 * @len: This parameter specifies the transfer length for the request.
 * @dir: This parameter specifies the data direction for the request.
 * @copy: This parameter specifies whether the RX frame should be copied.
 *
 * enum sci_status
 */
static enum sci_status
scic_io_request_construct_sata(struct scic_sds_request *sci_req,
			       u32 len,
			       enum dma_data_direction dir,
			       bool copy)
{
	struct isci_request *ireq = sci_req_to_ireq(sci_req);
	struct sas_task *task = isci_request_access_task(ireq);

	/* check for management protocols */
	if (ireq->ttype == tmf_task) {
		struct isci_tmf *tmf = isci_request_access_tmf(ireq);

		if (tmf->tmf_code == isci_tmf_sata_srst_high ||
		    tmf->tmf_code == isci_tmf_sata_srst_low)
			return scic_sds_stp_soft_reset_request_construct(sci_req);

		dev_err(scic_to_dev(sci_req->owning_controller),
			"%s: Request 0x%p received un-handled SAT "
			"management protocol 0x%x.\n",
			__func__, sci_req, tmf->tmf_code);

		return SCI_FAILURE;
	}

	if (!sas_protocol_ata(task->task_proto)) {
		dev_err(scic_to_dev(sci_req->owning_controller),
			"%s: Non-ATA protocol in SATA path: 0x%x\n",
			__func__,
			task->task_proto);
		return SCI_FAILURE;
	}

	/* non data */
	if (task->data_dir == DMA_NONE)
		return scic_sds_stp_non_data_request_construct(sci_req);

	/* NCQ */
	if (task->ata_task.use_ncq)
		return scic_sds_stp_ncq_request_construct(sci_req, len, dir);

	/* DMA */
	if (task->ata_task.dma_xfer)
		return scic_sds_stp_udma_request_construct(sci_req, len, dir);

	/* PIO */
	return scic_sds_stp_pio_request_construct(sci_req, copy);
}
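
/*
 * To summarize the routing above: SATA management (soft reset) requests are
 * handled first; everything else is dispatched by transfer mode in priority
 * order: non-data, NCQ, direct DMA, and finally PIO.
 */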

static enum sci_status scic_io_request_construct_basic_ssp(struct scic_sds_request *sci_req)
{
	struct isci_request *ireq = sci_req_to_ireq(sci_req);
	struct sas_task *task = isci_request_access_task(ireq);

	sci_req->protocol = SCIC_SSP_PROTOCOL;

	scu_ssp_io_request_construct_task_context(sci_req,
						  task->data_dir,
						  task->total_xfer_len);

	scic_sds_io_request_build_ssp_command_iu(sci_req);

	sci_base_state_machine_change_state(
		&sci_req->state_machine,
		SCI_BASE_REQUEST_STATE_CONSTRUCTED);

	return SCI_SUCCESS;
}

enum sci_status scic_task_request_construct_ssp(
	struct scic_sds_request *sci_req)
{
	/* Construct the SSP Task SCU Task Context */
	scu_ssp_task_request_construct_task_context(sci_req);

	/* Fill in the SSP Task IU */
	scic_sds_task_request_build_ssp_task_iu(sci_req);

	sci_base_state_machine_change_state(&sci_req->state_machine,
					    SCI_BASE_REQUEST_STATE_CONSTRUCTED);

	return SCI_SUCCESS;
}

static enum sci_status scic_io_request_construct_basic_sata(struct scic_sds_request *sci_req)
{
	enum sci_status status;
	struct scic_sds_stp_request *stp_req;
	bool copy = false;
	struct isci_request *isci_request = sci_req_to_ireq(sci_req);
	struct sas_task *task = isci_request_access_task(isci_request);

	stp_req = &sci_req->stp.req;
	sci_req->protocol = SCIC_STP_PROTOCOL;

	copy = task->data_dir != DMA_NONE;

	status = scic_io_request_construct_sata(sci_req,
						task->total_xfer_len,
						task->data_dir,
						copy);

	if (status == SCI_SUCCESS)
		sci_base_state_machine_change_state(&sci_req->state_machine,
						    SCI_BASE_REQUEST_STATE_CONSTRUCTED);

	return status;
}

enum sci_status scic_task_request_construct_sata(struct scic_sds_request *sci_req)
{
	enum sci_status status = SCI_SUCCESS;
	struct isci_request *ireq = sci_req_to_ireq(sci_req);

	/* check for management protocols */
	if (ireq->ttype == tmf_task) {
		struct isci_tmf *tmf = isci_request_access_tmf(ireq);

		if (tmf->tmf_code == isci_tmf_sata_srst_high ||
		    tmf->tmf_code == isci_tmf_sata_srst_low) {
			status = scic_sds_stp_soft_reset_request_construct(sci_req);
		} else {
			dev_err(scic_to_dev(sci_req->owning_controller),
				"%s: Request 0x%p received un-handled SAT "
				"Protocol 0x%x.\n",
				__func__, sci_req, tmf->tmf_code);

			return SCI_FAILURE;
		}
	}

	if (status == SCI_SUCCESS)
		sci_base_state_machine_change_state(
			&sci_req->state_machine,
			SCI_BASE_REQUEST_STATE_CONSTRUCTED);

	return status;
}

/**
 * sci_req_tx_bytes - bytes transferred when reply underruns request
 * @sci_req: request that was terminated early
 */
#define SCU_TASK_CONTEXT_SRAM 0x200000
static u32 sci_req_tx_bytes(struct scic_sds_request *sci_req)
{
	struct scic_sds_controller *scic = sci_req->owning_controller;
	u32 ret_val = 0;

	if (readl(&scic->smu_registers->address_modifier) == 0) {
		void __iomem *scu_reg_base = scic->scu_registers;

		/* get the bytes of data from the Address == BAR1 + 0x20002C + (256 * TCi), where
		 * BAR1 is the scu_registers
		 * 0x20002C = 0x200000 + 0x2C
		 *          = start of task context SRAM + offset of (type.ssp.data_offset)
		 * TCi is the io_tag of struct scic_sds_request
		 */
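		/*
		 * Worked example, using the 256-byte task context size noted
		 * above: for TCi == 2 this reads from
		 * 0x200000 + (2 * 0x100) + 0x2C, i.e. BAR1 + 0x20022C.
		 */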
		ret_val = readl(scu_reg_base +
				(SCU_TASK_CONTEXT_SRAM + offsetof(struct scu_task_context, type.ssp.data_offset)) +
				((sizeof(struct scu_task_context)) * scic_sds_io_tag_get_index(sci_req->io_tag)));
	}

	return ret_val;
}

enum sci_status
scic_sds_request_start(struct scic_sds_request *request)
{
	if (request->device_sequence !=
	    scic_sds_remote_device_get_sequence(request->target_device))
		return SCI_FAILURE;

	if (request->state_handlers->start_handler)
		return request->state_handlers->start_handler(request);

	dev_warn(scic_to_dev(request->owning_controller),
		 "%s: SCIC IO Request requested to start while in wrong "
		 "state %d\n",
		 __func__,
		 sci_base_state_machine_get_state(&request->state_machine));

	return SCI_FAILURE_INVALID_STATE;
}

enum sci_status
scic_sds_io_request_terminate(struct scic_sds_request *request)
{
	if (request->state_handlers->abort_handler)
		return request->state_handlers->abort_handler(request);

	dev_warn(scic_to_dev(request->owning_controller),
		 "%s: SCIC IO Request requested to abort while in wrong "
		 "state %d\n",
		 __func__,
		 sci_base_state_machine_get_state(&request->state_machine));

	return SCI_FAILURE_INVALID_STATE;
}

enum sci_status scic_sds_io_request_event_handler(
	struct scic_sds_request *request,
	u32 event_code)
{
	if (request->state_handlers->event_handler)
		return request->state_handlers->event_handler(request, event_code);

	dev_warn(scic_to_dev(request->owning_controller),
		 "%s: SCIC IO Request given event code notification %x while "
		 "in wrong state %d\n",
		 __func__,
		 event_code,
		 sci_base_state_machine_get_state(&request->state_machine));

	return SCI_FAILURE_INVALID_STATE;
}

/**
 * This method invokes the core state frame handler for the
 * SCIC_SDS_IO_REQUEST_T object.
 * @request: The SCIC_SDS_IO_REQUEST_T object for which the frame
 *    handler is to be executed.
 * @frame_index: The frame index returned by the hardware for the request
 *    object.
 *
 * enum sci_status
 */
enum sci_status scic_sds_io_request_frame_handler(
	struct scic_sds_request *request,
	u32 frame_index)
{
	if (request->state_handlers->frame_handler)
		return request->state_handlers->frame_handler(request, frame_index);

	dev_warn(scic_to_dev(request->owning_controller),
		 "%s: SCIC IO Request given unexpected frame %x while in "
		 "state %d\n",
		 __func__,
		 frame_index,
		 sci_base_state_machine_get_state(&request->state_machine));

	scic_sds_controller_release_frame(request->owning_controller, frame_index);
	return SCI_FAILURE_INVALID_STATE;
}

/*
 * This function copies response data for requests returning response data
 * instead of sense data.
 * @sci_req: This parameter specifies the request object for which to copy
 *    the response data.
 */
static void scic_sds_io_request_copy_response(struct scic_sds_request *sci_req)
{
	void *resp_buf;
	u32 len;
	struct ssp_response_iu *ssp_response;
	struct isci_request *ireq = sci_req_to_ireq(sci_req);
	struct isci_tmf *isci_tmf = isci_request_access_tmf(ireq);

	ssp_response = &sci_req->ssp.rsp;

	resp_buf = &isci_tmf->resp.resp_iu;

	len = min_t(u32,
		    SSP_RESP_IU_MAX_SIZE,
		    be32_to_cpu(ssp_response->response_data_len));

	memcpy(resp_buf, ssp_response->resp_data, len);
}
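
/*
 * Note that response_data_len arrives in (big-endian) wire order, hence the
 * be32_to_cpu() above; the length is also capped at SSP_RESP_IU_MAX_SIZE so
 * the copy cannot overrun the TMF response buffer.
 */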

/*
 * This method implements the action taken when a constructed
 * SCIC_SDS_IO_REQUEST_T object receives a scic_sds_request_start() request.
 * This method will, if necessary, allocate a TCi for the io request object
 * and then will, if necessary, copy the constructed TC data into the actual
 * TC buffer.  If everything is successful the post context field is updated
 * with the TCi so the controller can post the request to the hardware.
 * enum sci_status SCI_SUCCESS SCI_FAILURE_INSUFFICIENT_RESOURCES
 */
static enum sci_status scic_sds_request_constructed_state_start_handler(
	struct scic_sds_request *request)
{
	struct scu_task_context *task_context;

	if (request->io_tag == SCI_CONTROLLER_INVALID_IO_TAG) {
		request->io_tag =
			scic_controller_allocate_io_tag(request->owning_controller);
	}

	/* Record the IO Tag in the request */
	if (request->io_tag != SCI_CONTROLLER_INVALID_IO_TAG) {
		task_context = request->task_context_buffer;

		task_context->task_index = scic_sds_io_tag_get_index(request->io_tag);

		switch (task_context->protocol_type) {
		case SCU_TASK_CONTEXT_PROTOCOL_SMP:
		case SCU_TASK_CONTEXT_PROTOCOL_SSP:
			/* SSP/SMP Frame */
			task_context->type.ssp.tag = request->io_tag;
			task_context->type.ssp.target_port_transfer_tag = 0xFFFF;
			break;

		case SCU_TASK_CONTEXT_PROTOCOL_STP:
			/*
			 * STP/SATA Frame
			 * task_context->type.stp.ncq_tag = request->ncq_tag; */
			break;

		case SCU_TASK_CONTEXT_PROTOCOL_NONE:
			/* TODO: When do we set no protocol type? */
			break;

		default:
			/* This should never happen since we build the IO requests */
			break;
		}

		/*
		 * Check to see if we need to copy the task context buffer
		 * or have been building into the task context buffer */
		if (!request->was_tag_assigned_by_user) {
			scic_sds_controller_copy_task_context(
				request->owning_controller, request);
		}

		/* Add the io tag value to the post_context */
		request->post_context |= scic_sds_io_tag_get_index(request->io_tag);

		/* Everything is good; go ahead and change state */
		sci_base_state_machine_change_state(&request->state_machine,
						    SCI_BASE_REQUEST_STATE_STARTED);

		return SCI_SUCCESS;
	}

	return SCI_FAILURE_INSUFFICIENT_RESOURCES;
}

/*
 * This method implements the action to be taken when an SCIC_SDS_IO_REQUEST_T
 * object receives a scic_sds_request_terminate() request.  Since the request
 * has not yet been posted to the hardware the request transitions to the
 * completed state.  enum sci_status SCI_SUCCESS
 */
static enum sci_status scic_sds_request_constructed_state_abort_handler(
	struct scic_sds_request *request)
{
	/*
	 * This request has been terminated by the user; make sure that the
	 * correct status code is returned */
	scic_sds_request_set_status(request,
				    SCU_TASK_DONE_TASK_ABORT,
				    SCI_FAILURE_IO_TERMINATED);

	sci_base_state_machine_change_state(&request->state_machine,
					    SCI_BASE_REQUEST_STATE_COMPLETED);
	return SCI_SUCCESS;
}

/*
 * *****************************************************************************
 * *  STARTED STATE HANDLERS
 * ***************************************************************************** */

/*
 * This method implements the action to be taken when an SCIC_SDS_IO_REQUEST_T
 * object receives a scic_sds_request_terminate() request.  Since the request
 * has been posted to the hardware the io request state is changed to the
 * aborting state.  enum sci_status SCI_SUCCESS
 */
enum sci_status scic_sds_request_started_state_abort_handler(
	struct scic_sds_request *request)
{
	if (request->has_started_substate_machine)
		sci_base_state_machine_stop(&request->started_substate_machine);

	sci_base_state_machine_change_state(&request->state_machine,
					    SCI_BASE_REQUEST_STATE_ABORTING);
	return SCI_SUCCESS;
}

/*
 * scic_sds_request_started_state_tc_completion_handler() - This method
 *    processes TC (task context) completions for normal IO requests (i.e.
 *    Task/Abort Completions of type 0).  This method will update the
 *    SCIC_SDS_IO_REQUEST_T::status field.
 * @sci_req: This parameter specifies the request for which a completion
 *    occurred.
 * @completion_code: This parameter specifies the completion code received
 *    from the SCU.
 *
 */
static enum sci_status
scic_sds_request_started_state_tc_completion_handler(struct scic_sds_request *sci_req,
						     u32 completion_code)
{
	u8 datapres;
	struct ssp_response_iu *resp_iu;

	/*
	 * TODO: Any SDMA return code of other than 0 is bad
	 *       decode 0x003C0000 to determine SDMA status
	 */
	switch (SCU_GET_COMPLETION_TL_STATUS(completion_code)) {
	case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_GOOD):
		scic_sds_request_set_status(sci_req,
					    SCU_TASK_DONE_GOOD,
					    SCI_SUCCESS);
		break;

	case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_EARLY_RESP):
	{
		/*
		 * There are times when the SCU hardware will return an early
		 * response because the io request specified more data than is
		 * returned by the target device (mode pages, inquiry data,
		 * etc.).  We must check the response status to see if this is
		 * truly a failed request or a good request that just got
		 * completed early.
		 */
		struct ssp_response_iu *resp = &sci_req->ssp.rsp;
		ssize_t word_cnt = SSP_RESP_IU_MAX_SIZE / sizeof(u32);

		sci_swab32_cpy(&sci_req->ssp.rsp,
			       &sci_req->ssp.rsp,
			       word_cnt);

		if (resp->status == 0) {
			scic_sds_request_set_status(
				sci_req,
				SCU_TASK_DONE_GOOD,
				SCI_SUCCESS_IO_DONE_EARLY);
		} else {
			scic_sds_request_set_status(
				sci_req,
				SCU_TASK_DONE_CHECK_RESPONSE,
				SCI_FAILURE_IO_RESPONSE_VALID);
		}
	}
	break;

	case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_CHECK_RESPONSE):
	{
		ssize_t word_cnt = SSP_RESP_IU_MAX_SIZE / sizeof(u32);

		sci_swab32_cpy(&sci_req->ssp.rsp,
			       &sci_req->ssp.rsp,
			       word_cnt);

		scic_sds_request_set_status(sci_req,
					    SCU_TASK_DONE_CHECK_RESPONSE,
					    SCI_FAILURE_IO_RESPONSE_VALID);
		break;
	}

	case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_RESP_LEN_ERR):
		/*
		 * TODO: With TASK_DONE_RESP_LEN_ERR, is the response frame
		 * guaranteed to be received before this completion status is
		 * posted?
		 */
		resp_iu = &sci_req->ssp.rsp;
		datapres = resp_iu->datapres;
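		/*
		 * Per the SSP response IU definition, DATAPRES == 0x01 means
		 * RESPONSE_DATA is present and 0x02 means SENSE_DATA is
		 * present; in either case the response IU carries valid
		 * status information to decode.
		 */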

		if ((datapres == 0x01) || (datapres == 0x02)) {
			scic_sds_request_set_status(
				sci_req,
				SCU_TASK_DONE_CHECK_RESPONSE,
				SCI_FAILURE_IO_RESPONSE_VALID);
		} else
			scic_sds_request_set_status(
				sci_req, SCU_TASK_DONE_GOOD, SCI_SUCCESS);
		break;

	/* only stp devices get suspended. */
	case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_ACK_NAK_TO):
	case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_LL_PERR):
	case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_NAK_ERR):
	case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_DATA_LEN_ERR):
	case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_LL_ABORT_ERR):
	case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_XR_WD_LEN):
	case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_MAX_PLD_ERR):
	case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_UNEXP_RESP):
	case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_UNEXP_SDBFIS):
	case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_REG_ERR):
	case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_SDB_ERR):
		if (sci_req->protocol == SCIC_STP_PROTOCOL) {
			scic_sds_request_set_status(
				sci_req,
				SCU_GET_COMPLETION_TL_STATUS(completion_code) >>
				SCU_COMPLETION_TL_STATUS_SHIFT,
				SCI_FAILURE_REMOTE_DEVICE_RESET_REQUIRED);
		} else {
			scic_sds_request_set_status(
				sci_req,
				SCU_GET_COMPLETION_TL_STATUS(completion_code) >>
				SCU_COMPLETION_TL_STATUS_SHIFT,
				SCI_FAILURE_CONTROLLER_SPECIFIC_IO_ERR);
		}
		break;

	/* both stp/ssp devices get suspended */
	case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_LF_ERR):
	case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_OPEN_REJECT_WRONG_DESTINATION):
	case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_OPEN_REJECT_RESERVED_ABANDON_1):
	case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_OPEN_REJECT_RESERVED_ABANDON_2):
	case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_OPEN_REJECT_RESERVED_ABANDON_3):
	case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_OPEN_REJECT_BAD_DESTINATION):
	case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_OPEN_REJECT_ZONE_VIOLATION):
	case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_OPEN_REJECT_STP_RESOURCES_BUSY):
	case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_OPEN_REJECT_PROTOCOL_NOT_SUPPORTED):
	case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_OPEN_REJECT_CONNECTION_RATE_NOT_SUPPORTED):
		scic_sds_request_set_status(
			sci_req,
			SCU_GET_COMPLETION_TL_STATUS(completion_code) >>
			SCU_COMPLETION_TL_STATUS_SHIFT,
			SCI_FAILURE_REMOTE_DEVICE_RESET_REQUIRED);
		break;

	/* neither ssp nor stp gets suspended. */
	case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_NAK_CMD_ERR):
	case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_UNEXP_XR):
	case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_XR_IU_LEN_ERR):
	case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_SDMA_ERR):
	case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_OFFSET_ERR):
	case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_EXCESS_DATA):
	case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_SMP_RESP_TO_ERR):
	case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_SMP_UFI_ERR):
	case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_SMP_FRM_TYPE_ERR):
	case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_SMP_LL_RX_ERR):
	case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_UNEXP_DATA):
	case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_OPEN_FAIL):
	case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_VIIT_ENTRY_NV):
	case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_IIT_ENTRY_NV):
	case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_RNCNV_OUTBOUND):
	default:
		scic_sds_request_set_status(
			sci_req,
			SCU_GET_COMPLETION_TL_STATUS(completion_code) >>
			SCU_COMPLETION_TL_STATUS_SHIFT,
			SCI_FAILURE_CONTROLLER_SPECIFIC_IO_ERR);
		break;
	}

	/*
	 * TODO: This is probably wrong for ACK/NAK timeout conditions
	 */

	/* In all cases we will treat this as the completion of the IO req. */
	sci_base_state_machine_change_state(
		&sci_req->state_machine,
		SCI_BASE_REQUEST_STATE_COMPLETED);
	return SCI_SUCCESS;
}

enum sci_status
scic_sds_io_request_tc_completion(struct scic_sds_request *request, u32 completion_code)
{
	if (request->state_machine.current_state_id == SCI_BASE_REQUEST_STATE_STARTED &&
	    !request->has_started_substate_machine)
		return scic_sds_request_started_state_tc_completion_handler(request, completion_code);
	else if (request->state_handlers->tc_completion_handler)
		return request->state_handlers->tc_completion_handler(request, completion_code);

	dev_warn(scic_to_dev(request->owning_controller),
		 "%s: SCIC IO Request given task completion notification %x "
		 "while in wrong state %d\n",
		 __func__,
		 completion_code,
		 sci_base_state_machine_get_state(&request->state_machine));

	return SCI_FAILURE_INVALID_STATE;
}

/*
 * This method implements the action to be taken when an SCIC_SDS_IO_REQUEST_T
 * object receives a scic_sds_request_frame_handler() request.  This method
 * first determines the frame type received.  If this is a response frame then
 * the response data is copied to the io request response buffer for processing
 * at completion time.  If the frame type is not a response frame an error is
 * logged.  enum sci_status SCI_SUCCESS SCI_FAILURE_INVALID_PARAMETER_VALUE
 */
static enum sci_status
scic_sds_request_started_state_frame_handler(struct scic_sds_request *sci_req,
					     u32 frame_index)
{
	enum sci_status status;
	u32 *frame_header;
	struct ssp_frame_hdr ssp_hdr;
	ssize_t word_cnt;

	status = scic_sds_unsolicited_frame_control_get_header(
		&(scic_sds_request_get_controller(sci_req)->uf_control),
		frame_index,
		(void **)&frame_header);

	word_cnt = sizeof(struct ssp_frame_hdr) / sizeof(u32);
	sci_swab32_cpy(&ssp_hdr, frame_header, word_cnt);

	if (ssp_hdr.frame_type == SSP_RESPONSE) {
		struct ssp_response_iu *resp_iu;
		ssize_t word_cnt = SSP_RESP_IU_MAX_SIZE / sizeof(u32);

		status = scic_sds_unsolicited_frame_control_get_buffer(
			&(scic_sds_request_get_controller(sci_req)->uf_control),
			frame_index,
			(void **)&resp_iu);

		sci_swab32_cpy(&sci_req->ssp.rsp,
			       resp_iu, word_cnt);

		resp_iu = &sci_req->ssp.rsp;

		if ((resp_iu->datapres == 0x01) ||
		    (resp_iu->datapres == 0x02)) {
			scic_sds_request_set_status(
				sci_req,
				SCU_TASK_DONE_CHECK_RESPONSE,
				SCI_FAILURE_CONTROLLER_SPECIFIC_IO_ERR);
		} else
			scic_sds_request_set_status(
				sci_req, SCU_TASK_DONE_GOOD, SCI_SUCCESS);
	} else {
		/* This was not a response frame; why did it get forwarded? */
		dev_err(scic_to_dev(sci_req->owning_controller),
			"%s: SCIC IO Request 0x%p received unexpected "
			"frame %d type 0x%02x\n",
			__func__,
			sci_req,
			frame_index,
			ssp_hdr.frame_type);
	}

	/*
	 * In any case we are done with this frame buffer; return it to the
	 * controller
	 */
	scic_sds_controller_release_frame(
		sci_req->owning_controller, frame_index);

	return SCI_SUCCESS;
}

/*
 * *****************************************************************************
 * *  COMPLETED STATE HANDLERS
 * ***************************************************************************** */


/*
 * This method implements the action to be taken when an SCIC_SDS_IO_REQUEST_T
 * object receives a scic_sds_request_complete() request.  This method frees up
 * any io request resources that have been allocated and transitions the
 * request to its final state.  Consider stopping the state machine instead of
 * transitioning to the final state?  enum sci_status SCI_SUCCESS
 */
static enum sci_status scic_sds_request_completed_state_complete_handler(
	struct scic_sds_request *request)
{
	if (!request->was_tag_assigned_by_user) {
		scic_controller_free_io_tag(
			request->owning_controller, request->io_tag);
	}

	if (request->saved_rx_frame_index != SCU_INVALID_FRAME_INDEX) {
		scic_sds_controller_release_frame(
			request->owning_controller, request->saved_rx_frame_index);
	}

	sci_base_state_machine_change_state(&request->state_machine,
					    SCI_BASE_REQUEST_STATE_FINAL);
	return SCI_SUCCESS;
}

/*
 * *****************************************************************************
 * *  ABORTING STATE HANDLERS
 * ***************************************************************************** */

/*
 * This method implements the action to be taken when an SCIC_SDS_IO_REQUEST_T
 * object receives a scic_sds_request_terminate() request.  This method is the
 * io request aborting state abort handler.  On receipt of multiple terminate
 * requests the io request will transition to the completed state.  This
 * should not happen in normal operation.  enum sci_status SCI_SUCCESS
 */
static enum sci_status scic_sds_request_aborting_state_abort_handler(
	struct scic_sds_request *request)
{
	sci_base_state_machine_change_state(&request->state_machine,
					    SCI_BASE_REQUEST_STATE_COMPLETED);
	return SCI_SUCCESS;
}

/*
 * This method implements the action to be taken when an SCIC_SDS_IO_REQUEST_T
 * object receives a scic_sds_request_task_completion() request.  This method
 * decodes the completion type while waiting for the abort task complete
 * notification.  When the abort task complete is received the io request
 * transitions to the completed state.  enum sci_status SCI_SUCCESS
 */
static enum sci_status scic_sds_request_aborting_state_tc_completion_handler(
	struct scic_sds_request *sci_req,
	u32 completion_code)
{
	switch (SCU_GET_COMPLETION_TL_STATUS(completion_code)) {
	case (SCU_TASK_DONE_GOOD << SCU_COMPLETION_TL_STATUS_SHIFT):
	case (SCU_TASK_DONE_TASK_ABORT << SCU_COMPLETION_TL_STATUS_SHIFT):
		scic_sds_request_set_status(
			sci_req, SCU_TASK_DONE_TASK_ABORT, SCI_FAILURE_IO_TERMINATED
			);

		sci_base_state_machine_change_state(&sci_req->state_machine,
						    SCI_BASE_REQUEST_STATE_COMPLETED);
		break;

	default:
		/*
		 * Unless we get some strange error, wait for the task abort
		 * to complete.
		 * TODO: Should there be a state change for this completion? */
		break;
	}

	return SCI_SUCCESS;
}

/*
 * This method implements the action to be taken when an SCIC_SDS_IO_REQUEST_T
 * object receives a scic_sds_request_frame_handler() request.  This method
 * discards the unsolicited frame since we are waiting for the abort task
 * completion.  enum sci_status SCI_SUCCESS
 */
static enum sci_status scic_sds_request_aborting_state_frame_handler(
	struct scic_sds_request *sci_req,
	u32 frame_index)
{
	/* TODO: Is it even possible to get an unsolicited frame in the aborting state? */

	scic_sds_controller_release_frame(
		sci_req->owning_controller, frame_index);

	return SCI_SUCCESS;
}

/**
 * This method processes the completion's transport layer (TL) status to
 * determine if the RAW task management frame was sent successfully.  If the
 * raw frame was sent successfully, then the state for the task request
 * transitions to waiting for a response frame.
 * @sci_req: This parameter specifies the request for which the TC
 *    completion was received.
 * @completion_code: This parameter indicates the completion status information
 *    for the TC.
 *
 * Indicates if the TC completion handler was successful.  SCI_SUCCESS:
 * currently this method always returns success.
 */
static enum sci_status scic_sds_ssp_task_request_await_tc_completion_tc_completion_handler(
	struct scic_sds_request *sci_req,
	u32 completion_code)
{
	switch (SCU_GET_COMPLETION_TL_STATUS(completion_code)) {
	case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_GOOD):
		scic_sds_request_set_status(sci_req, SCU_TASK_DONE_GOOD,
					    SCI_SUCCESS);

		sci_base_state_machine_change_state(&sci_req->state_machine,
						    SCIC_SDS_IO_REQUEST_STARTED_TASK_MGMT_SUBSTATE_AWAIT_TC_RESPONSE);
		break;

	case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_ACK_NAK_TO):
		/*
		 * Currently, the decision is to simply allow the task request
		 * to timeout if the task IU wasn't received successfully.
		 * There is a potential for receiving multiple task responses
		 * if we decide to send the task IU again. */
		dev_warn(scic_to_dev(sci_req->owning_controller),
			 "%s: TaskRequest:0x%p CompletionCode:%x - "
			 "ACK/NAK timeout\n",
			 __func__,
			 sci_req,
			 completion_code);

		sci_base_state_machine_change_state(&sci_req->state_machine,
						    SCIC_SDS_IO_REQUEST_STARTED_TASK_MGMT_SUBSTATE_AWAIT_TC_RESPONSE);
		break;

	default:
		/*
		 * All other completion statuses cause the IO to be complete.
		 * If a NAK was received, then it is up to the user to retry
		 * the request. */
		scic_sds_request_set_status(
			sci_req,
			SCU_NORMALIZE_COMPLETION_STATUS(completion_code),
			SCI_FAILURE_CONTROLLER_SPECIFIC_IO_ERR
			);

		sci_base_state_machine_change_state(&sci_req->state_machine,
						    SCI_BASE_REQUEST_STATE_COMPLETED);
		break;
	}

	return SCI_SUCCESS;
}

/**
 * This method is responsible for processing a terminate/abort request for this
 * TC while the request is waiting for the task management response
 * unsolicited frame.
 * @sci_req: This parameter specifies the request for which the
 *    termination was requested.
 *
 * This method returns an indication as to whether the abort request was
 * successfully handled.  TODO: need to update to ensure the received UF
 * doesn't cause damage to subsequent requests (i.e. put the extended tag in
 * a holding pattern for this particular device).
 */
static enum sci_status scic_sds_ssp_task_request_await_tc_response_abort_handler(
	struct scic_sds_request *request)
{
	sci_base_state_machine_change_state(&request->state_machine,
					    SCI_BASE_REQUEST_STATE_ABORTING);
	sci_base_state_machine_change_state(&request->state_machine,
					    SCI_BASE_REQUEST_STATE_COMPLETED);
	return SCI_SUCCESS;
}

/**
 * This method processes an unsolicited frame while the task mgmt request is
 * waiting for a response frame.  It will copy the response data, release
 * the unsolicited frame, and transition the request to the
 * SCI_BASE_REQUEST_STATE_COMPLETED state.
 * @sci_req: This parameter specifies the request for which the
 *    unsolicited frame was received.
 * @frame_index: This parameter indicates the unsolicited frame index that
 *    should contain the response.
 *
 * This method returns an indication of whether the TC response frame was
 * handled successfully or not.  SCI_SUCCESS: currently this value is always
 * returned and indicates successful processing of the TC response.  Should
 * probably update to check frame type and make sure it is a response frame.
 */
static enum sci_status scic_sds_ssp_task_request_await_tc_response_frame_handler(
	struct scic_sds_request *request,
	u32 frame_index)
{
	scic_sds_io_request_copy_response(request);

	sci_base_state_machine_change_state(&request->state_machine,
					    SCI_BASE_REQUEST_STATE_COMPLETED);
	scic_sds_controller_release_frame(request->owning_controller,
					  frame_index);
	return SCI_SUCCESS;
}

/**
 * This method processes an abnormal TC completion while the SMP request is
 * waiting for a response frame.  It decides what happened to the IO based
 * on TC completion status.
 * @sci_req: This parameter specifies the request for which the TC
 *    completion was received.
 * @completion_code: This parameter indicates the completion status information
 *    for the TC.
 *
 * Indicates if the TC completion handler was successful.  SCI_SUCCESS:
 * currently this method always returns success.
 */
static enum sci_status scic_sds_smp_request_await_response_tc_completion_handler(
	struct scic_sds_request *sci_req,
	u32 completion_code)
{
	switch (SCU_GET_COMPLETION_TL_STATUS(completion_code)) {
	case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_GOOD):
		/*
		 * In the AWAIT RESPONSE state, any TC completion is
		 * unexpected, but if the TC has success status, we complete
		 * the IO anyway. */
		scic_sds_request_set_status(
			sci_req, SCU_TASK_DONE_GOOD, SCI_SUCCESS
			);

		sci_base_state_machine_change_state(
			&sci_req->state_machine,
			SCI_BASE_REQUEST_STATE_COMPLETED);
		break;

	case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_SMP_RESP_TO_ERR):
	case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_SMP_UFI_ERR):
	case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_SMP_FRM_TYPE_ERR):
	case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_SMP_LL_RX_ERR):
		/*
		 * These statuses have been seen in a specific LSI expander,
		 * which sometimes is not able to send an smp response within
		 * 2 ms.  This causes our hardware to break the connection and
		 * set the TC completion with one of these SMP_XXX_XX_ERR
		 * statuses.  For this type of error, we ask the scic user to
		 * retry the request. */
		scic_sds_request_set_status(
			sci_req, SCU_TASK_DONE_SMP_RESP_TO_ERR, SCI_FAILURE_RETRY_REQUIRED
			);

		sci_base_state_machine_change_state(
			&sci_req->state_machine,
			SCI_BASE_REQUEST_STATE_COMPLETED);
		break;

	default:
		/*
		 * All other completion statuses cause the IO to be complete.
		 * If a NAK was received, then it is up to the user to retry
		 * the request. */
		scic_sds_request_set_status(
			sci_req,
			SCU_NORMALIZE_COMPLETION_STATUS(completion_code),
			SCI_FAILURE_CONTROLLER_SPECIFIC_IO_ERR
			);

		sci_base_state_machine_change_state(
			&sci_req->state_machine,
			SCI_BASE_REQUEST_STATE_COMPLETED);
		break;
	}

	return SCI_SUCCESS;
}

/*
 * This function processes an unsolicited frame while the SMP request is
 * waiting for a response frame.  It will copy the response data, release the
 * unsolicited frame, and transition the request to the
 * SCI_BASE_REQUEST_STATE_COMPLETED state.
 * @sci_req: This parameter specifies the request for which the
 *    unsolicited frame was received.
 * @frame_index: This parameter indicates the unsolicited frame index that
 *    should contain the response.
 *
 * This function returns an indication of whether the response frame was
 * handled successfully or not.  SCI_SUCCESS: currently this value is always
 * returned and indicates successful processing of the TC response.
 */
static enum sci_status
scic_sds_smp_request_await_response_frame_handler(struct scic_sds_request *sci_req,
						  u32 frame_index)
{
	enum sci_status status;
	void *frame_header;
	struct smp_resp *rsp_hdr = &sci_req->smp.rsp;
	ssize_t word_cnt = SMP_RESP_HDR_SZ / sizeof(u32);

	status = scic_sds_unsolicited_frame_control_get_header(
		&(scic_sds_request_get_controller(sci_req)->uf_control),
		frame_index,
		&frame_header);

	/* byte swap the header. */
	sci_swab32_cpy(rsp_hdr, frame_header, word_cnt);

	if (rsp_hdr->frame_type == SMP_RESPONSE) {
		void *smp_resp;

		status = scic_sds_unsolicited_frame_control_get_buffer(
			&(scic_sds_request_get_controller(sci_req)->uf_control),
			frame_index,
			&smp_resp);

		word_cnt = (sizeof(struct smp_req) - SMP_RESP_HDR_SZ) /
			sizeof(u32);

		sci_swab32_cpy(((u8 *) rsp_hdr) + SMP_RESP_HDR_SZ,
			       smp_resp, word_cnt);

		scic_sds_request_set_status(
			sci_req, SCU_TASK_DONE_GOOD, SCI_SUCCESS);

		sci_base_state_machine_change_state(&sci_req->state_machine,
						    SCIC_SDS_SMP_REQUEST_STARTED_SUBSTATE_AWAIT_TC_COMPLETION);
	} else {
		/* This was not a response frame; why did it get forwarded? */
		dev_err(scic_to_dev(sci_req->owning_controller),
			"%s: SCIC SMP Request 0x%p received unexpected frame "
			"%d type 0x%02x\n",
			__func__,
			sci_req,
			frame_index,
			rsp_hdr->frame_type);

		scic_sds_request_set_status(
			sci_req,
			SCU_TASK_DONE_SMP_FRM_TYPE_ERR,
			SCI_FAILURE_CONTROLLER_SPECIFIC_IO_ERR);

		sci_base_state_machine_change_state(
			&sci_req->state_machine,
			SCI_BASE_REQUEST_STATE_COMPLETED);
	}

	scic_sds_controller_release_frame(sci_req->owning_controller,
					  frame_index);

	return SCI_SUCCESS;
}

/**
 * This method processes the completion's transport layer (TL) status to
 * determine if the SMP request was sent successfully.  If the SMP request
 * was sent successfully, then the state for the SMP request transitions to
 * waiting for a response frame.
 * @sci_req: This parameter specifies the request for which the TC
 *    completion was received.
 * @completion_code: This parameter indicates the completion status information
 *    for the TC.
 *
 * Indicates if the TC completion handler was successful.  SCI_SUCCESS:
 * currently this method always returns success.
 */
static enum sci_status scic_sds_smp_request_await_tc_completion_tc_completion_handler(
	struct scic_sds_request *sci_req,
	u32 completion_code)
{
	switch (SCU_GET_COMPLETION_TL_STATUS(completion_code)) {
	case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_GOOD):
		scic_sds_request_set_status(
			sci_req, SCU_TASK_DONE_GOOD, SCI_SUCCESS
			);

		sci_base_state_machine_change_state(
			&sci_req->state_machine,
			SCI_BASE_REQUEST_STATE_COMPLETED);
		break;

	default:
		/*
		 * All other completion statuses cause the IO to be complete.
		 * If a NAK was received, then it is up to the user to retry
		 * the request. */
		scic_sds_request_set_status(
			sci_req,
			SCU_NORMALIZE_COMPLETION_STATUS(completion_code),
			SCI_FAILURE_CONTROLLER_SPECIFIC_IO_ERR
			);

		sci_base_state_machine_change_state(
			&sci_req->state_machine,
			SCI_BASE_REQUEST_STATE_COMPLETED);
		break;
	}

	return SCI_SUCCESS;
}

static const struct scic_sds_io_request_state_handler scic_sds_request_state_handler_table[] = {
	[SCI_BASE_REQUEST_STATE_INITIAL] = { },
	[SCI_BASE_REQUEST_STATE_CONSTRUCTED] = {
		.start_handler = scic_sds_request_constructed_state_start_handler,
		.abort_handler = scic_sds_request_constructed_state_abort_handler,
	},
	[SCI_BASE_REQUEST_STATE_STARTED] = {
		.abort_handler = scic_sds_request_started_state_abort_handler,
		.tc_completion_handler = scic_sds_request_started_state_tc_completion_handler,
		.frame_handler = scic_sds_request_started_state_frame_handler,
	},
	[SCIC_SDS_IO_REQUEST_STARTED_TASK_MGMT_SUBSTATE_AWAIT_TC_COMPLETION] = {
		.abort_handler = scic_sds_request_started_state_abort_handler,
		.tc_completion_handler = scic_sds_ssp_task_request_await_tc_completion_tc_completion_handler,
	},
	[SCIC_SDS_IO_REQUEST_STARTED_TASK_MGMT_SUBSTATE_AWAIT_TC_RESPONSE] = {
		.abort_handler = scic_sds_ssp_task_request_await_tc_response_abort_handler,
		.frame_handler = scic_sds_ssp_task_request_await_tc_response_frame_handler,
	},
	[SCIC_SDS_SMP_REQUEST_STARTED_SUBSTATE_AWAIT_RESPONSE] = {
		.abort_handler = scic_sds_request_started_state_abort_handler,
		.tc_completion_handler = scic_sds_smp_request_await_response_tc_completion_handler,
		.frame_handler = scic_sds_smp_request_await_response_frame_handler,
	},
	[SCIC_SDS_SMP_REQUEST_STARTED_SUBSTATE_AWAIT_TC_COMPLETION] = {
		.abort_handler = scic_sds_request_started_state_abort_handler,
		.tc_completion_handler = scic_sds_smp_request_await_tc_completion_tc_completion_handler,
	},
	[SCI_BASE_REQUEST_STATE_COMPLETED] = {
		.complete_handler = scic_sds_request_completed_state_complete_handler,
	},
	[SCI_BASE_REQUEST_STATE_ABORTING] = {
		.abort_handler = scic_sds_request_aborting_state_abort_handler,
		.tc_completion_handler = scic_sds_request_aborting_state_tc_completion_handler,
		.frame_handler = scic_sds_request_aborting_state_frame_handler,
	},
	[SCI_BASE_REQUEST_STATE_FINAL] = { },
};
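
/*
 * A state that leaves a handler pointer NULL in the table above (e.g. the
 * INITIAL and FINAL states) causes the corresponding dispatch routine
 * (scic_sds_request_start(), scic_sds_io_request_terminate(),
 * scic_sds_io_request_event_handler(), etc.) to log a warning and return
 * SCI_FAILURE_INVALID_STATE.
 */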

/**
 * isci_request_process_response_iu() - This function sets the status and
 *    response iu, in the task struct, from the request object for the upper
 *    layer driver.
 * @task: This parameter is the task struct from the upper layer driver.
 * @resp_iu: This parameter points to the response iu of the completed request.
 * @dev: This parameter specifies the linux device struct.
 *
 * none.
 */
static void isci_request_process_response_iu(
	struct sas_task *task,
	struct ssp_response_iu *resp_iu,
	struct device *dev)
{
	dev_dbg(dev,
		"%s: resp_iu = %p "
		"resp_iu->status = 0x%x,\nresp_iu->datapres = %d "
		"resp_iu->response_data_len = %x, "
		"resp_iu->sense_data_len = %x\nresponse data: ",
		__func__,
		resp_iu,
		resp_iu->status,
		resp_iu->datapres,
		resp_iu->response_data_len,
		resp_iu->sense_data_len);

	task->task_status.stat = resp_iu->status;

	/* libsas updates the task status fields based on the response iu. */
	sas_ssp_task_response(dev, task, resp_iu);
}

/**
 * isci_request_set_open_reject_status() - This function prepares the I/O
 *    completion for OPEN_REJECT conditions.
 * @request: This parameter is the completed isci_request object.
 * @task: This parameter is the task struct from the upper layer driver.
 * @response_ptr: This parameter specifies the service response for the I/O.
 * @status_ptr: This parameter specifies the exec status for the I/O.
 * @complete_to_host_ptr: This parameter specifies the action to be taken by
 *    the LLDD with respect to completing this request or forcing an abort
 *    condition on the I/O.
 * @open_rej_reason: This parameter specifies the encoded reason for the
 *    abandon-class reject.
 *
 * none.
 */
static void isci_request_set_open_reject_status(
	struct isci_request *request,
	struct sas_task *task,
	enum service_response *response_ptr,
	enum exec_status *status_ptr,
	enum isci_completion_selection *complete_to_host_ptr,
	enum sas_open_rej_reason open_rej_reason)
{
	/* Task in the target is done. */
	request->complete_in_target = true;
	*response_ptr = SAS_TASK_UNDELIVERED;
	*status_ptr = SAS_OPEN_REJECT;
	*complete_to_host_ptr = isci_perform_normal_io_completion;
	task->task_status.open_rej_reason = open_rej_reason;
}
1544
1545 /**
1546 * isci_request_handle_controller_specific_errors() - This function decodes
1547 * controller-specific I/O completion error conditions.
1548 * @request: This parameter is the completed isci_request object.
1549 * @response_ptr: This parameter specifies the service response for the I/O.
1550 * @status_ptr: This parameter specifies the exec status for the I/O.
1551 * @complete_to_host_ptr: This parameter specifies the action to be taken by
1552 * the LLDD with respect to completing this request or forcing an abort
1553 * condition on the I/O.
1554 *
1555 * none.
1556 */
1557 static void isci_request_handle_controller_specific_errors(
1558 struct isci_remote_device *isci_device,
1559 struct isci_request *request,
1560 struct sas_task *task,
1561 enum service_response *response_ptr,
1562 enum exec_status *status_ptr,
1563 enum isci_completion_selection *complete_to_host_ptr)
1564 {
1565 unsigned int cstatus;
1566
1567 cstatus = request->sci.scu_status;
1568
1569 dev_dbg(&request->isci_host->pdev->dev,
1570 "%s: %p SCI_FAILURE_CONTROLLER_SPECIFIC_IO_ERR "
1571 "- controller status = 0x%x\n",
1572 __func__, request, cstatus);
1573
1574 /* Decode the controller-specific errors; most
1575 * important is to recognize those conditions in which
1576 * the target may still have a task outstanding that
1577 * must be aborted.
1578 *
1579 * Note that there are SCU completion codes being
1580 * named in the decode below for which SCIC has already
1581 * done work to handle them in a way other than as
1582 * a controller-specific completion code; these are left
1583 	 * in the decode below for completeness' sake.
1584 */
1585 switch (cstatus) {
1586 case SCU_TASK_DONE_DMASETUP_DIRERR:
1587 /* Also SCU_TASK_DONE_SMP_FRM_TYPE_ERR: */
1588 case SCU_TASK_DONE_XFERCNT_ERR:
1589 /* Also SCU_TASK_DONE_SMP_UFI_ERR: */
1590 if (task->task_proto == SAS_PROTOCOL_SMP) {
1591 /* SCU_TASK_DONE_SMP_UFI_ERR == Task Done. */
1592 *response_ptr = SAS_TASK_COMPLETE;
1593
1594 /* See if the device has been/is being stopped. Note
1595 * that we ignore the quiesce state, since we are
1596 * concerned about the actual device state.
1597 */
1598 if ((isci_device->status == isci_stopping) ||
1599 (isci_device->status == isci_stopped))
1600 *status_ptr = SAS_DEVICE_UNKNOWN;
1601 else
1602 *status_ptr = SAS_ABORTED_TASK;
1603
1604 request->complete_in_target = true;
1605
1606 *complete_to_host_ptr =
1607 isci_perform_normal_io_completion;
1608 } else {
1609 /* Task in the target is not done. */
1610 *response_ptr = SAS_TASK_UNDELIVERED;
1611
1612 if ((isci_device->status == isci_stopping) ||
1613 (isci_device->status == isci_stopped))
1614 *status_ptr = SAS_DEVICE_UNKNOWN;
1615 else
1616 *status_ptr = SAM_STAT_TASK_ABORTED;
1617
1618 request->complete_in_target = false;
1619
1620 *complete_to_host_ptr =
1621 isci_perform_error_io_completion;
1622 }
1623
1624 break;
1625
1626 case SCU_TASK_DONE_CRC_ERR:
1627 case SCU_TASK_DONE_NAK_CMD_ERR:
1628 case SCU_TASK_DONE_EXCESS_DATA:
1629 case SCU_TASK_DONE_UNEXP_FIS:
1630 /* Also SCU_TASK_DONE_UNEXP_RESP: */
1631 case SCU_TASK_DONE_VIIT_ENTRY_NV: /* TODO - conditions? */
1632 case SCU_TASK_DONE_IIT_ENTRY_NV: /* TODO - conditions? */
1633 case SCU_TASK_DONE_RNCNV_OUTBOUND: /* TODO - conditions? */
1634 /* These are conditions in which the target
1635 * has completed the task, so that no cleanup
1636 * is necessary.
1637 */
1638 *response_ptr = SAS_TASK_COMPLETE;
1639
1640 /* See if the device has been/is being stopped. Note
1641 * that we ignore the quiesce state, since we are
1642 * concerned about the actual device state.
1643 */
1644 if ((isci_device->status == isci_stopping) ||
1645 (isci_device->status == isci_stopped))
1646 *status_ptr = SAS_DEVICE_UNKNOWN;
1647 else
1648 *status_ptr = SAS_ABORTED_TASK;
1649
1650 request->complete_in_target = true;
1651
1652 *complete_to_host_ptr = isci_perform_normal_io_completion;
1653 break;
1654
1655
1656 /* Note that the only open reject completion codes seen here will be
1657 * abandon-class codes; all others are automatically retried in the SCU.
1658 */
1659 case SCU_TASK_OPEN_REJECT_WRONG_DESTINATION:
1660
1661 isci_request_set_open_reject_status(
1662 request, task, response_ptr, status_ptr,
1663 complete_to_host_ptr, SAS_OREJ_WRONG_DEST);
1664 break;
1665
1666 case SCU_TASK_OPEN_REJECT_ZONE_VIOLATION:
1667
1668 /* Note - the return of AB0 will change when
1669 * libsas implements detection of zone violations.
1670 */
1671 isci_request_set_open_reject_status(
1672 request, task, response_ptr, status_ptr,
1673 complete_to_host_ptr, SAS_OREJ_RESV_AB0);
1674 break;
1675
1676 case SCU_TASK_OPEN_REJECT_RESERVED_ABANDON_1:
1677
1678 isci_request_set_open_reject_status(
1679 request, task, response_ptr, status_ptr,
1680 complete_to_host_ptr, SAS_OREJ_RESV_AB1);
1681 break;
1682
1683 case SCU_TASK_OPEN_REJECT_RESERVED_ABANDON_2:
1684
1685 isci_request_set_open_reject_status(
1686 request, task, response_ptr, status_ptr,
1687 complete_to_host_ptr, SAS_OREJ_RESV_AB2);
1688 break;
1689
1690 case SCU_TASK_OPEN_REJECT_RESERVED_ABANDON_3:
1691
1692 isci_request_set_open_reject_status(
1693 request, task, response_ptr, status_ptr,
1694 complete_to_host_ptr, SAS_OREJ_RESV_AB3);
1695 break;
1696
1697 case SCU_TASK_OPEN_REJECT_BAD_DESTINATION:
1698
1699 isci_request_set_open_reject_status(
1700 request, task, response_ptr, status_ptr,
1701 complete_to_host_ptr, SAS_OREJ_BAD_DEST);
1702 break;
1703
1704 case SCU_TASK_OPEN_REJECT_STP_RESOURCES_BUSY:
1705
1706 isci_request_set_open_reject_status(
1707 request, task, response_ptr, status_ptr,
1708 complete_to_host_ptr, SAS_OREJ_STP_NORES);
1709 break;
1710
1711 case SCU_TASK_OPEN_REJECT_PROTOCOL_NOT_SUPPORTED:
1712
1713 isci_request_set_open_reject_status(
1714 request, task, response_ptr, status_ptr,
1715 complete_to_host_ptr, SAS_OREJ_EPROTO);
1716 break;
1717
1718 case SCU_TASK_OPEN_REJECT_CONNECTION_RATE_NOT_SUPPORTED:
1719
1720 isci_request_set_open_reject_status(
1721 request, task, response_ptr, status_ptr,
1722 complete_to_host_ptr, SAS_OREJ_CONN_RATE);
1723 break;
1724
1725 case SCU_TASK_DONE_LL_R_ERR:
1726 /* Also SCU_TASK_DONE_ACK_NAK_TO: */
1727 case SCU_TASK_DONE_LL_PERR:
1728 case SCU_TASK_DONE_LL_SY_TERM:
1729 /* Also SCU_TASK_DONE_NAK_ERR:*/
1730 case SCU_TASK_DONE_LL_LF_TERM:
1731 /* Also SCU_TASK_DONE_DATA_LEN_ERR: */
1732 case SCU_TASK_DONE_LL_ABORT_ERR:
1733 case SCU_TASK_DONE_SEQ_INV_TYPE:
1734 /* Also SCU_TASK_DONE_UNEXP_XR: */
1735 case SCU_TASK_DONE_XR_IU_LEN_ERR:
1736 case SCU_TASK_DONE_INV_FIS_LEN:
1737 /* Also SCU_TASK_DONE_XR_WD_LEN: */
1738 case SCU_TASK_DONE_SDMA_ERR:
1739 case SCU_TASK_DONE_OFFSET_ERR:
1740 case SCU_TASK_DONE_MAX_PLD_ERR:
1741 case SCU_TASK_DONE_LF_ERR:
1742 case SCU_TASK_DONE_SMP_RESP_TO_ERR: /* Escalate to dev reset? */
1743 case SCU_TASK_DONE_SMP_LL_RX_ERR:
1744 case SCU_TASK_DONE_UNEXP_DATA:
1745 case SCU_TASK_DONE_UNEXP_SDBFIS:
1746 case SCU_TASK_DONE_REG_ERR:
1747 case SCU_TASK_DONE_SDB_ERR:
1748 case SCU_TASK_DONE_TASK_ABORT:
1749 default:
1750 /* Task in the target is not done. */
1751 *response_ptr = SAS_TASK_UNDELIVERED;
1752 *status_ptr = SAM_STAT_TASK_ABORTED;
1753 request->complete_in_target = false;
1754
1755 *complete_to_host_ptr = isci_perform_error_io_completion;
1756 break;
1757 }
1758 }
1759
1760 /**
1761 * isci_task_save_for_upper_layer_completion() - This function saves the
1762 * request for later completion to the upper layer driver.
1763  * @host: This parameter is a pointer to the host on which the request
1764 * should be queued (either as an error or success).
1765 * @request: This parameter is the completed request.
1766 * @response: This parameter is the response code for the completed task.
1767  * @status: This parameter is the status code for the completed task.
 * @task_notification_selection: This parameter specifies how the completion
 *    is to be routed (normal completion, aborted, or error path).
1768 *
1769 * none.
1770 */
1771 static void isci_task_save_for_upper_layer_completion(
1772 struct isci_host *host,
1773 struct isci_request *request,
1774 enum service_response response,
1775 enum exec_status status,
1776 enum isci_completion_selection task_notification_selection)
1777 {
1778 struct sas_task *task = isci_request_access_task(request);
1779
1780 task_notification_selection
1781 = isci_task_set_completion_status(task, response, status,
1782 task_notification_selection);
1783
1784 /* Tasks aborted specifically by a call to the lldd_abort_task
1785 * function should not be completed to the host in the regular path.
1786 */
1787 switch (task_notification_selection) {
1788
1789 case isci_perform_normal_io_completion:
1790
1791 /* Normal notification (task_done) */
1792 dev_dbg(&host->pdev->dev,
1793 "%s: Normal - task = %p, response=%d (%d), status=%d (%d)\n",
1794 __func__,
1795 task,
1796 task->task_status.resp, response,
1797 task->task_status.stat, status);
1798 /* Add to the completed list. */
1799 list_add(&request->completed_node,
1800 &host->requests_to_complete);
1801
1802 /* Take the request off the device's pending request list. */
1803 list_del_init(&request->dev_node);
1804 break;
1805
1806 case isci_perform_aborted_io_completion:
1807 /* No notification to libsas because this request is
1808 * already in the abort path.
1809 */
1810 dev_warn(&host->pdev->dev,
1811 "%s: Aborted - task = %p, response=%d (%d), status=%d (%d)\n",
1812 __func__,
1813 task,
1814 task->task_status.resp, response,
1815 task->task_status.stat, status);
1816
1817 /* Wake up whatever process was waiting for this
1818 * request to complete.
1819 */
1820 WARN_ON(request->io_request_completion == NULL);
1821
1822 if (request->io_request_completion != NULL) {
1823
1824 /* Signal whoever is waiting that this
1825 * request is complete.
1826 */
1827 complete(request->io_request_completion);
1828 }
1829 break;
1830
1831 case isci_perform_error_io_completion:
1832 /* Use sas_task_abort */
1833 dev_warn(&host->pdev->dev,
1834 "%s: Error - task = %p, response=%d (%d), status=%d (%d)\n",
1835 __func__,
1836 task,
1837 task->task_status.resp, response,
1838 task->task_status.stat, status);
1839 /* Add to the aborted list. */
1840 list_add(&request->completed_node,
1841 &host->requests_to_errorback);
1842 break;
1843
1844 default:
1845 dev_warn(&host->pdev->dev,
1846 "%s: Unknown - task = %p, response=%d (%d), status=%d (%d)\n",
1847 __func__,
1848 task,
1849 task->task_status.resp, response,
1850 task->task_status.stat, status);
1851
1852 /* Add to the error to libsas list. */
1853 list_add(&request->completed_node,
1854 &host->requests_to_errorback);
1855 break;
1856 }
1857 }
1858
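/* The requests_to_complete and requests_to_errorback lists populated
 * above are drained outside of this context. A minimal sketch of such a
 * drain loop for the normal-completion list (the real work happens in
 * the host completion path; the function name here is illustrative):
 */
#if 0
static void example_drain_completions(struct isci_host *host)
{
	struct isci_request *request, *next_request;

	list_for_each_entry_safe(request, next_request,
				 &host->requests_to_complete,
				 completed_node) {
		struct sas_task *task = isci_request_access_task(request);

		list_del_init(&request->completed_node);
		/* Hand the task back to libsas on the normal path. */
		task->task_done(task);
	}
}
#endif
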
1859 static void isci_request_io_request_complete(struct isci_host *isci_host,
1860 struct isci_request *request,
1861 enum sci_io_status completion_status)
1862 {
1863 struct sas_task *task = isci_request_access_task(request);
1864 struct ssp_response_iu *resp_iu;
1865 void *resp_buf;
1866 unsigned long task_flags;
1867 struct isci_remote_device *isci_device = request->isci_device;
1868 enum service_response response = SAS_TASK_UNDELIVERED;
1869 enum exec_status status = SAS_ABORTED_TASK;
1870 enum isci_request_status request_status;
1871 enum isci_completion_selection complete_to_host
1872 = isci_perform_normal_io_completion;
1873
1874 dev_dbg(&isci_host->pdev->dev,
1875 "%s: request = %p, task = %p,\n"
1876 "task->data_dir = %d completion_status = 0x%x\n",
1877 __func__,
1878 request,
1879 task,
1880 task->data_dir,
1881 completion_status);
1882
1883 spin_lock(&request->state_lock);
1884 request_status = isci_request_get_state(request);
1885
1886 /* Decode the request status. Note that if the request has been
1887 * aborted by a task management function, we don't care
1888 * what the status is.
1889 */
1890 switch (request_status) {
1891
1892 case aborted:
1893 /* "aborted" indicates that the request was aborted by a task
1894 * management function, since once a task management request is
1895 		 * performed by the device, the request only completes because
1896 * of the subsequent driver terminate.
1897 *
1898 * Aborted also means an external thread is explicitly managing
1899 * this request, so that we do not complete it up the stack.
1900 *
1901 * The target is still there (since the TMF was successful).
1902 */
1903 request->complete_in_target = true;
1904 response = SAS_TASK_COMPLETE;
1905
1906 /* See if the device has been/is being stopped. Note
1907 * that we ignore the quiesce state, since we are
1908 * concerned about the actual device state.
1909 */
1910 if ((isci_device->status == isci_stopping)
1911 || (isci_device->status == isci_stopped)
1912 )
1913 status = SAS_DEVICE_UNKNOWN;
1914 else
1915 status = SAS_ABORTED_TASK;
1916
1917 complete_to_host = isci_perform_aborted_io_completion;
1918 /* This was an aborted request. */
1919
1920 spin_unlock(&request->state_lock);
1921 break;
1922
1923 case aborting:
1924 /* aborting means that the task management function tried and
1925 * failed to abort the request. We need to note the request
1926 * as SAS_TASK_UNDELIVERED, so that the scsi mid layer marks the
1927 * target as down.
1928 *
1929 * Aborting also means an external thread is explicitly managing
1930 * this request, so that we do not complete it up the stack.
1931 */
1932 request->complete_in_target = true;
1933 response = SAS_TASK_UNDELIVERED;
1934
1935 if ((isci_device->status == isci_stopping) ||
1936 (isci_device->status == isci_stopped))
1937 /* The device has been /is being stopped. Note that
1938 * we ignore the quiesce state, since we are
1939 * concerned about the actual device state.
1940 */
1941 status = SAS_DEVICE_UNKNOWN;
1942 else
1943 status = SAS_PHY_DOWN;
1944
1945 complete_to_host = isci_perform_aborted_io_completion;
1946
1947 /* This was an aborted request. */
1948
1949 spin_unlock(&request->state_lock);
1950 break;
1951
1952 case terminating:
1953
1954 		/* This was a terminated request. This happens when
1955 * the I/O is being terminated because of an action on
1956 * the device (reset, tear down, etc.), and the I/O needs
1957 * to be completed up the stack.
1958 */
1959 request->complete_in_target = true;
1960 response = SAS_TASK_UNDELIVERED;
1961
1962 /* See if the device has been/is being stopped. Note
1963 * that we ignore the quiesce state, since we are
1964 * concerned about the actual device state.
1965 */
1966 if ((isci_device->status == isci_stopping) ||
1967 (isci_device->status == isci_stopped))
1968 status = SAS_DEVICE_UNKNOWN;
1969 else
1970 status = SAS_ABORTED_TASK;
1971
1972 complete_to_host = isci_perform_aborted_io_completion;
1973
1974 /* This was a terminated request. */
1975
1976 spin_unlock(&request->state_lock);
1977 break;
1978
1979 default:
1980
1981 /* The request is done from an SCU HW perspective. */
1982 request->status = completed;
1983
1984 spin_unlock(&request->state_lock);
1985
1986 /* This is an active request being completed from the core. */
1987 switch (completion_status) {
1988
1989 case SCI_IO_FAILURE_RESPONSE_VALID:
1990 dev_dbg(&isci_host->pdev->dev,
1991 "%s: SCI_IO_FAILURE_RESPONSE_VALID (%p/%p)\n",
1992 __func__,
1993 request,
1994 task);
1995
1996 if (sas_protocol_ata(task->task_proto)) {
1997 resp_buf = &request->sci.stp.rsp;
1998 isci_request_process_stp_response(task,
1999 resp_buf);
2000 } else if (SAS_PROTOCOL_SSP == task->task_proto) {
2001
2002 /* crack the iu response buffer. */
2003 resp_iu = &request->sci.ssp.rsp;
2004 isci_request_process_response_iu(task, resp_iu,
2005 &isci_host->pdev->dev);
2006
2007 } else if (SAS_PROTOCOL_SMP == task->task_proto) {
2008
2009 dev_err(&isci_host->pdev->dev,
2010 "%s: SCI_IO_FAILURE_RESPONSE_VALID: "
2011 "SAS_PROTOCOL_SMP protocol\n",
2012 __func__);
2013
2014 } else
2015 dev_err(&isci_host->pdev->dev,
2016 "%s: unknown protocol\n", __func__);
2017
2018 /* use the task status set in the task struct by the
2019 * isci_request_process_response_iu call.
2020 */
2021 request->complete_in_target = true;
2022 response = task->task_status.resp;
2023 status = task->task_status.stat;
2024 break;
2025
2026 case SCI_IO_SUCCESS:
2027 case SCI_IO_SUCCESS_IO_DONE_EARLY:
2028
2029 response = SAS_TASK_COMPLETE;
2030 status = SAM_STAT_GOOD;
2031 request->complete_in_target = true;
2032
2033 if (task->task_proto == SAS_PROTOCOL_SMP) {
2034 void *rsp = &request->sci.smp.rsp;
2035
2036 dev_dbg(&isci_host->pdev->dev,
2037 "%s: SMP protocol completion\n",
2038 __func__);
2039
2040 sg_copy_from_buffer(
2041 &task->smp_task.smp_resp, 1,
2042 rsp, sizeof(struct smp_resp));
2043 } else if (completion_status
2044 == SCI_IO_SUCCESS_IO_DONE_EARLY) {
2045
2046 /* This was an SSP / STP / SATA transfer.
2047 * There is a possibility that less data than
2048 * the maximum was transferred.
2049 */
2050 u32 transferred_length = sci_req_tx_bytes(&request->sci);
2051
2052 task->task_status.residual
2053 = task->total_xfer_len - transferred_length;
2054
2055 /* If there were residual bytes, call this an
2056 * underrun.
2057 */
2058 if (task->task_status.residual != 0)
2059 status = SAS_DATA_UNDERRUN;
2060
2061 dev_dbg(&isci_host->pdev->dev,
2062 "%s: SCI_IO_SUCCESS_IO_DONE_EARLY %d\n",
2063 __func__,
2064 status);
2065
2066 } else
2067 dev_dbg(&isci_host->pdev->dev,
2068 "%s: SCI_IO_SUCCESS\n",
2069 __func__);
2070
2071 break;
2072
2073 case SCI_IO_FAILURE_TERMINATED:
2074 dev_dbg(&isci_host->pdev->dev,
2075 "%s: SCI_IO_FAILURE_TERMINATED (%p/%p)\n",
2076 __func__,
2077 request,
2078 task);
2079
2080 /* The request was terminated explicitly. No handling
2081 * is needed in the SCSI error handler path.
2082 */
2083 request->complete_in_target = true;
2084 response = SAS_TASK_UNDELIVERED;
2085
2086 /* See if the device has been/is being stopped. Note
2087 * that we ignore the quiesce state, since we are
2088 * concerned about the actual device state.
2089 */
2090 if ((isci_device->status == isci_stopping) ||
2091 (isci_device->status == isci_stopped))
2092 status = SAS_DEVICE_UNKNOWN;
2093 else
2094 status = SAS_ABORTED_TASK;
2095
2096 complete_to_host = isci_perform_normal_io_completion;
2097 break;
2098
2099 case SCI_FAILURE_CONTROLLER_SPECIFIC_IO_ERR:
2100
2101 isci_request_handle_controller_specific_errors(
2102 isci_device, request, task, &response, &status,
2103 &complete_to_host);
2104
2105 break;
2106
2107 case SCI_IO_FAILURE_REMOTE_DEVICE_RESET_REQUIRED:
2108 /* This is a special case, in that the I/O completion
2109 * is telling us that the device needs a reset.
2110 * In order for the device reset condition to be
2111 * noticed, the I/O has to be handled in the error
2112 * handler. Set the reset flag and cause the
2113 * SCSI error thread to be scheduled.
2114 */
2115 spin_lock_irqsave(&task->task_state_lock, task_flags);
2116 task->task_state_flags |= SAS_TASK_NEED_DEV_RESET;
2117 spin_unlock_irqrestore(&task->task_state_lock, task_flags);
2118
2119 /* Fail the I/O. */
2120 response = SAS_TASK_UNDELIVERED;
2121 status = SAM_STAT_TASK_ABORTED;
2122
2123 complete_to_host = isci_perform_error_io_completion;
2124 request->complete_in_target = false;
2125 break;
2126
2127 default:
2128 /* Catch any otherwise unhandled error codes here. */
2129 dev_warn(&isci_host->pdev->dev,
2130 "%s: invalid completion code: 0x%x - "
2131 "isci_request = %p\n",
2132 __func__, completion_status, request);
2133
2134 response = SAS_TASK_UNDELIVERED;
2135
2136 /* See if the device has been/is being stopped. Note
2137 * that we ignore the quiesce state, since we are
2138 * concerned about the actual device state.
2139 */
2140 if ((isci_device->status == isci_stopping) ||
2141 (isci_device->status == isci_stopped))
2142 status = SAS_DEVICE_UNKNOWN;
2143 else
2144 status = SAS_ABORTED_TASK;
2145
2146 complete_to_host = isci_perform_error_io_completion;
2147 request->complete_in_target = false;
2148 break;
2149 }
2150 break;
2151 }
2152
2153 isci_request_unmap_sgl(request, isci_host->pdev);
2154
2155 /* Put the completed request on the correct list */
2156 isci_task_save_for_upper_layer_completion(isci_host, request, response,
2157 status, complete_to_host
2158 );
2159
2160 /* complete the io request to the core. */
2161 scic_controller_complete_io(&isci_host->sci,
2162 &isci_device->sci,
2163 &request->sci);
2164 /* set terminated handle so it cannot be completed or
2165 * terminated again, and to cause any calls into abort
2166 * task to recognize the already completed case.
2167 */
2168 request->terminated = true;
2169
2170 isci_host_can_dequeue(isci_host, 1);
2171 }
2172
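/* Worked example of the underrun math above: for a 4096-byte request
 * where sci_req_tx_bytes() reports 3584 bytes transferred, the residual
 * is 512 and the I/O is completed with status SAS_DATA_UNDERRUN.
 */
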
2173 /**
2174 * scic_sds_request_initial_state_enter() -
2175 * @object: This parameter specifies the base object for which the state
2176 * transition is occurring.
2177 *
2178 * This method implements the actions taken when entering the
2179 * SCI_BASE_REQUEST_STATE_INITIAL state. This state is entered when the initial
2180 * base request is constructed. Entry into the initial state sets all handlers
2181 * for the io request object to their default handlers. none
2182 */
2183 static void scic_sds_request_initial_state_enter(void *object)
2184 {
2185 struct scic_sds_request *sci_req = object;
2186
2187 SET_STATE_HANDLER(
2188 sci_req,
2189 scic_sds_request_state_handler_table,
2190 SCI_BASE_REQUEST_STATE_INITIAL
2191 );
2192 }
2193
2194 /**
2195 * scic_sds_request_constructed_state_enter() -
2196 * @object: The io request object that is to enter the constructed state.
2197 *
2198 * This method implements the actions taken when entering the
2199 * SCI_BASE_REQUEST_STATE_CONSTRUCTED state. The method sets the state handlers
2200  * for the constructed state. none
2201 */
2202 static void scic_sds_request_constructed_state_enter(void *object)
2203 {
2204 struct scic_sds_request *sci_req = object;
2205
2206 SET_STATE_HANDLER(
2207 sci_req,
2208 scic_sds_request_state_handler_table,
2209 SCI_BASE_REQUEST_STATE_CONSTRUCTED
2210 );
2211 }
2212
2213 /**
2214 * scic_sds_request_started_state_enter() -
2215 * @object: This parameter specifies the base object for which the state
2216 * transition is occurring. This is cast into a SCIC_SDS_IO_REQUEST object.
2217 *
2218 * This method implements the actions taken when entering the
2219 * SCI_BASE_REQUEST_STATE_STARTED state. If the io request object type is a
2220 * SCSI Task request we must enter the started substate machine. none
2221 */
2222 static void scic_sds_request_started_state_enter(void *object)
2223 {
2224 struct scic_sds_request *sci_req = object;
2225 struct sci_base_state_machine *sm = &sci_req->state_machine;
2226 struct isci_request *ireq = sci_req_to_ireq(sci_req);
2227 struct domain_device *dev = sci_dev_to_domain(sci_req->target_device);
2228 struct sas_task *task;
2229
2230 /* XXX as hch said always creating an internal sas_task for tmf
2231 * requests would simplify the driver
2232 */
2233 task = ireq->ttype == io_task ? isci_request_access_task(ireq) : NULL;
2234
2235 SET_STATE_HANDLER(
2236 sci_req,
2237 scic_sds_request_state_handler_table,
2238 SCI_BASE_REQUEST_STATE_STARTED
2239 );
2240
2241 /* Most of the request state machines have a started substate machine so
2242 * start its execution on the entry to the started state.
2243 */
2244 if (sci_req->has_started_substate_machine == true)
2245 sci_base_state_machine_start(&sci_req->started_substate_machine);
2246
2247 if (!task && dev->dev_type == SAS_END_DEV) {
2248 sci_base_state_machine_change_state(sm,
2249 SCIC_SDS_IO_REQUEST_STARTED_TASK_MGMT_SUBSTATE_AWAIT_TC_COMPLETION);
2250 } else if (task && task->task_proto == SAS_PROTOCOL_SMP) {
2251 sci_base_state_machine_change_state(sm,
2252 SCIC_SDS_SMP_REQUEST_STARTED_SUBSTATE_AWAIT_RESPONSE);
2253 }
2254 }
2255
2256 /**
2257 * scic_sds_request_started_state_exit() -
2258 * @object: This parameter specifies the base object for which the state
2259 * transition is occurring. This object is cast into a SCIC_SDS_IO_REQUEST
2260 * object.
2261 *
2262 * This method implements the actions taken when exiting the
2263 * SCI_BASE_REQUEST_STATE_STARTED state. For task requests the action will be
2264 * to stop the started substate machine. none
2265 */
2266 static void scic_sds_request_started_state_exit(void *object)
2267 {
2268 struct scic_sds_request *sci_req = object;
2269
2270 if (sci_req->has_started_substate_machine == true)
2271 sci_base_state_machine_stop(&sci_req->started_substate_machine);
2272 }
2273
2274 /**
2275 * scic_sds_request_completed_state_enter() -
2276 * @object: This parameter specifies the base object for which the state
2277 * transition is occurring. This object is cast into a SCIC_SDS_IO_REQUEST
2278 * object.
2279 *
2280 * This method implements the actions taken when entering the
2281 * SCI_BASE_REQUEST_STATE_COMPLETED state. This state is entered when the
2282 * SCIC_SDS_IO_REQUEST has completed. The method will decode the request
2283 * completion status and convert it to an enum sci_status to return in the
2284 * completion callback function. none
2285 */
2286 static void scic_sds_request_completed_state_enter(void *object)
2287 {
2288 struct scic_sds_request *sci_req = object;
2289 struct scic_sds_controller *scic =
2290 scic_sds_request_get_controller(sci_req);
2291 struct isci_host *ihost = scic_to_ihost(scic);
2292 struct isci_request *ireq = sci_req_to_ireq(sci_req);
2293
2294 SET_STATE_HANDLER(sci_req,
2295 scic_sds_request_state_handler_table,
2296 SCI_BASE_REQUEST_STATE_COMPLETED);
2297
2298 /* Tell the SCI_USER that the IO request is complete */
2299 if (sci_req->is_task_management_request == false)
2300 isci_request_io_request_complete(ihost, ireq,
2301 sci_req->sci_status);
2302 else
2303 isci_task_request_complete(ihost, ireq, sci_req->sci_status);
2304 }
2305
2306 /**
2307 * scic_sds_request_aborting_state_enter() -
2308 * @object: This parameter specifies the base object for which the state
2309 * transition is occurring. This object is cast into a SCIC_SDS_IO_REQUEST
2310 * object.
2311 *
2312 * This method implements the actions taken when entering the
2313 * SCI_BASE_REQUEST_STATE_ABORTING state. none
2314 */
2315 static void scic_sds_request_aborting_state_enter(void *object)
2316 {
2317 struct scic_sds_request *sci_req = object;
2318
2319 /* Setting the abort bit in the Task Context is required by the silicon. */
2320 sci_req->task_context_buffer->abort = 1;
2321
2322 SET_STATE_HANDLER(
2323 sci_req,
2324 scic_sds_request_state_handler_table,
2325 SCI_BASE_REQUEST_STATE_ABORTING
2326 );
2327 }
2328
2329 /**
2330 * scic_sds_request_final_state_enter() -
2331 * @object: This parameter specifies the base object for which the state
2332 * transition is occurring. This is cast into a SCIC_SDS_IO_REQUEST object.
2333 *
2334 * This method implements the actions taken when entering the
2335 * SCI_BASE_REQUEST_STATE_FINAL state. The only action required is to put the
2336 * state handlers in place. none
2337 */
2338 static void scic_sds_request_final_state_enter(void *object)
2339 {
2340 struct scic_sds_request *sci_req = object;
2341
2342 SET_STATE_HANDLER(
2343 sci_req,
2344 scic_sds_request_state_handler_table,
2345 SCI_BASE_REQUEST_STATE_FINAL
2346 );
2347 }
2348
2349 static void scic_sds_io_request_started_task_mgmt_await_tc_completion_substate_enter(
2350 void *object)
2351 {
2352 struct scic_sds_request *sci_req = object;
2353
2354 SET_STATE_HANDLER(
2355 sci_req,
2356 scic_sds_request_state_handler_table,
2357 SCIC_SDS_IO_REQUEST_STARTED_TASK_MGMT_SUBSTATE_AWAIT_TC_COMPLETION
2358 );
2359 }
2360
2361 static void scic_sds_io_request_started_task_mgmt_await_task_response_substate_enter(
2362 void *object)
2363 {
2364 struct scic_sds_request *sci_req = object;
2365
2366 SET_STATE_HANDLER(
2367 sci_req,
2368 scic_sds_request_state_handler_table,
2369 SCIC_SDS_IO_REQUEST_STARTED_TASK_MGMT_SUBSTATE_AWAIT_TC_RESPONSE
2370 );
2371 }
2372
2373 static void scic_sds_smp_request_started_await_response_substate_enter(void *object)
2374 {
2375 struct scic_sds_request *sci_req = object;
2376
2377 SET_STATE_HANDLER(
2378 sci_req,
2379 scic_sds_request_state_handler_table,
2380 SCIC_SDS_SMP_REQUEST_STARTED_SUBSTATE_AWAIT_RESPONSE
2381 );
2382 }
2383
2384 static void scic_sds_smp_request_started_await_tc_completion_substate_enter(void *object)
2385 {
2386 struct scic_sds_request *sci_req = object;
2387
2388 SET_STATE_HANDLER(
2389 sci_req,
2390 scic_sds_request_state_handler_table,
2391 SCIC_SDS_SMP_REQUEST_STARTED_SUBSTATE_AWAIT_TC_COMPLETION
2392 );
2393 }
2394
2395 static const struct sci_base_state scic_sds_request_state_table[] = {
2396 [SCI_BASE_REQUEST_STATE_INITIAL] = {
2397 .enter_state = scic_sds_request_initial_state_enter,
2398 },
2399 [SCI_BASE_REQUEST_STATE_CONSTRUCTED] = {
2400 .enter_state = scic_sds_request_constructed_state_enter,
2401 },
2402 [SCI_BASE_REQUEST_STATE_STARTED] = {
2403 .enter_state = scic_sds_request_started_state_enter,
2404 .exit_state = scic_sds_request_started_state_exit
2405 },
2406 [SCIC_SDS_IO_REQUEST_STARTED_TASK_MGMT_SUBSTATE_AWAIT_TC_COMPLETION] = {
2407 .enter_state = scic_sds_io_request_started_task_mgmt_await_tc_completion_substate_enter,
2408 },
2409 [SCIC_SDS_IO_REQUEST_STARTED_TASK_MGMT_SUBSTATE_AWAIT_TC_RESPONSE] = {
2410 .enter_state = scic_sds_io_request_started_task_mgmt_await_task_response_substate_enter,
2411 },
2412 [SCIC_SDS_SMP_REQUEST_STARTED_SUBSTATE_AWAIT_RESPONSE] = {
2413 .enter_state = scic_sds_smp_request_started_await_response_substate_enter,
2414 },
2415 [SCIC_SDS_SMP_REQUEST_STARTED_SUBSTATE_AWAIT_TC_COMPLETION] = {
2416 .enter_state = scic_sds_smp_request_started_await_tc_completion_substate_enter,
2417 },
2418 [SCI_BASE_REQUEST_STATE_COMPLETED] = {
2419 .enter_state = scic_sds_request_completed_state_enter,
2420 },
2421 [SCI_BASE_REQUEST_STATE_ABORTING] = {
2422 .enter_state = scic_sds_request_aborting_state_enter,
2423 },
2424 [SCI_BASE_REQUEST_STATE_FINAL] = {
2425 .enter_state = scic_sds_request_final_state_enter,
2426 },
2427 };
2428
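/* State transitions driven from this table follow the usual exit/enter
 * pattern. A simplified sketch of what sci_base_state_machine_change_state()
 * is assumed to do with the table (field names are illustrative):
 */
#if 0
static void example_change_state(struct sci_base_state_machine *sm,
				 u32 next_state)
{
	const struct sci_base_state *table = sm->state_table;

	/* Run the exit action of the state being left, if any. */
	if (table[sm->current_state_id].exit_state)
		table[sm->current_state_id].exit_state(sm->state_machine_owner);

	sm->previous_state_id = sm->current_state_id;
	sm->current_state_id = next_state;

	/* Run the enter action of the new state, if any. */
	if (table[next_state].enter_state)
		table[next_state].enter_state(sm->state_machine_owner);
}
#endif
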
2429 static void scic_sds_general_request_construct(struct scic_sds_controller *scic,
2430 struct scic_sds_remote_device *sci_dev,
2431 u16 io_tag, struct scic_sds_request *sci_req)
2432 {
2433 sci_base_state_machine_construct(&sci_req->state_machine, sci_req,
2434 scic_sds_request_state_table, SCI_BASE_REQUEST_STATE_INITIAL);
2435 sci_base_state_machine_start(&sci_req->state_machine);
2436
2437 sci_req->io_tag = io_tag;
2438 sci_req->owning_controller = scic;
2439 sci_req->target_device = sci_dev;
2440 sci_req->has_started_substate_machine = false;
2441 sci_req->protocol = SCIC_NO_PROTOCOL;
2442 sci_req->saved_rx_frame_index = SCU_INVALID_FRAME_INDEX;
2443 sci_req->device_sequence = scic_sds_remote_device_get_sequence(sci_dev);
2444
2445 sci_req->sci_status = SCI_SUCCESS;
2446 sci_req->scu_status = 0;
2447 sci_req->post_context = 0xFFFFFFFF;
2448
2449 sci_req->is_task_management_request = false;
2450
2451 if (io_tag == SCI_CONTROLLER_INVALID_IO_TAG) {
2452 sci_req->was_tag_assigned_by_user = false;
2453 sci_req->task_context_buffer = &sci_req->tc;
2454 } else {
2455 sci_req->was_tag_assigned_by_user = true;
2456
2457 sci_req->task_context_buffer =
2458 scic_sds_controller_get_task_context_buffer(scic, io_tag);
2459 }
2460 }
2461
2462 static enum sci_status
2463 scic_io_request_construct(struct scic_sds_controller *scic,
2464 struct scic_sds_remote_device *sci_dev,
2465 u16 io_tag, struct scic_sds_request *sci_req)
2466 {
2467 struct domain_device *dev = sci_dev_to_domain(sci_dev);
2468 enum sci_status status = SCI_SUCCESS;
2469
2470 /* Build the common part of the request */
2471 scic_sds_general_request_construct(scic, sci_dev, io_tag, sci_req);
2472
2473 if (sci_dev->rnc.remote_node_index == SCIC_SDS_REMOTE_NODE_CONTEXT_INVALID_INDEX)
2474 return SCI_FAILURE_INVALID_REMOTE_DEVICE;
2475
2476 if (dev->dev_type == SAS_END_DEV)
2477 /* pass */;
2478 else if (dev->dev_type == SATA_DEV || (dev->tproto & SAS_PROTOCOL_STP))
2479 memset(&sci_req->stp.cmd, 0, sizeof(sci_req->stp.cmd));
2480 else if (dev_is_expander(dev))
2481 memset(&sci_req->smp.cmd, 0, sizeof(sci_req->smp.cmd));
2482 else
2483 return SCI_FAILURE_UNSUPPORTED_PROTOCOL;
2484
2485 memset(sci_req->task_context_buffer, 0,
2486 offsetof(struct scu_task_context, sgl_pair_ab));
2487
2488 return status;
2489 }
2490
2491 enum sci_status scic_task_request_construct(struct scic_sds_controller *scic,
2492 struct scic_sds_remote_device *sci_dev,
2493 u16 io_tag, struct scic_sds_request *sci_req)
2494 {
2495 struct domain_device *dev = sci_dev_to_domain(sci_dev);
2496 enum sci_status status = SCI_SUCCESS;
2497
2498 /* Build the common part of the request */
2499 scic_sds_general_request_construct(scic, sci_dev, io_tag, sci_req);
2500
2501 if (dev->dev_type == SAS_END_DEV ||
2502 dev->dev_type == SATA_DEV || (dev->tproto & SAS_PROTOCOL_STP)) {
2503 sci_req->is_task_management_request = true;
2504 memset(sci_req->task_context_buffer, 0, sizeof(struct scu_task_context));
2505 } else
2506 status = SCI_FAILURE_UNSUPPORTED_PROTOCOL;
2507
2508 return status;
2509 }
2510
2511 static enum sci_status isci_request_ssp_request_construct(
2512 struct isci_request *request)
2513 {
2514 enum sci_status status;
2515
2516 dev_dbg(&request->isci_host->pdev->dev,
2517 "%s: request = %p\n",
2518 __func__,
2519 request);
2520 status = scic_io_request_construct_basic_ssp(&request->sci);
2521 return status;
2522 }
2523
2524 static enum sci_status isci_request_stp_request_construct(
2525 struct isci_request *request)
2526 {
2527 struct sas_task *task = isci_request_access_task(request);
2528 enum sci_status status;
2529 struct host_to_dev_fis *register_fis;
2530
2531 dev_dbg(&request->isci_host->pdev->dev,
2532 "%s: request = %p\n",
2533 __func__,
2534 request);
2535
2536 /* Get the host_to_dev_fis from the core and copy
2537 * the fis from the task into it.
2538 */
2539 register_fis = isci_sata_task_to_fis_copy(task);
2540
2541 status = scic_io_request_construct_basic_sata(&request->sci);
2542
2543 /* Set the ncq tag in the fis, from the queue
2544 * command in the task.
2545 */
2546 if (isci_sata_is_task_ncq(task)) {
2547
2548 isci_sata_set_ncq_tag(
2549 register_fis,
2550 task
2551 );
2552 }
2553
2554 return status;
2555 }
2556
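/* For NCQ commands the queue tag travels in bits 7:3 of the FIS sector
 * count field. A sketch of what a helper such as isci_sata_set_ncq_tag()
 * is assumed to do, taking the tag from the libata queued command:
 */
#if 0
static void example_set_ncq_tag(struct host_to_dev_fis *register_fis,
				struct sas_task *task)
{
	struct ata_queued_cmd *qc = task->uldd_task;

	/* Place the NCQ tag in the upper five bits of sector_count. */
	register_fis->sector_count = qc->tag << 3;
}
#endif
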
2557 /*
2558  * This function will fill in the SCU Task Context for a SMP request. The
2559  * following important settings are utilized:
2560  *   - task_type == SCU_TASK_TYPE_SMP. This simply indicates that a normal
2561  *     request type (i.e. non-raw frame) is being utilized to perform task
2562  *     management.
2563  *   - control_frame == 1. This ensures that the proper endianness is set
2564  *     so that the bytes are transmitted in the right order for a smp
2565  *     request frame.
 * @sci_req: This parameter specifies the smp request object being
 *    constructed.
 * @smp_req: This parameter specifies the raw SMP request to be copied into
 *    the task context.
2566 *
2567 */
2568 static void
2569 scu_smp_request_construct_task_context(struct scic_sds_request *sci_req,
2570 struct smp_req *smp_req)
2571 {
2572 dma_addr_t dma_addr;
2573 struct scic_sds_controller *scic;
2574 struct scic_sds_remote_device *sci_dev;
2575 struct scic_sds_port *sci_port;
2576 struct scu_task_context *task_context;
2577 ssize_t word_cnt = sizeof(struct smp_req) / sizeof(u32);
2578
2579 /* byte swap the smp request. */
2580 sci_swab32_cpy(&sci_req->smp.cmd, smp_req,
2581 word_cnt);
2582
2583 task_context = scic_sds_request_get_task_context(sci_req);
2584
2585 scic = scic_sds_request_get_controller(sci_req);
2586 sci_dev = scic_sds_request_get_device(sci_req);
2587 sci_port = scic_sds_request_get_port(sci_req);
2588
2589 /*
2590 	 * Fill in the TC with its required data
2591 * 00h
2592 */
2593 task_context->priority = 0;
2594 task_context->initiator_request = 1;
2595 task_context->connection_rate = sci_dev->connection_rate;
2596 task_context->protocol_engine_index =
2597 scic_sds_controller_get_protocol_engine_group(scic);
2598 task_context->logical_port_index = scic_sds_port_get_index(sci_port);
2599 task_context->protocol_type = SCU_TASK_CONTEXT_PROTOCOL_SMP;
2600 task_context->abort = 0;
2601 task_context->valid = SCU_TASK_CONTEXT_VALID;
2602 task_context->context_type = SCU_TASK_CONTEXT_TYPE;
2603
2604 /* 04h */
2605 task_context->remote_node_index = sci_dev->rnc.remote_node_index;
2606 task_context->command_code = 0;
2607 task_context->task_type = SCU_TASK_TYPE_SMP_REQUEST;
2608
2609 /* 08h */
2610 task_context->link_layer_control = 0;
2611 task_context->do_not_dma_ssp_good_response = 1;
2612 task_context->strict_ordering = 0;
2613 task_context->control_frame = 1;
2614 task_context->timeout_enable = 0;
2615 task_context->block_guard_enable = 0;
2616
2617 /* 0ch */
2618 task_context->address_modifier = 0;
2619
2620 /* 10h */
2621 task_context->ssp_command_iu_length = smp_req->req_len;
2622
2623 /* 14h */
2624 task_context->transfer_length_bytes = 0;
2625
2626 /*
2627 * 18h ~ 30h, protocol specific
2628 	 * since the command IU has been built by the framework at this point,
2629 	 * we just copy the first DWord from the command IU to this location. */
2630 memcpy(&task_context->type.smp, &sci_req->smp.cmd, sizeof(u32));
2631
2632 /*
2633 * 40h
2634 * "For SMP you could program it to zero. We would prefer that way
2635 * so that done code will be consistent." - Venki
2636 */
2637 task_context->task_phase = 0;
2638
2639 if (sci_req->was_tag_assigned_by_user) {
2640 /*
2641 * Build the task context now since we have already read
2642 * the data
2643 */
2644 sci_req->post_context =
2645 (SCU_CONTEXT_COMMAND_REQUEST_TYPE_POST_TC |
2646 (scic_sds_controller_get_protocol_engine_group(scic) <<
2647 SCU_CONTEXT_COMMAND_PROTOCOL_ENGINE_GROUP_SHIFT) |
2648 (scic_sds_port_get_index(sci_port) <<
2649 SCU_CONTEXT_COMMAND_LOGICAL_PORT_SHIFT) |
2650 scic_sds_io_tag_get_index(sci_req->io_tag));
2651 } else {
2652 /*
2653 * Build the task context now since we have already read
2654 * the data.
2655 * I/O tag index is not assigned because we have to wait
2656 * until we get a TCi.
2657 */
2658 sci_req->post_context =
2659 (SCU_CONTEXT_COMMAND_REQUEST_TYPE_POST_TC |
2660 (scic_sds_controller_get_protocol_engine_group(scic) <<
2661 SCU_CONTEXT_COMMAND_PROTOCOL_ENGINE_GROUP_SHIFT) |
2662 (scic_sds_port_get_index(sci_port) <<
2663 SCU_CONTEXT_COMMAND_LOGICAL_PORT_SHIFT));
2664 }
2665
2666 /*
2667 * Copy the physical address for the command buffer to the SCU Task
2668 * Context command buffer should not contain command header.
2669 */
2670 dma_addr = scic_io_request_get_dma_addr(sci_req,
2671 ((char *) &sci_req->smp.cmd) +
2672 sizeof(u32));
2673
2674 task_context->command_iu_upper = upper_32_bits(dma_addr);
2675 task_context->command_iu_lower = lower_32_bits(dma_addr);
2676
2677 /* SMP response comes as UF, so no need to set response IU address. */
2678 task_context->response_iu_upper = 0;
2679 task_context->response_iu_lower = 0;
2680 }
2681
2682 static enum sci_status scic_io_request_construct_smp(struct scic_sds_request *sci_req)
2683 {
2684 struct smp_req *smp_req = kmalloc(sizeof(*smp_req), GFP_KERNEL);
2685
2686 if (!smp_req)
2687 return SCI_FAILURE_INSUFFICIENT_RESOURCES;
2688
2689 sci_req->protocol = SCIC_SMP_PROTOCOL;
2690
2691 /* Construct the SMP SCU Task Context */
2692 memcpy(smp_req, &sci_req->smp.cmd, sizeof(*smp_req));
2693
2694 /*
2695 	 * Look at the SMP request's header fields; for certain SAS 1.x SMP
2696 * functions under SAS 2.0, a zero request length really indicates
2697 * a non-zero default length. */
2698 if (smp_req->req_len == 0) {
2699 switch (smp_req->func) {
2700 case SMP_DISCOVER:
2701 case SMP_REPORT_PHY_ERR_LOG:
2702 case SMP_REPORT_PHY_SATA:
2703 case SMP_REPORT_ROUTE_INFO:
2704 smp_req->req_len = 2;
2705 break;
2706 case SMP_CONF_ROUTE_INFO:
2707 case SMP_PHY_CONTROL:
2708 case SMP_PHY_TEST_FUNCTION:
2709 smp_req->req_len = 9;
2710 break;
2711 /* Default - zero is a valid default for 2.0. */
2712 }
2713 }
2714
2715 scu_smp_request_construct_task_context(sci_req, smp_req);
2716
2717 sci_base_state_machine_change_state(&sci_req->state_machine,
2718 SCI_BASE_REQUEST_STATE_CONSTRUCTED);
2719
2720 kfree(smp_req);
2721
2722 return SCI_SUCCESS;
2723 }
2724
2725 /*
2726 * isci_smp_request_build() - This function builds the smp request.
2727 * @ireq: This parameter points to the isci_request allocated in the
2728 * request construct function.
2729 *
2730  * SCI_SUCCESS on successful completion, or specific failure code.
2731 */
2732 static enum sci_status isci_smp_request_build(struct isci_request *ireq)
2733 {
2734 enum sci_status status = SCI_FAILURE;
2735 struct sas_task *task = isci_request_access_task(ireq);
2736 struct scic_sds_request *sci_req = &ireq->sci;
2737
2738 dev_dbg(&ireq->isci_host->pdev->dev,
2739 "%s: request = %p\n", __func__, ireq);
2740
2741 dev_dbg(&ireq->isci_host->pdev->dev,
2742 "%s: smp_req len = %d\n",
2743 __func__,
2744 task->smp_task.smp_req.length);
2745
2746 	/* copy the smp command into the core request's command buffer. */
2747 sg_copy_to_buffer(&task->smp_task.smp_req, 1,
2748 &sci_req->smp.cmd,
2749 sizeof(struct smp_req));
2750
2751 status = scic_io_request_construct_smp(sci_req);
2752 if (status != SCI_SUCCESS)
2753 dev_warn(&ireq->isci_host->pdev->dev,
2754 "%s: failed with status = %d\n",
2755 __func__,
2756 status);
2757
2758 return status;
2759 }
2760
2761 /**
2762 * isci_io_request_build() - This function builds the io request object.
2763 * @isci_host: This parameter specifies the ISCI host object
2764 * @request: This parameter points to the isci_request object allocated in the
2765 * request construct function.
2766  * @isci_device: This parameter is the isci remote device object that is
2767  * the destination for this request.
2768 *
2769  * SCI_SUCCESS on successful completion, or specific failure code.
2770 */
2771 static enum sci_status isci_io_request_build(
2772 struct isci_host *isci_host,
2773 struct isci_request *request,
2774 struct isci_remote_device *isci_device)
2775 {
2776 enum sci_status status = SCI_SUCCESS;
2777 struct sas_task *task = isci_request_access_task(request);
2778 struct scic_sds_remote_device *sci_device = &isci_device->sci;
2779
2780 dev_dbg(&isci_host->pdev->dev,
2781 "%s: isci_device = 0x%p; request = %p, "
2782 "num_scatter = %d\n",
2783 __func__,
2784 isci_device,
2785 request,
2786 task->num_scatter);
2787
2788 /* map the sgl addresses, if present.
2789 * libata does the mapping for sata devices
2790 * before we get the request.
2791 */
2792 if (task->num_scatter &&
2793 !sas_protocol_ata(task->task_proto) &&
2794 !(SAS_PROTOCOL_SMP & task->task_proto)) {
2795
2796 request->num_sg_entries = dma_map_sg(
2797 &isci_host->pdev->dev,
2798 task->scatter,
2799 task->num_scatter,
2800 task->data_dir
2801 );
2802
2803 if (request->num_sg_entries == 0)
2804 return SCI_FAILURE_INSUFFICIENT_RESOURCES;
2805 }
2806
2807 /* build the common request object. For now,
2808 * we will let the core allocate the IO tag.
2809 */
2810 status = scic_io_request_construct(&isci_host->sci, sci_device,
2811 SCI_CONTROLLER_INVALID_IO_TAG,
2812 &request->sci);
2813
2814 if (status != SCI_SUCCESS) {
2815 dev_warn(&isci_host->pdev->dev,
2816 "%s: failed request construct\n",
2817 __func__);
2818 return SCI_FAILURE;
2819 }
2820
2821 switch (task->task_proto) {
2822 case SAS_PROTOCOL_SMP:
2823 status = isci_smp_request_build(request);
2824 break;
2825 case SAS_PROTOCOL_SSP:
2826 status = isci_request_ssp_request_construct(request);
2827 break;
2828 case SAS_PROTOCOL_SATA:
2829 case SAS_PROTOCOL_STP:
2830 case SAS_PROTOCOL_SATA | SAS_PROTOCOL_STP:
2831 status = isci_request_stp_request_construct(request);
2832 break;
2833 default:
2834 dev_warn(&isci_host->pdev->dev,
2835 "%s: unknown protocol\n", __func__);
2836 return SCI_FAILURE;
2837 }
2838
2839 return SCI_SUCCESS;
2840 }
2841
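/* The dma_map_sg() call above is balanced at I/O completion time by
 * isci_request_unmap_sgl(). A sketch of the assumed unmap logic, which
 * must mirror the mapping conditions (SATA and SMP requests are never
 * mapped here, so they are never unmapped; the function name below is
 * illustrative):
 */
#if 0
static void example_unmap_sgl(struct isci_request *request,
			      struct pci_dev *pdev)
{
	struct sas_task *task = isci_request_access_task(request);

	if (task->num_scatter && request->num_sg_entries)
		dma_unmap_sg(&pdev->dev, task->scatter,
			     task->num_scatter, task->data_dir);
}
#endif
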
2842 /**
2843 * isci_request_alloc_core() - This function gets the request object from the
2844 * isci_host dma cache.
2845 * @isci_host: This parameter specifies the ISCI host object
2846 * @isci_request: This parameter will contain the pointer to the new
2847 * isci_request object.
2848 * @isci_device: This parameter is the pointer to the isci remote device object
2849 * that is the destination for this request.
2850 * @gfp_flags: This parameter specifies the os allocation flags.
2851 *
2852  * SCI_SUCCESS on successful completion, or specific failure code.
2853 */
2854 static int isci_request_alloc_core(
2855 struct isci_host *isci_host,
2856 struct isci_request **isci_request,
2857 struct isci_remote_device *isci_device,
2858 gfp_t gfp_flags)
2859 {
2860 int ret = 0;
2861 dma_addr_t handle;
2862 struct isci_request *request;
2863
2864
2865 /* get pointer to dma memory. This actually points
2866 	 * to both the isci_request object and the
2867 	 * sci object. The isci object is at the beginning
2868 * of the memory allocated here.
2869 */
2870 request = dma_pool_alloc(isci_host->dma_pool, gfp_flags, &handle);
2871 if (!request) {
2872 dev_warn(&isci_host->pdev->dev,
2873 "%s: dma_pool_alloc returned NULL\n", __func__);
2874 return -ENOMEM;
2875 }
2876
2877 /* initialize the request object. */
2878 spin_lock_init(&request->state_lock);
2879 request->request_daddr = handle;
2880 request->isci_host = isci_host;
2881 request->isci_device = isci_device;
2882 request->io_request_completion = NULL;
2883 request->terminated = false;
2884
2885 request->num_sg_entries = 0;
2886
2887 request->complete_in_target = false;
2888
2889 INIT_LIST_HEAD(&request->completed_node);
2890 INIT_LIST_HEAD(&request->dev_node);
2891
2892 *isci_request = request;
2893 isci_request_change_state(request, allocated);
2894
2895 return ret;
2896 }
2897
2898 static int isci_request_alloc_io(
2899 struct isci_host *isci_host,
2900 struct sas_task *task,
2901 struct isci_request **isci_request,
2902 struct isci_remote_device *isci_device,
2903 gfp_t gfp_flags)
2904 {
2905 int retval = isci_request_alloc_core(isci_host, isci_request,
2906 isci_device, gfp_flags);
2907
2908 if (!retval) {
2909 (*isci_request)->ttype_ptr.io_task_ptr = task;
2910 (*isci_request)->ttype = io_task;
2911
2912 task->lldd_task = *isci_request;
2913 }
2914 return retval;
2915 }
2916
2917 /**
2918 * isci_request_alloc_tmf() - This function gets the request object from the
2919 * isci_host dma cache and initializes the relevant fields as a sas_task.
2920 * @isci_host: This parameter specifies the ISCI host object
2921  * @isci_tmf: This parameter is the isci_tmf struct describing the task
 *    management request.
2922 * @isci_request: This parameter will contain the pointer to the new
2923 * isci_request object.
2924 * @isci_device: This parameter is the pointer to the isci remote device object
2925 * that is the destination for this request.
2926 * @gfp_flags: This parameter specifies the os allocation flags.
2927 *
2928  * SCI_SUCCESS on successful completion, or specific failure code.
2929 */
2930 int isci_request_alloc_tmf(
2931 struct isci_host *isci_host,
2932 struct isci_tmf *isci_tmf,
2933 struct isci_request **isci_request,
2934 struct isci_remote_device *isci_device,
2935 gfp_t gfp_flags)
2936 {
2937 int retval = isci_request_alloc_core(isci_host, isci_request,
2938 isci_device, gfp_flags);
2939
2940 if (!retval) {
2941
2942 (*isci_request)->ttype_ptr.tmf_task_ptr = isci_tmf;
2943 (*isci_request)->ttype = tmf_task;
2944 }
2945 return retval;
2946 }
2947
2948 /**
2949 * isci_request_execute() - This function allocates the isci_request object,
2950  *    and fills in some common fields.
2951 * @isci_host: This parameter specifies the ISCI host object
2952  * @task: This parameter is the task struct from the upper layer driver.
2953 * @isci_request: This parameter will contain the pointer to the new
2954 * isci_request object.
2955 * @gfp_flags: This parameter specifies the os allocation flags.
2956 *
2957 * SCI_SUCCESS on successfull completion, or specific failure code.
2958 */
2959 int isci_request_execute(
2960 struct isci_host *isci_host,
2961 struct sas_task *task,
2962 struct isci_request **isci_request,
2963 gfp_t gfp_flags)
2964 {
2965 int ret = 0;
2966 struct scic_sds_remote_device *sci_device;
2967 enum sci_status status = SCI_FAILURE_UNSUPPORTED_PROTOCOL;
2968 struct isci_remote_device *isci_device;
2969 struct isci_request *request;
2970 unsigned long flags;
2971
2972 isci_device = task->dev->lldd_dev;
2973 sci_device = &isci_device->sci;
2974
2975 /* do common allocation and init of request object. */
2976 ret = isci_request_alloc_io(
2977 isci_host,
2978 task,
2979 &request,
2980 isci_device,
2981 gfp_flags
2982 );
2983
2984 if (ret)
2985 goto out;
2986
2987 status = isci_io_request_build(isci_host, request, isci_device);
2988 if (status != SCI_SUCCESS) {
2989 dev_warn(&isci_host->pdev->dev,
2990 "%s: request_construct failed - status = 0x%x\n",
2991 __func__,
2992 status);
2993 goto out;
2994 }
2995
2996 spin_lock_irqsave(&isci_host->scic_lock, flags);
2997
2998 /* send the request, let the core assign the IO TAG. */
2999 status = scic_controller_start_io(&isci_host->sci, sci_device,
3000 &request->sci,
3001 SCI_CONTROLLER_INVALID_IO_TAG);
3002 if (status != SCI_SUCCESS &&
3003 status != SCI_FAILURE_REMOTE_DEVICE_RESET_REQUIRED) {
3004 dev_warn(&isci_host->pdev->dev,
3005 "%s: failed request start (0x%x)\n",
3006 __func__, status);
3007 spin_unlock_irqrestore(&isci_host->scic_lock, flags);
3008 goto out;
3009 }
3010
3011 /* Either I/O started OK, or the core has signaled that
3012 * the device needs a target reset.
3013 *
3014 * In either case, hold onto the I/O for later.
3015 *
3016 	 * Update its status and add it to the list in the
3017 * remote device object.
3018 */
3019 isci_request_change_state(request, started);
3020 list_add(&request->dev_node, &isci_device->reqs_in_process);
3021
3022 if (status == SCI_SUCCESS) {
3023 /* Save the tag for possible task mgmt later. */
3024 request->io_tag = request->sci.io_tag;
3025 } else {
3026 /* The request did not really start in the
3027 * hardware, so clear the request handle
3028 * here so no terminations will be done.
3029 */
3030 request->terminated = true;
3031 }
3032 spin_unlock_irqrestore(&isci_host->scic_lock, flags);
3033
3034 if (status ==
3035 SCI_FAILURE_REMOTE_DEVICE_RESET_REQUIRED) {
3036 /* Signal libsas that we need the SCSI error
3037 * handler thread to work on this I/O and that
3038 * we want a device reset.
3039 */
3040 spin_lock_irqsave(&task->task_state_lock, flags);
3041 task->task_state_flags |= SAS_TASK_NEED_DEV_RESET;
3042 spin_unlock_irqrestore(&task->task_state_lock, flags);
3043
3044 /* Cause this task to be scheduled in the SCSI error
3045 * handler thread.
3046 */
3047 isci_execpath_callback(isci_host, task,
3048 sas_task_abort);
3049
3050 /* Change the status, since we are holding
3051 * the I/O until it is managed by the SCSI
3052 * error handler.
3053 */
3054 status = SCI_SUCCESS;
3055 }
3056
3057 out:
3058 if (status != SCI_SUCCESS) {
3059 /* release dma memory on failure. */
3060 isci_request_free(isci_host, request);
3061 request = NULL;
3062 ret = SCI_FAILURE;
3063 }
3064
3065 *isci_request = request;
3066 return ret;
3067 }
3068
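/* A sketch of how the LLDD execute-task path is assumed to call into
 * this function (the real caller lives in task.c; the function name
 * below is illustrative):
 */
#if 0
static int example_execute_one_task(struct isci_host *ihost,
				    struct sas_task *task, gfp_t gfp_flags)
{
	struct isci_request *request;
	int ret;

	ret = isci_request_execute(ihost, task, &request, gfp_flags);
	if (ret)
		/* On failure the request memory was already freed. */
		return ret;

	/* On success, request now tracks the in-flight I/O. */
	return 0;
}
#endif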
3069
3070