isci: unify request abort handlers
drivers/scsi/isci/request.c

/*
 * This file is provided under a dual BSD/GPLv2 license. When using or
 * redistributing this file, you may do so under either license.
 *
 * GPL LICENSE SUMMARY
 *
 * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of version 2 of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
 * The full GNU General Public License is included in this distribution
 * in the file called LICENSE.GPL.
 *
 * BSD LICENSE
 *
 * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 *   * Redistributions of source code must retain the above copyright
 *     notice, this list of conditions and the following disclaimer.
 *   * Redistributions in binary form must reproduce the above copyright
 *     notice, this list of conditions and the following disclaimer in
 *     the documentation and/or other materials provided with the
 *     distribution.
 *   * Neither the name of Intel Corporation nor the names of its
 *     contributors may be used to endorse or promote products derived
 *     from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include "isci.h"
#include "task.h"
#include "request.h"
#include "sata.h"
#include "scu_completion_codes.h"
#include "scu_event_codes.h"
#include "sas.h"

/**
 * This method returns the sgl element pair for the specified sgl_pair index.
 * @sci_req: This parameter specifies the IO request for which to retrieve
 *    the Scatter-Gather List element pair.
 * @sgl_pair_index: This parameter specifies the index into the SGL element
 *    pair to be retrieved.
 *
 * This method returns a pointer to a struct scu_sgl_element_pair.
 */
static struct scu_sgl_element_pair *scic_sds_request_get_sgl_element_pair(
	struct scic_sds_request *sci_req,
	u32 sgl_pair_index
	) {
	struct scu_task_context *task_context;

	task_context = (struct scu_task_context *)sci_req->task_context_buffer;

	if (sgl_pair_index == 0) {
		return &task_context->sgl_pair_ab;
	} else if (sgl_pair_index == 1) {
		return &task_context->sgl_pair_cd;
	}

	return &sci_req->sg_table[sgl_pair_index - 2];
}

/**
 * This function will build the SGL list for an IO request.
 * @sds_request: This parameter specifies the IO request for which to build
 *    the Scatter-Gather List.
 *
 */
static void scic_sds_request_build_sgl(struct scic_sds_request *sds_request)
{
	struct isci_request *isci_request = sci_req_to_ireq(sds_request);
	struct isci_host *isci_host = isci_request->isci_host;
	struct sas_task *task = isci_request_access_task(isci_request);
	struct scatterlist *sg = NULL;
	dma_addr_t dma_addr;
	u32 sg_idx = 0;
	struct scu_sgl_element_pair *scu_sg = NULL;
	struct scu_sgl_element_pair *prev_sg = NULL;

	if (task->num_scatter > 0) {
		sg = task->scatter;

		while (sg) {
			scu_sg = scic_sds_request_get_sgl_element_pair(
					sds_request,
					sg_idx);

			SCU_SGL_COPY(scu_sg->A, sg);

			sg = sg_next(sg);

			if (sg) {
				SCU_SGL_COPY(scu_sg->B, sg);
				sg = sg_next(sg);
			} else
				SCU_SGL_ZERO(scu_sg->B);

			if (prev_sg) {
				dma_addr =
					scic_io_request_get_dma_addr(
							sds_request,
							scu_sg);

				prev_sg->next_pair_upper =
					upper_32_bits(dma_addr);
				prev_sg->next_pair_lower =
					lower_32_bits(dma_addr);
			}

			prev_sg = scu_sg;
			sg_idx++;
		}
	} else {	/* handle when no sg */
		scu_sg = scic_sds_request_get_sgl_element_pair(sds_request,
							       sg_idx);

		dma_addr = dma_map_single(&isci_host->pdev->dev,
					  task->scatter,
					  task->total_xfer_len,
					  task->data_dir);

		isci_request->zero_scatter_daddr = dma_addr;

		scu_sg->A.length = task->total_xfer_len;
		scu_sg->A.address_upper = upper_32_bits(dma_addr);
		scu_sg->A.address_lower = lower_32_bits(dma_addr);
	}

	if (scu_sg) {
		scu_sg->next_pair_upper = 0;
		scu_sg->next_pair_lower = 0;
	}
}

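/*
 * Illustrative note (not from the original source): the hardware walks the
 * SGL as a linked list of element *pairs*.  Pairs 0 and 1 live inside the
 * task context itself (sgl_pair_ab/sgl_pair_cd); pair N >= 2 lives in
 * sci_req->sg_table[N - 2].  For a five-element scatterlist the layout
 * built above would be, conceptually:
 *
 *   TC.sgl_pair_ab  { A = sg0, B = sg1,  next_pair -> TC.sgl_pair_cd }
 *   TC.sgl_pair_cd  { A = sg2, B = sg3,  next_pair -> sg_table[0]    }
 *   sg_table[0]     { A = sg4, B = zero, next_pair = 0               }
 */
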
static void scic_sds_io_request_build_ssp_command_iu(struct scic_sds_request *sci_req)
{
	struct ssp_cmd_iu *cmd_iu;
	struct isci_request *ireq = sci_req_to_ireq(sci_req);
	struct sas_task *task = isci_request_access_task(ireq);

	cmd_iu = &sci_req->ssp.cmd;

	memcpy(cmd_iu->LUN, task->ssp_task.LUN, 8);
	cmd_iu->add_cdb_len = 0;
	cmd_iu->_r_a = 0;
	cmd_iu->_r_b = 0;
	cmd_iu->en_fburst = 0; /* unsupported */
	cmd_iu->task_prio = task->ssp_task.task_prio;
	cmd_iu->task_attr = task->ssp_task.task_attr;
	cmd_iu->_r_c = 0;

	sci_swab32_cpy(&cmd_iu->cdb, task->ssp_task.cdb,
		       sizeof(task->ssp_task.cdb) / sizeof(u32));
}

static void scic_sds_task_request_build_ssp_task_iu(struct scic_sds_request *sci_req)
{
	struct ssp_task_iu *task_iu;
	struct isci_request *ireq = sci_req_to_ireq(sci_req);
	struct sas_task *task = isci_request_access_task(ireq);
	struct isci_tmf *isci_tmf = isci_request_access_tmf(ireq);

	task_iu = &sci_req->ssp.tmf;

	memset(task_iu, 0, sizeof(struct ssp_task_iu));

	memcpy(task_iu->LUN, task->ssp_task.LUN, 8);

	task_iu->task_func = isci_tmf->tmf_code;
	task_iu->task_tag =
		(ireq->ttype == tmf_task) ?
		isci_tmf->io_tag :
		SCI_CONTROLLER_INVALID_IO_TAG;
}

/**
 * This method will fill in the SCU Task Context for any type of SSP request.
 * @sds_request:
 * @task_context:
 *
 */
static void scu_ssp_reqeust_construct_task_context(
	struct scic_sds_request *sds_request,
	struct scu_task_context *task_context)
{
	dma_addr_t dma_addr;
	struct scic_sds_controller *controller;
	struct scic_sds_remote_device *target_device;
	struct scic_sds_port *target_port;

	controller = scic_sds_request_get_controller(sds_request);
	target_device = scic_sds_request_get_device(sds_request);
	target_port = scic_sds_request_get_port(sds_request);

	/* Fill in the TC with its required data */
	task_context->abort = 0;
	task_context->priority = 0;
	task_context->initiator_request = 1;
	task_context->connection_rate = target_device->connection_rate;
	task_context->protocol_engine_index =
		scic_sds_controller_get_protocol_engine_group(controller);
	task_context->logical_port_index =
		scic_sds_port_get_index(target_port);
	task_context->protocol_type = SCU_TASK_CONTEXT_PROTOCOL_SSP;
	task_context->valid = SCU_TASK_CONTEXT_VALID;
	task_context->context_type = SCU_TASK_CONTEXT_TYPE;

	task_context->remote_node_index =
		scic_sds_remote_device_get_index(sds_request->target_device);
	task_context->command_code = 0;

	task_context->link_layer_control = 0;
	task_context->do_not_dma_ssp_good_response = 1;
	task_context->strict_ordering = 0;
	task_context->control_frame = 0;
	task_context->timeout_enable = 0;
	task_context->block_guard_enable = 0;

	task_context->address_modifier = 0;

	/* task_context->type.ssp.tag = sci_req->io_tag; */
	task_context->task_phase = 0x01;

	if (sds_request->was_tag_assigned_by_user) {
		/*
		 * Build the task context now since we have already read
		 * the data
		 */
		sds_request->post_context =
			(SCU_CONTEXT_COMMAND_REQUEST_TYPE_POST_TC |
			 (scic_sds_controller_get_protocol_engine_group(
							controller) <<
			  SCU_CONTEXT_COMMAND_PROTOCOL_ENGINE_GROUP_SHIFT) |
			 (scic_sds_port_get_index(target_port) <<
			  SCU_CONTEXT_COMMAND_LOGICAL_PORT_SHIFT) |
			 scic_sds_io_tag_get_index(sds_request->io_tag));
	} else {
		/*
		 * Build the task context now since we have already read
		 * the data
		 *
		 * I/O tag index is not assigned because we have to wait
		 * until we get a TCi
		 */
		sds_request->post_context =
			(SCU_CONTEXT_COMMAND_REQUEST_TYPE_POST_TC |
			 (scic_sds_controller_get_protocol_engine_group(
							controller) <<
			  SCU_CONTEXT_COMMAND_PROTOCOL_ENGINE_GROUP_SHIFT) |
			 (scic_sds_port_get_index(target_port) <<
			  SCU_CONTEXT_COMMAND_LOGICAL_PORT_SHIFT));
	}

	/*
	 * Copy the physical address for the command buffer to the
	 * SCU Task Context
	 */
	dma_addr = scic_io_request_get_dma_addr(sds_request,
						&sds_request->ssp.cmd);

	task_context->command_iu_upper = upper_32_bits(dma_addr);
	task_context->command_iu_lower = lower_32_bits(dma_addr);

	/*
	 * Copy the physical address for the response buffer to the
	 * SCU Task Context
	 */
	dma_addr = scic_io_request_get_dma_addr(sds_request,
						&sds_request->ssp.rsp);

	task_context->response_iu_upper = upper_32_bits(dma_addr);
	task_context->response_iu_lower = lower_32_bits(dma_addr);
}

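/*
 * Illustrative note (not from the original source): post_context is the
 * 32-bit value later written to the controller's post queue to start the
 * request.  As built above it packs the request type with the routing
 * information, conceptually:
 *
 *   post_context = POST_TC
 *                | (protocol engine group << PE_GROUP_SHIFT)
 *                | (logical port index    << LOGICAL_PORT_SHIFT)
 *                | TCi  (task context index; OR'ed in at start time
 *                        when the tag was not assigned by the user)
 */
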
/**
 * This method will fill in the SCU Task Context for an SSP IO request.
 * @sci_req:
 *
 */
static void scu_ssp_io_request_construct_task_context(
	struct scic_sds_request *sci_req,
	enum dma_data_direction dir,
	u32 len)
{
	struct scu_task_context *task_context;

	task_context = scic_sds_request_get_task_context(sci_req);

	scu_ssp_reqeust_construct_task_context(sci_req, task_context);

	task_context->ssp_command_iu_length =
		sizeof(struct ssp_cmd_iu) / sizeof(u32);
	task_context->type.ssp.frame_type = SSP_COMMAND;

	switch (dir) {
	case DMA_FROM_DEVICE:
	case DMA_NONE:
	default:
		task_context->task_type = SCU_TASK_TYPE_IOREAD;
		break;
	case DMA_TO_DEVICE:
		task_context->task_type = SCU_TASK_TYPE_IOWRITE;
		break;
	}

	task_context->transfer_length_bytes = len;

	if (task_context->transfer_length_bytes > 0)
		scic_sds_request_build_sgl(sci_req);
}

/**
 * This method will fill in the SCU Task Context for an SSP Task request.
 * The following important settings are utilized:
 *   -# priority == SCU_TASK_PRIORITY_HIGH.  This ensures that the task
 *      request is issued ahead of other tasks destined for the same
 *      Remote Node.
 *   -# task_type == SCU_TASK_TYPE_IOREAD.  This simply indicates that a
 *      normal request type (i.e. non-raw frame) is being utilized to
 *      perform task management.
 *   -# control_frame == 1.  This ensures that the proper endianness is set
 *      so that the bytes are transmitted in the right order for a task
 *      frame.
 * @sci_req: This parameter specifies the task request object being
 *    constructed.
 *
 */
static void scu_ssp_task_request_construct_task_context(
	struct scic_sds_request *sci_req)
{
	struct scu_task_context *task_context;

	task_context = scic_sds_request_get_task_context(sci_req);

	scu_ssp_reqeust_construct_task_context(sci_req, task_context);

	task_context->control_frame = 1;
	task_context->priority = SCU_TASK_PRIORITY_HIGH;
	task_context->task_type = SCU_TASK_TYPE_RAW_FRAME;
	task_context->transfer_length_bytes = 0;
	task_context->type.ssp.frame_type = SSP_TASK;
	task_context->ssp_command_iu_length =
		sizeof(struct ssp_task_iu) / sizeof(u32);
}

/**
 * This method will fill in the SCU Task Context for any type of SATA
 * request.  This is called from the various SATA constructors.
 * @sci_req: The general IO request object which is to be used in
 *    constructing the SCU task context.
 * @task_context: The buffer pointer for the SCU task context which is being
 *    constructed.
 *
 * The general io request construction is complete. The buffer assignment for
 * the command buffer is complete.  Revisit task context construction to
 * determine what is common for SSP/SMP/STP task context structures.
 */
static void scu_sata_reqeust_construct_task_context(
	struct scic_sds_request *sci_req,
	struct scu_task_context *task_context)
{
	dma_addr_t dma_addr;
	struct scic_sds_controller *controller;
	struct scic_sds_remote_device *target_device;
	struct scic_sds_port *target_port;

	controller = scic_sds_request_get_controller(sci_req);
	target_device = scic_sds_request_get_device(sci_req);
	target_port = scic_sds_request_get_port(sci_req);

	/* Fill in the TC with its required data */
	task_context->abort = 0;
	task_context->priority = SCU_TASK_PRIORITY_NORMAL;
	task_context->initiator_request = 1;
	task_context->connection_rate = target_device->connection_rate;
	task_context->protocol_engine_index =
		scic_sds_controller_get_protocol_engine_group(controller);
	task_context->logical_port_index =
		scic_sds_port_get_index(target_port);
	task_context->protocol_type = SCU_TASK_CONTEXT_PROTOCOL_STP;
	task_context->valid = SCU_TASK_CONTEXT_VALID;
	task_context->context_type = SCU_TASK_CONTEXT_TYPE;

	task_context->remote_node_index =
		scic_sds_remote_device_get_index(sci_req->target_device);
	task_context->command_code = 0;

	task_context->link_layer_control = 0;
	task_context->do_not_dma_ssp_good_response = 1;
	task_context->strict_ordering = 0;
	task_context->control_frame = 0;
	task_context->timeout_enable = 0;
	task_context->block_guard_enable = 0;

	task_context->address_modifier = 0;
	task_context->task_phase = 0x01;

	task_context->ssp_command_iu_length =
		(sizeof(struct host_to_dev_fis) - sizeof(u32)) / sizeof(u32);

	/* Set the first word of the H2D REG FIS */
	task_context->type.words[0] = *(u32 *)&sci_req->stp.cmd;

	if (sci_req->was_tag_assigned_by_user) {
		/*
		 * Build the task context now since we have already read
		 * the data
		 */
		sci_req->post_context =
			(SCU_CONTEXT_COMMAND_REQUEST_TYPE_POST_TC |
			 (scic_sds_controller_get_protocol_engine_group(
							controller) <<
			  SCU_CONTEXT_COMMAND_PROTOCOL_ENGINE_GROUP_SHIFT) |
			 (scic_sds_port_get_index(target_port) <<
			  SCU_CONTEXT_COMMAND_LOGICAL_PORT_SHIFT) |
			 scic_sds_io_tag_get_index(sci_req->io_tag));
	} else {
		/*
		 * Build the task context now since we have already read
		 * the data.
		 * I/O tag index is not assigned because we have to wait
		 * until we get a TCi.
		 */
		sci_req->post_context =
			(SCU_CONTEXT_COMMAND_REQUEST_TYPE_POST_TC |
			 (scic_sds_controller_get_protocol_engine_group(
							controller) <<
			  SCU_CONTEXT_COMMAND_PROTOCOL_ENGINE_GROUP_SHIFT) |
			 (scic_sds_port_get_index(target_port) <<
			  SCU_CONTEXT_COMMAND_LOGICAL_PORT_SHIFT));
	}

	/*
	 * Copy the physical address for the command buffer to the SCU Task
	 * Context.  We must offset the command buffer by 4 bytes because the
	 * first 4 bytes are transferred in the body of the TC.
	 */
	dma_addr = scic_io_request_get_dma_addr(sci_req,
						((char *) &sci_req->stp.cmd) +
						sizeof(u32));

	task_context->command_iu_upper = upper_32_bits(dma_addr);
	task_context->command_iu_lower = lower_32_bits(dma_addr);

	/* SATA Requests do not have a response buffer */
	task_context->response_iu_upper = 0;
	task_context->response_iu_lower = 0;
}

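/*
 * Illustrative note (not from the original source, and assuming the usual
 * five-dword/20-byte host-to-device register FIS): dword 0 of the FIS
 * (FIS type, flags, command, features) rides inside the task context via
 * type.words[0] above, so only the remaining dwords are fetched by DMA.
 * Hence ssp_command_iu_length works out to (20 - 4) / 4 == 4 dwords, and
 * the command buffer address is offset by sizeof(u32).
 */
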
/**
 * scu_stp_raw_request_construct_task_context -
 * @stp_req: This parameter specifies the STP request object for which to
 *    construct a RAW command frame task context.
 * @task_context: This parameter specifies the SCU specific task context buffer
 *    to construct.
 *
 * This method performs the operations common to all SATA/STP requests
 * utilizing the raw frame method.
 */
static void scu_stp_raw_request_construct_task_context(struct scic_sds_stp_request *stp_req,
						       struct scu_task_context *task_context)
{
	struct scic_sds_request *sci_req = to_sci_req(stp_req);

	scu_sata_reqeust_construct_task_context(sci_req, task_context);

	task_context->control_frame = 0;
	task_context->priority = SCU_TASK_PRIORITY_NORMAL;
	task_context->task_type = SCU_TASK_TYPE_SATA_RAW_FRAME;
	task_context->type.stp.fis_type = FIS_REGH2D;
	task_context->transfer_length_bytes = sizeof(struct host_to_dev_fis) - sizeof(u32);
}

static enum sci_status
scic_sds_stp_pio_request_construct(struct scic_sds_request *sci_req,
				   bool copy_rx_frame)
{
	struct scic_sds_stp_request *stp_req = &sci_req->stp.req;
	struct scic_sds_stp_pio_request *pio = &stp_req->type.pio;

	scu_stp_raw_request_construct_task_context(stp_req,
						   sci_req->task_context_buffer);

	pio->current_transfer_bytes = 0;
	pio->ending_error = 0;
	pio->ending_status = 0;

	pio->request_current.sgl_offset = 0;
	pio->request_current.sgl_set = SCU_SGL_ELEMENT_PAIR_A;

	if (copy_rx_frame) {
		scic_sds_request_build_sgl(sci_req);
		/* Since the IO request copy of the TC contains the same data
		 * as the actual TC this pointer is valid for either.
		 */
		pio->request_current.sgl_pair = &sci_req->task_context_buffer->sgl_pair_ab;
	} else {
		/* The user does not want the data copied to the SGL buffer location */
		pio->request_current.sgl_pair = NULL;
	}

	return SCI_SUCCESS;
}

/**
 *
 * @sci_req: This parameter specifies the request to be constructed as an
 *    optimized request.
 * @optimized_task_type: This parameter specifies whether the request is to be
 *    an UDMA request or an NCQ request.  - A value of 0 indicates UDMA.  - A
 *    value of 1 indicates NCQ.
 *
 * This method performs request construction common to all types of STP
 * requests that are optimized by the silicon (i.e. UDMA, NCQ).
 */
static void scic_sds_stp_optimized_request_construct(struct scic_sds_request *sci_req,
						     u8 optimized_task_type,
						     u32 len,
						     enum dma_data_direction dir)
{
	struct scu_task_context *task_context = sci_req->task_context_buffer;

	/* Build the STP task context structure */
	scu_sata_reqeust_construct_task_context(sci_req, task_context);

	/* Copy over the SGL elements */
	scic_sds_request_build_sgl(sci_req);

	/* Copy over the number of bytes to be transferred */
	task_context->transfer_length_bytes = len;

	if (dir == DMA_TO_DEVICE) {
		/*
		 * The difference between the DMA IN and DMA OUT request task
		 * type values is consistent with the difference between FPDMA
		 * READ and FPDMA WRITE values.  Add the supplied task type
		 * parameter to this difference to set the task type properly
		 * for this DATA OUT (WRITE) case. */
		task_context->task_type = optimized_task_type + (SCU_TASK_TYPE_DMA_OUT
								 - SCU_TASK_TYPE_DMA_IN);
	} else {
		/*
		 * For the DATA IN (READ) case, simply save the supplied
		 * optimized task type. */
		task_context->task_type = optimized_task_type;
	}
}

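/*
 * Illustrative note (not from the original source): the read-to-write
 * adjustment above relies on the SCU task-type encodings sharing a fixed
 * stride, i.e. it assumes
 *
 *   (FPDMA write task type) - (FPDMA read task type) ==
 *   SCU_TASK_TYPE_DMA_OUT   - SCU_TASK_TYPE_DMA_IN
 *
 * so passing SCU_TASK_TYPE_FPDMAQ_READ (NCQ) or SCU_TASK_TYPE_DMA_IN
 * (UDMA) yields the matching write task type for the DMA_TO_DEVICE case.
 */
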
static enum sci_status
scic_io_request_construct_sata(struct scic_sds_request *sci_req,
			       u32 len,
			       enum dma_data_direction dir,
			       bool copy)
{
	struct isci_request *ireq = sci_req_to_ireq(sci_req);
	struct sas_task *task = isci_request_access_task(ireq);

	/* check for management protocols */
	if (ireq->ttype == tmf_task) {
		struct isci_tmf *tmf = isci_request_access_tmf(ireq);

		if (tmf->tmf_code == isci_tmf_sata_srst_high ||
		    tmf->tmf_code == isci_tmf_sata_srst_low) {
			scu_stp_raw_request_construct_task_context(&sci_req->stp.req,
								   sci_req->task_context_buffer);
			return SCI_SUCCESS;
		} else {
			dev_err(scic_to_dev(sci_req->owning_controller),
				"%s: Request 0x%p received un-handled SAT "
				"management protocol 0x%x.\n",
				__func__, sci_req, tmf->tmf_code);

			return SCI_FAILURE;
		}
	}

	if (!sas_protocol_ata(task->task_proto)) {
		dev_err(scic_to_dev(sci_req->owning_controller),
			"%s: Non-ATA protocol in SATA path: 0x%x\n",
			__func__,
			task->task_proto);
		return SCI_FAILURE;
	}

	/* non data */
	if (task->data_dir == DMA_NONE) {
		scu_stp_raw_request_construct_task_context(&sci_req->stp.req,
							   sci_req->task_context_buffer);
		return SCI_SUCCESS;
	}

	/* NCQ */
	if (task->ata_task.use_ncq) {
		scic_sds_stp_optimized_request_construct(sci_req,
							 SCU_TASK_TYPE_FPDMAQ_READ,
							 len, dir);
		return SCI_SUCCESS;
	}

	/* DMA */
	if (task->ata_task.dma_xfer) {
		scic_sds_stp_optimized_request_construct(sci_req,
							 SCU_TASK_TYPE_DMA_IN,
							 len, dir);
		return SCI_SUCCESS;
	}

	/* PIO */
	return scic_sds_stp_pio_request_construct(sci_req, copy);
}

static enum sci_status scic_io_request_construct_basic_ssp(struct scic_sds_request *sci_req)
{
	struct isci_request *ireq = sci_req_to_ireq(sci_req);
	struct sas_task *task = isci_request_access_task(ireq);

	sci_req->protocol = SCIC_SSP_PROTOCOL;

	scu_ssp_io_request_construct_task_context(sci_req,
						  task->data_dir,
						  task->total_xfer_len);

	scic_sds_io_request_build_ssp_command_iu(sci_req);

	sci_base_state_machine_change_state(&sci_req->state_machine,
					    SCI_BASE_REQUEST_STATE_CONSTRUCTED);

	return SCI_SUCCESS;
}

enum sci_status scic_task_request_construct_ssp(
	struct scic_sds_request *sci_req)
{
	/* Construct the SSP Task SCU Task Context */
	scu_ssp_task_request_construct_task_context(sci_req);

	/* Fill in the SSP Task IU */
	scic_sds_task_request_build_ssp_task_iu(sci_req);

	sci_base_state_machine_change_state(&sci_req->state_machine,
					    SCI_BASE_REQUEST_STATE_CONSTRUCTED);

	return SCI_SUCCESS;
}

static enum sci_status scic_io_request_construct_basic_sata(struct scic_sds_request *sci_req)
{
	enum sci_status status;
	struct scic_sds_stp_request *stp_req;
	bool copy = false;
	struct isci_request *isci_request = sci_req_to_ireq(sci_req);
	struct sas_task *task = isci_request_access_task(isci_request);

	stp_req = &sci_req->stp.req;
	sci_req->protocol = SCIC_STP_PROTOCOL;

	copy = (task->data_dir == DMA_NONE) ? false : true;

	status = scic_io_request_construct_sata(sci_req,
						task->total_xfer_len,
						task->data_dir,
						copy);

	if (status == SCI_SUCCESS)
		sci_base_state_machine_change_state(&sci_req->state_machine,
						    SCI_BASE_REQUEST_STATE_CONSTRUCTED);

	return status;
}

enum sci_status scic_task_request_construct_sata(struct scic_sds_request *sci_req)
{
	enum sci_status status = SCI_SUCCESS;
	struct isci_request *ireq = sci_req_to_ireq(sci_req);

	/* check for management protocols */
	if (ireq->ttype == tmf_task) {
		struct isci_tmf *tmf = isci_request_access_tmf(ireq);

		if (tmf->tmf_code == isci_tmf_sata_srst_high ||
		    tmf->tmf_code == isci_tmf_sata_srst_low) {
			scu_stp_raw_request_construct_task_context(&sci_req->stp.req,
								   sci_req->task_context_buffer);
		} else {
			dev_err(scic_to_dev(sci_req->owning_controller),
				"%s: Request 0x%p received un-handled SAT "
				"Protocol 0x%x.\n",
				__func__, sci_req, tmf->tmf_code);

			return SCI_FAILURE;
		}
	}

	if (status != SCI_SUCCESS)
		return status;
	sci_base_state_machine_change_state(&sci_req->state_machine,
					    SCI_BASE_REQUEST_STATE_CONSTRUCTED);

	return status;
}

/**
 * sci_req_tx_bytes - bytes transferred when reply underruns request
 * @sci_req: request that was terminated early
 */
#define SCU_TASK_CONTEXT_SRAM 0x200000
static u32 sci_req_tx_bytes(struct scic_sds_request *sci_req)
{
	struct scic_sds_controller *scic = sci_req->owning_controller;
	u32 ret_val = 0;

	if (readl(&scic->smu_registers->address_modifier) == 0) {
		void __iomem *scu_reg_base = scic->scu_registers;

		/* get the bytes of data from the Address == BAR1 + 20002Ch + (256*TCi) where
		 * BAR1 is the scu_registers
		 * 0x20002C = 0x200000 + 0x2c
		 *	    = start of task context SRAM + offset of (type.ssp.data_offset)
		 * TCi is the io_tag of struct scic_sds_request
		 */
		ret_val = readl(scu_reg_base +
				(SCU_TASK_CONTEXT_SRAM + offsetof(struct scu_task_context, type.ssp.data_offset)) +
				((sizeof(struct scu_task_context)) * scic_sds_io_tag_get_index(sci_req->io_tag)));
	}

	return ret_val;
}

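/*
 * Illustrative example (not from the original source): with 256-byte task
 * contexts and data_offset at byte 0x2c of the TC, as the comment above
 * describes, the readl() for TCi == 3 resolves to
 *
 *   scu_reg_base + 0x200000 + 0x2c + (256 * 3) == scu_reg_base + 0x20032c
 *
 * i.e. the data_offset field of the fourth task context image in SRAM.
 */
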
enum sci_status
scic_sds_request_start(struct scic_sds_request *request)
{
	if (request->device_sequence !=
	    scic_sds_remote_device_get_sequence(request->target_device))
		return SCI_FAILURE;

	if (request->state_handlers->start_handler)
		return request->state_handlers->start_handler(request);

	dev_warn(scic_to_dev(request->owning_controller),
		 "%s: SCIC IO Request requested to start while in wrong "
		 "state %d\n",
		 __func__,
		 sci_base_state_machine_get_state(&request->state_machine));

	return SCI_FAILURE_INVALID_STATE;
}

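/*
 * Illustrative note (not from the original source): the device_sequence
 * comparison above appears to guard against starting a stale request; if
 * the remote device has been stopped and restarted since this request was
 * constructed, its sequence count will have advanced and the request is
 * rejected rather than posted against the wrong device incarnation.
 */
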
enum sci_status
scic_sds_io_request_terminate(struct scic_sds_request *sci_req)
{
	enum sci_base_request_states state;

	state = sci_req->state_machine.current_state_id;

	switch (state) {
	case SCI_BASE_REQUEST_STATE_CONSTRUCTED:
		scic_sds_request_set_status(sci_req,
					    SCU_TASK_DONE_TASK_ABORT,
					    SCI_FAILURE_IO_TERMINATED);

		sci_base_state_machine_change_state(&sci_req->state_machine,
						    SCI_BASE_REQUEST_STATE_COMPLETED);
		return SCI_SUCCESS;
	case SCI_BASE_REQUEST_STATE_STARTED:
	case SCIC_SDS_IO_REQUEST_STARTED_TASK_MGMT_SUBSTATE_AWAIT_TC_COMPLETION:
	case SCIC_SDS_SMP_REQUEST_STARTED_SUBSTATE_AWAIT_RESPONSE:
	case SCIC_SDS_SMP_REQUEST_STARTED_SUBSTATE_AWAIT_TC_COMPLETION:
	case SCIC_SDS_STP_REQUEST_STARTED_UDMA_AWAIT_TC_COMPLETION_SUBSTATE:
	case SCIC_SDS_STP_REQUEST_STARTED_UDMA_AWAIT_D2H_REG_FIS_SUBSTATE:
	case SCIC_SDS_STP_REQUEST_STARTED_NON_DATA_AWAIT_H2D_COMPLETION_SUBSTATE:
	case SCIC_SDS_STP_REQUEST_STARTED_NON_DATA_AWAIT_D2H_SUBSTATE:
	case SCIC_SDS_STP_REQUEST_STARTED_PIO_AWAIT_H2D_COMPLETION_SUBSTATE:
	case SCIC_SDS_STP_REQUEST_STARTED_PIO_AWAIT_FRAME_SUBSTATE:
	case SCIC_SDS_STP_REQUEST_STARTED_PIO_DATA_IN_AWAIT_DATA_SUBSTATE:
	case SCIC_SDS_STP_REQUEST_STARTED_PIO_DATA_OUT_TRANSMIT_DATA_SUBSTATE:
	case SCIC_SDS_STP_REQUEST_STARTED_SOFT_RESET_AWAIT_H2D_ASSERTED_COMPLETION_SUBSTATE:
	case SCIC_SDS_STP_REQUEST_STARTED_SOFT_RESET_AWAIT_H2D_DIAGNOSTIC_COMPLETION_SUBSTATE:
	case SCIC_SDS_STP_REQUEST_STARTED_SOFT_RESET_AWAIT_D2H_RESPONSE_FRAME_SUBSTATE:
		sci_base_state_machine_change_state(&sci_req->state_machine,
						    SCI_BASE_REQUEST_STATE_ABORTING);
		return SCI_SUCCESS;
	case SCIC_SDS_IO_REQUEST_STARTED_TASK_MGMT_SUBSTATE_AWAIT_TC_RESPONSE:
		sci_base_state_machine_change_state(&sci_req->state_machine,
						    SCI_BASE_REQUEST_STATE_ABORTING);
		sci_base_state_machine_change_state(&sci_req->state_machine,
						    SCI_BASE_REQUEST_STATE_COMPLETED);
		return SCI_SUCCESS;
	case SCI_BASE_REQUEST_STATE_ABORTING:
		sci_base_state_machine_change_state(&sci_req->state_machine,
						    SCI_BASE_REQUEST_STATE_COMPLETED);
		return SCI_SUCCESS;
	case SCI_BASE_REQUEST_STATE_COMPLETED:
	default:
		dev_warn(scic_to_dev(sci_req->owning_controller),
			 "%s: SCIC IO Request requested to abort while in wrong "
			 "state %d\n",
			 __func__,
			 sci_base_state_machine_get_state(&sci_req->state_machine));
		break;
	}

	return SCI_FAILURE_INVALID_STATE;
}

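/*
 * Illustrative note (not from the original source): the switch above is
 * the unified abort path named in the commit title.  Every started
 * substate simply moves to ABORTING and waits for the TC abort
 * completion; the task-management AWAIT_TC_RESPONSE case has no
 * outstanding TC to wait on, so it passes through ABORTING straight to
 * COMPLETED.
 */
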
enum sci_status scic_sds_io_request_event_handler(
	struct scic_sds_request *request,
	u32 event_code)
{
	if (request->state_handlers->event_handler)
		return request->state_handlers->event_handler(request, event_code);

	dev_warn(scic_to_dev(request->owning_controller),
		 "%s: SCIC IO Request given event code notification %x while "
		 "in wrong state %d\n",
		 __func__,
		 event_code,
		 sci_base_state_machine_get_state(&request->state_machine));

	return SCI_FAILURE_INVALID_STATE;
}

/**
 *
 * @request: The SCIC_SDS_IO_REQUEST_T object for which the frame handling
 *    operation is to be executed.
 * @frame_index: The frame index returned by the hardware for the request
 *    object.
 *
 * This method invokes the core state frame handler for the
 * SCIC_SDS_IO_REQUEST_T object.  Returns an enum sci_status.
 */
enum sci_status scic_sds_io_request_frame_handler(
	struct scic_sds_request *request,
	u32 frame_index)
{
	if (request->state_handlers->frame_handler)
		return request->state_handlers->frame_handler(request, frame_index);

	dev_warn(scic_to_dev(request->owning_controller),
		 "%s: SCIC IO Request given unexpected frame %x while in "
		 "state %d\n",
		 __func__,
		 frame_index,
		 sci_base_state_machine_get_state(&request->state_machine));

	scic_sds_controller_release_frame(request->owning_controller, frame_index);
	return SCI_FAILURE_INVALID_STATE;
}

/*
 * This function copies response data for requests returning response data
 * instead of sense data.
 * @sci_req: This parameter specifies the request object for which to copy
 *    the response data.
 */
static void scic_sds_io_request_copy_response(struct scic_sds_request *sci_req)
{
	void *resp_buf;
	u32 len;
	struct ssp_response_iu *ssp_response;
	struct isci_request *ireq = sci_req_to_ireq(sci_req);
	struct isci_tmf *isci_tmf = isci_request_access_tmf(ireq);

	ssp_response = &sci_req->ssp.rsp;

	resp_buf = &isci_tmf->resp.resp_iu;

	len = min_t(u32,
		    SSP_RESP_IU_MAX_SIZE,
		    be32_to_cpu(ssp_response->response_data_len));

	memcpy(resp_buf, ssp_response->resp_data, len);
}

/*
 * This method implements the action taken when a constructed
 * SCIC_SDS_IO_REQUEST_T object receives a scic_sds_request_start() request.
 * This method will, if necessary, allocate a TCi for the io request object and
 * then will, if necessary, copy the constructed TC data into the actual TC
 * buffer.  If everything is successful the post context field is updated with
 * the TCi so the controller can post the request to the hardware.  Returns
 * SCI_SUCCESS or SCI_FAILURE_INSUFFICIENT_RESOURCES.
 */
static enum sci_status scic_sds_request_constructed_state_start_handler(
	struct scic_sds_request *request)
{
	struct scu_task_context *task_context;

	if (request->io_tag == SCI_CONTROLLER_INVALID_IO_TAG) {
		request->io_tag =
			scic_controller_allocate_io_tag(request->owning_controller);
	}

	/* Record the IO Tag in the request */
	if (request->io_tag != SCI_CONTROLLER_INVALID_IO_TAG) {
		task_context = request->task_context_buffer;

		task_context->task_index = scic_sds_io_tag_get_index(request->io_tag);

		switch (task_context->protocol_type) {
		case SCU_TASK_CONTEXT_PROTOCOL_SMP:
		case SCU_TASK_CONTEXT_PROTOCOL_SSP:
			/* SSP/SMP Frame */
			task_context->type.ssp.tag = request->io_tag;
			task_context->type.ssp.target_port_transfer_tag = 0xFFFF;
			break;

		case SCU_TASK_CONTEXT_PROTOCOL_STP:
			/*
			 * STP/SATA Frame
			 * task_context->type.stp.ncq_tag = request->ncq_tag; */
			break;

		case SCU_TASK_CONTEXT_PROTOCOL_NONE:
			/* @todo When do we set no protocol type? */
			break;

		default:
			/* This should never happen since we build the IO requests */
			break;
		}

		/*
		 * Check to see if we need to copy the task context buffer
		 * or have been building into the task context buffer */
		if (request->was_tag_assigned_by_user == false) {
			scic_sds_controller_copy_task_context(
				request->owning_controller, request);
		}

		/* Add to the post_context the io tag value */
		request->post_context |= scic_sds_io_tag_get_index(request->io_tag);

		/* Everything is good, go ahead and change state */
		sci_base_state_machine_change_state(&request->state_machine,
						    SCI_BASE_REQUEST_STATE_STARTED);

		return SCI_SUCCESS;
	}

	return SCI_FAILURE_INSUFFICIENT_RESOURCES;
}

/*
 * scic_sds_request_started_state_tc_completion_handler() - This method
 * processes TC (task context) completions for normal IO requests (i.e.
 * Task/Abort Completions of type 0).  This method will update the
 * SCIC_SDS_IO_REQUEST_T::status field.
 * @sci_req: This parameter specifies the request for which a completion
 *    occurred.
 * @completion_code: This parameter specifies the completion code received from
 *    the SCU.
 *
 */
static enum sci_status
scic_sds_request_started_state_tc_completion_handler(struct scic_sds_request *sci_req,
						     u32 completion_code)
{
	u8 datapres;
	struct ssp_response_iu *resp_iu;

	/*
	 * TODO: Any SDMA return code of other than 0 is bad
	 *       decode 0x003C0000 to determine SDMA status
	 */
	switch (SCU_GET_COMPLETION_TL_STATUS(completion_code)) {
	case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_GOOD):
		scic_sds_request_set_status(sci_req,
					    SCU_TASK_DONE_GOOD,
					    SCI_SUCCESS);
		break;

	case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_EARLY_RESP):
	{
		/*
		 * There are times when the SCU hardware will return an early
		 * response because the io request specified more data than is
		 * returned by the target device (mode pages, inquiry data,
		 * etc.).  We must check the response stats to see if this is
		 * truly a failed request or a good request that just got
		 * completed early.
		 */
		struct ssp_response_iu *resp = &sci_req->ssp.rsp;
		ssize_t word_cnt = SSP_RESP_IU_MAX_SIZE / sizeof(u32);

		sci_swab32_cpy(&sci_req->ssp.rsp,
			       &sci_req->ssp.rsp,
			       word_cnt);

		if (resp->status == 0) {
			scic_sds_request_set_status(
				sci_req,
				SCU_TASK_DONE_GOOD,
				SCI_SUCCESS_IO_DONE_EARLY);
		} else {
			scic_sds_request_set_status(
				sci_req,
				SCU_TASK_DONE_CHECK_RESPONSE,
				SCI_FAILURE_IO_RESPONSE_VALID);
		}
	}
	break;

	case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_CHECK_RESPONSE):
	{
		ssize_t word_cnt = SSP_RESP_IU_MAX_SIZE / sizeof(u32);

		sci_swab32_cpy(&sci_req->ssp.rsp,
			       &sci_req->ssp.rsp,
			       word_cnt);

		scic_sds_request_set_status(sci_req,
					    SCU_TASK_DONE_CHECK_RESPONSE,
					    SCI_FAILURE_IO_RESPONSE_VALID);
		break;
	}

	case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_RESP_LEN_ERR):
		/*
		 * @todo With TASK_DONE_RESP_LEN_ERR is the response frame
		 * guaranteed to be received before this completion status is
		 * posted?
		 */
		resp_iu = &sci_req->ssp.rsp;
		datapres = resp_iu->datapres;

		/* datapres 0x01/0x02 indicate RESPONSE_DATA/SENSE_DATA is
		 * present in the response IU. */
		if ((datapres == 0x01) || (datapres == 0x02)) {
			scic_sds_request_set_status(
				sci_req,
				SCU_TASK_DONE_CHECK_RESPONSE,
				SCI_FAILURE_IO_RESPONSE_VALID);
		} else
			scic_sds_request_set_status(
				sci_req, SCU_TASK_DONE_GOOD, SCI_SUCCESS);
		break;

	/* only stp device gets suspended. */
	case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_ACK_NAK_TO):
	case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_LL_PERR):
	case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_NAK_ERR):
	case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_DATA_LEN_ERR):
	case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_LL_ABORT_ERR):
	case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_XR_WD_LEN):
	case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_MAX_PLD_ERR):
	case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_UNEXP_RESP):
	case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_UNEXP_SDBFIS):
	case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_REG_ERR):
	case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_SDB_ERR):
		if (sci_req->protocol == SCIC_STP_PROTOCOL) {
			scic_sds_request_set_status(
				sci_req,
				SCU_GET_COMPLETION_TL_STATUS(completion_code) >>
				SCU_COMPLETION_TL_STATUS_SHIFT,
				SCI_FAILURE_REMOTE_DEVICE_RESET_REQUIRED);
		} else {
			scic_sds_request_set_status(
				sci_req,
				SCU_GET_COMPLETION_TL_STATUS(completion_code) >>
				SCU_COMPLETION_TL_STATUS_SHIFT,
				SCI_FAILURE_CONTROLLER_SPECIFIC_IO_ERR);
		}
		break;

	/* both stp/ssp device gets suspended */
	case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_LF_ERR):
	case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_OPEN_REJECT_WRONG_DESTINATION):
	case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_OPEN_REJECT_RESERVED_ABANDON_1):
	case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_OPEN_REJECT_RESERVED_ABANDON_2):
	case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_OPEN_REJECT_RESERVED_ABANDON_3):
	case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_OPEN_REJECT_BAD_DESTINATION):
	case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_OPEN_REJECT_ZONE_VIOLATION):
	case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_OPEN_REJECT_STP_RESOURCES_BUSY):
	case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_OPEN_REJECT_PROTOCOL_NOT_SUPPORTED):
	case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_OPEN_REJECT_CONNECTION_RATE_NOT_SUPPORTED):
		scic_sds_request_set_status(
			sci_req,
			SCU_GET_COMPLETION_TL_STATUS(completion_code) >>
			SCU_COMPLETION_TL_STATUS_SHIFT,
			SCI_FAILURE_REMOTE_DEVICE_RESET_REQUIRED);
		break;

	/* neither ssp nor stp gets suspended. */
	case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_NAK_CMD_ERR):
	case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_UNEXP_XR):
	case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_XR_IU_LEN_ERR):
	case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_SDMA_ERR):
	case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_OFFSET_ERR):
	case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_EXCESS_DATA):
	case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_SMP_RESP_TO_ERR):
	case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_SMP_UFI_ERR):
	case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_SMP_FRM_TYPE_ERR):
	case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_SMP_LL_RX_ERR):
	case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_UNEXP_DATA):
	case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_OPEN_FAIL):
	case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_VIIT_ENTRY_NV):
	case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_IIT_ENTRY_NV):
	case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_RNCNV_OUTBOUND):
	default:
		scic_sds_request_set_status(
			sci_req,
			SCU_GET_COMPLETION_TL_STATUS(completion_code) >>
			SCU_COMPLETION_TL_STATUS_SHIFT,
			SCI_FAILURE_CONTROLLER_SPECIFIC_IO_ERR);
		break;
	}

	/*
	 * TODO: This is probably wrong for ACK/NAK timeout conditions
	 */

	/* In all cases we will treat this as the completion of the IO req. */
	sci_base_state_machine_change_state(&sci_req->state_machine,
					    SCI_BASE_REQUEST_STATE_COMPLETED);
	return SCI_SUCCESS;
}

enum sci_status
scic_sds_io_request_tc_completion(struct scic_sds_request *request, u32 completion_code)
{
	if (request->state_handlers->tc_completion_handler)
		return request->state_handlers->tc_completion_handler(request, completion_code);

	dev_warn(scic_to_dev(request->owning_controller),
		 "%s: SCIC IO Request given task completion notification %x "
		 "while in wrong state %d\n",
		 __func__,
		 completion_code,
		 sci_base_state_machine_get_state(&request->state_machine));

	return SCI_FAILURE_INVALID_STATE;
}

/*
 * This method implements the action to be taken when an SCIC_SDS_IO_REQUEST_T
 * object receives a scic_sds_request_frame_handler() request.  This method
 * first determines the frame type received.  If this is a response frame then
 * the response data is copied to the io request response buffer for processing
 * at completion time.  If the frame type is not a response buffer an error is
 * logged.  Returns SCI_SUCCESS or SCI_FAILURE_INVALID_PARAMETER_VALUE.
 */
static enum sci_status
scic_sds_request_started_state_frame_handler(struct scic_sds_request *sci_req,
					     u32 frame_index)
{
	enum sci_status status;
	u32 *frame_header;
	struct ssp_frame_hdr ssp_hdr;
	ssize_t word_cnt;

	status = scic_sds_unsolicited_frame_control_get_header(
		&(scic_sds_request_get_controller(sci_req)->uf_control),
		frame_index,
		(void **)&frame_header);

	word_cnt = sizeof(struct ssp_frame_hdr) / sizeof(u32);
	sci_swab32_cpy(&ssp_hdr, frame_header, word_cnt);

	if (ssp_hdr.frame_type == SSP_RESPONSE) {
		struct ssp_response_iu *resp_iu;
		ssize_t word_cnt = SSP_RESP_IU_MAX_SIZE / sizeof(u32);

		status = scic_sds_unsolicited_frame_control_get_buffer(
			&(scic_sds_request_get_controller(sci_req)->uf_control),
			frame_index,
			(void **)&resp_iu);

		sci_swab32_cpy(&sci_req->ssp.rsp,
			       resp_iu, word_cnt);

		resp_iu = &sci_req->ssp.rsp;

		if ((resp_iu->datapres == 0x01) ||
		    (resp_iu->datapres == 0x02)) {
			scic_sds_request_set_status(
				sci_req,
				SCU_TASK_DONE_CHECK_RESPONSE,
				SCI_FAILURE_CONTROLLER_SPECIFIC_IO_ERR);
		} else
			scic_sds_request_set_status(
				sci_req, SCU_TASK_DONE_GOOD, SCI_SUCCESS);
	} else {
		/* This was not a response frame why did it get forwarded? */
		dev_err(scic_to_dev(sci_req->owning_controller),
			"%s: SCIC IO Request 0x%p received unexpected "
			"frame %d type 0x%02x\n",
			__func__,
			sci_req,
			frame_index,
			ssp_hdr.frame_type);
	}

	/*
	 * In any case we are done with this frame buffer return it to the
	 * controller
	 */
	scic_sds_controller_release_frame(
		sci_req->owning_controller, frame_index);

	return SCI_SUCCESS;
}

/*
 * ****************************************************************************
 * *  COMPLETED STATE HANDLERS
 * ****************************************************************************
 */

/*
 * This method implements the action to be taken when an SCIC_SDS_IO_REQUEST_T
 * object receives a scic_sds_request_complete() request.  This method frees up
 * any io request resources that have been allocated and transitions the
 * request to its final state.  Consider stopping the state machine instead of
 * transitioning to the final state?  Returns SCI_SUCCESS.
 */
static enum sci_status scic_sds_request_completed_state_complete_handler(
	struct scic_sds_request *request)
{
	if (request->was_tag_assigned_by_user != true) {
		scic_controller_free_io_tag(
			request->owning_controller, request->io_tag);
	}

	if (request->saved_rx_frame_index != SCU_INVALID_FRAME_INDEX) {
		scic_sds_controller_release_frame(
			request->owning_controller, request->saved_rx_frame_index);
	}

	sci_base_state_machine_change_state(&request->state_machine,
					    SCI_BASE_REQUEST_STATE_FINAL);
	return SCI_SUCCESS;
}

/*
 * This method implements the action to be taken when an SCIC_SDS_IO_REQUEST_T
 * object receives a scic_sds_request_task_completion() request.  This method
 * decodes the completion type waiting for the abort task complete
 * notification.  When the abort task complete is received the io request
 * transitions to the completed state.  Returns SCI_SUCCESS.
 */
static enum sci_status scic_sds_request_aborting_state_tc_completion_handler(
	struct scic_sds_request *sci_req,
	u32 completion_code)
{
	switch (SCU_GET_COMPLETION_TL_STATUS(completion_code)) {
	case (SCU_TASK_DONE_GOOD << SCU_COMPLETION_TL_STATUS_SHIFT):
	case (SCU_TASK_DONE_TASK_ABORT << SCU_COMPLETION_TL_STATUS_SHIFT):
		scic_sds_request_set_status(
			sci_req, SCU_TASK_DONE_TASK_ABORT, SCI_FAILURE_IO_TERMINATED
			);

		sci_base_state_machine_change_state(&sci_req->state_machine,
						    SCI_BASE_REQUEST_STATE_COMPLETED);
		break;

	default:
		/*
		 * Unless we get some strange error wait for the task abort to complete
		 * TODO: Should there be a state change for this completion? */
		break;
	}

	return SCI_SUCCESS;
}

/*
 * This method implements the action to be taken when an SCIC_SDS_IO_REQUEST_T
 * object receives a scic_sds_request_frame_handler() request.  This method
 * discards the unsolicited frame since we are waiting for the abort task
 * completion.  Returns SCI_SUCCESS.
 */
static enum sci_status scic_sds_request_aborting_state_frame_handler(
	struct scic_sds_request *sci_req,
	u32 frame_index)
{
	/* TODO: Is it even possible to get an unsolicited frame in the aborting state? */

	scic_sds_controller_release_frame(
		sci_req->owning_controller, frame_index);

	return SCI_SUCCESS;
}

/**
 * This method processes the completion's transport layer (TL) status to
 * determine if the RAW task management frame was sent successfully.  If the
 * raw frame was sent successfully, then the state for the task request
 * transitions to waiting for a response frame.
 * @sci_req: This parameter specifies the request for which the TC
 *    completion was received.
 * @completion_code: This parameter indicates the completion status information
 *    for the TC.
 *
 * Indicates if the tc completion handler was successful.  Currently this
 * method always returns SCI_SUCCESS.
 */
static enum sci_status scic_sds_ssp_task_request_await_tc_completion_tc_completion_handler(
	struct scic_sds_request *sci_req,
	u32 completion_code)
{
	switch (SCU_GET_COMPLETION_TL_STATUS(completion_code)) {
	case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_GOOD):
		scic_sds_request_set_status(sci_req, SCU_TASK_DONE_GOOD,
					    SCI_SUCCESS);

		sci_base_state_machine_change_state(&sci_req->state_machine,
						    SCIC_SDS_IO_REQUEST_STARTED_TASK_MGMT_SUBSTATE_AWAIT_TC_RESPONSE);
		break;

	case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_ACK_NAK_TO):
		/*
		 * Currently, the decision is to simply allow the task request to
		 * timeout if the task IU wasn't received successfully.
		 * There is a potential for receiving multiple task responses if we
		 * decide to send the task IU again. */
		dev_warn(scic_to_dev(sci_req->owning_controller),
			 "%s: TaskRequest:0x%p CompletionCode:%x - "
			 "ACK/NAK timeout\n",
			 __func__,
			 sci_req,
			 completion_code);

		sci_base_state_machine_change_state(&sci_req->state_machine,
						    SCIC_SDS_IO_REQUEST_STARTED_TASK_MGMT_SUBSTATE_AWAIT_TC_RESPONSE);
		break;

	default:
		/*
		 * All other completion statuses cause the IO to be complete.  If a NAK
		 * was received, then it is up to the user to retry the request. */
		scic_sds_request_set_status(
			sci_req,
			SCU_NORMALIZE_COMPLETION_STATUS(completion_code),
			SCI_FAILURE_CONTROLLER_SPECIFIC_IO_ERR
			);

		sci_base_state_machine_change_state(&sci_req->state_machine,
						    SCI_BASE_REQUEST_STATE_COMPLETED);
		break;
	}

	return SCI_SUCCESS;
}

/**
 * This method processes an unsolicited frame while the task mgmt request is
 * waiting for a response frame.  It will copy the response data, release
 * the unsolicited frame, and transition the request to the
 * SCI_BASE_REQUEST_STATE_COMPLETED state.
 * @request: This parameter specifies the request for which the
 *    unsolicited frame was received.
 * @frame_index: This parameter indicates the unsolicited frame index that
 *    should contain the response.
 *
 * This method returns an indication of whether the TC response frame was
 * handled successfully or not.  Currently SCI_SUCCESS is always returned,
 * indicating successful processing of the TC response.  Should probably
 * update to check frame type and make sure it is a response frame.
 */
static enum sci_status scic_sds_ssp_task_request_await_tc_response_frame_handler(
	struct scic_sds_request *request,
	u32 frame_index)
{
	scic_sds_io_request_copy_response(request);

	sci_base_state_machine_change_state(&request->state_machine,
					    SCI_BASE_REQUEST_STATE_COMPLETED);
	scic_sds_controller_release_frame(request->owning_controller,
					  frame_index);
	return SCI_SUCCESS;
}

/**
 * This method processes an abnormal TC completion while the SMP request is
 * waiting for a response frame.  It decides what happened to the IO based
 * on TC completion status.
 * @sci_req: This parameter specifies the request for which the TC
 *    completion was received.
 * @completion_code: This parameter indicates the completion status information
 *    for the TC.
 *
 * Indicates if the tc completion handler was successful.  Currently this
 * method always returns SCI_SUCCESS.
 */
static enum sci_status scic_sds_smp_request_await_response_tc_completion_handler(
	struct scic_sds_request *sci_req,
	u32 completion_code)
{
	switch (SCU_GET_COMPLETION_TL_STATUS(completion_code)) {
	case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_GOOD):
		/*
		 * In the AWAIT RESPONSE state, any TC completion is unexpected,
		 * but if the TC has success status, we complete the IO anyway. */
		scic_sds_request_set_status(sci_req, SCU_TASK_DONE_GOOD,
					    SCI_SUCCESS);

		sci_base_state_machine_change_state(&sci_req->state_machine,
						    SCI_BASE_REQUEST_STATE_COMPLETED);
		break;

	case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_SMP_RESP_TO_ERR):
	case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_SMP_UFI_ERR):
	case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_SMP_FRM_TYPE_ERR):
	case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_SMP_LL_RX_ERR):
		/*
		 * These statuses have been seen in a specific LSI expander,
		 * which sometimes is not able to send an smp response within
		 * 2 ms.  This causes our hardware to break the connection and
		 * set TC completion with one of these SMP_XXX_XX_ERR statuses.
		 * For this type of error, we ask the scic user to retry the
		 * request. */
		scic_sds_request_set_status(sci_req, SCU_TASK_DONE_SMP_RESP_TO_ERR,
					    SCI_FAILURE_RETRY_REQUIRED);

		sci_base_state_machine_change_state(&sci_req->state_machine,
						    SCI_BASE_REQUEST_STATE_COMPLETED);
		break;

	default:
		/*
		 * All other completion statuses cause the IO to be complete.  If a NAK
		 * was received, then it is up to the user to retry the request. */
		scic_sds_request_set_status(
			sci_req,
			SCU_NORMALIZE_COMPLETION_STATUS(completion_code),
			SCI_FAILURE_CONTROLLER_SPECIFIC_IO_ERR
			);

		sci_base_state_machine_change_state(&sci_req->state_machine,
						    SCI_BASE_REQUEST_STATE_COMPLETED);
		break;
	}

	return SCI_SUCCESS;
}

1465/*
1466 * This function processes an unsolicited frame while the SMP request is waiting
1467 * for a response frame. It will copy the response data, release the
1468 * unsolicited frame, and transition the request to the
1469 * SCI_BASE_REQUEST_STATE_COMPLETED state.
1470 * @sci_req: This parameter specifies the request for which the
1471 * unsolicited frame was received.
1472 * @frame_index: This parameter indicates the unsolicited frame index that
1473 * should contain the response.
1474 *
1475 * This function returns an indication of whether the response frame was handled
1476 * successfully or not. SCI_SUCCESS Currently this value is always returned and
1477 * indicates successful processing of the TC response.
1478 */
1479static enum sci_status
1480scic_sds_smp_request_await_response_frame_handler(struct scic_sds_request *sci_req,
1481 u32 frame_index)
1482{
1483 enum sci_status status;
1484 void *frame_header;
1485 struct smp_resp *rsp_hdr = &sci_req->smp.rsp;
1486 ssize_t word_cnt = SMP_RESP_HDR_SZ / sizeof(u32);
1487
1488 status = scic_sds_unsolicited_frame_control_get_header(
1489 &(scic_sds_request_get_controller(sci_req)->uf_control),
1490 frame_index,
1491 &frame_header);
1492
1493 /* byte swap the header. */
1494 sci_swab32_cpy(rsp_hdr, frame_header, word_cnt);
1495
1496 if (rsp_hdr->frame_type == SMP_RESPONSE) {
1497 void *smp_resp;
1498
1499 status = scic_sds_unsolicited_frame_control_get_buffer(
1500 &(scic_sds_request_get_controller(sci_req)->uf_control),
1501 frame_index,
1502 &smp_resp);
1503
1504 word_cnt = (sizeof(struct smp_req) - SMP_RESP_HDR_SZ) /
1505 sizeof(u32);
1506
1507 sci_swab32_cpy(((u8 *) rsp_hdr) + SMP_RESP_HDR_SZ,
1508 smp_resp, word_cnt);
1509
1510 scic_sds_request_set_status(
1511 sci_req, SCU_TASK_DONE_GOOD, SCI_SUCCESS);
1512
1513 sci_base_state_machine_change_state(&sci_req->state_machine,
1514 SCIC_SDS_SMP_REQUEST_STARTED_SUBSTATE_AWAIT_TC_COMPLETION);
1515 } else {
1516 /* This was not a response frame, so why did it get forwarded? */
1517 dev_err(scic_to_dev(sci_req->owning_controller),
1518 "%s: SCIC SMP Request 0x%p received unexpected frame "
1519 "%d type 0x%02x\n",
1520 __func__,
1521 sci_req,
1522 frame_index,
1523 rsp_hdr->frame_type);
1524
1525 scic_sds_request_set_status(
1526 sci_req,
1527 SCU_TASK_DONE_SMP_FRM_TYPE_ERR,
1528 SCI_FAILURE_CONTROLLER_SPECIFIC_IO_ERR);
1529
1530 sci_base_state_machine_change_state(&sci_req->state_machine,
1531 SCI_BASE_REQUEST_STATE_COMPLETED);
1532 }
1533
1534 scic_sds_controller_release_frame(sci_req->owning_controller,
1535 frame_index);
1536
1537 return SCI_SUCCESS;
1538}
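/*
 * For reference, the response copy above is staged and byte-swapped (a
 * sketch distilled from the code above, with the counts exactly as used
 * there):
 *
 *	sci_swab32_cpy(rsp_hdr, frame_header, SMP_RESP_HDR_SZ / sizeof(u32));
 *	sci_swab32_cpy((u8 *)rsp_hdr + SMP_RESP_HDR_SZ, smp_resp,
 *		       (sizeof(struct smp_req) - SMP_RESP_HDR_SZ) / sizeof(u32));
 *
 * The header is swapped into sci_req->smp.rsp first so rsp_hdr->frame_type
 * can be validated before the buffer portion of the frame is fetched.
 */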
1539
1540/**
1541 * This method processes the completion's transport layer (TL) status to
1542 * determine if the SMP request was sent successfully. Since the response
1543 * frame has already been received at this point, a successful TC completion
1544 * moves the request to the completed state.
1545 * @sci_req: This parameter specifies the request for which the TC
1546 * completion was received.
1547 * @completion_code: This parameter indicates the completion status information
1548 * for the TC.
1549 *
1550 * Indicates whether the TC completion handler was successful. Currently
1551 * this method always returns SCI_SUCCESS.
1552 */
1553static enum sci_status scic_sds_smp_request_await_tc_completion_tc_completion_handler(
1554 struct scic_sds_request *sci_req,
1555 u32 completion_code)
1556{
1557 switch (SCU_GET_COMPLETION_TL_STATUS(completion_code)) {
1558 case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_GOOD):
1559 scic_sds_request_set_status(sci_req, SCU_TASK_DONE_GOOD,
1560 SCI_SUCCESS);
1561
1562 sci_base_state_machine_change_state(&sci_req->state_machine,
1563 SCI_BASE_REQUEST_STATE_COMPLETED);
1564 break;
1565
1566 default:
1567 /*
1568 * All other completion status cause the IO to be complete. If a NAK
1569 * was received, then it is up to the user to retry the request. */
1570 scic_sds_request_set_status(
1571 sci_req,
1572 SCU_NORMALIZE_COMPLETION_STATUS(completion_code),
1573 SCI_FAILURE_CONTROLLER_SPECIFIC_IO_ERR
1574 );
1575
1576 sci_base_state_machine_change_state(
1577 &sci_req->state_machine,
1578 SCI_BASE_REQUEST_STATE_COMPLETED);
1579 break;
1580 }
1581
1582 return SCI_SUCCESS;
1583}
1584
1585void scic_stp_io_request_set_ncq_tag(struct scic_sds_request *req,
1586 u16 ncq_tag)
1587{
1588 /**
1589 * @note This could be made to return an error to the user if the user
1590 * attempts to set the NCQ tag in the wrong state.
1591 */
1592 req->task_context_buffer->type.stp.ncq_tag = ncq_tag;
1593}
1594
1595/**
1596 *
1597 * @stp_req:
1598 *
1599 * Get the next SGL element from the request: check which SGL element pair
1600 * we are working on; if working on SGL pair element A, advance to element B;
1601 * otherwise check whether there are more SGL element pairs for this IO
1602 * request, and if so, advance to the next pair and return its element A.
1603 * Returns a struct scu_sgl_element *, or NULL when the list is exhausted.
1604 */
1605static struct scu_sgl_element *scic_sds_stp_request_pio_get_next_sgl(struct scic_sds_stp_request *stp_req)
1606{
1607 struct scu_sgl_element *current_sgl;
1608 struct scic_sds_request *sci_req = to_sci_req(stp_req);
1609 struct scic_sds_request_pio_sgl *pio_sgl = &stp_req->type.pio.request_current;
1610
1611 if (pio_sgl->sgl_set == SCU_SGL_ELEMENT_PAIR_A) {
1612 if (pio_sgl->sgl_pair->B.address_lower == 0 &&
1613 pio_sgl->sgl_pair->B.address_upper == 0) {
1614 current_sgl = NULL;
1615 } else {
1616 pio_sgl->sgl_set = SCU_SGL_ELEMENT_PAIR_B;
1617 current_sgl = &pio_sgl->sgl_pair->B;
1618 }
1619 } else {
1620 if (pio_sgl->sgl_pair->next_pair_lower == 0 &&
1621 pio_sgl->sgl_pair->next_pair_upper == 0) {
1622 current_sgl = NULL;
1623 } else {
1624 u64 phys_addr;
1625
1626 phys_addr = pio_sgl->sgl_pair->next_pair_upper;
1627 phys_addr <<= 32;
1628 phys_addr |= pio_sgl->sgl_pair->next_pair_lower;
1629
1630 pio_sgl->sgl_pair = scic_request_get_virt_addr(sci_req, phys_addr);
1631 pio_sgl->sgl_set = SCU_SGL_ELEMENT_PAIR_A;
1632 current_sgl = &pio_sgl->sgl_pair->A;
1633 }
1634 }
1635
1636 return current_sgl;
1637}
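/*
 * The walk above advances through SGL element pairs laid out as A/B entries
 * (sketch of the traversal order, derived from the code above):
 *
 *	pair 0: A -> B -> pair 1: A -> B -> ... -> NULL
 *
 * A zero B address terminates within a pair, and a zero next-pair address
 * terminates the list; the 64-bit next-pair pointer is reassembled as
 * (next_pair_upper << 32) | next_pair_lower and converted back to a virtual
 * address with scic_request_get_virt_addr().
 */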
1638
1639/**
1640 *
1641 * @sci_req:
1642 * @completion_code:
1643 *
1644 * This method processes a TC completion. The expected TC completion is for
1645 * the transmission of the H2D register FIS containing the SATA/STP non-data
1646 * request. This method always successfully processes the TC completion.
1647 * SCI_SUCCESS This value is always returned.
1648 */
1649static enum sci_status scic_sds_stp_request_non_data_await_h2d_tc_completion_handler(
1650 struct scic_sds_request *sci_req,
1651 u32 completion_code)
1652{
1653 switch (SCU_GET_COMPLETION_TL_STATUS(completion_code)) {
1654 case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_GOOD):
1655 scic_sds_request_set_status(
1656 sci_req, SCU_TASK_DONE_GOOD, SCI_SUCCESS
1657 );
1658
1659 sci_base_state_machine_change_state(
1660 &sci_req->state_machine,
1661 SCIC_SDS_STP_REQUEST_STARTED_NON_DATA_AWAIT_D2H_SUBSTATE
1662 );
1663 break;
1664
1665 default:
1666 /*
1667 * All other completion status cause the IO to be complete. If a NAK
1668 * was received, then it is up to the user to retry the request. */
1669 scic_sds_request_set_status(
1670 sci_req,
1671 SCU_NORMALIZE_COMPLETION_STATUS(completion_code),
1672 SCI_FAILURE_CONTROLLER_SPECIFIC_IO_ERR
1673 );
1674
1675 sci_base_state_machine_change_state(
1676 &sci_req->state_machine, SCI_BASE_REQUEST_STATE_COMPLETED);
1677 break;
1678 }
1679
1680 return SCI_SUCCESS;
1681}
1682
1683/**
1684 *
1685 * @request: This parameter specifies the request for which a frame has been
1686 * received.
1687 * @frame_index: This parameter specifies the index of the frame that has been
1688 * received.
1689 *
1690 * This method processes frames received from the target while waiting for a
1691 * device to host register FIS. If a non-register FIS is received during this
1692 * time, it is treated as a protocol violation from an IO perspective. Indicate
1693 * if the received frame was processed successfully.
1694 */
1695static enum sci_status scic_sds_stp_request_non_data_await_d2h_frame_handler(
1696 struct scic_sds_request *sci_req,
1697 u32 frame_index)
1698{
1699 enum sci_status status;
1700 struct dev_to_host_fis *frame_header;
1701 u32 *frame_buffer;
1702 struct scic_sds_stp_request *stp_req = &sci_req->stp.req;
1703 struct scic_sds_controller *scic = sci_req->owning_controller;
1704
1705 status = scic_sds_unsolicited_frame_control_get_header(&scic->uf_control,
1706 frame_index,
1707 (void **)&frame_header);
1708
1709 if (status != SCI_SUCCESS) {
1710 dev_err(scic_to_dev(sci_req->owning_controller),
1711 "%s: SCIC IO Request 0x%p could not get frame header "
1712 "for frame index %d, status %x\n",
1713 __func__, stp_req, frame_index, status);
1714
1715 return status;
1716 }
1717
1718 switch (frame_header->fis_type) {
1719 case FIS_REGD2H:
1720 scic_sds_unsolicited_frame_control_get_buffer(&scic->uf_control,
1721 frame_index,
1722 (void **)&frame_buffer);
1723
1724 scic_sds_controller_copy_sata_response(&sci_req->stp.rsp,
1725 frame_header,
1726 frame_buffer);
1727
1728 /* The command has completed with error */
1729 scic_sds_request_set_status(sci_req, SCU_TASK_DONE_CHECK_RESPONSE,
1730 SCI_FAILURE_IO_RESPONSE_VALID);
1731 break;
1732
1733 default:
1734 dev_warn(scic_to_dev(scic),
1735 "%s: IO Request:0x%p Frame Id:%d protocol "
1736 "violation occurred\n", __func__, stp_req,
1737 frame_index);
1738
1739 scic_sds_request_set_status(sci_req, SCU_TASK_DONE_UNEXP_FIS,
1740 SCI_FAILURE_PROTOCOL_VIOLATION);
1741 break;
1742 }
1743
1744 sci_base_state_machine_change_state(&sci_req->state_machine,
1745 SCI_BASE_REQUEST_STATE_COMPLETED);
1746
1747 /* Frame has been decoded return it to the controller */
1748 scic_sds_controller_release_frame(scic, frame_index);
1749
1750 return status;
1751}
1752
1753#define SCU_MAX_FRAME_BUFFER_SIZE 0x400 /* 1K is the maximum SCU frame data payload */
1754
1755/* transmit a DATA_FIS from (current sgl + offset) for the input
1756 * parameter length. The current sgl and offset are already stored in the IO request.
1757 */
1758static enum sci_status scic_sds_stp_request_pio_data_out_trasmit_data_frame(
1759 struct scic_sds_request *sci_req,
1760 u32 length)
1761{
1762 struct scic_sds_controller *scic = sci_req->owning_controller;
1763 struct scic_sds_stp_request *stp_req = &sci_req->stp.req;
1764 struct scu_task_context *task_context;
1765 struct scu_sgl_element *current_sgl;
1766
1767 /* Recycle the TC and reconstruct it for sending out DATA FIS containing
1768 * for the data from current_sgl+offset for the input length
1769 */
1770 task_context = scic_sds_controller_get_task_context_buffer(scic,
1771 sci_req->io_tag);
1772
1773 if (stp_req->type.pio.request_current.sgl_set == SCU_SGL_ELEMENT_PAIR_A)
1774 current_sgl = &stp_req->type.pio.request_current.sgl_pair->A;
1775 else
1776 current_sgl = &stp_req->type.pio.request_current.sgl_pair->B;
1777
1778 /* update the TC */
1779 task_context->command_iu_upper = current_sgl->address_upper;
1780 task_context->command_iu_lower = current_sgl->address_lower;
1781 task_context->transfer_length_bytes = length;
1782 task_context->type.stp.fis_type = FIS_DATA;
1783
1784 /* send the new TC out. */
1785 return scic_controller_continue_io(sci_req);
1786}
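/*
 * Rather than building a fresh task context, the function above recycles
 * the existing TC for this tag: only the command IU address pair, the
 * transfer length, and the FIS type are rewritten before the request is
 * re-posted via scic_controller_continue_io(). The remaining TC fields are
 * assumed to still be valid from the original construction.
 */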
1787
1788static enum sci_status scic_sds_stp_request_pio_data_out_transmit_data(struct scic_sds_request *sci_req)
1789{
1790
1791 struct scu_sgl_element *current_sgl;
1792 u32 sgl_offset;
1793 u32 remaining_bytes_in_current_sgl = 0;
1794 enum sci_status status = SCI_SUCCESS;
1795 struct scic_sds_stp_request *stp_req = &sci_req->stp.req;
1796
1797 sgl_offset = stp_req->type.pio.request_current.sgl_offset;
1798
1799 if (stp_req->type.pio.request_current.sgl_set == SCU_SGL_ELEMENT_PAIR_A) {
1800 current_sgl = &(stp_req->type.pio.request_current.sgl_pair->A);
1801 remaining_bytes_in_current_sgl = stp_req->type.pio.request_current.sgl_pair->A.length - sgl_offset;
1802 } else {
1803 current_sgl = &(stp_req->type.pio.request_current.sgl_pair->B);
1804 remaining_bytes_in_current_sgl = stp_req->type.pio.request_current.sgl_pair->B.length - sgl_offset;
1805 }
1806
1807
1808 if (stp_req->type.pio.pio_transfer_bytes > 0) {
1809 if (stp_req->type.pio.pio_transfer_bytes >= remaining_bytes_in_current_sgl) {
1810 /* recycle the TC and send the H2D Data FIS from (current sgl + sgl_offset) and length = remaining_bytes_in_current_sgl */
1811 status = scic_sds_stp_request_pio_data_out_trasmit_data_frame(sci_req, remaining_bytes_in_current_sgl);
1812 if (status == SCI_SUCCESS) {
1813 stp_req->type.pio.pio_transfer_bytes -= remaining_bytes_in_current_sgl;
1814
1815 /* update the current sgl, sgl_offset and save for future */
1816 current_sgl = scic_sds_stp_request_pio_get_next_sgl(stp_req);
1817 sgl_offset = 0;
1818 }
1819 } else if (stp_req->type.pio.pio_transfer_bytes < remaining_bytes_in_current_sgl) {
1820 /* recycle the TC and send the H2D Data FIS from (current sgl + sgl_offset) and length = type.pio.pio_transfer_bytes */
1821 status = scic_sds_stp_request_pio_data_out_trasmit_data_frame(sci_req, stp_req->type.pio.pio_transfer_bytes);
1822
1823 if (status == SCI_SUCCESS) {
1824 /* Sgl offset will be adjusted and saved for future */
1825 sgl_offset += stp_req->type.pio.pio_transfer_bytes;
1826 current_sgl->address_lower += stp_req->type.pio.pio_transfer_bytes;
1827 stp_req->type.pio.pio_transfer_bytes = 0;
1828 }
1829 }
1830 }
1831
1832 if (status == SCI_SUCCESS) {
1833 stp_req->type.pio.request_current.sgl_offset = sgl_offset;
1834 }
1835
1836 return status;
1837}
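/*
 * In the function above, one of two DATA FIS sizes is sent per call (sketch
 * of the two branches):
 *
 *	if (pio_transfer_bytes >= remaining_bytes_in_current_sgl)
 *		send remaining_bytes_in_current_sgl, then advance to the
 *		next SGL element and reset sgl_offset to 0;
 *	else
 *		send pio_transfer_bytes, bump sgl_offset and the element's
 *		address by the same amount, and finish.
 *
 * The saved sgl_offset lets the next TC completion resume mid-element.
 */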
1838
1839/**
1840 *
1841 * @stp_request: The request that is used for the SGL processing.
1842 * @data_buffer: The buffer of data to be copied.
1843 * @length: The length of the data transfer.
1844 *
1845 * Copy the data from the buffer, for the length specified, to the data
1846 * region described by the IO request SGL. Returns an enum sci_status.
1847 */
1848static enum sci_status
1849scic_sds_stp_request_pio_data_in_copy_data_buffer(struct scic_sds_stp_request *stp_req,
1850 u8 *data_buf, u32 len)
1851{
1852 struct scic_sds_request *sci_req;
1853 struct isci_request *ireq;
1854 u8 *src_addr;
1855 int copy_len;
1856 struct sas_task *task;
1857 struct scatterlist *sg;
1858 void *kaddr;
1859 int total_len = len;
1860
1861 sci_req = to_sci_req(stp_req);
1862 ireq = sci_req_to_ireq(sci_req);
1863 task = isci_request_access_task(ireq);
1864 src_addr = data_buf;
1865
1866 if (task->num_scatter > 0) {
1867 sg = task->scatter;
1868
1869 while (total_len > 0) {
1870 struct page *page = sg_page(sg);
1871
1872 copy_len = min_t(int, total_len, sg_dma_len(sg));
1873 kaddr = kmap_atomic(page, KM_IRQ0);
1874 memcpy(kaddr + sg->offset, src_addr, copy_len);
1875 kunmap_atomic(kaddr, KM_IRQ0);
1876 total_len -= copy_len;
1877 src_addr += copy_len;
1878 sg = sg_next(sg);
1879 }
1880 } else {
1881 BUG_ON(task->total_xfer_len < total_len);
1882 memcpy(task->scatter, src_addr, total_len);
1883 }
1884
1885 return SCI_SUCCESS;
1886}
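/*
 * The scatterlist copy above maps one page at a time with kmap_atomic()
 * using the KM_IRQ0 slot, which assumes this path runs in interrupt context
 * (unsolicited frame processing). When the task has no scatter entries,
 * task->scatter is taken to point directly at the destination buffer.
 */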
1887
1888/**
1889 *
1890 * @sci_req: The PIO DATA IN request that is to receive the data.
1891 * @data_buffer: The buffer to copy from.
1892 *
1893 * Copy the data buffer to the io request data region. enum sci_status
1894 */
1895static enum sci_status scic_sds_stp_request_pio_data_in_copy_data(
1896 struct scic_sds_stp_request *sci_req,
1897 u8 *data_buffer)
1898{
1899 enum sci_status status;
1900
1901 /*
1902 * If there is less than 1K remaining in the transfer request
1903 * copy just the data for the transfer */
1904 if (sci_req->type.pio.pio_transfer_bytes < SCU_MAX_FRAME_BUFFER_SIZE) {
1905 status = scic_sds_stp_request_pio_data_in_copy_data_buffer(
1906 sci_req, data_buffer, sci_req->type.pio.pio_transfer_bytes);
1907
1908 if (status == SCI_SUCCESS)
1909 sci_req->type.pio.pio_transfer_bytes = 0;
1910 } else {
1911 /* We are transferring the whole frame, so copy it all */
1912 status = scic_sds_stp_request_pio_data_in_copy_data_buffer(
1913 sci_req, data_buffer, SCU_MAX_FRAME_BUFFER_SIZE);
1914
1915 if (status == SCI_SUCCESS)
1916 sci_req->type.pio.pio_transfer_bytes -= SCU_MAX_FRAME_BUFFER_SIZE;
1917 }
1918
1919 return status;
1920}
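/*
 * Each unsolicited DATA FIS carries at most SCU_MAX_FRAME_BUFFER_SIZE (1KB)
 * of payload, so pio_transfer_bytes is drained in 1KB steps until a final,
 * shorter copy brings it to zero.
 */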
1921
1922/**
1923 *
1924 * @sci_req:
1925 * @completion_code:
1926 *
1927 * enum sci_status
1928 */
1929static enum sci_status scic_sds_stp_request_pio_await_h2d_completion_tc_completion_handler(
1930 struct scic_sds_request *sci_req,
1931 u32 completion_code)
1932{
1933 enum sci_status status = SCI_SUCCESS;
1934
1935 switch (SCU_GET_COMPLETION_TL_STATUS(completion_code)) {
1936 case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_GOOD):
1937 scic_sds_request_set_status(
1938 sci_req, SCU_TASK_DONE_GOOD, SCI_SUCCESS
1939 );
1940
1941 sci_base_state_machine_change_state(
1942 &sci_req->state_machine,
1943 SCIC_SDS_STP_REQUEST_STARTED_PIO_AWAIT_FRAME_SUBSTATE
1944 );
1945 break;
1946
1947 default:
1948 /*
1949 * All other completion status cause the IO to be complete. If a NAK
1950 * was received, then it is up to the user to retry the request. */
1951 scic_sds_request_set_status(
1952 sci_req,
1953 SCU_NORMALIZE_COMPLETION_STATUS(completion_code),
1954 SCI_FAILURE_CONTROLLER_SPECIFIC_IO_ERR
1955 );
1956
1957 sci_base_state_machine_change_state(
1958 &sci_req->state_machine,
1959 SCI_BASE_REQUEST_STATE_COMPLETED
1960 );
1961 break;
1962 }
1963
1964 return status;
1965}
1966
1967static enum sci_status scic_sds_stp_request_pio_await_frame_frame_handler(struct scic_sds_request *sci_req,
1968 u32 frame_index)
1969{
1970 struct scic_sds_controller *scic = sci_req->owning_controller;
1971 struct scic_sds_stp_request *stp_req = &sci_req->stp.req;
1972 struct isci_request *ireq = sci_req_to_ireq(sci_req);
1973 struct sas_task *task = isci_request_access_task(ireq);
1974 struct dev_to_host_fis *frame_header;
1975 enum sci_status status;
1976 u32 *frame_buffer;
1977
1978 status = scic_sds_unsolicited_frame_control_get_header(&scic->uf_control,
1979 frame_index,
1980 (void **)&frame_header);
1981
1982 if (status != SCI_SUCCESS) {
1983 dev_err(scic_to_dev(scic),
1984 "%s: SCIC IO Request 0x%p could not get frame header "
1985 "for frame index %d, status %x\n",
1986 __func__, stp_req, frame_index, status);
1987 return status;
1988 }
1989
1990 switch (frame_header->fis_type) {
1991 case FIS_PIO_SETUP:
1992 /* Get from the frame buffer the PIO Setup Data */
1993 scic_sds_unsolicited_frame_control_get_buffer(&scic->uf_control,
1994 frame_index,
1995 (void **)&frame_buffer);
1996
1997 /* Get the data from the PIO Setup. The SCU hardware returns
1998 * the first word in the frame_header, and the rest of the data is in
1999 * the frame buffer, so we need to back up one dword
2000 */
2001
2002 /* transfer_count: first 16bits in the 4th dword */
2003 stp_req->type.pio.pio_transfer_bytes = frame_buffer[3] & 0xffff;
2004
2005 /* ending_status: 4th byte in the 3rd dword */
2006 stp_req->type.pio.ending_status = (frame_buffer[2] >> 24) & 0xff;
2007
2008 scic_sds_controller_copy_sata_response(&sci_req->stp.rsp,
2009 frame_header,
2010 frame_buffer);
2011
2012 sci_req->stp.rsp.status = stp_req->type.pio.ending_status;
2013
2014 /* The next state is dependent on whether the
2015 * request was PIO Data-in or Data out
2016 */
2017 if (task->data_dir == DMA_FROM_DEVICE) {
2018 sci_base_state_machine_change_state(&sci_req->state_machine,
2019 SCIC_SDS_STP_REQUEST_STARTED_PIO_DATA_IN_AWAIT_DATA_SUBSTATE);
2020 } else if (task->data_dir == DMA_TO_DEVICE) {
2021 /* Transmit data */
2022 status = scic_sds_stp_request_pio_data_out_transmit_data(sci_req);
2023 if (status != SCI_SUCCESS)
2024 break;
2025 sci_base_state_machine_change_state(&sci_req->state_machine,
2026 SCIC_SDS_STP_REQUEST_STARTED_PIO_DATA_OUT_TRANSMIT_DATA_SUBSTATE);
2027 }
2028 break;
2029 case FIS_SETDEVBITS:
2030 sci_base_state_machine_change_state(&sci_req->state_machine,
2031 SCIC_SDS_STP_REQUEST_STARTED_PIO_AWAIT_FRAME_SUBSTATE);
2032 break;
2033 case FIS_REGD2H:
2034 if (frame_header->status & ATA_BUSY) {
2035 /* Now why is the drive sending a D2H Register FIS when
2036 * it is still busy? Do nothing since we are still in
2037 * the right state.
2038 */
2039 dev_dbg(scic_to_dev(scic),
2040 "%s: SCIC PIO Request 0x%p received "
2041 "D2H Register FIS with BSY status "
2042 "0x%x\n", __func__, stp_req,
2043 frame_header->status);
2044 break;
2045 }
2046
2047 scic_sds_unsolicited_frame_control_get_buffer(&scic->uf_control,
2048 frame_index,
2049 (void **)&frame_buffer);
2050
2051 scic_sds_controller_copy_sata_response(&sci_req->stp.rsp,
2052 frame_header,
2053 frame_buffer);
2054
2055 scic_sds_request_set_status(sci_req,
2056 SCU_TASK_DONE_CHECK_RESPONSE,
2057 SCI_FAILURE_IO_RESPONSE_VALID);
2058
2059 sci_base_state_machine_change_state(&sci_req->state_machine,
2060 SCI_BASE_REQUEST_STATE_COMPLETED);
2061 break;
2062 default:
2063 /* FIXME: what do we do here? */
2064 break;
2065 }
2066
2067 /* Frame is decoded return it to the controller */
2068 scic_sds_controller_release_frame(scic, frame_index);
2069
2070 return status;
2071}
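/*
 * For reference, the PIO Setup decode above picks its fields out of the raw
 * dwords (a sketch; the offsets are shifted down one dword because the
 * hardware returns the first FIS word in frame_header):
 *
 *	transfer_count = frame_buffer[3] & 0xffff;	 bytes in the data phase
 *	ending_status  = (frame_buffer[2] >> 24) & 0xff; ending-status byte
 *
 * Calling that byte E_Status would follow the SATA PIO Setup FIS definition,
 * but that name is an assumption here; the code itself only stores it.
 */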
2072
2073static enum sci_status scic_sds_stp_request_pio_data_in_await_data_frame_handler(struct scic_sds_request *sci_req,
2074 u32 frame_index)
2075{
2076 enum sci_status status;
2077 struct dev_to_host_fis *frame_header;
2078 struct sata_fis_data *frame_buffer;
2079 struct scic_sds_stp_request *stp_req = &sci_req->stp.req;
2080 struct scic_sds_controller *scic = sci_req->owning_controller;
2081
2082 status = scic_sds_unsolicited_frame_control_get_header(&scic->uf_control,
2083 frame_index,
2084 (void **)&frame_header);
2085
2086 if (status != SCI_SUCCESS) {
2087 dev_err(scic_to_dev(scic),
2088 "%s: SCIC IO Request 0x%p could not get frame header "
2089 "for frame index %d, status %x\n",
2090 __func__, stp_req, frame_index, status);
2091 return status;
2092 }
2093
2094 if (frame_header->fis_type == FIS_DATA) {
2095 if (stp_req->type.pio.request_current.sgl_pair == NULL) {
2096 sci_req->saved_rx_frame_index = frame_index;
2097 stp_req->type.pio.pio_transfer_bytes = 0;
2098 } else {
2099 scic_sds_unsolicited_frame_control_get_buffer(&scic->uf_control,
2100 frame_index,
2101 (void **)&frame_buffer);
2102
2103 status = scic_sds_stp_request_pio_data_in_copy_data(stp_req,
2104 (u8 *)frame_buffer);
2105
2106 /* Frame is decoded return it to the controller */
2107 scic_sds_controller_release_frame(scic, frame_index);
2108 }
2109
2110 /* Check for the end of the transfer, are there more
2111 * bytes remaining for this data transfer
2112 */
2113 if (status != SCI_SUCCESS ||
2114 stp_req->type.pio.pio_transfer_bytes != 0)
2115 return status;
2116
2117 if ((stp_req->type.pio.ending_status & ATA_BUSY) == 0) {
2118 scic_sds_request_set_status(sci_req,
2119 SCU_TASK_DONE_CHECK_RESPONSE,
2120 SCI_FAILURE_IO_RESPONSE_VALID);
2121
2122 sci_base_state_machine_change_state(&sci_req->state_machine,
2123 SCI_BASE_REQUEST_STATE_COMPLETED);
2124 } else {
2125 sci_base_state_machine_change_state(&sci_req->state_machine,
2126 SCIC_SDS_STP_REQUEST_STARTED_PIO_AWAIT_FRAME_SUBSTATE);
2127 }
2128 } else {
2129 dev_err(scic_to_dev(scic),
2130 "%s: SCIC PIO Request 0x%p received frame %d "
2131 "with fis type 0x%02x when expecting a data "
2132 "fis.\n", __func__, stp_req, frame_index,
2133 frame_header->fis_type);
2134
2135 scic_sds_request_set_status(sci_req,
2136 SCU_TASK_DONE_GOOD,
2137 SCI_FAILURE_IO_REQUIRES_SCSI_ABORT);
2138
2139 sci_base_state_machine_change_state(&sci_req->state_machine,
2140 SCI_BASE_REQUEST_STATE_COMPLETED);
2141
2142 /* Frame is decoded return it to the controller */
2143 scic_sds_controller_release_frame(scic, frame_index);
2144 }
2145
2146 return status;
2147}
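/*
 * Note the asymmetric frame release above: when no SGL pair is available the
 * frame is intentionally *not* released; its index is parked in
 * saved_rx_frame_index and pio_transfer_bytes is zeroed, presumably so the
 * payload can be claimed later by whatever consumes saved_rx_frame_index.
 * That reading is inferred from this function alone, which does not show
 * the consumer.
 */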
2148
2149
2150/**
2151 *
2152 * @sci_req:
2153 * @completion_code:
2154 *
2155 * enum sci_status
2156 */
2157static enum sci_status scic_sds_stp_request_pio_data_out_await_data_transmit_completion_tc_completion_handler(
2158
2159 struct scic_sds_request *sci_req,
2160 u32 completion_code)
2161{
2162 enum sci_status status = SCI_SUCCESS;
2163 bool all_frames_transferred = false;
2164 struct scic_sds_stp_request *stp_req = &sci_req->stp.req;
2165
2166 switch (SCU_GET_COMPLETION_TL_STATUS(completion_code)) {
2167 case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_GOOD):
2168 /* Transmit data */
2169 if (stp_req->type.pio.pio_transfer_bytes != 0) {
2170 status = scic_sds_stp_request_pio_data_out_transmit_data(sci_req);
2171 if (status == SCI_SUCCESS) {
2172 if (stp_req->type.pio.pio_transfer_bytes == 0)
2173 all_frames_transferred = true;
2174 }
2175 } else if (stp_req->type.pio.pio_transfer_bytes == 0) {
2176 /*
2177 * this will happen if all the data is written the
2178 * first time after the PIO setup FIS is received
2179 */
2180 all_frames_transferred = true;
2181 }
2182
2183 /* all data transferred. */
2184 if (all_frames_transferred) {
2185 /*
2186 * Change the state to SCIC_SDS_STP_REQUEST_STARTED_PIO_AWAIT_FRAME_SUBSTATE
2187 * and wait for a PIO_SETUP FIS or a D2H Register FIS. */
2188 sci_base_state_machine_change_state(
2189 &sci_req->state_machine,
2190 SCIC_SDS_STP_REQUEST_STARTED_PIO_AWAIT_FRAME_SUBSTATE
2191 );
2192 }
2193 break;
2194
2195 default:
2196 /*
2197 * All other completion status cause the IO to be complete. If a NAK
2198 * was received, then it is up to the user to retry the request. */
2199 scic_sds_request_set_status(
2200 sci_req,
2201 SCU_NORMALIZE_COMPLETION_STATUS(completion_code),
2202 SCI_FAILURE_CONTROLLER_SPECIFIC_IO_ERR
2203 );
2204
2205 sci_base_state_machine_change_state(
2206 &sci_req->state_machine,
2207 SCI_BASE_REQUEST_STATE_COMPLETED
2208 );
2209 break;
2210 }
2211
2212 return status;
2213}
2214
2215/**
2216 *
2217 * @request: This is the request which is receiving the event.
2218 * @event_code: This is the event code on which the request is
2219 * expected to take action.
2220 *
2221 * This method will handle any link layer events while waiting for the data
2222 * frame. Returns enum sci_status: SCI_SUCCESS or SCI_FAILURE.
2223 */
2224static enum sci_status scic_sds_stp_request_pio_data_in_await_data_event_handler(
2225 struct scic_sds_request *request,
2226 u32 event_code)
2227{
2228 enum sci_status status;
2229
2230 switch (scu_get_event_specifier(event_code)) {
2231 case SCU_TASK_DONE_CRC_ERR << SCU_EVENT_SPECIFIC_CODE_SHIFT:
2232 /*
2233 * We are waiting for data and the SCU has R_ERR'ed the data frame.
2234 * Go back to waiting for the D2H Register FIS */
2235 sci_base_state_machine_change_state(
2236 &request->state_machine,
2237 SCIC_SDS_STP_REQUEST_STARTED_PIO_AWAIT_FRAME_SUBSTATE
2238 );
2239
2240 status = SCI_SUCCESS;
2241 break;
2242
2243 default:
2244 dev_err(scic_to_dev(request->owning_controller),
2245 "%s: SCIC PIO Request 0x%p received unexpected "
2246 "event 0x%08x\n",
2247 __func__, request, event_code);
2248
2249 /* @todo Should we fail the PIO request when we get an unexpected event? */
2250 status = SCI_FAILURE;
2251 break;
2252 }
2253
2254 return status;
2255}
2256
2257static void scic_sds_stp_request_udma_complete_request(
2258 struct scic_sds_request *request,
2259 u32 scu_status,
2260 enum sci_status sci_status)
2261{
2262 scic_sds_request_set_status(request, scu_status, sci_status);
2263 sci_base_state_machine_change_state(&request->state_machine,
2264 SCI_BASE_REQUEST_STATE_COMPLETED);
2265}
2266
2267static enum sci_status scic_sds_stp_request_udma_general_frame_handler(struct scic_sds_request *sci_req,
2268 u32 frame_index)
2269{
2270 struct scic_sds_controller *scic = sci_req->owning_controller;
2271 struct dev_to_host_fis *frame_header;
2272 enum sci_status status;
2273 u32 *frame_buffer;
2274
2275 status = scic_sds_unsolicited_frame_control_get_header(&scic->uf_control,
2276 frame_index,
2277 (void **)&frame_header);
2278
2279 if ((status == SCI_SUCCESS) &&
2280 (frame_header->fis_type == FIS_REGD2H)) {
2281 scic_sds_unsolicited_frame_control_get_buffer(&scic->uf_control,
2282 frame_index,
2283 (void **)&frame_buffer);
2284
2285 scic_sds_controller_copy_sata_response(&sci_req->stp.rsp,
2286 frame_header,
2287 frame_buffer);
2288 }
2289
2290 scic_sds_controller_release_frame(scic, frame_index);
2291
2292 return status;
2293}
2294
2295static enum sci_status scic_sds_stp_request_udma_await_tc_completion_tc_completion_handler(
2296 struct scic_sds_request *sci_req,
2297 u32 completion_code)
2298{
2299 enum sci_status status = SCI_SUCCESS;
2300
2301 switch (SCU_GET_COMPLETION_TL_STATUS(completion_code)) {
2302 case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_GOOD):
2303 scic_sds_stp_request_udma_complete_request(sci_req,
2304 SCU_TASK_DONE_GOOD,
2305 SCI_SUCCESS);
2306 break;
2307 case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_UNEXP_FIS):
2308 case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_REG_ERR):
2309 /*
2310 * We must check the response buffer to see if the D2H Register FIS was
2311 * received before we got the TC completion. */
2312 if (sci_req->stp.rsp.fis_type == FIS_REGD2H) {
2313 scic_sds_remote_device_suspend(sci_req->target_device,
2314 SCU_EVENT_SPECIFIC(SCU_NORMALIZE_COMPLETION_STATUS(completion_code)));
2315
2316 scic_sds_stp_request_udma_complete_request(sci_req,
2317 SCU_TASK_DONE_CHECK_RESPONSE,
2318 SCI_FAILURE_IO_RESPONSE_VALID);
2319 } else {
2320 /*
2321 * If we have an error completion status for the TC then we can expect a
2322 * D2H register FIS from the device so we must change state to wait for it */
2323 sci_base_state_machine_change_state(&sci_req->state_machine,
2324 SCIC_SDS_STP_REQUEST_STARTED_UDMA_AWAIT_D2H_REG_FIS_SUBSTATE);
2325 }
2326 break;
2327
2328 /*
2329 * @todo Check to see if any of these completion statuses need to wait for
2330 * the device to host register FIS. */
2331 /* @todo We can retry the command for SCU_TASK_DONE_CMD_LL_R_ERR - this comes only for B0 */
2332 case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_INV_FIS_LEN):
2333 case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_MAX_PLD_ERR):
2334 case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_LL_R_ERR):
2335 case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_CMD_LL_R_ERR):
2336 case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_CRC_ERR):
2337 scic_sds_remote_device_suspend(sci_req->target_device,
2338 SCU_EVENT_SPECIFIC(SCU_NORMALIZE_COMPLETION_STATUS(completion_code)));
2339 /* Fall through to the default case */
2340 default:
2341 /* All other completion status cause the IO to be complete. */
2342 scic_sds_stp_request_udma_complete_request(sci_req,
2343 SCU_NORMALIZE_COMPLETION_STATUS(completion_code),
2344 SCI_FAILURE_CONTROLLER_SPECIFIC_IO_ERR);
2345 break;
2346 }
2347
2348 return status;
2349}
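/*
 * The UNEXP_FIS/REG_ERR branch above copes with a race: the D2H register FIS
 * may land either before or after the TC completion. If sci_req->stp.rsp
 * already holds a FIS_REGD2H header, the response is in hand and the request
 * can finish with SCU_TASK_DONE_CHECK_RESPONSE; otherwise the request parks
 * in the AWAIT_D2H_REG_FIS substate until
 * scic_sds_stp_request_udma_await_d2h_reg_fis_frame_handler() sees the FIS.
 */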
2350
2351static enum sci_status scic_sds_stp_request_udma_await_d2h_reg_fis_frame_handler(
2352 struct scic_sds_request *sci_req,
2353 u32 frame_index)
2354{
2355 enum sci_status status;
2356
2357 /* Use the general frame handler to copy the response data */
2358 status = scic_sds_stp_request_udma_general_frame_handler(sci_req, frame_index);
2359
2360 if (status != SCI_SUCCESS)
2361 return status;
2362
2363 scic_sds_stp_request_udma_complete_request(sci_req,
2364 SCU_TASK_DONE_CHECK_RESPONSE,
2365 SCI_FAILURE_IO_RESPONSE_VALID);
2366
2367 return status;
2368}
2369
2370enum sci_status scic_sds_stp_udma_request_construct(struct scic_sds_request *sci_req,
2371 u32 len,
2372 enum dma_data_direction dir)
2373{
2374 return SCI_SUCCESS;
2375}
2376
2377/**
2378 *
2379 * @sci_req:
2380 * @completion_code:
2381 *
2382 * This method processes a TC completion. The expected TC completion is for
2383 * the transmission of the H2D register FIS that asserts soft reset (SRST)
2384 * at the start of the SATA soft reset sequence. This method always
2385 * successfully processes the TC completion; SCI_SUCCESS is always returned.
2386 */
2387static enum sci_status scic_sds_stp_request_soft_reset_await_h2d_asserted_tc_completion_handler(
2388 struct scic_sds_request *sci_req,
2389 u32 completion_code)
2390{
2391 switch (SCU_GET_COMPLETION_TL_STATUS(completion_code)) {
2392 case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_GOOD):
2393 scic_sds_request_set_status(
2394 sci_req, SCU_TASK_DONE_GOOD, SCI_SUCCESS
2395 );
2396
2397 sci_base_state_machine_change_state(
2398 &sci_req->state_machine,
2399 SCIC_SDS_STP_REQUEST_STARTED_SOFT_RESET_AWAIT_H2D_DIAGNOSTIC_COMPLETION_SUBSTATE
2400 );
2401 break;
2402
2403 default:
2404 /*
2405 * All other completion status cause the IO to be complete. If a NAK
2406 * was received, then it is up to the user to retry the request. */
2407 scic_sds_request_set_status(
2408 sci_req,
2409 SCU_NORMALIZE_COMPLETION_STATUS(completion_code),
2410 SCI_FAILURE_CONTROLLER_SPECIFIC_IO_ERR
2411 );
2412
2413 sci_base_state_machine_change_state(
2414 &sci_req->state_machine, SCI_BASE_REQUEST_STATE_COMPLETED);
2415 break;
2416 }
2417
2418 return SCI_SUCCESS;
2419}
2420
2421/**
2422 *
2423 * @sci_req:
2424 * @completion_code:
2425 *
2426 * This method processes a TC completion. The expected TC completion is for
2427 * the transmission of the H2D register FIS that de-asserts soft reset (the
2428 * diagnostic step of the SATA soft reset sequence). This method always
2429 * successfully processes the TC completion; SCI_SUCCESS is always returned.
2430 */
2431static enum sci_status scic_sds_stp_request_soft_reset_await_h2d_diagnostic_tc_completion_handler(
2432 struct scic_sds_request *sci_req,
2433 u32 completion_code)
2434{
2435 switch (SCU_GET_COMPLETION_TL_STATUS(completion_code)) {
2436 case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_GOOD):
2437 scic_sds_request_set_status(sci_req, SCU_TASK_DONE_GOOD,
2438 SCI_SUCCESS);
2439
2440 sci_base_state_machine_change_state(&sci_req->state_machine,
2441 SCIC_SDS_STP_REQUEST_STARTED_SOFT_RESET_AWAIT_D2H_RESPONSE_FRAME_SUBSTATE);
2442 break;
2443
2444 default:
2445 /*
2446 * All other completion status cause the IO to be complete. If a NAK
2447 * was received, then it is up to the user to retry the request. */
2448 scic_sds_request_set_status(
2449 sci_req,
2450 SCU_NORMALIZE_COMPLETION_STATUS(completion_code),
2451 SCI_FAILURE_CONTROLLER_SPECIFIC_IO_ERR
2452 );
2453
2454 sci_base_state_machine_change_state(&sci_req->state_machine,
2455 SCI_BASE_REQUEST_STATE_COMPLETED);
2456 break;
2457 }
2458
2459 return SCI_SUCCESS;
2460}
2461
2462/**
2463 *
2464 * @request: This parameter specifies the request for which a frame has been
2465 * received.
2466 * @frame_index: This parameter specifies the index of the frame that has been
2467 * received.
2468 *
2469 * This method processes frames received from the target while waiting for a
2470 * device to host register FIS. If a non-register FIS is received during this
2471 * time, it is treated as a protocol violation from an IO perspective. Indicate
2472 * if the received frame was processed successfully.
2473 */
2474static enum sci_status scic_sds_stp_request_soft_reset_await_d2h_frame_handler(
2475 struct scic_sds_request *sci_req,
2476 u32 frame_index)
2477{
2478 enum sci_status status;
2479 struct dev_to_host_fis *frame_header;
2480 u32 *frame_buffer;
2481 struct scic_sds_stp_request *stp_req = &sci_req->stp.req;
2482 struct scic_sds_controller *scic = sci_req->owning_controller;
2483
2484 status = scic_sds_unsolicited_frame_control_get_header(&scic->uf_control,
2485 frame_index,
2486 (void **)&frame_header);
2487 if (status != SCI_SUCCESS) {
2488 dev_err(scic_to_dev(scic),
2489 "%s: SCIC IO Request 0x%p could not get frame header "
2490 "for frame index %d, status %x\n",
2491 __func__, stp_req, frame_index, status);
2492 return status;
2493 }
2494
2495 switch (frame_header->fis_type) {
2496 case FIS_REGD2H:
2497 scic_sds_unsolicited_frame_control_get_buffer(&scic->uf_control,
2498 frame_index,
2499 (void **)&frame_buffer);
2500
2501 scic_sds_controller_copy_sata_response(&sci_req->stp.rsp,
2502 frame_header,
2503 frame_buffer);
2504
2505 /* The command has completed with error */
2506 scic_sds_request_set_status(sci_req,
2507 SCU_TASK_DONE_CHECK_RESPONSE,
2508 SCI_FAILURE_IO_RESPONSE_VALID);
2509 break;
2510
2511 default:
2512 dev_warn(scic_to_dev(scic),
2513 "%s: IO Request:0x%p Frame Id:%d protocol "
2514 "violation occurred\n", __func__, stp_req,
2515 frame_index);
2516
2517 scic_sds_request_set_status(sci_req, SCU_TASK_DONE_UNEXP_FIS,
2518 SCI_FAILURE_PROTOCOL_VIOLATION);
2519 break;
2520 }
2521
2522 sci_base_state_machine_change_state(&sci_req->state_machine,
2523 SCI_BASE_REQUEST_STATE_COMPLETED);
2524
2525 /* Frame has been decoded return it to the controller */
2526 scic_sds_controller_release_frame(scic, frame_index);
2527
2528 return status;
2529}
2530
2531static const struct scic_sds_io_request_state_handler scic_sds_request_state_handler_table[] = {
2532 [SCI_BASE_REQUEST_STATE_INITIAL] = { },
2533 [SCI_BASE_REQUEST_STATE_CONSTRUCTED] = {
2534 .start_handler = scic_sds_request_constructed_state_start_handler,
2535 },
2536 [SCI_BASE_REQUEST_STATE_STARTED] = {
2537 .tc_completion_handler = scic_sds_request_started_state_tc_completion_handler,
2538 .frame_handler = scic_sds_request_started_state_frame_handler,
2539 },
2540 [SCIC_SDS_IO_REQUEST_STARTED_TASK_MGMT_SUBSTATE_AWAIT_TC_COMPLETION] = {
2541 .tc_completion_handler = scic_sds_ssp_task_request_await_tc_completion_tc_completion_handler,
2542 },
2543 [SCIC_SDS_IO_REQUEST_STARTED_TASK_MGMT_SUBSTATE_AWAIT_TC_RESPONSE] = {
2544 .frame_handler = scic_sds_ssp_task_request_await_tc_response_frame_handler,
2545 },
2546 [SCIC_SDS_SMP_REQUEST_STARTED_SUBSTATE_AWAIT_RESPONSE] = {
2547 .tc_completion_handler = scic_sds_smp_request_await_response_tc_completion_handler,
2548 .frame_handler = scic_sds_smp_request_await_response_frame_handler,
2549 },
2550 [SCIC_SDS_SMP_REQUEST_STARTED_SUBSTATE_AWAIT_TC_COMPLETION] = {
2551 .tc_completion_handler = scic_sds_smp_request_await_tc_completion_tc_completion_handler,
2552 },
2553 [SCIC_SDS_STP_REQUEST_STARTED_UDMA_AWAIT_TC_COMPLETION_SUBSTATE] = {
2554 .tc_completion_handler = scic_sds_stp_request_udma_await_tc_completion_tc_completion_handler,
2555 .frame_handler = scic_sds_stp_request_udma_general_frame_handler,
2556 },
2557 [SCIC_SDS_STP_REQUEST_STARTED_UDMA_AWAIT_D2H_REG_FIS_SUBSTATE] = {
2558 .frame_handler = scic_sds_stp_request_udma_await_d2h_reg_fis_frame_handler,
2559 },
2560 [SCIC_SDS_STP_REQUEST_STARTED_NON_DATA_AWAIT_H2D_COMPLETION_SUBSTATE] = {
2561 .tc_completion_handler = scic_sds_stp_request_non_data_await_h2d_tc_completion_handler,
2562 },
2563 [SCIC_SDS_STP_REQUEST_STARTED_NON_DATA_AWAIT_D2H_SUBSTATE] = {
2564 .frame_handler = scic_sds_stp_request_non_data_await_d2h_frame_handler,
2565 },
2566 [SCIC_SDS_STP_REQUEST_STARTED_PIO_AWAIT_H2D_COMPLETION_SUBSTATE] = {
2567 .tc_completion_handler = scic_sds_stp_request_pio_await_h2d_completion_tc_completion_handler,
2568 },
2569 [SCIC_SDS_STP_REQUEST_STARTED_PIO_AWAIT_FRAME_SUBSTATE] = {
2570 .frame_handler = scic_sds_stp_request_pio_await_frame_frame_handler
2571 },
2572 [SCIC_SDS_STP_REQUEST_STARTED_PIO_DATA_IN_AWAIT_DATA_SUBSTATE] = {
2573 .event_handler = scic_sds_stp_request_pio_data_in_await_data_event_handler,
2574 .frame_handler = scic_sds_stp_request_pio_data_in_await_data_frame_handler
2575 },
2576 [SCIC_SDS_STP_REQUEST_STARTED_PIO_DATA_OUT_TRANSMIT_DATA_SUBSTATE] = {
2577 .tc_completion_handler = scic_sds_stp_request_pio_data_out_await_data_transmit_completion_tc_completion_handler,
2578 },
2579 [SCIC_SDS_STP_REQUEST_STARTED_SOFT_RESET_AWAIT_H2D_ASSERTED_COMPLETION_SUBSTATE] = {
2580 .tc_completion_handler = scic_sds_stp_request_soft_reset_await_h2d_asserted_tc_completion_handler,
2581 },
2582 [SCIC_SDS_STP_REQUEST_STARTED_SOFT_RESET_AWAIT_H2D_DIAGNOSTIC_COMPLETION_SUBSTATE] = {
2583 .tc_completion_handler = scic_sds_stp_request_soft_reset_await_h2d_diagnostic_tc_completion_handler,
2584 },
2585 [SCIC_SDS_STP_REQUEST_STARTED_SOFT_RESET_AWAIT_D2H_RESPONSE_FRAME_SUBSTATE] = {
2586 .frame_handler = scic_sds_stp_request_soft_reset_await_d2h_frame_handler,
2587 },
2588 [SCI_BASE_REQUEST_STATE_COMPLETED] = {
2589 .complete_handler = scic_sds_request_completed_state_complete_handler,
2590 },
2591 [SCI_BASE_REQUEST_STATE_ABORTING] = {
2592 .tc_completion_handler = scic_sds_request_aborting_state_tc_completion_handler,
2593 .frame_handler = scic_sds_request_aborting_state_frame_handler,
2594 },
2595 [SCI_BASE_REQUEST_STATE_FINAL] = { },
2596};
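/*
 * How this table is consumed is not shown in this file; presumably the state
 * machine dispatches on the request's current state roughly like the sketch
 * below, with an absent (NULL) handler meaning the event is invalid in that
 * state:
 *
 *	const struct scic_sds_io_request_state_handler *h =
 *		&scic_sds_request_state_handler_table[state];
 *
 *	if (h->frame_handler)
 *		status = h->frame_handler(sci_req, frame_index);
 */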
2597
2598
2599/**
2600 * isci_request_process_response_iu() - This function sets the status and
2601 * response iu, in the task struct, from the request object for the upper
2602 * layer driver.
2603 * @task: This parameter is the task struct from the upper layer driver.
2604 * @resp_iu: This parameter points to the response iu of the completed request.
2605 * @dev: This parameter specifies the linux device struct.
2606 *
2607 * none.
2608 */
2609static void isci_request_process_response_iu(
2610 struct sas_task *task,
2611 struct ssp_response_iu *resp_iu,
2612 struct device *dev)
2613{
2614 dev_dbg(dev,
2615 "%s: resp_iu = %p "
2616 "resp_iu->status = 0x%x,\nresp_iu->datapres = %d "
2617 "resp_iu->response_data_len = %x, "
2618 "resp_iu->sense_data_len = %x\nrepsonse data: ",
2619 __func__,
2620 resp_iu,
2621 resp_iu->status,
2622 resp_iu->datapres,
2623 resp_iu->response_data_len,
2624 resp_iu->sense_data_len);
2625
2626 task->task_status.stat = resp_iu->status;
2627
2628 /* libsas updates the task status fields based on the response iu. */
2629 sas_ssp_task_response(dev, task, resp_iu);
2630}
2631
2632/**
2633 * isci_request_set_open_reject_status() - This function prepares the I/O
2634 * completion for OPEN_REJECT conditions.
2635 * @request: This parameter is the completed isci_request object.
2636 * @response_ptr: This parameter specifies the service response for the I/O.
2637 * @status_ptr: This parameter specifies the exec status for the I/O.
2638 * @complete_to_host_ptr: This parameter specifies the action to be taken by
2639 * the LLDD with respect to completing this request or forcing an abort
2640 * condition on the I/O.
2641 * @open_rej_reason: This parameter specifies the encoded reason for the
2642 * abandon-class reject.
2643 *
2644 * none.
2645 */
2646static void isci_request_set_open_reject_status(
2647 struct isci_request *request,
2648 struct sas_task *task,
2649 enum service_response *response_ptr,
2650 enum exec_status *status_ptr,
2651 enum isci_completion_selection *complete_to_host_ptr,
2652 enum sas_open_rej_reason open_rej_reason)
2653{
2654 /* Task in the target is done. */
2655 request->complete_in_target = true;
2656 *response_ptr = SAS_TASK_UNDELIVERED;
2657 *status_ptr = SAS_OPEN_REJECT;
2658 *complete_to_host_ptr = isci_perform_normal_io_completion;
2659 task->task_status.open_rej_reason = open_rej_reason;
2660}
2661
2662/**
2663 * isci_request_handle_controller_specific_errors() - This function decodes
2664 * controller-specific I/O completion error conditions.
2665 * @request: This parameter is the completed isci_request object.
2666 * @response_ptr: This parameter specifies the service response for the I/O.
2667 * @status_ptr: This parameter specifies the exec status for the I/O.
2668 * @complete_to_host_ptr: This parameter specifies the action to be taken by
2669 * the LLDD with respect to completing this request or forcing an abort
2670 * condition on the I/O.
2671 *
2672 * none.
2673 */
2674static void isci_request_handle_controller_specific_errors(
2675 struct isci_remote_device *isci_device,
2676 struct isci_request *request,
2677 struct sas_task *task,
2678 enum service_response *response_ptr,
2679 enum exec_status *status_ptr,
2680 enum isci_completion_selection *complete_to_host_ptr)
2681{
2682 unsigned int cstatus;
2683
2684 cstatus = request->sci.scu_status;
2685
2686 dev_dbg(&request->isci_host->pdev->dev,
2687 "%s: %p SCI_FAILURE_CONTROLLER_SPECIFIC_IO_ERR "
2688 "- controller status = 0x%x\n",
2689 __func__, request, cstatus);
2690
2691 /* Decode the controller-specific errors; most
2692 * important is to recognize those conditions in which
2693 * the target may still have a task outstanding that
2694 * must be aborted.
2695 *
2696 * Note that there are SCU completion codes being
2697 * named in the decode below for which SCIC has already
2698 * done work to handle them in a way other than as
2699 * a controller-specific completion code; these are left
2700 * in the decode below for completeness' sake.
2701 */
2702 switch (cstatus) {
2703 case SCU_TASK_DONE_DMASETUP_DIRERR:
2704 /* Also SCU_TASK_DONE_SMP_FRM_TYPE_ERR: */
2705 case SCU_TASK_DONE_XFERCNT_ERR:
2706 /* Also SCU_TASK_DONE_SMP_UFI_ERR: */
2707 if (task->task_proto == SAS_PROTOCOL_SMP) {
2708 /* SCU_TASK_DONE_SMP_UFI_ERR == Task Done. */
2709 *response_ptr = SAS_TASK_COMPLETE;
2710
2711 /* See if the device has been/is being stopped. Note
2712 * that we ignore the quiesce state, since we are
2713 * concerned about the actual device state.
2714 */
2715 if ((isci_device->status == isci_stopping) ||
2716 (isci_device->status == isci_stopped))
2717 *status_ptr = SAS_DEVICE_UNKNOWN;
2718 else
2719 *status_ptr = SAS_ABORTED_TASK;
2720
2721 request->complete_in_target = true;
2722
2723 *complete_to_host_ptr =
2724 isci_perform_normal_io_completion;
2725 } else {
2726 /* Task in the target is not done. */
2727 *response_ptr = SAS_TASK_UNDELIVERED;
2728
2729 if ((isci_device->status == isci_stopping) ||
2730 (isci_device->status == isci_stopped))
2731 *status_ptr = SAS_DEVICE_UNKNOWN;
2732 else
2733 *status_ptr = SAM_STAT_TASK_ABORTED;
2734
2735 request->complete_in_target = false;
2736
2737 *complete_to_host_ptr =
2738 isci_perform_error_io_completion;
2739 }
2740
2741 break;
2742
2743 case SCU_TASK_DONE_CRC_ERR:
2744 case SCU_TASK_DONE_NAK_CMD_ERR:
2745 case SCU_TASK_DONE_EXCESS_DATA:
2746 case SCU_TASK_DONE_UNEXP_FIS:
2747 /* Also SCU_TASK_DONE_UNEXP_RESP: */
2748 case SCU_TASK_DONE_VIIT_ENTRY_NV: /* TODO - conditions? */
2749 case SCU_TASK_DONE_IIT_ENTRY_NV: /* TODO - conditions? */
2750 case SCU_TASK_DONE_RNCNV_OUTBOUND: /* TODO - conditions? */
2751 /* These are conditions in which the target
2752 * has completed the task, so that no cleanup
2753 * is necessary.
2754 */
2755 *response_ptr = SAS_TASK_COMPLETE;
2756
2757 /* See if the device has been/is being stopped. Note
2758 * that we ignore the quiesce state, since we are
2759 * concerned about the actual device state.
2760 */
2761 if ((isci_device->status == isci_stopping) ||
2762 (isci_device->status == isci_stopped))
2763 *status_ptr = SAS_DEVICE_UNKNOWN;
2764 else
2765 *status_ptr = SAS_ABORTED_TASK;
2766
2767 request->complete_in_target = true;
2768
2769 *complete_to_host_ptr = isci_perform_normal_io_completion;
2770 break;
2771
2772
2773 /* Note that the only open reject completion codes seen here will be
2774 * abandon-class codes; all others are automatically retried in the SCU.
2775 */
2776 case SCU_TASK_OPEN_REJECT_WRONG_DESTINATION:
2777
2778 isci_request_set_open_reject_status(
2779 request, task, response_ptr, status_ptr,
2780 complete_to_host_ptr, SAS_OREJ_WRONG_DEST);
2781 break;
2782
2783 case SCU_TASK_OPEN_REJECT_ZONE_VIOLATION:
2784
2785 /* Note - the return of AB0 will change when
2786 * libsas implements detection of zone violations.
2787 */
2788 isci_request_set_open_reject_status(
2789 request, task, response_ptr, status_ptr,
2790 complete_to_host_ptr, SAS_OREJ_RESV_AB0);
2791 break;
2792
2793 case SCU_TASK_OPEN_REJECT_RESERVED_ABANDON_1:
2794
2795 isci_request_set_open_reject_status(
2796 request, task, response_ptr, status_ptr,
2797 complete_to_host_ptr, SAS_OREJ_RESV_AB1);
2798 break;
2799
2800 case SCU_TASK_OPEN_REJECT_RESERVED_ABANDON_2:
2801
2802 isci_request_set_open_reject_status(
2803 request, task, response_ptr, status_ptr,
2804 complete_to_host_ptr, SAS_OREJ_RESV_AB2);
2805 break;
2806
2807 case SCU_TASK_OPEN_REJECT_RESERVED_ABANDON_3:
2808
2809 isci_request_set_open_reject_status(
2810 request, task, response_ptr, status_ptr,
2811 complete_to_host_ptr, SAS_OREJ_RESV_AB3);
2812 break;
2813
2814 case SCU_TASK_OPEN_REJECT_BAD_DESTINATION:
2815
2816 isci_request_set_open_reject_status(
2817 request, task, response_ptr, status_ptr,
2818 complete_to_host_ptr, SAS_OREJ_BAD_DEST);
2819 break;
2820
2821 case SCU_TASK_OPEN_REJECT_STP_RESOURCES_BUSY:
2822
2823 isci_request_set_open_reject_status(
2824 request, task, response_ptr, status_ptr,
2825 complete_to_host_ptr, SAS_OREJ_STP_NORES);
2826 break;
2827
2828 case SCU_TASK_OPEN_REJECT_PROTOCOL_NOT_SUPPORTED:
2829
2830 isci_request_set_open_reject_status(
2831 request, task, response_ptr, status_ptr,
2832 complete_to_host_ptr, SAS_OREJ_EPROTO);
2833 break;
2834
2835 case SCU_TASK_OPEN_REJECT_CONNECTION_RATE_NOT_SUPPORTED:
2836
2837 isci_request_set_open_reject_status(
2838 request, task, response_ptr, status_ptr,
2839 complete_to_host_ptr, SAS_OREJ_CONN_RATE);
2840 break;
2841
2842 case SCU_TASK_DONE_LL_R_ERR:
2843 /* Also SCU_TASK_DONE_ACK_NAK_TO: */
2844 case SCU_TASK_DONE_LL_PERR:
2845 case SCU_TASK_DONE_LL_SY_TERM:
2846 /* Also SCU_TASK_DONE_NAK_ERR:*/
2847 case SCU_TASK_DONE_LL_LF_TERM:
2848 /* Also SCU_TASK_DONE_DATA_LEN_ERR: */
2849 case SCU_TASK_DONE_LL_ABORT_ERR:
2850 case SCU_TASK_DONE_SEQ_INV_TYPE:
2851 /* Also SCU_TASK_DONE_UNEXP_XR: */
2852 case SCU_TASK_DONE_XR_IU_LEN_ERR:
2853 case SCU_TASK_DONE_INV_FIS_LEN:
2854 /* Also SCU_TASK_DONE_XR_WD_LEN: */
2855 case SCU_TASK_DONE_SDMA_ERR:
2856 case SCU_TASK_DONE_OFFSET_ERR:
2857 case SCU_TASK_DONE_MAX_PLD_ERR:
2858 case SCU_TASK_DONE_LF_ERR:
2859 case SCU_TASK_DONE_SMP_RESP_TO_ERR: /* Escalate to dev reset? */
2860 case SCU_TASK_DONE_SMP_LL_RX_ERR:
2861 case SCU_TASK_DONE_UNEXP_DATA:
2862 case SCU_TASK_DONE_UNEXP_SDBFIS:
2863 case SCU_TASK_DONE_REG_ERR:
2864 case SCU_TASK_DONE_SDB_ERR:
2865 case SCU_TASK_DONE_TASK_ABORT:
2866 default:
2867 /* Task in the target is not done. */
2868 *response_ptr = SAS_TASK_UNDELIVERED;
2869 *status_ptr = SAM_STAT_TASK_ABORTED;
2870 request->complete_in_target = false;
2871
2872 *complete_to_host_ptr = isci_perform_error_io_completion;
2873 break;
2874 }
2875}
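/*
 * The decode above reduces to three outcomes: completion codes where the
 * target has finished the task (complete normally with SAS_ABORTED_TASK or
 * SAS_DEVICE_UNKNOWN); abandon-class OPEN_REJECT codes (mapped onto the
 * matching SAS_OREJ_* reason); and everything else, where the task may still
 * be alive in the target and the request is routed to the error-completion
 * path so the SCSI error handler can clean it up.
 */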
2876
2877/**
2878 * isci_task_save_for_upper_layer_completion() - This function saves the
2879 * request for later completion to the upper layer driver.
2880 * @host: This parameter is a pointer to the host on which the request
2881 * should be queued (either as an error or success).
2882 * @request: This parameter is the completed request.
2883 * @response: This parameter is the response code for the completed task.
2884 * @status: This parameter is the status code for the completed task.
2885 *
2886 * none.
2887 */
2888static void isci_task_save_for_upper_layer_completion(
2889 struct isci_host *host,
2890 struct isci_request *request,
2891 enum service_response response,
2892 enum exec_status status,
2893 enum isci_completion_selection task_notification_selection)
2894{
2895 struct sas_task *task = isci_request_access_task(request);
2896
2897 task_notification_selection
2898 = isci_task_set_completion_status(task, response, status,
2899 task_notification_selection);
2900
2901 /* Tasks aborted specifically by a call to the lldd_abort_task
2902 * function should not be completed to the host in the regular path.
2903 */
2904 switch (task_notification_selection) {
2905
2906 case isci_perform_normal_io_completion:
2907
2908 /* Normal notification (task_done) */
2909 dev_dbg(&host->pdev->dev,
2910 "%s: Normal - task = %p, response=%d (%d), status=%d (%d)\n",
2911 __func__,
2912 task,
2913 task->task_status.resp, response,
2914 task->task_status.stat, status);
2915 /* Add to the completed list. */
2916 list_add(&request->completed_node,
2917 &host->requests_to_complete);
2918
2919 /* Take the request off the device's pending request list. */
2920 list_del_init(&request->dev_node);
2921 break;
2922
2923 case isci_perform_aborted_io_completion:
2924 /* No notification to libsas because this request is
2925 * already in the abort path.
2926 */
2927 dev_warn(&host->pdev->dev,
2928 "%s: Aborted - task = %p, response=%d (%d), status=%d (%d)\n",
2929 __func__,
2930 task,
2931 task->task_status.resp, response,
2932 task->task_status.stat, status);
2933
2934 /* Wake up whatever process was waiting for this
2935 * request to complete.
2936 */
2937 WARN_ON(request->io_request_completion == NULL);
2938
2939 if (request->io_request_completion != NULL) {
2940
2941 /* Signal whoever is waiting that this
2942 * request is complete.
2943 */
2944 complete(request->io_request_completion);
2945 }
2946 break;
2947
2948 case isci_perform_error_io_completion:
2949 /* Use sas_task_abort */
2950 dev_warn(&host->pdev->dev,
2951 "%s: Error - task = %p, response=%d (%d), status=%d (%d)\n",
2952 __func__,
2953 task,
2954 task->task_status.resp, response,
2955 task->task_status.stat, status);
2956 /* Add to the aborted list. */
2957 list_add(&request->completed_node,
2958 &host->requests_to_errorback);
2959 break;
2960
2961 default:
2962 dev_warn(&host->pdev->dev,
2963 "%s: Unknown - task = %p, response=%d (%d), status=%d (%d)\n",
2964 __func__,
2965 task,
2966 task->task_status.resp, response,
2967 task->task_status.stat, status);
2968
2969 /* Add to the error to libsas list. */
2970 list_add(&request->completed_node,
2971 &host->requests_to_errorback);
2972 break;
2973 }
2974}
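/*
 * Three completion routes exist above: normal I/O lands on
 * host->requests_to_complete for later task_done() callbacks; aborted I/O
 * bypasses libsas entirely and just signals the waiter through
 * request->io_request_completion; and error I/O lands on
 * host->requests_to_errorback so it can be pushed through sas_task_abort().
 */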
2975
2976static void isci_request_io_request_complete(struct isci_host *isci_host,
2977 struct isci_request *request,
2978 enum sci_io_status completion_status)
2979{
2980 struct sas_task *task = isci_request_access_task(request);
2981 struct ssp_response_iu *resp_iu;
2982 void *resp_buf;
2983 unsigned long task_flags;
2984 struct isci_remote_device *isci_device = request->isci_device;
2985 enum service_response response = SAS_TASK_UNDELIVERED;
2986 enum exec_status status = SAS_ABORTED_TASK;
2987 enum isci_request_status request_status;
2988 enum isci_completion_selection complete_to_host
2989 = isci_perform_normal_io_completion;
2990
2991 dev_dbg(&isci_host->pdev->dev,
2992 "%s: request = %p, task = %p,\n"
2993 "task->data_dir = %d completion_status = 0x%x\n",
2994 __func__,
2995 request,
2996 task,
2997 task->data_dir,
2998 completion_status);
2999
3000 spin_lock(&request->state_lock);
3001 request_status = isci_request_get_state(request);
3002
3003 /* Decode the request status. Note that if the request has been
3004 * aborted by a task management function, we don't care
3005 * what the status is.
3006 */
3007 switch (request_status) {
3008
3009 case aborted:
3010 /* "aborted" indicates that the request was aborted by a task
3011 * management function, since once a task management request is
3012 * performed by the device, the request only completes because
3013 * of the subsequent driver terminate.
3014 *
3015 * Aborted also means an external thread is explicitly managing
3016 * this request, so that we do not complete it up the stack.
3017 *
3018 * The target is still there (since the TMF was successful).
3019 */
3020 request->complete_in_target = true;
3021 response = SAS_TASK_COMPLETE;
3022
3023 /* See if the device has been/is being stopped. Note
3024 * that we ignore the quiesce state, since we are
3025 * concerned about the actual device state.
3026 */
3027 if ((isci_device->status == isci_stopping)
3028 || (isci_device->status == isci_stopped)
3029 )
3030 status = SAS_DEVICE_UNKNOWN;
3031 else
3032 status = SAS_ABORTED_TASK;
3033
3034 complete_to_host = isci_perform_aborted_io_completion;
3035 /* This was an aborted request. */
3036
3037 spin_unlock(&request->state_lock);
3038 break;
3039
3040 case aborting:
3041 /* aborting means that the task management function tried and
3042 * failed to abort the request. We need to note the request
3043 * as SAS_TASK_UNDELIVERED, so that the scsi mid layer marks the
3044 * target as down.
3045 *
3046 * Aborting also means an external thread is explicitly managing
3047 * this request, so that we do not complete it up the stack.
3048 */
3049 request->complete_in_target = true;
3050 response = SAS_TASK_UNDELIVERED;
3051
3052 if ((isci_device->status == isci_stopping) ||
3053 (isci_device->status == isci_stopped))
3054 /* The device has been/is being stopped. Note that
3055 * we ignore the quiesce state, since we are
3056 * concerned about the actual device state.
3057 */
3058 status = SAS_DEVICE_UNKNOWN;
3059 else
3060 status = SAS_PHY_DOWN;
3061
3062 complete_to_host = isci_perform_aborted_io_completion;
3063
3064 /* This was an aborted request. */
3065
3066 spin_unlock(&request->state_lock);
3067 break;
3068
3069 case terminating:
3070
3071 /* This was a terminated request. This happens when
3072 * the I/O is being terminated because of an action on
3073 * the device (reset, tear down, etc.), and the I/O needs
3074 * to be completed up the stack.
3075 */
3076 request->complete_in_target = true;
3077 response = SAS_TASK_UNDELIVERED;
3078
3079 /* See if the device has been/is being stopped. Note
3080 * that we ignore the quiesce state, since we are
3081 * concerned about the actual device state.
3082 */
3083 if ((isci_device->status == isci_stopping) ||
3084 (isci_device->status == isci_stopped))
3085 status = SAS_DEVICE_UNKNOWN;
3086 else
3087 status = SAS_ABORTED_TASK;
3088
3089 complete_to_host = isci_perform_aborted_io_completion;
3090
3091 /* This was a terminated request. */
3092
3093 spin_unlock(&request->state_lock);
3094 break;
3095
3096 default:
3097
3098 /* The request is done from an SCU HW perspective. */
3099 request->status = completed;
3100
3101 spin_unlock(&request->state_lock);
3102
3103 /* This is an active request being completed from the core. */
3104 switch (completion_status) {
3105
3106 case SCI_IO_FAILURE_RESPONSE_VALID:
3107 dev_dbg(&isci_host->pdev->dev,
3108 "%s: SCI_IO_FAILURE_RESPONSE_VALID (%p/%p)\n",
3109 __func__,
3110 request,
3111 task);
3112
3113 if (sas_protocol_ata(task->task_proto)) {
3114 resp_buf = &request->sci.stp.rsp;
3115 isci_request_process_stp_response(task,
3116 resp_buf);
3117 } else if (SAS_PROTOCOL_SSP == task->task_proto) {
3118
3119 /* crack the iu response buffer. */
3120 resp_iu = &request->sci.ssp.rsp;
3121 isci_request_process_response_iu(task, resp_iu,
3122 &isci_host->pdev->dev);
3123
3124 } else if (SAS_PROTOCOL_SMP == task->task_proto) {
3125
3126 dev_err(&isci_host->pdev->dev,
3127 "%s: SCI_IO_FAILURE_RESPONSE_VALID: "
3128 "SAS_PROTOCOL_SMP protocol\n",
3129 __func__);
3130
3131 } else
3132 dev_err(&isci_host->pdev->dev,
3133 "%s: unknown protocol\n", __func__);
3134
3135 /* use the task status set in the task struct by the
3136 * isci_request_process_response_iu call.
3137 */
3138 request->complete_in_target = true;
3139 response = task->task_status.resp;
3140 status = task->task_status.stat;
3141 break;
3142
3143 case SCI_IO_SUCCESS:
3144 case SCI_IO_SUCCESS_IO_DONE_EARLY:
3145
3146 response = SAS_TASK_COMPLETE;
3147 status = SAM_STAT_GOOD;
3148 request->complete_in_target = true;
3149
3150 if (task->task_proto == SAS_PROTOCOL_SMP) {
3151 void *rsp = &request->sci.smp.rsp;
3152
3153 dev_dbg(&isci_host->pdev->dev,
3154 "%s: SMP protocol completion\n",
3155 __func__);
3156
3157 sg_copy_from_buffer(
3158 &task->smp_task.smp_resp, 1,
3159 rsp, sizeof(struct smp_resp));
3160 } else if (completion_status
3161 == SCI_IO_SUCCESS_IO_DONE_EARLY) {
3162
3163 /* This was an SSP / STP / SATA transfer.
3164 * There is a possibility that less data than
3165 * the maximum was transferred.
3166 */
3167 u32 transferred_length = sci_req_tx_bytes(&request->sci);
3168
3169 task->task_status.residual
3170 = task->total_xfer_len - transferred_length;
3171
3172 /* If there were residual bytes, call this an
3173 * underrun.
3174 */
3175 if (task->task_status.residual != 0)
3176 status = SAS_DATA_UNDERRUN;
3177
3178 dev_dbg(&isci_host->pdev->dev,
3179 "%s: SCI_IO_SUCCESS_IO_DONE_EARLY %d\n",
3180 __func__,
3181 status);
3182
3183 } else
3184 dev_dbg(&isci_host->pdev->dev,
3185 "%s: SCI_IO_SUCCESS\n",
3186 __func__);
3187
3188 break;
3189
3190 case SCI_IO_FAILURE_TERMINATED:
3191 dev_dbg(&isci_host->pdev->dev,
3192 "%s: SCI_IO_FAILURE_TERMINATED (%p/%p)\n",
3193 __func__,
3194 request,
3195 task);
3196
3197 /* The request was terminated explicitly. No handling
3198 * is needed in the SCSI error handler path.
3199 */
3200 request->complete_in_target = true;
3201 response = SAS_TASK_UNDELIVERED;
3202
3203 /* See if the device has been/is being stopped. Note
3204 * that we ignore the quiesce state, since we are
3205 * concerned about the actual device state.
3206 */
3207 if ((isci_device->status == isci_stopping) ||
3208 (isci_device->status == isci_stopped))
3209 status = SAS_DEVICE_UNKNOWN;
3210 else
3211 status = SAS_ABORTED_TASK;
3212
3213 complete_to_host = isci_perform_normal_io_completion;
3214 break;
3215
3216 case SCI_FAILURE_CONTROLLER_SPECIFIC_IO_ERR:
3217
3218 isci_request_handle_controller_specific_errors(
3219 isci_device, request, task, &response, &status,
3220 &complete_to_host);
3221
3222 break;
3223
3224 case SCI_IO_FAILURE_REMOTE_DEVICE_RESET_REQUIRED:
3225 /* This is a special case, in that the I/O completion
3226 * is telling us that the device needs a reset.
3227 * In order for the device reset condition to be
3228 * noticed, the I/O has to be handled in the error
3229 * handler. Set the reset flag and cause the
3230 * SCSI error thread to be scheduled.
3231 */
3232 spin_lock_irqsave(&task->task_state_lock, task_flags);
3233 task->task_state_flags |= SAS_TASK_NEED_DEV_RESET;
3234 spin_unlock_irqrestore(&task->task_state_lock, task_flags);
3235
3236 /* Fail the I/O. */
3237 response = SAS_TASK_UNDELIVERED;
3238 status = SAM_STAT_TASK_ABORTED;
3239
3240 complete_to_host = isci_perform_error_io_completion;
3241 request->complete_in_target = false;
3242 break;
3243
3244 default:
3245 /* Catch any otherwise unhandled error codes here. */
3246 dev_warn(&isci_host->pdev->dev,
3247 "%s: invalid completion code: 0x%x - "
3248 "isci_request = %p\n",
3249 __func__, completion_status, request);
3250
3251 response = SAS_TASK_UNDELIVERED;
3252
3253 /* See if the device has been/is being stopped. Note
3254 * that we ignore the quiesce state, since we are
3255 * concerned about the actual device state.
3256 */
3257 if ((isci_device->status == isci_stopping) ||
3258 (isci_device->status == isci_stopped))
3259 status = SAS_DEVICE_UNKNOWN;
3260 else
3261 status = SAS_ABORTED_TASK;
3262
3263 complete_to_host = isci_perform_error_io_completion;
3264 request->complete_in_target = false;
3265 break;
3266 }
3267 break;
3268 }
3269
3270 isci_request_unmap_sgl(request, isci_host->pdev);
3271
3272 /* Put the completed request on the correct list */
3273 isci_task_save_for_upper_layer_completion(isci_host, request, response,
3274 status, complete_to_host
3275 );
3276
3277 /* complete the io request to the core. */
3278 scic_controller_complete_io(&isci_host->sci,
3279 &isci_device->sci,
3280 &request->sci);
3281 /* set terminated handle so it cannot be completed or
3282 * terminated again, and to cause any calls into abort
3283 * task to recognize the already completed case.
3284 */
3285 request->terminated = true;
3286
3287 isci_host_can_dequeue(isci_host, 1);
3288}
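The three abort-path arms of the switch above share one shape: flag the request as complete-in-target, choose a (response, status) pair based on whether the device is disappearing, and route the request to the aborted-I/O completion path. A condensed sketch of that mapping, using only the enums already visible above (illustrative, not part of the driver):

/* Illustrative sketch -- a condensed view of the abort-path decode above. */
static void sketch_decode_abort_path(enum isci_request_status request_status,
				     bool device_stopping,
				     enum service_response *response,
				     enum exec_status *status)
{
	switch (request_status) {
	case aborted:		/* TMF succeeded; the target is still present */
		*response = SAS_TASK_COMPLETE;
		*status = device_stopping ? SAS_DEVICE_UNKNOWN
					  : SAS_ABORTED_TASK;
		break;
	case aborting:		/* TMF failed; let libsas mark the target down */
		*response = SAS_TASK_UNDELIVERED;
		*status = device_stopping ? SAS_DEVICE_UNKNOWN
					  : SAS_PHY_DOWN;
		break;
	case terminating:	/* terminated by a device-level action */
		*response = SAS_TASK_UNDELIVERED;
		*status = device_stopping ? SAS_DEVICE_UNKNOWN
					  : SAS_ABORTED_TASK;
		break;
	default:
		break;
	}
}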
3289
3290/**
3291 * scic_sds_request_initial_state_enter() -
3292 * @object: This parameter specifies the base object for which the state
3293 * transition is occurring.
3294 *
3295 * This method implements the actions taken when entering the
3296 * SCI_BASE_REQUEST_STATE_INITIAL state. This state is entered when the initial
3297 * base request is constructed. Entry into the initial state sets all handlers
3298 * for the io request object to their default handlers.
3299 */
3300static void scic_sds_request_initial_state_enter(void *object)
3301{
3302 struct scic_sds_request *sci_req = object;
3303
3304 SET_STATE_HANDLER(
3305 sci_req,
3306 scic_sds_request_state_handler_table,
3307 SCI_BASE_REQUEST_STATE_INITIAL
3308 );
3309}
3310
3311/**
3312 * scic_sds_request_constructed_state_enter() -
3313 * @object: The io request object that is to enter the constructed state.
3314 *
3315 * This method implements the actions taken when entering the
3316 * SCI_BASE_REQUEST_STATE_CONSTRUCTED state. The method sets the state handlers
3317 * for the constructed state.
3318 */
3319static void scic_sds_request_constructed_state_enter(void *object)
3320{
3321 struct scic_sds_request *sci_req = object;
3322
3323 SET_STATE_HANDLER(
3324 sci_req,
3325 scic_sds_request_state_handler_table,
3326 SCI_BASE_REQUEST_STATE_CONSTRUCTED
3327 );
3328}
3329
3330static void scic_sds_request_started_state_enter(void *object)
3331{
3332 struct scic_sds_request *sci_req = object;
3333 struct sci_base_state_machine *sm = &sci_req->state_machine;
3334 struct isci_request *ireq = sci_req_to_ireq(sci_req);
3335 struct domain_device *dev = sci_dev_to_domain(sci_req->target_device);
3336 struct sas_task *task;
3337
3338 /* XXX as hch said always creating an internal sas_task for tmf
3339 * requests would simplify the driver
3340 */
3341 task = ireq->ttype == io_task ? isci_request_access_task(ireq) : NULL;
3342
3343 SET_STATE_HANDLER(
3344 sci_req,
3345 scic_sds_request_state_handler_table,
3346 SCI_BASE_REQUEST_STATE_STARTED
3347 );
3348
3349 /* all unaccelerated request types (i.e. anything other than SSP or
3350 * NCQ) are handled with substates
3351 */
3352 if (!task && dev->dev_type == SAS_END_DEV) {
3353 sci_base_state_machine_change_state(sm,
3354 SCIC_SDS_IO_REQUEST_STARTED_TASK_MGMT_SUBSTATE_AWAIT_TC_COMPLETION);
3355 } else if (!task &&
3356 (isci_request_access_tmf(ireq)->tmf_code == isci_tmf_sata_srst_high ||
3357 isci_request_access_tmf(ireq)->tmf_code == isci_tmf_sata_srst_low)) {
3358 sci_base_state_machine_change_state(sm,
3359 SCIC_SDS_STP_REQUEST_STARTED_SOFT_RESET_AWAIT_H2D_ASSERTED_COMPLETION_SUBSTATE);
3360 } else if (task && task->task_proto == SAS_PROTOCOL_SMP) {
3361 sci_base_state_machine_change_state(sm,
3362 SCIC_SDS_SMP_REQUEST_STARTED_SUBSTATE_AWAIT_RESPONSE);
3363 } else if (task && sas_protocol_ata(task->task_proto) &&
3364 !task->ata_task.use_ncq) {
3365 u32 state;
3366
3367 if (task->data_dir == DMA_NONE)
3368 state = SCIC_SDS_STP_REQUEST_STARTED_NON_DATA_AWAIT_H2D_COMPLETION_SUBSTATE;
3369 else if (task->ata_task.dma_xfer)
3370 state = SCIC_SDS_STP_REQUEST_STARTED_UDMA_AWAIT_TC_COMPLETION_SUBSTATE;
3371 else /* PIO */
3372 state = SCIC_SDS_STP_REQUEST_STARTED_PIO_AWAIT_H2D_COMPLETION_SUBSTATE;
3373
3374 sci_base_state_machine_change_state(sm, state);
3375	}
3376}
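Read as a whole, scic_sds_request_started_state_enter() is a routing function: TMF requests to SSP end devices get the task-management substates, SATA soft-reset TMFs get the soft-reset substates, SMP gets its await-response substate, and non-NCQ ATA is routed by transfer mode; fully accelerated I/O (SSP and NCQ) simply stays in the plain started state. A simplified sketch of the ATA leg of that decision, under the same enum names used above:

/* Sketch: substate selection for an unaccelerated (non-NCQ) ATA task. */
static u32 sketch_ata_started_substate(struct sas_task *task)
{
	if (task->data_dir == DMA_NONE)	/* non-data command */
		return SCIC_SDS_STP_REQUEST_STARTED_NON_DATA_AWAIT_H2D_COMPLETION_SUBSTATE;
	if (task->ata_task.dma_xfer)	/* UDMA transfer */
		return SCIC_SDS_STP_REQUEST_STARTED_UDMA_AWAIT_TC_COMPLETION_SUBSTATE;
	/* otherwise PIO */
	return SCIC_SDS_STP_REQUEST_STARTED_PIO_AWAIT_H2D_COMPLETION_SUBSTATE;
}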
3377
3378/**
3379 * scic_sds_request_completed_state_enter() -
3380 * @object: This parameter specifies the base object for which the state
3381 * transition is occurring. This object is cast into a SCIC_SDS_IO_REQUEST
3382 * object.
3383 *
3384 * This method implements the actions taken when entering the
3385 * SCI_BASE_REQUEST_STATE_COMPLETED state. This state is entered when the
3386 * SCIC_SDS_IO_REQUEST has completed. The method will decode the request
3387 * completion status and convert it to an enum sci_status to return in the
3388 * completion callback function.
3389 */
3390static void scic_sds_request_completed_state_enter(void *object)
3391{
3392 struct scic_sds_request *sci_req = object;
3393 struct scic_sds_controller *scic =
3394 scic_sds_request_get_controller(sci_req);
3395 struct isci_host *ihost = scic_to_ihost(scic);
3396 struct isci_request *ireq = sci_req_to_ireq(sci_req);
3397
3398 SET_STATE_HANDLER(sci_req,
3399 scic_sds_request_state_handler_table,
3400 SCI_BASE_REQUEST_STATE_COMPLETED);
3401
3402 /* Tell the SCI_USER that the IO request is complete */
3403 if (sci_req->is_task_management_request == false)
3404 isci_request_io_request_complete(ihost, ireq,
3405 sci_req->sci_status);
3406 else
3407 isci_task_request_complete(ihost, ireq, sci_req->sci_status);
3408}
3409
3410/**
3411 * scic_sds_request_aborting_state_enter() -
3412 * @object: This parameter specifies the base object for which the state
3413 * transition is occurring. This object is cast into a SCIC_SDS_IO_REQUEST
3414 * object.
3415 *
3416 * This method implements the actions taken when entering the
3417 * SCI_BASE_REQUEST_STATE_ABORTING state.
3418 */
3419static void scic_sds_request_aborting_state_enter(void *object)
3420{
3421 struct scic_sds_request *sci_req = object;
3422
3423 /* Setting the abort bit in the Task Context is required by the silicon. */
3424 sci_req->task_context_buffer->abort = 1;
3425
3426 SET_STATE_HANDLER(
3427 sci_req,
3428 scic_sds_request_state_handler_table,
3429 SCI_BASE_REQUEST_STATE_ABORTING
3430 );
3431}
3432
3433/**
3434 * scic_sds_request_final_state_enter() -
3435 * @object: This parameter specifies the base object for which the state
3436 * transition is occurring. This is cast into a SCIC_SDS_IO_REQUEST object.
3437 *
3438 * This method implements the actions taken when entering the
3439 * SCI_BASE_REQUEST_STATE_FINAL state. The only action required is to put the
3440 * state handlers in place.
3441 */
3442static void scic_sds_request_final_state_enter(void *object)
3443{
3444 struct scic_sds_request *sci_req = object;
3445
3446 SET_STATE_HANDLER(
3447 sci_req,
3448 scic_sds_request_state_handler_table,
3449 SCI_BASE_REQUEST_STATE_FINAL
3450 );
3451}
3452
3453static void scic_sds_io_request_started_task_mgmt_await_tc_completion_substate_enter(
3454 void *object)
3455{
3456 struct scic_sds_request *sci_req = object;
3457
3458 SET_STATE_HANDLER(
3459 sci_req,
3460 scic_sds_request_state_handler_table,
3461 SCIC_SDS_IO_REQUEST_STARTED_TASK_MGMT_SUBSTATE_AWAIT_TC_COMPLETION
3462 );
3463}
3464
3465static void scic_sds_io_request_started_task_mgmt_await_task_response_substate_enter(
3466 void *object)
3467{
3468 struct scic_sds_request *sci_req = object;
3469
3470 SET_STATE_HANDLER(
3471 sci_req,
3472 scic_sds_request_state_handler_table,
3473 SCIC_SDS_IO_REQUEST_STARTED_TASK_MGMT_SUBSTATE_AWAIT_TC_RESPONSE
3474 );
3475}
3476
3477static void scic_sds_smp_request_started_await_response_substate_enter(void *object)
3478{
3479 struct scic_sds_request *sci_req = object;
3480
3481 SET_STATE_HANDLER(
3482 sci_req,
3483 scic_sds_request_state_handler_table,
3484 SCIC_SDS_SMP_REQUEST_STARTED_SUBSTATE_AWAIT_RESPONSE
3485 );
3486}
3487
3488static void scic_sds_smp_request_started_await_tc_completion_substate_enter(void *object)
3489{
3490 struct scic_sds_request *sci_req = object;
3491
3492 SET_STATE_HANDLER(
3493 sci_req,
3494 scic_sds_request_state_handler_table,
3495 SCIC_SDS_SMP_REQUEST_STARTED_SUBSTATE_AWAIT_TC_COMPLETION
3496 );
3497}
3498
3499static void scic_sds_stp_request_started_non_data_await_h2d_completion_enter(
3500 void *object)
3501{
3502 struct scic_sds_request *sci_req = object;
3503
3504 SET_STATE_HANDLER(
3505 sci_req,
3506 scic_sds_request_state_handler_table,
3507 SCIC_SDS_STP_REQUEST_STARTED_NON_DATA_AWAIT_H2D_COMPLETION_SUBSTATE
3508 );
3509
3510 scic_sds_remote_device_set_working_request(
3511 sci_req->target_device, sci_req
3512 );
3513}
3514
3515static void scic_sds_stp_request_started_non_data_await_d2h_enter(void *object)
3516{
3517 struct scic_sds_request *sci_req = object;
3518
3519 SET_STATE_HANDLER(
3520 sci_req,
3521 scic_sds_request_state_handler_table,
3522 SCIC_SDS_STP_REQUEST_STARTED_NON_DATA_AWAIT_D2H_SUBSTATE
3523 );
3524}
3525
3526
3527
3528static void scic_sds_stp_request_started_pio_await_h2d_completion_enter(
3529 void *object)
3530{
3531 struct scic_sds_request *sci_req = object;
3532
3533 SET_STATE_HANDLER(
3534 sci_req,
3535 scic_sds_request_state_handler_table,
3536 SCIC_SDS_STP_REQUEST_STARTED_PIO_AWAIT_H2D_COMPLETION_SUBSTATE
3537 );
3538
3539 scic_sds_remote_device_set_working_request(
3540 sci_req->target_device, sci_req);
3541}
3542
3543static void scic_sds_stp_request_started_pio_await_frame_enter(void *object)
3544{
3545 struct scic_sds_request *sci_req = object;
3546
3547 SET_STATE_HANDLER(
3548 sci_req,
3549 scic_sds_request_state_handler_table,
3550 SCIC_SDS_STP_REQUEST_STARTED_PIO_AWAIT_FRAME_SUBSTATE
3551 );
3552}
3553
3554static void scic_sds_stp_request_started_pio_data_in_await_data_enter(
3555 void *object)
3556{
3557 struct scic_sds_request *sci_req = object;
3558
3559 SET_STATE_HANDLER(
3560 sci_req,
3561 scic_sds_request_state_handler_table,
3562 SCIC_SDS_STP_REQUEST_STARTED_PIO_DATA_IN_AWAIT_DATA_SUBSTATE
3563 );
3564}
3565
3566static void scic_sds_stp_request_started_pio_data_out_transmit_data_enter(
3567 void *object)
3568{
3569 struct scic_sds_request *sci_req = object;
3570
3571 SET_STATE_HANDLER(
3572 sci_req,
3573 scic_sds_request_state_handler_table,
3574 SCIC_SDS_STP_REQUEST_STARTED_PIO_DATA_OUT_TRANSMIT_DATA_SUBSTATE
3575 );
3576}
3577
3578
3579
3580static void scic_sds_stp_request_started_udma_await_tc_completion_enter(
3581 void *object)
3582{
3583 struct scic_sds_request *sci_req = object;
3584
3585 SET_STATE_HANDLER(
3586 sci_req,
3587 scic_sds_request_state_handler_table,
3588 SCIC_SDS_STP_REQUEST_STARTED_UDMA_AWAIT_TC_COMPLETION_SUBSTATE
3589 );
3590}
3591
3592/**
3593 *
3594 *
3595 * This state is entered when there is a TC completion failure. The hardware
3596 * received an unexpected condition while processing the IO request and now
3597 * will deliver the D2H register FIS as an unsolicited frame (UF) to complete the IO.
3598 */
3599static void scic_sds_stp_request_started_udma_await_d2h_reg_fis_enter(
3600 void *object)
3601{
3602 struct scic_sds_request *sci_req = object;
3603
3604 SET_STATE_HANDLER(
3605 sci_req,
3606 scic_sds_request_state_handler_table,
3607 SCIC_SDS_STP_REQUEST_STARTED_UDMA_AWAIT_D2H_REG_FIS_SUBSTATE
3608 );
3609}
3610
3611
3612
3613static void scic_sds_stp_request_started_soft_reset_await_h2d_asserted_completion_enter(
3614 void *object)
3615{
3616 struct scic_sds_request *sci_req = object;
3617
3618 SET_STATE_HANDLER(
3619 sci_req,
3620 scic_sds_request_state_handler_table,
3621 SCIC_SDS_STP_REQUEST_STARTED_SOFT_RESET_AWAIT_H2D_ASSERTED_COMPLETION_SUBSTATE
3622 );
3623
3624 scic_sds_remote_device_set_working_request(
3625 sci_req->target_device, sci_req
3626 );
3627}
3628
3629static void scic_sds_stp_request_started_soft_reset_await_h2d_diagnostic_completion_enter(
3630 void *object)
3631{
3632 struct scic_sds_request *sci_req = object;
3633 struct scu_task_context *task_context;
3634 struct host_to_dev_fis *h2d_fis;
3635 enum sci_status status;
3636
3637 /* Clear the SRST bit */
3638 h2d_fis = &sci_req->stp.cmd;
3639 h2d_fis->control = 0;
3640
3641 /* Clear the TC control bit */
3642 task_context = scic_sds_controller_get_task_context_buffer(
3643 sci_req->owning_controller, sci_req->io_tag);
3644 task_context->control_frame = 0;
3645
3646 status = scic_controller_continue_io(sci_req);
3647 if (status == SCI_SUCCESS) {
3648 SET_STATE_HANDLER(
3649 sci_req,
3650 scic_sds_request_state_handler_table,
3651 SCIC_SDS_STP_REQUEST_STARTED_SOFT_RESET_AWAIT_H2D_DIAGNOSTIC_COMPLETION_SUBSTATE
3652 );
3653 }
3654}
3655
3656static void scic_sds_stp_request_started_soft_reset_await_d2h_response_enter(
3657 void *object)
3658{
3659 struct scic_sds_request *sci_req = object;
3660
3661 SET_STATE_HANDLER(
3662 sci_req,
3663 scic_sds_request_state_handler_table,
3664 SCIC_SDS_STP_REQUEST_STARTED_SOFT_RESET_AWAIT_D2H_RESPONSE_FRAME_SUBSTATE
3665 );
3666}
3667
3668static const struct sci_base_state scic_sds_request_state_table[] = {
3669 [SCI_BASE_REQUEST_STATE_INITIAL] = {
3670 .enter_state = scic_sds_request_initial_state_enter,
3671 },
3672 [SCI_BASE_REQUEST_STATE_CONSTRUCTED] = {
3673 .enter_state = scic_sds_request_constructed_state_enter,
3674 },
3675 [SCI_BASE_REQUEST_STATE_STARTED] = {
3676 .enter_state = scic_sds_request_started_state_enter,
3677 },
3678 [SCIC_SDS_STP_REQUEST_STARTED_NON_DATA_AWAIT_H2D_COMPLETION_SUBSTATE] = {
3679 .enter_state = scic_sds_stp_request_started_non_data_await_h2d_completion_enter,
3680 },
3681 [SCIC_SDS_STP_REQUEST_STARTED_NON_DATA_AWAIT_D2H_SUBSTATE] = {
3682 .enter_state = scic_sds_stp_request_started_non_data_await_d2h_enter,
3683 },
3684 [SCIC_SDS_STP_REQUEST_STARTED_PIO_AWAIT_H2D_COMPLETION_SUBSTATE] = {
3685 .enter_state = scic_sds_stp_request_started_pio_await_h2d_completion_enter,
3686 },
3687 [SCIC_SDS_STP_REQUEST_STARTED_PIO_AWAIT_FRAME_SUBSTATE] = {
3688 .enter_state = scic_sds_stp_request_started_pio_await_frame_enter,
3689 },
3690 [SCIC_SDS_STP_REQUEST_STARTED_PIO_DATA_IN_AWAIT_DATA_SUBSTATE] = {
3691 .enter_state = scic_sds_stp_request_started_pio_data_in_await_data_enter,
3692 },
3693 [SCIC_SDS_STP_REQUEST_STARTED_PIO_DATA_OUT_TRANSMIT_DATA_SUBSTATE] = {
3694 .enter_state = scic_sds_stp_request_started_pio_data_out_transmit_data_enter,
3695 },
3696 [SCIC_SDS_STP_REQUEST_STARTED_UDMA_AWAIT_TC_COMPLETION_SUBSTATE] = {
3697 .enter_state = scic_sds_stp_request_started_udma_await_tc_completion_enter,
3698 },
3699 [SCIC_SDS_STP_REQUEST_STARTED_UDMA_AWAIT_D2H_REG_FIS_SUBSTATE] = {
3700 .enter_state = scic_sds_stp_request_started_udma_await_d2h_reg_fis_enter,
3701 },
3702 [SCIC_SDS_STP_REQUEST_STARTED_SOFT_RESET_AWAIT_H2D_ASSERTED_COMPLETION_SUBSTATE] = {
3703 .enter_state = scic_sds_stp_request_started_soft_reset_await_h2d_asserted_completion_enter,
3704 },
3705 [SCIC_SDS_STP_REQUEST_STARTED_SOFT_RESET_AWAIT_H2D_DIAGNOSTIC_COMPLETION_SUBSTATE] = {
3706 .enter_state = scic_sds_stp_request_started_soft_reset_await_h2d_diagnostic_completion_enter,
3707 },
3708 [SCIC_SDS_STP_REQUEST_STARTED_SOFT_RESET_AWAIT_D2H_RESPONSE_FRAME_SUBSTATE] = {
3709 .enter_state = scic_sds_stp_request_started_soft_reset_await_d2h_response_enter,
3710	},
3711 [SCIC_SDS_IO_REQUEST_STARTED_TASK_MGMT_SUBSTATE_AWAIT_TC_COMPLETION] = {
3712 .enter_state = scic_sds_io_request_started_task_mgmt_await_tc_completion_substate_enter,
3713 },
3714 [SCIC_SDS_IO_REQUEST_STARTED_TASK_MGMT_SUBSTATE_AWAIT_TC_RESPONSE] = {
3715 .enter_state = scic_sds_io_request_started_task_mgmt_await_task_response_substate_enter,
3716 },
3717 [SCIC_SDS_SMP_REQUEST_STARTED_SUBSTATE_AWAIT_RESPONSE] = {
3718 .enter_state = scic_sds_smp_request_started_await_response_substate_enter,
3719 },
3720 [SCIC_SDS_SMP_REQUEST_STARTED_SUBSTATE_AWAIT_TC_COMPLETION] = {
3721 .enter_state = scic_sds_smp_request_started_await_tc_completion_substate_enter,
3722 },
3723 [SCI_BASE_REQUEST_STATE_COMPLETED] = {
3724 .enter_state = scic_sds_request_completed_state_enter,
3725 },
3726 [SCI_BASE_REQUEST_STATE_ABORTING] = {
3727 .enter_state = scic_sds_request_aborting_state_enter,
3728 },
3729 [SCI_BASE_REQUEST_STATE_FINAL] = {
3730 .enter_state = scic_sds_request_final_state_enter,
3731 },
3732};
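Every entry in the table supplies only an .enter_state callback, so all per-state setup happens on transition. A minimal usage sketch of the same state-machine API this file relies on:

/* Sketch: constructing and stepping the request state machine. */
static void sketch_request_lifecycle(struct scic_sds_request *sci_req)
{
	sci_base_state_machine_construct(&sci_req->state_machine, sci_req,
					 scic_sds_request_state_table,
					 SCI_BASE_REQUEST_STATE_INITIAL);
	sci_base_state_machine_start(&sci_req->state_machine);

	/* each change_state() runs the destination state's .enter_state */
	sci_base_state_machine_change_state(&sci_req->state_machine,
					    SCI_BASE_REQUEST_STATE_CONSTRUCTED);
}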
3733
3734static void scic_sds_general_request_construct(struct scic_sds_controller *scic,
3735 struct scic_sds_remote_device *sci_dev,
3736 u16 io_tag, struct scic_sds_request *sci_req)
3737{
3738 sci_base_state_machine_construct(&sci_req->state_machine, sci_req,
3739 scic_sds_request_state_table, SCI_BASE_REQUEST_STATE_INITIAL);
3740 sci_base_state_machine_start(&sci_req->state_machine);
3741
3742 sci_req->io_tag = io_tag;
3743 sci_req->owning_controller = scic;
3744 sci_req->target_device = sci_dev;
3745 sci_req->protocol = SCIC_NO_PROTOCOL;
3746 sci_req->saved_rx_frame_index = SCU_INVALID_FRAME_INDEX;
3747 sci_req->device_sequence = scic_sds_remote_device_get_sequence(sci_dev);
3748
3749 sci_req->sci_status = SCI_SUCCESS;
3750 sci_req->scu_status = 0;
3751 sci_req->post_context = 0xFFFFFFFF;
3752
3753 sci_req->is_task_management_request = false;
3754
3755 if (io_tag == SCI_CONTROLLER_INVALID_IO_TAG) {
3756 sci_req->was_tag_assigned_by_user = false;
3757	sci_req->task_context_buffer = &sci_req->tc;
3758 } else {
3759 sci_req->was_tag_assigned_by_user = true;
3760
3761 sci_req->task_context_buffer =
3762 scic_sds_controller_get_task_context_buffer(scic, io_tag);
3763 }
3764}
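The io_tag branch above decides where the task context lives: with SCI_CONTROLLER_INVALID_IO_TAG the request uses its embedded context until the controller assigns a TCi at start-I/O time, while a caller-supplied tag resolves the controller's task context buffer immediately. A sketch of the two flavors, shown back-to-back for illustration (the explicit tag value is hypothetical, normally handed out by a tag allocator):

/* Sketch: the two tag-assignment flavors accepted by the constructor. */
static void sketch_tag_flavors(struct scic_sds_controller *scic,
			       struct scic_sds_remote_device *sci_dev,
			       struct scic_sds_request *sci_req)
{
	u16 io_tag = 0;	/* hypothetical caller-owned tag */

	/* core assigns the tag later, at scic_controller_start_io() time */
	scic_sds_general_request_construct(scic, sci_dev,
					   SCI_CONTROLLER_INVALID_IO_TAG,
					   sci_req);

	/* caller-owned tag: task context buffer is known immediately */
	scic_sds_general_request_construct(scic, sci_dev, io_tag, sci_req);
}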
3765
3766static enum sci_status
3767scic_io_request_construct(struct scic_sds_controller *scic,
3768 struct scic_sds_remote_device *sci_dev,
3769 u16 io_tag, struct scic_sds_request *sci_req)
3770{
3771 struct domain_device *dev = sci_dev_to_domain(sci_dev);
3772 enum sci_status status = SCI_SUCCESS;
3773
3774 /* Build the common part of the request */
3775 scic_sds_general_request_construct(scic, sci_dev, io_tag, sci_req);
3776
3777	if (sci_dev->rnc.remote_node_index == SCIC_SDS_REMOTE_NODE_CONTEXT_INVALID_INDEX)
3778 return SCI_FAILURE_INVALID_REMOTE_DEVICE;
3779
3780 if (dev->dev_type == SAS_END_DEV)
3781 /* pass */;
3782 else if (dev->dev_type == SATA_DEV || (dev->tproto & SAS_PROTOCOL_STP))
3783	memset(&sci_req->stp.cmd, 0, sizeof(sci_req->stp.cmd));
3784	else if (dev_is_expander(dev))
3785	memset(&sci_req->smp.cmd, 0, sizeof(sci_req->smp.cmd));
3786 else
3787 return SCI_FAILURE_UNSUPPORTED_PROTOCOL;
3788
3789 memset(sci_req->task_context_buffer, 0,
3790 offsetof(struct scu_task_context, sgl_pair_ab));
3791
3792 return status;
3793}
3794
3795enum sci_status scic_task_request_construct(struct scic_sds_controller *scic,
3796 struct scic_sds_remote_device *sci_dev,
3797 u16 io_tag, struct scic_sds_request *sci_req)
3798{
3799 struct domain_device *dev = sci_dev_to_domain(sci_dev);
3800 enum sci_status status = SCI_SUCCESS;
3801
3802 /* Build the common part of the request */
3803 scic_sds_general_request_construct(scic, sci_dev, io_tag, sci_req);
3804
3805 if (dev->dev_type == SAS_END_DEV ||
3806 dev->dev_type == SATA_DEV || (dev->tproto & SAS_PROTOCOL_STP)) {
3807 sci_req->is_task_management_request = true;
3808 memset(sci_req->task_context_buffer, 0, sizeof(struct scu_task_context));
3809 } else
3810 status = SCI_FAILURE_UNSUPPORTED_PROTOCOL;
3811
3812 return status;
3813}
3814
3815static enum sci_status isci_request_ssp_request_construct(
3816 struct isci_request *request)
3817{
3818 enum sci_status status;
3819
3820 dev_dbg(&request->isci_host->pdev->dev,
3821 "%s: request = %p\n",
3822 __func__,
3823 request);
3824 status = scic_io_request_construct_basic_ssp(&request->sci);
3825 return status;
3826}
3827
3828static enum sci_status isci_request_stp_request_construct(
3829 struct isci_request *request)
3830{
3831 struct sas_task *task = isci_request_access_task(request);
3832 enum sci_status status;
3833 struct host_to_dev_fis *register_fis;
3834
3835 dev_dbg(&request->isci_host->pdev->dev,
3836 "%s: request = %p\n",
3837 __func__,
3838 request);
3839
3840 /* Get the host_to_dev_fis from the core and copy
3841 * the fis from the task into it.
3842 */
3843 register_fis = isci_sata_task_to_fis_copy(task);
3844
3845 status = scic_io_request_construct_basic_sata(&request->sci);
3846
3847 /* Set the ncq tag in the fis, from the queue
3848 * command in the task.
3849 */
3850 if (isci_sata_is_task_ncq(task)) {
3851
3852 isci_sata_set_ncq_tag(
3853 register_fis,
3854 task
3855 );
3856 }
3857
3858 return status;
3859}
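For NCQ the queue tag must also appear in the FIS itself: isci_sata_set_ncq_tag() places it in the H2D FIS sector-count field, where the NCQ protocol carries the tag in bits 7:3. A hedged sketch of that packing (not the driver's actual helper; the tag parameter is assumed to come from the queued command):

/* Sketch: an NCQ tag travels in bits 7:3 of the H2D FIS sector count. */
static void sketch_set_ncq_tag(struct host_to_dev_fis *fis, unsigned int tag)
{
	fis->sector_count = tag << 3;
}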
3860
3861/*
3862 * This function will fill in the SCU Task Context for a SMP request. The
3863 * following important settings are utilized:
3864 * - task_type == SCU_TASK_TYPE_SMP. This simply indicates that a normal
3865 * request type (i.e. non-raw frame) is being utilized to perform task
3866 * management.
3867 * - control_frame == 1. This ensures that the proper endianness is set so
3868 * that the bytes are transmitted in the right order for a smp request frame.
3869 * @sci_req: This parameter specifies the smp request object being
3870 * constructed.
3871 */
3872static void
3873scu_smp_request_construct_task_context(struct scic_sds_request *sci_req,
3874 struct smp_req *smp_req)
3875{
3876 dma_addr_t dma_addr;
3877 struct scic_sds_controller *scic;
3878 struct scic_sds_remote_device *sci_dev;
3879 struct scic_sds_port *sci_port;
3880 struct scu_task_context *task_context;
3881 ssize_t word_cnt = sizeof(struct smp_req) / sizeof(u32);
3882
3883 /* byte swap the smp request. */
3884 sci_swab32_cpy(&sci_req->smp.cmd, smp_req,
3885 word_cnt);
3886
3887 task_context = scic_sds_request_get_task_context(sci_req);
3888
3889 scic = scic_sds_request_get_controller(sci_req);
3890 sci_dev = scic_sds_request_get_device(sci_req);
3891 sci_port = scic_sds_request_get_port(sci_req);
3892
3893 /*
3894 * Fill in the TC with its required data
3895 * 00h
3896 */
3897 task_context->priority = 0;
3898 task_context->initiator_request = 1;
3899 task_context->connection_rate = sci_dev->connection_rate;
3900 task_context->protocol_engine_index =
3901 scic_sds_controller_get_protocol_engine_group(scic);
3902 task_context->logical_port_index = scic_sds_port_get_index(sci_port);
3903 task_context->protocol_type = SCU_TASK_CONTEXT_PROTOCOL_SMP;
3904 task_context->abort = 0;
3905 task_context->valid = SCU_TASK_CONTEXT_VALID;
3906 task_context->context_type = SCU_TASK_CONTEXT_TYPE;
3907
3908 /* 04h */
3909 task_context->remote_node_index = sci_dev->rnc.remote_node_index;
3910 task_context->command_code = 0;
3911 task_context->task_type = SCU_TASK_TYPE_SMP_REQUEST;
3912
3913 /* 08h */
3914 task_context->link_layer_control = 0;
3915 task_context->do_not_dma_ssp_good_response = 1;
3916 task_context->strict_ordering = 0;
3917 task_context->control_frame = 1;
3918 task_context->timeout_enable = 0;
3919 task_context->block_guard_enable = 0;
3920
3921 /* 0ch */
3922 task_context->address_modifier = 0;
3923
3924 /* 10h */
3925 task_context->ssp_command_iu_length = smp_req->req_len;
3926
3927 /* 14h */
3928 task_context->transfer_length_bytes = 0;
3929
3930 /*
3931 * 18h ~ 30h, protocol specific
3932 * since the command IU has been built by the framework at this point, we
3933 * just copy the first DWord from the command IU to this location. */
3934 memcpy(&task_context->type.smp, &sci_req->smp.cmd, sizeof(u32));
3935
3936 /*
3937 * 40h
3938 * "For SMP you could program it to zero. We would prefer that way
3939 * so that done code will be consistent." - Venki
3940 */
3941 task_context->task_phase = 0;
3942
3943 if (sci_req->was_tag_assigned_by_user) {
3944 /*
3945 * Build the task context now since we have already read
3946 * the data
3947 */
3948 sci_req->post_context =
3949 (SCU_CONTEXT_COMMAND_REQUEST_TYPE_POST_TC |
3950 (scic_sds_controller_get_protocol_engine_group(scic) <<
3951 SCU_CONTEXT_COMMAND_PROTOCOL_ENGINE_GROUP_SHIFT) |
3952 (scic_sds_port_get_index(sci_port) <<
3953 SCU_CONTEXT_COMMAND_LOGICAL_PORT_SHIFT) |
3954 scic_sds_io_tag_get_index(sci_req->io_tag));
3955 } else {
3956 /*
3957 * Build the task context now since we have already read
3958 * the data.
3959 * I/O tag index is not assigned because we have to wait
3960 * until we get a TCi.
3961 */
3962 sci_req->post_context =
3963 (SCU_CONTEXT_COMMAND_REQUEST_TYPE_POST_TC |
3964 (scic_sds_controller_get_protocol_engine_group(scic) <<
3965 SCU_CONTEXT_COMMAND_PROTOCOL_ENGINE_GROUP_SHIFT) |
3966 (scic_sds_port_get_index(sci_port) <<
3967 SCU_CONTEXT_COMMAND_LOGICAL_PORT_SHIFT));
3968 }
3969
3970 /*
3971 * Copy the physical address of the command buffer into the SCU Task
3972 * Context; the command buffer should not contain the command header.
3973 */
3974 dma_addr = scic_io_request_get_dma_addr(sci_req,
3975 ((char *) &sci_req->smp.cmd) +
3976 sizeof(u32));
3977
3978 task_context->command_iu_upper = upper_32_bits(dma_addr);
3979 task_context->command_iu_lower = lower_32_bits(dma_addr);
3980
3981 /* SMP response comes as UF, so no need to set response IU address. */
3982 task_context->response_iu_upper = 0;
3983 task_context->response_iu_lower = 0;
3984}
3985
3986static enum sci_status scic_io_request_construct_smp(struct scic_sds_request *sci_req)
3987{
3988 struct smp_req *smp_req = kmalloc(sizeof(*smp_req), GFP_KERNEL);
3989
3990 if (!smp_req)
3991 return SCI_FAILURE_INSUFFICIENT_RESOURCES;
3992
3993 sci_req->protocol = SCIC_SMP_PROTOCOL;
3994
3995 /* Construct the SMP SCU Task Context */
3996 memcpy(smp_req, &sci_req->smp.cmd, sizeof(*smp_req));
3997
3998 /*
3999 * Look at the SMP request's header fields; for certain SAS 1.x SMP
4000 * functions under SAS 2.0, a zero request length really indicates
4001 * a non-zero default length. */
4002 if (smp_req->req_len == 0) {
4003 switch (smp_req->func) {
4004 case SMP_DISCOVER:
4005 case SMP_REPORT_PHY_ERR_LOG:
4006 case SMP_REPORT_PHY_SATA:
4007 case SMP_REPORT_ROUTE_INFO:
4008 smp_req->req_len = 2;
4009 break;
4010 case SMP_CONF_ROUTE_INFO:
4011 case SMP_PHY_CONTROL:
4012 case SMP_PHY_TEST_FUNCTION:
4013 smp_req->req_len = 9;
4014 break;
4015 /* Default - zero is a valid default for 2.0. */
4016 }
4017 }
4018
4019 scu_smp_request_construct_task_context(sci_req, smp_req);
4020
4021 sci_base_state_machine_change_state(&sci_req->state_machine,
4022 SCI_BASE_REQUEST_STATE_CONSTRUCTED);
4023
4024 kfree(smp_req);
4025
4026 return SCI_SUCCESS;
4027}
4028
4029/*
4030 * isci_smp_request_build() - This function builds the smp request.
4031 * @ireq: This parameter points to the isci_request allocated in the
4032 * request construct function.
4033 *
4034 * SCI_SUCCESS on successful completion, or specific failure code.
4035 */
4036static enum sci_status isci_smp_request_build(struct isci_request *ireq)
4037{
4038 enum sci_status status = SCI_FAILURE;
4039 struct sas_task *task = isci_request_access_task(ireq);
4040 struct scic_sds_request *sci_req = &ireq->sci;
4041
4042 dev_dbg(&ireq->isci_host->pdev->dev,
4043 "%s: request = %p\n", __func__, ireq);
4044
4045 dev_dbg(&ireq->isci_host->pdev->dev,
4046 "%s: smp_req len = %d\n",
4047 __func__,
4048 task->smp_task.smp_req.length);
4049
4050 /* copy the smp command into the core's command buffer; */
4051 sg_copy_to_buffer(&task->smp_task.smp_req, 1,
4052 &sci_req->smp.cmd,
4053 sizeof(struct smp_req));
4054
4055 status = scic_io_request_construct_smp(sci_req);
4056 if (status != SCI_SUCCESS)
4057 dev_warn(&ireq->isci_host->pdev->dev,
4058 "%s: failed with status = %d\n",
4059 __func__,
4060 status);
4061
4062 return status;
4063}
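The single-entry scatterlist copy here is the mirror image of the completion path earlier in this file, which uses sg_copy_from_buffer() to return the SMP response. A round-trip sketch of the pairing:

/* Sketch: staging an SMP frame out of, then back into, scatterlists. */
static void sketch_smp_round_trip(struct sas_task *task,
				  struct smp_req *req,
				  struct smp_resp *resp)
{
	/* request: scatterlist -> driver buffer */
	sg_copy_to_buffer(&task->smp_task.smp_req, 1, req, sizeof(*req));

	/* ...hardware executes the frame... */

	/* response: driver buffer -> scatterlist */
	sg_copy_from_buffer(&task->smp_task.smp_resp, 1, resp, sizeof(*resp));
}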
4064
4065/**
4066 * isci_io_request_build() - This function builds the io request object.
4067 * @isci_host: This parameter specifies the ISCI host object
4068 * @request: This parameter points to the isci_request object allocated in the
4069 * request construct function.
4070 * @sci_device: This parameter is the handle for the sci core's remote device
4071 * object that is the destination for this request.
4072 *
4073 * SCI_SUCCESS on successful completion, or specific failure code.
4074 */
4075static enum sci_status isci_io_request_build(
4076 struct isci_host *isci_host,
4077 struct isci_request *request,
4078 struct isci_remote_device *isci_device)
4079{
4080 enum sci_status status = SCI_SUCCESS;
4081 struct sas_task *task = isci_request_access_task(request);
4082 struct scic_sds_remote_device *sci_device = &isci_device->sci;
4083
4084 dev_dbg(&isci_host->pdev->dev,
4085 "%s: isci_device = 0x%p; request = %p, "
4086 "num_scatter = %d\n",
4087 __func__,
4088 isci_device,
4089 request,
4090 task->num_scatter);
4091
4092 /* map the sgl addresses, if present.
4093 * libata does the mapping for sata devices
4094 * before we get the request.
4095 */
4096 if (task->num_scatter &&
4097 !sas_protocol_ata(task->task_proto) &&
4098 !(SAS_PROTOCOL_SMP & task->task_proto)) {
4099
4100 request->num_sg_entries = dma_map_sg(
4101 &isci_host->pdev->dev,
4102 task->scatter,
4103 task->num_scatter,
4104 task->data_dir
4105 );
4106
4107 if (request->num_sg_entries == 0)
4108 return SCI_FAILURE_INSUFFICIENT_RESOURCES;
4109 }
4110
4111 /* build the common request object. For now,
4112 * we will let the core allocate the IO tag.
4113 */
4114 status = scic_io_request_construct(&isci_host->sci, sci_device,
4115 SCI_CONTROLLER_INVALID_IO_TAG,
4116 &request->sci);
4117
4118 if (status != SCI_SUCCESS) {
4119 dev_warn(&isci_host->pdev->dev,
4120 "%s: failed request construct\n",
4121 __func__);
4122 return SCI_FAILURE;
4123 }
4124
4125 switch (task->task_proto) {
4126 case SAS_PROTOCOL_SMP:
4127 status = isci_smp_request_build(request);
4128 break;
4129 case SAS_PROTOCOL_SSP:
4130 status = isci_request_ssp_request_construct(request);
4131 break;
4132 case SAS_PROTOCOL_SATA:
4133 case SAS_PROTOCOL_STP:
4134 case SAS_PROTOCOL_SATA | SAS_PROTOCOL_STP:
4135 status = isci_request_stp_request_construct(request);
4136 break;
4137 default:
4138 dev_warn(&isci_host->pdev->dev,
4139 "%s: unknown protocol\n", __func__);
4140 return SCI_FAILURE;
4141 }
4142
4143 return SCI_SUCCESS;
4144}
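The dma_map_sg() call above is undone on the completion path by isci_request_unmap_sgl(); the essential contract is that dma_unmap_sg() receives the same scatterlist, entry count, and direction that were passed to the map. A hedged sketch of what the unmap side must do (not the driver's actual helper):

/* Sketch: the unmap counterpart to the mapping above. */
static void sketch_unmap_sgl(struct pci_dev *pdev, struct sas_task *task,
			     unsigned int num_sg_entries)
{
	if (num_sg_entries &&
	    !sas_protocol_ata(task->task_proto) &&
	    !(SAS_PROTOCOL_SMP & task->task_proto))
		dma_unmap_sg(&pdev->dev, task->scatter,
			     task->num_scatter, task->data_dir);
}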
4145
4146/**
4147 * isci_request_alloc_core() - This function gets the request object from the
4148 * isci_host dma cache.
4149 * @isci_host: This parameter specifies the ISCI host object
4150 * @isci_request: This parameter will contain the pointer to the new
4151 * isci_request object.
4152 * @isci_device: This parameter is the pointer to the isci remote device object
4153 * that is the destination for this request.
4154 * @gfp_flags: This parameter specifies the os allocation flags.
4155 *
4156 * SCI_SUCCESS on successful completion, or specific failure code.
4157 */
4158static int isci_request_alloc_core(
4159 struct isci_host *isci_host,
4160 struct isci_request **isci_request,
4161 struct isci_remote_device *isci_device,
4162 gfp_t gfp_flags)
4163{
4164 int ret = 0;
4165 dma_addr_t handle;
4166 struct isci_request *request;
4167
4168
4169 /* get pointer to dma memory. This actually points
4170 * to both the isci_request object and the
4171 * sci object. The isci object is at the beginning
4172 * of the memory allocated here.
4173 */
4174 request = dma_pool_alloc(isci_host->dma_pool, gfp_flags, &handle);
4175 if (!request) {
4176 dev_warn(&isci_host->pdev->dev,
4177 "%s: dma_pool_alloc returned NULL\n", __func__);
4178 return -ENOMEM;
4179 }
4180
4181 /* initialize the request object. */
4182 spin_lock_init(&request->state_lock);
4183 request->request_daddr = handle;
4184 request->isci_host = isci_host;
4185 request->isci_device = isci_device;
4186 request->io_request_completion = NULL;
4187 request->terminated = false;
4188
4189	request->num_sg_entries = 0;
4190
4191	request->complete_in_target = false;
4192
4193 INIT_LIST_HEAD(&request->completed_node);
4194 INIT_LIST_HEAD(&request->dev_node);
4195
4196 *isci_request = request;
4197 isci_request_change_state(request, allocated);
4198
4199 return ret;
4200}
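Requests come from a dma_pool, so a single allocation yields both the kernel virtual address and the bus address the hardware needs (stored in request_daddr). A minimal sketch of that pool pattern; the pool itself is assumed to have been created at host setup with dma_pool_create():

/* Sketch: the dma_pool alloc pattern behind isci_request_alloc_core(). */
static struct isci_request *sketch_pool_alloc(struct dma_pool *pool,
					      gfp_t gfp_flags)
{
	dma_addr_t daddr;
	struct isci_request *request;

	request = dma_pool_alloc(pool, gfp_flags, &daddr);
	if (request)
		request->request_daddr = daddr;	/* bus address for hardware */
	return request;
}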
4201
4202static int isci_request_alloc_io(
4203 struct isci_host *isci_host,
4204 struct sas_task *task,
4205 struct isci_request **isci_request,
4206 struct isci_remote_device *isci_device,
4207 gfp_t gfp_flags)
4208{
4209 int retval = isci_request_alloc_core(isci_host, isci_request,
4210 isci_device, gfp_flags);
4211
4212 if (!retval) {
4213 (*isci_request)->ttype_ptr.io_task_ptr = task;
4214 (*isci_request)->ttype = io_task;
4215
4216 task->lldd_task = *isci_request;
4217	}
4218 return retval;
4219}
4220
4221/**
4222 * isci_request_alloc_tmf() - This function gets the request object from the
4223 * isci_host dma cache and initializes the relevant fields as a sas_task.
4224 * @isci_host: This parameter specifies the ISCI host object
4225 * @isci_tmf: This parameter is the tmf struct from the upper layer driver.
4226 * @isci_request: This parameter will contain the pointer to the new
4227 * isci_request object.
4228 * @isci_device: This parameter is the pointer to the isci remote device object
4229 * that is the destination for this request.
4230 * @gfp_flags: This parameter specifies the os allocation flags.
4231 *
4232 * SCI_SUCCESS on successful completion, or specific failure code.
4233 */
4234int isci_request_alloc_tmf(
4235 struct isci_host *isci_host,
4236 struct isci_tmf *isci_tmf,
4237 struct isci_request **isci_request,
4238 struct isci_remote_device *isci_device,
4239 gfp_t gfp_flags)
4240{
4241 int retval = isci_request_alloc_core(isci_host, isci_request,
4242 isci_device, gfp_flags);
4243
4244	if (!retval) {
4245
4246 (*isci_request)->ttype_ptr.tmf_task_ptr = isci_tmf;
4247 (*isci_request)->ttype = tmf_task;
4248 }
4249 return retval;
4250}
4251
4252/**
4253 * isci_request_execute() - This function allocates the isci_request object,
4254 * and fills in some common fields.
4255 * @isci_host: This parameter specifies the ISCI host object
4256 * @sas_task: This parameter is the task struct from the upper layer driver.
4257 * @isci_request: This parameter will contain the pointer to the new
4258 * isci_request object.
4259 * @gfp_flags: This parameter specifies the os allocation flags.
4260 *
4261 * SCI_SUCCESS on successful completion, or specific failure code.
4262 */
4263int isci_request_execute(
4264 struct isci_host *isci_host,
4265 struct sas_task *task,
4266 struct isci_request **isci_request,
4267 gfp_t gfp_flags)
4268{
4269 int ret = 0;
4270 struct scic_sds_remote_device *sci_device;
4271 enum sci_status status = SCI_FAILURE_UNSUPPORTED_PROTOCOL;
4272 struct isci_remote_device *isci_device;
4273 struct isci_request *request;
4274 unsigned long flags;
4275
4276 isci_device = task->dev->lldd_dev;
4277 sci_device = &isci_device->sci;
4278
4279 /* do common allocation and init of request object. */
4280 ret = isci_request_alloc_io(
4281 isci_host,
4282 task,
4283 &request,
4284 isci_device,
4285 gfp_flags
4286 );
4287
4288 if (ret)
4289 goto out;
4290
4291 status = isci_io_request_build(isci_host, request, isci_device);
4292 if (status != SCI_SUCCESS) {
4293 dev_warn(&isci_host->pdev->dev,
4294 "%s: request_construct failed - status = 0x%x\n",
4295 __func__,
4296 status);
4297 goto out;
4298 }
4299
4300 spin_lock_irqsave(&isci_host->scic_lock, flags);
4301
4302 /* send the request, let the core assign the IO TAG. */
4303 status = scic_controller_start_io(&isci_host->sci, sci_device,
4304 &request->sci,
4305 SCI_CONTROLLER_INVALID_IO_TAG);
4306 if (status != SCI_SUCCESS &&
4307 status != SCI_FAILURE_REMOTE_DEVICE_RESET_REQUIRED) {
4308 dev_warn(&isci_host->pdev->dev,
4309 "%s: failed request start (0x%x)\n",
4310 __func__, status);
4311 spin_unlock_irqrestore(&isci_host->scic_lock, flags);
4312 goto out;
4313 }
4314
4315 /* Either I/O started OK, or the core has signaled that
4316 * the device needs a target reset.
4317 *
4318 * In either case, hold onto the I/O for later.
4319 *
4320 * Update its status and add it to the list in the
4321 * remote device object.
4322 */
4323 isci_request_change_state(request, started);
4324 list_add(&request->dev_node, &isci_device->reqs_in_process);
4325
4326 if (status == SCI_SUCCESS) {
4327 /* Save the tag for possible task mgmt later. */
4328 request->io_tag = request->sci.io_tag;
4329 } else {
4330 /* The request did not really start in the
4331 * hardware, so clear the request handle
4332 * here so no terminations will be done.
4333 */
4334 request->terminated = true;
4335 }
4336 spin_unlock_irqrestore(&isci_host->scic_lock, flags);
4337
4338 if (status ==
4339 SCI_FAILURE_REMOTE_DEVICE_RESET_REQUIRED) {
4340 /* Signal libsas that we need the SCSI error
4341 * handler thread to work on this I/O and that
4342 * we want a device reset.
4343 */
4344 spin_lock_irqsave(&task->task_state_lock, flags);
4345 task->task_state_flags |= SAS_TASK_NEED_DEV_RESET;
4346 spin_unlock_irqrestore(&task->task_state_lock, flags);
4347
4348 /* Cause this task to be scheduled in the SCSI error
4349 * handler thread.
4350 */
4351 isci_execpath_callback(isci_host, task,
4352 sas_task_abort);
4353
4354 /* Change the status, since we are holding
4355 * the I/O until it is managed by the SCSI
4356 * error handler.
4357 */
4358 status = SCI_SUCCESS;
4359 }
4360
4361 out:
4362 if (status != SCI_SUCCESS) {
4363 /* release dma memory on failure. */
4364 isci_request_free(isci_host, request);
4365 request = NULL;
4366 ret = SCI_FAILURE;
4367 }
4368
4369 *isci_request = request;
4370 return ret;
4371}
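Taken together, this is the submission path a libsas ->lldd_execute_task() handler drives. A condensed caller sketch (hypothetical wrapper; error handling reduced to the essentials):

/* Sketch: driving isci_request_execute() from a dispatch routine. */
static int sketch_dispatch_task(struct isci_host *ihost, struct sas_task *task,
				gfp_t gfp_flags)
{
	struct isci_request *ireq;
	int ret;

	ret = isci_request_execute(ihost, task, &ireq, gfp_flags);
	if (ret)
		return -EIO;	/* the request was freed on failure */

	/* on success the request stays on the remote device's
	 * reqs_in_process list until it completes or is terminated.
	 */
	return 0;
}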