isci: Using Linux SSP frame header
[deliverable/linux.git] / drivers / scsi / isci / core / scic_sds_request.c
1/*
2 * This file is provided under a dual BSD/GPLv2 license. When using or
3 * redistributing this file, you may do so under either license.
4 *
5 * GPL LICENSE SUMMARY
6 *
7 * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved.
8 *
9 * This program is free software; you can redistribute it and/or modify
10 * it under the terms of version 2 of the GNU General Public License as
11 * published by the Free Software Foundation.
12 *
13 * This program is distributed in the hope that it will be useful, but
14 * WITHOUT ANY WARRANTY; without even the implied warranty of
15 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
16 * General Public License for more details.
17 *
18 * You should have received a copy of the GNU General Public License
19 * along with this program; if not, write to the Free Software
20 * Foundation, Inc., 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
21 * The full GNU General Public License is included in this distribution
22 * in the file called LICENSE.GPL.
23 *
24 * BSD LICENSE
25 *
26 * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved.
27 * All rights reserved.
28 *
29 * Redistribution and use in source and binary forms, with or without
30 * modification, are permitted provided that the following conditions
31 * are met:
32 *
33 * * Redistributions of source code must retain the above copyright
34 * notice, this list of conditions and the following disclaimer.
35 * * Redistributions in binary form must reproduce the above copyright
36 * notice, this list of conditions and the following disclaimer in
37 * the documentation and/or other materials provided with the
38 * distribution.
39 * * Neither the name of Intel Corporation nor the names of its
40 * contributors may be used to endorse or promote products derived
41 * from this software without specific prior written permission.
42 *
43 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
44 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
45 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
46 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
47 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
48 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
49 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
50 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
51 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
52 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
53 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
54 */
55
f2f30080 56#include <scsi/sas.h>
0cfa890e 57#include "sas.h"
6f231dda 58#include "intel_sas.h"
59#include "scic_controller.h"
60#include "scic_io_request.h"
6f231dda 61#include "scic_sds_controller.h"
bc99aa47 62#include "scu_registers.h"
6f231dda 63#include "scic_sds_port.h"
88f3b62a 64#include "remote_device.h"
65#include "scic_sds_request.h"
66#include "scic_sds_smp_request.h"
67#include "scic_sds_stp_request.h"
68#include "scic_sds_unsolicited_frame_control.h"
6f231dda 69#include "sci_environment.h"
70#include "sci_util.h"
71#include "scu_completion_codes.h"
72#include "scu_constants.h"
73#include "scu_task_context.h"
74
75/*
76 * ****************************************************************************
77 * * SCIC SDS IO REQUEST CONSTANTS
78 * **************************************************************************** */
79
80/**
81 *
82 *
83 * We have no timer requirements for IO requests right now
84 */
85#define SCIC_SDS_IO_REQUEST_MINIMUM_TIMER_COUNT (0)
86#define SCIC_SDS_IO_REQUEST_MAXIMUM_TIMER_COUNT (0)
87
88/*
89 * ****************************************************************************
90 * * SCIC SDS IO REQUEST MACROS
91 * **************************************************************************** */
92
93/**
94 * scic_ssp_io_request_get_object_size() -
95 *
96 * This macro returns the size of the memory required to store an SSP IO
97 * request. This does not include the size of the SGL or SCU Task Context
98 * memory.
99 */
100#define scic_ssp_io_request_get_object_size() \
101 (\
0cfa890e 102 sizeof(struct ssp_cmd_iu) \
af5ae893 103 + SSP_RESP_IU_MAX_SIZE \
104 )
105
106/**
107 * scic_sds_ssp_request_get_command_buffer() -
108 *
109 * This macro returns the address of the ssp command buffer in the io request
110 * memory
111 */
112#define scic_sds_ssp_request_get_command_buffer(memory) \
0cfa890e 113 ((struct ssp_cmd_iu *)(\
114 ((char *)(memory)) + sizeof(struct scic_sds_request) \
115 ))
116
117/**
118 * scic_sds_ssp_request_get_response_buffer() -
119 *
120 * This macro returns the address of the ssp response buffer in the io request
121 * memory
122 */
123#define scic_sds_ssp_request_get_response_buffer(memory) \
af5ae893 124 ((struct ssp_response_iu *)(\
6f231dda 125 ((char *)(scic_sds_ssp_request_get_command_buffer(memory))) \
0cfa890e 126 + sizeof(struct ssp_cmd_iu) \
127 ))
128
129/**
130 * scic_sds_ssp_request_get_task_context_buffer() -
131 *
132 * This macro returns the address of the task context buffer in the io request
133 * memory
134 */
135#define scic_sds_ssp_request_get_task_context_buffer(memory) \
136 ((struct scu_task_context *)(\
137 ((char *)(scic_sds_ssp_request_get_response_buffer(memory))) \
af5ae893 138 + SSP_RESP_IU_MAX_SIZE \
139 ))
140
141/**
142 * scic_sds_ssp_request_get_sgl_element_buffer() -
143 *
144 * This macro returns the address of the sgl element pairs in the io request
145 * memory buffer
146 */
147#define scic_sds_ssp_request_get_sgl_element_buffer(memory) \
148 ((struct scu_sgl_element_pair *)(\
149 ((char *)(scic_sds_ssp_request_get_task_context_buffer(memory))) \
150 + sizeof(struct scu_task_context) \
151 ))
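/*
 * Illustrative layout sketch, inferred from the four macros above (not part
 * of the original comments): each accessor assumes one contiguous
 * per-request allocation and simply steps past the regions before it:
 *
 *   memory -> +---------------------------------------+
 *             | struct scic_sds_request               |
 *             +---------------------------------------+ <- ..._get_command_buffer()
 *             | struct ssp_cmd_iu                     |
 *             +---------------------------------------+ <- ..._get_response_buffer()
 *             | SSP_RESP_IU_MAX_SIZE bytes            |
 *             +---------------------------------------+ <- ..._get_task_context_buffer()
 *             | struct scu_task_context               |
 *             +---------------------------------------+ <- ..._get_sgl_element_buffer()
 *             | array of struct scu_sgl_element_pair  |
 *             +---------------------------------------+
 */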
152
153
154/**
155 * scic_ssp_task_request_get_object_size() -
156 *
157 * This macro returns the size of the memory required to store an SSP Task
158 * request. This does not include the size of the SCU Task Context memory.
159 */
160#define scic_ssp_task_request_get_object_size() \
161 (\
0cfa890e 162 sizeof(struct ssp_task_iu) \
af5ae893 163 + SSP_RESP_IU_MAX_SIZE \
164 )
165
166/**
167 * scic_sds_ssp_task_request_get_command_buffer() -
168 *
169 * This macro returns the address of the ssp command buffer in the task request
170 * memory. Yes, it's the same as the above macro except for the name.
171 */
172#define scic_sds_ssp_task_request_get_command_buffer(memory) \
0cfa890e 173 ((struct ssp_task_iu *)(\
174 ((char *)(memory)) + sizeof(struct scic_sds_request) \
175 ))
176
177/**
178 * scic_sds_ssp_task_request_get_response_buffer() -
179 *
180 * This macro returns the address of the ssp response buffer in the task
181 * request memory.
182 */
183#define scic_sds_ssp_task_request_get_response_buffer(memory) \
af5ae893 184 ((struct ssp_response_iu *)(\
6f231dda 185 ((char *)(scic_sds_ssp_task_request_get_command_buffer(memory))) \
0cfa890e 186 + sizeof(struct ssp_task_iu) \
187 ))
188
189/**
190 * scic_sds_ssp_task_request_get_task_context_buffer() -
191 *
192 * This macro returns the task context buffer for the SSP task request.
193 */
194#define scic_sds_ssp_task_request_get_task_context_buffer(memory) \
195 ((struct scu_task_context *)(\
196 ((char *)(scic_sds_ssp_task_request_get_response_buffer(memory))) \
af5ae893 197 + SSP_RESP_IU_MAX_SIZE \
198 ))
199
200
201
202/*
203 * ****************************************************************************
204 * * SCIC SDS IO REQUEST PRIVATE METHODS
205 * **************************************************************************** */
206
207/**
208 *
209 *
210 * This method returns the size, as a u32, required to store an SSP IO request object.
211 */
212static u32 scic_sds_ssp_request_get_object_size(void)
213{
214 return sizeof(struct scic_sds_request)
215 + scic_ssp_io_request_get_object_size()
216 + sizeof(struct scu_task_context)
fe9a6431 217 + SMP_CACHE_BYTES
218 + sizeof(struct scu_sgl_element_pair) * SCU_MAX_SGL_ELEMENT_PAIRS;
219}
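/*
 * Why the extra SMP_CACHE_BYTES above (an inference from
 * scic_sds_ssp_io_request_assign_buffers() below, which re-aligns the task
 * context pointer with PTR_ALIGN(..., SMP_CACHE_BYTES)): the pad leaves room
 * for that cache-line alignment inside the single allocation without
 * overrunning the end of the buffer.
 */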
220
221/**
222 * This method returns the sgl element pair for the specified sgl_pair index.
e2023b87 223 * @sci_req: This parameter specifies the IO request for which to retrieve
224 * the Scatter-Gather List element pair.
225 * @sgl_pair_index: This parameter specifies the index into the SGL element
226 * pair to be retrieved.
227 *
228 * This method returns a pointer to a struct scu_sgl_element_pair.
229 */
230static struct scu_sgl_element_pair *scic_sds_request_get_sgl_element_pair(
e2023b87 231 struct scic_sds_request *sci_req,
232 u32 sgl_pair_index
233 ) {
234 struct scu_task_context *task_context;
235
e2023b87 236 task_context = (struct scu_task_context *)sci_req->task_context_buffer;
237
238 if (sgl_pair_index == 0) {
239 return &task_context->sgl_pair_ab;
240 } else if (sgl_pair_index == 1) {
241 return &task_context->sgl_pair_cd;
242 }
243
e2023b87 244 return &sci_req->sgl_element_pair_buffer[sgl_pair_index - 2];
245}
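/*
 * Worked example (illustrative): pair index 0 maps to sgl_pair_ab and index
 * 1 to sgl_pair_cd, both embedded in the task context; only index 2 and up
 * use sci_req->sgl_element_pair_buffer.  Because each pair carries two
 * scatterlist entries (A and B), a 4-entry scatterlist consumes pair indices
 * 0 and 1 and never touches the external buffer, while a 6-entry list would
 * also use sgl_element_pair_buffer[0].
 */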
246
247/**
248 * This function will build the SGL list for an IO request.
e2023b87 249 * @sci_req: This parameter specifies the IO request for which to build
250 * the Scatter-Gather List.
251 *
252 */
6389a775 253void scic_sds_request_build_sgl(struct scic_sds_request *sds_request)
6f231dda 254{
890cae9b 255 struct isci_request *isci_request = sds_request->ireq;
256 struct isci_host *isci_host = isci_request->isci_host;
257 struct sas_task *task = isci_request_access_task(isci_request);
258 struct scatterlist *sg = NULL;
259 dma_addr_t dma_addr;
260 u32 sg_idx = 0;
261 struct scu_sgl_element_pair *scu_sg = NULL;
262 struct scu_sgl_element_pair *prev_sg = NULL;
263
264 if (task->num_scatter > 0) {
265 sg = task->scatter;
266
267 while (sg) {
268 scu_sg = scic_sds_request_get_sgl_element_pair(
269 sds_request,
270 sg_idx);
271
272 SCU_SGL_COPY(scu_sg->A, sg);
273
274 sg = sg_next(sg);
275
276 if (sg) {
277 SCU_SGL_COPY(scu_sg->B, sg);
278 sg = sg_next(sg);
279 } else
280 SCU_SGL_ZERO(scu_sg->B);
281
282 if (prev_sg) {
283 dma_addr =
284 scic_io_request_get_dma_addr(
285 sds_request,
286 scu_sg);
287
288 prev_sg->next_pair_upper =
289 upper_32_bits(dma_addr);
290 prev_sg->next_pair_lower =
291 lower_32_bits(dma_addr);
292 }
293
294 prev_sg = scu_sg;
295 sg_idx++;
6f231dda 296 }
297 } else { /* handle when no sg */
298 scu_sg = scic_sds_request_get_sgl_element_pair(sds_request,
299 sg_idx);
6f231dda 300
301 dma_addr = dma_map_single(&isci_host->pdev->dev,
302 task->scatter,
303 task->total_xfer_len,
304 task->data_dir);
6f231dda 305
306 isci_request->zero_scatter_daddr = dma_addr;
307
308 scu_sg->A.length = task->total_xfer_len;
309 scu_sg->A.address_upper = upper_32_bits(dma_addr);
310 scu_sg->A.address_lower = lower_32_bits(dma_addr);
311 }
312
313 if (scu_sg) {
314 scu_sg->next_pair_upper = 0;
315 scu_sg->next_pair_lower = 0;
316 }
317}
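/*
 * Note on the "no sg" branch above (an inference, not stated in the original
 * comments): when task->num_scatter is zero, task->scatter is treated as one
 * flat virtual buffer and mapped with dma_map_single(); the handle is saved
 * in isci_request->zero_scatter_daddr, presumably so the completion path can
 * unmap the same address later.
 */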
318
319/**
320 * This method builds the remainder of the IO request object.
e2023b87 321 * @sci_req: This parameter specifies the request object being constructed.
322 *
323 * The scic_sds_general_request_construct() must be called before this call is
324 * valid. none
325 */
326static void scic_sds_ssp_io_request_assign_buffers(
e2023b87 327 struct scic_sds_request *sci_req)
6f231dda 328{
329 sci_req->command_buffer =
330 scic_sds_ssp_request_get_command_buffer(sci_req);
331 sci_req->response_buffer =
332 scic_sds_ssp_request_get_response_buffer(sci_req);
333 sci_req->sgl_element_pair_buffer =
334 scic_sds_ssp_request_get_sgl_element_buffer(sci_req);
335 sci_req->sgl_element_pair_buffer =
336 PTR_ALIGN(sci_req->sgl_element_pair_buffer,
fe9a6431 337 sizeof(struct scu_sgl_element_pair));
6f231dda 338
339 if (sci_req->was_tag_assigned_by_user == false) {
340 sci_req->task_context_buffer =
341 scic_sds_ssp_request_get_task_context_buffer(sci_req);
342 sci_req->task_context_buffer =
343 PTR_ALIGN(sci_req->task_context_buffer,
fe9a6431 344 SMP_CACHE_BYTES);
345 }
346}
347
0cfa890e 348static void scic_sds_io_request_build_ssp_command_iu(struct scic_sds_request *sci_req)
6f231dda 349{
350 struct ssp_cmd_iu *cmd_iu;
351 struct isci_request *ireq = sci_req->ireq;
352 struct sas_task *task = isci_request_access_task(ireq);
6f231dda 353
0cfa890e 354 cmd_iu = sci_req->command_buffer;
6f231dda 355
356 memcpy(cmd_iu->LUN, task->ssp_task.LUN, 8);
357 cmd_iu->add_cdb_len = 0;
358 cmd_iu->_r_a = 0;
359 cmd_iu->_r_b = 0;
360 cmd_iu->en_fburst = 0; /* unsupported */
361 cmd_iu->task_prio = task->ssp_task.task_prio;
362 cmd_iu->task_attr = task->ssp_task.task_attr;
363 cmd_iu->_r_c = 0;
6f231dda 364
365 sci_swab32_cpy(&cmd_iu->cdb, task->ssp_task.cdb,
366 sizeof(task->ssp_task.cdb) / sizeof(u32));
367}
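/*
 * Hedged note on the CDB copy above: judging by the sci_swab32_cpy name and
 * its other uses in this file, the CDB is copied as
 * sizeof(task->ssp_task.cdb) / sizeof(u32) 32-bit words, each byte-swapped
 * from CPU order into the ordering the SCU expects for the SSP COMMAND
 * frame.
 */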
368
0cfa890e 369static void scic_sds_task_request_build_ssp_task_iu(struct scic_sds_request *sci_req)
6f231dda 370{
371 struct ssp_task_iu *task_iu;
372 struct isci_request *ireq = sci_req->ireq;
373 struct sas_task *task = isci_request_access_task(ireq);
374 struct isci_tmf *isci_tmf = isci_request_access_tmf(ireq);
6f231dda 375
0cfa890e 376 task_iu = sci_req->command_buffer;
6f231dda 377
0cfa890e 378 memset(task_iu, 0, sizeof(struct ssp_task_iu));
6f231dda 379
0cfa890e 380 memcpy(task_iu->LUN, task->ssp_task.LUN, 8);
6f231dda 381
382 task_iu->task_func = isci_tmf->tmf_code;
383 task_iu->task_tag =
384 (ireq->ttype == tmf_task) ?
385 isci_tmf->io_tag :
386 SCI_CONTROLLER_INVALID_IO_TAG;
387}
388
389/**
390 * This method will fill in the SCU Task Context for any type of SSP request.
e2023b87 391 * @sci_req:
392 * @task_context:
393 *
394 */
395static void scu_ssp_reqeust_construct_task_context(
6389a775 396 struct scic_sds_request *sds_request,
397 struct scu_task_context *task_context)
398{
399 dma_addr_t dma_addr;
400 struct scic_sds_controller *controller;
401 struct scic_sds_remote_device *target_device;
402 struct scic_sds_port *target_port;
403
404 controller = scic_sds_request_get_controller(sds_request);
405 target_device = scic_sds_request_get_device(sds_request);
406 target_port = scic_sds_request_get_port(sds_request);
407
408 /* Fill in the TC with its required data */
409 task_context->abort = 0;
410 task_context->priority = 0;
411 task_context->initiator_request = 1;
8f304c36 412 task_context->connection_rate = target_device->connection_rate;
6f231dda 413 task_context->protocol_engine_index =
6389a775 414 scic_sds_controller_get_protocol_engine_group(controller);
415 task_context->logical_port_index =
416 scic_sds_port_get_index(target_port);
417 task_context->protocol_type = SCU_TASK_CONTEXT_PROTOCOL_SSP;
418 task_context->valid = SCU_TASK_CONTEXT_VALID;
419 task_context->context_type = SCU_TASK_CONTEXT_TYPE;
420
421 task_context->remote_node_index =
6389a775 422 scic_sds_remote_device_get_index(sds_request->target_device);
423 task_context->command_code = 0;
424
425 task_context->link_layer_control = 0;
426 task_context->do_not_dma_ssp_good_response = 1;
427 task_context->strict_ordering = 0;
428 task_context->control_frame = 0;
429 task_context->timeout_enable = 0;
430 task_context->block_guard_enable = 0;
431
432 task_context->address_modifier = 0;
433
e2023b87 434 /* task_context->type.ssp.tag = sci_req->io_tag; */
435 task_context->task_phase = 0x01;
436
437 if (sds_request->was_tag_assigned_by_user) {
438 /*
439 * Build the task context now since we have already read
440 * the data
441 */
442 sds_request->post_context =
443 (SCU_CONTEXT_COMMAND_REQUEST_TYPE_POST_TC |
444 (scic_sds_controller_get_protocol_engine_group(
445 controller) <<
446 SCU_CONTEXT_COMMAND_PROTOCOL_ENGINE_GROUP_SHIFT) |
447 (scic_sds_port_get_index(target_port) <<
448 SCU_CONTEXT_COMMAND_LOGICAL_PORT_SHIFT) |
449 scic_sds_io_tag_get_index(sds_request->io_tag));
6f231dda 450 } else {
451 /*
452 * Build the task context now since we have already read
453 * the data
454 *
455 * I/O tag index is not assigned because we have to wait
456 * until we get a TCi
457 */
458 sds_request->post_context =
459 (SCU_CONTEXT_COMMAND_REQUEST_TYPE_POST_TC |
460 (scic_sds_controller_get_protocol_engine_group(
461 owning_controller) <<
462 SCU_CONTEXT_COMMAND_PROTOCOL_ENGINE_GROUP_SHIFT) |
463 (scic_sds_port_get_index(target_port) <<
464 SCU_CONTEXT_COMMAND_LOGICAL_PORT_SHIFT));
465 }
466
467 /*
468 * Copy the physical address for the command buffer to the
469 * SCU Task Context
470 */
471 dma_addr = scic_io_request_get_dma_addr(sds_request,
472 sds_request->command_buffer);
473
474 task_context->command_iu_upper = upper_32_bits(dma_addr);
475 task_context->command_iu_lower = lower_32_bits(dma_addr);
476
477 /*
478 * Copy the physical address for the response buffer to the
479 * SCU Task Context
480 */
481 dma_addr = scic_io_request_get_dma_addr(sds_request,
482 sds_request->response_buffer);
483
484 task_context->response_iu_upper = upper_32_bits(dma_addr);
485 task_context->response_iu_lower = lower_32_bits(dma_addr);
486}
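/*
 * Illustrative reading of the post_context value built above: it is the OR
 * of the POST_TC request type, the protocol engine group and logical port
 * index shifted into their command fields, and, only when the tag was
 * assigned by the user, the task context index taken from io_tag.  When the
 * tag is allocated later instead,
 * scic_sds_request_constructed_state_start_handler() ORs the TCi into
 * post_context just before posting.
 */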
487
488/**
489 * This method will fill in the SCU Task Context for an SSP IO request.
e2023b87 490 * @sci_req:
491 *
492 */
493static void scu_ssp_io_request_construct_task_context(
494 struct scic_sds_request *sci_req,
495 enum dma_data_direction dir,
496 u32 len)
497{
498 struct scu_task_context *task_context;
499
82d29928 500 task_context = scic_sds_request_get_task_context(sci_req);
6f231dda 501
82d29928 502 scu_ssp_reqeust_construct_task_context(sci_req, task_context);
6f231dda 503
504 task_context->ssp_command_iu_length =
505 sizeof(struct ssp_cmd_iu) / sizeof(u32);
506 task_context->type.ssp.frame_type = SCI_SAS_COMMAND_FRAME;
507
508 switch (dir) {
509 case DMA_FROM_DEVICE:
510 case DMA_NONE:
511 default:
512 task_context->task_type = SCU_TASK_TYPE_IOREAD;
513 break;
82d29928 514 case DMA_TO_DEVICE:
515 task_context->task_type = SCU_TASK_TYPE_IOWRITE;
516 break;
517 }
518
82d29928 519 task_context->transfer_length_bytes = len;
6f231dda 520
521 if (task_context->transfer_length_bytes > 0)
522 scic_sds_request_build_sgl(sci_req);
523}
524
525
526/**
527 * This method will fill in the remainder of the io request object for SSP Task
528 * requests.
e2023b87 529 * @sci_req:
530 *
531 */
532static void scic_sds_ssp_task_request_assign_buffers(
e2023b87 533 struct scic_sds_request *sci_req)
534{
535 /* Assign all of the buffer pointers */
536 sci_req->command_buffer =
537 scic_sds_ssp_task_request_get_command_buffer(sci_req);
538 sci_req->response_buffer =
539 scic_sds_ssp_task_request_get_response_buffer(sci_req);
540 sci_req->sgl_element_pair_buffer = NULL;
541
542 if (sci_req->was_tag_assigned_by_user == false) {
543 sci_req->task_context_buffer =
544 scic_sds_ssp_task_request_get_task_context_buffer(sci_req);
545 sci_req->task_context_buffer =
546 PTR_ALIGN(sci_req->task_context_buffer, SMP_CACHE_BYTES);
547 }
548}
549
550/**
551 * This method will fill in the SCU Task Context for an SSP Task request. The
552 * following important settings are utilized: -# priority ==
553 * SCU_TASK_PRIORITY_HIGH. This ensures that the task request is issued
554 * ahead of other tasks destined for the same Remote Node. -# task_type ==
555 * SCU_TASK_TYPE_IOREAD. This simply indicates that a normal request type
556 * (i.e. non-raw frame) is being utilized to perform task management. -#
557 * control_frame == 1. This ensures that the proper endianness is set so
558 * that the bytes are transmitted in the right order for a task frame.
e2023b87 559 * @sci_req: This parameter specifies the task request object being
560 * constructed.
561 *
562 */
563static void scu_ssp_task_request_construct_task_context(
e2023b87 564 struct scic_sds_request *sci_req)
565{
566 struct scu_task_context *task_context;
567
e2023b87 568 task_context = scic_sds_request_get_task_context(sci_req);
6f231dda 569
e2023b87 570 scu_ssp_reqeust_construct_task_context(sci_req, task_context);
571
572 task_context->control_frame = 1;
573 task_context->priority = SCU_TASK_PRIORITY_HIGH;
574 task_context->task_type = SCU_TASK_TYPE_RAW_FRAME;
575 task_context->transfer_length_bytes = 0;
576 task_context->type.ssp.frame_type = SCI_SAS_TASK_FRAME;
577 task_context->ssp_command_iu_length =
578 sizeof(struct ssp_task_iu) / sizeof(u32);
579}
580
581
582/**
583 * This method constructs the SSP Command IU data for this ssp passthrough
584 * command request object.
e2023b87 585 * @sci_req: This parameter specifies the request object for which the SSP
586 * command information unit is being built.
587 *
588 * enum sci_status, returns invalid parameter if cdb > 16
589 */
590
591
592/**
593 * This method constructs the SATA request object.
e2023b87 594 * @sci_req:
595 * @sat_protocol:
596 * @transfer_length:
597 * @data_direction:
598 * @copy_rx_frame:
599 *
600 * enum sci_status
601 */
602static enum sci_status
603scic_io_request_construct_sata(struct scic_sds_request *sci_req,
604 u32 len,
605 enum dma_data_direction dir,
606 bool copy)
607{
608 enum sci_status status = SCI_SUCCESS;
609 struct isci_request *ireq = sci_req->ireq;
610 struct sas_task *task = isci_request_access_task(ireq);
611
612 /* check for management protocols */
613 if (ireq->ttype == tmf_task) {
614 struct isci_tmf *tmf = isci_request_access_tmf(ireq);
615
616 if (tmf->tmf_code == isci_tmf_sata_srst_high ||
617 tmf->tmf_code == isci_tmf_sata_srst_low)
618 return scic_sds_stp_soft_reset_request_construct(sci_req);
619 else {
620 dev_err(scic_to_dev(sci_req->owning_controller),
621 "%s: Request 0x%p received un-handled SAT "
622 "management protocol 0x%x.\n",
623 __func__, sci_req, tmf->tmf_code);
624
625 return SCI_FAILURE;
626 }
627 }
6f231dda 628
629 if (!sas_protocol_ata(task->task_proto)) {
630 dev_err(scic_to_dev(sci_req->owning_controller),
631 "%s: Non-ATA protocol in SATA path: 0x%x\n",
632 __func__,
633 task->task_proto);
634 return SCI_FAILURE;
6f231dda 635
e76d6180 636 }
6f231dda 637
638 /* non data */
639 if (task->data_dir == DMA_NONE)
640 return scic_sds_stp_non_data_request_construct(sci_req);
6f231dda 641
642 /* NCQ */
643 if (task->ata_task.use_ncq)
644 return scic_sds_stp_ncq_request_construct(sci_req, len, dir);
6f231dda 645
646 /* DMA */
647 if (task->ata_task.dma_xfer)
648 return scic_sds_stp_udma_request_construct(sci_req, len, dir);
649 else /* PIO */
650 return scic_sds_stp_pio_request_construct(sci_req, copy);
651
652 return status;
653}
654
655u32 scic_io_request_get_object_size(void)
656{
657 u32 ssp_request_size;
658 u32 stp_request_size;
659 u32 smp_request_size;
660
661 ssp_request_size = scic_sds_ssp_request_get_object_size();
662 stp_request_size = scic_sds_stp_request_get_object_size();
663 smp_request_size = scic_sds_smp_request_get_object_size();
664
665 return max(ssp_request_size, max(stp_request_size, smp_request_size));
666}
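/*
 * Usage sketch (hypothetical caller, not part of this file): since the
 * returned value is the maximum of the SSP, STP and SMP request sizes, one
 * allocation sized this way can back a request of any protocol, e.g.:
 *
 *	u32 sz = scic_io_request_get_object_size();
 *	void *req_mem = kzalloc(sz, GFP_KERNEL);
 *	if (!req_mem)
 *		return -ENOMEM;
 */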
667
668enum sci_status scic_io_request_construct_basic_ssp(
669 struct scic_sds_request *sci_req)
670{
890cae9b 671 struct isci_request *isci_request = sci_req->ireq;
672
673 sci_req->protocol = SCIC_SSP_PROTOCOL;
674
675 scu_ssp_io_request_construct_task_context(
676 sci_req,
677 isci_request_io_request_get_data_direction(isci_request),
678 isci_request_io_request_get_transfer_length(isci_request));
679
680 scic_sds_io_request_build_ssp_command_iu(sci_req);
681
682 sci_base_state_machine_change_state(&sci_req->state_machine,
683 SCI_BASE_REQUEST_STATE_CONSTRUCTED);
684
685 return SCI_SUCCESS;
686}
687
688
689enum sci_status scic_task_request_construct_ssp(
690 struct scic_sds_request *sci_req)
691{
692 /* Construct the SSP Task SCU Task Context */
693 scu_ssp_task_request_construct_task_context(sci_req);
694
695 /* Fill in the SSP Task IU */
696 scic_sds_task_request_build_ssp_task_iu(sci_req);
697
698 sci_base_state_machine_change_state(&sci_req->state_machine,
699 SCI_BASE_REQUEST_STATE_CONSTRUCTED);
700
701 return SCI_SUCCESS;
702}
703
704
705enum sci_status scic_io_request_construct_basic_sata(
706 struct scic_sds_request *sci_req)
707{
708 enum sci_status status;
82d29928 709 struct scic_sds_stp_request *stp_req;
710 u32 len;
711 enum dma_data_direction dir;
712 bool copy = false;
890cae9b 713 struct isci_request *isci_request = sci_req->ireq;
7392d275 714 struct sas_task *task = isci_request_access_task(isci_request);
6f231dda 715
82d29928 716 stp_req = container_of(sci_req, typeof(*stp_req), parent);
717
718 sci_req->protocol = SCIC_STP_PROTOCOL;
719
720 len = isci_request_io_request_get_transfer_length(isci_request);
721 dir = isci_request_io_request_get_data_direction(isci_request);
7392d275 722 copy = (task->data_dir == DMA_NONE) ? false : true;
6f231dda 723
e76d6180 724 status = scic_io_request_construct_sata(sci_req, len, dir, copy);
725
726 if (status == SCI_SUCCESS)
727 sci_base_state_machine_change_state(&sci_req->state_machine,
728 SCI_BASE_REQUEST_STATE_CONSTRUCTED);
729
730 return status;
731}
732
733
734enum sci_status scic_task_request_construct_sata(
735 struct scic_sds_request *sci_req)
736{
737 enum sci_status status = SCI_SUCCESS;
738 struct isci_request *ireq = sci_req->ireq;
739
740 /* check for management protocols */
741 if (ireq->ttype == tmf_task) {
742 struct isci_tmf *tmf = isci_request_access_tmf(ireq);
743
744 if (tmf->tmf_code == isci_tmf_sata_srst_high ||
745 tmf->tmf_code == isci_tmf_sata_srst_low) {
746 status = scic_sds_stp_soft_reset_request_construct(sci_req);
747 } else {
748 dev_err(scic_to_dev(sci_req->owning_controller),
749 "%s: Request 0x%p received un-handled SAT "
750 "Protocol 0x%x.\n",
751 __func__, sci_req, tmf->tmf_code);
752
753 return SCI_FAILURE;
754 }
755 }
756
757 if (status == SCI_SUCCESS)
758 sci_base_state_machine_change_state(
759 &sci_req->state_machine,
760 SCI_BASE_REQUEST_STATE_CONSTRUCTED);
761
762 return status;
763}
764
765
766u16 scic_io_request_get_io_tag(
767 struct scic_sds_request *sci_req)
768{
769 return sci_req->io_tag;
770}
771
772
773u32 scic_request_get_controller_status(
774 struct scic_sds_request *sci_req)
775{
776 return sci_req->scu_status;
777}
778
779
780void *scic_io_request_get_command_iu_address(
781 struct scic_sds_request *sci_req)
782{
783 return sci_req->command_buffer;
784}
785
786
787void *scic_io_request_get_response_iu_address(
788 struct scic_sds_request *sci_req)
789{
790 return sci_req->response_buffer;
791}
792
793
794#define SCU_TASK_CONTEXT_SRAM 0x200000
795u32 scic_io_request_get_number_of_bytes_transferred(
796 struct scic_sds_request *scic_sds_request)
797{
467e855a 798 struct scic_sds_controller *scic = scic_sds_request->owning_controller;
799 u32 ret_val = 0;
800
801 if (readl(&scic->smu_registers->address_modifier) == 0) {
802 void __iomem *scu_reg_base = scic->scu_registers;
803 /*
804 * get the bytes of data from the Address == BAR1 + 20002Ch + (256*TCi) where
805 * BAR1 is the scu_registers
806 * 0x20002C = 0x200000 + 0x2c
807 * = start of task context SRAM + offset of (type.ssp.data_offset)
808 * TCi is the io_tag of struct scic_sds_request */
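 /*
  * Worked example (illustrative, using the 256-byte task context size
  * quoted above): for io_tag index 2 the read lands at
  * BAR1 + 0x200000 + 0x2C + (2 * 256) = BAR1 + 0x20022C.
  */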
809 ret_val = readl(scu_reg_base +
810 (SCU_TASK_CONTEXT_SRAM + offsetof(struct scu_task_context, type.ssp.data_offset)) +
811 ((sizeof(struct scu_task_context)) * scic_sds_io_tag_get_index(scic_sds_request->io_tag)));
812 }
813
814 return ret_val;
815}
816
817
818/*
819 * ****************************************************************************
820 * * SCIC SDS Interface Implementation
821 * **************************************************************************** */
822
823enum sci_status
824scic_sds_request_start(struct scic_sds_request *request)
6f231dda 825{
524b5f72 826 if (request->device_sequence !=
38aa74eb 827 scic_sds_remote_device_get_sequence(request->target_device))
828 return SCI_FAILURE;
829
830 if (request->state_handlers->start_handler)
38aa74eb 831 return request->state_handlers->start_handler(request);
832
833 dev_warn(scic_to_dev(request->owning_controller),
834 "%s: SCIC IO Request requested to start while in wrong "
835 "state %d\n",
836 __func__,
837 sci_base_state_machine_get_state(&request->state_machine));
838
839 return SCI_FAILURE_INVALID_STATE;
840}
841
842enum sci_status
843scic_sds_io_request_terminate(struct scic_sds_request *request)
6f231dda 844{
845 if (request->state_handlers->abort_handler)
846 return request->state_handlers->abort_handler(request);
847
848 dev_warn(scic_to_dev(request->owning_controller),
849 "%s: SCIC IO Request requested to abort while in wrong "
850 "state %d\n",
851 __func__,
852 sci_base_state_machine_get_state(&request->state_machine));
853
854 return SCI_FAILURE_INVALID_STATE;
855}
856
857enum sci_status
858scic_sds_io_request_complete(struct scic_sds_request *request)
6f231dda 859{
860 if (request->state_handlers->complete_handler)
861 return request->state_handlers->complete_handler(request);
862
863 dev_warn(scic_to_dev(request->owning_controller),
864 "%s: SCIC IO Request requested to complete while in wrong "
865 "state %d\n",
866 __func__,
867 sci_base_state_machine_get_state(&request->state_machine));
868
869 return SCI_FAILURE_INVALID_STATE;
870}
871
6f231dda 872enum sci_status scic_sds_io_request_event_handler(
524b5f72 873 struct scic_sds_request *request,
874 u32 event_code)
875{
876 if (request->state_handlers->event_handler)
877 return request->state_handlers->event_handler(request, event_code);
878
879 dev_warn(scic_to_dev(request->owning_controller),
880 "%s: SCIC IO Request given event code notification %x while "
881 "in wrong state %d\n",
882 __func__,
883 event_code,
884 sci_base_state_machine_get_state(&request->state_machine));
885
886 return SCI_FAILURE_INVALID_STATE;
887}
888
889enum sci_status
890scic_sds_io_request_tc_completion(struct scic_sds_request *request, u32 completion_code)
891{
892 if (request->state_machine.current_state_id == SCI_BASE_REQUEST_STATE_STARTED &&
893 request->has_started_substate_machine == false)
894 return scic_sds_request_started_state_tc_completion_handler(request, completion_code);
895 else if (request->state_handlers->tc_completion_handler)
896 return request->state_handlers->tc_completion_handler(request, completion_code);
897
898 dev_warn(scic_to_dev(request->owning_controller),
899 "%s: SCIC IO Request given task completion notification %x "
900 "while in wrong state %d\n",
901 __func__,
902 completion_code,
903 sci_base_state_machine_get_state(&request->state_machine));
904
905 return SCI_FAILURE_INVALID_STATE;
906
907}
908
909
910/**
911 *
e2023b87 912 * @sci_req: The SCIC_SDS_IO_REQUEST_T object for which the start
913 * operation is to be executed.
914 * @frame_index: The frame index returned by the hardware for the request
915 * object.
916 *
917 * This method invokes the core state frame handler for the
918 * SCIC_SDS_IO_REQUEST_T object. enum sci_status
919 */
920enum sci_status scic_sds_io_request_frame_handler(
524b5f72 921 struct scic_sds_request *request,
922 u32 frame_index)
923{
924 if (request->state_handlers->frame_handler)
925 return request->state_handlers->frame_handler(request, frame_index);
926
927 dev_warn(scic_to_dev(request->owning_controller),
928 "%s: SCIC IO Request given unexpected frame %x while in "
929 "state %d\n",
930 __func__,
931 frame_index,
932 sci_base_state_machine_get_state(&request->state_machine));
933
934 scic_sds_controller_release_frame(request->owning_controller, frame_index);
935 return SCI_FAILURE_INVALID_STATE;
936}
937
6f231dda 938/*
af5ae893 939 * This function copies response data for requests returning response data
6f231dda 940 * instead of sense data.
e2023b87 941 * @sci_req: This parameter specifies the request object for which to copy
6f231dda 942 * the response data.
6f231dda 943 */
af5ae893 944void scic_sds_io_request_copy_response(struct scic_sds_request *sci_req)
6f231dda 945{
946 void *resp_buf;
947 u32 len;
948 struct ssp_response_iu *ssp_response;
949 struct isci_request *ireq = sci_req->ireq;
950 struct isci_tmf *isci_tmf = isci_request_access_tmf(ireq);
6f231dda 951
af5ae893 952 ssp_response = sci_req->response_buffer;
6f231dda 953
af5ae893 954 resp_buf = &isci_tmf->resp.resp_iu;
6f231dda 955
956 len = min_t(u32,
957 SSP_RESP_IU_MAX_SIZE,
958 be32_to_cpu(ssp_response->response_data_len));
6f231dda 959
af5ae893 960 memcpy(resp_buf, ssp_response->resp_data, len);
961}
962
963/*
964 * *****************************************************************************
965 * * CONSTRUCTED STATE HANDLERS
966 * ***************************************************************************** */
967
38aa74eb 968/*
969 * This method implements the action taken when a constructed
970 * SCIC_SDS_IO_REQUEST_T object receives a scic_sds_request_start() request.
971 * This method will, if necessary, allocate a TCi for the io request object and
972 * then will, if necessary, copy the constructed TC data into the actual TC
973 * buffer. If everything is successful the post context field is updated with
974 * the TCi so the controller can post the request to the hardware. enum sci_status
975 * SCI_SUCCESS SCI_FAILURE_INSUFFICIENT_RESOURCES
976 */
977static enum sci_status scic_sds_request_constructed_state_start_handler(
38aa74eb 978 struct scic_sds_request *request)
979{
980 struct scu_task_context *task_context;
6f231dda 981
982 if (request->io_tag == SCI_CONTROLLER_INVALID_IO_TAG) {
983 request->io_tag =
984 scic_controller_allocate_io_tag(request->owning_controller);
985 }
986
987 /* Record the IO Tag in the request */
988 if (request->io_tag != SCI_CONTROLLER_INVALID_IO_TAG) {
989 task_context = request->task_context_buffer;
6f231dda 990
38aa74eb 991 task_context->task_index = scic_sds_io_tag_get_index(request->io_tag);
992
993 switch (task_context->protocol_type) {
994 case SCU_TASK_CONTEXT_PROTOCOL_SMP:
995 case SCU_TASK_CONTEXT_PROTOCOL_SSP:
996 /* SSP/SMP Frame */
38aa74eb 997 task_context->type.ssp.tag = request->io_tag;
998 task_context->type.ssp.target_port_transfer_tag = 0xFFFF;
999 break;
1000
1001 case SCU_TASK_CONTEXT_PROTOCOL_STP:
1002 /*
1003 * STP/SATA Frame
38aa74eb 1004 * task_context->type.stp.ncq_tag = request->ncq_tag; */
1005 break;
1006
1007 case SCU_TASK_CONTEXT_PROTOCOL_NONE:
1008 /* / @todo When do we set no protocol type? */
1009 break;
1010
1011 default:
1012 /* This should never happen since we build the IO requests */
1013 break;
1014 }
1015
1016 /*
1017 * Check to see if we need to copy the task context buffer
1018 * or have been building into the task context buffer */
38aa74eb 1019 if (request->was_tag_assigned_by_user == false) {
6f231dda 1020 scic_sds_controller_copy_task_context(
38aa74eb 1021 request->owning_controller, request);
1022 }
1023
1024 /* Add to the post_context the io tag value */
38aa74eb 1025 request->post_context |= scic_sds_io_tag_get_index(request->io_tag);
1026
1027 /* Everything is good go ahead and change state */
1028 sci_base_state_machine_change_state(&request->state_machine,
1029 SCI_BASE_REQUEST_STATE_STARTED);
1030
1031 return SCI_SUCCESS;
1032 }
1033
1034 return SCI_FAILURE_INSUFFICIENT_RESOURCES;
1035}
1036
38aa74eb 1037/*
1038 * This method implements the action to be taken when an SCIC_SDS_IO_REQUEST_T
1039 * object receives a scic_sds_request_terminate() request. Since the request
1040 * has not yet been posted to the hardware the request transitions to the
1041 * completed state. enum sci_status SCI_SUCCESS
1042 */
1043static enum sci_status scic_sds_request_constructed_state_abort_handler(
38aa74eb 1044 struct scic_sds_request *request)
6f231dda 1045{
1046 /*
1047 * This request has been terminated by the user; make sure that the correct
1048 * status code is returned */
38aa74eb 1049 scic_sds_request_set_status(request,
6f231dda 1050 SCU_TASK_DONE_TASK_ABORT,
38aa74eb 1051 SCI_FAILURE_IO_TERMINATED);
6f231dda 1052
1053 sci_base_state_machine_change_state(&request->state_machine,
1054 SCI_BASE_REQUEST_STATE_COMPLETED);
1055 return SCI_SUCCESS;
1056}
1057
1058/*
1059 * *****************************************************************************
1060 * * STARTED STATE HANDLERS
1061 * ***************************************************************************** */
1062
38aa74eb 1063/*
1064 * This method implements the action to be taken when an SCIC_SDS_IO_REQUEST_T
1065 * object receives a scic_sds_request_terminate() request. Since the request
1066 * has been posted to the hardware the io request state is changed to the
1067 * aborting state. enum sci_status SCI_SUCCESS
1068 */
1069enum sci_status scic_sds_request_started_state_abort_handler(
38aa74eb 1070 struct scic_sds_request *request)
6f231dda 1071{
1072 if (request->has_started_substate_machine)
1073 sci_base_state_machine_stop(&request->started_substate_machine);
6f231dda 1074
1075 sci_base_state_machine_change_state(&request->state_machine,
1076 SCI_BASE_REQUEST_STATE_ABORTING);
1077 return SCI_SUCCESS;
1078}
1079
af5ae893 1080/*
1081 * scic_sds_request_started_state_tc_completion_handler() - This method processes
1082 * TC (task context) completions for normal IO request (i.e. Task/Abort
1083 * Completions of type 0). This method will update the
1084 * SCIC_SDS_IO_REQUEST_T::status field.
e2023b87 1085 * @sci_req: This parameter specifies the request for which a completion
1086 * occurred.
1087 * @completion_code: This parameter specifies the completion code received from
1088 * the SCU.
1089 *
1090 */
1091enum sci_status
1092scic_sds_request_started_state_tc_completion_handler(
1093 struct scic_sds_request *sci_req,
1094 u32 completion_code)
6f231dda 1095{
1096 u8 datapres;
1097 struct ssp_response_iu *resp_iu;
6f231dda 1098
1099 /*
1100 * TODO: Any SDMA return code of other than 0 is bad
1101 * decode 0x003C0000 to determine SDMA status
1102 */
1103 switch (SCU_GET_COMPLETION_TL_STATUS(completion_code)) {
1104 case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_GOOD):
1105 scic_sds_request_set_status(sci_req,
1106 SCU_TASK_DONE_GOOD,
1107 SCI_SUCCESS);
1108 break;
1109
1110 case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_EARLY_RESP):
1111 {
1112 /*
1113 * There are times when the SCU hardware will return an early
1114 * response because the io request specified more data than is
1115 * returned by the target device (mode pages, inquiry data,
1116 * etc.). We must check the response stats to see if this is
1117 * truly a failed request or a good request that just got
1118 * completed early.
1119 */
1120 struct ssp_response_iu *resp = sci_req->response_buffer;
1121 ssize_t word_cnt = SSP_RESP_IU_MAX_SIZE / sizeof(u32);
1122
1123 sci_swab32_cpy(sci_req->response_buffer,
1124 sci_req->response_buffer,
1125 word_cnt);
6f231dda 1126
af5ae893 1127 if (resp->status == 0) {
6f231dda 1128 scic_sds_request_set_status(
1129 sci_req,
1130 SCU_TASK_DONE_GOOD,
1131 SCI_SUCCESS_IO_DONE_EARLY);
1132 } else {
1133 scic_sds_request_set_status(
e2023b87 1134 sci_req,
6f231dda 1135 SCU_TASK_DONE_CHECK_RESPONSE,
af5ae893 1136 SCI_FAILURE_IO_RESPONSE_VALID);
1137 }
1138 }
1139 break;
1140
1141 case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_CHECK_RESPONSE):
1142 {
1143 ssize_t word_cnt = SSP_RESP_IU_MAX_SIZE / sizeof(u32);
6f231dda 1144
1145 sci_swab32_cpy(sci_req->response_buffer,
1146 sci_req->response_buffer,
1147 word_cnt);
1148
1149 scic_sds_request_set_status(sci_req,
1150 SCU_TASK_DONE_CHECK_RESPONSE,
1151 SCI_FAILURE_IO_RESPONSE_VALID);
6f231dda 1152 break;
51a57cff 1153 }
1154
1155 case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_RESP_LEN_ERR):
1156 /*
1157 * / @todo With TASK_DONE_RESP_LEN_ERR is the response frame
1158 * guaranteed to be received before this completion status is
1159 * posted?
1160 */
1161 resp_iu = sci_req->response_buffer;
1162 datapres = resp_iu->datapres;
1163
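 /*
  * DATAPRES decoding (assumption from the SAS SSP response IU definition,
  * not spelled out here): 0x01 means RESPONSE_DATA and 0x02 means
  * SENSE_DATA, i.e. the target returned a payload describing a non-GOOD
  * status, so the request is treated as a check-response failure.
  */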
1164 if ((datapres == 0x01) || (datapres == 0x02)) {
6f231dda 1165 scic_sds_request_set_status(
e2023b87 1166 sci_req,
6f231dda 1167 SCU_TASK_DONE_CHECK_RESPONSE,
1168 SCI_FAILURE_IO_RESPONSE_VALID);
1169 } else
6f231dda 1170 scic_sds_request_set_status(
af5ae893 1171 sci_req, SCU_TASK_DONE_GOOD, SCI_SUCCESS);
1172 break;
1173
1174 /* only stp device gets suspended. */
1175 case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_ACK_NAK_TO):
1176 case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_LL_PERR):
1177 case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_NAK_ERR):
1178 case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_DATA_LEN_ERR):
1179 case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_LL_ABORT_ERR):
1180 case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_XR_WD_LEN):
1181 case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_MAX_PLD_ERR):
1182 case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_UNEXP_RESP):
1183 case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_UNEXP_SDBFIS):
1184 case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_REG_ERR):
1185 case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_SDB_ERR):
e2023b87 1186 if (sci_req->protocol == SCIC_STP_PROTOCOL) {
6f231dda 1187 scic_sds_request_set_status(
e2023b87 1188 sci_req,
1189 SCU_GET_COMPLETION_TL_STATUS(completion_code) >>
1190 SCU_COMPLETION_TL_STATUS_SHIFT,
1191 SCI_FAILURE_REMOTE_DEVICE_RESET_REQUIRED);
1192 } else {
1193 scic_sds_request_set_status(
e2023b87 1194 sci_req,
1195 SCU_GET_COMPLETION_TL_STATUS(completion_code) >>
1196 SCU_COMPLETION_TL_STATUS_SHIFT,
1197 SCI_FAILURE_CONTROLLER_SPECIFIC_IO_ERR);
1198 }
1199 break;
1200
1201 /* both stp/ssp device gets suspended */
1202 case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_LF_ERR):
1203 case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_OPEN_REJECT_WRONG_DESTINATION):
1204 case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_OPEN_REJECT_RESERVED_ABANDON_1):
1205 case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_OPEN_REJECT_RESERVED_ABANDON_2):
1206 case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_OPEN_REJECT_RESERVED_ABANDON_3):
1207 case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_OPEN_REJECT_BAD_DESTINATION):
1208 case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_OPEN_REJECT_ZONE_VIOLATION):
1209 case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_OPEN_REJECT_STP_RESOURCES_BUSY):
1210 case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_OPEN_REJECT_PROTOCOL_NOT_SUPPORTED):
1211 case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_OPEN_REJECT_CONNECTION_RATE_NOT_SUPPORTED):
1212 scic_sds_request_set_status(
e2023b87 1213 sci_req,
1214 SCU_GET_COMPLETION_TL_STATUS(completion_code) >>
1215 SCU_COMPLETION_TL_STATUS_SHIFT,
1216 SCI_FAILURE_REMOTE_DEVICE_RESET_REQUIRED);
1217 break;
1218
1219 /* neither ssp nor stp gets suspended. */
1220 case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_NAK_CMD_ERR):
1221 case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_UNEXP_XR):
1222 case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_XR_IU_LEN_ERR):
1223 case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_SDMA_ERR):
1224 case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_OFFSET_ERR):
1225 case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_EXCESS_DATA):
1226 case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_SMP_RESP_TO_ERR):
1227 case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_SMP_UFI_ERR):
1228 case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_SMP_FRM_TYPE_ERR):
1229 case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_SMP_LL_RX_ERR):
1230 case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_UNEXP_DATA):
1231 case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_OPEN_FAIL):
1232 case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_VIIT_ENTRY_NV):
1233 case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_IIT_ENTRY_NV):
1234 case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_RNCNV_OUTBOUND):
1235 default:
1236 scic_sds_request_set_status(
e2023b87 1237 sci_req,
1238 SCU_GET_COMPLETION_TL_STATUS(completion_code) >>
1239 SCU_COMPLETION_TL_STATUS_SHIFT,
1240 SCI_FAILURE_CONTROLLER_SPECIFIC_IO_ERR);
1241 break;
1242 }
1243
1244 /*
1245 * TODO: This is probably wrong for ACK/NAK timeout conditions
1246 */
1247
1248 /* In all cases we will treat this as the completion of the IO req. */
1249 sci_base_state_machine_change_state(
1250 &sci_req->state_machine,
1251 SCI_BASE_REQUEST_STATE_COMPLETED);
1252 return SCI_SUCCESS;
1253}
1254
38aa74eb 1255/*
1256 * This method implements the action to be taken when an SCIC_SDS_IO_REQUEST_T
1257 * object receives a scic_sds_request_frame_handler() request. This method
1258 * first determines the frame type received. If this is a response frame then
1259 * the response data is copied to the io request response buffer for processing
1260 * at completion time. If the frame type is not a response frame an error is
1261 * logged. enum sci_status SCI_SUCCESS SCI_FAILURE_INVALID_PARAMETER_VALUE
1262 */
1263static enum sci_status
1264scic_sds_request_started_state_frame_handler(struct scic_sds_request *sci_req,
1265 u32 frame_index)
1266{
1267 enum sci_status status;
1268 u32 *frame_header;
1269 struct ssp_frame_hdr ssp_hdr;
1270 ssize_t word_cnt;
6f231dda 1271
6f231dda 1272 status = scic_sds_unsolicited_frame_control_get_header(
e2023b87 1273 &(scic_sds_request_get_controller(sci_req)->uf_control),
6f231dda 1274 frame_index,
af5ae893 1275 (void **)&frame_header);
6f231dda 1276
1277 word_cnt = sizeof(struct ssp_frame_hdr) / sizeof(u32);
1278 sci_swab32_cpy(&ssp_hdr, frame_header, word_cnt);
1279
1280 if (ssp_hdr.frame_type == SSP_RESPONSE) {
af5ae893 1281 struct ssp_response_iu *resp_iu;
51a57cff 1282 ssize_t word_cnt = SSP_RESP_IU_MAX_SIZE / sizeof(u32);
1283
1284 status = scic_sds_unsolicited_frame_control_get_buffer(
e2023b87 1285 &(scic_sds_request_get_controller(sci_req)->uf_control),
6f231dda 1286 frame_index,
af5ae893 1287 (void **)&resp_iu);
6f231dda 1288
1289 sci_swab32_cpy(sci_req->response_buffer,
1290 resp_iu, word_cnt);
6f231dda 1291
af5ae893 1292 resp_iu = sci_req->response_buffer;
6f231dda 1293
1294 if ((resp_iu->datapres == 0x01) ||
1295 (resp_iu->datapres == 0x02)) {
6f231dda 1296 scic_sds_request_set_status(
e2023b87 1297 sci_req,
6f231dda 1298 SCU_TASK_DONE_CHECK_RESPONSE,
af5ae893 1299 SCI_FAILURE_CONTROLLER_SPECIFIC_IO_ERR);
1300 } else
1301 scic_sds_request_set_status(
1302 sci_req, SCU_TASK_DONE_GOOD, SCI_SUCCESS);
1303 } else {
6f231dda 1304 /* This was not a response frame why did it get forwarded? */
e2023b87 1305 dev_err(scic_to_dev(sci_req->owning_controller),
1306 "%s: SCIC IO Request 0x%p received unexpected "
1307 "frame %d type 0x%02x\n",
1308 __func__,
e2023b87 1309 sci_req,
6f231dda 1310 frame_index,
2d9c2240 1311 ssp_hdr.frame_type);
af5ae893 1312 }
1313
1314 /*
1315 * In any case we are done with this frame buffer return it to the
1316 * controller
1317 */
6f231dda 1318 scic_sds_controller_release_frame(
af5ae893 1319 sci_req->owning_controller, frame_index);
1320
1321 return SCI_SUCCESS;
1322}
1323
1324/*
1325 * *****************************************************************************
1326 * * COMPLETED STATE HANDLERS
1327 * ***************************************************************************** */
1328
1329
38aa74eb 1330/*
1331 * This method implements the action to be taken when an SCIC_SDS_IO_REQUEST_T
1332 * object receives a scic_sds_request_complete() request. This method frees up
1333 * any io request resources that have been allocated and transitions the
1334 * request to its final state. Consider stopping the state machine instead of
1335 * transitioning to the final state? enum sci_status SCI_SUCCESS
1336 */
1337static enum sci_status scic_sds_request_completed_state_complete_handler(
38aa74eb 1338 struct scic_sds_request *request)
6f231dda 1339{
38aa74eb 1340 if (request->was_tag_assigned_by_user != true) {
6f231dda 1341 scic_controller_free_io_tag(
38aa74eb 1342 request->owning_controller, request->io_tag);
1343 }
1344
38aa74eb 1345 if (request->saved_rx_frame_index != SCU_INVALID_FRAME_INDEX) {
6f231dda 1346 scic_sds_controller_release_frame(
38aa74eb 1347 request->owning_controller, request->saved_rx_frame_index);
1348 }
1349
1350 sci_base_state_machine_change_state(&request->state_machine,
1351 SCI_BASE_REQUEST_STATE_FINAL);
1352 return SCI_SUCCESS;
1353}
1354
1355/*
1356 * *****************************************************************************
1357 * * ABORTING STATE HANDLERS
1358 * ***************************************************************************** */
1359
38aa74eb 1360/*
1361 * This method implements the action to be taken when an SCIC_SDS_IO_REQUEST_T
1362 * object receives a scic_sds_request_terminate() request. This method is the
1363 * io request aborting state abort handler. On receipt of multiple
1364 * terminate requests the io request will transition to the completed state.
1365 * This should not happen in normal operation. enum sci_status SCI_SUCCESS
1366 */
1367static enum sci_status scic_sds_request_aborting_state_abort_handler(
38aa74eb 1368 struct scic_sds_request *request)
6f231dda 1369{
1370 sci_base_state_machine_change_state(&request->state_machine,
1371 SCI_BASE_REQUEST_STATE_COMPLETED);
1372 return SCI_SUCCESS;
1373}
1374
38aa74eb 1375/*
1376 * This method implements the action to be taken when an SCIC_SDS_IO_REQUEST_T
1377 * object receives a scic_sds_request_task_completion() request. This method
1378 * decodes the completion type waiting for the abort task complete
1379 * notification. When the abort task complete is received the io request
1380 * transitions to the completed state. enum sci_status SCI_SUCCESS
1381 */
1382static enum sci_status scic_sds_request_aborting_state_tc_completion_handler(
e2023b87 1383 struct scic_sds_request *sci_req,
1384 u32 completion_code)
1385{
1386 switch (SCU_GET_COMPLETION_TL_STATUS(completion_code)) {
1387 case (SCU_TASK_DONE_GOOD << SCU_COMPLETION_TL_STATUS_SHIFT):
1388 case (SCU_TASK_DONE_TASK_ABORT << SCU_COMPLETION_TL_STATUS_SHIFT):
1389 scic_sds_request_set_status(
e2023b87 1390 sci_req, SCU_TASK_DONE_TASK_ABORT, SCI_FAILURE_IO_TERMINATED
1391 );
1392
e2023b87 1393 sci_base_state_machine_change_state(&sci_req->state_machine,
38aa74eb 1394 SCI_BASE_REQUEST_STATE_COMPLETED);
1395 break;
1396
1397 default:
1398 /*
1399 * Unless we get some strange error wait for the task abort to complete
1400 * TODO: Should there be a state change for this completion? */
1401 break;
1402 }
1403
1404 return SCI_SUCCESS;
1405}
1406
38aa74eb 1407/*
1408 * This method implements the action to be taken when an SCIC_SDS_IO_REQUEST_T
1409 * object receives a scic_sds_request_frame_handler() request. This method
1410 * discards the unsolicited frame since we are waiting for the abort task
1411 * completion. enum sci_status SCI_SUCCESS
1412 */
1413static enum sci_status scic_sds_request_aborting_state_frame_handler(
e2023b87 1414 struct scic_sds_request *sci_req,
1415 u32 frame_index)
1416{
1417 /* TODO: Is it even possible to get an unsolicited frame in the aborting state? */
1418
1419 scic_sds_controller_release_frame(
e2023b87 1420 sci_req->owning_controller, frame_index);
1421
1422 return SCI_SUCCESS;
1423}
1424
35173d57 1425static const struct scic_sds_io_request_state_handler scic_sds_request_state_handler_table[] = {
6f231dda 1426 [SCI_BASE_REQUEST_STATE_INITIAL] = {
1427 },
1428 [SCI_BASE_REQUEST_STATE_CONSTRUCTED] = {
1429 .start_handler = scic_sds_request_constructed_state_start_handler,
1430 .abort_handler = scic_sds_request_constructed_state_abort_handler,
1431 },
1432 [SCI_BASE_REQUEST_STATE_STARTED] = {
38aa74eb 1433 .abort_handler = scic_sds_request_started_state_abort_handler,
38aa74eb 1434 .tc_completion_handler = scic_sds_request_started_state_tc_completion_handler,
38aa74eb 1435 .frame_handler = scic_sds_request_started_state_frame_handler,
1436 },
1437 [SCI_BASE_REQUEST_STATE_COMPLETED] = {
38aa74eb 1438 .complete_handler = scic_sds_request_completed_state_complete_handler,
1439 },
1440 [SCI_BASE_REQUEST_STATE_ABORTING] = {
38aa74eb 1441 .abort_handler = scic_sds_request_aborting_state_abort_handler,
38aa74eb 1442 .tc_completion_handler = scic_sds_request_aborting_state_tc_completion_handler,
38aa74eb 1443 .frame_handler = scic_sds_request_aborting_state_frame_handler,
1444 },
1445 [SCI_BASE_REQUEST_STATE_FINAL] = {
1446 },
1447};
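/*
 * How the table above is consumed (a summary of the surrounding code): each
 * *_state_enter() routine below installs the row for its state via
 * SET_STATE_HANDLER(), and the dispatch wrappers earlier in this file
 * (scic_sds_request_start(), scic_sds_io_request_terminate(),
 * scic_sds_io_request_complete(), scic_sds_io_request_event_handler(),
 * scic_sds_io_request_frame_handler()) test the matching function pointer
 * and call it, warning and returning SCI_FAILURE_INVALID_STATE when the
 * current state leaves that pointer NULL.
 */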
1448
1449/**
1450 * scic_sds_request_initial_state_enter() -
1451 * @object: This parameter specifies the base object for which the state
1452 * transition is occurring.
1453 *
1454 * This method implements the actions taken when entering the
1455 * SCI_BASE_REQUEST_STATE_INITIAL state. This state is entered when the initial
1456 * base request is constructed. Entry into the initial state sets all handlers
1457 * for the io request object to their default handlers. none
1458 */
9a0fff7b 1459static void scic_sds_request_initial_state_enter(void *object)
6f231dda 1460{
890cae9b 1461 struct scic_sds_request *sci_req = object;
1462
1463 SET_STATE_HANDLER(
e2023b87 1464 sci_req,
1465 scic_sds_request_state_handler_table,
1466 SCI_BASE_REQUEST_STATE_INITIAL
1467 );
1468}
1469
1470/**
1471 * scic_sds_request_constructed_state_enter() -
1472 * @object: The io request object that is to enter the constructed state.
1473 *
1474 * This method implements the actions taken when entering the
1475 * SCI_BASE_REQUEST_STATE_CONSTRUCTED state. The method sets the state handlers
1476 * for the constructed state. none
1477 */
9a0fff7b 1478static void scic_sds_request_constructed_state_enter(void *object)
6f231dda 1479{
890cae9b 1480 struct scic_sds_request *sci_req = object;
1481
1482 SET_STATE_HANDLER(
e2023b87 1483 sci_req,
1484 scic_sds_request_state_handler_table,
1485 SCI_BASE_REQUEST_STATE_CONSTRUCTED
1486 );
1487}
1488
1489/**
1490 * scic_sds_request_started_state_enter() -
1491 * @object: This parameter specifies the base object for which the state
9a0fff7b 1492 * transition is occurring. This is cast into a SCIC_SDS_IO_REQUEST object.
1493 *
1494 * This method implements the actions taken when entering the
1495 * SCI_BASE_REQUEST_STATE_STARTED state. If the io request object type is a
1496 * SCSI Task request we must enter the started substate machine. none
1497 */
9a0fff7b 1498static void scic_sds_request_started_state_enter(void *object)
6f231dda 1499{
890cae9b 1500 struct scic_sds_request *sci_req = object;
1501
1502 SET_STATE_HANDLER(
e2023b87 1503 sci_req,
1504 scic_sds_request_state_handler_table,
1505 SCI_BASE_REQUEST_STATE_STARTED
1506 );
1507
1508 /*
1509 * Most of the request state machines have a started substate machine so
1510 * start its execution on the entry to the started state. */
1511 if (sci_req->has_started_substate_machine == true)
1512 sci_base_state_machine_start(&sci_req->started_substate_machine);
1513}
1514
1515/**
1516 * scic_sds_request_started_state_exit() -
1517 * @object: This parameter specifies the base object for which the state
9a0fff7b 1518 * transition is occurring. This object is cast into a struct scic_sds_request
6f231dda
DW
1519 * object.
1520 *
1521 * This method implements the actions taken when exiting the
1522 * SCI_BASE_REQUEST_STATE_STARTED state. For task requests the action will be
 1523 * to stop the started substate machine.
1524 */
9a0fff7b 1525static void scic_sds_request_started_state_exit(void *object)
6f231dda 1526{
890cae9b 1527 struct scic_sds_request *sci_req = object;
6f231dda 1528
e2023b87
DJ
 1529 	if (sci_req->has_started_substate_machine)
1530 sci_base_state_machine_stop(&sci_req->started_substate_machine);
6f231dda
DW
1531}
1532
1533/**
1534 * scic_sds_request_completed_state_enter() -
1535 * @object: This parameter specifies the base object for which the state
9a0fff7b 1536 * transition is occurring. This object is cast into a struct scic_sds_request
6f231dda
DW
1537 * object.
1538 *
1539 * This method implements the actions taken when entering the
1540 * SCI_BASE_REQUEST_STATE_COMPLETED state. This state is entered when the
 1541 * request has completed. The method decodes the request completion status
 1542 * and converts it to an enum sci_status to return in the completion
 1543 * callback function.
1544 */
9a0fff7b 1545static void scic_sds_request_completed_state_enter(void *object)
6f231dda 1546{
890cae9b 1547 struct scic_sds_request *sci_req = object;
09d7da13
DJ
1548 struct scic_sds_controller *scic =
1549 scic_sds_request_get_controller(sci_req);
d3757c3a 1550 struct isci_host *ihost = scic->ihost;
890cae9b 1551 struct isci_request *ireq = sci_req->ireq;
6f231dda 1552
09d7da13
DJ
1553 SET_STATE_HANDLER(sci_req,
1554 scic_sds_request_state_handler_table,
1555 SCI_BASE_REQUEST_STATE_COMPLETED);
6f231dda
DW
1556
1557 /* Tell the SCI_USER that the IO request is complete */
09d7da13
DJ
 1558 	if (!sci_req->is_task_management_request)
1559 isci_request_io_request_complete(ihost,
1560 ireq,
1561 sci_req->sci_status);
1562 else
1563 isci_task_request_complete(ihost, ireq, sci_req->sci_status);
6f231dda
DW
1564}
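/*
 * Editorial note: the completed-state entry above is the single point at
 * which core status is reported back to the isci layer.  Ordinary I/O
 * requests are routed to isci_request_io_request_complete(), while requests
 * flagged as task management (see scic_task_request_construct() below) are
 * routed to isci_task_request_complete() instead.
 */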
1565
1566/**
1567 * scic_sds_request_aborting_state_enter() -
1568 * @object: This parameter specifies the base object for which the state
9a0fff7b 1569 * transition is occurring. This object is cast into a struct scic_sds_request
6f231dda
DW
1570 * object.
1571 *
1572 * This method implements the actions taken when entering the
 1573 * SCI_BASE_REQUEST_STATE_ABORTING state.
1574 */
9a0fff7b 1575static void scic_sds_request_aborting_state_enter(void *object)
6f231dda 1576{
890cae9b 1577 struct scic_sds_request *sci_req = object;
6f231dda
DW
1578
1579 /* Setting the abort bit in the Task Context is required by the silicon. */
e2023b87 1580 sci_req->task_context_buffer->abort = 1;
6f231dda
DW
1581
1582 SET_STATE_HANDLER(
e2023b87 1583 sci_req,
6f231dda
DW
1584 scic_sds_request_state_handler_table,
1585 SCI_BASE_REQUEST_STATE_ABORTING
1586 );
1587}
1588
1589/**
1590 * scic_sds_request_final_state_enter() -
1591 * @object: This parameter specifies the base object for which the state
9a0fff7b 1592 * transition is occurring. This is cast into a struct scic_sds_request object.
6f231dda
DW
1593 *
1594 * This method implements the actions taken when entering the
1595 * SCI_BASE_REQUEST_STATE_FINAL state. The only action required is to put the
 1596 * state handlers in place.
1597 */
9a0fff7b 1598static void scic_sds_request_final_state_enter(void *object)
6f231dda 1599{
890cae9b 1600 struct scic_sds_request *sci_req = object;
6f231dda
DW
1601
1602 SET_STATE_HANDLER(
e2023b87 1603 sci_req,
6f231dda
DW
1604 scic_sds_request_state_handler_table,
1605 SCI_BASE_REQUEST_STATE_FINAL
1606 );
1607}
1608
35173d57 1609static const struct sci_base_state scic_sds_request_state_table[] = {
6f231dda
DW
1610 [SCI_BASE_REQUEST_STATE_INITIAL] = {
1611 .enter_state = scic_sds_request_initial_state_enter,
1612 },
1613 [SCI_BASE_REQUEST_STATE_CONSTRUCTED] = {
1614 .enter_state = scic_sds_request_constructed_state_enter,
1615 },
1616 [SCI_BASE_REQUEST_STATE_STARTED] = {
1617 .enter_state = scic_sds_request_started_state_enter,
1618 .exit_state = scic_sds_request_started_state_exit
1619 },
1620 [SCI_BASE_REQUEST_STATE_COMPLETED] = {
1621 .enter_state = scic_sds_request_completed_state_enter,
1622 },
1623 [SCI_BASE_REQUEST_STATE_ABORTING] = {
1624 .enter_state = scic_sds_request_aborting_state_enter,
1625 },
1626 [SCI_BASE_REQUEST_STATE_FINAL] = {
1627 .enter_state = scic_sds_request_final_state_enter,
1628 },
1629};
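/*
 * Editorial note: this table is handed to sci_base_state_machine_construct()
 * in scic_sds_general_request_construct() below, so the enter/exit functions
 * above run automatically on every state change.  Judging from the states and
 * handlers defined in this file, a request typically moves
 * INITIAL -> CONSTRUCTED -> STARTED -> COMPLETED -> FINAL, with ABORTING
 * reachable from STARTED when the request is terminated early; the actual
 * transitions are requested by the handler implementations, not by this table.
 */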
1630
35173d57
DW
1631static void scic_sds_general_request_construct(struct scic_sds_controller *scic,
1632 struct scic_sds_remote_device *sci_dev,
1633 u16 io_tag,
1634 void *user_io_request_object,
1635 struct scic_sds_request *sci_req)
1636{
890cae9b 1637 sci_base_state_machine_construct(&sci_req->state_machine, sci_req,
38aa74eb
CH
1638 scic_sds_request_state_table, SCI_BASE_REQUEST_STATE_INITIAL);
1639 sci_base_state_machine_start(&sci_req->state_machine);
1640
35173d57
DW
1641 sci_req->io_tag = io_tag;
1642 sci_req->user_request = user_io_request_object;
1643 sci_req->owning_controller = scic;
1644 sci_req->target_device = sci_dev;
1645 sci_req->has_started_substate_machine = false;
1646 sci_req->protocol = SCIC_NO_PROTOCOL;
1647 sci_req->saved_rx_frame_index = SCU_INVALID_FRAME_INDEX;
1648 sci_req->device_sequence = scic_sds_remote_device_get_sequence(sci_dev);
1649
1650 sci_req->sci_status = SCI_SUCCESS;
1651 sci_req->scu_status = 0;
1652 sci_req->post_context = 0xFFFFFFFF;
1653
1654 sci_req->is_task_management_request = false;
1655
1656 if (io_tag == SCI_CONTROLLER_INVALID_IO_TAG) {
1657 sci_req->was_tag_assigned_by_user = false;
1658 sci_req->task_context_buffer = NULL;
1659 } else {
1660 sci_req->was_tag_assigned_by_user = true;
1661
1662 sci_req->task_context_buffer =
1663 scic_sds_controller_get_task_context_buffer(scic, io_tag);
1664 }
1665}
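/*
 * Editorial note: the helper above performs the initialisation shared by the
 * I/O and task construction paths: it starts the base state machine in the
 * INITIAL state, resets the bookkeeping fields, and resolves the controller's
 * pre-allocated task context buffer when the caller supplied a valid io_tag.
 * When the tag is SCI_CONTROLLER_INVALID_IO_TAG the buffer is left NULL,
 * presumably to be filled in by the protocol-specific *_assign_buffers()
 * helpers invoked by the callers below (the later memset of
 * task_context_buffer relies on it being non-NULL by that point).
 */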
1666
2ec53eb4
DJ
1667enum sci_status
1668scic_io_request_construct(struct scic_sds_controller *scic,
1669 struct scic_sds_remote_device *sci_dev,
1670 u16 io_tag,
1671 void *user_req,
1672 struct scic_sds_request *sci_req,
1673 struct scic_sds_request **new_sci_req)
35173d57 1674{
a1a113b0 1675 struct domain_device *dev = sci_dev_to_domain(sci_dev);
35173d57 1676 enum sci_status status = SCI_SUCCESS;
35173d57
DW
1677
1678 /* Build the common part of the request */
2ec53eb4
DJ
1679 scic_sds_general_request_construct(scic,
1680 sci_dev,
1681 io_tag,
1682 user_req,
1683 sci_req);
1684
1685 if (sci_dev->rnc.remote_node_index ==
1686 SCIC_SDS_REMOTE_NODE_CONTEXT_INVALID_INDEX)
35173d57
DW
1687 return SCI_FAILURE_INVALID_REMOTE_DEVICE;
1688
2ec53eb4 1689 if (dev->dev_type == SAS_END_DEV)
35173d57 1690 scic_sds_ssp_io_request_assign_buffers(sci_req);
2ec53eb4
DJ
1691 else if ((dev->dev_type == SATA_DEV) ||
1692 (dev->tproto & SAS_PROTOCOL_STP)) {
35173d57 1693 scic_sds_stp_request_assign_buffers(sci_req);
2ec53eb4
DJ
1694 memset(sci_req->command_buffer,
1695 0,
1696 sizeof(struct host_to_dev_fis));
a1a113b0 1697 } else if (dev_is_expander(dev)) {
35173d57 1698 scic_sds_smp_request_assign_buffers(sci_req);
2ec53eb4 1699 memset(sci_req->command_buffer, 0, sizeof(struct smp_req));
a1a113b0 1700 } else
35173d57 1701 status = SCI_FAILURE_UNSUPPORTED_PROTOCOL;
35173d57
DW
1702
1703 if (status == SCI_SUCCESS) {
2ec53eb4
DJ
1704 memset(sci_req->task_context_buffer,
1705 0,
1706 SCI_FIELD_OFFSET(struct scu_task_context, sgl_pair_ab));
1707 *new_sci_req = sci_req;
35173d57
DW
1708 }
1709
1710 return status;
1711}
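/*
 * Illustrative usage sketch (editorial, not part of the driver): a caller
 * that already holds the controller, the remote device and the request
 * storage might drive the routine above roughly as follows; the surrounding
 * allocation and error handling are assumptions made only for this sketch
 * (a failure such as SCI_FAILURE_UNSUPPORTED_PROTOCOL or
 * SCI_FAILURE_INVALID_REMOTE_DEVICE is simply propagated).
 *
 *	struct scic_sds_request *new_req;
 *	enum sci_status status;
 *
 *	status = scic_io_request_construct(scic, sci_dev,
 *					   SCI_CONTROLLER_INVALID_IO_TAG,
 *					   ireq, sci_req, &new_req);
 *	if (status != SCI_SUCCESS)
 *		return status;
 *
 * On success *new_sci_req simply aliases the sci_req that was passed in,
 * now ready for protocol-specific construction and an eventual start.
 */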
1712
1713enum sci_status scic_task_request_construct(struct scic_sds_controller *scic,
1714 struct scic_sds_remote_device *sci_dev,
1715 u16 io_tag,
1716 void *user_io_request_object,
1717 struct scic_sds_request *sci_req,
1718 struct scic_sds_request **new_sci_req)
1719{
a1a113b0 1720 struct domain_device *dev = sci_dev_to_domain(sci_dev);
35173d57 1721 enum sci_status status = SCI_SUCCESS;
35173d57
DW
1722
1723 /* Build the common part of the request */
1724 scic_sds_general_request_construct(scic, sci_dev, io_tag,
1725 user_io_request_object,
1726 sci_req);
1727
a1a113b0 1728 if (dev->dev_type == SAS_END_DEV) {
35173d57
DW
1729 scic_sds_ssp_task_request_assign_buffers(sci_req);
1730
1731 sci_req->has_started_substate_machine = true;
1732
1733 /* Construct the started sub-state machine. */
1734 sci_base_state_machine_construct(
1735 &sci_req->started_substate_machine,
890cae9b 1736 sci_req,
35173d57
DW
1737 scic_sds_io_request_started_task_mgmt_substate_table,
1738 SCIC_SDS_IO_REQUEST_STARTED_TASK_MGMT_SUBSTATE_AWAIT_TC_COMPLETION
1739 );
a1a113b0 1740 } else if (dev->dev_type == SATA_DEV || (dev->tproto & SAS_PROTOCOL_STP))
35173d57 1741 scic_sds_stp_request_assign_buffers(sci_req);
a1a113b0 1742 else
35173d57 1743 status = SCI_FAILURE_UNSUPPORTED_PROTOCOL;
35173d57
DW
1744
1745 if (status == SCI_SUCCESS) {
1746 sci_req->is_task_management_request = true;
1747 memset(sci_req->task_context_buffer, 0, sizeof(struct scu_task_context));
1748 *new_sci_req = sci_req;
1749 }
1750
1751 return status;
1752}
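/*
 * Editorial note: task construction mirrors scic_io_request_construct() with
 * two visible differences: the request is flagged via
 * is_task_management_request so that completion is reported through
 * isci_task_request_complete(), and for SSP end devices a dedicated
 * task-management started substate machine is constructed, to be started
 * automatically when the request enters the STARTED state (see
 * scic_sds_request_started_state_enter() above).
 */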