isci: removing intel_*.h headers
[deliverable/linux.git] drivers/scsi/isci/core/scic_sds_request.c
1/*
2 * This file is provided under a dual BSD/GPLv2 license. When using or
3 * redistributing this file, you may do so under either license.
4 *
5 * GPL LICENSE SUMMARY
6 *
7 * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved.
8 *
9 * This program is free software; you can redistribute it and/or modify
10 * it under the terms of version 2 of the GNU General Public License as
11 * published by the Free Software Foundation.
12 *
13 * This program is distributed in the hope that it will be useful, but
14 * WITHOUT ANY WARRANTY; without even the implied warranty of
15 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
16 * General Public License for more details.
17 *
18 * You should have received a copy of the GNU General Public License
19 * along with this program; if not, write to the Free Software
20 * Foundation, Inc., 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
21 * The full GNU General Public License is included in this distribution
22 * in the file called LICENSE.GPL.
23 *
24 * BSD LICENSE
25 *
26 * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved.
27 * All rights reserved.
28 *
29 * Redistribution and use in source and binary forms, with or without
30 * modification, are permitted provided that the following conditions
31 * are met:
32 *
33 * * Redistributions of source code must retain the above copyright
34 * notice, this list of conditions and the following disclaimer.
35 * * Redistributions in binary form must reproduce the above copyright
36 * notice, this list of conditions and the following disclaimer in
37 * the documentation and/or other materials provided with the
38 * distribution.
39 * * Neither the name of Intel Corporation nor the names of its
40 * contributors may be used to endorse or promote products derived
41 * from this software without specific prior written permission.
42 *
43 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
44 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
45 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
46 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
47 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
48 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
49 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
50 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
51 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
52 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
53 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
54 */
55
f2f30080 56#include <scsi/sas.h>
57#include "scic_controller.h"
58#include "scic_io_request.h"
6f231dda 59#include "scic_sds_controller.h"
bc99aa47 60#include "scu_registers.h"
6f231dda 61#include "scic_sds_port.h"
88f3b62a 62#include "remote_device.h"
63#include "scic_sds_request.h"
64#include "scic_sds_smp_request.h"
65#include "scic_sds_stp_request.h"
66#include "scic_sds_unsolicited_frame_control.h"
6f231dda 67#include "sci_environment.h"
68#include "sci_util.h"
69#include "scu_completion_codes.h"
70#include "scu_constants.h"
71#include "scu_task_context.h"
72
73/*
74 * ****************************************************************************
75 * * SCIC SDS IO REQUEST CONSTANTS
76 * **************************************************************************** */
77
78/**
79 *
80 *
81 * We have no timer requirements for IO requests right now
82 */
83#define SCIC_SDS_IO_REQUEST_MINIMUM_TIMER_COUNT (0)
84#define SCIC_SDS_IO_REQUEST_MAXIMUM_TIMER_COUNT (0)
85
86/*
87 * ****************************************************************************
88 * * SCIC SDS IO REQUEST MACROS
89 * **************************************************************************** */
90
91/**
92 * scic_ssp_io_request_get_object_size() -
93 *
94 * This macro returns the size of the memory required to store an SSP IO
95 * request. This does not include the size of the SGL or SCU Task Context
96 * memory.
97 */
98#define scic_ssp_io_request_get_object_size() \
99 (\
0cfa890e 100 sizeof(struct ssp_cmd_iu) \
af5ae893 101 + SSP_RESP_IU_MAX_SIZE \
102 )
103
104/**
105 * scic_sds_ssp_request_get_command_buffer() -
106 *
107 * This macro returns the address of the ssp command buffer in the io request
108 * memory
109 */
110#define scic_sds_ssp_request_get_command_buffer(memory) \
0cfa890e 111 ((struct ssp_cmd_iu *)(\
112 ((char *)(memory)) + sizeof(struct scic_sds_request) \
113 ))
114
115/**
116 * scic_sds_ssp_request_get_response_buffer() -
117 *
118 * This macro returns the address of the ssp response buffer in the io request
119 * memory
120 */
121#define scic_sds_ssp_request_get_response_buffer(memory) \
af5ae893 122 ((struct ssp_response_iu *)(\
6f231dda 123 ((char *)(scic_sds_ssp_request_get_command_buffer(memory))) \
0cfa890e 124 + sizeof(struct ssp_cmd_iu) \
125 ))
126
127/**
128 * scic_sds_ssp_request_get_task_context_buffer() -
129 *
130 * This macro returns the address of the task context buffer in the io request
131 * memory
132 */
133#define scic_sds_ssp_request_get_task_context_buffer(memory) \
134 ((struct scu_task_context *)(\
135 ((char *)(scic_sds_ssp_request_get_response_buffer(memory))) \
af5ae893 136 + SSP_RESP_IU_MAX_SIZE \
137 ))
138
139/**
140 * scic_sds_ssp_request_get_sgl_element_buffer() -
141 *
142 * This macro returns the address of the sgl element pairs in the io request
143 * memory buffer
144 */
145#define scic_sds_ssp_request_get_sgl_element_buffer(memory) \
146 ((struct scu_sgl_element_pair *)(\
147 ((char *)(scic_sds_ssp_request_get_task_context_buffer(memory))) \
148 + sizeof(struct scu_task_context) \
149 ))
150
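/*
 * Illustrative sketch (not taken from the driver): the accessor macros above
 * carve one contiguous allocation into consecutive regions.  For a request
 * memory block "mem" the implied layout is:
 *
 *	struct scic_sds_request			(the request object itself)
 *	struct ssp_cmd_iu			(command buffer)
 *	SSP_RESP_IU_MAX_SIZE bytes		(response buffer)
 *	struct scu_task_context		(task context, cache aligned later)
 *	struct scu_sgl_element_pair[]		(SGL element pairs)
 *
 * so, for example, the response buffer address reduces to
 *	(char *)mem + sizeof(struct scic_sds_request) + sizeof(struct ssp_cmd_iu)
 */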
151
152/**
153 * scic_ssp_task_request_get_object_size() -
154 *
155 * This macro returns the size of the memory required to store an SSP Task
156 * request. This does not include the size of the SCU Task Context memory.
157 */
158#define scic_ssp_task_request_get_object_size() \
159 (\
0cfa890e 160 sizeof(struct ssp_task_iu) \
af5ae893 161 + SSP_RESP_IU_MAX_SIZE \
162 )
163
164/**
165 * scic_sds_ssp_task_request_get_command_buffer() -
166 *
167 * This macro returns the address of the ssp command buffer in the task request
168 * memory. Yes, it's the same as the above macro except for the name.
169 */
170#define scic_sds_ssp_task_request_get_command_buffer(memory) \
0cfa890e 171 ((struct ssp_task_iu *)(\
172 ((char *)(memory)) + sizeof(struct scic_sds_request) \
173 ))
174
175/**
176 * scic_sds_ssp_task_request_get_response_buffer() -
177 *
178 * This macro returns the address of the ssp response buffer in the task
179 * request memory.
180 */
181#define scic_sds_ssp_task_request_get_response_buffer(memory) \
af5ae893 182 ((struct ssp_response_iu *)(\
6f231dda 183 ((char *)(scic_sds_ssp_task_request_get_command_buffer(memory))) \
0cfa890e 184 + sizeof(struct ssp_task_iu) \
185 ))
186
187/**
188 * scic_sds_ssp_task_request_get_task_context_buffer() -
189 *
190 * This macro returns the task context buffer for the SSP task request.
191 */
192#define scic_sds_ssp_task_request_get_task_context_buffer(memory) \
193 ((struct scu_task_context *)(\
194 ((char *)(scic_sds_ssp_task_request_get_response_buffer(memory))) \
af5ae893 195 + SSP_RESP_IU_MAX_SIZE \
196 ))
197
198
199
200/*
201 * ****************************************************************************
202 * * SCIC SDS IO REQUEST PRIVATE METHODS
203 * **************************************************************************** */
204
205/**
206 *
207 *
208 * This method returns the size, in bytes, required to store an SSP IO request object.
209 */
210static u32 scic_sds_ssp_request_get_object_size(void)
211{
212 return sizeof(struct scic_sds_request)
213 + scic_ssp_io_request_get_object_size()
214 + sizeof(struct scu_task_context)
fe9a6431 215 + SMP_CACHE_BYTES
216 + sizeof(struct scu_sgl_element_pair) * SCU_MAX_SGL_ELEMENT_PAIRS;
217}
218
219/**
220 * This method returns the sgl element pair for the specified sgl_pair index.
e2023b87 221 * @sci_req: This parameter specifies the IO request for which to retrieve
222 * the Scatter-Gather List element pair.
223 * @sgl_pair_index: This parameter specifies the index into the SGL element
224 * pair to be retrieved.
225 *
226 * This method returns a pointer to an struct scu_sgl_element_pair.
227 */
228static struct scu_sgl_element_pair *scic_sds_request_get_sgl_element_pair(
e2023b87 229 struct scic_sds_request *sci_req,
230 u32 sgl_pair_index
231 ) {
232 struct scu_task_context *task_context;
233
e2023b87 234 task_context = (struct scu_task_context *)sci_req->task_context_buffer;
235
236 if (sgl_pair_index == 0) {
237 return &task_context->sgl_pair_ab;
238 } else if (sgl_pair_index == 1) {
239 return &task_context->sgl_pair_cd;
240 }
241
e2023b87 242 return &sci_req->sgl_element_pair_buffer[sgl_pair_index - 2];
243}
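/*
 * Example (illustrative only): for a request that needs five SGL element
 * pairs the lookups above resolve to
 *
 *	index 0 -> &task_context->sgl_pair_ab	(embedded in the task context)
 *	index 1 -> &task_context->sgl_pair_cd	(embedded in the task context)
 *	index 2 -> &sci_req->sgl_element_pair_buffer[0]
 *	index 4 -> &sci_req->sgl_element_pair_buffer[2]
 */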
244
245/**
246 * This function will build the SGL list for an IO request.
e2023b87 247 * @sds_request: This parameter specifies the IO request for which to build
248 * the Scatter-Gather List.
249 *
250 */
6389a775 251void scic_sds_request_build_sgl(struct scic_sds_request *sds_request)
6f231dda 252{
890cae9b 253 struct isci_request *isci_request = sds_request->ireq;
254 struct isci_host *isci_host = isci_request->isci_host;
255 struct sas_task *task = isci_request_access_task(isci_request);
256 struct scatterlist *sg = NULL;
257 dma_addr_t dma_addr;
258 u32 sg_idx = 0;
259 struct scu_sgl_element_pair *scu_sg = NULL;
260 struct scu_sgl_element_pair *prev_sg = NULL;
261
262 if (task->num_scatter > 0) {
263 sg = task->scatter;
264
265 while (sg) {
266 scu_sg = scic_sds_request_get_sgl_element_pair(
267 sds_request,
268 sg_idx);
269
270 SCU_SGL_COPY(scu_sg->A, sg);
271
272 sg = sg_next(sg);
273
274 if (sg) {
275 SCU_SGL_COPY(scu_sg->B, sg);
276 sg = sg_next(sg);
277 } else
278 SCU_SGL_ZERO(scu_sg->B);
279
280 if (prev_sg) {
281 dma_addr =
282 scic_io_request_get_dma_addr(
283 sds_request,
284 scu_sg);
285
286 prev_sg->next_pair_upper =
287 upper_32_bits(dma_addr);
288 prev_sg->next_pair_lower =
289 lower_32_bits(dma_addr);
290 }
291
292 prev_sg = scu_sg;
293 sg_idx++;
6f231dda 294 }
295 } else { /* handle when no sg */
296 scu_sg = scic_sds_request_get_sgl_element_pair(sds_request,
297 sg_idx);
6f231dda 298
299 dma_addr = dma_map_single(&isci_host->pdev->dev,
300 task->scatter,
301 task->total_xfer_len,
302 task->data_dir);
6f231dda 303
304 isci_request->zero_scatter_daddr = dma_addr;
305
306 scu_sg->A.length = task->total_xfer_len;
307 scu_sg->A.address_upper = upper_32_bits(dma_addr);
308 scu_sg->A.address_lower = lower_32_bits(dma_addr);
309 }
310
311 if (scu_sg) {
312 scu_sg->next_pair_upper = 0;
313 scu_sg->next_pair_lower = 0;
314 }
315}
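/*
 * Worked example (illustrative): a three-entry scatterlist produces
 *
 *	pair0.A = sg[0], pair0.B = sg[1], pair0.next_pair -> DMA address of pair1
 *	pair1.A = sg[2], pair1.B = zeroed, pair1.next_pair = 0
 *
 * The zeroed next_pair_upper/next_pair_lower written at the end of the
 * function is what terminates the chain for the hardware.  In the no-scatter
 * case a single pair describes the dma_map_single() mapping of the whole
 * transfer.
 */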
316
317/**
318 * This method builds the remainder of the IO request object.
e2023b87 319 * @sci_req: This parameter specifies the request object being constructed.
320 *
321 * The scic_sds_general_request_construct() must be called before this call is
322 * valid. none
323 */
324static void scic_sds_ssp_io_request_assign_buffers(
e2023b87 325 struct scic_sds_request *sci_req)
6f231dda 326{
327 sci_req->command_buffer =
328 scic_sds_ssp_request_get_command_buffer(sci_req);
329 sci_req->response_buffer =
330 scic_sds_ssp_request_get_response_buffer(sci_req);
331 sci_req->sgl_element_pair_buffer =
332 scic_sds_ssp_request_get_sgl_element_buffer(sci_req);
333 sci_req->sgl_element_pair_buffer =
334 PTR_ALIGN(sci_req->sgl_element_pair_buffer,
fe9a6431 335 sizeof(struct scu_sgl_element_pair));
6f231dda 336
337 if (sci_req->was_tag_assigned_by_user == false) {
338 sci_req->task_context_buffer =
339 scic_sds_ssp_request_get_task_context_buffer(sci_req);
340 sci_req->task_context_buffer =
341 PTR_ALIGN(sci_req->task_context_buffer,
fe9a6431 342 SMP_CACHE_BYTES);
343 }
344}
345
0cfa890e 346static void scic_sds_io_request_build_ssp_command_iu(struct scic_sds_request *sci_req)
6f231dda 347{
348 struct ssp_cmd_iu *cmd_iu;
349 struct isci_request *ireq = sci_req->ireq;
350 struct sas_task *task = isci_request_access_task(ireq);
6f231dda 351
0cfa890e 352 cmd_iu = sci_req->command_buffer;
6f231dda 353
354 memcpy(cmd_iu->LUN, task->ssp_task.LUN, 8);
355 cmd_iu->add_cdb_len = 0;
356 cmd_iu->_r_a = 0;
357 cmd_iu->_r_b = 0;
358 cmd_iu->en_fburst = 0; /* unsupported */
359 cmd_iu->task_prio = task->ssp_task.task_prio;
360 cmd_iu->task_attr = task->ssp_task.task_attr;
361 cmd_iu->_r_c = 0;
6f231dda 362
363 sci_swab32_cpy(&cmd_iu->cdb, task->ssp_task.cdb,
364 sizeof(task->ssp_task.cdb) / sizeof(u32));
365}
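/*
 * Note (assumption about sci_util.h, for illustration only): sci_swab32_cpy()
 * is understood to copy word_cnt 32-bit words while byte-swapping each one,
 * roughly dest[i] = swab32(src[i]), which is why the CDB length above is
 * expressed as sizeof(task->ssp_task.cdb) / sizeof(u32) rather than in bytes.
 * The same helper is used later in this file to swap the received response IU.
 */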
366
0cfa890e 367static void scic_sds_task_request_build_ssp_task_iu(struct scic_sds_request *sci_req)
6f231dda 368{
369 struct ssp_task_iu *task_iu;
370 struct isci_request *ireq = sci_req->ireq;
371 struct sas_task *task = isci_request_access_task(ireq);
372 struct isci_tmf *isci_tmf = isci_request_access_tmf(ireq);
6f231dda 373
0cfa890e 374 task_iu = sci_req->command_buffer;
6f231dda 375
0cfa890e 376 memset(task_iu, 0, sizeof(struct ssp_task_iu));
6f231dda 377
0cfa890e 378 memcpy(task_iu->LUN, task->ssp_task.LUN, 8);
6f231dda 379
380 task_iu->task_func = isci_tmf->tmf_code;
381 task_iu->task_tag =
382 (ireq->ttype == tmf_task) ?
383 isci_tmf->io_tag :
384 SCI_CONTROLLER_INVALID_IO_TAG;
385}
386
387/**
388 * This method will fill in the SCU Task Context for any type of SSP request.
e2023b87 389 * @sci_req:
390 * @task_context:
391 *
392 */
393static void scu_ssp_reqeust_construct_task_context(
6389a775 394 struct scic_sds_request *sds_request,
395 struct scu_task_context *task_context)
396{
397 dma_addr_t dma_addr;
398 struct scic_sds_controller *controller;
399 struct scic_sds_remote_device *target_device;
400 struct scic_sds_port *target_port;
401
402 controller = scic_sds_request_get_controller(sds_request);
403 target_device = scic_sds_request_get_device(sds_request);
404 target_port = scic_sds_request_get_port(sds_request);
405
406 /* Fill in the TC with its required data */
407 task_context->abort = 0;
408 task_context->priority = 0;
409 task_context->initiator_request = 1;
8f304c36 410 task_context->connection_rate = target_device->connection_rate;
6f231dda 411 task_context->protocol_engine_index =
6389a775 412 scic_sds_controller_get_protocol_engine_group(controller);
413 task_context->logical_port_index =
414 scic_sds_port_get_index(target_port);
415 task_context->protocol_type = SCU_TASK_CONTEXT_PROTOCOL_SSP;
416 task_context->valid = SCU_TASK_CONTEXT_VALID;
417 task_context->context_type = SCU_TASK_CONTEXT_TYPE;
418
419 task_context->remote_node_index =
6389a775 420 scic_sds_remote_device_get_index(sds_request->target_device);
421 task_context->command_code = 0;
422
423 task_context->link_layer_control = 0;
424 task_context->do_not_dma_ssp_good_response = 1;
425 task_context->strict_ordering = 0;
426 task_context->control_frame = 0;
427 task_context->timeout_enable = 0;
428 task_context->block_guard_enable = 0;
429
430 task_context->address_modifier = 0;
431
e2023b87 432 /* task_context->type.ssp.tag = sci_req->io_tag; */
433 task_context->task_phase = 0x01;
434
435 if (sds_request->was_tag_assigned_by_user) {
436 /*
437 * Build the task context now since we have already read
438 * the data
439 */
440 sds_request->post_context =
441 (SCU_CONTEXT_COMMAND_REQUEST_TYPE_POST_TC |
442 (scic_sds_controller_get_protocol_engine_group(
443 controller) <<
444 SCU_CONTEXT_COMMAND_PROTOCOL_ENGINE_GROUP_SHIFT) |
445 (scic_sds_port_get_index(target_port) <<
446 SCU_CONTEXT_COMMAND_LOGICAL_PORT_SHIFT) |
447 scic_sds_io_tag_get_index(sds_request->io_tag));
6f231dda 448 } else {
449 /*
450 * Build the task context now since we have already read
451 * the data
452 *
453 * I/O tag index is not assigned because we have to wait
454 * until we get a TCi
455 */
456 sds_request->post_context =
457 (SCU_CONTEXT_COMMAND_REQUEST_TYPE_POST_TC |
458 (scic_sds_controller_get_protocol_engine_group(
459 controller) <<
460 SCU_CONTEXT_COMMAND_PROTOCOL_ENGINE_GROUP_SHIFT) |
461 (scic_sds_port_get_index(target_port) <<
462 SCU_CONTEXT_COMMAND_LOGICAL_PORT_SHIFT));
463 }
464
465 /*
466 * Copy the physical address for the command buffer to the
467 * SCU Task Context
468 */
469 dma_addr = scic_io_request_get_dma_addr(sds_request,
470 sds_request->command_buffer);
471
472 task_context->command_iu_upper = upper_32_bits(dma_addr);
473 task_context->command_iu_lower = lower_32_bits(dma_addr);
474
475 /*
476 * Copy the physical address for the response buffer to the
477 * SCU Task Context
478 */
479 dma_addr = scic_io_request_get_dma_addr(sds_request,
480 sds_request->response_buffer);
481
482 task_context->response_iu_upper = upper_32_bits(dma_addr);
483 task_context->response_iu_lower = lower_32_bits(dma_addr);
484}
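/*
 * Sketch (illustrative): the post_context value built above packs the routing
 * information needed to post the task context to the hardware:
 *
 *	post_context = SCU_CONTEXT_COMMAND_REQUEST_TYPE_POST_TC
 *		     | (protocol engine group << ..._PROTOCOL_ENGINE_GROUP_SHIFT)
 *		     | (logical port index    << ..._LOGICAL_PORT_SHIFT)
 *		     | task context index (TCi), when the io_tag is already known
 *
 * When the tag was not assigned by the user, the TCi bits are OR'ed in later
 * by the constructed-state start handler once a tag has been allocated.
 */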
485
486/**
487 * This method will fill in the SCU Task Context for a SSP IO request.
e2023b87 488 * @sci_req:
489 *
490 */
491static void scu_ssp_io_request_construct_task_context(
492 struct scic_sds_request *sci_req,
493 enum dma_data_direction dir,
494 u32 len)
495{
496 struct scu_task_context *task_context;
497
82d29928 498 task_context = scic_sds_request_get_task_context(sci_req);
6f231dda 499
82d29928 500 scu_ssp_reqeust_construct_task_context(sci_req, task_context);
6f231dda 501
502 task_context->ssp_command_iu_length =
503 sizeof(struct ssp_cmd_iu) / sizeof(u32);
8694e792 504 task_context->type.ssp.frame_type = SSP_COMMAND;
6f231dda 505
506 switch (dir) {
507 case DMA_FROM_DEVICE:
508 case DMA_NONE:
509 default:
510 task_context->task_type = SCU_TASK_TYPE_IOREAD;
511 break;
82d29928 512 case DMA_TO_DEVICE:
513 task_context->task_type = SCU_TASK_TYPE_IOWRITE;
514 break;
515 }
516
82d29928 517 task_context->transfer_length_bytes = len;
6f231dda 518
519 if (task_context->transfer_length_bytes > 0)
520 scic_sds_request_build_sgl(sci_req);
521}
522
523
524/**
525 * This method will fill in the remainder of the io request object for SSP Task
526 * requests.
e2023b87 527 * @sci_req:
528 *
529 */
530static void scic_sds_ssp_task_request_assign_buffers(
e2023b87 531 struct scic_sds_request *sci_req)
532{
533 /* Assign all of the buffer pointers */
534 sci_req->command_buffer =
535 scic_sds_ssp_task_request_get_command_buffer(sci_req);
536 sci_req->response_buffer =
537 scic_sds_ssp_task_request_get_response_buffer(sci_req);
538 sci_req->sgl_element_pair_buffer = NULL;
539
540 if (sci_req->was_tag_assigned_by_user == false) {
541 sci_req->task_context_buffer =
542 scic_sds_ssp_task_request_get_task_context_buffer(sci_req);
543 sci_req->task_context_buffer =
544 PTR_ALIGN(sci_req->task_context_buffer, SMP_CACHE_BYTES);
545 }
546}
547
548/**
549 * This method will fill in the SCU Task Context for a SSP Task request. The
550 * following important settings are utilized: -# priority ==
551 * SCU_TASK_PRIORITY_HIGH. This ensures that the task request is issued
552 * ahead of other tasks destined for the same Remote Node. -# task_type ==
553 * SCU_TASK_TYPE_IOREAD. This simply indicates that a normal request type
554 * (i.e. non-raw frame) is being utilized to perform task management. -#
555 * control_frame == 1. This ensures that the proper endianness is set so
556 * that the bytes are transmitted in the right order for a task frame.
e2023b87 557 * @sci_req: This parameter specifies the task request object being
558 * constructed.
559 *
560 */
561static void scu_ssp_task_request_construct_task_context(
e2023b87 562 struct scic_sds_request *sci_req)
563{
564 struct scu_task_context *task_context;
565
e2023b87 566 task_context = scic_sds_request_get_task_context(sci_req);
6f231dda 567
e2023b87 568 scu_ssp_reqeust_construct_task_context(sci_req, task_context);
569
570 task_context->control_frame = 1;
571 task_context->priority = SCU_TASK_PRIORITY_HIGH;
572 task_context->task_type = SCU_TASK_TYPE_RAW_FRAME;
573 task_context->transfer_length_bytes = 0;
8694e792 574 task_context->type.ssp.frame_type = SSP_TASK;
575 task_context->ssp_command_iu_length =
576 sizeof(struct ssp_task_iu) / sizeof(u32);
577}
578
579
580/**
581 * This method constructs the SSP Command IU data for this ssp passthrough
582 * command request object.
e2023b87 583 * @sci_req: This parameter specifies the request object for which the SSP
584 * command information unit is being built.
585 *
586 * enum sci_status, returns invalid parameter is cdb > 16
587 */
588
589
590/**
591 * This method constructs the SATA request object.
e2023b87 592 * @sci_req:
593 * @sat_protocol:
594 * @transfer_length:
595 * @data_direction:
596 * @copy_rx_frame:
597 *
598 * enum sci_status
599 */
600static enum sci_status
601scic_io_request_construct_sata(struct scic_sds_request *sci_req,
602 u32 len,
603 enum dma_data_direction dir,
604 bool copy)
605{
606 enum sci_status status = SCI_SUCCESS;
607 struct isci_request *ireq = sci_req->ireq;
608 struct sas_task *task = isci_request_access_task(ireq);
609
610 /* check for management protocols */
611 if (ireq->ttype == tmf_task) {
612 struct isci_tmf *tmf = isci_request_access_tmf(ireq);
613
614 if (tmf->tmf_code == isci_tmf_sata_srst_high ||
615 tmf->tmf_code == isci_tmf_sata_srst_low)
616 return scic_sds_stp_soft_reset_request_construct(sci_req);
617 else {
618 dev_err(scic_to_dev(sci_req->owning_controller),
619 "%s: Request 0x%p received un-handled SAT "
620 "management protocol 0x%x.\n",
621 __func__, sci_req, tmf->tmf_code);
622
623 return SCI_FAILURE;
624 }
625 }
6f231dda 626
627 if (!sas_protocol_ata(task->task_proto)) {
628 dev_err(scic_to_dev(sci_req->owning_controller),
629 "%s: Non-ATA protocol in SATA path: 0x%x\n",
630 __func__,
631 task->task_proto);
632 return SCI_FAILURE;
6f231dda 633
e76d6180 634 }
6f231dda 635
636 /* non data */
637 if (task->data_dir == DMA_NONE)
638 return scic_sds_stp_non_data_request_construct(sci_req);
6f231dda 639
640 /* NCQ */
641 if (task->ata_task.use_ncq)
642 return scic_sds_stp_ncq_request_construct(sci_req, len, dir);
6f231dda 643
644 /* DMA */
645 if (task->ata_task.dma_xfer)
646 return scic_sds_stp_udma_request_construct(sci_req, len, dir);
647 else /* PIO */
648 return scic_sds_stp_pio_request_construct(sci_req, copy);
649
650 return status;
651}
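/*
 * Summary of the dispatch above (illustrative):
 *
 *	tmf_task with SATA SRST high/low -> scic_sds_stp_soft_reset_request_construct()
 *	task->data_dir == DMA_NONE	 -> scic_sds_stp_non_data_request_construct()
 *	task->ata_task.use_ncq		 -> scic_sds_stp_ncq_request_construct()
 *	task->ata_task.dma_xfer		 -> scic_sds_stp_udma_request_construct()
 *	otherwise (PIO)			 -> scic_sds_stp_pio_request_construct()
 *
 * Every branch returns directly, so the trailing "return status" is
 * effectively unreachable.
 */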
652
653u32 scic_io_request_get_object_size(void)
654{
655 u32 ssp_request_size;
656 u32 stp_request_size;
657 u32 smp_request_size;
658
659 ssp_request_size = scic_sds_ssp_request_get_object_size();
660 stp_request_size = scic_sds_stp_request_get_object_size();
661 smp_request_size = scic_sds_smp_request_get_object_size();
662
663 return max(ssp_request_size, max(stp_request_size, smp_request_size));
664}
665
666enum sci_status scic_io_request_construct_basic_ssp(
667 struct scic_sds_request *sci_req)
668{
890cae9b 669 struct isci_request *isci_request = sci_req->ireq;
670
671 sci_req->protocol = SCIC_SSP_PROTOCOL;
672
673 scu_ssp_io_request_construct_task_context(
674 sci_req,
675 isci_request_io_request_get_data_direction(isci_request),
676 isci_request_io_request_get_transfer_length(isci_request));
677
678 scic_sds_io_request_build_ssp_command_iu(sci_req);
679
680 sci_base_state_machine_change_state(&sci_req->state_machine,
681 SCI_BASE_REQUEST_STATE_CONSTRUCTED);
682
683 return SCI_SUCCESS;
684}
685
686
687enum sci_status scic_task_request_construct_ssp(
688 struct scic_sds_request *sci_req)
689{
690 /* Construct the SSP Task SCU Task Context */
691 scu_ssp_task_request_construct_task_context(sci_req);
692
693 /* Fill in the SSP Task IU */
694 scic_sds_task_request_build_ssp_task_iu(sci_req);
695
696 sci_base_state_machine_change_state(&sci_req->state_machine,
697 SCI_BASE_REQUEST_STATE_CONSTRUCTED);
698
699 return SCI_SUCCESS;
700}
701
702
703enum sci_status scic_io_request_construct_basic_sata(
704 struct scic_sds_request *sci_req)
705{
706 enum sci_status status;
82d29928 707 struct scic_sds_stp_request *stp_req;
708 u32 len;
709 enum dma_data_direction dir;
710 bool copy = false;
890cae9b 711 struct isci_request *isci_request = sci_req->ireq;
7392d275 712 struct sas_task *task = isci_request_access_task(isci_request);
6f231dda 713
82d29928 714 stp_req = container_of(sci_req, typeof(*stp_req), parent);
715
716 sci_req->protocol = SCIC_STP_PROTOCOL;
717
718 len = isci_request_io_request_get_transfer_length(isci_request);
719 dir = isci_request_io_request_get_data_direction(isci_request);
7392d275 720 copy = (task->data_dir == DMA_NONE) ? false : true;
6f231dda 721
e76d6180 722 status = scic_io_request_construct_sata(sci_req, len, dir, copy);
723
724 if (status == SCI_SUCCESS)
725 sci_base_state_machine_change_state(&sci_req->state_machine,
726 SCI_BASE_REQUEST_STATE_CONSTRUCTED);
727
728 return status;
729}
730
731
732enum sci_status scic_task_request_construct_sata(
733 struct scic_sds_request *sci_req)
734{
735 enum sci_status status = SCI_SUCCESS;
736 struct isci_request *ireq = sci_req->ireq;
737
738 /* check for management protocols */
739 if (ireq->ttype == tmf_task) {
740 struct isci_tmf *tmf = isci_request_access_tmf(ireq);
741
742 if (tmf->tmf_code == isci_tmf_sata_srst_high ||
743 tmf->tmf_code == isci_tmf_sata_srst_low) {
744 status = scic_sds_stp_soft_reset_request_construct(sci_req);
745 } else {
746 dev_err(scic_to_dev(sci_req->owning_controller),
747 "%s: Request 0x%p received un-handled SAT "
748 "Protocol 0x%x.\n",
749 __func__, sci_req, tmf->tmf_code);
750
751 return SCI_FAILURE;
752 }
753 }
754
755 if (status == SCI_SUCCESS)
756 sci_base_state_machine_change_state(
757 &sci_req->state_machine,
758 SCI_BASE_REQUEST_STATE_CONSTRUCTED);
759
760 return status;
761}
762
763
764u16 scic_io_request_get_io_tag(
765 struct scic_sds_request *sci_req)
766{
767 return sci_req->io_tag;
768}
769
770
771u32 scic_request_get_controller_status(
772 struct scic_sds_request *sci_req)
773{
774 return sci_req->scu_status;
775}
776
777
778void *scic_io_request_get_command_iu_address(
779 struct scic_sds_request *sci_req)
780{
781 return sci_req->command_buffer;
782}
783
784
785void *scic_io_request_get_response_iu_address(
786 struct scic_sds_request *sci_req)
787{
788 return sci_req->response_buffer;
789}
790
791
792#define SCU_TASK_CONTEXT_SRAM 0x200000
793u32 scic_io_request_get_number_of_bytes_transferred(
794 struct scic_sds_request *scic_sds_request)
795{
467e855a 796 struct scic_sds_controller *scic = scic_sds_request->owning_controller;
797 u32 ret_val = 0;
798
799 if (readl(&scic->smu_registers->address_modifier) == 0) {
800 void __iomem *scu_reg_base = scic->scu_registers;
801 /*
802 * get the bytes of data from the Address == BAR1 + 20002Ch + (256*TCi) where
803 * BAR1 is the scu_registers
804 * 0x20002C = 0x200000 + 0x2c
805 * = start of task context SRAM + offset of (type.ssp.data_offset)
806 * TCi is the io_tag of struct scic_sds_request */
807 ret_val = readl(scu_reg_base +
808 (SCU_TASK_CONTEXT_SRAM + offsetof(struct scu_task_context, type.ssp.data_offset)) +
809 ((sizeof(struct scu_task_context)) * scic_sds_io_tag_get_index(scic_sds_request->io_tag)));
810 }
811
812 return ret_val;
813}
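/*
 * Example (illustrative): for an I/O whose tag decodes to TCi == 3 the readl()
 * above resolves to
 *
 *	scu_reg_base + 0x200000				(task context SRAM)
 *		     + offsetof(struct scu_task_context, type.ssp.data_offset)
 *		     + 3 * sizeof(struct scu_task_context)
 *
 * i.e. the transferred byte count is read straight out of the hardware's copy
 * of the task context rather than tracked in host memory.
 */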
814
815
816/*
817 * ****************************************************************************
818 * * SCIC SDS Interface Implementation
819 * **************************************************************************** */
820
821enum sci_status
822scic_sds_request_start(struct scic_sds_request *request)
6f231dda 823{
524b5f72 824 if (request->device_sequence !=
38aa74eb 825 scic_sds_remote_device_get_sequence(request->target_device))
826 return SCI_FAILURE;
827
828 if (request->state_handlers->start_handler)
38aa74eb 829 return request->state_handlers->start_handler(request);
830
831 dev_warn(scic_to_dev(request->owning_controller),
832 "%s: SCIC IO Request requested to start while in wrong "
833 "state %d\n",
834 __func__,
835 sci_base_state_machine_get_state(&request->state_machine));
836
837 return SCI_FAILURE_INVALID_STATE;
838}
839
840enum sci_status
841scic_sds_io_request_terminate(struct scic_sds_request *request)
6f231dda 842{
843 if (request->state_handlers->abort_handler)
844 return request->state_handlers->abort_handler(request);
845
846 dev_warn(scic_to_dev(request->owning_controller),
847 "%s: SCIC IO Request requested to abort while in wrong "
848 "state %d\n",
849 __func__,
850 sci_base_state_machine_get_state(&request->state_machine));
851
852 return SCI_FAILURE_INVALID_STATE;
853}
854
855enum sci_status
856scic_sds_io_request_complete(struct scic_sds_request *request)
6f231dda 857{
858 if (request->state_handlers->complete_handler)
859 return request->state_handlers->complete_handler(request);
860
861 dev_warn(scic_to_dev(request->owning_controller),
862 "%s: SCIC IO Request requested to complete while in wrong "
863 "state %d\n",
864 __func__,
865 sci_base_state_machine_get_state(&request->state_machine));
866
867 return SCI_FAILURE_INVALID_STATE;
868}
869
6f231dda 870enum sci_status scic_sds_io_request_event_handler(
524b5f72 871 struct scic_sds_request *request,
872 u32 event_code)
873{
874 if (request->state_handlers->event_handler)
875 return request->state_handlers->event_handler(request, event_code);
876
877 dev_warn(scic_to_dev(request->owning_controller),
878 "%s: SCIC IO Request given event code notification %x while "
879 "in wrong state %d\n",
880 __func__,
881 event_code,
882 sci_base_state_machine_get_state(&request->state_machine));
883
884 return SCI_FAILURE_INVALID_STATE;
885}
886
887enum sci_status
888scic_sds_io_request_tc_completion(struct scic_sds_request *request, u32 completion_code)
889{
890 if (request->state_machine.current_state_id == SCI_BASE_REQUEST_STATE_STARTED &&
891 request->has_started_substate_machine == false)
892 return scic_sds_request_started_state_tc_completion_handler(request, completion_code);
893 else if (request->state_handlers->tc_completion_handler)
894 return request->state_handlers->tc_completion_handler(request, completion_code);
895
896 dev_warn(scic_to_dev(request->owning_controller),
897 "%s: SCIC IO Request given task completion notification %x "
898 "while in wrong state %d\n",
899 __func__,
900 completion_code,
901 sci_base_state_machine_get_state(&request->state_machine));
902
903 return SCI_FAILURE_INVALID_STATE;
904
905}
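/*
 * All of the entry points above follow the same dispatch pattern: call the
 * per-state handler installed by SET_STATE_HANDLER() if the current state
 * provides one, otherwise warn and return SCI_FAILURE_INVALID_STATE.  A
 * minimal sketch of the pattern (illustrative only):
 *
 *	if (request->state_handlers->some_handler)
 *		return request->state_handlers->some_handler(request, arg);
 *	dev_warn(scic_to_dev(request->owning_controller), ...);
 *	return SCI_FAILURE_INVALID_STATE;
 */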
906
907
908/**
909 *
e2023b87 910 * @sci_req: The SCIC_SDS_IO_REQUEST_T object for which the start
911 * operation is to be executed.
912 * @frame_index: The frame index returned by the hardware for the request
913 * object.
914 *
915 * This method invokes the core state frame handler for the
916 * SCIC_SDS_IO_REQUEST_T object. enum sci_status
917 */
918enum sci_status scic_sds_io_request_frame_handler(
524b5f72 919 struct scic_sds_request *request,
920 u32 frame_index)
921{
922 if (request->state_handlers->frame_handler)
923 return request->state_handlers->frame_handler(request, frame_index);
924
925 dev_warn(scic_to_dev(request->owning_controller),
926 "%s: SCIC IO Request given unexpected frame %x while in "
927 "state %d\n",
928 __func__,
929 frame_index,
930 sci_base_state_machine_get_state(&request->state_machine));
931
932 scic_sds_controller_release_frame(request->owning_controller, frame_index);
933 return SCI_FAILURE_INVALID_STATE;
934}
935
6f231dda 936/*
af5ae893 937 * This function copies response data for requests returning response data
6f231dda 938 * instead of sense data.
e2023b87 939 * @sci_req: This parameter specifies the request object for which to copy
6f231dda 940 * the response data.
6f231dda 941 */
af5ae893 942void scic_sds_io_request_copy_response(struct scic_sds_request *sci_req)
6f231dda 943{
944 void *resp_buf;
945 u32 len;
946 struct ssp_response_iu *ssp_response;
947 struct isci_request *ireq = sci_req->ireq;
948 struct isci_tmf *isci_tmf = isci_request_access_tmf(ireq);
6f231dda 949
af5ae893 950 ssp_response = sci_req->response_buffer;
6f231dda 951
af5ae893 952 resp_buf = &isci_tmf->resp.resp_iu;
6f231dda 953
954 len = min_t(u32,
955 SSP_RESP_IU_MAX_SIZE,
956 be32_to_cpu(ssp_response->response_data_len));
6f231dda 957
af5ae893 958 memcpy(resp_buf, ssp_response->resp_data, len);
959}
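/*
 * Note (illustrative): response_data_len is carried big-endian in the SSP
 * response IU, hence the be32_to_cpu() conversion; clamping the copy length
 * with min_t(u32, SSP_RESP_IU_MAX_SIZE, ...) keeps a malformed length reported
 * by the target from overrunning the isci_tmf response buffer.
 */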
960
961/*
962 * *****************************************************************************
963 * * CONSTRUCTED STATE HANDLERS
964 * ***************************************************************************** */
965
38aa74eb 966/*
967 * This method implements the action taken when a constructed
968 * SCIC_SDS_IO_REQUEST_T object receives a scic_sds_request_start() request.
969 * This method will, if necessary, allocate a TCi for the io request object and
970 * then will, if necessary, copy the constructed TC data into the actual TC
971 * buffer. If everything is successful the post context field is updated with
972 * the TCi so the controller can post the request to the hardware. enum sci_status
973 * SCI_SUCCESS SCI_FAILURE_INSUFFICIENT_RESOURCES
974 */
975static enum sci_status scic_sds_request_constructed_state_start_handler(
38aa74eb 976 struct scic_sds_request *request)
977{
978 struct scu_task_context *task_context;
6f231dda 979
980 if (request->io_tag == SCI_CONTROLLER_INVALID_IO_TAG) {
981 request->io_tag =
982 scic_controller_allocate_io_tag(request->owning_controller);
983 }
984
985 /* Record the IO Tag in the request */
986 if (request->io_tag != SCI_CONTROLLER_INVALID_IO_TAG) {
987 task_context = request->task_context_buffer;
6f231dda 988
38aa74eb 989 task_context->task_index = scic_sds_io_tag_get_index(request->io_tag);
990
991 switch (task_context->protocol_type) {
992 case SCU_TASK_CONTEXT_PROTOCOL_SMP:
993 case SCU_TASK_CONTEXT_PROTOCOL_SSP:
994 /* SSP/SMP Frame */
38aa74eb 995 task_context->type.ssp.tag = request->io_tag;
996 task_context->type.ssp.target_port_transfer_tag = 0xFFFF;
997 break;
998
999 case SCU_TASK_CONTEXT_PROTOCOL_STP:
1000 /*
1001 * STP/SATA Frame
38aa74eb 1002 * task_context->type.stp.ncq_tag = request->ncq_tag; */
1003 break;
1004
1005 case SCU_TASK_CONTEXT_PROTOCOL_NONE:
1006 /* / @todo When do we set no protocol type? */
1007 break;
1008
1009 default:
1010 /* This should never happen since we build the IO requests */
1011 break;
1012 }
1013
1014 /*
1015 * Check to see if we need to copy the task context buffer
1016 * or have been building into the task context buffer */
38aa74eb 1017 if (request->was_tag_assigned_by_user == false) {
6f231dda 1018 scic_sds_controller_copy_task_context(
38aa74eb 1019 request->owning_controller, request);
1020 }
1021
1022 /* Add to the post_context the io tag value */
38aa74eb 1023 request->post_context |= scic_sds_io_tag_get_index(request->io_tag);
1024
1025 /* Everything is good go ahead and change state */
1026 sci_base_state_machine_change_state(&request->state_machine,
1027 SCI_BASE_REQUEST_STATE_STARTED);
1028
1029 return SCI_SUCCESS;
1030 }
1031
1032 return SCI_FAILURE_INSUFFICIENT_RESOURCES;
1033}
1034
38aa74eb 1035/*
1036 * This method implements the action to be taken when an SCIC_SDS_IO_REQUEST_T
1037 * object receives a scic_sds_request_terminate() request. Since the request
1038 * has not yet been posted to the hardware the request transitions to the
1039 * completed state. enum sci_status SCI_SUCCESS
1040 */
1041static enum sci_status scic_sds_request_constructed_state_abort_handler(
38aa74eb 1042 struct scic_sds_request *request)
6f231dda 1043{
1044 /*
1045 * This request has been terminated by the user make sure that the correct
1046 * status code is returned */
38aa74eb 1047 scic_sds_request_set_status(request,
6f231dda 1048 SCU_TASK_DONE_TASK_ABORT,
38aa74eb 1049 SCI_FAILURE_IO_TERMINATED);
6f231dda 1050
1051 sci_base_state_machine_change_state(&request->state_machine,
1052 SCI_BASE_REQUEST_STATE_COMPLETED);
1053 return SCI_SUCCESS;
1054}
1055
1056/*
1057 * *****************************************************************************
1058 * * STARTED STATE HANDLERS
1059 * ***************************************************************************** */
1060
38aa74eb 1061/*
1062 * This method implements the action to be taken when an SCIC_SDS_IO_REQUEST_T
1063 * object receives a scic_sds_request_terminate() request. Since the request
1064 * has been posted to the hardware the io request state is changed to the
1065 * aborting state. enum sci_status SCI_SUCCESS
1066 */
1067enum sci_status scic_sds_request_started_state_abort_handler(
38aa74eb 1068 struct scic_sds_request *request)
6f231dda 1069{
1070 if (request->has_started_substate_machine)
1071 sci_base_state_machine_stop(&request->started_substate_machine);
6f231dda 1072
1073 sci_base_state_machine_change_state(&request->state_machine,
1074 SCI_BASE_REQUEST_STATE_ABORTING);
1075 return SCI_SUCCESS;
1076}
1077
af5ae893 1078/*
1079 * scic_sds_request_started_state_tc_completion_handler() - This method process
1080 * TC (task context) completions for normal IO request (i.e. Task/Abort
1081 * Completions of type 0). This method will update the
1082 * SCIC_SDS_IO_REQUEST_T::status field.
e2023b87 1083 * @sci_req: This parameter specifies the request for which a completion
1084 * occurred.
1085 * @completion_code: This parameter specifies the completion code received from
1086 * the SCU.
1087 *
1088 */
1089enum sci_status
1090scic_sds_request_started_state_tc_completion_handler(
1091 struct scic_sds_request *sci_req,
1092 u32 completion_code)
6f231dda 1093{
1094 u8 datapres;
1095 struct ssp_response_iu *resp_iu;
6f231dda 1096
1097 /*
1098 * TODO: Any SDMA return code of other than 0 is bad
1099 * decode 0x003C0000 to determine SDMA status
1100 */
1101 switch (SCU_GET_COMPLETION_TL_STATUS(completion_code)) {
1102 case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_GOOD):
1103 scic_sds_request_set_status(sci_req,
1104 SCU_TASK_DONE_GOOD,
1105 SCI_SUCCESS);
1106 break;
1107
1108 case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_EARLY_RESP):
1109 {
1110 /*
1111 * There are times when the SCU hardware will return an early
1112 * response because the io request specified more data than is
1113 * returned by the target device (mode pages, inquiry data,
1114 * etc.). We must check the response stats to see if this is
1115 * truly a failed request or a good request that just got
1116 * completed early.
1117 */
1118 struct ssp_response_iu *resp = sci_req->response_buffer;
1119 ssize_t word_cnt = SSP_RESP_IU_MAX_SIZE / sizeof(u32);
1120
1121 sci_swab32_cpy(sci_req->response_buffer,
1122 sci_req->response_buffer,
1123 word_cnt);
6f231dda 1124
af5ae893 1125 if (resp->status == 0) {
6f231dda 1126 scic_sds_request_set_status(
1127 sci_req,
1128 SCU_TASK_DONE_GOOD,
1129 SCI_SUCCESS_IO_DONE_EARLY);
1130 } else {
1131 scic_sds_request_set_status(
e2023b87 1132 sci_req,
6f231dda 1133 SCU_TASK_DONE_CHECK_RESPONSE,
af5ae893 1134 SCI_FAILURE_IO_RESPONSE_VALID);
1135 }
1136 }
1137 break;
1138
1139 case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_CHECK_RESPONSE):
1140 {
1141 ssize_t word_cnt = SSP_RESP_IU_MAX_SIZE / sizeof(u32);
6f231dda 1142
1143 sci_swab32_cpy(sci_req->response_buffer,
1144 sci_req->response_buffer,
1145 word_cnt);
1146
1147 scic_sds_request_set_status(sci_req,
1148 SCU_TASK_DONE_CHECK_RESPONSE,
1149 SCI_FAILURE_IO_RESPONSE_VALID);
6f231dda 1150 break;
51a57cff 1151 }
1152
1153 case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_RESP_LEN_ERR):
1154 /*
1155 * / @todo With TASK_DONE_RESP_LEN_ERR is the response frame
1156 * guaranteed to be received before this completion status is
1157 * posted?
1158 */
1159 resp_iu = sci_req->response_buffer;
1160 datapres = resp_iu->datapres;
1161
1162 if ((datapres == 0x01) || (datapres == 0x02)) {
6f231dda 1163 scic_sds_request_set_status(
e2023b87 1164 sci_req,
6f231dda 1165 SCU_TASK_DONE_CHECK_RESPONSE,
1166 SCI_FAILURE_IO_RESPONSE_VALID);
1167 } else
6f231dda 1168 scic_sds_request_set_status(
af5ae893 1169 sci_req, SCU_TASK_DONE_GOOD, SCI_SUCCESS);
1170 break;
1171
1172 /* only stp device gets suspended. */
1173 case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_ACK_NAK_TO):
1174 case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_LL_PERR):
1175 case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_NAK_ERR):
1176 case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_DATA_LEN_ERR):
1177 case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_LL_ABORT_ERR):
1178 case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_XR_WD_LEN):
1179 case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_MAX_PLD_ERR):
1180 case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_UNEXP_RESP):
1181 case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_UNEXP_SDBFIS):
1182 case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_REG_ERR):
1183 case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_SDB_ERR):
e2023b87 1184 if (sci_req->protocol == SCIC_STP_PROTOCOL) {
6f231dda 1185 scic_sds_request_set_status(
e2023b87 1186 sci_req,
1187 SCU_GET_COMPLETION_TL_STATUS(completion_code) >>
1188 SCU_COMPLETION_TL_STATUS_SHIFT,
1189 SCI_FAILURE_REMOTE_DEVICE_RESET_REQUIRED);
1190 } else {
1191 scic_sds_request_set_status(
e2023b87 1192 sci_req,
1193 SCU_GET_COMPLETION_TL_STATUS(completion_code) >>
1194 SCU_COMPLETION_TL_STATUS_SHIFT,
1195 SCI_FAILURE_CONTROLLER_SPECIFIC_IO_ERR);
1196 }
1197 break;
1198
1199 /* both stp/ssp device gets suspended */
1200 case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_LF_ERR):
1201 case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_OPEN_REJECT_WRONG_DESTINATION):
1202 case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_OPEN_REJECT_RESERVED_ABANDON_1):
1203 case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_OPEN_REJECT_RESERVED_ABANDON_2):
1204 case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_OPEN_REJECT_RESERVED_ABANDON_3):
1205 case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_OPEN_REJECT_BAD_DESTINATION):
1206 case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_OPEN_REJECT_ZONE_VIOLATION):
1207 case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_OPEN_REJECT_STP_RESOURCES_BUSY):
1208 case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_OPEN_REJECT_PROTOCOL_NOT_SUPPORTED):
1209 case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_OPEN_REJECT_CONNECTION_RATE_NOT_SUPPORTED):
1210 scic_sds_request_set_status(
e2023b87 1211 sci_req,
1212 SCU_GET_COMPLETION_TL_STATUS(completion_code) >>
1213 SCU_COMPLETION_TL_STATUS_SHIFT,
1214 SCI_FAILURE_REMOTE_DEVICE_RESET_REQUIRED);
1215 break;
1216
1217 /* neither ssp nor stp gets suspended. */
1218 case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_NAK_CMD_ERR):
1219 case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_UNEXP_XR):
1220 case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_XR_IU_LEN_ERR):
1221 case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_SDMA_ERR):
1222 case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_OFFSET_ERR):
1223 case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_EXCESS_DATA):
1224 case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_SMP_RESP_TO_ERR):
1225 case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_SMP_UFI_ERR):
1226 case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_SMP_FRM_TYPE_ERR):
1227 case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_SMP_LL_RX_ERR):
1228 case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_UNEXP_DATA):
1229 case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_OPEN_FAIL):
1230 case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_VIIT_ENTRY_NV):
1231 case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_IIT_ENTRY_NV):
1232 case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_RNCNV_OUTBOUND):
1233 default:
1234 scic_sds_request_set_status(
e2023b87 1235 sci_req,
1236 SCU_GET_COMPLETION_TL_STATUS(completion_code) >>
1237 SCU_COMPLETION_TL_STATUS_SHIFT,
1238 SCI_FAILURE_CONTROLLER_SPECIFIC_IO_ERR);
1239 break;
1240 }
1241
1242 /*
1243 * TODO: This is probably wrong for ACK/NAK timeout conditions
1244 */
1245
1246 /* In all cases we will treat this as the completion of the IO req. */
1247 sci_base_state_machine_change_state(
1248 &sci_req->state_machine,
1249 SCI_BASE_REQUEST_STATE_COMPLETED);
1250 return SCI_SUCCESS;
1251}
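/*
 * Note (illustrative): the switch above compares the transport-layer status
 * field extracted by SCU_GET_COMPLETION_TL_STATUS() against SCU_TASK_DONE_*
 * codes in their shifted, in-register form; where a raw code is recorded in
 * the request it is shifted back down first, as in
 *
 *	SCU_GET_COMPLETION_TL_STATUS(completion_code) >>
 *		SCU_COMPLETION_TL_STATUS_SHIFT
 *
 * which recovers the plain SCU_TASK_DONE_* value.
 */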
1252
38aa74eb 1253/*
1254 * This method implements the action to be taken when an SCIC_SDS_IO_REQUEST_T
1255 * object receives a scic_sds_request_frame_handler() request. This method
1256 * first determines the frame type received. If this is a response frame then
1257 * the response data is copied to the io request response buffer for processing
1258 * at completion time. If the frame type is not a response buffer an error is
1259 * logged. enum sci_status SCI_SUCCESS SCI_FAILURE_INVALID_PARAMETER_VALUE
1260 */
1261static enum sci_status
1262scic_sds_request_started_state_frame_handler(struct scic_sds_request *sci_req,
1263 u32 frame_index)
1264{
1265 enum sci_status status;
1266 u32 *frame_header;
1267 struct ssp_frame_hdr ssp_hdr;
1268 ssize_t word_cnt;
6f231dda 1269
6f231dda 1270 status = scic_sds_unsolicited_frame_control_get_header(
e2023b87 1271 &(scic_sds_request_get_controller(sci_req)->uf_control),
6f231dda 1272 frame_index,
af5ae893 1273 (void **)&frame_header);
6f231dda 1274
1275 word_cnt = sizeof(struct ssp_frame_hdr) / sizeof(u32);
1276 sci_swab32_cpy(&ssp_hdr, frame_header, word_cnt);
1277
1278 if (ssp_hdr.frame_type == SSP_RESPONSE) {
af5ae893 1279 struct ssp_response_iu *resp_iu;
51a57cff 1280 ssize_t word_cnt = SSP_RESP_IU_MAX_SIZE / sizeof(u32);
1281
1282 status = scic_sds_unsolicited_frame_control_get_buffer(
e2023b87 1283 &(scic_sds_request_get_controller(sci_req)->uf_control),
6f231dda 1284 frame_index,
af5ae893 1285 (void **)&resp_iu);
6f231dda 1286
1287 sci_swab32_cpy(sci_req->response_buffer,
1288 resp_iu, word_cnt);
6f231dda 1289
af5ae893 1290 resp_iu = sci_req->response_buffer;
6f231dda 1291
1292 if ((resp_iu->datapres == 0x01) ||
1293 (resp_iu->datapres == 0x02)) {
6f231dda 1294 scic_sds_request_set_status(
e2023b87 1295 sci_req,
6f231dda 1296 SCU_TASK_DONE_CHECK_RESPONSE,
af5ae893 1297 SCI_FAILURE_CONTROLLER_SPECIFIC_IO_ERR);
1298 } else
1299 scic_sds_request_set_status(
1300 sci_req, SCU_TASK_DONE_GOOD, SCI_SUCCESS);
1301 } else {
6f231dda 1302 /* This was not a response frame why did it get forwarded? */
e2023b87 1303 dev_err(scic_to_dev(sci_req->owning_controller),
1304 "%s: SCIC IO Request 0x%p received unexpected "
1305 "frame %d type 0x%02x\n",
1306 __func__,
e2023b87 1307 sci_req,
6f231dda 1308 frame_index,
2d9c2240 1309 ssp_hdr.frame_type);
af5ae893 1310 }
1311
1312 /*
1313 * In any case we are done with this frame buffer return it to the
1314 * controller
1315 */
6f231dda 1316 scic_sds_controller_release_frame(
af5ae893 1317 sci_req->owning_controller, frame_index);
1318
1319 return SCI_SUCCESS;
1320}
1321
1322/*
1323 * *****************************************************************************
1324 * * COMPLETED STATE HANDLERS
1325 * ***************************************************************************** */
1326
1327
38aa74eb 1328/*
1329 * This method implements the action to be taken when an SCIC_SDS_IO_REQUEST_T
1330 * object receives a scic_sds_request_complete() request. This method frees up
1331 * any io request resources that have been allocated and transitions the
1332 * request to its final state. Consider stopping the state machine instead of
1333 * transitioning to the final state? enum sci_status SCI_SUCCESS
1334 */
1335static enum sci_status scic_sds_request_completed_state_complete_handler(
38aa74eb 1336 struct scic_sds_request *request)
6f231dda 1337{
38aa74eb 1338 if (request->was_tag_assigned_by_user != true) {
6f231dda 1339 scic_controller_free_io_tag(
38aa74eb 1340 request->owning_controller, request->io_tag);
1341 }
1342
38aa74eb 1343 if (request->saved_rx_frame_index != SCU_INVALID_FRAME_INDEX) {
6f231dda 1344 scic_sds_controller_release_frame(
38aa74eb 1345 request->owning_controller, request->saved_rx_frame_index);
1346 }
1347
1348 sci_base_state_machine_change_state(&request->state_machine,
1349 SCI_BASE_REQUEST_STATE_FINAL);
1350 return SCI_SUCCESS;
1351}
1352
1353/*
1354 * *****************************************************************************
1355 * * ABORTING STATE HANDLERS
1356 * ***************************************************************************** */
1357
38aa74eb 1358/*
1359 * This method implements the action to be taken when an SCIC_SDS_IO_REQUEST_T
1360 * object receives a scic_sds_request_terminate() request. This method is the
1361 * io request aborting state abort handler. On receipt of multiple
1362 * terminate requests the io request will transition to the completed state.
1363 * This should not happen in normal operation. enum sci_status SCI_SUCCESS
1364 */
1365static enum sci_status scic_sds_request_aborting_state_abort_handler(
38aa74eb 1366 struct scic_sds_request *request)
6f231dda 1367{
1368 sci_base_state_machine_change_state(&request->state_machine,
1369 SCI_BASE_REQUEST_STATE_COMPLETED);
1370 return SCI_SUCCESS;
1371}
1372
38aa74eb 1373/*
1374 * This method implements the action to be taken when an SCIC_SDS_IO_REQUEST_T
1375 * object receives a scic_sds_request_task_completion() request. This method
1376 * decodes the completion type waiting for the abort task complete
1377 * notification. When the abort task complete is received the io request
1378 * transitions to the completed state. enum sci_status SCI_SUCCESS
1379 */
1380static enum sci_status scic_sds_request_aborting_state_tc_completion_handler(
e2023b87 1381 struct scic_sds_request *sci_req,
1382 u32 completion_code)
1383{
1384 switch (SCU_GET_COMPLETION_TL_STATUS(completion_code)) {
1385 case (SCU_TASK_DONE_GOOD << SCU_COMPLETION_TL_STATUS_SHIFT):
1386 case (SCU_TASK_DONE_TASK_ABORT << SCU_COMPLETION_TL_STATUS_SHIFT):
1387 scic_sds_request_set_status(
e2023b87 1388 sci_req, SCU_TASK_DONE_TASK_ABORT, SCI_FAILURE_IO_TERMINATED
1389 );
1390
e2023b87 1391 sci_base_state_machine_change_state(&sci_req->state_machine,
38aa74eb 1392 SCI_BASE_REQUEST_STATE_COMPLETED);
1393 break;
1394
1395 default:
1396 /*
1397 * Unless we get some strange error wait for the task abort to complete
1398 * TODO: Should there be a state change for this completion? */
1399 break;
1400 }
1401
1402 return SCI_SUCCESS;
1403}
1404
38aa74eb 1405/*
1406 * This method implements the action to be taken when an SCIC_SDS_IO_REQUEST_T
1407 * object receives a scic_sds_request_frame_handler() request. This method
1408 * discards the unsolicited frame since we are waiting for the abort task
1409 * completion. enum sci_status SCI_SUCCESS
1410 */
1411static enum sci_status scic_sds_request_aborting_state_frame_handler(
e2023b87 1412 struct scic_sds_request *sci_req,
1413 u32 frame_index)
1414{
1415 /* TODO: Is it even possible to get an unsolicited frame in the aborting state? */
1416
1417 scic_sds_controller_release_frame(
e2023b87 1418 sci_req->owning_controller, frame_index);
1419
1420 return SCI_SUCCESS;
1421}
1422
35173d57 1423static const struct scic_sds_io_request_state_handler scic_sds_request_state_handler_table[] = {
6f231dda 1424 [SCI_BASE_REQUEST_STATE_INITIAL] = {
1425 },
1426 [SCI_BASE_REQUEST_STATE_CONSTRUCTED] = {
1427 .start_handler = scic_sds_request_constructed_state_start_handler,
1428 .abort_handler = scic_sds_request_constructed_state_abort_handler,
1429 },
1430 [SCI_BASE_REQUEST_STATE_STARTED] = {
38aa74eb 1431 .abort_handler = scic_sds_request_started_state_abort_handler,
38aa74eb 1432 .tc_completion_handler = scic_sds_request_started_state_tc_completion_handler,
38aa74eb 1433 .frame_handler = scic_sds_request_started_state_frame_handler,
1434 },
1435 [SCI_BASE_REQUEST_STATE_COMPLETED] = {
38aa74eb 1436 .complete_handler = scic_sds_request_completed_state_complete_handler,
1437 },
1438 [SCI_BASE_REQUEST_STATE_ABORTING] = {
38aa74eb 1439 .abort_handler = scic_sds_request_aborting_state_abort_handler,
38aa74eb 1440 .tc_completion_handler = scic_sds_request_aborting_state_tc_completion_handler,
38aa74eb 1441 .frame_handler = scic_sds_request_aborting_state_frame_handler,
1442 },
1443 [SCI_BASE_REQUEST_STATE_FINAL] = {
1444 },
1445};
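/*
 * Sketch (illustrative): each state-entry routine below installs the matching
 * row of this table via SET_STATE_HANDLER(), so a transition such as
 *
 *	sci_base_state_machine_change_state(&sci_req->state_machine,
 *					    SCI_BASE_REQUEST_STATE_STARTED);
 *
 * leaves sci_req->state_handlers pointing at the STARTED row, and the
 * dispatch helpers earlier in this file route through that row.
 */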
1446
1447/**
1448 * scic_sds_request_initial_state_enter() -
1449 * @object: This parameter specifies the base object for which the state
1450 * transition is occurring.
1451 *
1452 * This method implements the actions taken when entering the
1453 * SCI_BASE_REQUEST_STATE_INITIAL state. This state is entered when the initial
1454 * base request is constructed. Entry into the initial state sets all handlers
1455 * for the io request object to their default handlers. none
1456 */
9a0fff7b 1457static void scic_sds_request_initial_state_enter(void *object)
6f231dda 1458{
890cae9b 1459 struct scic_sds_request *sci_req = object;
1460
1461 SET_STATE_HANDLER(
e2023b87 1462 sci_req,
1463 scic_sds_request_state_handler_table,
1464 SCI_BASE_REQUEST_STATE_INITIAL
1465 );
1466}
1467
1468/**
1469 * scic_sds_request_constructed_state_enter() -
1470 * @object: The io request object that is to enter the constructed state.
1471 *
1472 * This method implements the actions taken when entering the
1473 * SCI_BASE_REQUEST_STATE_CONSTRUCTED state. The method sets the state handlers
1474 * for the constructed state. none
1475 */
9a0fff7b 1476static void scic_sds_request_constructed_state_enter(void *object)
6f231dda 1477{
890cae9b 1478 struct scic_sds_request *sci_req = object;
1479
1480 SET_STATE_HANDLER(
e2023b87 1481 sci_req,
6f231dda
DW
1482 scic_sds_request_state_handler_table,
1483 SCI_BASE_REQUEST_STATE_CONSTRUCTED
1484 );
1485}
1486
1487/**
1488 * scic_sds_request_started_state_enter() -
1489 * @object: This parameter specifies the base object for which the state
9a0fff7b 1490 * transition is occurring. This is cast into a SCIC_SDS_IO_REQUEST object.
6f231dda
DW
1491 *
1492 * This method implements the actions taken when entering the
1493 * SCI_BASE_REQUEST_STATE_STARTED state. If the io request object type is a
1494 * SCSI Task request we must enter the started substate machine. none
1495 */
9a0fff7b 1496static void scic_sds_request_started_state_enter(void *object)
6f231dda 1497{
890cae9b 1498 struct scic_sds_request *sci_req = object;
1499
1500 SET_STATE_HANDLER(
e2023b87 1501 sci_req,
6f231dda
DW
1502 scic_sds_request_state_handler_table,
1503 SCI_BASE_REQUEST_STATE_STARTED
1504 );
1505
1506 /*
1507 * Most of the request state machines have a started substate machine so
1508 * start its execution on the entry to the started state. */
1509 if (sci_req->has_started_substate_machine == true)
1510 sci_base_state_machine_start(&sci_req->started_substate_machine);
1511}
1512
1513/**
1514 * scic_sds_request_started_state_exit() -
1515 * @object: This parameter specifies the base object for which the state
9a0fff7b 1516 * transition is occurring. This object is cast into a SCIC_SDS_IO_REQUEST
6f231dda
DW
1517 * object.
1518 *
1519 * This method implements the actions taken when exiting the
1520 * SCI_BASE_REQUEST_STATE_STARTED state. For task requests the action will be
1521 * to stop the started substate machine. none
1522 */
9a0fff7b 1523static void scic_sds_request_started_state_exit(void *object)
6f231dda 1524{
890cae9b 1525 struct scic_sds_request *sci_req = object;
6f231dda 1526
e2023b87
DJ
1527 if (sci_req->has_started_substate_machine == true)
1528 sci_base_state_machine_stop(&sci_req->started_substate_machine);
6f231dda
DW
1529}
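
/*
 * Illustrative sketch (not part of the driver): the started substate machine
 * only runs for requests that opt in before reaching the started state. A
 * hedged example of such opt-in, using the task-management substate table
 * that scic_task_request_construct() wires up later in this file:
 *
 *	sci_req->has_started_substate_machine = true;
 *	sci_base_state_machine_construct(
 *		&sci_req->started_substate_machine, sci_req,
 *		scic_sds_io_request_started_task_mgmt_substate_table,
 *		SCIC_SDS_IO_REQUEST_STARTED_TASK_MGMT_SUBSTATE_AWAIT_TC_COMPLETION);
 *
 * With that flag set, scic_sds_request_started_state_enter() starts the
 * substate machine and scic_sds_request_started_state_exit() stops it again.
 */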

/**
 * scic_sds_request_completed_state_enter() -
 * @object: This parameter specifies the base object for which the state
 *    transition is occurring. This object is cast into a struct
 *    scic_sds_request object.
 *
 * This method implements the actions taken when entering the
 * SCI_BASE_REQUEST_STATE_COMPLETED state. This state is entered when the
 * request has completed. The method will decode the request completion
 * status and convert it to an enum sci_status to return in the completion
 * callback function.
 */
static void scic_sds_request_completed_state_enter(void *object)
{
	struct scic_sds_request *sci_req = object;
	struct scic_sds_controller *scic =
		scic_sds_request_get_controller(sci_req);
	struct isci_host *ihost = scic->ihost;
	struct isci_request *ireq = sci_req->ireq;

	SET_STATE_HANDLER(sci_req,
			  scic_sds_request_state_handler_table,
			  SCI_BASE_REQUEST_STATE_COMPLETED);

	/* Tell the SCI_USER that the IO request is complete */
	if (sci_req->is_task_management_request == false)
		isci_request_io_request_complete(ihost,
						 ireq,
						 sci_req->sci_status);
	else
		isci_task_request_complete(ihost, ireq, sci_req->sci_status);
}
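
/*
 * Illustrative sketch (not part of the driver): this enter routine fires when
 * a state handler moves the request into the completed state. A hedged
 * approximation of what such a handler does once the hardware reports the
 * task context done (the exact call sites are the tc_completion handlers
 * earlier in this file; SCU_TASK_DONE_GOOD is used here only as an example
 * completion code):
 *
 *	sci_req->scu_status = SCU_TASK_DONE_GOOD;
 *	sci_req->sci_status = SCI_SUCCESS;
 *	sci_base_state_machine_change_state(&sci_req->state_machine,
 *					    SCI_BASE_REQUEST_STATE_COMPLETED);
 *
 * The callbacks invoked above then hand sci_status back to the isci request
 * or task layer.
 */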

/**
 * scic_sds_request_aborting_state_enter() -
 * @object: This parameter specifies the base object for which the state
 *    transition is occurring. This object is cast into a struct
 *    scic_sds_request object.
 *
 * This method implements the actions taken when entering the
 * SCI_BASE_REQUEST_STATE_ABORTING state.
 */
static void scic_sds_request_aborting_state_enter(void *object)
{
	struct scic_sds_request *sci_req = object;

	/* Setting the abort bit in the Task Context is required by the silicon. */
	sci_req->task_context_buffer->abort = 1;

	SET_STATE_HANDLER(
		sci_req,
		scic_sds_request_state_handler_table,
		SCI_BASE_REQUEST_STATE_ABORTING
		);
}

/**
 * scic_sds_request_final_state_enter() -
 * @object: This parameter specifies the base object for which the state
 *    transition is occurring. This is cast into a struct scic_sds_request
 *    object.
 *
 * This method implements the actions taken when entering the
 * SCI_BASE_REQUEST_STATE_FINAL state. The only action required is to put the
 * state handlers in place.
 */
static void scic_sds_request_final_state_enter(void *object)
{
	struct scic_sds_request *sci_req = object;

	SET_STATE_HANDLER(
		sci_req,
		scic_sds_request_state_handler_table,
		SCI_BASE_REQUEST_STATE_FINAL
		);
}

static const struct sci_base_state scic_sds_request_state_table[] = {
	[SCI_BASE_REQUEST_STATE_INITIAL] = {
		.enter_state = scic_sds_request_initial_state_enter,
	},
	[SCI_BASE_REQUEST_STATE_CONSTRUCTED] = {
		.enter_state = scic_sds_request_constructed_state_enter,
	},
	[SCI_BASE_REQUEST_STATE_STARTED] = {
		.enter_state = scic_sds_request_started_state_enter,
		.exit_state = scic_sds_request_started_state_exit
	},
	[SCI_BASE_REQUEST_STATE_COMPLETED] = {
		.enter_state = scic_sds_request_completed_state_enter,
	},
	[SCI_BASE_REQUEST_STATE_ABORTING] = {
		.enter_state = scic_sds_request_aborting_state_enter,
	},
	[SCI_BASE_REQUEST_STATE_FINAL] = {
		.enter_state = scic_sds_request_final_state_enter,
	},
};
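
/*
 * Illustrative note (not part of the driver): only the STARTED entry above
 * pairs an exit hook with its enter hook; every other state needs just an
 * enter hook. A request that completes normally therefore walks the table as
 *
 *	INITIAL -> CONSTRUCTED -> STARTED -> COMPLETED -> FINAL
 *
 * while an aborted request detours through ABORTING before completing, with
 * scic_sds_request_started_state_exit() running once on the transition out
 * of STARTED in either case.
 */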

static void scic_sds_general_request_construct(struct scic_sds_controller *scic,
					       struct scic_sds_remote_device *sci_dev,
					       u16 io_tag,
					       void *user_io_request_object,
					       struct scic_sds_request *sci_req)
{
	sci_base_state_machine_construct(&sci_req->state_machine, sci_req,
			scic_sds_request_state_table, SCI_BASE_REQUEST_STATE_INITIAL);
	sci_base_state_machine_start(&sci_req->state_machine);

	sci_req->io_tag = io_tag;
	sci_req->user_request = user_io_request_object;
	sci_req->owning_controller = scic;
	sci_req->target_device = sci_dev;
	sci_req->has_started_substate_machine = false;
	sci_req->protocol = SCIC_NO_PROTOCOL;
	sci_req->saved_rx_frame_index = SCU_INVALID_FRAME_INDEX;
	sci_req->device_sequence = scic_sds_remote_device_get_sequence(sci_dev);

	sci_req->sci_status = SCI_SUCCESS;
	sci_req->scu_status = 0;
	sci_req->post_context = 0xFFFFFFFF;

	sci_req->is_task_management_request = false;

	if (io_tag == SCI_CONTROLLER_INVALID_IO_TAG) {
		sci_req->was_tag_assigned_by_user = false;
		sci_req->task_context_buffer = NULL;
	} else {
		sci_req->was_tag_assigned_by_user = true;

		sci_req->task_context_buffer =
			scic_sds_controller_get_task_context_buffer(scic, io_tag);
	}
}

enum sci_status
scic_io_request_construct(struct scic_sds_controller *scic,
			  struct scic_sds_remote_device *sci_dev,
			  u16 io_tag,
			  void *user_req,
			  struct scic_sds_request *sci_req,
			  struct scic_sds_request **new_sci_req)
{
	struct domain_device *dev = sci_dev_to_domain(sci_dev);
	enum sci_status status = SCI_SUCCESS;

	/* Build the common part of the request */
	scic_sds_general_request_construct(scic,
					   sci_dev,
					   io_tag,
					   user_req,
					   sci_req);

	if (sci_dev->rnc.remote_node_index ==
	    SCIC_SDS_REMOTE_NODE_CONTEXT_INVALID_INDEX)
		return SCI_FAILURE_INVALID_REMOTE_DEVICE;

	if (dev->dev_type == SAS_END_DEV)
		scic_sds_ssp_io_request_assign_buffers(sci_req);
	else if ((dev->dev_type == SATA_DEV) ||
		 (dev->tproto & SAS_PROTOCOL_STP)) {
		scic_sds_stp_request_assign_buffers(sci_req);
		memset(sci_req->command_buffer,
		       0,
		       sizeof(struct host_to_dev_fis));
	} else if (dev_is_expander(dev)) {
		scic_sds_smp_request_assign_buffers(sci_req);
		memset(sci_req->command_buffer, 0, sizeof(struct smp_req));
	} else
		status = SCI_FAILURE_UNSUPPORTED_PROTOCOL;

	if (status == SCI_SUCCESS) {
		memset(sci_req->task_context_buffer,
		       0,
		       SCI_FIELD_OFFSET(struct scu_task_context, sgl_pair_ab));
		*new_sci_req = sci_req;
	}

	return status;
}
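
/*
 * Usage sketch (illustrative only; the real call sites live in the isci
 * request-construction path, and the variable names here are assumptions):
 *
 *	struct scic_sds_request *new_req;
 *	enum sci_status status;
 *
 *	status = scic_io_request_construct(scic, sci_dev,
 *					   SCI_CONTROLLER_INVALID_IO_TAG,
 *					   user_req, sci_req, &new_req);
 *	if (status != SCI_SUCCESS)
 *		return status;
 *
 * Passing SCI_CONTROLLER_INVALID_IO_TAG leaves was_tag_assigned_by_user
 * false so the controller can assign a tag when the request is started.
 */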

enum sci_status scic_task_request_construct(struct scic_sds_controller *scic,
					    struct scic_sds_remote_device *sci_dev,
					    u16 io_tag,
					    void *user_io_request_object,
					    struct scic_sds_request *sci_req,
					    struct scic_sds_request **new_sci_req)
{
	struct domain_device *dev = sci_dev_to_domain(sci_dev);
	enum sci_status status = SCI_SUCCESS;

	/* Build the common part of the request */
	scic_sds_general_request_construct(scic, sci_dev, io_tag,
					   user_io_request_object,
					   sci_req);

	if (dev->dev_type == SAS_END_DEV) {
		scic_sds_ssp_task_request_assign_buffers(sci_req);

		sci_req->has_started_substate_machine = true;

		/* Construct the started sub-state machine. */
		sci_base_state_machine_construct(
			&sci_req->started_substate_machine,
			sci_req,
			scic_sds_io_request_started_task_mgmt_substate_table,
			SCIC_SDS_IO_REQUEST_STARTED_TASK_MGMT_SUBSTATE_AWAIT_TC_COMPLETION
			);
	} else if (dev->dev_type == SATA_DEV || (dev->tproto & SAS_PROTOCOL_STP))
		scic_sds_stp_request_assign_buffers(sci_req);
	else
		status = SCI_FAILURE_UNSUPPORTED_PROTOCOL;

	if (status == SCI_SUCCESS) {
		sci_req->is_task_management_request = true;
		memset(sci_req->task_context_buffer, 0, sizeof(struct scu_task_context));
		*new_sci_req = sci_req;
	}

	return status;
}
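
/*
 * Usage sketch (illustrative only): task management construction mirrors the
 * I/O path but additionally arms the started substate machine for SSP
 * targets. A hedged example, where tmf_cookie stands in for whatever object
 * the caller passes as user_io_request_object:
 *
 *	status = scic_task_request_construct(scic, sci_dev, io_tag,
 *					     tmf_cookie, sci_req, &new_req);
 *
 * On success, is_task_management_request is true, so completion is reported
 * through isci_task_request_complete() rather than
 * isci_request_io_request_complete().
 */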