2 * This file is provided under a dual BSD/GPLv2 license. When using or
3 * redistributing this file, you may do so under either license.
7 * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved.
9 * This program is free software; you can redistribute it and/or modify
10 * it under the terms of version 2 of the GNU General Public License as
11 * published by the Free Software Foundation.
13 * This program is distributed in the hope that it will be useful, but
14 * WITHOUT ANY WARRANTY; without even the implied warranty of
15 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
16 * General Public License for more details.
18 * You should have received a copy of the GNU General Public License
19 * along with this program; if not, write to the Free Software
20 * Foundation, Inc., 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
21 * The full GNU General Public License is included in this distribution
22 * in the file called LICENSE.GPL.
26 * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved.
27 * All rights reserved.
29 * Redistribution and use in source and binary forms, with or without
30 * modification, are permitted provided that the following conditions
33 * * Redistributions of source code must retain the above copyright
34 * notice, this list of conditions and the following disclaimer.
35 * * Redistributions in binary form must reproduce the above copyright
36 * notice, this list of conditions and the following disclaimer in
37 * the documentation and/or other materials provided with the
39 * * Neither the name of Intel Corporation nor the names of its
40 * contributors may be used to endorse or promote products derived
41 * from this software without specific prior written permission.
43 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
44 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
45 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
46 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
47 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
48 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
49 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
50 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
51 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
52 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
53 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
58 #include "sci_base_state.h"
59 #include "sci_base_state_machine.h"
60 #include "scic_io_request.h"
61 #include "scic_sds_controller.h"
62 #include "remote_device.h"
63 #include "scic_sds_request.h"
64 #include "scic_sds_stp_pio_request.h"
65 #include "scic_sds_stp_request.h"
66 #include "scic_sds_unsolicited_frame_control.h"
67 #include "sci_environment.h"
69 #include "scu_completion_codes.h"
70 #include "scu_event_codes.h"
71 #include "scu_task_context.h"
/**
 * scic_sds_stp_request_get_h2d_reg_buffer() -
 *
 * This macro returns the address of the stp h2d reg fis buffer in the io
 * request memory.  The H2D FIS is laid out immediately after the
 * struct scic_sds_stp_request header in the request allocation.
 */
#define scic_sds_stp_request_get_h2d_reg_buffer(memory)	\
	((struct host_to_dev_fis *)(\
	((char *)(memory)) + sizeof(struct scic_sds_stp_request) \
	))
/**
 * scic_sds_stp_request_get_response_buffer() -
 *
 * This macro returns the address of the ssp response iu buffer in the io
 * request memory.  It sits directly after the H2D register FIS buffer.
 */
#define scic_sds_stp_request_get_response_buffer(memory) \
	((struct dev_to_host_fis *)(\
	((char *)(scic_sds_stp_request_get_h2d_reg_buffer(memory))) \
	+ sizeof(struct host_to_dev_fis) \
	))
/**
 * scic_sds_stp_request_get_task_context_buffer() -
 *
 * This macro returns the address of the task context buffer in the io request
 * memory.  It follows the response buffer region (SSP_RESP_IU_MAX_SIZE bytes).
 */
#define scic_sds_stp_request_get_task_context_buffer(memory) \
	((struct scu_task_context *)(\
	((char *)(scic_sds_stp_request_get_response_buffer(memory))) \
	+ SSP_RESP_IU_MAX_SIZE \
	))
111 * This method return the memory space required for STP PIO requests. u32
113 u32
scic_sds_stp_request_get_object_size(void)
115 return sizeof(struct scic_sds_stp_request
)
116 + sizeof(struct host_to_dev_fis
)
117 + sizeof(struct dev_to_host_fis
)
118 + sizeof(struct scu_task_context
)
122 void scic_sds_stp_request_assign_buffers(struct scic_sds_request
*sci_req
)
124 struct scic_sds_stp_request
*stp_req
= &sci_req
->stp
.req
;
126 sci_req
->command_buffer
= scic_sds_stp_request_get_h2d_reg_buffer(stp_req
);
127 sci_req
->response_buffer
= scic_sds_stp_request_get_response_buffer(stp_req
);
129 if (sci_req
->was_tag_assigned_by_user
== false) {
130 sci_req
->task_context_buffer
=
131 scic_sds_stp_request_get_task_context_buffer(stp_req
);
132 sci_req
->task_context_buffer
= PTR_ALIGN(sci_req
->task_context_buffer
,
138 * This method is will fill in the SCU Task Context for any type of SATA
139 * request. This is called from the various SATA constructors.
140 * @sci_req: The general IO request object which is to be used in
141 * constructing the SCU task context.
142 * @task_context: The buffer pointer for the SCU task context which is being
145 * The general io request construction is complete. The buffer assignment for
146 * the command buffer is complete. none Revisit task context construction to
147 * determine what is common for SSP/SMP/STP task context structures.
149 static void scu_sata_reqeust_construct_task_context(
150 struct scic_sds_request
*sds_request
,
151 struct scu_task_context
*task_context
)
154 struct scic_sds_controller
*controller
;
155 struct scic_sds_remote_device
*target_device
;
156 struct scic_sds_port
*target_port
;
158 controller
= scic_sds_request_get_controller(sds_request
);
159 target_device
= scic_sds_request_get_device(sds_request
);
160 target_port
= scic_sds_request_get_port(sds_request
);
162 /* Fill in the TC with the its required data */
163 task_context
->abort
= 0;
164 task_context
->priority
= SCU_TASK_PRIORITY_NORMAL
;
165 task_context
->initiator_request
= 1;
166 task_context
->connection_rate
= target_device
->connection_rate
;
167 task_context
->protocol_engine_index
=
168 scic_sds_controller_get_protocol_engine_group(controller
);
169 task_context
->logical_port_index
=
170 scic_sds_port_get_index(target_port
);
171 task_context
->protocol_type
= SCU_TASK_CONTEXT_PROTOCOL_STP
;
172 task_context
->valid
= SCU_TASK_CONTEXT_VALID
;
173 task_context
->context_type
= SCU_TASK_CONTEXT_TYPE
;
175 task_context
->remote_node_index
=
176 scic_sds_remote_device_get_index(sds_request
->target_device
);
177 task_context
->command_code
= 0;
179 task_context
->link_layer_control
= 0;
180 task_context
->do_not_dma_ssp_good_response
= 1;
181 task_context
->strict_ordering
= 0;
182 task_context
->control_frame
= 0;
183 task_context
->timeout_enable
= 0;
184 task_context
->block_guard_enable
= 0;
186 task_context
->address_modifier
= 0;
187 task_context
->task_phase
= 0x01;
189 task_context
->ssp_command_iu_length
=
190 (sizeof(struct host_to_dev_fis
) - sizeof(u32
)) / sizeof(u32
);
192 /* Set the first word of the H2D REG FIS */
193 task_context
->type
.words
[0] = *(u32
*)sds_request
->command_buffer
;
195 if (sds_request
->was_tag_assigned_by_user
) {
197 * Build the task context now since we have already read
200 sds_request
->post_context
=
201 (SCU_CONTEXT_COMMAND_REQUEST_TYPE_POST_TC
|
202 (scic_sds_controller_get_protocol_engine_group(
204 SCU_CONTEXT_COMMAND_PROTOCOL_ENGINE_GROUP_SHIFT
) |
205 (scic_sds_port_get_index(target_port
) <<
206 SCU_CONTEXT_COMMAND_LOGICAL_PORT_SHIFT
) |
207 scic_sds_io_tag_get_index(sds_request
->io_tag
));
210 * Build the task context now since we have already read
212 * I/O tag index is not assigned because we have to wait
213 * until we get a TCi.
215 sds_request
->post_context
=
216 (SCU_CONTEXT_COMMAND_REQUEST_TYPE_POST_TC
|
217 (scic_sds_controller_get_protocol_engine_group(
219 SCU_CONTEXT_COMMAND_PROTOCOL_ENGINE_GROUP_SHIFT
) |
220 (scic_sds_port_get_index(target_port
) <<
221 SCU_CONTEXT_COMMAND_LOGICAL_PORT_SHIFT
));
225 * Copy the physical address for the command buffer to the SCU Task
226 * Context. We must offset the command buffer by 4 bytes because the
227 * first 4 bytes are transfered in the body of the TC.
230 scic_io_request_get_dma_addr(sds_request
,
231 (char *)sds_request
->
235 task_context
->command_iu_upper
= upper_32_bits(dma_addr
);
236 task_context
->command_iu_lower
= lower_32_bits(dma_addr
);
238 /* SATA Requests do not have a response buffer */
239 task_context
->response_iu_upper
= 0;
240 task_context
->response_iu_lower
= 0;
247 * This method will perform any general sata request construction. What part of
248 * SATA IO request construction is general? none
250 static void scic_sds_stp_non_ncq_request_construct(
251 struct scic_sds_request
*sci_req
)
253 sci_req
->has_started_substate_machine
= true;
258 * @sci_req: This parameter specifies the request to be constructed as an
260 * @optimized_task_type: This parameter specifies whether the request is to be
261 * an UDMA request or a NCQ request. - A value of 0 indicates UDMA. - A
262 * value of 1 indicates NCQ.
264 * This method will perform request construction common to all types of STP
265 * requests that are optimized by the silicon (i.e. UDMA, NCQ). This method
266 * returns an indication as to whether the construction was successful.
268 static void scic_sds_stp_optimized_request_construct(struct scic_sds_request
*sci_req
,
269 u8 optimized_task_type
,
271 enum dma_data_direction dir
)
273 struct scu_task_context
*task_context
= sci_req
->task_context_buffer
;
275 /* Build the STP task context structure */
276 scu_sata_reqeust_construct_task_context(sci_req
, task_context
);
278 /* Copy over the SGL elements */
279 scic_sds_request_build_sgl(sci_req
);
281 /* Copy over the number of bytes to be transfered */
282 task_context
->transfer_length_bytes
= len
;
284 if (dir
== DMA_TO_DEVICE
) {
286 * The difference between the DMA IN and DMA OUT request task type
287 * values are consistent with the difference between FPDMA READ
288 * and FPDMA WRITE values. Add the supplied task type parameter
289 * to this difference to set the task type properly for this
290 * DATA OUT (WRITE) case. */
291 task_context
->task_type
= optimized_task_type
+ (SCU_TASK_TYPE_DMA_OUT
292 - SCU_TASK_TYPE_DMA_IN
);
295 * For the DATA IN (READ) case, simply save the supplied
296 * optimized task type. */
297 task_context
->task_type
= optimized_task_type
;
303 * @sci_req: This parameter specifies the request to be constructed.
305 * This method will construct the STP UDMA request and its associated TC data.
306 * This method returns an indication as to whether the construction was
307 * successful. SCI_SUCCESS Currently this method always returns this value.
309 enum sci_status
scic_sds_stp_ncq_request_construct(struct scic_sds_request
*sci_req
,
311 enum dma_data_direction dir
)
313 scic_sds_stp_optimized_request_construct(sci_req
,
314 SCU_TASK_TYPE_FPDMAQ_READ
,
320 * scu_stp_raw_request_construct_task_context -
321 * @sci_req: This parameter specifies the STP request object for which to
322 * construct a RAW command frame task context.
323 * @task_context: This parameter specifies the SCU specific task context buffer
326 * This method performs the operations common to all SATA/STP requests
327 * utilizing the raw frame method. none
329 static void scu_stp_raw_request_construct_task_context(
330 struct scic_sds_stp_request
*stp_req
,
331 struct scu_task_context
*task_context
)
333 struct scic_sds_request
*sci_req
= to_sci_req(stp_req
);
335 scu_sata_reqeust_construct_task_context(sci_req
, task_context
);
337 task_context
->control_frame
= 0;
338 task_context
->priority
= SCU_TASK_PRIORITY_NORMAL
;
339 task_context
->task_type
= SCU_TASK_TYPE_SATA_RAW_FRAME
;
340 task_context
->type
.stp
.fis_type
= FIS_REGH2D
;
341 task_context
->transfer_length_bytes
= sizeof(struct host_to_dev_fis
) - sizeof(u32
);
344 void scic_stp_io_request_set_ncq_tag(
345 struct scic_sds_request
*req
,
349 * @note This could be made to return an error to the user if the user
350 * attempts to set the NCQ tag in the wrong state.
352 req
->task_context_buffer
->type
.stp
.ncq_tag
= ncq_tag
;
356 void *scic_stp_io_request_get_h2d_reg_address(
357 struct scic_sds_request
*req
)
359 return req
->command_buffer
;
363 void *scic_stp_io_request_get_d2h_reg_address(struct scic_sds_request
*sci_req
)
365 struct scic_sds_stp_request
*stp_req
= &sci_req
->stp
.req
;
367 return &stp_req
->d2h_reg_fis
;
374 * Get the next SGL element from the request. - Check on which SGL element pair
375 * we are working - if working on SLG pair element A - advance to element B -
376 * else - check to see if there are more SGL element pairs for this IO request
377 * - if there are more SGL element pairs - advance to the next pair and return
378 * element A struct scu_sgl_element*
380 static struct scu_sgl_element
*scic_sds_stp_request_pio_get_next_sgl(struct scic_sds_stp_request
*stp_req
)
382 struct scu_sgl_element
*current_sgl
;
383 struct scic_sds_request
*sci_req
= to_sci_req(stp_req
);
384 struct scic_sds_request_pio_sgl
*pio_sgl
= &stp_req
->type
.pio
.request_current
;
386 if (pio_sgl
->sgl_set
== SCU_SGL_ELEMENT_PAIR_A
) {
387 if (pio_sgl
->sgl_pair
->B
.address_lower
== 0 &&
388 pio_sgl
->sgl_pair
->B
.address_upper
== 0) {
391 pio_sgl
->sgl_set
= SCU_SGL_ELEMENT_PAIR_B
;
392 current_sgl
= &pio_sgl
->sgl_pair
->B
;
395 if (pio_sgl
->sgl_pair
->next_pair_lower
== 0 &&
396 pio_sgl
->sgl_pair
->next_pair_upper
== 0) {
401 phys_addr
= pio_sgl
->sgl_pair
->next_pair_upper
;
403 phys_addr
|= pio_sgl
->sgl_pair
->next_pair_lower
;
405 pio_sgl
->sgl_pair
= scic_request_get_virt_addr(sci_req
, phys_addr
);
406 pio_sgl
->sgl_set
= SCU_SGL_ELEMENT_PAIR_A
;
407 current_sgl
= &pio_sgl
->sgl_pair
->A
;
419 * This method processes a TC completion. The expected TC completion is for
420 * the transmission of the H2D register FIS containing the SATA/STP non-data
421 * request. This method always successfully processes the TC completion.
422 * SCI_SUCCESS This value is always returned.
424 static enum sci_status
scic_sds_stp_request_non_data_await_h2d_tc_completion_handler(
425 struct scic_sds_request
*sci_req
,
428 switch (SCU_GET_COMPLETION_TL_STATUS(completion_code
)) {
429 case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_GOOD
):
430 scic_sds_request_set_status(
431 sci_req
, SCU_TASK_DONE_GOOD
, SCI_SUCCESS
434 sci_base_state_machine_change_state(
435 &sci_req
->started_substate_machine
,
436 SCIC_SDS_STP_REQUEST_STARTED_NON_DATA_AWAIT_D2H_SUBSTATE
442 * All other completion status cause the IO to be complete. If a NAK
443 * was received, then it is up to the user to retry the request. */
444 scic_sds_request_set_status(
446 SCU_NORMALIZE_COMPLETION_STATUS(completion_code
),
447 SCI_FAILURE_CONTROLLER_SPECIFIC_IO_ERR
450 sci_base_state_machine_change_state(
451 &sci_req
->state_machine
, SCI_BASE_REQUEST_STATE_COMPLETED
);
460 * @request: This parameter specifies the request for which a frame has been
462 * @frame_index: This parameter specifies the index of the frame that has been
465 * This method processes frames received from the target while waiting for a
466 * device to host register FIS. If a non-register FIS is received during this
467 * time, it is treated as a protocol violation from an IO perspective. Indicate
468 * if the received frame was processed successfully.
470 static enum sci_status
scic_sds_stp_request_non_data_await_d2h_frame_handler(
471 struct scic_sds_request
*sci_req
,
474 enum sci_status status
;
475 struct dev_to_host_fis
*frame_header
;
477 struct scic_sds_stp_request
*stp_req
= &sci_req
->stp
.req
;
478 struct scic_sds_controller
*scic
= sci_req
->owning_controller
;
480 status
= scic_sds_unsolicited_frame_control_get_header(&scic
->uf_control
,
482 (void **)&frame_header
);
484 if (status
!= SCI_SUCCESS
) {
485 dev_err(scic_to_dev(sci_req
->owning_controller
),
486 "%s: SCIC IO Request 0x%p could not get frame header "
487 "for frame index %d, status %x\n",
488 __func__
, stp_req
, frame_index
, status
);
493 switch (frame_header
->fis_type
) {
495 scic_sds_unsolicited_frame_control_get_buffer(&scic
->uf_control
,
497 (void **)&frame_buffer
);
499 scic_sds_controller_copy_sata_response(&stp_req
->d2h_reg_fis
,
503 /* The command has completed with error */
504 scic_sds_request_set_status(sci_req
, SCU_TASK_DONE_CHECK_RESPONSE
,
505 SCI_FAILURE_IO_RESPONSE_VALID
);
509 dev_warn(scic_to_dev(scic
),
510 "%s: IO Request:0x%p Frame Id:%d protocol "
511 "violation occurred\n", __func__
, stp_req
,
514 scic_sds_request_set_status(sci_req
, SCU_TASK_DONE_UNEXP_FIS
,
515 SCI_FAILURE_PROTOCOL_VIOLATION
);
519 sci_base_state_machine_change_state(&sci_req
->state_machine
,
520 SCI_BASE_REQUEST_STATE_COMPLETED
);
522 /* Frame has been decoded return it to the controller */
523 scic_sds_controller_release_frame(scic
, frame_index
);
528 /* --------------------------------------------------------------------------- */
530 static const struct scic_sds_io_request_state_handler scic_sds_stp_request_started_non_data_substate_handler_table
[] = {
531 [SCIC_SDS_STP_REQUEST_STARTED_NON_DATA_AWAIT_H2D_COMPLETION_SUBSTATE
] = {
532 .abort_handler
= scic_sds_request_started_state_abort_handler
,
533 .tc_completion_handler
= scic_sds_stp_request_non_data_await_h2d_tc_completion_handler
,
535 [SCIC_SDS_STP_REQUEST_STARTED_NON_DATA_AWAIT_D2H_SUBSTATE
] = {
536 .abort_handler
= scic_sds_request_started_state_abort_handler
,
537 .frame_handler
= scic_sds_stp_request_non_data_await_d2h_frame_handler
,
541 static void scic_sds_stp_request_started_non_data_await_h2d_completion_enter(
544 struct scic_sds_request
*sci_req
= object
;
548 scic_sds_stp_request_started_non_data_substate_handler_table
,
549 SCIC_SDS_STP_REQUEST_STARTED_NON_DATA_AWAIT_H2D_COMPLETION_SUBSTATE
552 scic_sds_remote_device_set_working_request(
553 sci_req
->target_device
, sci_req
557 static void scic_sds_stp_request_started_non_data_await_d2h_enter(void *object
)
559 struct scic_sds_request
*sci_req
= object
;
563 scic_sds_stp_request_started_non_data_substate_handler_table
,
564 SCIC_SDS_STP_REQUEST_STARTED_NON_DATA_AWAIT_D2H_SUBSTATE
568 /* --------------------------------------------------------------------------- */
570 static const struct sci_base_state scic_sds_stp_request_started_non_data_substate_table
[] = {
571 [SCIC_SDS_STP_REQUEST_STARTED_NON_DATA_AWAIT_H2D_COMPLETION_SUBSTATE
] = {
572 .enter_state
= scic_sds_stp_request_started_non_data_await_h2d_completion_enter
,
574 [SCIC_SDS_STP_REQUEST_STARTED_NON_DATA_AWAIT_D2H_SUBSTATE
] = {
575 .enter_state
= scic_sds_stp_request_started_non_data_await_d2h_enter
,
579 enum sci_status
scic_sds_stp_non_data_request_construct(struct scic_sds_request
*sci_req
)
581 struct scic_sds_stp_request
*stp_req
= &sci_req
->stp
.req
;
583 scic_sds_stp_non_ncq_request_construct(sci_req
);
585 /* Build the STP task context structure */
586 scu_stp_raw_request_construct_task_context(stp_req
, sci_req
->task_context_buffer
);
588 sci_base_state_machine_construct(&sci_req
->started_substate_machine
,
590 scic_sds_stp_request_started_non_data_substate_table
,
591 SCIC_SDS_STP_REQUEST_STARTED_NON_DATA_AWAIT_H2D_COMPLETION_SUBSTATE
);
596 #define SCU_MAX_FRAME_BUFFER_SIZE 0x400 /* 1K is the maximum SCU frame data payload */
598 /* transmit DATA_FIS from (current sgl + offset) for input
599 * parameter length. current sgl and offset is alreay stored in the IO request
601 static enum sci_status
scic_sds_stp_request_pio_data_out_trasmit_data_frame(
602 struct scic_sds_request
*sci_req
,
605 struct scic_sds_controller
*scic
= sci_req
->owning_controller
;
606 struct scic_sds_stp_request
*stp_req
= &sci_req
->stp
.req
;
607 struct scu_task_context
*task_context
;
608 struct scu_sgl_element
*current_sgl
;
610 /* Recycle the TC and reconstruct it for sending out DATA FIS containing
611 * for the data from current_sgl+offset for the input length
613 task_context
= scic_sds_controller_get_task_context_buffer(scic
,
616 if (stp_req
->type
.pio
.request_current
.sgl_set
== SCU_SGL_ELEMENT_PAIR_A
)
617 current_sgl
= &stp_req
->type
.pio
.request_current
.sgl_pair
->A
;
619 current_sgl
= &stp_req
->type
.pio
.request_current
.sgl_pair
->B
;
622 task_context
->command_iu_upper
= current_sgl
->address_upper
;
623 task_context
->command_iu_lower
= current_sgl
->address_lower
;
624 task_context
->transfer_length_bytes
= length
;
625 task_context
->type
.stp
.fis_type
= FIS_DATA
;
627 /* send the new TC out. */
628 return scic_controller_continue_io(sci_req
);
631 static enum sci_status
scic_sds_stp_request_pio_data_out_transmit_data(struct scic_sds_request
*sci_req
)
634 struct scu_sgl_element
*current_sgl
;
636 u32 remaining_bytes_in_current_sgl
= 0;
637 enum sci_status status
= SCI_SUCCESS
;
638 struct scic_sds_stp_request
*stp_req
= &sci_req
->stp
.req
;
640 sgl_offset
= stp_req
->type
.pio
.request_current
.sgl_offset
;
642 if (stp_req
->type
.pio
.request_current
.sgl_set
== SCU_SGL_ELEMENT_PAIR_A
) {
643 current_sgl
= &(stp_req
->type
.pio
.request_current
.sgl_pair
->A
);
644 remaining_bytes_in_current_sgl
= stp_req
->type
.pio
.request_current
.sgl_pair
->A
.length
- sgl_offset
;
646 current_sgl
= &(stp_req
->type
.pio
.request_current
.sgl_pair
->B
);
647 remaining_bytes_in_current_sgl
= stp_req
->type
.pio
.request_current
.sgl_pair
->B
.length
- sgl_offset
;
651 if (stp_req
->type
.pio
.pio_transfer_bytes
> 0) {
652 if (stp_req
->type
.pio
.pio_transfer_bytes
>= remaining_bytes_in_current_sgl
) {
653 /* recycle the TC and send the H2D Data FIS from (current sgl + sgl_offset) and length = remaining_bytes_in_current_sgl */
654 status
= scic_sds_stp_request_pio_data_out_trasmit_data_frame(sci_req
, remaining_bytes_in_current_sgl
);
655 if (status
== SCI_SUCCESS
) {
656 stp_req
->type
.pio
.pio_transfer_bytes
-= remaining_bytes_in_current_sgl
;
658 /* update the current sgl, sgl_offset and save for future */
659 current_sgl
= scic_sds_stp_request_pio_get_next_sgl(stp_req
);
662 } else if (stp_req
->type
.pio
.pio_transfer_bytes
< remaining_bytes_in_current_sgl
) {
663 /* recycle the TC and send the H2D Data FIS from (current sgl + sgl_offset) and length = type.pio.pio_transfer_bytes */
664 scic_sds_stp_request_pio_data_out_trasmit_data_frame(sci_req
, stp_req
->type
.pio
.pio_transfer_bytes
);
666 if (status
== SCI_SUCCESS
) {
667 /* Sgl offset will be adjusted and saved for future */
668 sgl_offset
+= stp_req
->type
.pio
.pio_transfer_bytes
;
669 current_sgl
->address_lower
+= stp_req
->type
.pio
.pio_transfer_bytes
;
670 stp_req
->type
.pio
.pio_transfer_bytes
= 0;
675 if (status
== SCI_SUCCESS
) {
676 stp_req
->type
.pio
.request_current
.sgl_offset
= sgl_offset
;
684 * @stp_request: The request that is used for the SGL processing.
685 * @data_buffer: The buffer of data to be copied.
686 * @length: The length of the data transfer.
688 * Copy the data from the buffer for the length specified to the IO reqeust SGL
689 * specified data region. enum sci_status
691 static enum sci_status
692 scic_sds_stp_request_pio_data_in_copy_data_buffer(struct scic_sds_stp_request
*stp_req
,
693 u8
*data_buf
, u32 len
)
695 struct scic_sds_request
*sci_req
;
696 struct isci_request
*ireq
;
699 struct sas_task
*task
;
700 struct scatterlist
*sg
;
704 sci_req
= to_sci_req(stp_req
);
705 ireq
= scic_sds_request_get_user_request(sci_req
);
706 task
= isci_request_access_task(ireq
);
709 if (task
->num_scatter
> 0) {
712 while (total_len
> 0) {
713 struct page
*page
= sg_page(sg
);
715 copy_len
= min_t(int, total_len
, sg_dma_len(sg
));
716 kaddr
= kmap_atomic(page
, KM_IRQ0
);
717 memcpy(kaddr
+ sg
->offset
, src_addr
, copy_len
);
718 kunmap_atomic(kaddr
, KM_IRQ0
);
719 total_len
-= copy_len
;
720 src_addr
+= copy_len
;
724 BUG_ON(task
->total_xfer_len
< total_len
);
725 memcpy(task
->scatter
, src_addr
, total_len
);
733 * @sci_req: The PIO DATA IN request that is to receive the data.
734 * @data_buffer: The buffer to copy from.
736 * Copy the data buffer to the io request data region. enum sci_status
738 static enum sci_status
scic_sds_stp_request_pio_data_in_copy_data(
739 struct scic_sds_stp_request
*sci_req
,
742 enum sci_status status
;
745 * If there is less than 1K remaining in the transfer request
746 * copy just the data for the transfer */
747 if (sci_req
->type
.pio
.pio_transfer_bytes
< SCU_MAX_FRAME_BUFFER_SIZE
) {
748 status
= scic_sds_stp_request_pio_data_in_copy_data_buffer(
749 sci_req
, data_buffer
, sci_req
->type
.pio
.pio_transfer_bytes
);
751 if (status
== SCI_SUCCESS
)
752 sci_req
->type
.pio
.pio_transfer_bytes
= 0;
754 /* We are transfering the whole frame so copy */
755 status
= scic_sds_stp_request_pio_data_in_copy_data_buffer(
756 sci_req
, data_buffer
, SCU_MAX_FRAME_BUFFER_SIZE
);
758 if (status
== SCI_SUCCESS
)
759 sci_req
->type
.pio
.pio_transfer_bytes
-= SCU_MAX_FRAME_BUFFER_SIZE
;
772 static enum sci_status
scic_sds_stp_request_pio_await_h2d_completion_tc_completion_handler(
773 struct scic_sds_request
*sci_req
,
776 enum sci_status status
= SCI_SUCCESS
;
778 switch (SCU_GET_COMPLETION_TL_STATUS(completion_code
)) {
779 case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_GOOD
):
780 scic_sds_request_set_status(
781 sci_req
, SCU_TASK_DONE_GOOD
, SCI_SUCCESS
784 sci_base_state_machine_change_state(
785 &sci_req
->started_substate_machine
,
786 SCIC_SDS_STP_REQUEST_STARTED_PIO_AWAIT_FRAME_SUBSTATE
792 * All other completion status cause the IO to be complete. If a NAK
793 * was received, then it is up to the user to retry the request. */
794 scic_sds_request_set_status(
796 SCU_NORMALIZE_COMPLETION_STATUS(completion_code
),
797 SCI_FAILURE_CONTROLLER_SPECIFIC_IO_ERR
800 sci_base_state_machine_change_state(
801 &sci_req
->state_machine
,
802 SCI_BASE_REQUEST_STATE_COMPLETED
810 static enum sci_status
scic_sds_stp_request_pio_await_frame_frame_handler(struct scic_sds_request
*sci_req
,
813 struct scic_sds_controller
*scic
= sci_req
->owning_controller
;
814 struct scic_sds_stp_request
*stp_req
= &sci_req
->stp
.req
;
815 struct isci_request
*ireq
= sci_req
->ireq
;
816 struct sas_task
*task
= isci_request_access_task(ireq
);
817 struct dev_to_host_fis
*frame_header
;
818 enum sci_status status
;
821 status
= scic_sds_unsolicited_frame_control_get_header(&scic
->uf_control
,
823 (void **)&frame_header
);
825 if (status
!= SCI_SUCCESS
) {
826 dev_err(scic_to_dev(scic
),
827 "%s: SCIC IO Request 0x%p could not get frame header "
828 "for frame index %d, status %x\n",
829 __func__
, stp_req
, frame_index
, status
);
833 switch (frame_header
->fis_type
) {
835 /* Get from the frame buffer the PIO Setup Data */
836 scic_sds_unsolicited_frame_control_get_buffer(&scic
->uf_control
,
838 (void **)&frame_buffer
);
840 /* Get the data from the PIO Setup The SCU Hardware returns
841 * first word in the frame_header and the rest of the data is in
842 * the frame buffer so we need to back up one dword
845 /* transfer_count: first 16bits in the 4th dword */
846 stp_req
->type
.pio
.pio_transfer_bytes
= frame_buffer
[3] & 0xffff;
848 /* ending_status: 4th byte in the 3rd dword */
849 stp_req
->type
.pio
.ending_status
= (frame_buffer
[2] >> 24) & 0xff;
851 scic_sds_controller_copy_sata_response(&stp_req
->d2h_reg_fis
,
855 stp_req
->d2h_reg_fis
.status
= stp_req
->type
.pio
.ending_status
;
857 /* The next state is dependent on whether the
858 * request was PIO Data-in or Data out
860 if (task
->data_dir
== DMA_FROM_DEVICE
) {
861 sci_base_state_machine_change_state(&sci_req
->started_substate_machine
,
862 SCIC_SDS_STP_REQUEST_STARTED_PIO_DATA_IN_AWAIT_DATA_SUBSTATE
);
863 } else if (task
->data_dir
== DMA_TO_DEVICE
) {
865 status
= scic_sds_stp_request_pio_data_out_transmit_data(sci_req
);
866 if (status
!= SCI_SUCCESS
)
868 sci_base_state_machine_change_state(&sci_req
->started_substate_machine
,
869 SCIC_SDS_STP_REQUEST_STARTED_PIO_DATA_OUT_TRANSMIT_DATA_SUBSTATE
);
873 sci_base_state_machine_change_state(&sci_req
->started_substate_machine
,
874 SCIC_SDS_STP_REQUEST_STARTED_PIO_AWAIT_FRAME_SUBSTATE
);
877 if (frame_header
->status
& ATA_BUSY
) {
878 /* Now why is the drive sending a D2H Register FIS when
879 * it is still busy? Do nothing since we are still in
882 dev_dbg(scic_to_dev(scic
),
883 "%s: SCIC PIO Request 0x%p received "
884 "D2H Register FIS with BSY status "
885 "0x%x\n", __func__
, stp_req
,
886 frame_header
->status
);
890 scic_sds_unsolicited_frame_control_get_buffer(&scic
->uf_control
,
892 (void **)&frame_buffer
);
894 scic_sds_controller_copy_sata_response(&stp_req
->d2h_reg_fis
,
898 scic_sds_request_set_status(sci_req
,
899 SCU_TASK_DONE_CHECK_RESPONSE
,
900 SCI_FAILURE_IO_RESPONSE_VALID
);
902 sci_base_state_machine_change_state(&sci_req
->state_machine
,
903 SCI_BASE_REQUEST_STATE_COMPLETED
);
906 /* FIXME: what do we do here? */
910 /* Frame is decoded return it to the controller */
911 scic_sds_controller_release_frame(scic
, frame_index
);
916 static enum sci_status
scic_sds_stp_request_pio_data_in_await_data_frame_handler(struct scic_sds_request
*sci_req
,
919 enum sci_status status
;
920 struct dev_to_host_fis
*frame_header
;
921 struct sata_fis_data
*frame_buffer
;
922 struct scic_sds_stp_request
*stp_req
= &sci_req
->stp
.req
;
923 struct scic_sds_controller
*scic
= sci_req
->owning_controller
;
925 status
= scic_sds_unsolicited_frame_control_get_header(&scic
->uf_control
,
927 (void **)&frame_header
);
929 if (status
!= SCI_SUCCESS
) {
930 dev_err(scic_to_dev(scic
),
931 "%s: SCIC IO Request 0x%p could not get frame header "
932 "for frame index %d, status %x\n",
933 __func__
, stp_req
, frame_index
, status
);
937 if (frame_header
->fis_type
== FIS_DATA
) {
938 if (stp_req
->type
.pio
.request_current
.sgl_pair
== NULL
) {
939 sci_req
->saved_rx_frame_index
= frame_index
;
940 stp_req
->type
.pio
.pio_transfer_bytes
= 0;
942 scic_sds_unsolicited_frame_control_get_buffer(&scic
->uf_control
,
944 (void **)&frame_buffer
);
946 status
= scic_sds_stp_request_pio_data_in_copy_data(stp_req
,
949 /* Frame is decoded return it to the controller */
950 scic_sds_controller_release_frame(scic
, frame_index
);
953 /* Check for the end of the transfer, are there more
954 * bytes remaining for this data transfer
956 if (status
!= SCI_SUCCESS
||
957 stp_req
->type
.pio
.pio_transfer_bytes
!= 0)
960 if ((stp_req
->type
.pio
.ending_status
& ATA_BUSY
) == 0) {
961 scic_sds_request_set_status(sci_req
,
962 SCU_TASK_DONE_CHECK_RESPONSE
,
963 SCI_FAILURE_IO_RESPONSE_VALID
);
965 sci_base_state_machine_change_state(&sci_req
->state_machine
,
966 SCI_BASE_REQUEST_STATE_COMPLETED
);
968 sci_base_state_machine_change_state(&sci_req
->started_substate_machine
,
969 SCIC_SDS_STP_REQUEST_STARTED_PIO_AWAIT_FRAME_SUBSTATE
);
972 dev_err(scic_to_dev(scic
),
973 "%s: SCIC PIO Request 0x%p received frame %d "
974 "with fis type 0x%02x when expecting a data "
975 "fis.\n", __func__
, stp_req
, frame_index
,
976 frame_header
->fis_type
);
978 scic_sds_request_set_status(sci_req
,
980 SCI_FAILURE_IO_REQUIRES_SCSI_ABORT
);
982 sci_base_state_machine_change_state(&sci_req
->state_machine
,
983 SCI_BASE_REQUEST_STATE_COMPLETED
);
985 /* Frame is decoded return it to the controller */
986 scic_sds_controller_release_frame(scic
, frame_index
);
1000 static enum sci_status
scic_sds_stp_request_pio_data_out_await_data_transmit_completion_tc_completion_handler(
1002 struct scic_sds_request
*sci_req
,
1003 u32 completion_code
)
1005 enum sci_status status
= SCI_SUCCESS
;
1006 bool all_frames_transferred
= false;
1007 struct scic_sds_stp_request
*stp_req
= &sci_req
->stp
.req
;
1009 switch (SCU_GET_COMPLETION_TL_STATUS(completion_code
)) {
1010 case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_GOOD
):
1012 if (stp_req
->type
.pio
.pio_transfer_bytes
!= 0) {
1013 status
= scic_sds_stp_request_pio_data_out_transmit_data(sci_req
);
1014 if (status
== SCI_SUCCESS
) {
1015 if (stp_req
->type
.pio
.pio_transfer_bytes
== 0)
1016 all_frames_transferred
= true;
1018 } else if (stp_req
->type
.pio
.pio_transfer_bytes
== 0) {
1020 * this will happen if the all data is written at the
1021 * first time after the pio setup fis is received
1023 all_frames_transferred
= true;
1026 /* all data transferred. */
1027 if (all_frames_transferred
) {
1029 * Change the state to SCIC_SDS_STP_REQUEST_STARTED_PIO_DATA_IN_AWAIT_FRAME_SUBSTATE
1030 * and wait for PIO_SETUP fis / or D2H REg fis. */
1031 sci_base_state_machine_change_state(
1032 &sci_req
->started_substate_machine
,
1033 SCIC_SDS_STP_REQUEST_STARTED_PIO_AWAIT_FRAME_SUBSTATE
1040 * All other completion status cause the IO to be complete. If a NAK
1041 * was received, then it is up to the user to retry the request. */
1042 scic_sds_request_set_status(
1044 SCU_NORMALIZE_COMPLETION_STATUS(completion_code
),
1045 SCI_FAILURE_CONTROLLER_SPECIFIC_IO_ERR
1048 sci_base_state_machine_change_state(
1049 &sci_req
->state_machine
,
1050 SCI_BASE_REQUEST_STATE_COMPLETED
1060 * @request: This is the request which is receiving the event.
1061 * @event_code: This is the event code that the request on which the request is
1062 * expected to take action.
1064 * This method will handle any link layer events while waiting for the data
1065 * frame. enum sci_status SCI_SUCCESS SCI_FAILURE
1067 static enum sci_status
scic_sds_stp_request_pio_data_in_await_data_event_handler(
1068 struct scic_sds_request
*request
,
1071 enum sci_status status
;
1073 switch (scu_get_event_specifier(event_code
)) {
1074 case SCU_TASK_DONE_CRC_ERR
<< SCU_EVENT_SPECIFIC_CODE_SHIFT
:
1076 * We are waiting for data and the SCU has R_ERR the data frame.
1077 * Go back to waiting for the D2H Register FIS */
1078 sci_base_state_machine_change_state(
1079 &request
->started_substate_machine
,
1080 SCIC_SDS_STP_REQUEST_STARTED_PIO_AWAIT_FRAME_SUBSTATE
1083 status
= SCI_SUCCESS
;
1087 dev_err(scic_to_dev(request
->owning_controller
),
1088 "%s: SCIC PIO Request 0x%p received unexpected "
1090 __func__
, request
, event_code
);
1092 /* / @todo Should we fail the PIO request when we get an unexpected event? */
1093 status
= SCI_FAILURE
;
1100 /* --------------------------------------------------------------------------- */
1102 static const struct scic_sds_io_request_state_handler scic_sds_stp_request_started_pio_substate_handler_table
[] = {
1103 [SCIC_SDS_STP_REQUEST_STARTED_PIO_AWAIT_H2D_COMPLETION_SUBSTATE
] = {
1104 .abort_handler
= scic_sds_request_started_state_abort_handler
,
1105 .tc_completion_handler
= scic_sds_stp_request_pio_await_h2d_completion_tc_completion_handler
,
1107 [SCIC_SDS_STP_REQUEST_STARTED_PIO_AWAIT_FRAME_SUBSTATE
] = {
1108 .abort_handler
= scic_sds_request_started_state_abort_handler
,
1109 .frame_handler
= scic_sds_stp_request_pio_await_frame_frame_handler
1111 [SCIC_SDS_STP_REQUEST_STARTED_PIO_DATA_IN_AWAIT_DATA_SUBSTATE
] = {
1112 .abort_handler
= scic_sds_request_started_state_abort_handler
,
1113 .event_handler
= scic_sds_stp_request_pio_data_in_await_data_event_handler
,
1114 .frame_handler
= scic_sds_stp_request_pio_data_in_await_data_frame_handler
1116 [SCIC_SDS_STP_REQUEST_STARTED_PIO_DATA_OUT_TRANSMIT_DATA_SUBSTATE
] = {
1117 .abort_handler
= scic_sds_request_started_state_abort_handler
,
1118 .tc_completion_handler
= scic_sds_stp_request_pio_data_out_await_data_transmit_completion_tc_completion_handler
,
1122 static void scic_sds_stp_request_started_pio_await_h2d_completion_enter(
1125 struct scic_sds_request
*sci_req
= object
;
1129 scic_sds_stp_request_started_pio_substate_handler_table
,
1130 SCIC_SDS_STP_REQUEST_STARTED_PIO_AWAIT_H2D_COMPLETION_SUBSTATE
1133 scic_sds_remote_device_set_working_request(
1134 sci_req
->target_device
, sci_req
);
1137 static void scic_sds_stp_request_started_pio_await_frame_enter(void *object
)
1139 struct scic_sds_request
*sci_req
= object
;
1143 scic_sds_stp_request_started_pio_substate_handler_table
,
1144 SCIC_SDS_STP_REQUEST_STARTED_PIO_AWAIT_FRAME_SUBSTATE
1148 static void scic_sds_stp_request_started_pio_data_in_await_data_enter(
1151 struct scic_sds_request
*sci_req
= object
;
1155 scic_sds_stp_request_started_pio_substate_handler_table
,
1156 SCIC_SDS_STP_REQUEST_STARTED_PIO_DATA_IN_AWAIT_DATA_SUBSTATE
1160 static void scic_sds_stp_request_started_pio_data_out_transmit_data_enter(
1163 struct scic_sds_request
*sci_req
= object
;
1167 scic_sds_stp_request_started_pio_substate_handler_table
,
1168 SCIC_SDS_STP_REQUEST_STARTED_PIO_DATA_OUT_TRANSMIT_DATA_SUBSTATE
1172 /* --------------------------------------------------------------------------- */
1174 static const struct sci_base_state scic_sds_stp_request_started_pio_substate_table
[] = {
1175 [SCIC_SDS_STP_REQUEST_STARTED_PIO_AWAIT_H2D_COMPLETION_SUBSTATE
] = {
1176 .enter_state
= scic_sds_stp_request_started_pio_await_h2d_completion_enter
,
1178 [SCIC_SDS_STP_REQUEST_STARTED_PIO_AWAIT_FRAME_SUBSTATE
] = {
1179 .enter_state
= scic_sds_stp_request_started_pio_await_frame_enter
,
1181 [SCIC_SDS_STP_REQUEST_STARTED_PIO_DATA_IN_AWAIT_DATA_SUBSTATE
] = {
1182 .enter_state
= scic_sds_stp_request_started_pio_data_in_await_data_enter
,
1184 [SCIC_SDS_STP_REQUEST_STARTED_PIO_DATA_OUT_TRANSMIT_DATA_SUBSTATE
] = {
1185 .enter_state
= scic_sds_stp_request_started_pio_data_out_transmit_data_enter
,
1190 scic_sds_stp_pio_request_construct(struct scic_sds_request
*sci_req
,
1193 struct scic_sds_stp_request
*stp_req
= &sci_req
->stp
.req
;
1194 struct scic_sds_stp_pio_request
*pio
= &stp_req
->type
.pio
;
1196 scic_sds_stp_non_ncq_request_construct(sci_req
);
1198 scu_stp_raw_request_construct_task_context(stp_req
,
1199 sci_req
->task_context_buffer
);
1201 pio
->current_transfer_bytes
= 0;
1202 pio
->ending_error
= 0;
1203 pio
->ending_status
= 0;
1205 pio
->request_current
.sgl_offset
= 0;
1206 pio
->request_current
.sgl_set
= SCU_SGL_ELEMENT_PAIR_A
;
1208 if (copy_rx_frame
) {
1209 scic_sds_request_build_sgl(sci_req
);
1210 /* Since the IO request copy of the TC contains the same data as
1211 * the actual TC this pointer is vaild for either.
1213 pio
->request_current
.sgl_pair
= &sci_req
->task_context_buffer
->sgl_pair_ab
;
1215 /* The user does not want the data copied to the SGL buffer location */
1216 pio
->request_current
.sgl_pair
= NULL
;
1219 sci_base_state_machine_construct(&sci_req
->started_substate_machine
,
1221 scic_sds_stp_request_started_pio_substate_table
,
1222 SCIC_SDS_STP_REQUEST_STARTED_PIO_AWAIT_H2D_COMPLETION_SUBSTATE
);
1227 static void scic_sds_stp_request_udma_complete_request(
1228 struct scic_sds_request
*request
,
1230 enum sci_status sci_status
)
1232 scic_sds_request_set_status(request
, scu_status
, sci_status
);
1233 sci_base_state_machine_change_state(&request
->state_machine
,
1234 SCI_BASE_REQUEST_STATE_COMPLETED
);
1237 static enum sci_status
scic_sds_stp_request_udma_general_frame_handler(struct scic_sds_request
*sci_req
,
1240 struct scic_sds_controller
*scic
= sci_req
->owning_controller
;
1241 struct scic_sds_stp_request
*stp_req
= &sci_req
->stp
.req
;
1242 struct dev_to_host_fis
*frame_header
;
1243 enum sci_status status
;
1246 status
= scic_sds_unsolicited_frame_control_get_header(&scic
->uf_control
,
1248 (void **)&frame_header
);
1250 if ((status
== SCI_SUCCESS
) &&
1251 (frame_header
->fis_type
== FIS_REGD2H
)) {
1252 scic_sds_unsolicited_frame_control_get_buffer(&scic
->uf_control
,
1254 (void **)&frame_buffer
);
1256 scic_sds_controller_copy_sata_response(&stp_req
->d2h_reg_fis
,
1261 scic_sds_controller_release_frame(scic
, frame_index
);
1266 static enum sci_status
scic_sds_stp_request_udma_await_tc_completion_tc_completion_handler(
1267 struct scic_sds_request
*sci_req
,
1268 u32 completion_code
)
1270 struct scic_sds_stp_request
*stp_req
= &sci_req
->stp
.req
;
1271 enum sci_status status
= SCI_SUCCESS
;
1273 switch (SCU_GET_COMPLETION_TL_STATUS(completion_code
)) {
1274 case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_GOOD
):
1275 scic_sds_stp_request_udma_complete_request(sci_req
,
1279 case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_UNEXP_FIS
):
1280 case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_REG_ERR
):
1282 * We must check ther response buffer to see if the D2H Register FIS was
1283 * received before we got the TC completion. */
1284 if (stp_req
->d2h_reg_fis
.fis_type
== FIS_REGD2H
) {
1285 scic_sds_remote_device_suspend(sci_req
->target_device
,
1286 SCU_EVENT_SPECIFIC(SCU_NORMALIZE_COMPLETION_STATUS(completion_code
)));
1288 scic_sds_stp_request_udma_complete_request(sci_req
,
1289 SCU_TASK_DONE_CHECK_RESPONSE
,
1290 SCI_FAILURE_IO_RESPONSE_VALID
);
1293 * If we have an error completion status for the TC then we can expect a
1294 * D2H register FIS from the device so we must change state to wait for it */
1295 sci_base_state_machine_change_state(&sci_req
->started_substate_machine
,
1296 SCIC_SDS_STP_REQUEST_STARTED_UDMA_AWAIT_D2H_REG_FIS_SUBSTATE
);
1301 * / @todo Check to see if any of these completion status need to wait for
1302 * / the device to host register fis. */
1303 /* / @todo We can retry the command for SCU_TASK_DONE_CMD_LL_R_ERR - this comes only for B0 */
1304 case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_INV_FIS_LEN
):
1305 case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_MAX_PLD_ERR
):
1306 case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_LL_R_ERR
):
1307 case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_CMD_LL_R_ERR
):
1308 case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_CRC_ERR
):
1309 scic_sds_remote_device_suspend(sci_req
->target_device
,
1310 SCU_EVENT_SPECIFIC(SCU_NORMALIZE_COMPLETION_STATUS(completion_code
)));
1311 /* Fall through to the default case */
1313 /* All other completion status cause the IO to be complete. */
1314 scic_sds_stp_request_udma_complete_request(sci_req
,
1315 SCU_NORMALIZE_COMPLETION_STATUS(completion_code
),
1316 SCI_FAILURE_CONTROLLER_SPECIFIC_IO_ERR
);
1323 static enum sci_status
scic_sds_stp_request_udma_await_d2h_reg_fis_frame_handler(
1324 struct scic_sds_request
*sci_req
,
1327 enum sci_status status
;
1329 /* Use the general frame handler to copy the resposne data */
1330 status
= scic_sds_stp_request_udma_general_frame_handler(sci_req
, frame_index
);
1332 if (status
!= SCI_SUCCESS
)
1335 scic_sds_stp_request_udma_complete_request(sci_req
,
1336 SCU_TASK_DONE_CHECK_RESPONSE
,
1337 SCI_FAILURE_IO_RESPONSE_VALID
);
1342 /* --------------------------------------------------------------------------- */
1344 static const struct scic_sds_io_request_state_handler scic_sds_stp_request_started_udma_substate_handler_table
[] = {
1345 [SCIC_SDS_STP_REQUEST_STARTED_UDMA_AWAIT_TC_COMPLETION_SUBSTATE
] = {
1346 .abort_handler
= scic_sds_request_started_state_abort_handler
,
1347 .tc_completion_handler
= scic_sds_stp_request_udma_await_tc_completion_tc_completion_handler
,
1348 .frame_handler
= scic_sds_stp_request_udma_general_frame_handler
,
1350 [SCIC_SDS_STP_REQUEST_STARTED_UDMA_AWAIT_D2H_REG_FIS_SUBSTATE
] = {
1351 .abort_handler
= scic_sds_request_started_state_abort_handler
,
1352 .frame_handler
= scic_sds_stp_request_udma_await_d2h_reg_fis_frame_handler
,
1356 static void scic_sds_stp_request_started_udma_await_tc_completion_enter(
1359 struct scic_sds_request
*sci_req
= object
;
1363 scic_sds_stp_request_started_udma_substate_handler_table
,
1364 SCIC_SDS_STP_REQUEST_STARTED_UDMA_AWAIT_TC_COMPLETION_SUBSTATE
1371 * This state is entered when there is an TC completion failure. The hardware
1372 * received an unexpected condition while processing the IO request and now
1373 * will UF the D2H register FIS to complete the IO.
1375 static void scic_sds_stp_request_started_udma_await_d2h_reg_fis_enter(
1378 struct scic_sds_request
*sci_req
= object
;
1382 scic_sds_stp_request_started_udma_substate_handler_table
,
1383 SCIC_SDS_STP_REQUEST_STARTED_UDMA_AWAIT_D2H_REG_FIS_SUBSTATE
1387 /* --------------------------------------------------------------------------- */
1389 static const struct sci_base_state scic_sds_stp_request_started_udma_substate_table
[] = {
1390 [SCIC_SDS_STP_REQUEST_STARTED_UDMA_AWAIT_TC_COMPLETION_SUBSTATE
] = {
1391 .enter_state
= scic_sds_stp_request_started_udma_await_tc_completion_enter
,
1393 [SCIC_SDS_STP_REQUEST_STARTED_UDMA_AWAIT_D2H_REG_FIS_SUBSTATE
] = {
1394 .enter_state
= scic_sds_stp_request_started_udma_await_d2h_reg_fis_enter
,
1398 enum sci_status
scic_sds_stp_udma_request_construct(struct scic_sds_request
*sci_req
,
1400 enum dma_data_direction dir
)
1402 scic_sds_stp_non_ncq_request_construct(sci_req
);
1404 scic_sds_stp_optimized_request_construct(sci_req
, SCU_TASK_TYPE_DMA_IN
,
1407 sci_base_state_machine_construct(
1408 &sci_req
->started_substate_machine
,
1410 scic_sds_stp_request_started_udma_substate_table
,
1411 SCIC_SDS_STP_REQUEST_STARTED_UDMA_AWAIT_TC_COMPLETION_SUBSTATE
1422 * This method processes a TC completion. The expected TC completion is for
1423 * the transmission of the H2D register FIS containing the SATA/STP non-data
1424 * request. This method always successfully processes the TC completion.
1425 * SCI_SUCCESS This value is always returned.
1427 static enum sci_status
scic_sds_stp_request_soft_reset_await_h2d_asserted_tc_completion_handler(
1428 struct scic_sds_request
*sci_req
,
1429 u32 completion_code
)
1431 switch (SCU_GET_COMPLETION_TL_STATUS(completion_code
)) {
1432 case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_GOOD
):
1433 scic_sds_request_set_status(
1434 sci_req
, SCU_TASK_DONE_GOOD
, SCI_SUCCESS
1437 sci_base_state_machine_change_state(
1438 &sci_req
->started_substate_machine
,
1439 SCIC_SDS_STP_REQUEST_STARTED_SOFT_RESET_AWAIT_H2D_DIAGNOSTIC_COMPLETION_SUBSTATE
1445 * All other completion status cause the IO to be complete. If a NAK
1446 * was received, then it is up to the user to retry the request. */
1447 scic_sds_request_set_status(
1449 SCU_NORMALIZE_COMPLETION_STATUS(completion_code
),
1450 SCI_FAILURE_CONTROLLER_SPECIFIC_IO_ERR
1453 sci_base_state_machine_change_state(
1454 &sci_req
->state_machine
, SCI_BASE_REQUEST_STATE_COMPLETED
);
1466 * This method processes a TC completion. The expected TC completion is for
1467 * the transmission of the H2D register FIS containing the SATA/STP non-data
1468 * request. This method always successfully processes the TC completion.
1469 * SCI_SUCCESS This value is always returned.
1471 static enum sci_status
scic_sds_stp_request_soft_reset_await_h2d_diagnostic_tc_completion_handler(
1472 struct scic_sds_request
*sci_req
,
1473 u32 completion_code
)
1475 switch (SCU_GET_COMPLETION_TL_STATUS(completion_code
)) {
1476 case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_GOOD
):
1477 scic_sds_request_set_status(
1478 sci_req
, SCU_TASK_DONE_GOOD
, SCI_SUCCESS
1481 sci_base_state_machine_change_state(
1482 &sci_req
->started_substate_machine
,
1483 SCIC_SDS_STP_REQUEST_STARTED_SOFT_RESET_AWAIT_D2H_RESPONSE_FRAME_SUBSTATE
1489 * All other completion status cause the IO to be complete. If a NAK
1490 * was received, then it is up to the user to retry the request. */
1491 scic_sds_request_set_status(
1493 SCU_NORMALIZE_COMPLETION_STATUS(completion_code
),
1494 SCI_FAILURE_CONTROLLER_SPECIFIC_IO_ERR
1497 sci_base_state_machine_change_state(&sci_req
->state_machine
,
1498 SCI_BASE_REQUEST_STATE_COMPLETED
);
1507 * @request: This parameter specifies the request for which a frame has been
1509 * @frame_index: This parameter specifies the index of the frame that has been
1512 * This method processes frames received from the target while waiting for a
1513 * device to host register FIS. If a non-register FIS is received during this
1514 * time, it is treated as a protocol violation from an IO perspective. Indicate
1515 * if the received frame was processed successfully.
1517 static enum sci_status
scic_sds_stp_request_soft_reset_await_d2h_frame_handler(
1518 struct scic_sds_request
*sci_req
,
1521 enum sci_status status
;
1522 struct dev_to_host_fis
*frame_header
;
1524 struct scic_sds_stp_request
*stp_req
= &sci_req
->stp
.req
;
1525 struct scic_sds_controller
*scic
= sci_req
->owning_controller
;
1527 status
= scic_sds_unsolicited_frame_control_get_header(&scic
->uf_control
,
1529 (void **)&frame_header
);
1530 if (status
!= SCI_SUCCESS
) {
1531 dev_err(scic_to_dev(scic
),
1532 "%s: SCIC IO Request 0x%p could not get frame header "
1533 "for frame index %d, status %x\n",
1534 __func__
, stp_req
, frame_index
, status
);
1538 switch (frame_header
->fis_type
) {
1540 scic_sds_unsolicited_frame_control_get_buffer(&scic
->uf_control
,
1542 (void **)&frame_buffer
);
1544 scic_sds_controller_copy_sata_response(&stp_req
->d2h_reg_fis
,
1548 /* The command has completed with error */
1549 scic_sds_request_set_status(sci_req
,
1550 SCU_TASK_DONE_CHECK_RESPONSE
,
1551 SCI_FAILURE_IO_RESPONSE_VALID
);
1555 dev_warn(scic_to_dev(scic
),
1556 "%s: IO Request:0x%p Frame Id:%d protocol "
1557 "violation occurred\n", __func__
, stp_req
,
1560 scic_sds_request_set_status(sci_req
, SCU_TASK_DONE_UNEXP_FIS
,
1561 SCI_FAILURE_PROTOCOL_VIOLATION
);
1565 sci_base_state_machine_change_state(&sci_req
->state_machine
,
1566 SCI_BASE_REQUEST_STATE_COMPLETED
);
1568 /* Frame has been decoded return it to the controller */
1569 scic_sds_controller_release_frame(scic
, frame_index
);
1574 /* --------------------------------------------------------------------------- */
1576 static const struct scic_sds_io_request_state_handler scic_sds_stp_request_started_soft_reset_substate_handler_table
[] = {
1577 [SCIC_SDS_STP_REQUEST_STARTED_SOFT_RESET_AWAIT_H2D_ASSERTED_COMPLETION_SUBSTATE
] = {
1578 .abort_handler
= scic_sds_request_started_state_abort_handler
,
1579 .tc_completion_handler
= scic_sds_stp_request_soft_reset_await_h2d_asserted_tc_completion_handler
,
1581 [SCIC_SDS_STP_REQUEST_STARTED_SOFT_RESET_AWAIT_H2D_DIAGNOSTIC_COMPLETION_SUBSTATE
] = {
1582 .abort_handler
= scic_sds_request_started_state_abort_handler
,
1583 .tc_completion_handler
= scic_sds_stp_request_soft_reset_await_h2d_diagnostic_tc_completion_handler
,
1585 [SCIC_SDS_STP_REQUEST_STARTED_SOFT_RESET_AWAIT_D2H_RESPONSE_FRAME_SUBSTATE
] = {
1586 .abort_handler
= scic_sds_request_started_state_abort_handler
,
1587 .frame_handler
= scic_sds_stp_request_soft_reset_await_d2h_frame_handler
,
1591 static void scic_sds_stp_request_started_soft_reset_await_h2d_asserted_completion_enter(
1594 struct scic_sds_request
*sci_req
= object
;
1598 scic_sds_stp_request_started_soft_reset_substate_handler_table
,
1599 SCIC_SDS_STP_REQUEST_STARTED_SOFT_RESET_AWAIT_H2D_ASSERTED_COMPLETION_SUBSTATE
1602 scic_sds_remote_device_set_working_request(
1603 sci_req
->target_device
, sci_req
1607 static void scic_sds_stp_request_started_soft_reset_await_h2d_diagnostic_completion_enter(
1610 struct scic_sds_request
*sci_req
= object
;
1611 struct scu_task_context
*task_context
;
1612 struct host_to_dev_fis
*h2d_fis
;
1613 enum sci_status status
;
1615 /* Clear the SRST bit */
1616 h2d_fis
= scic_stp_io_request_get_h2d_reg_address(sci_req
);
1617 h2d_fis
->control
= 0;
1619 /* Clear the TC control bit */
1620 task_context
= scic_sds_controller_get_task_context_buffer(
1621 sci_req
->owning_controller
, sci_req
->io_tag
);
1622 task_context
->control_frame
= 0;
1624 status
= scic_controller_continue_io(sci_req
);
1625 if (status
== SCI_SUCCESS
) {
1628 scic_sds_stp_request_started_soft_reset_substate_handler_table
,
1629 SCIC_SDS_STP_REQUEST_STARTED_SOFT_RESET_AWAIT_H2D_DIAGNOSTIC_COMPLETION_SUBSTATE
1634 static void scic_sds_stp_request_started_soft_reset_await_d2h_response_enter(
1637 struct scic_sds_request
*sci_req
= object
;
1641 scic_sds_stp_request_started_soft_reset_substate_handler_table
,
1642 SCIC_SDS_STP_REQUEST_STARTED_SOFT_RESET_AWAIT_D2H_RESPONSE_FRAME_SUBSTATE
1646 static const struct sci_base_state scic_sds_stp_request_started_soft_reset_substate_table
[] = {
1647 [SCIC_SDS_STP_REQUEST_STARTED_SOFT_RESET_AWAIT_H2D_ASSERTED_COMPLETION_SUBSTATE
] = {
1648 .enter_state
= scic_sds_stp_request_started_soft_reset_await_h2d_asserted_completion_enter
,
1650 [SCIC_SDS_STP_REQUEST_STARTED_SOFT_RESET_AWAIT_H2D_DIAGNOSTIC_COMPLETION_SUBSTATE
] = {
1651 .enter_state
= scic_sds_stp_request_started_soft_reset_await_h2d_diagnostic_completion_enter
,
1653 [SCIC_SDS_STP_REQUEST_STARTED_SOFT_RESET_AWAIT_D2H_RESPONSE_FRAME_SUBSTATE
] = {
1654 .enter_state
= scic_sds_stp_request_started_soft_reset_await_d2h_response_enter
,
1658 enum sci_status
scic_sds_stp_soft_reset_request_construct(struct scic_sds_request
*sci_req
)
1660 struct scic_sds_stp_request
*stp_req
= &sci_req
->stp
.req
;
1662 scic_sds_stp_non_ncq_request_construct(sci_req
);
1664 /* Build the STP task context structure */
1665 scu_stp_raw_request_construct_task_context(stp_req
, sci_req
->task_context_buffer
);
1667 sci_base_state_machine_construct(&sci_req
->started_substate_machine
,
1669 scic_sds_stp_request_started_soft_reset_substate_table
,
1670 SCIC_SDS_STP_REQUEST_STARTED_SOFT_RESET_AWAIT_H2D_ASSERTED_COMPLETION_SUBSTATE
);