2f5095130cad8565efc24df8e3d3cc96744a9d1a
[deliverable/linux.git] / drivers / scsi / isci / core / scic_sds_stp_request.c
1 /*
2 * This file is provided under a dual BSD/GPLv2 license. When using or
3 * redistributing this file, you may do so under either license.
4 *
5 * GPL LICENSE SUMMARY
6 *
7 * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved.
8 *
9 * This program is free software; you can redistribute it and/or modify
10 * it under the terms of version 2 of the GNU General Public License as
11 * published by the Free Software Foundation.
12 *
13 * This program is distributed in the hope that it will be useful, but
14 * WITHOUT ANY WARRANTY; without even the implied warranty of
15 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
16 * General Public License for more details.
17 *
18 * You should have received a copy of the GNU General Public License
19 * along with this program; if not, write to the Free Software
20 * Foundation, Inc., 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
21 * The full GNU General Public License is included in this distribution
22 * in the file called LICENSE.GPL.
23 *
24 * BSD LICENSE
25 *
26 * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved.
27 * All rights reserved.
28 *
29 * Redistribution and use in source and binary forms, with or without
30 * modification, are permitted provided that the following conditions
31 * are met:
32 *
33 * * Redistributions of source code must retain the above copyright
34 * notice, this list of conditions and the following disclaimer.
35 * * Redistributions in binary form must reproduce the above copyright
36 * notice, this list of conditions and the following disclaimer in
37 * the documentation and/or other materials provided with the
38 * distribution.
39 * * Neither the name of Intel Corporation nor the names of its
40 * contributors may be used to endorse or promote products derived
41 * from this software without specific prior written permission.
42 *
43 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
44 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
45 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
46 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
47 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
48 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
49 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
50 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
51 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
52 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
53 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
54 */
55
56 #include <scsi/sas.h>
57 #include "sas.h"
58 #include "sci_base_state.h"
59 #include "sci_base_state_machine.h"
60 #include "scic_io_request.h"
61 #include "remote_device.h"
62 #include "scic_sds_request.h"
63 #include "scic_sds_stp_pio_request.h"
64 #include "scic_sds_stp_request.h"
65 #include "scic_sds_unsolicited_frame_control.h"
66 #include "sci_util.h"
67 #include "scu_completion_codes.h"
68 #include "scu_event_codes.h"
69 #include "scu_task_context.h"
70 #include "request.h"
71
72 void scic_sds_stp_request_assign_buffers(struct scic_sds_request *sci_req)
73 {
74 if (sci_req->was_tag_assigned_by_user == false)
75 sci_req->task_context_buffer = &sci_req->tc;
76 }
77
78 /**
79 * This method is will fill in the SCU Task Context for any type of SATA
80 * request. This is called from the various SATA constructors.
81 * @sci_req: The general IO request object which is to be used in
82 * constructing the SCU task context.
83 * @task_context: The buffer pointer for the SCU task context which is being
84 * constructed.
85 *
86 * The general io request construction is complete. The buffer assignment for
87 * the command buffer is complete. none Revisit task context construction to
88 * determine what is common for SSP/SMP/STP task context structures.
89 */
static void scu_sata_reqeust_construct_task_context(
	struct scic_sds_request *sci_req,
	struct scu_task_context *task_context)
{
	dma_addr_t dma_addr;
	struct scic_sds_controller *controller;
	struct scic_sds_remote_device *target_device;
	struct scic_sds_port *target_port;

	controller = scic_sds_request_get_controller(sci_req);
	target_device = scic_sds_request_get_device(sci_req);
	target_port = scic_sds_request_get_port(sci_req);

	/* Fill in the TC with the its required data */
	task_context->abort = 0;
	task_context->priority = SCU_TASK_PRIORITY_NORMAL;
	task_context->initiator_request = 1;
	task_context->connection_rate = target_device->connection_rate;
	task_context->protocol_engine_index =
		scic_sds_controller_get_protocol_engine_group(controller);
	task_context->logical_port_index =
		scic_sds_port_get_index(target_port);
	task_context->protocol_type = SCU_TASK_CONTEXT_PROTOCOL_STP;
	task_context->valid = SCU_TASK_CONTEXT_VALID;
	task_context->context_type = SCU_TASK_CONTEXT_TYPE;

	task_context->remote_node_index =
		scic_sds_remote_device_get_index(sci_req->target_device);
	task_context->command_code = 0;

	task_context->link_layer_control = 0;
	task_context->do_not_dma_ssp_good_response = 1;
	task_context->strict_ordering = 0;
	task_context->control_frame = 0;
	task_context->timeout_enable = 0;
	task_context->block_guard_enable = 0;

	task_context->address_modifier = 0;
	task_context->task_phase = 0x01;

	/* Command IU length is in dwords; the first dword of the H2D FIS is
	 * carried inside the TC itself (type.words[0] below), hence the
	 * sizeof(u32) subtraction.
	 */
	task_context->ssp_command_iu_length =
		(sizeof(struct host_to_dev_fis) - sizeof(u32)) / sizeof(u32);

	/* Set the first word of the H2D REG FIS */
	task_context->type.words[0] = *(u32 *)&sci_req->stp.cmd;

	if (sci_req->was_tag_assigned_by_user) {
		/*
		 * Build the task context now since we have already read
		 * the data
		 */
		sci_req->post_context =
			(SCU_CONTEXT_COMMAND_REQUEST_TYPE_POST_TC |
			 (scic_sds_controller_get_protocol_engine_group(
							controller) <<
			  SCU_CONTEXT_COMMAND_PROTOCOL_ENGINE_GROUP_SHIFT) |
			 (scic_sds_port_get_index(target_port) <<
			  SCU_CONTEXT_COMMAND_LOGICAL_PORT_SHIFT) |
			 scic_sds_io_tag_get_index(sci_req->io_tag));
	} else {
		/*
		 * Build the task context now since we have already read
		 * the data.
		 * I/O tag index is not assigned because we have to wait
		 * until we get a TCi.
		 */
		sci_req->post_context =
			(SCU_CONTEXT_COMMAND_REQUEST_TYPE_POST_TC |
			 (scic_sds_controller_get_protocol_engine_group(
							controller) <<
			  SCU_CONTEXT_COMMAND_PROTOCOL_ENGINE_GROUP_SHIFT) |
			 (scic_sds_port_get_index(target_port) <<
			  SCU_CONTEXT_COMMAND_LOGICAL_PORT_SHIFT));
	}

	/*
	 * Copy the physical address for the command buffer to the SCU Task
	 * Context. We must offset the command buffer by 4 bytes because the
	 * first 4 bytes are transfered in the body of the TC.
	 */
	dma_addr = scic_io_request_get_dma_addr(sci_req,
						((char *) &sci_req->stp.cmd) +
						sizeof(u32));

	task_context->command_iu_upper = upper_32_bits(dma_addr);
	task_context->command_iu_lower = lower_32_bits(dma_addr);

	/* SATA Requests do not have a response buffer */
	task_context->response_iu_upper = 0;
	task_context->response_iu_lower = 0;
}
181
182 /**
183 *
184 * @sci_req:
185 *
186 * This method will perform any general sata request construction. What part of
187 * SATA IO request construction is general? none
188 */
static void scic_sds_stp_non_ncq_request_construct(
	struct scic_sds_request *sci_req)
{
	/* Non-NCQ STP requests are driven by a started-substate machine;
	 * flag that so the generic request code runs it. */
	sci_req->has_started_substate_machine = true;
}
194
195 /**
196 *
197 * @sci_req: This parameter specifies the request to be constructed as an
198 * optimized request.
199 * @optimized_task_type: This parameter specifies whether the request is to be
200 * an UDMA request or a NCQ request. - A value of 0 indicates UDMA. - A
201 * value of 1 indicates NCQ.
202 *
203 * This method will perform request construction common to all types of STP
204 * requests that are optimized by the silicon (i.e. UDMA, NCQ). This method
205 * returns an indication as to whether the construction was successful.
206 */
207 static void scic_sds_stp_optimized_request_construct(struct scic_sds_request *sci_req,
208 u8 optimized_task_type,
209 u32 len,
210 enum dma_data_direction dir)
211 {
212 struct scu_task_context *task_context = sci_req->task_context_buffer;
213
214 /* Build the STP task context structure */
215 scu_sata_reqeust_construct_task_context(sci_req, task_context);
216
217 /* Copy over the SGL elements */
218 scic_sds_request_build_sgl(sci_req);
219
220 /* Copy over the number of bytes to be transfered */
221 task_context->transfer_length_bytes = len;
222
223 if (dir == DMA_TO_DEVICE) {
224 /*
225 * The difference between the DMA IN and DMA OUT request task type
226 * values are consistent with the difference between FPDMA READ
227 * and FPDMA WRITE values. Add the supplied task type parameter
228 * to this difference to set the task type properly for this
229 * DATA OUT (WRITE) case. */
230 task_context->task_type = optimized_task_type + (SCU_TASK_TYPE_DMA_OUT
231 - SCU_TASK_TYPE_DMA_IN);
232 } else {
233 /*
234 * For the DATA IN (READ) case, simply save the supplied
235 * optimized task type. */
236 task_context->task_type = optimized_task_type;
237 }
238 }
239
240 /**
241 *
242 * @sci_req: This parameter specifies the request to be constructed.
243 *
244 * This method will construct the STP UDMA request and its associated TC data.
245 * This method returns an indication as to whether the construction was
246 * successful. SCI_SUCCESS Currently this method always returns this value.
247 */
248 enum sci_status scic_sds_stp_ncq_request_construct(struct scic_sds_request *sci_req,
249 u32 len,
250 enum dma_data_direction dir)
251 {
252 scic_sds_stp_optimized_request_construct(sci_req,
253 SCU_TASK_TYPE_FPDMAQ_READ,
254 len, dir);
255 return SCI_SUCCESS;
256 }
257
258 /**
259 * scu_stp_raw_request_construct_task_context -
260 * @sci_req: This parameter specifies the STP request object for which to
261 * construct a RAW command frame task context.
262 * @task_context: This parameter specifies the SCU specific task context buffer
263 * to construct.
264 *
265 * This method performs the operations common to all SATA/STP requests
266 * utilizing the raw frame method. none
267 */
static void scu_stp_raw_request_construct_task_context(
	struct scic_sds_stp_request *stp_req,
	struct scu_task_context *task_context)
{
	struct scic_sds_request *sci_req = to_sci_req(stp_req);

	/* Start from the common SATA/STP task context. */
	scu_sata_reqeust_construct_task_context(sci_req, task_context);

	task_context->control_frame = 0;
	task_context->priority = SCU_TASK_PRIORITY_NORMAL;
	task_context->task_type = SCU_TASK_TYPE_SATA_RAW_FRAME;
	task_context->type.stp.fis_type = FIS_REGH2D;
	/* First dword of the H2D FIS rides in the TC body, hence -sizeof(u32). */
	task_context->transfer_length_bytes = sizeof(struct host_to_dev_fis) - sizeof(u32);
}
282
/* Store the NCQ tag into the request's task context (STP FIS area). */
void scic_stp_io_request_set_ncq_tag(
	struct scic_sds_request *req,
	u16 ncq_tag)
{
	/**
	 * @note This could be made to return an error to the user if the user
	 * attempts to set the NCQ tag in the wrong state.
	 */
	req->task_context_buffer->type.stp.ncq_tag = ncq_tag;
}
293
294 /**
295 *
296 * @sci_req:
297 *
298 * Get the next SGL element from the request. - Check on which SGL element pair
299 * we are working - if working on SLG pair element A - advance to element B -
300 * else - check to see if there are more SGL element pairs for this IO request
301 * - if there are more SGL element pairs - advance to the next pair and return
302 * element A struct scu_sgl_element*
303 */
304 static struct scu_sgl_element *scic_sds_stp_request_pio_get_next_sgl(struct scic_sds_stp_request *stp_req)
305 {
306 struct scu_sgl_element *current_sgl;
307 struct scic_sds_request *sci_req = to_sci_req(stp_req);
308 struct scic_sds_request_pio_sgl *pio_sgl = &stp_req->type.pio.request_current;
309
310 if (pio_sgl->sgl_set == SCU_SGL_ELEMENT_PAIR_A) {
311 if (pio_sgl->sgl_pair->B.address_lower == 0 &&
312 pio_sgl->sgl_pair->B.address_upper == 0) {
313 current_sgl = NULL;
314 } else {
315 pio_sgl->sgl_set = SCU_SGL_ELEMENT_PAIR_B;
316 current_sgl = &pio_sgl->sgl_pair->B;
317 }
318 } else {
319 if (pio_sgl->sgl_pair->next_pair_lower == 0 &&
320 pio_sgl->sgl_pair->next_pair_upper == 0) {
321 current_sgl = NULL;
322 } else {
323 u64 phys_addr;
324
325 phys_addr = pio_sgl->sgl_pair->next_pair_upper;
326 phys_addr <<= 32;
327 phys_addr |= pio_sgl->sgl_pair->next_pair_lower;
328
329 pio_sgl->sgl_pair = scic_request_get_virt_addr(sci_req, phys_addr);
330 pio_sgl->sgl_set = SCU_SGL_ELEMENT_PAIR_A;
331 current_sgl = &pio_sgl->sgl_pair->A;
332 }
333 }
334
335 return current_sgl;
336 }
337
338 /**
339 *
340 * @sci_req:
341 * @completion_code:
342 *
343 * This method processes a TC completion. The expected TC completion is for
344 * the transmission of the H2D register FIS containing the SATA/STP non-data
345 * request. This method always successfully processes the TC completion.
346 * SCI_SUCCESS This value is always returned.
347 */
348 static enum sci_status scic_sds_stp_request_non_data_await_h2d_tc_completion_handler(
349 struct scic_sds_request *sci_req,
350 u32 completion_code)
351 {
352 switch (SCU_GET_COMPLETION_TL_STATUS(completion_code)) {
353 case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_GOOD):
354 scic_sds_request_set_status(
355 sci_req, SCU_TASK_DONE_GOOD, SCI_SUCCESS
356 );
357
358 sci_base_state_machine_change_state(
359 &sci_req->started_substate_machine,
360 SCIC_SDS_STP_REQUEST_STARTED_NON_DATA_AWAIT_D2H_SUBSTATE
361 );
362 break;
363
364 default:
365 /*
366 * All other completion status cause the IO to be complete. If a NAK
367 * was received, then it is up to the user to retry the request. */
368 scic_sds_request_set_status(
369 sci_req,
370 SCU_NORMALIZE_COMPLETION_STATUS(completion_code),
371 SCI_FAILURE_CONTROLLER_SPECIFIC_IO_ERR
372 );
373
374 sci_base_state_machine_change_state(
375 &sci_req->state_machine, SCI_BASE_REQUEST_STATE_COMPLETED);
376 break;
377 }
378
379 return SCI_SUCCESS;
380 }
381
382 /**
383 *
384 * @request: This parameter specifies the request for which a frame has been
385 * received.
386 * @frame_index: This parameter specifies the index of the frame that has been
387 * received.
388 *
389 * This method processes frames received from the target while waiting for a
390 * device to host register FIS. If a non-register FIS is received during this
391 * time, it is treated as a protocol violation from an IO perspective. Indicate
392 * if the received frame was processed successfully.
393 */
static enum sci_status scic_sds_stp_request_non_data_await_d2h_frame_handler(
	struct scic_sds_request *sci_req,
	u32 frame_index)
{
	enum sci_status status;
	struct dev_to_host_fis *frame_header;
	u32 *frame_buffer;
	struct scic_sds_stp_request *stp_req = &sci_req->stp.req;
	struct scic_sds_controller *scic = sci_req->owning_controller;

	/* Look up the header of the unsolicited frame we were handed. */
	status = scic_sds_unsolicited_frame_control_get_header(&scic->uf_control,
							       frame_index,
							       (void **)&frame_header);

	if (status != SCI_SUCCESS) {
		/* Note: the frame is NOT released on this path. */
		dev_err(scic_to_dev(sci_req->owning_controller),
			"%s: SCIC IO Request 0x%p could not get frame header "
			"for frame index %d, status %x\n",
			__func__, stp_req, frame_index, status);

		return status;
	}

	switch (frame_header->fis_type) {
	case FIS_REGD2H:
		/* Expected D2H register FIS: capture the SATA response. */
		scic_sds_unsolicited_frame_control_get_buffer(&scic->uf_control,
							      frame_index,
							      (void **)&frame_buffer);

		scic_sds_controller_copy_sata_response(&sci_req->stp.rsp,
						       frame_header,
						       frame_buffer);

		/* The command has completed with error */
		scic_sds_request_set_status(sci_req, SCU_TASK_DONE_CHECK_RESPONSE,
					    SCI_FAILURE_IO_RESPONSE_VALID);
		break;

	default:
		/* Any other FIS here is a protocol violation. */
		dev_warn(scic_to_dev(scic),
			 "%s: IO Request:0x%p Frame Id:%d protocol "
			  "violation occurred\n", __func__, stp_req,
			  frame_index);

		scic_sds_request_set_status(sci_req, SCU_TASK_DONE_UNEXP_FIS,
					    SCI_FAILURE_PROTOCOL_VIOLATION);
		break;
	}

	/* Either way the request is now complete. */
	sci_base_state_machine_change_state(&sci_req->state_machine,
					    SCI_BASE_REQUEST_STATE_COMPLETED);

	/* Frame has been decoded return it to the controller */
	scic_sds_controller_release_frame(scic, frame_index);

	return status;
}
451
452 /* --------------------------------------------------------------------------- */
453
/* Substate handler table for non-data STP requests: while awaiting the H2D
 * TC completion only abort and TC-completion events are handled; while
 * awaiting the D2H response only abort and frame-reception events are.
 */
static const struct scic_sds_io_request_state_handler scic_sds_stp_request_started_non_data_substate_handler_table[] = {
	[SCIC_SDS_STP_REQUEST_STARTED_NON_DATA_AWAIT_H2D_COMPLETION_SUBSTATE] = {
		.abort_handler		= scic_sds_request_started_state_abort_handler,
		.tc_completion_handler	= scic_sds_stp_request_non_data_await_h2d_tc_completion_handler,
	},
	[SCIC_SDS_STP_REQUEST_STARTED_NON_DATA_AWAIT_D2H_SUBSTATE] = {
		.abort_handler		= scic_sds_request_started_state_abort_handler,
		.frame_handler		= scic_sds_stp_request_non_data_await_d2h_frame_handler,
	}
};
464
465 static void scic_sds_stp_request_started_non_data_await_h2d_completion_enter(
466 void *object)
467 {
468 struct scic_sds_request *sci_req = object;
469
470 SET_STATE_HANDLER(
471 sci_req,
472 scic_sds_stp_request_started_non_data_substate_handler_table,
473 SCIC_SDS_STP_REQUEST_STARTED_NON_DATA_AWAIT_H2D_COMPLETION_SUBSTATE
474 );
475
476 scic_sds_remote_device_set_working_request(
477 sci_req->target_device, sci_req
478 );
479 }
480
481 static void scic_sds_stp_request_started_non_data_await_d2h_enter(void *object)
482 {
483 struct scic_sds_request *sci_req = object;
484
485 SET_STATE_HANDLER(
486 sci_req,
487 scic_sds_stp_request_started_non_data_substate_handler_table,
488 SCIC_SDS_STP_REQUEST_STARTED_NON_DATA_AWAIT_D2H_SUBSTATE
489 );
490 }
491
492 /* --------------------------------------------------------------------------- */
493
/* State table for the non-data started-substate machine: enter callbacks
 * for the await-H2D-completion and await-D2H substates.
 */
static const struct sci_base_state scic_sds_stp_request_started_non_data_substate_table[] = {
	[SCIC_SDS_STP_REQUEST_STARTED_NON_DATA_AWAIT_H2D_COMPLETION_SUBSTATE] = {
		.enter_state = scic_sds_stp_request_started_non_data_await_h2d_completion_enter,
	},
	[SCIC_SDS_STP_REQUEST_STARTED_NON_DATA_AWAIT_D2H_SUBSTATE] = {
		.enter_state = scic_sds_stp_request_started_non_data_await_d2h_enter,
	},
};
502
503 enum sci_status scic_sds_stp_non_data_request_construct(struct scic_sds_request *sci_req)
504 {
505 struct scic_sds_stp_request *stp_req = &sci_req->stp.req;
506
507 scic_sds_stp_non_ncq_request_construct(sci_req);
508
509 /* Build the STP task context structure */
510 scu_stp_raw_request_construct_task_context(stp_req, sci_req->task_context_buffer);
511
512 sci_base_state_machine_construct(&sci_req->started_substate_machine,
513 sci_req,
514 scic_sds_stp_request_started_non_data_substate_table,
515 SCIC_SDS_STP_REQUEST_STARTED_NON_DATA_AWAIT_H2D_COMPLETION_SUBSTATE);
516
517 return SCI_SUCCESS;
518 }
519
520 #define SCU_MAX_FRAME_BUFFER_SIZE 0x400 /* 1K is the maximum SCU frame data payload */
521
522 /* transmit DATA_FIS from (current sgl + offset) for input
523 * parameter length. current sgl and offset is alreay stored in the IO request
524 */
525 static enum sci_status scic_sds_stp_request_pio_data_out_trasmit_data_frame(
526 struct scic_sds_request *sci_req,
527 u32 length)
528 {
529 struct scic_sds_controller *scic = sci_req->owning_controller;
530 struct scic_sds_stp_request *stp_req = &sci_req->stp.req;
531 struct scu_task_context *task_context;
532 struct scu_sgl_element *current_sgl;
533
534 /* Recycle the TC and reconstruct it for sending out DATA FIS containing
535 * for the data from current_sgl+offset for the input length
536 */
537 task_context = scic_sds_controller_get_task_context_buffer(scic,
538 sci_req->io_tag);
539
540 if (stp_req->type.pio.request_current.sgl_set == SCU_SGL_ELEMENT_PAIR_A)
541 current_sgl = &stp_req->type.pio.request_current.sgl_pair->A;
542 else
543 current_sgl = &stp_req->type.pio.request_current.sgl_pair->B;
544
545 /* update the TC */
546 task_context->command_iu_upper = current_sgl->address_upper;
547 task_context->command_iu_lower = current_sgl->address_lower;
548 task_context->transfer_length_bytes = length;
549 task_context->type.stp.fis_type = FIS_DATA;
550
551 /* send the new TC out. */
552 return scic_controller_continue_io(sci_req);
553 }
554
555 static enum sci_status scic_sds_stp_request_pio_data_out_transmit_data(struct scic_sds_request *sci_req)
556 {
557
558 struct scu_sgl_element *current_sgl;
559 u32 sgl_offset;
560 u32 remaining_bytes_in_current_sgl = 0;
561 enum sci_status status = SCI_SUCCESS;
562 struct scic_sds_stp_request *stp_req = &sci_req->stp.req;
563
564 sgl_offset = stp_req->type.pio.request_current.sgl_offset;
565
566 if (stp_req->type.pio.request_current.sgl_set == SCU_SGL_ELEMENT_PAIR_A) {
567 current_sgl = &(stp_req->type.pio.request_current.sgl_pair->A);
568 remaining_bytes_in_current_sgl = stp_req->type.pio.request_current.sgl_pair->A.length - sgl_offset;
569 } else {
570 current_sgl = &(stp_req->type.pio.request_current.sgl_pair->B);
571 remaining_bytes_in_current_sgl = stp_req->type.pio.request_current.sgl_pair->B.length - sgl_offset;
572 }
573
574
575 if (stp_req->type.pio.pio_transfer_bytes > 0) {
576 if (stp_req->type.pio.pio_transfer_bytes >= remaining_bytes_in_current_sgl) {
577 /* recycle the TC and send the H2D Data FIS from (current sgl + sgl_offset) and length = remaining_bytes_in_current_sgl */
578 status = scic_sds_stp_request_pio_data_out_trasmit_data_frame(sci_req, remaining_bytes_in_current_sgl);
579 if (status == SCI_SUCCESS) {
580 stp_req->type.pio.pio_transfer_bytes -= remaining_bytes_in_current_sgl;
581
582 /* update the current sgl, sgl_offset and save for future */
583 current_sgl = scic_sds_stp_request_pio_get_next_sgl(stp_req);
584 sgl_offset = 0;
585 }
586 } else if (stp_req->type.pio.pio_transfer_bytes < remaining_bytes_in_current_sgl) {
587 /* recycle the TC and send the H2D Data FIS from (current sgl + sgl_offset) and length = type.pio.pio_transfer_bytes */
588 scic_sds_stp_request_pio_data_out_trasmit_data_frame(sci_req, stp_req->type.pio.pio_transfer_bytes);
589
590 if (status == SCI_SUCCESS) {
591 /* Sgl offset will be adjusted and saved for future */
592 sgl_offset += stp_req->type.pio.pio_transfer_bytes;
593 current_sgl->address_lower += stp_req->type.pio.pio_transfer_bytes;
594 stp_req->type.pio.pio_transfer_bytes = 0;
595 }
596 }
597 }
598
599 if (status == SCI_SUCCESS) {
600 stp_req->type.pio.request_current.sgl_offset = sgl_offset;
601 }
602
603 return status;
604 }
605
606 /**
607 *
608 * @stp_request: The request that is used for the SGL processing.
609 * @data_buffer: The buffer of data to be copied.
610 * @length: The length of the data transfer.
611 *
612 * Copy the data from the buffer for the length specified to the IO reqeust SGL
613 * specified data region. enum sci_status
614 */
static enum sci_status
scic_sds_stp_request_pio_data_in_copy_data_buffer(struct scic_sds_stp_request *stp_req,
						  u8 *data_buf, u32 len)
{
	struct scic_sds_request *sci_req;
	struct isci_request *ireq;
	u8 *src_addr;
	int copy_len;
	struct sas_task *task;
	struct scatterlist *sg;
	void *kaddr;
	int total_len = len;

	sci_req = to_sci_req(stp_req);
	ireq = sci_req_to_ireq(sci_req);
	task = isci_request_access_task(ireq);
	src_addr = data_buf;

	if (task->num_scatter > 0) {
		/* Walk the scatterlist, mapping each page and copying up to
		 * its length per iteration until all 'len' bytes are placed. */
		sg = task->scatter;

		while (total_len > 0) {
			struct page *page = sg_page(sg);

			/* NOTE(review): bounds each chunk by sg_dma_len(sg);
			 * presumably the DMA length equals the element length
			 * for these mappings — confirm. */
			copy_len = min_t(int, total_len, sg_dma_len(sg));
			kaddr = kmap_atomic(page, KM_IRQ0);
			memcpy(kaddr + sg->offset, src_addr, copy_len);
			kunmap_atomic(kaddr, KM_IRQ0);
			total_len -= copy_len;
			src_addr += copy_len;
			sg = sg_next(sg);
		}
	} else {
		/* No scatterlist: 'scatter' is a plain buffer pointer. */
		BUG_ON(task->total_xfer_len < total_len);
		memcpy(task->scatter, src_addr, total_len);
	}

	return SCI_SUCCESS;
}
654
655 /**
656 *
657 * @sci_req: The PIO DATA IN request that is to receive the data.
658 * @data_buffer: The buffer to copy from.
659 *
660 * Copy the data buffer to the io request data region. enum sci_status
661 */
662 static enum sci_status scic_sds_stp_request_pio_data_in_copy_data(
663 struct scic_sds_stp_request *sci_req,
664 u8 *data_buffer)
665 {
666 enum sci_status status;
667
668 /*
669 * If there is less than 1K remaining in the transfer request
670 * copy just the data for the transfer */
671 if (sci_req->type.pio.pio_transfer_bytes < SCU_MAX_FRAME_BUFFER_SIZE) {
672 status = scic_sds_stp_request_pio_data_in_copy_data_buffer(
673 sci_req, data_buffer, sci_req->type.pio.pio_transfer_bytes);
674
675 if (status == SCI_SUCCESS)
676 sci_req->type.pio.pio_transfer_bytes = 0;
677 } else {
678 /* We are transfering the whole frame so copy */
679 status = scic_sds_stp_request_pio_data_in_copy_data_buffer(
680 sci_req, data_buffer, SCU_MAX_FRAME_BUFFER_SIZE);
681
682 if (status == SCI_SUCCESS)
683 sci_req->type.pio.pio_transfer_bytes -= SCU_MAX_FRAME_BUFFER_SIZE;
684 }
685
686 return status;
687 }
688
689 /**
690 *
691 * @sci_req:
692 * @completion_code:
693 *
694 * enum sci_status
695 */
696 static enum sci_status scic_sds_stp_request_pio_await_h2d_completion_tc_completion_handler(
697 struct scic_sds_request *sci_req,
698 u32 completion_code)
699 {
700 enum sci_status status = SCI_SUCCESS;
701
702 switch (SCU_GET_COMPLETION_TL_STATUS(completion_code)) {
703 case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_GOOD):
704 scic_sds_request_set_status(
705 sci_req, SCU_TASK_DONE_GOOD, SCI_SUCCESS
706 );
707
708 sci_base_state_machine_change_state(
709 &sci_req->started_substate_machine,
710 SCIC_SDS_STP_REQUEST_STARTED_PIO_AWAIT_FRAME_SUBSTATE
711 );
712 break;
713
714 default:
715 /*
716 * All other completion status cause the IO to be complete. If a NAK
717 * was received, then it is up to the user to retry the request. */
718 scic_sds_request_set_status(
719 sci_req,
720 SCU_NORMALIZE_COMPLETION_STATUS(completion_code),
721 SCI_FAILURE_CONTROLLER_SPECIFIC_IO_ERR
722 );
723
724 sci_base_state_machine_change_state(
725 &sci_req->state_machine,
726 SCI_BASE_REQUEST_STATE_COMPLETED
727 );
728 break;
729 }
730
731 return status;
732 }
733
734 static enum sci_status scic_sds_stp_request_pio_await_frame_frame_handler(struct scic_sds_request *sci_req,
735 u32 frame_index)
736 {
737 struct scic_sds_controller *scic = sci_req->owning_controller;
738 struct scic_sds_stp_request *stp_req = &sci_req->stp.req;
739 struct isci_request *ireq = sci_req_to_ireq(sci_req);
740 struct sas_task *task = isci_request_access_task(ireq);
741 struct dev_to_host_fis *frame_header;
742 enum sci_status status;
743 u32 *frame_buffer;
744
745 status = scic_sds_unsolicited_frame_control_get_header(&scic->uf_control,
746 frame_index,
747 (void **)&frame_header);
748
749 if (status != SCI_SUCCESS) {
750 dev_err(scic_to_dev(scic),
751 "%s: SCIC IO Request 0x%p could not get frame header "
752 "for frame index %d, status %x\n",
753 __func__, stp_req, frame_index, status);
754 return status;
755 }
756
757 switch (frame_header->fis_type) {
758 case FIS_PIO_SETUP:
759 /* Get from the frame buffer the PIO Setup Data */
760 scic_sds_unsolicited_frame_control_get_buffer(&scic->uf_control,
761 frame_index,
762 (void **)&frame_buffer);
763
764 /* Get the data from the PIO Setup The SCU Hardware returns
765 * first word in the frame_header and the rest of the data is in
766 * the frame buffer so we need to back up one dword
767 */
768
769 /* transfer_count: first 16bits in the 4th dword */
770 stp_req->type.pio.pio_transfer_bytes = frame_buffer[3] & 0xffff;
771
772 /* ending_status: 4th byte in the 3rd dword */
773 stp_req->type.pio.ending_status = (frame_buffer[2] >> 24) & 0xff;
774
775 scic_sds_controller_copy_sata_response(&sci_req->stp.rsp,
776 frame_header,
777 frame_buffer);
778
779 sci_req->stp.rsp.status = stp_req->type.pio.ending_status;
780
781 /* The next state is dependent on whether the
782 * request was PIO Data-in or Data out
783 */
784 if (task->data_dir == DMA_FROM_DEVICE) {
785 sci_base_state_machine_change_state(&sci_req->started_substate_machine,
786 SCIC_SDS_STP_REQUEST_STARTED_PIO_DATA_IN_AWAIT_DATA_SUBSTATE);
787 } else if (task->data_dir == DMA_TO_DEVICE) {
788 /* Transmit data */
789 status = scic_sds_stp_request_pio_data_out_transmit_data(sci_req);
790 if (status != SCI_SUCCESS)
791 break;
792 sci_base_state_machine_change_state(&sci_req->started_substate_machine,
793 SCIC_SDS_STP_REQUEST_STARTED_PIO_DATA_OUT_TRANSMIT_DATA_SUBSTATE);
794 }
795 break;
796 case FIS_SETDEVBITS:
797 sci_base_state_machine_change_state(&sci_req->started_substate_machine,
798 SCIC_SDS_STP_REQUEST_STARTED_PIO_AWAIT_FRAME_SUBSTATE);
799 break;
800 case FIS_REGD2H:
801 if (frame_header->status & ATA_BUSY) {
802 /* Now why is the drive sending a D2H Register FIS when
803 * it is still busy? Do nothing since we are still in
804 * the right state.
805 */
806 dev_dbg(scic_to_dev(scic),
807 "%s: SCIC PIO Request 0x%p received "
808 "D2H Register FIS with BSY status "
809 "0x%x\n", __func__, stp_req,
810 frame_header->status);
811 break;
812 }
813
814 scic_sds_unsolicited_frame_control_get_buffer(&scic->uf_control,
815 frame_index,
816 (void **)&frame_buffer);
817
818 scic_sds_controller_copy_sata_response(&sci_req->stp.req,
819 frame_header,
820 frame_buffer);
821
822 scic_sds_request_set_status(sci_req,
823 SCU_TASK_DONE_CHECK_RESPONSE,
824 SCI_FAILURE_IO_RESPONSE_VALID);
825
826 sci_base_state_machine_change_state(&sci_req->state_machine,
827 SCI_BASE_REQUEST_STATE_COMPLETED);
828 break;
829 default:
830 /* FIXME: what do we do here? */
831 break;
832 }
833
834 /* Frame is decoded return it to the controller */
835 scic_sds_controller_release_frame(scic, frame_index);
836
837 return status;
838 }
839
/* Frame handler while a PIO data-in request is waiting for DATA FISes.
 * Copies each received DATA frame into the request's SGL; when the
 * transfer count reaches zero, completes the request or returns to the
 * await-frame substate depending on the PIO SETUP ending status.
 */
static enum sci_status scic_sds_stp_request_pio_data_in_await_data_frame_handler(struct scic_sds_request *sci_req,
										 u32 frame_index)
{
	enum sci_status status;
	struct dev_to_host_fis *frame_header;
	struct sata_fis_data *frame_buffer;
	struct scic_sds_stp_request *stp_req = &sci_req->stp.req;
	struct scic_sds_controller *scic = sci_req->owning_controller;

	status = scic_sds_unsolicited_frame_control_get_header(&scic->uf_control,
							       frame_index,
							       (void **)&frame_header);

	if (status != SCI_SUCCESS) {
		dev_err(scic_to_dev(scic),
			"%s: SCIC IO Request 0x%p could not get frame header "
			"for frame index %d, status %x\n",
			__func__, stp_req, frame_index, status);
		return status;
	}

	if (frame_header->fis_type == FIS_DATA) {
		if (stp_req->type.pio.request_current.sgl_pair == NULL) {
			/* No SGL to copy into: park the frame (it is NOT
			 * released here) and treat the transfer as drained. */
			sci_req->saved_rx_frame_index = frame_index;
			stp_req->type.pio.pio_transfer_bytes = 0;
		} else {
			scic_sds_unsolicited_frame_control_get_buffer(&scic->uf_control,
								      frame_index,
								      (void **)&frame_buffer);

			status = scic_sds_stp_request_pio_data_in_copy_data(stp_req,
									    (u8 *)frame_buffer);

			/* Frame is decoded return it to the controller */
			scic_sds_controller_release_frame(scic, frame_index);
		}

		/* Check for the end of the transfer, are there more
		 * bytes remaining for this data transfer
		 */
		if (status != SCI_SUCCESS ||
		    stp_req->type.pio.pio_transfer_bytes != 0)
			return status;

		if ((stp_req->type.pio.ending_status & ATA_BUSY) == 0) {
			/* Device no longer busy: the request is done. */
			scic_sds_request_set_status(sci_req,
						    SCU_TASK_DONE_CHECK_RESPONSE,
						    SCI_FAILURE_IO_RESPONSE_VALID);

			sci_base_state_machine_change_state(&sci_req->state_machine,
							    SCI_BASE_REQUEST_STATE_COMPLETED);
		} else {
			/* More PIO phases expected: wait for the next
			 * PIO SETUP / D2H frame. */
			sci_base_state_machine_change_state(&sci_req->started_substate_machine,
							    SCIC_SDS_STP_REQUEST_STARTED_PIO_AWAIT_FRAME_SUBSTATE);
		}
	} else {
		/* Anything other than a DATA FIS here is fatal to the IO. */
		dev_err(scic_to_dev(scic),
			"%s: SCIC PIO Request 0x%p received frame %d "
			"with fis type 0x%02x when expecting a data "
			"fis.\n", __func__, stp_req, frame_index,
			frame_header->fis_type);

		scic_sds_request_set_status(sci_req,
					    SCU_TASK_DONE_GOOD,
					    SCI_FAILURE_IO_REQUIRES_SCSI_ABORT);

		sci_base_state_machine_change_state(&sci_req->state_machine,
						    SCI_BASE_REQUEST_STATE_COMPLETED);

		/* Frame is decoded return it to the controller */
		scic_sds_controller_release_frame(scic, frame_index);
	}

	return status;
}
915
916
917 /**
918 *
919 * @sci_req:
920 * @completion_code:
921 *
922 * enum sci_status
923 */
924 static enum sci_status scic_sds_stp_request_pio_data_out_await_data_transmit_completion_tc_completion_handler(
925
926 struct scic_sds_request *sci_req,
927 u32 completion_code)
928 {
929 enum sci_status status = SCI_SUCCESS;
930 bool all_frames_transferred = false;
931 struct scic_sds_stp_request *stp_req = &sci_req->stp.req;
932
933 switch (SCU_GET_COMPLETION_TL_STATUS(completion_code)) {
934 case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_GOOD):
935 /* Transmit data */
936 if (stp_req->type.pio.pio_transfer_bytes != 0) {
937 status = scic_sds_stp_request_pio_data_out_transmit_data(sci_req);
938 if (status == SCI_SUCCESS) {
939 if (stp_req->type.pio.pio_transfer_bytes == 0)
940 all_frames_transferred = true;
941 }
942 } else if (stp_req->type.pio.pio_transfer_bytes == 0) {
943 /*
944 * this will happen if the all data is written at the
945 * first time after the pio setup fis is received
946 */
947 all_frames_transferred = true;
948 }
949
950 /* all data transferred. */
951 if (all_frames_transferred) {
952 /*
953 * Change the state to SCIC_SDS_STP_REQUEST_STARTED_PIO_DATA_IN_AWAIT_FRAME_SUBSTATE
954 * and wait for PIO_SETUP fis / or D2H REg fis. */
955 sci_base_state_machine_change_state(
956 &sci_req->started_substate_machine,
957 SCIC_SDS_STP_REQUEST_STARTED_PIO_AWAIT_FRAME_SUBSTATE
958 );
959 }
960 break;
961
962 default:
963 /*
964 * All other completion status cause the IO to be complete. If a NAK
965 * was received, then it is up to the user to retry the request. */
966 scic_sds_request_set_status(
967 sci_req,
968 SCU_NORMALIZE_COMPLETION_STATUS(completion_code),
969 SCI_FAILURE_CONTROLLER_SPECIFIC_IO_ERR
970 );
971
972 sci_base_state_machine_change_state(
973 &sci_req->state_machine,
974 SCI_BASE_REQUEST_STATE_COMPLETED
975 );
976 break;
977 }
978
979 return status;
980 }
981
982 /**
983 *
984 * @request: This is the request which is receiving the event.
985 * @event_code: This is the event code that the request on which the request is
986 * expected to take action.
987 *
988 * This method will handle any link layer events while waiting for the data
989 * frame. enum sci_status SCI_SUCCESS SCI_FAILURE
990 */
991 static enum sci_status scic_sds_stp_request_pio_data_in_await_data_event_handler(
992 struct scic_sds_request *request,
993 u32 event_code)
994 {
995 enum sci_status status;
996
997 switch (scu_get_event_specifier(event_code)) {
998 case SCU_TASK_DONE_CRC_ERR << SCU_EVENT_SPECIFIC_CODE_SHIFT:
999 /*
1000 * We are waiting for data and the SCU has R_ERR the data frame.
1001 * Go back to waiting for the D2H Register FIS */
1002 sci_base_state_machine_change_state(
1003 &request->started_substate_machine,
1004 SCIC_SDS_STP_REQUEST_STARTED_PIO_AWAIT_FRAME_SUBSTATE
1005 );
1006
1007 status = SCI_SUCCESS;
1008 break;
1009
1010 default:
1011 dev_err(scic_to_dev(request->owning_controller),
1012 "%s: SCIC PIO Request 0x%p received unexpected "
1013 "event 0x%08x\n",
1014 __func__, request, event_code);
1015
1016 /* / @todo Should we fail the PIO request when we get an unexpected event? */
1017 status = SCI_FAILURE;
1018 break;
1019 }
1020
1021 return status;
1022 }
1023
1024 /* --------------------------------------------------------------------------- */
1025
/* Per-substate handler dispatch for a started PIO request: selects which
 * abort, TC-completion, event and frame handlers are active in each PIO
 * substate.  Installed via SET_STATE_HANDLER in the substate enter routines.
 */
static const struct scic_sds_io_request_state_handler scic_sds_stp_request_started_pio_substate_handler_table[] = {
	[SCIC_SDS_STP_REQUEST_STARTED_PIO_AWAIT_H2D_COMPLETION_SUBSTATE] = {
		.abort_handler		= scic_sds_request_started_state_abort_handler,
		.tc_completion_handler	= scic_sds_stp_request_pio_await_h2d_completion_tc_completion_handler,
	},
	[SCIC_SDS_STP_REQUEST_STARTED_PIO_AWAIT_FRAME_SUBSTATE] = {
		.abort_handler		= scic_sds_request_started_state_abort_handler,
		.frame_handler		= scic_sds_stp_request_pio_await_frame_frame_handler
	},
	[SCIC_SDS_STP_REQUEST_STARTED_PIO_DATA_IN_AWAIT_DATA_SUBSTATE] = {
		.abort_handler		= scic_sds_request_started_state_abort_handler,
		.event_handler		= scic_sds_stp_request_pio_data_in_await_data_event_handler,
		.frame_handler		= scic_sds_stp_request_pio_data_in_await_data_frame_handler
	},
	[SCIC_SDS_STP_REQUEST_STARTED_PIO_DATA_OUT_TRANSMIT_DATA_SUBSTATE] = {
		.abort_handler		= scic_sds_request_started_state_abort_handler,
		.tc_completion_handler	= scic_sds_stp_request_pio_data_out_await_data_transmit_completion_tc_completion_handler,
	}
};
1045
1046 static void scic_sds_stp_request_started_pio_await_h2d_completion_enter(
1047 void *object)
1048 {
1049 struct scic_sds_request *sci_req = object;
1050
1051 SET_STATE_HANDLER(
1052 sci_req,
1053 scic_sds_stp_request_started_pio_substate_handler_table,
1054 SCIC_SDS_STP_REQUEST_STARTED_PIO_AWAIT_H2D_COMPLETION_SUBSTATE
1055 );
1056
1057 scic_sds_remote_device_set_working_request(
1058 sci_req->target_device, sci_req);
1059 }
1060
1061 static void scic_sds_stp_request_started_pio_await_frame_enter(void *object)
1062 {
1063 struct scic_sds_request *sci_req = object;
1064
1065 SET_STATE_HANDLER(
1066 sci_req,
1067 scic_sds_stp_request_started_pio_substate_handler_table,
1068 SCIC_SDS_STP_REQUEST_STARTED_PIO_AWAIT_FRAME_SUBSTATE
1069 );
1070 }
1071
1072 static void scic_sds_stp_request_started_pio_data_in_await_data_enter(
1073 void *object)
1074 {
1075 struct scic_sds_request *sci_req = object;
1076
1077 SET_STATE_HANDLER(
1078 sci_req,
1079 scic_sds_stp_request_started_pio_substate_handler_table,
1080 SCIC_SDS_STP_REQUEST_STARTED_PIO_DATA_IN_AWAIT_DATA_SUBSTATE
1081 );
1082 }
1083
1084 static void scic_sds_stp_request_started_pio_data_out_transmit_data_enter(
1085 void *object)
1086 {
1087 struct scic_sds_request *sci_req = object;
1088
1089 SET_STATE_HANDLER(
1090 sci_req,
1091 scic_sds_stp_request_started_pio_substate_handler_table,
1092 SCIC_SDS_STP_REQUEST_STARTED_PIO_DATA_OUT_TRANSMIT_DATA_SUBSTATE
1093 );
1094 }
1095
1096 /* --------------------------------------------------------------------------- */
1097
/* State table for the started-PIO substate machine: maps each PIO substate
 * to its enter routine (which installs the matching handler set).
 */
static const struct sci_base_state scic_sds_stp_request_started_pio_substate_table[] = {
	[SCIC_SDS_STP_REQUEST_STARTED_PIO_AWAIT_H2D_COMPLETION_SUBSTATE] = {
		.enter_state = scic_sds_stp_request_started_pio_await_h2d_completion_enter,
	},
	[SCIC_SDS_STP_REQUEST_STARTED_PIO_AWAIT_FRAME_SUBSTATE] = {
		.enter_state = scic_sds_stp_request_started_pio_await_frame_enter,
	},
	[SCIC_SDS_STP_REQUEST_STARTED_PIO_DATA_IN_AWAIT_DATA_SUBSTATE] = {
		.enter_state = scic_sds_stp_request_started_pio_data_in_await_data_enter,
	},
	[SCIC_SDS_STP_REQUEST_STARTED_PIO_DATA_OUT_TRANSMIT_DATA_SUBSTATE] = {
		.enter_state = scic_sds_stp_request_started_pio_data_out_transmit_data_enter,
	}
};
1112
/**
 * scic_sds_stp_pio_request_construct() - construct a SATA/STP PIO request.
 * @sci_req: the request to construct.
 * @copy_rx_frame: true if received data should be copied into the request's
 *	SGL buffer; false if the user will consume the raw unsolicited frame.
 *
 * Builds the raw H2D task context, resets the PIO bookkeeping, selects the
 * SGL copy destination (or none) and starts the PIO substate machine in the
 * await-H2D-completion substate.  Returns SCI_SUCCESS.
 */
enum sci_status
scic_sds_stp_pio_request_construct(struct scic_sds_request *sci_req,
				   bool copy_rx_frame)
{
	struct scic_sds_stp_request *stp_req = &sci_req->stp.req;
	struct scic_sds_stp_pio_request *pio = &stp_req->type.pio;

	scic_sds_stp_non_ncq_request_construct(sci_req);

	scu_stp_raw_request_construct_task_context(stp_req,
						   sci_req->task_context_buffer);

	/* NOTE(review): the PIO handlers above track progress via
	 * pio_transfer_bytes; confirm current_transfer_bytes is a distinct
	 * field that also needs clearing here.
	 */
	pio->current_transfer_bytes = 0;
	pio->ending_error = 0;
	pio->ending_status = 0;

	pio->request_current.sgl_offset = 0;
	pio->request_current.sgl_set = SCU_SGL_ELEMENT_PAIR_A;

	if (copy_rx_frame) {
		scic_sds_request_build_sgl(sci_req);
		/* Since the IO request copy of the TC contains the same data as
		 * the actual TC this pointer is valid for either.
		 */
		pio->request_current.sgl_pair = &sci_req->task_context_buffer->sgl_pair_ab;
	} else {
		/* The user does not want the data copied to the SGL buffer location */
		pio->request_current.sgl_pair = NULL;
	}

	sci_base_state_machine_construct(&sci_req->started_substate_machine,
					 sci_req,
					 scic_sds_stp_request_started_pio_substate_table,
					 SCIC_SDS_STP_REQUEST_STARTED_PIO_AWAIT_H2D_COMPLETION_SUBSTATE);

	return SCI_SUCCESS;
}
1150
/* Record the final SCU/SCI completion status for a UDMA request and move its
 * base state machine to COMPLETED.
 */
static void scic_sds_stp_request_udma_complete_request(
	struct scic_sds_request *request,
	u32 scu_status,
	enum sci_status sci_status)
{
	scic_sds_request_set_status(request, scu_status, sci_status);
	sci_base_state_machine_change_state(&request->state_machine,
					    SCI_BASE_REQUEST_STATE_COMPLETED);
}
1160
1161 static enum sci_status scic_sds_stp_request_udma_general_frame_handler(struct scic_sds_request *sci_req,
1162 u32 frame_index)
1163 {
1164 struct scic_sds_controller *scic = sci_req->owning_controller;
1165 struct dev_to_host_fis *frame_header;
1166 enum sci_status status;
1167 u32 *frame_buffer;
1168
1169 status = scic_sds_unsolicited_frame_control_get_header(&scic->uf_control,
1170 frame_index,
1171 (void **)&frame_header);
1172
1173 if ((status == SCI_SUCCESS) &&
1174 (frame_header->fis_type == FIS_REGD2H)) {
1175 scic_sds_unsolicited_frame_control_get_buffer(&scic->uf_control,
1176 frame_index,
1177 (void **)&frame_buffer);
1178
1179 scic_sds_controller_copy_sata_response(&sci_req->stp.rsp,
1180 frame_header,
1181 frame_buffer);
1182 }
1183
1184 scic_sds_controller_release_frame(scic, frame_index);
1185
1186 return status;
1187 }
1188
/**
 * scic_sds_stp_request_udma_await_tc_completion_tc_completion_handler() -
 * process the task context completion for a started UDMA request.
 * @sci_req: request receiving the completion.
 * @completion_code: raw SCU completion code.
 *
 * SCU_TASK_DONE_GOOD completes the IO successfully.  For UNEXP_FIS/REG_ERR
 * the D2H register FIS may already be in the response buffer (complete now)
 * or must still be awaited (switch to the await-D2H substate).  A set of
 * link-level errors suspends the remote device and then falls through to the
 * default failure path.  Returns SCI_SUCCESS (status is never modified).
 */
static enum sci_status scic_sds_stp_request_udma_await_tc_completion_tc_completion_handler(
	struct scic_sds_request *sci_req,
	u32 completion_code)
{
	enum sci_status status = SCI_SUCCESS;

	switch (SCU_GET_COMPLETION_TL_STATUS(completion_code)) {
	case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_GOOD):
		scic_sds_stp_request_udma_complete_request(sci_req,
							   SCU_TASK_DONE_GOOD,
							   SCI_SUCCESS);
		break;
	case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_UNEXP_FIS):
	case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_REG_ERR):
		/*
		 * We must check the response buffer to see if the D2H Register FIS was
		 * received before we got the TC completion. */
		if (sci_req->stp.rsp.fis_type == FIS_REGD2H) {
			scic_sds_remote_device_suspend(sci_req->target_device,
				SCU_EVENT_SPECIFIC(SCU_NORMALIZE_COMPLETION_STATUS(completion_code)));

			scic_sds_stp_request_udma_complete_request(sci_req,
								   SCU_TASK_DONE_CHECK_RESPONSE,
								   SCI_FAILURE_IO_RESPONSE_VALID);
		} else {
			/*
			 * If we have an error completion status for the TC then we can expect a
			 * D2H register FIS from the device so we must change state to wait for it */
			sci_base_state_machine_change_state(&sci_req->started_substate_machine,
				SCIC_SDS_STP_REQUEST_STARTED_UDMA_AWAIT_D2H_REG_FIS_SUBSTATE);
		}
		break;

	/*
	 * / @todo Check to see if any of these completion status need to wait for
	 * / the device to host register fis. */
	/* / @todo We can retry the command for SCU_TASK_DONE_CMD_LL_R_ERR - this comes only for B0 */
	case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_INV_FIS_LEN):
	case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_MAX_PLD_ERR):
	case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_LL_R_ERR):
	case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_CMD_LL_R_ERR):
	case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_CRC_ERR):
		scic_sds_remote_device_suspend(sci_req->target_device,
			SCU_EVENT_SPECIFIC(SCU_NORMALIZE_COMPLETION_STATUS(completion_code)));
	/* Fall through to the default case */
	default:
		/* All other completion status cause the IO to be complete. */
		scic_sds_stp_request_udma_complete_request(sci_req,
			SCU_NORMALIZE_COMPLETION_STATUS(completion_code),
			SCI_FAILURE_CONTROLLER_SPECIFIC_IO_ERR);
		break;
	}

	return status;
}
1244
1245 static enum sci_status scic_sds_stp_request_udma_await_d2h_reg_fis_frame_handler(
1246 struct scic_sds_request *sci_req,
1247 u32 frame_index)
1248 {
1249 enum sci_status status;
1250
1251 /* Use the general frame handler to copy the resposne data */
1252 status = scic_sds_stp_request_udma_general_frame_handler(sci_req, frame_index);
1253
1254 if (status != SCI_SUCCESS)
1255 return status;
1256
1257 scic_sds_stp_request_udma_complete_request(sci_req,
1258 SCU_TASK_DONE_CHECK_RESPONSE,
1259 SCI_FAILURE_IO_RESPONSE_VALID);
1260
1261 return status;
1262 }
1263
1264 /* --------------------------------------------------------------------------- */
1265
/* Per-substate handler dispatch for a started UDMA request: which abort,
 * TC-completion and frame handlers apply in each UDMA substate.
 */
static const struct scic_sds_io_request_state_handler scic_sds_stp_request_started_udma_substate_handler_table[] = {
	[SCIC_SDS_STP_REQUEST_STARTED_UDMA_AWAIT_TC_COMPLETION_SUBSTATE] = {
		.abort_handler		= scic_sds_request_started_state_abort_handler,
		.tc_completion_handler	= scic_sds_stp_request_udma_await_tc_completion_tc_completion_handler,
		.frame_handler		= scic_sds_stp_request_udma_general_frame_handler,
	},
	[SCIC_SDS_STP_REQUEST_STARTED_UDMA_AWAIT_D2H_REG_FIS_SUBSTATE] = {
		.abort_handler		= scic_sds_request_started_state_abort_handler,
		.frame_handler		= scic_sds_stp_request_udma_await_d2h_reg_fis_frame_handler,
	},
};
1277
1278 static void scic_sds_stp_request_started_udma_await_tc_completion_enter(
1279 void *object)
1280 {
1281 struct scic_sds_request *sci_req = object;
1282
1283 SET_STATE_HANDLER(
1284 sci_req,
1285 scic_sds_stp_request_started_udma_substate_handler_table,
1286 SCIC_SDS_STP_REQUEST_STARTED_UDMA_AWAIT_TC_COMPLETION_SUBSTATE
1287 );
1288 }
1289
1290 /**
1291 *
1292 *
1293 * This state is entered when there is an TC completion failure. The hardware
1294 * received an unexpected condition while processing the IO request and now
1295 * will UF the D2H register FIS to complete the IO.
1296 */
1297 static void scic_sds_stp_request_started_udma_await_d2h_reg_fis_enter(
1298 void *object)
1299 {
1300 struct scic_sds_request *sci_req = object;
1301
1302 SET_STATE_HANDLER(
1303 sci_req,
1304 scic_sds_stp_request_started_udma_substate_handler_table,
1305 SCIC_SDS_STP_REQUEST_STARTED_UDMA_AWAIT_D2H_REG_FIS_SUBSTATE
1306 );
1307 }
1308
1309 /* --------------------------------------------------------------------------- */
1310
/* State table for the started-UDMA substate machine: maps each UDMA substate
 * to its enter routine.
 */
static const struct sci_base_state scic_sds_stp_request_started_udma_substate_table[] = {
	[SCIC_SDS_STP_REQUEST_STARTED_UDMA_AWAIT_TC_COMPLETION_SUBSTATE] = {
		.enter_state = scic_sds_stp_request_started_udma_await_tc_completion_enter,
	},
	[SCIC_SDS_STP_REQUEST_STARTED_UDMA_AWAIT_D2H_REG_FIS_SUBSTATE] = {
		.enter_state = scic_sds_stp_request_started_udma_await_d2h_reg_fis_enter,
	},
};
1319
1320 enum sci_status scic_sds_stp_udma_request_construct(struct scic_sds_request *sci_req,
1321 u32 len,
1322 enum dma_data_direction dir)
1323 {
1324 scic_sds_stp_non_ncq_request_construct(sci_req);
1325
1326 scic_sds_stp_optimized_request_construct(sci_req, SCU_TASK_TYPE_DMA_IN,
1327 len, dir);
1328
1329 sci_base_state_machine_construct(
1330 &sci_req->started_substate_machine,
1331 sci_req,
1332 scic_sds_stp_request_started_udma_substate_table,
1333 SCIC_SDS_STP_REQUEST_STARTED_UDMA_AWAIT_TC_COMPLETION_SUBSTATE
1334 );
1335
1336 return SCI_SUCCESS;
1337 }
1338
1339 /**
1340 *
1341 * @sci_req:
1342 * @completion_code:
1343 *
1344 * This method processes a TC completion. The expected TC completion is for
1345 * the transmission of the H2D register FIS containing the SATA/STP non-data
1346 * request. This method always successfully processes the TC completion.
1347 * SCI_SUCCESS This value is always returned.
1348 */
1349 static enum sci_status scic_sds_stp_request_soft_reset_await_h2d_asserted_tc_completion_handler(
1350 struct scic_sds_request *sci_req,
1351 u32 completion_code)
1352 {
1353 switch (SCU_GET_COMPLETION_TL_STATUS(completion_code)) {
1354 case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_GOOD):
1355 scic_sds_request_set_status(
1356 sci_req, SCU_TASK_DONE_GOOD, SCI_SUCCESS
1357 );
1358
1359 sci_base_state_machine_change_state(
1360 &sci_req->started_substate_machine,
1361 SCIC_SDS_STP_REQUEST_STARTED_SOFT_RESET_AWAIT_H2D_DIAGNOSTIC_COMPLETION_SUBSTATE
1362 );
1363 break;
1364
1365 default:
1366 /*
1367 * All other completion status cause the IO to be complete. If a NAK
1368 * was received, then it is up to the user to retry the request. */
1369 scic_sds_request_set_status(
1370 sci_req,
1371 SCU_NORMALIZE_COMPLETION_STATUS(completion_code),
1372 SCI_FAILURE_CONTROLLER_SPECIFIC_IO_ERR
1373 );
1374
1375 sci_base_state_machine_change_state(
1376 &sci_req->state_machine, SCI_BASE_REQUEST_STATE_COMPLETED);
1377 break;
1378 }
1379
1380 return SCI_SUCCESS;
1381 }
1382
1383 /**
1384 *
1385 * @sci_req:
1386 * @completion_code:
1387 *
1388 * This method processes a TC completion. The expected TC completion is for
1389 * the transmission of the H2D register FIS containing the SATA/STP non-data
1390 * request. This method always successfully processes the TC completion.
1391 * SCI_SUCCESS This value is always returned.
1392 */
1393 static enum sci_status scic_sds_stp_request_soft_reset_await_h2d_diagnostic_tc_completion_handler(
1394 struct scic_sds_request *sci_req,
1395 u32 completion_code)
1396 {
1397 switch (SCU_GET_COMPLETION_TL_STATUS(completion_code)) {
1398 case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_GOOD):
1399 scic_sds_request_set_status(
1400 sci_req, SCU_TASK_DONE_GOOD, SCI_SUCCESS
1401 );
1402
1403 sci_base_state_machine_change_state(
1404 &sci_req->started_substate_machine,
1405 SCIC_SDS_STP_REQUEST_STARTED_SOFT_RESET_AWAIT_D2H_RESPONSE_FRAME_SUBSTATE
1406 );
1407 break;
1408
1409 default:
1410 /*
1411 * All other completion status cause the IO to be complete. If a NAK
1412 * was received, then it is up to the user to retry the request. */
1413 scic_sds_request_set_status(
1414 sci_req,
1415 SCU_NORMALIZE_COMPLETION_STATUS(completion_code),
1416 SCI_FAILURE_CONTROLLER_SPECIFIC_IO_ERR
1417 );
1418
1419 sci_base_state_machine_change_state(&sci_req->state_machine,
1420 SCI_BASE_REQUEST_STATE_COMPLETED);
1421 break;
1422 }
1423
1424 return SCI_SUCCESS;
1425 }
1426
1427 /**
1428 *
1429 * @request: This parameter specifies the request for which a frame has been
1430 * received.
1431 * @frame_index: This parameter specifies the index of the frame that has been
1432 * received.
1433 *
1434 * This method processes frames received from the target while waiting for a
1435 * device to host register FIS. If a non-register FIS is received during this
1436 * time, it is treated as a protocol violation from an IO perspective. Indicate
1437 * if the received frame was processed successfully.
1438 */
1439 static enum sci_status scic_sds_stp_request_soft_reset_await_d2h_frame_handler(
1440 struct scic_sds_request *sci_req,
1441 u32 frame_index)
1442 {
1443 enum sci_status status;
1444 struct dev_to_host_fis *frame_header;
1445 u32 *frame_buffer;
1446 struct scic_sds_stp_request *stp_req = &sci_req->stp.req;
1447 struct scic_sds_controller *scic = sci_req->owning_controller;
1448
1449 status = scic_sds_unsolicited_frame_control_get_header(&scic->uf_control,
1450 frame_index,
1451 (void **)&frame_header);
1452 if (status != SCI_SUCCESS) {
1453 dev_err(scic_to_dev(scic),
1454 "%s: SCIC IO Request 0x%p could not get frame header "
1455 "for frame index %d, status %x\n",
1456 __func__, stp_req, frame_index, status);
1457 return status;
1458 }
1459
1460 switch (frame_header->fis_type) {
1461 case FIS_REGD2H:
1462 scic_sds_unsolicited_frame_control_get_buffer(&scic->uf_control,
1463 frame_index,
1464 (void **)&frame_buffer);
1465
1466 scic_sds_controller_copy_sata_response(&sci_req->stp.rsp,
1467 frame_header,
1468 frame_buffer);
1469
1470 /* The command has completed with error */
1471 scic_sds_request_set_status(sci_req,
1472 SCU_TASK_DONE_CHECK_RESPONSE,
1473 SCI_FAILURE_IO_RESPONSE_VALID);
1474 break;
1475
1476 default:
1477 dev_warn(scic_to_dev(scic),
1478 "%s: IO Request:0x%p Frame Id:%d protocol "
1479 "violation occurred\n", __func__, stp_req,
1480 frame_index);
1481
1482 scic_sds_request_set_status(sci_req, SCU_TASK_DONE_UNEXP_FIS,
1483 SCI_FAILURE_PROTOCOL_VIOLATION);
1484 break;
1485 }
1486
1487 sci_base_state_machine_change_state(&sci_req->state_machine,
1488 SCI_BASE_REQUEST_STATE_COMPLETED);
1489
1490 /* Frame has been decoded return it to the controller */
1491 scic_sds_controller_release_frame(scic, frame_index);
1492
1493 return status;
1494 }
1495
1496 /* --------------------------------------------------------------------------- */
1497
/* Per-substate handler dispatch for a started soft-reset request: which
 * abort, TC-completion and frame handlers apply in each soft-reset substate.
 */
static const struct scic_sds_io_request_state_handler scic_sds_stp_request_started_soft_reset_substate_handler_table[] = {
	[SCIC_SDS_STP_REQUEST_STARTED_SOFT_RESET_AWAIT_H2D_ASSERTED_COMPLETION_SUBSTATE] = {
		.abort_handler		= scic_sds_request_started_state_abort_handler,
		.tc_completion_handler	= scic_sds_stp_request_soft_reset_await_h2d_asserted_tc_completion_handler,
	},
	[SCIC_SDS_STP_REQUEST_STARTED_SOFT_RESET_AWAIT_H2D_DIAGNOSTIC_COMPLETION_SUBSTATE] = {
		.abort_handler		= scic_sds_request_started_state_abort_handler,
		.tc_completion_handler	= scic_sds_stp_request_soft_reset_await_h2d_diagnostic_tc_completion_handler,
	},
	[SCIC_SDS_STP_REQUEST_STARTED_SOFT_RESET_AWAIT_D2H_RESPONSE_FRAME_SUBSTATE] = {
		.abort_handler		= scic_sds_request_started_state_abort_handler,
		.frame_handler		= scic_sds_stp_request_soft_reset_await_d2h_frame_handler,
	},
};
1512
1513 static void scic_sds_stp_request_started_soft_reset_await_h2d_asserted_completion_enter(
1514 void *object)
1515 {
1516 struct scic_sds_request *sci_req = object;
1517
1518 SET_STATE_HANDLER(
1519 sci_req,
1520 scic_sds_stp_request_started_soft_reset_substate_handler_table,
1521 SCIC_SDS_STP_REQUEST_STARTED_SOFT_RESET_AWAIT_H2D_ASSERTED_COMPLETION_SUBSTATE
1522 );
1523
1524 scic_sds_remote_device_set_working_request(
1525 sci_req->target_device, sci_req
1526 );
1527 }
1528
/* Enter routine for the diagnostic (SRST de-assert) phase: clear the SRST
 * bit and the TC control-frame flag, re-post the request so the second H2D
 * FIS is sent, and install the diagnostic-completion handlers.
 */
static void scic_sds_stp_request_started_soft_reset_await_h2d_diagnostic_completion_enter(
	void *object)
{
	struct scic_sds_request *sci_req = object;
	struct scu_task_context *task_context;
	struct host_to_dev_fis *h2d_fis;
	enum sci_status status;

	/* Clear the SRST bit */
	h2d_fis = &sci_req->stp.cmd;
	h2d_fis->control = 0;

	/* Clear the TC control bit */
	task_context = scic_sds_controller_get_task_context_buffer(
		sci_req->owning_controller, sci_req->io_tag);
	task_context->control_frame = 0;

	status = scic_controller_continue_io(sci_req);
	if (status == SCI_SUCCESS) {
		SET_STATE_HANDLER(
			sci_req,
			scic_sds_stp_request_started_soft_reset_substate_handler_table,
			SCIC_SDS_STP_REQUEST_STARTED_SOFT_RESET_AWAIT_H2D_DIAGNOSTIC_COMPLETION_SUBSTATE
			);
	}
	/* NOTE(review): a continue_io failure is silently ignored here and the
	 * previous substate handlers stay installed -- confirm this is the
	 * intended error path.
	 */
}
1555
1556 static void scic_sds_stp_request_started_soft_reset_await_d2h_response_enter(
1557 void *object)
1558 {
1559 struct scic_sds_request *sci_req = object;
1560
1561 SET_STATE_HANDLER(
1562 sci_req,
1563 scic_sds_stp_request_started_soft_reset_substate_handler_table,
1564 SCIC_SDS_STP_REQUEST_STARTED_SOFT_RESET_AWAIT_D2H_RESPONSE_FRAME_SUBSTATE
1565 );
1566 }
1567
/* State table for the started-soft-reset substate machine: maps each
 * soft-reset substate to its enter routine.
 */
static const struct sci_base_state scic_sds_stp_request_started_soft_reset_substate_table[] = {
	[SCIC_SDS_STP_REQUEST_STARTED_SOFT_RESET_AWAIT_H2D_ASSERTED_COMPLETION_SUBSTATE] = {
		.enter_state = scic_sds_stp_request_started_soft_reset_await_h2d_asserted_completion_enter,
	},
	[SCIC_SDS_STP_REQUEST_STARTED_SOFT_RESET_AWAIT_H2D_DIAGNOSTIC_COMPLETION_SUBSTATE] = {
		.enter_state = scic_sds_stp_request_started_soft_reset_await_h2d_diagnostic_completion_enter,
	},
	[SCIC_SDS_STP_REQUEST_STARTED_SOFT_RESET_AWAIT_D2H_RESPONSE_FRAME_SUBSTATE] = {
		.enter_state = scic_sds_stp_request_started_soft_reset_await_d2h_response_enter,
	},
};
1579
1580 enum sci_status scic_sds_stp_soft_reset_request_construct(struct scic_sds_request *sci_req)
1581 {
1582 struct scic_sds_stp_request *stp_req = &sci_req->stp.req;
1583
1584 scic_sds_stp_non_ncq_request_construct(sci_req);
1585
1586 /* Build the STP task context structure */
1587 scu_stp_raw_request_construct_task_context(stp_req, sci_req->task_context_buffer);
1588
1589 sci_base_state_machine_construct(&sci_req->started_substate_machine,
1590 sci_req,
1591 scic_sds_stp_request_started_soft_reset_substate_table,
1592 SCIC_SDS_STP_REQUEST_STARTED_SOFT_RESET_AWAIT_H2D_ASSERTED_COMPLETION_SUBSTATE);
1593
1594 return SCI_SUCCESS;
1595 }
This page took 0.085869 seconds and 4 git commands to generate.