isci: uplevel request infrastructure
[deliverable/linux.git] / drivers / scsi / isci / stp_request.c
1 /*
2 * This file is provided under a dual BSD/GPLv2 license. When using or
3 * redistributing this file, you may do so under either license.
4 *
5 * GPL LICENSE SUMMARY
6 *
7 * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved.
8 *
9 * This program is free software; you can redistribute it and/or modify
10 * it under the terms of version 2 of the GNU General Public License as
11 * published by the Free Software Foundation.
12 *
13 * This program is distributed in the hope that it will be useful, but
14 * WITHOUT ANY WARRANTY; without even the implied warranty of
15 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
16 * General Public License for more details.
17 *
18 * You should have received a copy of the GNU General Public License
19 * along with this program; if not, write to the Free Software
20 * Foundation, Inc., 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
21 * The full GNU General Public License is included in this distribution
22 * in the file called LICENSE.GPL.
23 *
24 * BSD LICENSE
25 *
26 * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved.
27 * All rights reserved.
28 *
29 * Redistribution and use in source and binary forms, with or without
30 * modification, are permitted provided that the following conditions
31 * are met:
32 *
33 * * Redistributions of source code must retain the above copyright
34 * notice, this list of conditions and the following disclaimer.
35 * * Redistributions in binary form must reproduce the above copyright
36 * notice, this list of conditions and the following disclaimer in
37 * the documentation and/or other materials provided with the
38 * distribution.
39 * * Neither the name of Intel Corporation nor the names of its
40 * contributors may be used to endorse or promote products derived
41 * from this software without specific prior written permission.
42 *
43 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
44 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
45 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
46 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
47 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
48 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
49 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
50 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
51 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
52 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
53 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
54 */
55
56 #include <scsi/sas.h>
57 #include "sas.h"
58 #include "state_machine.h"
59 #include "remote_device.h"
60 #include "stp_request.h"
61 #include "unsolicited_frame_control.h"
62 #include "scu_completion_codes.h"
63 #include "scu_event_codes.h"
64 #include "scu_task_context.h"
65 #include "request.h"
66
67 void scic_sds_stp_request_assign_buffers(struct scic_sds_request *sci_req)
68 {
69 if (sci_req->was_tag_assigned_by_user == false)
70 sci_req->task_context_buffer = &sci_req->tc;
71 }
72
73 /**
74 * This method is will fill in the SCU Task Context for any type of SATA
75 * request. This is called from the various SATA constructors.
76 * @sci_req: The general IO request object which is to be used in
77 * constructing the SCU task context.
78 * @task_context: The buffer pointer for the SCU task context which is being
79 * constructed.
80 *
81 * The general io request construction is complete. The buffer assignment for
82 * the command buffer is complete. none Revisit task context construction to
83 * determine what is common for SSP/SMP/STP task context structures.
84 */
85 static void scu_sata_reqeust_construct_task_context(
86 struct scic_sds_request *sci_req,
87 struct scu_task_context *task_context)
88 {
89 dma_addr_t dma_addr;
90 struct scic_sds_controller *controller;
91 struct scic_sds_remote_device *target_device;
92 struct scic_sds_port *target_port;
93
94 controller = scic_sds_request_get_controller(sci_req);
95 target_device = scic_sds_request_get_device(sci_req);
96 target_port = scic_sds_request_get_port(sci_req);
97
98 /* Fill in the TC with the its required data */
99 task_context->abort = 0;
100 task_context->priority = SCU_TASK_PRIORITY_NORMAL;
101 task_context->initiator_request = 1;
102 task_context->connection_rate = target_device->connection_rate;
103 task_context->protocol_engine_index =
104 scic_sds_controller_get_protocol_engine_group(controller);
105 task_context->logical_port_index =
106 scic_sds_port_get_index(target_port);
107 task_context->protocol_type = SCU_TASK_CONTEXT_PROTOCOL_STP;
108 task_context->valid = SCU_TASK_CONTEXT_VALID;
109 task_context->context_type = SCU_TASK_CONTEXT_TYPE;
110
111 task_context->remote_node_index =
112 scic_sds_remote_device_get_index(sci_req->target_device);
113 task_context->command_code = 0;
114
115 task_context->link_layer_control = 0;
116 task_context->do_not_dma_ssp_good_response = 1;
117 task_context->strict_ordering = 0;
118 task_context->control_frame = 0;
119 task_context->timeout_enable = 0;
120 task_context->block_guard_enable = 0;
121
122 task_context->address_modifier = 0;
123 task_context->task_phase = 0x01;
124
125 task_context->ssp_command_iu_length =
126 (sizeof(struct host_to_dev_fis) - sizeof(u32)) / sizeof(u32);
127
128 /* Set the first word of the H2D REG FIS */
129 task_context->type.words[0] = *(u32 *)&sci_req->stp.cmd;
130
131 if (sci_req->was_tag_assigned_by_user) {
132 /*
133 * Build the task context now since we have already read
134 * the data
135 */
136 sci_req->post_context =
137 (SCU_CONTEXT_COMMAND_REQUEST_TYPE_POST_TC |
138 (scic_sds_controller_get_protocol_engine_group(
139 controller) <<
140 SCU_CONTEXT_COMMAND_PROTOCOL_ENGINE_GROUP_SHIFT) |
141 (scic_sds_port_get_index(target_port) <<
142 SCU_CONTEXT_COMMAND_LOGICAL_PORT_SHIFT) |
143 scic_sds_io_tag_get_index(sci_req->io_tag));
144 } else {
145 /*
146 * Build the task context now since we have already read
147 * the data.
148 * I/O tag index is not assigned because we have to wait
149 * until we get a TCi.
150 */
151 sci_req->post_context =
152 (SCU_CONTEXT_COMMAND_REQUEST_TYPE_POST_TC |
153 (scic_sds_controller_get_protocol_engine_group(
154 controller) <<
155 SCU_CONTEXT_COMMAND_PROTOCOL_ENGINE_GROUP_SHIFT) |
156 (scic_sds_port_get_index(target_port) <<
157 SCU_CONTEXT_COMMAND_LOGICAL_PORT_SHIFT));
158 }
159
160 /*
161 * Copy the physical address for the command buffer to the SCU Task
162 * Context. We must offset the command buffer by 4 bytes because the
163 * first 4 bytes are transfered in the body of the TC.
164 */
165 dma_addr = scic_io_request_get_dma_addr(sci_req,
166 ((char *) &sci_req->stp.cmd) +
167 sizeof(u32));
168
169 task_context->command_iu_upper = upper_32_bits(dma_addr);
170 task_context->command_iu_lower = lower_32_bits(dma_addr);
171
172 /* SATA Requests do not have a response buffer */
173 task_context->response_iu_upper = 0;
174 task_context->response_iu_lower = 0;
175 }
176
/**
 * scic_sds_stp_non_ncq_request_construct - common non-NCQ STP construction
 * @sci_req: the request being constructed
 *
 * Perform any general sata request construction common to all non-NCQ
 * requests; currently this just flags that a started substate machine
 * will drive the request.
 */
static void scic_sds_stp_non_ncq_request_construct(
	struct scic_sds_request *sci_req)
{
	sci_req->has_started_substate_machine = true;
}
189
190 /**
191 *
192 * @sci_req: This parameter specifies the request to be constructed as an
193 * optimized request.
194 * @optimized_task_type: This parameter specifies whether the request is to be
195 * an UDMA request or a NCQ request. - A value of 0 indicates UDMA. - A
196 * value of 1 indicates NCQ.
197 *
198 * This method will perform request construction common to all types of STP
199 * requests that are optimized by the silicon (i.e. UDMA, NCQ). This method
200 * returns an indication as to whether the construction was successful.
201 */
202 static void scic_sds_stp_optimized_request_construct(struct scic_sds_request *sci_req,
203 u8 optimized_task_type,
204 u32 len,
205 enum dma_data_direction dir)
206 {
207 struct scu_task_context *task_context = sci_req->task_context_buffer;
208
209 /* Build the STP task context structure */
210 scu_sata_reqeust_construct_task_context(sci_req, task_context);
211
212 /* Copy over the SGL elements */
213 scic_sds_request_build_sgl(sci_req);
214
215 /* Copy over the number of bytes to be transfered */
216 task_context->transfer_length_bytes = len;
217
218 if (dir == DMA_TO_DEVICE) {
219 /*
220 * The difference between the DMA IN and DMA OUT request task type
221 * values are consistent with the difference between FPDMA READ
222 * and FPDMA WRITE values. Add the supplied task type parameter
223 * to this difference to set the task type properly for this
224 * DATA OUT (WRITE) case. */
225 task_context->task_type = optimized_task_type + (SCU_TASK_TYPE_DMA_OUT
226 - SCU_TASK_TYPE_DMA_IN);
227 } else {
228 /*
229 * For the DATA IN (READ) case, simply save the supplied
230 * optimized task type. */
231 task_context->task_type = optimized_task_type;
232 }
233 }
234
235 /**
236 *
237 * @sci_req: This parameter specifies the request to be constructed.
238 *
239 * This method will construct the STP UDMA request and its associated TC data.
240 * This method returns an indication as to whether the construction was
241 * successful. SCI_SUCCESS Currently this method always returns this value.
242 */
243 enum sci_status scic_sds_stp_ncq_request_construct(struct scic_sds_request *sci_req,
244 u32 len,
245 enum dma_data_direction dir)
246 {
247 scic_sds_stp_optimized_request_construct(sci_req,
248 SCU_TASK_TYPE_FPDMAQ_READ,
249 len, dir);
250 return SCI_SUCCESS;
251 }
252
/**
 * scu_stp_raw_request_construct_task_context -
 * @stp_req: This parameter specifies the STP request object for which to
 *    construct a RAW command frame task context.
 * @task_context: This parameter specifies the SCU specific task context buffer
 *    to construct.
 *
 * Perform the operations common to all SATA/STP requests utilizing the raw
 * frame method: build the base SATA task context, then override it for a
 * raw register H2D FIS.  The transfer length excludes the first dword of
 * the FIS because that dword travels in the body of the TC itself.
 */
static void scu_stp_raw_request_construct_task_context(
	struct scic_sds_stp_request *stp_req,
	struct scu_task_context *task_context)
{
	struct scic_sds_request *sci_req = to_sci_req(stp_req);

	scu_sata_reqeust_construct_task_context(sci_req, task_context);

	task_context->control_frame = 0;
	task_context->priority = SCU_TASK_PRIORITY_NORMAL;
	task_context->task_type = SCU_TASK_TYPE_SATA_RAW_FRAME;
	task_context->type.stp.fis_type = FIS_REGH2D;
	task_context->transfer_length_bytes = sizeof(struct host_to_dev_fis) - sizeof(u32);
}
277
/**
 * scic_stp_io_request_set_ncq_tag - set the NCQ tag in the task context
 * @req: request whose task context buffer is updated
 * @ncq_tag: NCQ tag value placed in the STP portion of the task context
 */
void scic_stp_io_request_set_ncq_tag(
	struct scic_sds_request *req,
	u16 ncq_tag)
{
	/**
	 * @note This could be made to return an error to the user if the user
	 * attempts to set the NCQ tag in the wrong state.
	 */
	req->task_context_buffer->type.stp.ncq_tag = ncq_tag;
}
288
289 /**
290 *
291 * @sci_req:
292 *
293 * Get the next SGL element from the request. - Check on which SGL element pair
294 * we are working - if working on SLG pair element A - advance to element B -
295 * else - check to see if there are more SGL element pairs for this IO request
296 * - if there are more SGL element pairs - advance to the next pair and return
297 * element A struct scu_sgl_element*
298 */
299 static struct scu_sgl_element *scic_sds_stp_request_pio_get_next_sgl(struct scic_sds_stp_request *stp_req)
300 {
301 struct scu_sgl_element *current_sgl;
302 struct scic_sds_request *sci_req = to_sci_req(stp_req);
303 struct scic_sds_request_pio_sgl *pio_sgl = &stp_req->type.pio.request_current;
304
305 if (pio_sgl->sgl_set == SCU_SGL_ELEMENT_PAIR_A) {
306 if (pio_sgl->sgl_pair->B.address_lower == 0 &&
307 pio_sgl->sgl_pair->B.address_upper == 0) {
308 current_sgl = NULL;
309 } else {
310 pio_sgl->sgl_set = SCU_SGL_ELEMENT_PAIR_B;
311 current_sgl = &pio_sgl->sgl_pair->B;
312 }
313 } else {
314 if (pio_sgl->sgl_pair->next_pair_lower == 0 &&
315 pio_sgl->sgl_pair->next_pair_upper == 0) {
316 current_sgl = NULL;
317 } else {
318 u64 phys_addr;
319
320 phys_addr = pio_sgl->sgl_pair->next_pair_upper;
321 phys_addr <<= 32;
322 phys_addr |= pio_sgl->sgl_pair->next_pair_lower;
323
324 pio_sgl->sgl_pair = scic_request_get_virt_addr(sci_req, phys_addr);
325 pio_sgl->sgl_set = SCU_SGL_ELEMENT_PAIR_A;
326 current_sgl = &pio_sgl->sgl_pair->A;
327 }
328 }
329
330 return current_sgl;
331 }
332
/**
 * scic_sds_stp_request_non_data_await_h2d_tc_completion_handler -
 * @sci_req: the STP non-data request whose H2D FIS transmission completed
 * @completion_code: TC completion code reported by the hardware
 *
 * Process a TC completion.  The expected TC completion is for the
 * transmission of the H2D register FIS containing the SATA/STP non-data
 * request.  On SCU_TASK_DONE_GOOD the substate machine advances to wait
 * for the D2H response FIS; any other completion status completes the IO
 * with a controller-specific error (if a NAK was received it is up to the
 * user to retry the request).
 *
 * Return: SCI_SUCCESS - this value is always returned.
 */
static enum sci_status scic_sds_stp_request_non_data_await_h2d_tc_completion_handler(
	struct scic_sds_request *sci_req,
	u32 completion_code)
{
	switch (SCU_GET_COMPLETION_TL_STATUS(completion_code)) {
	case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_GOOD):
		scic_sds_request_set_status(
			sci_req, SCU_TASK_DONE_GOOD, SCI_SUCCESS
			);

		sci_base_state_machine_change_state(
			&sci_req->started_substate_machine,
			SCIC_SDS_STP_REQUEST_STARTED_NON_DATA_AWAIT_D2H_SUBSTATE
			);
		break;

	default:
		/*
		 * All other completion status cause the IO to be complete. If a NAK
		 * was received, then it is up to the user to retry the request. */
		scic_sds_request_set_status(
			sci_req,
			SCU_NORMALIZE_COMPLETION_STATUS(completion_code),
			SCI_FAILURE_CONTROLLER_SPECIFIC_IO_ERR
			);

		sci_base_state_machine_change_state(
			&sci_req->state_machine, SCI_BASE_REQUEST_STATE_COMPLETED);
		break;
	}

	return SCI_SUCCESS;
}
376
/**
 * scic_sds_stp_request_non_data_await_d2h_frame_handler -
 * @sci_req: This parameter specifies the request for which a frame has been
 *    received.
 * @frame_index: This parameter specifies the index of the frame that has been
 *    received.
 *
 * Process frames received from the target while waiting for a device to
 * host register FIS.  A D2H register FIS is copied into the response
 * buffer and the IO completes with SCI_FAILURE_IO_RESPONSE_VALID; any
 * other FIS type is treated as a protocol violation from an IO
 * perspective.  In both cases the request transitions to the completed
 * state and the frame is released back to the controller.
 */
static enum sci_status scic_sds_stp_request_non_data_await_d2h_frame_handler(
	struct scic_sds_request *sci_req,
	u32 frame_index)
{
	enum sci_status status;
	struct dev_to_host_fis *frame_header;
	u32 *frame_buffer;
	struct scic_sds_stp_request *stp_req = &sci_req->stp.req;
	struct scic_sds_controller *scic = sci_req->owning_controller;

	status = scic_sds_unsolicited_frame_control_get_header(&scic->uf_control,
							       frame_index,
							       (void **)&frame_header);

	if (status != SCI_SUCCESS) {
		dev_err(scic_to_dev(sci_req->owning_controller),
			"%s: SCIC IO Request 0x%p could not get frame header "
			"for frame index %d, status %x\n",
			__func__, stp_req, frame_index, status);

		return status;
	}

	switch (frame_header->fis_type) {
	case FIS_REGD2H:
		scic_sds_unsolicited_frame_control_get_buffer(&scic->uf_control,
							      frame_index,
							      (void **)&frame_buffer);

		scic_sds_controller_copy_sata_response(&sci_req->stp.rsp,
						       frame_header,
						       frame_buffer);

		/* The command has completed with error */
		scic_sds_request_set_status(sci_req, SCU_TASK_DONE_CHECK_RESPONSE,
					    SCI_FAILURE_IO_RESPONSE_VALID);
		break;

	default:
		dev_warn(scic_to_dev(scic),
			 "%s: IO Request:0x%p Frame Id:%d protocol "
			 "violation occurred\n", __func__, stp_req,
			 frame_index);

		scic_sds_request_set_status(sci_req, SCU_TASK_DONE_UNEXP_FIS,
					    SCI_FAILURE_PROTOCOL_VIOLATION);
		break;
	}

	sci_base_state_machine_change_state(&sci_req->state_machine,
					    SCI_BASE_REQUEST_STATE_COMPLETED);

	/* Frame has been decoded return it to the controller */
	scic_sds_controller_release_frame(scic, frame_index);

	return status;
}
446
447 /* --------------------------------------------------------------------------- */
448
/* Substate handler table for STP non-data requests: one entry per started
 * substate, with only the handlers that may legally fire in that substate
 * populated (abort is valid in both). */
static const struct scic_sds_io_request_state_handler scic_sds_stp_request_started_non_data_substate_handler_table[] = {
	[SCIC_SDS_STP_REQUEST_STARTED_NON_DATA_AWAIT_H2D_COMPLETION_SUBSTATE] = {
		.abort_handler = scic_sds_request_started_state_abort_handler,
		.tc_completion_handler = scic_sds_stp_request_non_data_await_h2d_tc_completion_handler,
	},
	[SCIC_SDS_STP_REQUEST_STARTED_NON_DATA_AWAIT_D2H_SUBSTATE] = {
		.abort_handler = scic_sds_request_started_state_abort_handler,
		.frame_handler = scic_sds_stp_request_non_data_await_d2h_frame_handler,
	}
};
459
/* Enter the AWAIT_H2D_COMPLETION substate: install the non-data handler
 * table and register this request as the remote device's working request. */
static void scic_sds_stp_request_started_non_data_await_h2d_completion_enter(
	void *object)
{
	struct scic_sds_request *sci_req = object;

	SET_STATE_HANDLER(
		sci_req,
		scic_sds_stp_request_started_non_data_substate_handler_table,
		SCIC_SDS_STP_REQUEST_STARTED_NON_DATA_AWAIT_H2D_COMPLETION_SUBSTATE
		);

	scic_sds_remote_device_set_working_request(
		sci_req->target_device, sci_req
		);
}
475
/* Enter the AWAIT_D2H substate: switch the handler table so only frame
 * reception (and abort) are serviced while waiting for the D2H FIS. */
static void scic_sds_stp_request_started_non_data_await_d2h_enter(void *object)
{
	struct scic_sds_request *sci_req = object;

	SET_STATE_HANDLER(
		sci_req,
		scic_sds_stp_request_started_non_data_substate_handler_table,
		SCIC_SDS_STP_REQUEST_STARTED_NON_DATA_AWAIT_D2H_SUBSTATE
		);
}
486
487 /* --------------------------------------------------------------------------- */
488
/* Enter-state table for the STP non-data started substate machine. */
static const struct sci_base_state scic_sds_stp_request_started_non_data_substate_table[] = {
	[SCIC_SDS_STP_REQUEST_STARTED_NON_DATA_AWAIT_H2D_COMPLETION_SUBSTATE] = {
		.enter_state = scic_sds_stp_request_started_non_data_await_h2d_completion_enter,
	},
	[SCIC_SDS_STP_REQUEST_STARTED_NON_DATA_AWAIT_D2H_SUBSTATE] = {
		.enter_state = scic_sds_stp_request_started_non_data_await_d2h_enter,
	},
};
497
/**
 * scic_sds_stp_non_data_request_construct - construct an STP non-data request
 * @sci_req: the request to construct
 *
 * Build the raw-frame task context for the H2D register FIS and set up the
 * non-data started substate machine (starting in AWAIT_H2D_COMPLETION).
 *
 * Return: SCI_SUCCESS (always).
 */
enum sci_status scic_sds_stp_non_data_request_construct(struct scic_sds_request *sci_req)
{
	struct scic_sds_stp_request *stp_req = &sci_req->stp.req;

	scic_sds_stp_non_ncq_request_construct(sci_req);

	/* Build the STP task context structure */
	scu_stp_raw_request_construct_task_context(stp_req, sci_req->task_context_buffer);

	sci_base_state_machine_construct(&sci_req->started_substate_machine,
					 sci_req,
					 scic_sds_stp_request_started_non_data_substate_table,
					 SCIC_SDS_STP_REQUEST_STARTED_NON_DATA_AWAIT_H2D_COMPLETION_SUBSTATE);

	return SCI_SUCCESS;
}
514
515 #define SCU_MAX_FRAME_BUFFER_SIZE 0x400 /* 1K is the maximum SCU frame data payload */
516
517 /* transmit DATA_FIS from (current sgl + offset) for input
518 * parameter length. current sgl and offset is alreay stored in the IO request
519 */
520 static enum sci_status scic_sds_stp_request_pio_data_out_trasmit_data_frame(
521 struct scic_sds_request *sci_req,
522 u32 length)
523 {
524 struct scic_sds_controller *scic = sci_req->owning_controller;
525 struct scic_sds_stp_request *stp_req = &sci_req->stp.req;
526 struct scu_task_context *task_context;
527 struct scu_sgl_element *current_sgl;
528
529 /* Recycle the TC and reconstruct it for sending out DATA FIS containing
530 * for the data from current_sgl+offset for the input length
531 */
532 task_context = scic_sds_controller_get_task_context_buffer(scic,
533 sci_req->io_tag);
534
535 if (stp_req->type.pio.request_current.sgl_set == SCU_SGL_ELEMENT_PAIR_A)
536 current_sgl = &stp_req->type.pio.request_current.sgl_pair->A;
537 else
538 current_sgl = &stp_req->type.pio.request_current.sgl_pair->B;
539
540 /* update the TC */
541 task_context->command_iu_upper = current_sgl->address_upper;
542 task_context->command_iu_lower = current_sgl->address_lower;
543 task_context->transfer_length_bytes = length;
544 task_context->type.stp.fis_type = FIS_DATA;
545
546 /* send the new TC out. */
547 return scic_controller_continue_io(sci_req);
548 }
549
550 static enum sci_status scic_sds_stp_request_pio_data_out_transmit_data(struct scic_sds_request *sci_req)
551 {
552
553 struct scu_sgl_element *current_sgl;
554 u32 sgl_offset;
555 u32 remaining_bytes_in_current_sgl = 0;
556 enum sci_status status = SCI_SUCCESS;
557 struct scic_sds_stp_request *stp_req = &sci_req->stp.req;
558
559 sgl_offset = stp_req->type.pio.request_current.sgl_offset;
560
561 if (stp_req->type.pio.request_current.sgl_set == SCU_SGL_ELEMENT_PAIR_A) {
562 current_sgl = &(stp_req->type.pio.request_current.sgl_pair->A);
563 remaining_bytes_in_current_sgl = stp_req->type.pio.request_current.sgl_pair->A.length - sgl_offset;
564 } else {
565 current_sgl = &(stp_req->type.pio.request_current.sgl_pair->B);
566 remaining_bytes_in_current_sgl = stp_req->type.pio.request_current.sgl_pair->B.length - sgl_offset;
567 }
568
569
570 if (stp_req->type.pio.pio_transfer_bytes > 0) {
571 if (stp_req->type.pio.pio_transfer_bytes >= remaining_bytes_in_current_sgl) {
572 /* recycle the TC and send the H2D Data FIS from (current sgl + sgl_offset) and length = remaining_bytes_in_current_sgl */
573 status = scic_sds_stp_request_pio_data_out_trasmit_data_frame(sci_req, remaining_bytes_in_current_sgl);
574 if (status == SCI_SUCCESS) {
575 stp_req->type.pio.pio_transfer_bytes -= remaining_bytes_in_current_sgl;
576
577 /* update the current sgl, sgl_offset and save for future */
578 current_sgl = scic_sds_stp_request_pio_get_next_sgl(stp_req);
579 sgl_offset = 0;
580 }
581 } else if (stp_req->type.pio.pio_transfer_bytes < remaining_bytes_in_current_sgl) {
582 /* recycle the TC and send the H2D Data FIS from (current sgl + sgl_offset) and length = type.pio.pio_transfer_bytes */
583 scic_sds_stp_request_pio_data_out_trasmit_data_frame(sci_req, stp_req->type.pio.pio_transfer_bytes);
584
585 if (status == SCI_SUCCESS) {
586 /* Sgl offset will be adjusted and saved for future */
587 sgl_offset += stp_req->type.pio.pio_transfer_bytes;
588 current_sgl->address_lower += stp_req->type.pio.pio_transfer_bytes;
589 stp_req->type.pio.pio_transfer_bytes = 0;
590 }
591 }
592 }
593
594 if (status == SCI_SUCCESS) {
595 stp_req->type.pio.request_current.sgl_offset = sgl_offset;
596 }
597
598 return status;
599 }
600
/**
 * scic_sds_stp_request_pio_data_in_copy_data_buffer -
 * @stp_req: The request that is used for the SGL processing.
 * @data_buf: The buffer of data to be copied.
 * @len: The length of the data transfer.
 *
 * Copy the data from the buffer for the length specified to the IO request
 * SGL specified data region.
 *
 * Return: SCI_SUCCESS (always).
 */
static enum sci_status
scic_sds_stp_request_pio_data_in_copy_data_buffer(struct scic_sds_stp_request *stp_req,
						  u8 *data_buf, u32 len)
{
	struct scic_sds_request *sci_req;
	struct isci_request *ireq;
	u8 *src_addr;
	int copy_len;
	struct sas_task *task;
	struct scatterlist *sg;
	void *kaddr;
	int total_len = len;

	sci_req = to_sci_req(stp_req);
	ireq = sci_req_to_ireq(sci_req);
	task = isci_request_access_task(ireq);
	src_addr = data_buf;

	if (task->num_scatter > 0) {
		sg = task->scatter;

		/* Walk the scatterlist, copying at most one sg entry's worth
		 * per iteration with an atomic kmap of the backing page.
		 * NOTE(review): each copy starts at sg->offset, which assumes
		 * len never exceeds the scatterlist capacity — confirm callers
		 * bound len by the transfer count. */
		while (total_len > 0) {
			struct page *page = sg_page(sg);

			copy_len = min_t(int, total_len, sg_dma_len(sg));
			kaddr = kmap_atomic(page, KM_IRQ0);
			memcpy(kaddr + sg->offset, src_addr, copy_len);
			kunmap_atomic(kaddr, KM_IRQ0);
			total_len -= copy_len;
			src_addr += copy_len;
			sg = sg_next(sg);
		}
	} else {
		/* Single contiguous buffer: task->scatter holds the
		 * destination virtual address in this case. */
		BUG_ON(task->total_xfer_len < total_len);
		memcpy(task->scatter, src_addr, total_len);
	}

	return SCI_SUCCESS;
}
649
650 /**
651 *
652 * @sci_req: The PIO DATA IN request that is to receive the data.
653 * @data_buffer: The buffer to copy from.
654 *
655 * Copy the data buffer to the io request data region. enum sci_status
656 */
657 static enum sci_status scic_sds_stp_request_pio_data_in_copy_data(
658 struct scic_sds_stp_request *sci_req,
659 u8 *data_buffer)
660 {
661 enum sci_status status;
662
663 /*
664 * If there is less than 1K remaining in the transfer request
665 * copy just the data for the transfer */
666 if (sci_req->type.pio.pio_transfer_bytes < SCU_MAX_FRAME_BUFFER_SIZE) {
667 status = scic_sds_stp_request_pio_data_in_copy_data_buffer(
668 sci_req, data_buffer, sci_req->type.pio.pio_transfer_bytes);
669
670 if (status == SCI_SUCCESS)
671 sci_req->type.pio.pio_transfer_bytes = 0;
672 } else {
673 /* We are transfering the whole frame so copy */
674 status = scic_sds_stp_request_pio_data_in_copy_data_buffer(
675 sci_req, data_buffer, SCU_MAX_FRAME_BUFFER_SIZE);
676
677 if (status == SCI_SUCCESS)
678 sci_req->type.pio.pio_transfer_bytes -= SCU_MAX_FRAME_BUFFER_SIZE;
679 }
680
681 return status;
682 }
683
/**
 * scic_sds_stp_request_pio_await_h2d_completion_tc_completion_handler -
 * @sci_req: the PIO request whose H2D FIS transmission completed
 * @completion_code: TC completion code reported by the hardware
 *
 * On a good TC completion, advance to waiting for the first PIO frame
 * (PIO Setup or D2H register FIS).  Any other completion status completes
 * the IO with a controller-specific error (if a NAK was received it is up
 * to the user to retry the request).
 *
 * Return: SCI_SUCCESS.
 */
static enum sci_status scic_sds_stp_request_pio_await_h2d_completion_tc_completion_handler(
	struct scic_sds_request *sci_req,
	u32 completion_code)
{
	enum sci_status status = SCI_SUCCESS;

	switch (SCU_GET_COMPLETION_TL_STATUS(completion_code)) {
	case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_GOOD):
		scic_sds_request_set_status(
			sci_req, SCU_TASK_DONE_GOOD, SCI_SUCCESS
			);

		sci_base_state_machine_change_state(
			&sci_req->started_substate_machine,
			SCIC_SDS_STP_REQUEST_STARTED_PIO_AWAIT_FRAME_SUBSTATE
			);
		break;

	default:
		/*
		 * All other completion status cause the IO to be complete. If a NAK
		 * was received, then it is up to the user to retry the request. */
		scic_sds_request_set_status(
			sci_req,
			SCU_NORMALIZE_COMPLETION_STATUS(completion_code),
			SCI_FAILURE_CONTROLLER_SPECIFIC_IO_ERR
			);

		sci_base_state_machine_change_state(
			&sci_req->state_machine,
			SCI_BASE_REQUEST_STATE_COMPLETED
			);
		break;
	}

	return status;
}
728
729 static enum sci_status scic_sds_stp_request_pio_await_frame_frame_handler(struct scic_sds_request *sci_req,
730 u32 frame_index)
731 {
732 struct scic_sds_controller *scic = sci_req->owning_controller;
733 struct scic_sds_stp_request *stp_req = &sci_req->stp.req;
734 struct isci_request *ireq = sci_req_to_ireq(sci_req);
735 struct sas_task *task = isci_request_access_task(ireq);
736 struct dev_to_host_fis *frame_header;
737 enum sci_status status;
738 u32 *frame_buffer;
739
740 status = scic_sds_unsolicited_frame_control_get_header(&scic->uf_control,
741 frame_index,
742 (void **)&frame_header);
743
744 if (status != SCI_SUCCESS) {
745 dev_err(scic_to_dev(scic),
746 "%s: SCIC IO Request 0x%p could not get frame header "
747 "for frame index %d, status %x\n",
748 __func__, stp_req, frame_index, status);
749 return status;
750 }
751
752 switch (frame_header->fis_type) {
753 case FIS_PIO_SETUP:
754 /* Get from the frame buffer the PIO Setup Data */
755 scic_sds_unsolicited_frame_control_get_buffer(&scic->uf_control,
756 frame_index,
757 (void **)&frame_buffer);
758
759 /* Get the data from the PIO Setup The SCU Hardware returns
760 * first word in the frame_header and the rest of the data is in
761 * the frame buffer so we need to back up one dword
762 */
763
764 /* transfer_count: first 16bits in the 4th dword */
765 stp_req->type.pio.pio_transfer_bytes = frame_buffer[3] & 0xffff;
766
767 /* ending_status: 4th byte in the 3rd dword */
768 stp_req->type.pio.ending_status = (frame_buffer[2] >> 24) & 0xff;
769
770 scic_sds_controller_copy_sata_response(&sci_req->stp.rsp,
771 frame_header,
772 frame_buffer);
773
774 sci_req->stp.rsp.status = stp_req->type.pio.ending_status;
775
776 /* The next state is dependent on whether the
777 * request was PIO Data-in or Data out
778 */
779 if (task->data_dir == DMA_FROM_DEVICE) {
780 sci_base_state_machine_change_state(&sci_req->started_substate_machine,
781 SCIC_SDS_STP_REQUEST_STARTED_PIO_DATA_IN_AWAIT_DATA_SUBSTATE);
782 } else if (task->data_dir == DMA_TO_DEVICE) {
783 /* Transmit data */
784 status = scic_sds_stp_request_pio_data_out_transmit_data(sci_req);
785 if (status != SCI_SUCCESS)
786 break;
787 sci_base_state_machine_change_state(&sci_req->started_substate_machine,
788 SCIC_SDS_STP_REQUEST_STARTED_PIO_DATA_OUT_TRANSMIT_DATA_SUBSTATE);
789 }
790 break;
791 case FIS_SETDEVBITS:
792 sci_base_state_machine_change_state(&sci_req->started_substate_machine,
793 SCIC_SDS_STP_REQUEST_STARTED_PIO_AWAIT_FRAME_SUBSTATE);
794 break;
795 case FIS_REGD2H:
796 if (frame_header->status & ATA_BUSY) {
797 /* Now why is the drive sending a D2H Register FIS when
798 * it is still busy? Do nothing since we are still in
799 * the right state.
800 */
801 dev_dbg(scic_to_dev(scic),
802 "%s: SCIC PIO Request 0x%p received "
803 "D2H Register FIS with BSY status "
804 "0x%x\n", __func__, stp_req,
805 frame_header->status);
806 break;
807 }
808
809 scic_sds_unsolicited_frame_control_get_buffer(&scic->uf_control,
810 frame_index,
811 (void **)&frame_buffer);
812
813 scic_sds_controller_copy_sata_response(&sci_req->stp.req,
814 frame_header,
815 frame_buffer);
816
817 scic_sds_request_set_status(sci_req,
818 SCU_TASK_DONE_CHECK_RESPONSE,
819 SCI_FAILURE_IO_RESPONSE_VALID);
820
821 sci_base_state_machine_change_state(&sci_req->state_machine,
822 SCI_BASE_REQUEST_STATE_COMPLETED);
823 break;
824 default:
825 /* FIXME: what do we do here? */
826 break;
827 }
828
829 /* Frame is decoded return it to the controller */
830 scic_sds_controller_release_frame(scic, frame_index);
831
832 return status;
833 }
834
/**
 * scic_sds_stp_request_pio_data_in_await_data_frame_handler -
 * @sci_req: the PIO data-in request for which a frame arrived
 * @frame_index: index of the received unsolicited frame
 *
 * Handle a frame received while waiting for PIO read data.  A DATA FIS
 * payload is copied into the request's SGL region; when the transfer
 * count reaches zero the request either completes with a check response
 * (ending status not BSY) or goes back to waiting for the next PIO Setup
 * FIS.  Any other FIS type completes the IO requiring a SCSI abort.
 */
static enum sci_status scic_sds_stp_request_pio_data_in_await_data_frame_handler(struct scic_sds_request *sci_req,
	u32 frame_index)
{
	enum sci_status status;
	struct dev_to_host_fis *frame_header;
	struct sata_fis_data *frame_buffer;
	struct scic_sds_stp_request *stp_req = &sci_req->stp.req;
	struct scic_sds_controller *scic = sci_req->owning_controller;

	status = scic_sds_unsolicited_frame_control_get_header(&scic->uf_control,
							       frame_index,
							       (void **)&frame_header);

	if (status != SCI_SUCCESS) {
		dev_err(scic_to_dev(scic),
			"%s: SCIC IO Request 0x%p could not get frame header "
			"for frame index %d, status %x\n",
			__func__, stp_req, frame_index, status);
		return status;
	}

	if (frame_header->fis_type == FIS_DATA) {
		if (stp_req->type.pio.request_current.sgl_pair == NULL) {
			/* No SGL to copy into: record the frame index so the
			 * data can be fetched later.  The frame is deliberately
			 * NOT released in this branch — presumably it is held
			 * until the saved index is consumed; confirm against
			 * the saved_rx_frame_index consumer. */
			sci_req->saved_rx_frame_index = frame_index;
			stp_req->type.pio.pio_transfer_bytes = 0;
		} else {
			scic_sds_unsolicited_frame_control_get_buffer(&scic->uf_control,
								      frame_index,
								      (void **)&frame_buffer);

			status = scic_sds_stp_request_pio_data_in_copy_data(stp_req,
									    (u8 *)frame_buffer);

			/* Frame is decoded return it to the controller */
			scic_sds_controller_release_frame(scic, frame_index);
		}

		/* Check for the end of the transfer, are there more
		 * bytes remaining for this data transfer
		 */
		if (status != SCI_SUCCESS ||
		    stp_req->type.pio.pio_transfer_bytes != 0)
			return status;

		if ((stp_req->type.pio.ending_status & ATA_BUSY) == 0) {
			scic_sds_request_set_status(sci_req,
						    SCU_TASK_DONE_CHECK_RESPONSE,
						    SCI_FAILURE_IO_RESPONSE_VALID);

			sci_base_state_machine_change_state(&sci_req->state_machine,
							    SCI_BASE_REQUEST_STATE_COMPLETED);
		} else {
			sci_base_state_machine_change_state(&sci_req->started_substate_machine,
							    SCIC_SDS_STP_REQUEST_STARTED_PIO_AWAIT_FRAME_SUBSTATE);
		}
	} else {
		dev_err(scic_to_dev(scic),
			"%s: SCIC PIO Request 0x%p received frame %d "
			"with fis type 0x%02x when expecting a data "
			"fis.\n", __func__, stp_req, frame_index,
			frame_header->fis_type);

		scic_sds_request_set_status(sci_req,
					    SCU_TASK_DONE_GOOD,
					    SCI_FAILURE_IO_REQUIRES_SCSI_ABORT);

		sci_base_state_machine_change_state(&sci_req->state_machine,
						    SCI_BASE_REQUEST_STATE_COMPLETED);

		/* Frame is decoded return it to the controller */
		scic_sds_controller_release_frame(scic, frame_index);
	}

	return status;
}
910
911
/**
 *
 * @sci_req: the PIO data-out request whose task context just completed
 * @completion_code: raw task context completion code reported by the SCU
 *
 * Process a TC completion while transmitting PIO data-out frames.  On
 * SCU_TASK_DONE_GOOD the next chunk of data is transmitted; once no bytes
 * remain, the request transitions back to the PIO await-frame substate to
 * wait for the next PIO SETUP FIS (or the final D2H register FIS).  Any
 * other completion status fails the request.  Returns enum sci_status.
 */
static enum sci_status scic_sds_stp_request_pio_data_out_await_data_transmit_completion_tc_completion_handler(

	struct scic_sds_request *sci_req,
	u32 completion_code)
{
	enum sci_status status = SCI_SUCCESS;
	bool all_frames_transferred = false;
	struct scic_sds_stp_request *stp_req = &sci_req->stp.req;

	switch (SCU_GET_COMPLETION_TL_STATUS(completion_code)) {
	case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_GOOD):
		/* Transmit data */
		if (stp_req->type.pio.pio_transfer_bytes != 0) {
			status = scic_sds_stp_request_pio_data_out_transmit_data(sci_req);
			if (status == SCI_SUCCESS) {
				/* The transmit consumed the remaining bytes. */
				if (stp_req->type.pio.pio_transfer_bytes == 0)
					all_frames_transferred = true;
			}
		} else if (stp_req->type.pio.pio_transfer_bytes == 0) {
			/*
			 * this will happen if all the data is written at the
			 * first time after the pio setup fis is received
			 */
			all_frames_transferred = true;
		}

		/* all data transferred. */
		if (all_frames_transferred) {
			/*
			 * Change the state to SCIC_SDS_STP_REQUEST_STARTED_PIO_AWAIT_FRAME_SUBSTATE
			 * and wait for a PIO_SETUP fis or a D2H Register fis. */
			sci_base_state_machine_change_state(
				&sci_req->started_substate_machine,
				SCIC_SDS_STP_REQUEST_STARTED_PIO_AWAIT_FRAME_SUBSTATE
				);
		}
		break;

	default:
		/*
		 * All other completion status cause the IO to be complete. If a NAK
		 * was received, then it is up to the user to retry the request. */
		scic_sds_request_set_status(
			sci_req,
			SCU_NORMALIZE_COMPLETION_STATUS(completion_code),
			SCI_FAILURE_CONTROLLER_SPECIFIC_IO_ERR
			);

		sci_base_state_machine_change_state(
			&sci_req->state_machine,
			SCI_BASE_REQUEST_STATE_COMPLETED
			);
		break;
	}

	return status;
}
976
/**
 *
 * @request: This is the request which is receiving the event.
 * @event_code: This is the event code upon which the request is expected to
 *    take action.
 *
 * This method handles link layer events that arrive while waiting for the
 * data frame.  Returns SCI_SUCCESS when the event was an R_ERR (CRC error)
 * on the data frame -- the request goes back to waiting for the next frame;
 * returns SCI_FAILURE for any unexpected event.
 */
static enum sci_status scic_sds_stp_request_pio_data_in_await_data_event_handler(
	struct scic_sds_request *request,
	u32 event_code)
{
	enum sci_status status;

	switch (scu_get_event_specifier(event_code)) {
	case SCU_TASK_DONE_CRC_ERR << SCU_EVENT_SPECIFIC_CODE_SHIFT:
		/*
		 * We are waiting for data and the SCU has R_ERR the data frame.
		 * Go back to waiting for the D2H Register FIS */
		sci_base_state_machine_change_state(
			&request->started_substate_machine,
			SCIC_SDS_STP_REQUEST_STARTED_PIO_AWAIT_FRAME_SUBSTATE
			);

		status = SCI_SUCCESS;
		break;

	default:
		dev_err(scic_to_dev(request->owning_controller),
			"%s: SCIC PIO Request 0x%p received unexpected "
			"event 0x%08x\n",
			__func__, request, event_code);

		/* / @todo Should we fail the PIO request when we get an unexpected event? */
		status = SCI_FAILURE;
		break;
	}

	return status;
}
1018
1019 /* --------------------------------------------------------------------------- */
1020
/*
 * Per-substate dispatch table for started PIO requests: the abort/TC/event/
 * frame callbacks that are legal in each PIO substate.  Notifications with
 * no handler in a given substate are left NULL.
 */
static const struct scic_sds_io_request_state_handler scic_sds_stp_request_started_pio_substate_handler_table[] = {
	[SCIC_SDS_STP_REQUEST_STARTED_PIO_AWAIT_H2D_COMPLETION_SUBSTATE] = {
		.abort_handler = scic_sds_request_started_state_abort_handler,
		.tc_completion_handler = scic_sds_stp_request_pio_await_h2d_completion_tc_completion_handler,
	},
	[SCIC_SDS_STP_REQUEST_STARTED_PIO_AWAIT_FRAME_SUBSTATE] = {
		.abort_handler = scic_sds_request_started_state_abort_handler,
		.frame_handler = scic_sds_stp_request_pio_await_frame_frame_handler
	},
	[SCIC_SDS_STP_REQUEST_STARTED_PIO_DATA_IN_AWAIT_DATA_SUBSTATE] = {
		.abort_handler = scic_sds_request_started_state_abort_handler,
		.event_handler = scic_sds_stp_request_pio_data_in_await_data_event_handler,
		.frame_handler = scic_sds_stp_request_pio_data_in_await_data_frame_handler
	},
	[SCIC_SDS_STP_REQUEST_STARTED_PIO_DATA_OUT_TRANSMIT_DATA_SUBSTATE] = {
		.abort_handler = scic_sds_request_started_state_abort_handler,
		.tc_completion_handler = scic_sds_stp_request_pio_data_out_await_data_transmit_completion_tc_completion_handler,
	}
};
1040
1041 static void scic_sds_stp_request_started_pio_await_h2d_completion_enter(
1042 void *object)
1043 {
1044 struct scic_sds_request *sci_req = object;
1045
1046 SET_STATE_HANDLER(
1047 sci_req,
1048 scic_sds_stp_request_started_pio_substate_handler_table,
1049 SCIC_SDS_STP_REQUEST_STARTED_PIO_AWAIT_H2D_COMPLETION_SUBSTATE
1050 );
1051
1052 scic_sds_remote_device_set_working_request(
1053 sci_req->target_device, sci_req);
1054 }
1055
1056 static void scic_sds_stp_request_started_pio_await_frame_enter(void *object)
1057 {
1058 struct scic_sds_request *sci_req = object;
1059
1060 SET_STATE_HANDLER(
1061 sci_req,
1062 scic_sds_stp_request_started_pio_substate_handler_table,
1063 SCIC_SDS_STP_REQUEST_STARTED_PIO_AWAIT_FRAME_SUBSTATE
1064 );
1065 }
1066
1067 static void scic_sds_stp_request_started_pio_data_in_await_data_enter(
1068 void *object)
1069 {
1070 struct scic_sds_request *sci_req = object;
1071
1072 SET_STATE_HANDLER(
1073 sci_req,
1074 scic_sds_stp_request_started_pio_substate_handler_table,
1075 SCIC_SDS_STP_REQUEST_STARTED_PIO_DATA_IN_AWAIT_DATA_SUBSTATE
1076 );
1077 }
1078
1079 static void scic_sds_stp_request_started_pio_data_out_transmit_data_enter(
1080 void *object)
1081 {
1082 struct scic_sds_request *sci_req = object;
1083
1084 SET_STATE_HANDLER(
1085 sci_req,
1086 scic_sds_stp_request_started_pio_substate_handler_table,
1087 SCIC_SDS_STP_REQUEST_STARTED_PIO_DATA_OUT_TRANSMIT_DATA_SUBSTATE
1088 );
1089 }
1090
1091 /* --------------------------------------------------------------------------- */
1092
1093 static const struct sci_base_state scic_sds_stp_request_started_pio_substate_table[] = {
1094 [SCIC_SDS_STP_REQUEST_STARTED_PIO_AWAIT_H2D_COMPLETION_SUBSTATE] = {
1095 .enter_state = scic_sds_stp_request_started_pio_await_h2d_completion_enter,
1096 },
1097 [SCIC_SDS_STP_REQUEST_STARTED_PIO_AWAIT_FRAME_SUBSTATE] = {
1098 .enter_state = scic_sds_stp_request_started_pio_await_frame_enter,
1099 },
1100 [SCIC_SDS_STP_REQUEST_STARTED_PIO_DATA_IN_AWAIT_DATA_SUBSTATE] = {
1101 .enter_state = scic_sds_stp_request_started_pio_data_in_await_data_enter,
1102 },
1103 [SCIC_SDS_STP_REQUEST_STARTED_PIO_DATA_OUT_TRANSMIT_DATA_SUBSTATE] = {
1104 .enter_state = scic_sds_stp_request_started_pio_data_out_transmit_data_enter,
1105 }
1106 };
1107
/**
 * scic_sds_stp_pio_request_construct() - construct a SATA PIO request
 * @sci_req: the request to construct
 * @copy_rx_frame: when true, received data frames are copied through the
 *    request's SGL; when false (sgl_pair left NULL) the PIO data-in frame
 *    handler instead records the frame index for the caller to unload
 *    directly.
 *
 * Builds the raw STP task context, resets the PIO bookkeeping, and starts
 * the PIO started-substate machine in the await-H2D-completion substate.
 * Returns SCI_SUCCESS.
 */
enum sci_status
scic_sds_stp_pio_request_construct(struct scic_sds_request *sci_req,
				   bool copy_rx_frame)
{
	struct scic_sds_stp_request *stp_req = &sci_req->stp.req;
	struct scic_sds_stp_pio_request *pio = &stp_req->type.pio;

	scic_sds_stp_non_ncq_request_construct(sci_req);

	/* Build the raw (pass-through) STP task context. */
	scu_stp_raw_request_construct_task_context(stp_req,
						   sci_req->task_context_buffer);

	/* NOTE(review): the PIO handlers above operate on pio_transfer_bytes;
	 * confirm the struct really declares a distinct current_transfer_bytes
	 * field alongside it. */
	pio->current_transfer_bytes = 0;
	pio->ending_error = 0;
	pio->ending_status = 0;

	pio->request_current.sgl_offset = 0;
	pio->request_current.sgl_set = SCU_SGL_ELEMENT_PAIR_A;

	if (copy_rx_frame) {
		scic_sds_request_build_sgl(sci_req);
		/* Since the IO request copy of the TC contains the same data as
		 * the actual TC this pointer is valid for either.
		 */
		pio->request_current.sgl_pair = &sci_req->task_context_buffer->sgl_pair_ab;
	} else {
		/* The user does not want the data copied to the SGL buffer location */
		pio->request_current.sgl_pair = NULL;
	}

	sci_base_state_machine_construct(&sci_req->started_substate_machine,
					 sci_req,
					 scic_sds_stp_request_started_pio_substate_table,
					 SCIC_SDS_STP_REQUEST_STARTED_PIO_AWAIT_H2D_COMPLETION_SUBSTATE);

	return SCI_SUCCESS;
}
1145
/* Record the final SCU/SCI status pair and retire the request by moving the
 * base state machine to COMPLETED. */
static void scic_sds_stp_request_udma_complete_request(
	struct scic_sds_request *request,
	u32 scu_status,
	enum sci_status sci_status)
{
	scic_sds_request_set_status(request, scu_status, sci_status);
	sci_base_state_machine_change_state(&request->state_machine,
					    SCI_BASE_REQUEST_STATE_COMPLETED);
}
1155
/*
 * General UDMA frame handler: if the unsolicited frame is a D2H register
 * FIS, copy it into the request's response buffer.  The frame is always
 * released back to the controller, whatever its type.  Returns the status
 * of the frame-header lookup.
 */
static enum sci_status scic_sds_stp_request_udma_general_frame_handler(struct scic_sds_request *sci_req,
								       u32 frame_index)
{
	struct scic_sds_controller *scic = sci_req->owning_controller;
	struct dev_to_host_fis *frame_header;
	enum sci_status status;
	u32 *frame_buffer;

	status = scic_sds_unsolicited_frame_control_get_header(&scic->uf_control,
							       frame_index,
							       (void **)&frame_header);

	if ((status == SCI_SUCCESS) &&
	    (frame_header->fis_type == FIS_REGD2H)) {
		scic_sds_unsolicited_frame_control_get_buffer(&scic->uf_control,
							      frame_index,
							      (void **)&frame_buffer);

		scic_sds_controller_copy_sata_response(&sci_req->stp.rsp,
						       frame_header,
						       frame_buffer);
	}

	/* Return the frame even when it was not the expected D2H FIS. */
	scic_sds_controller_release_frame(scic, frame_index);

	return status;
}
1183
/*
 * TC completion handler for the UDMA await-TC-completion substate.
 * SCU_TASK_DONE_GOOD completes the request successfully.  For
 * UNEXP_FIS/REG_ERR, either complete with the already-received D2H
 * response or wait for the D2H register FIS in the next substate.
 * Link-level errors suspend the remote device and then fail the request.
 * Always returns SCI_SUCCESS.
 */
static enum sci_status scic_sds_stp_request_udma_await_tc_completion_tc_completion_handler(
	struct scic_sds_request *sci_req,
	u32 completion_code)
{
	enum sci_status status = SCI_SUCCESS;

	switch (SCU_GET_COMPLETION_TL_STATUS(completion_code)) {
	case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_GOOD):
		scic_sds_stp_request_udma_complete_request(sci_req,
							   SCU_TASK_DONE_GOOD,
							   SCI_SUCCESS);
		break;
	case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_UNEXP_FIS):
	case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_REG_ERR):
		/*
		 * We must check the response buffer to see if the D2H Register FIS was
		 * received before we got the TC completion. */
		if (sci_req->stp.rsp.fis_type == FIS_REGD2H) {
			scic_sds_remote_device_suspend(sci_req->target_device,
				SCU_EVENT_SPECIFIC(SCU_NORMALIZE_COMPLETION_STATUS(completion_code)));

			scic_sds_stp_request_udma_complete_request(sci_req,
								   SCU_TASK_DONE_CHECK_RESPONSE,
								   SCI_FAILURE_IO_RESPONSE_VALID);
		} else {
			/*
			 * If we have an error completion status for the TC then we can expect a
			 * D2H register FIS from the device so we must change state to wait for it */
			sci_base_state_machine_change_state(&sci_req->started_substate_machine,
				SCIC_SDS_STP_REQUEST_STARTED_UDMA_AWAIT_D2H_REG_FIS_SUBSTATE);
		}
		break;

	/*
	 * / @todo Check to see if any of these completion status need to wait for
	 * /       the device to host register fis. */
	/* / @todo We can retry the command for SCU_TASK_DONE_CMD_LL_R_ERR - this comes only for B0 */
	case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_INV_FIS_LEN):
	case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_MAX_PLD_ERR):
	case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_LL_R_ERR):
	case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_CMD_LL_R_ERR):
	case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_CRC_ERR):
		scic_sds_remote_device_suspend(sci_req->target_device,
			SCU_EVENT_SPECIFIC(SCU_NORMALIZE_COMPLETION_STATUS(completion_code)));
		/* Fall through to the default case -- intentional. */
	default:
		/* All other completion status cause the IO to be complete. */
		scic_sds_stp_request_udma_complete_request(sci_req,
			SCU_NORMALIZE_COMPLETION_STATUS(completion_code),
			SCI_FAILURE_CONTROLLER_SPECIFIC_IO_ERR);
		break;
	}

	return status;
}
1239
/*
 * Frame handler while waiting for the D2H register FIS that follows an
 * errored TC: copy the response via the general frame handler, then
 * complete the request with CHECK_RESPONSE / IO_RESPONSE_VALID.
 */
static enum sci_status scic_sds_stp_request_udma_await_d2h_reg_fis_frame_handler(
	struct scic_sds_request *sci_req,
	u32 frame_index)
{
	enum sci_status status;

	/* Use the general frame handler to copy the response data */
	status = scic_sds_stp_request_udma_general_frame_handler(sci_req, frame_index);

	if (status != SCI_SUCCESS)
		return status;

	scic_sds_stp_request_udma_complete_request(sci_req,
						   SCU_TASK_DONE_CHECK_RESPONSE,
						   SCI_FAILURE_IO_RESPONSE_VALID);

	return status;
}
1258
1259 /* --------------------------------------------------------------------------- */
1260
/* Per-substate dispatch table for started UDMA requests. */
static const struct scic_sds_io_request_state_handler scic_sds_stp_request_started_udma_substate_handler_table[] = {
	[SCIC_SDS_STP_REQUEST_STARTED_UDMA_AWAIT_TC_COMPLETION_SUBSTATE] = {
		.abort_handler = scic_sds_request_started_state_abort_handler,
		.tc_completion_handler = scic_sds_stp_request_udma_await_tc_completion_tc_completion_handler,
		.frame_handler = scic_sds_stp_request_udma_general_frame_handler,
	},
	[SCIC_SDS_STP_REQUEST_STARTED_UDMA_AWAIT_D2H_REG_FIS_SUBSTATE] = {
		.abort_handler = scic_sds_request_started_state_abort_handler,
		.frame_handler = scic_sds_stp_request_udma_await_d2h_reg_fis_frame_handler,
	},
};
1272
1273 static void scic_sds_stp_request_started_udma_await_tc_completion_enter(
1274 void *object)
1275 {
1276 struct scic_sds_request *sci_req = object;
1277
1278 SET_STATE_HANDLER(
1279 sci_req,
1280 scic_sds_stp_request_started_udma_substate_handler_table,
1281 SCIC_SDS_STP_REQUEST_STARTED_UDMA_AWAIT_TC_COMPLETION_SUBSTATE
1282 );
1283 }
1284
1285 /**
1286 *
1287 *
1288 * This state is entered when there is an TC completion failure. The hardware
1289 * received an unexpected condition while processing the IO request and now
1290 * will UF the D2H register FIS to complete the IO.
1291 */
1292 static void scic_sds_stp_request_started_udma_await_d2h_reg_fis_enter(
1293 void *object)
1294 {
1295 struct scic_sds_request *sci_req = object;
1296
1297 SET_STATE_HANDLER(
1298 sci_req,
1299 scic_sds_stp_request_started_udma_substate_handler_table,
1300 SCIC_SDS_STP_REQUEST_STARTED_UDMA_AWAIT_D2H_REG_FIS_SUBSTATE
1301 );
1302 }
1303
1304 /* --------------------------------------------------------------------------- */
1305
/* Entry actions for each started-UDMA substate. */
static const struct sci_base_state scic_sds_stp_request_started_udma_substate_table[] = {
	[SCIC_SDS_STP_REQUEST_STARTED_UDMA_AWAIT_TC_COMPLETION_SUBSTATE] = {
		.enter_state = scic_sds_stp_request_started_udma_await_tc_completion_enter,
	},
	[SCIC_SDS_STP_REQUEST_STARTED_UDMA_AWAIT_D2H_REG_FIS_SUBSTATE] = {
		.enter_state = scic_sds_stp_request_started_udma_await_d2h_reg_fis_enter,
	},
};
1314
/**
 * scic_sds_stp_udma_request_construct() - construct a SATA UDMA request
 * @sci_req: the request to construct
 * @len: transfer length in bytes
 * @dir: DMA data direction of the transfer
 *
 * NOTE(review): SCU_TASK_TYPE_DMA_IN is passed unconditionally even though
 * @dir is also forwarded; presumably the optimized-request constructor
 * adjusts the task type for DMA_TO_DEVICE -- confirm.
 *
 * Returns SCI_SUCCESS.
 */
enum sci_status scic_sds_stp_udma_request_construct(struct scic_sds_request *sci_req,
						    u32 len,
						    enum dma_data_direction dir)
{
	scic_sds_stp_non_ncq_request_construct(sci_req);

	scic_sds_stp_optimized_request_construct(sci_req, SCU_TASK_TYPE_DMA_IN,
						 len, dir);

	/* Start the UDMA substate machine waiting for the TC completion. */
	sci_base_state_machine_construct(
		&sci_req->started_substate_machine,
		sci_req,
		scic_sds_stp_request_started_udma_substate_table,
		SCIC_SDS_STP_REQUEST_STARTED_UDMA_AWAIT_TC_COMPLETION_SUBSTATE
		);

	return SCI_SUCCESS;
}
1333
/**
 *
 * @sci_req:
 * @completion_code:
 *
 * This method processes the TC completion for the transmission of the H2D
 * register FIS that asserted SRST.  On success the request advances to the
 * await-H2D-diagnostic substate (where SRST is de-asserted); any other
 * completion status fails the request.  SCI_SUCCESS is always returned.
 */
static enum sci_status scic_sds_stp_request_soft_reset_await_h2d_asserted_tc_completion_handler(
	struct scic_sds_request *sci_req,
	u32 completion_code)
{
	switch (SCU_GET_COMPLETION_TL_STATUS(completion_code)) {
	case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_GOOD):
		scic_sds_request_set_status(
			sci_req, SCU_TASK_DONE_GOOD, SCI_SUCCESS
			);

		sci_base_state_machine_change_state(
			&sci_req->started_substate_machine,
			SCIC_SDS_STP_REQUEST_STARTED_SOFT_RESET_AWAIT_H2D_DIAGNOSTIC_COMPLETION_SUBSTATE
			);
		break;

	default:
		/*
		 * All other completion status cause the IO to be complete. If a NAK
		 * was received, then it is up to the user to retry the request. */
		scic_sds_request_set_status(
			sci_req,
			SCU_NORMALIZE_COMPLETION_STATUS(completion_code),
			SCI_FAILURE_CONTROLLER_SPECIFIC_IO_ERR
			);

		sci_base_state_machine_change_state(
			&sci_req->state_machine, SCI_BASE_REQUEST_STATE_COMPLETED);
		break;
	}

	return SCI_SUCCESS;
}
1377
/**
 *
 * @sci_req:
 * @completion_code:
 *
 * This method processes the TC completion for the transmission of the H2D
 * register FIS that de-asserted SRST (the diagnostic stage of the soft
 * reset).  On success the request advances to wait for the device's D2H
 * response frame; any other completion status fails the request.
 * SCI_SUCCESS is always returned.
 */
static enum sci_status scic_sds_stp_request_soft_reset_await_h2d_diagnostic_tc_completion_handler(
	struct scic_sds_request *sci_req,
	u32 completion_code)
{
	switch (SCU_GET_COMPLETION_TL_STATUS(completion_code)) {
	case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_GOOD):
		scic_sds_request_set_status(
			sci_req, SCU_TASK_DONE_GOOD, SCI_SUCCESS
			);

		sci_base_state_machine_change_state(
			&sci_req->started_substate_machine,
			SCIC_SDS_STP_REQUEST_STARTED_SOFT_RESET_AWAIT_D2H_RESPONSE_FRAME_SUBSTATE
			);
		break;

	default:
		/*
		 * All other completion status cause the IO to be complete. If a NAK
		 * was received, then it is up to the user to retry the request. */
		scic_sds_request_set_status(
			sci_req,
			SCU_NORMALIZE_COMPLETION_STATUS(completion_code),
			SCI_FAILURE_CONTROLLER_SPECIFIC_IO_ERR
			);

		sci_base_state_machine_change_state(&sci_req->state_machine,
						    SCI_BASE_REQUEST_STATE_COMPLETED);
		break;
	}

	return SCI_SUCCESS;
}
1421
/**
 *
 * @sci_req: This parameter specifies the request for which a frame has been
 *    received.
 * @frame_index: This parameter specifies the index of the frame that has been
 *    received.
 *
 * This method processes frames received from the target while waiting for a
 * device to host register FIS. If a non-register FIS is received during this
 * time, it is treated as a protocol violation from an IO perspective.  In
 * either case the request is completed and the frame is released.  Returns
 * whether the received frame was processed successfully.
 */
static enum sci_status scic_sds_stp_request_soft_reset_await_d2h_frame_handler(
	struct scic_sds_request *sci_req,
	u32 frame_index)
{
	enum sci_status status;
	struct dev_to_host_fis *frame_header;
	u32 *frame_buffer;
	struct scic_sds_stp_request *stp_req = &sci_req->stp.req;
	struct scic_sds_controller *scic = sci_req->owning_controller;

	status = scic_sds_unsolicited_frame_control_get_header(&scic->uf_control,
							       frame_index,
							       (void **)&frame_header);
	if (status != SCI_SUCCESS) {
		dev_err(scic_to_dev(scic),
			"%s: SCIC IO Request 0x%p could not get frame header "
			"for frame index %d, status %x\n",
			__func__, stp_req, frame_index, status);
		return status;
	}

	switch (frame_header->fis_type) {
	case FIS_REGD2H:
		scic_sds_unsolicited_frame_control_get_buffer(&scic->uf_control,
							      frame_index,
							      (void **)&frame_buffer);

		scic_sds_controller_copy_sata_response(&sci_req->stp.rsp,
						       frame_header,
						       frame_buffer);

		/* The command has completed with error */
		scic_sds_request_set_status(sci_req,
					    SCU_TASK_DONE_CHECK_RESPONSE,
					    SCI_FAILURE_IO_RESPONSE_VALID);
		break;

	default:
		dev_warn(scic_to_dev(scic),
			 "%s: IO Request:0x%p Frame Id:%d protocol "
			 "violation occurred\n", __func__, stp_req,
			 frame_index);

		scic_sds_request_set_status(sci_req, SCU_TASK_DONE_UNEXP_FIS,
					    SCI_FAILURE_PROTOCOL_VIOLATION);
		break;
	}

	/* Either way the request is done. */
	sci_base_state_machine_change_state(&sci_req->state_machine,
					    SCI_BASE_REQUEST_STATE_COMPLETED);

	/* Frame has been decoded return it to the controller */
	scic_sds_controller_release_frame(scic, frame_index);

	return status;
}
1490
1491 /* --------------------------------------------------------------------------- */
1492
/* Per-substate dispatch table for started soft-reset requests. */
static const struct scic_sds_io_request_state_handler scic_sds_stp_request_started_soft_reset_substate_handler_table[] = {
	[SCIC_SDS_STP_REQUEST_STARTED_SOFT_RESET_AWAIT_H2D_ASSERTED_COMPLETION_SUBSTATE] = {
		.abort_handler = scic_sds_request_started_state_abort_handler,
		.tc_completion_handler = scic_sds_stp_request_soft_reset_await_h2d_asserted_tc_completion_handler,
	},
	[SCIC_SDS_STP_REQUEST_STARTED_SOFT_RESET_AWAIT_H2D_DIAGNOSTIC_COMPLETION_SUBSTATE] = {
		.abort_handler = scic_sds_request_started_state_abort_handler,
		.tc_completion_handler = scic_sds_stp_request_soft_reset_await_h2d_diagnostic_tc_completion_handler,
	},
	[SCIC_SDS_STP_REQUEST_STARTED_SOFT_RESET_AWAIT_D2H_RESPONSE_FRAME_SUBSTATE] = {
		.abort_handler = scic_sds_request_started_state_abort_handler,
		.frame_handler = scic_sds_stp_request_soft_reset_await_d2h_frame_handler,
	},
};
1507
1508 static void scic_sds_stp_request_started_soft_reset_await_h2d_asserted_completion_enter(
1509 void *object)
1510 {
1511 struct scic_sds_request *sci_req = object;
1512
1513 SET_STATE_HANDLER(
1514 sci_req,
1515 scic_sds_stp_request_started_soft_reset_substate_handler_table,
1516 SCIC_SDS_STP_REQUEST_STARTED_SOFT_RESET_AWAIT_H2D_ASSERTED_COMPLETION_SUBSTATE
1517 );
1518
1519 scic_sds_remote_device_set_working_request(
1520 sci_req->target_device, sci_req
1521 );
1522 }
1523
/*
 * Entry action for the diagnostic (SRST de-assert) stage: clear the SRST
 * bit in the cached H2D FIS and the control-frame bit in the task context,
 * then re-post the request so the de-asserting FIS is transmitted.
 */
static void scic_sds_stp_request_started_soft_reset_await_h2d_diagnostic_completion_enter(
	void *object)
{
	struct scic_sds_request *sci_req = object;
	struct scu_task_context *task_context;
	struct host_to_dev_fis *h2d_fis;
	enum sci_status status;

	/* Clear the SRST bit */
	h2d_fis = &sci_req->stp.cmd;
	h2d_fis->control = 0;

	/* Clear the TC control bit */
	task_context = scic_sds_controller_get_task_context_buffer(
		sci_req->owning_controller, sci_req->io_tag);
	task_context->control_frame = 0;

	status = scic_controller_continue_io(sci_req);
	if (status == SCI_SUCCESS) {
		SET_STATE_HANDLER(
			sci_req,
			scic_sds_stp_request_started_soft_reset_substate_handler_table,
			SCIC_SDS_STP_REQUEST_STARTED_SOFT_RESET_AWAIT_H2D_DIAGNOSTIC_COMPLETION_SUBSTATE
			);
	}
	/* NOTE(review): a continue_io failure leaves the previous substate
	 * handlers installed and is not reported here -- confirm the core
	 * recovers this case elsewhere. */
}
1550
1551 static void scic_sds_stp_request_started_soft_reset_await_d2h_response_enter(
1552 void *object)
1553 {
1554 struct scic_sds_request *sci_req = object;
1555
1556 SET_STATE_HANDLER(
1557 sci_req,
1558 scic_sds_stp_request_started_soft_reset_substate_handler_table,
1559 SCIC_SDS_STP_REQUEST_STARTED_SOFT_RESET_AWAIT_D2H_RESPONSE_FRAME_SUBSTATE
1560 );
1561 }
1562
/* Entry actions for each started soft-reset substate. */
static const struct sci_base_state scic_sds_stp_request_started_soft_reset_substate_table[] = {
	[SCIC_SDS_STP_REQUEST_STARTED_SOFT_RESET_AWAIT_H2D_ASSERTED_COMPLETION_SUBSTATE] = {
		.enter_state = scic_sds_stp_request_started_soft_reset_await_h2d_asserted_completion_enter,
	},
	[SCIC_SDS_STP_REQUEST_STARTED_SOFT_RESET_AWAIT_H2D_DIAGNOSTIC_COMPLETION_SUBSTATE] = {
		.enter_state = scic_sds_stp_request_started_soft_reset_await_h2d_diagnostic_completion_enter,
	},
	[SCIC_SDS_STP_REQUEST_STARTED_SOFT_RESET_AWAIT_D2H_RESPONSE_FRAME_SUBSTATE] = {
		.enter_state = scic_sds_stp_request_started_soft_reset_await_d2h_response_enter,
	},
};
1574
/**
 * scic_sds_stp_soft_reset_request_construct() - construct a SATA soft-reset
 *    request
 * @sci_req: the request to construct
 *
 * Builds the raw STP task context and starts the soft-reset substate
 * machine in the await-H2D-asserted-completion substate (SRST assert).
 * Returns SCI_SUCCESS.
 */
enum sci_status scic_sds_stp_soft_reset_request_construct(struct scic_sds_request *sci_req)
{
	struct scic_sds_stp_request *stp_req = &sci_req->stp.req;

	scic_sds_stp_non_ncq_request_construct(sci_req);

	/* Build the STP task context structure */
	scu_stp_raw_request_construct_task_context(stp_req, sci_req->task_context_buffer);

	sci_base_state_machine_construct(&sci_req->started_substate_machine,
					 sci_req,
					 scic_sds_stp_request_started_soft_reset_substate_table,
					 SCIC_SDS_STP_REQUEST_STARTED_SOFT_RESET_AWAIT_H2D_ASSERTED_COMPLETION_SUBSTATE);

	return SCI_SUCCESS;
}
This page took 0.102066 seconds and 5 git commands to generate.