1405aa703c3a6efadc0b79d7210173ece6911c06
[deliverable/linux.git] / drivers / scsi / isci / core / scic_sds_request.c
1 /*
2 * This file is provided under a dual BSD/GPLv2 license. When using or
3 * redistributing this file, you may do so under either license.
4 *
5 * GPL LICENSE SUMMARY
6 *
7 * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved.
8 *
9 * This program is free software; you can redistribute it and/or modify
10 * it under the terms of version 2 of the GNU General Public License as
11 * published by the Free Software Foundation.
12 *
13 * This program is distributed in the hope that it will be useful, but
14 * WITHOUT ANY WARRANTY; without even the implied warranty of
15 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
16 * General Public License for more details.
17 *
18 * You should have received a copy of the GNU General Public License
19 * along with this program; if not, write to the Free Software
20 * Foundation, Inc., 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
21 * The full GNU General Public License is included in this distribution
22 * in the file called LICENSE.GPL.
23 *
24 * BSD LICENSE
25 *
26 * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved.
27 * All rights reserved.
28 *
29 * Redistribution and use in source and binary forms, with or without
30 * modification, are permitted provided that the following conditions
31 * are met:
32 *
33 * * Redistributions of source code must retain the above copyright
34 * notice, this list of conditions and the following disclaimer.
35 * * Redistributions in binary form must reproduce the above copyright
36 * notice, this list of conditions and the following disclaimer in
37 * the documentation and/or other materials provided with the
38 * distribution.
39 * * Neither the name of Intel Corporation nor the names of its
40 * contributors may be used to endorse or promote products derived
41 * from this software without specific prior written permission.
42 *
43 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
44 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
45 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
46 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
47 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
48 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
49 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
50 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
51 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
52 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
53 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
54 */
55
56 #include <scsi/sas.h>
57 #include "scic_io_request.h"
58 #include "scu_registers.h"
59 #include "scic_sds_port.h"
60 #include "remote_device.h"
61 #include "scic_sds_request.h"
62 #include "scic_sds_smp_request.h"
63 #include "scic_sds_stp_request.h"
64 #include "scic_sds_unsolicited_frame_control.h"
65 #include "sci_util.h"
66 #include "scu_completion_codes.h"
67 #include "scu_task_context.h"
68 #include "request.h"
69 #include "task.h"
70
71 /*
72 * ****************************************************************************
73 * * SCIC SDS IO REQUEST CONSTANTS
74 * **************************************************************************** */
75
76 /**
77 *
78 *
79 * We have no timer requirements for IO requests right now
80 */
81 #define SCIC_SDS_IO_REQUEST_MINIMUM_TIMER_COUNT (0)
82 #define SCIC_SDS_IO_REQUEST_MAXIMUM_TIMER_COUNT (0)
83
84 /**
85 * This method returns the sgl element pair for the specificed sgl_pair index.
86 * @sci_req: This parameter specifies the IO request for which to retrieve
87 * the Scatter-Gather List element pair.
88 * @sgl_pair_index: This parameter specifies the index into the SGL element
89 * pair to be retrieved.
90 *
91 * This method returns a pointer to an struct scu_sgl_element_pair.
92 */
93 static struct scu_sgl_element_pair *scic_sds_request_get_sgl_element_pair(
94 struct scic_sds_request *sci_req,
95 u32 sgl_pair_index
96 ) {
97 struct scu_task_context *task_context;
98
99 task_context = (struct scu_task_context *)sci_req->task_context_buffer;
100
101 if (sgl_pair_index == 0) {
102 return &task_context->sgl_pair_ab;
103 } else if (sgl_pair_index == 1) {
104 return &task_context->sgl_pair_cd;
105 }
106
107 return &sci_req->sg_table[sgl_pair_index - 2];
108 }
109
/**
 * scic_sds_request_build_sgl() - build the hardware SGL for an IO request
 * @sds_request: This parameter specifies the IO request for which to build
 *    the Scatter-Gather List.
 *
 * Walks the sas_task scatterlist two entries at a time, filling one
 * scu_sgl_element_pair per iteration and chaining each pair to the next via
 * its DMA address.  A request with no scatterlist (num_scatter == 0) is
 * DMA-mapped here as one contiguous buffer instead; the mapping handle is
 * saved in zero_scatter_daddr (presumably unmapped at completion elsewhere
 * — not visible in this file).
 */
void scic_sds_request_build_sgl(struct scic_sds_request *sds_request)
{
	struct isci_request *isci_request = sci_req_to_ireq(sds_request);
	struct isci_host *isci_host = isci_request->isci_host;
	struct sas_task *task = isci_request_access_task(isci_request);
	struct scatterlist *sg = NULL;
	dma_addr_t dma_addr;
	u32 sg_idx = 0;
	struct scu_sgl_element_pair *scu_sg = NULL;
	struct scu_sgl_element_pair *prev_sg = NULL;

	if (task->num_scatter > 0) {
		sg = task->scatter;

		while (sg) {
			scu_sg = scic_sds_request_get_sgl_element_pair(
					sds_request,
					sg_idx);

			SCU_SGL_COPY(scu_sg->A, sg);

			sg = sg_next(sg);

			/* Each pair holds two scatterlist entries; zero the
			 * B element when the list has odd length. */
			if (sg) {
				SCU_SGL_COPY(scu_sg->B, sg);
				sg = sg_next(sg);
			} else
				SCU_SGL_ZERO(scu_sg->B);

			/* Chain the previous pair to this one via its
			 * bus address. */
			if (prev_sg) {
				dma_addr =
					scic_io_request_get_dma_addr(
							sds_request,
							scu_sg);

				prev_sg->next_pair_upper =
					upper_32_bits(dma_addr);
				prev_sg->next_pair_lower =
					lower_32_bits(dma_addr);
			}

			prev_sg = scu_sg;
			sg_idx++;
		}
	} else {	/* handle when no sg */
		scu_sg = scic_sds_request_get_sgl_element_pair(sds_request,
							       sg_idx);

		dma_addr = dma_map_single(&isci_host->pdev->dev,
					  task->scatter,
					  task->total_xfer_len,
					  task->data_dir);

		isci_request->zero_scatter_daddr = dma_addr;

		scu_sg->A.length = task->total_xfer_len;
		scu_sg->A.address_upper = upper_32_bits(dma_addr);
		scu_sg->A.address_lower = lower_32_bits(dma_addr);
	}

	/* Terminate the chain on the last pair written. */
	if (scu_sg) {
		scu_sg->next_pair_upper = 0;
		scu_sg->next_pair_lower = 0;
	}
}
181
182 static void scic_sds_ssp_io_request_assign_buffers(struct scic_sds_request *sci_req)
183 {
184 if (sci_req->was_tag_assigned_by_user == false)
185 sci_req->task_context_buffer = &sci_req->tc;
186 }
187
/**
 * scic_sds_io_request_build_ssp_command_iu() - populate the SSP Command IU
 * @sci_req: request whose ssp.cmd buffer is filled from its sas_task.
 *
 * Copies the LUN, task attributes and CDB from the libsas task into the
 * command information unit that will be transmitted to the target.
 */
static void scic_sds_io_request_build_ssp_command_iu(struct scic_sds_request *sci_req)
{
	struct ssp_cmd_iu *cmd_iu;
	struct isci_request *ireq = sci_req_to_ireq(sci_req);
	struct sas_task *task = isci_request_access_task(ireq);

	cmd_iu = &sci_req->ssp.cmd;

	memcpy(cmd_iu->LUN, task->ssp_task.LUN, 8);
	cmd_iu->add_cdb_len = 0;
	cmd_iu->_r_a = 0;	/* reserved fields zeroed */
	cmd_iu->_r_b = 0;
	cmd_iu->en_fburst = 0; /* unsupported */
	cmd_iu->task_prio = task->ssp_task.task_prio;
	cmd_iu->task_attr = task->ssp_task.task_attr;
	cmd_iu->_r_c = 0;

	/* The CDB is copied byte-swapped one 32-bit word at a time. */
	sci_swab32_cpy(&cmd_iu->cdb, task->ssp_task.cdb,
		       sizeof(task->ssp_task.cdb) / sizeof(u32));
}
208
/**
 * scic_sds_task_request_build_ssp_task_iu() - populate the SSP Task IU
 * @sci_req: task-management request whose ssp.tmf buffer is filled.
 *
 * Builds the task information unit from the associated isci_tmf.  The
 * managed task tag is only taken from the TMF when this request really is
 * a task-management request; otherwise the invalid-tag sentinel is used.
 */
static void scic_sds_task_request_build_ssp_task_iu(struct scic_sds_request *sci_req)
{
	struct ssp_task_iu *task_iu;
	struct isci_request *ireq = sci_req_to_ireq(sci_req);
	struct sas_task *task = isci_request_access_task(ireq);
	struct isci_tmf *isci_tmf = isci_request_access_tmf(ireq);

	task_iu = &sci_req->ssp.tmf;

	memset(task_iu, 0, sizeof(struct ssp_task_iu));

	memcpy(task_iu->LUN, task->ssp_task.LUN, 8);

	task_iu->task_func = isci_tmf->tmf_code;
	task_iu->task_tag =
		(ireq->ttype == tmf_task) ?
		isci_tmf->io_tag :
		SCI_CONTROLLER_INVALID_IO_TAG;
}
228
229 /**
230 * This method is will fill in the SCU Task Context for any type of SSP request.
231 * @sci_req:
232 * @task_context:
233 *
234 */
235 static void scu_ssp_reqeust_construct_task_context(
236 struct scic_sds_request *sds_request,
237 struct scu_task_context *task_context)
238 {
239 dma_addr_t dma_addr;
240 struct scic_sds_controller *controller;
241 struct scic_sds_remote_device *target_device;
242 struct scic_sds_port *target_port;
243
244 controller = scic_sds_request_get_controller(sds_request);
245 target_device = scic_sds_request_get_device(sds_request);
246 target_port = scic_sds_request_get_port(sds_request);
247
248 /* Fill in the TC with the its required data */
249 task_context->abort = 0;
250 task_context->priority = 0;
251 task_context->initiator_request = 1;
252 task_context->connection_rate = target_device->connection_rate;
253 task_context->protocol_engine_index =
254 scic_sds_controller_get_protocol_engine_group(controller);
255 task_context->logical_port_index =
256 scic_sds_port_get_index(target_port);
257 task_context->protocol_type = SCU_TASK_CONTEXT_PROTOCOL_SSP;
258 task_context->valid = SCU_TASK_CONTEXT_VALID;
259 task_context->context_type = SCU_TASK_CONTEXT_TYPE;
260
261 task_context->remote_node_index =
262 scic_sds_remote_device_get_index(sds_request->target_device);
263 task_context->command_code = 0;
264
265 task_context->link_layer_control = 0;
266 task_context->do_not_dma_ssp_good_response = 1;
267 task_context->strict_ordering = 0;
268 task_context->control_frame = 0;
269 task_context->timeout_enable = 0;
270 task_context->block_guard_enable = 0;
271
272 task_context->address_modifier = 0;
273
274 /* task_context->type.ssp.tag = sci_req->io_tag; */
275 task_context->task_phase = 0x01;
276
277 if (sds_request->was_tag_assigned_by_user) {
278 /*
279 * Build the task context now since we have already read
280 * the data
281 */
282 sds_request->post_context =
283 (SCU_CONTEXT_COMMAND_REQUEST_TYPE_POST_TC |
284 (scic_sds_controller_get_protocol_engine_group(
285 controller) <<
286 SCU_CONTEXT_COMMAND_PROTOCOL_ENGINE_GROUP_SHIFT) |
287 (scic_sds_port_get_index(target_port) <<
288 SCU_CONTEXT_COMMAND_LOGICAL_PORT_SHIFT) |
289 scic_sds_io_tag_get_index(sds_request->io_tag));
290 } else {
291 /*
292 * Build the task context now since we have already read
293 * the data
294 *
295 * I/O tag index is not assigned because we have to wait
296 * until we get a TCi
297 */
298 sds_request->post_context =
299 (SCU_CONTEXT_COMMAND_REQUEST_TYPE_POST_TC |
300 (scic_sds_controller_get_protocol_engine_group(
301 owning_controller) <<
302 SCU_CONTEXT_COMMAND_PROTOCOL_ENGINE_GROUP_SHIFT) |
303 (scic_sds_port_get_index(target_port) <<
304 SCU_CONTEXT_COMMAND_LOGICAL_PORT_SHIFT));
305 }
306
307 /*
308 * Copy the physical address for the command buffer to the
309 * SCU Task Context
310 */
311 dma_addr = scic_io_request_get_dma_addr(sds_request,
312 &sds_request->ssp.cmd);
313
314 task_context->command_iu_upper = upper_32_bits(dma_addr);
315 task_context->command_iu_lower = lower_32_bits(dma_addr);
316
317 /*
318 * Copy the physical address for the response buffer to the
319 * SCU Task Context
320 */
321 dma_addr = scic_io_request_get_dma_addr(sds_request,
322 &sds_request->ssp.rsp);
323
324 task_context->response_iu_upper = upper_32_bits(dma_addr);
325 task_context->response_iu_lower = lower_32_bits(dma_addr);
326 }
327
/**
 * scu_ssp_io_request_construct_task_context() - fill in the SCU Task
 * Context for an SSP IO (data-bearing) request.
 * @sci_req: the request being constructed.
 * @dir: DMA direction of the transfer.
 * @len: transfer length in bytes.
 *
 * Builds on the common SSP task-context construction, then sets the
 * IO-specific fields and, for non-zero transfers, the SGL.
 */
static void scu_ssp_io_request_construct_task_context(
	struct scic_sds_request *sci_req,
	enum dma_data_direction dir,
	u32 len)
{
	struct scu_task_context *task_context;

	task_context = scic_sds_request_get_task_context(sci_req);

	scu_ssp_reqeust_construct_task_context(sci_req, task_context);

	task_context->ssp_command_iu_length =
		sizeof(struct ssp_cmd_iu) / sizeof(u32);
	task_context->type.ssp.frame_type = SSP_COMMAND;

	switch (dir) {
	/* DMA_NONE and anything unexpected are treated as a read. */
	case DMA_FROM_DEVICE:
	case DMA_NONE:
	default:
		task_context->task_type = SCU_TASK_TYPE_IOREAD;
		break;
	case DMA_TO_DEVICE:
		task_context->task_type = SCU_TASK_TYPE_IOWRITE;
		break;
	}

	task_context->transfer_length_bytes = len;

	/* Only data-bearing requests need a scatter-gather list. */
	if (task_context->transfer_length_bytes > 0)
		scic_sds_request_build_sgl(sci_req);
}
364
365 static void scic_sds_ssp_task_request_assign_buffers(struct scic_sds_request *sci_req)
366 {
367 if (sci_req->was_tag_assigned_by_user == false)
368 sci_req->task_context_buffer = &sci_req->tc;
369 }
370
/**
 * scu_ssp_task_request_construct_task_context() - fill in the SCU Task
 * Context for an SSP Task (management) request.
 * @sci_req: This parameter specifies the task request object being
 *    constructed.
 *
 * The following important settings are utilized: -# priority ==
 * SCU_TASK_PRIORITY_HIGH. This ensures that the task request is issued
 * ahead of other task destined for the same Remote Node. -# task_type ==
 * SCU_TASK_TYPE_RAW_FRAME. This simply indicates that a normal request
 * type (i.e. non-raw frame) is being utilized to perform task management.
 * -# control_frame == 1. This ensures that the proper endianness is set so
 * that the bytes are transmitted in the right order for a task frame.
 */
static void scu_ssp_task_request_construct_task_context(
	struct scic_sds_request *sci_req)
{
	struct scu_task_context *task_context;

	task_context = scic_sds_request_get_task_context(sci_req);

	/* Common SSP fields first, then the task-management overrides. */
	scu_ssp_reqeust_construct_task_context(sci_req, task_context);

	task_context->control_frame = 1;
	task_context->priority = SCU_TASK_PRIORITY_HIGH;
	task_context->task_type = SCU_TASK_TYPE_RAW_FRAME;
	task_context->transfer_length_bytes = 0;	/* TMFs carry no data */
	task_context->type.ssp.frame_type = SSP_TASK;
	task_context->ssp_command_iu_length =
		sizeof(struct ssp_task_iu) / sizeof(u32);
}
401
402
/**
 * This method constructs the SSP Command IU data for this SSP passthrough
 * command request object.
 * @sci_req: This parameter specifies the request object for which the SSP
 *    command information unit is being built.
 *
 * enum sci_status, returns invalid parameter if the CDB is longer than 16 bytes
 */
411
412
413 /**
414 * This method constructs the SATA request object.
415 * @sci_req:
416 * @sat_protocol:
417 * @transfer_length:
418 * @data_direction:
419 * @copy_rx_frame:
420 *
421 * enum sci_status
422 */
423 static enum sci_status
424 scic_io_request_construct_sata(struct scic_sds_request *sci_req,
425 u32 len,
426 enum dma_data_direction dir,
427 bool copy)
428 {
429 enum sci_status status = SCI_SUCCESS;
430 struct isci_request *ireq = sci_req_to_ireq(sci_req);
431 struct sas_task *task = isci_request_access_task(ireq);
432
433 /* check for management protocols */
434 if (ireq->ttype == tmf_task) {
435 struct isci_tmf *tmf = isci_request_access_tmf(ireq);
436
437 if (tmf->tmf_code == isci_tmf_sata_srst_high ||
438 tmf->tmf_code == isci_tmf_sata_srst_low)
439 return scic_sds_stp_soft_reset_request_construct(sci_req);
440 else {
441 dev_err(scic_to_dev(sci_req->owning_controller),
442 "%s: Request 0x%p received un-handled SAT "
443 "management protocol 0x%x.\n",
444 __func__, sci_req, tmf->tmf_code);
445
446 return SCI_FAILURE;
447 }
448 }
449
450 if (!sas_protocol_ata(task->task_proto)) {
451 dev_err(scic_to_dev(sci_req->owning_controller),
452 "%s: Non-ATA protocol in SATA path: 0x%x\n",
453 __func__,
454 task->task_proto);
455 return SCI_FAILURE;
456
457 }
458
459 /* non data */
460 if (task->data_dir == DMA_NONE)
461 return scic_sds_stp_non_data_request_construct(sci_req);
462
463 /* NCQ */
464 if (task->ata_task.use_ncq)
465 return scic_sds_stp_ncq_request_construct(sci_req, len, dir);
466
467 /* DMA */
468 if (task->ata_task.dma_xfer)
469 return scic_sds_stp_udma_request_construct(sci_req, len, dir);
470 else /* PIO */
471 return scic_sds_stp_pio_request_construct(sci_req, copy);
472
473 return status;
474 }
475
476 enum sci_status scic_io_request_construct_basic_ssp(
477 struct scic_sds_request *sci_req)
478 {
479 struct isci_request *ireq = sci_req_to_ireq(sci_req);
480 struct sas_task *task = isci_request_access_task(ireq);
481
482 sci_req->protocol = SCIC_SSP_PROTOCOL;
483
484 scu_ssp_io_request_construct_task_context(sci_req,
485 task->data_dir,
486 task->total_xfer_len);
487
488 scic_sds_io_request_build_ssp_command_iu(sci_req);
489
490 sci_base_state_machine_change_state(
491 &sci_req->state_machine,
492 SCI_BASE_REQUEST_STATE_CONSTRUCTED);
493
494 return SCI_SUCCESS;
495 }
496
497
498 enum sci_status scic_task_request_construct_ssp(
499 struct scic_sds_request *sci_req)
500 {
501 /* Construct the SSP Task SCU Task Context */
502 scu_ssp_task_request_construct_task_context(sci_req);
503
504 /* Fill in the SSP Task IU */
505 scic_sds_task_request_build_ssp_task_iu(sci_req);
506
507 sci_base_state_machine_change_state(&sci_req->state_machine,
508 SCI_BASE_REQUEST_STATE_CONSTRUCTED);
509
510 return SCI_SUCCESS;
511 }
512
513
514 enum sci_status scic_io_request_construct_basic_sata(
515 struct scic_sds_request *sci_req)
516 {
517 enum sci_status status;
518 struct scic_sds_stp_request *stp_req;
519 bool copy = false;
520 struct isci_request *isci_request = sci_req_to_ireq(sci_req);
521 struct sas_task *task = isci_request_access_task(isci_request);
522
523 stp_req = &sci_req->stp.req;
524 sci_req->protocol = SCIC_STP_PROTOCOL;
525
526 copy = (task->data_dir == DMA_NONE) ? false : true;
527
528 status = scic_io_request_construct_sata(sci_req,
529 task->total_xfer_len,
530 task->data_dir,
531 copy);
532
533 if (status == SCI_SUCCESS)
534 sci_base_state_machine_change_state(&sci_req->state_machine,
535 SCI_BASE_REQUEST_STATE_CONSTRUCTED);
536
537 return status;
538 }
539
540
541 enum sci_status scic_task_request_construct_sata(struct scic_sds_request *sci_req)
542 {
543 enum sci_status status = SCI_SUCCESS;
544 struct isci_request *ireq = sci_req_to_ireq(sci_req);
545
546 /* check for management protocols */
547 if (ireq->ttype == tmf_task) {
548 struct isci_tmf *tmf = isci_request_access_tmf(ireq);
549
550 if (tmf->tmf_code == isci_tmf_sata_srst_high ||
551 tmf->tmf_code == isci_tmf_sata_srst_low) {
552 status = scic_sds_stp_soft_reset_request_construct(sci_req);
553 } else {
554 dev_err(scic_to_dev(sci_req->owning_controller),
555 "%s: Request 0x%p received un-handled SAT "
556 "Protocol 0x%x.\n",
557 __func__, sci_req, tmf->tmf_code);
558
559 return SCI_FAILURE;
560 }
561 }
562
563 if (status == SCI_SUCCESS)
564 sci_base_state_machine_change_state(
565 &sci_req->state_machine,
566 SCI_BASE_REQUEST_STATE_CONSTRUCTED);
567
568 return status;
569 }
570
571
572 u16 scic_io_request_get_io_tag(
573 struct scic_sds_request *sci_req)
574 {
575 return sci_req->io_tag;
576 }
577
578
579 u32 scic_request_get_controller_status(
580 struct scic_sds_request *sci_req)
581 {
582 return sci_req->scu_status;
583 }
584
/* Base offset of the task-context SRAM within the SCU register BAR. */
#define SCU_TASK_CONTEXT_SRAM 0x200000
/**
 * scic_io_request_get_number_of_bytes_transferred() - read the hardware
 * byte count for a request.
 * @scic_sds_request: the request whose transfer count is queried.
 *
 * Reads the data_offset field of the request's task context directly out
 * of the controller's task-context SRAM.  The read is only performed while
 * the SMU address_modifier register is zero; otherwise 0 is returned.
 */
u32 scic_io_request_get_number_of_bytes_transferred(
	struct scic_sds_request *scic_sds_request)
{
	struct scic_sds_controller *scic = scic_sds_request->owning_controller;
	u32 ret_val = 0;

	if (readl(&scic->smu_registers->address_modifier) == 0) {
		void __iomem *scu_reg_base = scic->scu_registers;
		/*
		 * get the bytes of data from the Address == BAR1 + 20002Ch + (256*TCi) where
		 * BAR1 is the scu_registers
		 * 0x20002C = 0x200000 + 0x2c
		 * = start of task context SRAM + offset of (type.ssp.data_offset)
		 * TCi is the io_tag of struct scic_sds_request */
		ret_val = readl(scu_reg_base +
				(SCU_TASK_CONTEXT_SRAM + offsetof(struct scu_task_context, type.ssp.data_offset)) +
				((sizeof(struct scu_task_context)) * scic_sds_io_tag_get_index(scic_sds_request->io_tag)));
	}

	return ret_val;
}
607
608
609 /*
610 * ****************************************************************************
611 * * SCIC SDS Interface Implementation
612 * **************************************************************************** */
613
614 enum sci_status
615 scic_sds_request_start(struct scic_sds_request *request)
616 {
617 if (request->device_sequence !=
618 scic_sds_remote_device_get_sequence(request->target_device))
619 return SCI_FAILURE;
620
621 if (request->state_handlers->start_handler)
622 return request->state_handlers->start_handler(request);
623
624 dev_warn(scic_to_dev(request->owning_controller),
625 "%s: SCIC IO Request requested to start while in wrong "
626 "state %d\n",
627 __func__,
628 sci_base_state_machine_get_state(&request->state_machine));
629
630 return SCI_FAILURE_INVALID_STATE;
631 }
632
633 enum sci_status
634 scic_sds_io_request_terminate(struct scic_sds_request *request)
635 {
636 if (request->state_handlers->abort_handler)
637 return request->state_handlers->abort_handler(request);
638
639 dev_warn(scic_to_dev(request->owning_controller),
640 "%s: SCIC IO Request requested to abort while in wrong "
641 "state %d\n",
642 __func__,
643 sci_base_state_machine_get_state(&request->state_machine));
644
645 return SCI_FAILURE_INVALID_STATE;
646 }
647
648 enum sci_status
649 scic_sds_io_request_complete(struct scic_sds_request *request)
650 {
651 if (request->state_handlers->complete_handler)
652 return request->state_handlers->complete_handler(request);
653
654 dev_warn(scic_to_dev(request->owning_controller),
655 "%s: SCIC IO Request requested to complete while in wrong "
656 "state %d\n",
657 __func__,
658 sci_base_state_machine_get_state(&request->state_machine));
659
660 return SCI_FAILURE_INVALID_STATE;
661 }
662
663 enum sci_status scic_sds_io_request_event_handler(
664 struct scic_sds_request *request,
665 u32 event_code)
666 {
667 if (request->state_handlers->event_handler)
668 return request->state_handlers->event_handler(request, event_code);
669
670 dev_warn(scic_to_dev(request->owning_controller),
671 "%s: SCIC IO Request given event code notification %x while "
672 "in wrong state %d\n",
673 __func__,
674 event_code,
675 sci_base_state_machine_get_state(&request->state_machine));
676
677 return SCI_FAILURE_INVALID_STATE;
678 }
679
680 enum sci_status
681 scic_sds_io_request_tc_completion(struct scic_sds_request *request, u32 completion_code)
682 {
683 if (request->state_machine.current_state_id == SCI_BASE_REQUEST_STATE_STARTED &&
684 request->has_started_substate_machine == false)
685 return scic_sds_request_started_state_tc_completion_handler(request, completion_code);
686 else if (request->state_handlers->tc_completion_handler)
687 return request->state_handlers->tc_completion_handler(request, completion_code);
688
689 dev_warn(scic_to_dev(request->owning_controller),
690 "%s: SCIC IO Request given task completion notification %x "
691 "while in wrong state %d\n",
692 __func__,
693 completion_code,
694 sci_base_state_machine_get_state(&request->state_machine));
695
696 return SCI_FAILURE_INVALID_STATE;
697
698 }
699
700
701 /**
702 *
703 * @sci_req: The SCIC_SDS_IO_REQUEST_T object for which the start
704 * operation is to be executed.
705 * @frame_index: The frame index returned by the hardware for the reqeust
706 * object.
707 *
708 * This method invokes the core state frame handler for the
709 * SCIC_SDS_IO_REQUEST_T object. enum sci_status
710 */
711 enum sci_status scic_sds_io_request_frame_handler(
712 struct scic_sds_request *request,
713 u32 frame_index)
714 {
715 if (request->state_handlers->frame_handler)
716 return request->state_handlers->frame_handler(request, frame_index);
717
718 dev_warn(scic_to_dev(request->owning_controller),
719 "%s: SCIC IO Request given unexpected frame %x while in "
720 "state %d\n",
721 __func__,
722 frame_index,
723 sci_base_state_machine_get_state(&request->state_machine));
724
725 scic_sds_controller_release_frame(request->owning_controller, frame_index);
726 return SCI_FAILURE_INVALID_STATE;
727 }
728
729 /*
730 * This function copies response data for requests returning response data
731 * instead of sense data.
732 * @sci_req: This parameter specifies the request object for which to copy
733 * the response data.
734 */
735 void scic_sds_io_request_copy_response(struct scic_sds_request *sci_req)
736 {
737 void *resp_buf;
738 u32 len;
739 struct ssp_response_iu *ssp_response;
740 struct isci_request *ireq = sci_req_to_ireq(sci_req);
741 struct isci_tmf *isci_tmf = isci_request_access_tmf(ireq);
742
743 ssp_response = &sci_req->ssp.rsp;
744
745 resp_buf = &isci_tmf->resp.resp_iu;
746
747 len = min_t(u32,
748 SSP_RESP_IU_MAX_SIZE,
749 be32_to_cpu(ssp_response->response_data_len));
750
751 memcpy(resp_buf, ssp_response->resp_data, len);
752 }
753
754 /*
755 * *****************************************************************************
756 * * CONSTRUCTED STATE HANDLERS
757 * ***************************************************************************** */
758
/*
 * This method implements the action taken when a constructed
 * SCIC_SDS_IO_REQUEST_T object receives a scic_sds_request_start() request.
 * This method will, if necessary, allocate a TCi for the io request object and
 * then will, if necessary, copy the constructed TC data into the actual TC
 * buffer. If everything is successful the post context field is updated with
 * the TCi so the controller can post the request to the hardware. enum sci_status
 * SCI_SUCCESS SCI_FAILURE_INSUFFICIENT_RESOURCES
 */
static enum sci_status scic_sds_request_constructed_state_start_handler(
	struct scic_sds_request *request)
{
	struct scu_task_context *task_context;

	/* Allocate a TCi now if the user did not supply a tag up front. */
	if (request->io_tag == SCI_CONTROLLER_INVALID_IO_TAG) {
		request->io_tag =
			scic_controller_allocate_io_tag(request->owning_controller);
	}

	/* Record the IO Tag in the request */
	if (request->io_tag != SCI_CONTROLLER_INVALID_IO_TAG) {
		task_context = request->task_context_buffer;

		task_context->task_index = scic_sds_io_tag_get_index(request->io_tag);

		switch (task_context->protocol_type) {
		case SCU_TASK_CONTEXT_PROTOCOL_SMP:
		case SCU_TASK_CONTEXT_PROTOCOL_SSP:
			/* SSP/SMP Frame */
			task_context->type.ssp.tag = request->io_tag;
			task_context->type.ssp.target_port_transfer_tag = 0xFFFF;
			break;

		case SCU_TASK_CONTEXT_PROTOCOL_STP:
			/*
			 * STP/SATA Frame
			 * task_context->type.stp.ncq_tag = request->ncq_tag; */
			break;

		case SCU_TASK_CONTEXT_PROTOCOL_NONE:
			/* / @todo When do we set no protocol type? */
			break;

		default:
			/* This should never happen since we build the IO requests */
			break;
		}

		/*
		 * Check to see if we need to copy the task context buffer
		 * or have been building into the task context buffer */
		if (request->was_tag_assigned_by_user == false) {
			scic_sds_controller_copy_task_context(
				request->owning_controller, request);
		}

		/* Add to the post_context the io tag value */
		request->post_context |= scic_sds_io_tag_get_index(request->io_tag);

		/* Everything is good go ahead and change state */
		sci_base_state_machine_change_state(&request->state_machine,
			SCI_BASE_REQUEST_STATE_STARTED);

		return SCI_SUCCESS;
	}

	/* No TCi could be allocated. */
	return SCI_FAILURE_INSUFFICIENT_RESOURCES;
}
827
828 /*
829 * This method implements the action to be taken when an SCIC_SDS_IO_REQUEST_T
830 * object receives a scic_sds_request_terminate() request. Since the request
831 * has not yet been posted to the hardware the request transitions to the
832 * completed state. enum sci_status SCI_SUCCESS
833 */
834 static enum sci_status scic_sds_request_constructed_state_abort_handler(
835 struct scic_sds_request *request)
836 {
837 /*
838 * This request has been terminated by the user make sure that the correct
839 * status code is returned */
840 scic_sds_request_set_status(request,
841 SCU_TASK_DONE_TASK_ABORT,
842 SCI_FAILURE_IO_TERMINATED);
843
844 sci_base_state_machine_change_state(&request->state_machine,
845 SCI_BASE_REQUEST_STATE_COMPLETED);
846 return SCI_SUCCESS;
847 }
848
849 /*
850 * *****************************************************************************
851 * * STARTED STATE HANDLERS
852 * ***************************************************************************** */
853
854 /*
855 * This method implements the action to be taken when an SCIC_SDS_IO_REQUEST_T
856 * object receives a scic_sds_request_terminate() request. Since the request
857 * has been posted to the hardware the io request state is changed to the
858 * aborting state. enum sci_status SCI_SUCCESS
859 */
860 enum sci_status scic_sds_request_started_state_abort_handler(
861 struct scic_sds_request *request)
862 {
863 if (request->has_started_substate_machine)
864 sci_base_state_machine_stop(&request->started_substate_machine);
865
866 sci_base_state_machine_change_state(&request->state_machine,
867 SCI_BASE_REQUEST_STATE_ABORTING);
868 return SCI_SUCCESS;
869 }
870
871 /*
872 * scic_sds_request_started_state_tc_completion_handler() - This method process
873 * TC (task context) completions for normal IO request (i.e. Task/Abort
874 * Completions of type 0). This method will update the
875 * SCIC_SDS_IO_REQUEST_T::status field.
876 * @sci_req: This parameter specifies the request for which a completion
877 * occurred.
878 * @completion_code: This parameter specifies the completion code received from
879 * the SCU.
880 *
881 */
882 enum sci_status
883 scic_sds_request_started_state_tc_completion_handler(
884 struct scic_sds_request *sci_req,
885 u32 completion_code)
886 {
887 u8 datapres;
888 struct ssp_response_iu *resp_iu;
889
890 /*
891 * TODO: Any SDMA return code of other than 0 is bad
892 * decode 0x003C0000 to determine SDMA status
893 */
894 switch (SCU_GET_COMPLETION_TL_STATUS(completion_code)) {
895 case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_GOOD):
896 scic_sds_request_set_status(sci_req,
897 SCU_TASK_DONE_GOOD,
898 SCI_SUCCESS);
899 break;
900
901 case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_EARLY_RESP):
902 {
903 /*
904 * There are times when the SCU hardware will return an early
905 * response because the io request specified more data than is
906 * returned by the target device (mode pages, inquiry data,
907 * etc.). We must check the response stats to see if this is
908 * truly a failed request or a good request that just got
909 * completed early.
910 */
911 struct ssp_response_iu *resp = &sci_req->ssp.rsp;
912 ssize_t word_cnt = SSP_RESP_IU_MAX_SIZE / sizeof(u32);
913
914 sci_swab32_cpy(&sci_req->ssp.rsp,
915 &sci_req->ssp.rsp,
916 word_cnt);
917
918 if (resp->status == 0) {
919 scic_sds_request_set_status(
920 sci_req,
921 SCU_TASK_DONE_GOOD,
922 SCI_SUCCESS_IO_DONE_EARLY);
923 } else {
924 scic_sds_request_set_status(
925 sci_req,
926 SCU_TASK_DONE_CHECK_RESPONSE,
927 SCI_FAILURE_IO_RESPONSE_VALID);
928 }
929 }
930 break;
931
932 case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_CHECK_RESPONSE):
933 {
934 ssize_t word_cnt = SSP_RESP_IU_MAX_SIZE / sizeof(u32);
935
936 sci_swab32_cpy(&sci_req->ssp.rsp,
937 &sci_req->ssp.rsp,
938 word_cnt);
939
940 scic_sds_request_set_status(sci_req,
941 SCU_TASK_DONE_CHECK_RESPONSE,
942 SCI_FAILURE_IO_RESPONSE_VALID);
943 break;
944 }
945
946 case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_RESP_LEN_ERR):
947 /*
948 * / @todo With TASK_DONE_RESP_LEN_ERR is the response frame
949 * guaranteed to be received before this completion status is
950 * posted?
951 */
952 resp_iu = &sci_req->ssp.rsp;
953 datapres = resp_iu->datapres;
954
955 if ((datapres == 0x01) || (datapres == 0x02)) {
956 scic_sds_request_set_status(
957 sci_req,
958 SCU_TASK_DONE_CHECK_RESPONSE,
959 SCI_FAILURE_IO_RESPONSE_VALID);
960 } else
961 scic_sds_request_set_status(
962 sci_req, SCU_TASK_DONE_GOOD, SCI_SUCCESS);
963 break;
964
965 /* only stp device gets suspended. */
966 case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_ACK_NAK_TO):
967 case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_LL_PERR):
968 case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_NAK_ERR):
969 case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_DATA_LEN_ERR):
970 case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_LL_ABORT_ERR):
971 case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_XR_WD_LEN):
972 case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_MAX_PLD_ERR):
973 case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_UNEXP_RESP):
974 case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_UNEXP_SDBFIS):
975 case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_REG_ERR):
976 case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_SDB_ERR):
977 if (sci_req->protocol == SCIC_STP_PROTOCOL) {
978 scic_sds_request_set_status(
979 sci_req,
980 SCU_GET_COMPLETION_TL_STATUS(completion_code) >>
981 SCU_COMPLETION_TL_STATUS_SHIFT,
982 SCI_FAILURE_REMOTE_DEVICE_RESET_REQUIRED);
983 } else {
984 scic_sds_request_set_status(
985 sci_req,
986 SCU_GET_COMPLETION_TL_STATUS(completion_code) >>
987 SCU_COMPLETION_TL_STATUS_SHIFT,
988 SCI_FAILURE_CONTROLLER_SPECIFIC_IO_ERR);
989 }
990 break;
991
992 /* both stp/ssp device gets suspended */
993 case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_LF_ERR):
994 case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_OPEN_REJECT_WRONG_DESTINATION):
995 case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_OPEN_REJECT_RESERVED_ABANDON_1):
996 case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_OPEN_REJECT_RESERVED_ABANDON_2):
997 case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_OPEN_REJECT_RESERVED_ABANDON_3):
998 case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_OPEN_REJECT_BAD_DESTINATION):
999 case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_OPEN_REJECT_ZONE_VIOLATION):
1000 case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_OPEN_REJECT_STP_RESOURCES_BUSY):
1001 case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_OPEN_REJECT_PROTOCOL_NOT_SUPPORTED):
1002 case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_OPEN_REJECT_CONNECTION_RATE_NOT_SUPPORTED):
1003 scic_sds_request_set_status(
1004 sci_req,
1005 SCU_GET_COMPLETION_TL_STATUS(completion_code) >>
1006 SCU_COMPLETION_TL_STATUS_SHIFT,
1007 SCI_FAILURE_REMOTE_DEVICE_RESET_REQUIRED);
1008 break;
1009
1010 /* neither ssp nor stp gets suspended. */
1011 case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_NAK_CMD_ERR):
1012 case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_UNEXP_XR):
1013 case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_XR_IU_LEN_ERR):
1014 case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_SDMA_ERR):
1015 case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_OFFSET_ERR):
1016 case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_EXCESS_DATA):
1017 case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_SMP_RESP_TO_ERR):
1018 case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_SMP_UFI_ERR):
1019 case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_SMP_FRM_TYPE_ERR):
1020 case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_SMP_LL_RX_ERR):
1021 case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_UNEXP_DATA):
1022 case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_OPEN_FAIL):
1023 case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_VIIT_ENTRY_NV):
1024 case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_IIT_ENTRY_NV):
1025 case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_RNCNV_OUTBOUND):
1026 default:
1027 scic_sds_request_set_status(
1028 sci_req,
1029 SCU_GET_COMPLETION_TL_STATUS(completion_code) >>
1030 SCU_COMPLETION_TL_STATUS_SHIFT,
1031 SCI_FAILURE_CONTROLLER_SPECIFIC_IO_ERR);
1032 break;
1033 }
1034
1035 /*
1036 * TODO: This is probably wrong for ACK/NAK timeout conditions
1037 */
1038
1039 /* In all cases we will treat this as the completion of the IO req. */
1040 sci_base_state_machine_change_state(
1041 &sci_req->state_machine,
1042 SCI_BASE_REQUEST_STATE_COMPLETED);
1043 return SCI_SUCCESS;
1044 }
1045
1046 /*
1047 * This method implements the action to be taken when an SCIC_SDS_IO_REQUEST_T
1048 * object receives a scic_sds_request_frame_handler() request. This method
1049 * first determines the frame type received. If this is a response frame then
1050 * the response data is copied to the io request response buffer for processing
1051 * at completion time. If the frame type is not a response buffer an error is
1052 * logged. enum sci_status SCI_SUCCESS SCI_FAILURE_INVALID_PARAMETER_VALUE
1053 */
1054 static enum sci_status
1055 scic_sds_request_started_state_frame_handler(struct scic_sds_request *sci_req,
1056 u32 frame_index)
1057 {
1058 enum sci_status status;
1059 u32 *frame_header;
1060 struct ssp_frame_hdr ssp_hdr;
1061 ssize_t word_cnt;
1062
1063 status = scic_sds_unsolicited_frame_control_get_header(
1064 &(scic_sds_request_get_controller(sci_req)->uf_control),
1065 frame_index,
1066 (void **)&frame_header);
1067
1068 word_cnt = sizeof(struct ssp_frame_hdr) / sizeof(u32);
1069 sci_swab32_cpy(&ssp_hdr, frame_header, word_cnt);
1070
1071 if (ssp_hdr.frame_type == SSP_RESPONSE) {
1072 struct ssp_response_iu *resp_iu;
1073 ssize_t word_cnt = SSP_RESP_IU_MAX_SIZE / sizeof(u32);
1074
1075 status = scic_sds_unsolicited_frame_control_get_buffer(
1076 &(scic_sds_request_get_controller(sci_req)->uf_control),
1077 frame_index,
1078 (void **)&resp_iu);
1079
1080 sci_swab32_cpy(&sci_req->ssp.rsp,
1081 resp_iu, word_cnt);
1082
1083 resp_iu = &sci_req->ssp.rsp;
1084
1085 if ((resp_iu->datapres == 0x01) ||
1086 (resp_iu->datapres == 0x02)) {
1087 scic_sds_request_set_status(
1088 sci_req,
1089 SCU_TASK_DONE_CHECK_RESPONSE,
1090 SCI_FAILURE_CONTROLLER_SPECIFIC_IO_ERR);
1091 } else
1092 scic_sds_request_set_status(
1093 sci_req, SCU_TASK_DONE_GOOD, SCI_SUCCESS);
1094 } else {
1095 /* This was not a response frame why did it get forwarded? */
1096 dev_err(scic_to_dev(sci_req->owning_controller),
1097 "%s: SCIC IO Request 0x%p received unexpected "
1098 "frame %d type 0x%02x\n",
1099 __func__,
1100 sci_req,
1101 frame_index,
1102 ssp_hdr.frame_type);
1103 }
1104
1105 /*
1106 * In any case we are done with this frame buffer return it to the
1107 * controller
1108 */
1109 scic_sds_controller_release_frame(
1110 sci_req->owning_controller, frame_index);
1111
1112 return SCI_SUCCESS;
1113 }
1114
1115 /*
1116 * *****************************************************************************
1117 * * COMPLETED STATE HANDLERS
1118 * ***************************************************************************** */
1119
1120
1121 /*
1122 * This method implements the action to be taken when an SCIC_SDS_IO_REQUEST_T
1123 * object receives a scic_sds_request_complete() request. This method frees up
1124 * any io request resources that have been allocated and transitions the
1125 * request to its final state. Consider stopping the state machine instead of
1126 * transitioning to the final state? enum sci_status SCI_SUCCESS
1127 */
1128 static enum sci_status scic_sds_request_completed_state_complete_handler(
1129 struct scic_sds_request *request)
1130 {
1131 if (request->was_tag_assigned_by_user != true) {
1132 scic_controller_free_io_tag(
1133 request->owning_controller, request->io_tag);
1134 }
1135
1136 if (request->saved_rx_frame_index != SCU_INVALID_FRAME_INDEX) {
1137 scic_sds_controller_release_frame(
1138 request->owning_controller, request->saved_rx_frame_index);
1139 }
1140
1141 sci_base_state_machine_change_state(&request->state_machine,
1142 SCI_BASE_REQUEST_STATE_FINAL);
1143 return SCI_SUCCESS;
1144 }
1145
1146 /*
1147 * *****************************************************************************
1148 * * ABORTING STATE HANDLERS
1149 * ***************************************************************************** */
1150
1151 /*
1152 * This method implements the action to be taken when an SCIC_SDS_IO_REQUEST_T
1153 * object receives a scic_sds_request_terminate() request. This method is the
1154 * io request aborting state abort handlers. On receipt of a multiple
1155 * terminate requests the io request will transition to the completed state.
1156 * This should not happen in normal operation. enum sci_status SCI_SUCCESS
1157 */
1158 static enum sci_status scic_sds_request_aborting_state_abort_handler(
1159 struct scic_sds_request *request)
1160 {
1161 sci_base_state_machine_change_state(&request->state_machine,
1162 SCI_BASE_REQUEST_STATE_COMPLETED);
1163 return SCI_SUCCESS;
1164 }
1165
1166 /*
1167 * This method implements the action to be taken when an SCIC_SDS_IO_REQUEST_T
1168 * object receives a scic_sds_request_task_completion() request. This method
1169 * decodes the completion type waiting for the abort task complete
1170 * notification. When the abort task complete is received the io request
1171 * transitions to the completed state. enum sci_status SCI_SUCCESS
1172 */
1173 static enum sci_status scic_sds_request_aborting_state_tc_completion_handler(
1174 struct scic_sds_request *sci_req,
1175 u32 completion_code)
1176 {
1177 switch (SCU_GET_COMPLETION_TL_STATUS(completion_code)) {
1178 case (SCU_TASK_DONE_GOOD << SCU_COMPLETION_TL_STATUS_SHIFT):
1179 case (SCU_TASK_DONE_TASK_ABORT << SCU_COMPLETION_TL_STATUS_SHIFT):
1180 scic_sds_request_set_status(
1181 sci_req, SCU_TASK_DONE_TASK_ABORT, SCI_FAILURE_IO_TERMINATED
1182 );
1183
1184 sci_base_state_machine_change_state(&sci_req->state_machine,
1185 SCI_BASE_REQUEST_STATE_COMPLETED);
1186 break;
1187
1188 default:
1189 /*
1190 * Unless we get some strange error wait for the task abort to complete
1191 * TODO: Should there be a state change for this completion? */
1192 break;
1193 }
1194
1195 return SCI_SUCCESS;
1196 }
1197
1198 /*
1199 * This method implements the action to be taken when an SCIC_SDS_IO_REQUEST_T
1200 * object receives a scic_sds_request_frame_handler() request. This method
1201 * discards the unsolicited frame since we are waiting for the abort task
1202 * completion. enum sci_status SCI_SUCCESS
1203 */
1204 static enum sci_status scic_sds_request_aborting_state_frame_handler(
1205 struct scic_sds_request *sci_req,
1206 u32 frame_index)
1207 {
1208 /* TODO: Is it even possible to get an unsolicited frame in the aborting state? */
1209
1210 scic_sds_controller_release_frame(
1211 sci_req->owning_controller, frame_index);
1212
1213 return SCI_SUCCESS;
1214 }
1215
/* Per-state event handler dispatch table for the base request state machine.
 * States with an empty initializer accept no events; the corresponding
 * default handlers report an invalid state.
 */
static const struct scic_sds_io_request_state_handler scic_sds_request_state_handler_table[] = {
	[SCI_BASE_REQUEST_STATE_INITIAL] = {
		/* no events are valid until construction completes */
	},
	[SCI_BASE_REQUEST_STATE_CONSTRUCTED] = {
		.start_handler = scic_sds_request_constructed_state_start_handler,
		.abort_handler = scic_sds_request_constructed_state_abort_handler,
	},
	[SCI_BASE_REQUEST_STATE_STARTED] = {
		.abort_handler = scic_sds_request_started_state_abort_handler,
		.tc_completion_handler = scic_sds_request_started_state_tc_completion_handler,
		.frame_handler = scic_sds_request_started_state_frame_handler,
	},
	[SCI_BASE_REQUEST_STATE_COMPLETED] = {
		.complete_handler = scic_sds_request_completed_state_complete_handler,
	},
	[SCI_BASE_REQUEST_STATE_ABORTING] = {
		.abort_handler = scic_sds_request_aborting_state_abort_handler,
		.tc_completion_handler = scic_sds_request_aborting_state_tc_completion_handler,
		.frame_handler = scic_sds_request_aborting_state_frame_handler,
	},
	[SCI_BASE_REQUEST_STATE_FINAL] = {
		/* terminal state: no further events are accepted */
	},
};
1239
1240 /**
1241 * scic_sds_request_initial_state_enter() -
1242 * @object: This parameter specifies the base object for which the state
1243 * transition is occurring.
1244 *
1245 * This method implements the actions taken when entering the
1246 * SCI_BASE_REQUEST_STATE_INITIAL state. This state is entered when the initial
1247 * base request is constructed. Entry into the initial state sets all handlers
1248 * for the io request object to their default handlers. none
1249 */
1250 static void scic_sds_request_initial_state_enter(void *object)
1251 {
1252 struct scic_sds_request *sci_req = object;
1253
1254 SET_STATE_HANDLER(
1255 sci_req,
1256 scic_sds_request_state_handler_table,
1257 SCI_BASE_REQUEST_STATE_INITIAL
1258 );
1259 }
1260
1261 /**
1262 * scic_sds_request_constructed_state_enter() -
1263 * @object: The io request object that is to enter the constructed state.
1264 *
1265 * This method implements the actions taken when entering the
1266 * SCI_BASE_REQUEST_STATE_CONSTRUCTED state. The method sets the state handlers
1267 * for the the constructed state. none
1268 */
1269 static void scic_sds_request_constructed_state_enter(void *object)
1270 {
1271 struct scic_sds_request *sci_req = object;
1272
1273 SET_STATE_HANDLER(
1274 sci_req,
1275 scic_sds_request_state_handler_table,
1276 SCI_BASE_REQUEST_STATE_CONSTRUCTED
1277 );
1278 }
1279
1280 /**
1281 * scic_sds_request_started_state_enter() -
1282 * @object: This parameter specifies the base object for which the state
1283 * transition is occurring. This is cast into a SCIC_SDS_IO_REQUEST object.
1284 *
1285 * This method implements the actions taken when entering the
1286 * SCI_BASE_REQUEST_STATE_STARTED state. If the io request object type is a
1287 * SCSI Task request we must enter the started substate machine. none
1288 */
1289 static void scic_sds_request_started_state_enter(void *object)
1290 {
1291 struct scic_sds_request *sci_req = object;
1292
1293 SET_STATE_HANDLER(
1294 sci_req,
1295 scic_sds_request_state_handler_table,
1296 SCI_BASE_REQUEST_STATE_STARTED
1297 );
1298
1299 /*
1300 * Most of the request state machines have a started substate machine so
1301 * start its execution on the entry to the started state. */
1302 if (sci_req->has_started_substate_machine == true)
1303 sci_base_state_machine_start(&sci_req->started_substate_machine);
1304 }
1305
1306 /**
1307 * scic_sds_request_started_state_exit() -
1308 * @object: This parameter specifies the base object for which the state
1309 * transition is occurring. This object is cast into a SCIC_SDS_IO_REQUEST
1310 * object.
1311 *
1312 * This method implements the actions taken when exiting the
1313 * SCI_BASE_REQUEST_STATE_STARTED state. For task requests the action will be
1314 * to stop the started substate machine. none
1315 */
1316 static void scic_sds_request_started_state_exit(void *object)
1317 {
1318 struct scic_sds_request *sci_req = object;
1319
1320 if (sci_req->has_started_substate_machine == true)
1321 sci_base_state_machine_stop(&sci_req->started_substate_machine);
1322 }
1323
1324 /**
1325 * scic_sds_request_completed_state_enter() -
1326 * @object: This parameter specifies the base object for which the state
1327 * transition is occurring. This object is cast into a SCIC_SDS_IO_REQUEST
1328 * object.
1329 *
1330 * This method implements the actions taken when entering the
1331 * SCI_BASE_REQUEST_STATE_COMPLETED state. This state is entered when the
1332 * SCIC_SDS_IO_REQUEST has completed. The method will decode the request
1333 * completion status and convert it to an enum sci_status to return in the
1334 * completion callback function. none
1335 */
1336 static void scic_sds_request_completed_state_enter(void *object)
1337 {
1338 struct scic_sds_request *sci_req = object;
1339 struct scic_sds_controller *scic =
1340 scic_sds_request_get_controller(sci_req);
1341 struct isci_host *ihost = scic_to_ihost(scic);
1342 struct isci_request *ireq = sci_req_to_ireq(sci_req);
1343
1344 SET_STATE_HANDLER(sci_req,
1345 scic_sds_request_state_handler_table,
1346 SCI_BASE_REQUEST_STATE_COMPLETED);
1347
1348 /* Tell the SCI_USER that the IO request is complete */
1349 if (sci_req->is_task_management_request == false)
1350 isci_request_io_request_complete(ihost,
1351 ireq,
1352 sci_req->sci_status);
1353 else
1354 isci_task_request_complete(ihost, ireq, sci_req->sci_status);
1355 }
1356
1357 /**
1358 * scic_sds_request_aborting_state_enter() -
1359 * @object: This parameter specifies the base object for which the state
1360 * transition is occurring. This object is cast into a SCIC_SDS_IO_REQUEST
1361 * object.
1362 *
1363 * This method implements the actions taken when entering the
1364 * SCI_BASE_REQUEST_STATE_ABORTING state. none
1365 */
1366 static void scic_sds_request_aborting_state_enter(void *object)
1367 {
1368 struct scic_sds_request *sci_req = object;
1369
1370 /* Setting the abort bit in the Task Context is required by the silicon. */
1371 sci_req->task_context_buffer->abort = 1;
1372
1373 SET_STATE_HANDLER(
1374 sci_req,
1375 scic_sds_request_state_handler_table,
1376 SCI_BASE_REQUEST_STATE_ABORTING
1377 );
1378 }
1379
1380 /**
1381 * scic_sds_request_final_state_enter() -
1382 * @object: This parameter specifies the base object for which the state
1383 * transition is occurring. This is cast into a SCIC_SDS_IO_REQUEST object.
1384 *
1385 * This method implements the actions taken when entering the
1386 * SCI_BASE_REQUEST_STATE_FINAL state. The only action required is to put the
1387 * state handlers in place. none
1388 */
1389 static void scic_sds_request_final_state_enter(void *object)
1390 {
1391 struct scic_sds_request *sci_req = object;
1392
1393 SET_STATE_HANDLER(
1394 sci_req,
1395 scic_sds_request_state_handler_table,
1396 SCI_BASE_REQUEST_STATE_FINAL
1397 );
1398 }
1399
/* State table for the base request state machine: per-state enter/exit
 * actions. Only the started state needs an exit action (to stop its
 * substate machine).
 */
static const struct sci_base_state scic_sds_request_state_table[] = {
	[SCI_BASE_REQUEST_STATE_INITIAL] = {
		.enter_state = scic_sds_request_initial_state_enter,
	},
	[SCI_BASE_REQUEST_STATE_CONSTRUCTED] = {
		.enter_state = scic_sds_request_constructed_state_enter,
	},
	[SCI_BASE_REQUEST_STATE_STARTED] = {
		.enter_state = scic_sds_request_started_state_enter,
		.exit_state  = scic_sds_request_started_state_exit
	},
	[SCI_BASE_REQUEST_STATE_COMPLETED] = {
		.enter_state = scic_sds_request_completed_state_enter,
	},
	[SCI_BASE_REQUEST_STATE_ABORTING] = {
		.enter_state = scic_sds_request_aborting_state_enter,
	},
	[SCI_BASE_REQUEST_STATE_FINAL] = {
		.enter_state = scic_sds_request_final_state_enter,
	},
};
1421
1422 static void scic_sds_general_request_construct(struct scic_sds_controller *scic,
1423 struct scic_sds_remote_device *sci_dev,
1424 u16 io_tag, struct scic_sds_request *sci_req)
1425 {
1426 sci_base_state_machine_construct(&sci_req->state_machine, sci_req,
1427 scic_sds_request_state_table, SCI_BASE_REQUEST_STATE_INITIAL);
1428 sci_base_state_machine_start(&sci_req->state_machine);
1429
1430 sci_req->io_tag = io_tag;
1431 sci_req->owning_controller = scic;
1432 sci_req->target_device = sci_dev;
1433 sci_req->has_started_substate_machine = false;
1434 sci_req->protocol = SCIC_NO_PROTOCOL;
1435 sci_req->saved_rx_frame_index = SCU_INVALID_FRAME_INDEX;
1436 sci_req->device_sequence = scic_sds_remote_device_get_sequence(sci_dev);
1437
1438 sci_req->sci_status = SCI_SUCCESS;
1439 sci_req->scu_status = 0;
1440 sci_req->post_context = 0xFFFFFFFF;
1441
1442 sci_req->is_task_management_request = false;
1443
1444 if (io_tag == SCI_CONTROLLER_INVALID_IO_TAG) {
1445 sci_req->was_tag_assigned_by_user = false;
1446 sci_req->task_context_buffer = NULL;
1447 } else {
1448 sci_req->was_tag_assigned_by_user = true;
1449
1450 sci_req->task_context_buffer =
1451 scic_sds_controller_get_task_context_buffer(scic, io_tag);
1452 }
1453 }
1454
1455 enum sci_status
1456 scic_io_request_construct(struct scic_sds_controller *scic,
1457 struct scic_sds_remote_device *sci_dev,
1458 u16 io_tag, struct scic_sds_request *sci_req)
1459 {
1460 struct domain_device *dev = sci_dev_to_domain(sci_dev);
1461 enum sci_status status = SCI_SUCCESS;
1462
1463 /* Build the common part of the request */
1464 scic_sds_general_request_construct(scic, sci_dev, io_tag, sci_req);
1465
1466 if (sci_dev->rnc.remote_node_index ==
1467 SCIC_SDS_REMOTE_NODE_CONTEXT_INVALID_INDEX)
1468 return SCI_FAILURE_INVALID_REMOTE_DEVICE;
1469
1470 if (dev->dev_type == SAS_END_DEV)
1471 scic_sds_ssp_io_request_assign_buffers(sci_req);
1472 else if ((dev->dev_type == SATA_DEV) ||
1473 (dev->tproto & SAS_PROTOCOL_STP)) {
1474 scic_sds_stp_request_assign_buffers(sci_req);
1475 memset(&sci_req->stp.cmd, 0, sizeof(sci_req->stp.cmd));
1476 } else if (dev_is_expander(dev)) {
1477 scic_sds_smp_request_assign_buffers(sci_req);
1478 memset(&sci_req->smp.cmd, 0, sizeof(sci_req->smp.cmd));
1479 } else
1480 status = SCI_FAILURE_UNSUPPORTED_PROTOCOL;
1481
1482 if (status == SCI_SUCCESS) {
1483 memset(sci_req->task_context_buffer, 0,
1484 offsetof(struct scu_task_context, sgl_pair_ab));
1485 }
1486
1487 return status;
1488 }
1489
1490 enum sci_status scic_task_request_construct(struct scic_sds_controller *scic,
1491 struct scic_sds_remote_device *sci_dev,
1492 u16 io_tag, struct scic_sds_request *sci_req)
1493 {
1494 struct domain_device *dev = sci_dev_to_domain(sci_dev);
1495 enum sci_status status = SCI_SUCCESS;
1496
1497 /* Build the common part of the request */
1498 scic_sds_general_request_construct(scic, sci_dev, io_tag, sci_req);
1499
1500 if (dev->dev_type == SAS_END_DEV) {
1501 scic_sds_ssp_task_request_assign_buffers(sci_req);
1502
1503 sci_req->has_started_substate_machine = true;
1504
1505 /* Construct the started sub-state machine. */
1506 sci_base_state_machine_construct(
1507 &sci_req->started_substate_machine,
1508 sci_req,
1509 scic_sds_io_request_started_task_mgmt_substate_table,
1510 SCIC_SDS_IO_REQUEST_STARTED_TASK_MGMT_SUBSTATE_AWAIT_TC_COMPLETION
1511 );
1512 } else if (dev->dev_type == SATA_DEV || (dev->tproto & SAS_PROTOCOL_STP))
1513 scic_sds_stp_request_assign_buffers(sci_req);
1514 else
1515 status = SCI_FAILURE_UNSUPPORTED_PROTOCOL;
1516
1517 if (status == SCI_SUCCESS) {
1518 sci_req->is_task_management_request = true;
1519 memset(sci_req->task_context_buffer, 0, sizeof(struct scu_task_context));
1520 }
1521
1522 return status;
1523 }
This page took 0.064064 seconds and 4 git commands to generate.