2 * This file is provided under a dual BSD/GPLv2 license. When using or
3 * redistributing this file, you may do so under either license.
7 * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved.
9 * This program is free software; you can redistribute it and/or modify
10 * it under the terms of version 2 of the GNU General Public License as
11 * published by the Free Software Foundation.
13 * This program is distributed in the hope that it will be useful, but
14 * WITHOUT ANY WARRANTY; without even the implied warranty of
15 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
16 * General Public License for more details.
18 * You should have received a copy of the GNU General Public License
19 * along with this program; if not, write to the Free Software
20 * Foundation, Inc., 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
21 * The full GNU General Public License is included in this distribution
22 * in the file called LICENSE.GPL.
26 * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved.
27 * All rights reserved.
29 * Redistribution and use in source and binary forms, with or without
30 * modification, are permitted provided that the following conditions
33 * * Redistributions of source code must retain the above copyright
34 * notice, this list of conditions and the following disclaimer.
35 * * Redistributions in binary form must reproduce the above copyright
36 * notice, this list of conditions and the following disclaimer in
37 * the documentation and/or other materials provided with the
39 * * Neither the name of Intel Corporation nor the names of its
40 * contributors may be used to endorse or promote products derived
41 * from this software without specific prior written permission.
43 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
44 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
45 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
46 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
47 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
48 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
49 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
50 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
51 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
52 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
53 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
56 #ifndef _ISCI_REQUEST_H_
57 #define _ISCI_REQUEST_H_
61 #include "scu_task_context.h"
62 #include "stp_request.h"
/**
 * enum isci_request_status - This enum defines the possible states of an I/O
 *    request.
 *
 * NOTE(review): the enumerator list was lost in extraction.  Later code in
 * this file references at least the values 'unallocated', 'started',
 * 'aborting' and 'aborted' -- restore the full list from the original file.
 */
enum isci_request_status
{
/*
 * enum sci_request_protocol - transport protocol a core request carries.
 * NOTE(review): the enumerator list was lost in extraction -- restore it
 * from the original file.
 */
enum sci_request_protocol
{
}; /* XXX remove me, use sas_task.{dev|task_proto} instead */;
/*
 * struct scic_sds_request - core (SCIC) representation of an I/O request.
 * Embedded inside struct isci_request (member 'sci').
 */
struct scic_sds_request
{
	/*
	 * This field contains the information for the base request state
	 * machine.
	 */
	struct sci_base_state_machine state_machine;

	/*
	 * This field simply points to the controller to which this IO request
	 * is attached.
	 */
	struct scic_sds_controller *owning_controller;

	/*
	 * This field simply points to the remote device to which this IO
	 * request is targeted.
	 */
	struct scic_sds_remote_device *target_device;

	/*
	 * This field is utilized to determine if the SCI user is managing
	 * the IO tag for this request or if the core is managing it.
	 */
	bool was_tag_assigned_by_user;

	/*
	 * This field indicates the IO tag for this request.  The IO tag is
	 * comprised of the task_index and a sequence count.  The sequence
	 * count is utilized to help identify tasks from one life to another.
	 * NOTE(review): the member declaration itself (io_tag) appears to
	 * have been lost in extraction -- restore it from the original file.
	 */

	/*
	 * This field specifies the protocol being utilized for this request.
	 */
	enum sci_request_protocol protocol;

	/*
	 * This field indicates the completion status taken from the SCUs
	 * completion code.  It indicates the completion result for the SCU
	 * hardware.
	 * NOTE(review): the scu_status member is assigned by
	 * scic_sds_request_set_status() later in this file, but its
	 * declaration is missing here -- extraction loss.
	 */

	/*
	 * This field indicates the completion status returned to the SCI
	 * user.  It indicates the users view of the io request completion.
	 * NOTE(review): the sci_status member declaration is likewise
	 * missing from this extraction.
	 */

	/*
	 * This field contains the value to be utilized when posting
	 * (e.g. Post_TC, Post_TC_Abort) this request to the silicon.
	 * NOTE(review): the post_context member (referenced by
	 * scic_sds_request_get_post_context() below) is missing here.
	 */

	/* Pointer to the hardware task context in use for this request. */
	struct scu_task_context *task_context_buffer;
	/* Inline task context storage; cacheline aligned for the hardware. */
	struct scu_task_context tc ____cacheline_aligned;

	/* could be larger with sg chaining */
#define SCU_SGL_SIZE ((SCU_IO_REQUEST_SGE_COUNT + 1) / 2)
	struct scu_sgl_element_pair sg_table[SCU_SGL_SIZE] __attribute__ ((aligned(32)));

	/*
	 * This field indicates if this request is a task management request
	 * (vs. a normal I/O request).
	 */
	bool is_task_management_request;

	/*
	 * This field indicates that this request contains an initialized
	 * started substate machine.
	 */
	bool has_started_substate_machine;

	/*
	 * This field is a pointer to the stored rx frame data.  It is used in
	 * STP internal requests and SMP response frames.  If this field is
	 * non-NULL the saved frame must be released on IO request completion.
	 *
	 * @todo In the future do we want to keep a list of RX frame buffers?
	 */
	u32 saved_rx_frame_index;

	/*
	 * This field specifies the data necessary to manage the sub-state
	 * machine executed while in the SCI_BASE_REQUEST_STATE_STARTED state.
	 */
	struct sci_base_state_machine started_substate_machine;

	/*
	 * This field specifies the current state handlers in place for this
	 * IO Request object.  This field is updated each time the request
	 * changes state.
	 */
	const struct scic_sds_io_request_state_handler *state_handlers;

	/*
	 * This field in the recorded device sequence for the io request.
	 * This is recorded during the build operation and is compared in the
	 * start operation.  If the sequence is different then there was a
	 * change of devices from the build to start operations.
	 * NOTE(review): the member declaration itself is missing from this
	 * extraction.
	 */

	/*
	 * NOTE(review): the members below originally lived inside
	 * per-protocol struct/union wrappers (the duplicate 'cmd' and 'rsp'
	 * names cannot legally coexist in one plain struct); those wrappers
	 * and the closing brace of struct scic_sds_request were lost in
	 * extraction -- restore them from the original file.  to_sci_req()
	 * below references the STP group as 'stp.req'.
	 */
	/* SSP command/task IUs and response buffer. */
	struct ssp_cmd_iu cmd;
	struct ssp_task_iu tmf;
	struct ssp_response_iu rsp;
	u8 rsp_buf[SSP_RESP_IU_MAX_SIZE];
	/* STP/SATA request state plus host<->device FISes. */
	struct scic_sds_stp_request req;
	struct host_to_dev_fis cmd;
	struct dev_to_host_fis rsp;
223 static inline struct scic_sds_request
*to_sci_req(struct scic_sds_stp_request
*stp_req
)
225 struct scic_sds_request
*sci_req
;
227 sci_req
= container_of(stp_req
, typeof(*sci_req
), stp
.req
);
/*
 * struct isci_request - OS-level (libsas-facing) representation of a
 * request; wraps the core struct scic_sds_request in member 'sci'.
 */
struct isci_request
{
	/* Current life-cycle state; protected by state_lock. */
	enum isci_request_status status;
	/* Discriminator for ttype_ptr: io_task vs. tmf_task. */
	enum task_type ttype;
	unsigned short io_tag;
	/* Set when the request is known to have completed in the target. */
	bool complete_in_target;

	union ttype_ptr_union
	{
		struct sas_task *io_task_ptr;	/* When ttype==io_task */
		struct isci_tmf *tmf_task_ptr;	/* When ttype==tmf_task */
	/*
	 * NOTE(review): the union's closing line ("} ttype_ptr;") was lost in
	 * extraction -- the access macros at the bottom of this file
	 * reference '->ttype_ptr'.  Restore from the original file.
	 */

	struct isci_host *isci_host;
	struct isci_remote_device *isci_device;
	/* For use in the requests_to_{complete|abort} lists: */
	struct list_head completed_node;
	/* For use in the reqs_in_process list: */
	struct list_head dev_node;
	/* Protects 'status' (see isci_request_change_state()). */
	spinlock_t state_lock;
	/* DMA address of this request's backing allocation. */
	dma_addr_t request_daddr;
	/* DMA address used when the task has a single (zero-sg) buffer. */
	dma_addr_t zero_scatter_daddr;

	unsigned int num_sg_entries;	/* returned by pci_alloc_sg */

	/** Note: "io_request_completion" is completed in two different ways
	 * depending on whether this is a TMF or regular request.
	 * - TMF requests are completed in the thread that started them;
	 * - regular requests are completed in the request completion callback
	 *   function.
	 * This difference in operation allows the aborter of a TMF request
	 * to be sure that once the TMF request completes, the I/O that the
	 * TMF was aborting is guaranteed to have completed.
	 */
	struct completion *io_request_completion;
	/* Core (SCIC) request state. */
	struct scic_sds_request sci;
	/*
	 * NOTE(review): the closing "};" of struct isci_request was lost in
	 * extraction.
	 */
267 static inline struct isci_request
*sci_req_to_ireq(struct scic_sds_request
*sci_req
)
269 struct isci_request
*ireq
= container_of(sci_req
, typeof(*ireq
), sci
);
/**
 * enum sci_base_request_states - This enumeration depicts all the states for
 *    the common request state machine.
 *
 * (Extraction had dropped the comment delimiters and the closing brace of
 * this enum; all enumerators were present and are preserved in order.)
 */
enum sci_base_request_states {
	/* Simply the initial state for the base request state machine. */
	SCI_BASE_REQUEST_STATE_INITIAL,

	/*
	 * This state indicates that the request has been constructed.  This
	 * state is entered from the INITIAL state.
	 */
	SCI_BASE_REQUEST_STATE_CONSTRUCTED,

	/*
	 * This state indicates that the request has been started.  This state
	 * is entered from the CONSTRUCTED state.
	 */
	SCI_BASE_REQUEST_STATE_STARTED,

	/*
	 * The AWAIT_TC_COMPLETION sub-state indicates that the started raw
	 * task management request is waiting for the transmission of the
	 * initial frame (i.e. command, task, etc.).
	 */
	SCIC_SDS_IO_REQUEST_STARTED_TASK_MGMT_SUBSTATE_AWAIT_TC_COMPLETION,

	/*
	 * This sub-state indicates that the started task management request
	 * is waiting for the reception of an unsolicited frame
	 * (i.e. response IU).
	 */
	SCIC_SDS_IO_REQUEST_STARTED_TASK_MGMT_SUBSTATE_AWAIT_TC_RESPONSE,

	/*
	 * This sub-state indicates that the started SMP request is waiting
	 * for the reception of an unsolicited frame (i.e. response IU).
	 */
	SCIC_SDS_SMP_REQUEST_STARTED_SUBSTATE_AWAIT_RESPONSE,

	/*
	 * The AWAIT_TC_COMPLETION sub-state indicates that the started SMP
	 * request is waiting for the transmission of the initial frame
	 * (i.e. command, task, etc.).
	 */
	SCIC_SDS_SMP_REQUEST_STARTED_SUBSTATE_AWAIT_TC_COMPLETION,

	/*
	 * This state indicates that the request has completed.  This state is
	 * entered from the STARTED state and from the ABORTING state.
	 */
	SCI_BASE_REQUEST_STATE_COMPLETED,

	/*
	 * This state indicates that the request is in the process of being
	 * terminated/aborted.  It is entered from the CONSTRUCTED state and
	 * from the STARTED state.
	 */
	SCI_BASE_REQUEST_STATE_ABORTING,

	/* Simply the final state for the base request state machine. */
	SCI_BASE_REQUEST_STATE_FINAL,
};
346 typedef enum sci_status (*scic_sds_io_request_handler_t
)
347 (struct scic_sds_request
*request
);
348 typedef enum sci_status (*scic_sds_io_request_frame_handler_t
)
349 (struct scic_sds_request
*req
, u32 frame
);
350 typedef enum sci_status (*scic_sds_io_request_event_handler_t
)
351 (struct scic_sds_request
*req
, u32 event
);
352 typedef enum sci_status (*scic_sds_io_request_task_completion_handler_t
)
353 (struct scic_sds_request
*req
, u32 completion_code
);
356 * struct scic_sds_io_request_state_handler - This is the SDS core definition
357 * of the state handlers.
361 struct scic_sds_io_request_state_handler
{
363 * The start_handler specifies the method invoked when a user attempts to
366 scic_sds_io_request_handler_t start_handler
;
369 * The abort_handler specifies the method invoked when a user attempts to
372 scic_sds_io_request_handler_t abort_handler
;
375 * The complete_handler specifies the method invoked when a user attempts to
376 * complete a request.
378 scic_sds_io_request_handler_t complete_handler
;
380 scic_sds_io_request_task_completion_handler_t tc_completion_handler
;
381 scic_sds_io_request_event_handler_t event_handler
;
382 scic_sds_io_request_frame_handler_t frame_handler
;
/* Substate table used while a task-management request is STARTED. */
extern const struct sci_base_state scic_sds_io_request_started_task_mgmt_substate_table[];
/**
 * scic_sds_request_get_controller() -
 *
 * This macro will return the controller for this io request object
 */
#define scic_sds_request_get_controller(sci_req) \
	((sci_req)->owning_controller)
/**
 * scic_sds_request_get_device() -
 *
 * This macro will return the device for this io request object
 */
#define scic_sds_request_get_device(sci_req) \
	((sci_req)->target_device)
/**
 * scic_sds_request_get_port() -
 *
 * This macro will return the port for this io request object
 */
#define scic_sds_request_get_port(sci_req) \
	scic_sds_remote_device_get_port(scic_sds_request_get_device(sci_req))
/**
 * scic_sds_request_get_post_context() -
 *
 * This macro returns the constructed post context result for the io request.
 */
#define scic_sds_request_get_post_context(sci_req) \
	((sci_req)->post_context)
/**
 * scic_sds_request_get_task_context() -
 *
 * This is a helper macro to return the os handle for this request object.
 */
#define scic_sds_request_get_task_context(request) \
	((request)->task_context_buffer)
/**
 * scic_sds_request_set_status() -
 *
 * This macro will set the scu hardware status and sci request completion
 * status for an io request.  Wrapped in do { } while (0) so the two
 * assignments expand as a single statement (safe in un-braced if/else);
 * the extraction had also lost the macro's enclosing braces.
 */
#define scic_sds_request_set_status(request, scu_status_code, sci_status_code) \
	do { \
		(request)->scu_status = (scu_status_code); \
		(request)->sci_status = (sci_status_code); \
	} while (0)
/*
 * scic_sds_request_complete() - dispatch the request to the complete_handler
 * of its currently-installed state handler table.
 */
#define scic_sds_request_complete(a_request) \
	((a_request)->state_handlers->complete_handler(a_request))
/*
 * scic_sds_io_request_tc_completion() - process a task-context completion
 * code from the hardware for the given core request.
 */
extern enum sci_status
scic_sds_io_request_tc_completion(struct scic_sds_request *request, u32 completion_code);
/*
 * SCU_SGL_ZERO() - This macro zeros the hardware SGL element data.
 * Wrapped in do { } while (0) so it expands as a single statement;
 * the extraction had also lost the macro's enclosing braces.
 */
#define SCU_SGL_ZERO(scu_sge) \
	do { \
		(scu_sge).length = 0; \
		(scu_sge).address_lower = 0; \
		(scu_sge).address_upper = 0; \
		(scu_sge).address_modifier = 0; \
	} while (0)
/*
 * SCU_SGL_COPY() - This macro copys the SGL Element data from the host os to
 * the hardware SGL element.  Wrapped in do { } while (0) so it expands as a
 * single statement; the extraction had also lost the enclosing braces.
 *
 * NOTE(review): the body deliberately reads the call site's local 'sg'
 * variable, NOT the os_sge parameter (preserved as-is to avoid changing
 * behavior) -- every caller must have a scatterlist pointer named 'sg' in
 * scope.  Consider making the macro use its parameter instead.
 */
#define SCU_SGL_COPY(scu_sge, os_sge) \
	do { \
		(scu_sge).length = sg_dma_len(sg); \
		(scu_sge).address_upper = \
			upper_32_bits(sg_dma_address(sg)); \
		(scu_sge).address_lower = \
			lower_32_bits(sg_dma_address(sg)); \
		(scu_sge).address_modifier = 0; \
	} while (0)
476 void scic_sds_request_build_sgl(struct scic_sds_request
*sci_req
);
477 enum sci_status
scic_sds_request_start(struct scic_sds_request
*sci_req
);
478 enum sci_status
scic_sds_io_request_terminate(struct scic_sds_request
*sci_req
);
479 enum sci_status
scic_sds_io_request_event_handler(struct scic_sds_request
*sci_req
,
481 enum sci_status
scic_sds_io_request_frame_handler(struct scic_sds_request
*sci_req
,
483 enum sci_status
scic_sds_task_request_terminate(struct scic_sds_request
*sci_req
);
484 enum sci_status
scic_sds_request_started_state_abort_handler(struct scic_sds_request
*sci_req
);
487 /* XXX open code in caller */
488 static inline void *scic_request_get_virt_addr(struct scic_sds_request
*sci_req
,
489 dma_addr_t phys_addr
)
491 struct isci_request
*ireq
= sci_req_to_ireq(sci_req
);
494 BUG_ON(phys_addr
< ireq
->request_daddr
);
496 offset
= phys_addr
- ireq
->request_daddr
;
498 BUG_ON(offset
>= sizeof(*ireq
));
500 return (char *)ireq
+ offset
;
503 /* XXX open code in caller */
504 static inline dma_addr_t
scic_io_request_get_dma_addr(struct scic_sds_request
*sci_req
,
507 struct isci_request
*ireq
= sci_req_to_ireq(sci_req
);
509 char *requested_addr
= (char *)virt_addr
;
510 char *base_addr
= (char *)ireq
;
512 BUG_ON(requested_addr
< base_addr
);
513 BUG_ON((requested_addr
- base_addr
) >= sizeof(*ireq
));
515 return ireq
->request_daddr
+ (requested_addr
- base_addr
);
519 * This function gets the status of the request object.
520 * @request: This parameter points to the isci_request object
522 * status of the object as a isci_request_status enum.
525 enum isci_request_status
isci_request_get_state(
526 struct isci_request
*isci_request
)
528 BUG_ON(isci_request
== NULL
);
530 /*probably a bad sign... */
531 if (isci_request
->status
== unallocated
)
532 dev_warn(&isci_request
->isci_host
->pdev
->dev
,
533 "%s: isci_request->status == unallocated\n",
536 return isci_request
->status
;
541 * isci_request_change_state() - This function sets the status of the request
543 * @request: This parameter points to the isci_request object
544 * @status: This Parameter is the new status of the object
547 static inline enum isci_request_status
isci_request_change_state(
548 struct isci_request
*isci_request
,
549 enum isci_request_status status
)
551 enum isci_request_status old_state
;
554 dev_dbg(&isci_request
->isci_host
->pdev
->dev
,
555 "%s: isci_request = %p, state = 0x%x\n",
560 BUG_ON(isci_request
== NULL
);
562 spin_lock_irqsave(&isci_request
->state_lock
, flags
);
563 old_state
= isci_request
->status
;
564 isci_request
->status
= status
;
565 spin_unlock_irqrestore(&isci_request
->state_lock
, flags
);
571 * isci_request_change_started_to_newstate() - This function sets the status of
572 * the request object.
573 * @request: This parameter points to the isci_request object
574 * @status: This Parameter is the new status of the object
576 * state previous to any change.
578 static inline enum isci_request_status
isci_request_change_started_to_newstate(
579 struct isci_request
*isci_request
,
580 struct completion
*completion_ptr
,
581 enum isci_request_status newstate
)
583 enum isci_request_status old_state
;
586 spin_lock_irqsave(&isci_request
->state_lock
, flags
);
588 old_state
= isci_request
->status
;
590 if (old_state
== started
|| old_state
== aborting
) {
591 BUG_ON(isci_request
->io_request_completion
!= NULL
);
593 isci_request
->io_request_completion
= completion_ptr
;
594 isci_request
->status
= newstate
;
596 spin_unlock_irqrestore(&isci_request
->state_lock
, flags
);
598 dev_dbg(&isci_request
->isci_host
->pdev
->dev
,
599 "%s: isci_request = %p, old_state = 0x%x\n",
608 * isci_request_change_started_to_aborted() - This function sets the status of
609 * the request object.
610 * @request: This parameter points to the isci_request object
611 * @completion_ptr: This parameter is saved as the kernel completion structure
612 * signalled when the old request completes.
614 * state previous to any change.
616 static inline enum isci_request_status
isci_request_change_started_to_aborted(
617 struct isci_request
*isci_request
,
618 struct completion
*completion_ptr
)
620 return isci_request_change_started_to_newstate(
621 isci_request
, completion_ptr
, aborted
625 * isci_request_free() - This function frees the request object.
626 * @isci_host: This parameter specifies the ISCI host object
627 * @isci_request: This parameter points to the isci_request object
630 static inline void isci_request_free(
631 struct isci_host
*isci_host
,
632 struct isci_request
*isci_request
)
637 /* release the dma memory if we fail. */
638 dma_pool_free(isci_host
->dma_pool
, isci_request
,
639 isci_request
->request_daddr
);
/*
 * Define ISCI_REQUEST_VALIDATE_ACCESS to get BUG_ON-checked accessor
 * functions instead of the raw macros below.
 */
/* #define ISCI_REQUEST_VALIDATE_ACCESS */

#ifdef ISCI_REQUEST_VALIDATE_ACCESS

/* Checked accessor: the request must carry a sas_task (ttype == io_task). */
static inline
struct sas_task *isci_request_access_task(struct isci_request *isci_request)
{
	BUG_ON(isci_request->ttype != io_task);
	return isci_request->ttype_ptr.io_task_ptr;
}

/* Checked accessor: the request must carry a TMF (ttype == tmf_task). */
static inline
struct isci_tmf *isci_request_access_tmf(struct isci_request *isci_request)
{
	BUG_ON(isci_request->ttype != tmf_task);
	return isci_request->ttype_ptr.tmf_task_ptr;
}

#else /* not ISCI_REQUEST_VALIDATE_ACCESS */

#define isci_request_access_task(RequestPtr) \
	((RequestPtr)->ttype_ptr.io_task_ptr)

#define isci_request_access_tmf(RequestPtr) \
	((RequestPtr)->ttype_ptr.tmf_task_ptr)

#endif /* not ISCI_REQUEST_VALIDATE_ACCESS */
673 int isci_request_alloc_tmf(
674 struct isci_host
*isci_host
,
675 struct isci_tmf
*isci_tmf
,
676 struct isci_request
**isci_request
,
677 struct isci_remote_device
*isci_device
,
681 int isci_request_execute(
682 struct isci_host
*isci_host
,
683 struct sas_task
*task
,
684 struct isci_request
**request
,
688 * isci_request_unmap_sgl() - This function unmaps the DMA address of a given
690 * @request: This parameter points to the isci_request object
691 * @*pdev: This Parameter is the pci_device struct for the controller
694 static inline void isci_request_unmap_sgl(
695 struct isci_request
*request
,
696 struct pci_dev
*pdev
)
698 struct sas_task
*task
= isci_request_access_task(request
);
700 dev_dbg(&request
->isci_host
->pdev
->dev
,
701 "%s: request = %p, task = %p,\n"
702 "task->data_dir = %d, is_sata = %d\n ",
707 sas_protocol_ata(task
->task_proto
));
709 if ((task
->data_dir
!= PCI_DMA_NONE
) &&
710 !sas_protocol_ata(task
->task_proto
)) {
711 if (task
->num_scatter
== 0)
712 /* 0 indicates a single dma address */
715 request
->zero_scatter_daddr
,
716 task
->total_xfer_len
,
720 else /* unmap the sgl dma addresses */
724 request
->num_sg_entries
,
731 * isci_request_io_request_get_next_sge() - This function is called by the sci
732 * core to retrieve the next sge for a given request.
733 * @request: This parameter is the isci_request object.
734 * @current_sge_address: This parameter is the last sge retrieved by the sci
735 * core for this request.
737 * pointer to the next sge for specified request.
739 static inline void *isci_request_io_request_get_next_sge(
740 struct isci_request
*request
,
741 void *current_sge_address
)
743 struct sas_task
*task
= isci_request_access_task(request
);
746 dev_dbg(&request
->isci_host
->pdev
->dev
,
748 "current_sge_address = %p, "
749 "num_scatter = %d\n",
755 if (!current_sge_address
) /* First time through.. */
756 ret
= task
->scatter
; /* always task->scatter */
757 else if (task
->num_scatter
== 0) /* Next element, if num_scatter == 0 */
758 ret
= NULL
; /* there is only one element. */
760 ret
= sg_next(current_sge_address
); /* sg_next returns NULL
761 * for the last element
764 dev_dbg(&request
->isci_host
->pdev
->dev
,
765 "%s: next sge address = %p\n",
772 void isci_terminate_pending_requests(struct isci_host
*isci_host
,
773 struct isci_remote_device
*isci_device
,
774 enum isci_request_status new_request_state
);
775 enum sci_status
scic_task_request_construct(struct scic_sds_controller
*scic
,
776 struct scic_sds_remote_device
*sci_dev
,
778 struct scic_sds_request
*sci_req
);
779 enum sci_status
scic_task_request_construct_ssp(struct scic_sds_request
*sci_req
);
780 enum sci_status
scic_task_request_construct_sata(struct scic_sds_request
*sci_req
);
781 void scic_stp_io_request_set_ncq_tag(struct scic_sds_request
*sci_req
, u16 ncq_tag
);
782 void scic_sds_smp_request_copy_response(struct scic_sds_request
*sci_req
);
783 #endif /* !defined(_ISCI_REQUEST_H_) */