2 * This file is provided under a dual BSD/GPLv2 license. When using or
3 * redistributing this file, you may do so under either license.
7 * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved.
9 * This program is free software; you can redistribute it and/or modify
10 * it under the terms of version 2 of the GNU General Public License as
11 * published by the Free Software Foundation.
13 * This program is distributed in the hope that it will be useful, but
14 * WITHOUT ANY WARRANTY; without even the implied warranty of
15 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
16 * General Public License for more details.
18 * You should have received a copy of the GNU General Public License
19 * along with this program; if not, write to the Free Software
20 * Foundation, Inc., 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
21 * The full GNU General Public License is included in this distribution
22 * in the file called LICENSE.GPL.
26 * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved.
27 * All rights reserved.
29 * Redistribution and use in source and binary forms, with or without
30 * modification, are permitted provided that the following conditions
33 * * Redistributions of source code must retain the above copyright
34 * notice, this list of conditions and the following disclaimer.
35 * * Redistributions in binary form must reproduce the above copyright
36 * notice, this list of conditions and the following disclaimer in
37 * the documentation and/or other materials provided with the
39 * * Neither the name of Intel Corporation nor the names of its
40 * contributors may be used to endorse or promote products derived
41 * from this software without specific prior written permission.
43 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
44 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
45 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
46 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
47 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
48 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
49 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
50 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
51 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
52 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
53 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
56 #if !defined(_ISCI_REQUEST_H_)
57 #define _ISCI_REQUEST_H_
/* NOTE(review): garbled extraction — original source line numbers are fused
 * into the text and the kernel-doc delimiters are missing.  Kept
 * byte-identical; comments only. */
62 * struct isci_request_status - This enum defines the possible states of an I/O
/* Lifecycle state of an isci_request.  The enumerator list itself is not
 * visible in this chunk; the inline helpers below reference values named
 * "unallocated", "started" and "aborted" — confirm against the full file. */
67 enum isci_request_status
{
84 * struct isci_request - This class represents the request object used to track
85 * IO, smp and TMF request internal. It wraps the SCIC request object.
/* NOTE(review): the "struct isci_request {" opener and the "ttype"
 * discriminator field (referenced by the accessor macros below) are not
 * visible in this garbled chunk — confirm against the full file. */
/* Handle to the wrapped core (SCIC) request object. */
91 struct scic_sds_request
*sci_request_handle
;
/* Current lifecycle state; guarded by state_lock in the helpers below. */
93 enum isci_request_status status
;
95 unsigned short io_tag
;
/* True once the target has seen the request complete — TODO confirm. */
96 bool complete_in_target
;
/* Discriminated by the (not visible here) ttype field: an I/O carries a
 * sas_task, a task-management function carries an isci_tmf. */
98 union ttype_ptr_union
{
99 struct sas_task
*io_task_ptr
; /* When ttype==io_task */
100 struct isci_tmf
*tmf_task_ptr
; /* When ttype==tmf_task */
/* Back-pointers to the owning host and remote device. */
102 struct isci_host
*isci_host
;
103 struct isci_remote_device
*isci_device
;
104 /* For use in the requests_to_{complete|abort} lists: */
105 struct list_head completed_node
;
106 /* For use in the reqs_in_process list: */
107 struct list_head dev_node
;
108 void *sci_request_mem_ptr
;
/* Protects "status" (see isci_request_change_state() and friends). */
109 spinlock_t state_lock
;
/* DMA address of this request's dma_pool block (see isci_request_free). */
110 dma_addr_t request_daddr
;
/* DMA address used for the single-element (num_scatter == 0) case. */
111 dma_addr_t zero_scatter_daddr
;
113 unsigned int num_sg_entries
; /* returned by pci_alloc_sg */
114 unsigned int request_alloc_size
; /* size of block from dma_pool_alloc */
116 /** Note: "io_request_completion" is completed in two different ways
117 * depending on whether this is a TMF or regular request.
118 * - TMF requests are completed in the thread that started them;
119 * - regular requests are completed in the request completion callback
121 * This difference in operation allows the aborter of a TMF request
122 * to be sure that once the TMF request completes, the I/O that the
123 * TMF was aborting is guaranteed to have completed.
125 struct completion
*io_request_completion
;
/* isci_request_get_state() - return the current status of @isci_request.
 * Emits a dev_warn if the request reads back as "unallocated", which the
 * comment below flags as a bad sign for a live request.
 * NOTE(review): the opening brace, the dev_warn trailing argument and the
 * closing brace are missing from this garbled chunk.  Note also that the
 * status is read without taking state_lock here — presumably a deliberate
 * racy read; confirm against callers. */
129 * This function gets the status of the request object.
130 * @request: This parameter points to the isci_request object
132 * status of the object as a isci_request_status enum.
135 enum isci_request_status
isci_request_get_state(
136 struct isci_request
*isci_request
)
138 BUG_ON(isci_request
== NULL
);
140 /*probably a bad sign... */
141 if (isci_request
->status
== unallocated
)
142 dev_warn(&isci_request
->isci_host
->pdev
->dev
,
143 "%s: isci_request->status == unallocated\n",
146 return isci_request
->status
;
/* isci_request_change_state() - unconditionally set @isci_request->status
 * to @status under state_lock, returning the previous state (old_state).
 * NOTE(review): garbled chunk — the opening brace, the "unsigned long
 * flags;" declaration used by spin_lock_irqsave, the dev_dbg trailing
 * arguments, the "return old_state;" and the closing brace are missing. */
151 * isci_request_change_state() - This function sets the status of the request
153 * @request: This parameter points to the isci_request object
154 * @status: This Parameter is the new status of the object
157 static inline enum isci_request_status
isci_request_change_state(
158 struct isci_request
*isci_request
,
159 enum isci_request_status status
)
161 enum isci_request_status old_state
;
164 dev_dbg(&isci_request
->isci_host
->pdev
->dev
,
165 "%s: isci_request = %p, state = 0x%x\n",
/* NOTE(review): the NULL check runs after isci_request was already
 * dereferenced by the dev_dbg above. */
170 BUG_ON(isci_request
== NULL
);
/* Swap in the new status atomically with respect to the other state
 * helpers, which take the same lock. */
172 spin_lock_irqsave(&isci_request
->state_lock
, flags
);
173 old_state
= isci_request
->status
;
174 isci_request
->status
= status
;
175 spin_unlock_irqrestore(&isci_request
->state_lock
, flags
);
/* isci_request_change_started_to_newstate() - conditional state change:
 * only if the request is currently "started" does it (a) record
 * @completion_ptr in io_request_completion and (b) move to @newstate,
 * all under state_lock.  The pre-change state is captured in old_state.
 * NOTE(review): garbled chunk — the opening brace, the "unsigned long
 * flags;" declaration, the if-block's closing brace, the dev_dbg trailing
 * arguments, "return old_state;" and the closing brace are missing. */
181 * isci_request_change_started_to_newstate() - This function sets the status of
182 * the request object.
183 * @request: This parameter points to the isci_request object
184 * @status: This Parameter is the new status of the object
186 * state previous to any change.
188 static inline enum isci_request_status
isci_request_change_started_to_newstate(
189 struct isci_request
*isci_request
,
190 struct completion
*completion_ptr
,
191 enum isci_request_status newstate
)
193 enum isci_request_status old_state
;
196 BUG_ON(isci_request
== NULL
);
198 spin_lock_irqsave(&isci_request
->state_lock
, flags
);
200 old_state
= isci_request
->status
;
202 if (old_state
== started
) {
/* A started request must not already have a waiter attached. */
203 BUG_ON(isci_request
->io_request_completion
!= NULL
);
205 isci_request
->io_request_completion
= completion_ptr
;
206 isci_request
->status
= newstate
;
208 spin_unlock_irqrestore(&isci_request
->state_lock
, flags
);
210 dev_dbg(&isci_request
->isci_host
->pdev
->dev
,
211 "%s: isci_request = %p, old_state = 0x%x\n",
/* isci_request_change_started_to_aborted() - convenience wrapper that
 * delegates to isci_request_change_started_to_newstate() with the
 * "aborted" state, saving @completion_ptr to be signalled on completion.
 * NOTE(review): garbled chunk — the opening brace, the call's closing
 * ");" and the function's closing brace are missing. */
220 * isci_request_change_started_to_aborted() - This function sets the status of
221 * the request object.
222 * @request: This parameter points to the isci_request object
223 * @completion_ptr: This parameter is saved as the kernel completion structure
224 * signalled when the old request completes.
226 * state previous to any change.
228 static inline enum isci_request_status
isci_request_change_started_to_aborted(
229 struct isci_request
*isci_request
,
230 struct completion
*completion_ptr
)
232 return isci_request_change_started_to_newstate(
233 isci_request
, completion_ptr
, aborted
/* isci_request_free() - return the request's memory block to the host's
 * DMA pool.  Only the dma_pool block is released here; any embedded
 * resources are presumably torn down by the caller — confirm.
 * NOTE(review): garbled chunk — the opening and closing braces of the
 * function body are missing. */
237 * isci_request_free() - This function frees the request object.
238 * @isci_host: This parameter specifies the ISCI host object
239 * @isci_request: This parameter points to the isci_request object
242 static inline void isci_request_free(
243 struct isci_host
*isci_host
,
244 struct isci_request
*isci_request
)
246 BUG_ON(isci_request
== NULL
);
248 /* release the dma memory if we fail. */
/* request_daddr is the DMA address recorded at allocation time. */
249 dma_pool_free(isci_host
->dma_pool
, isci_request
,
250 isci_request
->request_daddr
);
/* Debug variants of the ttype_ptr accessors: when
 * ISCI_REQUEST_VALIDATE_ACCESS is defined, each accessor BUG_ONs if the
 * request's ttype discriminator does not match the union member being
 * read.  The release build uses the unchecked macros in the #else branch.
 * NOTE(review): garbled chunk — function braces and the comment close of
 * the "#define ISCI_REQUEST_VALIDATE_ACCESS" line are missing. */
254 /* #define ISCI_REQUEST_VALIDATE_ACCESS
257 #ifdef ISCI_REQUEST_VALIDATE_ACCESS
260 struct sas_task
*isci_request_access_task(struct isci_request
*isci_request
)
262 BUG_ON(isci_request
->ttype
!= io_task
);
263 return isci_request
->ttype_ptr
.io_task_ptr
;
267 struct isci_tmf
*isci_request_access_tmf(struct isci_request
*isci_request
)
269 BUG_ON(isci_request
->ttype
!= tmf_task
);
270 return isci_request
->ttype_ptr
.tmf_task_ptr
;
273 #else /* not ISCI_REQUEST_VALIDATE_ACCESS */
/* Fast-path accessors (validation compiled out): fetch the sas_task or
 * isci_tmf pointer straight from the request's ttype_ptr union.
 * NOTE(review): no ttype check here — the caller must know which union
 * member is active.  (Fixed: stray extraction line numbers that made the
 * macro definitions invalid C.) */
#define isci_request_access_task(RequestPtr) \
	((RequestPtr)->ttype_ptr.io_task_ptr)

#define isci_request_access_tmf(RequestPtr) \
	((RequestPtr)->ttype_ptr.tmf_task_ptr)
281 #endif /* not ISCI_REQUEST_VALIDATE_ACCESS */
/* isci_request_alloc_tmf() - allocate/construct a request wrapping the
 * task-management function @isci_tmf; the new request is returned through
 * @isci_request.  Returns an int status (0 on success, presumably).
 * NOTE(review): garbled chunk — the trailing parameter(s) and closing
 * ");" of this prototype are missing. */
284 int isci_request_alloc_tmf(
285 struct isci_host
*isci_host
,
286 struct isci_tmf
*isci_tmf
,
287 struct isci_request
**isci_request
,
288 struct isci_remote_device
*isci_device
,
/* isci_request_execute() - build and start an I/O request for the libsas
 * @task; the new request is returned through @request.  Returns an int
 * status.  NOTE(review): garbled chunk — the trailing parameter(s) and
 * closing ");" of this prototype are missing. */
292 int isci_request_execute(
293 struct isci_host
*isci_host
,
294 struct sas_task
*task
,
295 struct isci_request
**request
,
/* isci_request_unmap_sgl() - undo the DMA mapping for a request's data:
 * skipped entirely for PCI_DMA_NONE transfers and for ATA protocols
 * (whose mapping is handled elsewhere — TODO confirm).  num_scatter == 0
 * denotes a single mapped address (zero_scatter_daddr); otherwise the
 * scatter list is unmapped.  NOTE(review): garbled chunk — the function
 * braces, the dev_dbg trailing arguments and the actual
 * dma_unmap_single()/dma_unmap_sg()-style calls are missing. */
299 * isci_request_unmap_sgl() - This function unmaps the DMA address of a given
301 * @request: This parameter points to the isci_request object
302 * @*pdev: This Parameter is the pci_device struct for the controller
305 static inline void isci_request_unmap_sgl(
306 struct isci_request
*request
,
307 struct pci_dev
*pdev
)
309 struct sas_task
*task
= isci_request_access_task(request
);
311 dev_dbg(&request
->isci_host
->pdev
->dev
,
312 "%s: request = %p, task = %p,\n"
313 "task->data_dir = %d, is_sata = %d\n ",
318 sas_protocol_ata(task
->task_proto
));
320 if ((task
->data_dir
!= PCI_DMA_NONE
) &&
321 !sas_protocol_ata(task
->task_proto
)) {
322 if (task
->num_scatter
== 0)
323 /* 0 indicates a single dma address */
326 request
->zero_scatter_daddr
,
327 task
->total_xfer_len
,
331 else /* unmap the sgl dma addresses */
335 request
->num_sg_entries
,
342 void isci_request_io_request_complete(
343 struct isci_host
*isci_host
,
344 struct isci_request
*request
,
345 enum sci_io_status completion_status
);
347 u32
isci_request_io_request_get_transfer_length(
348 struct isci_request
*request
);
350 enum dma_data_direction
isci_request_io_request_get_data_direction(struct isci_request
*req
);
/* isci_request_io_request_get_next_sge() - iterator over a request's
 * scatter-gather elements for the sci core: NULL @current_sge_address
 * starts at task->scatter; with num_scatter == 0 there is only the single
 * element so the next is NULL; otherwise sg_next() advances (sg_next
 * itself returns NULL at the end of the list).  NOTE(review): garbled
 * chunk — the function braces, the "void *ret" declaration, the dev_dbg
 * trailing arguments and the "return ret;" are missing. */
353 * isci_request_io_request_get_next_sge() - This function is called by the sci
354 * core to retrieve the next sge for a given request.
355 * @request: This parameter is the isci_request object.
356 * @current_sge_address: This parameter is the last sge retrieved by the sci
357 * core for this request.
359 * pointer to the next sge for specified request.
361 static inline void *isci_request_io_request_get_next_sge(
362 struct isci_request
*request
,
363 void *current_sge_address
)
365 struct sas_task
*task
= isci_request_access_task(request
);
368 dev_dbg(&request
->isci_host
->pdev
->dev
,
370 "current_sge_address = %p, "
371 "num_scatter = %d\n",
377 if (!current_sge_address
) /* First time through.. */
378 ret
= task
->scatter
; /* always task->scatter */
379 else if (task
->num_scatter
== 0) /* Next element, if num_scatter == 0 */
380 ret
= NULL
; /* there is only one element. */
382 ret
= sg_next(current_sge_address
); /* sg_next returns NULL
383 * for the last element
386 dev_dbg(&request
->isci_host
->pdev
->dev
,
387 "%s: next sge address = %p\n",
396 void *isci_request_ssp_io_request_get_cdb_address(
397 struct isci_request
*request
);
399 u32
isci_request_ssp_io_request_get_cdb_length(
400 struct isci_request
*request
);
402 u32
isci_request_ssp_io_request_get_lun(
403 struct isci_request
*request
);
405 u32
isci_request_ssp_io_request_get_task_attribute(
406 struct isci_request
*request
);
408 u32
isci_request_ssp_io_request_get_command_priority(
409 struct isci_request
*request
);
/* Terminate the requests pending on @isci_device, moving each into
 * @new_request_state.  NOTE(review): semantics inferred from the name —
 * confirm against the definition in request.c.  (Fixed: stray extraction
 * line numbers / broken line-splitting that made this declaration
 * invalid C.) */
void isci_terminate_pending_requests(
	struct isci_host *isci_host,
	struct isci_remote_device *isci_device,
	enum isci_request_status new_request_state);
423 #endif /* !defined(_ISCI_REQUEST_H_) */