/*
 * xor offload engine api
 *
 * Copyright © 2006, Intel Corporation.
 *
 *	Dan Williams <dan.j.williams@intel.com>
 *
 *	with architecture considerations by:
 *	Neil Brown <neilb@suse.de>
 *	Jeff Garzik <jeff@garzik.org>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc.,
 * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
 *
 */
#include <linux/kernel.h>
#include <linux/interrupt.h>
#include <linux/mm.h>
#include <linux/dma-mapping.h>
#include <linux/raid/xor.h>
#include <linux/async_tx.h>
/**
 * async_tx_quiesce - ensure tx is complete and freeable upon return
 * @tx - transaction to quiesce
 */
static void async_tx_quiesce(struct dma_async_tx_descriptor **tx)
{
	if (*tx) {
		/* if ack is already set then we cannot be sure
		 * we are referring to the correct operation
		 */
		BUG_ON(async_tx_test_ack(*tx));
		if (dma_wait_for_async_tx(*tx) == DMA_ERROR)
			panic("DMA_ERROR waiting for transaction\n");
		async_tx_ack(*tx);
		*tx = NULL;
	}
}
/* do_async_xor - dma map the pages and perform the xor with an engine.
 *	This routine is marked __always_inline so it can be compiled away
 *	when CONFIG_DMA_ENGINE=n
 */
static __always_inline struct dma_async_tx_descriptor *
do_async_xor(struct dma_chan *chan, struct page *dest, struct page **src_list,
	     unsigned int offset, int src_cnt, size_t len,
	     enum async_tx_flags flags,
	     struct dma_async_tx_descriptor *depend_tx,
	     dma_async_tx_callback cb_fn, void *cb_param)
{
	struct dma_device *dma = chan->device;
	dma_addr_t *dma_src = (dma_addr_t *) src_list;
	struct dma_async_tx_descriptor *tx = NULL;
	int src_off = 0;
	int i;
	dma_async_tx_callback _cb_fn;
	void *_cb_param;
	enum async_tx_flags async_flags;
	enum dma_ctrl_flags dma_flags;
	int xor_src_cnt;
	dma_addr_t dma_dest;

	dma_dest = dma_map_page(dma->dev, dest, offset, len, DMA_FROM_DEVICE);
	for (i = 0; i < src_cnt; i++)
		dma_src[i] = dma_map_page(dma->dev, src_list[i], offset,
					  len, DMA_TO_DEVICE);

	while (src_cnt) {
		async_flags = flags;
		dma_flags = 0;
		xor_src_cnt = min(src_cnt, dma->max_xor);
		/* if we are submitting additional xors, leave the chain open,
		 * clear the callback parameters, and leave the destination
		 * buffer mapped
		 */
		if (src_cnt > xor_src_cnt) {
			async_flags &= ~ASYNC_TX_ACK;
			dma_flags = DMA_COMPL_SKIP_DEST_UNMAP;
			_cb_fn = NULL;
			_cb_param = NULL;
		} else {
			_cb_fn = cb_fn;
			_cb_param = cb_param;
		}
		if (_cb_fn)
			dma_flags |= DMA_PREP_INTERRUPT;

		/* Since we have clobbered the src_list we are committed
		 * to doing this asynchronously.  Drivers force forward
		 * progress in case they can not provide a descriptor
		 */
		tx = dma->device_prep_dma_xor(chan, dma_dest,
					      &dma_src[src_off], xor_src_cnt,
					      len, dma_flags);

		if (unlikely(!tx))
			async_tx_quiesce(&depend_tx);

		/* spin wait for the preceding transactions to complete */
		while (unlikely(!tx)) {
			dma_async_issue_pending(chan);
			tx = dma->device_prep_dma_xor(chan, dma_dest,
						      &dma_src[src_off],
						      xor_src_cnt, len,
						      dma_flags);
		}

		async_tx_submit(chan, tx, async_flags, depend_tx, _cb_fn,
				_cb_param);

		depend_tx = tx;
		flags |= ASYNC_TX_DEP_ACK;

		if (src_cnt > xor_src_cnt) {
			/* drop completed sources */
			src_cnt -= xor_src_cnt;
			src_off += xor_src_cnt;

			/* use the intermediate result as a source */
			dma_src[--src_off] = dma_dest;
			src_cnt++;
		} else
			break;
	}

	return tx;
}
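
/*
 * Worked example (illustrative only; assumes a hypothetical engine with
 * max_xor == 4): a do_async_xor() call with src_cnt == 7 issues two
 * chained descriptors.
 *
 *	pass 1:	dest = xor(src[0..3])
 *		chain left open (~ASYNC_TX_ACK), dest kept mapped
 *		(DMA_COMPL_SKIP_DEST_UNMAP), callback deferred
 *	pass 2:	src_off advances to 4, then dma_src[--src_off] writes
 *		dma_dest into slot 3, so: dest = xor(dest, src[4..6])
 *
 * The intermediate result is fed back in as a source until all sources
 * are consumed; only the final descriptor carries the ack flag and the
 * caller's callback.
 */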
static void
do_sync_xor(struct page *dest, struct page **src_list, unsigned int offset,
	    int src_cnt, size_t len, enum async_tx_flags flags,
	    struct dma_async_tx_descriptor *depend_tx,
	    dma_async_tx_callback cb_fn, void *cb_param)
{
	int i;
	int xor_src_cnt;
	int src_off = 0;
	void *dest_buf;
	void **srcs = (void **) src_list;

	/* reuse the 'src_list' array to convert to buffer pointers */
	for (i = 0; i < src_cnt; i++)
		srcs[i] = page_address(src_list[i]) + offset;

	/* set destination address */
	dest_buf = page_address(dest) + offset;

	if (flags & ASYNC_TX_XOR_ZERO_DST)
		memset(dest_buf, 0, len);

	while (src_cnt > 0) {
		/* process up to 'MAX_XOR_BLOCKS' sources */
		xor_src_cnt = min(src_cnt, MAX_XOR_BLOCKS);
		xor_blocks(xor_src_cnt, len, dest_buf, &srcs[src_off]);

		/* drop completed sources */
		src_cnt -= xor_src_cnt;
		src_off += xor_src_cnt;
	}

	async_tx_sync_epilog(flags, depend_tx, cb_fn, cb_param);
}
/**
 * async_xor - attempt to xor a set of blocks with a dma engine.
 *	xor_blocks always uses the dest as a source so the
 *	ASYNC_TX_XOR_ZERO_DST flag must be set to not include dest data in
 *	the calculation.  The assumption with dma engines is that they only
 *	use the destination buffer as a source when it is explicitly
 *	specified in the source list.
 * @dest: destination page
 * @src_list: array of source pages (if the dest is also a source it must be
 *	at index zero).  The contents of this array may be overwritten.
 * @offset: offset in pages to start transaction
 * @src_cnt: number of source pages
 * @len: length in bytes
 * @flags: ASYNC_TX_XOR_ZERO_DST, ASYNC_TX_XOR_DROP_DST,
 *	ASYNC_TX_ACK, ASYNC_TX_DEP_ACK
 * @depend_tx: xor depends on the result of this transaction.
 * @cb_fn: function to call when the xor completes
 * @cb_param: parameter to pass to the callback routine
 */
struct dma_async_tx_descriptor *
async_xor(struct page *dest, struct page **src_list, unsigned int offset,
	  int src_cnt, size_t len, enum async_tx_flags flags,
	  struct dma_async_tx_descriptor *depend_tx,
	  dma_async_tx_callback cb_fn, void *cb_param)
{
	struct dma_chan *chan = async_tx_find_channel(depend_tx, DMA_XOR,
						      &dest, 1, src_list,
						      src_cnt, len);
	BUG_ON(src_cnt <= 1);

	if (chan) {
		/* run the xor asynchronously */
		pr_debug("%s (async): len: %zu\n", __func__, len);

		return do_async_xor(chan, dest, src_list, offset, src_cnt,
				    len, flags, depend_tx, cb_fn, cb_param);
	} else {
		/* run the xor synchronously */
		pr_debug("%s (sync): len: %zu\n", __func__, len);

		/* in the sync case the dest is an implied source
		 * (assumes the dest is the first source)
		 */
		if (flags & ASYNC_TX_XOR_DROP_DST) {
			src_cnt--;
			src_list++;
		}

		/* wait for any prerequisite operations */
		if (depend_tx) {
			/* if ack is already set then we cannot be sure
			 * we are referring to the correct operation
			 */
			BUG_ON(async_tx_test_ack(depend_tx));
			if (dma_wait_for_async_tx(depend_tx) == DMA_ERROR)
				panic("%s: DMA_ERROR waiting for depend_tx\n",
				      __func__);
		}

		do_sync_xor(dest, src_list, offset, src_cnt, len,
			    flags, depend_tx, cb_fn, cb_param);

		return NULL;
	}
}
EXPORT_SYMBOL_GPL(async_xor);
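
/*
 * Example usage (a minimal sketch, not part of the original api): compute
 * raid5-style parity over four data pages.  'example_make_parity',
 * 'complete_cb', and 'ctx' are hypothetical caller-provided names.
 */
static inline struct dma_async_tx_descriptor *
example_make_parity(struct page *parity, struct page **blocks,
		    dma_async_tx_callback complete_cb, void *ctx)
{
	/* the dest is not in blocks[], so request a zeroed destination:
	 * the sync fallback (xor_blocks) always reads dest as a source
	 */
	return async_xor(parity, blocks, 0, 4, PAGE_SIZE,
			 ASYNC_TX_XOR_ZERO_DST | ASYNC_TX_ACK,
			 NULL, complete_cb, ctx);
}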
/* page_is_zero - test that a page region contains only zeroes.  The first
 * u32 is checked directly; memcmp() then compares the region against
 * itself shifted by four bytes, which together force every byte to zero.
 */
static int page_is_zero(struct page *p, unsigned int offset, size_t len)
{
	char *a = page_address(p) + offset;
	return ((*(u32 *) a) == 0 &&
		memcmp(a, a + 4, len - 4) == 0);
}
/**
 * async_xor_zero_sum - attempt a xor parity check with a dma engine.
 * @dest: destination page used if the xor is performed synchronously
 * @src_list: array of source pages.  The dest page must be listed as a source
 *	at index zero.  The contents of this array may be overwritten.
 * @offset: offset in pages to start transaction
 * @src_cnt: number of source pages
 * @len: length in bytes
 * @result: 0 if sum == 0 else non-zero
 * @flags: ASYNC_TX_ACK, ASYNC_TX_DEP_ACK
 * @depend_tx: xor depends on the result of this transaction.
 * @cb_fn: function to call when the xor completes
 * @cb_param: parameter to pass to the callback routine
 */
struct dma_async_tx_descriptor *
async_xor_zero_sum(struct page *dest, struct page **src_list,
	unsigned int offset, int src_cnt, size_t len,
	u32 *result, enum async_tx_flags flags,
	struct dma_async_tx_descriptor *depend_tx,
	dma_async_tx_callback cb_fn, void *cb_param)
{
	struct dma_chan *chan = async_tx_find_channel(depend_tx, DMA_ZERO_SUM,
						      &dest, 1, src_list,
						      src_cnt, len);
	struct dma_device *device = chan ? chan->device : NULL;
	struct dma_async_tx_descriptor *tx = NULL;

	BUG_ON(src_cnt <= 1);

	if (device && src_cnt <= device->max_xor) {
		dma_addr_t *dma_src = (dma_addr_t *) src_list;
		unsigned long dma_prep_flags = cb_fn ? DMA_PREP_INTERRUPT : 0;
		int i;

		pr_debug("%s: (async) len: %zu\n", __func__, len);

		for (i = 0; i < src_cnt; i++)
			dma_src[i] = dma_map_page(device->dev, src_list[i],
						  offset, len, DMA_TO_DEVICE);

		tx = device->device_prep_dma_zero_sum(chan, dma_src, src_cnt,
						      len, result,
						      dma_prep_flags);
		if (unlikely(!tx)) {
			async_tx_quiesce(&depend_tx);

			while (!tx) {
				dma_async_issue_pending(chan);
				tx = device->device_prep_dma_zero_sum(chan,
					dma_src, src_cnt, len, result,
					dma_prep_flags);
			}
		}

		async_tx_submit(chan, tx, flags, depend_tx, cb_fn, cb_param);
	} else {
		unsigned long xor_flags = flags;

		pr_debug("%s: (sync) len: %zu\n", __func__, len);

		xor_flags |= ASYNC_TX_XOR_DROP_DST;
		xor_flags &= ~ASYNC_TX_ACK;

		tx = async_xor(dest, src_list, offset, src_cnt, len, xor_flags,
			depend_tx, NULL, NULL);

		if (tx) {
			if (dma_wait_for_async_tx(tx) == DMA_ERROR)
				panic("%s: DMA_ERROR waiting for tx\n",
				      __func__);
			async_tx_ack(tx);
		}

		*result = page_is_zero(dest, offset, len) ? 0 : 1;

		tx = NULL;

		async_tx_sync_epilog(flags, depend_tx, cb_fn, cb_param);
	}

	return tx;
}
EXPORT_SYMBOL_GPL(async_xor_zero_sum);
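
/*
 * Example usage (a minimal sketch, not part of the original api): verify
 * that a 5-page stripe xors to zero.  'example_check_parity', 'bad', and
 * 'done_cb' are hypothetical names; note that the dest page must also be
 * listed at src_list[0], and that the sync fallback xors into @dest,
 * clobbering its contents.
 */
static inline struct dma_async_tx_descriptor *
example_check_parity(struct page **stripe, u32 *bad,
		     dma_async_tx_callback done_cb, void *ctx)
{
	/* on completion, *bad == 0 iff the xor of all five pages is zero */
	return async_xor_zero_sum(stripe[0], stripe, 0, 5, PAGE_SIZE, bad,
				  ASYNC_TX_ACK, NULL, done_cb, ctx);
}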
static int __init async_xor_init(void)
{
	#ifdef CONFIG_DMA_ENGINE
	/* To conserve stack space the input src_list (array of page pointers)
	 * is reused to hold the array of dma addresses passed to the driver.
	 * This conversion is only possible when dma_addr_t is no larger than
	 * the size of a pointer.  HIGHMEM64G is known to violate this
	 * assumption.
	 */
	BUILD_BUG_ON(sizeof(dma_addr_t) > sizeof(struct page *));
	#endif

	return 0;
}
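
/*
 * Illustration of the constraint above (hypothetical sizes): on a config
 * where pointers are 4 bytes but dma_addr_t is 8 bytes (e.g. HIGHMEM64G),
 * the in-place reuse performed by do_async_xor() would overrun the
 * caller's array:
 *
 *	struct page **src_list;				// 4-byte slots
 *	dma_addr_t *dma_src = (dma_addr_t *) src_list;	// needs 8-byte slots
 *
 * so the module refuses to build rather than corrupt memory at run time.
 */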
static void __exit async_xor_exit(void)
{
	do { } while (0);
}

module_init(async_xor_init);
module_exit(async_xor_exit);

MODULE_AUTHOR("Intel Corporation");
MODULE_DESCRIPTION("asynchronous xor/xor-zero-sum api");
MODULE_LICENSE("GPL");