/*
 * core routines for the asynchronous memory transfer/transform api
 *
 * Copyright © 2006, Intel Corporation.
 *
 *	Dan Williams <dan.j.williams@intel.com>
 *
 *	with architecture considerations by:
 *	Neil Brown <neilb@suse.de>
 *	Jeff Garzik <jeff@garzik.org>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc.,
 * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
 *
 */
#include <linux/rculist.h>
#include <linux/kernel.h>
#include <linux/async_tx.h>

#ifdef CONFIG_DMA_ENGINE
static int __init async_tx_init(void)
{
	dmaengine_get();

	printk(KERN_INFO "async_tx: api initialized (async)\n");

	return 0;
}

static void __exit async_tx_exit(void)
{
	dmaengine_put();
}

/**
 * __async_tx_find_channel - find a channel to carry out the operation or let
 *	the transaction execute synchronously
 * @depend_tx: transaction dependency
 * @tx_type: transaction type
 */
struct dma_chan *
__async_tx_find_channel(struct dma_async_tx_descriptor *depend_tx,
	enum dma_transaction_type tx_type)
{
	/* see if we can keep the chain on one channel */
	if (depend_tx &&
	    dma_has_cap(tx_type, depend_tx->chan->device->cap_mask))
		return depend_tx->chan;
	return dma_find_channel(tx_type);
}
EXPORT_SYMBOL_GPL(__async_tx_find_channel);
#else
static int __init async_tx_init(void)
{
	printk(KERN_INFO "async_tx: api initialized (sync-only)\n");
	return 0;
}

static void __exit async_tx_exit(void)
{
	do { } while (0);
}
#endif

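/*
 * Illustrative usage (a sketch, not lifted from any one caller): an async_tx
 * operation such as async_memcpy() looks for a channel capable of the
 * requested operation and falls back to doing the work synchronously when
 * none is available:
 *
 *	struct dma_chan *chan = __async_tx_find_channel(depend_tx, DMA_MEMCPY);
 *
 *	if (chan)
 *		... prepare a descriptor and hand it to async_tx_submit() ...
 *	else
 *		... perform the copy with the cpu ...
 */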

/**
 * async_tx_channel_switch - queue an interrupt descriptor with a dependency
 *	pre-attached.
 * @depend_tx: the operation that must finish before the new operation runs
 * @tx: the new operation
 */
static void
async_tx_channel_switch(struct dma_async_tx_descriptor *depend_tx,
			struct dma_async_tx_descriptor *tx)
{
	struct dma_chan *chan;
	struct dma_device *device;
	struct dma_async_tx_descriptor *intr_tx = (void *) ~0;

	/* first check to see if we can still append to depend_tx */
	spin_lock_bh(&depend_tx->lock);
	if (depend_tx->parent && depend_tx->chan == tx->chan) {
		tx->parent = depend_tx;
		depend_tx->next = tx;
		intr_tx = NULL;
	}
	spin_unlock_bh(&depend_tx->lock);

	if (!intr_tx)
		return;

	chan = depend_tx->chan;
	device = chan->device;

	/* see if we can schedule an interrupt
	 * otherwise poll for completion
	 */
	if (dma_has_cap(DMA_INTERRUPT, device->cap_mask))
		intr_tx = device->device_prep_dma_interrupt(chan, 0);
	else
		intr_tx = NULL;

	if (intr_tx) {
		intr_tx->callback = NULL;
		intr_tx->callback_param = NULL;
		tx->parent = intr_tx;
		/* safe to set ->next outside the lock since we know we are
		 * not submitted yet
		 */
		intr_tx->next = tx;

		/* check if we need to append */
		spin_lock_bh(&depend_tx->lock);
		if (depend_tx->parent) {
			intr_tx->parent = depend_tx;
			depend_tx->next = intr_tx;
			async_tx_ack(intr_tx);
			intr_tx = NULL;
		}
		spin_unlock_bh(&depend_tx->lock);

		if (intr_tx) {
			intr_tx->parent = NULL;
			intr_tx->tx_submit(intr_tx);
			async_tx_ack(intr_tx);
		}
	} else {
		if (dma_wait_for_async_tx(depend_tx) == DMA_ERROR)
			panic("%s: DMA_ERROR waiting for depend_tx\n",
			      __func__);
		tx->tx_submit(tx);
	}
}
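
/*
 * Note: a successful channel switch leaves the chain
 *
 *	depend_tx (old chan) -> intr_tx (old chan) -> tx (new chan)
 *
 * so tx is only submitted on the new channel once the interrupt
 * descriptor completes on the old one.
 */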


/**
 * submit_disposition - while holding depend_tx->lock we must avoid submitting
 *	new operations to prevent a circular locking dependency with
 *	drivers that already hold a channel lock when calling
 *	async_tx_run_dependencies.
 * @ASYNC_TX_SUBMITTED: we were able to append the new operation under the lock
 * @ASYNC_TX_CHANNEL_SWITCH: when the lock is dropped schedule a channel switch
 * @ASYNC_TX_DIRECT_SUBMIT: when the lock is dropped submit directly
 */
enum submit_disposition {
	ASYNC_TX_SUBMITTED,
	ASYNC_TX_CHANNEL_SWITCH,
	ASYNC_TX_DIRECT_SUBMIT,
};

void
async_tx_submit(struct dma_chan *chan, struct dma_async_tx_descriptor *tx,
	enum async_tx_flags flags, struct dma_async_tx_descriptor *depend_tx,
	dma_async_tx_callback cb_fn, void *cb_param)
{
	tx->callback = cb_fn;
	tx->callback_param = cb_param;

	if (depend_tx) {
		enum submit_disposition s;

		/* sanity check the dependency chain:
		 * 1/ if ack is already set then we cannot be sure
		 * we are referring to the correct operation
		 * 2/ dependencies are 1:1 i.e. two transactions can
		 * not depend on the same parent
		 */
		BUG_ON(async_tx_test_ack(depend_tx) || depend_tx->next ||
		       tx->parent);

		/* the lock prevents async_tx_run_dependencies from missing
		 * the setting of ->next when ->parent != NULL
		 */
		spin_lock_bh(&depend_tx->lock);
		if (depend_tx->parent) {
			/* we have a parent so we can not submit directly
			 * if we are staying on the same channel: append
			 * else: channel switch
			 */
			if (depend_tx->chan == chan) {
				tx->parent = depend_tx;
				depend_tx->next = tx;
				s = ASYNC_TX_SUBMITTED;
			} else
				s = ASYNC_TX_CHANNEL_SWITCH;
		} else {
			/* we do not have a parent so we may be able to submit
			 * directly if we are staying on the same channel
			 */
			if (depend_tx->chan == chan)
				s = ASYNC_TX_DIRECT_SUBMIT;
			else
				s = ASYNC_TX_CHANNEL_SWITCH;
		}
		spin_unlock_bh(&depend_tx->lock);

		switch (s) {
		case ASYNC_TX_SUBMITTED:
			break;
		case ASYNC_TX_CHANNEL_SWITCH:
			async_tx_channel_switch(depend_tx, tx);
			break;
		case ASYNC_TX_DIRECT_SUBMIT:
			tx->parent = NULL;
			tx->tx_submit(tx);
			break;
		}
	} else {
		tx->parent = NULL;
		tx->tx_submit(tx);
	}

	if (flags & ASYNC_TX_ACK)
		async_tx_ack(tx);

	if (depend_tx && (flags & ASYNC_TX_DEP_ACK))
		async_tx_ack(depend_tx);
}
EXPORT_SYMBOL_GPL(async_tx_submit);
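
/*
 * Illustrative call sequence (a sketch of what an operation like
 * async_memcpy() does once it has mapped its buffers; dma_dest, dma_src
 * and len are placeholders):
 *
 *	tx = device->device_prep_dma_memcpy(chan, dma_dest, dma_src, len, 0);
 *	if (tx)
 *		async_tx_submit(chan, tx, flags, depend_tx, cb_fn, cb_param);
 *	else
 *		... fall back to a synchronous copy ...
 */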

/**
 * async_trigger_callback - schedules the callback function to be run after
 * any dependent operations have been completed.
 * @flags: ASYNC_TX_ACK, ASYNC_TX_DEP_ACK
 * @depend_tx: 'callback' requires the completion of this transaction
 * @cb_fn: function to call after depend_tx completes
 * @cb_param: parameter to pass to the callback routine
 */
struct dma_async_tx_descriptor *
async_trigger_callback(enum async_tx_flags flags,
	struct dma_async_tx_descriptor *depend_tx,
	dma_async_tx_callback cb_fn, void *cb_param)
{
	struct dma_chan *chan;
	struct dma_device *device;
	struct dma_async_tx_descriptor *tx;

	if (depend_tx) {
		chan = depend_tx->chan;
		device = chan->device;

		/* see if we can schedule an interrupt
		 * otherwise poll for completion
		 */
		if (device && !dma_has_cap(DMA_INTERRUPT, device->cap_mask))
			device = NULL;

		tx = device ? device->device_prep_dma_interrupt(chan, 0) : NULL;
	} else
		tx = NULL;

	if (tx) {
		pr_debug("%s: (async)\n", __func__);

		async_tx_submit(chan, tx, flags, depend_tx, cb_fn, cb_param);
	} else {
		pr_debug("%s: (sync)\n", __func__);

		/* wait for any prerequisite operations */
		async_tx_quiesce(&depend_tx);

		async_tx_sync_epilog(cb_fn, cb_param);
	}

	return tx;
}
EXPORT_SYMBOL_GPL(async_trigger_callback);
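
/*
 * Illustrative usage (hypothetical caller, e.g. raid code finishing a chain
 * of async operations; complete_fn and complete_arg are made-up names):
 *
 *	tx = async_trigger_callback(ASYNC_TX_ACK | ASYNC_TX_DEP_ACK, tx,
 *				    complete_fn, complete_arg);
 */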
278 | ||
d2c52b79 DW |
279 | /** |
280 | * async_tx_quiesce - ensure tx is complete and freeable upon return | |
281 | * @tx - transaction to quiesce | |
282 | */ | |
283 | void async_tx_quiesce(struct dma_async_tx_descriptor **tx) | |
284 | { | |
285 | if (*tx) { | |
286 | /* if ack is already set then we cannot be sure | |
287 | * we are referring to the correct operation | |
288 | */ | |
289 | BUG_ON(async_tx_test_ack(*tx)); | |
290 | if (dma_wait_for_async_tx(*tx) == DMA_ERROR) | |
291 | panic("DMA_ERROR waiting for transaction\n"); | |
292 | async_tx_ack(*tx); | |
293 | *tx = NULL; | |
294 | } | |
295 | } | |
296 | EXPORT_SYMBOL_GPL(async_tx_quiesce); | |
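
/*
 * Illustrative usage (hypothetical synchronous fallback path): before
 * touching the buffers with the cpu, make sure any prerequisite
 * transaction has completed and can be reclaimed:
 *
 *	async_tx_quiesce(&depend_tx);
 *	... perform the operation with the cpu ...
 *	async_tx_sync_epilog(cb_fn, cb_param);
 */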

module_init(async_tx_init);
module_exit(async_tx_exit);

MODULE_AUTHOR("Intel Corporation");
MODULE_DESCRIPTION("Asynchronous Bulk Memory Transactions API");
MODULE_LICENSE("GPL");