/*
 * Dmaengine driver base library for DMA controllers, found on SH-based SoCs
 *
 * extracted from shdma.c
 *
 * Copyright (C) 2011-2012 Guennadi Liakhovetski <g.liakhovetski@gmx.de>
 * Copyright (C) 2009 Nobuhiro Iwamatsu <iwamatsu.nobuhiro@renesas.com>
 * Copyright (C) 2009 Renesas Solutions, Inc. All rights reserved.
 * Copyright (C) 2007 Freescale Semiconductor, Inc. All rights reserved.
 *
 * This is free software; you can redistribute it and/or modify
 * it under the terms of version 2 of the GNU General Public License as
 * published by the Free Software Foundation.
 */

#include <linux/delay.h>
#include <linux/shdma-base.h>
#include <linux/dmaengine.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/module.h>
#include <linux/pm_runtime.h>
#include <linux/slab.h>
#include <linux/spinlock.h>

#include "../dmaengine.h"

/* DMA descriptor control */
enum shdma_desc_status {
	DESC_IDLE,
	DESC_PREPARED,
	DESC_SUBMITTED,
	DESC_COMPLETED,	/* completed, have to call callback */
	DESC_WAITING,	/* callback called, waiting for ack / re-submit */
};

#define NR_DESCS_PER_CHANNEL 32

#define to_shdma_chan(c) container_of(c, struct shdma_chan, dma_chan)
#define to_shdma_dev(d) container_of(d, struct shdma_dev, dma_dev)

/*
 * For slave DMA we assume that there is a finite number of DMA slaves in the
 * system, and that each such slave can only use a finite number of channels.
 * We use slave channel IDs to make sure that no such slave channel ID is
 * allocated more than once.
 */
static unsigned int slave_num = 256;
module_param(slave_num, uint, 0444);

/* A bitmask with slave_num bits */
static unsigned long *shdma_slave_used;
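
/*
 * Worked example (illustrative sketch, not driver code): with the default
 * slave_num = 256 the bitmap spans 256 bits, and claiming or releasing a
 * slave channel is a pair of atomic bitmap operations:
 *
 *	if (test_and_set_bit(id, shdma_slave_used))
 *		return -EBUSY;			(already claimed elsewhere)
 *	...
 *	clear_bit(id, shdma_slave_used);	(release on teardown)
 *
 * shdma_setup_slave() and shdma_free_chan_resources() below implement
 * exactly this pattern. The limit can be raised at module load time, e.g.
 * "modprobe shdma-base slave_num=512".
 */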

/* Called under spin_lock_irq(&schan->chan_lock) */
static void shdma_chan_xfer_ld_queue(struct shdma_chan *schan)
{
	struct shdma_dev *sdev = to_shdma_dev(schan->dma_chan.device);
	const struct shdma_ops *ops = sdev->ops;
	struct shdma_desc *sdesc;

	/* DMA work check */
	if (ops->channel_busy(schan))
		return;

	/* Find the first not transferred descriptor */
	list_for_each_entry(sdesc, &schan->ld_queue, node)
		if (sdesc->mark == DESC_SUBMITTED) {
			ops->start_xfer(schan, sdesc);
			break;
		}
}

static dma_cookie_t shdma_tx_submit(struct dma_async_tx_descriptor *tx)
{
	struct shdma_desc *chunk, *c, *desc =
		container_of(tx, struct shdma_desc, async_tx);
	struct shdma_chan *schan = to_shdma_chan(tx->chan);
	dma_async_tx_callback callback = tx->callback;
	dma_cookie_t cookie;
	bool power_up;

	spin_lock_irq(&schan->chan_lock);

	power_up = list_empty(&schan->ld_queue);

	cookie = dma_cookie_assign(tx);

	/* Mark all chunks of this descriptor as submitted, move to the queue */
	list_for_each_entry_safe(chunk, c, desc->node.prev, node) {
		/*
		 * All chunks are on the global ld_free, so, we have to find
		 * the end of the chain ourselves
		 */
		if (chunk != desc && (chunk->mark == DESC_IDLE ||
				      chunk->async_tx.cookie > 0 ||
				      chunk->async_tx.cookie == -EBUSY ||
				      &chunk->node == &schan->ld_free))
			break;
		chunk->mark = DESC_SUBMITTED;
		if (chunk->chunks == 1) {
			chunk->async_tx.callback = callback;
			chunk->async_tx.callback_param = tx->callback_param;
		} else {
			/* Callback goes to the last chunk */
			chunk->async_tx.callback = NULL;
		}
		chunk->cookie = cookie;
		list_move_tail(&chunk->node, &schan->ld_queue);

		dev_dbg(schan->dev, "submit #%d@%p on %d\n",
			tx->cookie, &chunk->async_tx, schan->id);
	}

	if (power_up) {
		int ret;
		schan->pm_state = SHDMA_PM_BUSY;

		ret = pm_runtime_get(schan->dev);

		spin_unlock_irq(&schan->chan_lock);
		if (ret < 0)
			dev_err(schan->dev, "%s(): GET = %d\n", __func__, ret);

		pm_runtime_barrier(schan->dev);

		spin_lock_irq(&schan->chan_lock);

		/* Have we been reset, while waiting? */
		if (schan->pm_state != SHDMA_PM_ESTABLISHED) {
			struct shdma_dev *sdev =
				to_shdma_dev(schan->dma_chan.device);
			const struct shdma_ops *ops = sdev->ops;
			dev_dbg(schan->dev, "Bring up channel %d\n",
				schan->id);
			/*
			 * TODO: .xfer_setup() might fail on some platforms.
			 * Make it int then, on error remove chunks from the
			 * queue again
			 */
			ops->setup_xfer(schan, schan->slave_id);

			if (schan->pm_state == SHDMA_PM_PENDING)
				shdma_chan_xfer_ld_queue(schan);
			schan->pm_state = SHDMA_PM_ESTABLISHED;
		}
	} else {
		/*
		 * Tell .device_issue_pending() not to run the queue, interrupts
		 * will do it anyway
		 */
		schan->pm_state = SHDMA_PM_PENDING;
	}

	spin_unlock_irq(&schan->chan_lock);

	return cookie;
}

/* Called with schan->chan_lock held */
static struct shdma_desc *shdma_get_desc(struct shdma_chan *schan)
{
	struct shdma_desc *sdesc;

	list_for_each_entry(sdesc, &schan->ld_free, node)
		if (sdesc->mark != DESC_PREPARED) {
			BUG_ON(sdesc->mark != DESC_IDLE);
			list_del(&sdesc->node);
			return sdesc;
		}

	return NULL;
}

static int shdma_setup_slave(struct shdma_chan *schan, dma_addr_t slave_addr)
{
	struct shdma_dev *sdev = to_shdma_dev(schan->dma_chan.device);
	const struct shdma_ops *ops = sdev->ops;
	int ret, match;

	if (schan->dev->of_node) {
		match = schan->hw_req;
		ret = ops->set_slave(schan, match, slave_addr, true);
		if (ret < 0)
			return ret;
	} else {
		match = schan->real_slave_id;
	}

	if (schan->real_slave_id < 0 || schan->real_slave_id >= slave_num)
		return -EINVAL;

	if (test_and_set_bit(schan->real_slave_id, shdma_slave_used))
		return -EBUSY;

	ret = ops->set_slave(schan, match, slave_addr, false);
	if (ret < 0) {
		clear_bit(schan->real_slave_id, shdma_slave_used);
		return ret;
	}

	schan->slave_id = schan->real_slave_id;

	return 0;
}

static int shdma_alloc_chan_resources(struct dma_chan *chan)
{
	struct shdma_chan *schan = to_shdma_chan(chan);
	struct shdma_dev *sdev = to_shdma_dev(schan->dma_chan.device);
	const struct shdma_ops *ops = sdev->ops;
	struct shdma_desc *desc;
	struct shdma_slave *slave = chan->private;
	int ret, i;

	/*
	 * This relies on the guarantee from dmaengine that alloc_chan_resources
	 * never runs concurrently with itself or free_chan_resources.
	 */
	if (slave) {
		/* Legacy mode: .private is set in filter */
		schan->real_slave_id = slave->slave_id;
		ret = shdma_setup_slave(schan, 0);
		if (ret < 0)
			goto esetslave;
	} else {
		/* Normal mode: real_slave_id was set by filter */
		schan->slave_id = -EINVAL;
	}

	schan->desc = kcalloc(NR_DESCS_PER_CHANNEL,
			      sdev->desc_size, GFP_KERNEL);
	if (!schan->desc) {
		ret = -ENOMEM;
		goto edescalloc;
	}
	schan->desc_num = NR_DESCS_PER_CHANNEL;

	for (i = 0; i < NR_DESCS_PER_CHANNEL; i++) {
		desc = ops->embedded_desc(schan->desc, i);
		dma_async_tx_descriptor_init(&desc->async_tx,
					     &schan->dma_chan);
		desc->async_tx.tx_submit = shdma_tx_submit;
		desc->mark = DESC_IDLE;

		list_add(&desc->node, &schan->ld_free);
	}

	return NR_DESCS_PER_CHANNEL;

edescalloc:
	if (slave)
esetslave:
		clear_bit(slave->slave_id, shdma_slave_used);
	chan->private = NULL;
	return ret;
}

/*
 * This is the standard shdma filter function to be used as a replacement to
 * the "old" method, using the .private pointer.
 * You always have to pass a valid slave ID as the argument; old drivers that
 * pass ERR_PTR(-EINVAL) as a filter parameter and set the slave ID up in
 * dma_slave_config need to be updated, so that the slave_id field can be
 * removed from dma_slave_config. If this filter is used, the slave driver,
 * after calling dma_request_channel(), will also have to call
 * dmaengine_slave_config() with .direction, and either .src_addr or
 * .dst_addr set.
 *
 * NOTE: this filter doesn't support multiple DMAC drivers with the DMA_SLAVE
 * capability! If this becomes a requirement, hardware glue drivers, using
 * this service, would have to provide their own filters, which first would
 * check the device driver, similar to how other DMAC drivers, e.g.,
 * sa11x0-dma.c, do this, and only then, in case of a match, call this common
 * filter.
 * NOTE 2: This filter function is also used in the DT case by
 * shdma_of_xlate(). In that case the MID-RID value is used for slave channel
 * filtering and is passed to this function in the "arg" parameter.
 */
bool shdma_chan_filter(struct dma_chan *chan, void *arg)
{
	struct shdma_chan *schan;
	struct shdma_dev *sdev;
	int slave_id = (long)arg;
	int ret;

	/* Only support channels handled by this driver. */
	if (chan->device->device_alloc_chan_resources !=
	    shdma_alloc_chan_resources)
		return false;

	schan = to_shdma_chan(chan);
	sdev = to_shdma_dev(chan->device);

	/*
	 * For DT, the schan->slave_id field is generated by the
	 * set_slave function from the slave ID that is passed in
	 * from xlate. For the non-DT case, the slave ID is
	 * directly passed into the filter function by the driver.
	 */
	if (schan->dev->of_node) {
		ret = sdev->ops->set_slave(schan, slave_id, 0, true);
		if (ret < 0)
			return false;

		schan->real_slave_id = schan->slave_id;
		return true;
	}

	if (slave_id < 0) {
		/* No slave requested - arbitrary channel */
		dev_warn(sdev->dma_dev.dev, "invalid slave ID passed to dma_request_slave\n");
		return true;
	}

	if (slave_id >= slave_num)
		return false;

	ret = sdev->ops->set_slave(schan, slave_id, 0, true);
	if (ret < 0)
		return false;

	schan->real_slave_id = slave_id;

	return true;
}
EXPORT_SYMBOL(shdma_chan_filter);
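
/*
 * Usage sketch (illustrative only; fifo_phys_addr and slave_id are the
 * caller's, hypothetical values): per the rules in the comment above, a
 * slave driver on a non-DT platform would request and configure a channel
 * roughly like this:
 *
 *	dma_cap_mask_t mask;
 *	struct dma_chan *chan;
 *	struct dma_slave_config cfg = {
 *		.direction = DMA_DEV_TO_MEM,
 *		.src_addr = fifo_phys_addr,
 *		.src_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES,
 *	};
 *
 *	dma_cap_zero(mask);
 *	dma_cap_set(DMA_SLAVE, mask);
 *	chan = dma_request_channel(mask, shdma_chan_filter,
 *				   (void *)(long)slave_id);
 *	if (chan)
 *		dmaengine_slave_config(chan, &cfg);
 */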

static dma_async_tx_callback __ld_cleanup(struct shdma_chan *schan, bool all)
{
	struct shdma_desc *desc, *_desc;
	/* Is the "exposed" head of a chain acked? */
	bool head_acked = false;
	dma_cookie_t cookie = 0;
	dma_async_tx_callback callback = NULL;
	struct dmaengine_desc_callback cb;
	unsigned long flags;
	LIST_HEAD(cyclic_list);

	memset(&cb, 0, sizeof(cb));
	spin_lock_irqsave(&schan->chan_lock, flags);
	list_for_each_entry_safe(desc, _desc, &schan->ld_queue, node) {
		struct dma_async_tx_descriptor *tx = &desc->async_tx;

		BUG_ON(tx->cookie > 0 && tx->cookie != desc->cookie);
		BUG_ON(desc->mark != DESC_SUBMITTED &&
		       desc->mark != DESC_COMPLETED &&
		       desc->mark != DESC_WAITING);

		/*
		 * queue is ordered, and we use this loop to (1) clean up all
		 * completed descriptors, and to (2) update descriptor flags of
		 * any chunks in a (partially) completed chain
		 */
		if (!all && desc->mark == DESC_SUBMITTED &&
		    desc->cookie != cookie)
			break;

		if (tx->cookie > 0)
			cookie = tx->cookie;

		if (desc->mark == DESC_COMPLETED && desc->chunks == 1) {
			if (schan->dma_chan.completed_cookie != desc->cookie - 1)
				dev_dbg(schan->dev,
					"Completing cookie %d, expected %d\n",
					desc->cookie,
					schan->dma_chan.completed_cookie + 1);
			schan->dma_chan.completed_cookie = desc->cookie;
		}

		/* Call callback on the last chunk */
		if (desc->mark == DESC_COMPLETED && tx->callback) {
			desc->mark = DESC_WAITING;
			dmaengine_desc_get_callback(tx, &cb);
			callback = tx->callback;
			dev_dbg(schan->dev, "descriptor #%d@%p on %d callback\n",
				tx->cookie, tx, schan->id);
			BUG_ON(desc->chunks != 1);
			break;
		}

		if (tx->cookie > 0 || tx->cookie == -EBUSY) {
			if (desc->mark == DESC_COMPLETED) {
				BUG_ON(tx->cookie < 0);
				desc->mark = DESC_WAITING;
			}
			head_acked = async_tx_test_ack(tx);
		} else {
			switch (desc->mark) {
			case DESC_COMPLETED:
				desc->mark = DESC_WAITING;
				/* Fall through */
			case DESC_WAITING:
				if (head_acked)
					async_tx_ack(&desc->async_tx);
			}
		}

		dev_dbg(schan->dev, "descriptor %p #%d completed.\n",
			tx, tx->cookie);

		if (((desc->mark == DESC_COMPLETED ||
		      desc->mark == DESC_WAITING) &&
		     async_tx_test_ack(&desc->async_tx)) || all) {

			if (all || !desc->cyclic) {
				/* Remove from ld_queue list */
				desc->mark = DESC_IDLE;
				list_move(&desc->node, &schan->ld_free);
			} else {
				/* reuse as cyclic */
				desc->mark = DESC_SUBMITTED;
				list_move_tail(&desc->node, &cyclic_list);
			}

			if (list_empty(&schan->ld_queue)) {
				dev_dbg(schan->dev, "Bring down channel %d\n", schan->id);
				pm_runtime_put(schan->dev);
				schan->pm_state = SHDMA_PM_ESTABLISHED;
			} else if (schan->pm_state == SHDMA_PM_PENDING) {
				shdma_chan_xfer_ld_queue(schan);
			}
		}
	}

	if (all && !callback)
		/*
		 * Terminating and the loop completed normally: forgive
		 * uncompleted cookies
		 */
		schan->dma_chan.completed_cookie = schan->dma_chan.cookie;

	list_splice_tail(&cyclic_list, &schan->ld_queue);

	spin_unlock_irqrestore(&schan->chan_lock, flags);

	dmaengine_desc_callback_invoke(&cb, NULL);

	return callback;
}

/*
 * shdma_chan_ld_cleanup - Clean up link descriptors
 *
 * Clean up the ld_queue of a DMA channel.
 */
static void shdma_chan_ld_cleanup(struct shdma_chan *schan, bool all)
{
	while (__ld_cleanup(schan, all))
		;
}

/*
 * shdma_free_chan_resources - Free all resources of the channel.
 */
static void shdma_free_chan_resources(struct dma_chan *chan)
{
	struct shdma_chan *schan = to_shdma_chan(chan);
	struct shdma_dev *sdev = to_shdma_dev(chan->device);
	const struct shdma_ops *ops = sdev->ops;
	LIST_HEAD(list);

	/* Protect against ISR */
	spin_lock_irq(&schan->chan_lock);
	ops->halt_channel(schan);
	spin_unlock_irq(&schan->chan_lock);

	/* Now no new interrupts will occur */

	/* Prepared and not submitted descriptors can still be on the queue */
	if (!list_empty(&schan->ld_queue))
		shdma_chan_ld_cleanup(schan, true);

	if (schan->slave_id >= 0) {
		/* The caller is holding dma_list_mutex */
		clear_bit(schan->slave_id, shdma_slave_used);
		chan->private = NULL;
	}

	schan->real_slave_id = 0;

	spin_lock_irq(&schan->chan_lock);

	list_splice_init(&schan->ld_free, &list);
	schan->desc_num = 0;

	spin_unlock_irq(&schan->chan_lock);

	kfree(schan->desc);
}

/**
 * shdma_add_desc - get, set up and return one transfer descriptor
 * @schan:	DMA channel
 * @flags:	DMA transfer flags
 * @dst:	destination DMA address, incremented when direction equals
 *		DMA_DEV_TO_MEM or DMA_MEM_TO_MEM
 * @src:	source DMA address, incremented when direction equals
 *		DMA_MEM_TO_DEV or DMA_MEM_TO_MEM
 * @len:	DMA transfer length
 * @first:	if NULL, set to the current descriptor and cookie set to -EBUSY
 * @direction:	needed for slave DMA to decide which address to keep constant,
 *		equals DMA_MEM_TO_MEM for MEMCPY
 * Returns the new descriptor on success or NULL on error
 * Locks: called with schan->chan_lock held
 */
static struct shdma_desc *shdma_add_desc(struct shdma_chan *schan,
	unsigned long flags, dma_addr_t *dst, dma_addr_t *src, size_t *len,
	struct shdma_desc **first, enum dma_transfer_direction direction)
{
	struct shdma_dev *sdev = to_shdma_dev(schan->dma_chan.device);
	const struct shdma_ops *ops = sdev->ops;
	struct shdma_desc *new;
	size_t copy_size = *len;

	if (!copy_size)
		return NULL;

	/* Allocate the link descriptor from the free list */
	new = shdma_get_desc(schan);
	if (!new) {
		dev_err(schan->dev, "No free link descriptor available\n");
		return NULL;
	}

	ops->desc_setup(schan, new, *src, *dst, &copy_size);

	if (!*first) {
		/* First desc */
		new->async_tx.cookie = -EBUSY;
		*first = new;
	} else {
		/* Other desc - invisible to the user */
		new->async_tx.cookie = -EINVAL;
	}

	dev_dbg(schan->dev,
		"chaining (%zu/%zu)@%pad -> %pad with %p, cookie %d\n",
		copy_size, *len, src, dst, &new->async_tx,
		new->async_tx.cookie);

	new->mark = DESC_PREPARED;
	new->async_tx.flags = flags;
	new->direction = direction;
	new->partial = 0;

	*len -= copy_size;
	if (direction == DMA_MEM_TO_MEM || direction == DMA_MEM_TO_DEV)
		*src += copy_size;
	if (direction == DMA_MEM_TO_MEM || direction == DMA_DEV_TO_MEM)
		*dst += copy_size;

	return new;
}

/*
 * shdma_prep_sg - prepare transfer descriptors from an SG list
 *
 * Common routine for public (MEMCPY) and slave DMA. The MEMCPY case is also
 * converted to scatter-gather to guarantee consistent locking and a correct
 * list manipulation. For slave DMA direction carries the usual meaning, and,
 * logically, the SG list is RAM and the addr variable contains the slave
 * address, e.g., the FIFO I/O register. For MEMCPY direction equals
 * DMA_MEM_TO_MEM and the SG list contains only one element and points at the
 * source buffer.
 */
static struct dma_async_tx_descriptor *shdma_prep_sg(struct shdma_chan *schan,
	struct scatterlist *sgl, unsigned int sg_len, dma_addr_t *addr,
	enum dma_transfer_direction direction, unsigned long flags, bool cyclic)
{
	struct scatterlist *sg;
	struct shdma_desc *first = NULL, *new = NULL /* compiler... */;
	LIST_HEAD(tx_list);
	int chunks = 0;
	unsigned long irq_flags;
	int i;

	for_each_sg(sgl, sg, sg_len, i)
		chunks += DIV_ROUND_UP(sg_dma_len(sg), schan->max_xfer_len);

	/* Have to lock the whole loop to protect against concurrent release */
	spin_lock_irqsave(&schan->chan_lock, irq_flags);

	/*
	 * Chaining:
	 * first descriptor is what user is dealing with in all API calls, its
	 * cookie is at first set to -EBUSY, at tx-submit to a positive
	 * number
	 * if more than one chunk is needed further chunks have cookie = -EINVAL
	 * the last chunk, if not equal to the first, has cookie = -ENOSPC
	 * all chunks are linked onto the tx_list head with their .node heads
	 * only during this function, then they are immediately spliced
	 * back onto the free list in form of a chain
	 */
	for_each_sg(sgl, sg, sg_len, i) {
		dma_addr_t sg_addr = sg_dma_address(sg);
		size_t len = sg_dma_len(sg);

		if (!len)
			goto err_get_desc;

		do {
			dev_dbg(schan->dev, "Add SG #%d@%p[%zu], dma %pad\n",
				i, sg, len, &sg_addr);

			if (direction == DMA_DEV_TO_MEM)
				new = shdma_add_desc(schan, flags,
						&sg_addr, addr, &len, &first,
						direction);
			else
				new = shdma_add_desc(schan, flags,
						addr, &sg_addr, &len, &first,
						direction);
			if (!new)
				goto err_get_desc;

			new->cyclic = cyclic;
			if (cyclic)
				new->chunks = 1;
			else
				new->chunks = chunks--;
			list_add_tail(&new->node, &tx_list);
		} while (len);
	}

	if (new != first)
		new->async_tx.cookie = -ENOSPC;

	/* Put them back on the free list, so, they don't get lost */
	list_splice_tail(&tx_list, &schan->ld_free);

	spin_unlock_irqrestore(&schan->chan_lock, irq_flags);

	return &first->async_tx;

err_get_desc:
	list_for_each_entry(new, &tx_list, node)
		new->mark = DESC_IDLE;
	list_splice(&tx_list, &schan->ld_free);

	spin_unlock_irqrestore(&schan->chan_lock, irq_flags);

	return NULL;
}

static struct dma_async_tx_descriptor *shdma_prep_memcpy(
	struct dma_chan *chan, dma_addr_t dma_dest, dma_addr_t dma_src,
	size_t len, unsigned long flags)
{
	struct shdma_chan *schan = to_shdma_chan(chan);
	struct scatterlist sg;

	if (!chan || !len)
		return NULL;

	BUG_ON(!schan->desc_num);

	sg_init_table(&sg, 1);
	sg_set_page(&sg, pfn_to_page(PFN_DOWN(dma_src)), len,
		    offset_in_page(dma_src));
	sg_dma_address(&sg) = dma_src;
	sg_dma_len(&sg) = len;

	return shdma_prep_sg(schan, &sg, 1, &dma_dest, DMA_MEM_TO_MEM,
			     flags, false);
}
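
/*
 * Usage sketch (illustrative only): a MEMCPY client drives this through the
 * generic dmaengine API; "chan" is a channel obtained with DMA_MEMCPY in the
 * capability mask, and both addresses are DMA-mapped by the caller:
 *
 *	struct dma_async_tx_descriptor *tx;
 *	dma_cookie_t cookie;
 *
 *	tx = dmaengine_prep_dma_memcpy(chan, dst_dma, src_dma, len,
 *				       DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
 *	if (tx) {
 *		cookie = dmaengine_submit(tx);
 *		dma_async_issue_pending(chan);
 *	}
 */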

static struct dma_async_tx_descriptor *shdma_prep_slave_sg(
	struct dma_chan *chan, struct scatterlist *sgl, unsigned int sg_len,
	enum dma_transfer_direction direction, unsigned long flags, void *context)
{
	struct shdma_chan *schan = to_shdma_chan(chan);
	struct shdma_dev *sdev = to_shdma_dev(schan->dma_chan.device);
	const struct shdma_ops *ops = sdev->ops;
	int slave_id = schan->slave_id;
	dma_addr_t slave_addr;

	if (!chan)
		return NULL;

	BUG_ON(!schan->desc_num);

	/* Someone calling slave DMA on a generic channel? */
	if (slave_id < 0 || !sg_len) {
		dev_warn(schan->dev, "%s: bad parameter: len=%d, id=%d\n",
			 __func__, sg_len, slave_id);
		return NULL;
	}

	slave_addr = ops->slave_addr(schan);

	return shdma_prep_sg(schan, sgl, sg_len, &slave_addr,
			     direction, flags, false);
}

#define SHDMA_MAX_SG_LEN 32

static struct dma_async_tx_descriptor *shdma_prep_dma_cyclic(
	struct dma_chan *chan, dma_addr_t buf_addr, size_t buf_len,
	size_t period_len, enum dma_transfer_direction direction,
	unsigned long flags)
{
	struct shdma_chan *schan = to_shdma_chan(chan);
	struct shdma_dev *sdev = to_shdma_dev(schan->dma_chan.device);
	struct dma_async_tx_descriptor *desc;
	const struct shdma_ops *ops = sdev->ops;
	unsigned int sg_len = buf_len / period_len;
	int slave_id = schan->slave_id;
	dma_addr_t slave_addr;
	struct scatterlist *sgl;
	int i;

	if (!chan)
		return NULL;

	BUG_ON(!schan->desc_num);

	if (sg_len > SHDMA_MAX_SG_LEN) {
		dev_err(schan->dev, "sg length %d exceeds limit %d",
			sg_len, SHDMA_MAX_SG_LEN);
		return NULL;
	}

	/* Someone calling slave DMA on a generic channel? */
	if (slave_id < 0 || (buf_len < period_len)) {
		dev_warn(schan->dev,
			 "%s: bad parameter: buf_len=%zu, period_len=%zu, id=%d\n",
			 __func__, buf_len, period_len, slave_id);
		return NULL;
	}

	slave_addr = ops->slave_addr(schan);

	/*
	 * Allocate the sg list dynamically as it would consume too much stack
	 * space.
	 */
	sgl = kcalloc(sg_len, sizeof(*sgl), GFP_KERNEL);
	if (!sgl)
		return NULL;

	sg_init_table(sgl, sg_len);

	for (i = 0; i < sg_len; i++) {
		dma_addr_t src = buf_addr + (period_len * i);

		sg_set_page(&sgl[i], pfn_to_page(PFN_DOWN(src)), period_len,
			    offset_in_page(src));
		sg_dma_address(&sgl[i]) = src;
		sg_dma_len(&sgl[i]) = period_len;
	}

	desc = shdma_prep_sg(schan, sgl, sg_len, &slave_addr,
			     direction, flags, true);

	kfree(sgl);
	return desc;
}
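
/*
 * Usage sketch (illustrative only; "period_done" is a hypothetical callback):
 * an audio-style client sets up a cyclic transfer through the generic
 * dmaengine wrapper; the number of periods (buf_len / period_len) must not
 * exceed SHDMA_MAX_SG_LEN, as checked above:
 *
 *	struct dma_async_tx_descriptor *tx;
 *
 *	tx = dmaengine_prep_dma_cyclic(chan, buf_dma, buf_len, period_len,
 *				       DMA_DEV_TO_MEM, DMA_PREP_INTERRUPT);
 *	if (tx) {
 *		tx->callback = period_done;	(invoked once per period)
 *		dmaengine_submit(tx);
 *		dma_async_issue_pending(chan);
 *	}
 */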

static int shdma_terminate_all(struct dma_chan *chan)
{
	struct shdma_chan *schan = to_shdma_chan(chan);
	struct shdma_dev *sdev = to_shdma_dev(chan->device);
	const struct shdma_ops *ops = sdev->ops;
	unsigned long flags;

	spin_lock_irqsave(&schan->chan_lock, flags);
	ops->halt_channel(schan);

	if (ops->get_partial && !list_empty(&schan->ld_queue)) {
		/* Record partial transfer */
		struct shdma_desc *desc = list_first_entry(&schan->ld_queue,
						struct shdma_desc, node);
		desc->partial = ops->get_partial(schan, desc);
	}

	spin_unlock_irqrestore(&schan->chan_lock, flags);

	shdma_chan_ld_cleanup(schan, true);

	return 0;
}

static int shdma_config(struct dma_chan *chan,
			struct dma_slave_config *config)
{
	struct shdma_chan *schan = to_shdma_chan(chan);

	/*
	 * So far only .slave_id is used, but the slave drivers are
	 * encouraged to also set a transfer direction and an address.
	 */
	if (!config)
		return -EINVAL;

	/*
	 * Overriding the slave_id through dma_slave_config is deprecated,
	 * but possibly some out-of-tree drivers still do it.
	 */
	if (WARN_ON_ONCE(config->slave_id &&
			 config->slave_id != schan->real_slave_id))
		schan->real_slave_id = config->slave_id;

	/*
	 * We could lock this, but you shouldn't be configuring the
	 * channel while using it...
	 */
	return shdma_setup_slave(schan,
				 config->direction == DMA_DEV_TO_MEM ?
				 config->src_addr : config->dst_addr);
}

static void shdma_issue_pending(struct dma_chan *chan)
{
	struct shdma_chan *schan = to_shdma_chan(chan);

	spin_lock_irq(&schan->chan_lock);
	if (schan->pm_state == SHDMA_PM_ESTABLISHED)
		shdma_chan_xfer_ld_queue(schan);
	else
		schan->pm_state = SHDMA_PM_PENDING;
	spin_unlock_irq(&schan->chan_lock);
}

static enum dma_status shdma_tx_status(struct dma_chan *chan,
					dma_cookie_t cookie,
					struct dma_tx_state *txstate)
{
	struct shdma_chan *schan = to_shdma_chan(chan);
	enum dma_status status;
	unsigned long flags;

	shdma_chan_ld_cleanup(schan, false);

	spin_lock_irqsave(&schan->chan_lock, flags);

	status = dma_cookie_status(chan, cookie, txstate);

	/*
	 * If we don't find the cookie on the queue, it has been aborted and
	 * we have to report an error.
	 */
	if (status != DMA_COMPLETE) {
		struct shdma_desc *sdesc;
		status = DMA_ERROR;
		list_for_each_entry(sdesc, &schan->ld_queue, node)
			if (sdesc->cookie == cookie) {
				status = DMA_IN_PROGRESS;
				break;
			}
	}

	spin_unlock_irqrestore(&schan->chan_lock, flags);

	return status;
}

/* Called from error IRQ or NMI */
bool shdma_reset(struct shdma_dev *sdev)
{
	const struct shdma_ops *ops = sdev->ops;
	struct shdma_chan *schan;
	unsigned int handled = 0;
	int i;

	/* Reset all channels */
	shdma_for_each_chan(schan, sdev, i) {
		struct shdma_desc *sdesc;
		LIST_HEAD(dl);

		if (!schan)
			continue;

		spin_lock(&schan->chan_lock);

		/* Stop the channel */
		ops->halt_channel(schan);

		list_splice_init(&schan->ld_queue, &dl);

		if (!list_empty(&dl)) {
			dev_dbg(schan->dev, "Bring down channel %d\n", schan->id);
			pm_runtime_put(schan->dev);
		}
		schan->pm_state = SHDMA_PM_ESTABLISHED;

		spin_unlock(&schan->chan_lock);

		/* Complete all */
		list_for_each_entry(sdesc, &dl, node) {
			struct dma_async_tx_descriptor *tx = &sdesc->async_tx;

			sdesc->mark = DESC_IDLE;
			dmaengine_desc_get_callback_invoke(tx, NULL);
		}

		spin_lock(&schan->chan_lock);
		list_splice(&dl, &schan->ld_free);
		spin_unlock(&schan->chan_lock);

		handled++;
	}

	return !!handled;
}
EXPORT_SYMBOL(shdma_reset);
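
/*
 * Usage sketch (illustrative only; the handler and helper names are
 * hypothetical): a hardware glue driver typically calls this from its error
 * interrupt handler after quiescing the controller:
 *
 *	static irqreturn_t my_err_irq(int irq, void *data)
 *	{
 *		struct shdma_dev *sdev = data;
 *
 *		my_halt_controller(sdev);	(controller-specific)
 *		return shdma_reset(sdev) ? IRQ_HANDLED : IRQ_NONE;
 *	}
 */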

static irqreturn_t chan_irq(int irq, void *dev)
{
	struct shdma_chan *schan = dev;
	const struct shdma_ops *ops =
		to_shdma_dev(schan->dma_chan.device)->ops;
	irqreturn_t ret;

	spin_lock(&schan->chan_lock);

	ret = ops->chan_irq(schan, irq) ? IRQ_WAKE_THREAD : IRQ_NONE;

	spin_unlock(&schan->chan_lock);

	return ret;
}

static irqreturn_t chan_irqt(int irq, void *dev)
{
	struct shdma_chan *schan = dev;
	const struct shdma_ops *ops =
		to_shdma_dev(schan->dma_chan.device)->ops;
	struct shdma_desc *sdesc;

	spin_lock_irq(&schan->chan_lock);
	list_for_each_entry(sdesc, &schan->ld_queue, node) {
		if (sdesc->mark == DESC_SUBMITTED &&
		    ops->desc_completed(schan, sdesc)) {
			dev_dbg(schan->dev, "done #%d@%p\n",
				sdesc->async_tx.cookie, &sdesc->async_tx);
			sdesc->mark = DESC_COMPLETED;
			break;
		}
	}
	/* Next desc */
	shdma_chan_xfer_ld_queue(schan);
	spin_unlock_irq(&schan->chan_lock);

	shdma_chan_ld_cleanup(schan, false);

	return IRQ_HANDLED;
}

int shdma_request_irq(struct shdma_chan *schan, int irq,
		      unsigned long flags, const char *name)
{
	int ret = devm_request_threaded_irq(schan->dev, irq, chan_irq,
					    chan_irqt, flags, name, schan);

	schan->irq = ret < 0 ? ret : irq;

	return ret;
}
EXPORT_SYMBOL(shdma_request_irq);
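
/*
 * Usage sketch (illustrative only; "my-dma" is a hypothetical name): a glue
 * driver wires a channel's interrupt up in its probe path; the handler pair
 * above then drives completion:
 *
 *	err = shdma_request_irq(schan, irq, IRQF_SHARED, "my-dma");
 *	if (err < 0)
 *		return err;	(on success, irq is stored in schan->irq)
 */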

void shdma_chan_probe(struct shdma_dev *sdev,
		      struct shdma_chan *schan, int id)
{
	schan->pm_state = SHDMA_PM_ESTABLISHED;

	/* reference struct dma_device */
	schan->dma_chan.device = &sdev->dma_dev;
	dma_cookie_init(&schan->dma_chan);

	schan->dev = sdev->dma_dev.dev;
	schan->id = id;

	if (!schan->max_xfer_len)
		schan->max_xfer_len = PAGE_SIZE;

	spin_lock_init(&schan->chan_lock);

	/* Initialize descriptor management lists */
	INIT_LIST_HEAD(&schan->ld_queue);
	INIT_LIST_HEAD(&schan->ld_free);

	/* Add the channel to the DMA device channel list */
	list_add_tail(&schan->dma_chan.device_node,
		      &sdev->dma_dev.channels);
	sdev->schan[id] = schan;
}
EXPORT_SYMBOL(shdma_chan_probe);

void shdma_chan_remove(struct shdma_chan *schan)
{
	list_del(&schan->dma_chan.device_node);
}
EXPORT_SYMBOL(shdma_chan_remove);

int shdma_init(struct device *dev, struct shdma_dev *sdev,
	       int chan_num)
{
	struct dma_device *dma_dev = &sdev->dma_dev;

	/*
	 * Require all call-backs for now; they can trivially be made optional
	 * later as required.
	 */
	if (!sdev->ops ||
	    !sdev->desc_size ||
	    !sdev->ops->embedded_desc ||
	    !sdev->ops->start_xfer ||
	    !sdev->ops->setup_xfer ||
	    !sdev->ops->set_slave ||
	    !sdev->ops->desc_setup ||
	    !sdev->ops->slave_addr ||
	    !sdev->ops->channel_busy ||
	    !sdev->ops->halt_channel ||
	    !sdev->ops->desc_completed)
		return -EINVAL;

	sdev->schan = kcalloc(chan_num, sizeof(*sdev->schan), GFP_KERNEL);
	if (!sdev->schan)
		return -ENOMEM;

	INIT_LIST_HEAD(&dma_dev->channels);

	/* Common and MEMCPY operations */
	dma_dev->device_alloc_chan_resources
		= shdma_alloc_chan_resources;
	dma_dev->device_free_chan_resources = shdma_free_chan_resources;
	dma_dev->device_prep_dma_memcpy = shdma_prep_memcpy;
	dma_dev->device_tx_status = shdma_tx_status;
	dma_dev->device_issue_pending = shdma_issue_pending;

	/* Compulsory for DMA_SLAVE fields */
	dma_dev->device_prep_slave_sg = shdma_prep_slave_sg;
	dma_dev->device_prep_dma_cyclic = shdma_prep_dma_cyclic;
	dma_dev->device_config = shdma_config;
	dma_dev->device_terminate_all = shdma_terminate_all;

	dma_dev->dev = dev;

	return 0;
}
EXPORT_SYMBOL(shdma_init);
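
/*
 * Registration sketch (illustrative only; my_shdma_ops, struct my_desc and
 * my_chans are hypothetical glue-driver names): a hardware glue driver fills
 * in its shdma_ops, then registers with this library and the dmaengine core:
 *
 *	static const struct shdma_ops my_shdma_ops = {
 *		.embedded_desc	= my_embedded_desc,
 *		.start_xfer	= my_start_xfer,
 *		... (all callbacks checked above must be set)
 *	};
 *
 *	sdev->ops = &my_shdma_ops;
 *	sdev->desc_size = sizeof(struct my_desc);
 *	err = shdma_init(dev, sdev, chan_num);
 *	if (!err) {
 *		for (i = 0; i < chan_num; i++)
 *			shdma_chan_probe(sdev, &my_chans[i].shdma_chan, i);
 *		err = dma_async_device_register(&sdev->dma_dev);
 *	}
 */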

void shdma_cleanup(struct shdma_dev *sdev)
{
	kfree(sdev->schan);
}
EXPORT_SYMBOL(shdma_cleanup);

static int __init shdma_enter(void)
{
	shdma_slave_used = kzalloc(DIV_ROUND_UP(slave_num, BITS_PER_LONG) *
				   sizeof(long), GFP_KERNEL);
	if (!shdma_slave_used)
		return -ENOMEM;
	return 0;
}
module_init(shdma_enter);

static void __exit shdma_exit(void)
{
	kfree(shdma_slave_used);
}
module_exit(shdma_exit);

MODULE_LICENSE("GPL v2");
MODULE_DESCRIPTION("SH-DMA driver base library");
MODULE_AUTHOR("Guennadi Liakhovetski <g.liakhovetski@gmx.de>");