/*
 * drivers/dma/ste_dma40.c
 *
 * Copyright (C) ST-Ericsson 2007-2010
 * License terms: GNU General Public License (GPL) version 2
 * Author: Per Friden <per.friden@stericsson.com>
 * Author: Jonas Aaberg <jonas.aberg@stericsson.com>
 */
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/dmaengine.h>
#include <linux/platform_device.h>
#include <linux/clk.h>
#include <linux/delay.h>

#include <plat/ste_dma40.h>

#include "ste_dma40_ll.h"
#define D40_NAME "dma40"

#define D40_PHY_CHAN -1

/* For masking out/in 2 bit channel positions */
#define D40_CHAN_POS(chan)	(2 * (chan / 2))
#define D40_CHAN_POS_MASK(chan)	(0x3 << D40_CHAN_POS(chan))
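/*
 * Worked example (illustrative only): two channels share each mode
 * register, so channel 5 sits at bit position D40_CHAN_POS(5) =
 * 2 * (5 / 2) = 4, and D40_CHAN_POS_MASK(5) = 0x3 << 4 = 0x30 masks
 * out its two mode bits.
 */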
/* Maximum iterations taken before giving up suspending a channel */
#define D40_SUSPEND_MAX_IT 500

#define D40_ALLOC_FREE		(1 << 31)
#define D40_ALLOC_PHY		(1 << 30)
#define D40_ALLOC_LOG_FREE	0

/* Hardware designer of the block */
#define D40_PERIPHID2_DESIGNER 0x8
/**
 * enum d40_command - The different commands and/or statuses.
 *
 * @D40_DMA_STOP: DMA channel command STOP or status STOPPED.
 * @D40_DMA_RUN: The DMA channel is RUNNING or the command is RUN.
 * @D40_DMA_SUSPEND_REQ: Request the DMA to SUSPEND as soon as possible.
 * @D40_DMA_SUSPENDED: The DMA channel is SUSPENDED.
 */
enum d40_command {
	D40_DMA_STOP		= 0,
	D40_DMA_RUN		= 1,
	D40_DMA_SUSPEND_REQ	= 2,
	D40_DMA_SUSPENDED	= 3
};
/**
 * struct d40_lli_pool - Structure for keeping LLIs in memory
 *
 * @base: Pointer to memory area when the pre_alloc_lli's are not large
 * enough, i.e. bigger than the most common case, 1 dst and 1 src. NULL if
 * pre_alloc_lli is used.
 * @size: The size in bytes of the memory at base or the size of pre_alloc_lli.
 * @pre_alloc_lli: Pre-allocated area for the most common case of transfers,
 * one buffer to one buffer.
 */
struct d40_lli_pool {
	void	*base;
	int	 size;
	/* Space for dst and src, plus an extra for padding */
	u8	 pre_alloc_lli[3 * sizeof(struct d40_phy_lli)];
};
/**
 * struct d40_desc - A descriptor is one DMA job.
 *
 * @lli_phy: LLI settings for physical channel. Both src and dst point
 * into the lli_pool, to base if lli_len > 1 or to pre_alloc_lli if
 * lli_len equals one.
 * @lli_log: Same as above but for logical channels.
 * @lli_pool: The pool with two entries pre-allocated.
 * @lli_len: Number of llis of current descriptor.
 * @lli_count: Number of transferred llis.
 * @lli_tx_len: Max number of LLIs per transfer; there can be
 * many transfers for one descriptor.
 * @txd: DMA engine struct. Used, among other things, for communication
 * during a transfer.
 * @node: List entry.
 * @dir: The transfer direction of this job.
 * @is_in_client_list: true if the client owns this descriptor.
 *
 * This descriptor is used for both logical and physical transfers.
 */
struct d40_desc {
	/* LLI physical */
	struct d40_phy_lli_bidir	 lli_phy;
	/* LLI logical */
	struct d40_log_lli_bidir	 lli_log;

	struct d40_lli_pool		 lli_pool;
	int				 lli_len;
	int				 lli_count;
	u32				 lli_tx_len;

	struct dma_async_tx_descriptor	 txd;
	struct list_head		 node;

	enum dma_data_direction		 dir;
	bool				 is_in_client_list;
};
/**
 * struct d40_lcla_pool - LCLA pool settings and data.
 *
 * @base: The virtual address of LCLA.
 * @phy: Physical base address of LCLA.
 * @base_size: Size of LCLA.
 * @lock: Lock to protect the content in this struct.
 * @alloc_map: Mapping between physical channel and LCLA entries.
 * @num_blocks: The number of entries of alloc_map. Equal to the
 * number of physical channels.
 */
struct d40_lcla_pool {
	void		*base;
	dma_addr_t	 phy;
	resource_size_t	 base_size;
	spinlock_t	 lock;
	u32		*alloc_map;
	int		 num_blocks;
};
/**
 * struct d40_phy_res - struct for handling eventlines mapped to physical
 * channels.
 *
 * @lock: A lock protecting this entity.
 * @num: The physical channel number of this entity.
 * @allocated_src: Bit mapped to show which src event lines are mapped to
 * this physical channel. Can also be free or physically allocated.
 * @allocated_dst: Same as for src but is dst.
 * allocated_dst and allocated_src use the D40_ALLOC* defines as well as
 * the event line number. Both allocated_src and allocated_dst can not be
 * allocated to a physical channel, since the interrupt handler then has
 * no way of figuring out which one the interrupt belongs to.
 */
struct d40_phy_res {
	spinlock_t lock;
	int	   num;
	u32	   allocated_src;
	u32	   allocated_dst;
};
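/*
 * Illustration of the allocation encoding (derived from the defines above):
 * allocated_src == D40_ALLOC_FREE means the src side is completely free,
 * D40_ALLOC_PHY means it is taken by a physical channel, and any other
 * value is a bitmask of the logical event lines allocated on it
 * (D40_ALLOC_LOG_FREE, i.e. 0, when no event line is set).
 */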
/**
 * struct d40_chan - Struct that describes a channel.
 *
 * @lock: A spinlock to protect this struct.
 * @log_num: The logical number, if any of this channel.
 * @completed: Starts with 1, after first interrupt it is set to dma engine's
 * current cookie.
 * @pending_tx: The number of pending transfers. Used between interrupt handler
 * and tasklet.
 * @busy: Set to true when transfer is ongoing on this channel.
 * @phy_chan: Pointer to physical channel which this instance runs on. If this
 * pointer is NULL, then the channel is not allocated.
 * @chan: DMA engine handle.
 * @tasklet: Tasklet that gets scheduled from interrupt context to complete a
 * transfer and call client callback.
 * @client: Client owned descriptor list.
 * @active: Active descriptor.
 * @queue: Queued jobs.
 * @dma_cfg: The client configuration of this dma channel.
 * @base: Pointer to the device instance struct.
 * @src_def_cfg: Default cfg register setting for src.
 * @dst_def_cfg: Default cfg register setting for dst.
 * @log_def: Default logical channel settings.
 * @lcla: Space for one dst src pair for logical channel transfers.
 * @lcpa: Pointer to dst and src lcpa settings.
 *
 * This struct can either "be" a logical or a physical channel.
 */
struct d40_chan {
	spinlock_t			 lock;
	int				 log_num;
	/* ID of the most recent completed transfer */
	int				 completed;
	int				 pending_tx;
	bool				 busy;
	struct d40_phy_res		*phy_chan;
	struct dma_chan			 chan;
	struct tasklet_struct		 tasklet;
	struct list_head		 client;
	struct list_head		 active;
	struct list_head		 queue;
	struct stedma40_chan_cfg	 dma_cfg;
	struct d40_base			*base;
	/* Default register configurations */
	u32				 src_def_cfg;
	u32				 dst_def_cfg;
	struct d40_def_lcsp		 log_def;
	struct d40_lcla_elem		 lcla;
	struct d40_log_lli_full		*lcpa;
};
/**
 * struct d40_base - The big global struct, one for each probe'd instance.
 *
 * @interrupt_lock: Lock used to make sure one interrupt is handled at a time.
 * @execmd_lock: Lock for execute command usage since several channels share
 * the same physical register.
 * @dev: The device structure.
 * @virtbase: The virtual base address of the DMA's register.
 * @clk: Pointer to the DMA clock structure.
 * @phy_start: Physical memory start of the DMA registers.
 * @phy_size: Size of the DMA register map.
 * @irq: The IRQ number.
 * @num_phy_chans: The number of physical channels. Read from HW. This
 * is the number of available channels for this driver, not counting "Secure
 * mode" allocated physical channels.
 * @num_log_chans: The number of logical channels. Calculated from
 * num_phy_chans.
 * @dma_both: dma_device channels that can do both memcpy and slave transfers.
 * @dma_slave: dma_device channels that can only do slave transfers.
 * @dma_memcpy: dma_device channels that can only do memcpy transfers.
 * @phy_chans: Room for all possible physical channels in system.
 * @log_chans: Room for all possible logical channels in system.
 * @lookup_log_chans: Used to map interrupt number to logical channel. Points
 * to log_chans entries.
 * @lookup_phy_chans: Used to map interrupt number to physical channel. Points
 * to phy_chans entries.
 * @plat_data: Pointer to provided platform_data which is the driver
 * configuration.
 * @phy_res: Vector containing all physical channels.
 * @lcla_pool: lcla pool settings and data.
 * @lcpa_base: The virtual mapped address of LCPA.
 * @phy_lcpa: The physical address of the LCPA.
 * @lcpa_size: The size of the LCPA area.
 * @desc_slab: cache for descriptors.
 */
struct d40_base {
	spinlock_t			  interrupt_lock;
	spinlock_t			  execmd_lock;
	struct device			 *dev;
	void __iomem			 *virtbase;
	struct clk			 *clk;
	phys_addr_t			  phy_start;
	resource_size_t			  phy_size;
	int				  irq;
	int				  num_phy_chans;
	int				  num_log_chans;
	struct dma_device		  dma_both;
	struct dma_device		  dma_slave;
	struct dma_device		  dma_memcpy;
	struct d40_chan			 *phy_chans;
	struct d40_chan			 *log_chans;
	struct d40_chan			**lookup_log_chans;
	struct d40_chan			**lookup_phy_chans;
	struct stedma40_platform_data	 *plat_data;
	/* Physical half channels */
	struct d40_phy_res		 *phy_res;
	struct d40_lcla_pool		  lcla_pool;
	void				 *lcpa_base;
	dma_addr_t			  phy_lcpa;
	resource_size_t			  lcpa_size;
	struct kmem_cache		 *desc_slab;
};
/**
 * struct d40_interrupt_lookup - lookup table for interrupt handler
 *
 * @src: Interrupt mask register.
 * @clr: Interrupt clear register.
 * @is_error: true if this is an error interrupt.
 * @offset: start delta in the lookup_log_chans in d40_base. If equal to
 * D40_PHY_CHAN, the lookup_phy_chans shall be used instead.
 */
struct d40_interrupt_lookup {
	u32 src;
	u32 clr;
	bool is_error;
	int offset;
};
/**
 * struct d40_reg_val - simple lookup struct
 *
 * @reg: The register.
 * @val: The value that belongs to the register in reg.
 */
struct d40_reg_val {
	unsigned int reg;
	unsigned int val;
};
static int d40_pool_lli_alloc(struct d40_desc *d40d,
			      int lli_len, bool is_log)
{
	u32 align;
	void *base;

	if (is_log)
		align = sizeof(struct d40_log_lli);
	else
		align = sizeof(struct d40_phy_lli);

	if (lli_len == 1) {
		base = d40d->lli_pool.pre_alloc_lli;
		d40d->lli_pool.size = sizeof(d40d->lli_pool.pre_alloc_lli);
		d40d->lli_pool.base = NULL;
	} else {
		d40d->lli_pool.size = ALIGN(lli_len * 2 * align, align);

		base = kmalloc(d40d->lli_pool.size + align, GFP_NOWAIT);
		d40d->lli_pool.base = base;

		if (d40d->lli_pool.base == NULL)
			return -ENOMEM;
	}

	if (is_log) {
		d40d->lli_log.src = PTR_ALIGN((struct d40_log_lli *) base,
					      align);
		d40d->lli_log.dst = PTR_ALIGN(d40d->lli_log.src + lli_len,
					      align);
	} else {
		d40d->lli_phy.src = PTR_ALIGN((struct d40_phy_lli *)base,
					      align);
		d40d->lli_phy.dst = PTR_ALIGN(d40d->lli_phy.src + lli_len,
					      align);

		d40d->lli_phy.src_addr = virt_to_phys(d40d->lli_phy.src);
		d40d->lli_phy.dst_addr = virt_to_phys(d40d->lli_phy.dst);
	}

	return 0;
}
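/*
 * Note on the allocation above: kmalloc() makes no alignment promise
 * relative to the LLI size, so one extra "align" worth of bytes is
 * requested and PTR_ALIGN() then places src and dst on an LLI boundary
 * inside the buffer.
 */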
static void d40_pool_lli_free(struct d40_desc *d40d)
{
	kfree(d40d->lli_pool.base);
	d40d->lli_pool.base = NULL;
	d40d->lli_pool.size = 0;
	d40d->lli_log.src = NULL;
	d40d->lli_log.dst = NULL;
	d40d->lli_phy.src = NULL;
	d40d->lli_phy.dst = NULL;
	d40d->lli_phy.src_addr = 0;
	d40d->lli_phy.dst_addr = 0;
}
static dma_cookie_t d40_assign_cookie(struct d40_chan *d40c,
				      struct d40_desc *desc)
{
	dma_cookie_t cookie = d40c->chan.cookie;

	if (++cookie < 0)
		cookie = 1;

	d40c->chan.cookie = cookie;
	desc->txd.cookie = cookie;

	return cookie;
}
static void d40_desc_remove(struct d40_desc *d40d)
{
	list_del(&d40d->node);
}
static struct d40_desc *d40_desc_get(struct d40_chan *d40c)
{
	struct d40_desc *d;
	struct d40_desc *_d;

	if (!list_empty(&d40c->client)) {
		list_for_each_entry_safe(d, _d, &d40c->client, node)
			if (async_tx_test_ack(&d->txd)) {
				d40_pool_lli_free(d);
				d40_desc_remove(d);
				break;
			}
	} else {
		d = kmem_cache_alloc(d40c->base->desc_slab, GFP_NOWAIT);
		if (d != NULL) {
			memset(d, 0, sizeof(struct d40_desc));
			INIT_LIST_HEAD(&d->node);
		}
	}
	return d;
}
static void d40_desc_free(struct d40_chan *d40c, struct d40_desc *d40d)
{
	kmem_cache_free(d40c->base->desc_slab, d40d);
}
static void d40_desc_submit(struct d40_chan *d40c, struct d40_desc *desc)
{
	list_add_tail(&desc->node, &d40c->active);
}
static struct d40_desc *d40_first_active_get(struct d40_chan *d40c)
{
	struct d40_desc *d;

	if (list_empty(&d40c->active))
		return NULL;

	d = list_first_entry(&d40c->active,
			     struct d40_desc,
			     node);
	return d;
}
static void d40_desc_queue(struct d40_chan *d40c, struct d40_desc *desc)
{
	list_add_tail(&desc->node, &d40c->queue);
}
static struct d40_desc *d40_first_queued(struct d40_chan *d40c)
{
	struct d40_desc *d;

	if (list_empty(&d40c->queue))
		return NULL;

	d = list_first_entry(&d40c->queue,
			     struct d40_desc,
			     node);
	return d;
}
/* Support functions for logical channels */

static int d40_lcla_id_get(struct d40_chan *d40c,
			   struct d40_lcla_pool *pool)
{
	int src_id = 0;
	int dst_id = 0;
	struct d40_log_lli *lcla_lidx_base =
		pool->base + d40c->phy_chan->num * 1024;
	int i;
	int lli_per_log = d40c->base->plat_data->llis_per_log;
	unsigned long flags;

	if (d40c->lcla.src_id >= 0 && d40c->lcla.dst_id >= 0)
		return 0;

	if (pool->num_blocks > 32)
		return -EINVAL;

	spin_lock_irqsave(&pool->lock, flags);

	for (i = 0; i < pool->num_blocks; i++) {
		if (!(pool->alloc_map[d40c->phy_chan->num] & (0x1 << i))) {
			pool->alloc_map[d40c->phy_chan->num] |= (0x1 << i);
			break;
		}
	}
	src_id = i;
	if (src_id >= pool->num_blocks)
		goto err;

	for (; i < pool->num_blocks; i++) {
		if (!(pool->alloc_map[d40c->phy_chan->num] & (0x1 << i))) {
			pool->alloc_map[d40c->phy_chan->num] |= (0x1 << i);
			break;
		}
	}

	dst_id = i;
	if (dst_id == src_id)
		goto err;

	d40c->lcla.src_id = src_id;
	d40c->lcla.dst_id = dst_id;
	d40c->lcla.dst = lcla_lidx_base + dst_id * lli_per_log + 1;
	d40c->lcla.src = lcla_lidx_base + src_id * lli_per_log + 1;

	spin_unlock_irqrestore(&pool->lock, flags);
	return 0;
err:
	spin_unlock_irqrestore(&pool->lock, flags);
	return -EINVAL;
}
static void d40_lcla_id_put(struct d40_chan *d40c,
			    struct d40_lcla_pool *pool,
			    int id)
{
	unsigned long flags;

	if (id < 0)
		return;

	d40c->lcla.src_id = -1;
	d40c->lcla.dst_id = -1;

	spin_lock_irqsave(&pool->lock, flags);
	pool->alloc_map[d40c->phy_chan->num] &= (~(0x1 << id));
	spin_unlock_irqrestore(&pool->lock, flags);
}
static int d40_channel_execute_command(struct d40_chan *d40c,
				       enum d40_command command)
{
	int status, i;
	void __iomem *active_reg;
	int ret = 0;
	unsigned long flags;

	spin_lock_irqsave(&d40c->base->execmd_lock, flags);

	if (d40c->phy_chan->num % 2 == 0)
		active_reg = d40c->base->virtbase + D40_DREG_ACTIVE;
	else
		active_reg = d40c->base->virtbase + D40_DREG_ACTIVO;

	if (command == D40_DMA_SUSPEND_REQ) {
		status = (readl(active_reg) &
			  D40_CHAN_POS_MASK(d40c->phy_chan->num)) >>
			D40_CHAN_POS(d40c->phy_chan->num);

		if (status == D40_DMA_SUSPENDED || status == D40_DMA_STOP)
			goto done;
	}

	writel(command << D40_CHAN_POS(d40c->phy_chan->num), active_reg);

	if (command == D40_DMA_SUSPEND_REQ) {

		for (i = 0 ; i < D40_SUSPEND_MAX_IT; i++) {
			status = (readl(active_reg) &
				  D40_CHAN_POS_MASK(d40c->phy_chan->num)) >>
				D40_CHAN_POS(d40c->phy_chan->num);

			cpu_relax();
			/*
			 * Reduce the number of bus accesses while
			 * waiting for the DMA to suspend.
			 */
			udelay(3);

			if (status == D40_DMA_STOP ||
			    status == D40_DMA_SUSPENDED)
				break;
		}

		if (i == D40_SUSPEND_MAX_IT) {
			dev_err(&d40c->chan.dev->device,
				"[%s]: unable to suspend the chl %d (log: %d) status %x\n",
				__func__, d40c->phy_chan->num, d40c->log_num,
				status);
			ret = -EBUSY;
		}
	}
done:
	spin_unlock_irqrestore(&d40c->base->execmd_lock, flags);
	return ret;
}
static void d40_term_all(struct d40_chan *d40c)
{
	struct d40_desc *d40d;

	/* Release active descriptors */
	while ((d40d = d40_first_active_get(d40c))) {
		d40_desc_remove(d40d);

		/* Return desc to free-list */
		d40_desc_free(d40c, d40d);
	}

	/* Release queued descriptors waiting for transfer */
	while ((d40d = d40_first_queued(d40c))) {
		d40_desc_remove(d40d);

		/* Return desc to free-list */
		d40_desc_free(d40c, d40d);
	}

	d40_lcla_id_put(d40c, &d40c->base->lcla_pool,
			d40c->lcla.src_id);
	d40_lcla_id_put(d40c, &d40c->base->lcla_pool,
			d40c->lcla.dst_id);

	d40c->pending_tx = 0;
	d40c->busy = false;
}
static void d40_config_set_event(struct d40_chan *d40c, bool do_enable)
{
	u32 val;
	unsigned long flags;

	/* Notice, that disable requires the physical channel to be stopped */
	if (do_enable)
		val = D40_ACTIVATE_EVENTLINE;
	else
		val = D40_DEACTIVATE_EVENTLINE;

	spin_lock_irqsave(&d40c->phy_chan->lock, flags);

	/* Enable event line connected to device (or memcpy) */
	if ((d40c->dma_cfg.dir == STEDMA40_PERIPH_TO_MEM) ||
	    (d40c->dma_cfg.dir == STEDMA40_PERIPH_TO_PERIPH)) {
		u32 event = D40_TYPE_TO_EVENT(d40c->dma_cfg.src_dev_type);

		writel((val << D40_EVENTLINE_POS(event)) |
		       ~D40_EVENTLINE_MASK(event),
		       d40c->base->virtbase + D40_DREG_PCBASE +
		       d40c->phy_chan->num * D40_DREG_PCDELTA +
		       D40_CHAN_REG_SSLNK);
	}
	if (d40c->dma_cfg.dir != STEDMA40_PERIPH_TO_MEM) {
		u32 event = D40_TYPE_TO_EVENT(d40c->dma_cfg.dst_dev_type);

		writel((val << D40_EVENTLINE_POS(event)) |
		       ~D40_EVENTLINE_MASK(event),
		       d40c->base->virtbase + D40_DREG_PCBASE +
		       d40c->phy_chan->num * D40_DREG_PCDELTA +
		       D40_CHAN_REG_SDLNK);
	}

	spin_unlock_irqrestore(&d40c->phy_chan->lock, flags);
}
static u32 d40_chan_has_events(struct d40_chan *d40c)
{
	u32 val = 0;

	/* If SSLNK or SDLNK is zero all events are disabled */
	if ((d40c->dma_cfg.dir == STEDMA40_PERIPH_TO_MEM) ||
	    (d40c->dma_cfg.dir == STEDMA40_PERIPH_TO_PERIPH))
		val = readl(d40c->base->virtbase + D40_DREG_PCBASE +
			    d40c->phy_chan->num * D40_DREG_PCDELTA +
			    D40_CHAN_REG_SSLNK);

	if (d40c->dma_cfg.dir != STEDMA40_PERIPH_TO_MEM)
		val = readl(d40c->base->virtbase + D40_DREG_PCBASE +
			    d40c->phy_chan->num * D40_DREG_PCDELTA +
			    D40_CHAN_REG_SDLNK);
	return val;
}
static void d40_config_enable_lidx(struct d40_chan *d40c)
{
	/* Set LIDX for lcla */
	writel((d40c->phy_chan->num << D40_SREG_ELEM_LOG_LIDX_POS) &
	       D40_SREG_ELEM_LOG_LIDX_MASK,
	       d40c->base->virtbase + D40_DREG_PCBASE +
	       d40c->phy_chan->num * D40_DREG_PCDELTA + D40_CHAN_REG_SDELT);

	writel((d40c->phy_chan->num << D40_SREG_ELEM_LOG_LIDX_POS) &
	       D40_SREG_ELEM_LOG_LIDX_MASK,
	       d40c->base->virtbase + D40_DREG_PCBASE +
	       d40c->phy_chan->num * D40_DREG_PCDELTA + D40_CHAN_REG_SSELT);
}
static int d40_config_write(struct d40_chan *d40c)
{
	u32 addr_base;
	u32 var;
	int res;

	res = d40_channel_execute_command(d40c, D40_DMA_SUSPEND_REQ);
	if (res)
		return res;

	/* Odd addresses are even addresses + 4 */
	addr_base = (d40c->phy_chan->num % 2) * 4;
	/* Setup channel mode to logical or physical */
	var = ((u32)(d40c->log_num != D40_PHY_CHAN) + 1) <<
		D40_CHAN_POS(d40c->phy_chan->num);
	writel(var, d40c->base->virtbase + D40_DREG_PRMSE + addr_base);

	/* Setup operational mode option register */
	var = ((d40c->dma_cfg.channel_type >> STEDMA40_INFO_CH_MODE_OPT_POS) &
	       0x3) << D40_CHAN_POS(d40c->phy_chan->num);

	writel(var, d40c->base->virtbase + D40_DREG_PRMOE + addr_base);

	if (d40c->log_num != D40_PHY_CHAN) {
		/* Set default config for CFG reg */
		writel(d40c->src_def_cfg,
		       d40c->base->virtbase + D40_DREG_PCBASE +
		       d40c->phy_chan->num * D40_DREG_PCDELTA +
		       D40_CHAN_REG_SSCFG);
		writel(d40c->dst_def_cfg,
		       d40c->base->virtbase + D40_DREG_PCBASE +
		       d40c->phy_chan->num * D40_DREG_PCDELTA +
		       D40_CHAN_REG_SDCFG);

		d40_config_enable_lidx(d40c);
	}
	return res;
}
static void d40_desc_load(struct d40_chan *d40c, struct d40_desc *d40d)
{
	if (d40d->lli_phy.dst && d40d->lli_phy.src) {
		d40_phy_lli_write(d40c->base->virtbase,
				  d40c->phy_chan->num,
				  d40d->lli_phy.dst,
				  d40d->lli_phy.src);
	} else if (d40d->lli_log.dst && d40d->lli_log.src) {
		struct d40_log_lli *src = d40d->lli_log.src;
		struct d40_log_lli *dst = d40d->lli_log.dst;

		src += d40d->lli_count;
		dst += d40d->lli_count;
		d40_log_lli_write(d40c->lcpa, d40c->lcla.src,
				  d40c->lcla.dst,
				  dst, src,
				  d40c->base->plat_data->llis_per_log);
	}
	d40d->lli_count += d40d->lli_tx_len;
}
static dma_cookie_t d40_tx_submit(struct dma_async_tx_descriptor *tx)
{
	struct d40_chan *d40c = container_of(tx->chan,
					     struct d40_chan,
					     chan);
	struct d40_desc *d40d = container_of(tx, struct d40_desc, txd);
	unsigned long flags;

	spin_lock_irqsave(&d40c->lock, flags);

	tx->cookie = d40_assign_cookie(d40c, d40d);

	d40_desc_queue(d40c, d40d);

	spin_unlock_irqrestore(&d40c->lock, flags);

	return tx->cookie;
}
static int d40_start(struct d40_chan *d40c)
{
	if (d40c->log_num != D40_PHY_CHAN)
		d40_config_set_event(d40c, true);

	return d40_channel_execute_command(d40c, D40_DMA_RUN);
}
static struct d40_desc *d40_queue_start(struct d40_chan *d40c)
{
	struct d40_desc *d40d;
	int err;

	/* Start queued jobs, if any */
	d40d = d40_first_queued(d40c);

	if (d40d != NULL) {
		d40c->busy = true;

		/* Remove from queue */
		d40_desc_remove(d40d);

		/* Add to active queue */
		d40_desc_submit(d40c, d40d);

		/* Initiate DMA job */
		d40_desc_load(d40c, d40d);

		/* Start dma job */
		err = d40_start(d40c);

		if (err)
			return NULL;
	}

	return d40d;
}
/* called from interrupt context */
static void dma_tc_handle(struct d40_chan *d40c)
{
	struct d40_desc *d40d;

	if (!d40c->phy_chan)
		return;

	/* Get first active entry from list */
	d40d = d40_first_active_get(d40c);

	if (d40d == NULL)
		return;

	if (d40d->lli_count < d40d->lli_len) {

		d40_desc_load(d40c, d40d);
		/* Start dma job */
		(void) d40_start(d40c);
		return;
	}

	if (d40_queue_start(d40c) == NULL)
		d40c->busy = false;

	d40c->pending_tx++;
	tasklet_schedule(&d40c->tasklet);
}
static void dma_tasklet(unsigned long data)
{
	struct d40_chan *d40c = (struct d40_chan *) data;
	struct d40_desc *d40d_fin;
	unsigned long flags;
	dma_async_tx_callback callback;
	void *callback_param;

	spin_lock_irqsave(&d40c->lock, flags);

	/* Get first active entry from list */
	d40d_fin = d40_first_active_get(d40c);

	if (d40d_fin == NULL)
		goto err;

	d40c->completed = d40d_fin->txd.cookie;

	/*
	 * If terminating a channel pending_tx is set to zero.
	 * This prevents any finished active jobs to return to the client.
	 */
	if (d40c->pending_tx == 0) {
		spin_unlock_irqrestore(&d40c->lock, flags);
		return;
	}

	/* Callback to client */
	callback = d40d_fin->txd.callback;
	callback_param = d40d_fin->txd.callback_param;

	if (async_tx_test_ack(&d40d_fin->txd)) {
		d40_pool_lli_free(d40d_fin);
		d40_desc_remove(d40d_fin);
		/* Return desc to free-list */
		d40_desc_free(d40c, d40d_fin);
	} else {
		if (!d40d_fin->is_in_client_list) {
			d40_desc_remove(d40d_fin);
			list_add_tail(&d40d_fin->node, &d40c->client);
			d40d_fin->is_in_client_list = true;
		}
	}

	d40c->pending_tx--;

	if (d40c->pending_tx)
		tasklet_schedule(&d40c->tasklet);

	spin_unlock_irqrestore(&d40c->lock, flags);

	if (callback)
		callback(callback_param);

	return;

err:
	/* Rescue maneuver if receiving double interrupts */
	if (d40c->pending_tx > 0)
		d40c->pending_tx--;
	spin_unlock_irqrestore(&d40c->lock, flags);
}
static irqreturn_t d40_handle_interrupt(int irq, void *data)
{
	static const struct d40_interrupt_lookup il[] = {
		{D40_DREG_LCTIS0, D40_DREG_LCICR0, false,  0},
		{D40_DREG_LCTIS1, D40_DREG_LCICR1, false, 32},
		{D40_DREG_LCTIS2, D40_DREG_LCICR2, false, 64},
		{D40_DREG_LCTIS3, D40_DREG_LCICR3, false, 96},
		{D40_DREG_LCEIS0, D40_DREG_LCICR0, true,   0},
		{D40_DREG_LCEIS1, D40_DREG_LCICR1, true,  32},
		{D40_DREG_LCEIS2, D40_DREG_LCICR2, true,  64},
		{D40_DREG_LCEIS3, D40_DREG_LCICR3, true,  96},
		{D40_DREG_PCTIS,  D40_DREG_PCICR,  false, D40_PHY_CHAN},
		{D40_DREG_PCEIS,  D40_DREG_PCICR,  true,  D40_PHY_CHAN},
	};

	int i;
	u32 regs[ARRAY_SIZE(il)];
	u32 tmp;
	u32 idx;
	u32 row;
	long chan = -1;
	struct d40_chan *d40c;
	unsigned long flags;
	struct d40_base *base = data;

	spin_lock_irqsave(&base->interrupt_lock, flags);

	/* Read interrupt status of both logical and physical channels */
	for (i = 0; i < ARRAY_SIZE(il); i++)
		regs[i] = readl(base->virtbase + il[i].src);

	for (;;) {

		chan = find_next_bit((unsigned long *)regs,
				     BITS_PER_LONG * ARRAY_SIZE(il), chan + 1);

		/* No more set bits found? */
		if (chan == BITS_PER_LONG * ARRAY_SIZE(il))
			break;

		row = chan / BITS_PER_LONG;
		idx = chan & (BITS_PER_LONG - 1);

		/* ACK interrupt */
		tmp = readl(base->virtbase + il[row].clr);
		tmp |= 1 << idx;
		writel(tmp, base->virtbase + il[row].clr);

		if (il[row].offset == D40_PHY_CHAN)
			d40c = base->lookup_phy_chans[idx];
		else
			d40c = base->lookup_log_chans[il[row].offset + idx];
		spin_lock(&d40c->lock);

		if (!il[row].is_error)
			dma_tc_handle(d40c);
		else
			dev_err(base->dev, "[%s] IRQ chan: %ld offset %d idx %d\n",
				__func__, chan, il[row].offset, idx);

		spin_unlock(&d40c->lock);
	}

	spin_unlock_irqrestore(&base->interrupt_lock, flags);

	return IRQ_HANDLED;
}
static int d40_validate_conf(struct d40_chan *d40c,
			     struct stedma40_chan_cfg *conf)
{
	int res = 0;
	u32 dst_event_group = D40_TYPE_TO_GROUP(conf->dst_dev_type);
	u32 src_event_group = D40_TYPE_TO_GROUP(conf->src_dev_type);
	bool is_log = (conf->channel_type & STEDMA40_CHANNEL_IN_OPER_MODE)
		== STEDMA40_CHANNEL_IN_LOG_MODE;

	if (d40c->dma_cfg.dir == STEDMA40_MEM_TO_PERIPH &&
	    dst_event_group == STEDMA40_DEV_DST_MEMORY) {
		dev_err(&d40c->chan.dev->device, "[%s] Invalid dst\n",
			__func__);
		res = -EINVAL;
	}

	if (d40c->dma_cfg.dir == STEDMA40_PERIPH_TO_MEM &&
	    src_event_group == STEDMA40_DEV_SRC_MEMORY) {
		dev_err(&d40c->chan.dev->device, "[%s] Invalid src\n",
			__func__);
		res = -EINVAL;
	}

	if (src_event_group == STEDMA40_DEV_SRC_MEMORY &&
	    dst_event_group == STEDMA40_DEV_DST_MEMORY && is_log) {
		dev_err(&d40c->chan.dev->device,
			"[%s] No event line\n", __func__);
		res = -EINVAL;
	}

	if (conf->dir == STEDMA40_PERIPH_TO_PERIPH &&
	    (src_event_group != dst_event_group)) {
		dev_err(&d40c->chan.dev->device,
			"[%s] Invalid event group\n", __func__);
		res = -EINVAL;
	}

	if (conf->dir == STEDMA40_PERIPH_TO_PERIPH) {
		/*
		 * DMAC HW supports it. Will be added to this driver,
		 * in case any dma client requires it.
		 */
		dev_err(&d40c->chan.dev->device,
			"[%s] periph to periph not supported\n",
			__func__);
		res = -EINVAL;
	}

	return res;
}
static bool d40_alloc_mask_set(struct d40_phy_res *phy, bool is_src,
			       int log_event_line, bool is_log)
{
	unsigned long flags;
	spin_lock_irqsave(&phy->lock, flags);
	if (!is_log) {
		/* Physical interrupts are masked per physical full channel */
		if (phy->allocated_src == D40_ALLOC_FREE &&
		    phy->allocated_dst == D40_ALLOC_FREE) {
			phy->allocated_dst = D40_ALLOC_PHY;
			phy->allocated_src = D40_ALLOC_PHY;
			goto found;
		} else
			goto not_found;
	}

	/* Logical channel */
	if (is_src) {
		if (phy->allocated_src == D40_ALLOC_PHY)
			goto not_found;

		if (phy->allocated_src == D40_ALLOC_FREE)
			phy->allocated_src = D40_ALLOC_LOG_FREE;

		if (!(phy->allocated_src & (1 << log_event_line))) {
			phy->allocated_src |= 1 << log_event_line;
			goto found;
		} else
			goto not_found;
	} else {
		if (phy->allocated_dst == D40_ALLOC_PHY)
			goto not_found;

		if (phy->allocated_dst == D40_ALLOC_FREE)
			phy->allocated_dst = D40_ALLOC_LOG_FREE;

		if (!(phy->allocated_dst & (1 << log_event_line))) {
			phy->allocated_dst |= 1 << log_event_line;
			goto found;
		} else
			goto not_found;
	}

not_found:
	spin_unlock_irqrestore(&phy->lock, flags);
	return false;
found:
	spin_unlock_irqrestore(&phy->lock, flags);
	return true;
}
static bool d40_alloc_mask_free(struct d40_phy_res *phy, bool is_src,
				int log_event_line)
{
	unsigned long flags;
	bool is_free = false;

	spin_lock_irqsave(&phy->lock, flags);
	if (!log_event_line) {
		/* Physical interrupts are masked per physical full channel */
		phy->allocated_dst = D40_ALLOC_FREE;
		phy->allocated_src = D40_ALLOC_FREE;
		is_free = true;
		goto out;
	}

	/* Logical channel */
	if (is_src) {
		phy->allocated_src &= ~(1 << log_event_line);
		if (phy->allocated_src == D40_ALLOC_LOG_FREE)
			phy->allocated_src = D40_ALLOC_FREE;
	} else {
		phy->allocated_dst &= ~(1 << log_event_line);
		if (phy->allocated_dst == D40_ALLOC_LOG_FREE)
			phy->allocated_dst = D40_ALLOC_FREE;
	}

	is_free = ((phy->allocated_src | phy->allocated_dst) ==
		   D40_ALLOC_FREE);

out:
	spin_unlock_irqrestore(&phy->lock, flags);

	return is_free;
}
static int d40_allocate_channel(struct d40_chan *d40c)
{
	int dev_type;
	int event_group;
	int event_line;
	struct d40_phy_res *phys;
	int i;
	int j;
	int log_num;
	bool is_src;
	bool is_log = (d40c->dma_cfg.channel_type & STEDMA40_CHANNEL_IN_OPER_MODE)
		== STEDMA40_CHANNEL_IN_LOG_MODE;

	phys = d40c->base->phy_res;

	if (d40c->dma_cfg.dir == STEDMA40_PERIPH_TO_MEM) {
		dev_type = d40c->dma_cfg.src_dev_type;
		log_num = 2 * dev_type;
		is_src = true;
	} else if (d40c->dma_cfg.dir == STEDMA40_MEM_TO_PERIPH ||
		   d40c->dma_cfg.dir == STEDMA40_MEM_TO_MEM) {
		/* dst event lines are used for logical memcpy */
		dev_type = d40c->dma_cfg.dst_dev_type;
		log_num = 2 * dev_type + 1;
		is_src = false;
	} else
		return -EINVAL;

	event_group = D40_TYPE_TO_GROUP(dev_type);
	event_line = D40_TYPE_TO_EVENT(dev_type);

	if (!is_log) {
		if (d40c->dma_cfg.dir == STEDMA40_MEM_TO_MEM) {
			/* Find physical half channel */
			for (i = 0; i < d40c->base->num_phy_chans; i++) {

				if (d40_alloc_mask_set(&phys[i], is_src,
						       0, is_log))
					goto found_phy;
			}
		} else
			for (j = 0; j < d40c->base->num_phy_chans; j += 8) {
				int phy_num = j + event_group * 2;
				for (i = phy_num; i < phy_num + 2; i++) {
					if (d40_alloc_mask_set(&phys[i],
							       is_src,
							       0,
							       is_log))
						goto found_phy;
				}
			}
		return -EINVAL;
found_phy:
		d40c->phy_chan = &phys[i];
		d40c->log_num = D40_PHY_CHAN;
		goto out;
	}

	/* Find logical channel */
	for (j = 0; j < d40c->base->num_phy_chans; j += 8) {
		int phy_num = j + event_group * 2;
		/*
		 * Spread logical channels across all available physical rather
		 * than pack every logical channel at the first available phy
		 * channels.
		 */
		if (is_src) {
			for (i = phy_num; i < phy_num + 2; i++) {
				if (d40_alloc_mask_set(&phys[i], is_src,
						       event_line, is_log))
					goto found_log;
			}
		} else {
			for (i = phy_num + 1; i >= phy_num; i--) {
				if (d40_alloc_mask_set(&phys[i], is_src,
						       event_line, is_log))
					goto found_log;
			}
		}
	}
	return -EINVAL;

found_log:
	d40c->phy_chan = &phys[i];
	d40c->log_num = log_num;
out:
	if (is_log)
		d40c->base->lookup_log_chans[d40c->log_num] = d40c;
	else
		d40c->base->lookup_phy_chans[d40c->phy_chan->num] = d40c;

	return 0;
}
static int d40_config_memcpy(struct d40_chan *d40c)
{
	dma_cap_mask_t cap = d40c->chan.device->cap_mask;

	if (dma_has_cap(DMA_MEMCPY, cap) && !dma_has_cap(DMA_SLAVE, cap)) {
		d40c->dma_cfg = *d40c->base->plat_data->memcpy_conf_log;
		d40c->dma_cfg.src_dev_type = STEDMA40_DEV_SRC_MEMORY;
		d40c->dma_cfg.dst_dev_type = d40c->base->plat_data->
			memcpy[d40c->chan.chan_id];

	} else if (dma_has_cap(DMA_MEMCPY, cap) &&
		   dma_has_cap(DMA_SLAVE, cap)) {
		d40c->dma_cfg = *d40c->base->plat_data->memcpy_conf_phy;
	} else {
		dev_err(&d40c->chan.dev->device, "[%s] No memcpy\n",
			__func__);
		return -EINVAL;
	}

	return 0;
}
static int d40_free_dma(struct d40_chan *d40c)
{
	int res = 0;
	u32 event;
	u32 dir;
	struct d40_phy_res *phy = d40c->phy_chan;
	bool is_src;
	struct d40_desc *d;
	struct d40_desc *_d;

	/* Terminate all queued and active transfers */
	d40_term_all(d40c);

	/* Release client owned descriptors */
	if (!list_empty(&d40c->client))
		list_for_each_entry_safe(d, _d, &d40c->client, node) {
			d40_pool_lli_free(d);
			d40_desc_remove(d);
			/* Return desc to free-list */
			d40_desc_free(d40c, d);
		}

	if (phy == NULL) {
		dev_err(&d40c->chan.dev->device, "[%s] phy == null\n",
			__func__);
		return -EINVAL;
	}

	if (phy->allocated_src == D40_ALLOC_FREE &&
	    phy->allocated_dst == D40_ALLOC_FREE) {
		dev_err(&d40c->chan.dev->device, "[%s] channel already free\n",
			__func__);
		return -EINVAL;
	}

	res = d40_channel_execute_command(d40c, D40_DMA_SUSPEND_REQ);
	if (res) {
		dev_err(&d40c->chan.dev->device, "[%s] suspend failed\n",
			__func__);
		return res;
	}

	if (d40c->dma_cfg.dir == STEDMA40_MEM_TO_PERIPH ||
	    d40c->dma_cfg.dir == STEDMA40_MEM_TO_MEM) {
		event = D40_TYPE_TO_EVENT(d40c->dma_cfg.dst_dev_type);
		dir = D40_CHAN_REG_SDLNK;
		is_src = false;
	} else if (d40c->dma_cfg.dir == STEDMA40_PERIPH_TO_MEM) {
		event = D40_TYPE_TO_EVENT(d40c->dma_cfg.src_dev_type);
		dir = D40_CHAN_REG_SSLNK;
		is_src = true;
	} else {
		dev_err(&d40c->chan.dev->device,
			"[%s] Unknown direction\n", __func__);
		return -EINVAL;
	}

	if (d40c->log_num != D40_PHY_CHAN) {
		/*
		 * Release logical channel, deactivate the event line during
		 * the time physical res is suspended.
		 */
		writel((D40_DEACTIVATE_EVENTLINE << D40_EVENTLINE_POS(event)) &
		       D40_EVENTLINE_MASK(event),
		       d40c->base->virtbase + D40_DREG_PCBASE +
		       phy->num * D40_DREG_PCDELTA + dir);

		d40c->base->lookup_log_chans[d40c->log_num] = NULL;

		/*
		 * Check if there are more logical allocations
		 * on this phy channel.
		 */
		if (!d40_alloc_mask_free(phy, is_src, event)) {
			/* Resume the other logical channels if any */
			if (d40_chan_has_events(d40c)) {
				res = d40_channel_execute_command(d40c,
								  D40_DMA_RUN);
				if (res) {
					dev_err(&d40c->chan.dev->device,
						"[%s] Executing RUN command\n",
						__func__);
					return res;
				}
			}
			return 0;
		}
	} else
		d40_alloc_mask_free(phy, is_src, 0);

	/* Release physical channel */
	res = d40_channel_execute_command(d40c, D40_DMA_STOP);
	if (res) {
		dev_err(&d40c->chan.dev->device,
			"[%s] Failed to stop channel\n", __func__);
		return res;
	}
	d40c->phy_chan = NULL;
	/* Invalidate channel type */
	d40c->dma_cfg.channel_type = 0;
	d40c->base->lookup_phy_chans[phy->num] = NULL;

	return 0;
}
static int d40_pause(struct dma_chan *chan)
{
	struct d40_chan *d40c =
		container_of(chan, struct d40_chan, chan);
	int res;
	unsigned long flags;

	spin_lock_irqsave(&d40c->lock, flags);

	res = d40_channel_execute_command(d40c, D40_DMA_SUSPEND_REQ);
	if (res == 0) {
		if (d40c->log_num != D40_PHY_CHAN) {
			d40_config_set_event(d40c, false);
			/* Resume the other logical channels if any */
			if (d40_chan_has_events(d40c))
				res = d40_channel_execute_command(d40c,
								  D40_DMA_RUN);
		}
	}

	spin_unlock_irqrestore(&d40c->lock, flags);
	return res;
}
static bool d40_is_paused(struct d40_chan *d40c)
{
	bool is_paused = false;
	unsigned long flags;
	void __iomem *active_reg;
	u32 status;
	u32 event;

	spin_lock_irqsave(&d40c->lock, flags);

	if (d40c->log_num == D40_PHY_CHAN) {
		if (d40c->phy_chan->num % 2 == 0)
			active_reg = d40c->base->virtbase + D40_DREG_ACTIVE;
		else
			active_reg = d40c->base->virtbase + D40_DREG_ACTIVO;

		status = (readl(active_reg) &
			  D40_CHAN_POS_MASK(d40c->phy_chan->num)) >>
			D40_CHAN_POS(d40c->phy_chan->num);
		if (status == D40_DMA_SUSPENDED || status == D40_DMA_STOP)
			is_paused = true;

		goto _exit;
	}

	if (d40c->dma_cfg.dir == STEDMA40_MEM_TO_PERIPH ||
	    d40c->dma_cfg.dir == STEDMA40_MEM_TO_MEM)
		event = D40_TYPE_TO_EVENT(d40c->dma_cfg.dst_dev_type);
	else if (d40c->dma_cfg.dir == STEDMA40_PERIPH_TO_MEM)
		event = D40_TYPE_TO_EVENT(d40c->dma_cfg.src_dev_type);
	else {
		dev_err(&d40c->chan.dev->device,
			"[%s] Unknown direction\n", __func__);
		goto _exit;
	}
	status = d40_chan_has_events(d40c);
	status = (status & D40_EVENTLINE_MASK(event)) >>
		D40_EVENTLINE_POS(event);

	if (status != D40_DMA_RUN)
		is_paused = true;
_exit:
	spin_unlock_irqrestore(&d40c->lock, flags);
	return is_paused;
}
static bool d40_tx_is_linked(struct d40_chan *d40c)
{
	bool is_link;

	if (d40c->log_num != D40_PHY_CHAN)
		is_link = readl(&d40c->lcpa->lcsp3) & D40_MEM_LCSP3_DLOS_MASK;
	else
		is_link = readl(d40c->base->virtbase + D40_DREG_PCBASE +
				d40c->phy_chan->num * D40_DREG_PCDELTA +
				D40_CHAN_REG_SDLNK) &
			D40_SREG_LNK_PHYS_LNK_MASK;
	return is_link;
}
static u32 d40_residue(struct d40_chan *d40c)
{
	u32 num_elt;

	if (d40c->log_num != D40_PHY_CHAN)
		num_elt = (readl(&d40c->lcpa->lcsp2) & D40_MEM_LCSP2_ECNT_MASK)
			>> D40_MEM_LCSP2_ECNT_POS;
	else
		num_elt = (readl(d40c->base->virtbase + D40_DREG_PCBASE +
				 d40c->phy_chan->num * D40_DREG_PCDELTA +
				 D40_CHAN_REG_SDELT) &
			   D40_SREG_ELEM_PHY_ECNT_MASK) >>
			D40_SREG_ELEM_PHY_ECNT_POS;
	return num_elt * (1 << d40c->dma_cfg.dst_info.data_width);
}
static int d40_resume(struct dma_chan *chan)
{
	struct d40_chan *d40c =
		container_of(chan, struct d40_chan, chan);
	int res = 0;
	unsigned long flags;

	spin_lock_irqsave(&d40c->lock, flags);

	/* If bytes left to transfer or linked tx resume job */
	if (d40_residue(d40c) || d40_tx_is_linked(d40c)) {
		if (d40c->log_num != D40_PHY_CHAN)
			d40_config_set_event(d40c, true);
		res = d40_channel_execute_command(d40c, D40_DMA_RUN);
	}

	spin_unlock_irqrestore(&d40c->lock, flags);
	return res;
}
static u32 stedma40_residue(struct dma_chan *chan)
{
	struct d40_chan *d40c =
		container_of(chan, struct d40_chan, chan);
	u32 bytes_left;
	unsigned long flags;

	spin_lock_irqsave(&d40c->lock, flags);
	bytes_left = d40_residue(d40c);
	spin_unlock_irqrestore(&d40c->lock, flags);

	return bytes_left;
}
/* Public DMA functions in addition to the DMA engine framework */

int stedma40_set_psize(struct dma_chan *chan,
		       int src_psize,
		       int dst_psize)
{
	struct d40_chan *d40c =
		container_of(chan, struct d40_chan, chan);
	unsigned long flags;

	spin_lock_irqsave(&d40c->lock, flags);

	if (d40c->log_num != D40_PHY_CHAN) {
		d40c->log_def.lcsp1 &= ~D40_MEM_LCSP1_SCFG_PSIZE_MASK;
		d40c->log_def.lcsp3 &= ~D40_MEM_LCSP1_SCFG_PSIZE_MASK;
		d40c->log_def.lcsp1 |= src_psize << D40_MEM_LCSP1_SCFG_PSIZE_POS;
		d40c->log_def.lcsp3 |= dst_psize << D40_MEM_LCSP1_SCFG_PSIZE_POS;
		goto out;
	}

	if (src_psize == STEDMA40_PSIZE_PHY_1)
		d40c->src_def_cfg &= ~(1 << D40_SREG_CFG_PHY_PEN_POS);
	else {
		d40c->src_def_cfg |= 1 << D40_SREG_CFG_PHY_PEN_POS;
		d40c->src_def_cfg &= ~(STEDMA40_PSIZE_PHY_16 <<
				       D40_SREG_CFG_PSIZE_POS);
		d40c->src_def_cfg |= src_psize << D40_SREG_CFG_PSIZE_POS;
	}

	if (dst_psize == STEDMA40_PSIZE_PHY_1)
		d40c->dst_def_cfg &= ~(1 << D40_SREG_CFG_PHY_PEN_POS);
	else {
		d40c->dst_def_cfg |= 1 << D40_SREG_CFG_PHY_PEN_POS;
		d40c->dst_def_cfg &= ~(STEDMA40_PSIZE_PHY_16 <<
				       D40_SREG_CFG_PSIZE_POS);
		d40c->dst_def_cfg |= dst_psize << D40_SREG_CFG_PSIZE_POS;
	}
out:
	spin_unlock_irqrestore(&d40c->lock, flags);
	return 0;
}
EXPORT_SYMBOL(stedma40_set_psize);
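/*
 * Usage sketch (illustrative only; the STEDMA40_PSIZE_* names are assumed
 * from <plat/ste_dma40.h>): after a channel has been allocated, a client
 * could set a burst size of 8 elements on both sides with:
 *
 *	stedma40_set_psize(chan, STEDMA40_PSIZE_PHY_8, STEDMA40_PSIZE_PHY_8);
 *
 * For logical channels the STEDMA40_PSIZE_LOG_* values apply instead.
 */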
struct dma_async_tx_descriptor *stedma40_memcpy_sg(struct dma_chan *chan,
						   struct scatterlist *sgl_dst,
						   struct scatterlist *sgl_src,
						   unsigned int sgl_len,
						   unsigned long dma_flags)
{
	int res;
	struct d40_desc *d40d;
	struct d40_chan *d40c = container_of(chan, struct d40_chan,
					     chan);
	unsigned long flags;

	if (d40c->phy_chan == NULL) {
		dev_err(&d40c->chan.dev->device,
			"[%s] Unallocated channel.\n", __func__);
		return ERR_PTR(-EINVAL);
	}

	spin_lock_irqsave(&d40c->lock, flags);
	d40d = d40_desc_get(d40c);

	if (d40d == NULL)
		goto err;

	d40d->lli_len = sgl_len;
	d40d->lli_tx_len = d40d->lli_len;
	d40d->txd.flags = dma_flags;

	if (d40c->log_num != D40_PHY_CHAN) {
		if (d40d->lli_len > d40c->base->plat_data->llis_per_log)
			d40d->lli_tx_len = d40c->base->plat_data->llis_per_log;

		/*
		 * Check if there is space available in lcla. If not,
		 * split list into 1-length and run only in lcpa
		 * space.
		 */
		if (d40_lcla_id_get(d40c,
				    &d40c->base->lcla_pool) != 0)
			d40d->lli_tx_len = 1;

		if (d40_pool_lli_alloc(d40d, sgl_len, true) < 0) {
			dev_err(&d40c->chan.dev->device,
				"[%s] Out of memory\n", __func__);
			goto err;
		}

		(void) d40_log_sg_to_lli(d40c->lcla.src_id,
					 sgl_src,
					 sgl_len,
					 d40d->lli_log.src,
					 d40c->log_def.lcsp1,
					 d40c->dma_cfg.src_info.data_width,
					 dma_flags & DMA_PREP_INTERRUPT,
					 d40d->lli_tx_len,
					 d40c->base->plat_data->llis_per_log);

		(void) d40_log_sg_to_lli(d40c->lcla.dst_id,
					 sgl_dst,
					 sgl_len,
					 d40d->lli_log.dst,
					 d40c->log_def.lcsp3,
					 d40c->dma_cfg.dst_info.data_width,
					 dma_flags & DMA_PREP_INTERRUPT,
					 d40d->lli_tx_len,
					 d40c->base->plat_data->llis_per_log);
	} else {
		if (d40_pool_lli_alloc(d40d, sgl_len, false) < 0) {
			dev_err(&d40c->chan.dev->device,
				"[%s] Out of memory\n", __func__);
			goto err;
		}

		res = d40_phy_sg_to_lli(sgl_src,
					sgl_len,
					0,
					d40d->lli_phy.src,
					d40d->lli_phy.src_addr,
					d40c->src_def_cfg,
					d40c->dma_cfg.src_info.data_width,
					d40c->dma_cfg.src_info.psize,
					true);

		if (res < 0)
			goto err;

		res = d40_phy_sg_to_lli(sgl_dst,
					sgl_len,
					0,
					d40d->lli_phy.dst,
					d40d->lli_phy.dst_addr,
					d40c->dst_def_cfg,
					d40c->dma_cfg.dst_info.data_width,
					d40c->dma_cfg.dst_info.psize,
					true);

		if (res < 0)
			goto err;

		(void) dma_map_single(d40c->base->dev, d40d->lli_phy.src,
				      d40d->lli_pool.size, DMA_TO_DEVICE);
	}

	dma_async_tx_descriptor_init(&d40d->txd, chan);

	d40d->txd.tx_submit = d40_tx_submit;

	spin_unlock_irqrestore(&d40c->lock, flags);

	return &d40d->txd;
err:
	spin_unlock_irqrestore(&d40c->lock, flags);
	return NULL;
}
EXPORT_SYMBOL(stedma40_memcpy_sg);
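/*
 * Usage sketch (illustrative only): a client with two scatterlists of
 * equal length can start a scatter-gather copy roughly as follows,
 * using the tx_submit and issue_pending hooks installed by this driver:
 *
 *	struct dma_async_tx_descriptor *txd;
 *
 *	txd = stedma40_memcpy_sg(chan, dst_sg, src_sg, sg_len,
 *				 DMA_PREP_INTERRUPT);
 *	if (!IS_ERR_OR_NULL(txd)) {
 *		txd->callback = my_done_cb;	// hypothetical client callback
 *		(void) txd->tx_submit(txd);
 *		chan->device->device_issue_pending(chan);
 *	}
 */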
bool stedma40_filter(struct dma_chan *chan, void *data)
{
	struct stedma40_chan_cfg *info = data;
	struct d40_chan *d40c =
		container_of(chan, struct d40_chan, chan);
	int err;

	if (data) {
		err = d40_validate_conf(d40c, info);
		if (!err)
			d40c->dma_cfg = *info;
	} else
		err = d40_config_memcpy(d40c);

	return err == 0;
}
EXPORT_SYMBOL(stedma40_filter);
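/*
 * Usage sketch (illustrative only; "cfg" is a hypothetical client
 * configuration):
 *
 *	dma_cap_mask_t mask;
 *	struct dma_chan *chan;
 *
 *	dma_cap_zero(mask);
 *	dma_cap_set(DMA_SLAVE, mask);
 *	chan = dma_request_channel(mask, stedma40_filter, &cfg);
 *
 * Passing NULL instead of &cfg makes the channel fall back to the
 * default memcpy configuration, as implemented above.
 */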
/* DMA ENGINE functions */
static int d40_alloc_chan_resources(struct dma_chan *chan)
{
	int err;
	unsigned long flags;
	struct d40_chan *d40c =
		container_of(chan, struct d40_chan, chan);
	bool is_free_phy;

	spin_lock_irqsave(&d40c->lock, flags);

	d40c->completed = chan->cookie = 1;

	/*
	 * If no dma configuration is set (channel_type == 0)
	 * use default configuration (memcpy)
	 */
	if (d40c->dma_cfg.channel_type == 0) {
		err = d40_config_memcpy(d40c);
		if (err) {
			dev_err(&d40c->chan.dev->device,
				"[%s] Failed to configure memcpy channel\n",
				__func__);
			goto fail;
		}
	}
	is_free_phy = (d40c->phy_chan == NULL);

	err = d40_allocate_channel(d40c);
	if (err) {
		dev_err(&d40c->chan.dev->device,
			"[%s] Failed to allocate channel\n", __func__);
		goto fail;
	}

	/* Fill in basic CFG register values */
	d40_phy_cfg(&d40c->dma_cfg, &d40c->src_def_cfg,
		    &d40c->dst_def_cfg, d40c->log_num != D40_PHY_CHAN);

	if (d40c->log_num != D40_PHY_CHAN) {
		d40_log_cfg(&d40c->dma_cfg,
			    &d40c->log_def.lcsp1, &d40c->log_def.lcsp3);

		if (d40c->dma_cfg.dir == STEDMA40_PERIPH_TO_MEM)
			d40c->lcpa = d40c->base->lcpa_base +
				d40c->dma_cfg.src_dev_type * D40_LCPA_CHAN_SIZE;
		else
			d40c->lcpa = d40c->base->lcpa_base +
				d40c->dma_cfg.dst_dev_type *
				D40_LCPA_CHAN_SIZE + D40_LCPA_CHAN_DST_DELTA;
	}

	/*
	 * Only write channel configuration to the DMA if the physical
	 * resource is free. In case of multiple logical channels
	 * on the same physical resource, only the first write is necessary.
	 */
	if (is_free_phy) {
		err = d40_config_write(d40c);
		if (err) {
			dev_err(&d40c->chan.dev->device,
				"[%s] Failed to configure channel\n",
				__func__);
		}
	}
fail:
	spin_unlock_irqrestore(&d40c->lock, flags);
	return err;
}
static void d40_free_chan_resources(struct dma_chan *chan)
{
	struct d40_chan *d40c =
		container_of(chan, struct d40_chan, chan);
	int err;
	unsigned long flags;

	if (d40c->phy_chan == NULL) {
		dev_err(&d40c->chan.dev->device,
			"[%s] Cannot free unallocated channel\n", __func__);
		return;
	}

	spin_lock_irqsave(&d40c->lock, flags);

	err = d40_free_dma(d40c);

	if (err)
		dev_err(&d40c->chan.dev->device,
			"[%s] Failed to free channel\n", __func__);
	spin_unlock_irqrestore(&d40c->lock, flags);
}
static struct dma_async_tx_descriptor *d40_prep_memcpy(struct dma_chan *chan,
						       dma_addr_t dst,
						       dma_addr_t src,
						       size_t size,
						       unsigned long dma_flags)
{
	struct d40_desc *d40d;
	struct d40_chan *d40c = container_of(chan, struct d40_chan,
					     chan);
	unsigned long flags;
	int err = 0;

	if (d40c->phy_chan == NULL) {
		dev_err(&d40c->chan.dev->device,
			"[%s] Channel is not allocated.\n", __func__);
		return ERR_PTR(-EINVAL);
	}

	spin_lock_irqsave(&d40c->lock, flags);
	d40d = d40_desc_get(d40c);

	if (d40d == NULL) {
		dev_err(&d40c->chan.dev->device,
			"[%s] Descriptor is NULL\n", __func__);
		goto err;
	}

	d40d->txd.flags = dma_flags;

	dma_async_tx_descriptor_init(&d40d->txd, chan);

	d40d->txd.tx_submit = d40_tx_submit;

	if (d40c->log_num != D40_PHY_CHAN) {

		if (d40_pool_lli_alloc(d40d, 1, true) < 0) {
			dev_err(&d40c->chan.dev->device,
				"[%s] Out of memory\n", __func__);
			goto err;
		}
		d40d->lli_len = 1;
		d40d->lli_tx_len = 1;

		d40_log_fill_lli(d40d->lli_log.src,
				 src,
				 size,
				 0,
				 d40c->log_def.lcsp1,
				 d40c->dma_cfg.src_info.data_width,
				 false, true);

		d40_log_fill_lli(d40d->lli_log.dst,
				 dst,
				 size,
				 0,
				 d40c->log_def.lcsp3,
				 d40c->dma_cfg.dst_info.data_width,
				 true, true);

	} else {

		if (d40_pool_lli_alloc(d40d, 1, false) < 0) {
			dev_err(&d40c->chan.dev->device,
				"[%s] Out of memory\n", __func__);
			goto err;
		}

		err = d40_phy_fill_lli(d40d->lli_phy.src,
				       src,
				       size,
				       d40c->dma_cfg.src_info.psize,
				       0,
				       d40c->src_def_cfg,
				       true,
				       d40c->dma_cfg.src_info.data_width,
				       false);
		if (err)
			goto err_fill_lli;

		err = d40_phy_fill_lli(d40d->lli_phy.dst,
				       dst,
				       size,
				       d40c->dma_cfg.dst_info.psize,
				       0,
				       d40c->dst_def_cfg,
				       true,
				       d40c->dma_cfg.dst_info.data_width,
				       false);

		if (err)
			goto err_fill_lli;

		(void) dma_map_single(d40c->base->dev, d40d->lli_phy.src,
				      d40d->lli_pool.size, DMA_TO_DEVICE);
	}

	spin_unlock_irqrestore(&d40c->lock, flags);
	return &d40d->txd;

err_fill_lli:
	dev_err(&d40c->chan.dev->device,
		"[%s] Failed filling in PHY LLI\n", __func__);
	d40_pool_lli_free(d40d);
err:
	spin_unlock_irqrestore(&d40c->lock, flags);
	return NULL;
}
static int d40_prep_slave_sg_log(struct d40_desc *d40d,
				 struct d40_chan *d40c,
				 struct scatterlist *sgl,
				 unsigned int sg_len,
				 enum dma_data_direction direction,
				 unsigned long dma_flags)
{
	dma_addr_t dev_addr = 0;
	int total_size;

	if (d40_pool_lli_alloc(d40d, sg_len, true) < 0) {
		dev_err(&d40c->chan.dev->device,
			"[%s] Out of memory\n", __func__);
		return -ENOMEM;
	}

	d40d->lli_len = sg_len;
	if (d40d->lli_len <= d40c->base->plat_data->llis_per_log)
		d40d->lli_tx_len = d40d->lli_len;
	else
		d40d->lli_tx_len = d40c->base->plat_data->llis_per_log;

	/*
	 * Check if there is space available in lcla.
	 * If not, split list into 1-length and run only
	 * in lcpa space.
	 */
	if (d40_lcla_id_get(d40c, &d40c->base->lcla_pool) != 0)
		d40d->lli_tx_len = 1;

	if (direction == DMA_FROM_DEVICE)
		dev_addr = d40c->base->plat_data->dev_rx[d40c->dma_cfg.src_dev_type];
	else if (direction == DMA_TO_DEVICE)
		dev_addr = d40c->base->plat_data->dev_tx[d40c->dma_cfg.dst_dev_type];
	else
		return -EINVAL;

	total_size = d40_log_sg_to_dev(&d40c->lcla,
				       sgl, sg_len,
				       &d40d->lli_log,
				       &d40c->log_def,
				       d40c->dma_cfg.src_info.data_width,
				       d40c->dma_cfg.dst_info.data_width,
				       direction,
				       dma_flags & DMA_PREP_INTERRUPT,
				       dev_addr, d40d->lli_tx_len,
				       d40c->base->plat_data->llis_per_log);

	if (total_size < 0)
		return -EINVAL;

	return 0;
}
static int d40_prep_slave_sg_phy(struct d40_desc *d40d,
				 struct d40_chan *d40c,
				 struct scatterlist *sgl,
				 unsigned int sgl_len,
				 enum dma_data_direction direction,
				 unsigned long dma_flags)
{
	dma_addr_t src_dev_addr;
	dma_addr_t dst_dev_addr;
	int res;

	if (d40_pool_lli_alloc(d40d, sgl_len, false) < 0) {
		dev_err(&d40c->chan.dev->device,
			"[%s] Out of memory\n", __func__);
		return -ENOMEM;
	}

	d40d->lli_len = sgl_len;
	d40d->lli_tx_len = sgl_len;

	if (direction == DMA_FROM_DEVICE) {
		dst_dev_addr = 0;
		src_dev_addr = d40c->base->plat_data->dev_rx[d40c->dma_cfg.src_dev_type];
	} else if (direction == DMA_TO_DEVICE) {
		dst_dev_addr = d40c->base->plat_data->dev_tx[d40c->dma_cfg.dst_dev_type];
		src_dev_addr = 0;
	} else
		return -EINVAL;

	res = d40_phy_sg_to_lli(sgl,
				sgl_len,
				src_dev_addr,
				d40d->lli_phy.src,
				d40d->lli_phy.src_addr,
				d40c->src_def_cfg,
				d40c->dma_cfg.src_info.data_width,
				d40c->dma_cfg.src_info.psize,
				true);
	if (res < 0)
		return res;

	res = d40_phy_sg_to_lli(sgl,
				sgl_len,
				dst_dev_addr,
				d40d->lli_phy.dst,
				d40d->lli_phy.dst_addr,
				d40c->dst_def_cfg,
				d40c->dma_cfg.dst_info.data_width,
				d40c->dma_cfg.dst_info.psize,
				true);
	if (res < 0)
		return res;

	(void) dma_map_single(d40c->base->dev, d40d->lli_phy.src,
			      d40d->lli_pool.size, DMA_TO_DEVICE);
	return 0;
}
static struct dma_async_tx_descriptor *d40_prep_slave_sg(struct dma_chan *chan,
							 struct scatterlist *sgl,
							 unsigned int sg_len,
							 enum dma_data_direction direction,
							 unsigned long dma_flags)
{
	struct d40_desc *d40d;
	struct d40_chan *d40c = container_of(chan, struct d40_chan,
					     chan);
	unsigned long flags;
	int err;

	if (d40c->phy_chan == NULL) {
		dev_err(&d40c->chan.dev->device,
			"[%s] Cannot prepare unallocated channel\n", __func__);
		return ERR_PTR(-EINVAL);
	}

	if (d40c->dma_cfg.pre_transfer)
		d40c->dma_cfg.pre_transfer(chan,
					   d40c->dma_cfg.pre_transfer_data,
					   sg_dma_len(sgl));

	spin_lock_irqsave(&d40c->lock, flags);
	d40d = d40_desc_get(d40c);
	spin_unlock_irqrestore(&d40c->lock, flags);

	if (d40d == NULL)
		return NULL;

	if (d40c->log_num != D40_PHY_CHAN)
		err = d40_prep_slave_sg_log(d40d, d40c, sgl, sg_len,
					    direction, dma_flags);
	else
		err = d40_prep_slave_sg_phy(d40d, d40c, sgl, sg_len,
					    direction, dma_flags);
	if (err) {
		dev_err(&d40c->chan.dev->device,
			"[%s] Failed to prepare %s slave sg job: %d\n",
			__func__,
			d40c->log_num != D40_PHY_CHAN ? "log" : "phy", err);
		return NULL;
	}

	d40d->txd.flags = dma_flags;

	dma_async_tx_descriptor_init(&d40d->txd, chan);

	d40d->txd.tx_submit = d40_tx_submit;

	return &d40d->txd;
}
static enum dma_status d40_tx_status(struct dma_chan *chan,
				     dma_cookie_t cookie,
				     struct dma_tx_state *txstate)
{
	struct d40_chan *d40c = container_of(chan, struct d40_chan, chan);
	dma_cookie_t last_used;
	dma_cookie_t last_complete;
	int ret;

	if (d40c->phy_chan == NULL) {
		dev_err(&d40c->chan.dev->device,
			"[%s] Cannot read status of unallocated channel\n",
			__func__);
		return -EINVAL;
	}

	last_complete = d40c->completed;
	last_used = chan->cookie;

	if (d40_is_paused(d40c))
		ret = DMA_PAUSED;
	else
		ret = dma_async_is_complete(cookie, last_complete, last_used);

	dma_set_tx_state(txstate, last_complete, last_used,
			 stedma40_residue(chan));

	return ret;
}
static void d40_issue_pending(struct dma_chan *chan)
{
	struct d40_chan *d40c = container_of(chan, struct d40_chan, chan);
	unsigned long flags;

	if (d40c->phy_chan == NULL) {
		dev_err(&d40c->chan.dev->device,
			"[%s] Channel is not allocated!\n", __func__);
		return;
	}

	spin_lock_irqsave(&d40c->lock, flags);

	/* Busy means that pending jobs are already being processed */
	if (!d40c->busy)
		(void) d40_queue_start(d40c);

	spin_unlock_irqrestore(&d40c->lock, flags);
}
static int d40_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd,
		       unsigned long arg)
{
	unsigned long flags;
	struct d40_chan *d40c = container_of(chan, struct d40_chan, chan);

	if (d40c->phy_chan == NULL) {
		dev_err(&d40c->chan.dev->device,
			"[%s] Channel is not allocated!\n", __func__);
		return -EINVAL;
	}

	switch (cmd) {
	case DMA_TERMINATE_ALL:
		spin_lock_irqsave(&d40c->lock, flags);
		d40_term_all(d40c);
		spin_unlock_irqrestore(&d40c->lock, flags);
		return 0;
	case DMA_PAUSE:
		return d40_pause(chan);
	case DMA_RESUME:
		return d40_resume(chan);
	}

	/* Other commands are unimplemented */
	return -ENXIO;
}
/* Initialization functions */

static void __init d40_chan_init(struct d40_base *base, struct dma_device *dma,
				 struct d40_chan *chans, int offset,
				 int num_chans)
{
	int i = 0;
	struct d40_chan *d40c;

	INIT_LIST_HEAD(&dma->channels);

	for (i = offset; i < offset + num_chans; i++) {
		d40c = &chans[i];
		d40c->base = base;
		d40c->chan.device = dma;

		/* Invalidate lcla element */
		d40c->lcla.src_id = -1;
		d40c->lcla.dst_id = -1;

		spin_lock_init(&d40c->lock);

		d40c->log_num = D40_PHY_CHAN;

		INIT_LIST_HEAD(&d40c->active);
		INIT_LIST_HEAD(&d40c->queue);
		INIT_LIST_HEAD(&d40c->client);

		tasklet_init(&d40c->tasklet, dma_tasklet,
			     (unsigned long) d40c);

		list_add_tail(&d40c->chan.device_node,
			      &dma->channels);
	}
}
d40_dmaengine_init(struct d40_base
*base
,
2102 int num_reserved_chans
)
2106 d40_chan_init(base
, &base
->dma_slave
, base
->log_chans
,
2107 0, base
->num_log_chans
);
2109 dma_cap_zero(base
->dma_slave
.cap_mask
);
2110 dma_cap_set(DMA_SLAVE
, base
->dma_slave
.cap_mask
);
2112 base
->dma_slave
.device_alloc_chan_resources
= d40_alloc_chan_resources
;
2113 base
->dma_slave
.device_free_chan_resources
= d40_free_chan_resources
;
2114 base
->dma_slave
.device_prep_dma_memcpy
= d40_prep_memcpy
;
2115 base
->dma_slave
.device_prep_slave_sg
= d40_prep_slave_sg
;
2116 base
->dma_slave
.device_tx_status
= d40_tx_status
;
2117 base
->dma_slave
.device_issue_pending
= d40_issue_pending
;
2118 base
->dma_slave
.device_control
= d40_control
;
2119 base
->dma_slave
.dev
= base
->dev
;
2121 err
= dma_async_device_register(&base
->dma_slave
);
2125 "[%s] Failed to register slave channels\n",
2130 d40_chan_init(base
, &base
->dma_memcpy
, base
->log_chans
,
2131 base
->num_log_chans
, base
->plat_data
->memcpy_len
);
2133 dma_cap_zero(base
->dma_memcpy
.cap_mask
);
2134 dma_cap_set(DMA_MEMCPY
, base
->dma_memcpy
.cap_mask
);
2136 base
->dma_memcpy
.device_alloc_chan_resources
= d40_alloc_chan_resources
;
2137 base
->dma_memcpy
.device_free_chan_resources
= d40_free_chan_resources
;
2138 base
->dma_memcpy
.device_prep_dma_memcpy
= d40_prep_memcpy
;
2139 base
->dma_memcpy
.device_prep_slave_sg
= d40_prep_slave_sg
;
2140 base
->dma_memcpy
.device_tx_status
= d40_tx_status
;
2141 base
->dma_memcpy
.device_issue_pending
= d40_issue_pending
;
2142 base
->dma_memcpy
.device_control
= d40_control
;
2143 base
->dma_memcpy
.dev
= base
->dev
;
2145 * This controller can only access address at even
2146 * 32bit boundaries, i.e. 2^2
2148 base
->dma_memcpy
.copy_align
= 2;
2150 err
= dma_async_device_register(&base
->dma_memcpy
);
2154 "[%s] Failed to regsiter memcpy only channels\n",
2159 d40_chan_init(base
, &base
->dma_both
, base
->phy_chans
,
2160 0, num_reserved_chans
);
2162 dma_cap_zero(base
->dma_both
.cap_mask
);
2163 dma_cap_set(DMA_SLAVE
, base
->dma_both
.cap_mask
);
2164 dma_cap_set(DMA_MEMCPY
, base
->dma_both
.cap_mask
);
2166 base
->dma_both
.device_alloc_chan_resources
= d40_alloc_chan_resources
;
2167 base
->dma_both
.device_free_chan_resources
= d40_free_chan_resources
;
2168 base
->dma_both
.device_prep_dma_memcpy
= d40_prep_memcpy
;
2169 base
->dma_both
.device_prep_slave_sg
= d40_prep_slave_sg
;
2170 base
->dma_both
.device_tx_status
= d40_tx_status
;
2171 base
->dma_both
.device_issue_pending
= d40_issue_pending
;
2172 base
->dma_both
.device_control
= d40_control
;
2173 base
->dma_both
.dev
= base
->dev
;
2174 base
->dma_both
.copy_align
= 2;
2175 err
= dma_async_device_register(&base
->dma_both
);
2179 "[%s] Failed to register logical and physical capable channels\n",
2185 dma_async_device_unregister(&base
->dma_memcpy
);
2187 dma_async_device_unregister(&base
->dma_slave
);
/* Initialization functions. */

static int __init d40_phy_res_init(struct d40_base *base)
{
	int i;
	int num_phy_chans_avail = 0;
	u32 val[2];
	int odd_even_bit = -2;

	val[0] = readl(base->virtbase + D40_DREG_PRSME);
	val[1] = readl(base->virtbase + D40_DREG_PRSMO);

	for (i = 0; i < base->num_phy_chans; i++) {
		base->phy_res[i].num = i;
		odd_even_bit += 2 * ((i % 2) == 0);
		if (((val[i % 2] >> odd_even_bit) & 3) == 1) {
			/* Mark security only channels as occupied */
			base->phy_res[i].allocated_src = D40_ALLOC_PHY;
			base->phy_res[i].allocated_dst = D40_ALLOC_PHY;
		} else {
			base->phy_res[i].allocated_src = D40_ALLOC_FREE;
			base->phy_res[i].allocated_dst = D40_ALLOC_FREE;
			num_phy_chans_avail++;
		}
		spin_lock_init(&base->phy_res[i].lock);
	}
	dev_info(base->dev, "%d of %d physical DMA channels available\n",
		 num_phy_chans_avail, base->num_phy_chans);

	/* Verify settings extended vs standard */
	val[0] = readl(base->virtbase + D40_DREG_PRTYP);

	for (i = 0; i < base->num_phy_chans; i++) {

		if (base->phy_res[i].allocated_src == D40_ALLOC_FREE &&
		    (val[0] & 0x3) != 1)
			dev_info(base->dev,
				 "[%s] INFO: channel %d is misconfigured (%d)\n",
				 __func__, i, val[0] & 0x3);

		val[0] = val[0] >> 2;
	}

	return num_phy_chans_avail;
}
static struct d40_base * __init d40_hw_detect_init(struct platform_device *pdev)
{
	static const struct d40_reg_val dma_id_regs[] = {
		{ .reg = D40_DREG_PERIPHID0, .val = 0x0040},
		{ .reg = D40_DREG_PERIPHID1, .val = 0x0000},
		/*
		 * D40_DREG_PERIPHID2 Depends on HW revision:
		 * MOP500/HREF ED has 0x0008,
		 * HREF V1 has 0x0028
		 */
		{ .reg = D40_DREG_PERIPHID3, .val = 0x0000},

		{ .reg = D40_DREG_CELLID0, .val = 0x000d},
		{ .reg = D40_DREG_CELLID1, .val = 0x00f0},
		{ .reg = D40_DREG_CELLID2, .val = 0x0005},
		{ .reg = D40_DREG_CELLID3, .val = 0x00b1}
	};
	struct stedma40_platform_data *plat_data;
	struct clk *clk = NULL;
	void __iomem *virtbase = NULL;
	struct resource *res = NULL;
	struct d40_base *base = NULL;
	int num_log_chans = 0;
	int num_phy_chans;
	int i;

	clk = clk_get(&pdev->dev, NULL);

	if (IS_ERR(clk)) {
		dev_err(&pdev->dev, "[%s] No matching clock found\n",
			__func__);
		goto failure;
	}

	clk_enable(clk);

	/* Get IO for DMAC base address */
	res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "base");
	if (!res)
		goto failure;

	if (request_mem_region(res->start, resource_size(res),
			       D40_NAME " I/O base") == NULL)
		goto failure;

	virtbase = ioremap(res->start, resource_size(res));
	if (!virtbase)
		goto failure;

	/* HW version check */
	for (i = 0; i < ARRAY_SIZE(dma_id_regs); i++) {
		if (dma_id_regs[i].val !=
		    readl(virtbase + dma_id_regs[i].reg)) {
			dev_err(&pdev->dev,
				"[%s] Unknown hardware! Expected 0x%x at 0x%x but got 0x%x\n",
				__func__,
				dma_id_regs[i].val,
				dma_id_regs[i].reg,
				readl(virtbase + dma_id_regs[i].reg));
			goto failure;
		}
	}

	i = readl(virtbase + D40_DREG_PERIPHID2);

	if ((i & 0xf) != D40_PERIPHID2_DESIGNER) {
		dev_err(&pdev->dev,
			"[%s] Unknown designer! Got %x wanted %x\n",
			__func__, i & 0xf, D40_PERIPHID2_DESIGNER);
		goto failure;
	}

	/* The number of physical channels on this HW */
	num_phy_chans = 4 * (readl(virtbase + D40_DREG_ICFG) & 0x7) + 4;

	dev_info(&pdev->dev, "hardware revision: %d @ 0x%x\n",
		 (i >> 4) & 0xf, res->start);

	plat_data = pdev->dev.platform_data;

	/* Count the number of logical channels in use */
	for (i = 0; i < plat_data->dev_len; i++)
		if (plat_data->dev_rx[i] != 0)
			num_log_chans++;

	for (i = 0; i < plat_data->dev_len; i++)
		if (plat_data->dev_tx[i] != 0)
			num_log_chans++;

	base = kzalloc(ALIGN(sizeof(struct d40_base), 4) +
		       (num_phy_chans + num_log_chans + plat_data->memcpy_len) *
		       sizeof(struct d40_chan), GFP_KERNEL);

	if (base == NULL) {
		dev_err(&pdev->dev, "[%s] Out of memory\n", __func__);
		goto failure;
	}

	base->clk = clk;
	base->num_phy_chans = num_phy_chans;
	base->num_log_chans = num_log_chans;
	base->phy_start = res->start;
	base->phy_size = resource_size(res);
	base->virtbase = virtbase;
	base->plat_data = plat_data;
	base->dev = &pdev->dev;
	base->phy_chans = ((void *)base) + ALIGN(sizeof(struct d40_base), 4);
	base->log_chans = &base->phy_chans[num_phy_chans];

	base->phy_res = kzalloc(num_phy_chans * sizeof(struct d40_phy_res),
				GFP_KERNEL);
	if (!base->phy_res)
		goto failure;

	base->lookup_phy_chans = kzalloc(num_phy_chans *
					 sizeof(struct d40_chan *),
					 GFP_KERNEL);
	if (!base->lookup_phy_chans)
		goto failure;

	if (num_log_chans + plat_data->memcpy_len) {
		/*
		 * The max number of logical channels are event lines for all
		 * src devices and dst devices
		 */
		base->lookup_log_chans = kzalloc(plat_data->dev_len * 2 *
						 sizeof(struct d40_chan *),
						 GFP_KERNEL);
		if (!base->lookup_log_chans)
			goto failure;
	}
	base->lcla_pool.alloc_map = kzalloc(num_phy_chans * sizeof(u32),
					    GFP_KERNEL);
	if (!base->lcla_pool.alloc_map)
		goto failure;

	base->desc_slab = kmem_cache_create(D40_NAME, sizeof(struct d40_desc),
					    0, SLAB_HWCACHE_ALIGN,
					    NULL);
	if (base->desc_slab == NULL)
		goto failure;

	return base;

failure:
	if (clk) {
		clk_disable(clk);
		clk_put(clk);
	}
	if (virtbase)
		iounmap(virtbase);
	if (res)
		release_mem_region(res->start,
				   resource_size(res));

	if (base) {
		kfree(base->lcla_pool.alloc_map);
		kfree(base->lookup_log_chans);
		kfree(base->lookup_phy_chans);
		kfree(base->phy_res);
		kfree(base);
	}

	return NULL;
}
static void __init d40_hw_init(struct d40_base *base)
{

	static const struct d40_reg_val dma_init_reg[] = {
		/* Clock every part of the DMA block from start */
		{ .reg = D40_DREG_GCC,    .val = 0x0000ff01},

		/* Interrupts on all logical channels */
		{ .reg = D40_DREG_LCMIS0, .val = 0xFFFFFFFF},
		{ .reg = D40_DREG_LCMIS1, .val = 0xFFFFFFFF},
		{ .reg = D40_DREG_LCMIS2, .val = 0xFFFFFFFF},
		{ .reg = D40_DREG_LCMIS3, .val = 0xFFFFFFFF},
		{ .reg = D40_DREG_LCICR0, .val = 0xFFFFFFFF},
		{ .reg = D40_DREG_LCICR1, .val = 0xFFFFFFFF},
		{ .reg = D40_DREG_LCICR2, .val = 0xFFFFFFFF},
		{ .reg = D40_DREG_LCICR3, .val = 0xFFFFFFFF},
		{ .reg = D40_DREG_LCTIS0, .val = 0xFFFFFFFF},
		{ .reg = D40_DREG_LCTIS1, .val = 0xFFFFFFFF},
		{ .reg = D40_DREG_LCTIS2, .val = 0xFFFFFFFF},
		{ .reg = D40_DREG_LCTIS3, .val = 0xFFFFFFFF}
	};
	int i;
	u32 prmseo[2] = {0, 0};
	u32 activeo[2] = {0xFFFFFFFF, 0xFFFFFFFF};
	u32 pcmis = 0;
	u32 pcicr = 0;

	for (i = 0; i < ARRAY_SIZE(dma_init_reg); i++)
		writel(dma_init_reg[i].val,
		       base->virtbase + dma_init_reg[i].reg);

	/* Configure all our dma channels to default settings */
	for (i = 0; i < base->num_phy_chans; i++) {

		activeo[i % 2] = activeo[i % 2] << 2;

		if (base->phy_res[base->num_phy_chans - i - 1].allocated_src
		    == D40_ALLOC_PHY) {
			activeo[i % 2] |= 3;
			continue;
		}

		/* Enable interrupt # */
		pcmis = (pcmis << 1) | 1;

		/* Clear interrupt # */
		pcicr = (pcicr << 1) | 1;

		/* Set channel to physical mode */
		prmseo[i % 2] = prmseo[i % 2] << 2;
		prmseo[i % 2] |= 1;
	}

	writel(prmseo[1], base->virtbase + D40_DREG_PRMSE);
	writel(prmseo[0], base->virtbase + D40_DREG_PRMSO);
	writel(activeo[1], base->virtbase + D40_DREG_ACTIVE);
	writel(activeo[0], base->virtbase + D40_DREG_ACTIVO);

	/* Write which interrupt to enable */
	writel(pcmis, base->virtbase + D40_DREG_PCMIS);

	/* Write which interrupt to clear */
	writel(pcicr, base->virtbase + D40_DREG_PCICR);
}
static int __init d40_probe(struct platform_device *pdev)
{
	int err;
	int ret = -ENOENT;
	struct d40_base *base;
	struct resource *res = NULL;
	int num_reserved_chans;
	u32 val;

	base = d40_hw_detect_init(pdev);

	if (!base)
		goto failure;

	num_reserved_chans = d40_phy_res_init(base);

	platform_set_drvdata(pdev, base);

	spin_lock_init(&base->interrupt_lock);
	spin_lock_init(&base->execmd_lock);

	/* Get IO for logical channel parameter address */
	res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "lcpa");
	if (!res) {
		ret = -ENOENT;
		dev_err(&pdev->dev,
			"[%s] No \"lcpa\" memory resource\n",
			__func__);
		goto failure;
	}
	base->lcpa_size = resource_size(res);
	base->phy_lcpa = res->start;

	if (request_mem_region(res->start, resource_size(res),
			       D40_NAME " I/O lcpa") == NULL) {
		ret = -EBUSY;
		dev_err(&pdev->dev,
			"[%s] Failed to request LCPA region 0x%x-0x%x\n",
			__func__, res->start, res->end);
		goto failure;
	}

	/* We make use of ESRAM memory for this. */
	val = readl(base->virtbase + D40_DREG_LCPA);
	if (res->start != val && val != 0) {
		dev_warn(&pdev->dev,
			 "[%s] Mismatch LCPA dma 0x%x, def 0x%x\n",
			 __func__, val, res->start);
	} else
		writel(res->start, base->virtbase + D40_DREG_LCPA);

	base->lcpa_base = ioremap(res->start, resource_size(res));
	if (!base->lcpa_base) {
		ret = -ENOMEM;
		dev_err(&pdev->dev,
			"[%s] Failed to ioremap LCPA region\n",
			__func__);
		goto failure;
	}
	/* Get IO for logical channel link address */
	res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "lcla");
	if (!res) {
		ret = -ENOENT;
		dev_err(&pdev->dev,
			"[%s] No \"lcla\" resource defined\n",
			__func__);
		goto failure;
	}

	base->lcla_pool.base_size = resource_size(res);
	base->lcla_pool.phy = res->start;

	if (request_mem_region(res->start, resource_size(res),
			       D40_NAME " I/O lcla") == NULL) {
		ret = -EBUSY;
		dev_err(&pdev->dev,
			"[%s] Failed to request LCLA region 0x%x-0x%x\n",
			__func__, res->start, res->end);
		goto failure;
	}
	val = readl(base->virtbase + D40_DREG_LCLA);
	if (res->start != val && val != 0) {
		dev_warn(&pdev->dev,
			 "[%s] Mismatch LCLA dma 0x%x, def 0x%x\n",
			 __func__, val, res->start);
	} else
		writel(res->start, base->virtbase + D40_DREG_LCLA);

	base->lcla_pool.base = ioremap(res->start, resource_size(res));
	if (!base->lcla_pool.base) {
		ret = -ENOMEM;
		dev_err(&pdev->dev,
			"[%s] Failed to ioremap LCLA 0x%x-0x%x\n",
			__func__, res->start, res->end);
		goto failure;
	}

	spin_lock_init(&base->lcla_pool.lock);

	base->lcla_pool.num_blocks = base->num_phy_chans;

	base->irq = platform_get_irq(pdev, 0);

	ret = request_irq(base->irq, d40_handle_interrupt, 0, D40_NAME, base);

	if (ret) {
		dev_err(&pdev->dev, "[%s] No IRQ defined\n", __func__);
		goto failure;
	}

	err = d40_dmaengine_init(base, num_reserved_chans);
	if (err)
		goto failure;

	d40_hw_init(base);

	dev_info(base->dev, "initialized\n");
	return 0;

failure:
	if (base) {
		if (base->desc_slab)
			kmem_cache_destroy(base->desc_slab);
		if (base->virtbase)
			iounmap(base->virtbase);
		if (base->lcla_pool.phy)
			release_mem_region(base->lcla_pool.phy,
					   base->lcla_pool.base_size);
		if (base->phy_lcpa)
			release_mem_region(base->phy_lcpa,
					   base->lcpa_size);
		if (base->phy_start)
			release_mem_region(base->phy_start,
					   base->phy_size);
		if (base->clk) {
			clk_disable(base->clk);
			clk_put(base->clk);
		}

		kfree(base->lcla_pool.alloc_map);
		kfree(base->lookup_log_chans);
		kfree(base->lookup_phy_chans);
		kfree(base->phy_res);
		kfree(base);
	}

	dev_err(&pdev->dev, "[%s] probe failed\n", __func__);
	return ret;
}
static struct platform_driver d40_driver = {
	.driver = {
		.owner = THIS_MODULE,
		.name  = D40_NAME,
	},
};

int __init stedma40_init(void)
{
	return platform_driver_probe(&d40_driver, d40_probe);
}

arch_initcall(stedma40_init);