/*
 * drivers/dma/ste_dma40.c
 *
 * Copyright (C) ST-Ericsson 2007-2010
 * License terms: GNU General Public License (GPL) version 2
 * Author: Per Friden <per.friden@stericsson.com>
 * Author: Jonas Aaberg <jonas.aberg@stericsson.com>
 */

#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/dmaengine.h>
#include <linux/platform_device.h>
#include <linux/clk.h>
#include <linux/delay.h>

#include <plat/ste_dma40.h>

#include "ste_dma40_ll.h"

#define D40_NAME "dma40"

#define D40_PHY_CHAN -1

/* For masking out/in 2 bit channel positions */
#define D40_CHAN_POS(chan)  (2 * (chan / 2))
#define D40_CHAN_POS_MASK(chan) (0x3 << D40_CHAN_POS(chan))
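
/*
 * Each channel occupies one 2-bit field: even-numbered channels live in
 * the "E" register of a pair and odd-numbered ones in the "O" register,
 * both at bit position D40_CHAN_POS(chan). For example, channel 5 gets
 * D40_CHAN_POS(5) == 4 and D40_CHAN_POS_MASK(5) == 0x30.
 */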

/* Maximum iterations taken before giving up suspending a channel */
#define D40_SUSPEND_MAX_IT 500

/* Hardware requirement on LCLA alignment */
#define LCLA_ALIGNMENT 0x40000
/* Attempts before giving up trying to get pages that are aligned */
#define MAX_LCLA_ALLOC_ATTEMPTS 256

/* Bit markings for allocation map */
#define D40_ALLOC_FREE		(1 << 31)
#define D40_ALLOC_PHY		(1 << 30)
#define D40_ALLOC_LOG_FREE	0

/* Hardware designer of the block */
#define D40_PERIPHID2_DESIGNER 0x8

/*
 * enum d40_command - The different commands and/or statuses.
 *
 * @D40_DMA_STOP: DMA channel command STOP or status STOPPED,
 * @D40_DMA_RUN: The DMA channel is RUNNING or the command RUN.
 * @D40_DMA_SUSPEND_REQ: Request the DMA to SUSPEND as soon as possible.
 * @D40_DMA_SUSPENDED: The DMA channel is SUSPENDED.
 */
enum d40_command {
	D40_DMA_STOP		= 0,
	D40_DMA_RUN		= 1,
	D40_DMA_SUSPEND_REQ	= 2,
	D40_DMA_SUSPENDED	= 3
};
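
/*
 * These four values are what the hardware reads and writes in the 2-bit
 * per-channel fields of the D40_DREG_ACTIVE/ACTIVO registers, which is
 * why the same enum doubles as both command and status (see
 * d40_channel_execute_command() below).
 */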

/**
 * struct d40_lli_pool - Structure for keeping LLIs in memory
 *
 * @base: Pointer to memory area when the pre_alloc_lli's are not large
 * enough, i.e. bigger than the most common case, 1 dst and 1 src. NULL if
 * pre_alloc_lli is used.
 * @size: The size in bytes of the memory at base or the size of pre_alloc_lli.
 * @pre_alloc_lli: Pre allocated area for the most common case of transfers,
 * one buffer to one buffer.
 */
struct d40_lli_pool {
	void	*base;
	int	 size;
	/* Space for dst and src, plus an extra for padding */
	u8	 pre_alloc_lli[3 * sizeof(struct d40_phy_lli)];
};
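
/*
 * The pre_alloc_lli area covers the common one-src/one-dst job without a
 * kmalloc(); d40_pool_lli_alloc() only falls back to the heap when a
 * descriptor needs more LLIs than fit here.
 */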

/**
 * struct d40_desc - A descriptor is one DMA job.
 *
 * @lli_phy: LLI settings for physical channel. Both src and dst
 * point into the lli_pool, to base if lli_len > 1 or to pre_alloc_lli if
 * lli_len equals one.
 * @lli_log: Same as above but for logical channels.
 * @lli_pool: The pool with two entries pre-allocated.
 * @lli_len: Number of llis of current descriptor.
 * @lli_count: Number of transferred llis.
 * @lli_tx_len: Max number of LLIs per transfer, there can be
 * many transfers for one descriptor.
 * @txd: DMA engine struct. Used among other things for communication
 * with the DMA framework.
 * @node: List entry.
 * @dir: The transfer direction of this job.
 * @is_in_client_list: true if the client owns this descriptor.
 *
 * This descriptor is used for both logical and physical transfers.
 */
struct d40_desc {
	/* LLI physical */
	struct d40_phy_lli_bidir	 lli_phy;
	/* LLI logical */
	struct d40_log_lli_bidir	 lli_log;

	struct d40_lli_pool		 lli_pool;
	int				 lli_len;
	int				 lli_count;
	u32				 lli_tx_len;

	struct dma_async_tx_descriptor	 txd;
	struct list_head		 node;

	enum dma_data_direction		 dir;
	bool				 is_in_client_list;
};

/**
 * struct d40_lcla_pool - LCLA pool settings and data.
 *
 * @base: The virtual address of LCLA. 18 bit aligned.
 * @base_unaligned: The original kmalloc pointer, if kmalloc is used.
 * This pointer is only there for clean-up on error.
 * @pages: The number of pages needed for all physical channels.
 * Only used later for clean-up on error.
 * @lock: Lock to protect the content in this struct.
 * @alloc_map: Bitmap mapping between physical channel and LCLA entries.
 * @num_blocks: The number of entries of alloc_map. Equals the
 * number of physical channels.
 */
struct d40_lcla_pool {
	void		*base;
	void		*base_unaligned;
	int		 pages;
	spinlock_t	 lock;
	u32		*alloc_map;
	int		 num_blocks;
};

/**
 * struct d40_phy_res - struct for handling eventlines mapped to physical
 * channels
 *
 * @lock: A lock protecting this entity.
 * @num: The physical channel number of this entity.
 * @allocated_src: Bit mapped to show which src event lines are mapped to
 * this physical channel. Can also be free or physically allocated.
 * @allocated_dst: Same as for src but is dst.
 * allocated_dst and allocated_src use the D40_ALLOC* defines as well as
 * event line numbers. allocated_src and allocated_dst cannot both be
 * allocated to a physical channel, since the interrupt handler would then
 * have no way of figuring out which one the interrupt belongs to.
 */
struct d40_phy_res {
	spinlock_t lock;
	int	   num;
	u32	   allocated_src;
	u32	   allocated_dst;
};

/**
 * struct d40_chan - Struct that describes a channel.
 *
 * @lock: A spinlock to protect this struct.
 * @log_num: The logical number, if any, of this channel.
 * @completed: Starts with 1, after first interrupt it is set to the dma
 * engine's current cookie.
 * @pending_tx: The number of pending transfers. Used between interrupt handler
 * and tasklet.
 * @busy: Set to true when transfer is ongoing on this channel.
 * @phy_chan: Pointer to physical channel which this instance runs on. If this
 * pointer is NULL, then the channel is not allocated.
 * @chan: DMA engine handle.
 * @tasklet: Tasklet that gets scheduled from interrupt context to complete a
 * transfer and call client callback.
 * @client: Client owned descriptor list.
 * @active: Active descriptor.
 * @queue: Queued jobs.
 * @dma_cfg: The client configuration of this dma channel.
 * @base: Pointer to the device instance struct.
 * @src_def_cfg: Default cfg register setting for src.
 * @dst_def_cfg: Default cfg register setting for dst.
 * @log_def: Default logical channel settings.
 * @lcla: Space for one dst src pair for logical channel transfers.
 * @lcpa: Pointer to dst and src lcpa settings.
 *
 * This struct can either "be" a logical or a physical channel.
 */
struct d40_chan {
	spinlock_t			 lock;
	int				 log_num;
	/* ID of the most recent completed transfer */
	int				 completed;
	int				 pending_tx;
	bool				 busy;
	struct d40_phy_res		*phy_chan;
	struct dma_chan			 chan;
	struct tasklet_struct		 tasklet;
	struct list_head		 client;
	struct list_head		 active;
	struct list_head		 queue;
	struct stedma40_chan_cfg	 dma_cfg;
	struct d40_base			*base;
	/* Default register configurations */
	u32				 src_def_cfg;
	u32				 dst_def_cfg;
	struct d40_def_lcsp		 log_def;
	struct d40_lcla_elem		 lcla;
	struct d40_log_lli_full		*lcpa;
	/* Runtime reconfiguration */
	dma_addr_t			runtime_addr;
	enum dma_data_direction		runtime_direction;
};

/**
 * struct d40_base - The big global struct, one for each probe'd instance.
 *
 * @interrupt_lock: Lock used to make sure one interrupt is handled at a time.
 * @execmd_lock: Lock for execute command usage since several channels share
 * the same physical register.
 * @dev: The device structure.
 * @virtbase: The virtual base address of the DMA's registers.
 * @rev: silicon revision detected.
 * @clk: Pointer to the DMA clock structure.
 * @phy_start: Physical memory start of the DMA registers.
 * @phy_size: Size of the DMA register map.
 * @irq: The IRQ number.
 * @num_phy_chans: The number of physical channels. Read from HW. This
 * is the number of available channels for this driver, not counting "Secure
 * mode" allocated physical channels.
 * @num_log_chans: The number of logical channels. Calculated from
 * num_phy_chans.
 * @dma_both: dma_device channels that can do both memcpy and slave transfers.
 * @dma_slave: dma_device channels that can only do slave transfers.
 * @dma_memcpy: dma_device channels that can only do memcpy transfers.
 * @phy_chans: Room for all possible physical channels in system.
 * @log_chans: Room for all possible logical channels in system.
 * @lookup_log_chans: Used to map interrupt number to logical channel. Points
 * to log_chans entries.
 * @lookup_phy_chans: Used to map interrupt number to physical channel. Points
 * to phy_chans entries.
 * @plat_data: Pointer to provided platform_data which is the driver
 * configuration.
 * @phy_res: Vector containing all physical channels.
 * @lcla_pool: lcla pool settings and data.
 * @lcpa_base: The virtual mapped address of LCPA.
 * @phy_lcpa: The physical address of the LCPA.
 * @lcpa_size: The size of the LCPA area.
 * @desc_slab: cache for descriptors.
 */
struct d40_base {
	spinlock_t			  interrupt_lock;
	spinlock_t			  execmd_lock;
	struct device			 *dev;
	void __iomem			 *virtbase;
	u8				  rev:4;
	struct clk			 *clk;
	phys_addr_t			  phy_start;
	resource_size_t			  phy_size;
	int				  irq;
	int				  num_phy_chans;
	int				  num_log_chans;
	struct dma_device		  dma_both;
	struct dma_device		  dma_slave;
	struct dma_device		  dma_memcpy;
	struct d40_chan			 *phy_chans;
	struct d40_chan			 *log_chans;
	struct d40_chan			**lookup_log_chans;
	struct d40_chan			**lookup_phy_chans;
	struct stedma40_platform_data	 *plat_data;
	/* Physical half channels */
	struct d40_phy_res		 *phy_res;
	struct d40_lcla_pool		  lcla_pool;
	void				 *lcpa_base;
	dma_addr_t			  phy_lcpa;
	resource_size_t			  lcpa_size;
	struct kmem_cache		 *desc_slab;
};

/**
 * struct d40_interrupt_lookup - lookup table for interrupt handler
 *
 * @src: Interrupt mask register.
 * @clr: Interrupt clear register.
 * @is_error: true if this is an error interrupt.
 * @offset: start delta in the lookup_log_chans in d40_base. If equals to
 * D40_PHY_CHAN, the lookup_phy_chans shall be used instead.
 */
struct d40_interrupt_lookup {
	u32 src;
	u32 clr;
	bool is_error;
	int offset;
};

/**
 * struct d40_reg_val - simple lookup struct
 *
 * @reg: The register.
 * @val: The value that belongs to the register in reg.
 */
struct d40_reg_val {
	unsigned int reg;
	unsigned int val;
};

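/*
 * Allocates space for lli_len src/dst LLI pairs: the pre-allocated area
 * inside the descriptor for a single pair, or a kmalloc'd buffer
 * (over-allocated by one 'align' so PTR_ALIGN() can round up) for
 * longer jobs.
 */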
static int d40_pool_lli_alloc(struct d40_desc *d40d,
			      int lli_len, bool is_log)
{
	u32 align;
	void *base;

	if (is_log)
		align = sizeof(struct d40_log_lli);
	else
		align = sizeof(struct d40_phy_lli);

	if (lli_len == 1) {
		base = d40d->lli_pool.pre_alloc_lli;
		d40d->lli_pool.size = sizeof(d40d->lli_pool.pre_alloc_lli);
		d40d->lli_pool.base = NULL;
	} else {
		d40d->lli_pool.size = ALIGN(lli_len * 2 * align, align);

		base = kmalloc(d40d->lli_pool.size + align, GFP_NOWAIT);
		d40d->lli_pool.base = base;

		if (d40d->lli_pool.base == NULL)
			return -ENOMEM;
	}

	if (is_log) {
		d40d->lli_log.src = PTR_ALIGN((struct d40_log_lli *) base,
					      align);
		d40d->lli_log.dst = PTR_ALIGN(d40d->lli_log.src + lli_len,
					      align);
	} else {
		d40d->lli_phy.src = PTR_ALIGN((struct d40_phy_lli *)base,
					      align);
		d40d->lli_phy.dst = PTR_ALIGN(d40d->lli_phy.src + lli_len,
					      align);

		d40d->lli_phy.src_addr = virt_to_phys(d40d->lli_phy.src);
		d40d->lli_phy.dst_addr = virt_to_phys(d40d->lli_phy.dst);
	}

	return 0;
}

static void d40_pool_lli_free(struct d40_desc *d40d)
{
	kfree(d40d->lli_pool.base);
	d40d->lli_pool.base = NULL;
	d40d->lli_pool.size = 0;
	d40d->lli_log.src = NULL;
	d40d->lli_log.dst = NULL;
	d40d->lli_phy.src = NULL;
	d40d->lli_phy.dst = NULL;
	d40d->lli_phy.src_addr = 0;
	d40d->lli_phy.dst_addr = 0;
}

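/* Cookies stay positive; the counter skips back to 1 on signed overflow. */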
static dma_cookie_t d40_assign_cookie(struct d40_chan *d40c,
				      struct d40_desc *desc)
{
	dma_cookie_t cookie = d40c->chan.cookie;

	if (++cookie < 0)
		cookie = 1;

	d40c->chan.cookie = cookie;
	desc->txd.cookie = cookie;

	return cookie;
}

static void d40_desc_remove(struct d40_desc *d40d)
{
	list_del(&d40d->node);
}

static struct d40_desc *d40_desc_get(struct d40_chan *d40c)
{
	struct d40_desc *d;
	struct d40_desc *_d;

	if (!list_empty(&d40c->client)) {
		list_for_each_entry_safe(d, _d, &d40c->client, node)
			if (async_tx_test_ack(&d->txd)) {
				d40_pool_lli_free(d);
				d40_desc_remove(d);
				break;
			}
	} else {
		d = kmem_cache_alloc(d40c->base->desc_slab, GFP_NOWAIT);
		if (d != NULL) {
			memset(d, 0, sizeof(struct d40_desc));
			INIT_LIST_HEAD(&d->node);
		}
	}
	return d;
}

static void d40_desc_free(struct d40_chan *d40c, struct d40_desc *d40d)
{
	kmem_cache_free(d40c->base->desc_slab, d40d);
}

static void d40_desc_submit(struct d40_chan *d40c, struct d40_desc *desc)
{
	list_add_tail(&desc->node, &d40c->active);
}

static struct d40_desc *d40_first_active_get(struct d40_chan *d40c)
{
	struct d40_desc *d;

	if (list_empty(&d40c->active))
		return NULL;

	d = list_first_entry(&d40c->active,
			     struct d40_desc,
			     node);
	return d;
}

static void d40_desc_queue(struct d40_chan *d40c, struct d40_desc *desc)
{
	list_add_tail(&desc->node, &d40c->queue);
}

static struct d40_desc *d40_first_queued(struct d40_chan *d40c)
{
	struct d40_desc *d;

	if (list_empty(&d40c->queue))
		return NULL;

	d = list_first_entry(&d40c->queue,
			     struct d40_desc,
			     node);
	return d;
}

/* Support functions for logical channels */

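/*
 * Each physical channel owns a slice of the LCLA area and a per-channel
 * bitmap (lcla_pool.alloc_map); this grabs one free block for src and a
 * second, different one for dst, or fails with -EINVAL if fewer than two
 * blocks are free.
 */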
static int d40_lcla_id_get(struct d40_chan *d40c)
{
	int src_id = 0;
	int dst_id = 0;
	struct d40_log_lli *lcla_lidx_base =
		d40c->base->lcla_pool.base + d40c->phy_chan->num * 1024;
	int i;
	int lli_per_log = d40c->base->plat_data->llis_per_log;
	unsigned long flags;

	if (d40c->lcla.src_id >= 0 && d40c->lcla.dst_id >= 0)
		return 0;

	if (d40c->base->lcla_pool.num_blocks > 32)
		return -EINVAL;

	spin_lock_irqsave(&d40c->base->lcla_pool.lock, flags);

	for (i = 0; i < d40c->base->lcla_pool.num_blocks; i++) {
		if (!(d40c->base->lcla_pool.alloc_map[d40c->phy_chan->num] &
		      (0x1 << i))) {
			d40c->base->lcla_pool.alloc_map[d40c->phy_chan->num] |=
				(0x1 << i);
			break;
		}
	}
	src_id = i;
	if (src_id >= d40c->base->lcla_pool.num_blocks)
		goto err;

	for (; i < d40c->base->lcla_pool.num_blocks; i++) {
		if (!(d40c->base->lcla_pool.alloc_map[d40c->phy_chan->num] &
		      (0x1 << i))) {
			d40c->base->lcla_pool.alloc_map[d40c->phy_chan->num] |=
				(0x1 << i);
			break;
		}
	}

	dst_id = i;
	if (dst_id == src_id)
		goto err;

	d40c->lcla.src_id = src_id;
	d40c->lcla.dst_id = dst_id;
	d40c->lcla.dst = lcla_lidx_base + dst_id * lli_per_log + 1;
	d40c->lcla.src = lcla_lidx_base + src_id * lli_per_log + 1;

	spin_unlock_irqrestore(&d40c->base->lcla_pool.lock, flags);
	return 0;
err:
	spin_unlock_irqrestore(&d40c->base->lcla_pool.lock, flags);
	return -EINVAL;
}

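/*
 * The command register pair (ACTIVE/ACTIVO) is shared by all channels,
 * hence the global execmd_lock. A SUSPEND_REQ is polled for completion
 * up to D40_SUSPEND_MAX_IT times before giving up with -EBUSY.
 */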
static int d40_channel_execute_command(struct d40_chan *d40c,
				       enum d40_command command)
{
	int status, i;
	void __iomem *active_reg;
	int ret = 0;
	unsigned long flags;
	u32 wmask;

	spin_lock_irqsave(&d40c->base->execmd_lock, flags);

	if (d40c->phy_chan->num % 2 == 0)
		active_reg = d40c->base->virtbase + D40_DREG_ACTIVE;
	else
		active_reg = d40c->base->virtbase + D40_DREG_ACTIVO;

	if (command == D40_DMA_SUSPEND_REQ) {
		status = (readl(active_reg) &
			  D40_CHAN_POS_MASK(d40c->phy_chan->num)) >>
			D40_CHAN_POS(d40c->phy_chan->num);

		if (status == D40_DMA_SUSPENDED || status == D40_DMA_STOP)
			goto done;
	}

	wmask = 0xffffffff & ~(D40_CHAN_POS_MASK(d40c->phy_chan->num));
	writel(wmask | (command << D40_CHAN_POS(d40c->phy_chan->num)),
	       active_reg);

	if (command == D40_DMA_SUSPEND_REQ) {

		for (i = 0 ; i < D40_SUSPEND_MAX_IT; i++) {
			status = (readl(active_reg) &
				  D40_CHAN_POS_MASK(d40c->phy_chan->num)) >>
				D40_CHAN_POS(d40c->phy_chan->num);

			cpu_relax();
			/*
			 * Reduce the number of bus accesses while
			 * waiting for the DMA to suspend.
			 */
			udelay(3);

			if (status == D40_DMA_STOP ||
			    status == D40_DMA_SUSPENDED)
				break;
		}

		if (i == D40_SUSPEND_MAX_IT) {
			dev_err(&d40c->chan.dev->device,
				"[%s]: unable to suspend the chl %d (log: %d) status %x\n",
				__func__, d40c->phy_chan->num, d40c->log_num,
				status);
			dump_stack();
			ret = -EBUSY;
		}

	}
done:
	spin_unlock_irqrestore(&d40c->base->execmd_lock, flags);
	return ret;
}

static void d40_term_all(struct d40_chan *d40c)
{
	struct d40_desc *d40d;
	unsigned long flags;

	/* Release active descriptors */
	while ((d40d = d40_first_active_get(d40c))) {
		d40_desc_remove(d40d);

		/* Return desc to free-list */
		d40_desc_free(d40c, d40d);
	}

	/* Release queued descriptors waiting for transfer */
	while ((d40d = d40_first_queued(d40c))) {
		d40_desc_remove(d40d);

		/* Return desc to free-list */
		d40_desc_free(d40c, d40d);
	}

	spin_lock_irqsave(&d40c->base->lcla_pool.lock, flags);

	d40c->base->lcla_pool.alloc_map[d40c->phy_chan->num] &=
		(~(0x1 << d40c->lcla.dst_id));
	d40c->base->lcla_pool.alloc_map[d40c->phy_chan->num] &=
		(~(0x1 << d40c->lcla.src_id));

	d40c->lcla.src_id = -1;
	d40c->lcla.dst_id = -1;

	spin_unlock_irqrestore(&d40c->base->lcla_pool.lock, flags);

	d40c->pending_tx = 0;
	d40c->busy = false;
}

static void d40_config_set_event(struct d40_chan *d40c, bool do_enable)
{
	u32 val;
	unsigned long flags;

	/* Notice, that disable requires the physical channel to be stopped */
	if (do_enable)
		val = D40_ACTIVATE_EVENTLINE;
	else
		val = D40_DEACTIVATE_EVENTLINE;

	spin_lock_irqsave(&d40c->phy_chan->lock, flags);

	/* Enable event line connected to device (or memcpy) */
	if ((d40c->dma_cfg.dir == STEDMA40_PERIPH_TO_MEM) ||
	    (d40c->dma_cfg.dir == STEDMA40_PERIPH_TO_PERIPH)) {
		u32 event = D40_TYPE_TO_EVENT(d40c->dma_cfg.src_dev_type);

		writel((val << D40_EVENTLINE_POS(event)) |
		       ~D40_EVENTLINE_MASK(event),
		       d40c->base->virtbase + D40_DREG_PCBASE +
		       d40c->phy_chan->num * D40_DREG_PCDELTA +
		       D40_CHAN_REG_SSLNK);
	}
	if (d40c->dma_cfg.dir != STEDMA40_PERIPH_TO_MEM) {
		u32 event = D40_TYPE_TO_EVENT(d40c->dma_cfg.dst_dev_type);

		writel((val << D40_EVENTLINE_POS(event)) |
		       ~D40_EVENTLINE_MASK(event),
		       d40c->base->virtbase + D40_DREG_PCBASE +
		       d40c->phy_chan->num * D40_DREG_PCDELTA +
		       D40_CHAN_REG_SDLNK);
	}

	spin_unlock_irqrestore(&d40c->phy_chan->lock, flags);
}

static u32 d40_chan_has_events(struct d40_chan *d40c)
{
	u32 val;

	val = readl(d40c->base->virtbase + D40_DREG_PCBASE +
		    d40c->phy_chan->num * D40_DREG_PCDELTA +
		    D40_CHAN_REG_SSLNK);

	val |= readl(d40c->base->virtbase + D40_DREG_PCBASE +
		     d40c->phy_chan->num * D40_DREG_PCDELTA +
		     D40_CHAN_REG_SDLNK);
	return val;
}

static void d40_config_enable_lidx(struct d40_chan *d40c)
{
	/* Set LIDX for lcla */
	writel((d40c->phy_chan->num << D40_SREG_ELEM_LOG_LIDX_POS) &
	       D40_SREG_ELEM_LOG_LIDX_MASK,
	       d40c->base->virtbase + D40_DREG_PCBASE +
	       d40c->phy_chan->num * D40_DREG_PCDELTA + D40_CHAN_REG_SDELT);

	writel((d40c->phy_chan->num << D40_SREG_ELEM_LOG_LIDX_POS) &
	       D40_SREG_ELEM_LOG_LIDX_MASK,
	       d40c->base->virtbase + D40_DREG_PCBASE +
	       d40c->phy_chan->num * D40_DREG_PCDELTA + D40_CHAN_REG_SSELT);
}

static int d40_config_write(struct d40_chan *d40c)
{
	u32 addr_base;
	u32 var;
	int res;

	res = d40_channel_execute_command(d40c, D40_DMA_SUSPEND_REQ);
	if (res)
		return res;

	/* Odd addresses are even addresses + 4 */
	addr_base = (d40c->phy_chan->num % 2) * 4;
	/* Setup channel mode to logical or physical */
	var = ((u32)(d40c->log_num != D40_PHY_CHAN) + 1) <<
		D40_CHAN_POS(d40c->phy_chan->num);
	writel(var, d40c->base->virtbase + D40_DREG_PRMSE + addr_base);

	/* Setup operational mode option register */
	var = ((d40c->dma_cfg.channel_type >> STEDMA40_INFO_CH_MODE_OPT_POS) &
	       0x3) << D40_CHAN_POS(d40c->phy_chan->num);

	writel(var, d40c->base->virtbase + D40_DREG_PRMOE + addr_base);

	if (d40c->log_num != D40_PHY_CHAN) {
		/* Set default config for CFG reg */
		writel(d40c->src_def_cfg,
		       d40c->base->virtbase + D40_DREG_PCBASE +
		       d40c->phy_chan->num * D40_DREG_PCDELTA +
		       D40_CHAN_REG_SSCFG);
		writel(d40c->dst_def_cfg,
		       d40c->base->virtbase + D40_DREG_PCBASE +
		       d40c->phy_chan->num * D40_DREG_PCDELTA +
		       D40_CHAN_REG_SDCFG);

		d40_config_enable_lidx(d40c);
	}
	return res;
}

static void d40_desc_load(struct d40_chan *d40c, struct d40_desc *d40d)
{
	if (d40d->lli_phy.dst && d40d->lli_phy.src) {
		d40_phy_lli_write(d40c->base->virtbase,
				  d40c->phy_chan->num,
				  d40d->lli_phy.dst,
				  d40d->lli_phy.src);
	} else if (d40d->lli_log.dst && d40d->lli_log.src) {
		struct d40_log_lli *src = d40d->lli_log.src;
		struct d40_log_lli *dst = d40d->lli_log.dst;
		int s;

		src += d40d->lli_count;
		dst += d40d->lli_count;
		s = d40_log_lli_write(d40c->lcpa,
				      d40c->lcla.src, d40c->lcla.dst,
				      dst, src,
				      d40c->base->plat_data->llis_per_log);

		/* If s equals to zero, the job is not linked */
		if (s > 0) {
			(void) dma_map_single(d40c->base->dev, d40c->lcla.src,
					      s * sizeof(struct d40_log_lli),
					      DMA_TO_DEVICE);
			(void) dma_map_single(d40c->base->dev, d40c->lcla.dst,
					      s * sizeof(struct d40_log_lli),
					      DMA_TO_DEVICE);
		}
	}
	d40d->lli_count += d40d->lli_tx_len;
}

static dma_cookie_t d40_tx_submit(struct dma_async_tx_descriptor *tx)
{
	struct d40_chan *d40c = container_of(tx->chan,
					     struct d40_chan,
					     chan);
	struct d40_desc *d40d = container_of(tx, struct d40_desc, txd);
	unsigned long flags;

	spin_lock_irqsave(&d40c->lock, flags);

	tx->cookie = d40_assign_cookie(d40c, d40d);

	d40_desc_queue(d40c, d40d);

	spin_unlock_irqrestore(&d40c->lock, flags);

	return tx->cookie;
}

static int d40_start(struct d40_chan *d40c)
{
	if (d40c->base->rev == 0) {
		int err;

		if (d40c->log_num != D40_PHY_CHAN) {
			err = d40_channel_execute_command(d40c,
							  D40_DMA_SUSPEND_REQ);
			if (err)
				return err;
		}
	}

	if (d40c->log_num != D40_PHY_CHAN)
		d40_config_set_event(d40c, true);

	return d40_channel_execute_command(d40c, D40_DMA_RUN);
}

static struct d40_desc *d40_queue_start(struct d40_chan *d40c)
{
	struct d40_desc *d40d;
	int err;

	/* Start queued jobs, if any */
	d40d = d40_first_queued(d40c);

	if (d40d != NULL) {
		d40c->busy = true;

		/* Remove from queue */
		d40_desc_remove(d40d);

		/* Add to active queue */
		d40_desc_submit(d40c, d40d);

		/* Initiate DMA job */
		d40_desc_load(d40c, d40d);

		/* Start dma job */
		err = d40_start(d40c);

		if (err)
			return NULL;
	}

	return d40d;
}

/* called from interrupt context */
static void dma_tc_handle(struct d40_chan *d40c)
{
	struct d40_desc *d40d;

	if (!d40c->phy_chan)
		return;

	/* Get first active entry from list */
	d40d = d40_first_active_get(d40c);

	if (d40d == NULL)
		return;

	if (d40d->lli_count < d40d->lli_len) {

		d40_desc_load(d40c, d40d);
		/* Start dma job */
		(void) d40_start(d40c);
		return;
	}

	if (d40_queue_start(d40c) == NULL)
		d40c->busy = false;

	d40c->pending_tx++;
	tasklet_schedule(&d40c->tasklet);

}

static void dma_tasklet(unsigned long data)
{
	struct d40_chan *d40c = (struct d40_chan *) data;
	struct d40_desc *d40d_fin;
	unsigned long flags;
	dma_async_tx_callback callback;
	void *callback_param;

	spin_lock_irqsave(&d40c->lock, flags);

	/* Get first active entry from list */
	d40d_fin = d40_first_active_get(d40c);

	if (d40d_fin == NULL)
		goto err;

	d40c->completed = d40d_fin->txd.cookie;

	/*
	 * If terminating a channel pending_tx is set to zero.
	 * This prevents any finished active jobs to return to the client.
	 */
	if (d40c->pending_tx == 0) {
		spin_unlock_irqrestore(&d40c->lock, flags);
		return;
	}

	/* Callback to client */
	callback = d40d_fin->txd.callback;
	callback_param = d40d_fin->txd.callback_param;

	if (async_tx_test_ack(&d40d_fin->txd)) {
		d40_pool_lli_free(d40d_fin);
		d40_desc_remove(d40d_fin);
		/* Return desc to free-list */
		d40_desc_free(d40c, d40d_fin);
	} else {
		if (!d40d_fin->is_in_client_list) {
			d40_desc_remove(d40d_fin);
			list_add_tail(&d40d_fin->node, &d40c->client);
			d40d_fin->is_in_client_list = true;
		}
	}

	d40c->pending_tx--;

	if (d40c->pending_tx)
		tasklet_schedule(&d40c->tasklet);

	spin_unlock_irqrestore(&d40c->lock, flags);

	if (callback)
		callback(callback_param);

	return;

 err:
	/* Rescue manoeuvre if receiving double interrupts */
	if (d40c->pending_tx > 0)
		d40c->pending_tx--;
	spin_unlock_irqrestore(&d40c->lock, flags);
}

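/*
 * One status/clear register pair per 32 channels; the il[] table below
 * maps each register to its slice of lookup_log_chans (or, for
 * offset == D40_PHY_CHAN, to lookup_phy_chans) so that all ten sources
 * can be scanned as a single long bitmap with find_next_bit().
 */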
static irqreturn_t d40_handle_interrupt(int irq, void *data)
{
	static const struct d40_interrupt_lookup il[] = {
		{D40_DREG_LCTIS0, D40_DREG_LCICR0, false,  0},
		{D40_DREG_LCTIS1, D40_DREG_LCICR1, false, 32},
		{D40_DREG_LCTIS2, D40_DREG_LCICR2, false, 64},
		{D40_DREG_LCTIS3, D40_DREG_LCICR3, false, 96},
		{D40_DREG_LCEIS0, D40_DREG_LCICR0, true,   0},
		{D40_DREG_LCEIS1, D40_DREG_LCICR1, true,  32},
		{D40_DREG_LCEIS2, D40_DREG_LCICR2, true,  64},
		{D40_DREG_LCEIS3, D40_DREG_LCICR3, true,  96},
		{D40_DREG_PCTIS,  D40_DREG_PCICR,  false, D40_PHY_CHAN},
		{D40_DREG_PCEIS,  D40_DREG_PCICR,  true,  D40_PHY_CHAN},
	};

	int i;
	u32 regs[ARRAY_SIZE(il)];
	u32 tmp;
	u32 idx;
	u32 row;
	long chan = -1;
	struct d40_chan *d40c;
	unsigned long flags;
	struct d40_base *base = data;

	spin_lock_irqsave(&base->interrupt_lock, flags);

	/* Read interrupt status of both logical and physical channels */
	for (i = 0; i < ARRAY_SIZE(il); i++)
		regs[i] = readl(base->virtbase + il[i].src);

	for (;;) {

		chan = find_next_bit((unsigned long *)regs,
				     BITS_PER_LONG * ARRAY_SIZE(il), chan + 1);

		/* No more set bits found? */
		if (chan == BITS_PER_LONG * ARRAY_SIZE(il))
			break;

		row = chan / BITS_PER_LONG;
		idx = chan & (BITS_PER_LONG - 1);

		/* ACK interrupt */
		tmp = readl(base->virtbase + il[row].clr);
		tmp |= 1 << idx;
		writel(tmp, base->virtbase + il[row].clr);

		if (il[row].offset == D40_PHY_CHAN)
			d40c = base->lookup_phy_chans[idx];
		else
			d40c = base->lookup_log_chans[il[row].offset + idx];
		spin_lock(&d40c->lock);

		if (!il[row].is_error)
			dma_tc_handle(d40c);
		else
			dev_err(base->dev,
				"[%s] IRQ chan: %ld offset %d idx %d\n",
				__func__, chan, il[row].offset, idx);

		spin_unlock(&d40c->lock);
	}

	spin_unlock_irqrestore(&base->interrupt_lock, flags);

	return IRQ_HANDLED;
}

static int d40_validate_conf(struct d40_chan *d40c,
			     struct stedma40_chan_cfg *conf)
{
	int res = 0;
	u32 dst_event_group = D40_TYPE_TO_GROUP(conf->dst_dev_type);
	u32 src_event_group = D40_TYPE_TO_GROUP(conf->src_dev_type);
	bool is_log = (conf->channel_type & STEDMA40_CHANNEL_IN_OPER_MODE)
		== STEDMA40_CHANNEL_IN_LOG_MODE;

	if (!conf->dir) {
		dev_err(&d40c->chan.dev->device, "[%s] Invalid direction.\n",
			__func__);
		res = -EINVAL;
	}

	if (conf->dst_dev_type != STEDMA40_DEV_DST_MEMORY &&
	    d40c->base->plat_data->dev_tx[conf->dst_dev_type] == 0 &&
	    d40c->runtime_addr == 0) {

		dev_err(&d40c->chan.dev->device,
			"[%s] Invalid TX channel address (%d)\n",
			__func__, conf->dst_dev_type);
		res = -EINVAL;
	}

	if (conf->src_dev_type != STEDMA40_DEV_SRC_MEMORY &&
	    d40c->base->plat_data->dev_rx[conf->src_dev_type] == 0 &&
	    d40c->runtime_addr == 0) {
		dev_err(&d40c->chan.dev->device,
			"[%s] Invalid RX channel address (%d)\n",
			__func__, conf->src_dev_type);
		res = -EINVAL;
	}

	if (conf->dir == STEDMA40_MEM_TO_PERIPH &&
	    dst_event_group == STEDMA40_DEV_DST_MEMORY) {
		dev_err(&d40c->chan.dev->device, "[%s] Invalid dst\n",
			__func__);
		res = -EINVAL;
	}

	if (conf->dir == STEDMA40_PERIPH_TO_MEM &&
	    src_event_group == STEDMA40_DEV_SRC_MEMORY) {
		dev_err(&d40c->chan.dev->device, "[%s] Invalid src\n",
			__func__);
		res = -EINVAL;
	}

	if (src_event_group == STEDMA40_DEV_SRC_MEMORY &&
	    dst_event_group == STEDMA40_DEV_DST_MEMORY && is_log) {
		dev_err(&d40c->chan.dev->device,
			"[%s] No event line\n", __func__);
		res = -EINVAL;
	}

	if (conf->dir == STEDMA40_PERIPH_TO_PERIPH &&
	    (src_event_group != dst_event_group)) {
		dev_err(&d40c->chan.dev->device,
			"[%s] Invalid event group\n", __func__);
		res = -EINVAL;
	}

	if (conf->dir == STEDMA40_PERIPH_TO_PERIPH) {
		/*
		 * DMAC HW supports it. Will be added to this driver,
		 * in case any dma client requires it.
		 */
		dev_err(&d40c->chan.dev->device,
			"[%s] periph to periph not supported\n",
			__func__);
		res = -EINVAL;
	}

	return res;
}

static bool d40_alloc_mask_set(struct d40_phy_res *phy, bool is_src,
			       int log_event_line, bool is_log)
{
	unsigned long flags;
	spin_lock_irqsave(&phy->lock, flags);
	if (!is_log) {
		/* Physical interrupts are masked per physical full channel */
		if (phy->allocated_src == D40_ALLOC_FREE &&
		    phy->allocated_dst == D40_ALLOC_FREE) {
			phy->allocated_dst = D40_ALLOC_PHY;
			phy->allocated_src = D40_ALLOC_PHY;
			goto found;
		} else
			goto not_found;
	}

	/* Logical channel */
	if (is_src) {
		if (phy->allocated_src == D40_ALLOC_PHY)
			goto not_found;

		if (phy->allocated_src == D40_ALLOC_FREE)
			phy->allocated_src = D40_ALLOC_LOG_FREE;

		if (!(phy->allocated_src & (1 << log_event_line))) {
			phy->allocated_src |= 1 << log_event_line;
			goto found;
		} else
			goto not_found;
	} else {
		if (phy->allocated_dst == D40_ALLOC_PHY)
			goto not_found;

		if (phy->allocated_dst == D40_ALLOC_FREE)
			phy->allocated_dst = D40_ALLOC_LOG_FREE;

		if (!(phy->allocated_dst & (1 << log_event_line))) {
			phy->allocated_dst |= 1 << log_event_line;
			goto found;
		} else
			goto not_found;
	}

found:
	spin_unlock_irqrestore(&phy->lock, flags);
	return true;
not_found:
	spin_unlock_irqrestore(&phy->lock, flags);
	return false;
}

static bool d40_alloc_mask_free(struct d40_phy_res *phy, bool is_src,
				int log_event_line)
{
	unsigned long flags;
	bool is_free = false;

	spin_lock_irqsave(&phy->lock, flags);
	if (!log_event_line) {
		/* Physical interrupts are masked per physical full channel */
		phy->allocated_dst = D40_ALLOC_FREE;
		phy->allocated_src = D40_ALLOC_FREE;
		is_free = true;
		goto out;
	}

	/* Logical channel */
	if (is_src) {
		phy->allocated_src &= ~(1 << log_event_line);
		if (phy->allocated_src == D40_ALLOC_LOG_FREE)
			phy->allocated_src = D40_ALLOC_FREE;
	} else {
		phy->allocated_dst &= ~(1 << log_event_line);
		if (phy->allocated_dst == D40_ALLOC_LOG_FREE)
			phy->allocated_dst = D40_ALLOC_FREE;
	}

	is_free = ((phy->allocated_src | phy->allocated_dst) ==
		   D40_ALLOC_FREE);

out:
	spin_unlock_irqrestore(&phy->lock, flags);

	return is_free;
}

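/*
 * Allocation policy: memcpy jobs may take any free physical channel,
 * while event-line transfers are confined to the two physical channels
 * of their event group. Logical src channels are handed out from the
 * lower channel of the pair upwards and dst channels from the upper one
 * downwards, to spread the load.
 */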
static int d40_allocate_channel(struct d40_chan *d40c)
{
	int dev_type;
	int event_group;
	int event_line;
	struct d40_phy_res *phys;
	int i;
	int j;
	int log_num;
	bool is_src;
	bool is_log = (d40c->dma_cfg.channel_type &
		       STEDMA40_CHANNEL_IN_OPER_MODE)
		== STEDMA40_CHANNEL_IN_LOG_MODE;

	phys = d40c->base->phy_res;

	if (d40c->dma_cfg.dir == STEDMA40_PERIPH_TO_MEM) {
		dev_type = d40c->dma_cfg.src_dev_type;
		log_num = 2 * dev_type;
		is_src = true;
	} else if (d40c->dma_cfg.dir == STEDMA40_MEM_TO_PERIPH ||
		   d40c->dma_cfg.dir == STEDMA40_MEM_TO_MEM) {
		/* dst event lines are used for logical memcpy */
		dev_type = d40c->dma_cfg.dst_dev_type;
		log_num = 2 * dev_type + 1;
		is_src = false;
	} else
		return -EINVAL;

	event_group = D40_TYPE_TO_GROUP(dev_type);
	event_line = D40_TYPE_TO_EVENT(dev_type);

	if (!is_log) {
		if (d40c->dma_cfg.dir == STEDMA40_MEM_TO_MEM) {
			/* Find physical half channel */
			for (i = 0; i < d40c->base->num_phy_chans; i++) {

				if (d40_alloc_mask_set(&phys[i], is_src,
						       0, is_log))
					goto found_phy;
			}
		} else
			for (j = 0; j < d40c->base->num_phy_chans; j += 8) {
				int phy_num = j + event_group * 2;
				for (i = phy_num; i < phy_num + 2; i++) {
					if (d40_alloc_mask_set(&phys[i],
							       is_src,
							       0,
							       is_log))
						goto found_phy;
				}
			}
		return -EINVAL;
found_phy:
		d40c->phy_chan = &phys[i];
		d40c->log_num = D40_PHY_CHAN;
		goto out;
	}

	/* Find logical channel */
	for (j = 0; j < d40c->base->num_phy_chans; j += 8) {
		int phy_num = j + event_group * 2;
		/*
		 * Spread logical channels across all available physical rather
		 * than pack every logical channel at the first available phy
		 * channels.
		 */
		if (is_src) {
			for (i = phy_num; i < phy_num + 2; i++) {
				if (d40_alloc_mask_set(&phys[i], is_src,
						       event_line, is_log))
					goto found_log;
			}
		} else {
			for (i = phy_num + 1; i >= phy_num; i--) {
				if (d40_alloc_mask_set(&phys[i], is_src,
						       event_line, is_log))
					goto found_log;
			}
		}
	}
	return -EINVAL;

found_log:
	d40c->phy_chan = &phys[i];
	d40c->log_num = log_num;
out:

	if (is_log)
		d40c->base->lookup_log_chans[d40c->log_num] = d40c;
	else
		d40c->base->lookup_phy_chans[d40c->phy_chan->num] = d40c;

	return 0;

}

static int d40_config_memcpy(struct d40_chan *d40c)
{
	dma_cap_mask_t cap = d40c->chan.device->cap_mask;

	if (dma_has_cap(DMA_MEMCPY, cap) && !dma_has_cap(DMA_SLAVE, cap)) {
		d40c->dma_cfg = *d40c->base->plat_data->memcpy_conf_log;
		d40c->dma_cfg.src_dev_type = STEDMA40_DEV_SRC_MEMORY;
		d40c->dma_cfg.dst_dev_type = d40c->base->plat_data->
			memcpy[d40c->chan.chan_id];

	} else if (dma_has_cap(DMA_MEMCPY, cap) &&
		   dma_has_cap(DMA_SLAVE, cap)) {
		d40c->dma_cfg = *d40c->base->plat_data->memcpy_conf_phy;
	} else {
		dev_err(&d40c->chan.dev->device, "[%s] No memcpy\n",
			__func__);
		return -EINVAL;
	}

	return 0;
}

*d40c
)
1261 struct d40_phy_res
*phy
= d40c
->phy_chan
;
1264 struct d40_desc
*_d
;
1267 /* Terminate all queued and active transfers */
1270 /* Release client owned descriptors */
1271 if (!list_empty(&d40c
->client
))
1272 list_for_each_entry_safe(d
, _d
, &d40c
->client
, node
) {
1273 d40_pool_lli_free(d
);
1275 /* Return desc to free-list */
1276 d40_desc_free(d40c
, d
);
1280 dev_err(&d40c
->chan
.dev
->device
, "[%s] phy == null\n",
1285 if (phy
->allocated_src
== D40_ALLOC_FREE
&&
1286 phy
->allocated_dst
== D40_ALLOC_FREE
) {
1287 dev_err(&d40c
->chan
.dev
->device
, "[%s] channel already free\n",
1292 if (d40c
->dma_cfg
.dir
== STEDMA40_MEM_TO_PERIPH
||
1293 d40c
->dma_cfg
.dir
== STEDMA40_MEM_TO_MEM
) {
1294 event
= D40_TYPE_TO_EVENT(d40c
->dma_cfg
.dst_dev_type
);
1296 } else if (d40c
->dma_cfg
.dir
== STEDMA40_PERIPH_TO_MEM
) {
1297 event
= D40_TYPE_TO_EVENT(d40c
->dma_cfg
.src_dev_type
);
1300 dev_err(&d40c
->chan
.dev
->device
,
1301 "[%s] Unknown direction\n", __func__
);
1305 res
= d40_channel_execute_command(d40c
, D40_DMA_SUSPEND_REQ
);
1307 dev_err(&d40c
->chan
.dev
->device
, "[%s] suspend failed\n",
1312 if (d40c
->log_num
!= D40_PHY_CHAN
) {
1313 /* Release logical channel, deactivate the event line */
1315 d40_config_set_event(d40c
, false);
1316 d40c
->base
->lookup_log_chans
[d40c
->log_num
] = NULL
;
1319 * Check if there are more logical allocation
1320 * on this phy channel.
1322 if (!d40_alloc_mask_free(phy
, is_src
, event
)) {
1323 /* Resume the other logical channels if any */
1324 if (d40_chan_has_events(d40c
)) {
1325 res
= d40_channel_execute_command(d40c
,
1328 dev_err(&d40c
->chan
.dev
->device
,
1329 "[%s] Executing RUN command\n",
1337 (void) d40_alloc_mask_free(phy
, is_src
, 0);
1340 /* Release physical channel */
1341 res
= d40_channel_execute_command(d40c
, D40_DMA_STOP
);
1343 dev_err(&d40c
->chan
.dev
->device
,
1344 "[%s] Failed to stop channel\n", __func__
);
1347 d40c
->phy_chan
= NULL
;
1348 /* Invalidate channel type */
1349 d40c
->dma_cfg
.channel_type
= 0;
1350 d40c
->base
->lookup_phy_chans
[phy
->num
] = NULL
;
static int d40_pause(struct dma_chan *chan)
{
	struct d40_chan *d40c =
		container_of(chan, struct d40_chan, chan);
	int res;
	unsigned long flags;

	spin_lock_irqsave(&d40c->lock, flags);

	res = d40_channel_execute_command(d40c, D40_DMA_SUSPEND_REQ);
	if (res == 0) {
		if (d40c->log_num != D40_PHY_CHAN) {
			d40_config_set_event(d40c, false);
			/* Resume the other logical channels if any */
			if (d40_chan_has_events(d40c))
				res = d40_channel_execute_command(d40c,
								  D40_DMA_RUN);
		}
	}

	spin_unlock_irqrestore(&d40c->lock, flags);
	return res;
}

static bool d40_is_paused(struct d40_chan *d40c)
{
	bool is_paused = false;
	unsigned long flags;
	void __iomem *active_reg;
	u32 status;
	u32 event;

	spin_lock_irqsave(&d40c->lock, flags);

	if (d40c->log_num == D40_PHY_CHAN) {
		if (d40c->phy_chan->num % 2 == 0)
			active_reg = d40c->base->virtbase + D40_DREG_ACTIVE;
		else
			active_reg = d40c->base->virtbase + D40_DREG_ACTIVO;

		status = (readl(active_reg) &
			  D40_CHAN_POS_MASK(d40c->phy_chan->num)) >>
			D40_CHAN_POS(d40c->phy_chan->num);
		if (status == D40_DMA_SUSPENDED || status == D40_DMA_STOP)
			is_paused = true;

		goto _exit;
	}

	if (d40c->dma_cfg.dir == STEDMA40_MEM_TO_PERIPH ||
	    d40c->dma_cfg.dir == STEDMA40_MEM_TO_MEM)
		event = D40_TYPE_TO_EVENT(d40c->dma_cfg.dst_dev_type);
	else if (d40c->dma_cfg.dir == STEDMA40_PERIPH_TO_MEM)
		event = D40_TYPE_TO_EVENT(d40c->dma_cfg.src_dev_type);
	else {
		dev_err(&d40c->chan.dev->device,
			"[%s] Unknown direction\n", __func__);
		goto _exit;
	}
	status = d40_chan_has_events(d40c);
	status = (status & D40_EVENTLINE_MASK(event)) >>
		D40_EVENTLINE_POS(event);

	if (status != D40_DMA_RUN)
		is_paused = true;
_exit:
	spin_unlock_irqrestore(&d40c->lock, flags);
	return is_paused;

}

static bool d40_tx_is_linked(struct d40_chan *d40c)
{
	bool is_link;

	if (d40c->log_num != D40_PHY_CHAN)
		is_link = readl(&d40c->lcpa->lcsp3) & D40_MEM_LCSP3_DLOS_MASK;
	else
		is_link = readl(d40c->base->virtbase + D40_DREG_PCBASE +
				d40c->phy_chan->num * D40_DREG_PCDELTA +
				D40_CHAN_REG_SDLNK) &
			D40_SREG_LNK_PHYS_LNK_MASK;
	return is_link;
}

static u32 d40_residue(struct d40_chan *d40c)
{
	u32 num_elt;

	if (d40c->log_num != D40_PHY_CHAN)
		num_elt = (readl(&d40c->lcpa->lcsp2) & D40_MEM_LCSP2_ECNT_MASK)
			>> D40_MEM_LCSP2_ECNT_POS;
	else
		num_elt = (readl(d40c->base->virtbase + D40_DREG_PCBASE +
				 d40c->phy_chan->num * D40_DREG_PCDELTA +
				 D40_CHAN_REG_SDELT) &
			   D40_SREG_ELEM_PHY_ECNT_MASK) >>
			D40_SREG_ELEM_PHY_ECNT_POS;
	return num_elt * (1 << d40c->dma_cfg.dst_info.data_width);
}

static int d40_resume(struct dma_chan *chan)
{
	struct d40_chan *d40c =
		container_of(chan, struct d40_chan, chan);
	int res = 0;
	unsigned long flags;

	spin_lock_irqsave(&d40c->lock, flags);

	if (d40c->base->rev == 0)
		if (d40c->log_num != D40_PHY_CHAN) {
			res = d40_channel_execute_command(d40c,
							  D40_DMA_SUSPEND_REQ);
			goto no_suspend;
		}

	/* If bytes left to transfer or linked tx resume job */
	if (d40_residue(d40c) || d40_tx_is_linked(d40c)) {
		if (d40c->log_num != D40_PHY_CHAN)
			d40_config_set_event(d40c, true);
		res = d40_channel_execute_command(d40c, D40_DMA_RUN);
	}

no_suspend:
	spin_unlock_irqrestore(&d40c->lock, flags);
	return res;
}

static u32 stedma40_residue(struct dma_chan *chan)
{
	struct d40_chan *d40c =
		container_of(chan, struct d40_chan, chan);
	u32 bytes_left;
	unsigned long flags;

	spin_lock_irqsave(&d40c->lock, flags);
	bytes_left = d40_residue(d40c);
	spin_unlock_irqrestore(&d40c->lock, flags);

	return bytes_left;
}

/* Public DMA functions in addition to the DMA engine framework */

int stedma40_set_psize(struct dma_chan *chan,
		       int src_psize,
		       int dst_psize)
{
	struct d40_chan *d40c =
		container_of(chan, struct d40_chan, chan);
	unsigned long flags;

	spin_lock_irqsave(&d40c->lock, flags);

	if (d40c->log_num != D40_PHY_CHAN) {
		d40c->log_def.lcsp1 &= ~D40_MEM_LCSP1_SCFG_PSIZE_MASK;
		d40c->log_def.lcsp3 &= ~D40_MEM_LCSP1_SCFG_PSIZE_MASK;
		d40c->log_def.lcsp1 |= src_psize <<
			D40_MEM_LCSP1_SCFG_PSIZE_POS;
		d40c->log_def.lcsp3 |= dst_psize <<
			D40_MEM_LCSP1_SCFG_PSIZE_POS;
		goto out;
	}

	if (src_psize == STEDMA40_PSIZE_PHY_1)
		d40c->src_def_cfg &= ~(1 << D40_SREG_CFG_PHY_PEN_POS);
	else {
		d40c->src_def_cfg |= 1 << D40_SREG_CFG_PHY_PEN_POS;
		d40c->src_def_cfg &= ~(STEDMA40_PSIZE_PHY_16 <<
				       D40_SREG_CFG_PSIZE_POS);
		d40c->src_def_cfg |= src_psize << D40_SREG_CFG_PSIZE_POS;
	}

	if (dst_psize == STEDMA40_PSIZE_PHY_1)
		d40c->dst_def_cfg &= ~(1 << D40_SREG_CFG_PHY_PEN_POS);
	else {
		d40c->dst_def_cfg |= 1 << D40_SREG_CFG_PHY_PEN_POS;
		d40c->dst_def_cfg &= ~(STEDMA40_PSIZE_PHY_16 <<
				       D40_SREG_CFG_PSIZE_POS);
		d40c->dst_def_cfg |= dst_psize << D40_SREG_CFG_PSIZE_POS;
	}
out:
	spin_unlock_irqrestore(&d40c->lock, flags);
	return 0;
}
EXPORT_SYMBOL(stedma40_set_psize);

struct dma_async_tx_descriptor *stedma40_memcpy_sg(struct dma_chan *chan,
						   struct scatterlist *sgl_dst,
						   struct scatterlist *sgl_src,
						   unsigned int sgl_len,
						   unsigned long dma_flags)
{
	int res;
	struct d40_desc *d40d;
	struct d40_chan *d40c = container_of(chan, struct d40_chan,
					     chan);
	unsigned long flags;

	if (d40c->phy_chan == NULL) {
		dev_err(&d40c->chan.dev->device,
			"[%s] Unallocated channel.\n", __func__);
		return ERR_PTR(-EINVAL);
	}

	spin_lock_irqsave(&d40c->lock, flags);
	d40d = d40_desc_get(d40c);

	if (d40d == NULL)
		goto err;

	d40d->lli_len = sgl_len;
	d40d->lli_tx_len = d40d->lli_len;
	d40d->txd.flags = dma_flags;

	if (d40c->log_num != D40_PHY_CHAN) {
		if (d40d->lli_len > d40c->base->plat_data->llis_per_log)
			d40d->lli_tx_len = d40c->base->plat_data->llis_per_log;

		if (sgl_len > 1)
			/*
			 * Check if there is space available in lcla. If not,
			 * split list into 1-length and run only in lcpa
			 * space.
			 */
			if (d40_lcla_id_get(d40c) != 0)
				d40d->lli_tx_len = 1;

		if (d40_pool_lli_alloc(d40d, sgl_len, true) < 0) {
			dev_err(&d40c->chan.dev->device,
				"[%s] Out of memory\n", __func__);
			goto err;
		}

		(void) d40_log_sg_to_lli(d40c->lcla.src_id,
					 sgl_src,
					 sgl_len,
					 d40d->lli_log.src,
					 d40c->log_def.lcsp1,
					 d40c->dma_cfg.src_info.data_width,
					 dma_flags & DMA_PREP_INTERRUPT,
					 d40d->lli_tx_len,
					 d40c->base->plat_data->llis_per_log);

		(void) d40_log_sg_to_lli(d40c->lcla.dst_id,
					 sgl_dst,
					 sgl_len,
					 d40d->lli_log.dst,
					 d40c->log_def.lcsp3,
					 d40c->dma_cfg.dst_info.data_width,
					 dma_flags & DMA_PREP_INTERRUPT,
					 d40d->lli_tx_len,
					 d40c->base->plat_data->llis_per_log);

	} else {
		if (d40_pool_lli_alloc(d40d, sgl_len, false) < 0) {
			dev_err(&d40c->chan.dev->device,
				"[%s] Out of memory\n", __func__);
			goto err;
		}

		res = d40_phy_sg_to_lli(sgl_src,
					sgl_len,
					0,
					d40d->lli_phy.src,
					d40d->lli_phy.src_addr,
					d40c->src_def_cfg,
					d40c->dma_cfg.src_info.data_width,
					d40c->dma_cfg.src_info.psize,
					true);

		if (res < 0)
			goto err;

		res = d40_phy_sg_to_lli(sgl_dst,
					sgl_len,
					0,
					d40d->lli_phy.dst,
					d40d->lli_phy.dst_addr,
					d40c->dst_def_cfg,
					d40c->dma_cfg.dst_info.data_width,
					d40c->dma_cfg.dst_info.psize,
					true);

		if (res < 0)
			goto err;

		(void) dma_map_single(d40c->base->dev, d40d->lli_phy.src,
				      d40d->lli_pool.size, DMA_TO_DEVICE);
	}

	dma_async_tx_descriptor_init(&d40d->txd, chan);

	d40d->txd.tx_submit = d40_tx_submit;

	spin_unlock_irqrestore(&d40c->lock, flags);

	return &d40d->txd;
err:
	spin_unlock_irqrestore(&d40c->lock, flags);
	return NULL;
}
EXPORT_SYMBOL(stedma40_memcpy_sg);

bool stedma40_filter(struct dma_chan *chan, void *data)
{
	struct stedma40_chan_cfg *info = data;
	struct d40_chan *d40c =
		container_of(chan, struct d40_chan, chan);
	int err;

	if (data) {
		err = d40_validate_conf(d40c, info);
		if (!err)
			d40c->dma_cfg = *info;
	} else
		err = d40_config_memcpy(d40c);

	return err == 0;
}
EXPORT_SYMBOL(stedma40_filter);

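/*
 * A minimal client-side sketch (not part of this file) of how a driver
 * would typically obtain a channel through stedma40_filter(), assuming
 * a board-provided struct stedma40_chan_cfg in 'cfg':
 *
 *	dma_cap_mask_t mask;
 *
 *	dma_cap_zero(mask);
 *	dma_cap_set(DMA_SLAVE, mask);
 *	chan = dma_request_channel(mask, stedma40_filter, &cfg);
 *
 * On success the filter has already copied 'cfg' into the channel.
 */
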
/* DMA ENGINE functions */
static int d40_alloc_chan_resources(struct dma_chan *chan)
{
	int err;
	unsigned long flags;
	struct d40_chan *d40c =
		container_of(chan, struct d40_chan, chan);
	bool is_free_phy;
	spin_lock_irqsave(&d40c->lock, flags);

	d40c->completed = chan->cookie = 1;

	/*
	 * If no dma configuration is set (channel_type == 0)
	 * use default configuration (memcpy)
	 */
	if (d40c->dma_cfg.channel_type == 0) {
		err = d40_config_memcpy(d40c);
		if (err) {
			dev_err(&d40c->chan.dev->device,
				"[%s] Failed to configure memcpy channel\n",
				__func__);
			goto fail;
		}
	}
	is_free_phy = (d40c->phy_chan == NULL);

	err = d40_allocate_channel(d40c);
	if (err) {
		dev_err(&d40c->chan.dev->device,
			"[%s] Failed to allocate channel\n", __func__);
		goto fail;
	}

	/* Fill in basic CFG register values */
	d40_phy_cfg(&d40c->dma_cfg, &d40c->src_def_cfg,
		    &d40c->dst_def_cfg, d40c->log_num != D40_PHY_CHAN);

	if (d40c->log_num != D40_PHY_CHAN) {
		d40_log_cfg(&d40c->dma_cfg,
			    &d40c->log_def.lcsp1, &d40c->log_def.lcsp3);

		if (d40c->dma_cfg.dir == STEDMA40_PERIPH_TO_MEM)
			d40c->lcpa = d40c->base->lcpa_base +
			  d40c->dma_cfg.src_dev_type * D40_LCPA_CHAN_SIZE;
		else
			d40c->lcpa = d40c->base->lcpa_base +
			  d40c->dma_cfg.dst_dev_type *
			  D40_LCPA_CHAN_SIZE + D40_LCPA_CHAN_DST_DELTA;
	}

	/*
	 * Only write channel configuration to the DMA if the physical
	 * resource is free. In case of multiple logical channels
	 * on the same physical resource, only the first write is necessary.
	 */
	if (is_free_phy) {
		err = d40_config_write(d40c);
		if (err) {
			dev_err(&d40c->chan.dev->device,
				"[%s] Failed to configure channel\n",
				__func__);
		}
	}
fail:
	spin_unlock_irqrestore(&d40c->lock, flags);
	return err;
}

*chan
)
1751 struct d40_chan
*d40c
=
1752 container_of(chan
, struct d40_chan
, chan
);
1754 unsigned long flags
;
1756 if (d40c
->phy_chan
== NULL
) {
1757 dev_err(&d40c
->chan
.dev
->device
,
1758 "[%s] Cannot free unallocated channel\n", __func__
);
1763 spin_lock_irqsave(&d40c
->lock
, flags
);
1765 err
= d40_free_dma(d40c
);
1768 dev_err(&d40c
->chan
.dev
->device
,
1769 "[%s] Failed to free channel\n", __func__
);
1770 spin_unlock_irqrestore(&d40c
->lock
, flags
);
static struct dma_async_tx_descriptor *d40_prep_memcpy(struct dma_chan *chan,
						       dma_addr_t dst,
						       dma_addr_t src,
						       size_t size,
						       unsigned long dma_flags)
{
	struct d40_desc *d40d;
	struct d40_chan *d40c = container_of(chan, struct d40_chan,
					     chan);
	unsigned long flags;
	int err = 0;

	if (d40c->phy_chan == NULL) {
		dev_err(&d40c->chan.dev->device,
			"[%s] Channel is not allocated.\n", __func__);
		return ERR_PTR(-EINVAL);
	}

	spin_lock_irqsave(&d40c->lock, flags);
	d40d = d40_desc_get(d40c);

	if (d40d == NULL) {
		dev_err(&d40c->chan.dev->device,
			"[%s] Descriptor is NULL\n", __func__);
		goto err;
	}

	d40d->txd.flags = dma_flags;

	dma_async_tx_descriptor_init(&d40d->txd, chan);

	d40d->txd.tx_submit = d40_tx_submit;

	if (d40c->log_num != D40_PHY_CHAN) {

		if (d40_pool_lli_alloc(d40d, 1, true) < 0) {
			dev_err(&d40c->chan.dev->device,
				"[%s] Out of memory\n", __func__);
			goto err;
		}
		d40d->lli_len = 1;
		d40d->lli_tx_len = 1;

		d40_log_fill_lli(d40d->lli_log.src,
				 src,
				 size,
				 0,
				 d40c->log_def.lcsp1,
				 d40c->dma_cfg.src_info.data_width,
				 false, true);

		d40_log_fill_lli(d40d->lli_log.dst,
				 dst,
				 size,
				 0,
				 d40c->log_def.lcsp3,
				 d40c->dma_cfg.dst_info.data_width,
				 true, true);

	} else {

		if (d40_pool_lli_alloc(d40d, 1, false) < 0) {
			dev_err(&d40c->chan.dev->device,
				"[%s] Out of memory\n", __func__);
			goto err;
		}

		err = d40_phy_fill_lli(d40d->lli_phy.src,
				       src,
				       size,
				       d40c->dma_cfg.src_info.psize,
				       0,
				       d40c->src_def_cfg,
				       true,
				       d40c->dma_cfg.src_info.data_width,
				       false);
		if (err)
			goto err_fill_lli;

		err = d40_phy_fill_lli(d40d->lli_phy.dst,
				       dst,
				       size,
				       d40c->dma_cfg.dst_info.psize,
				       0,
				       d40c->dst_def_cfg,
				       true,
				       d40c->dma_cfg.dst_info.data_width,
				       false);

		if (err)
			goto err_fill_lli;

		(void) dma_map_single(d40c->base->dev, d40d->lli_phy.src,
				      d40d->lli_pool.size, DMA_TO_DEVICE);
	}

	spin_unlock_irqrestore(&d40c->lock, flags);
	return &d40d->txd;

err_fill_lli:
	dev_err(&d40c->chan.dev->device,
		"[%s] Failed filling in PHY LLI\n", __func__);
	d40_pool_lli_free(d40d);
err:
	spin_unlock_irqrestore(&d40c->lock, flags);
	return NULL;
}

static int d40_prep_slave_sg_log(struct d40_desc *d40d,
				 struct d40_chan *d40c,
				 struct scatterlist *sgl,
				 unsigned int sg_len,
				 enum dma_data_direction direction,
				 unsigned long dma_flags)
{
	dma_addr_t dev_addr = 0;
	int total_size;

	if (d40_pool_lli_alloc(d40d, sg_len, true) < 0) {
		dev_err(&d40c->chan.dev->device,
			"[%s] Out of memory\n", __func__);
		return -ENOMEM;
	}

	d40d->lli_len = sg_len;
	if (d40d->lli_len <= d40c->base->plat_data->llis_per_log)
		d40d->lli_tx_len = d40d->lli_len;
	else
		d40d->lli_tx_len = d40c->base->plat_data->llis_per_log;

	if (sg_len > 1)
		/*
		 * Check if there is space available in lcla.
		 * If not, split list into 1-length and run only
		 * in lcpa space.
		 */
		if (d40_lcla_id_get(d40c) != 0)
			d40d->lli_tx_len = 1;

	if (direction == DMA_FROM_DEVICE)
		if (d40c->runtime_addr)
			dev_addr = d40c->runtime_addr;
		else
			dev_addr = d40c->base->plat_data->dev_rx[d40c->dma_cfg.src_dev_type];
	else if (direction == DMA_TO_DEVICE)
		if (d40c->runtime_addr)
			dev_addr = d40c->runtime_addr;
		else
			dev_addr = d40c->base->plat_data->dev_tx[d40c->dma_cfg.dst_dev_type];
	else
		return -EINVAL;

	total_size = d40_log_sg_to_dev(&d40c->lcla,
				       sgl, sg_len,
				       &d40d->lli_log,
				       &d40c->log_def,
				       d40c->dma_cfg.src_info.data_width,
				       d40c->dma_cfg.dst_info.data_width,
				       direction,
				       dma_flags & DMA_PREP_INTERRUPT,
				       dev_addr, d40d->lli_tx_len,
				       d40c->base->plat_data->llis_per_log);

	if (total_size < 0)
		return -EINVAL;

	return 0;
}

static int d40_prep_slave_sg_phy(struct d40_desc *d40d,
				 struct d40_chan *d40c,
				 struct scatterlist *sgl,
				 unsigned int sgl_len,
				 enum dma_data_direction direction,
				 unsigned long dma_flags)
{
	dma_addr_t src_dev_addr;
	dma_addr_t dst_dev_addr;
	int res;

	if (d40_pool_lli_alloc(d40d, sgl_len, false) < 0) {
		dev_err(&d40c->chan.dev->device,
			"[%s] Out of memory\n", __func__);
		return -ENOMEM;
	}

	d40d->lli_len = sgl_len;
	d40d->lli_tx_len = sgl_len;

	if (direction == DMA_FROM_DEVICE) {
		dst_dev_addr = 0;
		if (d40c->runtime_addr)
			src_dev_addr = d40c->runtime_addr;
		else
			src_dev_addr = d40c->base->plat_data->dev_rx[d40c->dma_cfg.src_dev_type];
	} else if (direction == DMA_TO_DEVICE) {
		src_dev_addr = 0;
		if (d40c->runtime_addr)
			dst_dev_addr = d40c->runtime_addr;
		else
			dst_dev_addr = d40c->base->plat_data->dev_tx[d40c->dma_cfg.dst_dev_type];
	} else
		return -EINVAL;

	res = d40_phy_sg_to_lli(sgl,
				sgl_len,
				src_dev_addr,
				d40d->lli_phy.src,
				d40d->lli_phy.src_addr,
				d40c->src_def_cfg,
				d40c->dma_cfg.src_info.data_width,
				d40c->dma_cfg.src_info.psize,
				true);
	if (res < 0)
		return res;

	res = d40_phy_sg_to_lli(sgl,
				sgl_len,
				dst_dev_addr,
				d40d->lli_phy.dst,
				d40d->lli_phy.dst_addr,
				d40c->dst_def_cfg,
				d40c->dma_cfg.dst_info.data_width,
				d40c->dma_cfg.dst_info.psize,
				true);
	if (res < 0)
		return res;

	(void) dma_map_single(d40c->base->dev, d40d->lli_phy.src,
			      d40d->lli_pool.size, DMA_TO_DEVICE);
	return 0;
}

static struct dma_async_tx_descriptor *d40_prep_slave_sg(struct dma_chan *chan,
							 struct scatterlist *sgl,
							 unsigned int sg_len,
							 enum dma_data_direction direction,
							 unsigned long dma_flags)
{
	struct d40_desc *d40d;
	struct d40_chan *d40c = container_of(chan, struct d40_chan,
					     chan);
	unsigned long flags;
	int err;

	if (d40c->phy_chan == NULL) {
		dev_err(&d40c->chan.dev->device,
			"[%s] Cannot prepare unallocated channel\n", __func__);
		return ERR_PTR(-EINVAL);
	}

	if (d40c->dma_cfg.pre_transfer)
		d40c->dma_cfg.pre_transfer(chan,
					   d40c->dma_cfg.pre_transfer_data,
					   sg_dma_len(sgl));

	spin_lock_irqsave(&d40c->lock, flags);
	d40d = d40_desc_get(d40c);
	spin_unlock_irqrestore(&d40c->lock, flags);

	if (d40d == NULL)
		return NULL;

	if (d40c->log_num != D40_PHY_CHAN)
		err = d40_prep_slave_sg_log(d40d, d40c, sgl, sg_len,
					    direction, dma_flags);
	else
		err = d40_prep_slave_sg_phy(d40d, d40c, sgl, sg_len,
					    direction, dma_flags);
	if (err) {
		dev_err(&d40c->chan.dev->device,
			"[%s] Failed to prepare %s slave sg job: %d\n",
			__func__,
			d40c->log_num != D40_PHY_CHAN ? "log" : "phy", err);
		return NULL;
	}

	d40d->txd.flags = dma_flags;

	dma_async_tx_descriptor_init(&d40d->txd, chan);

	d40d->txd.tx_submit = d40_tx_submit;

	return &d40d->txd;
}

static enum dma_status d40_tx_status(struct dma_chan *chan,
				     dma_cookie_t cookie,
				     struct dma_tx_state *txstate)
{
	struct d40_chan *d40c = container_of(chan, struct d40_chan, chan);
	dma_cookie_t last_used;
	dma_cookie_t last_complete;
	int ret;

	if (d40c->phy_chan == NULL) {
		dev_err(&d40c->chan.dev->device,
			"[%s] Cannot read status of unallocated channel\n",
			__func__);
		return -EINVAL;
	}

	last_complete = d40c->completed;
	last_used = chan->cookie;

	if (d40_is_paused(d40c))
		ret = DMA_PAUSED;
	else
		ret = dma_async_is_complete(cookie, last_complete, last_used);

	dma_set_tx_state(txstate, last_complete, last_used,
			 stedma40_residue(chan));

	return ret;
}

static void d40_issue_pending(struct dma_chan *chan)
{
	struct d40_chan *d40c = container_of(chan, struct d40_chan, chan);
	unsigned long flags;

	if (d40c->phy_chan == NULL) {
		dev_err(&d40c->chan.dev->device,
			"[%s] Channel is not allocated!\n", __func__);
		return;
	}

	spin_lock_irqsave(&d40c->lock, flags);

	/* Busy means that pending jobs are already being processed */
	if (!d40c->busy)
		(void) d40_queue_start(d40c);

	spin_unlock_irqrestore(&d40c->lock, flags);
}

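/*
 * Translates a generic dmaengine dma_slave_config into this driver's
 * stedma40 configuration: direction, a runtime device address that
 * overrides the platform-wired one, bus width and a burst-derived
 * physical packet size.
 */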
/* Runtime reconfiguration extension */
static void d40_set_runtime_config(struct dma_chan *chan,
				   struct dma_slave_config *config)
{
	struct d40_chan *d40c = container_of(chan, struct d40_chan, chan);
	struct stedma40_chan_cfg *cfg = &d40c->dma_cfg;
	enum dma_slave_buswidth config_addr_width;
	dma_addr_t config_addr;
	u32 config_maxburst;
	enum stedma40_periph_data_width addr_width;
	int psize;

	if (config->direction == DMA_FROM_DEVICE) {
		dma_addr_t dev_addr_rx =
			d40c->base->plat_data->dev_rx[cfg->src_dev_type];

		config_addr = config->src_addr;
		if (dev_addr_rx)
			dev_dbg(d40c->base->dev,
				"channel has a pre-wired RX address %08x "
				"overriding with %08x\n",
				dev_addr_rx, config_addr);
		if (cfg->dir != STEDMA40_PERIPH_TO_MEM)
			dev_dbg(d40c->base->dev,
				"channel was not configured for peripheral "
				"to memory transfer (%d) overriding\n",
				cfg->dir);
		cfg->dir = STEDMA40_PERIPH_TO_MEM;

		config_addr_width = config->src_addr_width;
		config_maxburst = config->src_maxburst;

	} else if (config->direction == DMA_TO_DEVICE) {
		dma_addr_t dev_addr_tx =
			d40c->base->plat_data->dev_tx[cfg->dst_dev_type];

		config_addr = config->dst_addr;
		if (dev_addr_tx)
			dev_dbg(d40c->base->dev,
				"channel has a pre-wired TX address %08x "
				"overriding with %08x\n",
				dev_addr_tx, config_addr);
		if (cfg->dir != STEDMA40_MEM_TO_PERIPH)
			dev_dbg(d40c->base->dev,
				"channel was not configured for memory "
				"to peripheral transfer (%d) overriding\n",
				cfg->dir);
		cfg->dir = STEDMA40_MEM_TO_PERIPH;

		config_addr_width = config->dst_addr_width;
		config_maxburst = config->dst_maxburst;

	} else {
		dev_err(d40c->base->dev,
			"unrecognized channel direction %d\n",
			config->direction);
		return;
	}

	switch (config_addr_width) {
	case DMA_SLAVE_BUSWIDTH_1_BYTE:
		addr_width = STEDMA40_BYTE_WIDTH;
		break;
	case DMA_SLAVE_BUSWIDTH_2_BYTES:
		addr_width = STEDMA40_HALFWORD_WIDTH;
		break;
	case DMA_SLAVE_BUSWIDTH_4_BYTES:
		addr_width = STEDMA40_WORD_WIDTH;
		break;
	case DMA_SLAVE_BUSWIDTH_8_BYTES:
		addr_width = STEDMA40_DOUBLEWORD_WIDTH;
		break;
	default:
		dev_err(d40c->base->dev,
			"illegal peripheral address width "
			"requested (%d)\n",
			config->src_addr_width);
		return;
	}

	if (config_maxburst >= 16)
		psize = STEDMA40_PSIZE_LOG_16;
	else if (config_maxburst >= 8)
		psize = STEDMA40_PSIZE_LOG_8;
	else if (config_maxburst >= 4)
		psize = STEDMA40_PSIZE_LOG_4;
	else
		psize = STEDMA40_PSIZE_LOG_1;

	/* Set up all the endpoint configs */
	cfg->src_info.data_width = addr_width;
	cfg->src_info.psize = psize;
	cfg->src_info.endianess = STEDMA40_LITTLE_ENDIAN;
	cfg->src_info.flow_ctrl = STEDMA40_NO_FLOW_CTRL;
	cfg->dst_info.data_width = addr_width;
	cfg->dst_info.psize = psize;
	cfg->dst_info.endianess = STEDMA40_LITTLE_ENDIAN;
	cfg->dst_info.flow_ctrl = STEDMA40_NO_FLOW_CTRL;

	/* These settings will take precedence later */
	d40c->runtime_addr = config_addr;
	d40c->runtime_direction = config->direction;
	dev_dbg(d40c->base->dev,
		"configured channel %s for %s, data width %d, "
		"maxburst %d bytes, LE, no flow control\n",
		dma_chan_name(chan),
		(config->direction == DMA_FROM_DEVICE) ? "RX" : "TX",
		config_addr_width,
		config_maxburst);
}

static int d40_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd,
		       unsigned long arg)
{
	unsigned long flags;
	struct d40_chan *d40c = container_of(chan, struct d40_chan, chan);

	if (d40c->phy_chan == NULL) {
		dev_err(&d40c->chan.dev->device,
			"[%s] Channel is not allocated!\n", __func__);
		return -EINVAL;
	}

	switch (cmd) {
	case DMA_TERMINATE_ALL:
		spin_lock_irqsave(&d40c->lock, flags);
		d40_term_all(d40c);
		spin_unlock_irqrestore(&d40c->lock, flags);
		return 0;
	case DMA_PAUSE:
		return d40_pause(chan);
	case DMA_RESUME:
		return d40_resume(chan);
	case DMA_SLAVE_CONFIG:
		d40_set_runtime_config(chan,
			(struct dma_slave_config *) arg);
		return 0;
	default:
		break;
	}

	/* Other commands are unimplemented */
	return -ENXIO;
}

/* Initialization functions */

static void __init d40_chan_init(struct d40_base *base, struct dma_device *dma,
				 struct d40_chan *chans, int offset,
				 int num_chans)
{
	int i = 0;
	struct d40_chan *d40c;

	INIT_LIST_HEAD(&dma->channels);

	for (i = offset; i < offset + num_chans; i++) {
		d40c = &chans[i];
		d40c->base = base;
		d40c->chan.device = dma;

		/* Invalidate lcla element */
		d40c->lcla.src_id = -1;
		d40c->lcla.dst_id = -1;

		spin_lock_init(&d40c->lock);

		d40c->log_num = D40_PHY_CHAN;

		INIT_LIST_HEAD(&d40c->active);
		INIT_LIST_HEAD(&d40c->queue);
		INIT_LIST_HEAD(&d40c->client);

		tasklet_init(&d40c->tasklet, dma_tasklet,
			     (unsigned long) d40c);

		list_add_tail(&d40c->chan.device_node,
			      &dma->channels);
	}
}

static int __init d40_dmaengine_init(struct d40_base *base,
				     int num_reserved_chans)
{
	int err;

	d40_chan_init(base, &base->dma_slave, base->log_chans,
		      0, base->num_log_chans);

	dma_cap_zero(base->dma_slave.cap_mask);
	dma_cap_set(DMA_SLAVE, base->dma_slave.cap_mask);

	base->dma_slave.device_alloc_chan_resources = d40_alloc_chan_resources;
	base->dma_slave.device_free_chan_resources = d40_free_chan_resources;
	base->dma_slave.device_prep_dma_memcpy = d40_prep_memcpy;
	base->dma_slave.device_prep_slave_sg = d40_prep_slave_sg;
	base->dma_slave.device_tx_status = d40_tx_status;
	base->dma_slave.device_issue_pending = d40_issue_pending;
	base->dma_slave.device_control = d40_control;
	base->dma_slave.dev = base->dev;

	err = dma_async_device_register(&base->dma_slave);

	if (err) {
		dev_err(base->dev,
			"[%s] Failed to register slave channels\n",
			__func__);
		goto failure1;
	}

	d40_chan_init(base, &base->dma_memcpy, base->log_chans,
		      base->num_log_chans, base->plat_data->memcpy_len);

	dma_cap_zero(base->dma_memcpy.cap_mask);
	dma_cap_set(DMA_MEMCPY, base->dma_memcpy.cap_mask);

	base->dma_memcpy.device_alloc_chan_resources = d40_alloc_chan_resources;
	base->dma_memcpy.device_free_chan_resources = d40_free_chan_resources;
	base->dma_memcpy.device_prep_dma_memcpy = d40_prep_memcpy;
	base->dma_memcpy.device_prep_slave_sg = d40_prep_slave_sg;
	base->dma_memcpy.device_tx_status = d40_tx_status;
	base->dma_memcpy.device_issue_pending = d40_issue_pending;
	base->dma_memcpy.device_control = d40_control;
	base->dma_memcpy.dev = base->dev;
	/*
	 * This controller can only access addresses at even
	 * 32bit boundaries, i.e. 2^2
	 */
	base->dma_memcpy.copy_align = 2;

	err = dma_async_device_register(&base->dma_memcpy);

	if (err) {
		dev_err(base->dev,
			"[%s] Failed to register memcpy only channels\n",
			__func__);
		goto failure2;
	}

	d40_chan_init(base, &base->dma_both, base->phy_chans,
		      0, num_reserved_chans);

	dma_cap_zero(base->dma_both.cap_mask);
	dma_cap_set(DMA_SLAVE, base->dma_both.cap_mask);
	dma_cap_set(DMA_MEMCPY, base->dma_both.cap_mask);

	base->dma_both.device_alloc_chan_resources = d40_alloc_chan_resources;
	base->dma_both.device_free_chan_resources = d40_free_chan_resources;
	base->dma_both.device_prep_dma_memcpy = d40_prep_memcpy;
	base->dma_both.device_prep_slave_sg = d40_prep_slave_sg;
	base->dma_both.device_tx_status = d40_tx_status;
	base->dma_both.device_issue_pending = d40_issue_pending;
	base->dma_both.device_control = d40_control;
	base->dma_both.dev = base->dev;
	base->dma_both.copy_align = 2;
	err = dma_async_device_register(&base->dma_both);

	if (err) {
		dev_err(base->dev,
			"[%s] Failed to register logical and physical capable channels\n",
			__func__);
		goto failure3;
	}
	return 0;
failure3:
	dma_async_device_unregister(&base->dma_memcpy);
failure2:
	dma_async_device_unregister(&base->dma_slave);
failure1:
	return err;
}

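/*
 * Reads the PRSME/PRSMO security mode registers and marks "secure mode
 * only" and platform-disabled channels as permanently occupied, so that
 * the channel allocator never hands them out.
 */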
/* Initialization functions. */

static int __init d40_phy_res_init(struct d40_base *base)
{
	int i;
	int num_phy_chans_avail = 0;
	u32 val[2];
	int odd_even_bit = -2;

	val[0] = readl(base->virtbase + D40_DREG_PRSME);
	val[1] = readl(base->virtbase + D40_DREG_PRSMO);

	for (i = 0; i < base->num_phy_chans; i++) {
		base->phy_res[i].num = i;
		odd_even_bit += 2 * ((i % 2) == 0);
		if (((val[i % 2] >> odd_even_bit) & 3) == 1) {
			/* Mark security only channels as occupied */
			base->phy_res[i].allocated_src = D40_ALLOC_PHY;
			base->phy_res[i].allocated_dst = D40_ALLOC_PHY;
		} else {
			base->phy_res[i].allocated_src = D40_ALLOC_FREE;
			base->phy_res[i].allocated_dst = D40_ALLOC_FREE;
			num_phy_chans_avail++;
		}
		spin_lock_init(&base->phy_res[i].lock);
	}

	/* Mark disabled channels as occupied */
	for (i = 0; base->plat_data->disabled_channels[i] != -1; i++) {
		/* Index by the listed channel number, not the loop counter */
		int chan = base->plat_data->disabled_channels[i];

		base->phy_res[chan].allocated_src = D40_ALLOC_PHY;
		base->phy_res[chan].allocated_dst = D40_ALLOC_PHY;
		num_phy_chans_avail--;
	}

	dev_info(base->dev, "%d of %d physical DMA channels available\n",
		 num_phy_chans_avail, base->num_phy_chans);

	/* Verify settings extended vs standard */
	val[0] = readl(base->virtbase + D40_DREG_PRTYP);

	for (i = 0; i < base->num_phy_chans; i++) {

		if (base->phy_res[i].allocated_src == D40_ALLOC_FREE &&
		    (val[0] & 0x3) != 1)
			dev_info(base->dev,
				 "[%s] INFO: channel %d is misconfigured (%d)\n",
				 __func__, i, val[0] & 0x3);

		val[0] = val[0] >> 2;
	}

	return num_phy_chans_avail;
}
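
/*
 * Editor's sketch (not part of the driver, kept under #if 0): the
 * odd_even_bit walk above decodes two mode/security bits per channel,
 * with even-numbered channels packed into PRSME and odd-numbered ones
 * into PRSMO. The equivalent direct lookup for a single channel:
 */
#if 0
static u32 d40_example_chan_mode_bits(u32 prsme, u32 prsmo, int chan)
{
	/* Even channels live in PRSME, odd channels in PRSMO. */
	u32 reg = (chan & 1) ? prsmo : prsme;

	/* Each register packs 16 channels at two bits each. */
	return (reg >> (2 * (chan / 2))) & 0x3;
}
#endif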
static struct d40_base * __init d40_hw_detect_init(struct platform_device *pdev)
{
	static const struct d40_reg_val dma_id_regs[] = {
		/* Peripheral Id */
		{ .reg = D40_DREG_PERIPHID0, .val = 0x0040},
		{ .reg = D40_DREG_PERIPHID1, .val = 0x0000},
		/*
		 * D40_DREG_PERIPHID2 depends on HW revision:
		 * MOP500/HREF ED has 0x0008,
		 * HREF V1 has 0x0028
		 */
		{ .reg = D40_DREG_PERIPHID3, .val = 0x0000},

		/* PCell Id */
		{ .reg = D40_DREG_CELLID0, .val = 0x000d},
		{ .reg = D40_DREG_CELLID1, .val = 0x00f0},
		{ .reg = D40_DREG_CELLID2, .val = 0x0005},
		{ .reg = D40_DREG_CELLID3, .val = 0x00b1}
	};
	struct stedma40_platform_data *plat_data;
	struct clk *clk = NULL;
	void __iomem *virtbase = NULL;
	struct resource *res = NULL;
	struct d40_base *base = NULL;
	int num_log_chans = 0;
	int num_phy_chans;
	int i;
	u32 val;

	clk = clk_get(&pdev->dev, NULL);

	if (IS_ERR(clk)) {
		dev_err(&pdev->dev, "[%s] No matching clock found\n",
			__func__);
		goto failure;
	}

	clk_enable(clk);

	/* Get IO for DMAC base address */
	res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "base");
	if (!res)
		goto failure;

	if (request_mem_region(res->start, resource_size(res),
			       D40_NAME " I/O base") == NULL)
		goto failure;

	virtbase = ioremap(res->start, resource_size(res));
	if (!virtbase)
		goto failure;

	/* HW version check */
	for (i = 0; i < ARRAY_SIZE(dma_id_regs); i++) {
		if (dma_id_regs[i].val !=
		    readl(virtbase + dma_id_regs[i].reg)) {
			dev_err(&pdev->dev,
				"[%s] Unknown hardware! Expected 0x%x at 0x%x but got 0x%x\n",
				__func__,
				dma_id_regs[i].val,
				dma_id_regs[i].reg,
				readl(virtbase + dma_id_regs[i].reg));
			goto failure;
		}
	}

	/* Get silicon revision */
	val = readl(virtbase + D40_DREG_PERIPHID2);

	if ((val & 0xf) != D40_PERIPHID2_DESIGNER) {
		dev_err(&pdev->dev,
			"[%s] Unknown designer! Got %x wanted %x\n",
			__func__, val & 0xf, D40_PERIPHID2_DESIGNER);
		goto failure;
	}

	/* The number of physical channels on this HW */
	num_phy_chans = 4 * (readl(virtbase + D40_DREG_ICFG) & 0x7) + 4;

	dev_info(&pdev->dev, "hardware revision: %d @ 0x%x\n",
		 (val >> 4) & 0xf, res->start);

	plat_data = pdev->dev.platform_data;

	/* Count the number of logical channels in use */
	for (i = 0; i < plat_data->dev_len; i++)
		if (plat_data->dev_rx[i] != 0)
			num_log_chans++;

	for (i = 0; i < plat_data->dev_len; i++)
		if (plat_data->dev_tx[i] != 0)
			num_log_chans++;

	base = kzalloc(ALIGN(sizeof(struct d40_base), 4) +
		       (num_phy_chans + num_log_chans + plat_data->memcpy_len) *
		       sizeof(struct d40_chan), GFP_KERNEL);

	if (base == NULL) {
		dev_err(&pdev->dev, "[%s] Out of memory\n", __func__);
		goto failure;
	}

	base->rev = (val >> 4) & 0xf;
	base->clk = clk;
	base->num_phy_chans = num_phy_chans;
	base->num_log_chans = num_log_chans;
	base->phy_start = res->start;
	base->phy_size = resource_size(res);
	base->virtbase = virtbase;
	base->plat_data = plat_data;
	base->dev = &pdev->dev;
	base->phy_chans = ((void *)base) + ALIGN(sizeof(struct d40_base), 4);
	base->log_chans = &base->phy_chans[num_phy_chans];

	base->phy_res = kzalloc(num_phy_chans * sizeof(struct d40_phy_res),
				GFP_KERNEL);
	if (!base->phy_res)
		goto failure;

	base->lookup_phy_chans = kzalloc(num_phy_chans *
					 sizeof(struct d40_chan *),
					 GFP_KERNEL);
	if (!base->lookup_phy_chans)
		goto failure;

	if (num_log_chans + plat_data->memcpy_len) {
		/*
		 * The max number of logical channels are event lines for all
		 * src devices and dst devices
		 */
		base->lookup_log_chans = kzalloc(plat_data->dev_len * 2 *
						 sizeof(struct d40_chan *),
						 GFP_KERNEL);
		if (!base->lookup_log_chans)
			goto failure;
	}

	base->lcla_pool.alloc_map = kzalloc(num_phy_chans * sizeof(u32),
					    GFP_KERNEL);
	if (!base->lcla_pool.alloc_map)
		goto failure;

	base->desc_slab = kmem_cache_create(D40_NAME, sizeof(struct d40_desc),
					    0, SLAB_HWCACHE_ALIGN,
					    NULL);
	if (base->desc_slab == NULL)
		goto failure;

	return base;

failure:
	if (clk) {
		clk_disable(clk);
		clk_put(clk);
	}
	if (virtbase)
		iounmap(virtbase);
	if (res)
		release_mem_region(res->start,
				   resource_size(res));

	if (base) {
		kfree(base->lcla_pool.alloc_map);
		kfree(base->lookup_log_chans);
		kfree(base->lookup_phy_chans);
		kfree(base->phy_res);
		kfree(base);
	}

	return NULL;
}
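
/*
 * Editor's note: the channel count read from D40_DREG_ICFG above encodes
 * 4 * n + 4 physical channels for the 3-bit field n, i.e. 4, 8, ..., 32.
 * For example, a field value of 0x7 gives 4 * 7 + 4 = 32 channels.
 */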
static void __init d40_hw_init(struct d40_base *base)
{

	static const struct d40_reg_val dma_init_reg[] = {
		/* Clock every part of the DMA block from start */
		{ .reg = D40_DREG_GCC, .val = 0x0000ff01},

		/* Interrupts on all logical channels */
		{ .reg = D40_DREG_LCMIS0, .val = 0xFFFFFFFF},
		{ .reg = D40_DREG_LCMIS1, .val = 0xFFFFFFFF},
		{ .reg = D40_DREG_LCMIS2, .val = 0xFFFFFFFF},
		{ .reg = D40_DREG_LCMIS3, .val = 0xFFFFFFFF},
		{ .reg = D40_DREG_LCICR0, .val = 0xFFFFFFFF},
		{ .reg = D40_DREG_LCICR1, .val = 0xFFFFFFFF},
		{ .reg = D40_DREG_LCICR2, .val = 0xFFFFFFFF},
		{ .reg = D40_DREG_LCICR3, .val = 0xFFFFFFFF},
		{ .reg = D40_DREG_LCTIS0, .val = 0xFFFFFFFF},
		{ .reg = D40_DREG_LCTIS1, .val = 0xFFFFFFFF},
		{ .reg = D40_DREG_LCTIS2, .val = 0xFFFFFFFF},
		{ .reg = D40_DREG_LCTIS3, .val = 0xFFFFFFFF}
	};
	int i;
	u32 prmseo[2] = {0, 0};
	u32 activeo[2] = {0xFFFFFFFF, 0xFFFFFFFF};
	u32 pcmis = 0;
	u32 pcicr = 0;

	for (i = 0; i < ARRAY_SIZE(dma_init_reg); i++)
		writel(dma_init_reg[i].val,
		       base->virtbase + dma_init_reg[i].reg);

	/* Configure all our dma channels to default settings */
	for (i = 0; i < base->num_phy_chans; i++) {

		activeo[i % 2] = activeo[i % 2] << 2;

		if (base->phy_res[base->num_phy_chans - i - 1].allocated_src
		    == D40_ALLOC_PHY) {
			activeo[i % 2] |= 3;
			continue;
		}

		/* Enable interrupt # */
		pcmis = (pcmis << 1) | 1;

		/* Clear interrupt # */
		pcicr = (pcicr << 1) | 1;

		/* Set channel to physical mode */
		prmseo[i % 2] = prmseo[i % 2] << 2;
		prmseo[i % 2] |= 1;
	}

	writel(prmseo[1], base->virtbase + D40_DREG_PRMSE);
	writel(prmseo[0], base->virtbase + D40_DREG_PRMSO);
	writel(activeo[1], base->virtbase + D40_DREG_ACTIVE);
	writel(activeo[0], base->virtbase + D40_DREG_ACTIVO);

	/* Write which interrupt to enable */
	writel(pcmis, base->virtbase + D40_DREG_PCMIS);

	/* Write which interrupt to clear */
	writel(pcicr, base->virtbase + D40_DREG_PCICR);

}
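
/*
 * Editor's note: the configuration loop above builds each register
 * most-significant-channel first. Iteration i handles physical channel
 * (num_phy_chans - i - 1) and shifts the accumulator before ORing in
 * that channel's bits, so channel 0 ends up in the lowest positions.
 * For example, with all channels available, pcmis is left-shifted and
 * ORed with 1 once per channel, leaving its low num_phy_chans bits set
 * (0xFF for an 8-channel part) when written to D40_DREG_PCMIS.
 */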
static int __init d40_lcla_allocate(struct d40_base *base)
{
	unsigned long *page_list;
	int i, j;
	int ret = 0;

	/*
	 * This is somewhat ugly. We need 8192 bytes that are 18 bit aligned.
	 * To fulfil this hardware requirement without wasting 256 kb
	 * we allocate pages until we get an aligned one.
	 */
	page_list = kmalloc(sizeof(unsigned long) * MAX_LCLA_ALLOC_ATTEMPTS,
			    GFP_KERNEL);

	if (!page_list) {
		ret = -ENOMEM;
		goto failure;
	}

	/* Calculating how many pages that are required */
	base->lcla_pool.pages = SZ_1K * base->num_phy_chans / PAGE_SIZE;

	for (i = 0; i < MAX_LCLA_ALLOC_ATTEMPTS; i++) {
		page_list[i] = __get_free_pages(GFP_KERNEL,
						base->lcla_pool.pages);
		if (!page_list[i]) {

			dev_err(base->dev,
				"[%s] Failed to allocate %d pages.\n",
				__func__, base->lcla_pool.pages);

			for (j = 0; j < i; j++)
				free_pages(page_list[j],
					   base->lcla_pool.pages);
			goto failure;
		}

		if ((virt_to_phys((void *)page_list[i]) &
		     (LCLA_ALIGNMENT - 1)) == 0)
			break;
	}

	for (j = 0; j < i; j++)
		free_pages(page_list[j], base->lcla_pool.pages);

	if (i < MAX_LCLA_ALLOC_ATTEMPTS) {
		base->lcla_pool.base = (void *)page_list[i];
	} else {
		/*
		 * After many attempts with no success finding the correct
		 * alignment, try allocating a big buffer instead.
		 */
		dev_warn(base->dev,
			 "[%s] Failed to get %d pages @ 18 bit align.\n",
			 __func__, base->lcla_pool.pages);
		base->lcla_pool.base_unaligned = kmalloc(SZ_1K *
							 base->num_phy_chans +
							 LCLA_ALIGNMENT,
							 GFP_KERNEL);
		if (!base->lcla_pool.base_unaligned) {
			ret = -ENOMEM;
			goto failure;
		}

		base->lcla_pool.base = PTR_ALIGN(base->lcla_pool.base_unaligned,
						 LCLA_ALIGNMENT);
	}

	writel(virt_to_phys(base->lcla_pool.base),
	       base->virtbase + D40_DREG_LCLA);
failure:
	kfree(page_list);
	return ret;
}
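
/*
 * Editor's sketch (not part of the driver, kept under #if 0): the
 * allocate-until-aligned technique used above, in isolation. Misaligned
 * allocations are held on to (so the allocator cannot hand the same
 * pages back) until an attempt lands on the boundary; only then are the
 * rejects freed. Assumes a power-of-two 'align' and illustrative names.
 */
#if 0
static unsigned long example_get_aligned_pages(unsigned int order,
					       unsigned long align,
					       int max_attempts)
{
	unsigned long tries[16];
	unsigned long addr = 0;
	int i, n = 0;

	while (n < max_attempts && n < 16) {
		tries[n] = __get_free_pages(GFP_KERNEL, order);
		if (!tries[n])
			break;
		if ((virt_to_phys((void *)tries[n]) & (align - 1)) == 0) {
			addr = tries[n];
			break;
		}
		n++;
	}

	/* Free every misaligned attempt; the aligned one (if any) is kept. */
	for (i = 0; i < n; i++)
		free_pages(tries[i], order);

	return addr; /* 0 on failure */
}
#endif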
static int __init d40_probe(struct platform_device *pdev)
{
	int err;
	int ret = -ENOENT;
	struct d40_base *base;
	struct resource *res = NULL;
	int num_reserved_chans;
	u32 val;

	base = d40_hw_detect_init(pdev);

	if (!base)
		goto failure;

	num_reserved_chans = d40_phy_res_init(base);

	platform_set_drvdata(pdev, base);

	spin_lock_init(&base->interrupt_lock);
	spin_lock_init(&base->execmd_lock);

	/* Get IO for logical channel parameter address */
	res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "lcpa");
	if (!res) {
		ret = -ENOENT;
		dev_err(&pdev->dev,
			"[%s] No \"lcpa\" memory resource\n",
			__func__);
		goto failure;
	}
	base->lcpa_size = resource_size(res);
	base->phy_lcpa = res->start;

	if (request_mem_region(res->start, resource_size(res),
			       D40_NAME " I/O lcpa") == NULL) {
		ret = -EBUSY;
		dev_err(&pdev->dev,
			"[%s] Failed to request LCPA region 0x%x-0x%x\n",
			__func__, res->start, res->end);
		goto failure;
	}

	/* We make use of ESRAM memory for this. */
	val = readl(base->virtbase + D40_DREG_LCPA);
	if (res->start != val && val != 0) {
		dev_warn(&pdev->dev,
			 "[%s] Mismatch LCPA dma 0x%x, def 0x%x\n",
			 __func__, val, res->start);
	} else
		writel(res->start, base->virtbase + D40_DREG_LCPA);

	base->lcpa_base = ioremap(res->start, resource_size(res));
	if (!base->lcpa_base) {
		ret = -ENOMEM;
		dev_err(&pdev->dev,
			"[%s] Failed to ioremap LCPA region\n",
			__func__);
		goto failure;
	}

	ret = d40_lcla_allocate(base);
	if (ret) {
		dev_err(&pdev->dev, "[%s] Failed to allocate LCLA area\n",
			__func__);
		goto failure;
	}

	spin_lock_init(&base->lcla_pool.lock);

	base->lcla_pool.num_blocks = base->num_phy_chans;

	base->irq = platform_get_irq(pdev, 0);

	ret = request_irq(base->irq, d40_handle_interrupt, 0, D40_NAME, base);

	if (ret) {
		dev_err(&pdev->dev, "[%s] No IRQ defined\n", __func__);
		goto failure;
	}

	err = d40_dmaengine_init(base, num_reserved_chans);
	if (err)
		goto failure;

	d40_hw_init(base);

	dev_info(base->dev, "initialized\n");
	return 0;

failure:
	if (base) {
		if (base->desc_slab)
			kmem_cache_destroy(base->desc_slab);
		if (base->virtbase)
			iounmap(base->virtbase);
		if (!base->lcla_pool.base_unaligned && base->lcla_pool.base)
			free_pages((unsigned long)base->lcla_pool.base,
				   base->lcla_pool.pages);
		if (base->lcla_pool.base_unaligned)
			kfree(base->lcla_pool.base_unaligned);
		if (base->phy_lcpa)
			release_mem_region(base->phy_lcpa,
					   base->lcpa_size);
		if (base->phy_start)
			release_mem_region(base->phy_start,
					   base->phy_size);
		if (base->clk) {
			clk_disable(base->clk);
			clk_put(base->clk);
		}

		kfree(base->lcla_pool.alloc_map);
		kfree(base->lookup_log_chans);
		kfree(base->lookup_phy_chans);
		kfree(base->phy_res);
		kfree(base);
	}

	dev_err(&pdev->dev, "[%s] probe failed\n", __func__);
	return ret;
}
static struct platform_driver d40_driver = {
	.driver = {
		.owner = THIS_MODULE,
		.name  = D40_NAME,
	},
};

int __init stedma40_init(void)
{
	return platform_driver_probe(&d40_driver, d40_probe);
}
arch_initcall(stedma40_init);