/*
 * Copyright (C) Ericsson AB 2007-2008
 * Copyright (C) ST-Ericsson SA 2008-2010
 * Author: Per Forlin <per.forlin@stericsson.com> for ST-Ericsson
 * Author: Jonas Aaberg <jonas.aberg@stericsson.com> for ST-Ericsson
 * License terms: GNU General Public License (GPL) version 2
 */
#include <linux/dma-mapping.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/export.h>
#include <linux/dmaengine.h>
#include <linux/platform_device.h>
#include <linux/clk.h>
#include <linux/delay.h>
#include <linux/pm.h>
#include <linux/pm_runtime.h>
#include <linux/err.h>
#include <linux/amba/bus.h>
#include <linux/regulator/consumer.h>
#include <linux/platform_data/dma-ste-dma40.h>

#include "dmaengine.h"
#include "ste_dma40_ll.h"
#define D40_NAME "dma40"

#define D40_PHY_CHAN -1

/* For masking out/in 2 bit channel positions */
#define D40_CHAN_POS(chan)  (2 * (chan / 2))
#define D40_CHAN_POS_MASK(chan) (0x3 << D40_CHAN_POS(chan))
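/*
 * Worked example of the position math: even-numbered channels have their
 * 2-bit command/status field in D40_DREG_ACTIVE and odd-numbered channels
 * in D40_DREG_ACTIVO (see __d40_execute_command_phy() below), so channels
 * 2k and 2k + 1 deliberately share a position:
 *
 *	D40_CHAN_POS(6) == D40_CHAN_POS(7) == 6
 *	D40_CHAN_POS_MASK(7) == 0x3 << 6 == 0xc0
 */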
/* Maximum iterations taken before giving up suspending a channel */
#define D40_SUSPEND_MAX_IT 500

/* Milliseconds */
#define DMA40_AUTOSUSPEND_DELAY	100

/* Hardware requirement on LCLA alignment */
#define LCLA_ALIGNMENT 0x40000

/* Max number of links per event group */
#define D40_LCLA_LINK_PER_EVENT_GRP 128
#define D40_LCLA_END D40_LCLA_LINK_PER_EVENT_GRP

/* Max number of logical channels per physical channel */
#define D40_MAX_LOG_CHAN_PER_PHY 32
/* Attempts before giving up trying to get pages that are aligned */
#define MAX_LCLA_ALLOC_ATTEMPTS 256
/* Bit markings for allocation map */
#define D40_ALLOC_FREE		(1 << 31)
#define D40_ALLOC_PHY		(1 << 30)
#define D40_ALLOC_LOG_FREE	0
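/*
 * How the allocation map is read (see d40_alloc_mask_set()/_free() below):
 * a half channel is entirely free when the value is D40_ALLOC_FREE, claimed
 * by a physical channel when it is D40_ALLOC_PHY, and used for logical
 * channels when the low bits form a per-event-line bitmask on top of
 * D40_ALLOC_LOG_FREE, e.g. event line 5 in use => bit 5 set.
 */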
/* Reserved event lines for memcpy only. */
#define DB8500_DMA_MEMCPY_EV_0	51
#define DB8500_DMA_MEMCPY_EV_1	56
#define DB8500_DMA_MEMCPY_EV_2	57
#define DB8500_DMA_MEMCPY_EV_3	58
#define DB8500_DMA_MEMCPY_EV_4	59
#define DB8500_DMA_MEMCPY_EV_5	60
static int dma40_memcpy_channels[] = {
	DB8500_DMA_MEMCPY_EV_0,
	DB8500_DMA_MEMCPY_EV_1,
	DB8500_DMA_MEMCPY_EV_2,
	DB8500_DMA_MEMCPY_EV_3,
	DB8500_DMA_MEMCPY_EV_4,
	DB8500_DMA_MEMCPY_EV_5,
};
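/*
 * Each reserved event line above backs one logical memcpy channel; the
 * channel's index into this array becomes its dev_type in
 * d40_config_memcpy() below.
 */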
/* Default configuration for physical memcpy */
struct stedma40_chan_cfg dma40_memcpy_conf_phy = {
	.mode = STEDMA40_MODE_PHYSICAL,
	.dir = STEDMA40_MEM_TO_MEM,

	.src_info.data_width = STEDMA40_BYTE_WIDTH,
	.src_info.psize = STEDMA40_PSIZE_PHY_1,
	.src_info.flow_ctrl = STEDMA40_NO_FLOW_CTRL,

	.dst_info.data_width = STEDMA40_BYTE_WIDTH,
	.dst_info.psize = STEDMA40_PSIZE_PHY_1,
	.dst_info.flow_ctrl = STEDMA40_NO_FLOW_CTRL,
};
/* Default configuration for logical memcpy */
struct stedma40_chan_cfg dma40_memcpy_conf_log = {
	.mode = STEDMA40_MODE_LOGICAL,
	.dir = STEDMA40_MEM_TO_MEM,

	.src_info.data_width = STEDMA40_BYTE_WIDTH,
	.src_info.psize = STEDMA40_PSIZE_LOG_1,
	.src_info.flow_ctrl = STEDMA40_NO_FLOW_CTRL,

	.dst_info.data_width = STEDMA40_BYTE_WIDTH,
	.dst_info.psize = STEDMA40_PSIZE_LOG_1,
	.dst_info.flow_ctrl = STEDMA40_NO_FLOW_CTRL,
};
/**
 * enum d40_command - The different commands and/or statuses.
 *
 * @D40_DMA_STOP: DMA channel command STOP or status STOPPED.
 * @D40_DMA_RUN: The DMA channel is RUNNING or the command RUN.
 * @D40_DMA_SUSPEND_REQ: Request the DMA to SUSPEND as soon as possible.
 * @D40_DMA_SUSPENDED: The DMA channel is SUSPENDED.
 */
enum d40_command {
	D40_DMA_STOP		= 0,
	D40_DMA_RUN		= 1,
	D40_DMA_SUSPEND_REQ	= 2,
	D40_DMA_SUSPENDED	= 3
};
/**
 * enum d40_events - The different Event Enables for the event lines.
 *
 * @D40_DEACTIVATE_EVENTLINE: De-activate Event line, stopping the logical chan.
 * @D40_ACTIVATE_EVENTLINE: Activate the Event line, to start a logical chan.
 * @D40_SUSPEND_REQ_EVENTLINE: Request suspension of an event line.
 * @D40_ROUND_EVENTLINE: Status check for event line.
 */
enum d40_events {
	D40_DEACTIVATE_EVENTLINE	= 0,
	D40_ACTIVATE_EVENTLINE		= 1,
	D40_SUSPEND_REQ_EVENTLINE	= 2,
	D40_ROUND_EVENTLINE		= 3
};
/*
 * These are the registers that have to be saved and later restored
 * when the DMA hw is powered off.
 * TODO: Add save/restore of D40_DREG_GCC on dma40 v3 or later, if that works.
 */
static u32 d40_backup_regs[] = {
	D40_DREG_LCPA,
	D40_DREG_LCLA,
	D40_DREG_PRMSE,
	D40_DREG_PRMSO,
	D40_DREG_PRMOE,
	D40_DREG_PRMOO,
};

#define BACKUP_REGS_SZ ARRAY_SIZE(d40_backup_regs)
/*
 * Since 9540 and 8540 have the same HW revision,
 * use v4a for 9540 or earlier
 * use v4b for 8540 or later
 * HW revision:
 * DB8500ed has revision 0
 * DB8500v1 has revision 2
 * DB8500v2 has revision 3
 * AP9540v1 has revision 4
 * DB8540v1 has revision 4
 * TODO: Check if all these registers have to be saved/restored on dma40 v4a
 */
static u32 d40_backup_regs_v4a[] = {
	D40_DREG_PSEG1,
	D40_DREG_PSEG2,
	D40_DREG_PSEG3,
	D40_DREG_PSEG4,
	D40_DREG_PCEG1,
	D40_DREG_PCEG2,
	D40_DREG_PCEG3,
	D40_DREG_PCEG4,
	D40_DREG_RSEG1,
	D40_DREG_RSEG2,
	D40_DREG_RSEG3,
	D40_DREG_RSEG4,
	D40_DREG_RCEG1,
	D40_DREG_RCEG2,
	D40_DREG_RCEG3,
	D40_DREG_RCEG4,
};

#define BACKUP_REGS_SZ_V4A ARRAY_SIZE(d40_backup_regs_v4a)

static u32 d40_backup_regs_v4b[] = {
	D40_DREG_CPSEG1,
	D40_DREG_CPSEG2,
	D40_DREG_CPSEG3,
	D40_DREG_CPSEG4,
	D40_DREG_CPSEG5,
	D40_DREG_CPCEG1,
	D40_DREG_CPCEG2,
	D40_DREG_CPCEG3,
	D40_DREG_CPCEG4,
	D40_DREG_CPCEG5,
	D40_DREG_CRSEG1,
	D40_DREG_CRSEG2,
	D40_DREG_CRSEG3,
	D40_DREG_CRSEG4,
	D40_DREG_CRSEG5,
	D40_DREG_CRCEG1,
	D40_DREG_CRCEG2,
	D40_DREG_CRCEG3,
	D40_DREG_CRCEG4,
	D40_DREG_CRCEG5,
};

#define BACKUP_REGS_SZ_V4B ARRAY_SIZE(d40_backup_regs_v4b)

static u32 d40_backup_regs_chan[] = {
	D40_CHAN_REG_SSCFG,
	D40_CHAN_REG_SSELT,
	D40_CHAN_REG_SSPTR,
	D40_CHAN_REG_SSLNK,
	D40_CHAN_REG_SDCFG,
	D40_CHAN_REG_SDELT,
	D40_CHAN_REG_SDPTR,
	D40_CHAN_REG_SDLNK,
};

#define BACKUP_REGS_SZ_MAX ((BACKUP_REGS_SZ_V4A > BACKUP_REGS_SZ_V4B) ? \
			     BACKUP_REGS_SZ_V4A : BACKUP_REGS_SZ_V4B)
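/*
 * BACKUP_REGS_SZ_MAX exists because a single backup buffer
 * (reg_val_backup_v4 in struct d40_base below) is sized at compile time to
 * hold whichever of the v4a and v4b register lists is larger; only one of
 * the two is actually used on a given SoC.
 */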
/**
 * struct d40_interrupt_lookup - lookup table for interrupt handler
 *
 * @src: Interrupt mask register.
 * @clr: Interrupt clear register.
 * @is_error: true if this is an error interrupt.
 * @offset: start delta in the lookup_log_chans in d40_base. If equals to
 * D40_PHY_CHAN, the lookup_phy_chans shall be used instead.
 */
struct d40_interrupt_lookup {
	u32 src;
	u32 clr;
	bool is_error;
	int offset;
};
static struct d40_interrupt_lookup il_v4a[] = {
	{D40_DREG_LCTIS0, D40_DREG_LCICR0, false,  0},
	{D40_DREG_LCTIS1, D40_DREG_LCICR1, false, 32},
	{D40_DREG_LCTIS2, D40_DREG_LCICR2, false, 64},
	{D40_DREG_LCTIS3, D40_DREG_LCICR3, false, 96},
	{D40_DREG_LCEIS0, D40_DREG_LCICR0, true,   0},
	{D40_DREG_LCEIS1, D40_DREG_LCICR1, true,  32},
	{D40_DREG_LCEIS2, D40_DREG_LCICR2, true,  64},
	{D40_DREG_LCEIS3, D40_DREG_LCICR3, true,  96},
	{D40_DREG_PCTIS,  D40_DREG_PCICR,  false, D40_PHY_CHAN},
	{D40_DREG_PCEIS,  D40_DREG_PCICR,  true,  D40_PHY_CHAN},
};
static struct d40_interrupt_lookup il_v4b[] = {
	{D40_DREG_CLCTIS1, D40_DREG_CLCICR1, false,   0},
	{D40_DREG_CLCTIS2, D40_DREG_CLCICR2, false,  32},
	{D40_DREG_CLCTIS3, D40_DREG_CLCICR3, false,  64},
	{D40_DREG_CLCTIS4, D40_DREG_CLCICR4, false,  96},
	{D40_DREG_CLCTIS5, D40_DREG_CLCICR5, false, 128},
	{D40_DREG_CLCEIS1, D40_DREG_CLCICR1, true,    0},
	{D40_DREG_CLCEIS2, D40_DREG_CLCICR2, true,   32},
	{D40_DREG_CLCEIS3, D40_DREG_CLCICR3, true,   64},
	{D40_DREG_CLCEIS4, D40_DREG_CLCICR4, true,   96},
	{D40_DREG_CLCEIS5, D40_DREG_CLCICR5, true,  128},
	{D40_DREG_CPCTIS,  D40_DREG_CPCICR,  false, D40_PHY_CHAN},
	{D40_DREG_CPCEIS,  D40_DREG_CPCICR,  true,  D40_PHY_CHAN},
};
/**
 * struct d40_reg_val - simple lookup struct
 *
 * @reg: The register.
 * @val: The value that belongs to the register in reg.
 */
struct d40_reg_val {
	unsigned int reg;
	unsigned int val;
};

static __initdata struct d40_reg_val dma_init_reg_v4a[] = {
	/* Clock every part of the DMA block from start */
	{ .reg = D40_DREG_GCC,    .val = D40_DREG_GCC_ENABLE_ALL},

	/* Interrupts on all logical channels */
	{ .reg = D40_DREG_LCMIS0, .val = 0xFFFFFFFF},
	{ .reg = D40_DREG_LCMIS1, .val = 0xFFFFFFFF},
	{ .reg = D40_DREG_LCMIS2, .val = 0xFFFFFFFF},
	{ .reg = D40_DREG_LCMIS3, .val = 0xFFFFFFFF},
	{ .reg = D40_DREG_LCICR0, .val = 0xFFFFFFFF},
	{ .reg = D40_DREG_LCICR1, .val = 0xFFFFFFFF},
	{ .reg = D40_DREG_LCICR2, .val = 0xFFFFFFFF},
	{ .reg = D40_DREG_LCICR3, .val = 0xFFFFFFFF},
	{ .reg = D40_DREG_LCTIS0, .val = 0xFFFFFFFF},
	{ .reg = D40_DREG_LCTIS1, .val = 0xFFFFFFFF},
	{ .reg = D40_DREG_LCTIS2, .val = 0xFFFFFFFF},
	{ .reg = D40_DREG_LCTIS3, .val = 0xFFFFFFFF}
};
static __initdata struct d40_reg_val dma_init_reg_v4b[] = {
	/* Clock every part of the DMA block from start */
	{ .reg = D40_DREG_GCC,     .val = D40_DREG_GCC_ENABLE_ALL},

	/* Interrupts on all logical channels */
	{ .reg = D40_DREG_CLCMIS1, .val = 0xFFFFFFFF},
	{ .reg = D40_DREG_CLCMIS2, .val = 0xFFFFFFFF},
	{ .reg = D40_DREG_CLCMIS3, .val = 0xFFFFFFFF},
	{ .reg = D40_DREG_CLCMIS4, .val = 0xFFFFFFFF},
	{ .reg = D40_DREG_CLCMIS5, .val = 0xFFFFFFFF},
	{ .reg = D40_DREG_CLCICR1, .val = 0xFFFFFFFF},
	{ .reg = D40_DREG_CLCICR2, .val = 0xFFFFFFFF},
	{ .reg = D40_DREG_CLCICR3, .val = 0xFFFFFFFF},
	{ .reg = D40_DREG_CLCICR4, .val = 0xFFFFFFFF},
	{ .reg = D40_DREG_CLCICR5, .val = 0xFFFFFFFF},
	{ .reg = D40_DREG_CLCTIS1, .val = 0xFFFFFFFF},
	{ .reg = D40_DREG_CLCTIS2, .val = 0xFFFFFFFF},
	{ .reg = D40_DREG_CLCTIS3, .val = 0xFFFFFFFF},
	{ .reg = D40_DREG_CLCTIS4, .val = 0xFFFFFFFF},
	{ .reg = D40_DREG_CLCTIS5, .val = 0xFFFFFFFF}
};
/**
 * struct d40_lli_pool - Structure for keeping LLIs in memory
 *
 * @base: Pointer to memory area when the pre_alloc_lli's are not large
 * enough, IE bigger than the most common case, 1 dst and 1 src. NULL if
 * pre_alloc_lli is used.
 * @dma_addr: DMA address, if mapped
 * @size: The size in bytes of the memory at base or the size of pre_alloc_lli.
 * @pre_alloc_lli: Pre allocated area for the most common case of transfers,
 * one buffer to one buffer.
 */
struct d40_lli_pool {
	void	*base;
	int	 size;
	dma_addr_t	dma_addr;
	/* Space for dst and src, plus an extra for padding */
	u8	 pre_alloc_lli[3 * sizeof(struct d40_phy_lli)];
};
/**
 * struct d40_desc - A descriptor is one DMA job.
 *
 * @lli_phy: LLI settings for physical channel. Both src and dst
 * point into the lli_pool, to base if lli_len > 1 or to pre_alloc_lli if
 * lli_len equals one.
 * @lli_log: Same as above but for logical channels.
 * @lli_pool: The pool with two entries pre-allocated.
 * @lli_len: Number of llis of current descriptor.
 * @lli_current: Number of transferred llis.
 * @lcla_alloc: Number of LCLA entries allocated.
 * @txd: DMA engine struct. Used for among other things for communication
 * during a transfer job.
 * @node: List entry.
 * @is_in_client_list: true if the client owns this descriptor.
 * @cyclic: true if this is a cyclic job
 *
 * This descriptor is used for both logical and physical transfers.
 */
struct d40_desc {
	/* LLI physical */
	struct d40_phy_lli_bidir	 lli_phy;
	/* LLI logical */
	struct d40_log_lli_bidir	 lli_log;

	struct d40_lli_pool		 lli_pool;
	int				 lli_len;
	int				 lli_current;
	int				 lcla_alloc;

	struct dma_async_tx_descriptor	 txd;
	struct list_head		 node;

	bool				 is_in_client_list;
	bool				 cyclic;
};
/**
 * struct d40_lcla_pool - LCLA pool settings and data.
 *
 * @base: The virtual address of LCLA. 18 bit aligned.
 * @dma_addr: DMA address, if mapped.
 * @base_unaligned: The original kmalloc pointer, if kmalloc is used.
 * This pointer is only there for clean-up on error.
 * @pages: The number of pages needed for all physical channels.
 * Only used later for clean-up on error
 * @lock: Lock to protect the content in this struct.
 * @alloc_map: big map over which LCLA entry is owned by which job.
 */
struct d40_lcla_pool {
	void		*base;
	dma_addr_t	 dma_addr;
	void		*base_unaligned;
	int		 pages;
	spinlock_t	 lock;
	struct d40_desc	**alloc_map;
};
/**
 * struct d40_phy_res - struct for handling eventlines mapped to physical
 * channels.
 *
 * @lock: A lock protecting this entity.
 * @reserved: True if used by secure world or otherwise.
 * @num: The physical channel number of this entity.
 * @allocated_src: Bit mapped to show which src event lines are mapped to
 * this physical channel. Can also be free or physically allocated.
 * @allocated_dst: Same as for src but is dst.
 * allocated_dst and allocated_src use the D40_ALLOC* defines as well as
 * event line number.
 * @use_soft_lli: To mark if the linked lists of channel are managed by SW.
 */
struct d40_phy_res {
	spinlock_t lock;
	bool	   reserved;
	int	   num;
	u32	   allocated_src;
	u32	   allocated_dst;
	bool	   use_soft_lli;
};
/**
 * struct d40_chan - Struct that describes a channel.
 *
 * @lock: A spinlock to protect this struct.
 * @log_num: The logical number, if any of this channel.
 * @pending_tx: The number of pending transfers. Used between interrupt handler
 * and tasklet.
 * @busy: Set to true when transfer is ongoing on this channel.
 * @phy_chan: Pointer to physical channel which this instance runs on. If this
 * pointer is NULL, then the channel is not allocated.
 * @chan: DMA engine handle.
 * @tasklet: Tasklet that gets scheduled from interrupt context to complete a
 * transfer and call client callback.
 * @client: Client owned descriptor list.
 * @pending_queue: Submitted jobs, to be issued by issue_pending()
 * @active: Active descriptor.
 * @done: Completed jobs
 * @queue: Queued jobs.
 * @prepare_queue: Prepared jobs.
 * @dma_cfg: The client configuration of this dma channel.
 * @configured: whether the dma_cfg configuration is valid
 * @base: Pointer to the device instance struct.
 * @src_def_cfg: Default cfg register setting for src.
 * @dst_def_cfg: Default cfg register setting for dst.
 * @log_def: Default logical channel settings.
 * @lcpa: Pointer to dst and src lcpa settings.
 * @runtime_addr: runtime configured address.
 * @runtime_direction: runtime configured direction.
 *
 * This struct can either "be" a logical or a physical channel.
 */
struct d40_chan {
	spinlock_t			 lock;
	int				 log_num;
	int				 pending_tx;
	bool				 busy;
	struct d40_phy_res		*phy_chan;
	struct dma_chan			 chan;
	struct tasklet_struct		 tasklet;
	struct list_head		 client;
	struct list_head		 pending_queue;
	struct list_head		 active;
	struct list_head		 done;
	struct list_head		 queue;
	struct list_head		 prepare_queue;
	struct stedma40_chan_cfg	 dma_cfg;
	bool				 configured;
	struct d40_base			*base;
	/* Default register configurations */
	u32				 src_def_cfg;
	u32				 dst_def_cfg;
	struct d40_def_lcsp		 log_def;
	struct d40_log_lli_full		*lcpa;
	/* Runtime reconfiguration */
	dma_addr_t			runtime_addr;
	enum dma_transfer_direction	runtime_direction;
};
/**
 * struct d40_gen_dmac - generic values to represent u8500/u8540 DMA
 * controller
 *
 * @backup: the pointer to the registers address array for backup
 * @backup_size: the size of the registers address array for backup
 * @realtime_en: the realtime enable register
 * @realtime_clear: the realtime clear register
 * @high_prio_en: the high priority enable register
 * @high_prio_clear: the high priority clear register
 * @interrupt_en: the interrupt enable register
 * @interrupt_clear: the interrupt clear register
 * @il: the pointer to struct d40_interrupt_lookup
 * @il_size: the size of d40_interrupt_lookup array
 * @init_reg: the pointer to the struct d40_reg_val
 * @init_reg_size: the size of d40_reg_val array
 */
struct d40_gen_dmac {
	u32				*backup;
	u32				 backup_size;
	u32				 realtime_en;
	u32				 realtime_clear;
	u32				 high_prio_en;
	u32				 high_prio_clear;
	u32				 interrupt_en;
	u32				 interrupt_clear;
	struct d40_interrupt_lookup	*il;
	u32				 il_size;
	struct d40_reg_val		*init_reg;
	u32				 init_reg_size;
};
/**
 * struct d40_base - The big global struct, one for each probed instance.
 *
 * @interrupt_lock: Lock used to make sure one interrupt is handled at a time.
 * @execmd_lock: Lock for execute command usage since several channels share
 * the same physical register.
 * @dev: The device structure.
 * @virtbase: The virtual base address of the DMA's register.
 * @rev: silicon revision detected.
 * @clk: Pointer to the DMA clock structure.
 * @phy_start: Physical memory start of the DMA registers.
 * @phy_size: Size of the DMA register map.
 * @irq: The IRQ number.
 * @num_phy_chans: The number of physical channels. Read from HW. This
 * is the number of available channels for this driver, not counting "Secure
 * mode" allocated physical channels.
 * @num_log_chans: The number of logical channels. Calculated from
 * num_phy_chans.
 * @dma_both: dma_device channels that can do both memcpy and slave transfers.
 * @dma_slave: dma_device channels that can only do slave transfers.
 * @dma_memcpy: dma_device channels that can only do memcpy transfers.
 * @phy_chans: Room for all possible physical channels in system.
 * @log_chans: Room for all possible logical channels in system.
 * @lookup_log_chans: Used to map interrupt number to logical channel. Points
 * to log_chans entries.
 * @lookup_phy_chans: Used to map interrupt number to physical channel. Points
 * to phy_chans entries.
 * @plat_data: Pointer to provided platform_data which is the driver
 * configuration.
 * @lcpa_regulator: Pointer to hold the regulator for the esram bank for lcla.
 * @phy_res: Vector containing all physical channels.
 * @lcla_pool: lcla pool settings and data.
 * @lcpa_base: The virtual mapped address of LCPA.
 * @phy_lcpa: The physical address of the LCPA.
 * @lcpa_size: The size of the LCPA area.
 * @desc_slab: cache for descriptors.
 * @reg_val_backup: Here the values of some hardware registers are stored
 * before the DMA is powered off. They are restored when the power is back on.
 * @reg_val_backup_v4: Backup of registers that only exist on dma40 v3 and
 * later.
 * @reg_val_backup_chan: Backup data for standard channel parameter registers.
 * @gcc_pwr_off_mask: Mask to maintain the channels that can be turned off.
 * @initialized: true if the dma has been initialized
 * @gen_dmac: the struct for generic registers values to represent u8500/8540
 * DMA controller
 */
struct d40_base {
	spinlock_t			  interrupt_lock;
	spinlock_t			  execmd_lock;
	struct device			 *dev;
	void __iomem			 *virtbase;
	u8				  rev:4;
	struct clk			 *clk;
	phys_addr_t			  phy_start;
	resource_size_t			  phy_size;
	int				  irq;
	int				  num_phy_chans;
	int				  num_log_chans;
	struct device_dma_parameters	  dma_parms;
	struct dma_device		  dma_both;
	struct dma_device		  dma_slave;
	struct dma_device		  dma_memcpy;
	struct d40_chan			 *phy_chans;
	struct d40_chan			 *log_chans;
	struct d40_chan			**lookup_log_chans;
	struct d40_chan			**lookup_phy_chans;
	struct stedma40_platform_data	 *plat_data;
	struct regulator		 *lcpa_regulator;
	/* Physical half channels */
	struct d40_phy_res		 *phy_res;
	struct d40_lcla_pool		  lcla_pool;
	void				 *lcpa_base;
	dma_addr_t			  phy_lcpa;
	resource_size_t			  lcpa_size;
	struct kmem_cache		 *desc_slab;
	u32				  reg_val_backup[BACKUP_REGS_SZ];
	u32				  reg_val_backup_v4[BACKUP_REGS_SZ_MAX];
	u32				 *reg_val_backup_chan;
	u16				  gcc_pwr_off_mask;
	bool				  initialized;
	struct d40_gen_dmac		  gen_dmac;
};
static struct device *chan2dev(struct d40_chan *d40c)
{
	return &d40c->chan.dev->device;
}

static bool chan_is_physical(struct d40_chan *chan)
{
	return chan->log_num == D40_PHY_CHAN;
}

static bool chan_is_logical(struct d40_chan *chan)
{
	return !chan_is_physical(chan);
}

static void __iomem *chan_base(struct d40_chan *chan)
{
	return chan->base->virtbase + D40_DREG_PCBASE +
	       chan->phy_chan->num * D40_DREG_PCDELTA;
}
#define d40_err(dev, format, arg...)		\
	dev_err(dev, "[%s] " format, __func__, ## arg)

#define chan_err(d40c, format, arg...)		\
	d40_err(chan2dev(d40c), format, ## arg)
static int d40_pool_lli_alloc(struct d40_chan *d40c, struct d40_desc *d40d,
			      int lli_len)
{
	bool is_log = chan_is_logical(d40c);
	u8 *base;
	int align;

	if (is_log)
		align = sizeof(struct d40_log_lli);
	else
		align = sizeof(struct d40_phy_lli);

	if (lli_len == 1) {
		base = d40d->lli_pool.pre_alloc_lli;
		d40d->lli_pool.size = sizeof(d40d->lli_pool.pre_alloc_lli);
		d40d->lli_pool.base = NULL;
	} else {
		d40d->lli_pool.size = lli_len * 2 * align;

		base = kmalloc(d40d->lli_pool.size + align, GFP_NOWAIT);
		d40d->lli_pool.base = base;

		if (d40d->lli_pool.base == NULL)
			return -ENOMEM;
	}

	if (is_log) {
		d40d->lli_log.src = PTR_ALIGN(base, align);
		d40d->lli_log.dst = d40d->lli_log.src + lli_len;

		d40d->lli_pool.dma_addr = 0;
	} else {
		d40d->lli_phy.src = PTR_ALIGN(base, align);
		d40d->lli_phy.dst = d40d->lli_phy.src + lli_len;

		d40d->lli_pool.dma_addr = dma_map_single(d40c->base->dev,
							 d40d->lli_phy.src,
							 d40d->lli_pool.size,
							 DMA_TO_DEVICE);

		if (dma_mapping_error(d40c->base->dev,
				      d40d->lli_pool.dma_addr)) {
			kfree(d40d->lli_pool.base);
			d40d->lli_pool.base = NULL;
			d40d->lli_pool.dma_addr = 0;
			return -ENOMEM;
		}
	}

	return 0;
}
static void d40_pool_lli_free(struct d40_chan *d40c, struct d40_desc *d40d)
{
	if (d40d->lli_pool.dma_addr)
		dma_unmap_single(d40c->base->dev, d40d->lli_pool.dma_addr,
				 d40d->lli_pool.size, DMA_TO_DEVICE);

	kfree(d40d->lli_pool.base);
	d40d->lli_pool.base = NULL;
	d40d->lli_pool.size = 0;
	d40d->lli_log.src = NULL;
	d40d->lli_log.dst = NULL;
	d40d->lli_phy.src = NULL;
	d40d->lli_phy.dst = NULL;
}
static int d40_lcla_alloc_one(struct d40_chan *d40c,
			      struct d40_desc *d40d)
{
	unsigned long flags;
	int i;
	int ret = -EINVAL;

	spin_lock_irqsave(&d40c->base->lcla_pool.lock, flags);

	/*
	 * Allocate both src and dst at the same time, therefore the half
	 * start on 1 since 0 can't be used since zero is used as end marker.
	 */
	for (i = 1 ; i < D40_LCLA_LINK_PER_EVENT_GRP / 2; i++) {
		int idx = d40c->phy_chan->num * D40_LCLA_LINK_PER_EVENT_GRP + i;

		if (!d40c->base->lcla_pool.alloc_map[idx]) {
			d40c->base->lcla_pool.alloc_map[idx] = d40d;
			d40d->lcla_alloc++;
			ret = i;
			break;
		}
	}

	spin_unlock_irqrestore(&d40c->base->lcla_pool.lock, flags);

	return ret;
}
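/*
 * Index math, by example: with D40_LCLA_LINK_PER_EVENT_GRP == 128, physical
 * channel n owns alloc_map[128 * n] .. alloc_map[128 * n + 127]. Since every
 * allocation covers a src/dst pair and slot 0 is the end-of-list marker,
 * only slots 1 .. 63 are handed out above.
 */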
static int d40_lcla_free_all(struct d40_chan *d40c,
			     struct d40_desc *d40d)
{
	unsigned long flags;
	int i;
	int ret = -EINVAL;

	if (chan_is_physical(d40c))
		return 0;

	spin_lock_irqsave(&d40c->base->lcla_pool.lock, flags);

	for (i = 1 ; i < D40_LCLA_LINK_PER_EVENT_GRP / 2; i++) {
		int idx = d40c->phy_chan->num * D40_LCLA_LINK_PER_EVENT_GRP + i;

		if (d40c->base->lcla_pool.alloc_map[idx] == d40d) {
			d40c->base->lcla_pool.alloc_map[idx] = NULL;
			d40d->lcla_alloc--;
			if (d40d->lcla_alloc == 0) {
				ret = 0;
				break;
			}
		}
	}

	spin_unlock_irqrestore(&d40c->base->lcla_pool.lock, flags);

	return ret;
}
static void d40_desc_remove(struct d40_desc *d40d)
{
	list_del(&d40d->node);
}
static struct d40_desc *d40_desc_get(struct d40_chan *d40c)
{
	struct d40_desc *desc = NULL;

	if (!list_empty(&d40c->client)) {
		struct d40_desc *d;
		struct d40_desc *_d;

		list_for_each_entry_safe(d, _d, &d40c->client, node) {
			if (async_tx_test_ack(&d->txd)) {
				d40_desc_remove(d);
				desc = d;
				memset(desc, 0, sizeof(*desc));
				break;
			}
		}
	}

	if (!desc)
		desc = kmem_cache_zalloc(d40c->base->desc_slab, GFP_NOWAIT);

	if (desc)
		INIT_LIST_HEAD(&desc->node);

	return desc;
}
static void d40_desc_free(struct d40_chan *d40c, struct d40_desc *d40d)
{
	d40_pool_lli_free(d40c, d40d);
	d40_lcla_free_all(d40c, d40d);
	kmem_cache_free(d40c->base->desc_slab, d40d);
}

static void d40_desc_submit(struct d40_chan *d40c, struct d40_desc *desc)
{
	list_add_tail(&desc->node, &d40c->active);
}
static void d40_phy_lli_load(struct d40_chan *chan, struct d40_desc *desc)
{
	struct d40_phy_lli *lli_dst = desc->lli_phy.dst;
	struct d40_phy_lli *lli_src = desc->lli_phy.src;
	void __iomem *base = chan_base(chan);

	writel(lli_src->reg_cfg, base + D40_CHAN_REG_SSCFG);
	writel(lli_src->reg_elt, base + D40_CHAN_REG_SSELT);
	writel(lli_src->reg_ptr, base + D40_CHAN_REG_SSPTR);
	writel(lli_src->reg_lnk, base + D40_CHAN_REG_SSLNK);

	writel(lli_dst->reg_cfg, base + D40_CHAN_REG_SDCFG);
	writel(lli_dst->reg_elt, base + D40_CHAN_REG_SDELT);
	writel(lli_dst->reg_ptr, base + D40_CHAN_REG_SDPTR);
	writel(lli_dst->reg_lnk, base + D40_CHAN_REG_SDLNK);
}
static void d40_desc_done(struct d40_chan *d40c, struct d40_desc *desc)
{
	list_add_tail(&desc->node, &d40c->done);
}
static void d40_log_lli_to_lcxa(struct d40_chan *chan, struct d40_desc *desc)
{
	struct d40_lcla_pool *pool = &chan->base->lcla_pool;
	struct d40_log_lli_bidir *lli = &desc->lli_log;
	int lli_current = desc->lli_current;
	int lli_len = desc->lli_len;
	bool cyclic = desc->cyclic;
	int curr_lcla = -EINVAL;
	int first_lcla = 0;
	bool use_esram_lcla = chan->base->plat_data->use_esram_lcla;
	bool linkback;

	/*
	 * We may have partially running cyclic transfers, in case we didn't
	 * get enough LCLA entries.
	 */
	linkback = cyclic && lli_current == 0;

	/*
	 * For linkback, we need one LCLA even with only one link, because we
	 * can't link back to the one in LCPA space
	 */
	if (linkback || (lli_len - lli_current > 1)) {
		/*
		 * If the channel is expected to use only soft_lli don't
		 * allocate a lcla. This is to avoid a HW issue that exists
		 * in some controller during a peripheral to memory transfer
		 * that uses linked lists.
		 */
		if (!(chan->phy_chan->use_soft_lli &&
		      chan->dma_cfg.dir == STEDMA40_PERIPH_TO_MEM))
			curr_lcla = d40_lcla_alloc_one(chan, desc);

		first_lcla = curr_lcla;
	}

	/*
	 * For linkback, we normally load the LCPA in the loop since we need to
	 * link it to the second LCLA and not the first. However, if we
	 * couldn't even get a first LCLA, then we have to run in LCPA and
	 * reload manually.
	 */
	if (!linkback || curr_lcla == -EINVAL) {
		unsigned int flags = 0;

		if (curr_lcla == -EINVAL)
			flags |= LLI_TERM_INT;

		d40_log_lli_lcpa_write(chan->lcpa,
				       &lli->dst[lli_current],
				       &lli->src[lli_current],
				       curr_lcla,
				       flags);
		lli_current++;
	}

	if (curr_lcla < 0)
		goto out;

	for (; lli_current < lli_len; lli_current++) {
		unsigned int lcla_offset = chan->phy_chan->num * 1024 +
					   8 * curr_lcla * 2;
		struct d40_log_lli *lcla = pool->base + lcla_offset;
		unsigned int flags = 0;
		int next_lcla;

		if (lli_current + 1 < lli_len)
			next_lcla = d40_lcla_alloc_one(chan, desc);
		else
			next_lcla = linkback ? first_lcla : -EINVAL;

		if (cyclic || next_lcla == -EINVAL)
			flags |= LLI_TERM_INT;

		if (linkback && curr_lcla == first_lcla) {
			/* First link goes in both LCPA and LCLA */
			d40_log_lli_lcpa_write(chan->lcpa,
					       &lli->dst[lli_current],
					       &lli->src[lli_current],
					       next_lcla, flags);
		}

		/*
		 * One unused LCLA in the cyclic case if the very first
		 * next_lcla fails...
		 */
		d40_log_lli_lcla_write(lcla,
				       &lli->dst[lli_current],
				       &lli->src[lli_current],
				       next_lcla, flags);

		/*
		 * Cache maintenance is not needed if lcla is
		 * mapped in esram
		 */
		if (!use_esram_lcla) {
			dma_sync_single_range_for_device(chan->base->dev,
						pool->dma_addr, lcla_offset,
						2 * sizeof(struct d40_log_lli),
						DMA_TO_DEVICE);
		}
		curr_lcla = next_lcla;

		if (curr_lcla == -EINVAL || curr_lcla == first_lcla) {
			lli_current++;
			break;
		}
	}

out:
	desc->lli_current = lli_current;
}
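/*
 * LCLA layout implied by the math above: each physical channel owns 1024
 * bytes of LCLA, and each link is a src/dst pair of two 8-byte struct
 * d40_log_lli entries (hence the 8 * curr_lcla * 2 offset term and the
 * 2 * sizeof(struct d40_log_lli) sync range). 64 such 16-byte slots fill
 * the area, matching the 1..63 range handed out by d40_lcla_alloc_one().
 */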
static void d40_desc_load(struct d40_chan *d40c, struct d40_desc *d40d)
{
	if (chan_is_physical(d40c)) {
		d40_phy_lli_load(d40c, d40d);
		d40d->lli_current = d40d->lli_len;
	} else
		d40_log_lli_to_lcxa(d40c, d40d);
}
static struct d40_desc *d40_first_active_get(struct d40_chan *d40c)
{
	struct d40_desc *d;

	if (list_empty(&d40c->active))
		return NULL;

	d = list_first_entry(&d40c->active,
			     struct d40_desc,
			     node);
	return d;
}

/* remove desc from current queue and add it to the pending_queue */
static void d40_desc_queue(struct d40_chan *d40c, struct d40_desc *desc)
{
	d40_desc_remove(desc);
	desc->is_in_client_list = false;
	list_add_tail(&desc->node, &d40c->pending_queue);
}

static struct d40_desc *d40_first_pending(struct d40_chan *d40c)
{
	struct d40_desc *d;

	if (list_empty(&d40c->pending_queue))
		return NULL;

	d = list_first_entry(&d40c->pending_queue,
			     struct d40_desc,
			     node);
	return d;
}

static struct d40_desc *d40_first_queued(struct d40_chan *d40c)
{
	struct d40_desc *d;

	if (list_empty(&d40c->queue))
		return NULL;

	d = list_first_entry(&d40c->queue,
			     struct d40_desc,
			     node);
	return d;
}

static struct d40_desc *d40_first_done(struct d40_chan *d40c)
{
	if (list_empty(&d40c->done))
		return NULL;

	return list_first_entry(&d40c->done, struct d40_desc, node);
}
static int d40_psize_2_burst_size(bool is_log, int psize)
{
	if (is_log) {
		if (psize == STEDMA40_PSIZE_LOG_1)
			return 1;
	} else {
		if (psize == STEDMA40_PSIZE_PHY_1)
			return 1;
	}

	return 2 << psize;
}
/*
 * The dma only supports transmitting packages up to
 * STEDMA40_MAX_SEG_SIZE << data_width. Calculate the total number of
 * dma elements required to send the entire sg list.
 */
static int d40_size_2_dmalen(int size, u32 data_width1, u32 data_width2)
{
	int dmalen;
	u32 max_w = max(data_width1, data_width2);
	u32 min_w = min(data_width1, data_width2);
	u32 seg_max = ALIGN(STEDMA40_MAX_SEG_SIZE << min_w, 1 << max_w);

	if (seg_max > STEDMA40_MAX_SEG_SIZE)
		seg_max -= (1 << max_w);

	if (!IS_ALIGNED(size, 1 << max_w))
		return -EINVAL;

	if (size <= seg_max)
		dmalen = 1;
	else {
		dmalen = size / seg_max;
		if (dmalen * seg_max < size)
			dmalen++;
	}
	return dmalen;
}
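/*
 * Worked example (assuming STEDMA40_MAX_SEG_SIZE is 0xffff as defined in
 * ste_dma40_ll.h): for byte-wide src and dst (max_w == min_w == 0), seg_max
 * is 65535, so a 100000-byte, byte-aligned buffer needs
 * 100000 / 65535 == 1 full segment plus a remainder, i.e. dmalen == 2.
 */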
static int d40_sg_2_dmalen(struct scatterlist *sgl, int sg_len,
			   u32 data_width1, u32 data_width2)
{
	struct scatterlist *sg;
	int i;
	int len = 0;
	int ret;

	for_each_sg(sgl, sg, sg_len, i) {
		ret = d40_size_2_dmalen(sg_dma_len(sg),
					data_width1, data_width2);
		if (ret < 0)
			return ret;
		len += ret;
	}
	return len;
}
#ifdef CONFIG_PM
static void dma40_backup(void __iomem *baseaddr, u32 *backup,
			 u32 *regaddr, int num, bool save)
{
	int i;

	for (i = 0; i < num; i++) {
		void __iomem *addr = baseaddr + regaddr[i];

		if (save)
			backup[i] = readl_relaxed(addr);
		else
			writel_relaxed(backup[i], addr);
	}
}
static void d40_save_restore_registers(struct d40_base *base, bool save)
{
	int i;

	/* Save/Restore channel specific registers */
	for (i = 0; i < base->num_phy_chans; i++) {
		void __iomem *addr;
		int idx;

		if (base->phy_res[i].reserved)
			continue;

		addr = base->virtbase + D40_DREG_PCBASE + i * D40_DREG_PCDELTA;
		idx = i * ARRAY_SIZE(d40_backup_regs_chan);

		dma40_backup(addr, &base->reg_val_backup_chan[idx],
			     d40_backup_regs_chan,
			     ARRAY_SIZE(d40_backup_regs_chan),
			     save);
	}

	/* Save/Restore global registers */
	dma40_backup(base->virtbase, base->reg_val_backup,
		     d40_backup_regs, ARRAY_SIZE(d40_backup_regs),
		     save);

	/* Save/Restore registers only existing on dma40 v3 and later */
	if (base->gen_dmac.backup)
		dma40_backup(base->virtbase, base->reg_val_backup_v4,
			     base->gen_dmac.backup,
			     base->gen_dmac.backup_size,
			     save);
}
#else
static void d40_save_restore_registers(struct d40_base *base, bool save)
{
}
#endif
static int __d40_execute_command_phy(struct d40_chan *d40c,
				     enum d40_command command)
{
	u32 status;
	int i;
	void __iomem *active_reg;
	int ret = 0;
	unsigned long flags;
	u32 wmask;

	if (command == D40_DMA_STOP) {
		ret = __d40_execute_command_phy(d40c, D40_DMA_SUSPEND_REQ);
		if (ret)
			return ret;
	}

	spin_lock_irqsave(&d40c->base->execmd_lock, flags);

	if (d40c->phy_chan->num % 2 == 0)
		active_reg = d40c->base->virtbase + D40_DREG_ACTIVE;
	else
		active_reg = d40c->base->virtbase + D40_DREG_ACTIVO;

	if (command == D40_DMA_SUSPEND_REQ) {
		status = (readl(active_reg) &
			  D40_CHAN_POS_MASK(d40c->phy_chan->num)) >>
			D40_CHAN_POS(d40c->phy_chan->num);

		if (status == D40_DMA_SUSPENDED || status == D40_DMA_STOP)
			goto done;
	}

	wmask = 0xffffffff & ~(D40_CHAN_POS_MASK(d40c->phy_chan->num));
	writel(wmask | (command << D40_CHAN_POS(d40c->phy_chan->num)),
	       active_reg);

	if (command == D40_DMA_SUSPEND_REQ) {

		for (i = 0 ; i < D40_SUSPEND_MAX_IT; i++) {
			status = (readl(active_reg) &
				  D40_CHAN_POS_MASK(d40c->phy_chan->num)) >>
				D40_CHAN_POS(d40c->phy_chan->num);

			cpu_relax();
			/*
			 * Reduce the number of bus accesses while
			 * waiting for the DMA to suspend.
			 */
			udelay(3);

			if (status == D40_DMA_STOP ||
			    status == D40_DMA_SUSPENDED)
				break;
		}

		if (i == D40_SUSPEND_MAX_IT) {
			d40_err(chan2dev(d40c),
				"unable to suspend the chl %d (log: %d) status %x\n",
				d40c->phy_chan->num, d40c->log_num,
				status);
			dump_stack();
			ret = -EBUSY;
		}

	}
done:
	spin_unlock_irqrestore(&d40c->base->execmd_lock, flags);
	return ret;
}
static void d40_term_all(struct d40_chan *d40c)
{
	struct d40_desc *d40d;
	struct d40_desc *_d;

	/* Release completed descriptors */
	while ((d40d = d40_first_done(d40c))) {
		d40_desc_remove(d40d);
		d40_desc_free(d40c, d40d);
	}

	/* Release active descriptors */
	while ((d40d = d40_first_active_get(d40c))) {
		d40_desc_remove(d40d);
		d40_desc_free(d40c, d40d);
	}

	/* Release queued descriptors waiting for transfer */
	while ((d40d = d40_first_queued(d40c))) {
		d40_desc_remove(d40d);
		d40_desc_free(d40c, d40d);
	}

	/* Release pending descriptors */
	while ((d40d = d40_first_pending(d40c))) {
		d40_desc_remove(d40d);
		d40_desc_free(d40c, d40d);
	}

	/* Release client owned descriptors */
	if (!list_empty(&d40c->client))
		list_for_each_entry_safe(d40d, _d, &d40c->client, node) {
			d40_desc_remove(d40d);
			d40_desc_free(d40c, d40d);
		}

	/* Release descriptors in prepare queue */
	if (!list_empty(&d40c->prepare_queue))
		list_for_each_entry_safe(d40d, _d,
					 &d40c->prepare_queue, node) {
			d40_desc_remove(d40d);
			d40_desc_free(d40c, d40d);
		}

	d40c->pending_tx = 0;
}
static void __d40_config_set_event(struct d40_chan *d40c,
				   enum d40_events event_type, u32 event,
				   int reg)
{
	void __iomem *addr = chan_base(d40c) + reg;
	int tries;
	u32 status;

	switch (event_type) {

	case D40_DEACTIVATE_EVENTLINE:

		writel((D40_DEACTIVATE_EVENTLINE << D40_EVENTLINE_POS(event))
		       | ~D40_EVENTLINE_MASK(event), addr);
		break;

	case D40_SUSPEND_REQ_EVENTLINE:
		status = (readl(addr) & D40_EVENTLINE_MASK(event)) >>
			  D40_EVENTLINE_POS(event);

		if (status == D40_DEACTIVATE_EVENTLINE ||
		    status == D40_SUSPEND_REQ_EVENTLINE)
			break;

		writel((D40_SUSPEND_REQ_EVENTLINE << D40_EVENTLINE_POS(event))
		       | ~D40_EVENTLINE_MASK(event), addr);

		for (tries = 0 ; tries < D40_SUSPEND_MAX_IT; tries++) {

			status = (readl(addr) & D40_EVENTLINE_MASK(event)) >>
				  D40_EVENTLINE_POS(event);

			cpu_relax();
			/*
			 * Reduce the number of bus accesses while
			 * waiting for the DMA to suspend.
			 */
			udelay(3);

			if (status == D40_DEACTIVATE_EVENTLINE)
				break;
		}

		if (tries == D40_SUSPEND_MAX_IT) {
			d40_err(chan2dev(d40c),
				"unable to stop the event_line chl %d (log: %d) status %x\n",
				d40c->phy_chan->num, d40c->log_num, status);
		}
		break;

	case D40_ACTIVATE_EVENTLINE:
		/*
		 * The hardware sometimes doesn't register the enable when src
		 * and dst event lines are active on the same logical channel.
		 * Retry to ensure it does. Usually only one retry is
		 * sufficient.
		 */
		tries = 100;
		while (--tries) {
			writel((D40_ACTIVATE_EVENTLINE <<
				D40_EVENTLINE_POS(event)) |
			       ~D40_EVENTLINE_MASK(event), addr);

			if (readl(addr) & D40_EVENTLINE_MASK(event))
				break;
		}

		if (tries != 99)
			dev_dbg(chan2dev(d40c),
				"[%s] workaround enable S%cLNK (%d tries)\n",
				__func__, reg == D40_CHAN_REG_SSLNK ? 'S' : 'D',
				100 - tries);

		WARN_ON(!tries);
		break;

	case D40_ROUND_EVENTLINE:
		BUG();
		break;
	}
}
static void d40_config_set_event(struct d40_chan *d40c,
				 enum d40_events event_type)
{
	u32 event = D40_TYPE_TO_EVENT(d40c->dma_cfg.dev_type);

	/* Enable event line connected to device (or memcpy) */
	if ((d40c->dma_cfg.dir == STEDMA40_PERIPH_TO_MEM) ||
	    (d40c->dma_cfg.dir == STEDMA40_PERIPH_TO_PERIPH))
		__d40_config_set_event(d40c, event_type, event,
				       D40_CHAN_REG_SSLNK);

	if (d40c->dma_cfg.dir != STEDMA40_PERIPH_TO_MEM)
		__d40_config_set_event(d40c, event_type, event,
				       D40_CHAN_REG_SDLNK);
}
static u32 d40_chan_has_events(struct d40_chan *d40c)
{
	void __iomem *chanbase = chan_base(d40c);
	u32 val;

	val = readl(chanbase + D40_CHAN_REG_SSLNK);
	val |= readl(chanbase + D40_CHAN_REG_SDLNK);

	return val;
}
static int
__d40_execute_command_log(struct d40_chan *d40c, enum d40_command command)
{
	unsigned long flags;
	int ret = 0;
	u32 active_status;
	void __iomem *active_reg;

	if (d40c->phy_chan->num % 2 == 0)
		active_reg = d40c->base->virtbase + D40_DREG_ACTIVE;
	else
		active_reg = d40c->base->virtbase + D40_DREG_ACTIVO;

	spin_lock_irqsave(&d40c->phy_chan->lock, flags);

	switch (command) {
	case D40_DMA_STOP:
	case D40_DMA_SUSPEND_REQ:

		active_status = (readl(active_reg) &
				 D40_CHAN_POS_MASK(d40c->phy_chan->num)) >>
				 D40_CHAN_POS(d40c->phy_chan->num);

		if (active_status == D40_DMA_RUN)
			d40_config_set_event(d40c, D40_SUSPEND_REQ_EVENTLINE);
		else
			d40_config_set_event(d40c, D40_DEACTIVATE_EVENTLINE);

		if (!d40_chan_has_events(d40c) && (command == D40_DMA_STOP))
			ret = __d40_execute_command_phy(d40c, command);

		break;

	case D40_DMA_RUN:

		d40_config_set_event(d40c, D40_ACTIVATE_EVENTLINE);
		ret = __d40_execute_command_phy(d40c, command);
		break;

	case D40_DMA_SUSPENDED:
		BUG();
		break;
	}

	spin_unlock_irqrestore(&d40c->phy_chan->lock, flags);
	return ret;
}
static int d40_channel_execute_command(struct d40_chan *d40c,
				       enum d40_command command)
{
	if (chan_is_logical(d40c))
		return __d40_execute_command_log(d40c, command);
	else
		return __d40_execute_command_phy(d40c, command);
}
static u32 d40_get_prmo(struct d40_chan *d40c)
{
	static const unsigned int phy_map[] = {
		[STEDMA40_PCHAN_BASIC_MODE]
			= D40_DREG_PRMO_PCHAN_BASIC,
		[STEDMA40_PCHAN_MODULO_MODE]
			= D40_DREG_PRMO_PCHAN_MODULO,
		[STEDMA40_PCHAN_DOUBLE_DST_MODE]
			= D40_DREG_PRMO_PCHAN_DOUBLE_DST,
	};
	static const unsigned int log_map[] = {
		[STEDMA40_LCHAN_SRC_PHY_DST_LOG]
			= D40_DREG_PRMO_LCHAN_SRC_PHY_DST_LOG,
		[STEDMA40_LCHAN_SRC_LOG_DST_PHY]
			= D40_DREG_PRMO_LCHAN_SRC_LOG_DST_PHY,
		[STEDMA40_LCHAN_SRC_LOG_DST_LOG]
			= D40_DREG_PRMO_LCHAN_SRC_LOG_DST_LOG,
	};

	if (chan_is_physical(d40c))
		return phy_map[d40c->dma_cfg.mode_opt];
	else
		return log_map[d40c->dma_cfg.mode_opt];
}
static void d40_config_write(struct d40_chan *d40c)
{
	u32 addr_base;
	u32 var;

	/* Odd addresses are even addresses + 4 */
	addr_base = (d40c->phy_chan->num % 2) * 4;
	/* Setup channel mode to logical or physical */
	var = ((u32)(chan_is_logical(d40c)) + 1) <<
		D40_CHAN_POS(d40c->phy_chan->num);
	writel(var, d40c->base->virtbase + D40_DREG_PRMSE + addr_base);

	/* Setup operational mode option register */
	var = d40_get_prmo(d40c) << D40_CHAN_POS(d40c->phy_chan->num);

	writel(var, d40c->base->virtbase + D40_DREG_PRMOE + addr_base);

	if (chan_is_logical(d40c)) {
		int lidx = (d40c->phy_chan->num << D40_SREG_ELEM_LOG_LIDX_POS)
			   & D40_SREG_ELEM_LOG_LIDX_MASK;
		void __iomem *chanbase = chan_base(d40c);

		/* Set default config for CFG reg */
		writel(d40c->src_def_cfg, chanbase + D40_CHAN_REG_SSCFG);
		writel(d40c->dst_def_cfg, chanbase + D40_CHAN_REG_SDCFG);

		/* Set LIDX for lcla */
		writel(lidx, chanbase + D40_CHAN_REG_SSELT);
		writel(lidx, chanbase + D40_CHAN_REG_SDELT);

		/* Clear LNK which will be used by d40_chan_has_events() */
		writel(0, chanbase + D40_CHAN_REG_SSLNK);
		writel(0, chanbase + D40_CHAN_REG_SDLNK);
	}
}
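/*
 * Example of the even/odd register pairing used above: PRMSE holds the mode
 * bits for even-numbered physical channels and the register 4 bytes above
 * it (PRMSO) holds them for odd-numbered ones, which is what
 * addr_base = (num % 2) * 4 selects; likewise PRMOE/PRMOO for the mode
 * option. Channel 5 therefore writes D40_DREG_PRMSE + 4 at bit position
 * D40_CHAN_POS(5) == 4.
 */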
static u32 d40_residue(struct d40_chan *d40c)
{
	u32 num_elt;

	if (chan_is_logical(d40c))
		num_elt = (readl(&d40c->lcpa->lcsp2) & D40_MEM_LCSP2_ECNT_MASK)
			>> D40_MEM_LCSP2_ECNT_POS;
	else {
		u32 val = readl(chan_base(d40c) + D40_CHAN_REG_SDELT);
		num_elt = (val & D40_SREG_ELEM_PHY_ECNT_MASK)
			  >> D40_SREG_ELEM_PHY_ECNT_POS;
	}

	return num_elt * (1 << d40c->dma_cfg.dst_info.data_width);
}
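/*
 * Example: with 10 elements left and a dst data width code of 2 (4-byte
 * words), the residue reported is 10 * (1 << 2) == 40 bytes.
 */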
static bool d40_tx_is_linked(struct d40_chan *d40c)
{
	bool is_link;

	if (chan_is_logical(d40c))
		is_link = readl(&d40c->lcpa->lcsp3) & D40_MEM_LCSP3_DLOS_MASK;
	else
		is_link = readl(chan_base(d40c) + D40_CHAN_REG_SDLNK)
			  & D40_SREG_LNK_PHYS_LNK_MASK;

	return is_link;
}
static int d40_pause(struct d40_chan *d40c)
{
	int res = 0;
	unsigned long flags;

	if (!d40c->busy)
		return 0;

	pm_runtime_get_sync(d40c->base->dev);
	spin_lock_irqsave(&d40c->lock, flags);

	res = d40_channel_execute_command(d40c, D40_DMA_SUSPEND_REQ);

	pm_runtime_mark_last_busy(d40c->base->dev);
	pm_runtime_put_autosuspend(d40c->base->dev);
	spin_unlock_irqrestore(&d40c->lock, flags);
	return res;
}

static int d40_resume(struct d40_chan *d40c)
{
	int res = 0;
	unsigned long flags;

	if (!d40c->busy)
		return 0;

	spin_lock_irqsave(&d40c->lock, flags);
	pm_runtime_get_sync(d40c->base->dev);

	/* If bytes left to transfer or linked tx resume job */
	if (d40_residue(d40c) || d40_tx_is_linked(d40c))
		res = d40_channel_execute_command(d40c, D40_DMA_RUN);

	pm_runtime_mark_last_busy(d40c->base->dev);
	pm_runtime_put_autosuspend(d40c->base->dev);
	spin_unlock_irqrestore(&d40c->lock, flags);
	return res;
}
static dma_cookie_t d40_tx_submit(struct dma_async_tx_descriptor *tx)
{
	struct d40_chan *d40c = container_of(tx->chan,
					     struct d40_chan,
					     chan);
	struct d40_desc *d40d = container_of(tx, struct d40_desc, txd);
	unsigned long flags;
	dma_cookie_t cookie;

	spin_lock_irqsave(&d40c->lock, flags);
	cookie = dma_cookie_assign(tx);
	d40_desc_queue(d40c, d40d);
	spin_unlock_irqrestore(&d40c->lock, flags);

	return cookie;
}
static int d40_start(struct d40_chan *d40c)
{
	return d40_channel_execute_command(d40c, D40_DMA_RUN);
}

static struct d40_desc *d40_queue_start(struct d40_chan *d40c)
{
	struct d40_desc *d40d;
	int err;

	/* Start queued jobs, if any */
	d40d = d40_first_queued(d40c);

	if (d40d != NULL) {
		if (!d40c->busy) {
			d40c->busy = true;
			pm_runtime_get_sync(d40c->base->dev);
		}

		/* Remove from queue */
		d40_desc_remove(d40d);

		/* Add to active queue */
		d40_desc_submit(d40c, d40d);

		/* Initiate DMA job */
		d40_desc_load(d40c, d40d);

		/* Start dma job */
		err = d40_start(d40c);

		if (err)
			return NULL;
	}

	return d40d;
}
/* called from interrupt context */
static void dma_tc_handle(struct d40_chan *d40c)
{
	struct d40_desc *d40d;

	/* Get first active entry from list */
	d40d = d40_first_active_get(d40c);

	if (d40d == NULL)
		return;

	if (d40d->cyclic) {
		/*
		 * If this was a partially loaded list, we need to reload
		 * it, and only when the list is completed. We need to check
		 * for done because the interrupt will hit for every link, and
		 * not just the last one.
		 */
		if (d40d->lli_current < d40d->lli_len
		    && !d40_tx_is_linked(d40c)
		    && !d40_residue(d40c)) {
			d40_lcla_free_all(d40c, d40d);
			d40_desc_load(d40c, d40d);
			(void) d40_start(d40c);

			if (d40d->lli_current == d40d->lli_len)
				d40d->lli_current = 0;
		}
	} else {
		d40_lcla_free_all(d40c, d40d);

		if (d40d->lli_current < d40d->lli_len) {
			d40_desc_load(d40c, d40d);
			/* Start dma job */
			(void) d40_start(d40c);
			return;
		}

		if (d40_queue_start(d40c) == NULL) {
			d40c->busy = false;

			pm_runtime_mark_last_busy(d40c->base->dev);
			pm_runtime_put_autosuspend(d40c->base->dev);
		}

		d40_desc_remove(d40d);
		d40_desc_done(d40c, d40d);
	}

	d40c->pending_tx++;
	tasklet_schedule(&d40c->tasklet);
}
static void dma_tasklet(unsigned long data)
{
	struct d40_chan *d40c = (struct d40_chan *) data;
	struct d40_desc *d40d;
	unsigned long flags;
	dma_async_tx_callback callback;
	void *callback_param;

	spin_lock_irqsave(&d40c->lock, flags);

	/* Get first entry from the done list */
	d40d = d40_first_done(d40c);
	if (d40d == NULL) {
		/* Check if we have reached here for cyclic job */
		d40d = d40_first_active_get(d40c);
		if (d40d == NULL || !d40d->cyclic)
			goto err;
	}

	if (!d40d->cyclic)
		dma_cookie_complete(&d40d->txd);

	/*
	 * If terminating a channel pending_tx is set to zero.
	 * This prevents any finished active jobs to return to the client.
	 */
	if (d40c->pending_tx == 0) {
		spin_unlock_irqrestore(&d40c->lock, flags);
		return;
	}

	/* Callback to client */
	callback = d40d->txd.callback;
	callback_param = d40d->txd.callback_param;

	if (!d40d->cyclic) {
		if (async_tx_test_ack(&d40d->txd)) {
			d40_desc_remove(d40d);
			d40_desc_free(d40c, d40d);
		} else if (!d40d->is_in_client_list) {
			d40_desc_remove(d40d);
			d40_lcla_free_all(d40c, d40d);
			list_add_tail(&d40d->node, &d40c->client);
			d40d->is_in_client_list = true;
		}
	}

	d40c->pending_tx--;

	if (d40c->pending_tx)
		tasklet_schedule(&d40c->tasklet);

	spin_unlock_irqrestore(&d40c->lock, flags);

	if (callback && (d40d->txd.flags & DMA_PREP_INTERRUPT))
		callback(callback_param);

	return;

err:
	/* Rescue manoeuvre if receiving double interrupts */
	if (d40c->pending_tx > 0)
		d40c->pending_tx--;
	spin_unlock_irqrestore(&d40c->lock, flags);
}
static irqreturn_t d40_handle_interrupt(int irq, void *data)
{
	int i;
	u32 idx;
	u32 row;
	long chan = -1;
	struct d40_chan *d40c;
	unsigned long flags;
	struct d40_base *base = data;
	u32 regs[base->gen_dmac.il_size];
	struct d40_interrupt_lookup *il = base->gen_dmac.il;
	u32 il_size = base->gen_dmac.il_size;

	spin_lock_irqsave(&base->interrupt_lock, flags);

	/* Read interrupt status of both logical and physical channels */
	for (i = 0; i < il_size; i++)
		regs[i] = readl(base->virtbase + il[i].src);

	for (;;) {

		chan = find_next_bit((unsigned long *)regs,
				     BITS_PER_LONG * il_size, chan + 1);

		/* No more set bits found? */
		if (chan == BITS_PER_LONG * il_size)
			break;

		row = chan / BITS_PER_LONG;
		idx = chan & (BITS_PER_LONG - 1);

		if (il[row].offset == D40_PHY_CHAN)
			d40c = base->lookup_phy_chans[idx];
		else
			d40c = base->lookup_log_chans[il[row].offset + idx];

		if (!d40c) {
			/*
			 * No error because this can happen if something else
			 * in the system is using the channel.
			 */
			continue;
		}

		/* ACK interrupt */
		writel(1 << idx, base->virtbase + il[row].clr);

		spin_lock(&d40c->lock);

		if (!il[row].is_error)
			dma_tc_handle(d40c);
		else
			d40_err(base->dev, "IRQ chan: %ld offset %d idx %d\n",
				chan, il[row].offset, idx);

		spin_unlock(&d40c->lock);
	}

	spin_unlock_irqrestore(&base->interrupt_lock, flags);

	return IRQ_HANDLED;
}
static int d40_validate_conf(struct d40_chan *d40c,
			     struct stedma40_chan_cfg *conf)
{
	int res = 0;
	bool is_log = conf->mode == STEDMA40_MODE_LOGICAL;

	if (!conf->dir) {
		chan_err(d40c, "Invalid direction.\n");
		res = -EINVAL;
	}

	if ((is_log && conf->dev_type > d40c->base->num_log_chans)  ||
	    (!is_log && conf->dev_type > d40c->base->num_phy_chans) ||
	    (conf->dev_type < 0)) {
		chan_err(d40c, "Invalid device type (%d)\n", conf->dev_type);
		res = -EINVAL;
	}

	if (conf->dir == STEDMA40_MEM_TO_PERIPH &&
	    d40c->base->plat_data->dev_tx[conf->dev_type] == 0 &&
	    d40c->runtime_addr == 0) {
		chan_err(d40c, "Invalid TX channel address (%d)\n",
			 conf->dev_type);
		res = -EINVAL;
	}

	if (conf->dir == STEDMA40_PERIPH_TO_MEM &&
	    d40c->base->plat_data->dev_rx[conf->dev_type] == 0 &&
	    d40c->runtime_addr == 0) {
		chan_err(d40c, "Invalid RX channel address (%d)\n",
			 conf->dev_type);
		res = -EINVAL;
	}

	if (conf->dir == STEDMA40_PERIPH_TO_PERIPH) {
		/*
		 * DMAC HW supports it. Will be added to this driver,
		 * in case any dma client requires it.
		 */
		chan_err(d40c, "periph to periph not supported\n");
		res = -EINVAL;
	}

	if (d40_psize_2_burst_size(is_log, conf->src_info.psize) *
	    (1 << conf->src_info.data_width) !=
	    d40_psize_2_burst_size(is_log, conf->dst_info.psize) *
	    (1 << conf->dst_info.data_width)) {
		/*
		 * The DMAC hardware only supports
		 * src (burst x width) == dst (burst x width)
		 */
		chan_err(d40c, "src (burst x width) != dst (burst x width)\n");
		res = -EINVAL;
	}

	return res;
}
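/*
 * Example of the invariant checked last: a source configured as burst 8
 * with 4-byte data width moves 32 bytes per burst, so the destination could
 * be burst 16 with 2-byte width (also 32), but not burst 16 with 4-byte
 * width.
 */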
static bool d40_alloc_mask_set(struct d40_phy_res *phy,
			       bool is_src, int log_event_line, bool is_log,
			       bool *first_user)
{
	unsigned long flags;
	spin_lock_irqsave(&phy->lock, flags);

	*first_user = ((phy->allocated_src | phy->allocated_dst)
			== D40_ALLOC_FREE);

	if (!is_log) {
		/* Physical interrupts are masked per physical full channel */
		if (phy->allocated_src == D40_ALLOC_FREE &&
		    phy->allocated_dst == D40_ALLOC_FREE) {
			phy->allocated_dst = D40_ALLOC_PHY;
			phy->allocated_src = D40_ALLOC_PHY;
			goto found;
		} else
			goto not_found;
	}

	/* Logical channel */
	if (is_src) {
		if (phy->allocated_src == D40_ALLOC_PHY)
			goto not_found;

		if (phy->allocated_src == D40_ALLOC_FREE)
			phy->allocated_src = D40_ALLOC_LOG_FREE;

		if (!(phy->allocated_src & (1 << log_event_line))) {
			phy->allocated_src |= 1 << log_event_line;
			goto found;
		} else
			goto not_found;
	} else {
		if (phy->allocated_dst == D40_ALLOC_PHY)
			goto not_found;

		if (phy->allocated_dst == D40_ALLOC_FREE)
			phy->allocated_dst = D40_ALLOC_LOG_FREE;

		if (!(phy->allocated_dst & (1 << log_event_line))) {
			phy->allocated_dst |= 1 << log_event_line;
			goto found;
		} else
			goto not_found;
	}

found:
	spin_unlock_irqrestore(&phy->lock, flags);
	return true;
not_found:
	spin_unlock_irqrestore(&phy->lock, flags);
	return false;
}
static bool d40_alloc_mask_free(struct d40_phy_res *phy, bool is_src,
				int log_event_line)
{
	unsigned long flags;
	bool is_free = false;

	spin_lock_irqsave(&phy->lock, flags);
	if (!log_event_line) {
		phy->allocated_dst = D40_ALLOC_FREE;
		phy->allocated_src = D40_ALLOC_FREE;
		is_free = true;
		goto out;
	}

	/* Logical channel */
	if (is_src) {
		phy->allocated_src &= ~(1 << log_event_line);
		if (phy->allocated_src == D40_ALLOC_LOG_FREE)
			phy->allocated_src = D40_ALLOC_FREE;
	} else {
		phy->allocated_dst &= ~(1 << log_event_line);
		if (phy->allocated_dst == D40_ALLOC_LOG_FREE)
			phy->allocated_dst = D40_ALLOC_FREE;
	}

	is_free = ((phy->allocated_src | phy->allocated_dst) ==
		   D40_ALLOC_FREE);

out:
	spin_unlock_irqrestore(&phy->lock, flags);

	return is_free;
}
static int d40_allocate_channel(struct d40_chan *d40c, bool *first_phy_user)
{
	int dev_type = d40c->dma_cfg.dev_type;
	int event_group;
	int event_line;
	struct d40_phy_res *phys;
	int i;
	int j;
	int log_num;
	int num_phy_chans;
	bool is_src;
	bool is_log = d40c->dma_cfg.mode == STEDMA40_MODE_LOGICAL;

	phys = d40c->base->phy_res;
	num_phy_chans = d40c->base->num_phy_chans;

	if (d40c->dma_cfg.dir == STEDMA40_PERIPH_TO_MEM) {
		log_num = 2 * dev_type;
		is_src = true;
	} else if (d40c->dma_cfg.dir == STEDMA40_MEM_TO_PERIPH ||
		   d40c->dma_cfg.dir == STEDMA40_MEM_TO_MEM) {
		/* dst event lines are used for logical memcpy */
		log_num = 2 * dev_type + 1;
		is_src = false;
	} else
		return -EINVAL;

	event_group = D40_TYPE_TO_GROUP(dev_type);
	event_line = D40_TYPE_TO_EVENT(dev_type);

	if (!is_log) {
		if (d40c->dma_cfg.dir == STEDMA40_MEM_TO_MEM) {
			/* Find physical half channel */
			if (d40c->dma_cfg.use_fixed_channel) {
				i = d40c->dma_cfg.phy_channel;
				if (d40_alloc_mask_set(&phys[i], is_src,
						       0, is_log,
						       first_phy_user))
					goto found_phy;
			} else {
				for (i = 0; i < num_phy_chans; i++) {
					if (d40_alloc_mask_set(&phys[i], is_src,
							       0, is_log,
							       first_phy_user))
						goto found_phy;
				}
			}
		} else
			for (j = 0; j < d40c->base->num_phy_chans; j += 8) {
				int phy_num = j + event_group * 2;
				for (i = phy_num; i < phy_num + 2; i++) {
					if (d40_alloc_mask_set(&phys[i],
							       is_src,
							       0,
							       is_log,
							       first_phy_user))
						goto found_phy;
				}
			}
		return -EINVAL;
found_phy:
		d40c->phy_chan = &phys[i];
		d40c->log_num = D40_PHY_CHAN;
		goto out;
	}
	if (dev_type == -1)
		return -EINVAL;

	/* Find logical channel */
	for (j = 0; j < d40c->base->num_phy_chans; j += 8) {
		int phy_num = j + event_group * 2;

		if (d40c->dma_cfg.use_fixed_channel) {
			i = d40c->dma_cfg.phy_channel;

			if ((i != phy_num) && (i != phy_num + 1)) {
				dev_err(chan2dev(d40c),
					"invalid fixed phy channel %d\n", i);
				return -EINVAL;
			}

			if (d40_alloc_mask_set(&phys[i], is_src, event_line,
					       is_log, first_phy_user))
				goto found_log;

			dev_err(chan2dev(d40c),
				"could not allocate fixed phy channel %d\n", i);
			return -EINVAL;
		}

		/*
		 * Spread logical channels across all available physical rather
		 * than pack every logical channel at the first available phy
		 * channels.
		 */
		if (is_src) {
			for (i = phy_num; i < phy_num + 2; i++) {
				if (d40_alloc_mask_set(&phys[i], is_src,
						       event_line, is_log,
						       first_phy_user))
					goto found_log;
			}
		} else {
			for (i = phy_num + 1; i >= phy_num; i--) {
				if (d40_alloc_mask_set(&phys[i], is_src,
						       event_line, is_log,
						       first_phy_user))
					goto found_log;
			}
		}
	}
	return -EINVAL;

found_log:
	d40c->phy_chan = &phys[i];
	d40c->log_num = log_num;
out:

	if (is_log)
		d40c->base->lookup_log_chans[d40c->log_num] = d40c;
	else
		d40c->base->lookup_phy_chans[d40c->phy_chan->num] = d40c;

	return 0;
}
static int d40_config_memcpy(struct d40_chan *d40c)
{
	dma_cap_mask_t cap = d40c->chan.device->cap_mask;

	if (dma_has_cap(DMA_MEMCPY, cap) && !dma_has_cap(DMA_SLAVE, cap)) {
		d40c->dma_cfg = dma40_memcpy_conf_log;
		d40c->dma_cfg.dev_type = dma40_memcpy_channels[d40c->chan.chan_id];

	} else if (dma_has_cap(DMA_MEMCPY, cap) &&
		   dma_has_cap(DMA_SLAVE, cap)) {
		d40c->dma_cfg = dma40_memcpy_conf_phy;
	} else {
		chan_err(d40c, "No memcpy\n");
		return -EINVAL;
	}

	return 0;
}
static int d40_free_dma(struct d40_chan *d40c)
{
	int res = 0;
	u32 event = D40_TYPE_TO_EVENT(d40c->dma_cfg.dev_type);
	struct d40_phy_res *phy = d40c->phy_chan;
	bool is_src;

	/* Terminate all queued and active transfers */
	d40_term_all(d40c);

	if (phy == NULL) {
		chan_err(d40c, "phy == null\n");
		return -EINVAL;
	}

	if (phy->allocated_src == D40_ALLOC_FREE &&
	    phy->allocated_dst == D40_ALLOC_FREE) {
		chan_err(d40c, "channel already free\n");
		return -EINVAL;
	}

	if (d40c->dma_cfg.dir == STEDMA40_MEM_TO_PERIPH ||
	    d40c->dma_cfg.dir == STEDMA40_MEM_TO_MEM)
		is_src = false;
	else if (d40c->dma_cfg.dir == STEDMA40_PERIPH_TO_MEM)
		is_src = true;
	else {
		chan_err(d40c, "Unknown direction\n");
		return -EINVAL;
	}

	pm_runtime_get_sync(d40c->base->dev);
	res = d40_channel_execute_command(d40c, D40_DMA_STOP);
	if (res) {
		chan_err(d40c, "stop failed\n");
		goto out;
	}

	d40_alloc_mask_free(phy, is_src, chan_is_logical(d40c) ? event : 0);

	if (chan_is_logical(d40c))
		d40c->base->lookup_log_chans[d40c->log_num] = NULL;
	else
		d40c->base->lookup_phy_chans[phy->num] = NULL;

	if (d40c->busy) {
		pm_runtime_mark_last_busy(d40c->base->dev);
		pm_runtime_put_autosuspend(d40c->base->dev);
	}

	d40c->busy = false;
	d40c->phy_chan = NULL;
	d40c->configured = false;
out:

	pm_runtime_mark_last_busy(d40c->base->dev);
	pm_runtime_put_autosuspend(d40c->base->dev);
	return res;
}
static bool d40_is_paused(struct d40_chan *d40c)
{
	void __iomem *chanbase = chan_base(d40c);
	bool is_paused = false;
	unsigned long flags;
	void __iomem *active_reg;
	u32 status;
	u32 event = D40_TYPE_TO_EVENT(d40c->dma_cfg.dev_type);

	spin_lock_irqsave(&d40c->lock, flags);

	if (chan_is_physical(d40c)) {
		if (d40c->phy_chan->num % 2 == 0)
			active_reg = d40c->base->virtbase + D40_DREG_ACTIVE;
		else
			active_reg = d40c->base->virtbase + D40_DREG_ACTIVO;

		status = (readl(active_reg) &
			  D40_CHAN_POS_MASK(d40c->phy_chan->num)) >>
			D40_CHAN_POS(d40c->phy_chan->num);
		if (status == D40_DMA_SUSPENDED || status == D40_DMA_STOP)
			is_paused = true;

		goto _exit;
	}

	if (d40c->dma_cfg.dir == STEDMA40_MEM_TO_PERIPH ||
	    d40c->dma_cfg.dir == STEDMA40_MEM_TO_MEM) {
		status = readl(chanbase + D40_CHAN_REG_SDLNK);
	} else if (d40c->dma_cfg.dir == STEDMA40_PERIPH_TO_MEM) {
		status = readl(chanbase + D40_CHAN_REG_SSLNK);
	} else {
		chan_err(d40c, "Unknown direction\n");
		goto _exit;
	}

	status = (status & D40_EVENTLINE_MASK(event)) >>
		 D40_EVENTLINE_POS(event);

	if (status != D40_DMA_RUN)
		is_paused = true;
_exit:
	spin_unlock_irqrestore(&d40c->lock, flags);
	return is_paused;
}
static u32 stedma40_residue(struct dma_chan *chan)
{
	struct d40_chan *d40c =
		container_of(chan, struct d40_chan, chan);
	u32 bytes_left;
	unsigned long flags;

	spin_lock_irqsave(&d40c->lock, flags);
	bytes_left = d40_residue(d40c);
	spin_unlock_irqrestore(&d40c->lock, flags);

	return bytes_left;
}
static int
d40_prep_sg_log(struct d40_chan *chan, struct d40_desc *desc,
		struct scatterlist *sg_src, struct scatterlist *sg_dst,
		unsigned int sg_len, dma_addr_t src_dev_addr,
		dma_addr_t dst_dev_addr)
{
	struct stedma40_chan_cfg *cfg = &chan->dma_cfg;
	struct stedma40_half_channel_info *src_info = &cfg->src_info;
	struct stedma40_half_channel_info *dst_info = &cfg->dst_info;
	int ret;

	ret = d40_log_sg_to_lli(sg_src, sg_len,
				src_dev_addr,
				desc->lli_log.src,
				chan->log_def.lcsp1,
				src_info->data_width,
				dst_info->data_width);

	ret = d40_log_sg_to_lli(sg_dst, sg_len,
				dst_dev_addr,
				desc->lli_log.dst,
				chan->log_def.lcsp3,
				dst_info->data_width,
				src_info->data_width);

	return ret < 0 ? ret : 0;
}
static int
d40_prep_sg_phy(struct d40_chan *chan, struct d40_desc *desc,
		struct scatterlist *sg_src, struct scatterlist *sg_dst,
		unsigned int sg_len, dma_addr_t src_dev_addr,
		dma_addr_t dst_dev_addr)
{
	struct stedma40_chan_cfg *cfg = &chan->dma_cfg;
	struct stedma40_half_channel_info *src_info = &cfg->src_info;
	struct stedma40_half_channel_info *dst_info = &cfg->dst_info;
	unsigned long flags = 0;
	int ret;

	if (desc->cyclic)
		flags |= LLI_CYCLIC | LLI_TERM_INT;

	ret = d40_phy_sg_to_lli(sg_src, sg_len, src_dev_addr,
				desc->lli_phy.src,
				virt_to_phys(desc->lli_phy.src),
				chan->src_def_cfg,
				src_info, dst_info, flags);

	ret = d40_phy_sg_to_lli(sg_dst, sg_len, dst_dev_addr,
				desc->lli_phy.dst,
				virt_to_phys(desc->lli_phy.dst),
				chan->dst_def_cfg,
				dst_info, src_info, flags);

	dma_sync_single_for_device(chan->base->dev, desc->lli_pool.dma_addr,
				   desc->lli_pool.size, DMA_TO_DEVICE);

	return ret < 0 ? ret : 0;
}
static struct d40_desc *
d40_prep_desc(struct d40_chan *chan, struct scatterlist *sg,
	      unsigned int sg_len, unsigned long dma_flags)
{
	struct stedma40_chan_cfg *cfg = &chan->dma_cfg;
	struct d40_desc *desc;
	int ret;

	desc = d40_desc_get(chan);
	if (!desc)
		return NULL;

	desc->lli_len = d40_sg_2_dmalen(sg, sg_len, cfg->src_info.data_width,
					cfg->dst_info.data_width);
	if (desc->lli_len < 0) {
		chan_err(chan, "Unaligned size\n");
		goto err;
	}

	ret = d40_pool_lli_alloc(chan, desc, desc->lli_len);
	if (ret < 0) {
		chan_err(chan, "Could not allocate lli\n");
		goto err;
	}

	desc->lli_current = 0;
	desc->txd.flags = dma_flags;
	desc->txd.tx_submit = d40_tx_submit;

	dma_async_tx_descriptor_init(&desc->txd, &chan->chan);

	return desc;

err:
	d40_desc_free(chan, desc);
	return NULL;
}
static dma_addr_t
d40_get_dev_addr(struct d40_chan *chan, enum dma_transfer_direction direction)
{
	struct stedma40_platform_data *plat = chan->base->plat_data;
	struct stedma40_chan_cfg *cfg = &chan->dma_cfg;
	dma_addr_t addr = 0;

	if (chan->runtime_addr)
		return chan->runtime_addr;

	if (direction == DMA_DEV_TO_MEM)
		addr = plat->dev_rx[cfg->dev_type];
	else if (direction == DMA_MEM_TO_DEV)
		addr = plat->dev_tx[cfg->dev_type];

	return addr;
}
static struct dma_async_tx_descriptor *
d40_prep_sg(struct dma_chan *dchan, struct scatterlist *sg_src,
	    struct scatterlist *sg_dst, unsigned int sg_len,
	    enum dma_transfer_direction direction, unsigned long dma_flags)
{
	struct d40_chan *chan = container_of(dchan, struct d40_chan, chan);
	dma_addr_t src_dev_addr = 0;
	dma_addr_t dst_dev_addr = 0;
	struct d40_desc *desc;
	unsigned long flags;
	int ret;

	if (!chan->phy_chan) {
		chan_err(chan, "Cannot prepare unallocated channel\n");
		return NULL;
	}

	spin_lock_irqsave(&chan->lock, flags);

	desc = d40_prep_desc(chan, sg_src, sg_len, dma_flags);
	if (desc == NULL)
		goto err;

	if (sg_next(&sg_src[sg_len - 1]) == sg_src)
		desc->cyclic = true;

	if (direction != DMA_TRANS_NONE) {
		dma_addr_t dev_addr = d40_get_dev_addr(chan, direction);

		if (direction == DMA_DEV_TO_MEM)
			src_dev_addr = dev_addr;
		else if (direction == DMA_MEM_TO_DEV)
			dst_dev_addr = dev_addr;
	}

	if (chan_is_logical(chan))
		ret = d40_prep_sg_log(chan, desc, sg_src, sg_dst,
				      sg_len, src_dev_addr, dst_dev_addr);
	else
		ret = d40_prep_sg_phy(chan, desc, sg_src, sg_dst,
				      sg_len, src_dev_addr, dst_dev_addr);

	if (ret) {
		chan_err(chan, "Failed to prepare %s sg job: %d\n",
			 chan_is_logical(chan) ? "log" : "phy", ret);
		goto err;
	}

	/*
	 * add descriptor to the prepare queue in order to be able
	 * to free them later in terminate_all
	 */
	list_add_tail(&desc->node, &chan->prepare_queue);

	spin_unlock_irqrestore(&chan->lock, flags);

	return &desc->txd;

err:
	if (desc)
		d40_desc_free(chan, desc);
	spin_unlock_irqrestore(&chan->lock, flags);
	return NULL;
}
bool stedma40_filter(struct dma_chan *chan, void *data)
{
	struct stedma40_chan_cfg *info = data;
	struct d40_chan *d40c =
		container_of(chan, struct d40_chan, chan);
	int err;

	if (data) {
		err = d40_validate_conf(d40c, info);
		if (!err)
			d40c->dma_cfg = *info;
	} else
		err = d40_config_memcpy(d40c);

	if (!err)
		d40c->configured = true;

	return err == 0;
}
EXPORT_SYMBOL(stedma40_filter);
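
/*
 * Illustrative only, not part of the driver: a hedged sketch of how a
 * client on a platform-data based board might claim a DMA40 channel with
 * stedma40_filter(). The cfg field values below are assumptions for a
 * generic logical peripheral-to-memory channel, not taken from any real
 * board file.
 */
#if 0	/* example client code, kept out of the build */
static struct dma_chan *example_request_dma40_chan(void)
{
	struct stedma40_chan_cfg cfg = {
		.dir = STEDMA40_PERIPH_TO_MEM,
		.dev_type = 0,			/* assumed event line */
		.mode = STEDMA40_MODE_LOGICAL,
	};
	dma_cap_mask_t mask;

	dma_cap_zero(mask);
	dma_cap_set(DMA_SLAVE, mask);

	/* stedma40_filter() copies cfg into the channel if it matches */
	return dma_request_channel(mask, stedma40_filter, &cfg);
}
#endif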
static void __d40_set_prio_rt(struct d40_chan *d40c, int dev_type, bool src)
{
	bool realtime = d40c->dma_cfg.realtime;
	bool highprio = d40c->dma_cfg.high_priority;
	u32 rtreg;
	u32 event = D40_TYPE_TO_EVENT(dev_type);
	u32 group = D40_TYPE_TO_GROUP(dev_type);
	u32 bit = 1 << event;
	u32 prioreg;
	struct d40_gen_dmac *dmac = &d40c->base->gen_dmac;

	rtreg = realtime ? dmac->realtime_en : dmac->realtime_clear;

	/*
	 * Due to a hardware bug, in some cases a logical channel triggered by
	 * a high priority destination event line can generate extra packet
	 * transactions.
	 *
	 * The workaround is to not set the high priority level for the
	 * destination event lines that trigger logical channels.
	 */
	if (!src && chan_is_logical(d40c))
		highprio = false;

	prioreg = highprio ? dmac->high_prio_en : dmac->high_prio_clear;

	/* Destination event lines are stored in the upper halfword */
	if (!src)
		bit <<= 16;

	writel(bit, d40c->base->virtbase + prioreg + group * 4);
	writel(bit, d40c->base->virtbase + rtreg + group * 4);
}
static void d40_set_prio_realtime(struct d40_chan *d40c)
{
	if (d40c->base->rev < 3)
		return;

	if ((d40c->dma_cfg.dir == STEDMA40_PERIPH_TO_MEM) ||
	    (d40c->dma_cfg.dir == STEDMA40_PERIPH_TO_PERIPH))
		__d40_set_prio_rt(d40c, d40c->dma_cfg.dev_type, true);

	if ((d40c->dma_cfg.dir == STEDMA40_MEM_TO_PERIPH) ||
	    (d40c->dma_cfg.dir == STEDMA40_PERIPH_TO_PERIPH))
		__d40_set_prio_rt(d40c, d40c->dma_cfg.dev_type, false);
}
/* DMA ENGINE functions */
static int d40_alloc_chan_resources(struct dma_chan *chan)
{
	int err;
	unsigned long flags;
	struct d40_chan *d40c =
		container_of(chan, struct d40_chan, chan);
	bool is_free_phy;

	spin_lock_irqsave(&d40c->lock, flags);

	dma_cookie_init(chan);

	/* If no dma configuration is set use default configuration (memcpy) */
	if (!d40c->configured) {
		err = d40_config_memcpy(d40c);
		if (err) {
			chan_err(d40c, "Failed to configure memcpy channel\n");
			goto fail;
		}
	}

	err = d40_allocate_channel(d40c, &is_free_phy);
	if (err) {
		chan_err(d40c, "Failed to allocate channel\n");
		d40c->configured = false;
		goto fail;
	}

	pm_runtime_get_sync(d40c->base->dev);
	/* Fill in basic CFG register values */
	d40_phy_cfg(&d40c->dma_cfg, &d40c->src_def_cfg,
		    &d40c->dst_def_cfg, chan_is_logical(d40c));

	d40_set_prio_realtime(d40c);

	if (chan_is_logical(d40c)) {
		d40_log_cfg(&d40c->dma_cfg,
			    &d40c->log_def.lcsp1, &d40c->log_def.lcsp3);

		if (d40c->dma_cfg.dir == STEDMA40_PERIPH_TO_MEM)
			d40c->lcpa = d40c->base->lcpa_base +
				d40c->dma_cfg.dev_type * D40_LCPA_CHAN_SIZE;
		else
			d40c->lcpa = d40c->base->lcpa_base +
				d40c->dma_cfg.dev_type *
				D40_LCPA_CHAN_SIZE + D40_LCPA_CHAN_DST_DELTA;
	}

	dev_dbg(chan2dev(d40c), "allocated %s channel (phy %d%s)\n",
		chan_is_logical(d40c) ? "logical" : "physical",
		d40c->phy_chan->num,
		d40c->dma_cfg.use_fixed_channel ? ", fixed" : "");

	/*
	 * Only write channel configuration to the DMA if the physical
	 * resource is free. In case of multiple logical channels
	 * on the same physical resource, only the first write is necessary.
	 */
	if (is_free_phy)
		d40_config_write(d40c);
fail:
	pm_runtime_mark_last_busy(d40c->base->dev);
	pm_runtime_put_autosuspend(d40c->base->dev);
	spin_unlock_irqrestore(&d40c->lock, flags);
	return err;
}
static void d40_free_chan_resources(struct dma_chan *chan)
{
	struct d40_chan *d40c =
		container_of(chan, struct d40_chan, chan);
	int err;
	unsigned long flags;

	if (d40c->phy_chan == NULL) {
		chan_err(d40c, "Cannot free unallocated channel\n");
		return;
	}

	spin_lock_irqsave(&d40c->lock, flags);

	err = d40_free_dma(d40c);

	if (err)
		chan_err(d40c, "Failed to free channel\n");
	spin_unlock_irqrestore(&d40c->lock, flags);
}
static struct dma_async_tx_descriptor *d40_prep_memcpy(struct dma_chan *chan,
						       dma_addr_t dst,
						       dma_addr_t src,
						       size_t size,
						       unsigned long dma_flags)
{
	struct scatterlist dst_sg;
	struct scatterlist src_sg;

	sg_init_table(&dst_sg, 1);
	sg_init_table(&src_sg, 1);

	sg_dma_address(&dst_sg) = dst;
	sg_dma_address(&src_sg) = src;

	sg_dma_len(&dst_sg) = size;
	sg_dma_len(&src_sg) = size;

	return d40_prep_sg(chan, &src_sg, &dst_sg, 1, DMA_NONE, dma_flags);
}
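
/*
 * Illustrative only: a hedged sketch of issuing a memcpy through the
 * generic dmaengine op as a client would. chan, dst and src are assumed
 * to come from dma_request_channel() and dma_map_single() respectively.
 */
#if 0	/* example client code, kept out of the build */
static int example_memcpy(struct dma_chan *chan, dma_addr_t dst,
			  dma_addr_t src, size_t len)
{
	struct dma_async_tx_descriptor *txd;

	txd = chan->device->device_prep_dma_memcpy(chan, dst, src, len,
						   DMA_PREP_INTERRUPT |
						   DMA_CTRL_ACK);
	if (!txd)
		return -EBUSY;

	dmaengine_submit(txd);
	dma_async_issue_pending(chan);
	return 0;
}
#endif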
static struct dma_async_tx_descriptor *
d40_prep_memcpy_sg(struct dma_chan *chan,
		   struct scatterlist *dst_sg, unsigned int dst_nents,
		   struct scatterlist *src_sg, unsigned int src_nents,
		   unsigned long dma_flags)
{
	if (dst_nents != src_nents)
		return NULL;

	return d40_prep_sg(chan, src_sg, dst_sg, src_nents, DMA_NONE, dma_flags);
}
static struct dma_async_tx_descriptor *
d40_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
		  unsigned int sg_len, enum dma_transfer_direction direction,
		  unsigned long dma_flags, void *context)
{
	if (!is_slave_direction(direction))
		return NULL;

	return d40_prep_sg(chan, sgl, sgl, sg_len, direction, dma_flags);
}
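
/*
 * Illustrative only: a hedged sketch of a complete slave transfer as seen
 * from a client driver -- prep, submit, kick, and termination on the error
 * path. The sg table and its DMA mapping are assumed to exist already.
 */
#if 0	/* example client code, kept out of the build */
static int example_slave_rx(struct dma_chan *chan, struct scatterlist *sgl,
			    unsigned int sg_len)
{
	struct dma_async_tx_descriptor *txd;

	txd = dmaengine_prep_slave_sg(chan, sgl, sg_len, DMA_DEV_TO_MEM,
				      DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
	if (!txd) {
		dmaengine_terminate_all(chan);
		return -EBUSY;
	}

	dmaengine_submit(txd);
	dma_async_issue_pending(chan);
	return 0;
}
#endif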
static struct dma_async_tx_descriptor *
dma40_prep_dma_cyclic(struct dma_chan *chan, dma_addr_t dma_addr,
		      size_t buf_len, size_t period_len,
		      enum dma_transfer_direction direction, unsigned long flags,
		      void *context)
{
	unsigned int periods = buf_len / period_len;
	struct dma_async_tx_descriptor *txd;
	struct scatterlist *sg;
	int i;

	sg = kcalloc(periods + 1, sizeof(struct scatterlist), GFP_NOWAIT);
	/* kcalloc(GFP_NOWAIT) can fail under pressure; bail out early */
	if (!sg)
		return NULL;

	for (i = 0; i < periods; i++) {
		sg_dma_address(&sg[i]) = dma_addr;
		sg_dma_len(&sg[i]) = period_len;
		dma_addr += period_len;
	}

	sg[periods].offset = 0;
	sg_dma_len(&sg[periods]) = 0;
	sg[periods].page_link =
		((unsigned long)sg | 0x01) & ~0x02;

	txd = d40_prep_sg(chan, sg, sg, periods, direction,
			  DMA_PREP_INTERRUPT);

	kfree(sg);

	return txd;
}
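
/*
 * Illustrative only: a hedged sketch of setting up a cyclic (double
 * buffered, audio style) transfer from the client side. buf is assumed to
 * be a DMA-mapped ring of equally sized periods.
 */
#if 0	/* example client code, kept out of the build */
static int example_cyclic_rx(struct dma_chan *chan, dma_addr_t buf,
			     size_t buf_len, size_t period_len)
{
	struct dma_async_tx_descriptor *txd;

	txd = dmaengine_prep_dma_cyclic(chan, buf, buf_len, period_len,
					DMA_DEV_TO_MEM, DMA_PREP_INTERRUPT);
	if (!txd)
		return -EBUSY;

	/* txd->callback, if set, fires once per completed period */
	dmaengine_submit(txd);
	dma_async_issue_pending(chan);
	return 0;
}
#endif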
static enum dma_status d40_tx_status(struct dma_chan *chan,
				     dma_cookie_t cookie,
				     struct dma_tx_state *txstate)
{
	struct d40_chan *d40c = container_of(chan, struct d40_chan, chan);
	enum dma_status ret;

	if (d40c->phy_chan == NULL) {
		chan_err(d40c, "Cannot read status of unallocated channel\n");
		return -EINVAL;
	}

	ret = dma_cookie_status(chan, cookie, txstate);
	if (ret != DMA_SUCCESS)
		dma_set_residue(txstate, stedma40_residue(chan));

	if (d40_is_paused(d40c))
		ret = DMA_PAUSED;

	return ret;
}
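
/*
 * Illustrative only: a hedged sketch of how a client can poll a cookie and
 * read back the residue that d40_tx_status() reports.
 */
#if 0	/* example client code, kept out of the build */
static void example_poll_status(struct dma_chan *chan, dma_cookie_t cookie)
{
	struct dma_tx_state state;
	enum dma_status status;

	status = dmaengine_tx_status(chan, cookie, &state);
	if (status == DMA_SUCCESS)
		pr_info("transfer done\n");
	else
		pr_info("status %d, %u bytes left\n", status, state.residue);
}
#endif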
static void d40_issue_pending(struct dma_chan *chan)
{
	struct d40_chan *d40c = container_of(chan, struct d40_chan, chan);
	unsigned long flags;

	if (d40c->phy_chan == NULL) {
		chan_err(d40c, "Channel is not allocated!\n");
		return;
	}

	spin_lock_irqsave(&d40c->lock, flags);

	list_splice_tail_init(&d40c->pending_queue, &d40c->queue);

	/* Busy means that queued jobs are already being processed */
	if (!d40c->busy)
		(void) d40_queue_start(d40c);

	spin_unlock_irqrestore(&d40c->lock, flags);
}
static void d40_terminate_all(struct dma_chan *chan)
{
	unsigned long flags;
	struct d40_chan *d40c = container_of(chan, struct d40_chan, chan);
	int ret;

	spin_lock_irqsave(&d40c->lock, flags);

	pm_runtime_get_sync(d40c->base->dev);
	ret = d40_channel_execute_command(d40c, D40_DMA_STOP);
	if (ret)
		chan_err(d40c, "Failed to stop channel\n");

	d40_term_all(d40c);
	pm_runtime_mark_last_busy(d40c->base->dev);
	pm_runtime_put_autosuspend(d40c->base->dev);
	if (d40c->busy) {
		pm_runtime_mark_last_busy(d40c->base->dev);
		pm_runtime_put_autosuspend(d40c->base->dev);
	}

	d40c->busy = false;

	spin_unlock_irqrestore(&d40c->lock, flags);
}
static int
dma40_config_to_halfchannel(struct d40_chan *d40c,
			    struct stedma40_half_channel_info *info,
			    enum dma_slave_buswidth width,
			    u32 maxburst)
{
	enum stedma40_periph_data_width addr_width;
	int psize;

	switch (width) {
	case DMA_SLAVE_BUSWIDTH_1_BYTE:
		addr_width = STEDMA40_BYTE_WIDTH;
		break;
	case DMA_SLAVE_BUSWIDTH_2_BYTES:
		addr_width = STEDMA40_HALFWORD_WIDTH;
		break;
	case DMA_SLAVE_BUSWIDTH_4_BYTES:
		addr_width = STEDMA40_WORD_WIDTH;
		break;
	case DMA_SLAVE_BUSWIDTH_8_BYTES:
		addr_width = STEDMA40_DOUBLEWORD_WIDTH;
		break;
	default:
		dev_err(d40c->base->dev,
			"illegal peripheral address width "
			"requested (%d)\n",
			width);
		return -EINVAL;
	}

	if (chan_is_logical(d40c)) {
		if (maxburst >= 16)
			psize = STEDMA40_PSIZE_LOG_16;
		else if (maxburst >= 8)
			psize = STEDMA40_PSIZE_LOG_8;
		else if (maxburst >= 4)
			psize = STEDMA40_PSIZE_LOG_4;
		else
			psize = STEDMA40_PSIZE_LOG_1;
	} else {
		if (maxburst >= 16)
			psize = STEDMA40_PSIZE_PHY_16;
		else if (maxburst >= 8)
			psize = STEDMA40_PSIZE_PHY_8;
		else if (maxburst >= 4)
			psize = STEDMA40_PSIZE_PHY_4;
		else
			psize = STEDMA40_PSIZE_PHY_1;
	}

	info->data_width = addr_width;
	info->psize = psize;
	info->flow_ctrl = STEDMA40_NO_FLOW_CTRL;

	return 0;
}
/* Runtime reconfiguration extension */
static int d40_set_runtime_config(struct dma_chan *chan,
				  struct dma_slave_config *config)
{
	struct d40_chan *d40c = container_of(chan, struct d40_chan, chan);
	struct stedma40_chan_cfg *cfg = &d40c->dma_cfg;
	enum dma_slave_buswidth src_addr_width, dst_addr_width;
	dma_addr_t config_addr;
	u32 src_maxburst, dst_maxburst;
	int ret;

	src_addr_width = config->src_addr_width;
	src_maxburst = config->src_maxburst;
	dst_addr_width = config->dst_addr_width;
	dst_maxburst = config->dst_maxburst;

	if (config->direction == DMA_DEV_TO_MEM) {
		dma_addr_t dev_addr_rx =
			d40c->base->plat_data->dev_rx[cfg->dev_type];

		config_addr = config->src_addr;
		if (dev_addr_rx)
			dev_dbg(d40c->base->dev,
				"channel has a pre-wired RX address %08x "
				"overriding with %08x\n",
				dev_addr_rx, config_addr);
		if (cfg->dir != STEDMA40_PERIPH_TO_MEM)
			dev_dbg(d40c->base->dev,
				"channel was not configured for peripheral "
				"to memory transfer (%d) overriding\n",
				cfg->dir);
		cfg->dir = STEDMA40_PERIPH_TO_MEM;

		/* Configure the memory side */
		if (dst_addr_width == DMA_SLAVE_BUSWIDTH_UNDEFINED)
			dst_addr_width = src_addr_width;
		if (dst_maxburst == 0)
			dst_maxburst = src_maxburst;

	} else if (config->direction == DMA_MEM_TO_DEV) {
		dma_addr_t dev_addr_tx =
			d40c->base->plat_data->dev_tx[cfg->dev_type];

		config_addr = config->dst_addr;
		if (dev_addr_tx)
			dev_dbg(d40c->base->dev,
				"channel has a pre-wired TX address %08x "
				"overriding with %08x\n",
				dev_addr_tx, config_addr);
		if (cfg->dir != STEDMA40_MEM_TO_PERIPH)
			dev_dbg(d40c->base->dev,
				"channel was not configured for memory "
				"to peripheral transfer (%d) overriding\n",
				cfg->dir);
		cfg->dir = STEDMA40_MEM_TO_PERIPH;

		/* Configure the memory side */
		if (src_addr_width == DMA_SLAVE_BUSWIDTH_UNDEFINED)
			src_addr_width = dst_addr_width;
		if (src_maxburst == 0)
			src_maxburst = dst_maxburst;
	} else {
		dev_err(d40c->base->dev,
			"unrecognized channel direction %d\n",
			config->direction);
		return -EINVAL;
	}

	if (src_maxburst * src_addr_width != dst_maxburst * dst_addr_width) {
		dev_err(d40c->base->dev,
			"src/dst width/maxburst mismatch: %d*%d != %d*%d\n",
			src_maxburst,
			src_addr_width,
			dst_maxburst,
			dst_addr_width);
		return -EINVAL;
	}

	if (src_maxburst > 16) {
		src_maxburst = 16;
		dst_maxburst = src_maxburst * src_addr_width / dst_addr_width;
	} else if (dst_maxburst > 16) {
		dst_maxburst = 16;
		src_maxburst = dst_maxburst * dst_addr_width / src_addr_width;
	}

	ret = dma40_config_to_halfchannel(d40c, &cfg->src_info,
					  src_addr_width,
					  src_maxburst);
	if (ret)
		return ret;

	ret = dma40_config_to_halfchannel(d40c, &cfg->dst_info,
					  dst_addr_width,
					  dst_maxburst);
	if (ret)
		return ret;

	/* Fill in register values */
	if (chan_is_logical(d40c))
		d40_log_cfg(cfg, &d40c->log_def.lcsp1, &d40c->log_def.lcsp3);
	else
		d40_phy_cfg(cfg, &d40c->src_def_cfg,
			    &d40c->dst_def_cfg, false);

	/* These settings will take precedence later */
	d40c->runtime_addr = config_addr;
	d40c->runtime_direction = config->direction;
	dev_dbg(d40c->base->dev,
		"configured channel %s for %s, data width %d/%d, "
		"maxburst %d/%d elements, LE, no flow control\n",
		dma_chan_name(chan),
		(config->direction == DMA_DEV_TO_MEM) ? "RX" : "TX",
		src_addr_width, dst_addr_width,
		src_maxburst, dst_maxburst);

	return 0;
}
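
/*
 * Illustrative only: a hedged sketch of the runtime configuration a client
 * would hand to d40_set_runtime_config() through dmaengine_slave_config().
 * The FIFO address and widths are assumptions for a generic 32-bit
 * peripheral.
 */
#if 0	/* example client code, kept out of the build */
static int example_configure_rx(struct dma_chan *chan, dma_addr_t fifo)
{
	struct dma_slave_config cfg = {
		.direction = DMA_DEV_TO_MEM,
		.src_addr = fifo,
		.src_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES,
		.src_maxburst = 8,	/* bursts of 8 words from the FIFO */
	};

	/* dst side left undefined: the driver mirrors the src settings */
	return dmaengine_slave_config(chan, &cfg);
}
#endif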
static int d40_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd,
		       unsigned long arg)
{
	struct d40_chan *d40c = container_of(chan, struct d40_chan, chan);

	if (d40c->phy_chan == NULL) {
		chan_err(d40c, "Channel is not allocated!\n");
		return -EINVAL;
	}

	switch (cmd) {
	case DMA_TERMINATE_ALL:
		d40_terminate_all(chan);
		return 0;
	case DMA_PAUSE:
		return d40_pause(d40c);
	case DMA_RESUME:
		return d40_resume(d40c);
	case DMA_SLAVE_CONFIG:
		return d40_set_runtime_config(chan,
			(struct dma_slave_config *) arg);
	default:
		break;
	}

	/* Other commands are unimplemented */
	return -ENXIO;
}
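
/*
 * Illustrative only: a hedged sketch of how the d40_control() commands
 * above are normally reached from a client, via the generic dmaengine
 * wrappers rather than by calling device_control directly.
 */
#if 0	/* example client code, kept out of the build */
static void example_pause_resume(struct dma_chan *chan)
{
	dmaengine_pause(chan);		/* DMA_PAUSE -> d40_pause() */
	dmaengine_resume(chan);		/* DMA_RESUME -> d40_resume() */
	dmaengine_terminate_all(chan);	/* DMA_TERMINATE_ALL -> d40_terminate_all() */
}
#endif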
/* Initialization functions */
static void __init d40_chan_init(struct d40_base *base, struct dma_device *dma,
				 struct d40_chan *chans, int offset,
				 int num_chans)
{
	int i;
	struct d40_chan *d40c;

	INIT_LIST_HEAD(&dma->channels);

	for (i = offset; i < offset + num_chans; i++) {
		d40c = &chans[i];
		d40c->base = base;
		d40c->chan.device = dma;

		spin_lock_init(&d40c->lock);

		d40c->log_num = D40_PHY_CHAN;

		INIT_LIST_HEAD(&d40c->done);
		INIT_LIST_HEAD(&d40c->active);
		INIT_LIST_HEAD(&d40c->queue);
		INIT_LIST_HEAD(&d40c->pending_queue);
		INIT_LIST_HEAD(&d40c->client);
		INIT_LIST_HEAD(&d40c->prepare_queue);

		tasklet_init(&d40c->tasklet, dma_tasklet,
			     (unsigned long) d40c);

		list_add_tail(&d40c->chan.device_node,
			      &dma->channels);
	}
}
static void d40_ops_init(struct d40_base *base, struct dma_device *dev)
{
	if (dma_has_cap(DMA_SLAVE, dev->cap_mask))
		dev->device_prep_slave_sg = d40_prep_slave_sg;

	if (dma_has_cap(DMA_MEMCPY, dev->cap_mask)) {
		dev->device_prep_dma_memcpy = d40_prep_memcpy;

		/*
		 * This controller can only access addresses at even
		 * 32-bit boundaries, i.e. 2^2-byte aligned.
		 */
		dev->copy_align = 2;
	}

	if (dma_has_cap(DMA_SG, dev->cap_mask))
		dev->device_prep_dma_sg = d40_prep_memcpy_sg;

	if (dma_has_cap(DMA_CYCLIC, dev->cap_mask))
		dev->device_prep_dma_cyclic = dma40_prep_dma_cyclic;

	dev->device_alloc_chan_resources = d40_alloc_chan_resources;
	dev->device_free_chan_resources = d40_free_chan_resources;
	dev->device_issue_pending = d40_issue_pending;
	dev->device_tx_status = d40_tx_status;
	dev->device_control = d40_control;
	dev->dev = base->dev;
}
static int __init d40_dmaengine_init(struct d40_base *base,
				     int num_reserved_chans)
{
	int err;

	d40_chan_init(base, &base->dma_slave, base->log_chans,
		      0, base->num_log_chans);

	dma_cap_zero(base->dma_slave.cap_mask);
	dma_cap_set(DMA_SLAVE, base->dma_slave.cap_mask);
	dma_cap_set(DMA_CYCLIC, base->dma_slave.cap_mask);

	d40_ops_init(base, &base->dma_slave);

	err = dma_async_device_register(&base->dma_slave);

	if (err) {
		d40_err(base->dev, "Failed to register slave channels\n");
		goto failure1;
	}

	d40_chan_init(base, &base->dma_memcpy, base->log_chans,
		      base->num_log_chans, ARRAY_SIZE(dma40_memcpy_channels));

	dma_cap_zero(base->dma_memcpy.cap_mask);
	dma_cap_set(DMA_MEMCPY, base->dma_memcpy.cap_mask);
	dma_cap_set(DMA_SG, base->dma_memcpy.cap_mask);

	d40_ops_init(base, &base->dma_memcpy);

	err = dma_async_device_register(&base->dma_memcpy);

	if (err) {
		d40_err(base->dev,
			"Failed to register memcpy only channels\n");
		goto failure2;
	}

	d40_chan_init(base, &base->dma_both, base->phy_chans,
		      0, num_reserved_chans);

	dma_cap_zero(base->dma_both.cap_mask);
	dma_cap_set(DMA_SLAVE, base->dma_both.cap_mask);
	dma_cap_set(DMA_MEMCPY, base->dma_both.cap_mask);
	dma_cap_set(DMA_SG, base->dma_both.cap_mask);
	dma_cap_set(DMA_CYCLIC, base->dma_both.cap_mask);

	d40_ops_init(base, &base->dma_both);
	err = dma_async_device_register(&base->dma_both);

	if (err) {
		d40_err(base->dev,
			"Failed to register logical and physical capable channels\n");
		goto failure3;
	}
	return 0;
failure3:
	dma_async_device_unregister(&base->dma_memcpy);
failure2:
	dma_async_device_unregister(&base->dma_slave);
failure1:
	return err;
}
/* Suspend resume functionality */
#ifdef CONFIG_PM
static int dma40_pm_suspend(struct device *dev)
{
	struct platform_device *pdev = to_platform_device(dev);
	struct d40_base *base = platform_get_drvdata(pdev);
	int ret = 0;

	if (base->lcpa_regulator)
		ret = regulator_disable(base->lcpa_regulator);
	return ret;
}

static int dma40_runtime_suspend(struct device *dev)
{
	struct platform_device *pdev = to_platform_device(dev);
	struct d40_base *base = platform_get_drvdata(pdev);

	d40_save_restore_registers(base, true);

	/* Don't disable/enable clocks for v1 due to HW bugs */
	if (base->rev != 1)
		writel_relaxed(base->gcc_pwr_off_mask,
			       base->virtbase + D40_DREG_GCC);

	return 0;
}

static int dma40_runtime_resume(struct device *dev)
{
	struct platform_device *pdev = to_platform_device(dev);
	struct d40_base *base = platform_get_drvdata(pdev);

	if (base->initialized)
		d40_save_restore_registers(base, false);

	writel_relaxed(D40_DREG_GCC_ENABLE_ALL,
		       base->virtbase + D40_DREG_GCC);
	return 0;
}

static int dma40_resume(struct device *dev)
{
	struct platform_device *pdev = to_platform_device(dev);
	struct d40_base *base = platform_get_drvdata(pdev);
	int ret = 0;

	if (base->lcpa_regulator)
		ret = regulator_enable(base->lcpa_regulator);

	return ret;
}

static const struct dev_pm_ops dma40_pm_ops = {
	.suspend		= dma40_pm_suspend,
	.runtime_suspend	= dma40_runtime_suspend,
	.runtime_resume		= dma40_runtime_resume,
	.resume			= dma40_resume,
};
#define DMA40_PM_OPS	(&dma40_pm_ops)
#else
#define DMA40_PM_OPS	NULL
#endif
/* Initialization functions. */

static int __init d40_phy_res_init(struct d40_base *base)
{
	int i;
	int num_phy_chans_avail = 0;
	u32 val[2];
	int odd_even_bit = -2;
	int gcc = D40_DREG_GCC_ENA;

	val[0] = readl(base->virtbase + D40_DREG_PRSME);
	val[1] = readl(base->virtbase + D40_DREG_PRSMO);

	for (i = 0; i < base->num_phy_chans; i++) {
		base->phy_res[i].num = i;
		odd_even_bit += 2 * ((i % 2) == 0);
		if (((val[i % 2] >> odd_even_bit) & 3) == 1) {
			/* Mark security only channels as occupied */
			base->phy_res[i].allocated_src = D40_ALLOC_PHY;
			base->phy_res[i].allocated_dst = D40_ALLOC_PHY;
			base->phy_res[i].reserved = true;
			gcc |= D40_DREG_GCC_EVTGRP_ENA(D40_PHYS_TO_GROUP(i),
						       D40_DREG_GCC_SRC);
			gcc |= D40_DREG_GCC_EVTGRP_ENA(D40_PHYS_TO_GROUP(i),
						       D40_DREG_GCC_DST);
		} else {
			base->phy_res[i].allocated_src = D40_ALLOC_FREE;
			base->phy_res[i].allocated_dst = D40_ALLOC_FREE;
			base->phy_res[i].reserved = false;
			num_phy_chans_avail++;
		}
		spin_lock_init(&base->phy_res[i].lock);
	}

	/* Mark disabled channels as occupied */
	for (i = 0; base->plat_data->disabled_channels[i] != -1; i++) {
		int chan = base->plat_data->disabled_channels[i];

		base->phy_res[chan].allocated_src = D40_ALLOC_PHY;
		base->phy_res[chan].allocated_dst = D40_ALLOC_PHY;
		base->phy_res[chan].reserved = true;
		gcc |= D40_DREG_GCC_EVTGRP_ENA(D40_PHYS_TO_GROUP(chan),
					       D40_DREG_GCC_SRC);
		gcc |= D40_DREG_GCC_EVTGRP_ENA(D40_PHYS_TO_GROUP(chan),
					       D40_DREG_GCC_DST);
		num_phy_chans_avail--;
	}

	/* Mark soft_lli channels */
	for (i = 0; i < base->plat_data->num_of_soft_lli_chans; i++) {
		int chan = base->plat_data->soft_lli_chans[i];

		base->phy_res[chan].use_soft_lli = true;
	}

	dev_info(base->dev, "%d of %d physical DMA channels available\n",
		 num_phy_chans_avail, base->num_phy_chans);

	/* Verify settings extended vs standard */
	val[0] = readl(base->virtbase + D40_DREG_PRTYP);

	for (i = 0; i < base->num_phy_chans; i++) {

		if (base->phy_res[i].allocated_src == D40_ALLOC_FREE &&
		    (val[0] & 0x3) != 1)
			dev_info(base->dev,
				 "[%s] INFO: channel %d is misconfigured (%d)\n",
				 __func__, i, val[0] & 0x3);

		val[0] = val[0] >> 2;
	}

	/*
	 * To keep things simple, enable all clocks initially.
	 * The clocks will get managed later, after channel allocation.
	 * The clocks for the event lines on which reserved channels exist
	 * are not managed here.
	 */
	writel(D40_DREG_GCC_ENABLE_ALL, base->virtbase + D40_DREG_GCC);
	base->gcc_pwr_off_mask = gcc;

	return num_phy_chans_avail;
}
static struct d40_base * __init d40_hw_detect_init(struct platform_device *pdev)
{
	struct stedma40_platform_data *plat_data;
	struct clk *clk = NULL;
	void __iomem *virtbase = NULL;
	struct resource *res = NULL;
	struct d40_base *base = NULL;
	int num_log_chans = 0;
	int num_phy_chans;
	int clk_ret = -EINVAL;
	int i;
	u32 pid;
	u32 cid;
	u8 rev;

	clk = clk_get(&pdev->dev, NULL);
	if (IS_ERR(clk)) {
		d40_err(&pdev->dev, "No matching clock found\n");
		goto failure;
	}

	clk_ret = clk_prepare_enable(clk);
	if (clk_ret) {
		d40_err(&pdev->dev, "Failed to prepare/enable clock\n");
		goto failure;
	}

	/* Get IO for DMAC base address */
	res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "base");
	if (!res)
		goto failure;

	if (request_mem_region(res->start, resource_size(res),
			       D40_NAME " I/O base") == NULL)
		goto failure;

	virtbase = ioremap(res->start, resource_size(res));
	if (!virtbase)
		goto failure;

	/* This is just a regular AMBA PrimeCell ID actually */
	for (pid = 0, i = 0; i < 4; i++)
		pid |= (readl(virtbase + resource_size(res) - 0x20 + 4 * i)
			& 255) << (i * 8);
	for (cid = 0, i = 0; i < 4; i++)
		cid |= (readl(virtbase + resource_size(res) - 0x10 + 4 * i)
			& 255) << (i * 8);

	if (cid != AMBA_CID) {
		d40_err(&pdev->dev, "Unknown hardware! No PrimeCell ID\n");
		goto failure;
	}
	if (AMBA_MANF_BITS(pid) != AMBA_VENDOR_ST) {
		d40_err(&pdev->dev, "Unknown designer! Got %x wanted %x\n",
			AMBA_MANF_BITS(pid),
			AMBA_VENDOR_ST);
		goto failure;
	}
	/*
	 * HW revision:
	 * DB8500ed has revision 0
	 * DB8500v1 has revision 2
	 * DB8500v2 has revision 3
	 * AP9540v1 has revision 4
	 * DB8540v1 has revision 4
	 */
	rev = AMBA_REV_BITS(pid);
	if (rev < 2) {
		d40_err(&pdev->dev, "hardware revision: %d is not supported", rev);
		goto failure;
	}

	plat_data = pdev->dev.platform_data;

	/* The number of physical channels on this HW */
	if (plat_data->num_of_phy_chans)
		num_phy_chans = plat_data->num_of_phy_chans;
	else
		num_phy_chans = 4 * (readl(virtbase + D40_DREG_ICFG) & 0x7) + 4;

	num_log_chans = num_phy_chans * D40_MAX_LOG_CHAN_PER_PHY;

	dev_info(&pdev->dev, "hardware revision: %d @ 0x%x with %d physical channels\n",
		 rev, res->start, num_phy_chans);

	base = kzalloc(ALIGN(sizeof(struct d40_base), 4) +
		       (num_phy_chans + num_log_chans + ARRAY_SIZE(dma40_memcpy_channels)) *
		       sizeof(struct d40_chan), GFP_KERNEL);

	if (base == NULL) {
		d40_err(&pdev->dev, "Out of memory\n");
		goto failure;
	}

	base->rev = rev;
	base->clk = clk;
	base->num_phy_chans = num_phy_chans;
	base->num_log_chans = num_log_chans;
	base->phy_start = res->start;
	base->phy_size = resource_size(res);
	base->virtbase = virtbase;
	base->plat_data = plat_data;
	base->dev = &pdev->dev;
	base->phy_chans = ((void *)base) + ALIGN(sizeof(struct d40_base), 4);
	base->log_chans = &base->phy_chans[num_phy_chans];

	if (base->plat_data->num_of_phy_chans == 14) {
		base->gen_dmac.backup = d40_backup_regs_v4b;
		base->gen_dmac.backup_size = BACKUP_REGS_SZ_V4B;
		base->gen_dmac.interrupt_en = D40_DREG_CPCMIS;
		base->gen_dmac.interrupt_clear = D40_DREG_CPCICR;
		base->gen_dmac.realtime_en = D40_DREG_CRSEG1;
		base->gen_dmac.realtime_clear = D40_DREG_CRCEG1;
		base->gen_dmac.high_prio_en = D40_DREG_CPSEG1;
		base->gen_dmac.high_prio_clear = D40_DREG_CPCEG1;
		base->gen_dmac.il = il_v4b;
		base->gen_dmac.il_size = ARRAY_SIZE(il_v4b);
		base->gen_dmac.init_reg = dma_init_reg_v4b;
		base->gen_dmac.init_reg_size = ARRAY_SIZE(dma_init_reg_v4b);
	} else {
		if (base->rev >= 3) {
			base->gen_dmac.backup = d40_backup_regs_v4a;
			base->gen_dmac.backup_size = BACKUP_REGS_SZ_V4A;
		}
		base->gen_dmac.interrupt_en = D40_DREG_PCMIS;
		base->gen_dmac.interrupt_clear = D40_DREG_PCICR;
		base->gen_dmac.realtime_en = D40_DREG_RSEG1;
		base->gen_dmac.realtime_clear = D40_DREG_RCEG1;
		base->gen_dmac.high_prio_en = D40_DREG_PSEG1;
		base->gen_dmac.high_prio_clear = D40_DREG_PCEG1;
		base->gen_dmac.il = il_v4a;
		base->gen_dmac.il_size = ARRAY_SIZE(il_v4a);
		base->gen_dmac.init_reg = dma_init_reg_v4a;
		base->gen_dmac.init_reg_size = ARRAY_SIZE(dma_init_reg_v4a);
	}

	base->phy_res = kzalloc(num_phy_chans * sizeof(struct d40_phy_res),
				GFP_KERNEL);
	if (!base->phy_res)
		goto failure;

	base->lookup_phy_chans = kzalloc(num_phy_chans *
					 sizeof(struct d40_chan *),
					 GFP_KERNEL);
	if (!base->lookup_phy_chans)
		goto failure;

	base->lookup_log_chans = kzalloc(num_log_chans *
					 sizeof(struct d40_chan *),
					 GFP_KERNEL);
	if (!base->lookup_log_chans)
		goto failure;

	base->reg_val_backup_chan = kmalloc(base->num_phy_chans *
					    sizeof(d40_backup_regs_chan),
					    GFP_KERNEL);
	if (!base->reg_val_backup_chan)
		goto failure;

	base->lcla_pool.alloc_map =
		kzalloc(num_phy_chans * sizeof(struct d40_desc *)
			* D40_LCLA_LINK_PER_EVENT_GRP, GFP_KERNEL);
	if (!base->lcla_pool.alloc_map)
		goto failure;

	base->desc_slab = kmem_cache_create(D40_NAME, sizeof(struct d40_desc),
					    0, SLAB_HWCACHE_ALIGN,
					    NULL);
	if (base->desc_slab == NULL)
		goto failure;

	return base;

failure:
	if (!clk_ret)
		clk_disable_unprepare(clk);
	if (!IS_ERR(clk))
		clk_put(clk);
	if (virtbase)
		iounmap(virtbase);
	if (res)
		release_mem_region(res->start,
				   resource_size(res));

	if (base) {
		kfree(base->lcla_pool.alloc_map);
		kfree(base->reg_val_backup_chan);
		kfree(base->lookup_log_chans);
		kfree(base->lookup_phy_chans);
		kfree(base->phy_res);
		kfree(base);
	}

	return NULL;
}
static void __init d40_hw_init(struct d40_base *base)
{
	int i;
	u32 prmseo[2] = {0, 0};
	u32 activeo[2] = {0xFFFFFFFF, 0xFFFFFFFF};
	u32 pcmis = 0;
	u32 pcicr = 0;
	struct d40_reg_val *dma_init_reg = base->gen_dmac.init_reg;
	u32 reg_size = base->gen_dmac.init_reg_size;

	for (i = 0; i < reg_size; i++)
		writel(dma_init_reg[i].val,
		       base->virtbase + dma_init_reg[i].reg);

	/* Configure all our dma channels to default settings */
	for (i = 0; i < base->num_phy_chans; i++) {

		activeo[i % 2] = activeo[i % 2] << 2;

		if (base->phy_res[base->num_phy_chans - i - 1].allocated_src
		    == D40_ALLOC_PHY) {
			activeo[i % 2] |= 3;
			continue;
		}

		/* Enable interrupt # */
		pcmis = (pcmis << 1) | 1;

		/* Clear interrupt # */
		pcicr = (pcicr << 1) | 1;

		/* Set channel to physical mode */
		prmseo[i % 2] = prmseo[i % 2] << 2;
		prmseo[i % 2] |= 1;
	}

	writel(prmseo[1], base->virtbase + D40_DREG_PRMSE);
	writel(prmseo[0], base->virtbase + D40_DREG_PRMSO);
	writel(activeo[1], base->virtbase + D40_DREG_ACTIVE);
	writel(activeo[0], base->virtbase + D40_DREG_ACTIVO);

	/* Write which interrupt to enable */
	writel(pcmis, base->virtbase + base->gen_dmac.interrupt_en);

	/* Write which interrupt to clear */
	writel(pcicr, base->virtbase + base->gen_dmac.interrupt_clear);

	/* These are __initdata and cannot be accessed after init */
	base->gen_dmac.init_reg = NULL;
	base->gen_dmac.init_reg_size = 0;
}
static int __init d40_lcla_allocate(struct d40_base *base)
{
	struct d40_lcla_pool *pool = &base->lcla_pool;
	unsigned long *page_list;
	int i, j;
	int ret = 0;

	/*
	 * This is somewhat ugly. We need 8192 bytes that are 18-bit aligned.
	 * To fulfill this hardware requirement without wasting 256 kB,
	 * we allocate pages until we get an aligned one.
	 */
	page_list = kmalloc(sizeof(unsigned long) * MAX_LCLA_ALLOC_ATTEMPTS,
			    GFP_KERNEL);

	if (!page_list) {
		ret = -ENOMEM;
		goto failure;
	}

	/* Calculating how many pages that are required */
	base->lcla_pool.pages = SZ_1K * base->num_phy_chans / PAGE_SIZE;

	for (i = 0; i < MAX_LCLA_ALLOC_ATTEMPTS; i++) {
		page_list[i] = __get_free_pages(GFP_KERNEL,
						base->lcla_pool.pages);
		if (!page_list[i]) {

			d40_err(base->dev, "Failed to allocate %d pages.\n",
				base->lcla_pool.pages);

			for (j = 0; j < i; j++)
				free_pages(page_list[j], base->lcla_pool.pages);
			goto failure;
		}

		if ((virt_to_phys((void *)page_list[i]) &
		     (LCLA_ALIGNMENT - 1)) == 0)
			break;
	}

	for (j = 0; j < i; j++)
		free_pages(page_list[j], base->lcla_pool.pages);

	if (i < MAX_LCLA_ALLOC_ATTEMPTS) {
		base->lcla_pool.base = (void *)page_list[i];
	} else {
		/*
		 * After many attempts and no success with finding the correct
		 * alignment, try with allocating a big buffer.
		 */
		dev_warn(base->dev,
			 "[%s] Failed to get %d pages @ 18 bit align.\n",
			 __func__, base->lcla_pool.pages);
		base->lcla_pool.base_unaligned = kmalloc(SZ_1K *
							 base->num_phy_chans +
							 LCLA_ALIGNMENT,
							 GFP_KERNEL);
		if (!base->lcla_pool.base_unaligned) {
			ret = -ENOMEM;
			goto failure;
		}

		base->lcla_pool.base = PTR_ALIGN(base->lcla_pool.base_unaligned,
						 LCLA_ALIGNMENT);
	}

	pool->dma_addr = dma_map_single(base->dev, pool->base,
					SZ_1K * base->num_phy_chans,
					DMA_TO_DEVICE);
	if (dma_mapping_error(base->dev, pool->dma_addr)) {
		pool->dma_addr = 0;
		ret = -ENOMEM;
		goto failure;
	}

	writel(virt_to_phys(base->lcla_pool.base),
	       base->virtbase + D40_DREG_LCLA);
failure:
	kfree(page_list);
	return ret;
}
static int __init d40_probe(struct platform_device *pdev)
{
	int err;
	int ret = -ENOENT;
	struct d40_base *base;
	struct resource *res = NULL;
	int num_reserved_chans;
	u32 val;

	base = d40_hw_detect_init(pdev);
	if (!base)
		goto failure;

	num_reserved_chans = d40_phy_res_init(base);

	platform_set_drvdata(pdev, base);

	spin_lock_init(&base->interrupt_lock);
	spin_lock_init(&base->execmd_lock);

	/* Get IO for logical channel parameter address */
	res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "lcpa");
	if (!res) {
		ret = -ENOENT;
		d40_err(&pdev->dev, "No \"lcpa\" memory resource\n");
		goto failure;
	}
	base->lcpa_size = resource_size(res);
	base->phy_lcpa = res->start;

	if (request_mem_region(res->start, resource_size(res),
			       D40_NAME " I/O lcpa") == NULL) {
		ret = -EBUSY;
		d40_err(&pdev->dev,
			"Failed to request LCPA region 0x%x-0x%x\n",
			res->start, res->end);
		goto failure;
	}

	/* We make use of ESRAM memory for this. */
	val = readl(base->virtbase + D40_DREG_LCPA);
	if (res->start != val && val != 0) {
		dev_warn(&pdev->dev,
			 "[%s] Mismatch LCPA dma 0x%x, def 0x%x\n",
			 __func__, val, res->start);
	} else
		writel(res->start, base->virtbase + D40_DREG_LCPA);

	base->lcpa_base = ioremap(res->start, resource_size(res));
	if (!base->lcpa_base) {
		ret = -ENOMEM;
		d40_err(&pdev->dev, "Failed to ioremap LCPA region\n");
		goto failure;
	}
	/* If lcla has to be located in ESRAM we don't need to allocate */
	if (base->plat_data->use_esram_lcla) {
		res = platform_get_resource_byname(pdev, IORESOURCE_MEM,
						   "lcla_esram");
		if (!res) {
			ret = -ENOENT;
			d40_err(&pdev->dev,
				"No \"lcla_esram\" memory resource\n");
			goto failure;
		}
		base->lcla_pool.base = ioremap(res->start,
					       resource_size(res));
		if (!base->lcla_pool.base) {
			ret = -ENOMEM;
			d40_err(&pdev->dev, "Failed to ioremap LCLA region\n");
			goto failure;
		}
		writel(res->start, base->virtbase + D40_DREG_LCLA);

	} else {
		ret = d40_lcla_allocate(base);
		if (ret) {
			d40_err(&pdev->dev, "Failed to allocate LCLA area\n");
			goto failure;
		}
	}

	spin_lock_init(&base->lcla_pool.lock);

	base->irq = platform_get_irq(pdev, 0);

	ret = request_irq(base->irq, d40_handle_interrupt, 0, D40_NAME, base);
	if (ret) {
		d40_err(&pdev->dev, "No IRQ defined\n");
		goto failure;
	}

	pm_runtime_irq_safe(base->dev);
	pm_runtime_set_autosuspend_delay(base->dev, DMA40_AUTOSUSPEND_DELAY);
	pm_runtime_use_autosuspend(base->dev);
	pm_runtime_enable(base->dev);
	pm_runtime_resume(base->dev);

	if (base->plat_data->use_esram_lcla) {

		base->lcpa_regulator = regulator_get(base->dev, "lcla_esram");
		if (IS_ERR(base->lcpa_regulator)) {
			d40_err(&pdev->dev, "Failed to get lcpa_regulator\n");
			base->lcpa_regulator = NULL;
			goto failure;
		}

		ret = regulator_enable(base->lcpa_regulator);
		if (ret) {
			d40_err(&pdev->dev,
				"Failed to enable lcpa_regulator\n");
			regulator_put(base->lcpa_regulator);
			base->lcpa_regulator = NULL;
			goto failure;
		}
	}

	base->initialized = true;
	err = d40_dmaengine_init(base, num_reserved_chans);
	if (err)
		goto failure;

	base->dev->dma_parms = &base->dma_parms;
	err = dma_set_max_seg_size(base->dev, STEDMA40_MAX_SEG_SIZE);
	if (err) {
		d40_err(&pdev->dev, "Failed to set dma max seg size\n");
		goto failure;
	}

	d40_hw_init(base);

	dev_info(base->dev, "initialized\n");
	return 0;

failure:
	if (base) {
		if (base->desc_slab)
			kmem_cache_destroy(base->desc_slab);
		if (base->virtbase)
			iounmap(base->virtbase);

		if (base->lcla_pool.base && base->plat_data->use_esram_lcla) {
			iounmap(base->lcla_pool.base);
			base->lcla_pool.base = NULL;
		}

		if (base->lcla_pool.dma_addr)
			dma_unmap_single(base->dev, base->lcla_pool.dma_addr,
					 SZ_1K * base->num_phy_chans,
					 DMA_TO_DEVICE);

		if (!base->lcla_pool.base_unaligned && base->lcla_pool.base)
			free_pages((unsigned long)base->lcla_pool.base,
				   base->lcla_pool.pages);

		kfree(base->lcla_pool.base_unaligned);

		if (base->phy_lcpa)
			release_mem_region(base->phy_lcpa,
					   base->lcpa_size);
		if (base->phy_start)
			release_mem_region(base->phy_start,
					   base->phy_size);
		if (base->clk) {
			clk_disable_unprepare(base->clk);
			clk_put(base->clk);
		}

		if (base->lcpa_regulator) {
			regulator_disable(base->lcpa_regulator);
			regulator_put(base->lcpa_regulator);
		}

		kfree(base->lcla_pool.alloc_map);
		kfree(base->lookup_log_chans);
		kfree(base->lookup_phy_chans);
		kfree(base->phy_res);
		kfree(base);
	}

	d40_err(&pdev->dev, "probe failed\n");
	return ret;
}
static struct platform_driver d40_driver = {
	.driver = {
		.owner = THIS_MODULE,
		.name  = D40_NAME,
		.pm = DMA40_PM_OPS,
	},
};

static int __init stedma40_init(void)
{
	return platform_driver_probe(&d40_driver, d40_probe);
}
subsys_initcall(stedma40_init);