/*
 *  intel_mid_dma.c - Intel Langwell DMA Drivers
 *
 *  Copyright (C) 2008-10 Intel Corp
 *  Author: Vinod Koul <vinod.koul@intel.com>
 *  The driver design is based on dw_dmac driver
 *  ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
 *
 *  This program is free software; you can redistribute it and/or modify
 *  it under the terms of the GNU General Public License as published by
 *  the Free Software Foundation; version 2 of the License.
 *
 *  This program is distributed in the hope that it will be useful, but
 *  WITHOUT ANY WARRANTY; without even the implied warranty of
 *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 *  General Public License for more details.
 *
 *  You should have received a copy of the GNU General Public License along
 *  with this program; if not, write to the Free Software Foundation, Inc.,
 *  59 Temple Place, Suite 330, Boston, MA 02111-1307 USA.
 *
 *  ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
 */
#include <linux/pci.h>
#include <linux/interrupt.h>
#include <linux/pm_runtime.h>
#include <linux/intel_mid_dma.h>

#define MAX_CHAN	4 /* max ch across controllers */
#include "intel_mid_dma_regs.h"

#define INTEL_MID_DMAC1_ID		0x0814
#define INTEL_MID_DMAC2_ID		0x0813
#define INTEL_MID_GP_DMAC2_ID		0x0827
#define INTEL_MFLD_DMAC1_ID		0x0830
#define LNW_PERIPHRAL_MASK_BASE		0xFFAE8008
#define LNW_PERIPHRAL_MASK_SIZE		0x10
#define LNW_PERIPHRAL_STATUS		0x0
#define LNW_PERIPHRAL_MASK		0x8
struct intel_mid_dma_probe_info {
        u8 max_chan;
        u8 ch_base;
        u16 block_size;
        u32 pimr_mask;
};

#define INFO(_max_chan, _ch_base, _block_size, _pimr_mask) \
        ((kernel_ulong_t)&(struct intel_mid_dma_probe_info) {	\
                .max_chan = (_max_chan),			\
                .ch_base = (_ch_base),				\
                .block_size = (_block_size),			\
                .pimr_mask = (_pimr_mask),			\
        })
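
/*
 * Illustrative note (added, not in the original): INFO() takes the
 * address of a compound literal and stores it in the PCI id table's
 * driver_data, so probe can recover the per-controller parameters with
 *
 *	info = (void *)id->driver_data;
 *
 * e.g. INFO(2, 6, 4095, 0x200020) for DMAC1 describes 2 channels based
 * at channel 6, a 4095-item max block and peripheral mask 0x200020.
 */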
/*****************************************************************************
Utility Functions*/
/**
 * get_ch_index	-	convert status to channel
 * @status: status mask
 * @base: dma ch base value
 *
 * Modify the status mask and return the channel index needing
 * attention (or -1 if neither)
 */
static int get_ch_index(int *status, unsigned int base)
{
        int i;

        for (i = 0; i < MAX_CHAN; i++) {
                if (*status & (1 << (i + base))) {
                        *status = *status & ~(1 << (i + base));
                        pr_debug("MDMA: index %d New status %x\n", i, *status);
                        return i;
                }
        }
        return -1;
}
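
/*
 * Worked example (added, not in the original): with chan_base = 6 and a
 * raw status of 0xC0 (channels 6 and 7 pending), successive calls drain
 * the status word:
 *
 *	int status = 0xC0;
 *	int i = get_ch_index(&status, 6);	(i == 0, status == 0x80)
 *	i = get_ch_index(&status, 6);		(i == 1, status == 0x00)
 *
 * so the tasklet below can loop until status reaches zero.
 */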
/**
 * get_block_ts	-	calculates dma transaction length
 * @len: dma transfer length
 * @tx_width: dma transfer src width
 * @block_size: dma controller max block size
 *
 * Based on src width calculate the DMA transaction length in data items
 * return data items or FFFF if exceeds max length for block
 */
static int get_block_ts(int len, int tx_width, int block_size)
{
        int byte_width = 0, block_ts = 0;

        switch (tx_width) {
        case LNW_DMA_WIDTH_8BIT:
                byte_width = 1;
                break;
        case LNW_DMA_WIDTH_16BIT:
                byte_width = 2;
                break;
        case LNW_DMA_WIDTH_32BIT:
        default:
                byte_width = 4;
                break;
        }

        block_ts = len/byte_width;
        if (block_ts > block_size)
                block_ts = 0xFFFF;
        return block_ts;
}
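
/*
 * Worked example (added, not in the original): a 4096 byte transfer at
 * LNW_DMA_WIDTH_32BIT has byte_width 4, so
 *
 *	get_block_ts(4096, LNW_DMA_WIDTH_32BIT, 4095) == 4096/4 == 1024
 *
 * which fits DMAC1's 4095-item block limit; 32768 bytes at 8-bit width
 * would give 32768 items and be clamped to 0xFFFF.
 */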
/*****************************************************************************
DMAC1 interrupt Functions*/

/**
 * dmac1_mask_periphral_intr -	mask the peripheral interrupt
 * @midc: dma channel for which masking is required
 *
 * Masks the DMA peripheral interrupt
 * this is valid for DMAC1 family controllers only
 * This controller should have peripheral mask registers already mapped
 */
static void dmac1_mask_periphral_intr(struct intel_mid_dma_chan *midc)
{
        u32 pimr;
        struct middma_device *mid = to_middma_device(midc->chan.device);

        if (mid->pimr_mask) {
                pimr = readl(mid->mask_reg + LNW_PERIPHRAL_MASK);
                pimr |= mid->pimr_mask;
                writel(pimr, mid->mask_reg + LNW_PERIPHRAL_MASK);
        }
}
/**
 * dmac1_unmask_periphral_intr -	unmask the peripheral interrupt
 * @midc: dma channel for which masking is required
 *
 * UnMasks the DMA peripheral interrupt,
 * this is valid for DMAC1 family controllers only
 * This controller should have peripheral mask registers already mapped
 */
static void dmac1_unmask_periphral_intr(struct intel_mid_dma_chan *midc)
{
        u32 pimr;
        struct middma_device *mid = to_middma_device(midc->chan.device);

        if (mid->pimr_mask) {
                pimr = readl(mid->mask_reg + LNW_PERIPHRAL_MASK);
                pimr &= ~mid->pimr_mask;
                writel(pimr, mid->mask_reg + LNW_PERIPHRAL_MASK);
        }
}
/**
 * enable_dma_interrupt -	enable the peripheral interrupt
 * @midc: dma channel for which enable interrupt is required
 *
 * Enable the DMA peripheral interrupt,
 * this is valid for DMAC1 family controllers only
 * This controller should have peripheral mask registers already mapped
 */
static void enable_dma_interrupt(struct intel_mid_dma_chan *midc)
{
        dmac1_unmask_periphral_intr(midc);

        /* en ch interrupts */
        iowrite32(UNMASK_INTR_REG(midc->ch_id), midc->dma_base + MASK_TFR);
        iowrite32(UNMASK_INTR_REG(midc->ch_id), midc->dma_base + MASK_ERR);
}
/**
 * disable_dma_interrupt -	disable the peripheral interrupt
 * @midc: dma channel for which disable interrupt is required
 *
 * Disable the DMA peripheral interrupt,
 * this is valid for DMAC1 family controllers only
 * This controller should have peripheral mask registers already mapped
 */
static void disable_dma_interrupt(struct intel_mid_dma_chan *midc)
{
        /*Check LPE PISR, make sure fwd is disabled*/
        dmac1_mask_periphral_intr(midc);
        iowrite32(MASK_INTR_REG(midc->ch_id), midc->dma_base + MASK_BLOCK);
        iowrite32(MASK_INTR_REG(midc->ch_id), midc->dma_base + MASK_TFR);
        iowrite32(MASK_INTR_REG(midc->ch_id), midc->dma_base + MASK_ERR);
}
/*****************************************************************************
DMA channel helper Functions*/
/**
 * midc_desc_get -	get a descriptor
 * @midc: dma channel for which descriptor is required
 *
 * Obtain a descriptor for the channel. Returns NULL if none are free.
 * Once the descriptor is returned it is private until put on another
 * list or freed
 */
static struct intel_mid_dma_desc *midc_desc_get(struct intel_mid_dma_chan *midc)
{
        struct intel_mid_dma_desc *desc, *_desc;
        struct intel_mid_dma_desc *ret = NULL;

        spin_lock_bh(&midc->lock);
        list_for_each_entry_safe(desc, _desc, &midc->free_list, desc_node) {
                if (async_tx_test_ack(&desc->txd)) {
                        list_del(&desc->desc_node);
                        ret = desc;
                        break;
                }
        }
        spin_unlock_bh(&midc->lock);
        return ret;
}
/**
 * midc_desc_put -	put a descriptor
 * @midc: dma channel for which descriptor is required
 * @desc: descriptor to put
 *
 * Return a descriptor from midc_desc_get back to the free pool
 */
static void midc_desc_put(struct intel_mid_dma_chan *midc,
                        struct intel_mid_dma_desc *desc)
{
        if (desc) {
                spin_lock_bh(&midc->lock);
                list_add_tail(&desc->desc_node, &midc->free_list);
                spin_unlock_bh(&midc->lock);
        }
}
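
/*
 * Illustrative note (added, not in the original): descriptors cycle
 * free_list -> active_list/queue -> free_list, and midc_desc_get()
 * only hands out nodes whose ACK bit is set (all are initialised with
 * DMA_CTRL_ACK in alloc_chan_resources). A failed prep path returns
 * its node immediately:
 *
 *	desc = midc_desc_get(midc);	(NULL when the pool is exhausted)
 *	if (desc && setup_failed)	(setup_failed is hypothetical)
 *		midc_desc_put(midc, desc);
 */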
/**
 * midc_dostart -	begin a DMA transaction
 * @midc: channel for which txn is to be started
 * @first: first descriptor of series
 *
 * Load a transaction into the engine. This must be called with midc->lock
 * held and bh disabled.
 */
static void midc_dostart(struct intel_mid_dma_chan *midc,
                        struct intel_mid_dma_desc *first)
{
        struct middma_device *mid = to_middma_device(midc->chan.device);

        /* channel is idle */
        if (midc->busy && test_ch_en(midc->dma_base, midc->ch_id)) {
                /*error*/
                pr_err("ERR_MDMA: channel is busy in start\n");
                /* The tasklet will hopefully advance the queue... */
                return;
        }
        midc->busy = true;
        /*write registers and en*/
        iowrite32(first->sar, midc->ch_regs + SAR);
        iowrite32(first->dar, midc->ch_regs + DAR);
        iowrite32(first->cfg_hi, midc->ch_regs + CFG_HIGH);
        iowrite32(first->cfg_lo, midc->ch_regs + CFG_LOW);
        iowrite32(first->ctl_lo, midc->ch_regs + CTL_LOW);
        iowrite32(first->ctl_hi, midc->ch_regs + CTL_HIGH);
        pr_debug("MDMA:TX SAR %x,DAR %x,CFGL %x,CFGH %x,CTLH %x, CTLL %x\n",
                (int)first->sar, (int)first->dar, first->cfg_hi,
                first->cfg_lo, first->ctl_hi, first->ctl_lo);

        iowrite32(ENABLE_CHANNEL(midc->ch_id), mid->dma_base + DMA_CHAN_EN);
        first->status = DMA_IN_PROGRESS;
}
/**
 * midc_descriptor_complete - process completed descriptor
 * @midc: channel owning the descriptor
 * @desc: the descriptor itself
 *
 * Process a completed descriptor and perform any callbacks upon
 * the completion. The completion handling drops the lock during the
 * callbacks but must be called with the lock held.
 */
static void midc_descriptor_complete(struct intel_mid_dma_chan *midc,
                struct intel_mid_dma_desc *desc)
{
        struct dma_async_tx_descriptor	*txd = &desc->txd;
        dma_async_tx_callback callback_txd = NULL;
        void *param_txd = NULL;

        midc->completed = txd->cookie;
        callback_txd = txd->callback;
        param_txd = txd->callback_param;

        list_move(&desc->desc_node, &midc->free_list);
        midc->busy = false;
        spin_unlock_bh(&midc->lock);
        if (callback_txd) {
                pr_debug("MDMA: TXD callback set ... calling\n");
                callback_txd(param_txd);
                spin_lock_bh(&midc->lock);
                return;
        }
        spin_lock_bh(&midc->lock);
}
/**
 * midc_scan_descriptors -	check the descriptors in channel
 *				mark completed when tx is complete
 * @mid: device
 * @midc: channel to scan
 *
 * Walk the descriptor chain for the device and process any entries
 * marked as complete.
 */
static void midc_scan_descriptors(struct middma_device *mid,
                                struct intel_mid_dma_chan *midc)
{
        struct intel_mid_dma_desc *desc = NULL, *_desc = NULL;

        /*tx is complete*/
        list_for_each_entry_safe(desc, _desc, &midc->active_list, desc_node) {
                if (desc->status == DMA_IN_PROGRESS) {
                        desc->status = DMA_SUCCESS;
                        midc_descriptor_complete(midc, desc);
                }
        }
}
/*****************************************************************************
DMA engine callback Functions*/
/**
 * intel_mid_dma_tx_submit -	callback to submit DMA transaction
 * @tx: dma engine descriptor
 *
 * Submit the DMA transaction for this descriptor, start if ch idle
 */
static dma_cookie_t intel_mid_dma_tx_submit(struct dma_async_tx_descriptor *tx)
{
        struct intel_mid_dma_desc	*desc = to_intel_mid_dma_desc(tx);
        struct intel_mid_dma_chan	*midc = to_intel_mid_dma_chan(tx->chan);
        dma_cookie_t		cookie;

        spin_lock_bh(&midc->lock);
        cookie = midc->chan.cookie;

        if (++cookie < 0)
                cookie = 1;

        midc->chan.cookie = cookie;
        desc->txd.cookie = cookie;

        if (list_empty(&midc->active_list)) {
                midc_dostart(midc, desc);
                list_add_tail(&desc->desc_node, &midc->active_list);
        } else {
                list_add_tail(&desc->desc_node, &midc->queue);
        }
        spin_unlock_bh(&midc->lock);

        return cookie;
}
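
/*
 * Illustrative client-side sketch (added, not in the original; kept out
 * of the build): the descriptor returned by a prep call carries the
 * tx_submit hook above, so a client queues work roughly as below.
 * example_submit() and my_done() are hypothetical names.
 */
#if 0
static DECLARE_COMPLETION(my_done_complete);

static void my_done(void *arg)
{
        complete(arg);	/* signal the submitter */
}

static dma_cookie_t example_submit(struct dma_chan *chan,
                dma_addr_t dst, dma_addr_t src, size_t len)
{
        struct dma_async_tx_descriptor *tx;

        tx = chan->device->device_prep_dma_memcpy(chan, dst, src, len, 0);
        if (!tx)
                return -ENOMEM;
        tx->callback = my_done;
        tx->callback_param = &my_done_complete;
        return tx->tx_submit(tx);	/* lands in intel_mid_dma_tx_submit() */
}
#endif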
/**
 * intel_mid_dma_issue_pending -	callback to issue pending txn
 * @chan: chan where pending transaction needs to be checked and submitted
 *
 * Call for scan to issue pending descriptors
 */
static void intel_mid_dma_issue_pending(struct dma_chan *chan)
{
        struct intel_mid_dma_chan	*midc = to_intel_mid_dma_chan(chan);

        spin_lock_bh(&midc->lock);
        if (!list_empty(&midc->queue))
                midc_scan_descriptors(to_middma_device(chan->device), midc);
        spin_unlock_bh(&midc->lock);
}
/**
 * intel_mid_dma_tx_status -	Return status of txn
 * @chan: chan for where status needs to be checked
 * @cookie: cookie for txn
 * @txstate: DMA txn state
 *
 * Return status of DMA txn
 */
static enum dma_status intel_mid_dma_tx_status(struct dma_chan *chan,
                                                dma_cookie_t cookie,
                                                struct dma_tx_state *txstate)
{
        struct intel_mid_dma_chan	*midc = to_intel_mid_dma_chan(chan);
        dma_cookie_t		last_used;
        dma_cookie_t		last_complete;
        int				ret;

        last_complete = midc->completed;
        last_used = chan->cookie;

        ret = dma_async_is_complete(cookie, last_complete, last_used);
        if (ret != DMA_SUCCESS) {
                midc_scan_descriptors(to_middma_device(chan->device), midc);

                last_complete = midc->completed;
                last_used = chan->cookie;

                ret = dma_async_is_complete(cookie, last_complete, last_used);
        }

        if (txstate) {
                txstate->last = last_complete;
                txstate->used = last_used;
                txstate->residue = 0;
        }
        return ret;
}
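
/*
 * Illustrative sketch (added, not in the original; kept out of the
 * build): a client typically polls completion via the cookie returned
 * by submit, which ends up in intel_mid_dma_tx_status() above.
 * example_poll() is a hypothetical name.
 */
#if 0
static bool example_poll(struct dma_chan *chan, dma_cookie_t cookie)
{
        struct dma_tx_state state;

        if (chan->device->device_tx_status(chan, cookie, &state)
                        != DMA_SUCCESS)
                return false;
        pr_debug("done: last %d used %d\n", state.last, state.used);
        return true;
}
#endif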
/**
 * intel_mid_dma_device_control -	DMA device control
 * @chan: chan for DMA control
 * @cmd: control cmd
 * @arg: cmd arg value
 *
 * Perform DMA control command
 */
static int intel_mid_dma_device_control(struct dma_chan *chan,
        enum dma_ctrl_cmd cmd, unsigned long arg)
{
        struct intel_mid_dma_chan	*midc = to_intel_mid_dma_chan(chan);
        struct middma_device	*mid = to_middma_device(chan->device);
        struct intel_mid_dma_desc	*desc, *_desc;
        LIST_HEAD(list);

        if (cmd != DMA_TERMINATE_ALL)
                return -ENXIO;

        spin_lock_bh(&midc->lock);
        if (midc->busy == false) {
                spin_unlock_bh(&midc->lock);
                return 0;
        }
        list_splice_init(&midc->free_list, &list);
        midc->descs_allocated = 0;

        /* Disable interrupts */
        disable_dma_interrupt(midc);

        spin_unlock_bh(&midc->lock);
        list_for_each_entry_safe(desc, _desc, &list, desc_node) {
                pr_debug("MDMA: freeing descriptor %p\n", desc);
                pci_pool_free(mid->dma_pool, desc, desc->txd.phys);
        }
        return 0;
}
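
/*
 * Illustrative sketch (added, not in the original; kept out of the
 * build): only DMA_TERMINATE_ALL is implemented, so a client aborts
 * outstanding work through the generic control entry point, e.g. on a
 * timeout. example_abort() is a hypothetical name.
 */
#if 0
static void example_abort(struct dma_chan *chan)
{
        /* any other cmd gets -ENXIO from intel_mid_dma_device_control() */
        chan->device->device_control(chan, DMA_TERMINATE_ALL, 0);
}
#endif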
/**
 * intel_mid_dma_prep_slave_sg -	Prep slave sg txn
 * @chan: chan for DMA transfer
 * @sgl: scatter gather list
 * @sg_len: length of sg txn
 * @direction: DMA transfer direction
 * @flags: DMA flags
 *
 * Do DMA sg txn: NOT supported now
 */
static struct dma_async_tx_descriptor *intel_mid_dma_prep_slave_sg(
                        struct dma_chan *chan, struct scatterlist *sgl,
                        unsigned int sg_len, enum dma_data_direction direction,
                        unsigned long flags)
{
        /*not supported now*/
        return NULL;
}
/**
 * intel_mid_dma_prep_memcpy -	Prep memcpy txn
 * @chan: chan for DMA transfer
 * @dest: destn address
 * @src: src address
 * @len: DMA transfer len
 * @flags: DMA flags
 *
 * Perform a DMA memcpy. Note we support slave peripheral DMA transfers only
 * The peripheral txn details should be filled in slave structure properly
 * Returns the descriptor for this txn
 */
static struct dma_async_tx_descriptor *intel_mid_dma_prep_memcpy(
                        struct dma_chan *chan, dma_addr_t dest,
                        dma_addr_t src, size_t len, unsigned long flags)
{
        struct intel_mid_dma_chan *midc;
        struct intel_mid_dma_desc *desc = NULL;
        struct intel_mid_dma_slave *mids;
        union intel_mid_dma_ctl_lo ctl_lo;
        union intel_mid_dma_ctl_hi ctl_hi;
        union intel_mid_dma_cfg_lo cfg_lo;
        union intel_mid_dma_cfg_hi cfg_hi;
        enum intel_mid_dma_width width = 0;

        pr_debug("MDMA: Prep for memcpy\n");
        BUG_ON(!chan);
        if (!len)
                return NULL;

        mids = chan->private;
        BUG_ON(!mids);

        midc = to_intel_mid_dma_chan(chan);
        BUG_ON(!midc);

        pr_debug("MDMA:called for DMA %x CH %d Length %zu\n",
                                midc->dma->pci_id, midc->ch_id, len);
        pr_debug("MDMA:Cfg passed Mode %x, Dirn %x, HS %x, Width %x\n",
                mids->cfg_mode, mids->dirn, mids->hs_mode, mids->src_width);

        /*calculate CFG_LO*/
        if (mids->hs_mode == LNW_DMA_SW_HS) {
                cfg_lo.cfg_lo = 0;
                cfg_lo.cfgx.hs_sel_dst = 1;
                cfg_lo.cfgx.hs_sel_src = 1;
        } else if (mids->hs_mode == LNW_DMA_HW_HS)
                cfg_lo.cfg_lo = 0x00000;

        /*calculate CFG_HI*/
        if (mids->cfg_mode == LNW_DMA_MEM_TO_MEM) {
                /*SW HS only*/
                cfg_hi.cfg_hi = 0;
        } else {
                cfg_hi.cfg_hi = 0;
                if (midc->dma->pimr_mask) {
                        cfg_hi.cfgx.protctl = 0x0; /*default value*/
                        cfg_hi.cfgx.fifo_mode = 1;
                        if (mids->dirn == DMA_TO_DEVICE) {
                                cfg_hi.cfgx.src_per = 0;
                                if (mids->device_instance == 0)
                                        cfg_hi.cfgx.dst_per = 3;
                                if (mids->device_instance == 1)
                                        cfg_hi.cfgx.dst_per = 1;
                        } else if (mids->dirn == DMA_FROM_DEVICE) {
                                if (mids->device_instance == 0)
                                        cfg_hi.cfgx.src_per = 2;
                                if (mids->device_instance == 1)
                                        cfg_hi.cfgx.src_per = 0;
                                cfg_hi.cfgx.dst_per = 0;
                        }
                } else {
                        cfg_hi.cfgx.protctl = 0x1; /*default value*/
                        cfg_hi.cfgx.src_per = cfg_hi.cfgx.dst_per =
                                        midc->ch_id - midc->dma->chan_base;
                }
        }

        /*calculate CTL_HI*/
        ctl_hi.ctlx.reser = 0;
        width = mids->src_width;

        ctl_hi.ctlx.block_ts = get_block_ts(len, width, midc->dma->block_size);
        pr_debug("MDMA:calc len %d for block size %d\n",
                                ctl_hi.ctlx.block_ts, midc->dma->block_size);
        /*calculate CTL_LO*/
        ctl_lo.ctl_lo = 0;
        ctl_lo.ctlx.int_en = 1;
        ctl_lo.ctlx.dst_tr_width = mids->dst_width;
        ctl_lo.ctlx.src_tr_width = mids->src_width;
        ctl_lo.ctlx.dst_msize = mids->src_msize;
        ctl_lo.ctlx.src_msize = mids->dst_msize;

        if (mids->cfg_mode == LNW_DMA_MEM_TO_MEM) {
                ctl_lo.ctlx.tt_fc = 0;
                ctl_lo.ctlx.sinc = 0;
                ctl_lo.ctlx.dinc = 0;
        } else {
                if (mids->dirn == DMA_TO_DEVICE) {
                        ctl_lo.ctlx.sinc = 0;
                        ctl_lo.ctlx.dinc = 2;
                        ctl_lo.ctlx.tt_fc = 1;
                } else if (mids->dirn == DMA_FROM_DEVICE) {
                        ctl_lo.ctlx.sinc = 2;
                        ctl_lo.ctlx.dinc = 0;
                        ctl_lo.ctlx.tt_fc = 2;
                }
        }

        pr_debug("MDMA:Calc CTL LO %x, CTL HI %x, CFG LO %x, CFG HI %x\n",
                ctl_lo.ctl_lo, ctl_hi.ctl_hi, cfg_lo.cfg_lo, cfg_hi.cfg_hi);

        enable_dma_interrupt(midc);

        desc = midc_desc_get(midc);
        if (desc == NULL)
                goto err_desc_get;
        desc->sar = src;
        desc->dar = dest;
        desc->len = len;
        desc->cfg_hi = cfg_hi.cfg_hi;
        desc->cfg_lo = cfg_lo.cfg_lo;
        desc->ctl_lo = ctl_lo.ctl_lo;
        desc->ctl_hi = ctl_hi.ctl_hi;
        desc->width = width;
        desc->dirn = mids->dirn;
        return &desc->txd;

err_desc_get:
        pr_err("ERR_MDMA: Failed to get desc\n");
        midc_desc_put(midc, desc);
        return NULL;
}
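
/*
 * Illustrative sketch (added, not in the original; kept out of the
 * build): peripheral transfers are configured by hanging a filled-in
 * struct intel_mid_dma_slave on chan->private before calling prep.
 * The field values and the LNW_DMA_MEM_TO_PER/LNW_DMA_MSIZE_16 enum
 * names are assumptions based on <linux/intel_mid_dma.h>; adjust for
 * the real peripheral. example_prep() is a hypothetical name.
 */
#if 0
static struct dma_async_tx_descriptor *example_prep(struct dma_chan *chan,
                dma_addr_t dst, dma_addr_t src, size_t len)
{
        static struct intel_mid_dma_slave mids = {
                .hs_mode	 = LNW_DMA_HW_HS,
                .cfg_mode	 = LNW_DMA_MEM_TO_PER,
                .dirn		 = DMA_TO_DEVICE,
                .src_width	 = LNW_DMA_WIDTH_32BIT,
                .dst_width	 = LNW_DMA_WIDTH_32BIT,
                .src_msize	 = LNW_DMA_MSIZE_16,
                .dst_msize	 = LNW_DMA_MSIZE_16,
                .device_instance = 0,
        };

        chan->private = &mids;
        return chan->device->device_prep_dma_memcpy(chan, dst, src, len, 0);
}
#endif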
/**
 * intel_mid_dma_free_chan_resources -	Frees dma resources
 * @chan: chan requiring attention
 *
 * Frees the allocated resources on this DMA chan
 */
static void intel_mid_dma_free_chan_resources(struct dma_chan *chan)
{
        struct intel_mid_dma_chan	*midc = to_intel_mid_dma_chan(chan);
        struct middma_device	*mid = to_middma_device(chan->device);
        struct intel_mid_dma_desc	*desc, *_desc;

        if (true == midc->busy) {
                /*trying to free ch in use!!!!!*/
                pr_err("ERR_MDMA: trying to free ch in use\n");
        }
        pm_runtime_put(&mid->pdev->dev);
        spin_lock_bh(&midc->lock);
        midc->descs_allocated = 0;
        list_for_each_entry_safe(desc, _desc, &midc->active_list, desc_node) {
                list_del(&desc->desc_node);
                pci_pool_free(mid->dma_pool, desc, desc->txd.phys);
        }
        list_for_each_entry_safe(desc, _desc, &midc->free_list, desc_node) {
                list_del(&desc->desc_node);
                pci_pool_free(mid->dma_pool, desc, desc->txd.phys);
        }
        list_for_each_entry_safe(desc, _desc, &midc->queue, desc_node) {
                list_del(&desc->desc_node);
                pci_pool_free(mid->dma_pool, desc, desc->txd.phys);
        }
        spin_unlock_bh(&midc->lock);
        midc->in_use = false;
        midc->busy = false;
        /* Disable CH interrupts */
        iowrite32(MASK_INTR_REG(midc->ch_id), mid->dma_base + MASK_BLOCK);
        iowrite32(MASK_INTR_REG(midc->ch_id), mid->dma_base + MASK_ERR);
}
/**
 * intel_mid_dma_alloc_chan_resources -	Allocate dma resources
 * @chan: chan requiring attention
 *
 * Allocates DMA resources on this chan
 * Return the descriptors allocated
 */
static int intel_mid_dma_alloc_chan_resources(struct dma_chan *chan)
{
        struct intel_mid_dma_chan	*midc = to_intel_mid_dma_chan(chan);
        struct middma_device	*mid = to_middma_device(chan->device);
        struct intel_mid_dma_desc	*desc;
        dma_addr_t		phys;
        int	i = 0;

        pm_runtime_get_sync(&mid->pdev->dev);

        if (mid->state == SUSPENDED) {
                if (dma_resume(mid->pdev)) {
                        pr_err("ERR_MDMA: resume failed");
                        return -EFAULT;
                }
        }

        /* ASSERT:  channel is idle */
        if (test_ch_en(mid->dma_base, midc->ch_id)) {
                /*ch is not idle*/
                pr_err("ERR_MDMA: ch not idle\n");
                pm_runtime_put(&mid->pdev->dev);
                return -EIO;
        }
        midc->completed = chan->cookie = 1;

        spin_lock_bh(&midc->lock);
        while (midc->descs_allocated < DESCS_PER_CHANNEL) {
                spin_unlock_bh(&midc->lock);
                desc = pci_pool_alloc(mid->dma_pool, GFP_KERNEL, &phys);
                if (!desc) {
                        pr_err("ERR_MDMA: desc failed\n");
                        pm_runtime_put(&mid->pdev->dev);
                        return -ENOMEM;
                }
                dma_async_tx_descriptor_init(&desc->txd, chan);
                desc->txd.tx_submit = intel_mid_dma_tx_submit;
                desc->txd.flags = DMA_CTRL_ACK;
                desc->txd.phys = phys;
                spin_lock_bh(&midc->lock);
                i = ++midc->descs_allocated;
                list_add_tail(&desc->desc_node, &midc->free_list);
        }
        spin_unlock_bh(&midc->lock);
        midc->in_use = true;
        midc->busy = false;
        pr_debug("MID_DMA: Desc alloc done ret: %d desc\n", i);
        return i;
}
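
/*
 * Illustrative sketch (added, not in the original; kept out of the
 * build): since mid_setup_dma() sets DMA_PRIVATE, clients obtain a
 * channel with dma_request_channel() and a filter; the grant path then
 * calls intel_mid_dma_alloc_chan_resources() above. example_filter()
 * and example_request() are hypothetical names.
 */
#if 0
static bool example_filter(struct dma_chan *chan, void *param)
{
        return chan->chan_id == *(int *)param;	/* match a wanted channel */
}

static struct dma_chan *example_request(int wanted)
{
        dma_cap_mask_t mask;

        dma_cap_zero(mask);
        dma_cap_set(DMA_SLAVE, mask);
        return dma_request_channel(mask, example_filter, &wanted);
}
#endif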
/**
 * midc_handle_error -	Handle DMA txn error
 * @mid: controller where error occurred
 * @midc: chan where error occurred
 *
 * Scan the descriptor for error
 */
static void midc_handle_error(struct middma_device *mid,
                struct intel_mid_dma_chan *midc)
{
        midc_scan_descriptors(mid, midc);
}
/**
 * dma_tasklet -	DMA interrupt tasklet
 * @data: tasklet arg (the controller structure)
 *
 * Scan the controller for interrupts for completion/error
 * Clear the interrupt and call for handling completion/error
 */
static void dma_tasklet(unsigned long data)
{
        struct middma_device *mid = NULL;
        struct intel_mid_dma_chan *midc = NULL;
        u32 status;
        int i;

        mid = (struct middma_device *)data;
        if (mid == NULL) {
                pr_err("ERR_MDMA: tasklet Null param\n");
                return;
        }
        pr_debug("MDMA: in tasklet for device %x\n", mid->pci_id);
        status = ioread32(mid->dma_base + RAW_TFR);
        pr_debug("MDMA:RAW_TFR %x\n", status);
        status &= mid->intr_mask;
        while (status) {
                /*txn interrupt*/
                i = get_ch_index(&status, mid->chan_base);
                if (i < 0) {
                        pr_err("ERR_MDMA:Invalid ch index %x\n", i);
                        return;
                }
                midc = &mid->ch[i];
                if (midc == NULL) {
                        pr_err("ERR_MDMA:Null param midc\n");
                        return;
                }
                pr_debug("MDMA:Tx complete interrupt %x, Ch No %d Index %d\n",
                                status, midc->ch_id, i);
                /*clearing this interrupts first*/
                iowrite32((1 << midc->ch_id), mid->dma_base + CLEAR_TFR);
                iowrite32((1 << midc->ch_id), mid->dma_base + CLEAR_BLOCK);

                spin_lock_bh(&midc->lock);
                midc_scan_descriptors(mid, midc);
                pr_debug("MDMA:Scan of desc... complete, unmasking\n");
                iowrite32(UNMASK_INTR_REG(midc->ch_id),
                                mid->dma_base + MASK_TFR);
                spin_unlock_bh(&midc->lock);
        }

        status = ioread32(mid->dma_base + RAW_ERR);
        status &= mid->intr_mask;
        while (status) {
                /*err interrupt*/
                i = get_ch_index(&status, mid->chan_base);
                if (i < 0) {
                        pr_err("ERR_MDMA:Invalid ch index %x\n", i);
                        return;
                }
                midc = &mid->ch[i];
                if (midc == NULL) {
                        pr_err("ERR_MDMA:Null param midc\n");
                        return;
                }
                pr_debug("MDMA:Tx error interrupt %x, Ch No %d Index %d\n",
                                status, midc->ch_id, i);

                iowrite32((1 << midc->ch_id), mid->dma_base + CLEAR_ERR);
                spin_lock_bh(&midc->lock);
                midc_handle_error(mid, midc);
                iowrite32(UNMASK_INTR_REG(midc->ch_id),
                                mid->dma_base + MASK_ERR);
                spin_unlock_bh(&midc->lock);
        }
        pr_debug("MDMA:Exiting tasklet...\n");
}
static void dma_tasklet1(unsigned long data)
{
        pr_debug("MDMA:in tasklet1...\n");
        return dma_tasklet(data);
}

static void dma_tasklet2(unsigned long data)
{
        pr_debug("MDMA:in tasklet2...\n");
        return dma_tasklet(data);
}
/**
 * intel_mid_dma_interrupt -	DMA ISR
 * @irq: IRQ where interrupt occurred
 * @data: ISR callback data (the controller structure)
 *
 * See if this is our interrupt if so then schedule the tasklet
 * otherwise ignore
 */
static irqreturn_t intel_mid_dma_interrupt(int irq, void *data)
{
        struct middma_device *mid = data;
        u32 tfr_status, err_status;
        int call_tasklet = 0;

        if (!mid) {
                pr_err("ERR_MDMA:null pointer mid\n");
                return IRQ_NONE;
        }

        tfr_status = ioread32(mid->dma_base + RAW_TFR);
        err_status = ioread32(mid->dma_base + RAW_ERR);
        if (!tfr_status && !err_status)
                return IRQ_NONE;

        /*DMA Interrupt*/
        pr_debug("MDMA:Got an interrupt on irq %d\n", irq);
        pr_debug("MDMA: Status %x, Mask %x\n", tfr_status, mid->intr_mask);
        tfr_status &= mid->intr_mask;
        if (tfr_status) {
                /*need to disable intr*/
                iowrite32((tfr_status << 8), mid->dma_base + MASK_TFR);
                pr_debug("MDMA: Calling tasklet %x\n", tfr_status);
                call_tasklet = 1;
        }
        err_status &= mid->intr_mask;
        if (err_status) {
                iowrite32(MASK_INTR_REG(err_status), mid->dma_base + MASK_ERR);
                call_tasklet = 1;
        }
        if (call_tasklet)
                tasklet_schedule(&mid->tasklet);

        return IRQ_HANDLED;
}
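
/*
 * Note (added, not in the original): the (tfr_status << 8) write above
 * relies on the DesignWare-style mask layout this driver assumes, where
 * the upper byte of a MASK_* register carries per-channel write-enable
 * bits and the lower byte the enable values. Shifting the status into
 * the write-enable byte with zero enable bits masks exactly the
 * signalled channels, e.g. for channel 0:
 *
 *	iowrite32(0x01 << 8, mid->dma_base + MASK_TFR);	(mask ch 0)
 *
 * until the tasklet unmasks them again via UNMASK_INTR_REG().
 */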
static irqreturn_t intel_mid_dma_interrupt1(int irq, void *data)
{
        return intel_mid_dma_interrupt(irq, data);
}

static irqreturn_t intel_mid_dma_interrupt2(int irq, void *data)
{
        return intel_mid_dma_interrupt(irq, data);
}
/**
 * mid_setup_dma -	Setup the DMA controller
 * @pdev: Controller PCI device structure
 *
 * Initialize the DMA controller, channels, registers with DMA engine,
 * ISR. Initialize DMA controller channels.
 */
static int mid_setup_dma(struct pci_dev *pdev)
{
        struct middma_device *dma = pci_get_drvdata(pdev);
        int err, i;
        unsigned int irq_level;

        /* DMA coherent memory pool for DMA descriptor allocations */
        dma->dma_pool = pci_pool_create("intel_mid_dma_desc_pool", pdev,
                                        sizeof(struct intel_mid_dma_desc),
                                        32, 0);
        if (NULL == dma->dma_pool) {
                pr_err("ERR_MDMA:pci_pool_create failed\n");
                err = -ENOMEM;
                goto err_dma_pool;
        }

        INIT_LIST_HEAD(&dma->common.channels);
        dma->pci_id = pdev->device;
        if (dma->pimr_mask) {
                dma->mask_reg = ioremap(LNW_PERIPHRAL_MASK_BASE,
                                        LNW_PERIPHRAL_MASK_SIZE);
                if (dma->mask_reg == NULL) {
                        pr_err("ERR_MDMA:Can't map peripheral intr space !!\n");
                        return -ENOMEM;
                }
        } else
                dma->mask_reg = NULL;

        pr_debug("MDMA:Adding %d channel for this controller\n", dma->max_chan);
        /*init CH structures*/
        dma->intr_mask = 0;
        dma->state = RUNNING;
        for (i = 0; i < dma->max_chan; i++) {
                struct intel_mid_dma_chan *midch = &dma->ch[i];

                midch->chan.device = &dma->common;
                midch->chan.cookie = 1;
                midch->chan.chan_id = i;
                midch->ch_id = dma->chan_base + i;
                pr_debug("MDMA:Init CH %d, ID %d\n", i, midch->ch_id);

                midch->dma_base = dma->dma_base;
                midch->ch_regs = dma->dma_base + DMA_CH_SIZE * midch->ch_id;
                midch->dma = dma;
                dma->intr_mask |= 1 << (dma->chan_base + i);
                spin_lock_init(&midch->lock);

                INIT_LIST_HEAD(&midch->active_list);
                INIT_LIST_HEAD(&midch->queue);
                INIT_LIST_HEAD(&midch->free_list);
                /*mask interrupts*/
                iowrite32(MASK_INTR_REG(midch->ch_id),
                        dma->dma_base + MASK_BLOCK);
                iowrite32(MASK_INTR_REG(midch->ch_id),
                        dma->dma_base + MASK_SRC_TRAN);
                iowrite32(MASK_INTR_REG(midch->ch_id),
                        dma->dma_base + MASK_DST_TRAN);
                iowrite32(MASK_INTR_REG(midch->ch_id),
                        dma->dma_base + MASK_ERR);
                iowrite32(MASK_INTR_REG(midch->ch_id),
                        dma->dma_base + MASK_TFR);

                disable_dma_interrupt(midch);
                list_add_tail(&midch->chan.device_node, &dma->common.channels);
        }
        pr_debug("MDMA: Calc Mask as %x for this controller\n", dma->intr_mask);

        /*init dma structure*/
        dma_cap_zero(dma->common.cap_mask);
        dma_cap_set(DMA_MEMCPY, dma->common.cap_mask);
        dma_cap_set(DMA_SLAVE, dma->common.cap_mask);
        dma_cap_set(DMA_PRIVATE, dma->common.cap_mask);
        dma->common.dev = &pdev->dev;
        dma->common.chancnt = dma->max_chan;

        dma->common.device_alloc_chan_resources =
                                        intel_mid_dma_alloc_chan_resources;
        dma->common.device_free_chan_resources =
                                        intel_mid_dma_free_chan_resources;

        dma->common.device_tx_status = intel_mid_dma_tx_status;
        dma->common.device_prep_dma_memcpy = intel_mid_dma_prep_memcpy;
        dma->common.device_issue_pending = intel_mid_dma_issue_pending;
        dma->common.device_prep_slave_sg = intel_mid_dma_prep_slave_sg;
        dma->common.device_control = intel_mid_dma_device_control;

        /*enable dma cntrl*/
        iowrite32(REG_BIT0, dma->dma_base + DMA_CFG);

        /*register irq*/
        if (dma->pimr_mask) {
                irq_level = IRQF_SHARED;
                pr_debug("MDMA:Requesting irq shared for DMAC1\n");
                err = request_irq(pdev->irq, intel_mid_dma_interrupt1,
                        IRQF_SHARED, "INTEL_MID_DMAC1", dma);
                if (0 != err)
                        goto err_irq;
        } else {
                dma->intr_mask = 0x03;
                irq_level = 0;
                pr_debug("MDMA:Requesting irq for DMAC2\n");
                err = request_irq(pdev->irq, intel_mid_dma_interrupt2,
                        0, "INTEL_MID_DMAC2", dma);
                if (0 != err)
                        goto err_irq;
        }
        /*register device w/ engine*/
        err = dma_async_device_register(&dma->common);
        if (0 != err) {
                pr_err("ERR_MDMA:device_register failed: %d\n", err);
                goto err_engine;
        }
        if (dma->pimr_mask) {
                pr_debug("setting up tasklet1 for DMAC1\n");
                tasklet_init(&dma->tasklet, dma_tasklet1, (unsigned long)dma);
        } else {
                pr_debug("setting up tasklet2 for DMAC2\n");
                tasklet_init(&dma->tasklet, dma_tasklet2, (unsigned long)dma);
        }
        return 0;

err_engine:
        free_irq(pdev->irq, dma);
err_irq:
        pci_pool_destroy(dma->dma_pool);
err_dma_pool:
        pr_err("ERR_MDMA:setup_dma failed: %d\n", err);
        return err;
}
/**
 * middma_shutdown -	Shutdown the DMA controller
 * @pdev: Controller PCI device structure
 *
 * Called by remove
 * Unregister DMA controller, clear all structures and free interrupt
 */
static void middma_shutdown(struct pci_dev *pdev)
{
        struct middma_device *device = pci_get_drvdata(pdev);

        dma_async_device_unregister(&device->common);
        pci_pool_destroy(device->dma_pool);
        if (device->mask_reg)
                iounmap(device->mask_reg);
        if (device->dma_base)
                iounmap(device->dma_base);
        free_irq(pdev->irq, device);
}
/**
 * intel_mid_dma_probe -	PCI Probe
 * @pdev: Controller PCI device structure
 * @id: pci device id structure
 *
 * Initialize the PCI device, map BARs, query driver data.
 * Call setup_dma to complete controller and chan initialization
 */
static int __devinit intel_mid_dma_probe(struct pci_dev *pdev,
                                        const struct pci_device_id *id)
{
        struct middma_device *device;
        u32 base_addr, bar_size;
        struct intel_mid_dma_probe_info *info;
        int err;

        pr_debug("MDMA: probe for %x\n", pdev->device);
        info = (void *)id->driver_data;
        pr_debug("MDMA: CH %d, base %d, block len %d, Peripheral mask %x\n",
                                info->max_chan, info->ch_base,
                                info->block_size, info->pimr_mask);

        err = pci_enable_device(pdev);
        if (err)
                goto err_enable_device;

        err = pci_request_regions(pdev, "intel_mid_dmac");
        if (err)
                goto err_request_regions;

        err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
        if (err)
                goto err_set_dma_mask;

        err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32));
        if (err)
                goto err_set_dma_mask;

        device = kzalloc(sizeof(*device), GFP_KERNEL);
        if (!device) {
                pr_err("ERR_MDMA:kzalloc failed probe\n");
                err = -ENOMEM;
                goto err_kzalloc;
        }
        device->pdev = pci_dev_get(pdev);

        base_addr = pci_resource_start(pdev, 0);
        bar_size  = pci_resource_len(pdev, 0);
        device->dma_base = ioremap_nocache(base_addr, DMA_REG_SIZE);
        if (!device->dma_base) {
                pr_err("ERR_MDMA:ioremap failed\n");
                err = -ENOMEM;
                goto err_ioremap;
        }
        pci_set_drvdata(pdev, device);
        pci_set_master(pdev);
        device->max_chan = info->max_chan;
        device->chan_base = info->ch_base;
        device->block_size = info->block_size;
        device->pimr_mask = info->pimr_mask;

        err = mid_setup_dma(pdev);
        if (err)
                goto err_dma;

        pm_runtime_set_active(&pdev->dev);
        pm_runtime_enable(&pdev->dev);
        pm_runtime_allow(&pdev->dev);
        return 0;

err_dma:
        iounmap(device->dma_base);
err_ioremap:
        pci_dev_put(pdev);
        kfree(device);
err_kzalloc:
err_set_dma_mask:
        pci_release_regions(pdev);
        pci_disable_device(pdev);
err_request_regions:
err_enable_device:
        pr_err("ERR_MDMA:Probe failed %d\n", err);
        return err;
}
/**
 * intel_mid_dma_remove -	PCI remove
 * @pdev: Controller PCI device structure
 *
 * Free up all resources and data
 * Call shutdown_dma to complete controller and chan cleanup
 */
static void __devexit intel_mid_dma_remove(struct pci_dev *pdev)
{
        struct middma_device *device = pci_get_drvdata(pdev);
        middma_shutdown(pdev);
        pci_dev_put(pdev);
        kfree(device);
        pci_release_regions(pdev);
        pci_disable_device(pdev);
}
/* Power Management */
/*
 * dma_suspend - PCI suspend function
 *
 * @pci: PCI device structure
 * @state: PM message
 *
 * This function is called by OS when a power event occurs
 */
int dma_suspend(struct pci_dev *pci, pm_message_t state)
{
        int i;
        struct middma_device *device = pci_get_drvdata(pci);
        pr_debug("MDMA: dma_suspend called\n");

        for (i = 0; i < device->max_chan; i++) {
                if (device->ch[i].in_use)
                        return -EAGAIN;
        }
        device->state = SUSPENDED;
        pci_set_drvdata(pci, device);
        pci_save_state(pci);
        pci_disable_device(pci);
        pci_set_power_state(pci, PCI_D3hot);
        return 0;
}
/**
 * dma_resume - PCI resume function
 *
 * @pci: PCI device structure
 *
 * This function is called by OS when a power event occurs
 */
int dma_resume(struct pci_dev *pci)
{
        int ret;
        struct middma_device *device = pci_get_drvdata(pci);

        pr_debug("MDMA: dma_resume called\n");
        pci_set_power_state(pci, PCI_D0);
        pci_restore_state(pci);
        ret = pci_enable_device(pci);
        if (ret) {
                pr_err("MDMA: device can't be enabled for %x\n", pci->device);
                return ret;
        }
        device->state = RUNNING;
        iowrite32(REG_BIT0, device->dma_base + DMA_CFG);
        pci_set_drvdata(pci, device);
        return 0;
}
static int dma_runtime_suspend(struct device *dev)
{
        struct pci_dev *pci_dev = to_pci_dev(dev);
        return dma_suspend(pci_dev, PMSG_SUSPEND);
}

static int dma_runtime_resume(struct device *dev)
{
        struct pci_dev *pci_dev = to_pci_dev(dev);
        return dma_resume(pci_dev);
}

static int dma_runtime_idle(struct device *dev)
{
        struct pci_dev *pdev = to_pci_dev(dev);
        struct middma_device *device = pci_get_drvdata(pdev);
        int i;

        for (i = 0; i < device->max_chan; i++) {
                if (device->ch[i].in_use)
                        return -EAGAIN;
        }

        return pm_schedule_suspend(dev, 0);
}
/******************************************************************************
* PCI stuff
*/
static struct pci_device_id intel_mid_dma_ids[] = {
        { PCI_VDEVICE(INTEL, INTEL_MID_DMAC1_ID),	INFO(2, 6, 4095, 0x200020)},
        { PCI_VDEVICE(INTEL, INTEL_MID_DMAC2_ID),	INFO(2, 0, 2047, 0)},
        { PCI_VDEVICE(INTEL, INTEL_MID_GP_DMAC2_ID),	INFO(2, 0, 2047, 0)},
        { PCI_VDEVICE(INTEL, INTEL_MFLD_DMAC1_ID),	INFO(4, 0, 4095, 0x400040)},
        { 0, }
};
MODULE_DEVICE_TABLE(pci, intel_mid_dma_ids);

static const struct dev_pm_ops intel_mid_dma_pm = {
        .runtime_suspend = dma_runtime_suspend,
        .runtime_resume = dma_runtime_resume,
        .runtime_idle = dma_runtime_idle,
};

static struct pci_driver intel_mid_dma_pci = {
        .name		= "Intel MID DMA",
        .id_table	= intel_mid_dma_ids,
        .probe		= intel_mid_dma_probe,
        .remove		= __devexit_p(intel_mid_dma_remove),
#ifdef CONFIG_PM
        .suspend	= dma_suspend,
        .resume		= dma_resume,
        .driver		= {
                .pm	= &intel_mid_dma_pm,
        },
#endif
};

static int __init intel_mid_dma_init(void)
{
        pr_debug("INFO_MDMA: LNW DMA Driver Version %s\n",
                        INTEL_MID_DMA_DRIVER_VERSION);
        return pci_register_driver(&intel_mid_dma_pci);
}
fs_initcall(intel_mid_dma_init);

static void __exit intel_mid_dma_exit(void)
{
        pci_unregister_driver(&intel_mid_dma_pci);
}
module_exit(intel_mid_dma_exit);

MODULE_AUTHOR("Vinod Koul <vinod.koul@intel.com>");
MODULE_DESCRIPTION("Intel (R) MID DMAC Driver");
MODULE_LICENSE("GPL v2");
MODULE_VERSION(INTEL_MID_DMA_DRIVER_VERSION);