dma: mmp_pdma: add support for cyclic DMA descriptors
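
Cyclic transfers, as needed for endless ring-buffer clients such as audio
drivers, are built by chaining one hardware descriptor per period and
pointing the last descriptor's ddadr back to the first, so the chain loops
until the channel is stopped. Every period descriptor carries
DCMD_ENDIRQEN, and the interrupt tasklet detects cyclic channels via
chan->cyclic_first: instead of completing cookies and freeing descriptors,
it invokes the client callback once per period. Because interrupt
generation is now a per-descriptor property, the old append_pending_queue()
helper (which forced one IRQ per queue) is dropped in favor of a plain
splice onto the pending list in tx_submit().

The combined diff shown here apparently spans a few related changes as
well: per-channel byte-aligned transfers via the DALGN register whenever a
source or destination address is not 8-byte aligned, a per-descriptor
length limit raised from 0x1000 to the full DCMD length mask (8K - 1),
device-tree channel translation (mmp_pdma_dma_xlate), and an exported
filter function (mmp_pdma_filter_fn) for non-DT platforms.

From a client's point of view nothing driver-specific is required for
cyclic mode; below is a minimal sketch of how e.g. an audio driver might
start such a transfer through the generic dmaengine API (the function name
and parameter values are illustrative, not taken from this driver):

    #include <linux/dmaengine.h>

    /* Sketch: loop a DMA-mapped ring buffer into a device FIFO and get
     * one callback per elapsed period. All names are hypothetical. */
    static int start_cyclic_tx(struct dma_chan *chan, dma_addr_t buf,
                               size_t buf_len, size_t period_len,
                               dma_addr_t fifo_addr,
                               dma_async_tx_callback period_cb, void *arg)
    {
            struct dma_slave_config cfg = {
                    .direction      = DMA_MEM_TO_DEV,
                    .dst_addr       = fifo_addr,
                    .dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES,
                    .dst_maxburst   = 32,
            };
            struct dma_async_tx_descriptor *tx;
            int ret;

            ret = dmaengine_slave_config(chan, &cfg);
            if (ret)
                    return ret;

            /* mmp_pdma rejects period_len > PDMA_MAX_DESC_BYTES and
             * buffers that are not a multiple of period_len */
            tx = dmaengine_prep_dma_cyclic(chan, buf, buf_len, period_len,
                                           DMA_MEM_TO_DEV,
                                           DMA_PREP_INTERRUPT);
            if (!tx)
                    return -ENOMEM;

            tx->callback = period_cb;
            tx->callback_param = arg;
            dmaengine_submit(tx);
            dma_async_issue_pending(chan);

            return 0;
    }

---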
diff --git a/drivers/dma/mmp_pdma.c b/drivers/dma/mmp_pdma.c
index 3c2ad72a1621392a3c51e969a8d9dc4b9a6f9e17..f0e6d7d49b065e79ae79c099c0adc727b3ae4b39 100644
--- a/drivers/dma/mmp_pdma.c
+++ b/drivers/dma/mmp_pdma.c
@@ -18,7 +18,9 @@
 #include <linux/platform_data/mmp_dma.h>
 #include <linux/dmapool.h>
 #include <linux/of_device.h>
+#include <linux/of_dma.h>
 #include <linux/of.h>
+#include <linux/dma/mmp-pdma.h>
 
 #include "dmaengine.h"
 
@@ -71,7 +73,7 @@
 #define DCMD_LENGTH    0x01fff         /* length mask (max = 8K - 1) */
 
 #define PDMA_ALIGNMENT         3
-#define PDMA_MAX_DESC_BYTES    0x1000
+#define PDMA_MAX_DESC_BYTES    DCMD_LENGTH
 
 struct mmp_pdma_desc_hw {
        u32 ddadr;      /* Points to the next descriptor + flags */
@@ -96,6 +98,9 @@ struct mmp_pdma_chan {
        struct mmp_pdma_phy *phy;
        enum dma_transfer_direction dir;
 
+       struct mmp_pdma_desc_sw *cyclic_first;  /* first desc_sw if channel
+                                                * is in cyclic mode */
+
        /* channel's basic info */
        struct tasklet_struct tasklet;
        u32 dcmd;
@@ -107,6 +112,7 @@ struct mmp_pdma_chan {
        struct list_head chain_pending; /* Link descriptors queue for pending */
        struct list_head chain_running; /* Link descriptors queue for running */
        bool idle;                      /* channel state machine */
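+       /* true if any descriptor in the current chain has a source or
+        * destination address that is not 8-byte aligned */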
+       bool byte_align;
 
        struct dma_pool *desc_pool;     /* Descriptors pool */
 };
@@ -140,7 +146,7 @@ static void set_desc(struct mmp_pdma_phy *phy, dma_addr_t addr)
 
 static void enable_chan(struct mmp_pdma_phy *phy)
 {
-       u32 reg;
+       u32 reg, dalgn;
 
        if (!phy->vchan)
                return;
@@ -148,6 +154,13 @@ static void enable_chan(struct mmp_pdma_phy *phy)
        reg = DRCMR(phy->vchan->drcmr);
        writel(DRCMR_MAPVLD | phy->idx, phy->base + reg);
 
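+       /* mirror the channel's byte_align flag into its bit of the
+        * DALGN (DMA alignment) register before starting the channel */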
+       dalgn = readl(phy->base + DALGN);
+       if (phy->vchan->byte_align)
+               dalgn |= 1 << phy->idx;
+       else
+               dalgn &= ~(1 << phy->idx);
+       writel(dalgn, phy->base + DALGN);
+
        reg = (phy->idx << 2) + DCSR;
        writel(readl(phy->base + reg) | DCSR_RUN,
                                        phy->base + reg);
@@ -269,25 +282,6 @@ static void mmp_pdma_free_phy(struct mmp_pdma_chan *pchan)
        spin_unlock_irqrestore(&pdev->phy_lock, flags);
 }
 
-/* desc->tx_list ==> pending list */
-static void append_pending_queue(struct mmp_pdma_chan *chan,
-                                       struct mmp_pdma_desc_sw *desc)
-{
-       struct mmp_pdma_desc_sw *tail =
-                               to_mmp_pdma_desc(chan->chain_pending.prev);
-
-       if (list_empty(&chan->chain_pending))
-               goto out_splice;
-
-       /* one irq per queue, even appended */
-       tail->desc.ddadr = desc->async_tx.phys;
-       tail->desc.dcmd &= ~DCMD_ENDIRQEN;
-
-       /* softly link to pending list */
-out_splice:
-       list_splice_tail_init(&desc->tx_list, &chan->chain_pending);
-}
-
 /**
  * start_pending_queue - transfer any pending transactions
  * pending list ==> running list
@@ -350,7 +344,8 @@ static dma_cookie_t mmp_pdma_tx_submit(struct dma_async_tx_descriptor *tx)
                cookie = dma_cookie_assign(&child->async_tx);
        }
 
-       append_pending_queue(chan, desc);
+       /* softly link to pending list - desc->tx_list ==> pending list */
+       list_splice_tail_init(&desc->tx_list, &chan->chain_pending);
 
        spin_unlock_irqrestore(&chan->desc_lock, flags);
 
@@ -453,6 +448,7 @@ mmp_pdma_prep_memcpy(struct dma_chan *dchan,
                return NULL;
 
        chan = to_mmp_pdma_chan(dchan);
+       chan->byte_align = false;
 
        if (!chan->dir) {
                chan->dir = DMA_MEM_TO_MEM;
@@ -469,6 +465,8 @@ mmp_pdma_prep_memcpy(struct dma_chan *dchan,
                }
 
                copy = min_t(size_t, len, PDMA_MAX_DESC_BYTES);
+               if (dma_src & 0x7 || dma_dst & 0x7)
+                       chan->byte_align = true;
 
                new->desc.dcmd = chan->dcmd | (DCMD_LENGTH & copy);
                new->desc.dsadr = dma_src;
@@ -505,6 +503,8 @@ mmp_pdma_prep_memcpy(struct dma_chan *dchan,
        new->desc.ddadr = DDADR_STOP;
        new->desc.dcmd |= DCMD_ENDIRQEN;
 
+       chan->cyclic_first = NULL;
+
        return &first->async_tx;
 
 fail:
@@ -528,12 +528,16 @@ mmp_pdma_prep_slave_sg(struct dma_chan *dchan, struct scatterlist *sgl,
        if ((sgl == NULL) || (sg_len == 0))
                return NULL;
 
+       chan->byte_align = false;
+
        for_each_sg(sgl, sg, sg_len, i) {
                addr = sg_dma_address(sg);
        avail = sg_dma_len(sg);
 
                do {
                        len = min_t(size_t, avail, PDMA_MAX_DESC_BYTES);
+                       if (addr & 0x7)
+                               chan->byte_align = true;
 
                        /* allocate and populate the descriptor */
                        new = mmp_pdma_alloc_descriptor(chan);
@@ -576,6 +580,94 @@ mmp_pdma_prep_slave_sg(struct dma_chan *dchan, struct scatterlist *sgl,
        new->desc.ddadr = DDADR_STOP;
        new->desc.dcmd |= DCMD_ENDIRQEN;
 
+       chan->dir = dir;
+       chan->cyclic_first = NULL;
+
+       return &first->async_tx;
+
+fail:
+       if (first)
+               mmp_pdma_free_desc_list(chan, &first->tx_list);
+       return NULL;
+}
+
+static struct dma_async_tx_descriptor *mmp_pdma_prep_dma_cyclic(
+       struct dma_chan *dchan, dma_addr_t buf_addr, size_t len,
+       size_t period_len, enum dma_transfer_direction direction,
+       unsigned long flags, void *context)
+{
+       struct mmp_pdma_chan *chan;
+       struct mmp_pdma_desc_sw *first = NULL, *prev = NULL, *new;
+       dma_addr_t dma_src, dma_dst;
+
+       if (!dchan || !len || !period_len)
+               return NULL;
+
+       /* the buffer length must be a multiple of period_len */
+       if (len % period_len != 0)
+               return NULL;
+
+       if (period_len > PDMA_MAX_DESC_BYTES)
+               return NULL;
+
+       chan = to_mmp_pdma_chan(dchan);
+
+       switch (direction) {
+       case DMA_MEM_TO_DEV:
+               dma_src = buf_addr;
+               dma_dst = chan->dev_addr;
+               break;
+       case DMA_DEV_TO_MEM:
+               dma_dst = buf_addr;
+               dma_src = chan->dev_addr;
+               break;
+       default:
+               dev_err(chan->dev, "Unsupported direction for cyclic DMA\n");
+               return NULL;
+       }
+
+       chan->dir = direction;
+
+       do {
+               /* Allocate the link descriptor from DMA pool */
+               new = mmp_pdma_alloc_descriptor(chan);
+               if (!new) {
+                       dev_err(chan->dev, "no memory for desc\n");
+                       goto fail;
+               }
+
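+               /* unlike the memcpy/slave_sg paths, every period descriptor
+                * gets DCMD_ENDIRQEN so that an interrupt fires (and the
+                * client callback runs) once per elapsed period */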
+               new->desc.dcmd = chan->dcmd | DCMD_ENDIRQEN |
+                                       (DCMD_LENGTH & period_len);
+               new->desc.dsadr = dma_src;
+               new->desc.dtadr = dma_dst;
+
+               if (!first)
+                       first = new;
+               else
+                       prev->desc.ddadr = new->async_tx.phys;
+
+               new->async_tx.cookie = 0;
+               async_tx_ack(&new->async_tx);
+
+               prev = new;
+               len -= period_len;
+
+               if (chan->dir == DMA_MEM_TO_DEV)
+                       dma_src += period_len;
+               else
+                       dma_dst += period_len;
+
+               /* Insert the link descriptor to the LD ring */
+               list_add_tail(&new->node, &first->tx_list);
+       } while (len);
+
+       first->async_tx.flags = flags; /* client is in control of this ack */
+       first->async_tx.cookie = -EBUSY;
+
+       /* make the cyclic link */
+       new->desc.ddadr = first->async_tx.phys;
+       chan->cyclic_first = first;
+
        return &first->async_tx;
 
 fail:
@@ -635,8 +727,13 @@ static int mmp_pdma_control(struct dma_chan *dchan, enum dma_ctrl_cmd cmd,
                        chan->dcmd |= DCMD_BURST32;
 
                chan->dir = cfg->direction;
-               chan->drcmr = cfg->slave_id;
                chan->dev_addr = addr;
+               /* FIXME: drivers should be ported over to use the filter
+                * function. Once that's done, the following two lines can
+                * be removed.
+                */
+               if (cfg->slave_id)
+                       chan->drcmr = cfg->slave_id;
                break;
        default:
                return -ENOSYS;
@@ -677,29 +774,51 @@ static void dma_do_tasklet(unsigned long data)
        LIST_HEAD(chain_cleanup);
        unsigned long flags;
 
-       /* submit pending list; callback for each desc; free desc */
+       if (chan->cyclic_first) {
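+               /* cyclic descriptors are never retired; just fetch the
+                * client callback under the lock and invoke it once for
+                * the period that just completed */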
+               dma_async_tx_callback cb = NULL;
+               void *cb_data = NULL;
 
-       spin_lock_irqsave(&chan->desc_lock, flags);
+               spin_lock_irqsave(&chan->desc_lock, flags);
+               desc = chan->cyclic_first;
+               cb = desc->async_tx.callback;
+               cb_data = desc->async_tx.callback_param;
+               spin_unlock_irqrestore(&chan->desc_lock, flags);
 
-       /* update the cookie if we have some descriptors to cleanup */
-       if (!list_empty(&chan->chain_running)) {
-               dma_cookie_t cookie;
+               if (cb)
+                       cb(cb_data);
 
-               desc = to_mmp_pdma_desc(chan->chain_running.prev);
-               cookie = desc->async_tx.cookie;
-               dma_cookie_complete(&desc->async_tx);
+               return;
+       }
+
+       /* submit pending list; callback for each desc; free desc */
+       spin_lock_irqsave(&chan->desc_lock, flags);
 
-               dev_dbg(chan->dev, "completed_cookie=%d\n", cookie);
+       list_for_each_entry_safe(desc, _desc, &chan->chain_running, node) {
+               /*
+                * move the descriptors to a temporary list so we can drop
+                * the lock during the entire cleanup operation
+                */
+               list_del(&desc->node);
+               list_add(&desc->node, &chain_cleanup);
+
+               /*
+                * Look for the first list entry which has the ENDIRQEN flag
+                * set. That is the descriptor we got an interrupt for, so
+                * complete that transaction and its cookie.
+                */
+               if (desc->desc.dcmd & DCMD_ENDIRQEN) {
+                       dma_cookie_t cookie = desc->async_tx.cookie;
+                       dma_cookie_complete(&desc->async_tx);
+                       dev_dbg(chan->dev, "completed_cookie=%d\n", cookie);
+                       break;
+               }
        }
 
        /*
-        * move the descriptors to a temporary list so we can drop the lock
-        * during the entire cleanup operation
+        * The hardware is idle and ready for more when the
+        * chain_running list is empty.
         */
-       list_splice_tail_init(&chan->chain_running, &chain_cleanup);
-
-       /* the hardware is now idle and ready for more */
-       chan->idle = true;
+       chan->idle = list_empty(&chan->chain_running);
 
        /* Start any pending transactions automatically */
        start_pending_queue(chan);
@@ -771,6 +890,39 @@ static struct of_device_id mmp_pdma_dt_ids[] = {
 };
 MODULE_DEVICE_TABLE(of, mmp_pdma_dt_ids);
 
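+/*
+ * Translate a device-tree dma-spec into a channel: the first cell of the
+ * spec (dma_spec->args[0]) is the DRCMR request line to bind to whichever
+ * free channel we manage to reserve.
+ */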
+static struct dma_chan *mmp_pdma_dma_xlate(struct of_phandle_args *dma_spec,
+                                          struct of_dma *ofdma)
+{
+       struct mmp_pdma_device *d = ofdma->of_dma_data;
+       struct dma_chan *chan, *candidate;
+
+retry:
+       candidate = NULL;
+
+       /* walk the list of channels registered with the current instance and
+        * find one that is currently unused */
+       list_for_each_entry(chan, &d->device.channels, device_node)
+               if (chan->client_count == 0) {
+                       candidate = chan;
+                       break;
+               }
+
+       if (!candidate)
+               return NULL;
+
+       /* dma_get_slave_channel will return NULL if we lost a race between
+        * the lookup and the reservation */
+       chan = dma_get_slave_channel(candidate);
+
+       if (chan) {
+               struct mmp_pdma_chan *c = to_mmp_pdma_chan(chan);
+               c->drcmr = dma_spec->args[0];
+               return chan;
+       }
+
+       goto retry;
+}
+
 static int mmp_pdma_probe(struct platform_device *op)
 {
        struct mmp_pdma_device *pdev;
@@ -788,9 +940,6 @@ static int mmp_pdma_probe(struct platform_device *op)
        spin_lock_init(&pdev->phy_lock);
 
        iores = platform_get_resource(op, IORESOURCE_MEM, 0);
-       if (!iores)
-               return -EINVAL;
-
        pdev->base = devm_ioremap_resource(pdev->dev, iores);
        if (IS_ERR(pdev->base))
                return PTR_ERR(pdev->base);
@@ -835,13 +984,14 @@ static int mmp_pdma_probe(struct platform_device *op)
 
        dma_cap_set(DMA_SLAVE, pdev->device.cap_mask);
        dma_cap_set(DMA_MEMCPY, pdev->device.cap_mask);
-       dma_cap_set(DMA_SLAVE, pdev->device.cap_mask);
+       dma_cap_set(DMA_CYCLIC, pdev->device.cap_mask);
        pdev->device.dev = &op->dev;
        pdev->device.device_alloc_chan_resources = mmp_pdma_alloc_chan_resources;
        pdev->device.device_free_chan_resources = mmp_pdma_free_chan_resources;
        pdev->device.device_tx_status = mmp_pdma_tx_status;
        pdev->device.device_prep_dma_memcpy = mmp_pdma_prep_memcpy;
        pdev->device.device_prep_slave_sg = mmp_pdma_prep_slave_sg;
+       pdev->device.device_prep_dma_cyclic = mmp_pdma_prep_dma_cyclic;
        pdev->device.device_issue_pending = mmp_pdma_issue_pending;
        pdev->device.device_control = mmp_pdma_control;
        pdev->device.copy_align = PDMA_ALIGNMENT;
@@ -857,7 +1007,17 @@ static int mmp_pdma_probe(struct platform_device *op)
                return ret;
        }
 
-       dev_info(pdev->device.dev, "initialized\n");
+       if (op->dev.of_node) {
+               /* Device-tree DMA controller registration */
+               ret = of_dma_controller_register(op->dev.of_node,
+                                                mmp_pdma_dma_xlate, pdev);
+               if (ret < 0) {
+                       dev_err(&op->dev, "of_dma_controller_register failed\n");
+                       return ret;
+               }
+       }
+
+       dev_info(pdev->device.dev, "initialized %d channels\n", dma_channels);
        return 0;
 }
 
@@ -877,6 +1037,19 @@ static struct platform_driver mmp_pdma_driver = {
        .remove         = mmp_pdma_remove,
 };
 
+bool mmp_pdma_filter_fn(struct dma_chan *chan, void *param)
+{
+       struct mmp_pdma_chan *c = to_mmp_pdma_chan(chan);
+
+       if (chan->device->dev->driver != &mmp_pdma_driver.driver)
+               return false;
+
+       c->drcmr = *(unsigned int *) param;
+
+       return true;
+}
+EXPORT_SYMBOL_GPL(mmp_pdma_filter_fn);
+
 module_platform_driver(mmp_pdma_driver);
 
 MODULE_DESCRIPTION("MARVELL MMP Peripheral DMA Driver");
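
Until the slave_id FIXME above is resolved, non-DT clients are expected to
claim channels through the exported filter function; a minimal sketch of
such a client (names hypothetical):

    #include <linux/dmaengine.h>
    #include <linux/dma/mmp-pdma.h>

    /* Sketch: reserve any mmp_pdma channel and bind it to a given DRCMR
     * request line via the exported filter function. */
    static struct dma_chan *request_pdma_channel(unsigned int drcmr)
    {
            dma_cap_mask_t mask;

            dma_cap_zero(mask);
            dma_cap_set(DMA_SLAVE, mask);

            /* mmp_pdma_filter_fn() skips channels that belong to other
             * drivers and stores drcmr in the channel it accepts */
            return dma_request_channel(mask, mmp_pdma_filter_fn, &drcmr);
    }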