Merge branch 'topic/core' into for-linus
author Vinod Koul <vinod.koul@intel.com>
Tue, 17 May 2016 04:43:40 +0000 (10:13 +0530)
committer Vinod Koul <vinod.koul@intel.com>
Tue, 17 May 2016 04:43:40 +0000 (10:13 +0530)
drivers/dma/dmaengine.c
drivers/dma/edma.c
drivers/dma/fsldma.c
drivers/dma/ioat/init.c
drivers/dma/ioat/registers.h
drivers/dma/mmp_pdma.c

index 2432c2a5557057cd1367f272df955f3b09de5506..8c9f45fd55fc16b3527e256591383583a2490d80 100644 (file)
@@ -289,7 +289,7 @@ enum dma_status dma_sync_wait(struct dma_chan *chan, dma_cookie_t cookie)
        do {
                status = dma_async_is_tx_complete(chan, cookie, NULL, NULL);
                if (time_after_eq(jiffies, dma_sync_wait_timeout)) {
-                       pr_err("%s: timeout!\n", __func__);
+                       dev_err(chan->device->dev, "%s: timeout!\n", __func__);
                        return DMA_ERROR;
                }
                if (status != DMA_IN_PROGRESS)
@@ -519,7 +519,7 @@ static struct dma_chan *private_candidate(const dma_cap_mask_t *mask,
        struct dma_chan *chan;
 
        if (mask && !__dma_device_satisfies_mask(dev, mask)) {
-               pr_debug("%s: wrong capabilities\n", __func__);
+               dev_dbg(dev->dev, "%s: wrong capabilities\n", __func__);
                return NULL;
        }
        /* devices with multiple channels need special handling as we need to
@@ -534,12 +534,12 @@ static struct dma_chan *private_candidate(const dma_cap_mask_t *mask,
 
        list_for_each_entry(chan, &dev->channels, device_node) {
                if (chan->client_count) {
-                       pr_debug("%s: %s busy\n",
+                       dev_dbg(dev->dev, "%s: %s busy\n",
                                 __func__, dma_chan_name(chan));
                        continue;
                }
                if (fn && !fn(chan, fn_param)) {
-                       pr_debug("%s: %s filter said false\n",
+                       dev_dbg(dev->dev, "%s: %s filter said false\n",
                                 __func__, dma_chan_name(chan));
                        continue;
                }
@@ -568,11 +568,12 @@ static struct dma_chan *find_candidate(struct dma_device *device,
 
                if (err) {
                        if (err == -ENODEV) {
-                               pr_debug("%s: %s module removed\n", __func__,
-                                        dma_chan_name(chan));
+                               dev_dbg(device->dev, "%s: %s module removed\n",
+                                       __func__, dma_chan_name(chan));
                                list_del_rcu(&device->global_node);
                        } else
-                               pr_debug("%s: failed to get %s: (%d)\n",
+                               dev_dbg(device->dev,
+                                       "%s: failed to get %s: (%d)\n",
                                         __func__, dma_chan_name(chan), err);
 
                        if (--device->privatecnt == 0)
@@ -603,7 +604,8 @@ struct dma_chan *dma_get_slave_channel(struct dma_chan *chan)
                device->privatecnt++;
                err = dma_chan_get(chan);
                if (err) {
-                       pr_debug("%s: failed to get %s: (%d)\n",
+                       dev_dbg(chan->device->dev,
+                               "%s: failed to get %s: (%d)\n",
                                __func__, dma_chan_name(chan), err);
                        chan = NULL;
                        if (--device->privatecnt == 0)
@@ -815,8 +817,9 @@ void dmaengine_get(void)
                                list_del_rcu(&device->global_node);
                                break;
                        } else if (err)
-                               pr_debug("%s: failed to get %s: (%d)\n",
-                                      __func__, dma_chan_name(chan), err);
+                               dev_dbg(chan->device->dev,
+                                       "%s: failed to get %s: (%d)\n",
+                                       __func__, dma_chan_name(chan), err);
                }
        }
 
@@ -1223,8 +1226,9 @@ dma_wait_for_async_tx(struct dma_async_tx_descriptor *tx)
 
        while (tx->cookie == -EBUSY) {
                if (time_after_eq(jiffies, dma_sync_wait_timeout)) {
-                       pr_err("%s timeout waiting for descriptor submission\n",
-                              __func__);
+                       dev_err(tx->chan->device->dev,
+                               "%s timeout waiting for descriptor submission\n",
+                               __func__);
                        return DMA_ERROR;
                }
                cpu_relax();
index ee3463e774f8e4dc5c46e2614719f9d0723cfd06..694c44e487ed284cf4c50771f4fd67dd1cf3acc4 100644 (file)
@@ -1518,8 +1518,17 @@ static irqreturn_t dma_ccerr_handler(int irq, void *data)
 
        dev_vdbg(ecc->dev, "dma_ccerr_handler\n");
 
-       if (!edma_error_pending(ecc))
+       if (!edma_error_pending(ecc)) {
+               /*
+                * The registers indicate no pending error event but the irq
+                * handler has been called.
+                * Ask eDMA to re-evaluate the error registers.
+                */
+               dev_err(ecc->dev, "%s: Error interrupt without error event!\n",
+                       __func__);
+               edma_write(ecc, EDMA_EEVAL, 1);
                return IRQ_NONE;
+       }
 
        while (1) {
                /* Event missed register(s) */
index aac85c30c2cf6fc64669841c5b6abb5317df1b94..a8828ed639b3027c476fc7d82d4d6607221b2ae1 100644 (file)
@@ -462,13 +462,12 @@ static struct fsl_desc_sw *fsl_dma_alloc_descriptor(struct fsldma_chan *chan)
        struct fsl_desc_sw *desc;
        dma_addr_t pdesc;
 
-       desc = dma_pool_alloc(chan->desc_pool, GFP_ATOMIC, &pdesc);
+       desc = dma_pool_zalloc(chan->desc_pool, GFP_ATOMIC, &pdesc);
        if (!desc) {
                chan_dbg(chan, "out of memory for link descriptor\n");
                return NULL;
        }
 
-       memset(desc, 0, sizeof(*desc));
        INIT_LIST_HEAD(&desc->tx_list);
        dma_async_tx_descriptor_init(&desc->async_tx, &chan->common);
        desc->async_tx.tx_submit = fsl_dma_tx_submit;
index efdee1a69fc4af12d7e2db0663edb06e9051196e..d406056e889246d1ec8e03dab74d0727046fd192 100644 (file)
@@ -690,12 +690,11 @@ static int ioat_alloc_chan_resources(struct dma_chan *c)
        /* allocate a completion writeback area */
        /* doing 2 32bit writes to mmio since 1 64b write doesn't work */
        ioat_chan->completion =
-               dma_pool_alloc(ioat_chan->ioat_dma->completion_pool,
-                              GFP_KERNEL, &ioat_chan->completion_dma);
+               dma_pool_zalloc(ioat_chan->ioat_dma->completion_pool,
+                               GFP_KERNEL, &ioat_chan->completion_dma);
        if (!ioat_chan->completion)
                return -ENOMEM;
 
-       memset(ioat_chan->completion, 0, sizeof(*ioat_chan->completion));
        writel(((u64)ioat_chan->completion_dma) & 0x00000000FFFFFFFF,
               ioat_chan->reg_base + IOAT_CHANCMP_OFFSET_LOW);
        writel(((u64)ioat_chan->completion_dma) >> 32,
@@ -1074,6 +1073,7 @@ static int ioat3_dma_probe(struct ioatdma_device *ioat_dma, int dca)
        struct ioatdma_chan *ioat_chan;
        bool is_raid_device = false;
        int err;
+       u16 val16;
 
        dma = &ioat_dma->dma_dev;
        dma->device_prep_dma_memcpy = ioat_dma_prep_memcpy_lock;
@@ -1173,6 +1173,17 @@ static int ioat3_dma_probe(struct ioatdma_device *ioat_dma, int dca)
        if (dca)
                ioat_dma->dca = ioat_dca_init(pdev, ioat_dma->reg_base);
 
+       /* disable relaxed ordering */
+       err = pcie_capability_read_word(pdev, IOAT_DEVCTRL_OFFSET, &val16);
+       if (err)
+               return err;
+
+       /* clear relaxed ordering enable */
+       val16 &= ~IOAT_DEVCTRL_ROE;
+       err = pcie_capability_write_word(pdev, IOAT_DEVCTRL_OFFSET, val16);
+       if (err)
+               return err;
+
        return 0;
 }
 
index 4994a3623aee43fb09d88d62314cc9c2cb2472c4..70534981a49bbf453be6b0bb90434402e66bb4f9 100644 (file)
 #define IOAT_PCI_CHANERR_INT_OFFSET            0x180
 #define IOAT_PCI_CHANERRMASK_INT_OFFSET                0x184
 
+/* PCIe config registers */
+
+/* EXPCAPID + N */
+#define IOAT_DEVCTRL_OFFSET                    0x8
+/* relaxed ordering enable */
+#define IOAT_DEVCTRL_ROE                       0x10
+
 /* MMIO Device Registers */
 #define IOAT_CHANCNT_OFFSET                    0x00    /*  8-bit */
 
index e39457f13d4dd4d923b022312f883dfc39f99ba7..56f1fd68b6205af0de5c6f55687305c42de0f1e9 100644 (file)
@@ -364,13 +364,12 @@ mmp_pdma_alloc_descriptor(struct mmp_pdma_chan *chan)
        struct mmp_pdma_desc_sw *desc;
        dma_addr_t pdesc;
 
-       desc = dma_pool_alloc(chan->desc_pool, GFP_ATOMIC, &pdesc);
+       desc = dma_pool_zalloc(chan->desc_pool, GFP_ATOMIC, &pdesc);
        if (!desc) {
                dev_err(chan->dev, "out of memory for link descriptor\n");
                return NULL;
        }
 
-       memset(desc, 0, sizeof(*desc));
        INIT_LIST_HEAD(&desc->tx_list);
        dma_async_tx_descriptor_init(&desc->async_tx, &chan->chan);
        /* each desc has submit */
This page took 0.036885 seconds and 5 git commands to generate.