Merge branch 'omap-for-v4.8/legacy' into for-next
drivers/media/v4l2-core/videobuf2-dma-contig.c
index e3e47ace7daf6a6188551e6687901a611eca3fbe..59fa204b15f32d9a184e9881172b731f4b070480 100644
 #include <media/videobuf2-dma-contig.h>
 #include <media/videobuf2-memops.h>
 
-struct vb2_dc_conf {
-       struct device           *dev;
-       struct dma_attrs        attrs;
-};
-
 struct vb2_dc_buf {
        struct device                   *dev;
        void                            *vaddr;
        unsigned long                   size;
        void                            *cookie;
        dma_addr_t                      dma_addr;
-       struct dma_attrs                attrs;
+       unsigned long                   attrs;
        enum dma_data_direction         dma_dir;
        struct sg_table                 *dma_sgt;
        struct frame_vector             *vec;
@@ -135,32 +130,32 @@ static void vb2_dc_put(void *buf_priv)
                kfree(buf->sgt_base);
        }
        dma_free_attrs(buf->dev, buf->size, buf->cookie, buf->dma_addr,
-                       &buf->attrs);
+                      buf->attrs);
        put_device(buf->dev);
        kfree(buf);
 }
 
-static void *vb2_dc_alloc(void *alloc_ctx, unsigned long size,
-                         enum dma_data_direction dma_dir, gfp_t gfp_flags)
+static void *vb2_dc_alloc(struct device *dev, unsigned long attrs,
+                         unsigned long size, enum dma_data_direction dma_dir,
+                         gfp_t gfp_flags)
 {
-       struct vb2_dc_conf *conf = alloc_ctx;
-       struct device *dev = conf->dev;
        struct vb2_dc_buf *buf;
 
        buf = kzalloc(sizeof *buf, GFP_KERNEL);
        if (!buf)
                return ERR_PTR(-ENOMEM);
 
-       buf->attrs = conf->attrs;
+       if (attrs)
+               buf->attrs = attrs;
        buf->cookie = dma_alloc_attrs(dev, size, &buf->dma_addr,
-                                       GFP_KERNEL | gfp_flags, &buf->attrs);
+                                       GFP_KERNEL | gfp_flags, buf->attrs);
        if (!buf->cookie) {
                dev_err(dev, "dma_alloc_coherent of size %ld failed\n", size);
                kfree(buf);
                return ERR_PTR(-ENOMEM);
        }
 
-       if (!dma_get_attr(DMA_ATTR_NO_KERNEL_MAPPING, &buf->attrs))
+       if ((buf->attrs & DMA_ATTR_NO_KERNEL_MAPPING) == 0)
                buf->vaddr = buf->cookie;
 
        /* Prevent the device from being released while the buffer is used */
@@ -194,7 +189,7 @@ static int vb2_dc_mmap(void *buf_priv, struct vm_area_struct *vma)
        vma->vm_pgoff = 0;
 
        ret = dma_mmap_attrs(buf->dev, vma, buf->cookie,
-               buf->dma_addr, buf->size, &buf->attrs);
+               buf->dma_addr, buf->size, buf->attrs);
 
        if (ret) {
                pr_err("Remapping memory failed, error: %d\n", ret);
@@ -377,7 +372,7 @@ static struct sg_table *vb2_dc_get_base_sgt(struct vb2_dc_buf *buf)
        }
 
        ret = dma_get_sgtable_attrs(buf->dev, sgt, buf->cookie, buf->dma_addr,
-               buf->size, &buf->attrs);
+               buf->size, buf->attrs);
        if (ret < 0) {
                dev_err(buf->dev, "failed to get scatterlist from DMA API\n");
                kfree(sgt);
@@ -426,15 +421,12 @@ static void vb2_dc_put_userptr(void *buf_priv)
        struct page **pages;
 
        if (sgt) {
-               DEFINE_DMA_ATTRS(attrs);
-
-               dma_set_attr(DMA_ATTR_SKIP_CPU_SYNC, &attrs);
                /*
                 * No need to sync to CPU, it's already synced to the CPU
                 * since the finish() memop will have been called before this.
                 */
                dma_unmap_sg_attrs(buf->dev, sgt->sgl, sgt->orig_nents,
-                                  buf->dma_dir, &attrs);
+                                  buf->dma_dir, DMA_ATTR_SKIP_CPU_SYNC);
                pages = frame_vector_pages(buf->vec);
                /* sgt should exist only if vector contains pages... */
                BUG_ON(IS_ERR(pages));
@@ -478,10 +470,9 @@ static inline dma_addr_t vb2_dc_pfn_to_dma(struct device *dev, unsigned long pfn
 }
 #endif
 
-static void *vb2_dc_get_userptr(void *alloc_ctx, unsigned long vaddr,
+static void *vb2_dc_get_userptr(struct device *dev, unsigned long vaddr,
        unsigned long size, enum dma_data_direction dma_dir)
 {
-       struct vb2_dc_conf *conf = alloc_ctx;
        struct vb2_dc_buf *buf;
        struct frame_vector *vec;
        unsigned long offset;
@@ -490,9 +481,6 @@ static void *vb2_dc_get_userptr(void *alloc_ctx, unsigned long vaddr,
        struct sg_table *sgt;
        unsigned long contig_size;
        unsigned long dma_align = dma_get_cache_alignment();
-       DEFINE_DMA_ATTRS(attrs);
-
-       dma_set_attr(DMA_ATTR_SKIP_CPU_SYNC, &attrs);
 
        /* Only cache aligned DMA transfers are reliable */
        if (!IS_ALIGNED(vaddr | size, dma_align)) {
@@ -509,7 +497,7 @@ static void *vb2_dc_get_userptr(void *alloc_ctx, unsigned long vaddr,
        if (!buf)
                return ERR_PTR(-ENOMEM);
 
-       buf->dev = conf->dev;
+       buf->dev = dev;
        buf->dma_dir = dma_dir;
 
        offset = vaddr & ~PAGE_MASK;
@@ -554,7 +542,7 @@ static void *vb2_dc_get_userptr(void *alloc_ctx, unsigned long vaddr,
         * prepare() memop is called.
         */
        sgt->nents = dma_map_sg_attrs(buf->dev, sgt->sgl, sgt->orig_nents,
-                                     buf->dma_dir, &attrs);
+                                     buf->dma_dir, DMA_ATTR_SKIP_CPU_SYNC);
        if (sgt->nents <= 0) {
                pr_err("failed to map scatterlist\n");
                ret = -EIO;
@@ -578,7 +566,7 @@ out:
 
 fail_map_sg:
        dma_unmap_sg_attrs(buf->dev, sgt->sgl, sgt->orig_nents,
-                          buf->dma_dir, &attrs);
+                          buf->dma_dir, DMA_ATTR_SKIP_CPU_SYNC);
 
 fail_sgt_init:
        sg_free_table(sgt);
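
The hunks above replace the old struct dma_attrs bitmap, which was built with DEFINE_DMA_ATTRS()/dma_set_attr() and queried with dma_get_attr(), by a plain unsigned long bitmask: attributes are now combined with | and tested with &. A minimal sketch of the new calling convention, for illustration only (the helper name is hypothetical and not part of this patch):

#include <linux/dma-mapping.h>

/*
 * Hypothetical helper: allocate a buffer with no kernel mapping and no
 * CPU cache synchronisation, passing the attributes as a bitmask rather
 * than through a struct dma_attrs.
 */
static void *example_alloc_no_mapping(struct device *dev, size_t size,
				      dma_addr_t *dma_handle)
{
	unsigned long attrs = DMA_ATTR_NO_KERNEL_MAPPING | DMA_ATTR_SKIP_CPU_SYNC;

	return dma_alloc_attrs(dev, size, dma_handle, GFP_KERNEL, attrs);
}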
@@ -676,10 +664,9 @@ static void vb2_dc_detach_dmabuf(void *mem_priv)
        kfree(buf);
 }
 
-static void *vb2_dc_attach_dmabuf(void *alloc_ctx, struct dma_buf *dbuf,
+static void *vb2_dc_attach_dmabuf(struct device *dev, struct dma_buf *dbuf,
        unsigned long size, enum dma_data_direction dma_dir)
 {
-       struct vb2_dc_conf *conf = alloc_ctx;
        struct vb2_dc_buf *buf;
        struct dma_buf_attachment *dba;
 
@@ -690,7 +677,7 @@ static void *vb2_dc_attach_dmabuf(void *alloc_ctx, struct dma_buf *dbuf,
        if (!buf)
                return ERR_PTR(-ENOMEM);
 
-       buf->dev = conf->dev;
+       buf->dev = dev;
        /* create attachment for the dmabuf with the user device */
        dba = dma_buf_attach(dbuf, buf->dev);
        if (IS_ERR(dba)) {
@@ -729,30 +716,6 @@ const struct vb2_mem_ops vb2_dma_contig_memops = {
 };
 EXPORT_SYMBOL_GPL(vb2_dma_contig_memops);
 
-void *vb2_dma_contig_init_ctx_attrs(struct device *dev,
-                                   struct dma_attrs *attrs)
-{
-       struct vb2_dc_conf *conf;
-
-       conf = kzalloc(sizeof *conf, GFP_KERNEL);
-       if (!conf)
-               return ERR_PTR(-ENOMEM);
-
-       conf->dev = dev;
-       if (attrs)
-               conf->attrs = *attrs;
-
-       return conf;
-}
-EXPORT_SYMBOL_GPL(vb2_dma_contig_init_ctx_attrs);
-
-void vb2_dma_contig_cleanup_ctx(void *alloc_ctx)
-{
-       if (!IS_ERR_OR_NULL(alloc_ctx))
-               kfree(alloc_ctx);
-}
-EXPORT_SYMBOL_GPL(vb2_dma_contig_cleanup_ctx);
-
 /**
  * vb2_dma_contig_set_max_seg_size() - configure DMA max segment size
  * @dev:       device for configuring DMA parameters
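
With vb2_dma_contig_init_ctx_attrs() and vb2_dma_contig_cleanup_ctx() removed, the memops callbacks above take the struct device directly, so a driver no longer keeps an opaque allocation context around. A rough driver-side sketch, assuming the companion videobuf2 core change that lets a driver point vb2_queue->dev at its device (that change is not part of this file's diff):

#include <media/videobuf2-v4l2.h>
#include <media/videobuf2-dma-contig.h>

/*
 * Hypothetical queue setup; the remaining vb2_queue fields (type, io_modes,
 * ops, buf_struct_size, ...) are assumed to be filled in elsewhere.
 */
static int example_init_vb2_queue(struct vb2_queue *q, struct device *dev)
{
	q->mem_ops = &vb2_dma_contig_memops;
	q->dev = dev;	/* replaces the old init_ctx()/cleanup_ctx() pair */

	return vb2_queue_init(q);
}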
@@ -780,7 +743,7 @@ EXPORT_SYMBOL_GPL(vb2_dma_contig_cleanup_ctx);
 int vb2_dma_contig_set_max_seg_size(struct device *dev, unsigned int size)
 {
        if (!dev->dma_parms) {
-               dev->dma_parms = kzalloc(sizeof(dev->dma_parms), GFP_KERNEL);
+               dev->dma_parms = kzalloc(sizeof(*dev->dma_parms), GFP_KERNEL);
                if (!dev->dma_parms)
                        return -ENOMEM;
        }
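
The final hunk also fixes an under-allocation: sizeof(dev->dma_parms) is only the size of a pointer, whereas sizeof(*dev->dma_parms) is the size of the struct device_dma_parameters actually being allocated. A tiny illustration (hypothetical helper, not part of the patch):

#include <linux/device.h>
#include <linux/slab.h>

/*
 * Hypothetical demonstration of the fix: reserve room for the whole
 * structure, not just for a pointer to it.
 */
static struct device_dma_parameters *example_alloc_dma_parms(void)
{
	/* sizeof(struct device_dma_parameters *) would be 4 or 8 bytes;
	 * the structure itself is what dma_set_max_seg_size() later fills. */
	return kzalloc(sizeof(struct device_dma_parameters), GFP_KERNEL);
}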