/*
 * drivers/gpu/drm/omapdrm/omap_gem_dmabuf.c
 *
 * Copyright (C) 2011 Texas Instruments
 * Author: Rob Clark <rob.clark@linaro.org>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 as published by
 * the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program.  If not, see <http://www.gnu.org/licenses/>.
 */
22 #include <linux/dma-buf.h>
24 static struct sg_table
*omap_gem_map_dma_buf(
25 struct dma_buf_attachment
*attachment
,
26 enum dma_data_direction dir
)
28 struct drm_gem_object
*obj
= attachment
->dmabuf
->priv
;
33 sg
= kzalloc(sizeof(*sg
), GFP_KERNEL
);
35 return ERR_PTR(-ENOMEM
);
37 /* camera, etc, need physically contiguous.. but we need a
38 * better way to know this..
40 ret
= omap_gem_get_paddr(obj
, &paddr
, true);
44 ret
= sg_alloc_table(sg
, 1, GFP_KERNEL
);
48 sg_init_table(sg
->sgl
, 1);
49 sg_dma_len(sg
->sgl
) = obj
->size
;
50 sg_set_page(sg
->sgl
, pfn_to_page(PFN_DOWN(paddr
)), obj
->size
, 0);
51 sg_dma_address(sg
->sgl
) = paddr
;
53 /* this should be after _get_paddr() to ensure we have pages attached */
54 omap_gem_dma_sync(obj
, dir
);
62 static void omap_gem_unmap_dma_buf(struct dma_buf_attachment
*attachment
,
63 struct sg_table
*sg
, enum dma_data_direction dir
)
65 struct drm_gem_object
*obj
= attachment
->dmabuf
->priv
;
66 omap_gem_put_paddr(obj
);
71 static void omap_gem_dmabuf_release(struct dma_buf
*buffer
)
73 struct drm_gem_object
*obj
= buffer
->priv
;
74 /* release reference that was taken when dmabuf was exported
75 * in omap_gem_prime_set()..
77 drm_gem_object_unreference_unlocked(obj
);
81 static int omap_gem_dmabuf_begin_cpu_access(struct dma_buf
*buffer
,
82 size_t start
, size_t len
, enum dma_data_direction dir
)
84 struct drm_gem_object
*obj
= buffer
->priv
;
86 if (omap_gem_flags(obj
) & OMAP_BO_TILED
) {
87 /* TODO we would need to pin at least part of the buffer to
88 * get de-tiled view. For now just reject it.
92 /* make sure we have the pages: */
93 return omap_gem_get_pages(obj
, &pages
, true);
96 static void omap_gem_dmabuf_end_cpu_access(struct dma_buf
*buffer
,
97 size_t start
, size_t len
, enum dma_data_direction dir
)
99 struct drm_gem_object
*obj
= buffer
->priv
;
100 omap_gem_put_pages(obj
);
104 static void *omap_gem_dmabuf_kmap_atomic(struct dma_buf
*buffer
,
105 unsigned long page_num
)
107 struct drm_gem_object
*obj
= buffer
->priv
;
109 omap_gem_get_pages(obj
, &pages
, false);
110 omap_gem_cpu_sync(obj
, page_num
);
111 return kmap_atomic(pages
[page_num
]);
/* Undo omap_gem_dmabuf_kmap_atomic() for the given mapping address. */
static void omap_gem_dmabuf_kunmap_atomic(struct dma_buf *buffer,
		unsigned long page_num, void *addr)
{
	kunmap_atomic(addr);
}
120 static void *omap_gem_dmabuf_kmap(struct dma_buf
*buffer
,
121 unsigned long page_num
)
123 struct drm_gem_object
*obj
= buffer
->priv
;
125 omap_gem_get_pages(obj
, &pages
, false);
126 omap_gem_cpu_sync(obj
, page_num
);
127 return kmap(pages
[page_num
]);
130 static void omap_gem_dmabuf_kunmap(struct dma_buf
*buffer
,
131 unsigned long page_num
, void *addr
)
133 struct drm_gem_object
*obj
= buffer
->priv
;
135 omap_gem_get_pages(obj
, &pages
, false);
136 kunmap(pages
[page_num
]);
140 * TODO maybe we can split up drm_gem_mmap to avoid duplicating
141 * some here.. or at least have a drm_dmabuf_mmap helper.
143 static int omap_gem_dmabuf_mmap(struct dma_buf
*buffer
,
144 struct vm_area_struct
*vma
)
146 struct drm_gem_object
*obj
= buffer
->priv
;
149 if (WARN_ON(!obj
->filp
))
152 /* Check for valid size. */
153 if (omap_gem_mmap_size(obj
) < vma
->vm_end
- vma
->vm_start
) {
158 if (!obj
->dev
->driver
->gem_vm_ops
) {
163 vma
->vm_flags
|= VM_IO
| VM_PFNMAP
| VM_DONTEXPAND
| VM_DONTDUMP
;
164 vma
->vm_ops
= obj
->dev
->driver
->gem_vm_ops
;
165 vma
->vm_private_data
= obj
;
166 vma
->vm_page_prot
= pgprot_writecombine(vm_get_page_prot(vma
->vm_flags
));
168 /* Take a ref for this mapping of the object, so that the fault
169 * handler can dereference the mmap offset's pointer to the object.
170 * This reference is cleaned up by the corresponding vm_close
171 * (which should happen whether the vma was created by this call, or
172 * by a vm_open due to mremap or partial unmap or whatever).
174 vma
->vm_ops
->open(vma
);
178 return omap_gem_mmap_obj(obj
, vma
);
181 static struct dma_buf_ops omap_dmabuf_ops
= {
182 .map_dma_buf
= omap_gem_map_dma_buf
,
183 .unmap_dma_buf
= omap_gem_unmap_dma_buf
,
184 .release
= omap_gem_dmabuf_release
,
185 .begin_cpu_access
= omap_gem_dmabuf_begin_cpu_access
,
186 .end_cpu_access
= omap_gem_dmabuf_end_cpu_access
,
187 .kmap_atomic
= omap_gem_dmabuf_kmap_atomic
,
188 .kunmap_atomic
= omap_gem_dmabuf_kunmap_atomic
,
189 .kmap
= omap_gem_dmabuf_kmap
,
190 .kunmap
= omap_gem_dmabuf_kunmap
,
191 .mmap
= omap_gem_dmabuf_mmap
,
194 struct dma_buf
*omap_gem_prime_export(struct drm_device
*dev
,
195 struct drm_gem_object
*obj
, int flags
)
197 return dma_buf_export(obj
, &omap_dmabuf_ops
, obj
->size
, flags
);
200 struct drm_gem_object
*omap_gem_prime_import(struct drm_device
*dev
,
201 struct dma_buf
*buffer
)
203 struct drm_gem_object
*obj
;
205 /* is this one of own objects? */
206 if (buffer
->ops
== &omap_dmabuf_ops
) {
208 /* is it from our device? */
209 if (obj
->dev
== dev
) {
211 * Importing dmabuf exported from out own gem increases
212 * refcount on gem itself instead of f_count of dmabuf.
214 drm_gem_object_reference(obj
);
220 * TODO add support for importing buffers from other devices..
221 * for now we don't need this but would be nice to add eventually
223 return ERR_PTR(-EINVAL
);