/* exynos_drm_gem.c
 *
 * Copyright (c) 2011 Samsung Electronics Co., Ltd.
 * Author: Inki Dae <inki.dae@samsung.com>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the
 * Free Software Foundation; either version 2 of the License, or (at your
 * option) any later version.
 */

#include <drm/drmP.h>
#include <drm/drm_vma_manager.h>

#include <linux/shmem_fs.h>
#include <linux/dma-buf.h>
#include <drm/exynos_drm.h>

#include "exynos_drm_drv.h"
#include "exynos_drm_gem.h"
#include "exynos_drm_iommu.h"
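
/*
 * Allocate the backing storage for a GEM object through the DMA mapping
 * API. The object's EXYNOS_BO_* flags select a (non-)contiguous allocation
 * and the cache attributes of the resulting mapping.
 */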
static int exynos_drm_alloc_buf(struct exynos_drm_gem_obj *obj)
{
	struct drm_device *dev = obj->base.dev;
	enum dma_attr attr;
	unsigned int nr_pages;

	if (obj->dma_addr) {
		DRM_DEBUG_KMS("already allocated.\n");
		return 0;
	}

	init_dma_attrs(&obj->dma_attrs);

	/*
	 * If EXYNOS_BO_CONTIG is set, a fully physically contiguous memory
	 * region is allocated; otherwise the allocation is only as
	 * contiguous as possible.
	 */
	if (!(obj->flags & EXYNOS_BO_NONCONTIG))
		dma_set_attr(DMA_ATTR_FORCE_CONTIGUOUS, &obj->dma_attrs);

	/*
	 * EXYNOS_BO_WC or EXYNOS_BO_NONCACHABLE requests a write-combined
	 * mapping, otherwise a cacheable one.
	 */
	if (obj->flags & EXYNOS_BO_WC || !(obj->flags & EXYNOS_BO_CACHABLE))
		attr = DMA_ATTR_WRITE_COMBINE;
	else
		attr = DMA_ATTR_NON_CONSISTENT;

	dma_set_attr(attr, &obj->dma_attrs);
	dma_set_attr(DMA_ATTR_NO_KERNEL_MAPPING, &obj->dma_attrs);

	nr_pages = obj->size >> PAGE_SHIFT;

	if (!is_drm_iommu_supported(dev)) {
		dma_addr_t start_addr;
		unsigned int i = 0;

		obj->pages = drm_calloc_large(nr_pages, sizeof(struct page *));
		if (!obj->pages) {
			DRM_ERROR("failed to allocate pages.\n");
			return -ENOMEM;
		}

		obj->cookie = dma_alloc_attrs(dev->dev, obj->size,
					&obj->dma_addr, GFP_KERNEL,
					&obj->dma_attrs);
		if (!obj->cookie) {
			DRM_ERROR("failed to allocate buffer.\n");
			drm_free_large(obj->pages);
			return -ENOMEM;
		}

		/* Without an IOMMU the buffer is contiguous, so the page
		 * array can be filled straight from the DMA address. */
		start_addr = obj->dma_addr;
		while (i < nr_pages) {
			obj->pages[i] = phys_to_page(start_addr);
			start_addr += PAGE_SIZE;
			i++;
		}
	} else {
		obj->pages = dma_alloc_attrs(dev->dev, obj->size,
					&obj->dma_addr, GFP_KERNEL,
					&obj->dma_attrs);
		if (!obj->pages) {
			DRM_ERROR("failed to allocate buffer.\n");
			return -ENOMEM;
		}
	}

	DRM_DEBUG_KMS("dma_addr(0x%lx), size(0x%lx)\n",
			(unsigned long)obj->dma_addr, obj->size);

	return 0;
}
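
/* Release the DMA buffer and page array set up by exynos_drm_alloc_buf(). */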
static void exynos_drm_free_buf(struct exynos_drm_gem_obj *obj)
{
	struct drm_device *dev = obj->base.dev;

	if (!obj->dma_addr) {
		DRM_DEBUG_KMS("dma_addr is invalid.\n");
		return;
	}

	DRM_DEBUG_KMS("dma_addr(0x%lx), size(0x%lx)\n",
			(unsigned long)obj->dma_addr, obj->size);

	if (!is_drm_iommu_supported(dev)) {
		dma_free_attrs(dev->dev, obj->size, obj->cookie,
				(dma_addr_t)obj->dma_addr, &obj->dma_attrs);
		drm_free_large(obj->pages);
	} else {
		dma_free_attrs(dev->dev, obj->size, obj->pages,
				(dma_addr_t)obj->dma_addr, &obj->dma_attrs);
	}

	obj->dma_addr = (dma_addr_t)NULL;
}
static int exynos_drm_gem_handle_create(struct drm_gem_object *obj,
					struct drm_file *file_priv,
					unsigned int *handle)
{
	int ret;

	/*
	 * Allocate an id in the idr table where the obj is registered,
	 * and hand that id back to the user as the handle.
	 */
	ret = drm_gem_handle_create(file_priv, obj, handle);
	if (ret)
		return ret;

	DRM_DEBUG_KMS("gem handle = 0x%x\n", *handle);

	/* drop reference from allocate - handle holds it now. */
	drm_gem_object_unreference_unlocked(obj);

	return 0;
}
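
/*
 * Tear down a GEM object: free its backing storage (unless it was imported
 * through dma-buf), drop its mmap offset and release the object itself.
 */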
void exynos_drm_gem_destroy(struct exynos_drm_gem_obj *exynos_gem_obj)
{
	struct drm_gem_object *obj = &exynos_gem_obj->base;

	DRM_DEBUG_KMS("handle count = %d\n", obj->handle_count);

	/*
	 * Do not release the memory region of an imported buffer here;
	 * the exporter releases it once the dmabuf's refcount drops to 0.
	 */
	if (obj->import_attach)
		goto out;

	exynos_drm_free_buf(exynos_gem_obj);

out:
	drm_gem_free_mmap_offset(obj);

	/* release file pointer to gem object. */
	drm_gem_object_release(obj);

	kfree(exynos_gem_obj);
	exynos_gem_obj = NULL;
}
unsigned long exynos_drm_gem_get_size(struct drm_device *dev,
						unsigned int gem_handle,
						struct drm_file *file_priv)
{
	struct exynos_drm_gem_obj *exynos_gem_obj;
	struct drm_gem_object *obj;

	obj = drm_gem_object_lookup(dev, file_priv, gem_handle);
	if (!obj) {
		DRM_ERROR("failed to lookup gem object.\n");
		return 0;
	}

	exynos_gem_obj = to_exynos_gem_obj(obj);

	drm_gem_object_unreference_unlocked(obj);

	return exynos_gem_obj->size;
}
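
/* Allocate and initialize a bare exynos_drm_gem_obj of the given size. */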
struct exynos_drm_gem_obj *exynos_drm_gem_init(struct drm_device *dev,
						unsigned long size)
{
	struct exynos_drm_gem_obj *exynos_gem_obj;
	struct drm_gem_object *obj;
	int ret;

	exynos_gem_obj = kzalloc(sizeof(*exynos_gem_obj), GFP_KERNEL);
	if (!exynos_gem_obj)
		return ERR_PTR(-ENOMEM);

	exynos_gem_obj->size = size;
	obj = &exynos_gem_obj->base;

	ret = drm_gem_object_init(dev, obj, size);
	if (ret < 0) {
		DRM_ERROR("failed to initialize gem object\n");
		kfree(exynos_gem_obj);
		return ERR_PTR(ret);
	}

	DRM_DEBUG_KMS("created file object = 0x%x\n", (unsigned int)obj->filp);

	return exynos_gem_obj;
}
struct exynos_drm_gem_obj *exynos_drm_gem_create(struct drm_device *dev,
						unsigned int flags,
						unsigned long size)
{
	struct exynos_drm_gem_obj *exynos_gem_obj;
	int ret;

	if (flags & ~(EXYNOS_BO_MASK)) {
		DRM_ERROR("invalid flags.\n");
		return ERR_PTR(-EINVAL);
	}

	if (!size) {
		DRM_ERROR("invalid size.\n");
		return ERR_PTR(-EINVAL);
	}

	size = roundup(size, PAGE_SIZE);

	exynos_gem_obj = exynos_drm_gem_init(dev, size);
	if (IS_ERR(exynos_gem_obj))
		return exynos_gem_obj;

	/* set memory type and cache attribute from user side. */
	exynos_gem_obj->flags = flags;

	ret = exynos_drm_alloc_buf(exynos_gem_obj);
	if (ret < 0) {
		drm_gem_object_release(&exynos_gem_obj->base);
		kfree(exynos_gem_obj);
		return ERR_PTR(ret);
	}

	return exynos_gem_obj;
}
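
/*
 * GEM create ioctl handler. Userspace reaches it roughly as follows
 * (sketch only, error handling omitted):
 *
 *	struct drm_exynos_gem_create req = {
 *		.size = len,
 *		.flags = EXYNOS_BO_NONCONTIG | EXYNOS_BO_WC,
 *	};
 *	drmIoctl(fd, DRM_IOCTL_EXYNOS_GEM_CREATE, &req);
 *	// req.handle now holds the GEM handle
 */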
int exynos_drm_gem_create_ioctl(struct drm_device *dev, void *data,
				struct drm_file *file_priv)
{
	struct drm_exynos_gem_create *args = data;
	struct exynos_drm_gem_obj *exynos_gem_obj;
	int ret;

	exynos_gem_obj = exynos_drm_gem_create(dev, args->flags, args->size);
	if (IS_ERR(exynos_gem_obj))
		return PTR_ERR(exynos_gem_obj);

	ret = exynos_drm_gem_handle_create(&exynos_gem_obj->base, file_priv,
					&args->handle);
	if (ret) {
		exynos_drm_gem_destroy(exynos_gem_obj);
		return ret;
	}

	return 0;
}
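
/*
 * Look up a GEM object by handle and return a pointer to its DMA address.
 * The reference taken by the lookup is kept on purpose; callers balance it
 * with exynos_drm_gem_put_dma_addr().
 */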
dma_addr_t *exynos_drm_gem_get_dma_addr(struct drm_device *dev,
					unsigned int gem_handle,
					struct drm_file *filp)
{
	struct exynos_drm_gem_obj *exynos_gem_obj;
	struct drm_gem_object *obj;

	obj = drm_gem_object_lookup(dev, filp, gem_handle);
	if (!obj) {
		DRM_ERROR("failed to lookup gem object.\n");
		return ERR_PTR(-EINVAL);
	}

	exynos_gem_obj = to_exynos_gem_obj(obj);

	return &exynos_gem_obj->dma_addr;
}
void exynos_drm_gem_put_dma_addr(struct drm_device *dev,
					unsigned int gem_handle,
					struct drm_file *filp)
{
	struct drm_gem_object *obj;

	obj = drm_gem_object_lookup(dev, filp, gem_handle);
	if (!obj) {
		DRM_ERROR("failed to lookup gem object.\n");
		return;
	}

	drm_gem_object_unreference_unlocked(obj);

	/*
	 * Drop obj->refcount one more time because we already took an
	 * extra reference in exynos_drm_gem_get_dma_addr().
	 */
	drm_gem_object_unreference_unlocked(obj);
}
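
/* Map the buffer behind a GEM object into the given VMA via dma_mmap_attrs(). */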
int exynos_drm_gem_mmap_buffer(struct exynos_drm_gem_obj *exynos_gem_obj,
				struct vm_area_struct *vma)
{
	struct drm_device *drm_dev = exynos_gem_obj->base.dev;
	unsigned long vm_size;
	int ret;

	vma->vm_flags &= ~VM_PFNMAP;
	vma->vm_pgoff = 0;

	vm_size = vma->vm_end - vma->vm_start;

	/* check if user-requested size is valid. */
	if (vm_size > exynos_gem_obj->size)
		return -EINVAL;

	ret = dma_mmap_attrs(drm_dev->dev, vma, exynos_gem_obj->pages,
				exynos_gem_obj->dma_addr, exynos_gem_obj->size,
				&exynos_gem_obj->dma_attrs);
	if (ret < 0) {
		DRM_ERROR("failed to mmap.\n");
		return ret;
	}

	return 0;
}
int exynos_drm_gem_get_ioctl(struct drm_device *dev, void *data,
				struct drm_file *file_priv)
{
	struct exynos_drm_gem_obj *exynos_gem_obj;
	struct drm_exynos_gem_info *args = data;
	struct drm_gem_object *obj;

	mutex_lock(&dev->struct_mutex);

	obj = drm_gem_object_lookup(dev, file_priv, args->handle);
	if (!obj) {
		DRM_ERROR("failed to lookup gem object.\n");
		mutex_unlock(&dev->struct_mutex);
		return -EINVAL;
	}

	exynos_gem_obj = to_exynos_gem_obj(obj);

	args->flags = exynos_gem_obj->flags;
	args->size = exynos_gem_obj->size;

	drm_gem_object_unreference(obj);
	mutex_unlock(&dev->struct_mutex);

	return 0;
}
int exynos_gem_map_sgt_with_dma(struct drm_device *drm_dev,
				struct sg_table *sgt,
				enum dma_data_direction dir)
{
	int nents;

	mutex_lock(&drm_dev->struct_mutex);

	nents = dma_map_sg(drm_dev->dev, sgt->sgl, sgt->nents, dir);
	if (!nents) {
		DRM_ERROR("failed to map sgl with dma.\n");
		mutex_unlock(&drm_dev->struct_mutex);
		return nents;
	}

	mutex_unlock(&drm_dev->struct_mutex);
	return 0;
}
void exynos_gem_unmap_sgt_from_dma(struct drm_device *drm_dev,
				struct sg_table *sgt,
				enum dma_data_direction dir)
{
	dma_unmap_sg(drm_dev->dev, sgt->sgl, sgt->nents, dir);
}
void exynos_drm_gem_free_object(struct drm_gem_object *obj)
{
	exynos_drm_gem_destroy(to_exynos_gem_obj(obj));
}
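
/*
 * Dumb-buffer allocation callback: prefer a non-contiguous write-combined
 * buffer when an IOMMU is available, otherwise fall back to a contiguous
 * allocation. Userspace triggers this roughly like (sketch only):
 *
 *	struct drm_mode_create_dumb creq = { .width = w, .height = h, .bpp = 32 };
 *	drmIoctl(fd, DRM_IOCTL_MODE_CREATE_DUMB, &creq);
 *	// creq.handle, creq.pitch and creq.size are filled in by this callback
 */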
int exynos_drm_gem_dumb_create(struct drm_file *file_priv,
			       struct drm_device *dev,
			       struct drm_mode_create_dumb *args)
{
	struct exynos_drm_gem_obj *exynos_gem_obj;
	int ret;

	/*
	 * Allocate memory to be used as a framebuffer.
	 * This callback is invoked by userspace through the
	 * DRM_IOCTL_MODE_CREATE_DUMB command.
	 */

	args->pitch = args->width * ((args->bpp + 7) / 8);
	args->size = args->pitch * args->height;

	if (is_drm_iommu_supported(dev)) {
		exynos_gem_obj = exynos_drm_gem_create(dev,
			EXYNOS_BO_NONCONTIG | EXYNOS_BO_WC,
			args->size);
	} else {
		exynos_gem_obj = exynos_drm_gem_create(dev,
			EXYNOS_BO_CONTIG | EXYNOS_BO_WC,
			args->size);
	}

	if (IS_ERR(exynos_gem_obj)) {
		dev_warn(dev->dev, "FB allocation failed.\n");
		return PTR_ERR(exynos_gem_obj);
	}

	ret = exynos_drm_gem_handle_create(&exynos_gem_obj->base, file_priv,
					&args->handle);
	if (ret) {
		exynos_drm_gem_destroy(exynos_gem_obj);
		return ret;
	}

	return 0;
}
int exynos_drm_gem_dumb_map_offset(struct drm_file *file_priv,
				   struct drm_device *dev, uint32_t handle,
				   uint64_t *offset)
{
	struct drm_gem_object *obj;
	int ret = 0;

	mutex_lock(&dev->struct_mutex);

	/*
	 * Get the fake mmap offset of the memory allocated for a
	 * framebuffer. This callback is invoked by userspace through the
	 * DRM_IOCTL_MODE_MAP_DUMB command.
	 */

	obj = drm_gem_object_lookup(dev, file_priv, handle);
	if (!obj) {
		DRM_ERROR("failed to lookup gem object.\n");
		ret = -EINVAL;
		goto unlock;
	}

	ret = drm_gem_create_mmap_offset(obj);
	if (ret)
		goto out;

	*offset = drm_vma_node_offset_addr(&obj->vma_node);
	DRM_DEBUG_KMS("offset = 0x%lx\n", (unsigned long)*offset);

out:
	drm_gem_object_unreference(obj);
unlock:
	mutex_unlock(&dev->struct_mutex);
	return ret;
}
int exynos_drm_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
	struct drm_gem_object *obj = vma->vm_private_data;
	struct exynos_drm_gem_obj *exynos_gem_obj = to_exynos_gem_obj(obj);
	unsigned long pfn;
	pgoff_t page_offset;
	int ret;

	page_offset = ((unsigned long)vmf->virtual_address -
			vma->vm_start) >> PAGE_SHIFT;

	if (page_offset >= (exynos_gem_obj->size >> PAGE_SHIFT)) {
		DRM_ERROR("invalid page offset\n");
		ret = -EINVAL;
		goto out;
	}

	pfn = page_to_pfn(exynos_gem_obj->pages[page_offset]);
	ret = vm_insert_mixed(vma, (unsigned long)vmf->virtual_address, pfn);

out:
	switch (ret) {
	case 0:
	case -ERESTARTSYS:
	case -EINTR:
		return VM_FAULT_NOPAGE;
	case -ENOMEM:
		return VM_FAULT_OOM;
	default:
		return VM_FAULT_SIGBUS;
	}
}
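
/*
 * mmap() file operation: let drm_gem_mmap() set up the VMA, apply the page
 * protection matching the buffer's cache flags, then map the actual pages.
 */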
int exynos_drm_gem_mmap(struct file *filp, struct vm_area_struct *vma)
{
	struct exynos_drm_gem_obj *exynos_gem_obj;
	struct drm_gem_object *obj;
	int ret;

	/* set vm_area_struct. */
	ret = drm_gem_mmap(filp, vma);
	if (ret < 0) {
		DRM_ERROR("failed to mmap.\n");
		return ret;
	}

	obj = vma->vm_private_data;
	exynos_gem_obj = to_exynos_gem_obj(obj);

	DRM_DEBUG_KMS("flags = 0x%x\n", exynos_gem_obj->flags);

	/* non-cachable as default. */
	if (exynos_gem_obj->flags & EXYNOS_BO_CACHABLE)
		vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
	else if (exynos_gem_obj->flags & EXYNOS_BO_WC)
		vma->vm_page_prot =
			pgprot_writecombine(vm_get_page_prot(vma->vm_flags));
	else
		vma->vm_page_prot =
			pgprot_noncached(vm_get_page_prot(vma->vm_flags));

	ret = exynos_drm_gem_mmap_buffer(exynos_gem_obj, vma);
	if (ret)
		goto err_close_vm;

	return ret;

err_close_vm:
	drm_gem_vm_close(vma);
	drm_gem_free_mmap_offset(obj);

	return ret;
}
/* low-level interface prime helpers */
struct sg_table *exynos_drm_gem_prime_get_sg_table(struct drm_gem_object *obj)
{
	struct exynos_drm_gem_obj *exynos_gem_obj = to_exynos_gem_obj(obj);
	int npages;

	npages = exynos_gem_obj->size >> PAGE_SHIFT;

	return drm_prime_pages_to_sg(exynos_gem_obj->pages, npages);
}
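
/*
 * Import a dma-buf sg_table: wrap it in a GEM object and rebuild the page
 * array so the imported buffer can be mapped and faulted like a native one.
 */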
struct drm_gem_object *
exynos_drm_gem_prime_import_sg_table(struct drm_device *dev,
				     struct dma_buf_attachment *attach,
				     struct sg_table *sgt)
{
	struct exynos_drm_gem_obj *exynos_gem_obj;
	int npages;
	int ret;

	exynos_gem_obj = exynos_drm_gem_init(dev, attach->dmabuf->size);
	if (IS_ERR(exynos_gem_obj)) {
		ret = PTR_ERR(exynos_gem_obj);
		return ERR_PTR(ret);
	}

	exynos_gem_obj->dma_addr = sg_dma_address(sgt->sgl);

	npages = exynos_gem_obj->size >> PAGE_SHIFT;
	exynos_gem_obj->pages = drm_malloc_ab(npages, sizeof(struct page *));
	if (!exynos_gem_obj->pages) {
		ret = -ENOMEM;
		goto err;
	}

	ret = drm_prime_sg_to_page_addr_arrays(sgt, exynos_gem_obj->pages, NULL,
			npages);
	if (ret < 0)
		goto err_free_large;

	if (sgt->nents == 1) {
		/* always physically continuous memory if sgt->nents is 1. */
		exynos_gem_obj->flags |= EXYNOS_BO_CONTIG;
	} else {
		/*
		 * This case could be either CONTIG or NONCONTIG; assume
		 * NONCONTIG for now.
		 * TODO: find a way for the exporter to tell the importer
		 * what type its buffer is.
		 */
		exynos_gem_obj->flags |= EXYNOS_BO_NONCONTIG;
	}

	return &exynos_gem_obj->base;

err_free_large:
	drm_free_large(exynos_gem_obj->pages);
err:
	drm_gem_object_release(&exynos_gem_obj->base);
	kfree(exynos_gem_obj);
	return ERR_PTR(ret);
}
void *exynos_drm_gem_prime_vmap(struct drm_gem_object *obj)
{
	return NULL;
}

void exynos_drm_gem_prime_vunmap(struct drm_gem_object *obj, void *vaddr)
{
	/* Nothing to do */
}