/* exynos_drm_buf.c
 *
 * Copyright (c) 2011 Samsung Electronics Co., Ltd.
 * Author: Inki Dae <inki.dae@samsung.com>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the
 * Free Software Foundation; either version 2 of the License, or (at your
 * option) any later version.
 */
#include <drm/drmP.h>
#include <drm/exynos_drm.h>

#include "exynos_drm_drv.h"
#include "exynos_drm_gem.h"
#include "exynos_drm_buf.h"
#include "exynos_drm_iommu.h"
20 static int lowlevel_buffer_allocate(struct drm_device
*dev
,
21 unsigned int flags
, struct exynos_drm_gem_buf
*buf
)
25 unsigned int nr_pages
;
27 DRM_DEBUG_KMS("%s\n", __FILE__
);
30 DRM_DEBUG_KMS("already allocated.\n");
34 init_dma_attrs(&buf
->dma_attrs
);
37 * if EXYNOS_BO_CONTIG, fully physically contiguous memory
38 * region will be allocated else physically contiguous
41 if (!(flags
& EXYNOS_BO_NONCONTIG
))
42 dma_set_attr(DMA_ATTR_FORCE_CONTIGUOUS
, &buf
->dma_attrs
);
45 * if EXYNOS_BO_WC or EXYNOS_BO_NONCACHABLE, writecombine mapping
46 * else cachable mapping.
48 if (flags
& EXYNOS_BO_WC
|| !(flags
& EXYNOS_BO_CACHABLE
))
49 attr
= DMA_ATTR_WRITE_COMBINE
;
51 attr
= DMA_ATTR_NON_CONSISTENT
;
53 dma_set_attr(attr
, &buf
->dma_attrs
);
54 dma_set_attr(DMA_ATTR_NO_KERNEL_MAPPING
, &buf
->dma_attrs
);
56 nr_pages
= buf
->size
>> PAGE_SHIFT
;
58 if (!is_drm_iommu_supported(dev
)) {
59 dma_addr_t start_addr
;
62 buf
->pages
= kzalloc(sizeof(struct page
) * nr_pages
,
65 DRM_ERROR("failed to allocate pages.\n");
69 buf
->kvaddr
= dma_alloc_attrs(dev
->dev
, buf
->size
,
70 &buf
->dma_addr
, GFP_KERNEL
,
73 DRM_ERROR("failed to allocate buffer.\n");
78 start_addr
= buf
->dma_addr
;
79 while (i
< nr_pages
) {
80 buf
->pages
[i
] = phys_to_page(start_addr
);
81 start_addr
+= PAGE_SIZE
;
86 buf
->pages
= dma_alloc_attrs(dev
->dev
, buf
->size
,
87 &buf
->dma_addr
, GFP_KERNEL
,
90 DRM_ERROR("failed to allocate buffer.\n");
95 buf
->sgt
= drm_prime_pages_to_sg(buf
->pages
, nr_pages
);
97 DRM_ERROR("failed to get sg table.\n");
102 DRM_DEBUG_KMS("dma_addr(0x%lx), size(0x%lx)\n",
103 (unsigned long)buf
->dma_addr
,
109 dma_free_attrs(dev
->dev
, buf
->size
, buf
->pages
,
110 (dma_addr_t
)buf
->dma_addr
, &buf
->dma_attrs
);
111 buf
->dma_addr
= (dma_addr_t
)NULL
;
113 if (!is_drm_iommu_supported(dev
))
119 static void lowlevel_buffer_deallocate(struct drm_device
*dev
,
120 unsigned int flags
, struct exynos_drm_gem_buf
*buf
)
122 DRM_DEBUG_KMS("%s.\n", __FILE__
);
124 if (!buf
->dma_addr
) {
125 DRM_DEBUG_KMS("dma_addr is invalid.\n");
129 DRM_DEBUG_KMS("dma_addr(0x%lx), size(0x%lx)\n",
130 (unsigned long)buf
->dma_addr
,
133 sg_free_table(buf
->sgt
);
138 if (!is_drm_iommu_supported(dev
)) {
139 dma_free_attrs(dev
->dev
, buf
->size
, buf
->kvaddr
,
140 (dma_addr_t
)buf
->dma_addr
, &buf
->dma_attrs
);
143 dma_free_attrs(dev
->dev
, buf
->size
, buf
->pages
,
144 (dma_addr_t
)buf
->dma_addr
, &buf
->dma_attrs
);
146 buf
->dma_addr
= (dma_addr_t
)NULL
;
149 struct exynos_drm_gem_buf
*exynos_drm_init_buf(struct drm_device
*dev
,
152 struct exynos_drm_gem_buf
*buffer
;
154 DRM_DEBUG_KMS("%s.\n", __FILE__
);
155 DRM_DEBUG_KMS("desired size = 0x%x\n", size
);
157 buffer
= kzalloc(sizeof(*buffer
), GFP_KERNEL
);
159 DRM_ERROR("failed to allocate exynos_drm_gem_buf.\n");
167 void exynos_drm_fini_buf(struct drm_device
*dev
,
168 struct exynos_drm_gem_buf
*buffer
)
170 DRM_DEBUG_KMS("%s.\n", __FILE__
);
173 DRM_DEBUG_KMS("buffer is null.\n");
181 int exynos_drm_alloc_buf(struct drm_device
*dev
,
182 struct exynos_drm_gem_buf
*buf
, unsigned int flags
)
186 * allocate memory region and set the memory information
187 * to vaddr and dma_addr of a buffer object.
189 if (lowlevel_buffer_allocate(dev
, flags
, buf
) < 0)
/*
 * Free the backing storage of @buffer.
 *
 * Thin public wrapper around lowlevel_buffer_deallocate(); the descriptor
 * itself is released separately by exynos_drm_fini_buf().
 */
void exynos_drm_free_buf(struct drm_device *dev,
		unsigned int flags, struct exynos_drm_gem_buf *buffer)
{

	lowlevel_buffer_deallocate(dev, flags, buffer);
}