/* exynos_drm_buf.c
 *
 * Copyright (c) 2011 Samsung Electronics Co., Ltd.
 * Author: Inki Dae <inki.dae@samsung.com>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the
 * Free Software Foundation; either version 2 of the License, or (at your
 * option) any later version.
 */

#include <drm/drmP.h>
#include <drm/exynos_drm.h>

#include "exynos_drm_drv.h"
#include "exynos_drm_gem.h"
#include "exynos_drm_buf.h"
#include "exynos_drm_iommu.h"

1c248b7d 20static int lowlevel_buffer_allocate(struct drm_device *dev,
2b35892e 21 unsigned int flags, struct exynos_drm_gem_buf *buf)
1c248b7d 22{
0519f9a1 23 int ret = 0;
1169af21 24 enum dma_attr attr;
4744ad24 25 unsigned int nr_pages;
2b35892e 26
2b35892e
ID
27 if (buf->dma_addr) {
28 DRM_DEBUG_KMS("already allocated.\n");
29 return 0;
30 }
31
0519f9a1
ID
32 init_dma_attrs(&buf->dma_attrs);
33
1169af21
ID
34 /*
35 * if EXYNOS_BO_CONTIG, fully physically contiguous memory
36 * region will be allocated else physically contiguous
37 * as possible.
38 */
1dcfe238 39 if (!(flags & EXYNOS_BO_NONCONTIG))
1169af21
ID
40 dma_set_attr(DMA_ATTR_FORCE_CONTIGUOUS, &buf->dma_attrs);
41
42 /*
43 * if EXYNOS_BO_WC or EXYNOS_BO_NONCACHABLE, writecombine mapping
44 * else cachable mapping.
45 */
46 if (flags & EXYNOS_BO_WC || !(flags & EXYNOS_BO_CACHABLE))
0519f9a1 47 attr = DMA_ATTR_WRITE_COMBINE;
1169af21
ID
48 else
49 attr = DMA_ATTR_NON_CONSISTENT;
0519f9a1
ID
50
51 dma_set_attr(attr, &buf->dma_attrs);
4744ad24 52 dma_set_attr(DMA_ATTR_NO_KERNEL_MAPPING, &buf->dma_attrs);
0519f9a1 53
694be458
ID
54 nr_pages = buf->size >> PAGE_SHIFT;
55
56 if (!is_drm_iommu_supported(dev)) {
57 dma_addr_t start_addr;
58 unsigned int i = 0;
59
42ac99a7 60 buf->pages = drm_calloc_large(nr_pages, sizeof(struct page *));
694be458
ID
61 if (!buf->pages) {
62 DRM_ERROR("failed to allocate pages.\n");
63 return -ENOMEM;
64 }
65
19e307bc
SK
66 buf->kvaddr = (void __iomem *)dma_alloc_attrs(dev->dev,
67 buf->size,
694be458
ID
68 &buf->dma_addr, GFP_KERNEL,
69 &buf->dma_attrs);
70 if (!buf->kvaddr) {
71 DRM_ERROR("failed to allocate buffer.\n");
4bb615c5
SWK
72 ret = -ENOMEM;
73 goto err_free;
694be458
ID
74 }
75
76 start_addr = buf->dma_addr;
77 while (i < nr_pages) {
78 buf->pages[i] = phys_to_page(start_addr);
79 start_addr += PAGE_SIZE;
80 i++;
81 }
82 } else {
83
84 buf->pages = dma_alloc_attrs(dev->dev, buf->size,
85 &buf->dma_addr, GFP_KERNEL,
86 &buf->dma_attrs);
87 if (!buf->pages) {
88 DRM_ERROR("failed to allocate buffer.\n");
89 return -ENOMEM;
90 }
2b35892e
ID
91 }
92
4744ad24 93 buf->sgt = drm_prime_pages_to_sg(buf->pages, nr_pages);
662bb699 94 if (IS_ERR(buf->sgt)) {
4744ad24 95 DRM_ERROR("failed to get sg table.\n");
662bb699 96 ret = PTR_ERR(buf->sgt);
0519f9a1 97 goto err_free_attrs;
1c248b7d
ID
98 }
99
4744ad24 100 DRM_DEBUG_KMS("dma_addr(0x%lx), size(0x%lx)\n",
2b35892e
ID
101 (unsigned long)buf->dma_addr,
102 buf->size);
103
104 return ret;
0519f9a1 105
0519f9a1 106err_free_attrs:
4744ad24 107 dma_free_attrs(dev->dev, buf->size, buf->pages,
0519f9a1
ID
108 (dma_addr_t)buf->dma_addr, &buf->dma_attrs);
109 buf->dma_addr = (dma_addr_t)NULL;
4bb615c5 110err_free:
694be458 111 if (!is_drm_iommu_supported(dev))
af51a5e7 112 drm_free_large(buf->pages);
694be458 113
2b35892e 114 return ret;
1c248b7d
ID
115}
116
117static void lowlevel_buffer_deallocate(struct drm_device *dev,
2b35892e 118 unsigned int flags, struct exynos_drm_gem_buf *buf)
1c248b7d 119{
2b35892e
ID
120 if (!buf->dma_addr) {
121 DRM_DEBUG_KMS("dma_addr is invalid.\n");
122 return;
123 }
124
4744ad24 125 DRM_DEBUG_KMS("dma_addr(0x%lx), size(0x%lx)\n",
2b35892e
ID
126 (unsigned long)buf->dma_addr,
127 buf->size);
128
129 sg_free_table(buf->sgt);
130
131 kfree(buf->sgt);
132 buf->sgt = NULL;
133
694be458
ID
134 if (!is_drm_iommu_supported(dev)) {
135 dma_free_attrs(dev->dev, buf->size, buf->kvaddr,
136 (dma_addr_t)buf->dma_addr, &buf->dma_attrs);
af51a5e7 137 drm_free_large(buf->pages);
694be458
ID
138 } else
139 dma_free_attrs(dev->dev, buf->size, buf->pages,
0519f9a1 140 (dma_addr_t)buf->dma_addr, &buf->dma_attrs);
694be458 141
2b35892e 142 buf->dma_addr = (dma_addr_t)NULL;
1c248b7d
ID
143}
144
2b35892e
ID
145struct exynos_drm_gem_buf *exynos_drm_init_buf(struct drm_device *dev,
146 unsigned int size)
1c248b7d 147{
2c871127 148 struct exynos_drm_gem_buf *buffer;
1c248b7d 149
2c871127 150 DRM_DEBUG_KMS("desired size = 0x%x\n", size);
1c248b7d 151
2c871127 152 buffer = kzalloc(sizeof(*buffer), GFP_KERNEL);
38bb5253 153 if (!buffer)
ee5e770e 154 return NULL;
1c248b7d 155
2c871127 156 buffer->size = size;
2c871127 157 return buffer;
1c248b7d
ID
158}
159
2b35892e
ID
/*
 * Destroy a buffer descriptor created by exynos_drm_init_buf().
 * The backing memory must already have been released via
 * exynos_drm_free_buf(); only the bookkeeping structure is freed here.
 * kfree(NULL) is a no-op, so a NULL @buffer is safe.
 */
void exynos_drm_fini_buf(struct drm_device *dev,
				struct exynos_drm_gem_buf *buffer)
{
	/*
	 * Fix: dropped the old "buffer = NULL;" — it only cleared the
	 * local (by-value) copy of the pointer, never the caller's, and
	 * so was a dead store.
	 */
	kfree(buffer);
}
166
2b35892e
ID
/*
 * Allocate the memory region for a buffer object and record it in the
 * object's pages/sgt/dma_addr fields.
 *
 * Returns 0 on success or a negative error code from the lowlevel
 * allocator.
 *
 * Fix: propagate the lowlevel error code (e.g. the PTR_ERR() from
 * drm_prime_pages_to_sg()) instead of collapsing every failure to
 * -ENOMEM. Still returns a negative errno on failure, so callers that
 * only test "< 0" are unaffected.
 */
int exynos_drm_alloc_buf(struct drm_device *dev,
		struct exynos_drm_gem_buf *buf, unsigned int flags)
{
	return lowlevel_buffer_allocate(dev, flags, buf);
}
180
/*
 * Release the memory region backing a buffer object. Thin public
 * wrapper around lowlevel_buffer_deallocate(); the descriptor itself
 * stays valid and is destroyed separately by exynos_drm_fini_buf().
 */
void exynos_drm_free_buf(struct drm_device *dev,
		unsigned int flags, struct exynos_drm_gem_buf *buffer)
{
	lowlevel_buffer_deallocate(dev, flags, buffer);
}