drm/exynos: cleanup function calling written twice
drivers/gpu/drm/exynos/exynos_drm_gem.c

/* exynos_drm_gem.c
 *
 * Copyright (c) 2011 Samsung Electronics Co., Ltd.
 * Author: Inki Dae <inki.dae@samsung.com>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the
 * Free Software Foundation; either version 2 of the License, or (at your
 * option) any later version.
 */

#include <drm/drmP.h>
#include <drm/drm_vma_manager.h>

#include <linux/shmem_fs.h>
#include <linux/dma-buf.h>
#include <drm/exynos_drm.h>

#include "exynos_drm_drv.h"
#include "exynos_drm_gem.h"
#include "exynos_drm_iommu.h"

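/*
 * Allocate the backing storage for a GEM object through the DMA mapping
 * API.  The buffer flags are translated into DMA attributes: without
 * EXYNOS_BO_NONCONTIG the allocation is forced to be physically
 * contiguous, EXYNOS_BO_WC or a non-cacheable buffer gets a
 * write-combined mapping, and no kernel mapping is created up front.
 * Without an IOMMU a pages[] array is rebuilt from the returned DMA
 * address; with an IOMMU the dma_alloc_attrs() cookie is reused as the
 * page array.
 */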
static int exynos_drm_alloc_buf(struct exynos_drm_gem_obj *obj)
{
	struct drm_device *dev = obj->base.dev;
	enum dma_attr attr;
	unsigned int nr_pages;

	if (obj->dma_addr) {
		DRM_DEBUG_KMS("already allocated.\n");
		return 0;
	}

	init_dma_attrs(&obj->dma_attrs);

	/*
	 * if EXYNOS_BO_CONTIG, a fully physically contiguous memory
	 * region will be allocated, else memory that is as physically
	 * contiguous as possible.
	 */
	if (!(obj->flags & EXYNOS_BO_NONCONTIG))
		dma_set_attr(DMA_ATTR_FORCE_CONTIGUOUS, &obj->dma_attrs);

	/*
	 * if EXYNOS_BO_WC or EXYNOS_BO_NONCACHABLE, use a writecombine
	 * mapping, else a cacheable mapping.
	 */
	if (obj->flags & EXYNOS_BO_WC || !(obj->flags & EXYNOS_BO_CACHABLE))
		attr = DMA_ATTR_WRITE_COMBINE;
	else
		attr = DMA_ATTR_NON_CONSISTENT;

	dma_set_attr(attr, &obj->dma_attrs);
	dma_set_attr(DMA_ATTR_NO_KERNEL_MAPPING, &obj->dma_attrs);

	nr_pages = obj->size >> PAGE_SHIFT;

	if (!is_drm_iommu_supported(dev)) {
		obj->pages = drm_calloc_large(nr_pages, sizeof(struct page *));
		if (!obj->pages) {
			DRM_ERROR("failed to allocate pages.\n");
			return -ENOMEM;
		}
	}

	obj->cookie = dma_alloc_attrs(dev->dev, obj->size, &obj->dma_addr,
					GFP_KERNEL, &obj->dma_attrs);
	if (!obj->cookie) {
		DRM_ERROR("failed to allocate buffer.\n");
		if (obj->pages)
			drm_free_large(obj->pages);
		return -ENOMEM;
	}

	if (obj->pages) {
		dma_addr_t start_addr;
		unsigned int i = 0;

		start_addr = obj->dma_addr;
		while (i < nr_pages) {
			obj->pages[i] = pfn_to_page(dma_to_pfn(dev->dev,
								start_addr));
			start_addr += PAGE_SIZE;
			i++;
		}
	} else {
		obj->pages = obj->cookie;
	}

	DRM_DEBUG_KMS("dma_addr(0x%lx), size(0x%lx)\n",
			(unsigned long)obj->dma_addr,
			obj->size);

	return 0;
}

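/*
 * Release the backing storage allocated by exynos_drm_alloc_buf() and,
 * when no IOMMU is used, the page array that was built alongside it.
 */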
static void exynos_drm_free_buf(struct exynos_drm_gem_obj *obj)
{
	struct drm_device *dev = obj->base.dev;

	if (!obj->dma_addr) {
		DRM_DEBUG_KMS("dma_addr is invalid.\n");
		return;
	}

	DRM_DEBUG_KMS("dma_addr(0x%lx), size(0x%lx)\n",
			(unsigned long)obj->dma_addr, obj->size);

	dma_free_attrs(dev->dev, obj->size, obj->cookie,
			(dma_addr_t)obj->dma_addr, &obj->dma_attrs);

	if (!is_drm_iommu_supported(dev))
		drm_free_large(obj->pages);
}

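/*
 * Register a GEM object with the caller's DRM file and return the
 * resulting handle; the allocation reference is dropped here because the
 * handle now owns the object.
 */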
static int exynos_drm_gem_handle_create(struct drm_gem_object *obj,
					struct drm_file *file_priv,
					unsigned int *handle)
{
	int ret;

	/*
	 * allocate an id from the idr table where the obj is registered;
	 * the handle carries that id back to userspace.
	 */
	ret = drm_gem_handle_create(file_priv, obj, handle);
	if (ret)
		return ret;

	DRM_DEBUG_KMS("gem handle = 0x%x\n", *handle);

	/* drop reference from allocate - handle holds it now. */
	drm_gem_object_unreference_unlocked(obj);

	return 0;
}

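/*
 * Final teardown of a GEM object: imported (PRIME) buffers are handed
 * back to their exporter, locally allocated buffers are freed, and the
 * mmap offset and the object itself are released.
 */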
void exynos_drm_gem_destroy(struct exynos_drm_gem_obj *exynos_gem_obj)
{
	struct drm_gem_object *obj = &exynos_gem_obj->base;

	DRM_DEBUG_KMS("handle count = %d\n", obj->handle_count);

	/*
	 * do not release the memory region of an imported buffer here;
	 * the exporter releases it once the dmabuf's refcount drops to 0.
	 */
	if (obj->import_attach)
		drm_prime_gem_destroy(obj, exynos_gem_obj->sgt);
	else
		exynos_drm_free_buf(exynos_gem_obj);

	drm_gem_free_mmap_offset(obj);

	/* release file pointer to gem object. */
	drm_gem_object_release(obj);

	kfree(exynos_gem_obj);
}

unsigned long exynos_drm_gem_get_size(struct drm_device *dev,
						unsigned int gem_handle,
						struct drm_file *file_priv)
{
	struct exynos_drm_gem_obj *exynos_gem_obj;
	struct drm_gem_object *obj;

	obj = drm_gem_object_lookup(dev, file_priv, gem_handle);
	if (!obj) {
		DRM_ERROR("failed to lookup gem object.\n");
		return 0;
	}

	exynos_gem_obj = to_exynos_gem_obj(obj);

	drm_gem_object_unreference_unlocked(obj);

	return exynos_gem_obj->size;
}

static struct exynos_drm_gem_obj *exynos_drm_gem_init(struct drm_device *dev,
						      unsigned long size)
{
	struct exynos_drm_gem_obj *exynos_gem_obj;
	struct drm_gem_object *obj;
	int ret;

	exynos_gem_obj = kzalloc(sizeof(*exynos_gem_obj), GFP_KERNEL);
	if (!exynos_gem_obj)
		return ERR_PTR(-ENOMEM);

	exynos_gem_obj->size = size;
	obj = &exynos_gem_obj->base;

	ret = drm_gem_object_init(dev, obj, size);
	if (ret < 0) {
		DRM_ERROR("failed to initialize gem object\n");
		kfree(exynos_gem_obj);
		return ERR_PTR(ret);
	}

	DRM_DEBUG_KMS("created file object = 0x%x\n", (unsigned int)obj->filp);

	return exynos_gem_obj;
}

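/*
 * Allocate a new GEM object and its backing buffer in one step, e.g.
 *
 *	obj = exynos_drm_gem_create(dev, EXYNOS_BO_NONCONTIG | EXYNOS_BO_WC,
 *				    args->size);
 *
 * as the dumb-buffer path below does.  The size is rounded up to a page
 * multiple internally and an ERR_PTR() is returned on failure.
 */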
struct exynos_drm_gem_obj *exynos_drm_gem_create(struct drm_device *dev,
						unsigned int flags,
						unsigned long size)
{
	struct exynos_drm_gem_obj *exynos_gem_obj;
	int ret;

	if (flags & ~(EXYNOS_BO_MASK)) {
		DRM_ERROR("invalid flags.\n");
		return ERR_PTR(-EINVAL);
	}

	if (!size) {
		DRM_ERROR("invalid size.\n");
		return ERR_PTR(-EINVAL);
	}

	size = roundup(size, PAGE_SIZE);

	exynos_gem_obj = exynos_drm_gem_init(dev, size);
	if (IS_ERR(exynos_gem_obj))
		return exynos_gem_obj;

	/* set memory type and cache attribute from user side. */
	exynos_gem_obj->flags = flags;

	ret = exynos_drm_alloc_buf(exynos_gem_obj);
	if (ret < 0) {
		drm_gem_object_release(&exynos_gem_obj->base);
		kfree(exynos_gem_obj);
		return ERR_PTR(ret);
	}

	return exynos_gem_obj;
}

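/*
 * Handler for the EXYNOS_GEM_CREATE ioctl: userspace passes the requested
 * size and buffer flags in struct drm_exynos_gem_create and gets a GEM
 * handle back in args->handle.
 */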
int exynos_drm_gem_create_ioctl(struct drm_device *dev, void *data,
				struct drm_file *file_priv)
{
	struct drm_exynos_gem_create *args = data;
	struct exynos_drm_gem_obj *exynos_gem_obj;
	int ret;

	exynos_gem_obj = exynos_drm_gem_create(dev, args->flags, args->size);
	if (IS_ERR(exynos_gem_obj))
		return PTR_ERR(exynos_gem_obj);

	ret = exynos_drm_gem_handle_create(&exynos_gem_obj->base, file_priv,
					   &args->handle);
	if (ret) {
		exynos_drm_gem_destroy(exynos_gem_obj);
		return ret;
	}

	return 0;
}

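/*
 * exynos_drm_gem_get_dma_addr() looks up a handle and returns a pointer
 * to its DMA address while keeping the extra reference taken by the
 * lookup; exynos_drm_gem_put_dma_addr() drops that reference again (plus
 * the one from its own lookup), so the two calls must always be paired.
 */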
dma_addr_t *exynos_drm_gem_get_dma_addr(struct drm_device *dev,
					unsigned int gem_handle,
					struct drm_file *filp)
{
	struct exynos_drm_gem_obj *exynos_gem_obj;
	struct drm_gem_object *obj;

	obj = drm_gem_object_lookup(dev, filp, gem_handle);
	if (!obj) {
		DRM_ERROR("failed to lookup gem object.\n");
		return ERR_PTR(-EINVAL);
	}

	exynos_gem_obj = to_exynos_gem_obj(obj);

	return &exynos_gem_obj->dma_addr;
}

void exynos_drm_gem_put_dma_addr(struct drm_device *dev,
				unsigned int gem_handle,
				struct drm_file *filp)
{
	struct drm_gem_object *obj;

	obj = drm_gem_object_lookup(dev, filp, gem_handle);
	if (!obj) {
		DRM_ERROR("failed to lookup gem object.\n");
		return;
	}

	drm_gem_object_unreference_unlocked(obj);

	/*
	 * decrease obj->refcount one more time because it has already
	 * been increased at exynos_drm_gem_get_dma_addr().
	 */
	drm_gem_object_unreference_unlocked(obj);
}

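/*
 * Map the whole buffer into a userspace VMA with dma_mmap_attrs(), using
 * the same DMA attributes the buffer was allocated with.  The requested
 * VMA must not be larger than the buffer.
 */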
int exynos_drm_gem_mmap_buffer(struct exynos_drm_gem_obj *exynos_gem_obj,
			       struct vm_area_struct *vma)
{
	struct drm_device *drm_dev = exynos_gem_obj->base.dev;
	unsigned long vm_size;
	int ret;

	vma->vm_flags &= ~VM_PFNMAP;
	vma->vm_pgoff = 0;

	vm_size = vma->vm_end - vma->vm_start;

	/* check if user-requested size is valid. */
	if (vm_size > exynos_gem_obj->size)
		return -EINVAL;

	ret = dma_mmap_attrs(drm_dev->dev, vma, exynos_gem_obj->pages,
			     exynos_gem_obj->dma_addr, exynos_gem_obj->size,
			     &exynos_gem_obj->dma_attrs);
	if (ret < 0) {
		DRM_ERROR("failed to mmap.\n");
		return ret;
	}

	return 0;
}

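/*
 * Handler for the EXYNOS_GEM_GET ioctl: report the flags and size of an
 * existing GEM object back to userspace through struct drm_exynos_gem_info.
 */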
int exynos_drm_gem_get_ioctl(struct drm_device *dev, void *data,
			     struct drm_file *file_priv)
{
	struct exynos_drm_gem_obj *exynos_gem_obj;
	struct drm_exynos_gem_info *args = data;
	struct drm_gem_object *obj;

	mutex_lock(&dev->struct_mutex);

	obj = drm_gem_object_lookup(dev, file_priv, args->handle);
	if (!obj) {
		DRM_ERROR("failed to lookup gem object.\n");
		mutex_unlock(&dev->struct_mutex);
		return -EINVAL;
	}

	exynos_gem_obj = to_exynos_gem_obj(obj);

	args->flags = exynos_gem_obj->flags;
	args->size = exynos_gem_obj->size;

	drm_gem_object_unreference(obj);
	mutex_unlock(&dev->struct_mutex);

	return 0;
}

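/*
 * Map and unmap a scatter-gather table for DMA in the given direction;
 * the mapping is performed under the device's struct_mutex.
 */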
int exynos_gem_map_sgt_with_dma(struct drm_device *drm_dev,
				struct sg_table *sgt,
				enum dma_data_direction dir)
{
	int nents;

	mutex_lock(&drm_dev->struct_mutex);

	nents = dma_map_sg(drm_dev->dev, sgt->sgl, sgt->nents, dir);
	if (!nents) {
		DRM_ERROR("failed to map sgl with dma.\n");
		mutex_unlock(&drm_dev->struct_mutex);
		return nents;
	}

	mutex_unlock(&drm_dev->struct_mutex);
	return 0;
}

void exynos_gem_unmap_sgt_from_dma(struct drm_device *drm_dev,
				struct sg_table *sgt,
				enum dma_data_direction dir)
{
	dma_unmap_sg(drm_dev->dev, sgt->sgl, sgt->nents, dir);
}

void exynos_drm_gem_free_object(struct drm_gem_object *obj)
{
	exynos_drm_gem_destroy(to_exynos_gem_obj(obj));
}

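/*
 * Dumb-buffer allocation for the framebuffer/KMS path.  The pitch is the
 * width times the bytes per pixel (bpp rounded up to whole bytes), e.g.
 * for a 1920-pixel-wide, 32 bpp buffer:
 *
 *	pitch = 1920 * ((32 + 7) / 8) = 1920 * 4 = 7680 bytes
 *
 * and size = pitch * height.  Physically contiguous memory is only
 * requested when no IOMMU is available.
 */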
int exynos_drm_gem_dumb_create(struct drm_file *file_priv,
			       struct drm_device *dev,
			       struct drm_mode_create_dumb *args)
{
	struct exynos_drm_gem_obj *exynos_gem_obj;
	unsigned int flags;
	int ret;

	/*
	 * allocate memory to be used for framebuffer.
	 * - this callback is called by a user application
	 *   with the DRM_IOCTL_MODE_CREATE_DUMB command.
	 */

	args->pitch = args->width * ((args->bpp + 7) / 8);
	args->size = args->pitch * args->height;

	if (is_drm_iommu_supported(dev))
		flags = EXYNOS_BO_NONCONTIG | EXYNOS_BO_WC;
	else
		flags = EXYNOS_BO_CONTIG | EXYNOS_BO_WC;

	exynos_gem_obj = exynos_drm_gem_create(dev, flags, args->size);
	if (IS_ERR(exynos_gem_obj)) {
		dev_warn(dev->dev, "FB allocation failed.\n");
		return PTR_ERR(exynos_gem_obj);
	}

	ret = exynos_drm_gem_handle_create(&exynos_gem_obj->base, file_priv,
					   &args->handle);
	if (ret) {
		exynos_drm_gem_destroy(exynos_gem_obj);
		return ret;
	}

	return 0;
}

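/*
 * Return the fake mmap offset a user application passes to mmap() in
 * order to map a dumb buffer; the offset comes from the object's
 * drm_vma_manager node.
 */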
int exynos_drm_gem_dumb_map_offset(struct drm_file *file_priv,
				   struct drm_device *dev, uint32_t handle,
				   uint64_t *offset)
{
	struct drm_gem_object *obj;
	int ret = 0;

	mutex_lock(&dev->struct_mutex);

	/*
	 * get the offset of the memory allocated for a drm framebuffer.
	 * - this callback is called by a user application
	 *   with the DRM_IOCTL_MODE_MAP_DUMB command.
	 */

	obj = drm_gem_object_lookup(dev, file_priv, handle);
	if (!obj) {
		DRM_ERROR("failed to lookup gem object.\n");
		ret = -EINVAL;
		goto unlock;
	}

	ret = drm_gem_create_mmap_offset(obj);
	if (ret)
		goto out;

	*offset = drm_vma_node_offset_addr(&obj->vma_node);
	DRM_DEBUG_KMS("offset = 0x%lx\n", (unsigned long)*offset);

out:
	drm_gem_object_unreference(obj);
unlock:
	mutex_unlock(&dev->struct_mutex);
	return ret;
}

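/*
 * Page-fault handler for mappings set up through exynos_drm_gem_mmap():
 * the faulting address is translated into a page index within the buffer
 * and the corresponding entry of the pages[] array is inserted into the
 * VMA.
 */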
int exynos_drm_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
	struct drm_gem_object *obj = vma->vm_private_data;
	struct exynos_drm_gem_obj *exynos_gem_obj = to_exynos_gem_obj(obj);
	unsigned long pfn;
	pgoff_t page_offset;
	int ret;

	page_offset = ((unsigned long)vmf->virtual_address -
			vma->vm_start) >> PAGE_SHIFT;

	if (page_offset >= (exynos_gem_obj->size >> PAGE_SHIFT)) {
		DRM_ERROR("invalid page offset\n");
		ret = -EINVAL;
		goto out;
	}

	pfn = page_to_pfn(exynos_gem_obj->pages[page_offset]);
	ret = vm_insert_mixed(vma, (unsigned long)vmf->virtual_address, pfn);

out:
	switch (ret) {
	case 0:
	case -ERESTARTSYS:
	case -EINTR:
		return VM_FAULT_NOPAGE;
	case -ENOMEM:
		return VM_FAULT_OOM;
	default:
		return VM_FAULT_SIGBUS;
	}
}

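/*
 * mmap() entry point: after the generic drm_gem_mmap() setup, the page
 * protection is chosen from the buffer flags (cacheable, write-combined
 * or non-cached) and the buffer itself is mapped into the VMA.
 */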
int exynos_drm_gem_mmap(struct file *filp, struct vm_area_struct *vma)
{
	struct exynos_drm_gem_obj *exynos_gem_obj;
	struct drm_gem_object *obj;
	int ret;

	/* set vm_area_struct. */
	ret = drm_gem_mmap(filp, vma);
	if (ret < 0) {
		DRM_ERROR("failed to mmap.\n");
		return ret;
	}

	obj = vma->vm_private_data;
	exynos_gem_obj = to_exynos_gem_obj(obj);

	DRM_DEBUG_KMS("flags = 0x%x\n", exynos_gem_obj->flags);

	/* non-cachable as default. */
	if (exynos_gem_obj->flags & EXYNOS_BO_CACHABLE)
		vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
	else if (exynos_gem_obj->flags & EXYNOS_BO_WC)
		vma->vm_page_prot =
			pgprot_writecombine(vm_get_page_prot(vma->vm_flags));
	else
		vma->vm_page_prot =
			pgprot_noncached(vm_get_page_prot(vma->vm_flags));

	ret = exynos_drm_gem_mmap_buffer(exynos_gem_obj, vma);
	if (ret)
		goto err_close_vm;

	return ret;

err_close_vm:
	drm_gem_vm_close(vma);
	drm_gem_free_mmap_offset(obj);

	return ret;
}

/* low-level interface prime helpers */
struct sg_table *exynos_drm_gem_prime_get_sg_table(struct drm_gem_object *obj)
{
	struct exynos_drm_gem_obj *exynos_gem_obj = to_exynos_gem_obj(obj);
	int npages;

	npages = exynos_gem_obj->size >> PAGE_SHIFT;

	return drm_prime_pages_to_sg(exynos_gem_obj->pages, npages);
}

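/*
 * Import a PRIME buffer: a GEM object is wrapped around the attached
 * dma-buf, its DMA address is taken from the scatter-gather table, and a
 * pages[] array is rebuilt so faulting and mmap keep working.  A single
 * sg entry is treated as a physically contiguous buffer.
 */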
struct drm_gem_object *
exynos_drm_gem_prime_import_sg_table(struct drm_device *dev,
				     struct dma_buf_attachment *attach,
				     struct sg_table *sgt)
{
	struct exynos_drm_gem_obj *exynos_gem_obj;
	int npages;
	int ret;

	exynos_gem_obj = exynos_drm_gem_init(dev, attach->dmabuf->size);
	if (IS_ERR(exynos_gem_obj)) {
		ret = PTR_ERR(exynos_gem_obj);
		return ERR_PTR(ret);
	}

	exynos_gem_obj->dma_addr = sg_dma_address(sgt->sgl);

	npages = exynos_gem_obj->size >> PAGE_SHIFT;
	exynos_gem_obj->pages = drm_malloc_ab(npages, sizeof(struct page *));
	if (!exynos_gem_obj->pages) {
		ret = -ENOMEM;
		goto err;
	}

	ret = drm_prime_sg_to_page_addr_arrays(sgt, exynos_gem_obj->pages, NULL,
					       npages);
	if (ret < 0)
		goto err_free_large;

	exynos_gem_obj->sgt = sgt;

	if (sgt->nents == 1) {
		/* always physically contiguous memory if sgt->nents is 1. */
		exynos_gem_obj->flags |= EXYNOS_BO_CONTIG;
	} else {
		/*
		 * this case could be CONTIG or NONCONTIG type but for now
		 * sets NONCONTIG.
		 * TODO: find a way for the exporter to tell the importer
		 * which type its buffer actually is.
		 */
		exynos_gem_obj->flags |= EXYNOS_BO_NONCONTIG;
	}

	return &exynos_gem_obj->base;

err_free_large:
	drm_free_large(exynos_gem_obj->pages);
err:
	drm_gem_object_release(&exynos_gem_obj->base);
	kfree(exynos_gem_obj);
	return ERR_PTR(ret);
}

void *exynos_drm_gem_prime_vmap(struct drm_gem_object *obj)
{
	return NULL;
}

void exynos_drm_gem_prime_vunmap(struct drm_gem_object *obj, void *vaddr)
{
	/* Nothing to do */
}