/* exynos_drm_gem.c
 *
 * Copyright (c) 2011 Samsung Electronics Co., Ltd.
 * Author: Inki Dae <inki.dae@samsung.com>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the
 * Free Software Foundation; either version 2 of the License, or (at your
 * option) any later version.
 */

#include <drm/drmP.h>
#include <drm/drm_vma_manager.h>

#include <linux/shmem_fs.h>
#include <linux/dma-buf.h>
#include <linux/pfn_t.h>
#include <drm/exynos_drm.h>

#include "exynos_drm_drv.h"
#include "exynos_drm_gem.h"
#include "exynos_drm_iommu.h"

static int exynos_drm_alloc_buf(struct exynos_drm_gem *exynos_gem)
{
	struct drm_device *dev = exynos_gem->base.dev;
	enum dma_attr attr;
	unsigned int nr_pages;
	struct sg_table sgt;
	int ret = -ENOMEM;

	if (exynos_gem->dma_addr) {
		DRM_DEBUG_KMS("already allocated.\n");
		return 0;
	}

	init_dma_attrs(&exynos_gem->dma_attrs);

	/*
	 * If EXYNOS_BO_CONTIG is set, a fully physically contiguous
	 * memory region is allocated; otherwise the allocation is made
	 * as physically contiguous as possible.
	 */
	if (!(exynos_gem->flags & EXYNOS_BO_NONCONTIG))
		dma_set_attr(DMA_ATTR_FORCE_CONTIGUOUS, &exynos_gem->dma_attrs);

	/*
	 * Use a write-combined mapping if EXYNOS_BO_WC is set or
	 * EXYNOS_BO_CACHABLE is not (i.e. EXYNOS_BO_NONCACHABLE);
	 * otherwise use a cachable mapping.
	 */
	if (exynos_gem->flags & EXYNOS_BO_WC ||
	    !(exynos_gem->flags & EXYNOS_BO_CACHABLE))
		attr = DMA_ATTR_WRITE_COMBINE;
	else
		attr = DMA_ATTR_NON_CONSISTENT;

	dma_set_attr(attr, &exynos_gem->dma_attrs);
	dma_set_attr(DMA_ATTR_NO_KERNEL_MAPPING, &exynos_gem->dma_attrs);

	nr_pages = exynos_gem->size >> PAGE_SHIFT;

	exynos_gem->pages = drm_calloc_large(nr_pages, sizeof(struct page *));
	if (!exynos_gem->pages) {
		DRM_ERROR("failed to allocate pages.\n");
		return -ENOMEM;
	}

	exynos_gem->cookie = dma_alloc_attrs(to_dma_dev(dev), exynos_gem->size,
					     &exynos_gem->dma_addr, GFP_KERNEL,
					     &exynos_gem->dma_attrs);
	if (!exynos_gem->cookie) {
		DRM_ERROR("failed to allocate buffer.\n");
		goto err_free;
	}

	ret = dma_get_sgtable_attrs(to_dma_dev(dev), &sgt, exynos_gem->cookie,
				    exynos_gem->dma_addr, exynos_gem->size,
				    &exynos_gem->dma_attrs);
	if (ret < 0) {
		DRM_ERROR("failed to get sgtable.\n");
		goto err_dma_free;
	}

	if (drm_prime_sg_to_page_addr_arrays(&sgt, exynos_gem->pages, NULL,
					     nr_pages)) {
		DRM_ERROR("invalid sgtable.\n");
		ret = -EINVAL;
		goto err_sgt_free;
	}

	sg_free_table(&sgt);

	DRM_DEBUG_KMS("dma_addr(0x%lx), size(0x%lx)\n",
		      (unsigned long)exynos_gem->dma_addr, exynos_gem->size);

	return 0;

err_sgt_free:
	sg_free_table(&sgt);
err_dma_free:
	dma_free_attrs(to_dma_dev(dev), exynos_gem->size, exynos_gem->cookie,
		       exynos_gem->dma_addr, &exynos_gem->dma_attrs);
err_free:
	drm_free_large(exynos_gem->pages);

	return ret;
}

static void exynos_drm_free_buf(struct exynos_drm_gem *exynos_gem)
{
	struct drm_device *dev = exynos_gem->base.dev;

	if (!exynos_gem->dma_addr) {
		DRM_DEBUG_KMS("dma_addr is invalid.\n");
		return;
	}

	DRM_DEBUG_KMS("dma_addr(0x%lx), size(0x%lx)\n",
		      (unsigned long)exynos_gem->dma_addr, exynos_gem->size);

	dma_free_attrs(to_dma_dev(dev), exynos_gem->size, exynos_gem->cookie,
		       (dma_addr_t)exynos_gem->dma_addr,
		       &exynos_gem->dma_attrs);

	drm_free_large(exynos_gem->pages);
}

static int exynos_drm_gem_handle_create(struct drm_gem_object *obj,
					struct drm_file *file_priv,
					unsigned int *handle)
{
	int ret;

	/*
	 * Allocate an id from the idr table, under which the object is
	 * registered; the returned handle is the id user space sees.
	 */
	ret = drm_gem_handle_create(file_priv, obj, handle);
	if (ret)
		return ret;

	DRM_DEBUG_KMS("gem handle = 0x%x\n", *handle);

	/* drop reference from allocate - handle holds it now. */
	drm_gem_object_unreference_unlocked(obj);

	return 0;
}

void exynos_drm_gem_destroy(struct exynos_drm_gem *exynos_gem)
{
	struct drm_gem_object *obj = &exynos_gem->base;

	DRM_DEBUG_KMS("handle count = %d\n", obj->handle_count);

	/*
	 * Do not release a memory region that came from an exporter;
	 * the exporter releases it once the dmabuf's refcount drops
	 * to zero.
	 */
	if (obj->import_attach)
		drm_prime_gem_destroy(obj, exynos_gem->sgt);
	else
		exynos_drm_free_buf(exynos_gem);

	/* release file pointer to gem object. */
	drm_gem_object_release(obj);

	kfree(exynos_gem);
}

unsigned long exynos_drm_gem_get_size(struct drm_device *dev,
				      unsigned int gem_handle,
				      struct drm_file *file_priv)
{
	struct exynos_drm_gem *exynos_gem;
	struct drm_gem_object *obj;

	obj = drm_gem_object_lookup(dev, file_priv, gem_handle);
	if (!obj) {
		DRM_ERROR("failed to lookup gem object.\n");
		return 0;
	}

	exynos_gem = to_exynos_gem(obj);

	drm_gem_object_unreference_unlocked(obj);

	return exynos_gem->size;
}

static struct exynos_drm_gem *exynos_drm_gem_init(struct drm_device *dev,
						  unsigned long size)
{
	struct exynos_drm_gem *exynos_gem;
	struct drm_gem_object *obj;
	int ret;

	exynos_gem = kzalloc(sizeof(*exynos_gem), GFP_KERNEL);
	if (!exynos_gem)
		return ERR_PTR(-ENOMEM);

	exynos_gem->size = size;
	obj = &exynos_gem->base;

	ret = drm_gem_object_init(dev, obj, size);
	if (ret < 0) {
		DRM_ERROR("failed to initialize gem object\n");
		kfree(exynos_gem);
		return ERR_PTR(ret);
	}

	ret = drm_gem_create_mmap_offset(obj);
	if (ret < 0) {
		drm_gem_object_release(obj);
		kfree(exynos_gem);
		return ERR_PTR(ret);
	}

	DRM_DEBUG_KMS("created file object = %p\n", obj->filp);

	return exynos_gem;
}

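/*
 * exynos_drm_gem_create - allocate a new GEM object and its backing buffer.
 * @flags selects the memory type and cache attributes: EXYNOS_BO_CONTIG or
 * EXYNOS_BO_NONCONTIG for placement, and EXYNOS_BO_CACHABLE, EXYNOS_BO_WC or
 * EXYNOS_BO_NONCACHABLE for the mapping attribute (see
 * exynos_drm_alloc_buf() above). @size is rounded up to PAGE_SIZE.
 */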
struct exynos_drm_gem *exynos_drm_gem_create(struct drm_device *dev,
					     unsigned int flags,
					     unsigned long size)
{
	struct exynos_drm_gem *exynos_gem;
	int ret;

	if (flags & ~(EXYNOS_BO_MASK)) {
		DRM_ERROR("invalid flags.\n");
		return ERR_PTR(-EINVAL);
	}

	if (!size) {
		DRM_ERROR("invalid size.\n");
		return ERR_PTR(-EINVAL);
	}

	size = roundup(size, PAGE_SIZE);

	exynos_gem = exynos_drm_gem_init(dev, size);
	if (IS_ERR(exynos_gem))
		return exynos_gem;

	/* set memory type and cache attribute from user side. */
	exynos_gem->flags = flags;

	ret = exynos_drm_alloc_buf(exynos_gem);
	if (ret < 0) {
		drm_gem_object_release(&exynos_gem->base);
		kfree(exynos_gem);
		return ERR_PTR(ret);
	}

	return exynos_gem;
}

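/*
 * exynos_drm_gem_create_ioctl - DRM_IOCTL_EXYNOS_GEM_CREATE handler.
 *
 * Hypothetical user-space sketch (values are examples only, error
 * handling omitted):
 *
 *	struct drm_exynos_gem_create req = {
 *		.size  = 4096,
 *		.flags = EXYNOS_BO_NONCONTIG | EXYNOS_BO_WC,
 *	};
 *
 *	ioctl(fd, DRM_IOCTL_EXYNOS_GEM_CREATE, &req);
 *	(on success, req.handle names the new buffer)
 */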
int exynos_drm_gem_create_ioctl(struct drm_device *dev, void *data,
				struct drm_file *file_priv)
{
	struct drm_exynos_gem_create *args = data;
	struct exynos_drm_gem *exynos_gem;
	int ret;

	exynos_gem = exynos_drm_gem_create(dev, args->flags, args->size);
	if (IS_ERR(exynos_gem))
		return PTR_ERR(exynos_gem);

	ret = exynos_drm_gem_handle_create(&exynos_gem->base, file_priv,
					   &args->handle);
	if (ret) {
		exynos_drm_gem_destroy(exynos_gem);
		return ret;
	}

	return 0;
}

int exynos_drm_gem_map_ioctl(struct drm_device *dev, void *data,
			     struct drm_file *file_priv)
{
	struct drm_exynos_gem_map *args = data;

	return exynos_drm_gem_dumb_map_offset(file_priv, dev, args->handle,
					      &args->offset);
}

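/*
 * exynos_drm_gem_get_dma_addr - look up a GEM object and return a pointer
 * to its DMA address. The reference taken by the lookup is deliberately
 * kept until exynos_drm_gem_put_dma_addr() drops it again.
 *
 * Sketch of the intended pairing (hypothetical kernel-side caller):
 *
 *	dma_addr_t *addr = exynos_drm_gem_get_dma_addr(dev, handle, filp);
 *
 *	if (!IS_ERR(addr)) {
 *		... program the device with *addr ...
 *		exynos_drm_gem_put_dma_addr(dev, handle, filp);
 *	}
 */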
dma_addr_t *exynos_drm_gem_get_dma_addr(struct drm_device *dev,
					unsigned int gem_handle,
					struct drm_file *filp)
{
	struct exynos_drm_gem *exynos_gem;
	struct drm_gem_object *obj;

	obj = drm_gem_object_lookup(dev, filp, gem_handle);
	if (!obj) {
		DRM_ERROR("failed to lookup gem object.\n");
		return ERR_PTR(-EINVAL);
	}

	exynos_gem = to_exynos_gem(obj);

	return &exynos_gem->dma_addr;
}

void exynos_drm_gem_put_dma_addr(struct drm_device *dev,
				 unsigned int gem_handle,
				 struct drm_file *filp)
{
	struct drm_gem_object *obj;

	obj = drm_gem_object_lookup(dev, filp, gem_handle);
	if (!obj) {
		DRM_ERROR("failed to lookup gem object.\n");
		return;
	}

	drm_gem_object_unreference_unlocked(obj);

	/*
	 * Decrease obj->refcount one more time because we have already
	 * increased it at exynos_drm_gem_get_dma_addr().
	 */
	drm_gem_object_unreference_unlocked(obj);
}

static int exynos_drm_gem_mmap_buffer(struct exynos_drm_gem *exynos_gem,
				      struct vm_area_struct *vma)
{
	struct drm_device *drm_dev = exynos_gem->base.dev;
	unsigned long vm_size;
	int ret;

	vma->vm_flags &= ~VM_PFNMAP;
	vma->vm_pgoff = 0;

	vm_size = vma->vm_end - vma->vm_start;

	/* check if user-requested size is valid. */
	if (vm_size > exynos_gem->size)
		return -EINVAL;

	ret = dma_mmap_attrs(to_dma_dev(drm_dev), vma, exynos_gem->cookie,
			     exynos_gem->dma_addr, exynos_gem->size,
			     &exynos_gem->dma_attrs);
	if (ret < 0) {
		DRM_ERROR("failed to mmap.\n");
		return ret;
	}

	return 0;
}

int exynos_drm_gem_get_ioctl(struct drm_device *dev, void *data,
			     struct drm_file *file_priv)
{
	struct exynos_drm_gem *exynos_gem;
	struct drm_exynos_gem_info *args = data;
	struct drm_gem_object *obj;

	mutex_lock(&dev->struct_mutex);

	obj = drm_gem_object_lookup(dev, file_priv, args->handle);
	if (!obj) {
		DRM_ERROR("failed to lookup gem object.\n");
		mutex_unlock(&dev->struct_mutex);
		return -EINVAL;
	}

	exynos_gem = to_exynos_gem(obj);

	args->flags = exynos_gem->flags;
	args->size = exynos_gem->size;

	drm_gem_object_unreference(obj);
	mutex_unlock(&dev->struct_mutex);

	return 0;
}

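/*
 * exynos_gem_map_sgt_with_dma - map an sg table for device DMA. Pairs with
 * exynos_gem_unmap_sgt_from_dma(); the same direction is expected for both
 * calls, e.g. (hypothetical caller):
 *
 *	ret = exynos_gem_map_sgt_with_dma(drm_dev, sgt, DMA_BIDIRECTIONAL);
 *	if (!ret) {
 *		... DMA to/from the buffer ...
 *		exynos_gem_unmap_sgt_from_dma(drm_dev, sgt, DMA_BIDIRECTIONAL);
 *	}
 */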
int exynos_gem_map_sgt_with_dma(struct drm_device *drm_dev,
				struct sg_table *sgt,
				enum dma_data_direction dir)
{
	int nents;

	mutex_lock(&drm_dev->struct_mutex);

	nents = dma_map_sg(to_dma_dev(drm_dev), sgt->sgl, sgt->nents, dir);
	if (!nents) {
		DRM_ERROR("failed to map sgl with dma.\n");
		mutex_unlock(&drm_dev->struct_mutex);
		/* dma_map_sg() reports failure as 0; return a real error. */
		return -EIO;
	}

	mutex_unlock(&drm_dev->struct_mutex);
	return 0;
}

void exynos_gem_unmap_sgt_from_dma(struct drm_device *drm_dev,
				   struct sg_table *sgt,
				   enum dma_data_direction dir)
{
	dma_unmap_sg(to_dma_dev(drm_dev), sgt->sgl, sgt->nents, dir);
}

void exynos_drm_gem_free_object(struct drm_gem_object *obj)
{
	exynos_drm_gem_destroy(to_exynos_gem(obj));
}

int exynos_drm_gem_dumb_create(struct drm_file *file_priv,
			       struct drm_device *dev,
			       struct drm_mode_create_dumb *args)
{
	struct exynos_drm_gem *exynos_gem;
	unsigned int flags;
	int ret;

	/*
	 * Allocate memory to be used for a framebuffer.
	 * - this callback is invoked on behalf of user space via the
	 *   DRM_IOCTL_MODE_CREATE_DUMB ioctl.
	 */

	args->pitch = args->width * ((args->bpp + 7) / 8);
	args->size = args->pitch * args->height;
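	/*
	 * e.g. for a hypothetical 1920x1080 XRGB8888 buffer: bpp = 32,
	 * so pitch = 1920 * 4 = 7680 bytes and
	 * size = 7680 * 1080 = 8294400 bytes.
	 */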

	if (is_drm_iommu_supported(dev))
		flags = EXYNOS_BO_NONCONTIG | EXYNOS_BO_WC;
	else
		flags = EXYNOS_BO_CONTIG | EXYNOS_BO_WC;

	exynos_gem = exynos_drm_gem_create(dev, flags, args->size);
	if (IS_ERR(exynos_gem)) {
		dev_warn(dev->dev, "FB allocation failed.\n");
		return PTR_ERR(exynos_gem);
	}

	ret = exynos_drm_gem_handle_create(&exynos_gem->base, file_priv,
					   &args->handle);
	if (ret) {
		exynos_drm_gem_destroy(exynos_gem);
		return ret;
	}

	return 0;
}

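/*
 * exynos_drm_gem_dumb_map_offset - report the fake mmap offset of a dumb
 * buffer so user space can mmap() it.
 *
 * Hypothetical user-space flow (sketch, error handling omitted):
 *
 *	struct drm_mode_create_dumb create = {
 *		.width = 1920, .height = 1080, .bpp = 32,
 *	};
 *	struct drm_mode_map_dumb map = { 0 };
 *
 *	ioctl(fd, DRM_IOCTL_MODE_CREATE_DUMB, &create);
 *	map.handle = create.handle;
 *	ioctl(fd, DRM_IOCTL_MODE_MAP_DUMB, &map);
 *	ptr = mmap(NULL, create.size, PROT_READ | PROT_WRITE, MAP_SHARED,
 *		   fd, map.offset);
 */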
int exynos_drm_gem_dumb_map_offset(struct drm_file *file_priv,
				   struct drm_device *dev, uint32_t handle,
				   uint64_t *offset)
{
	struct drm_gem_object *obj;
	int ret = 0;

	mutex_lock(&dev->struct_mutex);

	/*
	 * Get the offset of the memory allocated for a drm framebuffer.
	 * - this callback is invoked on behalf of user space via the
	 *   DRM_IOCTL_MODE_MAP_DUMB ioctl.
	 */

	obj = drm_gem_object_lookup(dev, file_priv, handle);
	if (!obj) {
		DRM_ERROR("failed to lookup gem object.\n");
		ret = -EINVAL;
		goto unlock;
	}

	*offset = drm_vma_node_offset_addr(&obj->vma_node);
	DRM_DEBUG_KMS("offset = 0x%lx\n", (unsigned long)*offset);

	drm_gem_object_unreference(obj);
unlock:
	mutex_unlock(&dev->struct_mutex);
	return ret;
}

int exynos_drm_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
	struct drm_gem_object *obj = vma->vm_private_data;
	struct exynos_drm_gem *exynos_gem = to_exynos_gem(obj);
	unsigned long pfn;
	pgoff_t page_offset;
	int ret;

	page_offset = ((unsigned long)vmf->virtual_address -
			vma->vm_start) >> PAGE_SHIFT;

	if (page_offset >= (exynos_gem->size >> PAGE_SHIFT)) {
		DRM_ERROR("invalid page offset\n");
		ret = -EINVAL;
		goto out;
	}

	pfn = page_to_pfn(exynos_gem->pages[page_offset]);
	ret = vm_insert_mixed(vma, (unsigned long)vmf->virtual_address,
			      __pfn_to_pfn_t(pfn, PFN_DEV));

out:
	switch (ret) {
	case 0:
	case -ERESTARTSYS:
	case -EINTR:
		return VM_FAULT_NOPAGE;
	case -ENOMEM:
		return VM_FAULT_OOM;
	default:
		return VM_FAULT_SIGBUS;
	}
}

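/*
 * exynos_drm_gem_mmap - set up a user-space mapping of a GEM buffer. The
 * cache attributes follow the buffer flags: EXYNOS_BO_CACHABLE yields a
 * cached mapping, EXYNOS_BO_WC a write-combined one, and anything else a
 * non-cached mapping.
 */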
int exynos_drm_gem_mmap(struct file *filp, struct vm_area_struct *vma)
{
	struct exynos_drm_gem *exynos_gem;
	struct drm_gem_object *obj;
	int ret;

	/* set up the vm_area_struct. */
	ret = drm_gem_mmap(filp, vma);
	if (ret < 0) {
		DRM_ERROR("failed to mmap.\n");
		return ret;
	}

	obj = vma->vm_private_data;
	exynos_gem = to_exynos_gem(obj);

	DRM_DEBUG_KMS("flags = 0x%x\n", exynos_gem->flags);

	/* non-cachable by default. */
	if (exynos_gem->flags & EXYNOS_BO_CACHABLE)
		vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
	else if (exynos_gem->flags & EXYNOS_BO_WC)
		vma->vm_page_prot =
			pgprot_writecombine(vm_get_page_prot(vma->vm_flags));
	else
		vma->vm_page_prot =
			pgprot_noncached(vm_get_page_prot(vma->vm_flags));

	ret = exynos_drm_gem_mmap_buffer(exynos_gem, vma);
	if (ret)
		goto err_close_vm;

	return ret;

err_close_vm:
	drm_gem_vm_close(vma);

	return ret;
}

/* low-level interface prime helpers */
struct sg_table *exynos_drm_gem_prime_get_sg_table(struct drm_gem_object *obj)
{
	struct exynos_drm_gem *exynos_gem = to_exynos_gem(obj);
	int npages;

	npages = exynos_gem->size >> PAGE_SHIFT;

	return drm_prime_pages_to_sg(exynos_gem->pages, npages);
}

struct drm_gem_object *
exynos_drm_gem_prime_import_sg_table(struct drm_device *dev,
				     struct dma_buf_attachment *attach,
				     struct sg_table *sgt)
{
	struct exynos_drm_gem *exynos_gem;
	int npages;
	int ret;

	exynos_gem = exynos_drm_gem_init(dev, attach->dmabuf->size);
	if (IS_ERR(exynos_gem)) {
		ret = PTR_ERR(exynos_gem);
		return ERR_PTR(ret);
	}

	exynos_gem->dma_addr = sg_dma_address(sgt->sgl);

	npages = exynos_gem->size >> PAGE_SHIFT;
	exynos_gem->pages = drm_malloc_ab(npages, sizeof(struct page *));
	if (!exynos_gem->pages) {
		ret = -ENOMEM;
		goto err;
	}

	ret = drm_prime_sg_to_page_addr_arrays(sgt, exynos_gem->pages, NULL,
					       npages);
	if (ret < 0)
		goto err_free_large;

	exynos_gem->sgt = sgt;

	if (sgt->nents == 1) {
		/* always physically contiguous memory if sgt->nents is 1. */
		exynos_gem->flags |= EXYNOS_BO_CONTIG;
	} else {
		/*
		 * This case could be either CONTIG or NONCONTIG, but for
		 * now we set NONCONTIG.
		 * TODO: find a way for the exporter to notify the importer
		 * of its own buffer type.
		 */
		exynos_gem->flags |= EXYNOS_BO_NONCONTIG;
	}

	return &exynos_gem->base;

err_free_large:
	drm_free_large(exynos_gem->pages);
err:
	drm_gem_object_release(&exynos_gem->base);
	kfree(exynos_gem);
	return ERR_PTR(ret);
}

void *exynos_drm_gem_prime_vmap(struct drm_gem_object *obj)
{
	/*
	 * No kernel mapping is available: buffers are allocated with
	 * DMA_ATTR_NO_KERNEL_MAPPING (see exynos_drm_alloc_buf()).
	 */
	return NULL;
}

void exynos_drm_gem_prime_vunmap(struct drm_gem_object *obj, void *vaddr)
{
	/* Nothing to do */
}