/* exynos_drm_gem.c
 *
 * Copyright (c) 2011 Samsung Electronics Co., Ltd.
 * Author: Inki Dae <inki.dae@samsung.com>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the
 * Free Software Foundation; either version 2 of the License, or (at your
 * option) any later version.
 */

#include <drm/drmP.h>

#include <linux/shmem_fs.h>
#include <drm/exynos_drm.h>

#include "exynos_drm_drv.h"
#include "exynos_drm_gem.h"
#include "exynos_drm_buf.h"

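/* translate a kernel error code into the VM_FAULT_* code expected by the fault handler. */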
static unsigned int convert_to_vm_err_msg(int msg)
{
        unsigned int out_msg;

        switch (msg) {
        case 0:
        case -ERESTARTSYS:
        case -EINTR:
                out_msg = VM_FAULT_NOPAGE;
                break;

        case -ENOMEM:
                out_msg = VM_FAULT_OOM;
                break;

        default:
                out_msg = VM_FAULT_SIGBUS;
                break;
        }

        return out_msg;
}

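/* reject any flag bits outside EXYNOS_BO_MASK. */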
static int check_gem_flags(unsigned int flags)
{
        if (flags & ~(EXYNOS_BO_MASK)) {
                DRM_ERROR("invalid flags.\n");
                return -EINVAL;
        }

        return 0;
}

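/* set the vma's page protection according to the buffer's cache attribute flags. */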
static void update_vm_cache_attr(struct exynos_drm_gem_obj *obj,
                                 struct vm_area_struct *vma)
{
        DRM_DEBUG_KMS("flags = 0x%x\n", obj->flags);

        /* non-cacheable by default. */
        if (obj->flags & EXYNOS_BO_CACHABLE)
                vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
        else if (obj->flags & EXYNOS_BO_WC)
                vma->vm_page_prot =
                        pgprot_writecombine(vm_get_page_prot(vma->vm_flags));
        else
                vma->vm_page_prot =
                        pgprot_noncached(vm_get_page_prot(vma->vm_flags));
}

static unsigned long roundup_gem_size(unsigned long size, unsigned int flags)
{
        /* TODO */

        return roundup(size, PAGE_SIZE);
}

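/*
 * walk the buffer's scatter-gather table to find the entry backing
 * @page_offset and insert the corresponding pfn into the faulting vma.
 */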
static int exynos_drm_gem_map_buf(struct drm_gem_object *obj,
                                  struct vm_area_struct *vma,
                                  unsigned long f_vaddr,
                                  pgoff_t page_offset)
{
        struct exynos_drm_gem_obj *exynos_gem_obj = to_exynos_gem_obj(obj);
        struct exynos_drm_gem_buf *buf = exynos_gem_obj->buffer;
        struct scatterlist *sgl;
        unsigned long pfn;
        int i;

        if (!buf->sgt)
                return -EINTR;

        if (page_offset >= (buf->size >> PAGE_SHIFT)) {
                DRM_ERROR("invalid page offset\n");
                return -EINVAL;
        }

        sgl = buf->sgt->sgl;
        for_each_sg(buf->sgt->sgl, sgl, buf->sgt->nents, i) {
                if (page_offset < (sgl->length >> PAGE_SHIFT))
                        break;
                page_offset -= (sgl->length >> PAGE_SHIFT);
        }

        pfn = __phys_to_pfn(sg_phys(sgl)) + page_offset;

        return vm_insert_mixed(vma, f_vaddr, pfn);
}

static int exynos_drm_gem_handle_create(struct drm_gem_object *obj,
                                        struct drm_file *file_priv,
                                        unsigned int *handle)
{
        int ret;

        /*
         * allocate an id from the idr table where the obj is registered;
         * the handle carries the id that user space sees.
         */
        ret = drm_gem_handle_create(file_priv, obj, handle);
        if (ret)
                return ret;

        DRM_DEBUG_KMS("gem handle = 0x%x\n", *handle);

        /* drop reference from allocate - handle holds it now. */
        drm_gem_object_unreference_unlocked(obj);

        return 0;
}

void exynos_drm_gem_destroy(struct exynos_drm_gem_obj *exynos_gem_obj)
{
        struct drm_gem_object *obj;
        struct exynos_drm_gem_buf *buf;

        DRM_DEBUG_KMS("%s\n", __FILE__);

        obj = &exynos_gem_obj->base;
        buf = exynos_gem_obj->buffer;

        DRM_DEBUG_KMS("handle count = %d\n", atomic_read(&obj->handle_count));

        /*
         * do not release memory region from exporter.
         *
         * the region will be released by exporter
         * once dmabuf's refcount becomes 0.
         */
        if (obj->import_attach)
                goto out;

        exynos_drm_free_buf(obj->dev, exynos_gem_obj->flags, buf);

out:
        exynos_drm_fini_buf(obj->dev, buf);
        exynos_gem_obj->buffer = NULL;

        if (obj->map_list.map)
                drm_gem_free_mmap_offset(obj);

        /* release file pointer to gem object. */
        drm_gem_object_release(obj);

        kfree(exynos_gem_obj);
        exynos_gem_obj = NULL;
}

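/* look up the gem object for @gem_handle and return its buffer size in bytes, or 0 on failure. */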
unsigned long exynos_drm_gem_get_size(struct drm_device *dev,
                                      unsigned int gem_handle,
                                      struct drm_file *file_priv)
{
        struct exynos_drm_gem_obj *exynos_gem_obj;
        struct drm_gem_object *obj;

        obj = drm_gem_object_lookup(dev, file_priv, gem_handle);
        if (!obj) {
                DRM_ERROR("failed to lookup gem object.\n");
                return 0;
        }

        exynos_gem_obj = to_exynos_gem_obj(obj);

        drm_gem_object_unreference_unlocked(obj);

        return exynos_gem_obj->buffer->size;
}

struct exynos_drm_gem_obj *exynos_drm_gem_init(struct drm_device *dev,
                                               unsigned long size)
{
        struct exynos_drm_gem_obj *exynos_gem_obj;
        struct drm_gem_object *obj;
        int ret;

        exynos_gem_obj = kzalloc(sizeof(*exynos_gem_obj), GFP_KERNEL);
        if (!exynos_gem_obj) {
                DRM_ERROR("failed to allocate exynos gem object\n");
                return NULL;
        }

        exynos_gem_obj->size = size;
        obj = &exynos_gem_obj->base;

        ret = drm_gem_object_init(dev, obj, size);
        if (ret < 0) {
                DRM_ERROR("failed to initialize gem object\n");
                kfree(exynos_gem_obj);
                return NULL;
        }

        DRM_DEBUG_KMS("created file object = 0x%x\n", (unsigned int)obj->filp);

        return exynos_gem_obj;
}

struct exynos_drm_gem_obj *exynos_drm_gem_create(struct drm_device *dev,
                                                 unsigned int flags,
                                                 unsigned long size)
{
        struct exynos_drm_gem_obj *exynos_gem_obj;
        struct exynos_drm_gem_buf *buf;
        int ret;

        if (!size) {
                DRM_ERROR("invalid size.\n");
                return ERR_PTR(-EINVAL);
        }

        size = roundup_gem_size(size, flags);
        DRM_DEBUG_KMS("%s\n", __FILE__);

        ret = check_gem_flags(flags);
        if (ret)
                return ERR_PTR(ret);

        buf = exynos_drm_init_buf(dev, size);
        if (!buf)
                return ERR_PTR(-ENOMEM);

        exynos_gem_obj = exynos_drm_gem_init(dev, size);
        if (!exynos_gem_obj) {
                ret = -ENOMEM;
                goto err_fini_buf;
        }

        exynos_gem_obj->buffer = buf;

        /* set memory type and cache attribute from user side. */
        exynos_gem_obj->flags = flags;

        ret = exynos_drm_alloc_buf(dev, buf, flags);
        if (ret < 0) {
                drm_gem_object_release(&exynos_gem_obj->base);
                goto err_fini_buf;
        }

        return exynos_gem_obj;

err_fini_buf:
        exynos_drm_fini_buf(dev, buf);
        return ERR_PTR(ret);
}

int exynos_drm_gem_create_ioctl(struct drm_device *dev, void *data,
                                struct drm_file *file_priv)
{
        struct drm_exynos_gem_create *args = data;
        struct exynos_drm_gem_obj *exynos_gem_obj;
        int ret;

        DRM_DEBUG_KMS("%s\n", __FILE__);

        exynos_gem_obj = exynos_drm_gem_create(dev, args->flags, args->size);
        if (IS_ERR(exynos_gem_obj))
                return PTR_ERR(exynos_gem_obj);

        ret = exynos_drm_gem_handle_create(&exynos_gem_obj->base, file_priv,
                        &args->handle);
        if (ret) {
                exynos_drm_gem_destroy(exynos_gem_obj);
                return ret;
        }

        return 0;
}

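/*
 * look up the gem object for @gem_handle and return a pointer to its buffer's
 * DMA address. The object reference taken here is dropped again in
 * exynos_drm_gem_put_dma_addr().
 */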
dma_addr_t *exynos_drm_gem_get_dma_addr(struct drm_device *dev,
                                        unsigned int gem_handle,
                                        struct drm_file *filp)
{
        struct exynos_drm_gem_obj *exynos_gem_obj;
        struct drm_gem_object *obj;

        obj = drm_gem_object_lookup(dev, filp, gem_handle);
        if (!obj) {
                DRM_ERROR("failed to lookup gem object.\n");
                return ERR_PTR(-EINVAL);
        }

        exynos_gem_obj = to_exynos_gem_obj(obj);

        return &exynos_gem_obj->buffer->dma_addr;
}

void exynos_drm_gem_put_dma_addr(struct drm_device *dev,
                                 unsigned int gem_handle,
                                 struct drm_file *filp)
{
        struct exynos_drm_gem_obj *exynos_gem_obj;
        struct drm_gem_object *obj;

        obj = drm_gem_object_lookup(dev, filp, gem_handle);
        if (!obj) {
                DRM_ERROR("failed to lookup gem object.\n");
                return;
        }

        exynos_gem_obj = to_exynos_gem_obj(obj);

        drm_gem_object_unreference_unlocked(obj);

        /*
         * decrease obj->refcount one more time because we have already
         * increased it in exynos_drm_gem_get_dma_addr().
         */
        drm_gem_object_unreference_unlocked(obj);
}

int exynos_drm_gem_map_offset_ioctl(struct drm_device *dev, void *data,
                                    struct drm_file *file_priv)
{
        struct drm_exynos_gem_map_off *args = data;

        DRM_DEBUG_KMS("%s\n", __FILE__);

        DRM_DEBUG_KMS("handle = 0x%x, offset = 0x%lx\n",
                        args->handle, (unsigned long)args->offset);

        if (!(dev->driver->driver_features & DRIVER_GEM)) {
                DRM_ERROR("does not support GEM.\n");
                return -ENODEV;
        }

        return exynos_drm_gem_dumb_map_offset(file_priv, dev, args->handle,
                        &args->offset);
}

static struct drm_file *exynos_drm_find_drm_file(struct drm_device *drm_dev,
                                                 struct file *filp)
{
        struct drm_file *file_priv;

        /* find the current process's drm_file from the filelist. */
        list_for_each_entry(file_priv, &drm_dev->filelist, lhead)
                if (file_priv->filp == filp)
                        return file_priv;

        WARN_ON(1);

        return ERR_PTR(-EFAULT);
}

static int exynos_drm_gem_mmap_buffer(struct file *filp,
                                      struct vm_area_struct *vma)
{
        struct drm_gem_object *obj = filp->private_data;
        struct exynos_drm_gem_obj *exynos_gem_obj = to_exynos_gem_obj(obj);
        struct drm_device *drm_dev = obj->dev;
        struct exynos_drm_gem_buf *buffer;
        struct drm_file *file_priv;
        unsigned long vm_size;
        int ret;

        DRM_DEBUG_KMS("%s\n", __FILE__);

        vma->vm_flags |= VM_IO | VM_DONTEXPAND | VM_DONTDUMP;
        vma->vm_private_data = obj;
        vma->vm_ops = drm_dev->driver->gem_vm_ops;

        /* restore it to the driver's fops. */
        filp->f_op = fops_get(drm_dev->driver->fops);

        file_priv = exynos_drm_find_drm_file(drm_dev, filp);
        if (IS_ERR(file_priv))
                return PTR_ERR(file_priv);

        /* restore it to drm_file. */
        filp->private_data = file_priv;

        update_vm_cache_attr(exynos_gem_obj, vma);

        vm_size = vma->vm_end - vma->vm_start;

        /*
         * the buffer points to physically contiguous memory allocated
         * by user request or at framebuffer creation.
         */
        buffer = exynos_gem_obj->buffer;

        /* check if the user-requested size is valid. */
        if (vm_size > buffer->size)
                return -EINVAL;

        ret = dma_mmap_attrs(drm_dev->dev, vma, buffer->pages,
                                buffer->dma_addr, buffer->size,
                                &buffer->dma_attrs);
        if (ret < 0) {
                DRM_ERROR("failed to mmap.\n");
                return ret;
        }

        /*
         * take a reference to this mapping of the object. This reference
         * is dropped by the corresponding vm_close call.
         */
        drm_gem_object_reference(obj);

        drm_vm_open_locked(drm_dev, vma);

        return 0;
}

static const struct file_operations exynos_drm_gem_fops = {
        .mmap = exynos_drm_gem_mmap_buffer,
};

int exynos_drm_gem_mmap_ioctl(struct drm_device *dev, void *data,
                              struct drm_file *file_priv)
{
        struct drm_exynos_gem_mmap *args = data;
        struct drm_gem_object *obj;
        unsigned int addr;

        DRM_DEBUG_KMS("%s\n", __FILE__);

        if (!(dev->driver->driver_features & DRIVER_GEM)) {
                DRM_ERROR("does not support GEM.\n");
                return -ENODEV;
        }

        obj = drm_gem_object_lookup(dev, file_priv, args->handle);
        if (!obj) {
                DRM_ERROR("failed to lookup gem object.\n");
                return -EINVAL;
        }

        /*
         * We have to use the gem object and its fops for the specific
         * mmapper, but vm_mmap() can deliver only filp. So we have to change
         * filp->f_op and filp->private_data temporarily, then restore
         * them again. It is important to hold the lock until the settings
         * are restored, to prevent others from misusing filp->f_op or
         * filp->private_data.
         */
        mutex_lock(&dev->struct_mutex);

        /*
         * Set the specific mmapper's fops. It will be restored by
         * exynos_drm_gem_mmap_buffer to dev->driver->fops.
         * This is used to call the specific mapper temporarily.
         */
        file_priv->filp->f_op = &exynos_drm_gem_fops;

        /*
         * Set the gem object to private_data so that the specific mmapper
         * can get the gem object. It will be restored by
         * exynos_drm_gem_mmap_buffer to drm_file.
         */
        file_priv->filp->private_data = obj;

        addr = vm_mmap(file_priv->filp, 0, args->size,
                        PROT_READ | PROT_WRITE, MAP_SHARED, 0);

        drm_gem_object_unreference(obj);

        if (IS_ERR((void *)addr)) {
                /* check that filp->f_op and filp->private_data are restored */
                if (file_priv->filp->f_op == &exynos_drm_gem_fops) {
                        file_priv->filp->f_op = fops_get(dev->driver->fops);
                        file_priv->filp->private_data = file_priv;
                }
                mutex_unlock(&dev->struct_mutex);
                return PTR_ERR((void *)addr);
        }

        mutex_unlock(&dev->struct_mutex);

        args->mapped = addr;

        DRM_DEBUG_KMS("mapped = 0x%lx\n", (unsigned long)args->mapped);

        return 0;
}

int exynos_drm_gem_get_ioctl(struct drm_device *dev, void *data,
                             struct drm_file *file_priv)
{
        struct exynos_drm_gem_obj *exynos_gem_obj;
        struct drm_exynos_gem_info *args = data;
        struct drm_gem_object *obj;

        mutex_lock(&dev->struct_mutex);

        obj = drm_gem_object_lookup(dev, file_priv, args->handle);
        if (!obj) {
                DRM_ERROR("failed to lookup gem object.\n");
                mutex_unlock(&dev->struct_mutex);
                return -EINVAL;
        }

        exynos_gem_obj = to_exynos_gem_obj(obj);

        args->flags = exynos_gem_obj->flags;
        args->size = exynos_gem_obj->size;

        drm_gem_object_unreference(obj);
        mutex_unlock(&dev->struct_mutex);

        return 0;
}

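/*
 * duplicate the given vm_area_struct: call vm_ops->open and take a reference
 * on vm_file so the copy stays valid independently of the original mapping.
 */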
struct vm_area_struct *exynos_gem_get_vma(struct vm_area_struct *vma)
{
        struct vm_area_struct *vma_copy;

        vma_copy = kmalloc(sizeof(*vma_copy), GFP_KERNEL);
        if (!vma_copy)
                return NULL;

        if (vma->vm_ops && vma->vm_ops->open)
                vma->vm_ops->open(vma);

        if (vma->vm_file)
                get_file(vma->vm_file);

        memcpy(vma_copy, vma, sizeof(*vma));

        vma_copy->vm_mm = NULL;
        vma_copy->vm_next = NULL;
        vma_copy->vm_prev = NULL;

        return vma_copy;
}

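/* release the references taken by exynos_gem_get_vma() and free the copy. */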
void exynos_gem_put_vma(struct vm_area_struct *vma)
{
        if (!vma)
                return;

        if (vma->vm_ops && vma->vm_ops->close)
                vma->vm_ops->close(vma);

        if (vma->vm_file)
                fput(vma->vm_file);

        kfree(vma);
}

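/*
 * pin @npages pages of user memory starting at @start into @pages.
 * For VM_PFNMAP areas the pfns are resolved with follow_pfn(); otherwise
 * the pages are taken with get_user_pages().
 */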
int exynos_gem_get_pages_from_userptr(unsigned long start,
                                      unsigned int npages,
                                      struct page **pages,
                                      struct vm_area_struct *vma)
{
        int get_npages;

        /* the memory region was mmapped with VM_PFNMAP. */
        if (vma_is_io(vma)) {
                unsigned int i;

                for (i = 0; i < npages; ++i, start += PAGE_SIZE) {
                        unsigned long pfn;
                        int ret = follow_pfn(vma, start, &pfn);
                        if (ret)
                                return ret;

                        pages[i] = pfn_to_page(pfn);
                }

                if (i != npages) {
                        DRM_ERROR("failed to get user_pages.\n");
                        return -EINVAL;
                }

                return 0;
        }

        get_npages = get_user_pages(current, current->mm, start,
                                        npages, 1, 1, pages, NULL);
        get_npages = max(get_npages, 0);
        if (get_npages != npages) {
                DRM_ERROR("failed to get user_pages.\n");
                while (get_npages)
                        put_page(pages[--get_npages]);
                return -EFAULT;
        }

        return 0;
}

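/*
 * for non-IO mappings, mark the pinned pages dirty and drop the references
 * taken in exynos_gem_get_pages_from_userptr().
 */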
void exynos_gem_put_pages_to_userptr(struct page **pages,
                                     unsigned int npages,
                                     struct vm_area_struct *vma)
{
        if (!vma_is_io(vma)) {
                unsigned int i;

                for (i = 0; i < npages; i++) {
                        set_page_dirty_lock(pages[i]);

                        /*
                         * undo the reference we took when populating
                         * the table.
                         */
                        put_page(pages[i]);
                }
        }
}

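/* map the scatter-gather table for DMA in the given direction; returns 0 on success. */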
int exynos_gem_map_sgt_with_dma(struct drm_device *drm_dev,
                                struct sg_table *sgt,
                                enum dma_data_direction dir)
{
        int nents;

        mutex_lock(&drm_dev->struct_mutex);

        nents = dma_map_sg(drm_dev->dev, sgt->sgl, sgt->nents, dir);
        if (!nents) {
                DRM_ERROR("failed to map sgl with dma.\n");
                mutex_unlock(&drm_dev->struct_mutex);
                return nents;
        }

        mutex_unlock(&drm_dev->struct_mutex);
        return 0;
}

void exynos_gem_unmap_sgt_from_dma(struct drm_device *drm_dev,
                                   struct sg_table *sgt,
                                   enum dma_data_direction dir)
{
        dma_unmap_sg(drm_dev->dev, sgt->sgl, sgt->nents, dir);
}

int exynos_drm_gem_init_object(struct drm_gem_object *obj)
{
        DRM_DEBUG_KMS("%s\n", __FILE__);

        return 0;
}

void exynos_drm_gem_free_object(struct drm_gem_object *obj)
{
        struct exynos_drm_gem_obj *exynos_gem_obj;
        struct exynos_drm_gem_buf *buf;

        DRM_DEBUG_KMS("%s\n", __FILE__);

        exynos_gem_obj = to_exynos_gem_obj(obj);
        buf = exynos_gem_obj->buffer;

        if (obj->import_attach)
                drm_prime_gem_destroy(obj, buf->sgt);

        exynos_drm_gem_destroy(to_exynos_gem_obj(obj));
}

int exynos_drm_gem_dumb_create(struct drm_file *file_priv,
                               struct drm_device *dev,
                               struct drm_mode_create_dumb *args)
{
        struct exynos_drm_gem_obj *exynos_gem_obj;
        int ret;

        DRM_DEBUG_KMS("%s\n", __FILE__);

        /*
         * allocate memory to be used for the framebuffer.
         * - this callback is called by a user application
         *   with the DRM_IOCTL_MODE_CREATE_DUMB command.
         */

        args->pitch = args->width * ((args->bpp + 7) / 8);
        args->size = args->pitch * args->height;

        exynos_gem_obj = exynos_drm_gem_create(dev, EXYNOS_BO_CONTIG |
                                                EXYNOS_BO_WC, args->size);
        if (IS_ERR(exynos_gem_obj))
                return PTR_ERR(exynos_gem_obj);

        ret = exynos_drm_gem_handle_create(&exynos_gem_obj->base, file_priv,
                        &args->handle);
        if (ret) {
                exynos_drm_gem_destroy(exynos_gem_obj);
                return ret;
        }

        return 0;
}

int exynos_drm_gem_dumb_map_offset(struct drm_file *file_priv,
                                   struct drm_device *dev, uint32_t handle,
                                   uint64_t *offset)
{
        struct drm_gem_object *obj;
        int ret = 0;

        DRM_DEBUG_KMS("%s\n", __FILE__);

        mutex_lock(&dev->struct_mutex);

        /*
         * get the offset of the memory allocated for the drm framebuffer.
         * - this callback is called by a user application
         *   with the DRM_IOCTL_MODE_MAP_DUMB command.
         */

        obj = drm_gem_object_lookup(dev, file_priv, handle);
        if (!obj) {
                DRM_ERROR("failed to lookup gem object.\n");
                ret = -EINVAL;
                goto unlock;
        }

        if (!obj->map_list.map) {
                ret = drm_gem_create_mmap_offset(obj);
                if (ret)
                        goto out;
        }

        *offset = (u64)obj->map_list.hash.key << PAGE_SHIFT;
        DRM_DEBUG_KMS("offset = 0x%lx\n", (unsigned long)*offset);

out:
        drm_gem_object_unreference(obj);
unlock:
        mutex_unlock(&dev->struct_mutex);
        return ret;
}

int exynos_drm_gem_dumb_destroy(struct drm_file *file_priv,
                                struct drm_device *dev,
                                unsigned int handle)
{
        int ret;

        DRM_DEBUG_KMS("%s\n", __FILE__);

        /*
         * obj->refcount and obj->handle_count are decreased and,
         * if both of them are 0, exynos_drm_gem_free_object()
         * is called by the callback to release resources.
         */
        ret = drm_gem_handle_delete(file_priv, handle);
        if (ret < 0) {
                DRM_ERROR("failed to delete drm_gem_handle.\n");
                return ret;
        }

        return 0;
}

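/*
 * page-fault handler: map the page backing the faulting offset of the gem
 * buffer into the user's vma and translate the result into a VM_FAULT_* code.
 */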
int exynos_drm_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
        struct drm_gem_object *obj = vma->vm_private_data;
        struct drm_device *dev = obj->dev;
        unsigned long f_vaddr;
        pgoff_t page_offset;
        int ret;

        page_offset = ((unsigned long)vmf->virtual_address -
                        vma->vm_start) >> PAGE_SHIFT;
        f_vaddr = (unsigned long)vmf->virtual_address;

        mutex_lock(&dev->struct_mutex);

        ret = exynos_drm_gem_map_buf(obj, vma, f_vaddr, page_offset);
        if (ret < 0)
                DRM_ERROR("failed to map a buffer with user.\n");

        mutex_unlock(&dev->struct_mutex);

        return convert_to_vm_err_msg(ret);
}

int exynos_drm_gem_mmap(struct file *filp, struct vm_area_struct *vma)
{
        struct exynos_drm_gem_obj *exynos_gem_obj;
        struct drm_gem_object *obj;
        int ret;

        DRM_DEBUG_KMS("%s\n", __FILE__);

        /* set vm_area_struct. */
        ret = drm_gem_mmap(filp, vma);
        if (ret < 0) {
                DRM_ERROR("failed to mmap.\n");
                return ret;
        }

        obj = vma->vm_private_data;
        exynos_gem_obj = to_exynos_gem_obj(obj);

        ret = check_gem_flags(exynos_gem_obj->flags);
        if (ret) {
                drm_gem_vm_close(vma);
                drm_gem_free_mmap_offset(obj);
                return ret;
        }

        vma->vm_flags &= ~VM_PFNMAP;
        vma->vm_flags |= VM_MIXEDMAP;

        update_vm_cache_attr(exynos_gem_obj, vma);

        return ret;
}