drm/msm: add madvise ioctl
drivers/gpu/drm/msm/msm_gem.c
/*
 * Copyright (C) 2013 Red Hat
 * Author: Rob Clark <robdclark@gmail.com>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 as published by
 * the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program.  If not, see <http://www.gnu.org/licenses/>.
 */

#include <linux/spinlock.h>
#include <linux/shmem_fs.h>
#include <linux/dma-buf.h>
#include <linux/pfn_t.h>

#include "msm_drv.h"
#include "msm_fence.h"
#include "msm_gem.h"
#include "msm_gpu.h"
#include "msm_mmu.h"

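/* For a VRAM-carveout backed object, the physical address is the carveout
 * base plus the object's page offset within the drm_mm-managed carveout:
 */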
static dma_addr_t physaddr(struct drm_gem_object *obj)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	struct msm_drm_private *priv = obj->dev->dev_private;
	return (((dma_addr_t)msm_obj->vram_node->start) << PAGE_SHIFT) +
			priv->vram.paddr;
}

static bool use_pages(struct drm_gem_object *obj)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	return !msm_obj->vram_node;
}

/* allocate pages from VRAM carveout, used when no IOMMU: */
static struct page **get_pages_vram(struct drm_gem_object *obj,
		int npages)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	struct msm_drm_private *priv = obj->dev->dev_private;
	dma_addr_t paddr;
	struct page **p;
	int ret, i;

	p = drm_malloc_ab(npages, sizeof(struct page *));
	if (!p)
		return ERR_PTR(-ENOMEM);

	ret = drm_mm_insert_node(&priv->vram.mm, msm_obj->vram_node,
			npages, 0, DRM_MM_SEARCH_DEFAULT);
	if (ret) {
		drm_free_large(p);
		return ERR_PTR(ret);
	}

	paddr = physaddr(obj);
	for (i = 0; i < npages; i++) {
		p[i] = phys_to_page(paddr);
		paddr += PAGE_SIZE;
	}

	return p;
}

/* called with dev->struct_mutex held */
static struct page **get_pages(struct drm_gem_object *obj)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);

	if (!msm_obj->pages) {
		struct drm_device *dev = obj->dev;
		struct page **p;
		int npages = obj->size >> PAGE_SHIFT;

		if (use_pages(obj))
			p = drm_gem_get_pages(obj);
		else
			p = get_pages_vram(obj, npages);

		if (IS_ERR(p)) {
			dev_err(dev->dev, "could not get pages: %ld\n",
					PTR_ERR(p));
			return p;
		}

		msm_obj->sgt = drm_prime_pages_to_sg(p, npages);
		if (IS_ERR(msm_obj->sgt)) {
			dev_err(dev->dev, "failed to allocate sgt\n");
			return ERR_CAST(msm_obj->sgt);
		}

		msm_obj->pages = p;

		/* For non-cached buffers, ensure the new pages are clean
		 * because display controller, GPU, etc. are not coherent:
		 */
		if (msm_obj->flags & (MSM_BO_WC|MSM_BO_UNCACHED))
			dma_map_sg(dev->dev, msm_obj->sgt->sgl,
					msm_obj->sgt->nents, DMA_BIDIRECTIONAL);
	}

	return msm_obj->pages;
}

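/* The inverse of get_pages(): tears down the scatterlist and releases the
 * backing pages (back to shmem, or back to the VRAM carveout).  Called on
 * final object teardown, with dev->struct_mutex held:
 */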
static void put_pages(struct drm_gem_object *obj)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);

	if (msm_obj->pages) {
		/* For non-cached buffers, unmapping performs any needed
		 * cache maintenance, because display controller, GPU, etc.
		 * are not coherent:
		 */
		if (msm_obj->flags & (MSM_BO_WC|MSM_BO_UNCACHED))
			dma_unmap_sg(obj->dev->dev, msm_obj->sgt->sgl,
					msm_obj->sgt->nents, DMA_BIDIRECTIONAL);
		sg_free_table(msm_obj->sgt);
		kfree(msm_obj->sgt);

		if (use_pages(obj))
			drm_gem_put_pages(obj, msm_obj->pages, true, false);
		else {
			drm_mm_remove_node(msm_obj->vram_node);
			drm_free_large(msm_obj->pages);
		}

		msm_obj->pages = NULL;
	}
}

struct page **msm_gem_get_pages(struct drm_gem_object *obj)
{
	struct drm_device *dev = obj->dev;
	struct page **p;
	mutex_lock(&dev->struct_mutex);
	p = get_pages(obj);
	mutex_unlock(&dev->struct_mutex);
	return p;
}

void msm_gem_put_pages(struct drm_gem_object *obj)
{
	/* when we start tracking the pin count, then do something here */
}

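/* Set up vm_flags and page protection for a CPU mapping of the object,
 * according to its caching mode; called from msm_gem_mmap() and from the
 * dma-buf/prime mmap path:
 */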
int msm_gem_mmap_obj(struct drm_gem_object *obj,
		struct vm_area_struct *vma)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);

	vma->vm_flags &= ~VM_PFNMAP;
	vma->vm_flags |= VM_MIXEDMAP;

	if (msm_obj->flags & MSM_BO_WC) {
		vma->vm_page_prot = pgprot_writecombine(vm_get_page_prot(vma->vm_flags));
	} else if (msm_obj->flags & MSM_BO_UNCACHED) {
		vma->vm_page_prot = pgprot_noncached(vm_get_page_prot(vma->vm_flags));
	} else {
		/*
		 * Shunt off cached objs to shmem file so they have their own
		 * address_space (so unmap_mapping_range does what we want,
		 * in particular in the case of mmap'd dmabufs)
		 */
		fput(vma->vm_file);
		get_file(obj->filp);
		vma->vm_pgoff = 0;
		vma->vm_file = obj->filp;

		vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
	}

	return 0;
}

int msm_gem_mmap(struct file *filp, struct vm_area_struct *vma)
{
	int ret;

	ret = drm_gem_mmap(filp, vma);
	if (ret) {
		DBG("mmap failed: %d", ret);
		return ret;
	}

	return msm_gem_mmap_obj(vma->vm_private_data, vma);
}

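/* Fault handler for CPU mappings: makes sure the backing pages are
 * allocated, then inserts the single faulting page into the vma:
 */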
int msm_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
	struct drm_gem_object *obj = vma->vm_private_data;
	struct drm_device *dev = obj->dev;
	struct page **pages;
	unsigned long pfn;
	pgoff_t pgoff;
	int ret;

	/* Make sure we don't race a parallel update on a fault, and that
	 * nothing is moved or removed from beneath our feet:
	 */
	ret = mutex_lock_interruptible(&dev->struct_mutex);
	if (ret)
		goto out;

	/* make sure we have pages attached now */
	pages = get_pages(obj);
	if (IS_ERR(pages)) {
		ret = PTR_ERR(pages);
		goto out_unlock;
	}

	/* We don't use vmf->pgoff since that has the fake offset: */
	pgoff = ((unsigned long)vmf->virtual_address -
			vma->vm_start) >> PAGE_SHIFT;

	pfn = page_to_pfn(pages[pgoff]);

	VERB("Inserting %p pfn %lx, pa %lx", vmf->virtual_address,
			pfn, pfn << PAGE_SHIFT);

	ret = vm_insert_mixed(vma, (unsigned long)vmf->virtual_address,
			__pfn_to_pfn_t(pfn, PFN_DEV));

out_unlock:
	mutex_unlock(&dev->struct_mutex);
out:
	switch (ret) {
	case -EAGAIN:
	case 0:
	case -ERESTARTSYS:
	case -EINTR:
	case -EBUSY:
		/*
		 * EBUSY is ok: this just means that another thread
		 * already did the job.
		 */
		return VM_FAULT_NOPAGE;
	case -ENOMEM:
		return VM_FAULT_OOM;
	default:
		return VM_FAULT_SIGBUS;
	}
}

/** get mmap offset */
static uint64_t mmap_offset(struct drm_gem_object *obj)
{
	struct drm_device *dev = obj->dev;
	int ret;

	WARN_ON(!mutex_is_locked(&dev->struct_mutex));

	/* Make it mmapable */
	ret = drm_gem_create_mmap_offset(obj);

	if (ret) {
		dev_err(dev->dev, "could not allocate mmap offset\n");
		return 0;
	}

	return drm_vma_node_offset_addr(&obj->vma_node);
}

uint64_t msm_gem_mmap_offset(struct drm_gem_object *obj)
{
	uint64_t offset;
	mutex_lock(&obj->dev->struct_mutex);
	offset = mmap_offset(obj);
	mutex_unlock(&obj->dev->struct_mutex);
	return offset;
}

/* Should be called under struct_mutex, although it can be called from
 * atomic context without struct_mutex to acquire an extra iova ref if
 * you know one is already held.
 *
 * That means that when we eventually add support for unpinning, the
 * refcnt counter will need to become an atomic_t.
 */
int msm_gem_get_iova_locked(struct drm_gem_object *obj, int id,
		uint32_t *iova)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	int ret = 0;

	if (!msm_obj->domain[id].iova) {
		struct msm_drm_private *priv = obj->dev->dev_private;
		struct page **pages = get_pages(obj);

		if (IS_ERR(pages))
			return PTR_ERR(pages);

		if (iommu_present(&platform_bus_type)) {
			struct msm_mmu *mmu = priv->mmus[id];
			uint32_t offset;

			if (WARN_ON(!mmu))
				return -EINVAL;

			offset = (uint32_t)mmap_offset(obj);
			ret = mmu->funcs->map(mmu, offset, msm_obj->sgt,
					obj->size, IOMMU_READ | IOMMU_WRITE);
			msm_obj->domain[id].iova = offset;
		} else {
			msm_obj->domain[id].iova = physaddr(obj);
		}
	}

	if (!ret)
		*iova = msm_obj->domain[id].iova;

	return ret;
}

/* get iova, taking a reference.  Should have a matching put */
int msm_gem_get_iova(struct drm_gem_object *obj, int id, uint32_t *iova)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	int ret;

	/* this is safe right now because we don't unmap until the
	 * bo is deleted:
	 */
	if (msm_obj->domain[id].iova) {
		*iova = msm_obj->domain[id].iova;
		return 0;
	}

	mutex_lock(&obj->dev->struct_mutex);
	ret = msm_gem_get_iova_locked(obj, id, iova);
	mutex_unlock(&obj->dev->struct_mutex);
	return ret;
}

/* get iova without taking a reference, used in places where you have
 * already done a 'msm_gem_get_iova()'.
 */
uint32_t msm_gem_iova(struct drm_gem_object *obj, int id)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	WARN_ON(!msm_obj->domain[id].iova);
	return msm_obj->domain[id].iova;
}

void msm_gem_put_iova(struct drm_gem_object *obj, int id)
{
	// XXX TODO ..
	// NOTE: probably don't need a _locked() version.. we wouldn't
	// normally unmap here, but instead just mark that it could be
	// unmapped (if the iova refcnt drops to zero), but then later
	// if another _get_iova_locked() fails we can start unmapping
	// things that are no longer needed..
}

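/* Dumb-buffer support: allocate a WC scanout-capable buffer of the
 * requested geometry and return a userspace handle for it:
 */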
int msm_gem_dumb_create(struct drm_file *file, struct drm_device *dev,
		struct drm_mode_create_dumb *args)
{
	args->pitch = align_pitch(args->width, args->bpp);
	args->size = PAGE_ALIGN(args->pitch * args->height);
	return msm_gem_new_handle(dev, file, args->size,
			MSM_BO_SCANOUT | MSM_BO_WC, &args->handle);
}

int msm_gem_dumb_map_offset(struct drm_file *file, struct drm_device *dev,
		uint32_t handle, uint64_t *offset)
{
	struct drm_gem_object *obj;
	int ret = 0;

	/* GEM does all our handle to object mapping */
	obj = drm_gem_object_lookup(file, handle);
	if (obj == NULL) {
		ret = -ENOENT;
		goto fail;
	}

	*offset = msm_gem_mmap_offset(obj);

	drm_gem_object_unreference_unlocked(obj);

fail:
	return ret;
}

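/* Lazily vmap() the backing pages into the kernel's address space; the
 * mapping persists until the object is freed:
 */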
void *msm_gem_vaddr_locked(struct drm_gem_object *obj)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	WARN_ON(!mutex_is_locked(&obj->dev->struct_mutex));
	if (!msm_obj->vaddr) {
		struct page **pages = get_pages(obj);
		if (IS_ERR(pages))
			return ERR_CAST(pages);
		msm_obj->vaddr = vmap(pages, obj->size >> PAGE_SHIFT,
				VM_MAP, pgprot_writecombine(PAGE_KERNEL));
		if (msm_obj->vaddr == NULL)
			return ERR_PTR(-ENOMEM);
	}
	return msm_obj->vaddr;
}

void *msm_gem_vaddr(struct drm_gem_object *obj)
{
	void *ret;
	mutex_lock(&obj->dev->struct_mutex);
	ret = msm_gem_vaddr_locked(obj);
	mutex_unlock(&obj->dev->struct_mutex);
	return ret;
}

/* Update madvise status: returns true if the object's backing pages have
 * not been purged, else false.
 */
int msm_gem_madvise(struct drm_gem_object *obj, unsigned madv)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);

	WARN_ON(!mutex_is_locked(&obj->dev->struct_mutex));

	if (msm_obj->madv != __MSM_MADV_PURGED)
		msm_obj->madv = madv;

	return (msm_obj->madv != __MSM_MADV_PURGED);
}

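/* Example of the intended userspace usage of the new madvise ioctl (a
 * sketch, assuming the DRM_IOCTL_MSM_GEM_MADVISE wrapper and struct
 * drm_msm_gem_madvise from the updated msm_drm.h uapi header):
 *
 *	struct drm_msm_gem_madvise req = {
 *		.handle = bo_handle,            // GEM handle of the buffer
 *		.madv = MSM_MADV_DONTNEED,      // ok to purge under pressure
 *	};
 *	drmIoctl(fd, DRM_IOCTL_MSM_GEM_MADVISE, &req);
 *
 *	...
 *
 *	req.madv = MSM_MADV_WILLNEED;           // buffer is needed again
 *	drmIoctl(fd, DRM_IOCTL_MSM_GEM_MADVISE, &req);
 *	if (!req.retained) {
 *		// backing pages were purged; contents must be regenerated
 *	}
 */
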
/* must be called before _move_to_active().. */
int msm_gem_sync_object(struct drm_gem_object *obj,
		struct msm_fence_context *fctx, bool exclusive)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	struct reservation_object_list *fobj;
	struct fence *fence;
	int i, ret;

	if (!exclusive) {
		/* NOTE: _reserve_shared() must happen before _add_shared_fence(),
		 * which makes this a slightly strange place to call it.  OTOH this
		 * is a convenient can-fail point to hook it in.  (And similar to
		 * how etnaviv and nouveau handle this.)
		 */
		ret = reservation_object_reserve_shared(msm_obj->resv);
		if (ret)
			return ret;
	}

	fobj = reservation_object_get_list(msm_obj->resv);
	if (!fobj || (fobj->shared_count == 0)) {
		fence = reservation_object_get_excl(msm_obj->resv);
		/* don't need to wait on our own fences, since ring is fifo */
		if (fence && (fence->context != fctx->context)) {
			ret = fence_wait(fence, true);
			if (ret)
				return ret;
		}
	}

	if (!exclusive || !fobj)
		return 0;

	for (i = 0; i < fobj->shared_count; i++) {
		fence = rcu_dereference_protected(fobj->shared[i],
						reservation_object_held(msm_obj->resv));
		if (fence->context != fctx->context) {
			ret = fence_wait(fence, true);
			if (ret)
				return ret;
		}
	}

	return 0;
}

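/* Attach the submit's fence to the object's reservation and move it onto
 * the gpu's active list; purgeable (MSM_MADV_DONTNEED) objects must not
 * be submitted, hence the WARN_ON:
 */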
void msm_gem_move_to_active(struct drm_gem_object *obj,
		struct msm_gpu *gpu, bool exclusive, struct fence *fence)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	WARN_ON(msm_obj->madv != MSM_MADV_WILLNEED);
	msm_obj->gpu = gpu;
	if (exclusive)
		reservation_object_add_excl_fence(msm_obj->resv, fence);
	else
		reservation_object_add_shared_fence(msm_obj->resv, fence);
	list_del_init(&msm_obj->mm_list);
	list_add_tail(&msm_obj->mm_list, &gpu->active_list);
}

void msm_gem_move_to_inactive(struct drm_gem_object *obj)
{
	struct drm_device *dev = obj->dev;
	struct msm_drm_private *priv = dev->dev_private;
	struct msm_gem_object *msm_obj = to_msm_bo(obj);

	WARN_ON(!mutex_is_locked(&dev->struct_mutex));

	msm_obj->gpu = NULL;
	list_del_init(&msm_obj->mm_list);
	list_add_tail(&msm_obj->mm_list, &priv->inactive_list);
}

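/* Prepare the object for CPU access: wait for pending GPU work (or, with
 * MSM_PREP_NOSYNC, just poll and return -EBUSY if still busy):
 */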
int msm_gem_cpu_prep(struct drm_gem_object *obj, uint32_t op, ktime_t *timeout)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	bool write = !!(op & MSM_PREP_WRITE);

	if (op & MSM_PREP_NOSYNC) {
		if (!reservation_object_test_signaled_rcu(msm_obj->resv, write))
			return -EBUSY;
	} else {
		int ret;

		ret = reservation_object_wait_timeout_rcu(msm_obj->resv, write,
				true, timeout_to_jiffies(timeout));
		if (ret <= 0)
			return ret == 0 ? -ETIMEDOUT : ret;
	}

	/* TODO cache maintenance */

	return 0;
}

int msm_gem_cpu_fini(struct drm_gem_object *obj)
{
	/* TODO cache maintenance */
	return 0;
}

#ifdef CONFIG_DEBUG_FS
static void describe_fence(struct fence *fence, const char *type,
		struct seq_file *m)
{
	if (!fence_is_signaled(fence))
		seq_printf(m, "\t%9s: %s %s seq %u\n", type,
				fence->ops->get_driver_name(fence),
				fence->ops->get_timeline_name(fence),
				fence->seqno);
}

void msm_gem_describe(struct drm_gem_object *obj, struct seq_file *m)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	struct reservation_object *robj = msm_obj->resv;
	struct reservation_object_list *fobj;
	struct fence *fence;
	uint64_t off = drm_vma_node_start(&obj->vma_node);
	const char *madv;

	WARN_ON(!mutex_is_locked(&obj->dev->struct_mutex));

	switch (msm_obj->madv) {
	case __MSM_MADV_PURGED:
		madv = " purged";
		break;
	case MSM_MADV_DONTNEED:
		madv = " purgeable";
		break;
	case MSM_MADV_WILLNEED:
	default:
		madv = "";
		break;
	}

	seq_printf(m, "%08x: %c %2d (%2d) %08llx %p %zu%s\n",
			msm_obj->flags, is_active(msm_obj) ? 'A' : 'I',
			obj->name, obj->refcount.refcount.counter,
			off, msm_obj->vaddr, obj->size, madv);

	rcu_read_lock();
	fobj = rcu_dereference(robj->fence);
	if (fobj) {
		unsigned int i, shared_count = fobj->shared_count;

		for (i = 0; i < shared_count; i++) {
			fence = rcu_dereference(fobj->shared[i]);
			describe_fence(fence, "Shared", m);
		}
	}

	fence = rcu_dereference(robj->fence_excl);
	if (fence)
		describe_fence(fence, "Exclusive", m);
	rcu_read_unlock();
}

void msm_gem_describe_objects(struct list_head *list, struct seq_file *m)
{
	struct msm_gem_object *msm_obj;
	int count = 0;
	size_t size = 0;

	list_for_each_entry(msm_obj, list, mm_list) {
		struct drm_gem_object *obj = &msm_obj->base;
		seq_printf(m, "   ");
		msm_gem_describe(obj, m);
		count++;
		size += obj->size;
	}

	seq_printf(m, "Total %d objects, %zu bytes\n", count, size);
}
#endif

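/* Final object teardown, called when the last reference is dropped (with
 * struct_mutex held): unmap any per-mmu iovas, then release the vaddr and
 * backing pages (or, for imported dmabufs, just the page array that we
 * allocated, since the pages belong to the exporter):
 */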
void msm_gem_free_object(struct drm_gem_object *obj)
{
	struct drm_device *dev = obj->dev;
	struct msm_drm_private *priv = obj->dev->dev_private;
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	int id;

	WARN_ON(!mutex_is_locked(&dev->struct_mutex));

	/* object should not be on active list: */
	WARN_ON(is_active(msm_obj));

	list_del(&msm_obj->mm_list);

	for (id = 0; id < ARRAY_SIZE(msm_obj->domain); id++) {
		struct msm_mmu *mmu = priv->mmus[id];
		if (mmu && msm_obj->domain[id].iova) {
			uint32_t offset = msm_obj->domain[id].iova;
			mmu->funcs->unmap(mmu, offset, msm_obj->sgt, obj->size);
		}
	}

	if (obj->import_attach) {
		if (msm_obj->vaddr)
			dma_buf_vunmap(obj->import_attach->dmabuf, msm_obj->vaddr);

		/* Don't drop the pages for imported dmabuf, as they are not
		 * ours, just free the array we allocated:
		 */
		if (msm_obj->pages)
			drm_free_large(msm_obj->pages);

		drm_prime_gem_destroy(obj, msm_obj->sgt);
	} else {
		vunmap(msm_obj->vaddr);
		put_pages(obj);
	}

	if (msm_obj->resv == &msm_obj->_resv)
		reservation_object_fini(msm_obj->resv);

	drm_gem_object_release(obj);

	kfree(msm_obj);
}

/* convenience method to construct a GEM buffer object, and userspace handle */
int msm_gem_new_handle(struct drm_device *dev, struct drm_file *file,
		uint32_t size, uint32_t flags, uint32_t *handle)
{
	struct drm_gem_object *obj;
	int ret;

	ret = mutex_lock_interruptible(&dev->struct_mutex);
	if (ret)
		return ret;

	obj = msm_gem_new(dev, size, flags);

	mutex_unlock(&dev->struct_mutex);

	if (IS_ERR(obj))
		return PTR_ERR(obj);

	ret = drm_gem_handle_create(file, obj, handle);

	/* drop reference from allocate - handle holds it now */
	drm_gem_object_unreference_unlocked(obj);

	return ret;
}

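/* Common allocation path shared by msm_gem_new() and msm_gem_import():
 * validates the cache flags, decides between shmem pages and the VRAM
 * carveout, and initializes the msm_obj (newly allocated objects start
 * out as MSM_MADV_WILLNEED):
 */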
static int msm_gem_new_impl(struct drm_device *dev,
		uint32_t size, uint32_t flags,
		struct reservation_object *resv,
		struct drm_gem_object **obj)
{
	struct msm_drm_private *priv = dev->dev_private;
	struct msm_gem_object *msm_obj;
	unsigned sz;
	bool use_vram = false;

	switch (flags & MSM_BO_CACHE_MASK) {
	case MSM_BO_UNCACHED:
	case MSM_BO_CACHED:
	case MSM_BO_WC:
		break;
	default:
		dev_err(dev->dev, "invalid cache flag: %x\n",
				(flags & MSM_BO_CACHE_MASK));
		return -EINVAL;
	}

	if (!iommu_present(&platform_bus_type))
		use_vram = true;
	else if ((flags & MSM_BO_STOLEN) && priv->vram.size)
		use_vram = true;

	if (WARN_ON(use_vram && !priv->vram.size))
		return -EINVAL;

	sz = sizeof(*msm_obj);
	if (use_vram)
		sz += sizeof(struct drm_mm_node);

	msm_obj = kzalloc(sz, GFP_KERNEL);
	if (!msm_obj)
		return -ENOMEM;

	if (use_vram)
		msm_obj->vram_node = (void *)&msm_obj[1];

	msm_obj->flags = flags;
	msm_obj->madv = MSM_MADV_WILLNEED;

	if (resv) {
		msm_obj->resv = resv;
	} else {
		msm_obj->resv = &msm_obj->_resv;
		reservation_object_init(msm_obj->resv);
	}

	INIT_LIST_HEAD(&msm_obj->submit_entry);
	list_add_tail(&msm_obj->mm_list, &priv->inactive_list);

	*obj = &msm_obj->base;

	return 0;
}

struct drm_gem_object *msm_gem_new(struct drm_device *dev,
		uint32_t size, uint32_t flags)
{
	struct drm_gem_object *obj = NULL;
	int ret;

	WARN_ON(!mutex_is_locked(&dev->struct_mutex));

	size = PAGE_ALIGN(size);

	ret = msm_gem_new_impl(dev, size, flags, NULL, &obj);
	if (ret)
		goto fail;

	if (use_pages(obj)) {
		ret = drm_gem_object_init(dev, obj, size);
		if (ret)
			goto fail;
	} else {
		drm_gem_private_object_init(dev, obj, size);
	}

	return obj;

fail:
	if (obj)
		drm_gem_object_unreference(obj);

	return ERR_PTR(ret);
}

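/* Import a dma-buf: wrap the exporter's sg_table in a private GEM object.
 * The backing pages remain owned by the exporter, and the importer shares
 * the exporter's reservation object:
 */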
struct drm_gem_object *msm_gem_import(struct drm_device *dev,
		struct dma_buf *dmabuf, struct sg_table *sgt)
{
	struct msm_gem_object *msm_obj;
	struct drm_gem_object *obj;
	uint32_t size;
	int ret, npages;

	/* if we don't have IOMMU, don't bother pretending we can import: */
	if (!iommu_present(&platform_bus_type)) {
		dev_err(dev->dev, "cannot import without IOMMU\n");
		return ERR_PTR(-EINVAL);
	}

	size = PAGE_ALIGN(dmabuf->size);

	ret = msm_gem_new_impl(dev, size, MSM_BO_WC, dmabuf->resv, &obj);
	if (ret)
		goto fail;

	drm_gem_private_object_init(dev, obj, size);

	npages = size / PAGE_SIZE;

	msm_obj = to_msm_bo(obj);
	msm_obj->sgt = sgt;
	msm_obj->pages = drm_malloc_ab(npages, sizeof(struct page *));
	if (!msm_obj->pages) {
		ret = -ENOMEM;
		goto fail;
	}

	ret = drm_prime_sg_to_page_addr_arrays(sgt, msm_obj->pages, NULL, npages);
	if (ret)
		goto fail;

	return obj;

fail:
	if (obj)
		drm_gem_object_unreference_unlocked(obj);

	return ERR_PTR(ret);
}