/*
 * Copyright (C) 2013 Red Hat
 * Author: Rob Clark <robdclark@gmail.com>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 as published by
 * the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program.  If not, see <http://www.gnu.org/licenses/>.
 */
#include <linux/spinlock.h>
#include <linux/shmem_fs.h>
#include <linux/dma-buf.h>
#include <linux/pfn_t.h>

#include "msm_drv.h"
#include "msm_fence.h"
#include "msm_gem.h"
#include "msm_gpu.h"
#include "msm_mmu.h"
static dma_addr_t physaddr(struct drm_gem_object *obj)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	struct msm_drm_private *priv = obj->dev->dev_private;
	return (((dma_addr_t)msm_obj->vram_node->start) << PAGE_SHIFT) +
			priv->vram.paddr;
}
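/* e.g. a carveout node whose first pfn is 'start' resolves to the bus
 * address priv->vram.paddr + (start << PAGE_SHIFT).
 */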
static bool use_pages(struct drm_gem_object *obj)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	return !msm_obj->vram_node;
}
/* allocate pages from VRAM carveout, used when no IOMMU: */
static struct page **get_pages_vram(struct drm_gem_object *obj,
		int npages)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	struct msm_drm_private *priv = obj->dev->dev_private;
	dma_addr_t paddr;
	struct page **p;
	int ret, i;

	p = drm_malloc_ab(npages, sizeof(struct page *));
	if (!p)
		return ERR_PTR(-ENOMEM);

	ret = drm_mm_insert_node(&priv->vram.mm, msm_obj->vram_node,
			npages, 0, DRM_MM_SEARCH_DEFAULT);
	if (ret) {
		drm_free_large(p);
		return ERR_PTR(ret);
	}

	paddr = physaddr(obj);
	for (i = 0; i < npages; i++) {
		p[i] = phys_to_page(paddr);
		paddr += PAGE_SIZE;
	}

	return p;
}
/* called with dev->struct_mutex held */
static struct page **get_pages(struct drm_gem_object *obj)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);

	if (!msm_obj->pages) {
		struct drm_device *dev = obj->dev;
		struct page **p;
		int npages = obj->size >> PAGE_SHIFT;

		if (use_pages(obj))
			p = drm_gem_get_pages(obj);
		else
			p = get_pages_vram(obj, npages);

		if (IS_ERR(p)) {
			dev_err(dev->dev, "could not get pages: %ld\n",
					PTR_ERR(p));
			return p;
		}

		msm_obj->sgt = drm_prime_pages_to_sg(p, npages);
		if (IS_ERR(msm_obj->sgt)) {
			dev_err(dev->dev, "failed to allocate sgt\n");
			return ERR_CAST(msm_obj->sgt);
		}

		msm_obj->pages = p;

		/* For non-cached buffers, ensure the new pages are clean
		 * because display controller, GPU, etc. are not coherent:
		 */
		if (msm_obj->flags & (MSM_BO_WC|MSM_BO_UNCACHED))
			dma_map_sg(dev->dev, msm_obj->sgt->sgl,
					msm_obj->sgt->nents, DMA_BIDIRECTIONAL);
	}

	return msm_obj->pages;
}
static void put_pages(struct drm_gem_object *obj)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);

	if (msm_obj->pages) {
		/* For non-cached buffers, ensure the new pages are clean
		 * because display controller, GPU, etc. are not coherent:
		 */
		if (msm_obj->flags & (MSM_BO_WC|MSM_BO_UNCACHED))
			dma_unmap_sg(obj->dev->dev, msm_obj->sgt->sgl,
					msm_obj->sgt->nents, DMA_BIDIRECTIONAL);
		sg_free_table(msm_obj->sgt);
		kfree(msm_obj->sgt);

		if (use_pages(obj))
			drm_gem_put_pages(obj, msm_obj->pages, true, false);
		else {
			drm_mm_remove_node(msm_obj->vram_node);
			drm_free_large(msm_obj->pages);
		}

		msm_obj->pages = NULL;
	}
}
struct page **msm_gem_get_pages(struct drm_gem_object *obj)
{
	struct drm_device *dev = obj->dev;
	struct page **p;
	mutex_lock(&dev->struct_mutex);
	p = get_pages(obj);
	mutex_unlock(&dev->struct_mutex);
	return p;
}
void msm_gem_put_pages(struct drm_gem_object *obj)
{
	/* when we start tracking the pin count, then do something here */
}
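/* Illustrative (hypothetical) caller pattern for the pin/unpin pair
 * above; real callers live elsewhere in the driver:
 *
 *	struct page **p = msm_gem_get_pages(obj);
 *	if (IS_ERR(p))
 *		return PTR_ERR(p);
 *	...access the backing pages...
 *	msm_gem_put_pages(obj);
 */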
int msm_gem_mmap_obj(struct drm_gem_object *obj,
		struct vm_area_struct *vma)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);

	vma->vm_flags &= ~VM_PFNMAP;
	vma->vm_flags |= VM_MIXEDMAP;

	if (msm_obj->flags & MSM_BO_WC) {
		vma->vm_page_prot = pgprot_writecombine(vm_get_page_prot(vma->vm_flags));
	} else if (msm_obj->flags & MSM_BO_UNCACHED) {
		vma->vm_page_prot = pgprot_noncached(vm_get_page_prot(vma->vm_flags));
	} else {
		/*
		 * Shunt off cached objs to shmem file so they have their own
		 * address_space (so unmap_mapping_range does what we want,
		 * in particular in the case of mmap'd dmabufs)
		 */
		fput(vma->vm_file);
		get_file(obj->filp);
		vma->vm_pgoff = 0;
		vma->vm_file  = obj->filp;

		vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
	}

	return 0;
}
int msm_gem_mmap(struct file *filp, struct vm_area_struct *vma)
{
	int ret;

	ret = drm_gem_mmap(filp, vma);
	if (ret) {
		DBG("mmap failed: %d", ret);
		return ret;
	}

	return msm_gem_mmap_obj(vma->vm_private_data, vma);
}
int msm_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
	struct drm_gem_object *obj = vma->vm_private_data;
	struct drm_device *dev = obj->dev;
	struct page **pages;
	unsigned long pfn;
	pgoff_t pgoff;
	int ret;

	/* Make sure we don't parallel update on a fault, nor move or remove
	 * something from beneath our feet
	 */
	ret = mutex_lock_interruptible(&dev->struct_mutex);
	if (ret)
		goto out;

	/* make sure we have pages attached now */
	pages = get_pages(obj);
	if (IS_ERR(pages)) {
		ret = PTR_ERR(pages);
		goto out_unlock;
	}

	/* We don't use vmf->pgoff since that has the fake offset: */
	pgoff = ((unsigned long)vmf->virtual_address -
			vma->vm_start) >> PAGE_SHIFT;

	pfn = page_to_pfn(pages[pgoff]);

	VERB("Inserting %p pfn %lx, pa %lx", vmf->virtual_address,
			pfn, pfn << PAGE_SHIFT);

	ret = vm_insert_mixed(vma, (unsigned long)vmf->virtual_address,
			__pfn_to_pfn_t(pfn, PFN_DEV));

out_unlock:
	mutex_unlock(&dev->struct_mutex);
out:
	switch (ret) {
	case -EAGAIN:
	case 0:
	case -ERESTARTSYS:
	case -EINTR:
	case -EBUSY:
		/*
		 * EBUSY is ok: this just means that another thread
		 * already did the job.
		 */
		return VM_FAULT_NOPAGE;
	case -ENOMEM:
		return VM_FAULT_OOM;
	default:
		return VM_FAULT_SIGBUS;
	}
}
/** get mmap offset */
static uint64_t mmap_offset(struct drm_gem_object *obj)
{
	struct drm_device *dev = obj->dev;
	int ret;

	WARN_ON(!mutex_is_locked(&dev->struct_mutex));

	/* Make it mmapable */
	ret = drm_gem_create_mmap_offset(obj);

	if (ret) {
		dev_err(dev->dev, "could not allocate mmap offset\n");
		return 0;
	}

	return drm_vma_node_offset_addr(&obj->vma_node);
}
uint64_t msm_gem_mmap_offset(struct drm_gem_object *obj)
{
	uint64_t offset;
	mutex_lock(&obj->dev->struct_mutex);
	offset = mmap_offset(obj);
	mutex_unlock(&obj->dev->struct_mutex);
	return offset;
}
/* should be called under struct_mutex.. although it can be called
 * from atomic context without struct_mutex to acquire an extra
 * iova ref if you know one is already held.
 *
 * That means when I do eventually need to add support for unpinning
 * the refcnt counter needs to be atomic_t.
 */
int msm_gem_get_iova_locked(struct drm_gem_object *obj, int id,
		uint32_t *iova)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	int ret = 0;

	if (!msm_obj->domain[id].iova) {
		struct msm_drm_private *priv = obj->dev->dev_private;
		struct page **pages = get_pages(obj);

		if (IS_ERR(pages))
			return PTR_ERR(pages);

		if (iommu_present(&platform_bus_type)) {
			struct msm_mmu *mmu = priv->mmus[id];
			uint32_t offset;

			if (WARN_ON(!mmu))
				return -EINVAL;

			offset = (uint32_t)mmap_offset(obj);
			ret = mmu->funcs->map(mmu, offset, msm_obj->sgt,
					obj->size, IOMMU_READ | IOMMU_WRITE);
			msm_obj->domain[id].iova = offset;
		} else {
			msm_obj->domain[id].iova = physaddr(obj);
		}
	}

	if (!ret)
		*iova = msm_obj->domain[id].iova;

	return ret;
}
/* get iova, taking a reference.  Should have a matching put */
int msm_gem_get_iova(struct drm_gem_object *obj, int id, uint32_t *iova)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	int ret;

	/* this is safe right now because we don't unmap until the
	 * bo is deleted:
	 */
	if (msm_obj->domain[id].iova) {
		*iova = msm_obj->domain[id].iova;
		return 0;
	}

	mutex_lock(&obj->dev->struct_mutex);
	ret = msm_gem_get_iova_locked(obj, id, iova);
	mutex_unlock(&obj->dev->struct_mutex);
	return ret;
}
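/* Illustrative (hypothetical) pairing per the comment above -- every
 * successful get should eventually be matched by a put:
 *
 *	uint32_t iova;
 *	ret = msm_gem_get_iova(obj, gpu->id, &iova);
 *	if (ret)
 *		return ret;
 *	...program iova into the hw...
 *	msm_gem_put_iova(obj, gpu->id);
 */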
/* get iova without taking a reference, used in places where you have
 * already done a 'msm_gem_get_iova()'.
 */
uint32_t msm_gem_iova(struct drm_gem_object *obj, int id)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	WARN_ON(!msm_obj->domain[id].iova);
	return msm_obj->domain[id].iova;
}
void msm_gem_put_iova(struct drm_gem_object *obj, int id)
{
	// XXX TODO ..
	// NOTE: probably don't need a _locked() version.. we wouldn't
	// normally unmap here, but instead just mark that it could be
	// unmapped (if the iova refcnt drops to zero), but then later
	// if another _get_iova_locked() fails we can start unmapping
	// things that are no longer needed..
}
int msm_gem_dumb_create(struct drm_file *file, struct drm_device *dev,
		struct drm_mode_create_dumb *args)
{
	args->pitch = align_pitch(args->width, args->bpp);
	args->size  = PAGE_ALIGN(args->pitch * args->height);
	return msm_gem_new_handle(dev, file, args->size,
			MSM_BO_SCANOUT | MSM_BO_WC, &args->handle);
}
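/* Worked example (assuming align_pitch() adds no padding beyond
 * bytes-per-pixel): a 1920x1080 32bpp dumb buffer gets
 * pitch = 1920 * 4 = 7680 bytes, and
 * size = PAGE_ALIGN(7680 * 1080) = 8294400 bytes (2025 4K pages).
 */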
int msm_gem_dumb_map_offset(struct drm_file *file, struct drm_device *dev,
		uint32_t handle, uint64_t *offset)
{
	struct drm_gem_object *obj;
	int ret = 0;

	/* GEM does all our handle to object mapping */
	obj = drm_gem_object_lookup(file, handle);
	if (obj == NULL) {
		ret = -ENOENT;
		goto fail;
	}

	*offset = msm_gem_mmap_offset(obj);

	drm_gem_object_unreference_unlocked(obj);

fail:
	return ret;
}
void *msm_gem_vaddr_locked(struct drm_gem_object *obj)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	WARN_ON(!mutex_is_locked(&obj->dev->struct_mutex));
	if (!msm_obj->vaddr) {
		struct page **pages = get_pages(obj);
		if (IS_ERR(pages))
			return ERR_CAST(pages);
		msm_obj->vaddr = vmap(pages, obj->size >> PAGE_SHIFT,
				VM_MAP, pgprot_writecombine(PAGE_KERNEL));
		if (msm_obj->vaddr == NULL)
			return ERR_PTR(-ENOMEM);
	}
	return msm_obj->vaddr;
}
void *msm_gem_vaddr(struct drm_gem_object *obj)
{
	void *ret;
	mutex_lock(&obj->dev->struct_mutex);
	ret = msm_gem_vaddr_locked(obj);
	mutex_unlock(&obj->dev->struct_mutex);
	return ret;
}
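/* Illustrative (hypothetical) use of the vmap helpers above, e.g. to
 * fill a buffer from the CPU:
 *
 *	void *ptr = msm_gem_vaddr(obj);
 *	if (IS_ERR(ptr))
 *		return PTR_ERR(ptr);
 *	memcpy(ptr, src, len);	// src/len are caller-provided
 */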
/* Update madvise status, returns true if not purged, else
 * false or -errno.
 */
int msm_gem_madvise(struct drm_gem_object *obj, unsigned madv)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);

	WARN_ON(!mutex_is_locked(&obj->dev->struct_mutex));

	if (msm_obj->madv != __MSM_MADV_PURGED)
		msm_obj->madv = madv;

	return (msm_obj->madv != __MSM_MADV_PURGED);
}
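/* Illustrative (hypothetical) caller: mark a buffer reclaimable while
 * idle, then check on reuse whether its contents survived:
 *
 *	msm_gem_madvise(obj, MSM_MADV_DONTNEED);
 *	...
 *	if (msm_gem_madvise(obj, MSM_MADV_WILLNEED) == 0)
 *		// backing store was purged, reallocate contents
 */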
/* must be called before _move_to_active().. */
int msm_gem_sync_object(struct drm_gem_object *obj,
		struct msm_fence_context *fctx, bool exclusive)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	struct reservation_object_list *fobj;
	struct fence *fence;
	int i, ret;

	if (!exclusive) {
		/* NOTE: _reserve_shared() must happen before _add_shared_fence(),
		 * which makes this a slightly strange place to call it.  OTOH this
		 * is a convenient can-fail point to hook it in.  (And similar to
		 * how etnaviv and nouveau handle this.)
		 */
		ret = reservation_object_reserve_shared(msm_obj->resv);
		if (ret)
			return ret;
	}

	fobj = reservation_object_get_list(msm_obj->resv);
	if (!fobj || (fobj->shared_count == 0)) {
		fence = reservation_object_get_excl(msm_obj->resv);
		/* don't need to wait on our own fences, since ring is fifo */
		if (fence && (fence->context != fctx->context)) {
			ret = fence_wait(fence, true);
			if (ret)
				return ret;
		}
	}

	if (!exclusive || !fobj)
		return 0;

	for (i = 0; i < fobj->shared_count; i++) {
		fence = rcu_dereference_protected(fobj->shared[i],
						reservation_object_held(msm_obj->resv));
		if (fence->context != fctx->context) {
			ret = fence_wait(fence, true);
			if (ret)
				return ret;
		}
	}

	return 0;
}
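/* Illustrative (hypothetical) submit-time ordering, per the
 * "must be called before _move_to_active()" rule above:
 *
 *	ret = msm_gem_sync_object(obj, gpu->fctx, write);
 *	if (ret)
 *		return ret;
 *	...queue the command stream to the ring...
 *	msm_gem_move_to_active(obj, gpu, write, submit_fence);
 */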
void msm_gem_move_to_active(struct drm_gem_object *obj,
		struct msm_gpu *gpu, bool exclusive, struct fence *fence)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	WARN_ON(msm_obj->madv != MSM_MADV_WILLNEED);
	msm_obj->gpu = gpu;
	if (exclusive)
		reservation_object_add_excl_fence(msm_obj->resv, fence);
	else
		reservation_object_add_shared_fence(msm_obj->resv, fence);
	list_del_init(&msm_obj->mm_list);
	list_add_tail(&msm_obj->mm_list, &gpu->active_list);
}
void msm_gem_move_to_inactive(struct drm_gem_object *obj)
{
	struct drm_device *dev = obj->dev;
	struct msm_drm_private *priv = dev->dev_private;
	struct msm_gem_object *msm_obj = to_msm_bo(obj);

	WARN_ON(!mutex_is_locked(&dev->struct_mutex));

	msm_obj->gpu = NULL;
	list_del_init(&msm_obj->mm_list);
	list_add_tail(&msm_obj->mm_list, &priv->inactive_list);
}
int msm_gem_cpu_prep(struct drm_gem_object *obj, uint32_t op, ktime_t *timeout)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	bool write = !!(op & MSM_PREP_WRITE);

	if (op & MSM_PREP_NOSYNC) {
		if (!reservation_object_test_signaled_rcu(msm_obj->resv, write))
			return -EBUSY;
	} else {
		int ret;

		ret = reservation_object_wait_timeout_rcu(msm_obj->resv, write,
				true, timeout_to_jiffies(timeout));
		if (ret <= 0)
			return ret == 0 ? -ETIMEDOUT : ret;
	}

	/* TODO cache maintenance */

	return 0;
}
int msm_gem_cpu_fini(struct drm_gem_object *obj)
{
	/* TODO cache maintenance */
	return 0;
}
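/* Illustrative (hypothetical) CPU-access bracket using the two helpers
 * above:
 *
 *	ret = msm_gem_cpu_prep(obj, MSM_PREP_READ, &timeout);
 *	if (ret)
 *		return ret;
 *	...CPU reads the buffer...
 *	msm_gem_cpu_fini(obj);
 */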
#ifdef CONFIG_DEBUG_FS
static void describe_fence(struct fence *fence, const char *type,
		struct seq_file *m)
{
	if (!fence_is_signaled(fence))
		seq_printf(m, "\t%9s: %s %s seq %u\n", type,
				fence->ops->get_driver_name(fence),
				fence->ops->get_timeline_name(fence),
				fence->seqno);
}
void msm_gem_describe(struct drm_gem_object *obj, struct seq_file *m)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	struct reservation_object *robj = msm_obj->resv;
	struct reservation_object_list *fobj;
	struct fence *fence;
	uint64_t off = drm_vma_node_start(&obj->vma_node);
	const char *madv;

	WARN_ON(!mutex_is_locked(&obj->dev->struct_mutex));

	switch (msm_obj->madv) {
	case __MSM_MADV_PURGED:
		madv = " purged";
		break;
	case MSM_MADV_DONTNEED:
		madv = " purgeable";
		break;
	case MSM_MADV_WILLNEED:
	default:
		madv = "";
		break;
	}

	seq_printf(m, "%08x: %c %2d (%2d) %08llx %p %zu%s\n",
			msm_obj->flags, is_active(msm_obj) ? 'A' : 'I',
			obj->name, obj->refcount.refcount.counter,
			off, msm_obj->vaddr, obj->size, madv);

	rcu_read_lock();
	fobj = rcu_dereference(robj->fence);
	if (fobj) {
		unsigned int i, shared_count = fobj->shared_count;

		for (i = 0; i < shared_count; i++) {
			fence = rcu_dereference(fobj->shared[i]);
			describe_fence(fence, "Shared", m);
		}
	}

	fence = rcu_dereference(robj->fence_excl);
	if (fence)
		describe_fence(fence, "Exclusive", m);
	rcu_read_unlock();
}
void msm_gem_describe_objects(struct list_head *list, struct seq_file *m)
{
	struct msm_gem_object *msm_obj;
	int count = 0;
	size_t size = 0;

	list_for_each_entry(msm_obj, list, mm_list) {
		struct drm_gem_object *obj = &msm_obj->base;
		seq_printf(m, "   ");
		msm_gem_describe(obj, m);
		count++;
		size += obj->size;
	}

	seq_printf(m, "Total %d objects, %zu bytes\n", count, size);
}
#endif
void msm_gem_free_object(struct drm_gem_object *obj)
{
	struct drm_device *dev = obj->dev;
	struct msm_drm_private *priv = obj->dev->dev_private;
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	int id;

	WARN_ON(!mutex_is_locked(&dev->struct_mutex));

	/* object should not be on active list: */
	WARN_ON(is_active(msm_obj));

	list_del(&msm_obj->mm_list);

	for (id = 0; id < ARRAY_SIZE(msm_obj->domain); id++) {
		struct msm_mmu *mmu = priv->mmus[id];
		if (mmu && msm_obj->domain[id].iova) {
			uint32_t offset = msm_obj->domain[id].iova;
			mmu->funcs->unmap(mmu, offset, msm_obj->sgt, obj->size);
		}
	}

	if (obj->import_attach) {
		if (msm_obj->vaddr)
			dma_buf_vunmap(obj->import_attach->dmabuf, msm_obj->vaddr);

		/* Don't drop the pages for imported dmabuf, as they are not
		 * ours, just free the array we allocated:
		 */
		if (msm_obj->pages)
			drm_free_large(msm_obj->pages);

		drm_prime_gem_destroy(obj, msm_obj->sgt);
	} else {
		vunmap(msm_obj->vaddr);
		put_pages(obj);
	}

	if (msm_obj->resv == &msm_obj->_resv)
		reservation_object_fini(msm_obj->resv);

	drm_gem_object_release(obj);

	kfree(msm_obj);
}
/* convenience method to construct a GEM buffer object, and userspace handle */
int msm_gem_new_handle(struct drm_device *dev, struct drm_file *file,
		uint32_t size, uint32_t flags, uint32_t *handle)
{
	struct drm_gem_object *obj;
	int ret;

	ret = mutex_lock_interruptible(&dev->struct_mutex);
	if (ret)
		return ret;

	obj = msm_gem_new(dev, size, flags);

	mutex_unlock(&dev->struct_mutex);

	if (IS_ERR(obj))
		return PTR_ERR(obj);

	ret = drm_gem_handle_create(file, obj, handle);

	/* drop reference from allocate - handle holds it now */
	drm_gem_object_unreference_unlocked(obj);

	return ret;
}
static int msm_gem_new_impl(struct drm_device *dev,
		uint32_t size, uint32_t flags,
		struct reservation_object *resv,
		struct drm_gem_object **obj)
{
	struct msm_drm_private *priv = dev->dev_private;
	struct msm_gem_object *msm_obj;
	unsigned sz;
	bool use_vram = false;

	switch (flags & MSM_BO_CACHE_MASK) {
	case MSM_BO_UNCACHED:
	case MSM_BO_CACHED:
	case MSM_BO_WC:
		break;
	default:
		dev_err(dev->dev, "invalid cache flag: %x\n",
				(flags & MSM_BO_CACHE_MASK));
		return -EINVAL;
	}

	if (!iommu_present(&platform_bus_type))
		use_vram = true;
	else if ((flags & MSM_BO_STOLEN) && priv->vram.size)
		use_vram = true;

	if (WARN_ON(use_vram && !priv->vram.size))
		return -EINVAL;

	sz = sizeof(*msm_obj);
	if (use_vram)
		sz += sizeof(struct drm_mm_node);

	msm_obj = kzalloc(sz, GFP_KERNEL);
	if (!msm_obj)
		return -ENOMEM;

	if (use_vram)
		msm_obj->vram_node = (void *)&msm_obj[1];

	msm_obj->flags = flags;
	msm_obj->madv = MSM_MADV_WILLNEED;

	if (resv) {
		msm_obj->resv = resv;
	} else {
		msm_obj->resv = &msm_obj->_resv;
		reservation_object_init(msm_obj->resv);
	}

	INIT_LIST_HEAD(&msm_obj->submit_entry);
	list_add_tail(&msm_obj->mm_list, &priv->inactive_list);

	*obj = &msm_obj->base;

	return 0;
}
struct drm_gem_object *msm_gem_new(struct drm_device *dev,
		uint32_t size, uint32_t flags)
{
	struct drm_gem_object *obj = NULL;
	int ret;

	WARN_ON(!mutex_is_locked(&dev->struct_mutex));

	size = PAGE_ALIGN(size);

	ret = msm_gem_new_impl(dev, size, flags, NULL, &obj);
	if (ret)
		goto fail;

	if (use_pages(obj)) {
		ret = drm_gem_object_init(dev, obj, size);
		if (ret)
			goto fail;
	} else {
		drm_gem_private_object_init(dev, obj, size);
	}

	return obj;

fail:
	if (obj)
		drm_gem_object_unreference(obj);

	return ERR_PTR(ret);
}
struct drm_gem_object *msm_gem_import(struct drm_device *dev,
		struct dma_buf *dmabuf, struct sg_table *sgt)
{
	struct msm_gem_object *msm_obj;
	struct drm_gem_object *obj;
	uint32_t size;
	int ret, npages;

	/* if we don't have IOMMU, don't bother pretending we can import: */
	if (!iommu_present(&platform_bus_type)) {
		dev_err(dev->dev, "cannot import without IOMMU\n");
		return ERR_PTR(-EINVAL);
	}

	size = PAGE_ALIGN(dmabuf->size);

	ret = msm_gem_new_impl(dev, size, MSM_BO_WC, dmabuf->resv, &obj);
	if (ret)
		goto fail;

	drm_gem_private_object_init(dev, obj, size);

	npages = size / PAGE_SIZE;

	msm_obj = to_msm_bo(obj);
	msm_obj->sgt = sgt;
	msm_obj->pages = drm_malloc_ab(npages, sizeof(struct page *));
	if (!msm_obj->pages) {
		ret = -ENOMEM;
		goto fail;
	}

	ret = drm_prime_sg_to_page_addr_arrays(sgt, msm_obj->pages, NULL, npages);
	if (ret)
		goto fail;

	return obj;

fail:
	drm_gem_object_unreference_unlocked(obj);

	return ERR_PTR(ret);
}