/*
 * Copyright (C) 2013 Red Hat
 * Author: Rob Clark <robdclark@gmail.com>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 as published by
 * the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program.  If not, see <http://www.gnu.org/licenses/>.
 */

#include <linux/spinlock.h>
#include <linux/shmem_fs.h>
#include <linux/dma-buf.h>
#include <linux/pfn_t.h>

#include "msm_drv.h"
#include "msm_fence.h"
#include "msm_gem.h"
#include "msm_gpu.h"
#include "msm_mmu.h"

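/* Physical address of a page in the VRAM carveout: vram_node->start is
 * in units of pages, so shift it up and offset from the carveout base.
 */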
static dma_addr_t physaddr(struct drm_gem_object *obj)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	struct msm_drm_private *priv = obj->dev->dev_private;
	return (((dma_addr_t)msm_obj->vram_node->start) << PAGE_SHIFT) +
			priv->vram.paddr;
}

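/* An object is shmem-backed unless it was allocated from the VRAM
 * carveout, in which case it has a vram_node:
 */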
static bool use_pages(struct drm_gem_object *obj)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	return !msm_obj->vram_node;
}

/* allocate pages from VRAM carveout, used when no IOMMU: */
static struct page **get_pages_vram(struct drm_gem_object *obj,
		int npages)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	struct msm_drm_private *priv = obj->dev->dev_private;
	dma_addr_t paddr;
	struct page **p;
	int ret, i;

	p = drm_malloc_ab(npages, sizeof(struct page *));
	if (!p)
		return ERR_PTR(-ENOMEM);

	ret = drm_mm_insert_node(&priv->vram.mm, msm_obj->vram_node,
			npages, 0, DRM_MM_SEARCH_DEFAULT);
	if (ret) {
		drm_free_large(p);
		return ERR_PTR(ret);
	}

	paddr = physaddr(obj);
	for (i = 0; i < npages; i++) {
		p[i] = phys_to_page(paddr);
		paddr += PAGE_SIZE;
	}

	return p;
}

/* called with dev->struct_mutex held */
static struct page **get_pages(struct drm_gem_object *obj)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);

	if (!msm_obj->pages) {
		struct drm_device *dev = obj->dev;
		struct page **p;
		int npages = obj->size >> PAGE_SHIFT;

		if (use_pages(obj))
			p = drm_gem_get_pages(obj);
		else
			p = get_pages_vram(obj, npages);

		if (IS_ERR(p)) {
			dev_err(dev->dev, "could not get pages: %ld\n",
					PTR_ERR(p));
			return p;
		}

		msm_obj->sgt = drm_prime_pages_to_sg(p, npages);
		if (IS_ERR(msm_obj->sgt)) {
			dev_err(dev->dev, "failed to allocate sgt\n");
			return ERR_CAST(msm_obj->sgt);
		}

		msm_obj->pages = p;

		/* For non-cached buffers, ensure the new pages are clean
		 * because display controller, GPU, etc. are not coherent:
		 */
		if (msm_obj->flags & (MSM_BO_WC|MSM_BO_UNCACHED))
			dma_map_sg(dev->dev, msm_obj->sgt->sgl,
					msm_obj->sgt->nents, DMA_BIDIRECTIONAL);
	}

	return msm_obj->pages;
}

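/* Undo get_pages(): unmap and free the sg_table, then return the pages
 * to shmem or to the VRAM carveout, whichever backs this object.
 */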
static void put_pages(struct drm_gem_object *obj)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);

	if (msm_obj->pages) {
		/* For non-cached buffers, undo the mapping done in
		 * get_pages(), since display controller, GPU, etc.
		 * are not coherent:
		 */
		if (msm_obj->flags & (MSM_BO_WC|MSM_BO_UNCACHED))
			dma_unmap_sg(obj->dev->dev, msm_obj->sgt->sgl,
					msm_obj->sgt->nents, DMA_BIDIRECTIONAL);
		sg_free_table(msm_obj->sgt);
		kfree(msm_obj->sgt);

		if (use_pages(obj))
			drm_gem_put_pages(obj, msm_obj->pages, true, false);
		else {
			drm_mm_remove_node(msm_obj->vram_node);
			drm_free_large(msm_obj->pages);
		}

		msm_obj->pages = NULL;
	}
}

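/* Locked wrapper around get_pages() for use outside this file.  There
 * is no pin counting yet, so msm_gem_put_pages() is currently a no-op:
 */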
struct page **msm_gem_get_pages(struct drm_gem_object *obj)
{
	struct drm_device *dev = obj->dev;
	struct page **p;
	mutex_lock(&dev->struct_mutex);
	p = get_pages(obj);
	mutex_unlock(&dev->struct_mutex);
	return p;
}

void msm_gem_put_pages(struct drm_gem_object *obj)
{
	/* when we start tracking the pin count, then do something here */
}

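/* Set up vma flags and page protection according to the object's
 * caching mode.  Pages are inserted on fault rather than mapped up
 * front:
 */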
int msm_gem_mmap_obj(struct drm_gem_object *obj,
		struct vm_area_struct *vma)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);

	vma->vm_flags &= ~VM_PFNMAP;
	vma->vm_flags |= VM_MIXEDMAP;

	if (msm_obj->flags & MSM_BO_WC) {
		vma->vm_page_prot = pgprot_writecombine(vm_get_page_prot(vma->vm_flags));
	} else if (msm_obj->flags & MSM_BO_UNCACHED) {
		vma->vm_page_prot = pgprot_noncached(vm_get_page_prot(vma->vm_flags));
	} else {
		/*
		 * Shunt off cached objs to shmem file so they have their own
		 * address_space (so unmap_mapping_range does what we want,
		 * in particular in the case of mmap'd dmabufs)
		 */
		fput(vma->vm_file);
		get_file(obj->filp);
		vma->vm_pgoff = 0;
		vma->vm_file = obj->filp;

		vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
	}

	return 0;
}

int msm_gem_mmap(struct file *filp, struct vm_area_struct *vma)
{
	int ret;

	ret = drm_gem_mmap(filp, vma);
	if (ret) {
		DBG("mmap failed: %d", ret);
		return ret;
	}

	return msm_gem_mmap_obj(vma->vm_private_data, vma);
}

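/* Fault handler: pin the backing pages (allocating on first touch) and
 * insert the single faulting page into the vma as a mixed mapping:
 */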
int msm_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
	struct drm_gem_object *obj = vma->vm_private_data;
	struct drm_device *dev = obj->dev;
	struct page **pages;
	unsigned long pfn;
	pgoff_t pgoff;
	int ret;

	/* Make sure we don't parallel update on a fault, nor move or remove
	 * something from beneath our feet
	 */
	ret = mutex_lock_interruptible(&dev->struct_mutex);
	if (ret)
		goto out;

	/* make sure we have pages attached now */
	pages = get_pages(obj);
	if (IS_ERR(pages)) {
		ret = PTR_ERR(pages);
		goto out_unlock;
	}

	/* We don't use vmf->pgoff since that has the fake offset: */
	pgoff = ((unsigned long)vmf->virtual_address -
			vma->vm_start) >> PAGE_SHIFT;

	pfn = page_to_pfn(pages[pgoff]);

	VERB("Inserting %p pfn %lx, pa %lx", vmf->virtual_address,
			pfn, pfn << PAGE_SHIFT);

	ret = vm_insert_mixed(vma, (unsigned long)vmf->virtual_address,
			__pfn_to_pfn_t(pfn, PFN_DEV));

out_unlock:
	mutex_unlock(&dev->struct_mutex);
out:
	switch (ret) {
	case -EAGAIN:
	case 0:
	case -ERESTARTSYS:
	case -EINTR:
	case -EBUSY:
		/*
		 * EBUSY is ok: this just means that another thread
		 * already did the job.
		 */
		return VM_FAULT_NOPAGE;
	case -ENOMEM:
		return VM_FAULT_OOM;
	default:
		return VM_FAULT_SIGBUS;
	}
}

/** get mmap offset */
static uint64_t mmap_offset(struct drm_gem_object *obj)
{
	struct drm_device *dev = obj->dev;
	int ret;

	WARN_ON(!mutex_is_locked(&dev->struct_mutex));

	/* Make it mmapable */
	ret = drm_gem_create_mmap_offset(obj);

	if (ret) {
		dev_err(dev->dev, "could not allocate mmap offset\n");
		return 0;
	}

	return drm_vma_node_offset_addr(&obj->vma_node);
}

uint64_t msm_gem_mmap_offset(struct drm_gem_object *obj)
{
	uint64_t offset;
	mutex_lock(&obj->dev->struct_mutex);
	offset = mmap_offset(obj);
	mutex_unlock(&obj->dev->struct_mutex);
	return offset;
}

/* should be called under struct_mutex.. although it can be called
 * from atomic context without struct_mutex to acquire an extra
 * iova ref if you know one is already held.
 *
 * That means when I do eventually need to add support for unpinning,
 * the refcnt counter needs to be atomic_t.
 */
int msm_gem_get_iova_locked(struct drm_gem_object *obj, int id,
		uint32_t *iova)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	int ret = 0;

	if (!msm_obj->domain[id].iova) {
		struct msm_drm_private *priv = obj->dev->dev_private;
		struct page **pages = get_pages(obj);

		if (IS_ERR(pages))
			return PTR_ERR(pages);

		if (iommu_present(&platform_bus_type)) {
			struct msm_mmu *mmu = priv->mmus[id];
			uint32_t offset;

			if (WARN_ON(!mmu))
				return -EINVAL;

			offset = (uint32_t)mmap_offset(obj);
			ret = mmu->funcs->map(mmu, offset, msm_obj->sgt,
					obj->size, IOMMU_READ | IOMMU_WRITE);
			msm_obj->domain[id].iova = offset;
		} else {
			msm_obj->domain[id].iova = physaddr(obj);
		}
	}

	if (!ret)
		*iova = msm_obj->domain[id].iova;

	return ret;
}

/* get iova, taking a reference.  Should have a matching put */
int msm_gem_get_iova(struct drm_gem_object *obj, int id, uint32_t *iova)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	int ret;

	/* this is safe right now because we don't unmap until the
	 * bo is deleted:
	 */
	if (msm_obj->domain[id].iova) {
		*iova = msm_obj->domain[id].iova;
		return 0;
	}

	mutex_lock(&obj->dev->struct_mutex);
	ret = msm_gem_get_iova_locked(obj, id, iova);
	mutex_unlock(&obj->dev->struct_mutex);
	return ret;
}

/* get iova without taking a reference, used in places where you have
 * already done a 'msm_gem_get_iova()'.
 */
uint32_t msm_gem_iova(struct drm_gem_object *obj, int id)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	WARN_ON(!msm_obj->domain[id].iova);
	return msm_obj->domain[id].iova;
}

void msm_gem_put_iova(struct drm_gem_object *obj, int id)
{
	// XXX TODO ..
	// NOTE: probably don't need a _locked() version.. we wouldn't
	// normally unmap here, but instead just mark that it could be
	// unmapped (if the iova refcnt drops to zero), but then later
	// if another _get_iova_locked() fails we can start unmapping
	// things that are no longer needed..
}

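/* Dumb buffers: the generic KMS allocation path for software-rendered
 * scanout.  Pitch comes from align_pitch(); the buffer is allocated
 * write-combined and flagged as scanout-capable:
 */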
int msm_gem_dumb_create(struct drm_file *file, struct drm_device *dev,
		struct drm_mode_create_dumb *args)
{
	args->pitch = align_pitch(args->width, args->bpp);
	args->size = PAGE_ALIGN(args->pitch * args->height);
	return msm_gem_new_handle(dev, file, args->size,
			MSM_BO_SCANOUT | MSM_BO_WC, &args->handle);
}

int msm_gem_dumb_map_offset(struct drm_file *file, struct drm_device *dev,
		uint32_t handle, uint64_t *offset)
{
	struct drm_gem_object *obj;
	int ret = 0;

	/* GEM does all our handle to object mapping */
	obj = drm_gem_object_lookup(file, handle);
	if (obj == NULL) {
		ret = -ENOENT;
		goto fail;
	}

	*offset = msm_gem_mmap_offset(obj);

	drm_gem_object_unreference_unlocked(obj);

fail:
	return ret;
}

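/* Map the whole object into the kernel on first use and cache the
 * result; the write-combined vmap() lives until the object is freed.
 * Caller must hold struct_mutex:
 */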
void *msm_gem_vaddr_locked(struct drm_gem_object *obj)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	WARN_ON(!mutex_is_locked(&obj->dev->struct_mutex));
	if (!msm_obj->vaddr) {
		struct page **pages = get_pages(obj);
		if (IS_ERR(pages))
			return ERR_CAST(pages);
		msm_obj->vaddr = vmap(pages, obj->size >> PAGE_SHIFT,
				VM_MAP, pgprot_writecombine(PAGE_KERNEL));
		if (msm_obj->vaddr == NULL)
			return ERR_PTR(-ENOMEM);
	}
	return msm_obj->vaddr;
}

void *msm_gem_vaddr(struct drm_gem_object *obj)
{
	void *ret;
	mutex_lock(&obj->dev->struct_mutex);
	ret = msm_gem_vaddr_locked(obj);
	mutex_unlock(&obj->dev->struct_mutex);
	return ret;
}

/* must be called before _move_to_active().. */
int msm_gem_sync_object(struct drm_gem_object *obj,
		struct msm_fence_context *fctx, bool exclusive)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	struct reservation_object_list *fobj;
	struct fence *fence;
	int i, ret;

	if (!exclusive) {
		/* NOTE: _reserve_shared() must happen before _add_shared_fence(),
		 * which makes this a slightly strange place to call it.  OTOH this
		 * is a convenient can-fail point to hook it in.  (And similar to
		 * how etnaviv and nouveau handle this.)
		 */
		ret = reservation_object_reserve_shared(msm_obj->resv);
		if (ret)
			return ret;
	}

	fobj = reservation_object_get_list(msm_obj->resv);
	if (!fobj || (fobj->shared_count == 0)) {
		fence = reservation_object_get_excl(msm_obj->resv);
		/* don't need to wait on our own fences, since ring is fifo */
		if (fence && (fence->context != fctx->context)) {
			ret = fence_wait(fence, true);
			if (ret)
				return ret;
		}
	}

	if (!exclusive || !fobj)
		return 0;

	for (i = 0; i < fobj->shared_count; i++) {
		fence = rcu_dereference_protected(fobj->shared[i],
					reservation_object_held(msm_obj->resv));
		if (fence->context != fctx->context) {
			ret = fence_wait(fence, true);
			if (ret)
				return ret;
		}
	}

	return 0;
}

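/* Attach the submit's fence to the object's reservation object
 * (exclusive for writes, shared for reads) and move the object onto
 * the GPU's active list:
 */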
void msm_gem_move_to_active(struct drm_gem_object *obj,
		struct msm_gpu *gpu, bool exclusive, struct fence *fence)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	msm_obj->gpu = gpu;
	if (exclusive)
		reservation_object_add_excl_fence(msm_obj->resv, fence);
	else
		reservation_object_add_shared_fence(msm_obj->resv, fence);
	list_del_init(&msm_obj->mm_list);
	list_add_tail(&msm_obj->mm_list, &gpu->active_list);
}

void msm_gem_move_to_inactive(struct drm_gem_object *obj)
{
	struct drm_device *dev = obj->dev;
	struct msm_drm_private *priv = dev->dev_private;
	struct msm_gem_object *msm_obj = to_msm_bo(obj);

	WARN_ON(!mutex_is_locked(&dev->struct_mutex));

	msm_obj->gpu = NULL;
	list_del_init(&msm_obj->mm_list);
	list_add_tail(&msm_obj->mm_list, &priv->inactive_list);
}

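/* Wait for any GPU access to finish before CPU access.  With
 * MSM_PREP_NOSYNC this only polls the reservation object; otherwise it
 * blocks (interruptibly) until the relevant fences signal or the
 * timeout expires:
 */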
int msm_gem_cpu_prep(struct drm_gem_object *obj, uint32_t op, ktime_t *timeout)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	bool write = !!(op & MSM_PREP_WRITE);

	if (op & MSM_PREP_NOSYNC) {
		if (!reservation_object_test_signaled_rcu(msm_obj->resv, write))
			return -EBUSY;
	} else {
		int ret;

		ret = reservation_object_wait_timeout_rcu(msm_obj->resv, write,
				true, timeout_to_jiffies(timeout));
		if (ret <= 0)
			return ret == 0 ? -ETIMEDOUT : ret;
	}

	/* TODO cache maintenance */

	return 0;
}

int msm_gem_cpu_fini(struct drm_gem_object *obj)
{
	/* TODO cache maintenance */
	return 0;
}

#ifdef CONFIG_DEBUG_FS
static void describe_fence(struct fence *fence, const char *type,
		struct seq_file *m)
{
	if (!fence_is_signaled(fence))
		seq_printf(m, "\t%9s: %s %s seq %u\n", type,
				fence->ops->get_driver_name(fence),
				fence->ops->get_timeline_name(fence),
				fence->seqno);
}

void msm_gem_describe(struct drm_gem_object *obj, struct seq_file *m)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	struct reservation_object *robj = msm_obj->resv;
	struct reservation_object_list *fobj;
	struct fence *fence;
	uint64_t off = drm_vma_node_start(&obj->vma_node);

	WARN_ON(!mutex_is_locked(&obj->dev->struct_mutex));

	seq_printf(m, "%08x: %c %2d (%2d) %08llx %p %zu\n",
			msm_obj->flags, is_active(msm_obj) ? 'A' : 'I',
			obj->name, obj->refcount.refcount.counter,
			off, msm_obj->vaddr, obj->size);

	rcu_read_lock();
	fobj = rcu_dereference(robj->fence);
	if (fobj) {
		unsigned int i, shared_count = fobj->shared_count;

		for (i = 0; i < shared_count; i++) {
			fence = rcu_dereference(fobj->shared[i]);
			describe_fence(fence, "Shared", m);
		}
	}

	fence = rcu_dereference(robj->fence_excl);
	if (fence)
		describe_fence(fence, "Exclusive", m);
	rcu_read_unlock();
}

void msm_gem_describe_objects(struct list_head *list, struct seq_file *m)
{
	struct msm_gem_object *msm_obj;
	int count = 0;
	size_t size = 0;

	list_for_each_entry(msm_obj, list, mm_list) {
		struct drm_gem_object *obj = &msm_obj->base;
		seq_printf(m, "   ");
		msm_gem_describe(obj, m);
		count++;
		size += obj->size;
	}

	seq_printf(m, "Total %d objects, %zu bytes\n", count, size);
}
#endif

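/* Free an object: unmap it from every MMU it is mapped in, release the
 * backing pages (or, for imported dmabufs, only the page array we
 * allocated), drop any kernel mapping, and free the object itself:
 */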
void msm_gem_free_object(struct drm_gem_object *obj)
{
	struct drm_device *dev = obj->dev;
	struct msm_drm_private *priv = obj->dev->dev_private;
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	int id;

	WARN_ON(!mutex_is_locked(&dev->struct_mutex));

	/* object should not be on active list: */
	WARN_ON(is_active(msm_obj));

	list_del(&msm_obj->mm_list);

	for (id = 0; id < ARRAY_SIZE(msm_obj->domain); id++) {
		struct msm_mmu *mmu = priv->mmus[id];
		if (mmu && msm_obj->domain[id].iova) {
			uint32_t offset = msm_obj->domain[id].iova;
			mmu->funcs->unmap(mmu, offset, msm_obj->sgt, obj->size);
		}
	}

	if (obj->import_attach) {
		if (msm_obj->vaddr)
			dma_buf_vunmap(obj->import_attach->dmabuf, msm_obj->vaddr);

		/* Don't drop the pages for imported dmabuf, as they are not
		 * ours, just free the array we allocated:
		 */
		if (msm_obj->pages)
			drm_free_large(msm_obj->pages);

		drm_prime_gem_destroy(obj, msm_obj->sgt);
	} else {
		vunmap(msm_obj->vaddr);
		put_pages(obj);
	}

	if (msm_obj->resv == &msm_obj->_resv)
		reservation_object_fini(msm_obj->resv);

	drm_gem_object_release(obj);

	kfree(msm_obj);
}

/* convenience method to construct a GEM buffer object, and userspace handle */
int msm_gem_new_handle(struct drm_device *dev, struct drm_file *file,
		uint32_t size, uint32_t flags, uint32_t *handle)
{
	struct drm_gem_object *obj;
	int ret;

	ret = mutex_lock_interruptible(&dev->struct_mutex);
	if (ret)
		return ret;

	obj = msm_gem_new(dev, size, flags);

	mutex_unlock(&dev->struct_mutex);

	if (IS_ERR(obj))
		return PTR_ERR(obj);

	ret = drm_gem_handle_create(file, obj, handle);

	/* drop reference from allocate - handle holds it now */
	drm_gem_object_unreference_unlocked(obj);

	return ret;
}

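/* Common allocation path shared by msm_gem_new() and msm_gem_import():
 * validates the cache flags, decides between shmem and the VRAM
 * carveout (tacking a drm_mm_node onto the allocation in the carveout
 * case), and initializes the reservation object and lists:
 */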
static int msm_gem_new_impl(struct drm_device *dev,
		uint32_t size, uint32_t flags,
		struct reservation_object *resv,
		struct drm_gem_object **obj)
{
	struct msm_drm_private *priv = dev->dev_private;
	struct msm_gem_object *msm_obj;
	unsigned sz;
	bool use_vram = false;

	switch (flags & MSM_BO_CACHE_MASK) {
	case MSM_BO_UNCACHED:
	case MSM_BO_CACHED:
	case MSM_BO_WC:
		break;
	default:
		dev_err(dev->dev, "invalid cache flag: %x\n",
				(flags & MSM_BO_CACHE_MASK));
		return -EINVAL;
	}

	if (!iommu_present(&platform_bus_type))
		use_vram = true;
	else if ((flags & MSM_BO_STOLEN) && priv->vram.size)
		use_vram = true;

	if (WARN_ON(use_vram && !priv->vram.size))
		return -EINVAL;

	sz = sizeof(*msm_obj);
	if (use_vram)
		sz += sizeof(struct drm_mm_node);

	msm_obj = kzalloc(sz, GFP_KERNEL);
	if (!msm_obj)
		return -ENOMEM;

	if (use_vram)
		msm_obj->vram_node = (void *)&msm_obj[1];

	msm_obj->flags = flags;

	if (resv) {
		msm_obj->resv = resv;
	} else {
		msm_obj->resv = &msm_obj->_resv;
		reservation_object_init(msm_obj->resv);
	}

	INIT_LIST_HEAD(&msm_obj->submit_entry);
	list_add_tail(&msm_obj->mm_list, &priv->inactive_list);

	*obj = &msm_obj->base;

	return 0;
}

struct drm_gem_object *msm_gem_new(struct drm_device *dev,
		uint32_t size, uint32_t flags)
{
	struct drm_gem_object *obj = NULL;
	int ret;

	WARN_ON(!mutex_is_locked(&dev->struct_mutex));

	size = PAGE_ALIGN(size);

	ret = msm_gem_new_impl(dev, size, flags, NULL, &obj);
	if (ret)
		goto fail;

	if (use_pages(obj)) {
		ret = drm_gem_object_init(dev, obj, size);
		if (ret)
			goto fail;
	} else {
		drm_gem_private_object_init(dev, obj, size);
	}

	return obj;

fail:
	if (obj)
		drm_gem_object_unreference(obj);

	return ERR_PTR(ret);
}

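/* Wrap an imported dmabuf's sg_table in a GEM object.  The pages are
 * owned by the exporter, so we only build a page array pointing at
 * them; the exporter's reservation object is reused for fencing:
 */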
struct drm_gem_object *msm_gem_import(struct drm_device *dev,
		struct dma_buf *dmabuf, struct sg_table *sgt)
{
	struct msm_gem_object *msm_obj;
	struct drm_gem_object *obj;
	uint32_t size;
	int ret, npages;

	/* if we don't have IOMMU, don't bother pretending we can import: */
	if (!iommu_present(&platform_bus_type)) {
		dev_err(dev->dev, "cannot import without IOMMU\n");
		return ERR_PTR(-EINVAL);
	}

	size = PAGE_ALIGN(dmabuf->size);

	ret = msm_gem_new_impl(dev, size, MSM_BO_WC, dmabuf->resv, &obj);
	if (ret)
		goto fail;

	drm_gem_private_object_init(dev, obj, size);

	npages = size / PAGE_SIZE;

	msm_obj = to_msm_bo(obj);
	msm_obj->sgt = sgt;
	msm_obj->pages = drm_malloc_ab(npages, sizeof(struct page *));
	if (!msm_obj->pages) {
		ret = -ENOMEM;
		goto fail;
	}

	ret = drm_prime_sg_to_page_addr_arrays(sgt, msm_obj->pages, NULL, npages);
	if (ret)
		goto fail;

	return obj;

fail:
	if (obj)
		drm_gem_object_unreference_unlocked(obj);

	return ERR_PTR(ret);
}