drivers/gpu/drm/drm_prime.c
/*
 * Copyright © 2012 Red Hat
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 * Authors:
 *      Dave Airlie <airlied@redhat.com>
 *      Rob Clark <rob.clark@linaro.org>
 *
 */

#include <linux/export.h>
#include <linux/dma-buf.h>
#include <drm/drmP.h>

/*
 * DMA-BUF/GEM Object references and lifetime overview:
 *
 * On export the dma_buf holds a reference to the exporting GEM
 * object. It takes this reference in handle_to_fd_ioctl, when it
 * first calls .prime_export and stores the exporting GEM object in
 * the dma_buf priv. This reference is released when the dma_buf
 * object goes away in the driver .release function.
 *
 * On import the importing GEM object holds a reference to the
 * dma_buf (which in turn holds a ref to the exporting GEM object).
 * It takes that reference in the fd_to_handle ioctl: it calls
 * dma_buf_get, creates an attachment to the dma_buf and stores the
 * attachment in the GEM object. When the imported object is destroyed,
 * the attachment is removed and the reference to the dma_buf is dropped.
 *
 * Thus the chain of references always flows in one direction
 * (avoiding loops): importing_gem -> dmabuf -> exporting_gem
 *
 * Self-importing: if userspace is using PRIME as a replacement for flink
 * then it will get an fd->handle request for a GEM object that it created.
 * Drivers should detect this situation and return the GEM object from the
 * dma-buf private. PRIME will do this automatically for drivers that use
 * the drm_gem_prime_{import,export} helpers.
 */

struct drm_prime_member {
        struct list_head entry;
        struct dma_buf *dma_buf;
        uint32_t handle;
};

struct drm_prime_attachment {
        struct sg_table *sgt;
        enum dma_data_direction dir;
};

static int drm_prime_add_buf_handle(struct drm_prime_file_private *prime_fpriv,
                                    struct dma_buf *dma_buf, uint32_t handle)
{
        struct drm_prime_member *member;

        member = kmalloc(sizeof(*member), GFP_KERNEL);
        if (!member)
                return -ENOMEM;

        get_dma_buf(dma_buf);
        member->dma_buf = dma_buf;
        member->handle = handle;
        list_add(&member->entry, &prime_fpriv->head);
        return 0;
}

static struct dma_buf *drm_prime_lookup_buf_by_handle(struct drm_prime_file_private *prime_fpriv,
                                                      uint32_t handle)
{
        struct drm_prime_member *member;

        list_for_each_entry(member, &prime_fpriv->head, entry) {
                if (member->handle == handle)
                        return member->dma_buf;
        }

        return NULL;
}

static int drm_prime_lookup_buf_handle(struct drm_prime_file_private *prime_fpriv,
                                       struct dma_buf *dma_buf,
                                       uint32_t *handle)
{
        struct drm_prime_member *member;

        list_for_each_entry(member, &prime_fpriv->head, entry) {
                if (member->dma_buf == dma_buf) {
                        *handle = member->handle;
                        return 0;
                }
        }
        return -ENOENT;
}

static int drm_gem_map_attach(struct dma_buf *dma_buf,
                              struct device *target_dev,
                              struct dma_buf_attachment *attach)
{
        struct drm_prime_attachment *prime_attach;
        struct drm_gem_object *obj = dma_buf->priv;
        struct drm_device *dev = obj->dev;

        prime_attach = kzalloc(sizeof(*prime_attach), GFP_KERNEL);
        if (!prime_attach)
                return -ENOMEM;

        prime_attach->dir = DMA_NONE;
        attach->priv = prime_attach;

        if (!dev->driver->gem_prime_pin)
                return 0;

        return dev->driver->gem_prime_pin(obj);
}

static void drm_gem_map_detach(struct dma_buf *dma_buf,
                               struct dma_buf_attachment *attach)
{
        struct drm_prime_attachment *prime_attach = attach->priv;
        struct drm_gem_object *obj = dma_buf->priv;
        struct drm_device *dev = obj->dev;
        struct sg_table *sgt;

        if (dev->driver->gem_prime_unpin)
                dev->driver->gem_prime_unpin(obj);

        if (!prime_attach)
                return;

        sgt = prime_attach->sgt;
        if (sgt) {
                if (prime_attach->dir != DMA_NONE)
                        dma_unmap_sg(attach->dev, sgt->sgl, sgt->nents,
                                     prime_attach->dir);
                sg_free_table(sgt);
        }

        kfree(sgt);
        kfree(prime_attach);
        attach->priv = NULL;
}

void drm_prime_remove_buf_handle_locked(struct drm_prime_file_private *prime_fpriv,
                                        struct dma_buf *dma_buf)
{
        struct drm_prime_member *member, *safe;

        list_for_each_entry_safe(member, safe, &prime_fpriv->head, entry) {
                if (member->dma_buf == dma_buf) {
                        dma_buf_put(dma_buf);
                        list_del(&member->entry);
                        kfree(member);
                }
        }
}

static struct sg_table *drm_gem_map_dma_buf(struct dma_buf_attachment *attach,
                                            enum dma_data_direction dir)
{
        struct drm_prime_attachment *prime_attach = attach->priv;
        struct drm_gem_object *obj = attach->dmabuf->priv;
        struct sg_table *sgt;

        if (WARN_ON(dir == DMA_NONE || !prime_attach))
                return ERR_PTR(-EINVAL);

        /* return the cached mapping when possible */
        if (prime_attach->dir == dir)
                return prime_attach->sgt;

        /*
         * two mappings with different directions for the same attachment are
         * not allowed
         */
        if (WARN_ON(prime_attach->dir != DMA_NONE))
                return ERR_PTR(-EBUSY);

        sgt = obj->dev->driver->gem_prime_get_sg_table(obj);

        if (!IS_ERR(sgt)) {
                if (!dma_map_sg(attach->dev, sgt->sgl, sgt->nents, dir)) {
                        sg_free_table(sgt);
                        kfree(sgt);
                        sgt = ERR_PTR(-ENOMEM);
                } else {
                        prime_attach->sgt = sgt;
                        prime_attach->dir = dir;
                }
        }

        return sgt;
}

static void drm_gem_unmap_dma_buf(struct dma_buf_attachment *attach,
                                  struct sg_table *sgt,
                                  enum dma_data_direction dir)
{
        /* nothing to be done here */
}

/**
 * drm_gem_dmabuf_release - dma_buf release implementation for GEM
 * @dma_buf: buffer to be released
 *
 * Generic release function for dma_bufs exported as PRIME buffers. GEM drivers
 * must use this in their dma_buf ops structure as the release callback.
 */
void drm_gem_dmabuf_release(struct dma_buf *dma_buf)
{
        struct drm_gem_object *obj = dma_buf->priv;

        /* drop the reference the export fd holds */
        drm_gem_object_unreference_unlocked(obj);
}
EXPORT_SYMBOL(drm_gem_dmabuf_release);

static void *drm_gem_dmabuf_vmap(struct dma_buf *dma_buf)
{
        struct drm_gem_object *obj = dma_buf->priv;
        struct drm_device *dev = obj->dev;

        return dev->driver->gem_prime_vmap(obj);
}

static void drm_gem_dmabuf_vunmap(struct dma_buf *dma_buf, void *vaddr)
{
        struct drm_gem_object *obj = dma_buf->priv;
        struct drm_device *dev = obj->dev;

        dev->driver->gem_prime_vunmap(obj, vaddr);
}

static void *drm_gem_dmabuf_kmap_atomic(struct dma_buf *dma_buf,
                                        unsigned long page_num)
{
        return NULL;
}

static void drm_gem_dmabuf_kunmap_atomic(struct dma_buf *dma_buf,
                                         unsigned long page_num, void *addr)
{

}

static void *drm_gem_dmabuf_kmap(struct dma_buf *dma_buf,
                                 unsigned long page_num)
{
        return NULL;
}

static void drm_gem_dmabuf_kunmap(struct dma_buf *dma_buf,
                                  unsigned long page_num, void *addr)
{

}

static int drm_gem_dmabuf_mmap(struct dma_buf *dma_buf,
                               struct vm_area_struct *vma)
{
        struct drm_gem_object *obj = dma_buf->priv;
        struct drm_device *dev = obj->dev;

        if (!dev->driver->gem_prime_mmap)
                return -ENOSYS;

        return dev->driver->gem_prime_mmap(obj, vma);
}

static const struct dma_buf_ops drm_gem_prime_dmabuf_ops = {
        .attach = drm_gem_map_attach,
        .detach = drm_gem_map_detach,
        .map_dma_buf = drm_gem_map_dma_buf,
        .unmap_dma_buf = drm_gem_unmap_dma_buf,
        .release = drm_gem_dmabuf_release,
        .kmap = drm_gem_dmabuf_kmap,
        .kmap_atomic = drm_gem_dmabuf_kmap_atomic,
        .kunmap = drm_gem_dmabuf_kunmap,
        .kunmap_atomic = drm_gem_dmabuf_kunmap_atomic,
        .mmap = drm_gem_dmabuf_mmap,
        .vmap = drm_gem_dmabuf_vmap,
        .vunmap = drm_gem_dmabuf_vunmap,
};

/**
 * DOC: PRIME Helpers
 *
 * Drivers can implement @gem_prime_export and @gem_prime_import in terms of
 * simpler APIs by using the helper functions @drm_gem_prime_export and
 * @drm_gem_prime_import. These functions implement dma-buf support in terms of
 * five lower-level driver callbacks:
 *
 * Export callbacks:
 *
 *  - @gem_prime_pin (optional): prepare a GEM object for exporting
 *
 *  - @gem_prime_get_sg_table: provide a scatter/gather table of pinned pages
 *
 *  - @gem_prime_vmap: vmap a buffer exported by your driver
 *
 *  - @gem_prime_vunmap: vunmap a buffer exported by your driver
 *
 * Import callback:
 *
 *  - @gem_prime_import_sg_table (import): produce a GEM object from another
 *    driver's scatter/gather table
 *
 * A sketch of how a driver wires these up follows this comment.
 */

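/*
 * Illustrative sketch (not part of this file): a driver using the PRIME
 * helpers typically fills in its struct drm_driver roughly as below. The
 * foo_* functions are hypothetical driver callbacks, named here only for
 * the example.
 *
 *	static struct drm_driver foo_driver = {
 *		.driver_features	   = DRIVER_GEM | DRIVER_PRIME,
 *		.prime_handle_to_fd	   = drm_gem_prime_handle_to_fd,
 *		.prime_fd_to_handle	   = drm_gem_prime_fd_to_handle,
 *		.gem_prime_export	   = drm_gem_prime_export,
 *		.gem_prime_import	   = drm_gem_prime_import,
 *		.gem_prime_pin		   = foo_gem_prime_pin,
 *		.gem_prime_get_sg_table	   = foo_gem_prime_get_sg_table,
 *		.gem_prime_import_sg_table = foo_gem_prime_import_sg_table,
 *		.gem_prime_vmap		   = foo_gem_prime_vmap,
 *		.gem_prime_vunmap	   = foo_gem_prime_vunmap,
 *	};
 */
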
/**
 * drm_gem_prime_export - helper library implementation of the export callback
 * @dev: drm_device to export from
 * @obj: GEM object to export
 * @flags: flags like DRM_CLOEXEC
 *
 * This is the implementation of the gem_prime_export functions for GEM drivers
 * using the PRIME helpers.
 */
struct dma_buf *drm_gem_prime_export(struct drm_device *dev,
                                     struct drm_gem_object *obj, int flags)
{
        struct reservation_object *robj = NULL;

        if (dev->driver->gem_prime_res_obj)
                robj = dev->driver->gem_prime_res_obj(obj);

        return dma_buf_export(obj, &drm_gem_prime_dmabuf_ops, obj->size,
                              flags, robj);
}
EXPORT_SYMBOL(drm_gem_prime_export);

static struct dma_buf *export_and_register_object(struct drm_device *dev,
                                                  struct drm_gem_object *obj,
                                                  uint32_t flags)
{
        struct dma_buf *dmabuf;

        /* prevent races with concurrent gem_close. */
        if (obj->handle_count == 0) {
                dmabuf = ERR_PTR(-ENOENT);
                return dmabuf;
        }

        dmabuf = dev->driver->gem_prime_export(dev, obj, flags);
        if (IS_ERR(dmabuf)) {
                /* normally the created dma-buf takes ownership of the ref,
                 * but if that fails then drop the ref
                 */
                return dmabuf;
        }

        /*
         * Note that callers do not need to clean up the export cache
         * since the check for obj->handle_count guarantees that someone
         * will clean it up.
         */
        obj->dma_buf = dmabuf;
        get_dma_buf(obj->dma_buf);
        /* Grab a new ref since the caller's ref is now used by the dma-buf */
        drm_gem_object_reference(obj);

        return dmabuf;
}

/**
 * drm_gem_prime_handle_to_fd - PRIME export function for GEM drivers
 * @dev: dev to export the buffer from
 * @file_priv: drm file-private structure
 * @handle: buffer handle to export
 * @flags: flags like DRM_CLOEXEC
 * @prime_fd: pointer to storage for the fd id of the created dma-buf
 *
 * This is the PRIME export function which GEM drivers must use to ensure
 * correct lifetime management of the underlying GEM object. The actual
 * exporting from GEM object to a dma-buf is done through the
 * gem_prime_export driver callback.
 */
int drm_gem_prime_handle_to_fd(struct drm_device *dev,
                               struct drm_file *file_priv, uint32_t handle,
                               uint32_t flags,
                               int *prime_fd)
{
        struct drm_gem_object *obj;
        int ret = 0;
        struct dma_buf *dmabuf;

        mutex_lock(&file_priv->prime.lock);
        obj = drm_gem_object_lookup(dev, file_priv, handle);
        if (!obj) {
                ret = -ENOENT;
                goto out_unlock;
        }

        dmabuf = drm_prime_lookup_buf_by_handle(&file_priv->prime, handle);
        if (dmabuf) {
                get_dma_buf(dmabuf);
                goto out_have_handle;
        }

        mutex_lock(&dev->object_name_lock);
        /* re-export the original imported object */
        if (obj->import_attach) {
                dmabuf = obj->import_attach->dmabuf;
                get_dma_buf(dmabuf);
                goto out_have_obj;
        }

        if (obj->dma_buf) {
                get_dma_buf(obj->dma_buf);
                dmabuf = obj->dma_buf;
                goto out_have_obj;
        }

        dmabuf = export_and_register_object(dev, obj, flags);
        if (IS_ERR(dmabuf)) {
                /* normally the created dma-buf takes ownership of the ref,
                 * but if that fails then drop the ref
                 */
                ret = PTR_ERR(dmabuf);
                mutex_unlock(&dev->object_name_lock);
                goto out;
        }

out_have_obj:
        /*
         * If we've exported this buffer then cheat and add it to the import list
         * so we get the correct handle back. We must do this under the
         * protection of dev->object_name_lock to ensure that a racing gem close
         * ioctl doesn't miss removing this buffer handle from the cache.
         */
        ret = drm_prime_add_buf_handle(&file_priv->prime,
                                       dmabuf, handle);
        mutex_unlock(&dev->object_name_lock);
        if (ret)
                goto fail_put_dmabuf;

out_have_handle:
        ret = dma_buf_fd(dmabuf, flags);
        /*
         * We must _not_ remove the buffer from the handle cache since the newly
         * created dma buf is already linked in the global obj->dma_buf pointer,
         * and that is invariant as long as a userspace gem handle exists.
         * Closing the handle will clean out the cache anyway, so we don't leak.
         */
        if (ret < 0) {
                goto fail_put_dmabuf;
        } else {
                *prime_fd = ret;
                ret = 0;
        }

        goto out;

fail_put_dmabuf:
        dma_buf_put(dmabuf);
out:
        drm_gem_object_unreference_unlocked(obj);
out_unlock:
        mutex_unlock(&file_priv->prime.lock);

        return ret;
}
EXPORT_SYMBOL(drm_gem_prime_handle_to_fd);

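/*
 * Userspace view of the export path (illustrative; assumes libdrm's
 * <xf86drm.h> wrappers, error handling omitted):
 *
 *	int prime_fd;
 *
 *	drmPrimeHandleToFD(drm_fd, handle, DRM_CLOEXEC, &prime_fd);
 *
 * prime_fd can then be passed to another process, e.g. over a unix domain
 * socket, and turned back into a handle there with drmPrimeFDToHandle().
 */
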
/**
 * drm_gem_prime_import - helper library implementation of the import callback
 * @dev: drm_device to import into
 * @dma_buf: dma-buf object to import
 *
 * This is the implementation of the gem_prime_import functions for GEM drivers
 * using the PRIME helpers.
 */
struct drm_gem_object *drm_gem_prime_import(struct drm_device *dev,
                                            struct dma_buf *dma_buf)
{
        struct dma_buf_attachment *attach;
        struct sg_table *sgt;
        struct drm_gem_object *obj;
        int ret;

        if (!dev->driver->gem_prime_import_sg_table)
                return ERR_PTR(-EINVAL);

        if (dma_buf->ops == &drm_gem_prime_dmabuf_ops) {
                obj = dma_buf->priv;
                if (obj->dev == dev) {
                        /*
                         * Importing a dmabuf exported from our own gem
                         * increases the refcount on the gem itself instead
                         * of the f_count of the dmabuf.
                         */
                        drm_gem_object_reference(obj);
                        return obj;
                }
        }

        attach = dma_buf_attach(dma_buf, dev->dev);
        if (IS_ERR(attach))
                return ERR_CAST(attach);

        get_dma_buf(dma_buf);

        sgt = dma_buf_map_attachment(attach, DMA_BIDIRECTIONAL);
        if (IS_ERR(sgt)) {
                ret = PTR_ERR(sgt);
                goto fail_detach;
        }

        obj = dev->driver->gem_prime_import_sg_table(dev, dma_buf->size, sgt);
        if (IS_ERR(obj)) {
                ret = PTR_ERR(obj);
                goto fail_unmap;
        }

        obj->import_attach = attach;

        return obj;

fail_unmap:
        dma_buf_unmap_attachment(attach, sgt, DMA_BIDIRECTIONAL);
fail_detach:
        dma_buf_detach(dma_buf, attach);
        dma_buf_put(dma_buf);

        return ERR_PTR(ret);
}
EXPORT_SYMBOL(drm_gem_prime_import);

/**
 * drm_gem_prime_fd_to_handle - PRIME import function for GEM drivers
 * @dev: dev to import the buffer into
 * @file_priv: drm file-private structure
 * @prime_fd: fd id of the dma-buf which should be imported
 * @handle: pointer to storage for the handle of the imported buffer object
 *
 * This is the PRIME import function which GEM drivers must use to ensure
 * correct lifetime management of the underlying GEM object. The actual
 * importing of the GEM object from the dma-buf is done through the
 * gem_prime_import driver callback.
 */
int drm_gem_prime_fd_to_handle(struct drm_device *dev,
                               struct drm_file *file_priv, int prime_fd,
                               uint32_t *handle)
{
        struct dma_buf *dma_buf;
        struct drm_gem_object *obj;
        int ret;

        dma_buf = dma_buf_get(prime_fd);
        if (IS_ERR(dma_buf))
                return PTR_ERR(dma_buf);

        mutex_lock(&file_priv->prime.lock);

        ret = drm_prime_lookup_buf_handle(&file_priv->prime,
                                          dma_buf, handle);
        if (ret == 0)
                goto out_put;

        /* never seen this one, need to import */
        mutex_lock(&dev->object_name_lock);
        obj = dev->driver->gem_prime_import(dev, dma_buf);
        if (IS_ERR(obj)) {
                ret = PTR_ERR(obj);
                goto out_unlock;
        }

        if (obj->dma_buf) {
                WARN_ON(obj->dma_buf != dma_buf);
        } else {
                obj->dma_buf = dma_buf;
                get_dma_buf(dma_buf);
        }

        /* drm_gem_handle_create_tail unlocks dev->object_name_lock. */
        ret = drm_gem_handle_create_tail(file_priv, obj, handle);
        drm_gem_object_unreference_unlocked(obj);
        if (ret)
                goto out_put;

        ret = drm_prime_add_buf_handle(&file_priv->prime,
                                       dma_buf, *handle);
        if (ret)
                goto fail;

        mutex_unlock(&file_priv->prime.lock);

        dma_buf_put(dma_buf);

        return 0;

fail:
        /* hmm, if driver attached, we are relying on the free-object path
         * to detach.. which seems ok..
         */
        drm_gem_handle_delete(file_priv, *handle);
out_unlock:
        mutex_unlock(&dev->object_name_lock);
out_put:
        dma_buf_put(dma_buf);
        mutex_unlock(&file_priv->prime.lock);
        return ret;
}
EXPORT_SYMBOL(drm_gem_prime_fd_to_handle);

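/*
 * Userspace view of the import path (illustrative; assumes libdrm's
 * <xf86drm.h> wrappers, error handling omitted):
 *
 *	uint32_t handle;
 *
 *	drmPrimeFDToHandle(drm_fd, prime_fd, &handle);
 *
 * handle then behaves like any other GEM handle on drm_fd; importing the
 * same dma-buf a second time returns the same handle, courtesy of the
 * per-file handle cache maintained above.
 */
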
int drm_prime_handle_to_fd_ioctl(struct drm_device *dev, void *data,
                                 struct drm_file *file_priv)
{
        struct drm_prime_handle *args = data;
        uint32_t flags;

        if (!drm_core_check_feature(dev, DRIVER_PRIME))
                return -EINVAL;

        if (!dev->driver->prime_handle_to_fd)
                return -ENOSYS;

        /* check flags are valid */
        if (args->flags & ~DRM_CLOEXEC)
                return -EINVAL;

        /* we only want to pass DRM_CLOEXEC which is == O_CLOEXEC */
        flags = args->flags & DRM_CLOEXEC;

        return dev->driver->prime_handle_to_fd(dev, file_priv,
                                               args->handle, flags, &args->fd);
}

int drm_prime_fd_to_handle_ioctl(struct drm_device *dev, void *data,
                                 struct drm_file *file_priv)
{
        struct drm_prime_handle *args = data;

        if (!drm_core_check_feature(dev, DRIVER_PRIME))
                return -EINVAL;

        if (!dev->driver->prime_fd_to_handle)
                return -ENOSYS;

        return dev->driver->prime_fd_to_handle(dev, file_priv,
                                               args->fd, &args->handle);
}

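/*
 * These handlers are reached through DRM_IOCTL_PRIME_HANDLE_TO_FD and
 * DRM_IOCTL_PRIME_FD_TO_HANDLE; the argument is struct drm_prime_handle
 * from the uapi drm.h. An illustrative raw invocation of the export ioctl
 * (error handling omitted):
 *
 *	struct drm_prime_handle args = {
 *		.handle = handle,
 *		.flags  = DRM_CLOEXEC,
 *	};
 *
 *	ioctl(drm_fd, DRM_IOCTL_PRIME_HANDLE_TO_FD, &args);
 *	// args.fd now holds the dma-buf file descriptor
 */
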
/**
 * drm_prime_pages_to_sg - converts a page array into an sg list
 * @pages: pointer to the array of page pointers to convert
 * @nr_pages: length of the page vector
 *
 * This helper creates an sg table object from a set of pages. The driver
 * is responsible for mapping the pages into the importer's address space
 * for use with dma_buf itself.
 */
struct sg_table *drm_prime_pages_to_sg(struct page **pages, int nr_pages)
{
        struct sg_table *sg = NULL;
        int ret;

        sg = kmalloc(sizeof(struct sg_table), GFP_KERNEL);
        if (!sg) {
                ret = -ENOMEM;
                goto out;
        }

        ret = sg_alloc_table_from_pages(sg, pages, nr_pages, 0,
                                        nr_pages << PAGE_SHIFT, GFP_KERNEL);
        if (ret)
                goto out;

        return sg;
out:
        kfree(sg);
        return ERR_PTR(ret);
}
EXPORT_SYMBOL(drm_prime_pages_to_sg);

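/*
 * Illustrative use in a driver's .gem_prime_get_sg_table hook, assuming a
 * hypothetical foo_bo object that keeps its backing pages in an array:
 *
 *	struct sg_table *foo_gem_prime_get_sg_table(struct drm_gem_object *obj)
 *	{
 *		struct foo_bo *bo = to_foo_bo(obj);
 *		int npages = obj->size >> PAGE_SHIFT;
 *
 *		return drm_prime_pages_to_sg(bo->pages, npages);
 *	}
 */
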
/**
 * drm_prime_sg_to_page_addr_arrays - convert an sg table into a page array
 * @sgt: scatter-gather table to convert
 * @pages: array of page pointers to store the page array in
 * @addrs: optional array to store the dma bus address of each page
 * @max_pages: size of both the passed-in arrays
 *
 * Exports an sg table into an array of pages and addresses. This is currently
 * required by the TTM driver in order to do correct fault handling.
 */
int drm_prime_sg_to_page_addr_arrays(struct sg_table *sgt, struct page **pages,
                                     dma_addr_t *addrs, int max_pages)
{
        unsigned count;
        struct scatterlist *sg;
        struct page *page;
        u32 len;
        int pg_index;
        dma_addr_t addr;

        pg_index = 0;
        for_each_sg(sgt->sgl, sg, sgt->nents, count) {
                len = sg->length;
                page = sg_page(sg);
                addr = sg_dma_address(sg);

                while (len > 0) {
                        if (WARN_ON(pg_index >= max_pages))
                                return -1;
                        pages[pg_index] = page;
                        if (addrs)
                                addrs[pg_index] = addr;

                        page++;
                        addr += PAGE_SIZE;
                        len -= PAGE_SIZE;
                        pg_index++;
                }
        }
        return 0;
}
EXPORT_SYMBOL(drm_prime_sg_to_page_addr_arrays);

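/*
 * Illustrative use on the import side, expanding an imported sg table back
 * into per-page arrays (hypothetical foo_bo fields; a TTM-based driver
 * would fill its ttm_tt pages/dma_addrs this way):
 *
 *	int npages = bo->gem.size >> PAGE_SHIFT;
 *	int ret;
 *
 *	ret = drm_prime_sg_to_page_addr_arrays(bo->sgt, bo->pages,
 *					       bo->dma_addrs, npages);
 *	if (ret < 0)
 *		return -ENOMEM;
 */
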
/**
 * drm_prime_gem_destroy - helper to clean up a PRIME-imported GEM object
 * @obj: GEM object which was created from a dma-buf
 * @sg: the sg-table which was pinned at import time
 *
 * This is the cleanup function which GEM drivers need to call when they use
 * @drm_gem_prime_import to import dma-bufs.
 */
void drm_prime_gem_destroy(struct drm_gem_object *obj, struct sg_table *sg)
{
        struct dma_buf_attachment *attach;
        struct dma_buf *dma_buf;

        attach = obj->import_attach;
        if (sg)
                dma_buf_unmap_attachment(attach, sg, DMA_BIDIRECTIONAL);
        dma_buf = attach->dmabuf;
        dma_buf_detach(attach->dmabuf, attach);
        /* remove the reference */
        dma_buf_put(dma_buf);
}
EXPORT_SYMBOL(drm_prime_gem_destroy);

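/*
 * Illustrative cleanup in a driver's .gem_free_object callback (hypothetical
 * foo_* names; the sg table stashed at import time is what gets unpinned):
 *
 *	static void foo_gem_free_object(struct drm_gem_object *obj)
 *	{
 *		struct foo_bo *bo = to_foo_bo(obj);
 *
 *		if (obj->import_attach)
 *			drm_prime_gem_destroy(obj, bo->sgt);
 *		drm_gem_object_release(obj);
 *		kfree(bo);
 *	}
 */
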
void drm_prime_init_file_private(struct drm_prime_file_private *prime_fpriv)
{
        INIT_LIST_HEAD(&prime_fpriv->head);
        mutex_init(&prime_fpriv->lock);
}

void drm_prime_destroy_file_private(struct drm_prime_file_private *prime_fpriv)
{
        /* by now drm_gem_release should've made sure the list is empty */
        WARN_ON(!list_empty(&prime_fpriv->head));
}