drivers/gpu/drm/drm_prime.c
/*
 * Copyright © 2012 Red Hat
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 * Authors:
 *      Dave Airlie <airlied@redhat.com>
 *      Rob Clark <rob.clark@linaro.org>
 *
 */

#include <linux/export.h>
#include <linux/dma-buf.h>
#include <drm/drmP.h>

/*
 * DMA-BUF/GEM Object references and lifetime overview:
 *
 * On the export the dma_buf holds a reference to the exporting GEM
 * object. It takes this reference in handle_to_fd_ioctl, when it
 * first calls .prime_export and stores the exporting GEM object in
 * the dma_buf priv. This reference is released when the dma_buf
 * object goes away in the driver .release function.
 *
 * On the import the importing GEM object holds a reference to the
 * dma_buf (which in turn holds a ref to the exporting GEM object).
 * It takes that reference in the fd_to_handle ioctl: it calls
 * dma_buf_get, creates an attachment to the dma_buf and stores the
 * attachment in the GEM object. When the imported object is later
 * destroyed, the attachment is destroyed as well, and the reference
 * to the dma_buf is dropped.
 *
 * Thus the chain of references always flows in one direction
 * (avoiding loops): importing_gem -> dmabuf -> exporting_gem
 *
 * Self-importing: if userspace is using PRIME as a replacement for flink
 * then it will get an fd->handle request for a GEM object that it created.
 * Drivers should detect this situation and return the GEM object from
 * the dma-buf private. PRIME will do this automatically for drivers that
 * use the drm_gem_prime_{import,export} helpers.
 */
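
/*
 * Example (a rough userspace sketch, not part of this file; the fds,
 * the handle and all error handling are assumptions): the reference
 * chain above is what backs a typical PRIME roundtrip between two
 * device fds:
 *
 *	struct drm_prime_handle req = { .handle = handle,
 *					.flags = DRM_CLOEXEC };
 *
 *	ioctl(exporter_fd, DRM_IOCTL_PRIME_HANDLE_TO_FD, &req);
 *	(req.fd now refers to the dma-buf and may cross process or
 *	device boundaries)
 *
 *	struct drm_prime_handle imp = { .fd = req.fd, .flags = 0 };
 *
 *	ioctl(importer_fd, DRM_IOCTL_PRIME_FD_TO_HANDLE, &imp);
 *	(imp.handle is a GEM handle valid on importer_fd)
 */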

struct drm_prime_member {
        struct list_head entry;
        struct dma_buf *dma_buf;
        uint32_t handle;
};

struct drm_prime_attachment {
        struct sg_table *sgt;
        enum dma_data_direction dir;
};

static int drm_prime_add_buf_handle(struct drm_prime_file_private *prime_fpriv,
                                    struct dma_buf *dma_buf, uint32_t handle)
{
        struct drm_prime_member *member;

        member = kmalloc(sizeof(*member), GFP_KERNEL);
        if (!member)
                return -ENOMEM;

        get_dma_buf(dma_buf);
        member->dma_buf = dma_buf;
        member->handle = handle;
        list_add(&member->entry, &prime_fpriv->head);
        return 0;
}

static int drm_gem_map_attach(struct dma_buf *dma_buf,
                              struct device *target_dev,
                              struct dma_buf_attachment *attach)
{
        struct drm_prime_attachment *prime_attach;
        struct drm_gem_object *obj = dma_buf->priv;
        struct drm_device *dev = obj->dev;

        prime_attach = kzalloc(sizeof(*prime_attach), GFP_KERNEL);
        if (!prime_attach)
                return -ENOMEM;

        prime_attach->dir = DMA_NONE;
        attach->priv = prime_attach;

        if (!dev->driver->gem_prime_pin)
                return 0;

        return dev->driver->gem_prime_pin(obj);
}

static void drm_gem_map_detach(struct dma_buf *dma_buf,
                               struct dma_buf_attachment *attach)
{
        struct drm_prime_attachment *prime_attach = attach->priv;
        struct drm_gem_object *obj = dma_buf->priv;
        struct drm_device *dev = obj->dev;
        struct sg_table *sgt;

        if (dev->driver->gem_prime_unpin)
                dev->driver->gem_prime_unpin(obj);

        if (!prime_attach)
                return;

        sgt = prime_attach->sgt;
        if (sgt) {
                if (prime_attach->dir != DMA_NONE)
                        dma_unmap_sg(attach->dev, sgt->sgl, sgt->nents,
                                     prime_attach->dir);
                sg_free_table(sgt);
        }

        kfree(sgt);
        kfree(prime_attach);
        attach->priv = NULL;
}

static void drm_prime_remove_buf_handle_locked(
                struct drm_prime_file_private *prime_fpriv,
                struct dma_buf *dma_buf)
{
        struct drm_prime_member *member, *safe;

        list_for_each_entry_safe(member, safe, &prime_fpriv->head, entry) {
                if (member->dma_buf == dma_buf) {
                        dma_buf_put(dma_buf);
                        list_del(&member->entry);
                        kfree(member);
                }
        }
}

static struct sg_table *drm_gem_map_dma_buf(struct dma_buf_attachment *attach,
                                            enum dma_data_direction dir)
{
        struct drm_prime_attachment *prime_attach = attach->priv;
        struct drm_gem_object *obj = attach->dmabuf->priv;
        struct sg_table *sgt;

        if (WARN_ON(dir == DMA_NONE || !prime_attach))
                return ERR_PTR(-EINVAL);

        /* return the cached mapping when possible */
        if (prime_attach->dir == dir)
                return prime_attach->sgt;

        /*
         * two mappings with different directions for the same attachment are
         * not allowed
         */
        if (WARN_ON(prime_attach->dir != DMA_NONE))
                return ERR_PTR(-EBUSY);

        mutex_lock(&obj->dev->struct_mutex);

        sgt = obj->dev->driver->gem_prime_get_sg_table(obj);

        if (!IS_ERR(sgt)) {
                if (!dma_map_sg(attach->dev, sgt->sgl, sgt->nents, dir)) {
                        sg_free_table(sgt);
                        kfree(sgt);
                        sgt = ERR_PTR(-ENOMEM);
                } else {
                        prime_attach->sgt = sgt;
                        prime_attach->dir = dir;
                }
        }

        mutex_unlock(&obj->dev->struct_mutex);
        return sgt;
}

static void drm_gem_unmap_dma_buf(struct dma_buf_attachment *attach,
                                  struct sg_table *sgt,
                                  enum dma_data_direction dir)
{
        /* nothing to be done here */
}

static void drm_gem_dmabuf_release(struct dma_buf *dma_buf)
{
        struct drm_gem_object *obj = dma_buf->priv;

        if (obj->export_dma_buf == dma_buf) {
                /* drop the reference the export fd holds */
                obj->export_dma_buf = NULL;
                drm_gem_object_unreference_unlocked(obj);
        }
}

static void *drm_gem_dmabuf_vmap(struct dma_buf *dma_buf)
{
        struct drm_gem_object *obj = dma_buf->priv;
        struct drm_device *dev = obj->dev;

        return dev->driver->gem_prime_vmap(obj);
}

static void drm_gem_dmabuf_vunmap(struct dma_buf *dma_buf, void *vaddr)
{
        struct drm_gem_object *obj = dma_buf->priv;
        struct drm_device *dev = obj->dev;

        dev->driver->gem_prime_vunmap(obj, vaddr);
}

static void *drm_gem_dmabuf_kmap_atomic(struct dma_buf *dma_buf,
                                        unsigned long page_num)
{
        return NULL;
}

static void drm_gem_dmabuf_kunmap_atomic(struct dma_buf *dma_buf,
                                         unsigned long page_num, void *addr)
{
}

static void *drm_gem_dmabuf_kmap(struct dma_buf *dma_buf,
                                 unsigned long page_num)
{
        return NULL;
}

static void drm_gem_dmabuf_kunmap(struct dma_buf *dma_buf,
                                  unsigned long page_num, void *addr)
{
}

static int drm_gem_dmabuf_mmap(struct dma_buf *dma_buf,
                               struct vm_area_struct *vma)
{
        return -EINVAL;
}

static const struct dma_buf_ops drm_gem_prime_dmabuf_ops = {
        .attach = drm_gem_map_attach,
        .detach = drm_gem_map_detach,
        .map_dma_buf = drm_gem_map_dma_buf,
        .unmap_dma_buf = drm_gem_unmap_dma_buf,
        .release = drm_gem_dmabuf_release,
        .kmap = drm_gem_dmabuf_kmap,
        .kmap_atomic = drm_gem_dmabuf_kmap_atomic,
        .kunmap = drm_gem_dmabuf_kunmap,
        .kunmap_atomic = drm_gem_dmabuf_kunmap_atomic,
        .mmap = drm_gem_dmabuf_mmap,
        .vmap = drm_gem_dmabuf_vmap,
        .vunmap = drm_gem_dmabuf_vunmap,
};

/**
 * DOC: PRIME Helpers
 *
 * Drivers can implement @gem_prime_export and @gem_prime_import in terms of
 * simpler APIs by using the helper functions @drm_gem_prime_export and
 * @drm_gem_prime_import. These functions implement dma-buf support in terms
 * of five lower-level driver callbacks:
 *
 * Export callbacks:
 *
 *  - @gem_prime_pin (optional): prepare a GEM object for exporting
 *
 *  - @gem_prime_get_sg_table: provide a scatter/gather table of pinned pages
 *
 *  - @gem_prime_vmap: vmap a buffer exported by your driver
 *
 *  - @gem_prime_vunmap: vunmap a buffer exported by your driver
 *
 * Import callback:
 *
 *  - @gem_prime_import_sg_table (import): produce a GEM object from another
 *    driver's scatter/gather table
 */
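
/*
 * A minimal wiring sketch (hypothetical "foo" driver; the foo_* callbacks
 * are assumptions, the drm_gem_prime_* helpers are the ones in this file):
 *
 *	static struct drm_driver foo_driver = {
 *		.driver_features	   = DRIVER_GEM | DRIVER_PRIME,
 *		.prime_handle_to_fd	   = drm_gem_prime_handle_to_fd,
 *		.prime_fd_to_handle	   = drm_gem_prime_fd_to_handle,
 *		.gem_prime_export	   = drm_gem_prime_export,
 *		.gem_prime_import	   = drm_gem_prime_import,
 *		.gem_prime_pin		   = foo_gem_prime_pin,
 *		.gem_prime_get_sg_table	   = foo_gem_prime_get_sg_table,
 *		.gem_prime_import_sg_table = foo_gem_prime_import_sg_table,
 *		.gem_prime_vmap		   = foo_gem_prime_vmap,
 *		.gem_prime_vunmap	   = foo_gem_prime_vunmap,
 *	};
 */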

struct dma_buf *drm_gem_prime_export(struct drm_device *dev,
                                     struct drm_gem_object *obj, int flags)
{
        return dma_buf_export(obj, &drm_gem_prime_dmabuf_ops, obj->size, flags);
}
EXPORT_SYMBOL(drm_gem_prime_export);

int drm_gem_prime_handle_to_fd(struct drm_device *dev,
                               struct drm_file *file_priv, uint32_t handle,
                               uint32_t flags, int *prime_fd)
{
        struct drm_gem_object *obj;
        struct dma_buf *buf;
        int ret = 0;
        struct dma_buf *dmabuf;

        obj = drm_gem_object_lookup(dev, file_priv, handle);
        if (!obj)
                return -ENOENT;

        mutex_lock(&file_priv->prime.lock);
        /* re-export the original imported object */
        if (obj->import_attach) {
                dmabuf = obj->import_attach->dmabuf;
                goto out_have_obj;
        }

        if (obj->export_dma_buf) {
                dmabuf = obj->export_dma_buf;
                goto out_have_obj;
        }

        buf = dev->driver->gem_prime_export(dev, obj, flags);
        if (IS_ERR(buf)) {
                /* normally the created dma-buf takes ownership of the ref,
                 * but if that fails then drop the ref
                 */
                ret = PTR_ERR(buf);
                goto out;
        }
        obj->export_dma_buf = buf;

        /* if we've exported this buffer then cheat and add it to the
         * import list so we get the correct handle back
         */
        ret = drm_prime_add_buf_handle(&file_priv->prime,
                                       obj->export_dma_buf, handle);
        if (ret)
                goto fail_put_dmabuf;

        ret = dma_buf_fd(buf, flags);
        if (ret < 0)
                goto fail_rm_handle;

        *prime_fd = ret;
        mutex_unlock(&file_priv->prime.lock);
        return 0;

out_have_obj:
        get_dma_buf(dmabuf);
        ret = dma_buf_fd(dmabuf, flags);
        if (ret < 0)
                dma_buf_put(dmabuf);
        else
                *prime_fd = ret;
        goto out;

fail_rm_handle:
        drm_prime_remove_buf_handle_locked(&file_priv->prime, buf);
fail_put_dmabuf:
        /* clear it so drm_gem_dmabuf_release() does not drop the ref again */
        obj->export_dma_buf = NULL;
        dma_buf_put(buf);
out:
        drm_gem_object_unreference_unlocked(obj);
        mutex_unlock(&file_priv->prime.lock);
        return ret;
}
EXPORT_SYMBOL(drm_gem_prime_handle_to_fd);

struct drm_gem_object *drm_gem_prime_import(struct drm_device *dev,
                                            struct dma_buf *dma_buf)
{
        struct dma_buf_attachment *attach;
        struct sg_table *sgt;
        struct drm_gem_object *obj;
        int ret;

        if (!dev->driver->gem_prime_import_sg_table)
                return ERR_PTR(-EINVAL);

        if (dma_buf->ops == &drm_gem_prime_dmabuf_ops) {
                obj = dma_buf->priv;
                if (obj->dev == dev) {
                        /*
                         * Importing a dmabuf exported from our own GEM object
                         * increases the refcount on the GEM object itself
                         * instead of the f_count of the dmabuf.
                         */
                        drm_gem_object_reference(obj);
                        return obj;
                }
        }

        attach = dma_buf_attach(dma_buf, dev->dev);
        if (IS_ERR(attach))
                return ERR_CAST(attach);

        get_dma_buf(dma_buf);

        sgt = dma_buf_map_attachment(attach, DMA_BIDIRECTIONAL);
        if (IS_ERR_OR_NULL(sgt)) {
                ret = PTR_ERR(sgt);
                goto fail_detach;
        }

        obj = dev->driver->gem_prime_import_sg_table(dev, dma_buf->size, sgt);
        if (IS_ERR(obj)) {
                ret = PTR_ERR(obj);
                goto fail_unmap;
        }

        obj->import_attach = attach;

        return obj;

fail_unmap:
        dma_buf_unmap_attachment(attach, sgt, DMA_BIDIRECTIONAL);
fail_detach:
        dma_buf_detach(dma_buf, attach);
        dma_buf_put(dma_buf);

        return ERR_PTR(ret);
}
EXPORT_SYMBOL(drm_gem_prime_import);
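
/*
 * A driver's ->gem_prime_import_sg_table usually just wraps the sg table
 * in a freshly created GEM object; a minimal sketch (hypothetical foo
 * driver; foo_bo and foo_bo_create are assumptions):
 *
 *	static struct drm_gem_object *
 *	foo_gem_prime_import_sg_table(struct drm_device *dev, size_t size,
 *				      struct sg_table *sgt)
 *	{
 *		struct foo_bo *bo = foo_bo_create(dev, size);
 *
 *		if (IS_ERR(bo))
 *			return ERR_CAST(bo);
 *		bo->sgt = sgt;
 *		return &bo->base;
 *	}
 */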

int drm_gem_prime_fd_to_handle(struct drm_device *dev,
                               struct drm_file *file_priv, int prime_fd,
                               uint32_t *handle)
{
        struct dma_buf *dma_buf;
        struct drm_gem_object *obj;
        int ret;

        dma_buf = dma_buf_get(prime_fd);
        if (IS_ERR(dma_buf))
                return PTR_ERR(dma_buf);

        mutex_lock(&file_priv->prime.lock);

        ret = drm_prime_lookup_buf_handle(&file_priv->prime,
                                          dma_buf, handle);
        if (!ret)
                goto out_put;

        /* never seen this one, need to import */
        obj = dev->driver->gem_prime_import(dev, dma_buf);
        if (IS_ERR(obj)) {
                ret = PTR_ERR(obj);
                goto out_put;
        }

        ret = drm_gem_handle_create(file_priv, obj, handle);
        drm_gem_object_unreference_unlocked(obj);
        if (ret)
                goto out_put;

        ret = drm_prime_add_buf_handle(&file_priv->prime,
                                       dma_buf, *handle);
        if (ret)
                goto fail;

        mutex_unlock(&file_priv->prime.lock);

        dma_buf_put(dma_buf);

        return 0;

fail:
        /* if the driver attached, we rely on the free-object path
         * to detach, which seems OK
         */
        drm_gem_object_handle_unreference_unlocked(obj);
out_put:
        dma_buf_put(dma_buf);
        mutex_unlock(&file_priv->prime.lock);
        return ret;
}
EXPORT_SYMBOL(drm_gem_prime_fd_to_handle);

int drm_prime_handle_to_fd_ioctl(struct drm_device *dev, void *data,
                                 struct drm_file *file_priv)
{
        struct drm_prime_handle *args = data;
        uint32_t flags;

        if (!drm_core_check_feature(dev, DRIVER_PRIME))
                return -EINVAL;

        if (!dev->driver->prime_handle_to_fd)
                return -ENOSYS;

        /* check flags are valid */
        if (args->flags & ~DRM_CLOEXEC)
                return -EINVAL;

        /* we only want to pass DRM_CLOEXEC which is == O_CLOEXEC */
        flags = args->flags & DRM_CLOEXEC;

        return dev->driver->prime_handle_to_fd(dev, file_priv,
                                               args->handle, flags,
                                               &args->fd);
}

int drm_prime_fd_to_handle_ioctl(struct drm_device *dev, void *data,
                                 struct drm_file *file_priv)
{
        struct drm_prime_handle *args = data;

        if (!drm_core_check_feature(dev, DRIVER_PRIME))
                return -EINVAL;

        if (!dev->driver->prime_fd_to_handle)
                return -ENOSYS;

        return dev->driver->prime_fd_to_handle(dev, file_priv,
                                               args->fd, &args->handle);
}

/*
 * drm_prime_pages_to_sg
 *
 * This helper creates an sg table object from a set of pages.
 * The driver is responsible for mapping the pages into the
 * importer's address space.
 */
struct sg_table *drm_prime_pages_to_sg(struct page **pages, int nr_pages)
{
        struct sg_table *sg = NULL;
        int ret;

        sg = kmalloc(sizeof(struct sg_table), GFP_KERNEL);
        if (!sg) {
                ret = -ENOMEM;
                goto out;
        }

        ret = sg_alloc_table_from_pages(sg, pages, nr_pages, 0,
                                        nr_pages << PAGE_SHIFT, GFP_KERNEL);
        if (ret)
                goto out;

        return sg;
out:
        kfree(sg);
        return ERR_PTR(ret);
}
EXPORT_SYMBOL(drm_prime_pages_to_sg);
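
/*
 * Typical use from a driver's ->gem_prime_get_sg_table (a sketch;
 * foo_bo/to_foo_bo and the pages array are assumptions, and the pages
 * are expected to be pinned already, e.g. via ->gem_prime_pin):
 *
 *	static struct sg_table *
 *	foo_gem_prime_get_sg_table(struct drm_gem_object *obj)
 *	{
 *		struct foo_bo *bo = to_foo_bo(obj);
 *
 *		return drm_prime_pages_to_sg(bo->pages,
 *					     obj->size >> PAGE_SHIFT);
 *	}
 */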

/*
 * Export an sg table into an array of pages and addresses.
 * This is currently required by the TTM driver in order to do
 * correct fault handling.
 */
int drm_prime_sg_to_page_addr_arrays(struct sg_table *sgt, struct page **pages,
                                     dma_addr_t *addrs, int max_pages)
{
        unsigned count;
        struct scatterlist *sg;
        struct page *page;
        u32 len, offset;
        int pg_index;
        dma_addr_t addr;

        pg_index = 0;
        for_each_sg(sgt->sgl, sg, sgt->nents, count) {
                len = sg->length;
                offset = sg->offset;
                page = sg_page(sg);
                addr = sg_dma_address(sg);

                while (len > 0) {
                        if (WARN_ON(pg_index >= max_pages))
                                return -1;
                        pages[pg_index] = page;
                        if (addrs)
                                addrs[pg_index] = addr;

                        page++;
                        addr += PAGE_SIZE;
                        len -= PAGE_SIZE;
                        pg_index++;
                }
        }
        return 0;
}
EXPORT_SYMBOL(drm_prime_sg_to_page_addr_arrays);
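
/*
 * Typical use, modelled on how TTM-based drivers consume an imported
 * sg table (a sketch; bo, pages and dma_addrs are assumptions, sized
 * for at least npages entries; the helper returns -1 if the sg table
 * covers more pages than that):
 *
 *	npages = bo->gem.size >> PAGE_SHIFT;
 *	ret = drm_prime_sg_to_page_addr_arrays(sgt, bo->pages,
 *					       bo->dma_addrs, npages);
 *	if (ret)
 *		goto err_free;
 */
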
/* helper function to clean up a GEM/PRIME object */
void drm_prime_gem_destroy(struct drm_gem_object *obj, struct sg_table *sg)
{
        struct dma_buf_attachment *attach;
        struct dma_buf *dma_buf;

        attach = obj->import_attach;
        if (sg)
                dma_buf_unmap_attachment(attach, sg, DMA_BIDIRECTIONAL);
        dma_buf = attach->dmabuf;
        dma_buf_detach(attach->dmabuf, attach);
        /* remove the reference */
        dma_buf_put(dma_buf);
}
EXPORT_SYMBOL(drm_prime_gem_destroy);
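
/*
 * Drivers call drm_prime_gem_destroy() from their GEM free path for
 * imported objects; a minimal sketch (hypothetical foo driver; bo->sgt
 * is assumed to be the table dma_buf_map_attachment() returned at
 * import time):
 *
 *	static void foo_gem_free_object(struct drm_gem_object *obj)
 *	{
 *		struct foo_bo *bo = to_foo_bo(obj);
 *
 *		if (obj->import_attach)
 *			drm_prime_gem_destroy(obj, bo->sgt);
 *		foo_bo_free(bo);
 *	}
 */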

void drm_prime_init_file_private(struct drm_prime_file_private *prime_fpriv)
{
        INIT_LIST_HEAD(&prime_fpriv->head);
        mutex_init(&prime_fpriv->lock);
}
EXPORT_SYMBOL(drm_prime_init_file_private);

void drm_prime_destroy_file_private(struct drm_prime_file_private *prime_fpriv)
{
        /* by now drm_gem_release should've made sure the list is empty */
        WARN_ON(!list_empty(&prime_fpriv->head));
}
EXPORT_SYMBOL(drm_prime_destroy_file_private);

int drm_prime_lookup_buf_handle(struct drm_prime_file_private *prime_fpriv,
                                struct dma_buf *dma_buf, uint32_t *handle)
{
        struct drm_prime_member *member;

        list_for_each_entry(member, &prime_fpriv->head, entry) {
                if (member->dma_buf == dma_buf) {
                        *handle = member->handle;
                        return 0;
                }
        }
        return -ENOENT;
}
EXPORT_SYMBOL(drm_prime_lookup_buf_handle);

void drm_prime_remove_buf_handle(struct drm_prime_file_private *prime_fpriv,
                                 struct dma_buf *dma_buf)
{
        mutex_lock(&prime_fpriv->lock);
        drm_prime_remove_buf_handle_locked(prime_fpriv, dma_buf);
        mutex_unlock(&prime_fpriv->lock);
}
EXPORT_SYMBOL(drm_prime_remove_buf_handle);