drivers/gpu/drm/drm_prime.c
/*
 * Copyright © 2012 Red Hat
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 * Authors:
 *      Dave Airlie <airlied@redhat.com>
 *      Rob Clark <rob.clark@linaro.org>
 *
 */

#include <linux/export.h>
#include <linux/dma-buf.h>
#include <drm/drmP.h>

/*
 * DMA-BUF/GEM Object references and lifetime overview:
 *
 * On export, the dma_buf holds a reference to the exporting GEM
 * object. It takes this reference in handle_to_fd_ioctl, when it
 * first calls .prime_export and stores the exporting GEM object in
 * the dma_buf priv. This reference is released when the dma_buf
 * object goes away in the driver .release function.
 *
 * On import, the importing GEM object holds a reference to the
 * dma_buf (which in turn holds a ref to the exporting GEM object).
 * It takes that reference in the fd_to_handle ioctl.
 * It calls dma_buf_get, creates an attachment to it and stores the
 * attachment in the GEM object. When the imported object is later
 * destroyed, we remove the attachment and drop the reference to the
 * dma_buf.
 *
 * Thus the chain of references always flows in one direction
 * (avoiding loops): importing_gem -> dmabuf -> exporting_gem
 *
 * Self-importing: if userspace is using PRIME as a replacement for flink
 * then it will get an fd->handle request for a GEM object that it created.
 * Drivers should detect this situation and return the GEM object from the
 * dma-buf private data. PRIME will do this automatically for drivers that
 * use the drm_gem_prime_{import,export} helpers.
 */
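
/*
 * As a rough illustration of the lifetime rules above (a sketch only, not
 * taken from any particular driver or test), userspace typically moves a
 * buffer between two devices with the generic PRIME ioctls; exporter_fd,
 * importer_fd and handle below are hypothetical:
 *
 *	struct drm_prime_handle args = {
 *		.handle = handle,
 *		.flags = DRM_CLOEXEC,
 *	};
 *
 *	ioctl(exporter_fd, DRM_IOCTL_PRIME_HANDLE_TO_FD, &args);
 *	(pass args.fd to the importing side, e.g. over a socket)
 *
 *	struct drm_prime_handle imp = { .fd = args.fd };
 *	ioctl(importer_fd, DRM_IOCTL_PRIME_FD_TO_HANDLE, &imp);
 *	(imp.handle now names a GEM object backed by the shared dma-buf)
 */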

struct drm_prime_member {
	struct list_head entry;
	struct dma_buf *dma_buf;
	uint32_t handle;
};

static struct sg_table *drm_gem_map_dma_buf(struct dma_buf_attachment *attach,
		enum dma_data_direction dir)
{
	struct drm_gem_object *obj = attach->dmabuf->priv;
	struct sg_table *sgt;

	mutex_lock(&obj->dev->struct_mutex);

	sgt = obj->dev->driver->gem_prime_get_sg_table(obj);

	if (!IS_ERR_OR_NULL(sgt))
		dma_map_sg(attach->dev, sgt->sgl, sgt->nents, dir);

	mutex_unlock(&obj->dev->struct_mutex);
	return sgt;
}

static void drm_gem_unmap_dma_buf(struct dma_buf_attachment *attach,
		struct sg_table *sgt, enum dma_data_direction dir)
{
	dma_unmap_sg(attach->dev, sgt->sgl, sgt->nents, dir);
	sg_free_table(sgt);
	kfree(sgt);
}

static void drm_gem_dmabuf_release(struct dma_buf *dma_buf)
{
	struct drm_gem_object *obj = dma_buf->priv;

	if (obj->export_dma_buf == dma_buf) {
		/* drop the reference the export fd holds */
		obj->export_dma_buf = NULL;
		drm_gem_object_unreference_unlocked(obj);
	}
}

static void *drm_gem_dmabuf_vmap(struct dma_buf *dma_buf)
{
	struct drm_gem_object *obj = dma_buf->priv;
	struct drm_device *dev = obj->dev;

	return dev->driver->gem_prime_vmap(obj);
}

static void drm_gem_dmabuf_vunmap(struct dma_buf *dma_buf, void *vaddr)
{
	struct drm_gem_object *obj = dma_buf->priv;
	struct drm_device *dev = obj->dev;

	dev->driver->gem_prime_vunmap(obj, vaddr);
}

static void *drm_gem_dmabuf_kmap_atomic(struct dma_buf *dma_buf,
		unsigned long page_num)
{
	return NULL;
}

static void drm_gem_dmabuf_kunmap_atomic(struct dma_buf *dma_buf,
		unsigned long page_num, void *addr)
{
}

static void *drm_gem_dmabuf_kmap(struct dma_buf *dma_buf,
		unsigned long page_num)
{
	return NULL;
}

static void drm_gem_dmabuf_kunmap(struct dma_buf *dma_buf,
		unsigned long page_num, void *addr)
{
}

static int drm_gem_dmabuf_mmap(struct dma_buf *dma_buf,
		struct vm_area_struct *vma)
{
	return -EINVAL;
}

static const struct dma_buf_ops drm_gem_prime_dmabuf_ops = {
	.map_dma_buf = drm_gem_map_dma_buf,
	.unmap_dma_buf = drm_gem_unmap_dma_buf,
	.release = drm_gem_dmabuf_release,
	.kmap = drm_gem_dmabuf_kmap,
	.kmap_atomic = drm_gem_dmabuf_kmap_atomic,
	.kunmap = drm_gem_dmabuf_kunmap,
	.kunmap_atomic = drm_gem_dmabuf_kunmap_atomic,
	.mmap = drm_gem_dmabuf_mmap,
	.vmap = drm_gem_dmabuf_vmap,
	.vunmap = drm_gem_dmabuf_vunmap,
};

/**
 * DOC: PRIME Helpers
 *
 * Drivers can implement @gem_prime_export and @gem_prime_import in terms of
 * simpler APIs by using the helper functions @drm_gem_prime_export and
 * @drm_gem_prime_import. These functions implement dma-buf support in terms of
 * five lower-level driver callbacks:
 *
 * Export callbacks:
 *
 *  - @gem_prime_pin (optional): prepare a GEM object for exporting
 *
 *  - @gem_prime_get_sg_table: provide a scatter/gather table of pinned pages
 *
 *  - @gem_prime_vmap: vmap a buffer exported by your driver
 *
 *  - @gem_prime_vunmap: vunmap a buffer exported by your driver
 *
 * Import callback:
 *
 *  - @gem_prime_import_sg_table (import): produce a GEM object from another
 *    driver's scatter/gather table
 */
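
/*
 * As an illustration (a minimal sketch, not copied from a real driver), a
 * driver "foo" that provides the callbacks above would typically wire the
 * helpers into its struct drm_driver like this; every foo_* symbol is
 * hypothetical:
 *
 *	static struct drm_driver foo_driver = {
 *		.driver_features	   = DRIVER_GEM | DRIVER_PRIME,
 *		.prime_handle_to_fd	   = drm_gem_prime_handle_to_fd,
 *		.prime_fd_to_handle	   = drm_gem_prime_fd_to_handle,
 *		.gem_prime_export	   = drm_gem_prime_export,
 *		.gem_prime_import	   = drm_gem_prime_import,
 *		.gem_prime_pin		   = foo_gem_prime_pin,
 *		.gem_prime_get_sg_table	   = foo_gem_prime_get_sg_table,
 *		.gem_prime_import_sg_table = foo_gem_prime_import_sg_table,
 *		.gem_prime_vmap		   = foo_gem_prime_vmap,
 *		.gem_prime_vunmap	   = foo_gem_prime_vunmap,
 *	};
 */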

struct dma_buf *drm_gem_prime_export(struct drm_device *dev,
		struct drm_gem_object *obj, int flags)
{
	if (dev->driver->gem_prime_pin) {
		int ret = dev->driver->gem_prime_pin(obj);
		if (ret)
			return ERR_PTR(ret);
	}
	return dma_buf_export(obj, &drm_gem_prime_dmabuf_ops, obj->size,
			      0600);
}
EXPORT_SYMBOL(drm_gem_prime_export);

int drm_gem_prime_handle_to_fd(struct drm_device *dev,
		struct drm_file *file_priv, uint32_t handle, uint32_t flags,
		int *prime_fd)
{
	struct drm_gem_object *obj;
	void *buf;
	int ret;

	obj = drm_gem_object_lookup(dev, file_priv, handle);
	if (!obj)
		return -ENOENT;

	mutex_lock(&file_priv->prime.lock);
	/* re-export the original imported object */
	if (obj->import_attach) {
		get_dma_buf(obj->import_attach->dmabuf);
		*prime_fd = dma_buf_fd(obj->import_attach->dmabuf, flags);
		drm_gem_object_unreference_unlocked(obj);
		mutex_unlock(&file_priv->prime.lock);
		return 0;
	}

	if (obj->export_dma_buf) {
		get_dma_buf(obj->export_dma_buf);
		*prime_fd = dma_buf_fd(obj->export_dma_buf, flags);
		drm_gem_object_unreference_unlocked(obj);
	} else {
		buf = dev->driver->gem_prime_export(dev, obj, flags);
		if (IS_ERR(buf)) {
			/* normally the created dma-buf takes ownership of the ref,
			 * but if that fails then drop the ref
			 */
			drm_gem_object_unreference_unlocked(obj);
			mutex_unlock(&file_priv->prime.lock);
			return PTR_ERR(buf);
		}
		obj->export_dma_buf = buf;
		*prime_fd = dma_buf_fd(buf, flags);
	}
	/* if we've exported this buffer then cheat and add it to the import list
	 * so we get the correct handle back
	 */
	ret = drm_prime_add_imported_buf_handle(&file_priv->prime,
			obj->export_dma_buf, handle);
	if (ret) {
		drm_gem_object_unreference_unlocked(obj);
		mutex_unlock(&file_priv->prime.lock);
		return ret;
	}

	mutex_unlock(&file_priv->prime.lock);
	return 0;
}
EXPORT_SYMBOL(drm_gem_prime_handle_to_fd);

struct drm_gem_object *drm_gem_prime_import(struct drm_device *dev,
		struct dma_buf *dma_buf)
{
	struct dma_buf_attachment *attach;
	struct sg_table *sgt;
	struct drm_gem_object *obj;
	int ret;

	if (!dev->driver->gem_prime_import_sg_table)
		return ERR_PTR(-EINVAL);

	if (dma_buf->ops == &drm_gem_prime_dmabuf_ops) {
		obj = dma_buf->priv;
		if (obj->dev == dev) {
			/*
			 * Importing a dmabuf exported from our own GEM
			 * increases the refcount on the GEM itself instead
			 * of the f_count of the dmabuf.
			 */
			drm_gem_object_reference(obj);
			dma_buf_put(dma_buf);
			return obj;
		}
	}

	attach = dma_buf_attach(dma_buf, dev->dev);
	if (IS_ERR(attach))
		return ERR_PTR(PTR_ERR(attach));

	sgt = dma_buf_map_attachment(attach, DMA_BIDIRECTIONAL);
	if (IS_ERR_OR_NULL(sgt)) {
		ret = PTR_ERR(sgt);
		goto fail_detach;
	}

	obj = dev->driver->gem_prime_import_sg_table(dev, dma_buf->size, sgt);
	if (IS_ERR(obj)) {
		ret = PTR_ERR(obj);
		goto fail_unmap;
	}

	obj->import_attach = attach;

	return obj;

fail_unmap:
	dma_buf_unmap_attachment(attach, sgt, DMA_BIDIRECTIONAL);
fail_detach:
	dma_buf_detach(dma_buf, attach);
	return ERR_PTR(ret);
}
EXPORT_SYMBOL(drm_gem_prime_import);

int drm_gem_prime_fd_to_handle(struct drm_device *dev,
		struct drm_file *file_priv, int prime_fd, uint32_t *handle)
{
	struct dma_buf *dma_buf;
	struct drm_gem_object *obj;
	int ret;

	dma_buf = dma_buf_get(prime_fd);
	if (IS_ERR(dma_buf))
		return PTR_ERR(dma_buf);

	mutex_lock(&file_priv->prime.lock);

	ret = drm_prime_lookup_imported_buf_handle(&file_priv->prime,
			dma_buf, handle);
	if (!ret) {
		ret = 0;
		goto out_put;
	}

	/* never seen this one, need to import */
	obj = dev->driver->gem_prime_import(dev, dma_buf);
	if (IS_ERR(obj)) {
		ret = PTR_ERR(obj);
		goto out_put;
	}

	ret = drm_gem_handle_create(file_priv, obj, handle);
	drm_gem_object_unreference_unlocked(obj);
	if (ret)
		goto out_put;

	ret = drm_prime_add_imported_buf_handle(&file_priv->prime,
			dma_buf, *handle);
	if (ret)
		goto fail;

	mutex_unlock(&file_priv->prime.lock);
	return 0;

fail:
	/* hmm, if driver attached, we are relying on the free-object path
	 * to detach.. which seems ok..
	 */
	drm_gem_object_handle_unreference_unlocked(obj);
out_put:
	dma_buf_put(dma_buf);
	mutex_unlock(&file_priv->prime.lock);
	return ret;
}
EXPORT_SYMBOL(drm_gem_prime_fd_to_handle);

int drm_prime_handle_to_fd_ioctl(struct drm_device *dev, void *data,
		struct drm_file *file_priv)
{
	struct drm_prime_handle *args = data;
	uint32_t flags;

	if (!drm_core_check_feature(dev, DRIVER_PRIME))
		return -EINVAL;

	if (!dev->driver->prime_handle_to_fd)
		return -ENOSYS;

	/* check flags are valid */
	if (args->flags & ~DRM_CLOEXEC)
		return -EINVAL;

	/* we only want to pass DRM_CLOEXEC which is == O_CLOEXEC */
	flags = args->flags & DRM_CLOEXEC;

	return dev->driver->prime_handle_to_fd(dev, file_priv,
			args->handle, flags, &args->fd);
}

int drm_prime_fd_to_handle_ioctl(struct drm_device *dev, void *data,
		struct drm_file *file_priv)
{
	struct drm_prime_handle *args = data;

	if (!drm_core_check_feature(dev, DRIVER_PRIME))
		return -EINVAL;

	if (!dev->driver->prime_fd_to_handle)
		return -ENOSYS;

	return dev->driver->prime_fd_to_handle(dev, file_priv,
			args->fd, &args->handle);
}

/*
 * drm_prime_pages_to_sg
 *
 * This helper creates an sg table object from a set of pages.
 * The driver is responsible for mapping the pages into the
 * importer's address space (see the usage sketch after this
 * function).
 */
struct sg_table *drm_prime_pages_to_sg(struct page **pages, int nr_pages)
{
	struct sg_table *sg = NULL;
	struct scatterlist *iter;
	int i;
	int ret;

	sg = kmalloc(sizeof(struct sg_table), GFP_KERNEL);
	if (!sg)
		goto out;

	ret = sg_alloc_table(sg, nr_pages, GFP_KERNEL);
	if (ret)
		goto out;

	for_each_sg(sg->sgl, iter, nr_pages, i)
		sg_set_page(iter, pages[i], PAGE_SIZE, 0);

	return sg;
out:
	kfree(sg);
	return NULL;
}
EXPORT_SYMBOL(drm_prime_pages_to_sg);
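
/*
 * A driver whose buffer objects are backed by a plain page array can build
 * its .gem_prime_get_sg_table callback directly on the helper above. A
 * minimal sketch (struct foo_bo, to_foo_bo() and the pages/num_pages fields
 * are hypothetical):
 *
 *	static struct sg_table *foo_gem_prime_get_sg_table(struct drm_gem_object *obj)
 *	{
 *		struct foo_bo *bo = to_foo_bo(obj);
 *
 *		return drm_prime_pages_to_sg(bo->pages, bo->num_pages);
 *	}
 */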

/*
 * Export an sg table into an array of pages and addresses.
 * This is currently required by the TTM driver in order to do
 * correct fault handling.
 */
int drm_prime_sg_to_page_addr_arrays(struct sg_table *sgt, struct page **pages,
		dma_addr_t *addrs, int max_pages)
{
	unsigned count;
	struct scatterlist *sg;
	struct page *page;
	u32 len, offset;
	int pg_index;
	dma_addr_t addr;

	pg_index = 0;
	for_each_sg(sgt->sgl, sg, sgt->nents, count) {
		len = sg->length;
		offset = sg->offset;
		page = sg_page(sg);
		addr = sg_dma_address(sg);

		while (len > 0) {
			if (WARN_ON(pg_index >= max_pages))
				return -1;
			pages[pg_index] = page;
			if (addrs)
				addrs[pg_index] = addr;

			page++;
			addr += PAGE_SIZE;
			len -= PAGE_SIZE;
			pg_index++;
		}
	}
	return 0;
}
EXPORT_SYMBOL(drm_prime_sg_to_page_addr_arrays);
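
/*
 * Usage sketch for the helper above (hypothetical import path; npages is
 * assumed to have been derived from the dma-buf size, and err_free_arrays
 * is a hypothetical cleanup label in the caller):
 *
 *	pages = drm_malloc_ab(npages, sizeof(struct page *));
 *	addrs = drm_malloc_ab(npages, sizeof(dma_addr_t));
 *	ret = drm_prime_sg_to_page_addr_arrays(sgt, pages, addrs, npages);
 *	if (ret)
 *		goto err_free_arrays;
 */
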
/* helper function to clean up a GEM/prime object */
void drm_prime_gem_destroy(struct drm_gem_object *obj, struct sg_table *sg)
{
	struct dma_buf_attachment *attach;
	struct dma_buf *dma_buf;
	attach = obj->import_attach;
	if (sg)
		dma_buf_unmap_attachment(attach, sg, DMA_BIDIRECTIONAL);
	dma_buf = attach->dmabuf;
	dma_buf_detach(attach->dmabuf, attach);
	/* remove the reference */
	dma_buf_put(dma_buf);
}
EXPORT_SYMBOL(drm_prime_gem_destroy);

void drm_prime_init_file_private(struct drm_prime_file_private *prime_fpriv)
{
	INIT_LIST_HEAD(&prime_fpriv->head);
	mutex_init(&prime_fpriv->lock);
}
EXPORT_SYMBOL(drm_prime_init_file_private);

void drm_prime_destroy_file_private(struct drm_prime_file_private *prime_fpriv)
{
	struct drm_prime_member *member, *safe;
	list_for_each_entry_safe(member, safe, &prime_fpriv->head, entry) {
		list_del(&member->entry);
		kfree(member);
	}
}
EXPORT_SYMBOL(drm_prime_destroy_file_private);

int drm_prime_add_imported_buf_handle(struct drm_prime_file_private *prime_fpriv, struct dma_buf *dma_buf, uint32_t handle)
{
	struct drm_prime_member *member;

	member = kmalloc(sizeof(*member), GFP_KERNEL);
	if (!member)
		return -ENOMEM;

	member->dma_buf = dma_buf;
	member->handle = handle;
	list_add(&member->entry, &prime_fpriv->head);
	return 0;
}
EXPORT_SYMBOL(drm_prime_add_imported_buf_handle);

int drm_prime_lookup_imported_buf_handle(struct drm_prime_file_private *prime_fpriv, struct dma_buf *dma_buf, uint32_t *handle)
{
	struct drm_prime_member *member;

	list_for_each_entry(member, &prime_fpriv->head, entry) {
		if (member->dma_buf == dma_buf) {
			*handle = member->handle;
			return 0;
		}
	}
	return -ENOENT;
}
EXPORT_SYMBOL(drm_prime_lookup_imported_buf_handle);

void drm_prime_remove_imported_buf_handle(struct drm_prime_file_private *prime_fpriv, struct dma_buf *dma_buf)
{
	struct drm_prime_member *member, *safe;

	mutex_lock(&prime_fpriv->lock);
	list_for_each_entry_safe(member, safe, &prime_fpriv->head, entry) {
		if (member->dma_buf == dma_buf) {
			list_del(&member->entry);
			kfree(member);
		}
	}
	mutex_unlock(&prime_fpriv->lock);
}
EXPORT_SYMBOL(drm_prime_remove_imported_buf_handle);