/*
 * Framework for buffer objects that can be shared across devices/subsystems.
 *
 * Copyright(C) 2011 Linaro Limited. All rights reserved.
 * Author: Sumit Semwal <sumit.semwal@ti.com>
 *
 * Many thanks to linaro-mm-sig list, and specially
 * Arnd Bergmann <arnd@arndb.de>, Rob Clark <rob@ti.com> and
 * Daniel Vetter <daniel@ffwll.ch> for their support in creation and
 * refining of this idea.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 as published by
 * the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program. If not, see <http://www.gnu.org/licenses/>.
 */
#include <linux/fs.h>
#include <linux/slab.h>
#include <linux/dma-buf.h>
#include <linux/fence.h>
#include <linux/anon_inodes.h>
#include <linux/export.h>
#include <linux/debugfs.h>
#include <linux/module.h>
#include <linux/seq_file.h>
#include <linux/poll.h>
#include <linux/reservation.h>

#include <uapi/linux/dma-buf.h>
static inline int is_dma_buf_file(struct file *);

struct dma_buf_list {
        struct list_head head;
        struct mutex lock;
};

static struct dma_buf_list db_list;
static int dma_buf_release(struct inode *inode, struct file *file)
{
        struct dma_buf *dmabuf;

        if (!is_dma_buf_file(file))
                return -EINVAL;

        dmabuf = file->private_data;

        BUG_ON(dmabuf->vmapping_counter);

        /*
         * Any fences that a dma-buf poll can wait on should be signaled
         * before releasing dma-buf. This is the responsibility of each
         * driver that uses the reservation objects.
         *
         * If you hit this BUG() it means someone dropped their ref to the
         * dma-buf while still having pending operation to the buffer.
         */
        BUG_ON(dmabuf->cb_shared.active || dmabuf->cb_excl.active);

        dmabuf->ops->release(dmabuf);

        mutex_lock(&db_list.lock);
        list_del(&dmabuf->list_node);
        mutex_unlock(&db_list.lock);

        /* an embedded reservation object lives directly after the dma_buf */
        if (dmabuf->resv == (struct reservation_object *)&dmabuf[1])
                reservation_object_fini(dmabuf->resv);

        module_put(dmabuf->owner);
        kfree(dmabuf);
        return 0;
}
static int dma_buf_mmap_internal(struct file *file, struct vm_area_struct *vma)
{
        struct dma_buf *dmabuf;

        if (!is_dma_buf_file(file))
                return -EINVAL;

        dmabuf = file->private_data;

        /* check for overflowing the buffer's size */
        if (vma->vm_pgoff + ((vma->vm_end - vma->vm_start) >> PAGE_SHIFT) >
            dmabuf->size >> PAGE_SHIFT)
                return -EINVAL;

        return dmabuf->ops->mmap(dmabuf, vma);
}
static loff_t dma_buf_llseek(struct file *file, loff_t offset, int whence)
{
        struct dma_buf *dmabuf;
        loff_t base;

        if (!is_dma_buf_file(file))
                return -EBADF;

        dmabuf = file->private_data;

        /* only support discovering the end of the buffer,
           but also allow SEEK_SET to maintain the idiomatic
           SEEK_END(0), SEEK_CUR(0) pattern */
        if (whence == SEEK_END)
                base = dmabuf->size;
        else if (whence == SEEK_SET)
                base = 0;
        else
                return -EINVAL;

        if (offset != 0)
                return -EINVAL;

        return base + offset;
}
static void dma_buf_poll_cb(struct fence *fence, struct fence_cb *cb)
{
        struct dma_buf_poll_cb_t *dcb = (struct dma_buf_poll_cb_t *)cb;
        unsigned long flags;

        spin_lock_irqsave(&dcb->poll->lock, flags);
        wake_up_locked_poll(dcb->poll, dcb->active);
        dcb->active = 0;
        spin_unlock_irqrestore(&dcb->poll->lock, flags);
}
static unsigned int dma_buf_poll(struct file *file, poll_table *poll)
{
        struct dma_buf *dmabuf;
        struct reservation_object *resv;
        struct reservation_object_list *fobj;
        struct fence *fence_excl;
        unsigned long events;
        unsigned shared_count, seq;

        dmabuf = file->private_data;
        if (!dmabuf || !dmabuf->resv)
                return POLLERR;

        resv = dmabuf->resv;

        poll_wait(file, &dmabuf->poll, poll);

        events = poll_requested_events(poll) & (POLLIN | POLLOUT);
        if (!events)
                return 0;

retry:
        seq = read_seqcount_begin(&resv->seq);
        rcu_read_lock();

        fobj = rcu_dereference(resv->fence);
        if (fobj)
                shared_count = fobj->shared_count;
        else
                shared_count = 0;
        fence_excl = rcu_dereference(resv->fence_excl);
        if (read_seqcount_retry(&resv->seq, seq)) {
                rcu_read_unlock();
                goto retry;
        }

        if (fence_excl && (!(events & POLLOUT) || shared_count == 0)) {
                struct dma_buf_poll_cb_t *dcb = &dmabuf->cb_excl;
                unsigned long pevents = POLLIN;

                if (shared_count == 0)
                        pevents |= POLLOUT;

                spin_lock_irq(&dmabuf->poll.lock);
                if (dcb->active) {
                        dcb->active |= pevents;
                        events &= ~pevents;
                } else
                        dcb->active = pevents;
                spin_unlock_irq(&dmabuf->poll.lock);

                if (events & pevents) {
                        if (!fence_get_rcu(fence_excl)) {
                                /* force a recheck */
                                events &= ~pevents;
                                dma_buf_poll_cb(NULL, &dcb->cb);
                        } else if (!fence_add_callback(fence_excl, &dcb->cb,
                                                       dma_buf_poll_cb)) {
                                events &= ~pevents;
                                fence_put(fence_excl);
                        } else {
                                /*
                                 * No callback queued, wake up any additional
                                 * waiters.
                                 */
                                fence_put(fence_excl);
                                dma_buf_poll_cb(NULL, &dcb->cb);
                        }
                }
        }

        if ((events & POLLOUT) && shared_count > 0) {
                struct dma_buf_poll_cb_t *dcb = &dmabuf->cb_shared;
                int i;

                /* Only queue a new callback if no event has fired yet */
                spin_lock_irq(&dmabuf->poll.lock);
                if (dcb->active)
                        events &= ~POLLOUT;
                else
                        dcb->active = POLLOUT;
                spin_unlock_irq(&dmabuf->poll.lock);

                if (!(events & POLLOUT))
                        goto out;

                for (i = 0; i < shared_count; ++i) {
                        struct fence *fence = rcu_dereference(fobj->shared[i]);

                        if (!fence_get_rcu(fence)) {
                                /*
                                 * fence refcount dropped to zero, this means
                                 * that fobj has been freed
                                 *
                                 * call dma_buf_poll_cb and force a recheck!
                                 */
                                events &= ~POLLOUT;
                                dma_buf_poll_cb(NULL, &dcb->cb);
                                break;
                        }
                        if (!fence_add_callback(fence, &dcb->cb,
                                                dma_buf_poll_cb)) {
                                fence_put(fence);
                                events &= ~POLLOUT;
                                break;
                        }
                        fence_put(fence);
                }

                /* No callback queued, wake up any additional waiters. */
                if (i == shared_count)
                        dma_buf_poll_cb(NULL, &dcb->cb);
        }

out:
        rcu_read_unlock();
        return events;
}
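/*
 * Usage note (illustrative, not part of this file): userspace can wait on
 * the implicit fences of a buffer by poll(2)ing the dma-buf fd directly.
 * A minimal sketch, assuming "fd" came from an exporting driver; POLLIN
 * fires once the exclusive fence has signaled (safe to read), POLLOUT
 * once all fences have signaled (safe to write):
 *
 *      struct pollfd pfd = { .fd = fd, .events = POLLIN | POLLOUT };
 *
 *      poll(&pfd, 1, -1);
 */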
static long dma_buf_ioctl(struct file *file,
                          unsigned int cmd, unsigned long arg)
{
        struct dma_buf *dmabuf;
        struct dma_buf_sync sync;
        enum dma_data_direction direction;
        int ret;

        dmabuf = file->private_data;

        switch (cmd) {
        case DMA_BUF_IOCTL_SYNC:
                if (copy_from_user(&sync, (void __user *) arg, sizeof(sync)))
                        return -EFAULT;

                if (sync.flags & ~DMA_BUF_SYNC_VALID_FLAGS_MASK)
                        return -EINVAL;

                switch (sync.flags & DMA_BUF_SYNC_RW) {
                case DMA_BUF_SYNC_READ:
                        direction = DMA_FROM_DEVICE;
                        break;
                case DMA_BUF_SYNC_WRITE:
                        direction = DMA_TO_DEVICE;
                        break;
                case DMA_BUF_SYNC_RW:
                        direction = DMA_BIDIRECTIONAL;
                        break;
                default:
                        return -EINVAL;
                }

                if (sync.flags & DMA_BUF_SYNC_END)
                        ret = dma_buf_end_cpu_access(dmabuf, direction);
                else
                        ret = dma_buf_begin_cpu_access(dmabuf, direction);

                return ret;
        default:
                return -ENOTTY;
        }
}
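/*
 * Usage note (illustrative): the sync ioctl brackets CPU access from
 * userspace, mirroring begin/end_cpu_access in the kernel. A minimal
 * sketch, assuming "fd" is a dma-buf fd mapped with mmap(2); error
 * handling elided:
 *
 *      struct dma_buf_sync sync;
 *
 *      sync.flags = DMA_BUF_SYNC_START | DMA_BUF_SYNC_RW;
 *      ioctl(fd, DMA_BUF_IOCTL_SYNC, &sync);
 *      // ... CPU reads/writes through the mapping ...
 *      sync.flags = DMA_BUF_SYNC_END | DMA_BUF_SYNC_RW;
 *      ioctl(fd, DMA_BUF_IOCTL_SYNC, &sync);
 */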
static const struct file_operations dma_buf_fops = {
        .release        = dma_buf_release,
        .mmap           = dma_buf_mmap_internal,
        .llseek         = dma_buf_llseek,
        .poll           = dma_buf_poll,
        .unlocked_ioctl = dma_buf_ioctl,
};
/*
 * is_dma_buf_file - Check if struct file* is associated with dma_buf
 */
static inline int is_dma_buf_file(struct file *file)
{
        return file->f_op == &dma_buf_fops;
}
/**
 * dma_buf_export - Creates a new dma_buf, and associates an anon file
 * with this buffer, so it can be exported.
 * Also connect the allocator specific data and ops to the buffer.
 * Additionally, provide a name string for exporter; useful in debugging.
 *
 * @exp_info:	[in]	holds all the export related information provided
 *			by the exporter. see struct dma_buf_export_info
 *			for further details.
 *
 * Returns, on success, a newly created dma_buf object, which wraps the
 * supplied private data and operations for dma_buf_ops. On either missing
 * ops, or error in allocating struct dma_buf, will return an ERR_PTR
 * encoding the negative error.
 */
struct dma_buf *dma_buf_export(const struct dma_buf_export_info *exp_info)
{
        struct dma_buf *dmabuf;
        struct reservation_object *resv = exp_info->resv;
        struct file *file;
        size_t alloc_size = sizeof(struct dma_buf);

        if (!exp_info->resv)
                alloc_size += sizeof(struct reservation_object);
        else
                /* prevent &dma_buf[1] == dma_buf->resv */
                alloc_size += 1;

        if (WARN_ON(!exp_info->priv
                          || !exp_info->ops
                          || !exp_info->ops->map_dma_buf
                          || !exp_info->ops->unmap_dma_buf
                          || !exp_info->ops->release
                          || !exp_info->ops->kmap_atomic
                          || !exp_info->ops->kmap
                          || !exp_info->ops->mmap)) {
                return ERR_PTR(-EINVAL);
        }

        if (!try_module_get(exp_info->owner))
                return ERR_PTR(-ENOENT);

        dmabuf = kzalloc(alloc_size, GFP_KERNEL);
        if (!dmabuf) {
                module_put(exp_info->owner);
                return ERR_PTR(-ENOMEM);
        }

        dmabuf->priv = exp_info->priv;
        dmabuf->ops = exp_info->ops;
        dmabuf->size = exp_info->size;
        dmabuf->exp_name = exp_info->exp_name;
        dmabuf->owner = exp_info->owner;
        init_waitqueue_head(&dmabuf->poll);
        dmabuf->cb_excl.poll = dmabuf->cb_shared.poll = &dmabuf->poll;
        dmabuf->cb_excl.active = dmabuf->cb_shared.active = 0;

        if (!resv) {
                resv = (struct reservation_object *)&dmabuf[1];
                reservation_object_init(resv);
        }
        dmabuf->resv = resv;

        file = anon_inode_getfile("dmabuf", &dma_buf_fops, dmabuf,
                                  exp_info->flags);
        if (IS_ERR(file)) {
                /* undo the half-constructed buffer on failure */
                if (!exp_info->resv)
                        reservation_object_fini(dmabuf->resv);
                kfree(dmabuf);
                module_put(exp_info->owner);
                return ERR_CAST(file);
        }

        file->f_mode |= FMODE_LSEEK;
        dmabuf->file = file;

        mutex_init(&dmabuf->lock);
        INIT_LIST_HEAD(&dmabuf->attachments);

        mutex_lock(&db_list.lock);
        list_add(&dmabuf->list_node, &db_list.head);
        mutex_unlock(&db_list.lock);

        return dmabuf;
}
EXPORT_SYMBOL_GPL(dma_buf_export);
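/*
 * Usage sketch (illustrative): an exporter typically fills a
 * dma_buf_export_info on the stack via DEFINE_DMA_BUF_EXPORT_INFO and
 * then hands the buffer to userspace as an fd. "my_dmabuf_ops" and
 * "buffer" are hypothetical exporter-side names:
 *
 *      DEFINE_DMA_BUF_EXPORT_INFO(exp_info);
 *      struct dma_buf *dmabuf;
 *
 *      exp_info.ops = &my_dmabuf_ops;
 *      exp_info.size = buffer->size;
 *      exp_info.flags = O_CLOEXEC;
 *      exp_info.priv = buffer;
 *
 *      dmabuf = dma_buf_export(&exp_info);
 *      if (IS_ERR(dmabuf))
 *              return PTR_ERR(dmabuf);
 *
 *      return dma_buf_fd(dmabuf, O_CLOEXEC);
 */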
/**
 * dma_buf_fd - returns a file descriptor for the given dma_buf
 * @dmabuf:	[in]	pointer to dma_buf for which fd is required.
 * @flags:	[in]	flags to give to fd
 *
 * On success, returns an associated 'fd'. Else, returns error.
 */
int dma_buf_fd(struct dma_buf *dmabuf, int flags)
{
        int fd;

        if (!dmabuf || !dmabuf->file)
                return -EINVAL;

        fd = get_unused_fd_flags(flags);
        if (fd < 0)
                return fd;

        fd_install(fd, dmabuf->file);

        return fd;
}
EXPORT_SYMBOL_GPL(dma_buf_fd);
/**
 * dma_buf_get - returns the dma_buf structure related to an fd
 * @fd:	[in]	fd associated with the dma_buf to be returned
 *
 * On success, returns the dma_buf structure associated with an fd; uses
 * file's refcounting done by fget to increase refcount. returns ERR_PTR
 * otherwise.
 */
struct dma_buf *dma_buf_get(int fd)
{
        struct file *file;

        file = fget(fd);

        if (!file)
                return ERR_PTR(-EBADF);

        if (!is_dma_buf_file(file)) {
                fput(file);
                return ERR_PTR(-EINVAL);
        }

        return file->private_data;
}
EXPORT_SYMBOL_GPL(dma_buf_get);
/**
 * dma_buf_put - decreases refcount of the buffer
 * @dmabuf:	[in]	buffer to reduce refcount of
 *
 * Uses file's refcounting done implicitly by fput()
 */
void dma_buf_put(struct dma_buf *dmabuf)
{
        if (WARN_ON(!dmabuf || !dmabuf->file))
                return;

        fput(dmabuf->file);
}
EXPORT_SYMBOL_GPL(dma_buf_put);
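/*
 * Usage note (illustrative): importers pair dma_buf_get()/dma_buf_put()
 * around any use of the buffer, so it cannot be released underneath
 * them; "fd" is a dma-buf fd passed in from userspace:
 *
 *      struct dma_buf *dmabuf = dma_buf_get(fd);
 *
 *      if (IS_ERR(dmabuf))
 *              return PTR_ERR(dmabuf);
 *      // ... attach, map and use the buffer ...
 *      dma_buf_put(dmabuf);
 */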
/**
 * dma_buf_attach - Add the device to dma_buf's attachments list; optionally,
 * calls attach() of dma_buf_ops to allow device-specific attach functionality
 * @dmabuf:	[in]	buffer to attach device to.
 * @dev:	[in]	device to be attached.
 *
 * Returns struct dma_buf_attachment * for this attachment; returns ERR_PTR on
 * error.
 */
struct dma_buf_attachment *dma_buf_attach(struct dma_buf *dmabuf,
                                          struct device *dev)
{
        struct dma_buf_attachment *attach;
        int ret;

        if (WARN_ON(!dmabuf || !dev))
                return ERR_PTR(-EINVAL);

        attach = kzalloc(sizeof(struct dma_buf_attachment), GFP_KERNEL);
        if (attach == NULL)
                return ERR_PTR(-ENOMEM);

        attach->dev = dev;
        attach->dmabuf = dmabuf;

        mutex_lock(&dmabuf->lock);

        if (dmabuf->ops->attach) {
                ret = dmabuf->ops->attach(dmabuf, dev, attach);
                if (ret)
                        goto err_attach;
        }
        list_add(&attach->node, &dmabuf->attachments);

        mutex_unlock(&dmabuf->lock);
        return attach;

err_attach:
        kfree(attach);
        mutex_unlock(&dmabuf->lock);
        return ERR_PTR(ret);
}
EXPORT_SYMBOL_GPL(dma_buf_attach);
/**
 * dma_buf_detach - Remove the given attachment from dmabuf's attachments list;
 * optionally calls detach() of dma_buf_ops for device-specific detach
 * @dmabuf:	[in]	buffer to detach from.
 * @attach:	[in]	attachment to be detached; is free'd after this call.
 *
 */
void dma_buf_detach(struct dma_buf *dmabuf, struct dma_buf_attachment *attach)
{
        if (WARN_ON(!dmabuf || !attach))
                return;

        mutex_lock(&dmabuf->lock);
        list_del(&attach->node);
        if (dmabuf->ops->detach)
                dmabuf->ops->detach(dmabuf, attach);

        mutex_unlock(&dmabuf->lock);
        kfree(attach);
}
EXPORT_SYMBOL_GPL(dma_buf_detach);
/**
 * dma_buf_map_attachment - Returns the scatterlist table of the attachment;
 * mapped into _device_ address space. Is a wrapper for map_dma_buf() of the
 * dma_buf_ops.
 * @attach:	[in]	attachment whose scatterlist is to be returned
 * @direction:	[in]	direction of DMA transfer
 *
 * Returns sg_table containing the scatterlist to be returned; returns ERR_PTR
 * on error.
 */
struct sg_table *dma_buf_map_attachment(struct dma_buf_attachment *attach,
                                        enum dma_data_direction direction)
{
        struct sg_table *sg_table;

        might_sleep();

        if (WARN_ON(!attach || !attach->dmabuf))
                return ERR_PTR(-EINVAL);

        sg_table = attach->dmabuf->ops->map_dma_buf(attach, direction);
        if (!sg_table)
                sg_table = ERR_PTR(-ENOMEM);

        return sg_table;
}
EXPORT_SYMBOL_GPL(dma_buf_map_attachment);
/**
 * dma_buf_unmap_attachment - unmaps and decreases usecount of the buffer; might
 * deallocate the scatterlist associated. Is a wrapper for unmap_dma_buf() of
 * dma_buf_ops.
 * @attach:	[in]	attachment to unmap buffer from
 * @sg_table:	[in]	scatterlist info of the buffer to unmap
 * @direction:	[in]	direction of DMA transfer
 *
 */
void dma_buf_unmap_attachment(struct dma_buf_attachment *attach,
                              struct sg_table *sg_table,
                              enum dma_data_direction direction)
{
        might_sleep();

        if (WARN_ON(!attach || !attach->dmabuf || !sg_table))
                return;

        attach->dmabuf->ops->unmap_dma_buf(attach, sg_table,
                                           direction);
}
EXPORT_SYMBOL_GPL(dma_buf_unmap_attachment);
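/*
 * Usage sketch (illustrative): the canonical importer sequence, with a
 * hypothetical device "dev" and an already dma_buf_get()ed "dmabuf";
 * error handling elided:
 *
 *      struct dma_buf_attachment *attach;
 *      struct sg_table *sgt;
 *
 *      attach = dma_buf_attach(dmabuf, dev);
 *      sgt = dma_buf_map_attachment(attach, DMA_BIDIRECTIONAL);
 *      // ... program the device with the addresses in sgt ...
 *      dma_buf_unmap_attachment(attach, sgt, DMA_BIDIRECTIONAL);
 *      dma_buf_detach(dmabuf, attach);
 */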
/**
 * dma_buf_begin_cpu_access - Must be called before accessing a dma_buf from the
 * cpu in the kernel context. Calls begin_cpu_access to allow exporter-specific
 * preparations. Coherency is only guaranteed for the specified access
 * direction.
 * @dmabuf:	[in]	buffer to prepare cpu access for.
 * @direction:	[in]	direction of cpu access.
 *
 * Can return negative error values, returns 0 on success.
 */
int dma_buf_begin_cpu_access(struct dma_buf *dmabuf,
                             enum dma_data_direction direction)
{
        int ret = 0;

        if (WARN_ON(!dmabuf))
                return -EINVAL;

        if (dmabuf->ops->begin_cpu_access)
                ret = dmabuf->ops->begin_cpu_access(dmabuf, direction);

        return ret;
}
EXPORT_SYMBOL_GPL(dma_buf_begin_cpu_access);
/**
 * dma_buf_end_cpu_access - Must be called after accessing a dma_buf from the
 * cpu in the kernel context. Calls end_cpu_access to allow exporter-specific
 * actions. Coherency is only guaranteed for the specified access direction.
 * @dmabuf:	[in]	buffer to complete cpu access for.
 * @direction:	[in]	direction of cpu access.
 *
 * Can return negative error values, returns 0 on success.
 */
int dma_buf_end_cpu_access(struct dma_buf *dmabuf,
                           enum dma_data_direction direction)
{
        int ret = 0;

        WARN_ON(!dmabuf);

        if (dmabuf->ops->end_cpu_access)
                ret = dmabuf->ops->end_cpu_access(dmabuf, direction);

        return ret;
}
EXPORT_SYMBOL_GPL(dma_buf_end_cpu_access);
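/*
 * Usage note (illustrative): kernel CPU access must be bracketed by
 * these two calls so the exporter can manage coherency. For a CPU
 * write into the buffer, for example:
 *
 *      if (!dma_buf_begin_cpu_access(dmabuf, DMA_TO_DEVICE)) {
 *              // ... access pages via dma_buf_kmap()/dma_buf_vmap() ...
 *              dma_buf_end_cpu_access(dmabuf, DMA_TO_DEVICE);
 *      }
 */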
/**
 * dma_buf_kmap_atomic - Map a page of the buffer object into kernel address
 * space. The same restrictions as for kmap_atomic and friends apply.
 * @dmabuf:	[in]	buffer to map page from.
 * @page_num:	[in]	page in PAGE_SIZE units to map.
 *
 * This call must always succeed, any necessary preparations that might fail
 * need to be done in begin_cpu_access.
 */
void *dma_buf_kmap_atomic(struct dma_buf *dmabuf, unsigned long page_num)
{
        WARN_ON(!dmabuf);

        return dmabuf->ops->kmap_atomic(dmabuf, page_num);
}
EXPORT_SYMBOL_GPL(dma_buf_kmap_atomic);
/**
 * dma_buf_kunmap_atomic - Unmap a page obtained by dma_buf_kmap_atomic.
 * @dmabuf:	[in]	buffer to unmap page from.
 * @page_num:	[in]	page in PAGE_SIZE units to unmap.
 * @vaddr:	[in]	kernel space pointer obtained from dma_buf_kmap_atomic.
 *
 * This call must always succeed.
 */
void dma_buf_kunmap_atomic(struct dma_buf *dmabuf, unsigned long page_num,
                           void *vaddr)
{
        WARN_ON(!dmabuf);

        if (dmabuf->ops->kunmap_atomic)
                dmabuf->ops->kunmap_atomic(dmabuf, page_num, vaddr);
}
EXPORT_SYMBOL_GPL(dma_buf_kunmap_atomic);
/**
 * dma_buf_kmap - Map a page of the buffer object into kernel address space. The
 * same restrictions as for kmap and friends apply.
 * @dmabuf:	[in]	buffer to map page from.
 * @page_num:	[in]	page in PAGE_SIZE units to map.
 *
 * This call must always succeed, any necessary preparations that might fail
 * need to be done in begin_cpu_access.
 */
void *dma_buf_kmap(struct dma_buf *dmabuf, unsigned long page_num)
{
        WARN_ON(!dmabuf);

        return dmabuf->ops->kmap(dmabuf, page_num);
}
EXPORT_SYMBOL_GPL(dma_buf_kmap);
/**
 * dma_buf_kunmap - Unmap a page obtained by dma_buf_kmap.
 * @dmabuf:	[in]	buffer to unmap page from.
 * @page_num:	[in]	page in PAGE_SIZE units to unmap.
 * @vaddr:	[in]	kernel space pointer obtained from dma_buf_kmap.
 *
 * This call must always succeed.
 */
void dma_buf_kunmap(struct dma_buf *dmabuf, unsigned long page_num,
                    void *vaddr)
{
        WARN_ON(!dmabuf);

        if (dmabuf->ops->kunmap)
                dmabuf->ops->kunmap(dmabuf, page_num, vaddr);
}
EXPORT_SYMBOL_GPL(dma_buf_kunmap);
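/*
 * Usage note (illustrative): page-wise CPU access between
 * begin/end_cpu_access, using the non-atomic variants (the _atomic pair
 * follows the same pattern under kmap_atomic's restrictions):
 *
 *      void *vaddr = dma_buf_kmap(dmabuf, page_num);
 *
 *      // ... touch at most PAGE_SIZE bytes at vaddr ...
 *      dma_buf_kunmap(dmabuf, page_num, vaddr);
 */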
/**
 * dma_buf_mmap - Setup up a userspace mmap with the given vma
 * @dmabuf:	[in]	buffer that should back the vma
 * @vma:	[in]	vma for the mmap
 * @pgoff:	[in]	offset in pages where this mmap should start within the
 *			dma-buf buffer.
 *
 * This function adjusts the passed in vma so that it points at the file of the
 * dma_buf operation. It also adjusts the starting pgoff and does bounds
 * checking on the size of the vma. Then it calls the exporter's mmap function
 * to set up the mapping.
 *
 * Can return negative error values, returns 0 on success.
 */
int dma_buf_mmap(struct dma_buf *dmabuf, struct vm_area_struct *vma,
                 unsigned long pgoff)
{
        struct file *oldfile;
        int ret;

        if (WARN_ON(!dmabuf || !vma))
                return -EINVAL;

        /* check for offset overflow */
        if (pgoff + ((vma->vm_end - vma->vm_start) >> PAGE_SHIFT) < pgoff)
                return -EOVERFLOW;

        /* check for overflowing the buffer's size */
        if (pgoff + ((vma->vm_end - vma->vm_start) >> PAGE_SHIFT) >
            dmabuf->size >> PAGE_SHIFT)
                return -EINVAL;

        /* readjust the vma */
        get_file(dmabuf->file);
        oldfile = vma->vm_file;
        vma->vm_file = dmabuf->file;
        vma->vm_pgoff = pgoff;

        ret = dmabuf->ops->mmap(dmabuf, vma);
        if (ret) {
                /* restore old parameters on failure */
                vma->vm_file = oldfile;
                fput(dmabuf->file);
        } else {
                if (oldfile)
                        fput(oldfile);
        }
        return ret;
}
EXPORT_SYMBOL_GPL(dma_buf_mmap);
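/*
 * Usage note (illustrative): a driver that wants to hand out userspace
 * mappings of an imported buffer can forward its own mmap file operation
 * here; "my_obj" and its lookup are hypothetical:
 *
 *      static int my_drv_mmap(struct file *file, struct vm_area_struct *vma)
 *      {
 *              struct my_obj *obj = file->private_data;
 *
 *              return dma_buf_mmap(obj->dmabuf, vma, 0);
 *      }
 */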
/**
 * dma_buf_vmap - Create virtual mapping for the buffer object into kernel
 * address space. Same restrictions as for vmap and friends apply.
 * @dmabuf:	[in]	buffer to vmap
 *
 * This call may fail due to lack of virtual mapping address space.
 * These calls are optional in drivers. The intended use for them
 * is for mapping objects linear in kernel space for high use objects.
 * Please attempt to use kmap/kunmap before thinking about these interfaces.
 *
 * Returns NULL on error.
 */
void *dma_buf_vmap(struct dma_buf *dmabuf)
{
        void *ptr;

        if (WARN_ON(!dmabuf))
                return NULL;

        if (!dmabuf->ops->vmap)
                return NULL;

        mutex_lock(&dmabuf->lock);
        if (dmabuf->vmapping_counter) {
                dmabuf->vmapping_counter++;
                BUG_ON(!dmabuf->vmap_ptr);
                ptr = dmabuf->vmap_ptr;
                goto out_unlock;
        }

        BUG_ON(dmabuf->vmap_ptr);

        ptr = dmabuf->ops->vmap(dmabuf);
        if (WARN_ON_ONCE(IS_ERR(ptr)))
                ptr = NULL;
        if (!ptr)
                goto out_unlock;

        dmabuf->vmap_ptr = ptr;
        dmabuf->vmapping_counter = 1;

out_unlock:
        mutex_unlock(&dmabuf->lock);
        return ptr;
}
EXPORT_SYMBOL_GPL(dma_buf_vmap);
/**
 * dma_buf_vunmap - Unmap a vmap obtained by dma_buf_vmap.
 * @dmabuf:	[in]	buffer to vunmap
 * @vaddr:	[in]	vmap to vunmap
 */
void dma_buf_vunmap(struct dma_buf *dmabuf, void *vaddr)
{
        if (WARN_ON(!dmabuf))
                return;

        BUG_ON(!dmabuf->vmap_ptr);
        BUG_ON(dmabuf->vmapping_counter == 0);
        BUG_ON(dmabuf->vmap_ptr != vaddr);

        mutex_lock(&dmabuf->lock);
        if (--dmabuf->vmapping_counter == 0) {
                if (dmabuf->ops->vunmap)
                        dmabuf->ops->vunmap(dmabuf, vaddr);
                dmabuf->vmap_ptr = NULL;
        }
        mutex_unlock(&dmabuf->lock);
}
EXPORT_SYMBOL_GPL(dma_buf_vunmap);
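/*
 * Usage note (illustrative): vmap gives one linear kernel mapping of the
 * whole buffer; calls nest via vmapping_counter, and every successful
 * dma_buf_vmap() must be paired with a dma_buf_vunmap():
 *
 *      void *vaddr = dma_buf_vmap(dmabuf);
 *
 *      if (vaddr) {
 *              // ... dmabuf->size bytes accessible at vaddr ...
 *              dma_buf_vunmap(dmabuf, vaddr);
 *      }
 */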
#ifdef CONFIG_DEBUG_FS
static int dma_buf_describe(struct seq_file *s)
{
        int ret;
        struct dma_buf *buf_obj;
        struct dma_buf_attachment *attach_obj;
        int count = 0, attach_count;
        size_t size = 0;

        ret = mutex_lock_interruptible(&db_list.lock);
        if (ret)
                return ret;

        seq_puts(s, "\nDma-buf Objects:\n");
        seq_puts(s, "size\tflags\tmode\tcount\texp_name\n");

        list_for_each_entry(buf_obj, &db_list.head, list_node) {
                ret = mutex_lock_interruptible(&buf_obj->lock);
                if (ret) {
                        seq_puts(s,
                                 "\tERROR locking buffer object: skipping\n");
                        continue;
                }

                seq_printf(s, "%08zu\t%08x\t%08x\t%08ld\t%s\n",
                                buf_obj->size,
                                buf_obj->file->f_flags, buf_obj->file->f_mode,
                                file_count(buf_obj->file),
                                buf_obj->exp_name);

                seq_puts(s, "\tAttached Devices:\n");
                attach_count = 0;

                list_for_each_entry(attach_obj, &buf_obj->attachments, node) {
                        seq_puts(s, "\t");
                        seq_printf(s, "%s\n", dev_name(attach_obj->dev));
                        attach_count++;
                }

                seq_printf(s, "Total %d devices attached\n\n",
                                attach_count);

                count++;
                size += buf_obj->size;
                mutex_unlock(&buf_obj->lock);
        }

        seq_printf(s, "\nTotal %d objects, %zu bytes\n", count, size);

        mutex_unlock(&db_list.lock);
        return 0;
}
static int dma_buf_show(struct seq_file *s, void *unused)
{
        void (*func)(struct seq_file *) = s->private;

        func(s);
        return 0;
}

static int dma_buf_debug_open(struct inode *inode, struct file *file)
{
        return single_open(file, dma_buf_show, inode->i_private);
}

static const struct file_operations dma_buf_debug_fops = {
        .open           = dma_buf_debug_open,
        .read           = seq_read,
        .llseek         = seq_lseek,
        .release        = single_release,
};

static struct dentry *dma_buf_debugfs_dir;
static int dma_buf_init_debugfs(void)
{
        int err = 0;

        dma_buf_debugfs_dir = debugfs_create_dir("dma_buf", NULL);
        if (IS_ERR(dma_buf_debugfs_dir)) {
                err = PTR_ERR(dma_buf_debugfs_dir);
                dma_buf_debugfs_dir = NULL;
                return err;
        }

        err = dma_buf_debugfs_create_file("bufinfo", dma_buf_describe);
        if (err)
                pr_debug("dma_buf: debugfs: failed to create node bufinfo\n");

        return err;
}

static void dma_buf_uninit_debugfs(void)
{
        if (dma_buf_debugfs_dir)
                debugfs_remove_recursive(dma_buf_debugfs_dir);
}

int dma_buf_debugfs_create_file(const char *name,
                                int (*write)(struct seq_file *))
{
        struct dentry *d;

        d = debugfs_create_file(name, S_IRUGO, dma_buf_debugfs_dir,
                        write, &dma_buf_debug_fops);

        return PTR_ERR_OR_ZERO(d);
}
#else
static inline int dma_buf_init_debugfs(void)
{
        return 0;
}
static inline void dma_buf_uninit_debugfs(void)
{
}
#endif

static int __init dma_buf_init(void)
{
        mutex_init(&db_list.lock);
        INIT_LIST_HEAD(&db_list.head);
        dma_buf_init_debugfs();
        return 0;
}
subsys_initcall(dma_buf_init);

static void __exit dma_buf_deinit(void)
{
        dma_buf_uninit_debugfs();
}
__exitcall(dma_buf_deinit);