/*
 * drivers/staging/android/ion/ion.c
 *
 * Copyright (C) 2011 Google, Inc.
 *
 * This software is licensed under the terms of the GNU General Public
 * License version 2, as published by the Free Software Foundation, and
 * may be copied, distributed, and modified under those terms.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 */
#include <linux/device.h>
#include <linux/file.h>
#include <linux/freezer.h>
#include <linux/fs.h>
#include <linux/anon_inodes.h>
#include <linux/kthread.h>
#include <linux/list.h>
#include <linux/memblock.h>
#include <linux/miscdevice.h>
#include <linux/export.h>
#include <linux/mm.h>
#include <linux/mm_types.h>
#include <linux/rbtree.h>
#include <linux/slab.h>
#include <linux/seq_file.h>
#include <linux/uaccess.h>
#include <linux/vmalloc.h>
#include <linux/debugfs.h>
#include <linux/dma-buf.h>
#include <linux/idr.h>

#include "ion.h"
#include "ion_priv.h"
/**
 * struct ion_device - the metadata of the ion device node
 * @dev:		the actual misc device
 * @buffers:		an rb tree of all the existing buffers
 * @buffer_lock:	lock protecting the tree of buffers
 * @lock:		rwsem protecting the tree of heaps and clients
 * @heaps:		list of all the heaps in the system
 * @custom_ioctl:	arch-specific ioctl hook, may be NULL
 * @clients:		an rb tree of all the clients in the system
 * @debug_root:		debugfs root directory for ion
 */
struct ion_device {
	struct miscdevice dev;
	struct rb_root buffers;
	struct mutex buffer_lock;
	struct rw_semaphore lock;
	struct plist_head heaps;
	long (*custom_ioctl)(struct ion_client *client, unsigned int cmd,
			     unsigned long arg);
	struct rb_root clients;
	struct dentry *debug_root;
};
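/*
 * Lock ordering, as used below: dev->lock (the rwsem) is taken around heap
 * and client tree walks and may nest dev->buffer_lock inside it (see
 * ion_alloc() -> ion_buffer_create()), while client->lock is taken before
 * buffer->lock (see ion_map_kernel()).
 */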
/**
 * struct ion_client - a process/hw block local address space
 * @node:		node in the tree of all clients
 * @dev:		backpointer to ion device
 * @handles:		an rb tree of all the handles in this client
 * @idr:		an idr space for allocating handle ids
 * @lock:		lock protecting the tree of handles
 * @name:		used for debugging
 * @task:		used for debugging
 * @pid:		pid of the client, used for debugging
 * @debug_root:		debugfs entry for this client
 *
 * A client represents a list of buffers this client may access.
 * The mutex stored here is used to protect both the handles tree
 * as well as the handles themselves, and should be held while modifying
 * either.
 */
struct ion_client {
	struct rb_node node;
	struct ion_device *dev;
	struct rb_root handles;
	struct idr idr;
	struct mutex lock;
	const char *name;
	struct task_struct *task;
	pid_t pid;
	struct dentry *debug_root;
};
/**
 * struct ion_handle - a client local reference to a buffer
 * @ref:		reference count
 * @client:		back pointer to the client the buffer resides in
 * @buffer:		pointer to the buffer
 * @node:		node in the client's handle rbtree
 * @kmap_cnt:		count of times this client has mapped to kernel
 * @id:			client-unique id allocated by client->idr
 *
 * Modifications to node, map_cnt or mapping should be protected by the
 * lock in the client.  Other fields are never changed after initialization.
 */
struct ion_handle {
	struct kref ref;
	struct ion_client *client;
	struct ion_buffer *buffer;
	struct rb_node node;
	unsigned int kmap_cnt;
	int id;
};
bool ion_buffer_fault_user_mappings(struct ion_buffer *buffer)
{
	return ((buffer->flags & ION_FLAG_CACHED) &&
		!(buffer->flags & ION_FLAG_CACHED_NEEDS_SYNC));
}

bool ion_buffer_cached(struct ion_buffer *buffer)
{
	return !!(buffer->flags & ION_FLAG_CACHED);
}
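/*
 * For cached buffers that fault in their user mappings, ion keeps an array
 * of struct page pointers in buffer->pages and uses the (always zero) low
 * bit of each pointer as a per-page dirty tag: ion_buffer_page_dirty() sets
 * the bit, ion_buffer_page_clean() clears it, and ion_buffer_page() strips
 * the tag to recover the real pointer before use.
 */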
static inline struct page *ion_buffer_page(struct page *page)
{
	return (struct page *)((unsigned long)page & ~(1UL));
}

static inline bool ion_buffer_page_is_dirty(struct page *page)
{
	return !!((unsigned long)page & 1UL);
}

static inline void ion_buffer_page_dirty(struct page **page)
{
	*page = (struct page *)((unsigned long)(*page) | 1UL);
}

static inline void ion_buffer_page_clean(struct page **page)
{
	*page = (struct page *)((unsigned long)(*page) & ~(1UL));
}
/* this function should only be called while dev->lock is held */
static void ion_buffer_add(struct ion_device *dev,
			   struct ion_buffer *buffer)
{
	struct rb_node **p = &dev->buffers.rb_node;
	struct rb_node *parent = NULL;
	struct ion_buffer *entry;

	while (*p) {
		parent = *p;
		entry = rb_entry(parent, struct ion_buffer, node);

		if (buffer < entry) {
			p = &(*p)->rb_left;
		} else if (buffer > entry) {
			p = &(*p)->rb_right;
		} else {
			pr_err("%s: buffer already found.\n", __func__);
			BUG();
		}
	}

	rb_link_node(&buffer->node, parent, p);
	rb_insert_color(&buffer->node, &dev->buffers);
}
/* this function should only be called while dev->lock is held */
static struct ion_buffer *ion_buffer_create(struct ion_heap *heap,
					    struct ion_device *dev,
					    unsigned long len,
					    unsigned long align,
					    unsigned long flags)
{
	struct ion_buffer *buffer;
	struct sg_table *table;
	struct scatterlist *sg;
	int i, ret;

	buffer = kzalloc(sizeof(struct ion_buffer), GFP_KERNEL);
	if (!buffer)
		return ERR_PTR(-ENOMEM);

	buffer->heap = heap;
	buffer->flags = flags;
	kref_init(&buffer->ref);

	ret = heap->ops->allocate(heap, buffer, len, align, flags);
	if (ret) {
		if (!(heap->flags & ION_HEAP_FLAG_DEFER_FREE))
			goto err2;

		ion_heap_freelist_drain(heap, 0);
		ret = heap->ops->allocate(heap, buffer, len, align,
					  flags);
		if (ret)
			goto err2;
	}

	buffer->dev = dev;
	buffer->size = len;

	table = heap->ops->map_dma(heap, buffer);
	if (WARN_ONCE(table == NULL,
		      "heap->ops->map_dma should return ERR_PTR on error"))
		table = ERR_PTR(-EINVAL);
	if (IS_ERR(table)) {
		heap->ops->free(buffer);
		kfree(buffer);
		return ERR_PTR(PTR_ERR(table));
	}
	buffer->sg_table = table;

	if (ion_buffer_fault_user_mappings(buffer)) {
		int num_pages = PAGE_ALIGN(buffer->size) / PAGE_SIZE;
		struct scatterlist *sg;
		int j, k = 0;

		buffer->pages = vmalloc(sizeof(struct page *) * num_pages);
		if (!buffer->pages) {
			ret = -ENOMEM;
			goto err;
		}

		for_each_sg(table->sgl, sg, table->nents, i) {
			struct page *page = sg_page(sg);

			for (j = 0; j < sg_dma_len(sg) / PAGE_SIZE; j++)
				buffer->pages[k++] = page++;
		}
	}

	INIT_LIST_HEAD(&buffer->vmas);
	mutex_init(&buffer->lock);
	/*
	 * this will set up dma addresses for the sglist -- it is not
	 * technically correct as per the dma api -- a specific
	 * device isn't really taking ownership here.  However, in practice on
	 * our systems the only dma_address space is physical addresses.
	 * Additionally, we can't afford the overhead of invalidating every
	 * allocation via dma_map_sg. The implicit contract here is that
	 * memory coming from the heaps is ready for dma, ie if it has a
	 * cached mapping that mapping has been invalidated
	 */
	for_each_sg(buffer->sg_table->sgl, sg, buffer->sg_table->nents, i)
		sg_dma_address(sg) = sg_phys(sg);
	mutex_lock(&dev->buffer_lock);
	ion_buffer_add(dev, buffer);
	mutex_unlock(&dev->buffer_lock);
	return buffer;

err:
	heap->ops->unmap_dma(heap, buffer);
	heap->ops->free(buffer);
	vfree(buffer->pages);
err2:
	kfree(buffer);
	return ERR_PTR(ret);
}
void ion_buffer_destroy(struct ion_buffer *buffer)
{
	if (WARN_ON(buffer->kmap_cnt > 0))
		buffer->heap->ops->unmap_kernel(buffer->heap, buffer);
	buffer->heap->ops->unmap_dma(buffer->heap, buffer);
	buffer->heap->ops->free(buffer);
	vfree(buffer->pages);
	kfree(buffer);
}
static void _ion_buffer_destroy(struct kref *kref)
{
	struct ion_buffer *buffer = container_of(kref, struct ion_buffer, ref);
	struct ion_heap *heap = buffer->heap;
	struct ion_device *dev = buffer->dev;

	mutex_lock(&dev->buffer_lock);
	rb_erase(&buffer->node, &dev->buffers);
	mutex_unlock(&dev->buffer_lock);

	if (heap->flags & ION_HEAP_FLAG_DEFER_FREE)
		ion_heap_freelist_add(heap, buffer);
	else
		ion_buffer_destroy(buffer);
}
static void ion_buffer_get(struct ion_buffer *buffer)
{
	kref_get(&buffer->ref);
}

static int ion_buffer_put(struct ion_buffer *buffer)
{
	return kref_put(&buffer->ref, _ion_buffer_destroy);
}
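/*
 * Buffer lifetime: ion_buffer_create() returns a buffer with a single
 * reference; every handle (ion_handle_create()) and every exported dma-buf
 * (ion_share_dma_buf()) takes one more.  When the last reference is dropped
 * the buffer is freed, via the heap's deferred-free list when
 * ION_HEAP_FLAG_DEFER_FREE is set.
 */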
static void ion_buffer_add_to_handle(struct ion_buffer *buffer)
{
	mutex_lock(&buffer->lock);
	buffer->handle_count++;
	mutex_unlock(&buffer->lock);
}

static void ion_buffer_remove_from_handle(struct ion_buffer *buffer)
{
	/*
	 * When a buffer is removed from a handle, if it is not in
	 * any other handles, copy the taskcomm and the pid of the
	 * process it's being removed from into the buffer.  At this
	 * point there will be no way to track what processes this buffer is
	 * being used by, it only exists as a dma_buf file descriptor.
	 * The taskcomm and pid can provide a debug hint as to where this fd
	 * is in the system.
	 */
	mutex_lock(&buffer->lock);
	buffer->handle_count--;
	BUG_ON(buffer->handle_count < 0);
	if (!buffer->handle_count) {
		struct task_struct *task;

		task = current->group_leader;
		get_task_comm(buffer->task_comm, task);
		buffer->pid = task_pid_nr(task);
	}
	mutex_unlock(&buffer->lock);
}
static struct ion_handle *ion_handle_create(struct ion_client *client,
					    struct ion_buffer *buffer)
{
	struct ion_handle *handle;

	handle = kzalloc(sizeof(struct ion_handle), GFP_KERNEL);
	if (!handle)
		return ERR_PTR(-ENOMEM);
	kref_init(&handle->ref);
	RB_CLEAR_NODE(&handle->node);
	handle->client = client;
	ion_buffer_get(buffer);
	ion_buffer_add_to_handle(buffer);
	handle->buffer = buffer;

	return handle;
}

static void ion_handle_kmap_put(struct ion_handle *);
static void ion_handle_destroy(struct kref *kref)
{
	struct ion_handle *handle = container_of(kref, struct ion_handle, ref);
	struct ion_client *client = handle->client;
	struct ion_buffer *buffer = handle->buffer;

	mutex_lock(&buffer->lock);
	while (handle->kmap_cnt)
		ion_handle_kmap_put(handle);
	mutex_unlock(&buffer->lock);

	idr_remove(&client->idr, handle->id);
	if (!RB_EMPTY_NODE(&handle->node))
		rb_erase(&handle->node, &client->handles);

	ion_buffer_remove_from_handle(buffer);
	ion_buffer_put(buffer);

	kfree(handle);
}
struct ion_buffer *ion_handle_buffer(struct ion_handle *handle)
{
	return handle->buffer;
}

static void ion_handle_get(struct ion_handle *handle)
{
	kref_get(&handle->ref);
}

static int ion_handle_put(struct ion_handle *handle)
{
	return kref_put(&handle->ref, ion_handle_destroy);
}
static struct ion_handle *ion_handle_lookup(struct ion_client *client,
					    struct ion_buffer *buffer)
{
	struct rb_node *n;

	for (n = rb_first(&client->handles); n; n = rb_next(n)) {
		struct ion_handle *handle = rb_entry(n, struct ion_handle,
						     node);
		if (handle->buffer == buffer)
			return handle;
	}
	return ERR_PTR(-EINVAL);
}

static struct ion_handle *ion_uhandle_get(struct ion_client *client, int id)
{
	return idr_find(&client->idr, id);
}

static bool ion_handle_validate(struct ion_client *client,
				struct ion_handle *handle)
{
	return (ion_uhandle_get(client, handle->id) == handle);
}
static int ion_handle_add(struct ion_client *client, struct ion_handle *handle)
{
	int rc;
	struct rb_node **p = &client->handles.rb_node;
	struct rb_node *parent = NULL;
	struct ion_handle *entry;

	do {
		int id;

		rc = idr_pre_get(&client->idr, GFP_KERNEL);
		if (!rc)
			return -ENOMEM;
		rc = idr_get_new(&client->idr, handle, &id);
		handle->id = id;
	} while (rc == -EAGAIN);
	if (rc)
		return rc;

	while (*p) {
		parent = *p;
		entry = rb_entry(parent, struct ion_handle, node);

		if (handle < entry) {
			p = &(*p)->rb_left;
		} else if (handle > entry) {
			p = &(*p)->rb_right;
		} else {
			WARN(1, "%s: buffer already found.", __func__);
			break;
		}
	}

	rb_link_node(&handle->node, parent, p);
	rb_insert_color(&handle->node, &client->handles);

	return 0;
}
struct ion_handle *ion_alloc(struct ion_client *client, size_t len,
			     size_t align, unsigned int heap_id_mask,
			     unsigned int flags)
{
	struct ion_handle *handle;
	struct ion_device *dev = client->dev;
	struct ion_buffer *buffer = NULL;
	struct ion_heap *heap;
	int ret;

	pr_debug("%s: len %zu align %zu heap_id_mask %u flags %x\n", __func__,
		 len, align, heap_id_mask, flags);
	/*
	 * traverse the list of heaps available in this system in priority
	 * order.  If the heap type is supported by the client, and matches the
	 * request of the caller allocate from it.  Repeat until allocate has
	 * succeeded or all heaps have been tried
	 */
	if (!len)
		return ERR_PTR(-EINVAL);

	len = PAGE_ALIGN(len);

	down_read(&dev->lock);
	plist_for_each_entry(heap, &dev->heaps, node) {
		/* if the caller didn't specify this heap id */
		if (!((1 << heap->id) & heap_id_mask))
			continue;
		buffer = ion_buffer_create(heap, dev, len, align, flags);
		if (!IS_ERR_OR_NULL(buffer))
			break;
	}
	up_read(&dev->lock);

	if (buffer == NULL)
		return ERR_PTR(-ENODEV);

	if (IS_ERR(buffer))
		return ERR_PTR(PTR_ERR(buffer));

	handle = ion_handle_create(client, buffer);

	/*
	 * ion_buffer_create will create a buffer with a ref_cnt of 1,
	 * and ion_handle_create will take a second reference, drop one here
	 */
	ion_buffer_put(buffer);

	if (IS_ERR(handle))
		return handle;

	mutex_lock(&client->lock);
	ret = ion_handle_add(client, handle);
	if (ret) {
		ion_handle_put(handle);
		handle = ERR_PTR(ret);
	}
	mutex_unlock(&client->lock);

	return handle;
}
EXPORT_SYMBOL(ion_alloc);
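/*
 * Typical in-kernel usage, as an illustrative sketch only -- "idev" and
 * "my_heap_id" are assumptions about a particular board, not something
 * this file defines:
 *
 *	struct ion_client *client = ion_client_create(idev, "my-driver");
 *	struct ion_handle *handle;
 *
 *	handle = ion_alloc(client, SZ_1M, PAGE_SIZE, 1 << my_heap_id,
 *			   ION_FLAG_CACHED);
 *	if (IS_ERR(handle))
 *		return PTR_ERR(handle);
 *	...
 *	ion_free(client, handle);
 *	ion_client_destroy(client);
 */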
void ion_free(struct ion_client *client, struct ion_handle *handle)
{
	bool valid_handle;

	BUG_ON(client != handle->client);

	mutex_lock(&client->lock);
	valid_handle = ion_handle_validate(client, handle);

	if (!valid_handle) {
		WARN(1, "%s: invalid handle passed to free.\n", __func__);
		mutex_unlock(&client->lock);
		return;
	}
	ion_handle_put(handle);
	mutex_unlock(&client->lock);
}
EXPORT_SYMBOL(ion_free);
int ion_phys(struct ion_client *client, struct ion_handle *handle,
	     ion_phys_addr_t *addr, size_t *len)
{
	struct ion_buffer *buffer;
	int ret;

	mutex_lock(&client->lock);
	if (!ion_handle_validate(client, handle)) {
		mutex_unlock(&client->lock);
		return -EINVAL;
	}

	buffer = handle->buffer;

	if (!buffer->heap->ops->phys) {
		pr_err("%s: ion_phys is not implemented by this heap.\n",
		       __func__);
		mutex_unlock(&client->lock);
		return -ENODEV;
	}
	mutex_unlock(&client->lock);
	ret = buffer->heap->ops->phys(buffer->heap, buffer, addr, len);
	return ret;
}
EXPORT_SYMBOL(ion_phys);
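/*
 * Kernel mappings are refcounted at two levels below: buffer->kmap_cnt
 * counts mappings of the buffer itself, while handle->kmap_cnt counts
 * mappings taken through one client's handle, so a handle torn down with
 * mappings outstanding only drops the references it itself took.
 */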
static void *ion_buffer_kmap_get(struct ion_buffer *buffer)
{
	void *vaddr;

	if (buffer->kmap_cnt) {
		buffer->kmap_cnt++;
		return buffer->vaddr;
	}
	vaddr = buffer->heap->ops->map_kernel(buffer->heap, buffer);
	if (WARN_ONCE(vaddr == NULL,
		      "heap->ops->map_kernel should return ERR_PTR on error"))
		return ERR_PTR(-EINVAL);
	if (IS_ERR(vaddr))
		return vaddr;
	buffer->vaddr = vaddr;
	buffer->kmap_cnt++;
	return vaddr;
}

static void *ion_handle_kmap_get(struct ion_handle *handle)
{
	struct ion_buffer *buffer = handle->buffer;
	void *vaddr;

	if (handle->kmap_cnt) {
		handle->kmap_cnt++;
		return buffer->vaddr;
	}
	vaddr = ion_buffer_kmap_get(buffer);
	if (IS_ERR(vaddr))
		return vaddr;
	handle->kmap_cnt++;
	return vaddr;
}

static void ion_buffer_kmap_put(struct ion_buffer *buffer)
{
	buffer->kmap_cnt--;
	if (!buffer->kmap_cnt) {
		buffer->heap->ops->unmap_kernel(buffer->heap, buffer);
		buffer->vaddr = NULL;
	}
}

static void ion_handle_kmap_put(struct ion_handle *handle)
{
	struct ion_buffer *buffer = handle->buffer;

	handle->kmap_cnt--;
	if (!handle->kmap_cnt)
		ion_buffer_kmap_put(buffer);
}
void *ion_map_kernel(struct ion_client *client, struct ion_handle *handle)
{
	struct ion_buffer *buffer;
	void *vaddr;

	mutex_lock(&client->lock);
	if (!ion_handle_validate(client, handle)) {
		pr_err("%s: invalid handle passed to map_kernel.\n",
		       __func__);
		mutex_unlock(&client->lock);
		return ERR_PTR(-EINVAL);
	}

	buffer = handle->buffer;

	if (!handle->buffer->heap->ops->map_kernel) {
		pr_err("%s: map_kernel is not implemented by this heap.\n",
		       __func__);
		mutex_unlock(&client->lock);
		return ERR_PTR(-ENODEV);
	}

	mutex_lock(&buffer->lock);
	vaddr = ion_handle_kmap_get(handle);
	mutex_unlock(&buffer->lock);
	mutex_unlock(&client->lock);
	return vaddr;
}
EXPORT_SYMBOL(ion_map_kernel);
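/*
 * Each successful ion_map_kernel() must be balanced by one
 * ion_unmap_kernel() on the same handle; the underlying heap mapping is
 * torn down when the last count drops (see ion_buffer_kmap_put() above).
 */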
void ion_unmap_kernel(struct ion_client *client, struct ion_handle *handle)
{
	struct ion_buffer *buffer;

	mutex_lock(&client->lock);
	buffer = handle->buffer;
	mutex_lock(&buffer->lock);
	ion_handle_kmap_put(handle);
	mutex_unlock(&buffer->lock);
	mutex_unlock(&client->lock);
}
EXPORT_SYMBOL(ion_unmap_kernel);
static int ion_debug_client_show(struct seq_file *s, void *unused)
{
	struct ion_client *client = s->private;
	struct rb_node *n;
	size_t sizes[ION_NUM_HEAP_IDS] = {0};
	const char *names[ION_NUM_HEAP_IDS] = {0};
	int i;

	mutex_lock(&client->lock);
	for (n = rb_first(&client->handles); n; n = rb_next(n)) {
		struct ion_handle *handle = rb_entry(n, struct ion_handle,
						     node);
		unsigned int id = handle->buffer->heap->id;

		if (!names[id])
			names[id] = handle->buffer->heap->name;
		sizes[id] += handle->buffer->size;
	}
	mutex_unlock(&client->lock);

	seq_printf(s, "%16.16s: %16.16s\n", "heap_name", "size_in_bytes");
	for (i = 0; i < ION_NUM_HEAP_IDS; i++) {
		if (!names[i])
			continue;
		seq_printf(s, "%16.16s: %16zu\n", names[i], sizes[i]);
	}
	return 0;
}

static int ion_debug_client_open(struct inode *inode, struct file *file)
{
	return single_open(file, ion_debug_client_show, inode->i_private);
}

static const struct file_operations debug_client_fops = {
	.open = ion_debug_client_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
};
struct ion_client *ion_client_create(struct ion_device *dev,
				     const char *name)
{
	struct ion_client *client;
	struct task_struct *task;
	struct rb_node **p;
	struct rb_node *parent = NULL;
	struct ion_client *entry;
	char debug_name[64];
	pid_t pid;

	get_task_struct(current->group_leader);
	task_lock(current->group_leader);
	pid = task_pid_nr(current->group_leader);
	/*
	 * don't bother to store task struct for kernel threads,
	 * they can't be killed anyway
	 */
	if (current->group_leader->flags & PF_KTHREAD) {
		put_task_struct(current->group_leader);
		task = NULL;
	} else {
		task = current->group_leader;
	}
	task_unlock(current->group_leader);

	client = kzalloc(sizeof(struct ion_client), GFP_KERNEL);
	if (!client) {
		if (task)
			put_task_struct(current->group_leader);
		return ERR_PTR(-ENOMEM);
	}

	client->dev = dev;
	client->handles = RB_ROOT;
	idr_init(&client->idr);
	mutex_init(&client->lock);
	client->name = name;
	client->task = task;
	client->pid = pid;

	down_write(&dev->lock);
	p = &dev->clients.rb_node;
	while (*p) {
		parent = *p;
		entry = rb_entry(parent, struct ion_client, node);

		if (client < entry)
			p = &(*p)->rb_left;
		else if (client > entry)
			p = &(*p)->rb_right;
	}
	rb_link_node(&client->node, parent, p);
	rb_insert_color(&client->node, &dev->clients);

	snprintf(debug_name, 64, "%u", client->pid);
	client->debug_root = debugfs_create_file(debug_name, 0664,
						 dev->debug_root, client,
						 &debug_client_fops);
	up_write(&dev->lock);

	return client;
}
EXPORT_SYMBOL(ion_client_create);
void ion_client_destroy(struct ion_client *client)
{
	struct ion_device *dev = client->dev;
	struct rb_node *n;

	pr_debug("%s: %d\n", __func__, __LINE__);
	while ((n = rb_first(&client->handles))) {
		struct ion_handle *handle = rb_entry(n, struct ion_handle,
						     node);
		ion_handle_destroy(&handle->ref);
	}

	idr_remove_all(&client->idr);
	idr_destroy(&client->idr);

	down_write(&dev->lock);
	if (client->task)
		put_task_struct(client->task);
	rb_erase(&client->node, &dev->clients);
	debugfs_remove_recursive(client->debug_root);
	up_write(&dev->lock);

	kfree(client);
}
EXPORT_SYMBOL(ion_client_destroy);
struct sg_table *ion_sg_table(struct ion_client *client,
			      struct ion_handle *handle)
{
	struct ion_buffer *buffer;
	struct sg_table *table;

	mutex_lock(&client->lock);
	if (!ion_handle_validate(client, handle)) {
		pr_err("%s: invalid handle passed to map_dma.\n",
		       __func__);
		mutex_unlock(&client->lock);
		return ERR_PTR(-EINVAL);
	}
	buffer = handle->buffer;
	table = buffer->sg_table;
	mutex_unlock(&client->lock);
	return table;
}
EXPORT_SYMBOL(ion_sg_table);
static void ion_buffer_sync_for_device(struct ion_buffer *buffer,
				       struct device *dev,
				       enum dma_data_direction direction);

static struct sg_table *ion_map_dma_buf(struct dma_buf_attachment *attachment,
					enum dma_data_direction direction)
{
	struct dma_buf *dmabuf = attachment->dmabuf;
	struct ion_buffer *buffer = dmabuf->priv;

	ion_buffer_sync_for_device(buffer, attachment->dev, direction);
	return buffer->sg_table;
}

static void ion_unmap_dma_buf(struct dma_buf_attachment *attachment,
			      struct sg_table *table,
			      enum dma_data_direction direction)
{
}

struct ion_vma_list {
	struct list_head list;
	struct vm_area_struct *vma;
};
static void ion_buffer_sync_for_device(struct ion_buffer *buffer,
				       struct device *dev,
				       enum dma_data_direction dir)
{
	struct ion_vma_list *vma_list;
	int pages = PAGE_ALIGN(buffer->size) / PAGE_SIZE;
	int i;

	pr_debug("%s: syncing for device %s\n", __func__,
		 dev ? dev_name(dev) : "null");

	if (!ion_buffer_fault_user_mappings(buffer))
		return;

	mutex_lock(&buffer->lock);
	for (i = 0; i < pages; i++) {
		struct page *page = buffer->pages[i];

		if (ion_buffer_page_is_dirty(page))
			__dma_page_cpu_to_dev(page, 0, PAGE_SIZE, dir);
		ion_buffer_page_clean(buffer->pages + i);
	}
	list_for_each_entry(vma_list, &buffer->vmas, list) {
		struct vm_area_struct *vma = vma_list->vma;

		zap_page_range(vma, vma->vm_start, vma->vm_end - vma->vm_start,
			       NULL);
	}
	mutex_unlock(&buffer->lock);
}
static int ion_vm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
	struct ion_buffer *buffer = vma->vm_private_data;
	int ret;

	mutex_lock(&buffer->lock);
	BUG_ON(!buffer->pages || !buffer->pages[vmf->pgoff]);
	ion_buffer_page_dirty(buffer->pages + vmf->pgoff);

	ret = vm_insert_page(vma, (unsigned long)vmf->virtual_address,
			     ion_buffer_page(buffer->pages[vmf->pgoff]));
	mutex_unlock(&buffer->lock);
	if (ret)
		return VM_FAULT_ERROR;

	return VM_FAULT_NOPAGE;
}
static void ion_vm_open(struct vm_area_struct *vma)
{
	struct ion_buffer *buffer = vma->vm_private_data;
	struct ion_vma_list *vma_list;

	vma_list = kmalloc(sizeof(struct ion_vma_list), GFP_KERNEL);
	if (!vma_list)
		return;
	vma_list->vma = vma;
	mutex_lock(&buffer->lock);
	list_add(&vma_list->list, &buffer->vmas);
	mutex_unlock(&buffer->lock);
	pr_debug("%s: adding %p\n", __func__, vma);
}

static void ion_vm_close(struct vm_area_struct *vma)
{
	struct ion_buffer *buffer = vma->vm_private_data;
	struct ion_vma_list *vma_list, *tmp;

	pr_debug("%s\n", __func__);
	mutex_lock(&buffer->lock);
	list_for_each_entry_safe(vma_list, tmp, &buffer->vmas, list) {
		if (vma_list->vma != vma)
			continue;
		list_del(&vma_list->list);
		kfree(vma_list);
		pr_debug("%s: deleting %p\n", __func__, vma);
		break;
	}
	mutex_unlock(&buffer->lock);
}

static struct vm_operations_struct ion_vma_ops = {
	.open = ion_vm_open,
	.close = ion_vm_close,
	.fault = ion_vm_fault,
};
static int ion_mmap(struct dma_buf *dmabuf, struct vm_area_struct *vma)
{
	struct ion_buffer *buffer = dmabuf->priv;
	int ret = 0;

	if (!buffer->heap->ops->map_user) {
		pr_err("%s: this heap does not define a method for mapping to userspace\n",
		       __func__);
		return -EINVAL;
	}

	if (ion_buffer_fault_user_mappings(buffer)) {
		vma->vm_private_data = buffer;
		vma->vm_ops = &ion_vma_ops;
		ion_vm_open(vma);
		return 0;
	}

	if (!(buffer->flags & ION_FLAG_CACHED))
		vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot);

	mutex_lock(&buffer->lock);
	/* now map it to userspace */
	ret = buffer->heap->ops->map_user(buffer->heap, buffer, vma);
	mutex_unlock(&buffer->lock);

	if (ret)
		pr_err("%s: failure mapping buffer to userspace\n",
		       __func__);

	return ret;
}
static void ion_dma_buf_release(struct dma_buf *dmabuf)
{
	struct ion_buffer *buffer = dmabuf->priv;

	ion_buffer_put(buffer);
}

static void *ion_dma_buf_kmap(struct dma_buf *dmabuf, unsigned long offset)
{
	struct ion_buffer *buffer = dmabuf->priv;

	return buffer->vaddr + offset * PAGE_SIZE;
}

static void ion_dma_buf_kunmap(struct dma_buf *dmabuf, unsigned long offset,
			       void *ptr)
{
}
static int ion_dma_buf_begin_cpu_access(struct dma_buf *dmabuf, size_t start,
					size_t len,
					enum dma_data_direction direction)
{
	struct ion_buffer *buffer = dmabuf->priv;
	void *vaddr;

	if (!buffer->heap->ops->map_kernel) {
		pr_err("%s: map kernel is not implemented by this heap.\n",
		       __func__);
		return -ENODEV;
	}

	mutex_lock(&buffer->lock);
	vaddr = ion_buffer_kmap_get(buffer);
	mutex_unlock(&buffer->lock);
	if (IS_ERR(vaddr))
		return PTR_ERR(vaddr);
	return 0;
}

static void ion_dma_buf_end_cpu_access(struct dma_buf *dmabuf, size_t start,
				       size_t len,
				       enum dma_data_direction direction)
{
	struct ion_buffer *buffer = dmabuf->priv;

	mutex_lock(&buffer->lock);
	ion_buffer_kmap_put(buffer);
	mutex_unlock(&buffer->lock);
}
static struct dma_buf_ops dma_buf_ops = {
	.map_dma_buf = ion_map_dma_buf,
	.unmap_dma_buf = ion_unmap_dma_buf,
	.mmap = ion_mmap,
	.release = ion_dma_buf_release,
	.begin_cpu_access = ion_dma_buf_begin_cpu_access,
	.end_cpu_access = ion_dma_buf_end_cpu_access,
	.kmap_atomic = ion_dma_buf_kmap,
	.kunmap_atomic = ion_dma_buf_kunmap,
	.kmap = ion_dma_buf_kmap,
	.kunmap = ion_dma_buf_kunmap,
};
struct dma_buf *ion_share_dma_buf(struct ion_client *client,
				  struct ion_handle *handle)
{
	struct ion_buffer *buffer;
	struct dma_buf *dmabuf;
	bool valid_handle;

	mutex_lock(&client->lock);
	valid_handle = ion_handle_validate(client, handle);
	mutex_unlock(&client->lock);
	if (!valid_handle) {
		WARN(1, "%s: invalid handle passed to share.\n", __func__);
		return ERR_PTR(-EINVAL);
	}

	buffer = handle->buffer;
	ion_buffer_get(buffer);
	dmabuf = dma_buf_export(buffer, &dma_buf_ops, buffer->size, O_RDWR);
	if (IS_ERR(dmabuf))
		ion_buffer_put(buffer);

	return dmabuf;
}
EXPORT_SYMBOL(ion_share_dma_buf);
int ion_share_dma_buf_fd(struct ion_client *client, struct ion_handle *handle)
{
	struct dma_buf *dmabuf;
	int fd;

	dmabuf = ion_share_dma_buf(client, handle);
	if (IS_ERR(dmabuf))
		return PTR_ERR(dmabuf);

	fd = dma_buf_fd(dmabuf, O_CLOEXEC);
	if (fd < 0)
		dma_buf_put(dmabuf);

	return fd;
}
EXPORT_SYMBOL(ion_share_dma_buf_fd);
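/*
 * A buffer moves between processes as a dma-buf fd; an illustrative round
 * trip between two clients (the client and handle names are placeholders):
 *
 *	int fd = ion_share_dma_buf_fd(client_a, handle_a);
 *	// pass fd to the other side, e.g. over binder or a socket
 *	struct ion_handle *handle_b = ion_import_dma_buf(client_b, fd);
 */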
struct ion_handle *ion_import_dma_buf(struct ion_client *client, int fd)
{
	struct dma_buf *dmabuf;
	struct ion_buffer *buffer;
	struct ion_handle *handle;
	int ret;

	dmabuf = dma_buf_get(fd);
	if (IS_ERR(dmabuf))
		return ERR_PTR(PTR_ERR(dmabuf));

	/* if this memory came from ion */
	if (dmabuf->ops != &dma_buf_ops) {
		pr_err("%s: can not import dmabuf from another exporter\n",
		       __func__);
		dma_buf_put(dmabuf);
		return ERR_PTR(-EINVAL);
	}
	buffer = dmabuf->priv;

	mutex_lock(&client->lock);
	/* if a handle exists for this buffer just take a reference to it */
	handle = ion_handle_lookup(client, buffer);
	if (!IS_ERR(handle)) {
		ion_handle_get(handle);
		goto end;
	}
	handle = ion_handle_create(client, buffer);
	if (IS_ERR(handle))
		goto end;
	ret = ion_handle_add(client, handle);
	if (ret) {
		ion_handle_put(handle);
		handle = ERR_PTR(ret);
	}
end:
	mutex_unlock(&client->lock);
	dma_buf_put(dmabuf);
	return handle;
}
EXPORT_SYMBOL(ion_import_dma_buf);
static int ion_sync_for_device(struct ion_client *client, int fd)
{
	struct dma_buf *dmabuf;
	struct ion_buffer *buffer;

	dmabuf = dma_buf_get(fd);
	if (IS_ERR(dmabuf))
		return PTR_ERR(dmabuf);

	/* if this memory came from ion */
	if (dmabuf->ops != &dma_buf_ops) {
		pr_err("%s: can not sync dmabuf from another exporter\n",
		       __func__);
		dma_buf_put(dmabuf);
		return -EINVAL;
	}
	buffer = dmabuf->priv;

	dma_sync_sg_for_device(NULL, buffer->sg_table->sgl,
			       buffer->sg_table->nents, DMA_BIDIRECTIONAL);
	dma_buf_put(dmabuf);
	return 0;
}
static long ion_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
{
	struct ion_client *client = filp->private_data;

	switch (cmd) {
	case ION_IOC_ALLOC:
	{
		struct ion_allocation_data data;
		struct ion_handle *handle;

		if (copy_from_user(&data, (void __user *)arg, sizeof(data)))
			return -EFAULT;
		handle = ion_alloc(client, data.len, data.align,
				   data.heap_id_mask, data.flags);

		if (IS_ERR(handle))
			return PTR_ERR(handle);

		data.handle = (struct ion_handle *)handle->id;

		if (copy_to_user((void __user *)arg, &data, sizeof(data))) {
			ion_free(client, handle);
			return -EFAULT;
		}
		break;
	}
	case ION_IOC_FREE:
	{
		struct ion_handle_data data;
		struct ion_handle *handle;

		if (copy_from_user(&data, (void __user *)arg,
				   sizeof(struct ion_handle_data)))
			return -EFAULT;
		mutex_lock(&client->lock);
		handle = ion_uhandle_get(client, (int)data.handle);
		mutex_unlock(&client->lock);
		if (!handle)
			return -EINVAL;
		ion_free(client, handle);
		break;
	}
	case ION_IOC_SHARE:
	case ION_IOC_MAP:
	{
		struct ion_fd_data data;
		struct ion_handle *handle;

		if (copy_from_user(&data, (void __user *)arg, sizeof(data)))
			return -EFAULT;
		handle = ion_uhandle_get(client, (int)data.handle);
		if (!handle)
			return -EINVAL;
		data.fd = ion_share_dma_buf_fd(client, handle);
		if (copy_to_user((void __user *)arg, &data, sizeof(data)))
			return -EFAULT;
		if (data.fd < 0)
			return data.fd;
		break;
	}
	case ION_IOC_IMPORT:
	{
		struct ion_fd_data data;
		struct ion_handle *handle;
		int ret = 0;

		if (copy_from_user(&data, (void __user *)arg,
				   sizeof(struct ion_fd_data)))
			return -EFAULT;
		handle = ion_import_dma_buf(client, data.fd);
		if (IS_ERR(handle))
			ret = PTR_ERR(handle);
		else
			data.handle = (struct ion_handle *)handle->id;

		if (copy_to_user((void __user *)arg, &data,
				 sizeof(struct ion_fd_data)))
			return -EFAULT;
		if (ret < 0)
			return ret;
		break;
	}
	case ION_IOC_SYNC:
	{
		struct ion_fd_data data;

		if (copy_from_user(&data, (void __user *)arg,
				   sizeof(struct ion_fd_data)))
			return -EFAULT;
		ion_sync_for_device(client, data.fd);
		break;
	}
	case ION_IOC_CUSTOM:
	{
		struct ion_device *dev = client->dev;
		struct ion_custom_data data;

		if (!dev->custom_ioctl)
			return -ENOTTY;
		if (copy_from_user(&data, (void __user *)arg,
				   sizeof(struct ion_custom_data)))
			return -EFAULT;
		return dev->custom_ioctl(client, data.cmd, data.arg);
	}
	default:
		return -ENOTTY;
	}
	return 0;
}
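/*
 * Userspace reaches the commands above through /dev/ion; a minimal sketch,
 * with error handling omitted and a made-up heap id:
 *
 *	int ion_fd = open("/dev/ion", O_RDONLY);
 *	struct ion_allocation_data alloc_data = {
 *		.len = 4096,
 *		.align = 4096,
 *		.heap_id_mask = 1 << my_heap_id,
 *		.flags = ION_FLAG_CACHED,
 *	};
 *	struct ion_fd_data fd_data;
 *
 *	ioctl(ion_fd, ION_IOC_ALLOC, &alloc_data);
 *	fd_data.handle = alloc_data.handle;
 *	ioctl(ion_fd, ION_IOC_SHARE, &fd_data);
 *	// fd_data.fd is now a dma-buf fd that can be mmap()ed or passed on
 */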
static int ion_release(struct inode *inode, struct file *file)
{
	struct ion_client *client = file->private_data;

	pr_debug("%s: %d\n", __func__, __LINE__);
	ion_client_destroy(client);
	return 0;
}

static int ion_open(struct inode *inode, struct file *file)
{
	struct miscdevice *miscdev = file->private_data;
	struct ion_device *dev = container_of(miscdev, struct ion_device, dev);
	struct ion_client *client;

	pr_debug("%s: %d\n", __func__, __LINE__);
	client = ion_client_create(dev, "user");
	if (IS_ERR(client))
		return PTR_ERR(client);
	file->private_data = client;

	return 0;
}

static const struct file_operations ion_fops = {
	.owner = THIS_MODULE,
	.open = ion_open,
	.release = ion_release,
	.unlocked_ioctl = ion_ioctl,
};
static size_t ion_debug_heap_total(struct ion_client *client,
				   unsigned int id)
{
	size_t size = 0;
	struct rb_node *n;

	mutex_lock(&client->lock);
	for (n = rb_first(&client->handles); n; n = rb_next(n)) {
		struct ion_handle *handle = rb_entry(n,
						     struct ion_handle,
						     node);
		if (handle->buffer->heap->id == id)
			size += handle->buffer->size;
	}
	mutex_unlock(&client->lock);
	return size;
}
static int ion_debug_heap_show(struct seq_file *s, void *unused)
{
	struct ion_heap *heap = s->private;
	struct ion_device *dev = heap->dev;
	struct rb_node *n;
	size_t total_size = 0;
	size_t total_orphaned_size = 0;

	seq_printf(s, "%16s %16s %16s\n", "client", "pid", "size");
	seq_printf(s, "----------------------------------------------------\n");

	for (n = rb_first(&dev->clients); n; n = rb_next(n)) {
		struct ion_client *client = rb_entry(n, struct ion_client,
						     node);
		size_t size = ion_debug_heap_total(client, heap->id);

		if (!size)
			continue;
		if (client->task) {
			char task_comm[TASK_COMM_LEN];

			get_task_comm(task_comm, client->task);
			seq_printf(s, "%16s %16u %16zu\n", task_comm,
				   client->pid, size);
		} else {
			seq_printf(s, "%16s %16u %16zu\n", client->name,
				   client->pid, size);
		}
	}
	seq_printf(s, "----------------------------------------------------\n");
	seq_printf(s, "orphaned allocations (info is from last known client):\n");
	mutex_lock(&dev->buffer_lock);
	for (n = rb_first(&dev->buffers); n; n = rb_next(n)) {
		struct ion_buffer *buffer = rb_entry(n, struct ion_buffer,
						     node);
		if (buffer->heap->id != heap->id)
			continue;
		total_size += buffer->size;
		if (!buffer->handle_count) {
			seq_printf(s, "%16s %16u %16zu %d %d\n",
				   buffer->task_comm, buffer->pid,
				   buffer->size, buffer->kmap_cnt,
				   atomic_read(&buffer->ref.refcount));
			total_orphaned_size += buffer->size;
		}
	}
	mutex_unlock(&dev->buffer_lock);
	seq_printf(s, "----------------------------------------------------\n");
	seq_printf(s, "%16s %16zu\n", "total orphaned",
		   total_orphaned_size);
	seq_printf(s, "%16s %16zu\n", "total ", total_size);
	if (heap->flags & ION_HEAP_FLAG_DEFER_FREE)
		seq_printf(s, "%16s %16zu\n", "deferred free",
			   heap->free_list_size);
	seq_printf(s, "----------------------------------------------------\n");

	if (heap->debug_show)
		heap->debug_show(heap, s, unused);

	return 0;
}

static int ion_debug_heap_open(struct inode *inode, struct file *file)
{
	return single_open(file, ion_debug_heap_show, inode->i_private);
}

static const struct file_operations debug_heap_fops = {
	.open = ion_debug_heap_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
};
#ifdef DEBUG_HEAP_SHRINKER
static int debug_shrink_set(void *data, u64 val)
{
	struct ion_heap *heap = data;
	struct shrink_control sc;
	int objs;

	sc.gfp_mask = -1;
	sc.nr_to_scan = 0;

	if (!val)
		return 0;

	objs = heap->shrinker.shrink(&heap->shrinker, &sc);
	sc.nr_to_scan = objs;

	heap->shrinker.shrink(&heap->shrinker, &sc);
	return 0;
}

static int debug_shrink_get(void *data, u64 *val)
{
	struct ion_heap *heap = data;
	struct shrink_control sc;
	int objs;

	sc.gfp_mask = -1;
	sc.nr_to_scan = 0;

	objs = heap->shrinker.shrink(&heap->shrinker, &sc);
	*val = objs;
	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(debug_shrink_fops, debug_shrink_get,
			debug_shrink_set, "%llu\n");
#endif
void ion_device_add_heap(struct ion_device *dev, struct ion_heap *heap)
{
	if (!heap->ops->allocate || !heap->ops->free || !heap->ops->map_dma ||
	    !heap->ops->unmap_dma)
		pr_err("%s: can not add heap with invalid ops struct.\n",
		       __func__);

	if (heap->flags & ION_HEAP_FLAG_DEFER_FREE)
		ion_heap_init_deferred_free(heap);

	heap->dev = dev;
	down_write(&dev->lock);
	/*
	 * use negative heap->id to reverse the priority -- when traversing
	 * the list later attempt higher id numbers first
	 */
	plist_node_init(&heap->node, -heap->id);
	plist_add(&heap->node, &dev->heaps);
	debugfs_create_file(heap->name, 0664, dev->debug_root, heap,
			    &debug_heap_fops);
#ifdef DEBUG_HEAP_SHRINKER
	if (heap->shrinker.shrink) {
		char debug_name[64];

		snprintf(debug_name, 64, "%s_shrink", heap->name);
		debugfs_create_file(debug_name, 0644, dev->debug_root, heap,
				    &debug_shrink_fops);
	}
#endif
	up_write(&dev->lock);
}
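/*
 * Because each node is inserted with priority -heap->id, a device with
 * heaps of ids 0, 1 and 2 is walked 2, 1, 0 by plist_for_each_entry() in
 * ion_alloc(): higher ids are tried first.
 */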
struct ion_device *ion_device_create(long (*custom_ioctl)
				     (struct ion_client *client,
				      unsigned int cmd,
				      unsigned long arg))
{
	struct ion_device *idev;
	int ret;

	idev = kzalloc(sizeof(struct ion_device), GFP_KERNEL);
	if (!idev)
		return ERR_PTR(-ENOMEM);

	idev->dev.minor = MISC_DYNAMIC_MINOR;
	idev->dev.name = "ion";
	idev->dev.fops = &ion_fops;
	idev->dev.parent = NULL;
	ret = misc_register(&idev->dev);
	if (ret) {
		pr_err("ion: failed to register misc device.\n");
		kfree(idev);
		return ERR_PTR(ret);
	}

	idev->debug_root = debugfs_create_dir("ion", NULL);
	if (!idev->debug_root)
		pr_err("ion: failed to create debug files.\n");

	idev->custom_ioctl = custom_ioctl;
	idev->buffers = RB_ROOT;
	mutex_init(&idev->buffer_lock);
	init_rwsem(&idev->lock);
	plist_head_init(&idev->heaps);
	idev->clients = RB_ROOT;
	return idev;
}

void ion_device_destroy(struct ion_device *dev)
{
	misc_deregister(&dev->dev);
	/* XXX need to free the heaps and clients ? */
	kfree(dev);
}
void __init ion_reserve(struct ion_platform_data *data)
{
	int i;

	for (i = 0; i < data->nr; i++) {
		if (data->heaps[i].size == 0)
			continue;

		if (data->heaps[i].base == 0) {
			phys_addr_t paddr;

			paddr = memblock_alloc_base(data->heaps[i].size,
						    data->heaps[i].align,
						    MEMBLOCK_ALLOC_ANYWHERE);
			if (!paddr) {
				pr_err("%s: error allocating memblock for heap %d\n",
				       __func__, i);
				continue;
			}
			data->heaps[i].base = paddr;
		} else {
			int ret = memblock_reserve(data->heaps[i].base,
						   data->heaps[i].size);
			if (ret)
				pr_err("memblock reserve of %zx@%lx failed\n",
				       data->heaps[i].size,
				       data->heaps[i].base);
		}
		pr_info("%s: %s reserved base %lx size %zu\n", __func__,
			data->heaps[i].name,
			data->heaps[i].base,
			data->heaps[i].size);
	}
}
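/*
 * ion_reserve() is intended to run from a machine's early reserve hook,
 * before the page allocator claims the memory.  An illustrative sketch;
 * the heap table and sizes here are made-up values:
 *
 *	static struct ion_platform_heap my_heaps[] = {
 *		{
 *			.type = ION_HEAP_TYPE_CARVEOUT,
 *			.id = 1,
 *			.name = "carveout",
 *			.size = SZ_4M,	// base is picked by ion_reserve()
 *		},
 *	};
 *	static struct ion_platform_data my_ion_pdata = {
 *		.nr = ARRAY_SIZE(my_heaps),
 *		.heaps = my_heaps,
 *	};
 *
 *	void __init my_board_reserve(void)
 *	{
 *		ion_reserve(&my_ion_pdata);
 *	}
 */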