1 /*
2  *
3 * drivers/staging/android/ion/ion.c
4 *
5 * Copyright (C) 2011 Google, Inc.
6 *
7 * This software is licensed under the terms of the GNU General Public
8 * License version 2, as published by the Free Software Foundation, and
9 * may be copied, distributed, and modified under those terms.
10 *
11 * This program is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 * GNU General Public License for more details.
15 *
16 */
17
18 #include <linux/device.h>
19 #include <linux/file.h>
20 #include <linux/freezer.h>
21 #include <linux/fs.h>
22 #include <linux/anon_inodes.h>
23 #include <linux/kthread.h>
24 #include <linux/list.h>
25 #include <linux/memblock.h>
26 #include <linux/miscdevice.h>
27 #include <linux/export.h>
28 #include <linux/mm.h>
29 #include <linux/mm_types.h>
30 #include <linux/rbtree.h>
31 #include <linux/slab.h>
32 #include <linux/seq_file.h>
33 #include <linux/uaccess.h>
34 #include <linux/vmalloc.h>
35 #include <linux/debugfs.h>
36 #include <linux/dma-buf.h>
37 #include <linux/idr.h>
38
39 #include "ion.h"
40 #include "ion_priv.h"
41
42 /**
43 * struct ion_device - the metadata of the ion device node
44 * @dev: the actual misc device
45 * @buffers: an rb tree of all the existing buffers
46 * @buffer_lock: lock protecting the tree of buffers
47 * @lock: rwsem protecting the tree of heaps and clients
48 * @heaps:		priority-ordered list of all the heaps in the system
49 * @clients:		an rb tree of all the clients attached to this device
50 */
51 struct ion_device {
52 struct miscdevice dev;
53 struct rb_root buffers;
54 struct mutex buffer_lock;
55 struct rw_semaphore lock;
56 struct plist_head heaps;
57 long (*custom_ioctl) (struct ion_client *client, unsigned int cmd,
58 unsigned long arg);
59 struct rb_root clients;
60 struct dentry *debug_root;
61 };
62
63 /**
64 * struct ion_client - a process/hw block local address space
65 * @node: node in the tree of all clients
66 * @dev: backpointer to ion device
67 * @handles: an rb tree of all the handles in this client
68 * @idr: an idr space for allocating handle ids
69 * @lock: lock protecting the tree of handles
70 * @name: used for debugging
71 * @task: used for debugging
72 *
73 * A client represents a list of buffers this client may access.
74 * The mutex stored here is used to protect both the tree of handles
75 * and the handles themselves, and should be held while modifying either.
76 */
77 struct ion_client {
78 struct rb_node node;
79 struct ion_device *dev;
80 struct rb_root handles;
81 struct idr idr;
82 struct mutex lock;
83 const char *name;
84 struct task_struct *task;
85 pid_t pid;
86 struct dentry *debug_root;
87 };
88
89 /**
90 * ion_handle - a client local reference to a buffer
91 * @ref: reference count
92 * @client: back pointer to the client the buffer resides in
93 * @buffer: pointer to the buffer
94 * @node: node in the client's handle rbtree
95 * @kmap_cnt: count of times this client has mapped to kernel
96 * @id: client-unique id allocated by client->idr
97 *
98 * Modifications to node, kmap_cnt or mapping should be protected by the
99 * lock in the client. Other fields are never changed after initialization.
100 */
101 struct ion_handle {
102 struct kref ref;
103 struct ion_client *client;
104 struct ion_buffer *buffer;
105 struct rb_node node;
106 unsigned int kmap_cnt;
107 int id;
108 };
109
110 bool ion_buffer_fault_user_mappings(struct ion_buffer *buffer)
111 {
112 return ((buffer->flags & ION_FLAG_CACHED) &&
113 !(buffer->flags & ION_FLAG_CACHED_NEEDS_SYNC));
114 }
115
116 bool ion_buffer_cached(struct ion_buffer *buffer)
117 {
118 return !!(buffer->flags & ION_FLAG_CACHED);
119 }
120
121 static inline struct page *ion_buffer_page(struct page *page)
122 {
123 return (struct page *)((unsigned long)page & ~(1UL));
124 }
125
126 static inline bool ion_buffer_page_is_dirty(struct page *page)
127 {
128 return !!((unsigned long)page & 1UL);
129 }
130
131 static inline void ion_buffer_page_dirty(struct page **page)
132 {
133 *page = (struct page *)((unsigned long)(*page) | 1UL);
134 }
135
136 static inline void ion_buffer_page_clean(struct page **page)
137 {
138 *page = (struct page *)((unsigned long)(*page) & ~(1UL));
139 }
140
141 /* this function should only be called while dev->buffer_lock is held */
142 static void ion_buffer_add(struct ion_device *dev,
143 struct ion_buffer *buffer)
144 {
145 struct rb_node **p = &dev->buffers.rb_node;
146 struct rb_node *parent = NULL;
147 struct ion_buffer *entry;
148
149 while (*p) {
150 parent = *p;
151 entry = rb_entry(parent, struct ion_buffer, node);
152
153 if (buffer < entry) {
154 p = &(*p)->rb_left;
155 } else if (buffer > entry) {
156 p = &(*p)->rb_right;
157 } else {
158 pr_err("%s: buffer already found.\n", __func__);
159 BUG();
160 }
161 }
162
163 rb_link_node(&buffer->node, parent, p);
164 rb_insert_color(&buffer->node, &dev->buffers);
165 }
166
167 /* this function should only be called while dev->lock is held */
168 static struct ion_buffer *ion_buffer_create(struct ion_heap *heap,
169 struct ion_device *dev,
170 unsigned long len,
171 unsigned long align,
172 unsigned long flags)
173 {
174 struct ion_buffer *buffer;
175 struct sg_table *table;
176 struct scatterlist *sg;
177 int i, ret;
178
179 buffer = kzalloc(sizeof(struct ion_buffer), GFP_KERNEL);
180 if (!buffer)
181 return ERR_PTR(-ENOMEM);
182
183 buffer->heap = heap;
184 buffer->flags = flags;
185 kref_init(&buffer->ref);
186
187 ret = heap->ops->allocate(heap, buffer, len, align, flags);
188
189 if (ret) {
190 if (!(heap->flags & ION_HEAP_FLAG_DEFER_FREE))
191 goto err2;
192
193 ion_heap_freelist_drain(heap, 0);
194 ret = heap->ops->allocate(heap, buffer, len, align,
195 flags);
196 if (ret)
197 goto err2;
198 }
199
200 buffer->dev = dev;
201 buffer->size = len;
202
203 table = heap->ops->map_dma(heap, buffer);
204 if (WARN_ONCE(table == NULL, "heap->ops->map_dma should return ERR_PTR on error"))
205 table = ERR_PTR(-EINVAL);
206 if (IS_ERR(table)) {
207 heap->ops->free(buffer);
208 kfree(buffer);
209 return ERR_CAST(table);
210 }
211 buffer->sg_table = table;
212 if (ion_buffer_fault_user_mappings(buffer)) {
213 int num_pages = PAGE_ALIGN(buffer->size) / PAGE_SIZE;
214 struct scatterlist *sg;
215 int i, j, k = 0;
216
217 buffer->pages = vmalloc(sizeof(struct page *) * num_pages);
218 if (!buffer->pages) {
219 ret = -ENOMEM;
220 goto err;
221 }
222
223 for_each_sg(table->sgl, sg, table->nents, i) {
224 struct page *page = sg_page(sg);
225
226 for (j = 0; j < sg_dma_len(sg) / PAGE_SIZE; j++)
227 buffer->pages[k++] = page++;
228 }
232 }
233
236 INIT_LIST_HEAD(&buffer->vmas);
237 mutex_init(&buffer->lock);
238 /* this will set up dma addresses for the sglist -- it is not
239 technically correct as per the dma api -- a specific
240 device isn't really taking ownership here. However, in practice on
241 our systems the only dma_address space is physical addresses.
242 Additionally, we can't afford the overhead of invalidating every
243 allocation via dma_map_sg. The implicit contract here is that
244 memory coming from the heaps is ready for dma, i.e. if it has a
245 cached mapping that mapping has been invalidated */
246 for_each_sg(buffer->sg_table->sgl, sg, buffer->sg_table->nents, i)
247 sg_dma_address(sg) = sg_phys(sg);
248 mutex_lock(&dev->buffer_lock);
249 ion_buffer_add(dev, buffer);
250 mutex_unlock(&dev->buffer_lock);
251 return buffer;
252
253 err:
254 heap->ops->unmap_dma(heap, buffer);
255 heap->ops->free(buffer);
257 if (buffer->pages)
258 vfree(buffer->pages);
259 err2:
260 kfree(buffer);
261 return ERR_PTR(ret);
262 }
263
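/*
 * Free a buffer's backing memory and metadata.  Called from the kref
 * release path, or later from the heap's deferred-free path when the heap
 * uses ION_HEAP_FLAG_DEFER_FREE.
 */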
264 void ion_buffer_destroy(struct ion_buffer *buffer)
265 {
266 if (WARN_ON(buffer->kmap_cnt > 0))
267 buffer->heap->ops->unmap_kernel(buffer->heap, buffer);
268 buffer->heap->ops->unmap_dma(buffer->heap, buffer);
269 buffer->heap->ops->free(buffer);
270 if (buffer->pages)
271 vfree(buffer->pages);
272 kfree(buffer);
273 }
274
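/*
 * kref release callback: remove the buffer from the device's rb tree and
 * either queue it on the heap's free list or destroy it immediately.
 */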
275 static void _ion_buffer_destroy(struct kref *kref)
276 {
277 struct ion_buffer *buffer = container_of(kref, struct ion_buffer, ref);
278 struct ion_heap *heap = buffer->heap;
279 struct ion_device *dev = buffer->dev;
280
281 mutex_lock(&dev->buffer_lock);
282 rb_erase(&buffer->node, &dev->buffers);
283 mutex_unlock(&dev->buffer_lock);
284
285 if (heap->flags & ION_HEAP_FLAG_DEFER_FREE)
286 ion_heap_freelist_add(heap, buffer);
287 else
288 ion_buffer_destroy(buffer);
289 }
290
291 static void ion_buffer_get(struct ion_buffer *buffer)
292 {
293 kref_get(&buffer->ref);
294 }
295
296 static int ion_buffer_put(struct ion_buffer *buffer)
297 {
298 return kref_put(&buffer->ref, _ion_buffer_destroy);
299 }
300
301 static void ion_buffer_add_to_handle(struct ion_buffer *buffer)
302 {
303 mutex_lock(&buffer->lock);
304 buffer->handle_count++;
305 mutex_unlock(&buffer->lock);
306 }
307
308 static void ion_buffer_remove_from_handle(struct ion_buffer *buffer)
309 {
310 /*
311 * when a buffer is removed from a handle, if it is not in
312 * any other handles, copy the taskcomm and the pid of the
313 * process it's being removed from into the buffer. At this
314 * point there will be no way to track what processes this buffer is
315 * being used by; it only exists as a dma_buf file descriptor.
316 * The taskcomm and pid can provide a debug hint as to where this fd
317 * is in the system.
318 */
319 mutex_lock(&buffer->lock);
320 buffer->handle_count--;
321 BUG_ON(buffer->handle_count < 0);
322 if (!buffer->handle_count) {
323 struct task_struct *task;
324
325 task = current->group_leader;
326 get_task_comm(buffer->task_comm, task);
327 buffer->pid = task_pid_nr(task);
328 }
329 mutex_unlock(&buffer->lock);
330 }
331
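/*
 * Create a new handle referencing @buffer for @client.  Takes a reference
 * on the buffer; the handle id and rb tree linkage are filled in later by
 * ion_handle_add().
 */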
332 static struct ion_handle *ion_handle_create(struct ion_client *client,
333 struct ion_buffer *buffer)
334 {
335 struct ion_handle *handle;
336
337 handle = kzalloc(sizeof(struct ion_handle), GFP_KERNEL);
338 if (!handle)
339 return ERR_PTR(-ENOMEM);
340 kref_init(&handle->ref);
341 RB_CLEAR_NODE(&handle->node);
342 handle->client = client;
343 ion_buffer_get(buffer);
344 ion_buffer_add_to_handle(buffer);
345 handle->buffer = buffer;
346
347 return handle;
348 }
349
350 static void ion_handle_kmap_put(struct ion_handle *);
351
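/*
 * kref release callback for a handle: drop any kernel mappings, remove the
 * handle from the client's idr and rb tree, and release its buffer
 * reference.
 */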
352 static void ion_handle_destroy(struct kref *kref)
353 {
354 struct ion_handle *handle = container_of(kref, struct ion_handle, ref);
355 struct ion_client *client = handle->client;
356 struct ion_buffer *buffer = handle->buffer;
357
358 mutex_lock(&buffer->lock);
359 while (handle->kmap_cnt)
360 ion_handle_kmap_put(handle);
361 mutex_unlock(&buffer->lock);
362
363 idr_remove(&client->idr, handle->id);
364 if (!RB_EMPTY_NODE(&handle->node))
365 rb_erase(&handle->node, &client->handles);
366
367 ion_buffer_remove_from_handle(buffer);
368 ion_buffer_put(buffer);
369
370 kfree(handle);
371 }
372
373 struct ion_buffer *ion_handle_buffer(struct ion_handle *handle)
374 {
375 return handle->buffer;
376 }
377
378 static void ion_handle_get(struct ion_handle *handle)
379 {
380 kref_get(&handle->ref);
381 }
382
383 static int ion_handle_put(struct ion_handle *handle)
384 {
385 return kref_put(&handle->ref, ion_handle_destroy);
386 }
387
388 static struct ion_handle *ion_handle_lookup(struct ion_client *client,
389 struct ion_buffer *buffer)
390 {
391 struct rb_node *n;
392
393 for (n = rb_first(&client->handles); n; n = rb_next(n)) {
394 struct ion_handle *handle = rb_entry(n, struct ion_handle,
395 node);
396 if (handle->buffer == buffer)
397 return handle;
398 }
399 return ERR_PTR(-EINVAL);
400 }
401
402 static struct ion_handle *ion_uhandle_get(struct ion_client *client, int id)
403 {
404 return idr_find(&client->idr, id);
405 }
406
407 static bool ion_handle_validate(struct ion_client *client, struct ion_handle *handle)
408 {
409 return (ion_uhandle_get(client, handle->id) == handle);
410 }
411
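/*
 * Allocate an id for the handle and insert it into the client's handle
 * rb tree.  Must be called with client->lock held.
 */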
412 static int ion_handle_add(struct ion_client *client, struct ion_handle *handle)
413 {
414 int rc;
415 struct rb_node **p = &client->handles.rb_node;
416 struct rb_node *parent = NULL;
417 struct ion_handle *entry;
418
419 do {
420 int id;
421 rc = idr_pre_get(&client->idr, GFP_KERNEL);
422 if (!rc)
423 return -ENOMEM;
424 rc = idr_get_new(&client->idr, handle, &id);
425 handle->id = id;
426 } while (rc == -EAGAIN);
427
428 if (rc < 0)
429 return rc;
430
431 while (*p) {
432 parent = *p;
433 entry = rb_entry(parent, struct ion_handle, node);
434
435 if (handle < entry)
436 p = &(*p)->rb_left;
437 else if (handle > entry)
438 p = &(*p)->rb_right;
439 else
440 WARN(1, "%s: handle already found.\n", __func__);
441 }
442
443 rb_link_node(&handle->node, parent, p);
444 rb_insert_color(&handle->node, &client->handles);
445
446 return 0;
447 }
448
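/*
 * Allocate a buffer of at least @len bytes (rounded up to a page) from the
 * first heap in @heap_id_mask that can satisfy the request, and return a
 * handle to it.
 */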
449 struct ion_handle *ion_alloc(struct ion_client *client, size_t len,
450 size_t align, unsigned int heap_id_mask,
451 unsigned int flags)
452 {
453 struct ion_handle *handle;
454 struct ion_device *dev = client->dev;
455 struct ion_buffer *buffer = NULL;
456 struct ion_heap *heap;
457 int ret;
458
459 pr_debug("%s: len %zu align %zu heap_id_mask %u flags %x\n", __func__,
460 len, align, heap_id_mask, flags);
461 /*
462 * traverse the list of heaps available in this system in priority
463 * order. If the heap's id is present in the caller's heap_id_mask,
464 * attempt to allocate from it. Repeat until an allocation has
465 * succeeded or all heaps have been tried
466 */
467 if (WARN_ON(!len))
468 return ERR_PTR(-EINVAL);
469
470 len = PAGE_ALIGN(len);
471
472 down_read(&dev->lock);
473 plist_for_each_entry(heap, &dev->heaps, node) {
474 /* if the caller didn't specify this heap id */
475 if (!((1 << heap->id) & heap_id_mask))
476 continue;
477 buffer = ion_buffer_create(heap, dev, len, align, flags);
478 if (!IS_ERR(buffer))
479 break;
480 }
481 up_read(&dev->lock);
482
483 if (buffer == NULL)
484 return ERR_PTR(-ENODEV);
485
486 if (IS_ERR(buffer))
487 return ERR_CAST(buffer);
488
489 handle = ion_handle_create(client, buffer);
490
491 /*
492 * ion_buffer_create will create a buffer with a ref_cnt of 1,
493 * and ion_handle_create will take a second reference, drop one here
494 */
495 ion_buffer_put(buffer);
496
497 if (IS_ERR(handle))
498 return handle;
499
500 mutex_lock(&client->lock);
501 ret = ion_handle_add(client, handle);
502 if (ret) {
503 ion_handle_put(handle);
504 handle = ERR_PTR(ret);
505 }
506 mutex_unlock(&client->lock);
507
508 return handle;
509 }
510 EXPORT_SYMBOL(ion_alloc);
511
512 void ion_free(struct ion_client *client, struct ion_handle *handle)
513 {
514 bool valid_handle;
515
516 BUG_ON(client != handle->client);
517
518 mutex_lock(&client->lock);
519 valid_handle = ion_handle_validate(client, handle);
520
521 if (!valid_handle) {
522 WARN(1, "%s: invalid handle passed to free.\n", __func__);
523 mutex_unlock(&client->lock);
524 return;
525 }
526 ion_handle_put(handle);
527 mutex_unlock(&client->lock);
528 }
529 EXPORT_SYMBOL(ion_free);
530
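/*
 * Return the physical address and length of the buffer backing @handle.
 * Only heaps that implement the phys op (typically physically contiguous
 * heaps) support this.
 */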
531 int ion_phys(struct ion_client *client, struct ion_handle *handle,
532 ion_phys_addr_t *addr, size_t *len)
533 {
534 struct ion_buffer *buffer;
535 int ret;
536
537 mutex_lock(&client->lock);
538 if (!ion_handle_validate(client, handle)) {
539 mutex_unlock(&client->lock);
540 return -EINVAL;
541 }
542
543 buffer = handle->buffer;
544
545 if (!buffer->heap->ops->phys) {
546 pr_err("%s: ion_phys is not implemented by this heap.\n",
547 __func__);
548 mutex_unlock(&client->lock);
549 return -ENODEV;
550 }
551 mutex_unlock(&client->lock);
552 ret = buffer->heap->ops->phys(buffer->heap, buffer, addr, len);
553 return ret;
554 }
555 EXPORT_SYMBOL(ion_phys);
556
557 static void *ion_buffer_kmap_get(struct ion_buffer *buffer)
558 {
559 void *vaddr;
560
561 if (buffer->kmap_cnt) {
562 buffer->kmap_cnt++;
563 return buffer->vaddr;
564 }
565 vaddr = buffer->heap->ops->map_kernel(buffer->heap, buffer);
566 if (WARN_ONCE(vaddr == NULL, "heap->ops->map_kernel should return ERR_PTR on error"))
567 return ERR_PTR(-EINVAL);
568 if (IS_ERR(vaddr))
569 return vaddr;
570 buffer->vaddr = vaddr;
571 buffer->kmap_cnt++;
572 return vaddr;
573 }
574
575 static void *ion_handle_kmap_get(struct ion_handle *handle)
576 {
577 struct ion_buffer *buffer = handle->buffer;
578 void *vaddr;
579
580 if (handle->kmap_cnt) {
581 handle->kmap_cnt++;
582 return buffer->vaddr;
583 }
584 vaddr = ion_buffer_kmap_get(buffer);
585 if (IS_ERR(vaddr))
586 return vaddr;
587 handle->kmap_cnt++;
588 return vaddr;
589 }
590
591 static void ion_buffer_kmap_put(struct ion_buffer *buffer)
592 {
593 buffer->kmap_cnt--;
594 if (!buffer->kmap_cnt) {
595 buffer->heap->ops->unmap_kernel(buffer->heap, buffer);
596 buffer->vaddr = NULL;
597 }
598 }
599
600 static void ion_handle_kmap_put(struct ion_handle *handle)
601 {
602 struct ion_buffer *buffer = handle->buffer;
603
604 handle->kmap_cnt--;
605 if (!handle->kmap_cnt)
606 ion_buffer_kmap_put(buffer);
607 }
608
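/*
 * Map the buffer backing @handle into the kernel and return its virtual
 * address.  Mappings are refcounted per handle and per buffer.
 */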
609 void *ion_map_kernel(struct ion_client *client, struct ion_handle *handle)
610 {
611 struct ion_buffer *buffer;
612 void *vaddr;
613
614 mutex_lock(&client->lock);
615 if (!ion_handle_validate(client, handle)) {
616 pr_err("%s: invalid handle passed to map_kernel.\n",
617 __func__);
618 mutex_unlock(&client->lock);
619 return ERR_PTR(-EINVAL);
620 }
621
622 buffer = handle->buffer;
623
624 if (!handle->buffer->heap->ops->map_kernel) {
625 pr_err("%s: map_kernel is not implemented by this heap.\n",
626 __func__);
627 mutex_unlock(&client->lock);
628 return ERR_PTR(-ENODEV);
629 }
630
631 mutex_lock(&buffer->lock);
632 vaddr = ion_handle_kmap_get(handle);
633 mutex_unlock(&buffer->lock);
634 mutex_unlock(&client->lock);
635 return vaddr;
636 }
637 EXPORT_SYMBOL(ion_map_kernel);
638
639 void ion_unmap_kernel(struct ion_client *client, struct ion_handle *handle)
640 {
641 struct ion_buffer *buffer;
642
643 mutex_lock(&client->lock);
644 buffer = handle->buffer;
645 mutex_lock(&buffer->lock);
646 ion_handle_kmap_put(handle);
647 mutex_unlock(&buffer->lock);
648 mutex_unlock(&client->lock);
649 }
650 EXPORT_SYMBOL(ion_unmap_kernel);
651
652 static int ion_debug_client_show(struct seq_file *s, void *unused)
653 {
654 struct ion_client *client = s->private;
655 struct rb_node *n;
656 size_t sizes[ION_NUM_HEAP_IDS] = {0};
657 const char *names[ION_NUM_HEAP_IDS] = {0};
658 int i;
659
660 mutex_lock(&client->lock);
661 for (n = rb_first(&client->handles); n; n = rb_next(n)) {
662 struct ion_handle *handle = rb_entry(n, struct ion_handle,
663 node);
664 unsigned int id = handle->buffer->heap->id;
665
666 if (!names[id])
667 names[id] = handle->buffer->heap->name;
668 sizes[id] += handle->buffer->size;
669 }
670 mutex_unlock(&client->lock);
671
672 seq_printf(s, "%16.16s: %16.16s\n", "heap_name", "size_in_bytes");
673 for (i = 0; i < ION_NUM_HEAP_IDS; i++) {
674 if (!names[i])
675 continue;
676 seq_printf(s, "%16.16s: %16zu\n", names[i], sizes[i]);
677 }
678 return 0;
679 }
680
681 static int ion_debug_client_open(struct inode *inode, struct file *file)
682 {
683 return single_open(file, ion_debug_client_show, inode->i_private);
684 }
685
686 static const struct file_operations debug_client_fops = {
687 .open = ion_debug_client_open,
688 .read = seq_read,
689 .llseek = seq_lseek,
690 .release = single_release,
691 };
692
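/*
 * Create a client for the given ion device.  For user processes the
 * current task is recorded so allocations can be attributed in debugfs.
 */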
693 struct ion_client *ion_client_create(struct ion_device *dev,
694 const char *name)
695 {
696 struct ion_client *client;
697 struct task_struct *task;
698 struct rb_node **p;
699 struct rb_node *parent = NULL;
700 struct ion_client *entry;
701 char debug_name[64];
702 pid_t pid;
703
704 get_task_struct(current->group_leader);
705 task_lock(current->group_leader);
706 pid = task_pid_nr(current->group_leader);
707 /* don't bother to store task struct for kernel threads,
708 they can't be killed anyway */
709 if (current->group_leader->flags & PF_KTHREAD) {
710 put_task_struct(current->group_leader);
711 task = NULL;
712 } else {
713 task = current->group_leader;
714 }
715 task_unlock(current->group_leader);
716
717 client = kzalloc(sizeof(struct ion_client), GFP_KERNEL);
718 if (!client) {
719 if (task)
720 put_task_struct(current->group_leader);
721 return ERR_PTR(-ENOMEM);
722 }
723
724 client->dev = dev;
725 client->handles = RB_ROOT;
726 idr_init(&client->idr);
727 mutex_init(&client->lock);
728 client->name = name;
729 client->task = task;
730 client->pid = pid;
731
732 down_write(&dev->lock);
733 p = &dev->clients.rb_node;
734 while (*p) {
735 parent = *p;
736 entry = rb_entry(parent, struct ion_client, node);
737
738 if (client < entry)
739 p = &(*p)->rb_left;
740 else if (client > entry)
741 p = &(*p)->rb_right;
742 }
743 rb_link_node(&client->node, parent, p);
744 rb_insert_color(&client->node, &dev->clients);
745
746 snprintf(debug_name, 64, "%u", client->pid);
747 client->debug_root = debugfs_create_file(debug_name, 0664,
748 dev->debug_root, client,
749 &debug_client_fops);
750 up_write(&dev->lock);
751
752 return client;
753 }
754 EXPORT_SYMBOL(ion_client_create);
755
756 void ion_client_destroy(struct ion_client *client)
757 {
758 struct ion_device *dev = client->dev;
759 struct rb_node *n;
760
761 pr_debug("%s: %d\n", __func__, __LINE__);
762 while ((n = rb_first(&client->handles))) {
763 struct ion_handle *handle = rb_entry(n, struct ion_handle,
764 node);
765 ion_handle_destroy(&handle->ref);
766 }
767
768 idr_remove_all(&client->idr);
769 idr_destroy(&client->idr);
770
771 down_write(&dev->lock);
772 if (client->task)
773 put_task_struct(client->task);
774 rb_erase(&client->node, &dev->clients);
775 debugfs_remove_recursive(client->debug_root);
776 up_write(&dev->lock);
777
778 kfree(client);
779 }
780 EXPORT_SYMBOL(ion_client_destroy);
781
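/*
 * Return the sg_table describing the pages backing @handle.  The table is
 * owned by the buffer and must not be freed by the caller.
 */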
782 struct sg_table *ion_sg_table(struct ion_client *client,
783 struct ion_handle *handle)
784 {
785 struct ion_buffer *buffer;
786 struct sg_table *table;
787
788 mutex_lock(&client->lock);
789 if (!ion_handle_validate(client, handle)) {
790 pr_err("%s: invalid handle passed to map_dma.\n",
791 __func__);
792 mutex_unlock(&client->lock);
793 return ERR_PTR(-EINVAL);
794 }
795 buffer = handle->buffer;
796 table = buffer->sg_table;
797 mutex_unlock(&client->lock);
798 return table;
799 }
800 EXPORT_SYMBOL(ion_sg_table);
801
802 static void ion_buffer_sync_for_device(struct ion_buffer *buffer,
803 struct device *dev,
804 enum dma_data_direction direction);
805
806 static struct sg_table *ion_map_dma_buf(struct dma_buf_attachment *attachment,
807 enum dma_data_direction direction)
808 {
809 struct dma_buf *dmabuf = attachment->dmabuf;
810 struct ion_buffer *buffer = dmabuf->priv;
811
812 ion_buffer_sync_for_device(buffer, attachment->dev, direction);
813 return buffer->sg_table;
814 }
815
816 static void ion_unmap_dma_buf(struct dma_buf_attachment *attachment,
817 struct sg_table *table,
818 enum dma_data_direction direction)
819 {
820 }
821
822 struct ion_vma_list {
823 struct list_head list;
824 struct vm_area_struct *vma;
825 };
826
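/*
 * Write back any pages a user has dirtied through a faulting mapping and
 * then zap the userspace mappings so future accesses fault in clean pages.
 * Only buffers that use faulting user mappings need this.
 */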
827 static void ion_buffer_sync_for_device(struct ion_buffer *buffer,
828 struct device *dev,
829 enum dma_data_direction dir)
830 {
831 struct ion_vma_list *vma_list;
832 int pages = PAGE_ALIGN(buffer->size) / PAGE_SIZE;
833 int i;
834
835 pr_debug("%s: syncing for device %s\n", __func__,
836 dev ? dev_name(dev) : "null");
837
838 if (!ion_buffer_fault_user_mappings(buffer))
839 return;
840
841 mutex_lock(&buffer->lock);
842 for (i = 0; i < pages; i++) {
843 struct page *page = buffer->pages[i];
844
845 if (ion_buffer_page_is_dirty(page))
846 __dma_page_cpu_to_dev(page, 0, PAGE_SIZE, dir);
847 ion_buffer_page_clean(buffer->pages + i);
848 }
849 list_for_each_entry(vma_list, &buffer->vmas, list) {
850 struct vm_area_struct *vma = vma_list->vma;
851
852 zap_page_range(vma, vma->vm_start, vma->vm_end - vma->vm_start,
853 NULL);
854 }
855 mutex_unlock(&buffer->lock);
856 }
857
858 int ion_vm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
859 {
860 struct ion_buffer *buffer = vma->vm_private_data;
861 int ret;
862
863 mutex_lock(&buffer->lock);
864 BUG_ON(!buffer->pages || !buffer->pages[vmf->pgoff]);
865 ion_buffer_page_dirty(buffer->pages + vmf->pgoff);
866
867 ret = vm_insert_page(vma, (unsigned long)vmf->virtual_address,
868 ion_buffer_page(buffer->pages[vmf->pgoff]));
869 mutex_unlock(&buffer->lock);
870 if (ret)
871 return VM_FAULT_ERROR;
872
873 return VM_FAULT_NOPAGE;
874 }
875
876 static void ion_vm_open(struct vm_area_struct *vma)
877 {
878 struct ion_buffer *buffer = vma->vm_private_data;
879 struct ion_vma_list *vma_list;
880
881 vma_list = kmalloc(sizeof(struct ion_vma_list), GFP_KERNEL);
882 if (!vma_list)
883 return;
884 vma_list->vma = vma;
885 mutex_lock(&buffer->lock);
886 list_add(&vma_list->list, &buffer->vmas);
887 mutex_unlock(&buffer->lock);
888 pr_debug("%s: adding %p\n", __func__, vma);
889 }
890
891 static void ion_vm_close(struct vm_area_struct *vma)
892 {
893 struct ion_buffer *buffer = vma->vm_private_data;
894 struct ion_vma_list *vma_list, *tmp;
895
896 pr_debug("%s\n", __func__);
897 mutex_lock(&buffer->lock);
898 list_for_each_entry_safe(vma_list, tmp, &buffer->vmas, list) {
899 if (vma_list->vma != vma)
900 continue;
901 list_del(&vma_list->list);
902 kfree(vma_list);
903 pr_debug("%s: deleting %p\n", __func__, vma);
904 break;
905 }
906 mutex_unlock(&buffer->lock);
907 }
908
909 struct vm_operations_struct ion_vma_ops = {
910 .open = ion_vm_open,
911 .close = ion_vm_close,
912 .fault = ion_vm_fault,
913 };
914
915 static int ion_mmap(struct dma_buf *dmabuf, struct vm_area_struct *vma)
916 {
917 struct ion_buffer *buffer = dmabuf->priv;
918 int ret = 0;
919
920 if (!buffer->heap->ops->map_user) {
921 pr_err("%s: this heap does not define a method for mapping to userspace\n",
922        __func__);
923 return -EINVAL;
924 }
925
926 if (ion_buffer_fault_user_mappings(buffer)) {
927 vma->vm_private_data = buffer;
928 vma->vm_ops = &ion_vma_ops;
929 ion_vm_open(vma);
930 return 0;
931 }
932
933 if (!(buffer->flags & ION_FLAG_CACHED))
934 vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot);
935
936 mutex_lock(&buffer->lock);
937 /* now map it to userspace */
938 ret = buffer->heap->ops->map_user(buffer->heap, buffer, vma);
939 mutex_unlock(&buffer->lock);
940
941 if (ret)
942 pr_err("%s: failure mapping buffer to userspace\n",
943 __func__);
944
945 return ret;
946 }
947
948 static void ion_dma_buf_release(struct dma_buf *dmabuf)
949 {
950 struct ion_buffer *buffer = dmabuf->priv;
951 ion_buffer_put(buffer);
952 }
953
954 static void *ion_dma_buf_kmap(struct dma_buf *dmabuf, unsigned long offset)
955 {
956 struct ion_buffer *buffer = dmabuf->priv;
957 return buffer->vaddr + offset * PAGE_SIZE;
958 }
959
960 static void ion_dma_buf_kunmap(struct dma_buf *dmabuf, unsigned long offset,
961 void *ptr)
962 {
963 return;
964 }
965
966 static int ion_dma_buf_begin_cpu_access(struct dma_buf *dmabuf, size_t start,
967 size_t len,
968 enum dma_data_direction direction)
969 {
970 struct ion_buffer *buffer = dmabuf->priv;
971 void *vaddr;
972
973 if (!buffer->heap->ops->map_kernel) {
974 pr_err("%s: map kernel is not implemented by this heap.\n",
975 __func__);
976 return -ENODEV;
977 }
978
979 mutex_lock(&buffer->lock);
980 vaddr = ion_buffer_kmap_get(buffer);
981 mutex_unlock(&buffer->lock);
982 if (IS_ERR(vaddr))
983 return PTR_ERR(vaddr);
984 return 0;
985 }
986
987 static void ion_dma_buf_end_cpu_access(struct dma_buf *dmabuf, size_t start,
988 size_t len,
989 enum dma_data_direction direction)
990 {
991 struct ion_buffer *buffer = dmabuf->priv;
992
993 mutex_lock(&buffer->lock);
994 ion_buffer_kmap_put(buffer);
995 mutex_unlock(&buffer->lock);
996 }
997
998 struct dma_buf_ops dma_buf_ops = {
999 .map_dma_buf = ion_map_dma_buf,
1000 .unmap_dma_buf = ion_unmap_dma_buf,
1001 .mmap = ion_mmap,
1002 .release = ion_dma_buf_release,
1003 .begin_cpu_access = ion_dma_buf_begin_cpu_access,
1004 .end_cpu_access = ion_dma_buf_end_cpu_access,
1005 .kmap_atomic = ion_dma_buf_kmap,
1006 .kunmap_atomic = ion_dma_buf_kunmap,
1007 .kmap = ion_dma_buf_kmap,
1008 .kunmap = ion_dma_buf_kunmap,
1009 };
1010
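/*
 * Wrap the buffer backing @handle in a dma_buf so it can be shared with
 * other clients or drivers.  The dma_buf holds its own buffer reference.
 */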
1011 struct dma_buf *ion_share_dma_buf(struct ion_client *client,
1012 struct ion_handle *handle)
1013 {
1014 struct ion_buffer *buffer;
1015 struct dma_buf *dmabuf;
1016 bool valid_handle;
1017
1018 mutex_lock(&client->lock);
1019 valid_handle = ion_handle_validate(client, handle);
1020 mutex_unlock(&client->lock);
1021 if (!valid_handle) {
1022 WARN(1, "%s: invalid handle passed to share.\n", __func__);
1023 return ERR_PTR(-EINVAL);
1024 }
1025
1026 buffer = handle->buffer;
1027 ion_buffer_get(buffer);
1028 dmabuf = dma_buf_export(buffer, &dma_buf_ops, buffer->size, O_RDWR);
1029 if (IS_ERR(dmabuf)) {
1030 ion_buffer_put(buffer);
1031 return dmabuf;
1032 }
1033
1034 return dmabuf;
1035 }
1036 EXPORT_SYMBOL(ion_share_dma_buf);
1037
1038 int ion_share_dma_buf_fd(struct ion_client *client, struct ion_handle *handle)
1039 {
1040 struct dma_buf *dmabuf;
1041 int fd;
1042
1043 dmabuf = ion_share_dma_buf(client, handle);
1044 if (IS_ERR(dmabuf))
1045 return PTR_ERR(dmabuf);
1046
1047 fd = dma_buf_fd(dmabuf, O_CLOEXEC);
1048 if (fd < 0)
1049 dma_buf_put(dmabuf);
1050
1051 return fd;
1052 }
1053 EXPORT_SYMBOL(ion_share_dma_buf_fd);
1054
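/*
 * Turn a dma_buf fd that was exported by ion back into a handle for
 * @client, reusing an existing handle for the buffer if there is one.
 */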
1055 struct ion_handle *ion_import_dma_buf(struct ion_client *client, int fd)
1056 {
1057 struct dma_buf *dmabuf;
1058 struct ion_buffer *buffer;
1059 struct ion_handle *handle;
1060 int ret;
1061
1062 dmabuf = dma_buf_get(fd);
1063 if (IS_ERR(dmabuf))
1064 return ERR_CAST(dmabuf);
1065 /* if this memory came from ion */
1066
1067 if (dmabuf->ops != &dma_buf_ops) {
1068 pr_err("%s: can not import dmabuf from another exporter\n",
1069 __func__);
1070 dma_buf_put(dmabuf);
1071 return ERR_PTR(-EINVAL);
1072 }
1073 buffer = dmabuf->priv;
1074
1075 mutex_lock(&client->lock);
1076 /* if a handle exists for this buffer just take a reference to it */
1077 handle = ion_handle_lookup(client, buffer);
1078 if (!IS_ERR(handle)) {
1079 ion_handle_get(handle);
1080 goto end;
1081 }
1082 handle = ion_handle_create(client, buffer);
1083 if (IS_ERR(handle))
1084 goto end;
1085 ret = ion_handle_add(client, handle);
1086 if (ret) {
1087 ion_handle_put(handle);
1088 handle = ERR_PTR(ret);
1089 }
1090 end:
1091 mutex_unlock(&client->lock);
1092 dma_buf_put(dmabuf);
1093 return handle;
1094 }
1095 EXPORT_SYMBOL(ion_import_dma_buf);
1096
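/* Sync the pages behind an ion dma_buf fd for device access (backs ION_IOC_SYNC). */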
1097 static int ion_sync_for_device(struct ion_client *client, int fd)
1098 {
1099 struct dma_buf *dmabuf;
1100 struct ion_buffer *buffer;
1101
1102 dmabuf = dma_buf_get(fd);
1103 if (IS_ERR(dmabuf))
1104 return PTR_ERR(dmabuf);
1105
1106 /* if this memory came from ion */
1107 if (dmabuf->ops != &dma_buf_ops) {
1108 pr_err("%s: can not sync dmabuf from another exporter\n",
1109 __func__);
1110 dma_buf_put(dmabuf);
1111 return -EINVAL;
1112 }
1113 buffer = dmabuf->priv;
1114
1115 dma_sync_sg_for_device(NULL, buffer->sg_table->sgl,
1116 buffer->sg_table->nents, DMA_BIDIRECTIONAL);
1117 dma_buf_put(dmabuf);
1118 return 0;
1119 }
1120
1121 static long ion_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
1122 {
1123 struct ion_client *client = filp->private_data;
1124
1125 switch (cmd) {
1126 case ION_IOC_ALLOC:
1127 {
1128 struct ion_allocation_data data;
1129 struct ion_handle *handle;
1130
1131 if (copy_from_user(&data, (void __user *)arg, sizeof(data)))
1132 return -EFAULT;
1133 handle = ion_alloc(client, data.len, data.align,
1134 data.heap_id_mask, data.flags);
1135
1136 if (IS_ERR(handle))
1137 return PTR_ERR(handle);
1138
1139 data.handle = (struct ion_handle *)handle->id;
1140
1141 if (copy_to_user((void __user *)arg, &data, sizeof(data))) {
1142 ion_free(client, handle);
1143 return -EFAULT;
1144 }
1145 break;
1146 }
1147 case ION_IOC_FREE:
1148 {
1149 struct ion_handle_data data;
1150 struct ion_handle *handle;
1151
1152 if (copy_from_user(&data, (void __user *)arg,
1153 sizeof(struct ion_handle_data)))
1154 return -EFAULT;
1155 mutex_lock(&client->lock);
1156 handle = ion_uhandle_get(client, (int)data.handle);
1157 mutex_unlock(&client->lock);
1158 if (!handle)
1159 return -EINVAL;
1160 ion_free(client, handle);
1161 break;
1162 }
1163 case ION_IOC_SHARE:
1164 case ION_IOC_MAP:
1165 {
1166 struct ion_fd_data data;
1167 struct ion_handle *handle;
1168
1169 if (copy_from_user(&data, (void __user *)arg, sizeof(data)))
1170 return -EFAULT;
1171 handle = ion_uhandle_get(client, (int)data.handle);
if (!handle)
return -EINVAL;
1172 data.fd = ion_share_dma_buf_fd(client, handle);
1173 if (copy_to_user((void __user *)arg, &data, sizeof(data)))
1174 return -EFAULT;
1175 if (data.fd < 0)
1176 return data.fd;
1177 break;
1178 }
1179 case ION_IOC_IMPORT:
1180 {
1181 struct ion_fd_data data;
1182 struct ion_handle *handle;
1183 int ret = 0;
1184 if (copy_from_user(&data, (void __user *)arg,
1185 sizeof(struct ion_fd_data)))
1186 return -EFAULT;
1187 handle = ion_import_dma_buf(client, data.fd);
1188 if (IS_ERR(handle))
1189 ret = PTR_ERR(handle);
1190 else
1191 data.handle = (struct ion_handle *)handle->id;
1192
1193 if (copy_to_user((void __user *)arg, &data,
1194 sizeof(struct ion_fd_data)))
1195 return -EFAULT;
1196 if (ret < 0)
1197 return ret;
1198 break;
1199 }
1200 case ION_IOC_SYNC:
1201 {
1202 struct ion_fd_data data;
1203 if (copy_from_user(&data, (void __user *)arg,
1204 sizeof(struct ion_fd_data)))
1205 return -EFAULT;
1206 ion_sync_for_device(client, data.fd);
1207 break;
1208 }
1209 case ION_IOC_CUSTOM:
1210 {
1211 struct ion_device *dev = client->dev;
1212 struct ion_custom_data data;
1213
1214 if (!dev->custom_ioctl)
1215 return -ENOTTY;
1216 if (copy_from_user(&data, (void __user *)arg,
1217 sizeof(struct ion_custom_data)))
1218 return -EFAULT;
1219 return dev->custom_ioctl(client, data.cmd, data.arg);
1220 }
1221 default:
1222 return -ENOTTY;
1223 }
1224 return 0;
1225 }
1226
1227 static int ion_release(struct inode *inode, struct file *file)
1228 {
1229 struct ion_client *client = file->private_data;
1230
1231 pr_debug("%s: %d\n", __func__, __LINE__);
1232 ion_client_destroy(client);
1233 return 0;
1234 }
1235
1236 static int ion_open(struct inode *inode, struct file *file)
1237 {
1238 struct miscdevice *miscdev = file->private_data;
1239 struct ion_device *dev = container_of(miscdev, struct ion_device, dev);
1240 struct ion_client *client;
1241
1242 pr_debug("%s: %d\n", __func__, __LINE__);
1243 client = ion_client_create(dev, "user");
1244 if (IS_ERR(client))
1245 return PTR_ERR(client);
1246 file->private_data = client;
1247
1248 return 0;
1249 }
1250
1251 static const struct file_operations ion_fops = {
1252 .owner = THIS_MODULE,
1253 .open = ion_open,
1254 .release = ion_release,
1255 .unlocked_ioctl = ion_ioctl,
1256 };
1257
1258 static size_t ion_debug_heap_total(struct ion_client *client,
1259 unsigned int id)
1260 {
1261 size_t size = 0;
1262 struct rb_node *n;
1263
1264 mutex_lock(&client->lock);
1265 for (n = rb_first(&client->handles); n; n = rb_next(n)) {
1266 struct ion_handle *handle = rb_entry(n,
1267 struct ion_handle,
1268 node);
1269 if (handle->buffer->heap->id == id)
1270 size += handle->buffer->size;
1271 }
1272 mutex_unlock(&client->lock);
1273 return size;
1274 }
1275
1276 static int ion_debug_heap_show(struct seq_file *s, void *unused)
1277 {
1278 struct ion_heap *heap = s->private;
1279 struct ion_device *dev = heap->dev;
1280 struct rb_node *n;
1281 size_t total_size = 0;
1282 size_t total_orphaned_size = 0;
1283
1284 seq_printf(s, "%16s %16s %16s\n", "client", "pid", "size");
1285 seq_printf(s, "----------------------------------------------------\n");
1286
1287 for (n = rb_first(&dev->clients); n; n = rb_next(n)) {
1288 struct ion_client *client = rb_entry(n, struct ion_client,
1289 node);
1290 size_t size = ion_debug_heap_total(client, heap->id);
1291 if (!size)
1292 continue;
1293 if (client->task) {
1294 char task_comm[TASK_COMM_LEN];
1295
1296 get_task_comm(task_comm, client->task);
1297 seq_printf(s, "%16s %16u %16zu\n", task_comm,
1298 client->pid, size);
1299 } else {
1300 seq_printf(s, "%16s %16u %16zu\n", client->name,
1301 client->pid, size);
1302 }
1303 }
1304 seq_printf(s, "----------------------------------------------------\n");
1305 seq_printf(s, "orphaned allocations (info is from last known client):\n");
1307 mutex_lock(&dev->buffer_lock);
1308 for (n = rb_first(&dev->buffers); n; n = rb_next(n)) {
1309 struct ion_buffer *buffer = rb_entry(n, struct ion_buffer,
1310 node);
1311 if (buffer->heap->id != heap->id)
1312 continue;
1313 total_size += buffer->size;
1314 if (!buffer->handle_count) {
1315 seq_printf(s, "%16s %16u %16zu %d %d\n", buffer->task_comm,
1316 buffer->pid, buffer->size, buffer->kmap_cnt,
1317 atomic_read(&buffer->ref.refcount));
1318 total_orphaned_size += buffer->size;
1319 }
1320 }
1321 mutex_unlock(&dev->buffer_lock);
1322 seq_printf(s, "----------------------------------------------------\n");
1323 seq_printf(s, "%16s %16zu\n", "total orphaned",
1324 total_orphaned_size);
1325 seq_printf(s, "%16s %16zu\n", "total ", total_size);
1326 if (heap->flags & ION_HEAP_FLAG_DEFER_FREE)
1327 seq_printf(s, "%16s %16zu\n", "deferred free",
1328 heap->free_list_size);
1329 seq_printf(s, "----------------------------------------------------\n");
1330
1331 if (heap->debug_show)
1332 heap->debug_show(heap, s, unused);
1333
1334 return 0;
1335 }
1336
1337 static int ion_debug_heap_open(struct inode *inode, struct file *file)
1338 {
1339 return single_open(file, ion_debug_heap_show, inode->i_private);
1340 }
1341
1342 static const struct file_operations debug_heap_fops = {
1343 .open = ion_debug_heap_open,
1344 .read = seq_read,
1345 .llseek = seq_lseek,
1346 .release = single_release,
1347 };
1348
1349 #ifdef DEBUG_HEAP_SHRINKER
1350 static int debug_shrink_set(void *data, u64 val)
1351 {
1352 struct ion_heap *heap = data;
1353 struct shrink_control sc;
1354 int objs;
1355
1356 sc.gfp_mask = -1;
1357 sc.nr_to_scan = 0;
1358
1359 if (!val)
1360 return 0;
1361
1362 objs = heap->shrinker.shrink(&heap->shrinker, &sc);
1363 sc.nr_to_scan = objs;
1364
1365 heap->shrinker.shrink(&heap->shrinker, &sc);
1366 return 0;
1367 }
1368
1369 static int debug_shrink_get(void *data, u64 *val)
1370 {
1371 struct ion_heap *heap = data;
1372 struct shrink_control sc;
1373 int objs;
1374
1375 sc.gfp_mask = -1;
1376 sc.nr_to_scan = 0;
1377
1378 objs = heap->shrinker.shrink(&heap->shrinker, &sc);
1379 *val = objs;
1380 return 0;
1381 }
1382
1383 DEFINE_SIMPLE_ATTRIBUTE(debug_shrink_fops, debug_shrink_get,
1384 debug_shrink_set, "%llu\n");
1385 #endif
1386
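/*
 * Register a heap with the device.  Heaps are kept on a plist keyed by
 * -heap->id so that ion_alloc() tries higher-id (higher-priority) heaps
 * first.
 */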
1387 void ion_device_add_heap(struct ion_device *dev, struct ion_heap *heap)
1388 {
1389 if (!heap->ops->allocate || !heap->ops->free || !heap->ops->map_dma ||
1390 !heap->ops->unmap_dma)
1391 pr_err("%s: can not add heap with invalid ops struct.\n",
1392 __func__);
1393
1394 if (heap->flags & ION_HEAP_FLAG_DEFER_FREE)
1395 ion_heap_init_deferred_free(heap);
1396
1397 heap->dev = dev;
1398 down_write(&dev->lock);
1399 /* use negative heap->id to reverse the priority -- when traversing
1400 the list later attempt higher id numbers first */
1401 plist_node_init(&heap->node, -heap->id);
1402 plist_add(&heap->node, &dev->heaps);
1403 debugfs_create_file(heap->name, 0664, dev->debug_root, heap,
1404 &debug_heap_fops);
1405 #ifdef DEBUG_HEAP_SHRINKER
1406 if (heap->shrinker.shrink) {
1407 char debug_name[64];
1408
1409 snprintf(debug_name, 64, "%s_shrink", heap->name);
1410 debugfs_create_file(debug_name, 0644, dev->debug_root, heap,
1411 &debug_shrink_fops);
1412 }
1413 #endif
1414 up_write(&dev->lock);
1415 }
1416
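/*
 * Create and register the /dev/ion misc device along with its debugfs
 * root.
 */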
1417 struct ion_device *ion_device_create(long (*custom_ioctl)
1418 (struct ion_client *client,
1419 unsigned int cmd,
1420 unsigned long arg))
1421 {
1422 struct ion_device *idev;
1423 int ret;
1424
1425 idev = kzalloc(sizeof(struct ion_device), GFP_KERNEL);
1426 if (!idev)
1427 return ERR_PTR(-ENOMEM);
1428
1429 idev->dev.minor = MISC_DYNAMIC_MINOR;
1430 idev->dev.name = "ion";
1431 idev->dev.fops = &ion_fops;
1432 idev->dev.parent = NULL;
1433 ret = misc_register(&idev->dev);
1434 if (ret) {
1435 pr_err("ion: failed to register misc device.\n");
1436 return ERR_PTR(ret);
1437 }
1438
1439 idev->debug_root = debugfs_create_dir("ion", NULL);
1440 if (!idev->debug_root)
1441 pr_err("ion: failed to create debug files.\n");
1442
1443 idev->custom_ioctl = custom_ioctl;
1444 idev->buffers = RB_ROOT;
1445 mutex_init(&idev->buffer_lock);
1446 init_rwsem(&idev->lock);
1447 plist_head_init(&idev->heaps);
1448 idev->clients = RB_ROOT;
1449 return idev;
1450 }
1451
1452 void ion_device_destroy(struct ion_device *dev)
1453 {
1454 misc_deregister(&dev->dev);
1455 /* XXX need to free the heaps and clients ? */
1456 kfree(dev);
1457 }
1458
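/*
 * Reserve (or allocate) the memblock regions described in the platform
 * data for carveout-style heaps.  Must be called early in boot while the
 * memblock allocator is still available.
 */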
1459 void __init ion_reserve(struct ion_platform_data *data)
1460 {
1461 int i;
1462
1463 for (i = 0; i < data->nr; i++) {
1464 if (data->heaps[i].size == 0)
1465 continue;
1466
1467 if (data->heaps[i].base == 0) {
1468 phys_addr_t paddr;
1469 paddr = memblock_alloc_base(data->heaps[i].size,
1470 data->heaps[i].align,
1471 MEMBLOCK_ALLOC_ANYWHERE);
1472 if (!paddr) {
1473 pr_err("%s: error allocating memblock for heap %d\n",
1474 __func__, i);
1476 continue;
1477 }
1478 data->heaps[i].base = paddr;
1479 } else {
1480 int ret = memblock_reserve(data->heaps[i].base,
1481 data->heaps[i].size);
1482 if (ret)
1483 pr_err("memblock reserve of %zx@%lx failed\n",
1484 data->heaps[i].size,
1485 data->heaps[i].base);
1486 }
1487 pr_info("%s: %s reserved base %lx size %zu\n", __func__,
1488 data->heaps[i].name,
1489 data->heaps[i].base,
1490 data->heaps[i].size);
1491 }
1492 }