ion: fix printk warnings
/*
 *
 * drivers/staging/android/ion/ion.c
 *
 * Copyright (C) 2011 Google, Inc.
 *
 * This software is licensed under the terms of the GNU General Public
 * License version 2, as published by the Free Software Foundation, and
 * may be copied, distributed, and modified under those terms.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 */

#include <linux/device.h>
#include <linux/file.h>
#include <linux/freezer.h>
#include <linux/fs.h>
#include <linux/anon_inodes.h>
#include <linux/kthread.h>
#include <linux/list.h>
#include <linux/memblock.h>
#include <linux/miscdevice.h>
#include <linux/export.h>
#include <linux/mm.h>
#include <linux/mm_types.h>
#include <linux/rbtree.h>
#include <linux/slab.h>
#include <linux/seq_file.h>
#include <linux/uaccess.h>
#include <linux/vmalloc.h>
#include <linux/debugfs.h>
#include <linux/dma-buf.h>
#include <linux/idr.h>

#include "ion.h"
#include "ion_priv.h"
#include "compat_ion.h"

/**
 * struct ion_device - the metadata of the ion device node
 * @dev:		the actual misc device
 * @buffers:		an rb tree of all the existing buffers
 * @buffer_lock:	lock protecting the tree of buffers
 * @lock:		rwsem protecting the tree of heaps and clients
 * @heaps:		list of all the heaps in the system
 * @custom_ioctl:	hook for device specific ioctls
 * @clients:		list of all the clients created from userspace
 * @debug_root:		debugfs root directory for the device
 */
struct ion_device {
	struct miscdevice dev;
	struct rb_root buffers;
	struct mutex buffer_lock;
	struct rw_semaphore lock;
	struct plist_head heaps;
	long (*custom_ioctl) (struct ion_client *client, unsigned int cmd,
			      unsigned long arg);
	struct rb_root clients;
	struct dentry *debug_root;
};

/**
 * struct ion_client - a process/hw block local address space
 * @node:		node in the tree of all clients
 * @dev:		backpointer to ion device
 * @handles:		an rb tree of all the handles in this client
 * @idr:		an idr space for allocating handle ids
 * @lock:		lock protecting the tree of handles
 * @name:		used for debugging
 * @task:		used for debugging
 * @pid:		pid of the client task, used for debugging
 * @debug_root:		debugfs entry for this client
 *
 * A client represents a list of buffers this client may access.
 * The mutex stored here is used to protect both handles tree
 * as well as the handles themselves, and should be held while modifying either.
 */
struct ion_client {
	struct rb_node node;
	struct ion_device *dev;
	struct rb_root handles;
	struct idr idr;
	struct mutex lock;
	const char *name;
	struct task_struct *task;
	pid_t pid;
	struct dentry *debug_root;
};

/**
 * ion_handle - a client local reference to a buffer
 * @ref:		reference count
 * @client:		back pointer to the client the buffer resides in
 * @buffer:		pointer to the buffer
 * @node:		node in the client's handle rbtree
 * @kmap_cnt:		count of times this client has mapped to kernel
 * @id:			client-unique id allocated by client->idr
 *
 * Modifications to node, kmap_cnt or mapping should be protected by the
 * lock in the client.  Other fields are never changed after initialization.
 */
struct ion_handle {
	struct kref ref;
	struct ion_client *client;
	struct ion_buffer *buffer;
	struct rb_node node;
	unsigned int kmap_cnt;
	int id;
};

bool ion_buffer_fault_user_mappings(struct ion_buffer *buffer)
{
	return ((buffer->flags & ION_FLAG_CACHED) &&
		!(buffer->flags & ION_FLAG_CACHED_NEEDS_SYNC));
}

bool ion_buffer_cached(struct ion_buffer *buffer)
{
	return !!(buffer->flags & ION_FLAG_CACHED);
}

static inline struct page *ion_buffer_page(struct page *page)
{
	return (struct page *)((unsigned long)page & ~(1UL));
}

static inline bool ion_buffer_page_is_dirty(struct page *page)
{
	return !!((unsigned long)page & 1UL);
}

static inline void ion_buffer_page_dirty(struct page **page)
{
	*page = (struct page *)((unsigned long)(*page) | 1UL);
}

static inline void ion_buffer_page_clean(struct page **page)
{
	*page = (struct page *)((unsigned long)(*page) & ~(1UL));
}
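
/*
 * The four helpers above stash a "dirty" flag in bit 0 of each entry of
 * buffer->pages.  struct page pointers are at least word aligned, so the
 * low bit is always free for tagging.  An illustrative round trip (a
 * sketch only -- nothing in this file calls the helpers in this order):
 *
 *	ion_buffer_page_dirty(buffer->pages + i);	(bit 0 now set)
 *	ion_buffer_page_is_dirty(buffer->pages[i]);	(returns true)
 *	page = ion_buffer_page(buffer->pages[i]);	(tag masked off)
 *	ion_buffer_page_clean(buffer->pages + i);	(bit 0 cleared)
 */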

/* this function should only be called while dev->lock is held */
static void ion_buffer_add(struct ion_device *dev,
			   struct ion_buffer *buffer)
{
	struct rb_node **p = &dev->buffers.rb_node;
	struct rb_node *parent = NULL;
	struct ion_buffer *entry;

	while (*p) {
		parent = *p;
		entry = rb_entry(parent, struct ion_buffer, node);

		if (buffer < entry) {
			p = &(*p)->rb_left;
		} else if (buffer > entry) {
			p = &(*p)->rb_right;
		} else {
			pr_err("%s: buffer already found.", __func__);
			BUG();
		}
	}

	rb_link_node(&buffer->node, parent, p);
	rb_insert_color(&buffer->node, &dev->buffers);
}

/* this function should only be called while dev->lock is held */
static struct ion_buffer *ion_buffer_create(struct ion_heap *heap,
					    struct ion_device *dev,
					    unsigned long len,
					    unsigned long align,
					    unsigned long flags)
{
	struct ion_buffer *buffer;
	struct sg_table *table;
	struct scatterlist *sg;
	int i, ret;

	buffer = kzalloc(sizeof(struct ion_buffer), GFP_KERNEL);
	if (!buffer)
		return ERR_PTR(-ENOMEM);

	buffer->heap = heap;
	buffer->flags = flags;
	kref_init(&buffer->ref);

	ret = heap->ops->allocate(heap, buffer, len, align, flags);

	if (ret) {
		if (!(heap->flags & ION_HEAP_FLAG_DEFER_FREE))
			goto err2;

		ion_heap_freelist_drain(heap, 0);
		ret = heap->ops->allocate(heap, buffer, len, align,
					  flags);
		if (ret)
			goto err2;
	}

	buffer->dev = dev;
	buffer->size = len;

	table = heap->ops->map_dma(heap, buffer);
	if (WARN_ONCE(table == NULL, "heap->ops->map_dma should return ERR_PTR on error"))
		table = ERR_PTR(-EINVAL);
	if (IS_ERR(table)) {
		heap->ops->free(buffer);
		kfree(buffer);
		return ERR_PTR(PTR_ERR(table));
	}
	buffer->sg_table = table;
	if (ion_buffer_fault_user_mappings(buffer)) {
		int num_pages = PAGE_ALIGN(buffer->size) / PAGE_SIZE;
		struct scatterlist *sg;
		int i, j, k = 0;

		buffer->pages = vmalloc(sizeof(struct page *) * num_pages);
		if (!buffer->pages) {
			ret = -ENOMEM;
			goto err1;
		}

		for_each_sg(table->sgl, sg, table->nents, i) {
			struct page *page = sg_page(sg);

			for (j = 0; j < sg->length / PAGE_SIZE; j++)
				buffer->pages[k++] = page++;
		}

		if (ret)
			goto err;
	}

	buffer->dev = dev;
	buffer->size = len;
	INIT_LIST_HEAD(&buffer->vmas);
	mutex_init(&buffer->lock);
	/* this will set up dma addresses for the sglist -- it is not
	   technically correct as per the dma api -- a specific
	   device isn't really taking ownership here.  However, in practice on
	   our systems the only dma_address space is physical addresses.
	   Additionally, we can't afford the overhead of invalidating every
	   allocation via dma_map_sg. The implicit contract here is that
	   memory coming from the heaps is ready for dma, ie if it has a
	   cached mapping that mapping has been invalidated */
	for_each_sg(buffer->sg_table->sgl, sg, buffer->sg_table->nents, i)
		sg_dma_address(sg) = sg_phys(sg);
	mutex_lock(&dev->buffer_lock);
	ion_buffer_add(dev, buffer);
	mutex_unlock(&dev->buffer_lock);
	return buffer;

err:
	heap->ops->unmap_dma(heap, buffer);
	heap->ops->free(buffer);
err1:
	if (buffer->pages)
		vfree(buffer->pages);
err2:
	kfree(buffer);
	return ERR_PTR(ret);
}

void ion_buffer_destroy(struct ion_buffer *buffer)
{
	if (WARN_ON(buffer->kmap_cnt > 0))
		buffer->heap->ops->unmap_kernel(buffer->heap, buffer);
	buffer->heap->ops->unmap_dma(buffer->heap, buffer);
	buffer->heap->ops->free(buffer);
	if (buffer->pages)
		vfree(buffer->pages);
	kfree(buffer);
}

static void _ion_buffer_destroy(struct kref *kref)
{
	struct ion_buffer *buffer = container_of(kref, struct ion_buffer, ref);
	struct ion_heap *heap = buffer->heap;
	struct ion_device *dev = buffer->dev;

	mutex_lock(&dev->buffer_lock);
	rb_erase(&buffer->node, &dev->buffers);
	mutex_unlock(&dev->buffer_lock);

	if (heap->flags & ION_HEAP_FLAG_DEFER_FREE)
		ion_heap_freelist_add(heap, buffer);
	else
		ion_buffer_destroy(buffer);
}

static void ion_buffer_get(struct ion_buffer *buffer)
{
	kref_get(&buffer->ref);
}

static int ion_buffer_put(struct ion_buffer *buffer)
{
	return kref_put(&buffer->ref, _ion_buffer_destroy);
}

static void ion_buffer_add_to_handle(struct ion_buffer *buffer)
{
	mutex_lock(&buffer->lock);
	buffer->handle_count++;
	mutex_unlock(&buffer->lock);
}

static void ion_buffer_remove_from_handle(struct ion_buffer *buffer)
{
	/*
	 * when a buffer is removed from a handle, if it is not in
	 * any other handles, copy the taskcomm and the pid of the
	 * process it's being removed from into the buffer.  At this
	 * point there will be no way to track what processes this buffer is
	 * being used by, it only exists as a dma_buf file descriptor.
	 * The taskcomm and pid can provide a debug hint as to where this fd
	 * is in the system
	 */
	mutex_lock(&buffer->lock);
	buffer->handle_count--;
	BUG_ON(buffer->handle_count < 0);
	if (!buffer->handle_count) {
		struct task_struct *task;

		task = current->group_leader;
		get_task_comm(buffer->task_comm, task);
		buffer->pid = task_pid_nr(task);
	}
	mutex_unlock(&buffer->lock);
}

static struct ion_handle *ion_handle_create(struct ion_client *client,
					    struct ion_buffer *buffer)
{
	struct ion_handle *handle;

	handle = kzalloc(sizeof(struct ion_handle), GFP_KERNEL);
	if (!handle)
		return ERR_PTR(-ENOMEM);
	kref_init(&handle->ref);
	RB_CLEAR_NODE(&handle->node);
	handle->client = client;
	ion_buffer_get(buffer);
	ion_buffer_add_to_handle(buffer);
	handle->buffer = buffer;

	return handle;
}

static void ion_handle_kmap_put(struct ion_handle *);

static void ion_handle_destroy(struct kref *kref)
{
	struct ion_handle *handle = container_of(kref, struct ion_handle, ref);
	struct ion_client *client = handle->client;
	struct ion_buffer *buffer = handle->buffer;

	mutex_lock(&buffer->lock);
	while (handle->kmap_cnt)
		ion_handle_kmap_put(handle);
	mutex_unlock(&buffer->lock);

	idr_remove(&client->idr, handle->id);
	if (!RB_EMPTY_NODE(&handle->node))
		rb_erase(&handle->node, &client->handles);

	ion_buffer_remove_from_handle(buffer);
	ion_buffer_put(buffer);

	kfree(handle);
}

struct ion_buffer *ion_handle_buffer(struct ion_handle *handle)
{
	return handle->buffer;
}

static void ion_handle_get(struct ion_handle *handle)
{
	kref_get(&handle->ref);
}

static int ion_handle_put(struct ion_handle *handle)
{
	struct ion_client *client = handle->client;
	int ret;

	mutex_lock(&client->lock);
	ret = kref_put(&handle->ref, ion_handle_destroy);
	mutex_unlock(&client->lock);

	return ret;
}

static struct ion_handle *ion_handle_lookup(struct ion_client *client,
					    struct ion_buffer *buffer)
{
	struct rb_node *n = client->handles.rb_node;

	while (n) {
		struct ion_handle *entry = rb_entry(n, struct ion_handle, node);

		if (buffer < entry->buffer)
			n = n->rb_left;
		else if (buffer > entry->buffer)
			n = n->rb_right;
		else
			return entry;
	}
	return ERR_PTR(-EINVAL);
}

static struct ion_handle *ion_handle_get_by_id(struct ion_client *client,
					       int id)
{
	struct ion_handle *handle;

	mutex_lock(&client->lock);
	handle = idr_find(&client->idr, id);
	if (handle)
		ion_handle_get(handle);
	mutex_unlock(&client->lock);

	return handle ? handle : ERR_PTR(-EINVAL);
}

static bool ion_handle_validate(struct ion_client *client,
				struct ion_handle *handle)
{
	WARN_ON(!mutex_is_locked(&client->lock));
	return (idr_find(&client->idr, handle->id) == handle);
}

static int ion_handle_add(struct ion_client *client, struct ion_handle *handle)
{
	int id;
	struct rb_node **p = &client->handles.rb_node;
	struct rb_node *parent = NULL;
	struct ion_handle *entry;

	id = idr_alloc(&client->idr, handle, 1, 0, GFP_KERNEL);
	if (id < 0)
		return id;

	handle->id = id;

	while (*p) {
		parent = *p;
		entry = rb_entry(parent, struct ion_handle, node);

		if (handle->buffer < entry->buffer)
			p = &(*p)->rb_left;
		else if (handle->buffer > entry->buffer)
			p = &(*p)->rb_right;
		else
			WARN(1, "%s: buffer already found.", __func__);
	}

	rb_link_node(&handle->node, parent, p);
	rb_insert_color(&handle->node, &client->handles);

	return 0;
}

struct ion_handle *ion_alloc(struct ion_client *client, size_t len,
			     size_t align, unsigned int heap_id_mask,
			     unsigned int flags)
{
	struct ion_handle *handle;
	struct ion_device *dev = client->dev;
	struct ion_buffer *buffer = NULL;
	struct ion_heap *heap;
	int ret;

	pr_debug("%s: len %zu align %zu heap_id_mask %u flags %x\n", __func__,
		 len, align, heap_id_mask, flags);
	/*
	 * traverse the list of heaps available in this system in priority
	 * order.  If the heap type is supported by the client, and matches the
	 * request of the caller allocate from it.  Repeat until allocate has
	 * succeeded or all heaps have been tried
	 */
	len = PAGE_ALIGN(len);

	if (!len)
		return ERR_PTR(-EINVAL);

	down_read(&dev->lock);
	plist_for_each_entry(heap, &dev->heaps, node) {
		/* if the caller didn't specify this heap id */
		if (!((1 << heap->id) & heap_id_mask))
			continue;
		buffer = ion_buffer_create(heap, dev, len, align, flags);
		if (!IS_ERR(buffer))
			break;
	}
	up_read(&dev->lock);

	if (buffer == NULL)
		return ERR_PTR(-ENODEV);

	if (IS_ERR(buffer))
		return ERR_PTR(PTR_ERR(buffer));

	handle = ion_handle_create(client, buffer);

	/*
	 * ion_buffer_create will create a buffer with a ref_cnt of 1,
	 * and ion_handle_create will take a second reference, drop one here
	 */
	ion_buffer_put(buffer);

	if (IS_ERR(handle))
		return handle;

	mutex_lock(&client->lock);
	ret = ion_handle_add(client, handle);
	mutex_unlock(&client->lock);
	if (ret) {
		ion_handle_put(handle);
		handle = ERR_PTR(ret);
	}

	return handle;
}
EXPORT_SYMBOL(ion_alloc);
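
/*
 * Illustrative kernel-side usage of the allocation API (a sketch, not part
 * of this driver; "my_heap_id" is a placeholder for a heap id registered
 * via ion_device_add_heap()):
 *
 *	struct ion_handle *handle;
 *	void *vaddr;
 *
 *	handle = ion_alloc(client, PAGE_SIZE, PAGE_SIZE,
 *			   1 << my_heap_id, ION_FLAG_CACHED);
 *	if (IS_ERR(handle))
 *		return PTR_ERR(handle);
 *	vaddr = ion_map_kernel(client, handle);
 *	... use vaddr ...
 *	ion_unmap_kernel(client, handle);
 *	ion_free(client, handle);
 */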

void ion_free(struct ion_client *client, struct ion_handle *handle)
{
	bool valid_handle;

	BUG_ON(client != handle->client);

	mutex_lock(&client->lock);
	valid_handle = ion_handle_validate(client, handle);

	if (!valid_handle) {
		WARN(1, "%s: invalid handle passed to free.\n", __func__);
		mutex_unlock(&client->lock);
		return;
	}
	mutex_unlock(&client->lock);
	ion_handle_put(handle);
}
EXPORT_SYMBOL(ion_free);

int ion_phys(struct ion_client *client, struct ion_handle *handle,
	     ion_phys_addr_t *addr, size_t *len)
{
	struct ion_buffer *buffer;
	int ret;

	mutex_lock(&client->lock);
	if (!ion_handle_validate(client, handle)) {
		mutex_unlock(&client->lock);
		return -EINVAL;
	}

	buffer = handle->buffer;

	if (!buffer->heap->ops->phys) {
		pr_err("%s: ion_phys is not implemented by this heap.\n",
		       __func__);
		mutex_unlock(&client->lock);
		return -ENODEV;
	}
	mutex_unlock(&client->lock);
	ret = buffer->heap->ops->phys(buffer->heap, buffer, addr, len);
	return ret;
}
EXPORT_SYMBOL(ion_phys);

static void *ion_buffer_kmap_get(struct ion_buffer *buffer)
{
	void *vaddr;

	if (buffer->kmap_cnt) {
		buffer->kmap_cnt++;
		return buffer->vaddr;
	}
	vaddr = buffer->heap->ops->map_kernel(buffer->heap, buffer);
	if (WARN_ONCE(vaddr == NULL, "heap->ops->map_kernel should return ERR_PTR on error"))
		return ERR_PTR(-EINVAL);
	if (IS_ERR(vaddr))
		return vaddr;
	buffer->vaddr = vaddr;
	buffer->kmap_cnt++;
	return vaddr;
}

static void *ion_handle_kmap_get(struct ion_handle *handle)
{
	struct ion_buffer *buffer = handle->buffer;
	void *vaddr;

	if (handle->kmap_cnt) {
		handle->kmap_cnt++;
		return buffer->vaddr;
	}
	vaddr = ion_buffer_kmap_get(buffer);
	if (IS_ERR(vaddr))
		return vaddr;
	handle->kmap_cnt++;
	return vaddr;
}

static void ion_buffer_kmap_put(struct ion_buffer *buffer)
{
	buffer->kmap_cnt--;
	if (!buffer->kmap_cnt) {
		buffer->heap->ops->unmap_kernel(buffer->heap, buffer);
		buffer->vaddr = NULL;
	}
}

static void ion_handle_kmap_put(struct ion_handle *handle)
{
	struct ion_buffer *buffer = handle->buffer;

	handle->kmap_cnt--;
	if (!handle->kmap_cnt)
		ion_buffer_kmap_put(buffer);
}

void *ion_map_kernel(struct ion_client *client, struct ion_handle *handle)
{
	struct ion_buffer *buffer;
	void *vaddr;

	mutex_lock(&client->lock);
	if (!ion_handle_validate(client, handle)) {
		pr_err("%s: invalid handle passed to map_kernel.\n",
		       __func__);
		mutex_unlock(&client->lock);
		return ERR_PTR(-EINVAL);
	}

	buffer = handle->buffer;

	if (!handle->buffer->heap->ops->map_kernel) {
		pr_err("%s: map_kernel is not implemented by this heap.\n",
		       __func__);
		mutex_unlock(&client->lock);
		return ERR_PTR(-ENODEV);
	}

	mutex_lock(&buffer->lock);
	vaddr = ion_handle_kmap_get(handle);
	mutex_unlock(&buffer->lock);
	mutex_unlock(&client->lock);
	return vaddr;
}
EXPORT_SYMBOL(ion_map_kernel);

void ion_unmap_kernel(struct ion_client *client, struct ion_handle *handle)
{
	struct ion_buffer *buffer;

	mutex_lock(&client->lock);
	buffer = handle->buffer;
	mutex_lock(&buffer->lock);
	ion_handle_kmap_put(handle);
	mutex_unlock(&buffer->lock);
	mutex_unlock(&client->lock);
}
EXPORT_SYMBOL(ion_unmap_kernel);

static int ion_debug_client_show(struct seq_file *s, void *unused)
{
	struct ion_client *client = s->private;
	struct rb_node *n;
	size_t sizes[ION_NUM_HEAP_IDS] = {0};
	const char *names[ION_NUM_HEAP_IDS] = {0};
	int i;

	mutex_lock(&client->lock);
	for (n = rb_first(&client->handles); n; n = rb_next(n)) {
		struct ion_handle *handle = rb_entry(n, struct ion_handle,
						     node);
		unsigned int id = handle->buffer->heap->id;

		if (!names[id])
			names[id] = handle->buffer->heap->name;
		sizes[id] += handle->buffer->size;
	}
	mutex_unlock(&client->lock);

	seq_printf(s, "%16.16s: %16.16s\n", "heap_name", "size_in_bytes");
	for (i = 0; i < ION_NUM_HEAP_IDS; i++) {
		if (!names[i])
			continue;
		seq_printf(s, "%16.16s: %16zu\n", names[i], sizes[i]);
	}
	return 0;
}

static int ion_debug_client_open(struct inode *inode, struct file *file)
{
	return single_open(file, ion_debug_client_show, inode->i_private);
}

static const struct file_operations debug_client_fops = {
	.open = ion_debug_client_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
};

struct ion_client *ion_client_create(struct ion_device *dev,
				     const char *name)
{
	struct ion_client *client;
	struct task_struct *task;
	struct rb_node **p;
	struct rb_node *parent = NULL;
	struct ion_client *entry;
	char debug_name[64];
	pid_t pid;

	get_task_struct(current->group_leader);
	task_lock(current->group_leader);
	pid = task_pid_nr(current->group_leader);
	/* don't bother to store task struct for kernel threads,
	   they can't be killed anyway */
	if (current->group_leader->flags & PF_KTHREAD) {
		put_task_struct(current->group_leader);
		task = NULL;
	} else {
		task = current->group_leader;
	}
	task_unlock(current->group_leader);

	client = kzalloc(sizeof(struct ion_client), GFP_KERNEL);
	if (!client) {
		if (task)
			put_task_struct(current->group_leader);
		return ERR_PTR(-ENOMEM);
	}

	client->dev = dev;
	client->handles = RB_ROOT;
	idr_init(&client->idr);
	mutex_init(&client->lock);
	client->name = name;
	client->task = task;
	client->pid = pid;

	down_write(&dev->lock);
	p = &dev->clients.rb_node;
	while (*p) {
		parent = *p;
		entry = rb_entry(parent, struct ion_client, node);

		if (client < entry)
			p = &(*p)->rb_left;
		else if (client > entry)
			p = &(*p)->rb_right;
	}
	rb_link_node(&client->node, parent, p);
	rb_insert_color(&client->node, &dev->clients);

	snprintf(debug_name, 64, "%u", client->pid);
	client->debug_root = debugfs_create_file(debug_name, 0664,
						 dev->debug_root, client,
						 &debug_client_fops);
	up_write(&dev->lock);

	return client;
}
EXPORT_SYMBOL(ion_client_create);

void ion_client_destroy(struct ion_client *client)
{
	struct ion_device *dev = client->dev;
	struct rb_node *n;

	pr_debug("%s: %d\n", __func__, __LINE__);
	while ((n = rb_first(&client->handles))) {
		struct ion_handle *handle = rb_entry(n, struct ion_handle,
						     node);
		ion_handle_destroy(&handle->ref);
	}

	idr_destroy(&client->idr);

	down_write(&dev->lock);
	if (client->task)
		put_task_struct(client->task);
	rb_erase(&client->node, &dev->clients);
	debugfs_remove_recursive(client->debug_root);
	up_write(&dev->lock);

	kfree(client);
}
EXPORT_SYMBOL(ion_client_destroy);

struct sg_table *ion_sg_table(struct ion_client *client,
			      struct ion_handle *handle)
{
	struct ion_buffer *buffer;
	struct sg_table *table;

	mutex_lock(&client->lock);
	if (!ion_handle_validate(client, handle)) {
		pr_err("%s: invalid handle passed to map_dma.\n",
		       __func__);
		mutex_unlock(&client->lock);
		return ERR_PTR(-EINVAL);
	}
	buffer = handle->buffer;
	table = buffer->sg_table;
	mutex_unlock(&client->lock);
	return table;
}
EXPORT_SYMBOL(ion_sg_table);

static void ion_buffer_sync_for_device(struct ion_buffer *buffer,
				       struct device *dev,
				       enum dma_data_direction direction);

static struct sg_table *ion_map_dma_buf(struct dma_buf_attachment *attachment,
					enum dma_data_direction direction)
{
	struct dma_buf *dmabuf = attachment->dmabuf;
	struct ion_buffer *buffer = dmabuf->priv;

	ion_buffer_sync_for_device(buffer, attachment->dev, direction);
	return buffer->sg_table;
}

static void ion_unmap_dma_buf(struct dma_buf_attachment *attachment,
			      struct sg_table *table,
			      enum dma_data_direction direction)
{
}

void ion_pages_sync_for_device(struct device *dev, struct page *page,
			       size_t size, enum dma_data_direction dir)
{
	struct scatterlist sg;

	sg_init_table(&sg, 1);
	sg_set_page(&sg, page, size, 0);
	/*
	 * This is not correct - sg_dma_address needs a dma_addr_t that is
	 * valid for the targeted device, but this works on the currently
	 * targeted hardware.
	 */
	sg_dma_address(&sg) = page_to_phys(page);
	dma_sync_sg_for_device(dev, &sg, 1, dir);
}
56a7c185
RSZ
852struct ion_vma_list {
853 struct list_head list;
854 struct vm_area_struct *vma;
855};
856
857static void ion_buffer_sync_for_device(struct ion_buffer *buffer,
858 struct device *dev,
859 enum dma_data_direction dir)
860{
56a7c185 861 struct ion_vma_list *vma_list;
c13bd1c4
RSZ
862 int pages = PAGE_ALIGN(buffer->size) / PAGE_SIZE;
863 int i;
56a7c185
RSZ
864
865 pr_debug("%s: syncing for device %s\n", __func__,
866 dev ? dev_name(dev) : "null");
0b9ec1cf 867
13ba7805 868 if (!ion_buffer_fault_user_mappings(buffer))
0b9ec1cf
RSZ
869 return;
870
56a7c185 871 mutex_lock(&buffer->lock);
c13bd1c4
RSZ
872 for (i = 0; i < pages; i++) {
873 struct page *page = buffer->pages[i];
874
875 if (ion_buffer_page_is_dirty(page))
e946b209
CC
876 ion_pages_sync_for_device(dev, ion_buffer_page(page),
877 PAGE_SIZE, dir);
878
c13bd1c4 879 ion_buffer_page_clean(buffer->pages + i);
56a7c185
RSZ
880 }
881 list_for_each_entry(vma_list, &buffer->vmas, list) {
882 struct vm_area_struct *vma = vma_list->vma;
883
884 zap_page_range(vma, vma->vm_start, vma->vm_end - vma->vm_start,
885 NULL);
886 }
887 mutex_unlock(&buffer->lock);
888}

int ion_vm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
	struct ion_buffer *buffer = vma->vm_private_data;
	int ret;

	mutex_lock(&buffer->lock);
	BUG_ON(!buffer->pages || !buffer->pages[vmf->pgoff]);
	ion_buffer_page_dirty(buffer->pages + vmf->pgoff);

	ret = vm_insert_page(vma, (unsigned long)vmf->virtual_address,
			     ion_buffer_page(buffer->pages[vmf->pgoff]));
	mutex_unlock(&buffer->lock);
	if (ret)
		return VM_FAULT_ERROR;

	return VM_FAULT_NOPAGE;
}

static void ion_vm_open(struct vm_area_struct *vma)
{
	struct ion_buffer *buffer = vma->vm_private_data;
	struct ion_vma_list *vma_list;

	vma_list = kmalloc(sizeof(struct ion_vma_list), GFP_KERNEL);
	if (!vma_list)
		return;
	vma_list->vma = vma;
	mutex_lock(&buffer->lock);
	list_add(&vma_list->list, &buffer->vmas);
	mutex_unlock(&buffer->lock);
	pr_debug("%s: adding %p\n", __func__, vma);
}

static void ion_vm_close(struct vm_area_struct *vma)
{
	struct ion_buffer *buffer = vma->vm_private_data;
	struct ion_vma_list *vma_list, *tmp;

	pr_debug("%s\n", __func__);
	mutex_lock(&buffer->lock);
	list_for_each_entry_safe(vma_list, tmp, &buffer->vmas, list) {
		if (vma_list->vma != vma)
			continue;
		list_del(&vma_list->list);
		kfree(vma_list);
		pr_debug("%s: deleting %p\n", __func__, vma);
		break;
	}
	mutex_unlock(&buffer->lock);
}

struct vm_operations_struct ion_vma_ops = {
	.open = ion_vm_open,
	.close = ion_vm_close,
	.fault = ion_vm_fault,
};

static int ion_mmap(struct dma_buf *dmabuf, struct vm_area_struct *vma)
{
	struct ion_buffer *buffer = dmabuf->priv;
	int ret = 0;

	if (!buffer->heap->ops->map_user) {
		pr_err("%s: this heap does not define a method for mapping to userspace\n",
		       __func__);
		return -EINVAL;
	}

	if (ion_buffer_fault_user_mappings(buffer)) {
		vma->vm_private_data = buffer;
		vma->vm_ops = &ion_vma_ops;
		ion_vm_open(vma);
		return 0;
	}

	if (!(buffer->flags & ION_FLAG_CACHED))
		vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot);

	mutex_lock(&buffer->lock);
	/* now map it to userspace */
	ret = buffer->heap->ops->map_user(buffer->heap, buffer, vma);
	mutex_unlock(&buffer->lock);

	if (ret)
		pr_err("%s: failure mapping buffer to userspace\n",
		       __func__);

	return ret;
}

static void ion_dma_buf_release(struct dma_buf *dmabuf)
{
	struct ion_buffer *buffer = dmabuf->priv;

	ion_buffer_put(buffer);
}

static void *ion_dma_buf_kmap(struct dma_buf *dmabuf, unsigned long offset)
{
	struct ion_buffer *buffer = dmabuf->priv;

	return buffer->vaddr + offset * PAGE_SIZE;
}

static void ion_dma_buf_kunmap(struct dma_buf *dmabuf, unsigned long offset,
			       void *ptr)
{
}

static int ion_dma_buf_begin_cpu_access(struct dma_buf *dmabuf, size_t start,
					size_t len,
					enum dma_data_direction direction)
{
	struct ion_buffer *buffer = dmabuf->priv;
	void *vaddr;

	if (!buffer->heap->ops->map_kernel) {
		pr_err("%s: map kernel is not implemented by this heap.\n",
		       __func__);
		return -ENODEV;
	}

	mutex_lock(&buffer->lock);
	vaddr = ion_buffer_kmap_get(buffer);
	mutex_unlock(&buffer->lock);
	if (IS_ERR(vaddr))
		return PTR_ERR(vaddr);
	return 0;
}

static void ion_dma_buf_end_cpu_access(struct dma_buf *dmabuf, size_t start,
				       size_t len,
				       enum dma_data_direction direction)
{
	struct ion_buffer *buffer = dmabuf->priv;

	mutex_lock(&buffer->lock);
	ion_buffer_kmap_put(buffer);
	mutex_unlock(&buffer->lock);
}

struct dma_buf_ops dma_buf_ops = {
	.map_dma_buf = ion_map_dma_buf,
	.unmap_dma_buf = ion_unmap_dma_buf,
	.mmap = ion_mmap,
	.release = ion_dma_buf_release,
	.begin_cpu_access = ion_dma_buf_begin_cpu_access,
	.end_cpu_access = ion_dma_buf_end_cpu_access,
	.kmap_atomic = ion_dma_buf_kmap,
	.kunmap_atomic = ion_dma_buf_kunmap,
	.kmap = ion_dma_buf_kmap,
	.kunmap = ion_dma_buf_kunmap,
};

struct dma_buf *ion_share_dma_buf(struct ion_client *client,
				  struct ion_handle *handle)
{
	struct ion_buffer *buffer;
	struct dma_buf *dmabuf;
	bool valid_handle;

	mutex_lock(&client->lock);
	valid_handle = ion_handle_validate(client, handle);
	if (!valid_handle) {
		WARN(1, "%s: invalid handle passed to share.\n", __func__);
		mutex_unlock(&client->lock);
		return ERR_PTR(-EINVAL);
	}
	buffer = handle->buffer;
	ion_buffer_get(buffer);
	mutex_unlock(&client->lock);

	dmabuf = dma_buf_export(buffer, &dma_buf_ops, buffer->size, O_RDWR);
	if (IS_ERR(dmabuf)) {
		ion_buffer_put(buffer);
		return dmabuf;
	}

	return dmabuf;
}
EXPORT_SYMBOL(ion_share_dma_buf);

int ion_share_dma_buf_fd(struct ion_client *client, struct ion_handle *handle)
{
	struct dma_buf *dmabuf;
	int fd;

	dmabuf = ion_share_dma_buf(client, handle);
	if (IS_ERR(dmabuf))
		return PTR_ERR(dmabuf);

	fd = dma_buf_fd(dmabuf, O_CLOEXEC);
	if (fd < 0)
		dma_buf_put(dmabuf);

	return fd;
}
EXPORT_SYMBOL(ion_share_dma_buf_fd);
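
/*
 * Sketch of handing a buffer to another task (assumes "client" and
 * "handle" as in the ion_alloc() example above; the fd can travel over
 * any IPC channel, e.g. a unix domain socket):
 *
 *	int fd = ion_share_dma_buf_fd(client, handle);
 *
 *	if (fd < 0)
 *		return fd;
 *	send fd to the peer, which either mmap()s it directly as a dma-buf
 *	fd or turns it back into a handle with ion_import_dma_buf(), then
 *	close() our copy of the fd when done.
 */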

struct ion_handle *ion_import_dma_buf(struct ion_client *client, int fd)
{
	struct dma_buf *dmabuf;
	struct ion_buffer *buffer;
	struct ion_handle *handle;
	int ret;

	dmabuf = dma_buf_get(fd);
	if (IS_ERR(dmabuf))
		return ERR_PTR(PTR_ERR(dmabuf));

	/* if this memory came from ion */
	if (dmabuf->ops != &dma_buf_ops) {
		pr_err("%s: can not import dmabuf from another exporter\n",
		       __func__);
		dma_buf_put(dmabuf);
		return ERR_PTR(-EINVAL);
	}
	buffer = dmabuf->priv;

	mutex_lock(&client->lock);
	/* if a handle exists for this buffer just take a reference to it */
	handle = ion_handle_lookup(client, buffer);
	if (!IS_ERR(handle)) {
		ion_handle_get(handle);
		mutex_unlock(&client->lock);
		goto end;
	}
	mutex_unlock(&client->lock);

	handle = ion_handle_create(client, buffer);
	if (IS_ERR(handle))
		goto end;

	mutex_lock(&client->lock);
	ret = ion_handle_add(client, handle);
	mutex_unlock(&client->lock);
	if (ret) {
		ion_handle_put(handle);
		handle = ERR_PTR(ret);
	}

end:
	dma_buf_put(dmabuf);
	return handle;
}
EXPORT_SYMBOL(ion_import_dma_buf);
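
/*
 * Receiving side of the flow above (a sketch; "fd" arrived over IPC from
 * the exporting task):
 *
 *	struct ion_handle *handle = ion_import_dma_buf(client, fd);
 *
 *	if (IS_ERR(handle))
 *		return PTR_ERR(handle);
 *	the handle now pins the same underlying ion_buffer; release it
 *	with ion_free(client, handle) as usual.
 */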

static int ion_sync_for_device(struct ion_client *client, int fd)
{
	struct dma_buf *dmabuf;
	struct ion_buffer *buffer;

	dmabuf = dma_buf_get(fd);
	if (IS_ERR(dmabuf))
		return PTR_ERR(dmabuf);

	/* if this memory came from ion */
	if (dmabuf->ops != &dma_buf_ops) {
		pr_err("%s: can not sync dmabuf from another exporter\n",
		       __func__);
		dma_buf_put(dmabuf);
		return -EINVAL;
	}
	buffer = dmabuf->priv;

	dma_sync_sg_for_device(NULL, buffer->sg_table->sgl,
			       buffer->sg_table->nents, DMA_BIDIRECTIONAL);
	dma_buf_put(dmabuf);
	return 0;
}

static long ion_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
{
	struct ion_client *client = filp->private_data;

	switch (cmd) {
	case ION_IOC_ALLOC:
	{
		struct ion_allocation_data data;
		struct ion_handle *handle;

		if (copy_from_user(&data, (void __user *)arg, sizeof(data)))
			return -EFAULT;
		handle = ion_alloc(client, data.len, data.align,
				   data.heap_id_mask, data.flags);

		if (IS_ERR(handle))
			return PTR_ERR(handle);

		data.handle = handle->id;

		if (copy_to_user((void __user *)arg, &data, sizeof(data))) {
			ion_free(client, handle);
			return -EFAULT;
		}
		break;
	}
	case ION_IOC_FREE:
	{
		struct ion_handle_data data;
		struct ion_handle *handle;

		if (copy_from_user(&data, (void __user *)arg,
				   sizeof(struct ion_handle_data)))
			return -EFAULT;
		handle = ion_handle_get_by_id(client, data.handle);
		if (IS_ERR(handle))
			return PTR_ERR(handle);
		ion_free(client, handle);
		ion_handle_put(handle);
		break;
	}
	case ION_IOC_SHARE:
	case ION_IOC_MAP:
	{
		struct ion_fd_data data;
		struct ion_handle *handle;

		if (copy_from_user(&data, (void __user *)arg, sizeof(data)))
			return -EFAULT;
		handle = ion_handle_get_by_id(client, data.handle);
		if (IS_ERR(handle))
			return PTR_ERR(handle);
		data.fd = ion_share_dma_buf_fd(client, handle);
		ion_handle_put(handle);
		if (copy_to_user((void __user *)arg, &data, sizeof(data)))
			return -EFAULT;
		if (data.fd < 0)
			return data.fd;
		break;
	}
	case ION_IOC_IMPORT:
	{
		struct ion_fd_data data;
		struct ion_handle *handle;
		int ret = 0;

		if (copy_from_user(&data, (void __user *)arg,
				   sizeof(struct ion_fd_data)))
			return -EFAULT;
		handle = ion_import_dma_buf(client, data.fd);
		if (IS_ERR(handle))
			ret = PTR_ERR(handle);
		else
			data.handle = handle->id;

		if (copy_to_user((void __user *)arg, &data,
				 sizeof(struct ion_fd_data)))
			return -EFAULT;
		if (ret < 0)
			return ret;
		break;
	}
	case ION_IOC_SYNC:
	{
		struct ion_fd_data data;

		if (copy_from_user(&data, (void __user *)arg,
				   sizeof(struct ion_fd_data)))
			return -EFAULT;
		ion_sync_for_device(client, data.fd);
		break;
	}
	case ION_IOC_CUSTOM:
	{
		struct ion_device *dev = client->dev;
		struct ion_custom_data data;

		if (!dev->custom_ioctl)
			return -ENOTTY;
		if (copy_from_user(&data, (void __user *)arg,
				   sizeof(struct ion_custom_data)))
			return -EFAULT;
		return dev->custom_ioctl(client, data.cmd, data.arg);
	}
	default:
		return -ENOTTY;
	}
	return 0;
}
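
/*
 * The same ABI seen from userspace (illustrative sketch, error handling
 * omitted; "my_heap_id" is a placeholder for a registered heap id):
 *
 *	int ionfd = open("/dev/ion", O_RDONLY);
 *	struct ion_allocation_data alloc = {
 *		.len = 4096,
 *		.align = 4096,
 *		.heap_id_mask = 1 << my_heap_id,
 *		.flags = ION_FLAG_CACHED,
 *	};
 *	struct ion_fd_data share;
 *	struct ion_handle_data free_data;
 *
 *	ioctl(ionfd, ION_IOC_ALLOC, &alloc);
 *	share.handle = alloc.handle;
 *	ioctl(ionfd, ION_IOC_SHARE, &share);	(share.fd is a dma-buf fd)
 *	...
 *	free_data.handle = alloc.handle;
 *	ioctl(ionfd, ION_IOC_FREE, &free_data);
 */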

static int ion_release(struct inode *inode, struct file *file)
{
	struct ion_client *client = file->private_data;

	pr_debug("%s: %d\n", __func__, __LINE__);
	ion_client_destroy(client);
	return 0;
}

static int ion_open(struct inode *inode, struct file *file)
{
	struct miscdevice *miscdev = file->private_data;
	struct ion_device *dev = container_of(miscdev, struct ion_device, dev);
	struct ion_client *client;

	pr_debug("%s: %d\n", __func__, __LINE__);
	client = ion_client_create(dev, "user");
	if (IS_ERR(client))
		return PTR_ERR(client);
	file->private_data = client;

	return 0;
}

static const struct file_operations ion_fops = {
	.owner          = THIS_MODULE,
	.open           = ion_open,
	.release        = ion_release,
	.unlocked_ioctl = ion_ioctl,
	.compat_ioctl   = compat_ion_ioctl,
};

static size_t ion_debug_heap_total(struct ion_client *client,
				   unsigned int id)
{
	size_t size = 0;
	struct rb_node *n;

	mutex_lock(&client->lock);
	for (n = rb_first(&client->handles); n; n = rb_next(n)) {
		struct ion_handle *handle = rb_entry(n,
						     struct ion_handle,
						     node);
		if (handle->buffer->heap->id == id)
			size += handle->buffer->size;
	}
	mutex_unlock(&client->lock);
	return size;
}

static int ion_debug_heap_show(struct seq_file *s, void *unused)
{
	struct ion_heap *heap = s->private;
	struct ion_device *dev = heap->dev;
	struct rb_node *n;
	size_t total_size = 0;
	size_t total_orphaned_size = 0;

	seq_printf(s, "%16.s %16.s %16.s\n", "client", "pid", "size");
	seq_printf(s, "----------------------------------------------------\n");

	for (n = rb_first(&dev->clients); n; n = rb_next(n)) {
		struct ion_client *client = rb_entry(n, struct ion_client,
						     node);
		size_t size = ion_debug_heap_total(client, heap->id);

		if (!size)
			continue;
		if (client->task) {
			char task_comm[TASK_COMM_LEN];

			get_task_comm(task_comm, client->task);
			seq_printf(s, "%16.s %16u %16zu\n", task_comm,
				   client->pid, size);
		} else {
			seq_printf(s, "%16.s %16u %16zu\n", client->name,
				   client->pid, size);
		}
	}
	seq_printf(s, "----------------------------------------------------\n");
	seq_printf(s, "orphaned allocations (info is from last known client):\n");
	mutex_lock(&dev->buffer_lock);
	for (n = rb_first(&dev->buffers); n; n = rb_next(n)) {
		struct ion_buffer *buffer = rb_entry(n, struct ion_buffer,
						     node);
		if (buffer->heap->id != heap->id)
			continue;
		total_size += buffer->size;
		if (!buffer->handle_count) {
			seq_printf(s, "%16.s %16u %16zu %d %d\n",
				   buffer->task_comm, buffer->pid,
				   buffer->size, buffer->kmap_cnt,
				   atomic_read(&buffer->ref.refcount));
			total_orphaned_size += buffer->size;
		}
	}
	mutex_unlock(&dev->buffer_lock);
	seq_printf(s, "----------------------------------------------------\n");
	seq_printf(s, "%16.s %16zu\n", "total orphaned",
		   total_orphaned_size);
	seq_printf(s, "%16.s %16zu\n", "total ", total_size);
	if (heap->flags & ION_HEAP_FLAG_DEFER_FREE)
		seq_printf(s, "%16.s %16zu\n", "deferred free",
			   heap->free_list_size);
	seq_printf(s, "----------------------------------------------------\n");

	if (heap->debug_show)
		heap->debug_show(heap, s, unused);

	return 0;
}

static int ion_debug_heap_open(struct inode *inode, struct file *file)
{
	return single_open(file, ion_debug_heap_show, inode->i_private);
}

static const struct file_operations debug_heap_fops = {
	.open = ion_debug_heap_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
};

#ifdef DEBUG_HEAP_SHRINKER
static int debug_shrink_set(void *data, u64 val)
{
	struct ion_heap *heap = data;
	struct shrink_control sc;
	int objs;

	sc.gfp_mask = -1;
	sc.nr_to_scan = 0;

	if (!val)
		return 0;

	objs = heap->shrinker.shrink(&heap->shrinker, &sc);
	sc.nr_to_scan = objs;

	heap->shrinker.shrink(&heap->shrinker, &sc);
	return 0;
}

static int debug_shrink_get(void *data, u64 *val)
{
	struct ion_heap *heap = data;
	struct shrink_control sc;
	int objs;

	sc.gfp_mask = -1;
	sc.nr_to_scan = 0;

	objs = heap->shrinker.shrink(&heap->shrinker, &sc);
	*val = objs;
	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(debug_shrink_fops, debug_shrink_get,
			debug_shrink_set, "%llu\n");
#endif

void ion_device_add_heap(struct ion_device *dev, struct ion_heap *heap)
{
	if (!heap->ops->allocate || !heap->ops->free || !heap->ops->map_dma ||
	    !heap->ops->unmap_dma)
		pr_err("%s: can not add heap with invalid ops struct.\n",
		       __func__);

	if (heap->flags & ION_HEAP_FLAG_DEFER_FREE)
		ion_heap_init_deferred_free(heap);

	heap->dev = dev;
	down_write(&dev->lock);
	/* use negative heap->id to reverse the priority -- when traversing
	   the list later attempt higher id numbers first */
	plist_node_init(&heap->node, -heap->id);
	plist_add(&heap->node, &dev->heaps);
	debugfs_create_file(heap->name, 0664, dev->debug_root, heap,
			    &debug_heap_fops);
#ifdef DEBUG_HEAP_SHRINKER
	if (heap->shrinker.shrink) {
		char debug_name[64];

		snprintf(debug_name, 64, "%s_shrink", heap->name);
		debugfs_create_file(debug_name, 0644, dev->debug_root, heap,
				    &debug_shrink_fops);
	}
#endif
	up_write(&dev->lock);
}
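
/*
 * Typical provisioning sequence from platform code (a sketch; the heap
 * constructors live in the individual heap implementations, e.g.
 * ion_heap_create() in ion_heap.c, and "pdata" is assumed to be the
 * board's struct ion_platform_data):
 *
 *	struct ion_device *idev = ion_device_create(NULL);
 *	struct ion_heap *heap = ion_heap_create(&pdata->heaps[0]);
 *
 *	ion_device_add_heap(idev, heap);
 */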

struct ion_device *ion_device_create(long (*custom_ioctl)
				     (struct ion_client *client,
				      unsigned int cmd,
				      unsigned long arg))
{
	struct ion_device *idev;
	int ret;

	idev = kzalloc(sizeof(struct ion_device), GFP_KERNEL);
	if (!idev)
		return ERR_PTR(-ENOMEM);

	idev->dev.minor = MISC_DYNAMIC_MINOR;
	idev->dev.name = "ion";
	idev->dev.fops = &ion_fops;
	idev->dev.parent = NULL;
	ret = misc_register(&idev->dev);
	if (ret) {
		pr_err("ion: failed to register misc device.\n");
		return ERR_PTR(ret);
	}

	idev->debug_root = debugfs_create_dir("ion", NULL);
	if (!idev->debug_root)
		pr_err("ion: failed to create debug files.\n");

	idev->custom_ioctl = custom_ioctl;
	idev->buffers = RB_ROOT;
	mutex_init(&idev->buffer_lock);
	init_rwsem(&idev->lock);
	plist_head_init(&idev->heaps);
	idev->clients = RB_ROOT;
	return idev;
}

void ion_device_destroy(struct ion_device *dev)
{
	misc_deregister(&dev->dev);
	/* XXX need to free the heaps and clients ? */
	kfree(dev);
}

void __init ion_reserve(struct ion_platform_data *data)
{
	int i;

	for (i = 0; i < data->nr; i++) {
		if (data->heaps[i].size == 0)
			continue;

		if (data->heaps[i].base == 0) {
			phys_addr_t paddr;

			paddr = memblock_alloc_base(data->heaps[i].size,
						    data->heaps[i].align,
						    MEMBLOCK_ALLOC_ANYWHERE);
			if (!paddr) {
				pr_err("%s: error allocating memblock for heap %d\n",
				       __func__, i);
				continue;
			}
			data->heaps[i].base = paddr;
		} else {
			int ret = memblock_reserve(data->heaps[i].base,
						   data->heaps[i].size);
			if (ret)
				pr_err("memblock reserve of %zx@%lx failed\n",
				       data->heaps[i].size,
				       data->heaps[i].base);
		}
		pr_info("%s: %s reserved base %lx size %zu\n", __func__,
			data->heaps[i].name,
			data->heaps[i].base,
			data->heaps[i].size);
	}
}