/*
 * drivers/staging/android/ion/ion_chunk_heap.c
 *
 * Copyright (C) 2012 Google, Inc.
 *
 * This software is licensed under the terms of the GNU General Public
 * License version 2, as published by the Free Software Foundation, and
 * may be copied, distributed, and modified under those terms.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 */
#include <linux/dma-mapping.h>
#include <linux/err.h>
#include <linux/genalloc.h>
#include <linux/io.h>
#include <linux/mm.h>
#include <linux/scatterlist.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include "ion.h"
#include "ion_priv.h"

#include <asm/mach/map.h>

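/**
 * struct ion_chunk_heap - physically contiguous carveout managed in chunks
 * @heap:	the generic ion_heap this heap embeds
 * @pool:	gen_pool handing out fixed-size chunks of the carveout
 * @base:	physical base address of the carveout
 * @chunk_size:	allocation granularity, taken from heap_data->priv
 * @size:	total size of the carveout in bytes
 * @allocated:	bytes currently handed out to buffers
 */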
struct ion_chunk_heap {
	struct ion_heap heap;
	struct gen_pool *pool;
	ion_phys_addr_t base;
	unsigned long chunk_size;
	unsigned long size;
	unsigned long allocated;
};

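/*
 * Allocations are rounded up to a whole number of chunks and backed by one
 * scatterlist entry per chunk.  For example (assuming a hypothetical 64 KiB
 * chunk_size), a 100 KiB request rounds up to 128 KiB and is satisfied by
 * pulling two chunks from the gen_pool.
 */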
static int ion_chunk_heap_allocate(struct ion_heap *heap,
				   struct ion_buffer *buffer,
				   unsigned long size, unsigned long align,
				   unsigned long flags)
{
	struct ion_chunk_heap *chunk_heap =
		container_of(heap, struct ion_chunk_heap, heap);
	struct sg_table *table;
	struct scatterlist *sg;
	int ret, i;
	unsigned long num_chunks;
	unsigned long allocated_size;

	if (ion_buffer_fault_user_mappings(buffer))
		return -ENOMEM;

	allocated_size = ALIGN(size, chunk_heap->chunk_size);
	num_chunks = allocated_size / chunk_heap->chunk_size;

	if (allocated_size > chunk_heap->size - chunk_heap->allocated)
		return -ENOMEM;

	table = kzalloc(sizeof(*table), GFP_KERNEL);
	if (!table)
		return -ENOMEM;
	ret = sg_alloc_table(table, num_chunks, GFP_KERNEL);
	if (ret) {
		kfree(table);
		return ret;
	}

	sg = table->sgl;
	for (i = 0; i < num_chunks; i++) {
		unsigned long paddr = gen_pool_alloc(chunk_heap->pool,
						     chunk_heap->chunk_size);
		if (!paddr)
			goto err;
		sg_set_page(sg, phys_to_page(paddr), chunk_heap->chunk_size, 0);
		sg = sg_next(sg);
	}

	buffer->priv_virt = table;
	chunk_heap->allocated += allocated_size;
	return 0;
err:
	/* unwind: return the chunks allocated so far, then drop the table */
	sg = table->sgl;
	for (i -= 1; i >= 0; i--) {
		gen_pool_free(chunk_heap->pool, page_to_phys(sg_page(sg)),
			      sg->length);
		sg = sg_next(sg);
	}
	sg_free_table(table);
	kfree(table);
	return -ENOMEM;
}

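/*
 * Freeing zeroes the buffer before its chunks go back to the pool, so a
 * later allocation cannot observe a previous client's data; for cached
 * buffers the sg list is additionally synced so no dirty cache lines are
 * written back over a chunk after it has been reused.
 */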
static void ion_chunk_heap_free(struct ion_buffer *buffer)
{
	struct ion_heap *heap = buffer->heap;
	struct ion_chunk_heap *chunk_heap =
		container_of(heap, struct ion_chunk_heap, heap);
	struct sg_table *table = buffer->priv_virt;
	struct scatterlist *sg;
	int i;
	unsigned long allocated_size;

	allocated_size = ALIGN(buffer->size, chunk_heap->chunk_size);

	ion_heap_buffer_zero(buffer);

	if (ion_buffer_cached(buffer))
		dma_sync_sg_for_device(NULL, table->sgl, table->nents,
				       DMA_BIDIRECTIONAL);

	for_each_sg(table->sgl, sg, table->nents, i) {
		gen_pool_free(chunk_heap->pool, page_to_phys(sg_page(sg)),
			      sg->length);
	}
	chunk_heap->allocated -= allocated_size;
	sg_free_table(table);
	kfree(table);
}

static struct sg_table *ion_chunk_heap_map_dma(struct ion_heap *heap,
					       struct ion_buffer *buffer)
{
	/* the sg_table was built at allocation time and lives in priv_virt */
	return buffer->priv_virt;
}

static void ion_chunk_heap_unmap_dma(struct ion_heap *heap,
				     struct ion_buffer *buffer)
{
}

static struct ion_heap_ops chunk_heap_ops = {
	.allocate = ion_chunk_heap_allocate,
	.free = ion_chunk_heap_free,
	.map_dma = ion_chunk_heap_map_dma,
	.unmap_dma = ion_chunk_heap_unmap_dma,
	.map_user = ion_heap_map_user,
	.map_kernel = ion_heap_map_kernel,
	.unmap_kernel = ion_heap_unmap_kernel,
};

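/*
 * A chunk heap is registered from platform data, with the chunk size
 * carried in the opaque priv field.  A minimal sketch of a caller
 * (hypothetical names, addresses, and sizes, not from this file):
 *
 *	static struct ion_platform_heap example_chunk_heap_data = {
 *		.type = ION_HEAP_TYPE_CHUNK,
 *		.id   = 1,
 *		.name = "chunk",
 *		.base = 0x40000000,
 *		.size = SZ_16M,
 *		.priv = (void *)SZ_64K,		// chunk_size
 *	};
 *	heap = ion_chunk_heap_create(&example_chunk_heap_data);
 */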
struct ion_heap *ion_chunk_heap_create(struct ion_platform_heap *heap_data)
{
	struct ion_chunk_heap *chunk_heap;
	struct vm_struct *vm_struct;
	pgprot_t pgprot = pgprot_writecombine(PAGE_KERNEL);
	int i, ret;

	chunk_heap = kzalloc(sizeof(*chunk_heap), GFP_KERNEL);
	if (!chunk_heap)
		return ERR_PTR(-ENOMEM);

	chunk_heap->chunk_size = (unsigned long)heap_data->priv;
	chunk_heap->pool = gen_pool_create(get_order(chunk_heap->chunk_size) +
					   PAGE_SHIFT, -1);
	if (!chunk_heap->pool) {
		ret = -ENOMEM;
		goto error_gen_pool_create;
	}
	chunk_heap->base = heap_data->base;
	chunk_heap->size = heap_data->size;
	chunk_heap->allocated = 0;

	/*
	 * Zero the carveout through a transient single-page write-combine
	 * mapping, one page at a time, so the whole region never needs a
	 * permanent kernel mapping.
	 */
	vm_struct = get_vm_area(PAGE_SIZE, VM_ALLOC);
	if (!vm_struct) {
		ret = -ENOMEM;
		goto error;
	}
	for (i = 0; i < chunk_heap->size; i += PAGE_SIZE) {
		struct page *page = phys_to_page(chunk_heap->base + i);
		struct page **pages = &page;

		ret = map_vm_area(vm_struct, pgprot, &pages);
		if (ret)
			goto error_map_vm_area;
		memset(vm_struct->addr, 0, PAGE_SIZE);
		unmap_kernel_range((unsigned long)vm_struct->addr, PAGE_SIZE);
	}
	free_vm_area(vm_struct);

	ion_pages_sync_for_device(NULL, pfn_to_page(PFN_DOWN(heap_data->base)),
				  heap_data->size, DMA_BIDIRECTIONAL);

	gen_pool_add(chunk_heap->pool, chunk_heap->base, heap_data->size, -1);
	chunk_heap->heap.ops = &chunk_heap_ops;
	chunk_heap->heap.type = ION_HEAP_TYPE_CHUNK;
	chunk_heap->heap.flags = ION_HEAP_FLAG_DEFER_FREE;
	pr_info("%s: base %lu size %zu align %ld\n", __func__, chunk_heap->base,
		heap_data->size, heap_data->align);

	return &chunk_heap->heap;

error_map_vm_area:
	free_vm_area(vm_struct);
error:
	gen_pool_destroy(chunk_heap->pool);
error_gen_pool_create:
	kfree(chunk_heap);
	return ERR_PTR(ret);
}

void ion_chunk_heap_destroy(struct ion_heap *heap)
{
	struct ion_chunk_heap *chunk_heap =
		container_of(heap, struct ion_chunk_heap, heap);

	gen_pool_destroy(chunk_heap->pool);
	kfree(chunk_heap);
}