ion: convert map_kernel to return ERR_PTR

/*
 * drivers/staging/android/ion/ion_heap.c
 *
 * Copyright (C) 2011 Google, Inc.
 *
 * This software is licensed under the terms of the GNU General Public
 * License version 2, as published by the Free Software Foundation, and
 * may be copied, distributed, and modified under those terms.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 */
#include <linux/err.h>
#include <linux/freezer.h>
#include <linux/kthread.h>
#include <linux/mm.h>
#include <linux/rtmutex.h>
#include <linux/sched.h>
#include <linux/scatterlist.h>
#include <linux/vmalloc.h>
#include "ion.h"
#include "ion_priv.h"
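
/*
 * ion_heap_map_kernel - map a whole buffer into kernel space
 *
 * Gathers the pages behind every scatterlist entry into a flat page
 * array and hands it to vmap().  Cached buffers keep normal kernel
 * page protections; uncached buffers are mapped write-combined.
 * Returns a kernel virtual address on success or an ERR_PTR on
 * failure, never NULL.
 */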
void *ion_heap_map_kernel(struct ion_heap *heap,
                          struct ion_buffer *buffer)
{
        struct scatterlist *sg;
        int i, j;
        void *vaddr;
        pgprot_t pgprot;
        struct sg_table *table = buffer->sg_table;
        int npages = PAGE_ALIGN(buffer->size) / PAGE_SIZE;
        struct page **pages = vmalloc(sizeof(struct page *) * npages);
        struct page **tmp = pages;

        if (!pages)
                return ERR_PTR(-ENOMEM);

        if (buffer->flags & ION_FLAG_CACHED)
                pgprot = PAGE_KERNEL;
        else
                pgprot = pgprot_writecombine(PAGE_KERNEL);

        for_each_sg(table->sgl, sg, table->nents, i) {
                int npages_this_entry = PAGE_ALIGN(sg_dma_len(sg)) / PAGE_SIZE;
                struct page *page = sg_page(sg);

                BUG_ON(i >= npages);
                for (j = 0; j < npages_this_entry; j++)
                        *(tmp++) = page++;
        }
        vaddr = vmap(pages, npages, VM_MAP, pgprot);
        vfree(pages);

        if (vaddr == NULL)
                return ERR_PTR(-ENOMEM);

        return vaddr;
}

void ion_heap_unmap_kernel(struct ion_heap *heap,
                           struct ion_buffer *buffer)
{
        vunmap(buffer->vaddr);
}
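
/*
 * ion_heap_map_user - map a buffer into a userspace VMA
 *
 * vm_pgoff is interpreted as a page offset into the buffer.
 * Scatterlist entries that fall wholly before the offset are skipped;
 * each remaining entry is mapped with remap_pfn_range() until the VMA
 * has been filled.
 */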
int ion_heap_map_user(struct ion_heap *heap, struct ion_buffer *buffer,
                      struct vm_area_struct *vma)
{
        struct sg_table *table = buffer->sg_table;
        unsigned long addr = vma->vm_start;
        unsigned long offset = vma->vm_pgoff * PAGE_SIZE;
        struct scatterlist *sg;
        int i;
        int ret;

        for_each_sg(table->sgl, sg, table->nents, i) {
                struct page *page = sg_page(sg);
                unsigned long remainder = vma->vm_end - addr;
                unsigned long len = sg_dma_len(sg);

                if (offset >= sg_dma_len(sg)) {
                        offset -= sg_dma_len(sg);
                        continue;
                } else if (offset) {
                        page += offset / PAGE_SIZE;
                        len = sg_dma_len(sg) - offset;
                        offset = 0;
                }
                len = min(len, remainder);
                ret = remap_pfn_range(vma, addr, page_to_pfn(page), len,
                                      vma->vm_page_prot);
                if (ret)
                        return ret;
                addr += len;
                if (addr >= vma->vm_end)
                        return 0;
        }
        return 0;
}
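
/*
 * ion_heap_buffer_zero - clear a buffer page by page
 *
 * Reserves a single-page vm area, then maps each page of the buffer
 * into it in turn, memsets it to zero, and unmaps it again, so the
 * whole buffer is cleared without building a large kernel mapping.
 */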
int ion_heap_buffer_zero(struct ion_buffer *buffer)
{
        struct sg_table *table = buffer->sg_table;
        pgprot_t pgprot;
        struct scatterlist *sg;
        struct vm_struct *vm_struct;
        int i, j, ret = 0;

        if (buffer->flags & ION_FLAG_CACHED)
                pgprot = PAGE_KERNEL;
        else
                pgprot = pgprot_writecombine(PAGE_KERNEL);

        vm_struct = get_vm_area(PAGE_SIZE, VM_ALLOC);
        if (!vm_struct)
                return -ENOMEM;

        for_each_sg(table->sgl, sg, table->nents, i) {
                struct page *page = sg_page(sg);
                unsigned long len = sg_dma_len(sg);

                for (j = 0; j < len / PAGE_SIZE; j++) {
                        struct page *sub_page = page + j;
                        struct page **pages = &sub_page;

                        ret = map_vm_area(vm_struct, pgprot, &pages);
                        if (ret)
                                goto end;
                        memset(vm_struct->addr, 0, PAGE_SIZE);
                        unmap_kernel_range((unsigned long)vm_struct->addr,
                                           PAGE_SIZE);
                }
        }
end:
        free_vm_area(vm_struct);
        return ret;
}
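
/*
 * ion_heap_alloc_pages - allocate 2^order pages for a buffer
 *
 * If the buffer is faulted into userspace page by page, the
 * high-order allocation is split into individual pages so each one
 * can be mapped and freed independently.
 */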
struct page *ion_heap_alloc_pages(struct ion_buffer *buffer, gfp_t gfp_flags,
                                  unsigned int order)
{
        struct page *page = alloc_pages(gfp_flags, order);

        if (!page)
                return page;

        if (ion_buffer_fault_user_mappings(buffer))
                split_page(page, order);

        return page;
}
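
/*
 * ion_heap_free_pages - undo ion_heap_alloc_pages
 *
 * Frees the pages as one high-order block, or page by page if the
 * allocation was split for user faulting.
 */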
void ion_heap_free_pages(struct ion_buffer *buffer, struct page *page,
                         unsigned int order)
{
        int i;

        if (!ion_buffer_fault_user_mappings(buffer)) {
                __free_pages(page, order);
                return;
        }
        for (i = 0; i < (1 << order); i++)
                __free_page(page + i);
}
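
/*
 * Deferred free: instead of destroying buffers in the caller's
 * context, heaps can queue them on free_list and let a low-priority
 * kernel thread (ion_heap_deferred_free below) destroy them.
 * ion_heap_freelist_add queues a buffer and wakes that thread.
 */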
void ion_heap_freelist_add(struct ion_heap *heap, struct ion_buffer *buffer)
{
        rt_mutex_lock(&heap->lock);
        list_add(&buffer->list, &heap->free_list);
        heap->free_list_size += buffer->size;
        rt_mutex_unlock(&heap->lock);
        wake_up(&heap->waitqueue);
}

size_t ion_heap_freelist_size(struct ion_heap *heap)
{
        size_t size;

        rt_mutex_lock(&heap->lock);
        size = heap->free_list_size;
        rt_mutex_unlock(&heap->lock);

        return size;
}
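
/*
 * ion_heap_freelist_drain - destroy queued buffers immediately
 *
 * Drains up to @size bytes of buffers from the freelist; a @size of 0
 * drains everything currently queued.  Returns the number of bytes
 * actually drained.
 */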
size_t ion_heap_freelist_drain(struct ion_heap *heap, size_t size)
{
        struct ion_buffer *buffer, *tmp;
        size_t total_drained = 0;

        if (ion_heap_freelist_size(heap) == 0)
                return 0;

        rt_mutex_lock(&heap->lock);
        if (size == 0)
                size = heap->free_list_size;

        list_for_each_entry_safe(buffer, tmp, &heap->free_list, list) {
                if (total_drained >= size)
                        break;
                list_del(&buffer->list);
                /* account the size before the buffer is freed */
                heap->free_list_size -= buffer->size;
                total_drained += buffer->size;
                ion_buffer_destroy(buffer);
        }
        rt_mutex_unlock(&heap->lock);

        return total_drained;
}
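
/*
 * ion_heap_deferred_free - body of the deferred-free kernel thread
 *
 * Sleeps (freezably) until the freelist is non-empty, then pops one
 * buffer at a time and destroys it outside the heap lock.
 */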
int ion_heap_deferred_free(void *data)
{
        struct ion_heap *heap = data;

        while (true) {
                struct ion_buffer *buffer;

                wait_event_freezable(heap->waitqueue,
                                     ion_heap_freelist_size(heap) > 0);

                rt_mutex_lock(&heap->lock);
                if (list_empty(&heap->free_list)) {
                        rt_mutex_unlock(&heap->lock);
                        continue;
                }
                buffer = list_first_entry(&heap->free_list, struct ion_buffer,
                                          list);
                list_del(&buffer->list);
                heap->free_list_size -= buffer->size;
                rt_mutex_unlock(&heap->lock);
                ion_buffer_destroy(buffer);
        }

        return 0;
}
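
/*
 * ion_heap_init_deferred_free - set up the freelist and its thread
 *
 * Initializes the freelist state and starts the deferred-free thread
 * at SCHED_IDLE priority so freeing never competes with real work.
 */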
int ion_heap_init_deferred_free(struct ion_heap *heap)
{
        struct sched_param param = { .sched_priority = 0 };

        INIT_LIST_HEAD(&heap->free_list);
        heap->free_list_size = 0;
        rt_mutex_init(&heap->lock);
        init_waitqueue_head(&heap->waitqueue);
        heap->task = kthread_run(ion_heap_deferred_free, heap,
                                 "%s", heap->name);
        if (IS_ERR(heap->task)) {
                pr_err("%s: creating thread for deferred free failed\n",
                       __func__);
                return PTR_RET(heap->task);
        }
        sched_setscheduler(heap->task, SCHED_IDLE, &param);
        return 0;
}
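
/*
 * ion_heap_create - instantiate a heap from platform data
 *
 * Dispatches on heap_data->type to the matching heap constructor and
 * fills in the generic name and id fields.  Returns ERR_PTR(-EINVAL)
 * for unknown types or constructor failure.
 */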
struct ion_heap *ion_heap_create(struct ion_platform_heap *heap_data)
{
        struct ion_heap *heap = NULL;

        switch (heap_data->type) {
        case ION_HEAP_TYPE_SYSTEM_CONTIG:
                heap = ion_system_contig_heap_create(heap_data);
                break;
        case ION_HEAP_TYPE_SYSTEM:
                heap = ion_system_heap_create(heap_data);
                break;
        case ION_HEAP_TYPE_CARVEOUT:
                heap = ion_carveout_heap_create(heap_data);
                break;
        case ION_HEAP_TYPE_CHUNK:
                heap = ion_chunk_heap_create(heap_data);
                break;
        case ION_HEAP_TYPE_DMA:
                heap = ion_cma_heap_create(heap_data);
                break;
        default:
                pr_err("%s: Invalid heap type %d\n", __func__,
                       heap_data->type);
                return ERR_PTR(-EINVAL);
        }

        if (IS_ERR_OR_NULL(heap)) {
                pr_err("%s: error creating heap %s type %d base %lu size %zu\n",
                       __func__, heap_data->name, heap_data->type,
                       heap_data->base, heap_data->size);
                return ERR_PTR(-EINVAL);
        }

        heap->name = heap_data->name;
        heap->id = heap_data->id;
        return heap;
}
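
/*
 * ion_heap_destroy - tear down a heap created by ion_heap_create,
 * dispatching on heap->type to the matching destructor.
 */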
void ion_heap_destroy(struct ion_heap *heap)
{
        if (!heap)
                return;

        switch (heap->type) {
        case ION_HEAP_TYPE_SYSTEM_CONTIG:
                ion_system_contig_heap_destroy(heap);
                break;
        case ION_HEAP_TYPE_SYSTEM:
                ion_system_heap_destroy(heap);
                break;
        case ION_HEAP_TYPE_CARVEOUT:
                ion_carveout_heap_destroy(heap);
                break;
        case ION_HEAP_TYPE_CHUNK:
                ion_chunk_heap_destroy(heap);
                break;
        case ION_HEAP_TYPE_DMA:
                ion_cma_heap_destroy(heap);
                break;
        default:
                pr_err("%s: Invalid heap type %d\n", __func__,
                       heap->type);
        }
}