/*
 * drivers/staging/android/ion/ion_chunk_heap.c
 *
 * Copyright (C) 2012 Google, Inc.
 *
 * This software is licensed under the terms of the GNU General Public
 * License version 2, as published by the Free Software Foundation, and
 * may be copied, distributed, and modified under those terms.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 */
//#include <linux/spinlock.h>
#include <linux/dma-mapping.h>
#include <linux/err.h>
#include <linux/genalloc.h>
#include <linux/io.h>
#include <linux/mm.h>
#include <linux/scatterlist.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include "ion.h"
#include "ion_priv.h"

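/**
 * struct ion_chunk_heap - physically contiguous heap carved into
 *			   fixed-size chunks
 * @heap:	embedded generic ion heap
 * @pool:	gen_pool handing out chunk-sized physical ranges
 * @base:	physical base address of the backing memory
 * @chunk_size:	size of each allocation unit, in bytes
 * @size:	total size of the heap, in bytes
 * @allocated:	number of bytes currently handed out
 */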
struct ion_chunk_heap {
	struct ion_heap heap;
	struct gen_pool *pool;
	ion_phys_addr_t base;
	unsigned long chunk_size;
	unsigned long size;
	unsigned long allocated;
};

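/*
 * Round the request up to a whole number of chunks, then pull one chunk
 * at a time out of the gen_pool and record each as a scatterlist entry.
 * Buffers requesting faulting user mappings are rejected up front, and
 * the allocation fails early if the pool cannot hold the rounded size.
 */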
static int ion_chunk_heap_allocate(struct ion_heap *heap,
				   struct ion_buffer *buffer,
				   unsigned long size, unsigned long align,
				   unsigned long flags)
{
	struct ion_chunk_heap *chunk_heap =
		container_of(heap, struct ion_chunk_heap, heap);
	struct sg_table *table;
	struct scatterlist *sg;
	int ret, i;
	unsigned long num_chunks;
	unsigned long allocated_size;

	if (ion_buffer_fault_user_mappings(buffer))
		return -ENOMEM;

	allocated_size = ALIGN(size, chunk_heap->chunk_size);
	num_chunks = allocated_size / chunk_heap->chunk_size;

	if (allocated_size > chunk_heap->size - chunk_heap->allocated)
		return -ENOMEM;

	table = kzalloc(sizeof(struct sg_table), GFP_KERNEL);
	if (!table)
		return -ENOMEM;
	ret = sg_alloc_table(table, num_chunks, GFP_KERNEL);
	if (ret) {
		kfree(table);
		return ret;
	}

	sg = table->sgl;
	for (i = 0; i < num_chunks; i++) {
		unsigned long paddr = gen_pool_alloc(chunk_heap->pool,
						     chunk_heap->chunk_size);
		if (!paddr)
			goto err;
		sg_set_page(sg, phys_to_page(paddr), chunk_heap->chunk_size, 0);
		sg = sg_next(sg);
	}

	buffer->priv_virt = table;
	chunk_heap->allocated += allocated_size;
	return 0;
err:
	sg = table->sgl;
	for (i -= 1; i >= 0; i--) {
		gen_pool_free(chunk_heap->pool, page_to_phys(sg_page(sg)),
			      sg->length);
		sg = sg_next(sg);
	}
	sg_free_table(table);
	kfree(table);
	return -ENOMEM;
}

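/*
 * Zero the buffer before the chunks go back into the pool, flushing the
 * zeroed lines out of the CPU cache for cached buffers so the next user
 * cannot observe stale data, then return every scatterlist entry to the
 * gen_pool.
 */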
static void ion_chunk_heap_free(struct ion_buffer *buffer)
{
	struct ion_heap *heap = buffer->heap;
	struct ion_chunk_heap *chunk_heap =
		container_of(heap, struct ion_chunk_heap, heap);
	struct sg_table *table = buffer->priv_virt;
	struct scatterlist *sg;
	int i;
	unsigned long allocated_size;

	allocated_size = ALIGN(buffer->size, chunk_heap->chunk_size);

	ion_heap_buffer_zero(buffer);

	if (ion_buffer_cached(buffer))
		dma_sync_sg_for_device(NULL, table->sgl, table->nents,
				       DMA_BIDIRECTIONAL);

	for_each_sg(table->sgl, sg, table->nents, i) {
		gen_pool_free(chunk_heap->pool, page_to_phys(sg_page(sg)),
			      sg->length);
	}
	chunk_heap->allocated -= allocated_size;
	sg_free_table(table);
	kfree(table);
}

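/*
 * The sg_table was already built at allocation time and stashed in
 * priv_virt, so mapping for DMA is just handing it back; there is
 * nothing to tear down on unmap.
 */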
static struct sg_table *ion_chunk_heap_map_dma(struct ion_heap *heap,
					       struct ion_buffer *buffer)
{
	return buffer->priv_virt;
}

static void ion_chunk_heap_unmap_dma(struct ion_heap *heap,
				     struct ion_buffer *buffer)
{
}

static struct ion_heap_ops chunk_heap_ops = {
	.allocate = ion_chunk_heap_allocate,
	.free = ion_chunk_heap_free,
	.map_dma = ion_chunk_heap_map_dma,
	.unmap_dma = ion_chunk_heap_unmap_dma,
	.map_user = ion_heap_map_user,
	.map_kernel = ion_heap_map_kernel,
	.unmap_kernel = ion_heap_unmap_kernel,
};

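/*
 * The chunk size is passed in through the platform heap's priv field,
 * and the gen_pool's minimum allocation order is set so every pool
 * allocation is a whole chunk.  Each page of the carveout is zeroed
 * through a temporary write-combined kernel mapping (rather than an
 * ARM-specific __arm_ioremap of the whole region), then the range is
 * synced for the device so the zeroes are visible to hardware before
 * the memory enters the pool.
 */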
struct ion_heap *ion_chunk_heap_create(struct ion_platform_heap *heap_data)
{
	struct ion_chunk_heap *chunk_heap;
	struct vm_struct *vm_struct;
	pgprot_t pgprot = pgprot_writecombine(PAGE_KERNEL);
	int i, ret;

	chunk_heap = kzalloc(sizeof(struct ion_chunk_heap), GFP_KERNEL);
	if (!chunk_heap)
		return ERR_PTR(-ENOMEM);

	chunk_heap->chunk_size = (unsigned long)heap_data->priv;
	chunk_heap->pool = gen_pool_create(get_order(chunk_heap->chunk_size) +
					   PAGE_SHIFT, -1);
	if (!chunk_heap->pool) {
		ret = -ENOMEM;
		goto error_gen_pool_create;
	}
	chunk_heap->base = heap_data->base;
	chunk_heap->size = heap_data->size;
	chunk_heap->allocated = 0;

	vm_struct = get_vm_area(PAGE_SIZE, VM_ALLOC);
	if (!vm_struct) {
		ret = -ENOMEM;
		goto error;
	}
	for (i = 0; i < chunk_heap->size; i += PAGE_SIZE) {
		struct page *page = phys_to_page(chunk_heap->base + i);
		struct page **pages = &page;

		ret = map_vm_area(vm_struct, pgprot, &pages);
		if (ret)
			goto error_map_vm_area;
		memset(vm_struct->addr, 0, PAGE_SIZE);
		unmap_kernel_range((unsigned long)vm_struct->addr, PAGE_SIZE);
	}
	free_vm_area(vm_struct);

	ion_pages_sync_for_device(NULL, pfn_to_page(PFN_DOWN(heap_data->base)),
				  heap_data->size, DMA_BIDIRECTIONAL);

	gen_pool_add(chunk_heap->pool, chunk_heap->base, heap_data->size, -1);
	chunk_heap->heap.ops = &chunk_heap_ops;
	chunk_heap->heap.type = ION_HEAP_TYPE_CHUNK;
	chunk_heap->heap.flags = ION_HEAP_FLAG_DEFER_FREE;
	pr_info("%s: base %lu size %zu align %ld\n", __func__, chunk_heap->base,
		heap_data->size, heap_data->align);

	return &chunk_heap->heap;

error_map_vm_area:
	free_vm_area(vm_struct);
error:
	gen_pool_destroy(chunk_heap->pool);
error_gen_pool_create:
	kfree(chunk_heap);
	return ERR_PTR(ret);
}

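/*
 * Tear-down assumes all buffers have already been freed back to the
 * pool; gen_pool_destroy() will BUG if allocations are still
 * outstanding.
 */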
void ion_chunk_heap_destroy(struct ion_heap *heap)
{
	struct ion_chunk_heap *chunk_heap =
		container_of(heap, struct ion_chunk_heap, heap);

	gen_pool_destroy(chunk_heap->pool);
	kfree(chunk_heap);
}
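
/*
 * Illustrative only (not part of the original file): a board file might
 * describe a chunk heap with platform data along these lines, with the
 * chunk size passed through ->priv as ion_chunk_heap_create() expects.
 * The id, name, base, size and align values below are hypothetical
 * placeholders, not values from any real platform.
 */
#if 0
static struct ion_platform_heap example_chunk_heap = {
	.type  = ION_HEAP_TYPE_CHUNK,
	.id    = 1,
	.name  = "chunk",
	.base  = 0x40000000,		/* physical base of the carveout */
	.size  = 16 * 1024 * 1024,	/* 16 MiB carveout */
	.align = 0x10000,
	.priv  = (void *)0x10000,	/* 64 KiB chunks */
};
#endif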