/*
 * drivers/staging/android/ion/ion_chunk_heap.c
 *
 * Copyright (C) 2012 Google, Inc.
 *
 * This software is licensed under the terms of the GNU General Public
 * License version 2, as published by the Free Software Foundation, and
 * may be copied, distributed, and modified under those terms.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 */
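/*
 * The chunk heap hands out buffers as scatterlists of fixed-size,
 * power-of-two "chunks" carved from one physically contiguous
 * carveout region, managed through a genalloc pool.
 */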
//#include <linux/spinlock.h>
#include <linux/dma-mapping.h>
#include <linux/err.h>
#include <linux/genalloc.h>
#include <linux/io.h>
#include <linux/mm.h>
#include <linux/scatterlist.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include "ion.h"
#include "ion_priv.h"

#include <asm/mach/map.h>
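/*
 * Per-instance book-keeping: the backing genalloc pool, the physical
 * base and total size of the carveout, the fixed chunk size, and a
 * running count of bytes currently handed out.
 */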
struct ion_chunk_heap {
        struct ion_heap heap;
        struct gen_pool *pool;
        ion_phys_addr_t base;
        unsigned long chunk_size;
        unsigned long size;
        unsigned long allocated;
};
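/*
 * Round the request up to a whole number of chunks, then populate an
 * sg_table with one entry per chunk pulled from the genalloc pool.
 * On failure, every chunk grabbed so far is returned to the pool.
 */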
static int ion_chunk_heap_allocate(struct ion_heap *heap,
                                   struct ion_buffer *buffer,
                                   unsigned long size, unsigned long align,
                                   unsigned long flags)
{
        struct ion_chunk_heap *chunk_heap =
                container_of(heap, struct ion_chunk_heap, heap);
        struct sg_table *table;
        struct scatterlist *sg;
        int ret, i;
        unsigned long num_chunks;
        unsigned long allocated_size;

        /* Chunk buffers are mapped up front, never faulted in per page. */
        if (ion_buffer_fault_user_mappings(buffer))
                return -ENOMEM;

        allocated_size = ALIGN(size, chunk_heap->chunk_size);
        num_chunks = allocated_size / chunk_heap->chunk_size;

        if (allocated_size > chunk_heap->size - chunk_heap->allocated)
                return -ENOMEM;

        table = kzalloc(sizeof(struct sg_table), GFP_KERNEL);
        if (!table)
                return -ENOMEM;
        ret = sg_alloc_table(table, num_chunks, GFP_KERNEL);
        if (ret) {
                kfree(table);
                return ret;
        }

        sg = table->sgl;
        for (i = 0; i < num_chunks; i++) {
                unsigned long paddr = gen_pool_alloc(chunk_heap->pool,
                                                     chunk_heap->chunk_size);
                if (!paddr)
                        goto err;
                sg_set_page(sg, phys_to_page(paddr),
                            chunk_heap->chunk_size, 0);
                sg = sg_next(sg);
        }

        buffer->priv_virt = table;
        chunk_heap->allocated += allocated_size;
        return 0;
err:
        /* Unwind: hand back every chunk allocated before the failure. */
        sg = table->sgl;
        for (i -= 1; i >= 0; i--) {
                gen_pool_free(chunk_heap->pool, page_to_phys(sg_page(sg)),
                              sg->length);
                sg = sg_next(sg);
        }
        sg_free_table(table);
        kfree(table);
        return -ENOMEM;
}
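/*
 * Zero the buffer before handing its chunks back so stale data never
 * leaks to the next client, and sync cached buffers so the zeroes
 * actually reach memory.
 */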
static void ion_chunk_heap_free(struct ion_buffer *buffer)
{
        struct ion_heap *heap = buffer->heap;
        struct ion_chunk_heap *chunk_heap =
                container_of(heap, struct ion_chunk_heap, heap);
        struct sg_table *table = buffer->priv_virt;
        struct scatterlist *sg;
        int i;
        unsigned long allocated_size;

        allocated_size = ALIGN(buffer->size, chunk_heap->chunk_size);

        ion_heap_buffer_zero(buffer);

        if (ion_buffer_cached(buffer))
                dma_sync_sg_for_device(NULL, table->sgl, table->nents,
                                       DMA_BIDIRECTIONAL);

        for_each_sg(table->sgl, sg, table->nents, i) {
                gen_pool_free(chunk_heap->pool, page_to_phys(sg_page(sg)),
                              sg->length);
        }
        chunk_heap->allocated -= allocated_size;
        sg_free_table(table);
        kfree(table);
}
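/*
 * The sg_table built at allocation time already describes the buffer
 * for DMA, so mapping is a simple lookup and unmapping is a no-op.
 */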
static struct sg_table *ion_chunk_heap_map_dma(struct ion_heap *heap,
                                               struct ion_buffer *buffer)
{
        return buffer->priv_virt;
}

static void ion_chunk_heap_unmap_dma(struct ion_heap *heap,
                                     struct ion_buffer *buffer)
{
}
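/*
 * Kernel and user mappings reuse the generic ion_heap helpers, which
 * work from the buffer's sg_table; only allocation and free need
 * chunk-specific code.
 */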
static struct ion_heap_ops chunk_heap_ops = {
        .allocate = ion_chunk_heap_allocate,
        .free = ion_chunk_heap_free,
        .map_dma = ion_chunk_heap_map_dma,
        .unmap_dma = ion_chunk_heap_unmap_dma,
        .map_user = ion_heap_map_user,
        .map_kernel = ion_heap_map_kernel,
        .unmap_kernel = ion_heap_unmap_kernel,
};
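/*
 * Create a chunk heap from platform data: heap_data->base/size give
 * the carveout and heap_data->priv carries the chunk size.  Every
 * page is zeroed through a transient writecombine mapping, the range
 * is synced for device, and the whole carveout is added to the pool.
 *
 * Illustrative registration via board platform data (the values below
 * are made up; the field names are those of struct ion_platform_heap):
 *
 *      static struct ion_platform_heap chunk_heap_data = {
 *              .type = ION_HEAP_TYPE_CHUNK,
 *              .id   = 1,
 *              .name = "chunk",
 *              .base = 0x90000000,
 *              .size = SZ_16M,
 *              .priv = (void *)SZ_64K,  (the chunk size)
 *      };
 */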
struct ion_heap *ion_chunk_heap_create(struct ion_platform_heap *heap_data)
{
        struct ion_chunk_heap *chunk_heap;
        struct vm_struct *vm_struct;
        pgprot_t pgprot = pgprot_writecombine(PAGE_KERNEL);
        int i, ret;

        chunk_heap = kzalloc(sizeof(struct ion_chunk_heap), GFP_KERNEL);
        if (!chunk_heap)
                return ERR_PTR(-ENOMEM);

        chunk_heap->chunk_size = (unsigned long)heap_data->priv;
        chunk_heap->pool = gen_pool_create(get_order(chunk_heap->chunk_size) +
                                           PAGE_SHIFT, -1);
        if (!chunk_heap->pool) {
                ret = -ENOMEM;
                goto error_gen_pool_create;
        }
        chunk_heap->base = heap_data->base;
        chunk_heap->size = heap_data->size;
        chunk_heap->allocated = 0;

        /* Zero the carveout one page at a time through a scratch mapping. */
        vm_struct = get_vm_area(PAGE_SIZE, VM_ALLOC);
        if (!vm_struct) {
                ret = -ENOMEM;
                goto error;
        }
        for (i = 0; i < chunk_heap->size; i += PAGE_SIZE) {
                struct page *page = phys_to_page(chunk_heap->base + i);
                struct page **pages = &page;

                ret = map_vm_area(vm_struct, pgprot, &pages);
                if (ret)
                        goto error_map_vm_area;
                memset(vm_struct->addr, 0, PAGE_SIZE);
                unmap_kernel_range((unsigned long)vm_struct->addr, PAGE_SIZE);
        }
        free_vm_area(vm_struct);

        ion_pages_sync_for_device(NULL, pfn_to_page(PFN_DOWN(heap_data->base)),
                                  heap_data->size, DMA_BIDIRECTIONAL);

        gen_pool_add(chunk_heap->pool, chunk_heap->base, heap_data->size, -1);
        chunk_heap->heap.ops = &chunk_heap_ops;
        chunk_heap->heap.type = ION_HEAP_TYPE_CHUNK;
        chunk_heap->heap.flags = ION_HEAP_FLAG_DEFER_FREE;
        pr_info("%s: base %lu size %zu align %lu\n", __func__,
                chunk_heap->base, heap_data->size, heap_data->align);

        return &chunk_heap->heap;

error_map_vm_area:
        free_vm_area(vm_struct);
error:
        gen_pool_destroy(chunk_heap->pool);
error_gen_pool_create:
        kfree(chunk_heap);
        return ERR_PTR(ret);
}
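/* Tear-down mirrors create: release the pool, then the heap itself. */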
void ion_chunk_heap_destroy(struct ion_heap *heap)
{
        struct ion_chunk_heap *chunk_heap =
                container_of(heap, struct ion_chunk_heap, heap);

        gen_pool_destroy(chunk_heap->pool);
        kfree(chunk_heap);
}