/*
 * drivers/staging/android/ion/ion_chunk_heap.c
 *
 * Copyright (C) 2012 Google, Inc.
 *
 * This software is licensed under the terms of the GNU General Public
 * License version 2, as published by the Free Software Foundation, and
 * may be copied, distributed, and modified under those terms.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 */
//#include <linux/spinlock.h>
#include <linux/dma-mapping.h>
#include <linux/err.h>
#include <linux/genalloc.h>
#include <linux/io.h>
#include <linux/mm.h>
#include <linux/scatterlist.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include "ion.h"
#include "ion_priv.h"
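
/*
 * Heap bookkeeping: the embedded ion_heap, the gen_pool the chunks are
 * carved from, the physical base and total size of the carveout, the
 * fixed chunk size, and a running count of allocated bytes.
 */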
struct ion_chunk_heap {
	struct ion_heap heap;
	struct gen_pool *pool;
	ion_phys_addr_t base;
	unsigned long chunk_size;
	unsigned long size;
	unsigned long allocated;
};
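
/*
 * Allocate a buffer as a scatterlist of fixed-size chunks pulled from
 * the gen_pool. Fails early if the request cannot fit in what remains
 * of the carveout, or if the buffer asks for faulted user mappings,
 * which this heap does not support.
 */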
static int ion_chunk_heap_allocate(struct ion_heap *heap,
				   struct ion_buffer *buffer,
				   unsigned long size, unsigned long align,
				   unsigned long flags)
{
	struct ion_chunk_heap *chunk_heap =
		container_of(heap, struct ion_chunk_heap, heap);
	struct sg_table *table;
	struct scatterlist *sg;
	int ret, i;
	unsigned long num_chunks;
	unsigned long allocated_size;

	if (ion_buffer_fault_user_mappings(buffer))
		return -ENOMEM;

	allocated_size = ALIGN(size, chunk_heap->chunk_size);
	num_chunks = allocated_size / chunk_heap->chunk_size;

	if (allocated_size > chunk_heap->size - chunk_heap->allocated)
		return -ENOMEM;

	table = kzalloc(sizeof(struct sg_table), GFP_KERNEL);
	if (!table)
		return -ENOMEM;
	ret = sg_alloc_table(table, num_chunks, GFP_KERNEL);
	if (ret) {
		kfree(table);
		return ret;
	}

	sg = table->sgl;
	for (i = 0; i < num_chunks; i++) {
		unsigned long paddr = gen_pool_alloc(chunk_heap->pool,
						     chunk_heap->chunk_size);
		if (!paddr)
			goto err;
		sg_set_page(sg, pfn_to_page(PFN_DOWN(paddr)),
			    chunk_heap->chunk_size, 0);
		sg = sg_next(sg);
	}

	buffer->priv_virt = table;
	chunk_heap->allocated += allocated_size;
	return 0;
err:
	sg = table->sgl;
	for (i -= 1; i >= 0; i--) {
		gen_pool_free(chunk_heap->pool, page_to_phys(sg_page(sg)),
			      sg->length);
		sg = sg_next(sg);
	}
	sg_free_table(table);
	kfree(table);
	return -ENOMEM;
}
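
/*
 * Return every chunk of a buffer to the gen_pool. The buffer is zeroed
 * first, and cached buffers are synced back to memory so the zeroing
 * actually lands before the chunks are reused.
 */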
static void ion_chunk_heap_free(struct ion_buffer *buffer)
{
	struct ion_heap *heap = buffer->heap;
	struct ion_chunk_heap *chunk_heap =
		container_of(heap, struct ion_chunk_heap, heap);
	struct sg_table *table = buffer->priv_virt;
	struct scatterlist *sg;
	int i;
	unsigned long allocated_size;

	allocated_size = ALIGN(buffer->size, chunk_heap->chunk_size);

	ion_heap_buffer_zero(buffer);

	if (ion_buffer_cached(buffer))
		dma_sync_sg_for_device(NULL, table->sgl, table->nents,
				       DMA_BIDIRECTIONAL);

	for_each_sg(table->sgl, sg, table->nents, i) {
		gen_pool_free(chunk_heap->pool, page_to_phys(sg_page(sg)),
			      sg->length);
	}
	chunk_heap->allocated -= allocated_size;
	sg_free_table(table);
	kfree(table);
}
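
/*
 * The scatterlist built at allocation time already describes the buffer,
 * so DMA mapping just hands it back and unmapping is a no-op.
 */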
struct sg_table *ion_chunk_heap_map_dma(struct ion_heap *heap,
					struct ion_buffer *buffer)
{
	return buffer->priv_virt;
}

void ion_chunk_heap_unmap_dma(struct ion_heap *heap,
			      struct ion_buffer *buffer)
{
}
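
/* Kernel and userspace mappings are handled by the generic ion helpers. */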
static struct ion_heap_ops chunk_heap_ops = {
	.allocate = ion_chunk_heap_allocate,
	.free = ion_chunk_heap_free,
	.map_dma = ion_chunk_heap_map_dma,
	.unmap_dma = ion_chunk_heap_unmap_dma,
	.map_user = ion_heap_map_user,
	.map_kernel = ion_heap_map_kernel,
	.unmap_kernel = ion_heap_unmap_kernel,
};
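
/*
 * Create a chunk heap over the carveout described by heap_data. The
 * chunk size arrives through heap_data->priv, and the gen_pool's
 * minimum allocation order is matched to it. The carveout is zeroed
 * one page at a time through a temporary writecombined kernel mapping,
 * then synced for the device before being handed to the pool.
 */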
struct ion_heap *ion_chunk_heap_create(struct ion_platform_heap *heap_data)
{
	struct ion_chunk_heap *chunk_heap;
	struct vm_struct *vm_struct;
	pgprot_t pgprot = pgprot_writecombine(PAGE_KERNEL);
	int i, ret;

	chunk_heap = kzalloc(sizeof(struct ion_chunk_heap), GFP_KERNEL);
	if (!chunk_heap)
		return ERR_PTR(-ENOMEM);

	chunk_heap->chunk_size = (unsigned long)heap_data->priv;
	chunk_heap->pool = gen_pool_create(get_order(chunk_heap->chunk_size) +
					   PAGE_SHIFT, -1);
	if (!chunk_heap->pool) {
		ret = -ENOMEM;
		goto error_gen_pool_create;
	}
	chunk_heap->base = heap_data->base;
	chunk_heap->size = heap_data->size;
	chunk_heap->allocated = 0;

	vm_struct = get_vm_area(PAGE_SIZE, VM_ALLOC);
	if (!vm_struct) {
		ret = -ENOMEM;
		goto error;
	}
	for (i = 0; i < chunk_heap->size; i += PAGE_SIZE) {
		struct page *page = pfn_to_page(PFN_DOWN(chunk_heap->base + i));
		struct page **pages = &page;

		ret = map_vm_area(vm_struct, pgprot, &pages);
		if (ret)
			goto error_map_vm_area;
		memset(vm_struct->addr, 0, PAGE_SIZE);
		unmap_kernel_range((unsigned long)vm_struct->addr, PAGE_SIZE);
	}
	free_vm_area(vm_struct);

	ion_pages_sync_for_device(NULL, pfn_to_page(PFN_DOWN(heap_data->base)),
				  heap_data->size, DMA_BIDIRECTIONAL);

	gen_pool_add(chunk_heap->pool, chunk_heap->base, heap_data->size, -1);
	chunk_heap->heap.ops = &chunk_heap_ops;
	chunk_heap->heap.type = ION_HEAP_TYPE_CHUNK;
	chunk_heap->heap.flags = ION_HEAP_FLAG_DEFER_FREE;
	pr_info("%s: base %lu size %zu align %ld\n", __func__, chunk_heap->base,
		heap_data->size, heap_data->align);

	return &chunk_heap->heap;

error_map_vm_area:
	free_vm_area(vm_struct);
error:
	gen_pool_destroy(chunk_heap->pool);
error_gen_pool_create:
	kfree(chunk_heap);
	return ERR_PTR(ret);
}
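
/*
 * Tear down a chunk heap. All buffers are expected to have been freed
 * already, since gen_pool_destroy() assumes the pool is no longer in use.
 */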
void ion_chunk_heap_destroy(struct ion_heap *heap)
{
	struct ion_chunk_heap *chunk_heap =
		container_of(heap, struct ion_chunk_heap, heap);

	gen_pool_destroy(chunk_heap->pool);
	kfree(chunk_heap);
}