/*
 * drivers/staging/android/ion/ion_system_heap.c
 *
 * Copyright (C) 2011 Google, Inc.
 *
 * This software is licensed under the terms of the GNU General Public
 * License version 2, as published by the Free Software Foundation, and
 * may be copied, distributed, and modified under those terms.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 */
#include <linux/dma-mapping.h>
#include <linux/err.h>
#include <linux/highmem.h>
#include <linux/scatterlist.h>
#include <linux/seq_file.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
29 static unsigned int high_order_gfp_flags
= (GFP_HIGHUSER
| __GFP_ZERO
|
30 __GFP_NOWARN
| __GFP_NORETRY
) &
32 static unsigned int low_order_gfp_flags
= (GFP_HIGHUSER
| __GFP_ZERO
|
34 static const unsigned int orders
[] = {8, 4, 0};
35 static const int num_orders
= ARRAY_SIZE(orders
);
36 static int order_to_index(unsigned int order
)
39 for (i
= 0; i
< num_orders
; i
++)
40 if (order
== orders
[i
])
46 static unsigned int order_to_size(int order
)
48 return PAGE_SIZE
<< order
;
51 struct ion_system_heap
{
53 struct ion_page_pool
**pools
;
59 struct list_head list
;
62 static struct page
*alloc_buffer_page(struct ion_system_heap
*heap
,
63 struct ion_buffer
*buffer
,
66 bool cached
= ion_buffer_cached(buffer
);
67 struct ion_page_pool
*pool
= heap
->pools
[order_to_index(order
)];
71 page
= ion_page_pool_alloc(pool
);
73 gfp_t gfp_flags
= low_order_gfp_flags
;
76 gfp_flags
= high_order_gfp_flags
;
77 page
= ion_heap_alloc_pages(buffer
, gfp_flags
, order
);
80 ion_pages_sync_for_device(NULL
, page
, PAGE_SIZE
<< order
,
89 static void free_buffer_page(struct ion_system_heap
*heap
,
90 struct ion_buffer
*buffer
, struct page
*page
,
93 bool cached
= ion_buffer_cached(buffer
);
94 bool split_pages
= ion_buffer_fault_user_mappings(buffer
);
98 struct ion_page_pool
*pool
= heap
->pools
[order_to_index(order
)];
99 ion_page_pool_free(pool
, page
);
100 } else if (split_pages
) {
101 for (i
= 0; i
< (1 << order
); i
++)
102 __free_page(page
+ i
);
104 __free_pages(page
, order
);
109 static struct page_info
*alloc_largest_available(struct ion_system_heap
*heap
,
110 struct ion_buffer
*buffer
,
112 unsigned int max_order
)
115 struct page_info
*info
;
118 for (i
= 0; i
< num_orders
; i
++) {
119 if (size
< order_to_size(orders
[i
]))
121 if (max_order
< orders
[i
])
124 page
= alloc_buffer_page(heap
, buffer
, orders
[i
]);
128 info
= kmalloc(sizeof(struct page_info
), GFP_KERNEL
);
130 info
->order
= orders
[i
];
136 static int ion_system_heap_allocate(struct ion_heap
*heap
,
137 struct ion_buffer
*buffer
,
138 unsigned long size
, unsigned long align
,
141 struct ion_system_heap
*sys_heap
= container_of(heap
,
142 struct ion_system_heap
,
144 struct sg_table
*table
;
145 struct scatterlist
*sg
;
147 struct list_head pages
;
148 struct page_info
*info
, *tmp_info
;
150 long size_remaining
= PAGE_ALIGN(size
);
151 unsigned int max_order
= orders
[0];
153 if (align
> PAGE_SIZE
)
156 if (ion_buffer_fault_user_mappings(buffer
))
159 INIT_LIST_HEAD(&pages
);
160 while (size_remaining
> 0) {
161 info
= alloc_largest_available(sys_heap
, buffer
, size_remaining
, max_order
);
164 list_add_tail(&info
->list
, &pages
);
165 size_remaining
-= (1 << info
->order
) * PAGE_SIZE
;
166 max_order
= info
->order
;
170 table
= kmalloc(sizeof(struct sg_table
), GFP_KERNEL
);
174 ret
= sg_alloc_table(table
, i
, GFP_KERNEL
);
179 list_for_each_entry_safe(info
, tmp_info
, &pages
, list
) {
180 struct page
*page
= info
->page
;
181 sg_set_page(sg
, page
, (1 << info
->order
) * PAGE_SIZE
, 0);
183 list_del(&info
->list
);
187 buffer
->priv_virt
= table
;
192 list_for_each_entry(info
, &pages
, list
) {
193 free_buffer_page(sys_heap
, buffer
, info
->page
, info
->order
);
199 void ion_system_heap_free(struct ion_buffer
*buffer
)
201 struct ion_heap
*heap
= buffer
->heap
;
202 struct ion_system_heap
*sys_heap
= container_of(heap
,
203 struct ion_system_heap
,
205 struct sg_table
*table
= buffer
->sg_table
;
206 bool cached
= ion_buffer_cached(buffer
);
207 struct scatterlist
*sg
;
211 /* uncached pages come from the page pools, zero them before returning
212 for security purposes (other allocations are zerod at alloc time */
214 ion_heap_buffer_zero(buffer
);
216 for_each_sg(table
->sgl
, sg
, table
->nents
, i
)
217 free_buffer_page(sys_heap
, buffer
, sg_page(sg
),
218 get_order(sg
->length
));
219 sg_free_table(table
);
223 struct sg_table
*ion_system_heap_map_dma(struct ion_heap
*heap
,
224 struct ion_buffer
*buffer
)
226 return buffer
->priv_virt
;
/*
 * ion_system_heap_unmap_dma() - nothing to undo; the sg_table lives
 * for the whole buffer lifetime and is freed in ion_system_heap_free().
 */
void ion_system_heap_unmap_dma(struct ion_heap *heap,
			       struct ion_buffer *buffer)
{
}
235 static struct ion_heap_ops system_heap_ops
= {
236 .allocate
= ion_system_heap_allocate
,
237 .free
= ion_system_heap_free
,
238 .map_dma
= ion_system_heap_map_dma
,
239 .unmap_dma
= ion_system_heap_unmap_dma
,
240 .map_kernel
= ion_heap_map_kernel
,
241 .unmap_kernel
= ion_heap_unmap_kernel
,
242 .map_user
= ion_heap_map_user
,
245 static int ion_system_heap_shrink(struct shrinker
*shrinker
,
246 struct shrink_control
*sc
) {
248 struct ion_heap
*heap
= container_of(shrinker
, struct ion_heap
,
250 struct ion_system_heap
*sys_heap
= container_of(heap
,
251 struct ion_system_heap
,
257 if (sc
->nr_to_scan
== 0)
260 /* shrink the free list first, no point in zeroing the memory if
261 we're just going to reclaim it */
262 nr_freed
+= ion_heap_freelist_drain(heap
, sc
->nr_to_scan
* PAGE_SIZE
) /
265 if (nr_freed
>= sc
->nr_to_scan
)
268 for (i
= 0; i
< num_orders
; i
++) {
269 struct ion_page_pool
*pool
= sys_heap
->pools
[i
];
271 nr_freed
+= ion_page_pool_shrink(pool
, sc
->gfp_mask
,
273 if (nr_freed
>= sc
->nr_to_scan
)
278 /* total number of items is whatever the page pools are holding
279 plus whatever's in the freelist */
280 for (i
= 0; i
< num_orders
; i
++) {
281 struct ion_page_pool
*pool
= sys_heap
->pools
[i
];
282 nr_total
+= ion_page_pool_shrink(pool
, sc
->gfp_mask
, 0);
284 nr_total
+= ion_heap_freelist_size(heap
) / PAGE_SIZE
;
289 static int ion_system_heap_debug_show(struct ion_heap
*heap
, struct seq_file
*s
,
293 struct ion_system_heap
*sys_heap
= container_of(heap
,
294 struct ion_system_heap
,
297 for (i
= 0; i
< num_orders
; i
++) {
298 struct ion_page_pool
*pool
= sys_heap
->pools
[i
];
299 seq_printf(s
, "%d order %u highmem pages in pool = %lu total\n",
300 pool
->high_count
, pool
->order
,
301 (1 << pool
->order
) * PAGE_SIZE
* pool
->high_count
);
302 seq_printf(s
, "%d order %u lowmem pages in pool = %lu total\n",
303 pool
->low_count
, pool
->order
,
304 (1 << pool
->order
) * PAGE_SIZE
* pool
->low_count
);
309 struct ion_heap
*ion_system_heap_create(struct ion_platform_heap
*unused
)
311 struct ion_system_heap
*heap
;
314 heap
= kzalloc(sizeof(struct ion_system_heap
), GFP_KERNEL
);
316 return ERR_PTR(-ENOMEM
);
317 heap
->heap
.ops
= &system_heap_ops
;
318 heap
->heap
.type
= ION_HEAP_TYPE_SYSTEM
;
319 heap
->heap
.flags
= ION_HEAP_FLAG_DEFER_FREE
;
320 heap
->pools
= kzalloc(sizeof(struct ion_page_pool
*) * num_orders
,
323 goto err_alloc_pools
;
324 for (i
= 0; i
< num_orders
; i
++) {
325 struct ion_page_pool
*pool
;
326 gfp_t gfp_flags
= low_order_gfp_flags
;
329 gfp_flags
= high_order_gfp_flags
;
330 pool
= ion_page_pool_create(gfp_flags
, orders
[i
]);
332 goto err_create_pool
;
333 heap
->pools
[i
] = pool
;
336 heap
->heap
.shrinker
.shrink
= ion_system_heap_shrink
;
337 heap
->heap
.shrinker
.seeks
= DEFAULT_SEEKS
;
338 heap
->heap
.shrinker
.batch
= 0;
339 register_shrinker(&heap
->heap
.shrinker
);
340 heap
->heap
.debug_show
= ion_system_heap_debug_show
;
343 for (i
= 0; i
< num_orders
; i
++)
345 ion_page_pool_destroy(heap
->pools
[i
]);
349 return ERR_PTR(-ENOMEM
);
352 void ion_system_heap_destroy(struct ion_heap
*heap
)
354 struct ion_system_heap
*sys_heap
= container_of(heap
,
355 struct ion_system_heap
,
359 for (i
= 0; i
< num_orders
; i
++)
360 ion_page_pool_destroy(sys_heap
->pools
[i
]);
361 kfree(sys_heap
->pools
);
365 static int ion_system_contig_heap_allocate(struct ion_heap
*heap
,
366 struct ion_buffer
*buffer
,
371 int order
= get_order(len
);
373 if (align
> (PAGE_SIZE
<< order
))
376 if (ion_buffer_fault_user_mappings(buffer
))
379 buffer
->priv_virt
= kzalloc(len
, GFP_KERNEL
);
380 if (!buffer
->priv_virt
)
385 void ion_system_contig_heap_free(struct ion_buffer
*buffer
)
387 kfree(buffer
->priv_virt
);
390 static int ion_system_contig_heap_phys(struct ion_heap
*heap
,
391 struct ion_buffer
*buffer
,
392 ion_phys_addr_t
*addr
, size_t *len
)
394 *addr
= virt_to_phys(buffer
->priv_virt
);
399 struct sg_table
*ion_system_contig_heap_map_dma(struct ion_heap
*heap
,
400 struct ion_buffer
*buffer
)
402 struct sg_table
*table
;
405 table
= kzalloc(sizeof(struct sg_table
), GFP_KERNEL
);
407 return ERR_PTR(-ENOMEM
);
408 ret
= sg_alloc_table(table
, 1, GFP_KERNEL
);
413 sg_set_page(table
->sgl
, virt_to_page(buffer
->priv_virt
), buffer
->size
,
418 void ion_system_contig_heap_unmap_dma(struct ion_heap
*heap
,
419 struct ion_buffer
*buffer
)
421 sg_free_table(buffer
->sg_table
);
422 kfree(buffer
->sg_table
);
425 int ion_system_contig_heap_map_user(struct ion_heap
*heap
,
426 struct ion_buffer
*buffer
,
427 struct vm_area_struct
*vma
)
429 unsigned long pfn
= page_to_pfn(virt_to_page(buffer
->priv_virt
));
430 return remap_pfn_range(vma
, vma
->vm_start
, pfn
+ vma
->vm_pgoff
,
431 vma
->vm_end
- vma
->vm_start
,
436 static struct ion_heap_ops kmalloc_ops
= {
437 .allocate
= ion_system_contig_heap_allocate
,
438 .free
= ion_system_contig_heap_free
,
439 .phys
= ion_system_contig_heap_phys
,
440 .map_dma
= ion_system_contig_heap_map_dma
,
441 .unmap_dma
= ion_system_contig_heap_unmap_dma
,
442 .map_kernel
= ion_heap_map_kernel
,
443 .unmap_kernel
= ion_heap_unmap_kernel
,
444 .map_user
= ion_system_contig_heap_map_user
,
447 struct ion_heap
*ion_system_contig_heap_create(struct ion_platform_heap
*unused
)
449 struct ion_heap
*heap
;
451 heap
= kzalloc(sizeof(struct ion_heap
), GFP_KERNEL
);
453 return ERR_PTR(-ENOMEM
);
454 heap
->ops
= &kmalloc_ops
;
455 heap
->type
= ION_HEAP_TYPE_SYSTEM_CONTIG
;
459 void ion_system_contig_heap_destroy(struct ion_heap
*heap
)