ion: don't use __arm_ioremap to map pages
drivers/staging/android/ion/ion_carveout_heap.c
/*
 * drivers/staging/android/ion/ion_carveout_heap.c
 *
 * Copyright (C) 2011 Google, Inc.
 *
 * This software is licensed under the terms of the GNU General Public
 * License version 2, as published by the Free Software Foundation, and
 * may be copied, distributed, and modified under those terms.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 */
#include <linux/spinlock.h>

#include <linux/err.h>
#include <linux/genalloc.h>
#include <linux/io.h>
#include <linux/mm.h>
#include <linux/scatterlist.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include "ion.h"
#include "ion_priv.h"

struct ion_carveout_heap {
	struct ion_heap heap;
	struct gen_pool *pool;
	ion_phys_addr_t base;
};

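/*
 * Carve @size bytes out of the heap's genalloc pool.  gen_pool_alloc()
 * returns 0 on failure, which is reported here as ION_CARVEOUT_ALLOCATE_FAIL.
 */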
ion_phys_addr_t ion_carveout_allocate(struct ion_heap *heap,
				      unsigned long size,
				      unsigned long align)
{
	struct ion_carveout_heap *carveout_heap =
		container_of(heap, struct ion_carveout_heap, heap);
	unsigned long offset = gen_pool_alloc(carveout_heap->pool, size);

	if (!offset)
		return ION_CARVEOUT_ALLOCATE_FAIL;

	return offset;
}

void ion_carveout_free(struct ion_heap *heap, ion_phys_addr_t addr,
		       unsigned long size)
{
	struct ion_carveout_heap *carveout_heap =
		container_of(heap, struct ion_carveout_heap, heap);

	if (addr == ION_CARVEOUT_ALLOCATE_FAIL)
		return;
	gen_pool_free(carveout_heap->pool, addr, size);
}

static int ion_carveout_heap_phys(struct ion_heap *heap,
				  struct ion_buffer *buffer,
				  ion_phys_addr_t *addr, size_t *len)
{
	*addr = buffer->priv_phys;
	*len = buffer->size;
	return 0;
}

static int ion_carveout_heap_allocate(struct ion_heap *heap,
				      struct ion_buffer *buffer,
				      unsigned long size, unsigned long align,
				      unsigned long flags)
{
	buffer->priv_phys = ion_carveout_allocate(heap, size, align);
	return buffer->priv_phys == ION_CARVEOUT_ALLOCATE_FAIL ? -ENOMEM : 0;
}

static void ion_carveout_heap_free(struct ion_buffer *buffer)
{
	struct ion_heap *heap = buffer->heap;

	ion_carveout_free(heap, buffer->priv_phys, buffer->size);
	buffer->priv_phys = ION_CARVEOUT_ALLOCATE_FAIL;
}

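/*
 * Carveout buffers are physically contiguous, so the DMA mapping is a
 * single-entry sg_table covering the whole allocation.
 */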
struct sg_table *ion_carveout_heap_map_dma(struct ion_heap *heap,
					   struct ion_buffer *buffer)
{
	struct sg_table *table;
	int ret;

	table = kzalloc(sizeof(struct sg_table), GFP_KERNEL);
	if (!table)
		return ERR_PTR(-ENOMEM);
	ret = sg_alloc_table(table, 1, GFP_KERNEL);
	if (ret) {
		kfree(table);
		return ERR_PTR(ret);
	}
	sg_set_page(table->sgl, phys_to_page(buffer->priv_phys), buffer->size,
		    0);
	return table;
}

void ion_carveout_heap_unmap_dma(struct ion_heap *heap,
				 struct ion_buffer *buffer)
{
	sg_free_table(buffer->sg_table);
}

static struct ion_heap_ops carveout_heap_ops = {
	.allocate = ion_carveout_heap_allocate,
	.free = ion_carveout_heap_free,
	.phys = ion_carveout_heap_phys,
	.map_dma = ion_carveout_heap_map_dma,
	.unmap_dma = ion_carveout_heap_unmap_dma,
	.map_user = ion_heap_map_user,
	.map_kernel = ion_heap_map_kernel,
	.unmap_kernel = ion_heap_unmap_kernel,
};

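/*
 * Wraps the platform-provided [base, base + size) region in a genalloc
 * pool with a minimum allocation order of 2^12 bytes (4 KiB).
 */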
struct ion_heap *ion_carveout_heap_create(struct ion_platform_heap *heap_data)
{
	struct ion_carveout_heap *carveout_heap;

	carveout_heap = kzalloc(sizeof(struct ion_carveout_heap), GFP_KERNEL);
	if (!carveout_heap)
		return ERR_PTR(-ENOMEM);

	carveout_heap->pool = gen_pool_create(12, -1);
	if (!carveout_heap->pool) {
		kfree(carveout_heap);
		return ERR_PTR(-ENOMEM);
	}
	carveout_heap->base = heap_data->base;
	gen_pool_add(carveout_heap->pool, carveout_heap->base, heap_data->size,
		     -1);
	carveout_heap->heap.ops = &carveout_heap_ops;
	carveout_heap->heap.type = ION_HEAP_TYPE_CARVEOUT;

	return &carveout_heap->heap;
}

void ion_carveout_heap_destroy(struct ion_heap *heap)
{
	struct ion_carveout_heap *carveout_heap =
	     container_of(heap, struct ion_carveout_heap, heap);

	gen_pool_destroy(carveout_heap->pool);
	kfree(carveout_heap);
	carveout_heap = NULL;
}
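
For reference, a minimal sketch of how a board file might hand a reserved region to this heap. It assumes the struct ion_platform_heap layout from ion.h of this era (type, id, name, base, size); the base address, size, heap id, and heap name below are placeholders rather than values from any real platform.

#include <linux/err.h>

#include "ion.h"
#include "ion_priv.h"

/* Hypothetical reserved region; a real board would take these values from
 * its memory map (e.g. a region set aside with memblock at early boot). */
#define EXAMPLE_CARVEOUT_BASE	0x88000000UL
#define EXAMPLE_CARVEOUT_SIZE	(16 * 1024 * 1024)

static struct ion_heap *example_carveout_heap_setup(void)
{
	struct ion_platform_heap heap_data = {
		.type = ION_HEAP_TYPE_CARVEOUT,
		.id   = 1,			/* placeholder heap id */
		.name = "example-carveout",	/* placeholder name */
		.base = EXAMPLE_CARVEOUT_BASE,
		.size = EXAMPLE_CARVEOUT_SIZE,
	};
	struct ion_heap *heap;

	heap = ion_carveout_heap_create(&heap_data);
	if (IS_ERR(heap))
		return heap;

	/* The heap would then be registered with an ion device, typically
	 * via ion_device_add_heap(). */
	return heap;
}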