drivers/misc/sram.c
/*
 * Generic on-chip SRAM allocation driver
 *
 * Copyright (C) 2012 Philipp Zabel, Pengutronix
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version 2
 * of the License, or (at your option) any later version.
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston,
 * MA 02110-1301, USA.
 */

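/*
 * Usage sketch (illustrative only, not the authoritative binding document):
 * the driver binds to "mmio-sram" nodes and hands the memory out through a
 * gen_pool; child nodes mark ranges that must stay out of the pool.  Node
 * names and addresses below are made up for the example:
 *
 *	sram: sram@10000000 {
 *		compatible = "mmio-sram";
 *		reg = <0x10000000 0x10000>;
 *		#address-cells = <1>;
 *		#size-cells = <1>;
 *		ranges = <0 0x10000000 0x10000>;
 *
 *		reserved@400 {
 *			reg = <0x400 0x100>;
 *		};
 *	};
 */
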
#include <linux/clk.h>
#include <linux/genalloc.h>
#include <linux/io.h>
#include <linux/list_sort.h>
#include <linux/of_address.h>
#include <linux/platform_device.h>
#include <linux/slab.h>

/* minimum allocation granularity of the gen_pool, in bytes */
#define SRAM_GRANULARITY	32

struct sram_dev {
	struct device *dev;
	void __iomem *virt_base;

	struct gen_pool *pool;
	struct clk *clk;
};

/* a block reserved via a DT child node; start is an offset into the SRAM */
struct sram_reserve {
	struct list_head list;
	u32 start;
	u32 size;
};

static int sram_reserve_cmp(void *priv, struct list_head *a,
			    struct list_head *b)
{
	struct sram_reserve *ra = list_entry(a, struct sram_reserve, list);
	struct sram_reserve *rb = list_entry(b, struct sram_reserve, list);

	return ra->start - rb->start;
}

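/*
 * Carve the SRAM up into gen_pool chunks: walk the DT child nodes, record
 * each reserved range (as an offset into the SRAM resource), sort the list,
 * and add only the gaps between reserved ranges to sram->pool.  A zero-sized
 * sentinel block at the end of the region lets the loop close the final gap.
 */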
static int sram_reserve_regions(struct sram_dev *sram, struct resource *res)
{
	struct device_node *np = sram->dev->of_node, *child;
	unsigned long size, cur_start, cur_size;
	struct sram_reserve *rblocks, *block;
	struct list_head reserve_list;
	unsigned int nblocks;
	int ret = 0;

	INIT_LIST_HEAD(&reserve_list);

	size = resource_size(res);

	/*
	 * We need an additional block to mark the end of the memory region
	 * after the reserved blocks from the dt are processed.
	 */
	nblocks = (np) ? of_get_available_child_count(np) + 1 : 1;
	rblocks = kmalloc((nblocks) * sizeof(*rblocks), GFP_KERNEL);
	if (!rblocks)
		return -ENOMEM;

	block = &rblocks[0];
	for_each_available_child_of_node(np, child) {
		struct resource child_res;

		ret = of_address_to_resource(child, 0, &child_res);
		if (ret < 0) {
			dev_err(sram->dev,
				"could not get address for node %s\n",
				child->full_name);
			of_node_put(child);
			goto err_chunks;
		}

		if (child_res.start < res->start || child_res.end > res->end) {
			dev_err(sram->dev,
				"reserved block %s outside the sram area\n",
				child->full_name);
			ret = -EINVAL;
			of_node_put(child);
			goto err_chunks;
		}

		block->start = child_res.start - res->start;
		block->size = resource_size(&child_res);
		list_add_tail(&block->list, &reserve_list);

		dev_dbg(sram->dev, "found reserved block 0x%x-0x%x\n",
			block->start, block->start + block->size);

		block++;
	}

	/* the last chunk marks the end of the region */
	rblocks[nblocks - 1].start = size;
	rblocks[nblocks - 1].size = 0;
	list_add_tail(&rblocks[nblocks - 1].list, &reserve_list);

	list_sort(NULL, &reserve_list, sram_reserve_cmp);

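	/*
	 * Walk the sorted list; cur_start always points at the first byte
	 * that has been handed to neither the pool nor a reserved block.
	 */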
	cur_start = 0;

	list_for_each_entry(block, &reserve_list, list) {
		/* can only happen if sections overlap */
		if (block->start < cur_start) {
			dev_err(sram->dev,
				"block at 0x%x starts before current offset 0x%lx\n",
				block->start, cur_start);
			ret = -EINVAL;
			goto err_chunks;
		}

		/* current start is in a reserved block, so continue after it */
		if (block->start == cur_start) {
			cur_start = block->start + block->size;
			continue;
		}

		/*
		 * allocate the space between the current starting
		 * address and the following reserved block, or the
		 * end of the region.
		 */
		cur_size = block->start - cur_start;

		dev_dbg(sram->dev, "adding chunk 0x%lx-0x%lx\n",
			cur_start, cur_start + cur_size);

		ret = gen_pool_add_virt(sram->pool,
				(unsigned long)sram->virt_base + cur_start,
				res->start + cur_start, cur_size, -1);
		if (ret < 0)
			goto err_chunks;

		/* next allocation after this reserved block */
		cur_start = block->start + block->size;
	}

err_chunks:
	kfree(rblocks);

	return ret;
}

static int sram_probe(struct platform_device *pdev)
{
	struct sram_dev *sram;
	struct resource *res;
	size_t size;
	int ret;

	sram = devm_kzalloc(&pdev->dev, sizeof(*sram), GFP_KERNEL);
	if (!sram)
		return -ENOMEM;

	sram->dev = &pdev->dev;

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (!res) {
		dev_err(sram->dev, "found no memory resource\n");
		return -EINVAL;
	}

	size = resource_size(res);

	if (!devm_request_mem_region(sram->dev, res->start, size, pdev->name)) {
		dev_err(sram->dev, "could not request region for resource\n");
		return -EBUSY;
	}

	/* devm_ioremap_wc() returns NULL on failure, not an ERR_PTR() */
	sram->virt_base = devm_ioremap_wc(sram->dev, res->start, size);
	if (!sram->virt_base)
		return -ENOMEM;

	sram->pool = devm_gen_pool_create(sram->dev, ilog2(SRAM_GRANULARITY),
					  NUMA_NO_NODE, NULL);
	if (IS_ERR(sram->pool))
		return PTR_ERR(sram->pool);

	ret = sram_reserve_regions(sram, res);
	if (ret)
		return ret;

	sram->clk = devm_clk_get(sram->dev, NULL);
	if (IS_ERR(sram->clk))
		sram->clk = NULL;
	else
		clk_prepare_enable(sram->clk);

	platform_set_drvdata(pdev, sram);

	dev_dbg(sram->dev, "SRAM pool: %zu KiB @ 0x%p\n",
		gen_pool_size(sram->pool) / 1024, sram->virt_base);

	return 0;
}
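
/*
 * Illustrative consumer sketch (not part of this driver).  The lookup helper
 * and the "sram" phandle property are assumptions that vary with kernel
 * version and consumer binding; the gen_pool calls are the stable part:
 *
 *	struct gen_pool *pool = of_gen_pool_get(dev->of_node, "sram", 0);
 *	unsigned long va;
 *
 *	if (!pool)
 *		return -EPROBE_DEFER;
 *	va = gen_pool_alloc(pool, 256);
 *	if (!va)
 *		return -ENOMEM;
 *	...
 *	gen_pool_free(pool, va, 256);
 */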

static int sram_remove(struct platform_device *pdev)
{
	struct sram_dev *sram = platform_get_drvdata(pdev);

	if (gen_pool_avail(sram->pool) < gen_pool_size(sram->pool))
		dev_err(sram->dev, "removed while SRAM allocated\n");

	if (sram->clk)
		clk_disable_unprepare(sram->clk);

	return 0;
}

#ifdef CONFIG_OF
static const struct of_device_id sram_dt_ids[] = {
	{ .compatible = "mmio-sram" },
	{}
};
#endif

static struct platform_driver sram_driver = {
	.driver = {
		.name = "sram",
		.of_match_table = of_match_ptr(sram_dt_ids),
	},
	.probe = sram_probe,
	.remove = sram_remove,
};

static int __init sram_init(void)
{
	return platform_driver_register(&sram_driver);
}

postcore_initcall(sram_init);