arch/arm/mach-omap2/iommu2.c
/*
 * omap iommu: omap2/3 architecture specific functions
 *
 * Copyright (C) 2008-2009 Nokia Corporation
 *
 * Written by Hiroshi DOYU <Hiroshi.DOYU@nokia.com>,
 * Paul Mundt and Toshihiro Kobayashi
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/err.h>
#include <linux/device.h>
#include <linux/jiffies.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/stringify.h>

#include <plat/iommu.h>

/*
 * omap2 architecture specific register bit definitions
 */
#define IOMMU_ARCH_VERSION 0x00000011

/* SYSCONF */
#define MMU_SYS_IDLE_SHIFT 3
#define MMU_SYS_IDLE_FORCE (0 << MMU_SYS_IDLE_SHIFT)
#define MMU_SYS_IDLE_NONE (1 << MMU_SYS_IDLE_SHIFT)
#define MMU_SYS_IDLE_SMART (2 << MMU_SYS_IDLE_SHIFT)
#define MMU_SYS_IDLE_MASK (3 << MMU_SYS_IDLE_SHIFT)

#define MMU_SYS_SOFTRESET (1 << 1)
#define MMU_SYS_AUTOIDLE 1

/* SYSSTATUS */
#define MMU_SYS_RESETDONE 1

/* IRQSTATUS & IRQENABLE */
#define MMU_IRQ_MULTIHITFAULT (1 << 4)
#define MMU_IRQ_TABLEWALKFAULT (1 << 3)
#define MMU_IRQ_EMUMISS (1 << 2)
#define MMU_IRQ_TRANSLATIONFAULT (1 << 1)
#define MMU_IRQ_TLBMISS (1 << 0)

#define __MMU_IRQ_FAULT \
        (MMU_IRQ_MULTIHITFAULT | MMU_IRQ_EMUMISS | MMU_IRQ_TRANSLATIONFAULT)
#define MMU_IRQ_MASK \
        (__MMU_IRQ_FAULT | MMU_IRQ_TABLEWALKFAULT | MMU_IRQ_TLBMISS)
#define MMU_IRQ_TWL_MASK (__MMU_IRQ_FAULT | MMU_IRQ_TABLEWALKFAULT)
#define MMU_IRQ_TLB_MISS_MASK (__MMU_IRQ_FAULT | MMU_IRQ_TLBMISS)

/* MMU_CNTL */
#define MMU_CNTL_SHIFT 1
#define MMU_CNTL_MASK (7 << MMU_CNTL_SHIFT)
#define MMU_CNTL_EML_TLB (1 << 3)
#define MMU_CNTL_TWL_EN (1 << 2)
#define MMU_CNTL_MMU_EN (1 << 1)

#define get_cam_va_mask(pgsz) \
        (((pgsz) == MMU_CAM_PGSZ_16M) ? 0xff000000 : \
         ((pgsz) == MMU_CAM_PGSZ_1M) ? 0xfff00000 : \
         ((pgsz) == MMU_CAM_PGSZ_64K) ? 0xffff0000 : \
         ((pgsz) == MMU_CAM_PGSZ_4K) ? 0xfffff000 : 0)

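/*
 * For example, a 1MB entry (MMU_CAM_PGSZ_1M) yields the mask 0xfff00000:
 * only the upper 12 bits of the CAM value form the virtual-address tag,
 * while the low 20 bits address the offset within the page.
 */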
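/*
 * __iommu_set_twl - enable the MMU, with or without the hardware table walker
 *
 * With the walker enabled, table-walk faults are unmasked (MMU_IRQ_TWL_MASK);
 * with it disabled, TLB-miss interrupts are unmasked instead
 * (MMU_IRQ_TLB_MISS_MASK) so that misses are reported to software.
 */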
static void __iommu_set_twl(struct omap_iommu *obj, bool on)
{
        u32 l = iommu_read_reg(obj, MMU_CNTL);

        if (on)
                iommu_write_reg(obj, MMU_IRQ_TWL_MASK, MMU_IRQENABLE);
        else
                iommu_write_reg(obj, MMU_IRQ_TLB_MISS_MASK, MMU_IRQENABLE);

        l &= ~MMU_CNTL_MASK;
        if (on)
                l |= (MMU_CNTL_MMU_EN | MMU_CNTL_TWL_EN);
        else
                l |= (MMU_CNTL_MMU_EN);

        iommu_write_reg(obj, l, MMU_CNTL);
}

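/*
 * omap2_iommu_enable - reset and program the MMU, then enable translation
 *
 * The page directory must be 16KB aligned, both virtually and physically.
 * A soft reset is issued and polled for up to 20ms; the module is then set
 * to smart-idle with autoidle, the page-directory address is written to
 * MMU_TTB, and the MMU is enabled with the hardware table walker on.
 */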
static int omap2_iommu_enable(struct omap_iommu *obj)
{
        u32 l, pa;
        unsigned long timeout;

        if (!obj->iopgd || !IS_ALIGNED((u32)obj->iopgd, SZ_16K))
                return -EINVAL;

        pa = virt_to_phys(obj->iopgd);
        if (!IS_ALIGNED(pa, SZ_16K))
                return -EINVAL;

        iommu_write_reg(obj, MMU_SYS_SOFTRESET, MMU_SYSCONFIG);

        timeout = jiffies + msecs_to_jiffies(20);
        do {
                l = iommu_read_reg(obj, MMU_SYSSTATUS);
                if (l & MMU_SYS_RESETDONE)
                        break;
        } while (!time_after(jiffies, timeout));

        if (!(l & MMU_SYS_RESETDONE)) {
                dev_err(obj->dev, "can't take mmu out of reset\n");
                return -ENODEV;
        }

        l = iommu_read_reg(obj, MMU_REVISION);
        dev_info(obj->dev, "%s: version %d.%d\n", obj->name,
                 (l >> 4) & 0xf, l & 0xf);

        l = iommu_read_reg(obj, MMU_SYSCONFIG);
        l &= ~MMU_SYS_IDLE_MASK;
        l |= (MMU_SYS_IDLE_SMART | MMU_SYS_AUTOIDLE);
        iommu_write_reg(obj, l, MMU_SYSCONFIG);

        iommu_write_reg(obj, pa, MMU_TTB);

        __iommu_set_twl(obj, true);

        return 0;
}

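/*
 * omap2_iommu_disable - clear the enable bits in MMU_CNTL and force the
 * module into idle so it can be shut down.
 */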
static void omap2_iommu_disable(struct omap_iommu *obj)
{
        u32 l = iommu_read_reg(obj, MMU_CNTL);

        l &= ~MMU_CNTL_MASK;
        iommu_write_reg(obj, l, MMU_CNTL);
        iommu_write_reg(obj, MMU_SYS_IDLE_FORCE, MMU_SYSCONFIG);

        dev_dbg(obj->dev, "%s is shutting down\n", obj->name);
}

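/*
 * Note that the 'on' argument is ignored here: the table walker is always
 * turned off, leaving TLB-miss interrupts unmasked so software is notified.
 */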
static void omap2_iommu_set_twl(struct omap_iommu *obj, bool on)
{
        __iommu_set_twl(obj, false);
}

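/*
 * omap2_iommu_fault_isr - translate MMU_IRQSTATUS bits into OMAP_IOMMU_ERR_*
 * flags, return the faulting device address via @ra, and acknowledge the
 * handled status bits by writing them back.
 */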
static u32 omap2_iommu_fault_isr(struct omap_iommu *obj, u32 *ra)
{
        u32 stat, da;
        u32 errs = 0;

        stat = iommu_read_reg(obj, MMU_IRQSTATUS);
        stat &= MMU_IRQ_MASK;
        if (!stat) {
                *ra = 0;
                return 0;
        }

        da = iommu_read_reg(obj, MMU_FAULT_AD);
        *ra = da;

        if (stat & MMU_IRQ_TLBMISS)
                errs |= OMAP_IOMMU_ERR_TLB_MISS;
        if (stat & MMU_IRQ_TRANSLATIONFAULT)
                errs |= OMAP_IOMMU_ERR_TRANS_FAULT;
        if (stat & MMU_IRQ_EMUMISS)
                errs |= OMAP_IOMMU_ERR_EMU_MISS;
        if (stat & MMU_IRQ_TABLEWALKFAULT)
                errs |= OMAP_IOMMU_ERR_TBLWALK_FAULT;
        if (stat & MMU_IRQ_MULTIHITFAULT)
                errs |= OMAP_IOMMU_ERR_MULTIHIT_FAULT;
        iommu_write_reg(obj, stat, MMU_IRQSTATUS);

        return errs;
}

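/* Read back one TLB entry as a CAM/RAM register pair. */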
static void omap2_tlb_read_cr(struct omap_iommu *obj, struct cr_regs *cr)
{
        cr->cam = iommu_read_reg(obj, MMU_READ_CAM);
        cr->ram = iommu_read_reg(obj, MMU_READ_RAM);
}

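/* Load one TLB entry; the valid bit is always set when writing the CAM. */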
static void omap2_tlb_load_cr(struct omap_iommu *obj, struct cr_regs *cr)
{
        iommu_write_reg(obj, cr->cam | MMU_CAM_V, MMU_CAM);
        iommu_write_reg(obj, cr->ram, MMU_RAM);
}

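/* Extract the virtual address tag from a CAM value, masked by its page size. */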
static u32 omap2_cr_to_virt(struct cr_regs *cr)
{
        u32 page_size = cr->cam & MMU_CAM_PGSZ_MASK;
        u32 mask = get_cam_va_mask(cr->cam & page_size);

        return cr->cam & mask;
}

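/*
 * omap2_alloc_cr - build a CAM/RAM pair from an iotlb_entry
 *
 * The device address must be aligned to the entry's page size; otherwise
 * ERR_PTR(-EINVAL) is returned.
 */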
static struct cr_regs *omap2_alloc_cr(struct omap_iommu *obj,
                                      struct iotlb_entry *e)
{
        struct cr_regs *cr;

        if (e->da & ~(get_cam_va_mask(e->pgsz))) {
                dev_err(obj->dev, "%s:\twrong alignment: %08x\n", __func__,
                        e->da);
                return ERR_PTR(-EINVAL);
        }

        cr = kmalloc(sizeof(*cr), GFP_KERNEL);
        if (!cr)
                return ERR_PTR(-ENOMEM);

        cr->cam = (e->da & MMU_CAM_VATAG_MASK) | e->prsvd | e->pgsz | e->valid;
        cr->ram = e->pa | e->endian | e->elsz | e->mixed;

        return cr;
}

static inline int omap2_cr_valid(struct cr_regs *cr)
{
        return cr->cam & MMU_CAM_V;
}

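/*
 * Fold the mixed/endianness/element-size attributes of an entry into the
 * PTE attribute bits; for 16MB and 1MB entries the attributes sit 6 bits
 * higher than for 64KB and 4KB entries.
 */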
static u32 omap2_get_pte_attr(struct iotlb_entry *e)
{
        u32 attr;

        attr = e->mixed << 5;
        attr |= e->endian;
        attr |= e->elsz >> 3;
        attr <<= (((e->pgsz == MMU_CAM_PGSZ_4K) ||
                   (e->pgsz == MMU_CAM_PGSZ_64K)) ? 0 : 6);
        return attr;
}

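/* Dump one CAM/RAM pair and its preserved bit as a single line of text. */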
static ssize_t
omap2_dump_cr(struct omap_iommu *obj, struct cr_regs *cr, char *buf)
{
        char *p = buf;

        /* FIXME: Needs a more detailed analysis of cam/ram */
        p += sprintf(p, "%08x %08x %01x\n", cr->cam, cr->ram,
                     (cr->cam & MMU_CAM_P) ? 1 : 0);

        return p - buf;
}

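/*
 * pr_reg - helper for omap2_iommu_dump_ctx: print one named MMU register
 * into the buffer and bail out through the 'out' label once fewer than
 * 32 bytes (maxcol) remain.
 */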
#define pr_reg(name)                                                    \
        do {                                                            \
                ssize_t bytes;                                          \
                const char *str = "%20s: %08x\n";                       \
                const int maxcol = 32;                                  \
                bytes = snprintf(p, maxcol, str, __stringify(name),     \
                                 iommu_read_reg(obj, MMU_##name));      \
                p += bytes;                                             \
                len -= bytes;                                           \
                if (len < maxcol)                                       \
                        goto out;                                       \
        } while (0)

static ssize_t
omap2_iommu_dump_ctx(struct omap_iommu *obj, char *buf, ssize_t len)
{
        char *p = buf;

        pr_reg(REVISION);
        pr_reg(SYSCONFIG);
        pr_reg(SYSSTATUS);
        pr_reg(IRQSTATUS);
        pr_reg(IRQENABLE);
        pr_reg(WALKING_ST);
        pr_reg(CNTL);
        pr_reg(FAULT_AD);
        pr_reg(TTB);
        pr_reg(LOCK);
        pr_reg(LD_TLB);
        pr_reg(CAM);
        pr_reg(RAM);
        pr_reg(GFLUSH);
        pr_reg(FLUSH_ENTRY);
        pr_reg(READ_CAM);
        pr_reg(READ_RAM);
        pr_reg(EMU_FAULT_AD);
out:
        return p - buf;
}

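/*
 * omap2_iommu_save_ctx - save the MMU register file into obj->ctx so it can
 * be restored later. The first saved word is the revision register, which is
 * sanity-checked against IOMMU_ARCH_VERSION.
 */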
static void omap2_iommu_save_ctx(struct omap_iommu *obj)
{
        int i;
        u32 *p = obj->ctx;

        for (i = 0; i < (MMU_REG_SIZE / sizeof(u32)); i++) {
                p[i] = iommu_read_reg(obj, i * sizeof(u32));
                dev_dbg(obj->dev, "%s\t[%02d] %08x\n", __func__, i, p[i]);
        }

        BUG_ON(p[0] != IOMMU_ARCH_VERSION);
}

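/* Write the previously saved register file back into the MMU. */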
static void omap2_iommu_restore_ctx(struct omap_iommu *obj)
{
        int i;
        u32 *p = obj->ctx;

        for (i = 0; i < (MMU_REG_SIZE / sizeof(u32)); i++) {
                iommu_write_reg(obj, p[i], i * sizeof(u32));
                dev_dbg(obj->dev, "%s\t[%02d] %08x\n", __func__, i, p[i]);
        }

        BUG_ON(p[0] != IOMMU_ARCH_VERSION);
}

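/* Split a CAM/RAM pair back into the individual iotlb_entry fields. */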
static void omap2_cr_to_e(struct cr_regs *cr, struct iotlb_entry *e)
{
        e->da = cr->cam & MMU_CAM_VATAG_MASK;
        e->pa = cr->ram & MMU_RAM_PADDR_MASK;
        e->valid = cr->cam & MMU_CAM_V;
        e->pgsz = cr->cam & MMU_CAM_PGSZ_MASK;
        e->endian = cr->ram & MMU_RAM_ENDIAN_MASK;
        e->elsz = cr->ram & MMU_RAM_ELSZ_MASK;
        e->mixed = cr->ram & MMU_RAM_MIXED;
}

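/*
 * Architecture-specific callbacks handed to the generic OMAP IOMMU layer via
 * omap_install_iommu_arch() below.
 */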
static const struct iommu_functions omap2_iommu_ops = {
        .version        = IOMMU_ARCH_VERSION,

        .enable         = omap2_iommu_enable,
        .disable        = omap2_iommu_disable,
        .set_twl        = omap2_iommu_set_twl,
        .fault_isr      = omap2_iommu_fault_isr,

        .tlb_read_cr    = omap2_tlb_read_cr,
        .tlb_load_cr    = omap2_tlb_load_cr,

        .cr_to_e        = omap2_cr_to_e,
        .cr_to_virt     = omap2_cr_to_virt,
        .alloc_cr       = omap2_alloc_cr,
        .cr_valid       = omap2_cr_valid,
        .dump_cr        = omap2_dump_cr,

        .get_pte_attr   = omap2_get_pte_attr,

        .save_ctx       = omap2_iommu_save_ctx,
        .restore_ctx    = omap2_iommu_restore_ctx,
        .dump_ctx       = omap2_iommu_dump_ctx,
};

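/* Register/unregister the OMAP2/3 callbacks with the generic IOMMU layer. */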
static int __init omap2_iommu_init(void)
{
        return omap_install_iommu_arch(&omap2_iommu_ops);
}
module_init(omap2_iommu_init);

static void __exit omap2_iommu_exit(void)
{
        omap_uninstall_iommu_arch(&omap2_iommu_ops);
}
module_exit(omap2_iommu_exit);

MODULE_AUTHOR("Hiroshi DOYU, Paul Mundt and Toshihiro Kobayashi");
MODULE_DESCRIPTION("omap iommu: omap2/3 architecture specific functions");
MODULE_LICENSE("GPL v2");