/*
 * arch/sh/mm/pmb.c
 *
 * Privileged Space Mapping Buffer (PMB) Support.
 *
 * Copyright (C) 2005, 2006, 2007 Paul Mundt
 *
 * P1/P2 Section mapping definitions from map32.h, which was:
 *
 *	Copyright 2003 (c) Lineo Solutions,Inc.
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 */
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/sysdev.h>
#include <linux/cpu.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/bitops.h>
#include <linux/debugfs.h>
#include <linux/fs.h>
#include <linux/seq_file.h>
#include <linux/err.h>
#include <asm/system.h>
#include <asm/uaccess.h>
#include <asm/pgtable.h>
#include <asm/mmu.h>
#include <asm/io.h>
#include <asm/mmu_context.h>

#define NR_PMB_ENTRIES	16

static void __pmb_unmap(struct pmb_entry *);

static struct pmb_entry pmb_entry_list[NR_PMB_ENTRIES];
static unsigned long pmb_map;

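/*
 * Each PMB entry is programmed through a pair of memory-mapped array
 * registers; these helpers compute the address-array and data-array
 * register addresses for a given entry slot.
 */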
static inline unsigned long mk_pmb_entry(unsigned int entry)
{
	return (entry & PMB_E_MASK) << PMB_E_SHIFT;
}

static inline unsigned long mk_pmb_addr(unsigned int entry)
{
	return mk_pmb_entry(entry) | PMB_ADDR;
}

static inline unsigned long mk_pmb_data(unsigned int entry)
{
	return mk_pmb_entry(entry) | PMB_DATA;
}

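/*
 * Find a free slot in the PMB allocation bitmap. test_and_set_bit()
 * makes the claim atomic, so if someone races us to the same slot we
 * simply rescan for the next free one.
 */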
static int pmb_alloc_entry(void)
{
	unsigned int pos;

repeat:
	pos = find_first_zero_bit(&pmb_map, NR_PMB_ENTRIES);

	/*
	 * find_first_zero_bit() returns NR_PMB_ENTRIES when no zero
	 * bit exists, so the bounds check must be >=, not >.
	 */
	if (unlikely(pos >= NR_PMB_ENTRIES))
		return -ENOSPC;

	if (test_and_set_bit(pos, &pmb_map))
		goto repeat;

	return pos;
}

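/*
 * Allocate a pmb_entry for the given translation. When 'entry' is
 * PMB_NO_ENTRY any free slot will do; otherwise the caller is asking
 * for one specific slot (used when taking over entries that the
 * bootloader has already programmed).
 */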
static struct pmb_entry *pmb_alloc(unsigned long vpn, unsigned long ppn,
				   unsigned long flags, int entry)
{
	struct pmb_entry *pmbe;
	int pos;

	if (entry == PMB_NO_ENTRY) {
		pos = pmb_alloc_entry();
		if (pos < 0)
			return ERR_PTR(pos);
	} else {
		/*
		 * Claim the requested slot in the map as we take it;
		 * a bare test_bit() would leave the bit clear and let
		 * pmb_alloc_entry() hand out the same slot again.
		 */
		if (test_and_set_bit(entry, &pmb_map))
			return ERR_PTR(-ENOSPC);
		pos = entry;
	}

	pmbe = &pmb_entry_list[pos];

	pmbe->vpn = vpn;
	pmbe->ppn = ppn;
	pmbe->flags = flags;
	pmbe->entry = pos;

	return pmbe;
}

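/*
 * Return an entry to the pool: scrub the software state and release
 * its slot in the allocation bitmap. The hardware entry itself is
 * cleared separately via clear_pmb_entry().
 */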
static void pmb_free(struct pmb_entry *pmbe)
{
	int pos = pmbe->entry;

	pmbe->vpn = 0;
	pmbe->ppn = 0;
	pmbe->flags = 0;
	pmbe->entry = 0;

	clear_bit(pos, &pmb_map);
}

/*
 * Must be in P2 for __set_pmb_entry()
 */
static void __set_pmb_entry(unsigned long vpn, unsigned long ppn,
			    unsigned long flags, int pos)
{
	ctrl_outl(vpn | PMB_V, mk_pmb_addr(pos));

#ifdef CONFIG_CACHE_WRITETHROUGH
	/*
	 * When we are in 32-bit address extended mode, CCR.CB becomes
	 * invalid, so care must be taken to manually adjust cacheable
	 * translations.
	 */
	if (likely(flags & PMB_C))
		flags |= PMB_WT;
#endif

	ctrl_outl(ppn | flags | PMB_V, mk_pmb_data(pos));
}

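/*
 * set_pmb_entry()/clear_pmb_entry() wrap the uncached-section dance:
 * jump_to_uncached() moves execution to the uncached segment so the
 * PMB can be rewritten safely, and back_to_cached() returns once the
 * update is complete.
 */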
static void __uses_jump_to_uncached set_pmb_entry(struct pmb_entry *pmbe)
{
	jump_to_uncached();
	__set_pmb_entry(pmbe->vpn, pmbe->ppn, pmbe->flags, pmbe->entry);
	back_to_cached();
}

static void __uses_jump_to_uncached clear_pmb_entry(struct pmb_entry *pmbe)
{
	unsigned int entry = pmbe->entry;
	unsigned long addr;

	if (unlikely(entry >= NR_PMB_ENTRIES))
		return;

	jump_to_uncached();

	/* Clear V-bit */
	addr = mk_pmb_addr(entry);
	ctrl_outl(ctrl_inl(addr) & ~PMB_V, addr);

	addr = mk_pmb_data(entry);
	ctrl_outl(ctrl_inl(addr) & ~PMB_V, addr);

	back_to_cached();
}

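/*
 * The PMB supports four mapping sizes. This table is sorted from
 * largest to smallest so that pmb_remap() can greedily pick the
 * biggest size that still fits the remaining region.
 */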
static struct {
	unsigned long size;
	int flag;
} pmb_sizes[] = {
	{ .size = 0x20000000, .flag = PMB_SZ_512M, },
	{ .size = 0x08000000, .flag = PMB_SZ_128M, },
	{ .size = 0x04000000, .flag = PMB_SZ_64M,  },
	{ .size = 0x01000000, .flag = PMB_SZ_16M,  },
};

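/*
 * Establish a PMB mapping of 'size' bytes from 'phys' at 'vaddr',
 * splitting the region into as few entries as possible. Returns the
 * number of bytes actually mapped (which may fall short if the size
 * is not a multiple of 16MB, the smallest PMB granule), or a negative
 * error code.
 *
 * A typical call, e.g. to map 64MB of uncached space, might look like
 * (addresses here are illustrative only):
 *
 *	mapped = pmb_remap(0xa0000000, 0x40000000, 0x04000000, 0);
 */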
long pmb_remap(unsigned long vaddr, unsigned long phys,
	       unsigned long size, unsigned long flags)
{
	struct pmb_entry *pmbp, *pmbe;
	unsigned long wanted;
	int pmb_flags, i;
	long err;

	/* Convert typical pgprot value to the PMB equivalent */
	if (flags & _PAGE_CACHABLE) {
		if (flags & _PAGE_WT)
			pmb_flags = PMB_WT;
		else
			pmb_flags = PMB_C;
	} else
		pmb_flags = PMB_WT | PMB_UB;

	pmbp = NULL;
	wanted = size;

again:
	for (i = 0; i < ARRAY_SIZE(pmb_sizes); i++) {
		if (size < pmb_sizes[i].size)
			continue;

		pmbe = pmb_alloc(vaddr, phys, pmb_flags | pmb_sizes[i].flag,
				 PMB_NO_ENTRY);
		if (IS_ERR(pmbe)) {
			err = PTR_ERR(pmbe);
			goto out;
		}

		set_pmb_entry(pmbe);

		phys	+= pmb_sizes[i].size;
		vaddr	+= pmb_sizes[i].size;
		size	-= pmb_sizes[i].size;

		/*
		 * Link adjacent entries that span multiple PMB entries
		 * for easier tear-down.
		 */
		if (likely(pmbp))
			pmbp->link = pmbe;

		pmbp = pmbe;

		/*
		 * Instead of trying smaller sizes on every iteration
		 * (even if we succeed in allocating space), try using
		 * pmb_sizes[i].size again.
		 */
		i--;
	}

	if (size >= 0x1000000)
		goto again;

	return wanted - size;

out:
	if (pmbp)
		__pmb_unmap(pmbp);

	return err;
}

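/*
 * Tear down the mapping previously established at 'addr' by
 * pmb_remap(). The entry is found by scanning the in-use slots for a
 * matching virtual address.
 */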
void pmb_unmap(unsigned long addr)
{
	struct pmb_entry *pmbe = NULL;
	int i;

	/*
	 * Only latch pmbe on an actual vpn match; otherwise a failed
	 * lookup would leave it pointing at whatever in-use entry was
	 * scanned last, and we would tear down the wrong mapping.
	 */
	for (i = 0; i < ARRAY_SIZE(pmb_entry_list); i++) {
		if (test_bit(i, &pmb_map) &&
		    pmb_entry_list[i].vpn == addr) {
			pmbe = &pmb_entry_list[i];
			break;
		}
	}

	if (unlikely(!pmbe))
		return;

	__pmb_unmap(pmbe);
}

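/*
 * Walk and release an entire chain of linked entries. Safe to call on
 * a chain that has only partially been programmed into hardware; see
 * the comment in the loop below.
 */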
static void __pmb_unmap(struct pmb_entry *pmbe)
{
	BUG_ON(!test_bit(pmbe->entry, &pmb_map));

	do {
		struct pmb_entry *pmblink = pmbe;

		/*
		 * We may be called before this pmb_entry has been
		 * entered into the PMB table via set_pmb_entry(), but
		 * that's OK because we've allocated a unique slot for
		 * this entry in pmb_alloc() (even if we haven't filled
		 * it yet).
		 *
		 * Therefore, calling clear_pmb_entry() is safe as no
		 * other mapping can be using that slot.
		 */
		clear_pmb_entry(pmbe);

		pmbe = pmblink->link;

		pmb_free(pmblink);
	} while (pmbe);
}

#ifdef CONFIG_PMB
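/*
 * Boot-time initialization when the kernel manages the PMB itself:
 * build cached (P1) and uncached (P2) windows over physical memory,
 * enable 32-bit address extension, and flush the TLB so no stale
 * 29-bit translations survive the switch.
 */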
int __uses_jump_to_uncached pmb_init(void)
{
	unsigned int i;
	long size, ret;

	jump_to_uncached();

	/*
	 * Insert PMB entries for the P1 and P2 areas so that, after
	 * we've switched the MMU to 32-bit mode, the semantics of P1
	 * and P2 are the same as in 29-bit mode, e.g.
	 *
	 *	P1 - provides a cached window onto physical memory
	 *	P2 - provides an uncached window onto physical memory
	 */
	size = __MEMORY_START + __MEMORY_SIZE;

	ret = pmb_remap(P1SEG, 0x00000000, size, PMB_C);
	BUG_ON(ret != size);

	ret = pmb_remap(P2SEG, 0x00000000, size, PMB_WT | PMB_UB);
	BUG_ON(ret != size);

	ctrl_outl(0, PMB_IRMCR);

	/* PMB.SE and UB[7] */
	ctrl_outl(PASCR_SE | (1 << 7), PMB_PASCR);

	/* Flush out the TLB */
	i = ctrl_inl(MMUCR);
	i |= MMUCR_TI;
	ctrl_outl(i, MMUCR);

	back_to_cached();

	return 0;
}
#else
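/*
 * When the kernel does not manage the PMB directly, inherit whatever
 * entries the bootloader has already programmed: fix up each valid
 * entry's cache bits for the configured cache policy and register it
 * in the software entry list.
 */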
int __uses_jump_to_uncached pmb_init(void)
{
	int i;
	unsigned long addr, data;

	jump_to_uncached();

	for (i = 0; i < PMB_ENTRY_MAX; i++) {
		struct pmb_entry *pmbe;
		unsigned long vpn, ppn, flags;

		addr = PMB_DATA + (i << PMB_E_SHIFT);
		data = ctrl_inl(addr);
		if (!(data & PMB_V))
			continue;

		if (data & PMB_C) {
#if defined(CONFIG_CACHE_WRITETHROUGH)
			data |= PMB_WT;
#elif defined(CONFIG_CACHE_WRITEBACK)
			data &= ~PMB_WT;
#else
			data &= ~(PMB_C | PMB_WT);
#endif
		}
		ctrl_outl(data, addr);

		ppn = data & PMB_PFN_MASK;

		flags = data & (PMB_C | PMB_WT | PMB_UB);
		flags |= data & PMB_SZ_MASK;

		addr = PMB_ADDR + (i << PMB_E_SHIFT);
		data = ctrl_inl(addr);

		vpn = data & PMB_PFN_MASK;

		pmbe = pmb_alloc(vpn, ppn, flags, i);
		WARN_ON(IS_ERR(pmbe));
	}

	back_to_cached();

	return 0;
}
#endif /* CONFIG_PMB */

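/*
 * debugfs interface: dump the current hardware state of all sixteen
 * PMB entries as a human-readable table (exposed as "pmb" under the
 * sh debugfs root).
 */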
static int pmb_seq_show(struct seq_file *file, void *iter)
{
	int i;

	seq_printf(file, "V: Valid, C: Cacheable, WT: Write-Through\n"
			 "CB: Copy-Back, B: Buffered, UB: Unbuffered\n");
	seq_printf(file, "ety   vpn  ppn  size   flags\n");

	for (i = 0; i < NR_PMB_ENTRIES; i++) {
		unsigned long addr, data;
		unsigned int size;
		char *sz_str = NULL;

		addr = ctrl_inl(mk_pmb_addr(i));
		data = ctrl_inl(mk_pmb_data(i));

		size = data & PMB_SZ_MASK;
		sz_str = (size == PMB_SZ_16M)  ? " 16MB" :
			 (size == PMB_SZ_64M)  ? " 64MB" :
			 (size == PMB_SZ_128M) ? "128MB" :
						 "512MB";

		/* 02: V 0x88 0x08 128MB C CB B */
		seq_printf(file, "%02d: %c 0x%02lx 0x%02lx %s %c %s %s\n",
			   i, ((addr & PMB_V) && (data & PMB_V)) ? 'V' : ' ',
			   (addr >> 24) & 0xff, (data >> 24) & 0xff,
			   sz_str, (data & PMB_C) ? 'C' : ' ',
			   (data & PMB_WT) ? "WT" : "CB",
			   (data & PMB_UB) ? "UB" : " B");
	}

	return 0;
}

static int pmb_debugfs_open(struct inode *inode, struct file *file)
{
	return single_open(file, pmb_seq_show, NULL);
}

static const struct file_operations pmb_debugfs_fops = {
	.owner		= THIS_MODULE,
	.open		= pmb_debugfs_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};

static int __init pmb_debugfs_init(void)
{
	struct dentry *dentry;

	dentry = debugfs_create_file("pmb", S_IFREG | S_IRUGO,
				     sh_debugfs_root, NULL, &pmb_debugfs_fops);
	if (!dentry)
		return -ENOMEM;
	if (IS_ERR(dentry))
		return PTR_ERR(dentry);

	return 0;
}
postcore_initcall(pmb_debugfs_init);

#ifdef CONFIG_PM
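/*
 * The PMB contents are lost across hibernation, so on the
 * PM_EVENT_FREEZE -> PM_EVENT_ON transition every in-use software
 * entry is reprogrammed back into the hardware.
 */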
static int pmb_sysdev_suspend(struct sys_device *dev, pm_message_t state)
{
	static pm_message_t prev_state;
	int i;

	/* Restore the PMB after a resume from hibernation */
	if (state.event == PM_EVENT_ON &&
	    prev_state.event == PM_EVENT_FREEZE) {
		struct pmb_entry *pmbe;
		for (i = 0; i < ARRAY_SIZE(pmb_entry_list); i++) {
			if (test_bit(i, &pmb_map)) {
				pmbe = &pmb_entry_list[i];
				set_pmb_entry(pmbe);
			}
		}
	}
	prev_state = state;
	return 0;
}

static int pmb_sysdev_resume(struct sys_device *dev)
{
	return pmb_sysdev_suspend(dev, PMSG_ON);
}

static struct sysdev_driver pmb_sysdev_driver = {
	.suspend = pmb_sysdev_suspend,
	.resume = pmb_sysdev_resume,
};

static int __init pmb_sysdev_init(void)
{
	return sysdev_driver_register(&cpu_sysdev_class, &pmb_sysdev_driver);
}

subsys_initcall(pmb_sysdev_init);
#endif