sh: Build PMB entry links for existing contiguous multi-page mappings.
/*
 * arch/sh/mm/pmb.c
 *
 * Privileged Space Mapping Buffer (PMB) Support.
 *
 * Copyright (C) 2005 - 2010 Paul Mundt
 * Copyright (C) 2010 Matt Fleming
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License. See the file "COPYING" in the main directory of this archive
 * for more details.
 */
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/sysdev.h>
#include <linux/cpu.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/bitops.h>
#include <linux/debugfs.h>
#include <linux/fs.h>
#include <linux/seq_file.h>
#include <linux/err.h>
#include <linux/io.h>
#include <asm/sizes.h>
#include <asm/system.h>
#include <asm/uaccess.h>
#include <asm/pgtable.h>
#include <asm/page.h>
#include <asm/mmu.h>
#include <asm/mmu_context.h>

static void pmb_unmap_entry(struct pmb_entry *);

static struct pmb_entry pmb_entry_list[NR_PMB_ENTRIES];
static DECLARE_BITMAP(pmb_map, NR_PMB_ENTRIES);

static __always_inline unsigned long mk_pmb_entry(unsigned int entry)
{
        return (entry & PMB_E_MASK) << PMB_E_SHIFT;
}

static __always_inline unsigned long mk_pmb_addr(unsigned int entry)
{
        return mk_pmb_entry(entry) | PMB_ADDR;
}

static __always_inline unsigned long mk_pmb_data(unsigned int entry)
{
        return mk_pmb_entry(entry) | PMB_DATA;
}

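/*
 * Example (illustrative only, not from the original sources): assuming
 * the SH-4A register layout (PMB_ADDR == 0xf6100000, PMB_DATA ==
 * 0xf7100000, PMB_E_SHIFT == 8), the array and data registers for a
 * given PMB slot come out as:
 */
#if 0
        /* Entry 2 is addressed at PMB_ADDR | (2 << PMB_E_SHIFT): */
        unsigned long addr = mk_pmb_addr(2);    /* 0xf6100200, assumed values */
        unsigned long data = mk_pmb_data(2);    /* 0xf7100200, assumed values */
#endif
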
static int pmb_alloc_entry(void)
{
        unsigned int pos;

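        /*
         * The scan below is not atomic with respect to the bitmap
         * update: another caller can claim the slot between
         * find_first_zero_bit() and test_and_set_bit(), in which case
         * the set fails and we simply rescan.
         */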
repeat:
        pos = find_first_zero_bit(pmb_map, NR_PMB_ENTRIES);

        if (unlikely(pos >= NR_PMB_ENTRIES))
                return -ENOSPC;

        if (test_and_set_bit(pos, pmb_map))
                goto repeat;

        return pos;
}

static struct pmb_entry *pmb_alloc(unsigned long vpn, unsigned long ppn,
                                   unsigned long flags, int entry)
{
        struct pmb_entry *pmbe;
        int pos;

        if (entry == PMB_NO_ENTRY) {
                pos = pmb_alloc_entry();
                if (pos < 0)
                        return ERR_PTR(pos);
        } else {
                if (test_and_set_bit(entry, pmb_map))
                        return ERR_PTR(-ENOSPC);
                pos = entry;
        }

        /* The address of a static array element is never NULL. */
        pmbe = &pmb_entry_list[pos];

        pmbe->vpn = vpn;
        pmbe->ppn = ppn;
        pmbe->flags = flags;
        pmbe->entry = pos;
        pmbe->size = 0;

        return pmbe;
}

static void pmb_free(struct pmb_entry *pmbe)
{
        clear_bit(pmbe->entry, pmb_map);
        pmbe->entry = PMB_NO_ENTRY;
}

/*
 * Must be run uncached.
 */
static void set_pmb_entry(struct pmb_entry *pmbe)
{
        jump_to_uncached();

        __raw_writel(pmbe->vpn | PMB_V, mk_pmb_addr(pmbe->entry));

#ifdef CONFIG_CACHE_WRITETHROUGH
        /*
         * When we are in 32-bit address extended mode, CCR.CB becomes
         * invalid, so care must be taken to manually adjust cacheable
         * translations.
         */
        if (likely(pmbe->flags & PMB_C))
                pmbe->flags |= PMB_WT;
#endif

        __raw_writel(pmbe->ppn | pmbe->flags | PMB_V, mk_pmb_data(pmbe->entry));

        back_to_cached();
}

static void clear_pmb_entry(struct pmb_entry *pmbe)
{
        unsigned int entry = pmbe->entry;
        unsigned long addr;

        jump_to_uncached();

        /* Clear V-bit */
        addr = mk_pmb_addr(entry);
        __raw_writel(__raw_readl(addr) & ~PMB_V, addr);

        addr = mk_pmb_data(entry);
        __raw_writel(__raw_readl(addr) & ~PMB_V, addr);

        back_to_cached();
}

static struct {
        unsigned long size;
        int flag;
} pmb_sizes[] = {
        { .size = SZ_512M, .flag = PMB_SZ_512M, },
        { .size = SZ_128M, .flag = PMB_SZ_128M, },
        { .size = SZ_64M,  .flag = PMB_SZ_64M,  },
        { .size = SZ_16M,  .flag = PMB_SZ_16M,  },
};

long pmb_remap(unsigned long vaddr, unsigned long phys,
               unsigned long size, pgprot_t prot)
{
        struct pmb_entry *pmbp, *pmbe;
        unsigned long wanted;
        int pmb_flags, i;
        long err;
        u64 flags;

        flags = pgprot_val(prot);

        /* Convert typical pgprot value to the PMB equivalent */
        if (flags & _PAGE_CACHABLE) {
                if (flags & _PAGE_WT)
                        pmb_flags = PMB_WT;
                else
                        pmb_flags = PMB_C;
        } else
                pmb_flags = PMB_WT | PMB_UB;

        pmbp = NULL;
        wanted = size;

again:
        for (i = 0; i < ARRAY_SIZE(pmb_sizes); i++) {
                if (size < pmb_sizes[i].size)
                        continue;

                pmbe = pmb_alloc(vaddr, phys, pmb_flags | pmb_sizes[i].flag,
                                 PMB_NO_ENTRY);
                if (IS_ERR(pmbe)) {
                        err = PTR_ERR(pmbe);
                        goto out;
                }

                set_pmb_entry(pmbe);

                phys  += pmb_sizes[i].size;
                vaddr += pmb_sizes[i].size;
                size  -= pmb_sizes[i].size;

                pmbe->size = pmb_sizes[i].size;

                /*
                 * Link adjacent entries that span multiple PMB entries
                 * for easier tear-down.
                 */
                if (likely(pmbp))
                        pmbp->link = pmbe;

                pmbp = pmbe;

                /*
                 * Instead of trying smaller sizes on every iteration
                 * (even if we succeed in allocating space), try using
                 * pmb_sizes[i].size again.
                 */
                i--;
        }

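        /* Retry while at least the smallest supported PMB size (16MB) remains. */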
        if (size >= 0x1000000)
                goto again;

        return wanted - size;

out:
        pmb_unmap_entry(pmbp);

        return err;
}

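/*
 * Illustrative usage sketch (hypothetical caller; the addresses and
 * the PAGE_KERNEL protection below are assumptions for illustration
 * only). A 192MB request is satisfied greedily from pmb_sizes[]: one
 * 128MB entry followed by one 64MB entry, the two chained via ->link
 * so that pmb_unmap() on the starting vaddr tears both down.
 */
#if 0
        long mapped = pmb_remap(0xa4000000, 0x48000000,
                                SZ_128M + SZ_64M, PAGE_KERNEL);
        if (mapped < 0)
                return mapped;          /* e.g. -ENOSPC from pmb_alloc() */

        /* ... use the mapped window ... */

        pmb_unmap(0xa4000000);          /* unlinks and clears both entries */
#endif
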
void pmb_unmap(unsigned long addr)
{
        struct pmb_entry *pmbe;
        int i;

        for (i = 0; i < ARRAY_SIZE(pmb_entry_list); i++) {
                if (test_bit(i, pmb_map)) {
                        pmbe = &pmb_entry_list[i];
                        if (pmbe->vpn == addr) {
                                pmb_unmap_entry(pmbe);
                                break;
                        }
                }
        }
}

static void pmb_unmap_entry(struct pmb_entry *pmbe)
{
        if (unlikely(!pmbe))
                return;

        if (!test_bit(pmbe->entry, pmb_map)) {
                WARN_ON(1);
                return;
        }

        do {
                struct pmb_entry *pmblink = pmbe;

                /*
                 * We may be called before this pmb_entry has been
                 * entered into the PMB table via set_pmb_entry(), but
                 * that's OK because we've allocated a unique slot for
                 * this entry in pmb_alloc() (even if we haven't filled
                 * it yet).
                 *
                 * Therefore, calling clear_pmb_entry() is safe as no
                 * other mapping can be using that slot.
                 */
                clear_pmb_entry(pmbe);

                pmbe = pmblink->link;

                pmb_free(pmblink);
        } while (pmbe);
}

static __always_inline unsigned int pmb_ppn_in_range(unsigned long ppn)
{
        return ppn >= __pa(memory_start) && ppn < __pa(memory_end);
}

static int pmb_synchronize_mappings(void)
{
        unsigned int applied = 0;
        struct pmb_entry *pmbp = NULL;
        int i, j;

        pr_info("PMB: boot mappings:\n");

        /*
         * Run through the initial boot mappings, log the established
         * ones, and blow away anything that falls outside of the valid
         * PPN range. Specifically, we only care about existing mappings
         * that impact the cached/uncached sections.
         *
         * Note that touching these can be a bit of a minefield; the boot
         * loader can establish multi-page mappings with the same caching
         * attributes, so we need to ensure that we aren't modifying a
         * mapping that we're presently executing from, or may execute
         * from in the case of straddling page boundaries.
         *
         * In the future we will have to tidy up after the boot loader by
         * jumping between the cached and uncached mappings and tearing
         * down alternating mappings while executing from the other.
         */
        for (i = 0; i < NR_PMB_ENTRIES; i++) {
                unsigned long addr, data;
                unsigned long addr_val, data_val;
                unsigned long ppn, vpn, flags;
                unsigned int size;
                struct pmb_entry *pmbe;

                addr = mk_pmb_addr(i);
                data = mk_pmb_data(i);

                addr_val = __raw_readl(addr);
                data_val = __raw_readl(data);

                /*
                 * Skip over any bogus entries
                 */
                if (!(data_val & PMB_V) || !(addr_val & PMB_V))
                        continue;

                ppn = data_val & PMB_PFN_MASK;
                vpn = addr_val & PMB_PFN_MASK;

                /*
                 * Only preserve in-range mappings.
                 */
                if (!pmb_ppn_in_range(ppn)) {
                        /*
                         * Invalidate anything out of bounds.
                         */
                        __raw_writel(addr_val & ~PMB_V, addr);
                        __raw_writel(data_val & ~PMB_V, data);
                        continue;
                }

                /*
                 * Update the caching attributes if necessary
                 */
                if (data_val & PMB_C) {
#if defined(CONFIG_CACHE_WRITETHROUGH)
                        data_val |= PMB_WT;
#elif defined(CONFIG_CACHE_WRITEBACK)
                        data_val &= ~PMB_WT;
#else
                        data_val &= ~(PMB_C | PMB_WT);
#endif
                        __raw_writel(data_val, data);
                }

                size = data_val & PMB_SZ_MASK;
                flags = size | (data_val & PMB_CACHE_MASK);

                pmbe = pmb_alloc(vpn, ppn, flags, i);
                if (IS_ERR(pmbe)) {
                        WARN_ON_ONCE(1);
                        continue;
                }

                for (j = 0; j < ARRAY_SIZE(pmb_sizes); j++)
                        if (pmb_sizes[j].flag == size)
                                pmbe->size = pmb_sizes[j].size;

                /*
                 * Compare the previous entry against the current one to
                 * see if the entries span a contiguous mapping. If so,
                 * setup the entry links accordingly.
                 */
                if (pmbp && ((pmbe->vpn == (pmbp->vpn + pmbp->size)) &&
                             (pmbe->ppn == (pmbp->ppn + pmbp->size))))
                        pmbp->link = pmbe;

                pmbp = pmbe;

                pr_info("\t0x%08lx -> 0x%08lx [ %ldMB %scached ]\n",
                        vpn >> PAGE_SHIFT, ppn >> PAGE_SHIFT, pmbe->size >> 20,
                        (data_val & PMB_C) ? "" : "un");

                applied++;
        }

        return (applied == 0);
}

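/*
 * Worked example (illustrative, hypothetical addresses): if the boot
 * loader left two adjacent 128MB entries, say 0x80000000 -> 0x00000000
 * and 0x88000000 -> 0x08000000, then for the second entry both
 * vpn == prev->vpn + prev->size and ppn == prev->ppn + prev->size
 * hold, so pmb_synchronize_mappings() chains them via ->link, exactly
 * as pmb_remap() does for its own multi-entry mappings.
 */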
int pmb_init(void)
{
        int ret;

        jump_to_uncached();

        /*
         * Sync our software copy of the PMB mappings with those in
         * hardware. The mappings in the hardware PMB were either set up
         * by the bootloader or very early on by the kernel.
         */
        ret = pmb_synchronize_mappings();
        if (unlikely(ret == 0)) {
                back_to_cached();
                return 0;
        }

        __raw_writel(0, PMB_IRMCR);

        /* Flush out the TLB */
        __raw_writel(__raw_readl(MMUCR) | MMUCR_TI, MMUCR);

        back_to_cached();

        return 0;
}

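/*
 * Editorial note (assumption based on the usage below): PASCR.SE is
 * the space-extension enable bit; if it reads back clear, the CPU is
 * still running with the legacy 29-bit physical address map.
 */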
bool __in_29bit_mode(void)
{
        return (__raw_readl(PMB_PASCR) & PASCR_SE) == 0;
}

static int pmb_seq_show(struct seq_file *file, void *iter)
{
        int i;

        seq_printf(file, "V: Valid, C: Cacheable, WT: Write-Through\n"
                         "CB: Copy-Back, B: Buffered, UB: Unbuffered\n");
        seq_printf(file, "ety   vpn  ppn  size   flags\n");

        for (i = 0; i < NR_PMB_ENTRIES; i++) {
                unsigned long addr, data;
                unsigned int size;
                char *sz_str = NULL;

                addr = __raw_readl(mk_pmb_addr(i));
                data = __raw_readl(mk_pmb_data(i));

                size = data & PMB_SZ_MASK;
                sz_str = (size == PMB_SZ_16M)  ? " 16MB":
                         (size == PMB_SZ_64M)  ? " 64MB":
                         (size == PMB_SZ_128M) ? "128MB":
                                                 "512MB";

                /* 02: V 0x88 0x08 128MB C CB  B */
                seq_printf(file, "%02d: %c 0x%02lx 0x%02lx %s %c %s %s\n",
                           i, ((addr & PMB_V) && (data & PMB_V)) ? 'V' : ' ',
                           (addr >> 24) & 0xff, (data >> 24) & 0xff,
                           sz_str, (data & PMB_C) ? 'C' : ' ',
                           (data & PMB_WT) ? "WT" : "CB",
                           (data & PMB_UB) ? "UB" : " B");
        }

        return 0;
}

static int pmb_debugfs_open(struct inode *inode, struct file *file)
{
        return single_open(file, pmb_seq_show, NULL);
}

static const struct file_operations pmb_debugfs_fops = {
        .owner          = THIS_MODULE,
        .open           = pmb_debugfs_open,
        .read           = seq_read,
        .llseek         = seq_lseek,
        .release        = single_release,
};

static int __init pmb_debugfs_init(void)
{
        struct dentry *dentry;

        dentry = debugfs_create_file("pmb", S_IFREG | S_IRUGO,
                                     sh_debugfs_root, NULL, &pmb_debugfs_fops);
        if (!dentry)
                return -ENOMEM;
        if (IS_ERR(dentry))
                return PTR_ERR(dentry);

        return 0;
}
postcore_initcall(pmb_debugfs_init);

#ifdef CONFIG_PM
static int pmb_sysdev_suspend(struct sys_device *dev, pm_message_t state)
{
        static pm_message_t prev_state;
        int i;

        /* Restore the PMB after a resume from hibernation */
        if (state.event == PM_EVENT_ON &&
            prev_state.event == PM_EVENT_FREEZE) {
                struct pmb_entry *pmbe;
                for (i = 0; i < ARRAY_SIZE(pmb_entry_list); i++) {
                        if (test_bit(i, pmb_map)) {
                                pmbe = &pmb_entry_list[i];
                                set_pmb_entry(pmbe);
                        }
                }
        }
        prev_state = state;
        return 0;
}

static int pmb_sysdev_resume(struct sys_device *dev)
{
        return pmb_sysdev_suspend(dev, PMSG_ON);
}

static struct sysdev_driver pmb_sysdev_driver = {
        .suspend = pmb_sysdev_suspend,
        .resume = pmb_sysdev_resume,
};

static int __init pmb_sysdev_init(void)
{
        return sysdev_driver_register(&cpu_sysdev_class, &pmb_sysdev_driver);
}
subsys_initcall(pmb_sysdev_init);
#endif