sh: disable aliased page logic on NOMMU models
author    Rich Felker <dalias@libc.org>
Tue, 22 Mar 2016 22:02:23 +0000 (22:02 +0000)
committer Rich Felker <dalias@libc.org>
Sun, 31 Jul 2016 03:33:32 +0000 (03:33 +0000)
SH3/4 (with MMU) have a virtually indexed cache, requiring explicit
work to avoid consistency problems arising when the same physical
address range is cached under multiple virtual indexes. None of this
is needed in the NOMMU case, and some of the resulting code paths
(kmap_coherent) don't work there. SH2 avoided the problem only by
having a 4-way associative cache with way size equal to the page size
(4k), yielding no cache index bits outside the page offset and thus
no aliases.
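
As a quick illustration of that last point, a standalone sketch
(assuming 4k pages; not kernel code) of when index bits spill past
the page offset:

	#include <stdbool.h>
	#include <stdio.h>

	#define PAGE_SIZE 4096UL	/* assumed 4k pages, PAGE_SHIFT == 12 */

	/* A virtually indexed cache can alias only if some index bit
	 * lies above the page offset, i.e. only if the way size
	 * exceeds the page size. */
	static bool can_alias(unsigned long way_size)
	{
		return way_size > PAGE_SIZE;
	}

	int main(void)
	{
		printf("16k way: %s\n",
		       can_alias(16384) ? "can alias" : "no aliases");
		printf("4k way (the SH2 case): %s\n",
		       can_alias(4096) ? "can alias" : "no aliases");
		return 0;
	}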

Signed-off-by: Rich Felker <dalias@libc.org>
arch/sh/kernel/cpu/init.c
arch/sh/mm/cache.c

diff --git a/arch/sh/kernel/cpu/init.c b/arch/sh/kernel/cpu/init.c
index 0d7360d549c17858c59f93b6dc0d279f4222a640..bfd9e2798008051a6b18c69ccf0357d9bfd13210 100644
@@ -323,9 +323,13 @@ asmlinkage void cpu_init(void)
        cache_init();
 
        if (raw_smp_processor_id() == 0) {
+#ifdef CONFIG_MMU
                shm_align_mask = max_t(unsigned long,
                                       current_cpu_data.dcache.way_size - 1,
                                       PAGE_SIZE - 1);
+#else
+               shm_align_mask = PAGE_SIZE - 1;
+#endif
 
                /* Boot CPU sets the cache shape */
                detect_cache_shape();
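
For context on why shm_align_mask matters here: the mmap placement
code colour-aligns shared mappings with this mask so that every
mapping of the same page lands on the same cache index. A paraphrased
sketch of that alignment (modeled on the COLOUR_ALIGN logic in
arch/sh/mm/mmap.c; the helper name and standalone form are
illustrative, not the verbatim kernel source):

	/* Round addr up to the mask boundary, then add the colour
	 * bits of the file offset, so all mappings of a given page
	 * share their cache index bits. */
	static unsigned long colour_align(unsigned long addr,
					  unsigned long pgoff,
					  unsigned long shm_align_mask)
	{
		return ((addr + shm_align_mask) & ~shm_align_mask) +
		       ((pgoff << 12 /* PAGE_SHIFT */) & shm_align_mask);
	}

With the NOMMU value of PAGE_SIZE - 1, the offset term contributes
nothing and this reduces to ordinary page alignment.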
diff --git a/arch/sh/mm/cache.c b/arch/sh/mm/cache.c
index e58cfbf4515008c32f670519b0904bb4aafad706..776d664a40c5f2fd4298f25b5ec010605ac0532d 100644
@@ -244,7 +244,11 @@ void flush_cache_sigtramp(unsigned long address)
 
 static void compute_alias(struct cache_info *c)
 {
+#ifdef CONFIG_MMU
        c->alias_mask = ((c->sets - 1) << c->entry_shift) & ~(PAGE_SIZE - 1);
+#else
+       c->alias_mask = 0;
+#endif
        c->n_aliases = c->alias_mask ? (c->alias_mask >> PAGE_SHIFT) + 1 : 0;
 }
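
To see what the two branches of compute_alias() produce, a worked
example with a hypothetical SH-4-like geometry (512 sets, 32-byte
lines, so entry_shift == 5; standalone, not kernel code):

	#include <stdio.h>

	int main(void)
	{
		unsigned long sets = 512, entry_shift = 5; /* hypothetical */
		unsigned long page_mask = 4096UL - 1;      /* 4k pages */

		/* CONFIG_MMU branch of compute_alias() */
		unsigned long alias_mask =
			((sets - 1) << entry_shift) & ~page_mask;
		unsigned long n_aliases =
			alias_mask ? (alias_mask >> 12) + 1 : 0;
		printf("MMU:   alias_mask=%#lx n_aliases=%lu\n",
		       alias_mask, n_aliases); /* 0x3000, 4 */

		/* NOMMU branch: alias_mask is forced to 0, so n_aliases
		 * is 0 and the alias-handling paths (kmap_coherent) are
		 * never entered. */
		printf("NOMMU: alias_mask=0 n_aliases=0\n");
		return 0;
	}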
 