/*
 * srmmu.c:  SRMMU specific routines for memory management.
 *
 * Copyright (C) 1995 David S. Miller  (davem@caip.rutgers.edu)
 * Copyright (C) 1995,2002 Pete Zaitcev (zaitcev@yahoo.com)
 * Copyright (C) 1996 Eddie C. Dost    (ecd@skynet.be)
 * Copyright (C) 1997,1998 Jakub Jelinek (jj@sunsite.mff.cuni.cz)
 * Copyright (C) 1999,2000 Anton Blanchard (anton@samba.org)
 */
#include <linux/seq_file.h>
#include <linux/spinlock.h>
#include <linux/bootmem.h>
#include <linux/pagemap.h>
#include <linux/vmalloc.h>
#include <linux/kdebug.h>
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/log2.h>
#include <linux/gfp.h>

#include <asm/mmu_context.h>
#include <asm/cacheflush.h>
#include <asm/tlbflush.h>
#include <asm/io-unit.h>
#include <asm/pgalloc.h>
#include <asm/pgtable.h>
#include <asm/bitext.h>
#include <asm/vaddrs.h>
#include <asm/cache.h>
#include <asm/traps.h>
#include <asm/oplib.h>

/* Now the cpu specific definitions. */
#include <asm/turbosparc.h>
#include <asm/tsunami.h>
#include <asm/viking.h>
#include <asm/swift.h>
enum mbus_module srmmu_modtype;
static unsigned int hwbug_bitmask;

struct ctx_list *ctx_list_pool;
struct ctx_list ctx_free;
struct ctx_list ctx_used;

extern struct resource sparc_iomap;

extern unsigned long last_valid_pfn;

static pgd_t *srmmu_swapper_pg_dir;

const struct sparc32_cachetlb_ops *sparc32_cachetlb_ops;

#ifdef CONFIG_SMP
const struct sparc32_cachetlb_ops *local_ops;
#define FLUSH_BEGIN(mm)
#define FLUSH_END
#else
#define FLUSH_BEGIN(mm) if ((mm)->context != NO_CONTEXT) {
#define FLUSH_END	}
#endif
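/*
 * Expansion example for the UP (!CONFIG_SMP) case: a body written as
 *
 *	FLUSH_BEGIN(mm)
 *	srmmu_flush_whole_tlb();
 *	FLUSH_END
 *
 * becomes "if ((mm)->context != NO_CONTEXT) { srmmu_flush_whole_tlb(); }",
 * i.e. flushes are skipped for address spaces that never received a
 * context.  On SMP the test is compiled away and the body always runs.
 */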
int flush_page_for_dma_global = 1;

char *srmmu_name;

ctxd_t *srmmu_ctx_table_phys;
static ctxd_t *srmmu_context_table;

int viking_mxcc_present;
static DEFINE_SPINLOCK(srmmu_context_spinlock);

static int is_hypersparc;

static int srmmu_cache_pagetables;

/* these will be initialized in srmmu_nocache_calcsize() */
static unsigned long srmmu_nocache_size;
static unsigned long srmmu_nocache_end;

/* 1 bit <=> 256 bytes of nocache <=> 64 PTEs */
#define SRMMU_NOCACHE_BITMAP_SHIFT (PAGE_SHIFT - 4)
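/*
 * Worked example, assuming the usual sparc32 PAGE_SHIFT of 12: the
 * shift is then 8, so one bitmap bit stands for 1 << 8 == 256 bytes of
 * pool, which is 64 four-byte PTEs; a whole 4 KB page costs 16 bits.
 */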
/* The context table is a nocache user with the biggest alignment needs. */
#define SRMMU_NOCACHE_ALIGN_MAX (sizeof(ctxd_t)*SRMMU_MAX_CONTEXTS)

void *srmmu_nocache_pool;
void *srmmu_nocache_bitmap;
static struct bit_map srmmu_nocache_map;
static inline int srmmu_pmd_none(pmd_t pmd)
{ return !(pmd_val(pmd) & 0xFFFFFFF); }
/* XXX should we hyper_flush_whole_icache here - Anton */
static inline void srmmu_ctxd_set(ctxd_t *ctxp, pgd_t *pgdp)
{ set_pte((pte_t *)ctxp, (SRMMU_ET_PTD | (__nocache_pa((unsigned long) pgdp) >> 4))); }
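/*
 * The ">> 4" above is the generic SRMMU table-pointer encoding: a PTD
 * keeps bits 35:6 of the physical table address in its upper bits and
 * the entry type in bits 1:0.  Worked example (address hypothetical):
 * a page directory at physical 0x00fe1000 gives the PTD value
 * (0x00fe1000 >> 4) | SRMMU_ET_PTD == 0x000fe100 | 0x1 == 0x000fe101.
 */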
void pmd_set(pmd_t *pmdp, pte_t *ptep)
{
	unsigned long ptp;	/* Physical address, shifted right by 4 */
	int i;

	ptp = __nocache_pa((unsigned long) ptep) >> 4;
	for (i = 0; i < PTRS_PER_PTE/SRMMU_REAL_PTRS_PER_PTE; i++) {
		set_pte((pte_t *)&pmdp->pmdv[i], SRMMU_ET_PTD | ptp);
		ptp += (SRMMU_REAL_PTRS_PER_PTE*sizeof(pte_t) >> 4);
	}
}
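/*
 * With the usual sparc32 geometry (PTRS_PER_PTE == 1024,
 * SRMMU_REAL_PTRS_PER_PTE == 64), the loop above plants 16 PTDs, each
 * pointing 64 * sizeof(pte_t) == 256 bytes (0x10 in PTP units) further
 * into the same page, so one Linux pte table backs 16 consecutive
 * hardware page tables.
 */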
void pmd_populate(struct mm_struct *mm, pmd_t *pmdp, struct page *ptep)
{
	unsigned long ptp;	/* Physical address, shifted right by 4 */
	int i;

	ptp = page_to_pfn(ptep) << (PAGE_SHIFT-4);	/* watch for overflow */
	for (i = 0; i < PTRS_PER_PTE/SRMMU_REAL_PTRS_PER_PTE; i++) {
		set_pte((pte_t *)&pmdp->pmdv[i], SRMMU_ET_PTD | ptp);
		ptp += (SRMMU_REAL_PTRS_PER_PTE*sizeof(pte_t) >> 4);
	}
}
/* Find an entry in the third-level page table.. */
pte_t *pte_offset_kernel(pmd_t *dir, unsigned long address)
{
	void *pte;

	pte = __nocache_va((dir->pmdv[0] & SRMMU_PTD_PMASK) << 4);
	return (pte_t *) pte +
	    ((address >> PAGE_SHIFT) & (PTRS_PER_PTE - 1));
}
/*
 * size: bytes to allocate in the nocache area.
 * align: bytes, number to align at.
 * Returns the virtual address of the allocated area.
 */
static void *__srmmu_get_nocache(int size, int align)
{
	int offset;
	unsigned long addr;

	if (size < SRMMU_NOCACHE_BITMAP_SHIFT) {
		printk(KERN_ERR "Size 0x%x too small for nocache request\n",
		       size);
		size = SRMMU_NOCACHE_BITMAP_SHIFT;
	}
	if (size & (SRMMU_NOCACHE_BITMAP_SHIFT - 1)) {
		printk(KERN_ERR "Size 0x%x unaligned in nocache request\n",
		       size);
		size += SRMMU_NOCACHE_BITMAP_SHIFT - 1;
	}
	BUG_ON(align > SRMMU_NOCACHE_ALIGN_MAX);

	offset = bit_map_string_get(&srmmu_nocache_map,
				    size >> SRMMU_NOCACHE_BITMAP_SHIFT,
				    align >> SRMMU_NOCACHE_BITMAP_SHIFT);
	if (offset == -1) {
		printk(KERN_ERR "srmmu: out of nocache %d: %d/%d\n",
		       size, (int) srmmu_nocache_size,
		       srmmu_nocache_map.used << SRMMU_NOCACHE_BITMAP_SHIFT);
		return NULL;
	}

	addr = SRMMU_NOCACHE_VADDR + (offset << SRMMU_NOCACHE_BITMAP_SHIFT);
	return (void *)addr;
}
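/*
 * A usage sketch (sizes illustrative): callers ask in bytes, with a
 * power-of-two alignment no larger than SRMMU_NOCACHE_ALIGN_MAX, e.g.
 *
 *	pte_t *ptep = __srmmu_get_nocache(PTE_SIZE, PTE_SIZE);
 *
 * and hand the memory back with srmmu_free_nocache(ptep, PTE_SIZE).
 * The variant below additionally zeroes the area, which is what most
 * callers want.
 */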
void *srmmu_get_nocache(int size, int align)
{
	void *tmp;

	tmp = __srmmu_get_nocache(size, align);

	if (tmp)
		memset(tmp, 0, size);

	return tmp;
}
void srmmu_free_nocache(void *addr, int size)
{
	unsigned long vaddr;
	int offset;

	vaddr = (unsigned long)addr;
	if (vaddr < SRMMU_NOCACHE_VADDR) {
		printk("Vaddr %lx is smaller than nocache base 0x%lx\n",
		       vaddr, (unsigned long)SRMMU_NOCACHE_VADDR);
		BUG();
	}
	if (vaddr + size > srmmu_nocache_end) {
		printk("Vaddr %lx is bigger than nocache end 0x%lx\n",
		       vaddr, srmmu_nocache_end);
		BUG();
	}
	if (!is_power_of_2(size)) {
		printk("Size 0x%x is not a power of 2\n", size);
		BUG();
	}
	if (size < SRMMU_NOCACHE_BITMAP_SHIFT) {
		printk("Size 0x%x is too small\n", size);
		BUG();
	}
	if (vaddr & (size - 1)) {
		printk("Vaddr %lx is not aligned to size 0x%x\n", vaddr, size);
		BUG();
	}

	offset = (vaddr - SRMMU_NOCACHE_VADDR) >> SRMMU_NOCACHE_BITMAP_SHIFT;
	size = size >> SRMMU_NOCACHE_BITMAP_SHIFT;

	bit_map_clear(&srmmu_nocache_map, offset, size);
}
static void __init srmmu_early_allocate_ptable_skeleton(unsigned long start,
							unsigned long end);

extern unsigned long probe_memory(void);	/* in fault.c */
/*
 * Reserve nocache dynamically proportionally to the amount of
 * system RAM. -- Tomas Szepe <szepe@pinerecords.com>, June 2002
 */
static void __init srmmu_nocache_calcsize(void)
{
	unsigned long sysmemavail = probe_memory() / 1024;
	int srmmu_nocache_npages;

	srmmu_nocache_npages =
		sysmemavail / SRMMU_NOCACHE_ALCRATIO / 1024 * 256;

	/* P3 XXX The 4x overuse: corroborated by /proc/meminfo. */
	// if (srmmu_nocache_npages < 256) srmmu_nocache_npages = 256;
	if (srmmu_nocache_npages < SRMMU_MIN_NOCACHE_PAGES)
		srmmu_nocache_npages = SRMMU_MIN_NOCACHE_PAGES;

	/* anything above 1280 blows up */
	if (srmmu_nocache_npages > SRMMU_MAX_NOCACHE_PAGES)
		srmmu_nocache_npages = SRMMU_MAX_NOCACHE_PAGES;

	srmmu_nocache_size = srmmu_nocache_npages * PAGE_SIZE;
	srmmu_nocache_end = SRMMU_NOCACHE_VADDR + srmmu_nocache_size;
}
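/*
 * Worked example, assuming SRMMU_NOCACHE_ALCRATIO == 64 (i.e. 256
 * nocache pages per 64 MB of system RAM): with 64 MB of memory,
 * sysmemavail == 65536, so 65536 / 64 / 1024 * 256 == 256 pages, a
 * 1 MB pool; the two clamps then bound the result by the MIN/MAX
 * page counts.
 */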
static void __init srmmu_nocache_init(void)
{
	unsigned int bitmap_bits;
	pgd_t *pgd;
	pmd_t *pmd;
	pte_t *pte;
	unsigned long paddr, vaddr;
	unsigned long pteval;

	bitmap_bits = srmmu_nocache_size >> SRMMU_NOCACHE_BITMAP_SHIFT;

	srmmu_nocache_pool = __alloc_bootmem(srmmu_nocache_size,
		SRMMU_NOCACHE_ALIGN_MAX, 0UL);
	memset(srmmu_nocache_pool, 0, srmmu_nocache_size);

	srmmu_nocache_bitmap = __alloc_bootmem(bitmap_bits >> 3, SMP_CACHE_BYTES, 0UL);
	bit_map_init(&srmmu_nocache_map, srmmu_nocache_bitmap, bitmap_bits);

	srmmu_swapper_pg_dir = __srmmu_get_nocache(SRMMU_PGD_TABLE_SIZE, SRMMU_PGD_TABLE_SIZE);
	memset(__nocache_fix(srmmu_swapper_pg_dir), 0, SRMMU_PGD_TABLE_SIZE);
	init_mm.pgd = srmmu_swapper_pg_dir;

	srmmu_early_allocate_ptable_skeleton(SRMMU_NOCACHE_VADDR, srmmu_nocache_end);

	paddr = __pa((unsigned long)srmmu_nocache_pool);
	vaddr = SRMMU_NOCACHE_VADDR;

	while (vaddr < srmmu_nocache_end) {
		pgd = pgd_offset_k(vaddr);
		pmd = pmd_offset(__nocache_fix(pgd), vaddr);
		pte = pte_offset_kernel(__nocache_fix(pmd), vaddr);

		pteval = ((paddr >> 4) | SRMMU_ET_PTE | SRMMU_PRIV);

		if (srmmu_cache_pagetables)
			pteval |= SRMMU_CACHE;

		set_pte(__nocache_fix(pte), __pte(pteval));

		vaddr += PAGE_SIZE;
		paddr += PAGE_SIZE;
	}

	flush_cache_all();
	flush_tlb_all();
}
pgd_t *get_pgd_fast(void)
{
	pgd_t *pgd = NULL;

	pgd = __srmmu_get_nocache(SRMMU_PGD_TABLE_SIZE, SRMMU_PGD_TABLE_SIZE);
	if (pgd) {
		pgd_t *init = pgd_offset_k(0);
		memset(pgd, 0, USER_PTRS_PER_PGD * sizeof(pgd_t));
		memcpy(pgd + USER_PTRS_PER_PGD, init + USER_PTRS_PER_PGD,
		       (PTRS_PER_PGD - USER_PTRS_PER_PGD) * sizeof(pgd_t));
	}

	return pgd;
}
/*
 * Hardware needs alignment to 256 only, but we align to whole page size
 * to reduce fragmentation problems due to the buddy principle.
 * XXX Provide actual fragmentation statistics in /proc.
 *
 * Alignments up to the page size are the same for physical and virtual
 * addresses of the nocache area.
 */
pgtable_t pte_alloc_one(struct mm_struct *mm, unsigned long address)
{
	unsigned long pte;
	struct page *page;

	if ((pte = (unsigned long)pte_alloc_one_kernel(mm, address)) == 0)
		return NULL;
	page = pfn_to_page(__nocache_pa(pte) >> PAGE_SHIFT);
	pgtable_page_ctor(page);
	return page;
}
void pte_free(struct mm_struct *mm, pgtable_t pte)
{
	unsigned long p;

	pgtable_page_dtor(pte);
	p = (unsigned long)page_address(pte);	/* Cached address (for test) */
	if (p == 0)
		BUG();
	p = page_to_pfn(pte) << PAGE_SHIFT;	/* Physical address */

	/* free non-cached virtual address */
	srmmu_free_nocache(__nocache_va(p), PTE_SIZE);
}
static inline void alloc_context(struct mm_struct *old_mm, struct mm_struct *mm)
{
	struct ctx_list *ctxp;

	ctxp = ctx_free.next;
	if (ctxp != &ctx_free) {
		remove_from_ctx_list(ctxp);
		add_to_used_ctxlist(ctxp);
		mm->context = ctxp->ctx_number;
		ctxp->ctx_mm = mm;
		return;
	}
	ctxp = ctx_used.next;
	if (ctxp->ctx_mm == old_mm)
		ctxp = ctxp->next;
	if (ctxp == &ctx_used)
		panic("out of mmu contexts");
	flush_cache_mm(ctxp->ctx_mm);
	flush_tlb_mm(ctxp->ctx_mm);
	remove_from_ctx_list(ctxp);
	add_to_used_ctxlist(ctxp);
	ctxp->ctx_mm->context = NO_CONTEXT;
	ctxp->ctx_mm = mm;
	mm->context = ctxp->ctx_number;
}
static inline void free_context(int context)
{
	struct ctx_list *ctx_old;

	ctx_old = ctx_list_pool + context;
	remove_from_ctx_list(ctx_old);
	add_to_free_ctxlist(ctx_old);
}
void switch_mm(struct mm_struct *old_mm, struct mm_struct *mm,
	       struct task_struct *tsk)
{
	if (mm->context == NO_CONTEXT) {
		spin_lock(&srmmu_context_spinlock);
		alloc_context(old_mm, mm);
		spin_unlock(&srmmu_context_spinlock);
		srmmu_ctxd_set(&srmmu_context_table[mm->context], mm->pgd);
	}

	if (sparc_cpu_model == sparc_leon)
		leon_switch_mm();

	if (is_hypersparc)
		hyper_flush_whole_icache();

	srmmu_set_context(mm->context);
}
/* Low level IO area allocation on the SRMMU. */
static inline void srmmu_mapioaddr(unsigned long physaddr,
				   unsigned long virt_addr, int bus_type)
{
	pgd_t *pgdp;
	pmd_t *pmdp;
	pte_t *ptep;
	unsigned long tmp;

	physaddr &= PAGE_MASK;
	pgdp = pgd_offset_k(virt_addr);
	pmdp = pmd_offset(pgdp, virt_addr);
	ptep = pte_offset_kernel(pmdp, virt_addr);
	tmp = (physaddr >> 4) | SRMMU_ET_PTE;

	/* I need to test whether this is consistent over all
	 * sun4m's.  The bus_type represents the upper 4 bits of
	 * 36-bit physical address on the I/O space lines...
	 */
	tmp |= (bus_type << 28);
	tmp |= SRMMU_PRIV;
	__flush_page_to_ram(virt_addr);
	set_pte(ptep, __pte(tmp));
}
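/*
 * Worked example of the bus_type arithmetic above (addresses
 * hypothetical): tmp already holds physaddr >> 4, so bits 35:32 of the
 * 36-bit I/O address belong at bits 31:28.  Mapping physaddr 0x10002000
 * on I/O space 0xf gives tmp == 0x01000200 | 0xf0000000 == 0xf1000200,
 * i.e. the 36-bit address 0xf10002000 once the MMU shifts it back left.
 */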
void srmmu_mapiorange(unsigned int bus, unsigned long xpa,
		      unsigned long xva, unsigned int len)
{
	while (len != 0) {
		len -= PAGE_SIZE;
		srmmu_mapioaddr(xpa, xva, bus);
		xva += PAGE_SIZE;
		xpa += PAGE_SIZE;
	}
	flush_tlb_all();
}
static inline void srmmu_unmapioaddr(unsigned long virt_addr)
{
	pgd_t *pgdp;
	pmd_t *pmdp;
	pte_t *ptep;

	pgdp = pgd_offset_k(virt_addr);
	pmdp = pmd_offset(pgdp, virt_addr);
	ptep = pte_offset_kernel(pmdp, virt_addr);

	/* No need to flush uncacheable page. */
	__pte_clear(ptep);
}
void srmmu_unmapiorange(unsigned long virt_addr, unsigned int len)
{
	while (len != 0) {
		len -= PAGE_SIZE;
		srmmu_unmapioaddr(virt_addr);
		virt_addr += PAGE_SIZE;
	}
	flush_tlb_all();
}
extern void tsunami_flush_cache_all(void);
extern void tsunami_flush_cache_mm(struct mm_struct *mm);
extern void tsunami_flush_cache_range(struct vm_area_struct *vma, unsigned long start, unsigned long end);
extern void tsunami_flush_cache_page(struct vm_area_struct *vma, unsigned long page);
extern void tsunami_flush_page_to_ram(unsigned long page);
extern void tsunami_flush_page_for_dma(unsigned long page);
extern void tsunami_flush_sig_insns(struct mm_struct *mm, unsigned long insn_addr);
extern void tsunami_flush_tlb_all(void);
extern void tsunami_flush_tlb_mm(struct mm_struct *mm);
extern void tsunami_flush_tlb_range(struct vm_area_struct *vma, unsigned long start, unsigned long end);
extern void tsunami_flush_tlb_page(struct vm_area_struct *vma, unsigned long page);
extern void tsunami_setup_blockops(void);
extern void swift_flush_cache_all(void);
extern void swift_flush_cache_mm(struct mm_struct *mm);
extern void swift_flush_cache_range(struct vm_area_struct *vma,
				    unsigned long start, unsigned long end);
extern void swift_flush_cache_page(struct vm_area_struct *vma, unsigned long page);
extern void swift_flush_page_to_ram(unsigned long page);
extern void swift_flush_page_for_dma(unsigned long page);
extern void swift_flush_sig_insns(struct mm_struct *mm, unsigned long insn_addr);
extern void swift_flush_tlb_all(void);
extern void swift_flush_tlb_mm(struct mm_struct *mm);
extern void swift_flush_tlb_range(struct vm_area_struct *vma,
				  unsigned long start, unsigned long end);
extern void swift_flush_tlb_page(struct vm_area_struct *vma, unsigned long page);
#if 0 /* P3: deadwood to debug precise flushes on Swift. */
void swift_flush_tlb_page(struct vm_area_struct *vma, unsigned long page)
{
	int cctx, ctx1;

	page &= PAGE_MASK;
	if ((ctx1 = vma->vm_mm->context) != -1) {
		cctx = srmmu_get_context();
/* Is context # ever different from current context? P3 */
		if (cctx != ctx1) {
			printk("flush ctx %02x curr %02x\n", ctx1, cctx);
			srmmu_set_context(ctx1);
			swift_flush_page(page);
			__asm__ __volatile__("sta %%g0, [%0] %1\n\t" : :
					"r" (page), "i" (ASI_M_FLUSH_PROBE));
			srmmu_set_context(cctx);
		} else {
			/* Rm. prot. bits from virt. c. */
			/* swift_flush_cache_all(); */
			/* swift_flush_cache_page(vma, page); */
			swift_flush_page(page);

			__asm__ __volatile__("sta %%g0, [%0] %1\n\t" : :
				"r" (page), "i" (ASI_M_FLUSH_PROBE));
			/* same as above: srmmu_flush_tlb_page() */
		}
	}
}
#endif
/*
 * The following are all MBUS based SRMMU modules, and therefore could
 * be found in a multiprocessor configuration.  On the whole, these
 * chips seem to be much more touchy about DVMA and page tables
 * with respect to cache coherency.
 */
extern void viking_flush_cache_all(void);
extern void viking_flush_cache_mm(struct mm_struct *mm);
extern void viking_flush_cache_range(struct vm_area_struct *vma, unsigned long start,
				     unsigned long end);
extern void viking_flush_cache_page(struct vm_area_struct *vma, unsigned long page);
extern void viking_flush_page_to_ram(unsigned long page);
extern void viking_flush_page_for_dma(unsigned long page);
extern void viking_flush_sig_insns(struct mm_struct *mm, unsigned long addr);
extern void viking_flush_page(unsigned long page);
extern void viking_mxcc_flush_page(unsigned long page);
extern void viking_flush_tlb_all(void);
extern void viking_flush_tlb_mm(struct mm_struct *mm);
extern void viking_flush_tlb_range(struct vm_area_struct *vma, unsigned long start,
				   unsigned long end);
extern void viking_flush_tlb_page(struct vm_area_struct *vma,
				  unsigned long page);
#ifdef CONFIG_SMP
extern void sun4dsmp_flush_tlb_all(void);
extern void sun4dsmp_flush_tlb_mm(struct mm_struct *mm);
extern void sun4dsmp_flush_tlb_range(struct vm_area_struct *vma, unsigned long start,
				     unsigned long end);
extern void sun4dsmp_flush_tlb_page(struct vm_area_struct *vma,
				    unsigned long page);
#endif
extern void hypersparc_flush_cache_all(void);
extern void hypersparc_flush_cache_mm(struct mm_struct *mm);
extern void hypersparc_flush_cache_range(struct vm_area_struct *vma, unsigned long start, unsigned long end);
extern void hypersparc_flush_cache_page(struct vm_area_struct *vma, unsigned long page);
extern void hypersparc_flush_page_to_ram(unsigned long page);
extern void hypersparc_flush_page_for_dma(unsigned long page);
extern void hypersparc_flush_sig_insns(struct mm_struct *mm, unsigned long insn_addr);
extern void hypersparc_flush_tlb_all(void);
extern void hypersparc_flush_tlb_mm(struct mm_struct *mm);
extern void hypersparc_flush_tlb_range(struct vm_area_struct *vma, unsigned long start, unsigned long end);
extern void hypersparc_flush_tlb_page(struct vm_area_struct *vma, unsigned long page);
extern void hypersparc_setup_blockops(void);
/*
 * NOTE: All of this startup code assumes the low 16mb (approx.) of
 *       kernel mappings are done with one single contiguous chunk of
 *       ram.  On small ram machines (classics mainly) we only get
 *       around 8mb mapped for us.
 */
static void __init early_pgtable_allocfail(char *type)
{
	prom_printf("inherit_prom_mappings: Cannot alloc kernel %s.\n", type);
	prom_halt();
}
static void __init srmmu_early_allocate_ptable_skeleton(unsigned long start,
							unsigned long end)
{
	pgd_t *pgdp;
	pmd_t *pmdp;
	pte_t *ptep;

	while (start < end) {
		pgdp = pgd_offset_k(start);
		if (pgd_none(*(pgd_t *)__nocache_fix(pgdp))) {
			pmdp = __srmmu_get_nocache(
			    SRMMU_PMD_TABLE_SIZE, SRMMU_PMD_TABLE_SIZE);
			if (pmdp == NULL)
				early_pgtable_allocfail("pmd");
			memset(__nocache_fix(pmdp), 0, SRMMU_PMD_TABLE_SIZE);
			pgd_set(__nocache_fix(pgdp), pmdp);
		}
		pmdp = pmd_offset(__nocache_fix(pgdp), start);
		if (srmmu_pmd_none(*(pmd_t *)__nocache_fix(pmdp))) {
			ptep = __srmmu_get_nocache(PTE_SIZE, PTE_SIZE);
			if (ptep == NULL)
				early_pgtable_allocfail("pte");
			memset(__nocache_fix(ptep), 0, PTE_SIZE);
			pmd_set(__nocache_fix(pmdp), ptep);
		}
		if (start > (0xffffffffUL - PMD_SIZE))
			break;
		start = (start + PMD_SIZE) & PMD_MASK;
	}
}
static void __init srmmu_allocate_ptable_skeleton(unsigned long start,
						  unsigned long end)
{
	pgd_t *pgdp;
	pmd_t *pmdp;
	pte_t *ptep;

	while (start < end) {
		pgdp = pgd_offset_k(start);
		if (pgd_none(*pgdp)) {
			pmdp = __srmmu_get_nocache(SRMMU_PMD_TABLE_SIZE, SRMMU_PMD_TABLE_SIZE);
			if (pmdp == NULL)
				early_pgtable_allocfail("pmd");
			memset(pmdp, 0, SRMMU_PMD_TABLE_SIZE);
			pgd_set(pgdp, pmdp);
		}
		pmdp = pmd_offset(pgdp, start);
		if (srmmu_pmd_none(*pmdp)) {
			ptep = __srmmu_get_nocache(PTE_SIZE,
						   PTE_SIZE);
			if (ptep == NULL)
				early_pgtable_allocfail("pte");
			memset(ptep, 0, PTE_SIZE);
			pmd_set(pmdp, ptep);
		}
		if (start > (0xffffffffUL - PMD_SIZE))
			break;
		start = (start + PMD_SIZE) & PMD_MASK;
	}
}
/* These flush types are not available on all chips... */
static inline unsigned long srmmu_probe(unsigned long vaddr)
{
	unsigned long retval;

	if (sparc_cpu_model != sparc_leon) {
		vaddr &= PAGE_MASK;
		__asm__ __volatile__("lda [%1] %2, %0\n\t" :
				     "=r" (retval) :
				     "r" (vaddr | 0x400), "i" (ASI_M_FLUSH_PROBE));
	} else {
		retval = leon_swprobe(vaddr, 0);
	}
	return retval;
}
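/*
 * The "| 0x400" above sets the probe-type field (VA bits 10:8) to 4,
 * the SRMMU "entire" probe, which walks all table levels and returns
 * the matching entry; leon_swprobe() emulates the same operation in
 * software for LEON.
 */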
/*
 * This is much cleaner than poking around physical address space
 * looking at the prom's page table directly which is what most
 * other OS's do.  Yuck... this is much better.
 */
static void __init srmmu_inherit_prom_mappings(unsigned long start,
					       unsigned long end)
{
	unsigned long probed;
	unsigned long addr;
	pgd_t *pgdp;
	pmd_t *pmdp;
	pte_t *ptep;
	int what;	/* 0 = normal-pte, 1 = pmd-level pte, 2 = pgd-level pte */

	while (start <= end) {
		if (start == 0)
			break; /* probably wrap around */
		if (start == 0xfef00000)
			start = KADB_DEBUGGER_BEGVM;
		probed = srmmu_probe(start);
		if (!probed) {
			/* continue probing until we find an entry */
			start += PAGE_SIZE;
			continue;
		}

		/* A red snapper, see what it really is. */
		what = 0;
		addr = start - PAGE_SIZE;

		if (!(start & ~(SRMMU_REAL_PMD_MASK))) {
			if (srmmu_probe(addr + SRMMU_REAL_PMD_SIZE) == probed)
				what = 1;
		}

		if (!(start & ~(SRMMU_PGDIR_MASK))) {
			if (srmmu_probe(addr + SRMMU_PGDIR_SIZE) == probed)
				what = 2;
		}

		pgdp = pgd_offset_k(start);
		if (what == 2) {
			*(pgd_t *)__nocache_fix(pgdp) = __pgd(probed);
			start += SRMMU_PGDIR_SIZE;
			continue;
		}
		if (pgd_none(*(pgd_t *)__nocache_fix(pgdp))) {
			pmdp = __srmmu_get_nocache(SRMMU_PMD_TABLE_SIZE,
						   SRMMU_PMD_TABLE_SIZE);
			if (pmdp == NULL)
				early_pgtable_allocfail("pmd");
			memset(__nocache_fix(pmdp), 0, SRMMU_PMD_TABLE_SIZE);
			pgd_set(__nocache_fix(pgdp), pmdp);
		}
		pmdp = pmd_offset(__nocache_fix(pgdp), start);
		if (srmmu_pmd_none(*(pmd_t *)__nocache_fix(pmdp))) {
			ptep = __srmmu_get_nocache(PTE_SIZE, PTE_SIZE);
			if (ptep == NULL)
				early_pgtable_allocfail("pte");
			memset(__nocache_fix(ptep), 0, PTE_SIZE);
			pmd_set(__nocache_fix(pmdp), ptep);
		}
		if (what == 1) {
			/* We bend the rule where all 16 PTPs in a pmd_t point
			 * inside the same PTE page, and we leak a perfectly
			 * good hardware PTE piece. Alternatives seem worse.
			 */
			unsigned int x;	/* Index of HW PMD in soft cluster */
			unsigned long *val;
			x = (start >> PMD_SHIFT) & 15;
			val = &pmdp->pmdv[x];
			*(unsigned long *)__nocache_fix(val) = probed;
			start += SRMMU_REAL_PMD_SIZE;
			continue;
		}
		ptep = pte_offset_kernel(__nocache_fix(pmdp), start);
		*(pte_t *)__nocache_fix(ptep) = __pte(probed);
		start += PAGE_SIZE;
	}
}
#define KERNEL_PTE(page_shifted) ((page_shifted)|SRMMU_CACHE|SRMMU_PRIV|SRMMU_VALID)

/* Create a third-level SRMMU 16MB page mapping. */
static void __init do_large_mapping(unsigned long vaddr, unsigned long phys_base)
{
	pgd_t *pgdp = pgd_offset_k(vaddr);
	unsigned long big_pte;

	big_pte = KERNEL_PTE(phys_base >> 4);
	*(pgd_t *)__nocache_fix(pgdp) = __pgd(big_pte);
}
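/*
 * Worked example (physical base hypothetical): do_large_mapping(
 * PAGE_OFFSET, 0x04000000) computes KERNEL_PTE(0x04000000 >> 4) ==
 * 0x00400000 | SRMMU_CACHE | SRMMU_PRIV | SRMMU_VALID and stores that
 * directly in the pgd slot, so the whole 16 MB pgdir is one level-1
 * PTE with no pmd/pte tables behind it.
 */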
/* Map sp_bank entry SP_ENTRY, starting at virtual address VBASE. */
static unsigned long __init map_spbank(unsigned long vbase, int sp_entry)
{
	unsigned long pstart = (sp_banks[sp_entry].base_addr & SRMMU_PGDIR_MASK);
	unsigned long vstart = (vbase & SRMMU_PGDIR_MASK);
	unsigned long vend = SRMMU_PGDIR_ALIGN(vbase + sp_banks[sp_entry].num_bytes);
	/* Map "low" memory only */
	const unsigned long min_vaddr = PAGE_OFFSET;
	const unsigned long max_vaddr = PAGE_OFFSET + SRMMU_MAXMEM;

	if (vstart < min_vaddr || vstart >= max_vaddr)
		return vstart;

	if (vend > max_vaddr || vend < min_vaddr)
		vend = max_vaddr;

	while (vstart < vend) {
		do_large_mapping(vstart, pstart);
		vstart += SRMMU_PGDIR_SIZE; pstart += SRMMU_PGDIR_SIZE;
	}
	return vstart;
}
static void __init map_kernel(void)
{
	int i;

	if (phys_base > 0) {
		do_large_mapping(PAGE_OFFSET, phys_base);
	}

	for (i = 0; sp_banks[i].num_bytes != 0; i++) {
		map_spbank((unsigned long)__va(sp_banks[i].base_addr), i);
	}
}
/* Paging initialization on the Sparc Reference MMU. */
extern void sparc_context_init(int);

void (*poke_srmmu)(void) __cpuinitdata = NULL;

extern unsigned long bootmem_init(unsigned long *pages_avail);

void __init srmmu_paging_init(void)
{
	int i;
	phandle cpunode;
	char node_str[128];
	pgd_t *pgd;
	pmd_t *pmd;
	pte_t *pte;
	unsigned long pages_avail;

	sparc_iomap.start = SUN4M_IOBASE_VADDR;	/* 16MB of IOSPACE on all sun4m's. */

	if (sparc_cpu_model == sun4d)
		num_contexts = 65536; /* We know it is Viking */
	else {
		/* Find the number of contexts on the srmmu. */
		cpunode = prom_getchild(prom_root_node);
		num_contexts = 0;
		while (cpunode != 0) {
			prom_getstring(cpunode, "device_type", node_str, sizeof(node_str));
			if (!strcmp(node_str, "cpu")) {
				num_contexts = prom_getintdefault(cpunode, "mmu-nctx", 0x8);
				break;
			}
			cpunode = prom_getsibling(cpunode);
		}
	}

	if (!num_contexts) {
		prom_printf("Something wrong, can't find cpu node in paging_init.\n");
		prom_halt();
	}

	pages_avail = 0;
	last_valid_pfn = bootmem_init(&pages_avail);

	srmmu_nocache_calcsize();
	srmmu_nocache_init();
	srmmu_inherit_prom_mappings(0xfe400000, (LINUX_OPPROM_ENDVM - PAGE_SIZE));
	map_kernel();

	/* ctx table has to be physically aligned to its size */
	srmmu_context_table = __srmmu_get_nocache(num_contexts * sizeof(ctxd_t), num_contexts * sizeof(ctxd_t));
	srmmu_ctx_table_phys = (ctxd_t *)__nocache_pa((unsigned long)srmmu_context_table);

	for (i = 0; i < num_contexts; i++)
		srmmu_ctxd_set((ctxd_t *)__nocache_fix(&srmmu_context_table[i]), srmmu_swapper_pg_dir);

	flush_cache_all();
	srmmu_set_ctable_ptr((unsigned long)srmmu_ctx_table_phys);
#ifdef CONFIG_SMP
	/* Stop from hanging here... */
	local_ops->tlb_all();
#else
	flush_tlb_all();
#endif
	poke_srmmu();

	srmmu_allocate_ptable_skeleton(sparc_iomap.start, IOBASE_END);
	srmmu_allocate_ptable_skeleton(DVMA_VADDR, DVMA_END);

	srmmu_allocate_ptable_skeleton(
		__fix_to_virt(__end_of_fixed_addresses - 1), FIXADDR_TOP);
	srmmu_allocate_ptable_skeleton(PKMAP_BASE, PKMAP_END);

	pgd = pgd_offset_k(PKMAP_BASE);
	pmd = pmd_offset(pgd, PKMAP_BASE);
	pte = pte_offset_kernel(pmd, PKMAP_BASE);
	pkmap_page_table = pte;

	flush_cache_all();
	flush_tlb_all();

	sparc_context_init(num_contexts);

	kmap_init();

	{
		unsigned long zones_size[MAX_NR_ZONES];
		unsigned long zholes_size[MAX_NR_ZONES];
		unsigned long npages;
		int znum;

		for (znum = 0; znum < MAX_NR_ZONES; znum++)
			zones_size[znum] = zholes_size[znum] = 0;

		npages = max_low_pfn - pfn_base;

		zones_size[ZONE_DMA] = npages;
		zholes_size[ZONE_DMA] = npages - pages_avail;

		npages = highend_pfn - max_low_pfn;
		zones_size[ZONE_HIGHMEM] = npages;
		zholes_size[ZONE_HIGHMEM] = npages - calc_highpages();

		free_area_init_node(0, zones_size, pfn_base, zholes_size);
	}
}
void mmu_info(struct seq_file *m)
{
	seq_printf(m,
		   "MMU type\t: %s\n"
		   "contexts\t: %d\n"
		   "nocache total\t: %ld\n"
		   "nocache used\t: %d\n",
		   srmmu_name,
		   num_contexts,
		   srmmu_nocache_size,
		   srmmu_nocache_map.used << SRMMU_NOCACHE_BITMAP_SHIFT);
}
void destroy_context(struct mm_struct *mm)
{
	if (mm->context != NO_CONTEXT) {
		flush_cache_mm(mm);
		srmmu_ctxd_set(&srmmu_context_table[mm->context], srmmu_swapper_pg_dir);
		flush_tlb_mm(mm);
		spin_lock(&srmmu_context_spinlock);
		free_context(mm->context);
		spin_unlock(&srmmu_context_spinlock);
		mm->context = NO_CONTEXT;
	}
}
/* Init various srmmu chip types. */
static void __init srmmu_is_bad(void)
{
	prom_printf("Could not determine SRMMU chip type.\n");
	prom_halt();
}
static void __init init_vac_layout(void)
{
	phandle nd;
	int cache_lines;
	char node_str[128];
#ifdef CONFIG_SMP
	int cpu = 0;
	unsigned long max_size = 0;
	unsigned long min_line_size = 0x10000000;
#endif

	nd = prom_getchild(prom_root_node);
	while ((nd = prom_getsibling(nd)) != 0) {
		prom_getstring(nd, "device_type", node_str, sizeof(node_str));
		if (!strcmp(node_str, "cpu")) {
			vac_line_size = prom_getint(nd, "cache-line-size");
			if (vac_line_size == -1) {
				prom_printf("can't determine cache-line-size, halting.\n");
				prom_halt();
			}
			cache_lines = prom_getint(nd, "cache-nlines");
			if (cache_lines == -1) {
				prom_printf("can't determine cache-nlines, halting.\n");
				prom_halt();
			}

			vac_cache_size = cache_lines * vac_line_size;
#ifdef CONFIG_SMP
			if (vac_cache_size > max_size)
				max_size = vac_cache_size;
			if (vac_line_size < min_line_size)
				min_line_size = vac_line_size;
			//FIXME: cpus not contiguous!!
			cpu++;
			if (cpu >= nr_cpu_ids || !cpu_online(cpu))
				break;
#else
			break;
#endif
		}
	}
	if (nd == 0) {
		prom_printf("No CPU nodes found, halting.\n");
		prom_halt();
	}
#ifdef CONFIG_SMP
	vac_cache_size = max_size;
	vac_line_size = min_line_size;
#endif
	printk("SRMMU: Using VAC size of %d bytes, line size %d bytes.\n",
	       (int)vac_cache_size, (int)vac_line_size);
}
static void __cpuinit poke_hypersparc(void)
{
	volatile unsigned long clear;
	unsigned long mreg = srmmu_get_mmureg();

	hyper_flush_unconditional_combined();

	mreg &= ~(HYPERSPARC_CWENABLE);
	mreg |= (HYPERSPARC_CENABLE | HYPERSPARC_WBENABLE);
	mreg |= (HYPERSPARC_CMODE);

	srmmu_set_mmureg(mreg);

#if 0 /* XXX I think this is bad news... -DaveM */
	hyper_clear_all_tags();
#endif

	put_ross_icr(HYPERSPARC_ICCR_FTD | HYPERSPARC_ICCR_ICE);
	hyper_flush_whole_icache();
	clear = srmmu_get_faddr();
	clear = srmmu_get_fstatus();
}
static const struct sparc32_cachetlb_ops hypersparc_ops = {
	.cache_all	= hypersparc_flush_cache_all,
	.cache_mm	= hypersparc_flush_cache_mm,
	.cache_page	= hypersparc_flush_cache_page,
	.cache_range	= hypersparc_flush_cache_range,
	.tlb_all	= hypersparc_flush_tlb_all,
	.tlb_mm		= hypersparc_flush_tlb_mm,
	.tlb_page	= hypersparc_flush_tlb_page,
	.tlb_range	= hypersparc_flush_tlb_range,
	.page_to_ram	= hypersparc_flush_page_to_ram,
	.sig_insns	= hypersparc_flush_sig_insns,
	.page_for_dma	= hypersparc_flush_page_for_dma,
};
static void __init init_hypersparc(void)
{
	srmmu_name = "ROSS HyperSparc";
	srmmu_modtype = HyperSparc;

	init_vac_layout();

	is_hypersparc = 1;
	sparc32_cachetlb_ops = &hypersparc_ops;

	poke_srmmu = poke_hypersparc;

	hypersparc_setup_blockops();
}
static void __cpuinit poke_swift(void)
{
	unsigned long mreg;

	/* Clear any crap from the cache or else... */
	swift_flush_cache_all();

	/* Enable I & D caches */
	mreg = srmmu_get_mmureg();
	mreg |= (SWIFT_IE | SWIFT_DE);
	/*
	 * The Swift branch folding logic is completely broken.  At
	 * trap time, if things are just right, it can mistakenly
	 * think that a trap is coming from kernel mode when in fact
	 * it is coming from user mode (it mis-executes the branch in
	 * the trap code).  So you see things like crashme completely
	 * hosing your machine which is completely unacceptable.  Turn
	 * this shit off... nice job Fujitsu.
	 */
	mreg &= ~(SWIFT_BF);
	srmmu_set_mmureg(mreg);
}
static const struct sparc32_cachetlb_ops swift_ops = {
	.cache_all	= swift_flush_cache_all,
	.cache_mm	= swift_flush_cache_mm,
	.cache_page	= swift_flush_cache_page,
	.cache_range	= swift_flush_cache_range,
	.tlb_all	= swift_flush_tlb_all,
	.tlb_mm		= swift_flush_tlb_mm,
	.tlb_page	= swift_flush_tlb_page,
	.tlb_range	= swift_flush_tlb_range,
	.page_to_ram	= swift_flush_page_to_ram,
	.sig_insns	= swift_flush_sig_insns,
	.page_for_dma	= swift_flush_page_for_dma,
};
#define SWIFT_MASKID_ADDR  0x10003018
static void __init init_swift(void)
{
	unsigned long swift_rev;

	__asm__ __volatile__("lda [%1] %2, %0\n\t"
			     "srl %0, 0x18, %0\n\t" :
			     "=r" (swift_rev) :
			     "r" (SWIFT_MASKID_ADDR), "i" (ASI_M_BYPASS));
	srmmu_name = "Fujitsu Swift";
	switch (swift_rev) {
	case 0x11:
	case 0x20:
	case 0x23:
	case 0x30:
		srmmu_modtype = Swift_lots_o_bugs;
		hwbug_bitmask |= (HWBUG_KERN_ACCBROKEN | HWBUG_KERN_CBITBROKEN);
		/*
		 * Gee george, I wonder why Sun is so hush hush about
		 * this hardware bug... really braindamage stuff going
		 * on here.  However I think we can find a way to avoid
		 * all of the workaround overhead under Linux.  Basically,
		 * any page fault can cause kernel pages to become user
		 * accessible (the mmu gets confused and clears some of
		 * the ACC bits in kernel ptes).  Aha, sounds pretty
		 * horrible eh?  But wait, after extensive testing it appears
		 * that if you use pgd_t level large kernel pte's (like the
		 * 4MB pages on the Pentium) the bug does not get tripped
		 * at all.  This avoids almost all of the major overhead.
		 * Welcome to a world where your vendor tells you to,
		 * "apply this kernel patch" instead of "sorry for the
		 * broken hardware, send it back and we'll give you
		 * properly functioning parts"
		 */
		break;
	case 0x25:
	case 0x31:
		srmmu_modtype = Swift_bad_c;
		hwbug_bitmask |= HWBUG_KERN_CBITBROKEN;
		/*
		 * You see Sun allude to this hardware bug but never
		 * admit things directly, they'll say things like,
		 * "the Swift chip cache problems" or similar.
		 */
		break;
	default:
		srmmu_modtype = Swift_ok;
		break;
	}

	sparc32_cachetlb_ops = &swift_ops;
	flush_page_for_dma_global = 0;

	/*
	 * Are you now convinced that the Swift is one of the
	 * biggest VLSI abortions of all time?  Bravo Fujitsu!
	 * Fujitsu, the !#?!%$'d up processor people.  I bet if
	 * you examined the microcode of the Swift you'd find
	 * XXX's all over the place.
	 */
	poke_srmmu = poke_swift;
}
static void turbosparc_flush_cache_all(void)
{
	flush_user_windows();
	turbosparc_idflash_clear();
}

static void turbosparc_flush_cache_mm(struct mm_struct *mm)
{
	FLUSH_BEGIN(mm)
	flush_user_windows();
	turbosparc_idflash_clear();
	FLUSH_END
}

static void turbosparc_flush_cache_range(struct vm_area_struct *vma, unsigned long start, unsigned long end)
{
	FLUSH_BEGIN(vma->vm_mm)
	flush_user_windows();
	turbosparc_idflash_clear();
	FLUSH_END
}

static void turbosparc_flush_cache_page(struct vm_area_struct *vma, unsigned long page)
{
	FLUSH_BEGIN(vma->vm_mm)
	flush_user_windows();
	if (vma->vm_flags & VM_EXEC)
		turbosparc_flush_icache();
	turbosparc_flush_dcache();
	FLUSH_END
}

/* TurboSparc is copy-back, if we turn it on, but this does not work. */
static void turbosparc_flush_page_to_ram(unsigned long page)
{
#ifdef TURBOSPARC_WRITEBACK
	volatile unsigned long clear;

	if (srmmu_probe(page))
		turbosparc_flush_page_cache(page);
	clear = srmmu_get_fstatus();
#endif
}

static void turbosparc_flush_sig_insns(struct mm_struct *mm, unsigned long insn_addr)
{
}

static void turbosparc_flush_page_for_dma(unsigned long page)
{
	turbosparc_flush_dcache();
}

static void turbosparc_flush_tlb_all(void)
{
	srmmu_flush_whole_tlb();
}

static void turbosparc_flush_tlb_mm(struct mm_struct *mm)
{
	FLUSH_BEGIN(mm)
	srmmu_flush_whole_tlb();
	FLUSH_END
}

static void turbosparc_flush_tlb_range(struct vm_area_struct *vma, unsigned long start, unsigned long end)
{
	FLUSH_BEGIN(vma->vm_mm)
	srmmu_flush_whole_tlb();
	FLUSH_END
}

static void turbosparc_flush_tlb_page(struct vm_area_struct *vma, unsigned long page)
{
	FLUSH_BEGIN(vma->vm_mm)
	srmmu_flush_whole_tlb();
	FLUSH_END
}
static void __cpuinit poke_turbosparc(void)
{
	unsigned long mreg = srmmu_get_mmureg();
	unsigned long ccreg;

	/* Clear any crap from the cache or else... */
	turbosparc_flush_cache_all();
	/* Temporarily disable I & D caches */
	mreg &= ~(TURBOSPARC_ICENABLE | TURBOSPARC_DCENABLE);
	mreg &= ~(TURBOSPARC_PCENABLE);		/* Don't check parity */
	srmmu_set_mmureg(mreg);

	ccreg = turbosparc_get_ccreg();

#ifdef TURBOSPARC_WRITEBACK
	ccreg |= (TURBOSPARC_SNENABLE);		/* Do DVMA snooping in Dcache */
	ccreg &= ~(TURBOSPARC_uS2 |
		   TURBOSPARC_WTENABLE);	/* Write-back D-cache, emulate VLSI
						 * abortion number three, not number one */
#else
	/* For now let's play safe, optimize later */
	ccreg |= (TURBOSPARC_SNENABLE | TURBOSPARC_WTENABLE);
						/* Do DVMA snooping in Dcache, Write-thru D-cache */
	ccreg &= ~(TURBOSPARC_uS2);		/* Emulate VLSI abortion number three, not number one */
#endif

	switch (ccreg & 7) {
	case 0: /* No SE cache */
	case 7: /* Test mode */
		break;
	default:
		ccreg |= (TURBOSPARC_SCENABLE);
	}
	turbosparc_set_ccreg(ccreg);

	mreg |= (TURBOSPARC_ICENABLE | TURBOSPARC_DCENABLE);	/* I & D caches on */
	mreg |= (TURBOSPARC_ICSNOOP);		/* Icache snooping on */
	srmmu_set_mmureg(mreg);
}
static const struct sparc32_cachetlb_ops turbosparc_ops = {
	.cache_all	= turbosparc_flush_cache_all,
	.cache_mm	= turbosparc_flush_cache_mm,
	.cache_page	= turbosparc_flush_cache_page,
	.cache_range	= turbosparc_flush_cache_range,
	.tlb_all	= turbosparc_flush_tlb_all,
	.tlb_mm		= turbosparc_flush_tlb_mm,
	.tlb_page	= turbosparc_flush_tlb_page,
	.tlb_range	= turbosparc_flush_tlb_range,
	.page_to_ram	= turbosparc_flush_page_to_ram,
	.sig_insns	= turbosparc_flush_sig_insns,
	.page_for_dma	= turbosparc_flush_page_for_dma,
};
static void __init init_turbosparc(void)
{
	srmmu_name = "Fujitsu TurboSparc";
	srmmu_modtype = TurboSparc;
	sparc32_cachetlb_ops = &turbosparc_ops;
	poke_srmmu = poke_turbosparc;
}
static void __cpuinit poke_tsunami(void)
{
	unsigned long mreg = srmmu_get_mmureg();

	tsunami_flush_icache();
	tsunami_flush_dcache();
	mreg &= ~TSUNAMI_ITD;
	mreg |= (TSUNAMI_IENAB | TSUNAMI_DENAB);
	srmmu_set_mmureg(mreg);
}
static const struct sparc32_cachetlb_ops tsunami_ops = {
	.cache_all	= tsunami_flush_cache_all,
	.cache_mm	= tsunami_flush_cache_mm,
	.cache_page	= tsunami_flush_cache_page,
	.cache_range	= tsunami_flush_cache_range,
	.tlb_all	= tsunami_flush_tlb_all,
	.tlb_mm		= tsunami_flush_tlb_mm,
	.tlb_page	= tsunami_flush_tlb_page,
	.tlb_range	= tsunami_flush_tlb_range,
	.page_to_ram	= tsunami_flush_page_to_ram,
	.sig_insns	= tsunami_flush_sig_insns,
	.page_for_dma	= tsunami_flush_page_for_dma,
};
static void __init init_tsunami(void)
{
	/*
	 * Tsunami's pretty sane, Sun and TI actually got it
	 * somewhat right this time.  Fujitsu should have
	 * taken some lessons from them.
	 */

	srmmu_name = "TI Tsunami";
	srmmu_modtype = Tsunami;
	sparc32_cachetlb_ops = &tsunami_ops;
	poke_srmmu = poke_tsunami;

	tsunami_setup_blockops();
}
static void __cpuinit poke_viking(void)
{
	unsigned long mreg = srmmu_get_mmureg();
	static int smp_catch;

	if (viking_mxcc_present) {
		unsigned long mxcc_control = mxcc_get_creg();

		mxcc_control |= (MXCC_CTL_ECE | MXCC_CTL_PRE | MXCC_CTL_MCE);
		mxcc_control &= ~(MXCC_CTL_RRC);
		mxcc_set_creg(mxcc_control);

		/*
		 * We don't need memory parity checks.
		 * XXX This is a mess, have to dig out later. ecd.
		 * viking_mxcc_turn_off_parity(&mreg, &mxcc_control);
		 */

		/* We do cache ptables on MXCC. */
		mreg |= VIKING_TCENABLE;
	} else {
		unsigned long bpreg;

		mreg &= ~(VIKING_TCENABLE);
		if (smp_catch++) {
			/* Must disable mixed-cmd mode here for other cpu's. */
			bpreg = viking_get_bpreg();
			bpreg &= ~(VIKING_ACTION_MIX);
			viking_set_bpreg(bpreg);

			/* Just in case PROM does something funny. */
			msi_set_sync();
		}
	}

	mreg |= VIKING_SPENABLE;
	mreg |= (VIKING_ICENABLE | VIKING_DCENABLE);
	mreg |= VIKING_SBENABLE;
	mreg &= ~(VIKING_ACENABLE);
	srmmu_set_mmureg(mreg);
}
static struct sparc32_cachetlb_ops viking_ops = {
	.cache_all	= viking_flush_cache_all,
	.cache_mm	= viking_flush_cache_mm,
	.cache_page	= viking_flush_cache_page,
	.cache_range	= viking_flush_cache_range,
	.tlb_all	= viking_flush_tlb_all,
	.tlb_mm		= viking_flush_tlb_mm,
	.tlb_page	= viking_flush_tlb_page,
	.tlb_range	= viking_flush_tlb_range,
	.page_to_ram	= viking_flush_page_to_ram,
	.sig_insns	= viking_flush_sig_insns,
	.page_for_dma	= viking_flush_page_for_dma,
};
#ifdef CONFIG_SMP
/* On sun4d the cpu broadcasts local TLB flushes, so we can just
 * perform the local TLB flush and all the other cpus will see it.
 * But, unfortunately, there is a bug in the sun4d XBUS backplane
 * that requires that we add some synchronization to these flushes.
 *
 * The bug is that the fifo which keeps track of all the pending TLB
 * broadcasts in the system is an entry or two too small, so if we
 * have too many going at once we'll overflow that fifo and lose a TLB
 * flush resulting in corruption.
 *
 * Our workaround is to take a global spinlock around the TLB flushes,
 * which guarantees we won't ever have too many pending.  It's a big
 * hammer, but a semaphore like system to make sure we only have N TLB
 * flushes going at once will require SMP locking anyways so there's
 * no real value in trying any harder than this.
 */
static struct sparc32_cachetlb_ops viking_sun4d_smp_ops = {
	.cache_all	= viking_flush_cache_all,
	.cache_mm	= viking_flush_cache_mm,
	.cache_page	= viking_flush_cache_page,
	.cache_range	= viking_flush_cache_range,
	.tlb_all	= sun4dsmp_flush_tlb_all,
	.tlb_mm		= sun4dsmp_flush_tlb_mm,
	.tlb_page	= sun4dsmp_flush_tlb_page,
	.tlb_range	= sun4dsmp_flush_tlb_range,
	.page_to_ram	= viking_flush_page_to_ram,
	.sig_insns	= viking_flush_sig_insns,
	.page_for_dma	= viking_flush_page_for_dma,
};
#endif
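/*
 * A minimal sketch of the workaround described above; the real
 * sun4dsmp_flush_tlb_*() routines live elsewhere and the lock name is
 * hypothetical, but the shape is: one global lock so that only one
 * TLB broadcast is ever in flight.
 */
#if 0
static DEFINE_SPINLOCK(sun4d_flush_lock);	/* hypothetical */

static void sun4dsmp_flush_tlb_all_sketch(void)
{
	spin_lock(&sun4d_flush_lock);	/* throttle pending broadcasts */
	viking_flush_tlb_all();		/* hardware broadcasts the flush */
	spin_unlock(&sun4d_flush_lock);
}
#endif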
static void __init init_viking(void)
{
	unsigned long mreg = srmmu_get_mmureg();

	/* Ahhh, the viking.  SRMMU VLSI abortion number two... */
	if (mreg & VIKING_MMODE) {
		srmmu_name = "TI Viking";
		viking_mxcc_present = 0;
		msi_set_sync();

		/*
		 * We need this to make sure old viking takes no hits
		 * on its cache for dma snoops to workaround the
		 * "load from non-cacheable memory" interrupt bug.
		 * This is only necessary because of the new way in
		 * which we use the IOMMU.
		 */
		viking_ops.page_for_dma = viking_flush_page;
#ifdef CONFIG_SMP
		viking_sun4d_smp_ops.page_for_dma = viking_flush_page;
#endif
		flush_page_for_dma_global = 0;
	} else {
		srmmu_name = "TI Viking/MXCC";
		viking_mxcc_present = 1;
		srmmu_cache_pagetables = 1;
	}

	sparc32_cachetlb_ops = (const struct sparc32_cachetlb_ops *)
		&viking_ops;
#ifdef CONFIG_SMP
	if (sparc_cpu_model == sun4d)
		sparc32_cachetlb_ops = (const struct sparc32_cachetlb_ops *)
			&viking_sun4d_smp_ops;
#endif

	poke_srmmu = poke_viking;
}
/* Probe for the srmmu chip version. */
static void __init get_srmmu_type(void)
{
	unsigned long mreg, psr;
	unsigned long mod_typ, mod_rev, psr_typ, psr_vers;

	srmmu_modtype = SRMMU_INVAL_MOD;
	hwbug_bitmask = 0;

	mreg = srmmu_get_mmureg(); psr = get_psr();
	mod_typ = (mreg & 0xf0000000) >> 28;
	mod_rev = (mreg & 0x0f000000) >> 24;
	psr_typ = (psr >> 28) & 0xf;
	psr_vers = (psr >> 24) & 0xf;

	/* First, check for sparc-leon. */
	if (sparc_cpu_model == sparc_leon) {
		init_leon();
		return;
	}

	/* Second, check for HyperSparc or Cypress. */
	if (mod_typ == 1) {
		switch (mod_rev) {
		case 7:
			/* UP or MP Hypersparc */
			init_hypersparc();
			break;
		default:
			prom_printf("Sparc-Linux Cypress support no longer exists.\n");
			prom_halt();
			break;
		}
		return;
	}

	/* Now Fujitsu TurboSparc. It might happen that it is
	 * in Swift emulation mode, so we will check later...
	 */
	if (psr_typ == 0 && psr_vers == 5) {
		init_turbosparc();
		return;
	}

	/* Next check for Fujitsu Swift. */
	if (psr_typ == 0 && psr_vers == 4) {
		phandle cpunode;
		char node_str[128];

		/* Look if it is not a TurboSparc emulating Swift... */
		cpunode = prom_getchild(prom_root_node);
		while ((cpunode = prom_getsibling(cpunode)) != 0) {
			prom_getstring(cpunode, "device_type", node_str, sizeof(node_str));
			if (!strcmp(node_str, "cpu")) {
				if (!prom_getintdefault(cpunode, "psr-implementation", 1) &&
				    prom_getintdefault(cpunode, "psr-version", 1) == 5) {
					init_turbosparc();
					return;
				}
				break;
			}
		}

		init_swift();
		return;
	}

	/* Now the Viking family of srmmu. */
	if (psr_typ == 4 &&
	   ((psr_vers == 0) ||
	    ((psr_vers == 1) && (mod_typ == 0) && (mod_rev == 0)))) {
		init_viking();
		return;
	}

	/* Finally the Tsunami. */
	if (psr_typ == 4 && psr_vers == 1 && (mod_typ || mod_rev)) {
		init_tsunami();
		return;
	}

	/* Oh well */
	srmmu_is_bad();
}
#ifdef CONFIG_SMP
/* Local cross-calls. */
static void smp_flush_page_for_dma(unsigned long page)
{
	xc1((smpfunc_t) local_ops->page_for_dma, page);
	local_ops->page_for_dma(page);
}

static void smp_flush_cache_all(void)
{
	xc0((smpfunc_t) local_ops->cache_all);
	local_ops->cache_all();
}

static void smp_flush_tlb_all(void)
{
	xc0((smpfunc_t) local_ops->tlb_all);
	local_ops->tlb_all();
}
*mm
)
1573 if (mm
->context
!= NO_CONTEXT
) {
1575 cpumask_copy(&cpu_mask
, mm_cpumask(mm
));
1576 cpumask_clear_cpu(smp_processor_id(), &cpu_mask
);
1577 if (!cpumask_empty(&cpu_mask
))
1578 xc1((smpfunc_t
) local_ops
->cache_mm
, (unsigned long) mm
);
1579 local_ops
->cache_mm(mm
);
1583 static void smp_flush_tlb_mm(struct mm_struct
*mm
)
1585 if (mm
->context
!= NO_CONTEXT
) {
1587 cpumask_copy(&cpu_mask
, mm_cpumask(mm
));
1588 cpumask_clear_cpu(smp_processor_id(), &cpu_mask
);
1589 if (!cpumask_empty(&cpu_mask
)) {
1590 xc1((smpfunc_t
) local_ops
->tlb_mm
, (unsigned long) mm
);
1591 if (atomic_read(&mm
->mm_users
) == 1 && current
->active_mm
== mm
)
1592 cpumask_copy(mm_cpumask(mm
),
1593 cpumask_of(smp_processor_id()));
1595 local_ops
->tlb_mm(mm
);
static void smp_flush_cache_range(struct vm_area_struct *vma,
				  unsigned long start,
				  unsigned long end)
{
	struct mm_struct *mm = vma->vm_mm;

	if (mm->context != NO_CONTEXT) {
		cpumask_t cpu_mask;

		cpumask_copy(&cpu_mask, mm_cpumask(mm));
		cpumask_clear_cpu(smp_processor_id(), &cpu_mask);
		if (!cpumask_empty(&cpu_mask))
			xc3((smpfunc_t) local_ops->cache_range,
			    (unsigned long) vma, start, end);
		local_ops->cache_range(vma, start, end);
	}
}
static void smp_flush_tlb_range(struct vm_area_struct *vma,
				unsigned long start,
				unsigned long end)
{
	struct mm_struct *mm = vma->vm_mm;

	if (mm->context != NO_CONTEXT) {
		cpumask_t cpu_mask;

		cpumask_copy(&cpu_mask, mm_cpumask(mm));
		cpumask_clear_cpu(smp_processor_id(), &cpu_mask);
		if (!cpumask_empty(&cpu_mask))
			xc3((smpfunc_t) local_ops->tlb_range,
			    (unsigned long) vma, start, end);
		local_ops->tlb_range(vma, start, end);
	}
}
static void smp_flush_cache_page(struct vm_area_struct *vma, unsigned long page)
{
	struct mm_struct *mm = vma->vm_mm;

	if (mm->context != NO_CONTEXT) {
		cpumask_t cpu_mask;

		cpumask_copy(&cpu_mask, mm_cpumask(mm));
		cpumask_clear_cpu(smp_processor_id(), &cpu_mask);
		if (!cpumask_empty(&cpu_mask))
			xc2((smpfunc_t) local_ops->cache_page,
			    (unsigned long) vma, page);
		local_ops->cache_page(vma, page);
	}
}
static void smp_flush_tlb_page(struct vm_area_struct *vma, unsigned long page)
{
	struct mm_struct *mm = vma->vm_mm;

	if (mm->context != NO_CONTEXT) {
		cpumask_t cpu_mask;

		cpumask_copy(&cpu_mask, mm_cpumask(mm));
		cpumask_clear_cpu(smp_processor_id(), &cpu_mask);
		if (!cpumask_empty(&cpu_mask))
			xc2((smpfunc_t) local_ops->tlb_page,
			    (unsigned long) vma, page);
		local_ops->tlb_page(vma, page);
	}
}
static void smp_flush_page_to_ram(unsigned long page)
{
	/* Current theory is that those who call this are the ones
	 * who have just dirtied their cache with the page's contents
	 * in kernel space, therefore we only run this on local cpu.
	 *
	 * XXX This experiment failed, research further... -DaveM
	 */
#if 1
	xc1((smpfunc_t) local_ops->page_to_ram, page);
#endif
	local_ops->page_to_ram(page);
}
static void smp_flush_sig_insns(struct mm_struct *mm, unsigned long insn_addr)
{
	cpumask_t cpu_mask;

	cpumask_copy(&cpu_mask, mm_cpumask(mm));
	cpumask_clear_cpu(smp_processor_id(), &cpu_mask);
	if (!cpumask_empty(&cpu_mask))
		xc2((smpfunc_t) local_ops->sig_insns,
		    (unsigned long) mm, insn_addr);
	local_ops->sig_insns(mm, insn_addr);
}
static struct sparc32_cachetlb_ops smp_cachetlb_ops = {
	.cache_all	= smp_flush_cache_all,
	.cache_mm	= smp_flush_cache_mm,
	.cache_page	= smp_flush_cache_page,
	.cache_range	= smp_flush_cache_range,
	.tlb_all	= smp_flush_tlb_all,
	.tlb_mm		= smp_flush_tlb_mm,
	.tlb_page	= smp_flush_tlb_page,
	.tlb_range	= smp_flush_tlb_range,
	.page_to_ram	= smp_flush_page_to_ram,
	.sig_insns	= smp_flush_sig_insns,
	.page_for_dma	= smp_flush_page_for_dma,
};
#endif
/* Load up routines and constants for sun4m and sun4d mmu */
void __init load_mmu(void)
{
	extern void ld_mmu_iommu(void);
	extern void ld_mmu_iounit(void);

	/* Functions */
	get_srmmu_type();

#ifdef CONFIG_SMP
	/* El switcheroo... */
	local_ops = sparc32_cachetlb_ops;

	if (sparc_cpu_model == sun4d || sparc_cpu_model == sparc_leon) {
		smp_cachetlb_ops.tlb_all = local_ops->tlb_all;
		smp_cachetlb_ops.tlb_mm = local_ops->tlb_mm;
		smp_cachetlb_ops.tlb_range = local_ops->tlb_range;
		smp_cachetlb_ops.tlb_page = local_ops->tlb_page;
	}

	if (poke_srmmu == poke_viking) {
		/* Avoid unnecessary cross calls. */
		smp_cachetlb_ops.cache_all = local_ops->cache_all;
		smp_cachetlb_ops.cache_mm = local_ops->cache_mm;
		smp_cachetlb_ops.cache_range = local_ops->cache_range;
		smp_cachetlb_ops.cache_page = local_ops->cache_page;

		smp_cachetlb_ops.page_to_ram = local_ops->page_to_ram;
		smp_cachetlb_ops.sig_insns = local_ops->sig_insns;
		smp_cachetlb_ops.page_for_dma = local_ops->page_for_dma;
	}

	/* It really is const after this point. */
	sparc32_cachetlb_ops = (const struct sparc32_cachetlb_ops *)
		&smp_cachetlb_ops;
#endif

	if (sparc_cpu_model == sun4d)
		ld_mmu_iounit();
	else
		ld_mmu_iommu();
#ifdef CONFIG_SMP
	if (sparc_cpu_model == sun4d)
		sun4d_init_smp();
	else if (sparc_cpu_model == sparc_leon)
		leon_init_smp();
#endif
}