sparc: Use HAVE_MEMBLOCK_NODE_MAP
arch/sparc/mm/init_64.c

/*
 * arch/sparc64/mm/init.c
 *
 * Copyright (C) 1996-1999 David S. Miller (davem@caip.rutgers.edu)
 * Copyright (C) 1997-1999 Jakub Jelinek (jj@sunsite.mff.cuni.cz)
 */

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/string.h>
#include <linux/init.h>
#include <linux/bootmem.h>
#include <linux/mm.h>
#include <linux/hugetlb.h>
#include <linux/initrd.h>
#include <linux/swap.h>
#include <linux/pagemap.h>
#include <linux/poison.h>
#include <linux/fs.h>
#include <linux/seq_file.h>
#include <linux/kprobes.h>
#include <linux/cache.h>
#include <linux/sort.h>
#include <linux/percpu.h>
#include <linux/memblock.h>
#include <linux/mmzone.h>
#include <linux/gfp.h>

#include <asm/head.h>
#include <asm/system.h>
#include <asm/page.h>
#include <asm/pgalloc.h>
#include <asm/pgtable.h>
#include <asm/oplib.h>
#include <asm/iommu.h>
#include <asm/io.h>
#include <asm/uaccess.h>
#include <asm/mmu_context.h>
#include <asm/tlbflush.h>
#include <asm/dma.h>
#include <asm/starfire.h>
#include <asm/tlb.h>
#include <asm/spitfire.h>
#include <asm/sections.h>
#include <asm/tsb.h>
#include <asm/hypervisor.h>
#include <asm/prom.h>
#include <asm/mdesc.h>
#include <asm/cpudata.h>
#include <asm/irq.h>

#include "init_64.h"

unsigned long kern_linear_pte_xor[2] __read_mostly;

/* A bitmap, one bit for every 256MB of physical memory.  If the bit
 * is clear, we should use a 4MB page (via kern_linear_pte_xor[0]) else
 * if set we should use a 256MB page (via kern_linear_pte_xor[1]).
 */
unsigned long kpte_linear_bitmap[KPTE_BITMAP_BYTES / sizeof(unsigned long)];

#ifndef CONFIG_DEBUG_PAGEALLOC
/* A special kernel TSB for 4MB and 256MB linear mappings.
 * Space is allocated for this right after the trap table
 * in arch/sparc64/kernel/head.S
 */
extern struct tsb swapper_4m_tsb[KERNEL_TSB4M_NENTRIES];
#endif

#define MAX_BANKS	32

static struct linux_prom64_registers pavail[MAX_BANKS] __devinitdata;
static int pavail_ents __devinitdata;

static int cmp_p64(const void *a, const void *b)
{
	const struct linux_prom64_registers *x = a, *y = b;

	if (x->phys_addr > y->phys_addr)
		return 1;
	if (x->phys_addr < y->phys_addr)
		return -1;
	return 0;
}

static void __init read_obp_memory(const char *property,
				   struct linux_prom64_registers *regs,
				   int *num_ents)
{
	phandle node = prom_finddevice("/memory");
	int prop_size = prom_getproplen(node, property);
	int ents, ret, i;

	ents = prop_size / sizeof(struct linux_prom64_registers);
	if (ents > MAX_BANKS) {
		prom_printf("The machine has more %s property entries than "
			    "this kernel can support (%d).\n",
			    property, MAX_BANKS);
		prom_halt();
	}

	ret = prom_getproperty(node, property, (char *) regs, prop_size);
	if (ret == -1) {
		prom_printf("Couldn't get %s property from /memory.\n",
			    property);
		prom_halt();
	}

	/* Sanitize what we got from the firmware, by page aligning
	 * everything.
	 */
	for (i = 0; i < ents; i++) {
		unsigned long base, size;

		base = regs[i].phys_addr;
		size = regs[i].reg_size;

		size &= PAGE_MASK;
		if (base & ~PAGE_MASK) {
			unsigned long new_base = PAGE_ALIGN(base);

			size -= new_base - base;
			if ((long) size < 0L)
				size = 0UL;
			base = new_base;
		}
		if (size == 0UL) {
			/* If it is empty, simply get rid of it.
			 * This simplifies the logic of the other
			 * functions that process these arrays.
			 */
			memmove(&regs[i], &regs[i + 1],
				(ents - i - 1) * sizeof(regs[0]));
			i--;
			ents--;
			continue;
		}
		regs[i].phys_addr = base;
		regs[i].reg_size = size;
	}

	*num_ents = ents;

	sort(regs, ents, sizeof(struct linux_prom64_registers),
	     cmp_p64, NULL);
}

unsigned long sparc64_valid_addr_bitmap[VALID_ADDR_BITMAP_BYTES /
					sizeof(unsigned long)];
EXPORT_SYMBOL(sparc64_valid_addr_bitmap);

/* Kernel physical address base and size in bytes. */
unsigned long kern_base __read_mostly;
unsigned long kern_size __read_mostly;

/* Initial ramdisk setup */
extern unsigned long sparc_ramdisk_image64;
extern unsigned int sparc_ramdisk_image;
extern unsigned int sparc_ramdisk_size;

struct page *mem_map_zero __read_mostly;
EXPORT_SYMBOL(mem_map_zero);

unsigned int sparc64_highest_unlocked_tlb_ent __read_mostly;

unsigned long sparc64_kern_pri_context __read_mostly;
unsigned long sparc64_kern_pri_nuc_bits __read_mostly;
unsigned long sparc64_kern_sec_context __read_mostly;

int num_kernel_image_mappings;

#ifdef CONFIG_DEBUG_DCFLUSH
atomic_t dcpage_flushes = ATOMIC_INIT(0);
#ifdef CONFIG_SMP
atomic_t dcpage_flushes_xcall = ATOMIC_INIT(0);
#endif
#endif

inline void flush_dcache_page_impl(struct page *page)
{
	BUG_ON(tlb_type == hypervisor);
#ifdef CONFIG_DEBUG_DCFLUSH
	atomic_inc(&dcpage_flushes);
#endif

#ifdef DCACHE_ALIASING_POSSIBLE
	__flush_dcache_page(page_address(page),
			    ((tlb_type == spitfire) &&
			     page_mapping(page) != NULL));
#else
	if (page_mapping(page) != NULL &&
	    tlb_type == spitfire)
		__flush_icache_page(__pa(page_address(page)));
#endif
}

#define PG_dcache_dirty		PG_arch_1
#define PG_dcache_cpu_shift	32UL
#define PG_dcache_cpu_mask	\
	((1UL<<ilog2(roundup_pow_of_two(NR_CPUS)))-1UL)

#define dcache_dirty_cpu(page) \
	(((page)->flags >> PG_dcache_cpu_shift) & PG_dcache_cpu_mask)

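/* Atomically mark the page dcache-dirty and record this_cpu as the
 * owning cpu in page->flags, using a casx compare-and-swap retry
 * loop so that concurrent updates of other page flags are not lost.
 */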
static inline void set_dcache_dirty(struct page *page, int this_cpu)
{
	unsigned long mask = this_cpu;
	unsigned long non_cpu_bits;

	non_cpu_bits = ~(PG_dcache_cpu_mask << PG_dcache_cpu_shift);
	mask = (mask << PG_dcache_cpu_shift) | (1UL << PG_dcache_dirty);

	__asm__ __volatile__("1:\n\t"
			     "ldx	[%2], %%g7\n\t"
			     "and	%%g7, %1, %%g1\n\t"
			     "or	%%g1, %0, %%g1\n\t"
			     "casx	[%2], %%g7, %%g1\n\t"
			     "cmp	%%g7, %%g1\n\t"
			     "bne,pn	%%xcc, 1b\n\t"
			     " nop"
			     : /* no outputs */
			     : "r" (mask), "r" (non_cpu_bits), "r" (&page->flags)
			     : "g1", "g7");
}

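/* Atomically clear the dirty bit in page->flags, but only if the
 * recorded dirty cpu still equals 'cpu'; otherwise another cpu has
 * taken ownership and the bit is left alone.
 */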
static inline void clear_dcache_dirty_cpu(struct page *page, unsigned long cpu)
{
	unsigned long mask = (1UL << PG_dcache_dirty);

	__asm__ __volatile__("! test_and_clear_dcache_dirty\n"
			     "1:\n\t"
			     "ldx	[%2], %%g7\n\t"
			     "srlx	%%g7, %4, %%g1\n\t"
			     "and	%%g1, %3, %%g1\n\t"
			     "cmp	%%g1, %0\n\t"
			     "bne,pn	%%icc, 2f\n\t"
			     " andn	%%g7, %1, %%g1\n\t"
			     "casx	[%2], %%g7, %%g1\n\t"
			     "cmp	%%g7, %%g1\n\t"
			     "bne,pn	%%xcc, 1b\n\t"
			     " nop\n"
			     "2:"
			     : /* no outputs */
			     : "r" (cpu), "r" (mask), "r" (&page->flags),
			       "i" (PG_dcache_cpu_mask),
			       "i" (PG_dcache_cpu_shift)
			     : "g1", "g7");
}

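/* Insert a (tag, pte) pair into the given TSB entry.  On cheetah_plus
 * and sun4v the TSB is accessed via its physical address, so convert
 * the entry pointer with __pa() first in those cases.
 */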
static inline void tsb_insert(struct tsb *ent, unsigned long tag, unsigned long pte)
{
	unsigned long tsb_addr = (unsigned long) ent;

	if (tlb_type == cheetah_plus || tlb_type == hypervisor)
		tsb_addr = __pa(tsb_addr);

	__tsb_insert(tsb_addr, tag, pte);
}

unsigned long _PAGE_ALL_SZ_BITS __read_mostly;
unsigned long _PAGE_SZBITS __read_mostly;

static void flush_dcache(unsigned long pfn)
{
	struct page *page;

	page = pfn_to_page(pfn);
	if (page) {
		unsigned long pg_flags;

		pg_flags = page->flags;
		if (pg_flags & (1UL << PG_dcache_dirty)) {
			int cpu = ((pg_flags >> PG_dcache_cpu_shift) &
				   PG_dcache_cpu_mask);
			int this_cpu = get_cpu();

			/* This is just to optimize away some function calls
			 * in the SMP case.
			 */
			if (cpu == this_cpu)
				flush_dcache_page_impl(page);
			else
				smp_flush_dcache_page_impl(page, cpu);

			clear_dcache_dirty_cpu(page, cpu);

			put_cpu();
		}
	}
}

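/* Called after a fault has been serviced: flush the D-cache for the
 * new mapping if needed, then preload the matching entry into the
 * mm's TSB (the huge-page TSB is used when the PTE is a huge one).
 */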
void update_mmu_cache(struct vm_area_struct *vma, unsigned long address, pte_t *ptep)
{
	struct mm_struct *mm;
	struct tsb *tsb;
	unsigned long tag, flags;
	unsigned long tsb_index, tsb_hash_shift;
	pte_t pte = *ptep;

	if (tlb_type != hypervisor) {
		unsigned long pfn = pte_pfn(pte);

		if (pfn_valid(pfn))
			flush_dcache(pfn);
	}

	mm = vma->vm_mm;

	tsb_index = MM_TSB_BASE;
	tsb_hash_shift = PAGE_SHIFT;

	spin_lock_irqsave(&mm->context.lock, flags);

#ifdef CONFIG_HUGETLB_PAGE
	if (mm->context.tsb_block[MM_TSB_HUGE].tsb != NULL) {
		if ((tlb_type == hypervisor &&
		     (pte_val(pte) & _PAGE_SZALL_4V) == _PAGE_SZHUGE_4V) ||
		    (tlb_type != hypervisor &&
		     (pte_val(pte) & _PAGE_SZALL_4U) == _PAGE_SZHUGE_4U)) {
			tsb_index = MM_TSB_HUGE;
			tsb_hash_shift = HPAGE_SHIFT;
		}
	}
#endif

	tsb = mm->context.tsb_block[tsb_index].tsb;
	tsb += ((address >> tsb_hash_shift) &
		(mm->context.tsb_block[tsb_index].tsb_nentries - 1UL));
	tag = (address >> 22UL);
	tsb_insert(tsb, tag, pte_val(pte));

	spin_unlock_irqrestore(&mm->context.lock, flags);
}

void flush_dcache_page(struct page *page)
{
	struct address_space *mapping;
	int this_cpu;

	if (tlb_type == hypervisor)
		return;

	/* Do not bother with the expensive D-cache flush if it
	 * is merely the zero page.  The 'bigcore' testcase in GDB
	 * causes this case to run millions of times.
	 */
	if (page == ZERO_PAGE(0))
		return;

	this_cpu = get_cpu();

	mapping = page_mapping(page);
	if (mapping && !mapping_mapped(mapping)) {
		int dirty = test_bit(PG_dcache_dirty, &page->flags);
		if (dirty) {
			int dirty_cpu = dcache_dirty_cpu(page);

			if (dirty_cpu == this_cpu)
				goto out;
			smp_flush_dcache_page_impl(page, dirty_cpu);
		}
		set_dcache_dirty(page, this_cpu);
	} else {
		/* We could delay the flush for the !page_mapping
		 * case too.  But that case is for exec env/arg
		 * pages and those are %99 certainly going to get
		 * faulted into the tlb (and thus flushed) anyways.
		 */
		flush_dcache_page_impl(page);
	}

out:
	put_cpu();
}
EXPORT_SYMBOL(flush_dcache_page);

void __kprobes flush_icache_range(unsigned long start, unsigned long end)
{
	/* Cheetah and Hypervisor platform cpus have coherent I-cache. */
	if (tlb_type == spitfire) {
		unsigned long kaddr;

		/* This code only runs on Spitfire cpus so this is
		 * why we can assume _PAGE_PADDR_4U.
		 */
		for (kaddr = start; kaddr < end; kaddr += PAGE_SIZE) {
			unsigned long paddr, mask = _PAGE_PADDR_4U;

			if (kaddr >= PAGE_OFFSET)
				paddr = kaddr & mask;
			else {
				pgd_t *pgdp = pgd_offset_k(kaddr);
				pud_t *pudp = pud_offset(pgdp, kaddr);
				pmd_t *pmdp = pmd_offset(pudp, kaddr);
				pte_t *ptep = pte_offset_kernel(pmdp, kaddr);

				paddr = pte_val(*ptep) & mask;
			}
			__flush_icache_page(paddr);
		}
	}
}
EXPORT_SYMBOL(flush_icache_range);

void mmu_info(struct seq_file *m)
{
	if (tlb_type == cheetah)
		seq_printf(m, "MMU Type\t: Cheetah\n");
	else if (tlb_type == cheetah_plus)
		seq_printf(m, "MMU Type\t: Cheetah+\n");
	else if (tlb_type == spitfire)
		seq_printf(m, "MMU Type\t: Spitfire\n");
	else if (tlb_type == hypervisor)
		seq_printf(m, "MMU Type\t: Hypervisor (sun4v)\n");
	else
		seq_printf(m, "MMU Type\t: ???\n");

#ifdef CONFIG_DEBUG_DCFLUSH
	seq_printf(m, "DCPageFlushes\t: %d\n",
		   atomic_read(&dcpage_flushes));
#ifdef CONFIG_SMP
	seq_printf(m, "DCPageFlushesXC\t: %d\n",
		   atomic_read(&dcpage_flushes_xcall));
#endif /* CONFIG_SMP */
#endif /* CONFIG_DEBUG_DCFLUSH */
}

struct linux_prom_translation prom_trans[512] __read_mostly;
unsigned int prom_trans_ents __read_mostly;

unsigned long kern_locked_tte_data;

/* The obp translations are saved based on 8k pagesize, since obp can
 * use a mixture of pagesizes. Misses to the LOW_OBP_ADDRESS ->
 * HI_OBP_ADDRESS range are handled in ktlb.S.
 */
static inline int in_obp_range(unsigned long vaddr)
{
	return (vaddr >= LOW_OBP_ADDRESS &&
		vaddr < HI_OBP_ADDRESS);
}

static int cmp_ptrans(const void *a, const void *b)
{
	const struct linux_prom_translation *x = a, *y = b;

	if (x->virt > y->virt)
		return 1;
	if (x->virt < y->virt)
		return -1;
	return 0;
}

/* Read OBP translations property into 'prom_trans[]'. */
static void __init read_obp_translations(void)
{
	int n, node, ents, first, last, i;

	node = prom_finddevice("/virtual-memory");
	n = prom_getproplen(node, "translations");
	if (unlikely(n == 0 || n == -1)) {
		prom_printf("prom_mappings: Couldn't get size.\n");
		prom_halt();
	}
	if (unlikely(n > sizeof(prom_trans))) {
		prom_printf("prom_mappings: Size %Zd is too big.\n", n);
		prom_halt();
	}

	if ((n = prom_getproperty(node, "translations",
				  (char *)&prom_trans[0],
				  sizeof(prom_trans))) == -1) {
		prom_printf("prom_mappings: Couldn't get property.\n");
		prom_halt();
	}

	n = n / sizeof(struct linux_prom_translation);

	ents = n;

	sort(prom_trans, ents, sizeof(struct linux_prom_translation),
	     cmp_ptrans, NULL);

	/* Now kick out all the non-OBP entries. */
	for (i = 0; i < ents; i++) {
		if (in_obp_range(prom_trans[i].virt))
			break;
	}
	first = i;
	for (; i < ents; i++) {
		if (!in_obp_range(prom_trans[i].virt))
			break;
	}
	last = i;

	for (i = 0; i < (last - first); i++) {
		struct linux_prom_translation *src = &prom_trans[i + first];
		struct linux_prom_translation *dest = &prom_trans[i];

		*dest = *src;
	}
	for (; i < ents; i++) {
		struct linux_prom_translation *dest = &prom_trans[i];
		dest->virt = dest->size = dest->data = 0x0UL;
	}

	prom_trans_ents = last - first;

	if (tlb_type == spitfire) {
		/* Clear diag TTE bits. */
		for (i = 0; i < prom_trans_ents; i++)
			prom_trans[i].data &= ~0x0003fe0000000000UL;
	}

	/* Force execute bit on. */
	for (i = 0; i < prom_trans_ents; i++)
		prom_trans[i].data |= (tlb_type == hypervisor ?
				       _PAGE_EXEC_4V : _PAGE_EXEC_4U);
}

static void __init hypervisor_tlb_lock(unsigned long vaddr,
				       unsigned long pte,
				       unsigned long mmu)
{
	unsigned long ret = sun4v_mmu_map_perm_addr(vaddr, 0, pte, mmu);

	if (ret != 0) {
		prom_printf("hypervisor_tlb_lock[%lx:%lx:%lx:%lx]: "
			    "errors with %lx\n", vaddr, 0UL, pte, mmu, ret);
		prom_halt();
	}
}

static unsigned long kern_large_tte(unsigned long paddr);

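/* Lock the kernel image mappings into the TLB, one 4MB mapping per
 * iteration: via hypervisor permanent-mapping calls on sun4v, or
 * locked dTLB/iTLB entries loaded through OBP on sun4u.
 */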
static void __init remap_kernel(void)
{
	unsigned long phys_page, tte_vaddr, tte_data;
	int i, tlb_ent = sparc64_highest_locked_tlbent();

	tte_vaddr = (unsigned long) KERNBASE;
	phys_page = (prom_boot_mapping_phys_low >> 22UL) << 22UL;
	tte_data = kern_large_tte(phys_page);

	kern_locked_tte_data = tte_data;

	/* Now lock us into the TLBs via Hypervisor or OBP. */
	if (tlb_type == hypervisor) {
		for (i = 0; i < num_kernel_image_mappings; i++) {
			hypervisor_tlb_lock(tte_vaddr, tte_data, HV_MMU_DMMU);
			hypervisor_tlb_lock(tte_vaddr, tte_data, HV_MMU_IMMU);
			tte_vaddr += 0x400000;
			tte_data += 0x400000;
		}
	} else {
		for (i = 0; i < num_kernel_image_mappings; i++) {
			prom_dtlb_load(tlb_ent - i, tte_data, tte_vaddr);
			prom_itlb_load(tlb_ent - i, tte_data, tte_vaddr);
			tte_vaddr += 0x400000;
			tte_data += 0x400000;
		}
		sparc64_highest_unlocked_tlb_ent = tlb_ent - i;
	}
	if (tlb_type == cheetah_plus) {
		sparc64_kern_pri_context = (CTX_CHEETAH_PLUS_CTX0 |
					    CTX_CHEETAH_PLUS_NUC);
		sparc64_kern_pri_nuc_bits = CTX_CHEETAH_PLUS_NUC;
		sparc64_kern_sec_context = CTX_CHEETAH_PLUS_CTX0;
	}
}

static void __init inherit_prom_mappings(void)
{
	/* Now fixup OBP's idea about where we really are mapped. */
	printk("Remapping the kernel... ");
	remap_kernel();
	printk("done.\n");
}

void prom_world(int enter)
{
	if (!enter)
		set_fs((mm_segment_t) { get_thread_current_ds() });

	__asm__ __volatile__("flushw");
}

void __flush_dcache_range(unsigned long start, unsigned long end)
{
	unsigned long va;

	if (tlb_type == spitfire) {
		int n = 0;

		for (va = start; va < end; va += 32) {
			spitfire_put_dcache_tag(va & 0x3fe0, 0x0);
			if (++n >= 512)
				break;
		}
	} else if (tlb_type == cheetah || tlb_type == cheetah_plus) {
		start = __pa(start);
		end = __pa(end);
		for (va = start; va < end; va += 32)
			__asm__ __volatile__("stxa %%g0, [%0] %1\n\t"
					     "membar #Sync"
					     : /* no outputs */
					     : "r" (va),
					       "i" (ASI_DCACHE_INVALIDATE));
	}
}
EXPORT_SYMBOL(__flush_dcache_range);

/* get_new_mmu_context() uses "cache + 1". */
DEFINE_SPINLOCK(ctx_alloc_lock);
unsigned long tlb_context_cache = CTX_FIRST_VERSION - 1;
#define MAX_CTX_NR	(1UL << CTX_NR_BITS)
#define CTX_BMAP_SLOTS	BITS_TO_LONGS(MAX_CTX_NR)
DECLARE_BITMAP(mmu_context_bmap, MAX_CTX_NR);

/* Caller does TLB context flushing on local CPU if necessary.
 * The caller also ensures that CTX_VALID(mm->context) is false.
 *
 * We must be careful about boundary cases so that we never
 * let the user have CTX 0 (nucleus) or we ever use a CTX
 * version of zero (and thus NO_CONTEXT would not be caught
 * by version mis-match tests in mmu_context.h).
 *
 * Always invoked with interrupts disabled.
 */
void get_new_mmu_context(struct mm_struct *mm)
{
	unsigned long ctx, new_ctx;
	unsigned long orig_pgsz_bits;
	unsigned long flags;
	int new_version;

	spin_lock_irqsave(&ctx_alloc_lock, flags);
	orig_pgsz_bits = (mm->context.sparc64_ctx_val & CTX_PGSZ_MASK);
	ctx = (tlb_context_cache + 1) & CTX_NR_MASK;
	new_ctx = find_next_zero_bit(mmu_context_bmap, 1 << CTX_NR_BITS, ctx);
	new_version = 0;
	if (new_ctx >= (1 << CTX_NR_BITS)) {
		new_ctx = find_next_zero_bit(mmu_context_bmap, ctx, 1);
		if (new_ctx >= ctx) {
			int i;
			new_ctx = (tlb_context_cache & CTX_VERSION_MASK) +
				CTX_FIRST_VERSION;
			if (new_ctx == 1)
				new_ctx = CTX_FIRST_VERSION;

			/* Don't call memset, for 16 entries that's just
			 * plain silly...
			 */
			mmu_context_bmap[0] = 3;
			mmu_context_bmap[1] = 0;
			mmu_context_bmap[2] = 0;
			mmu_context_bmap[3] = 0;
			for (i = 4; i < CTX_BMAP_SLOTS; i += 4) {
				mmu_context_bmap[i + 0] = 0;
				mmu_context_bmap[i + 1] = 0;
				mmu_context_bmap[i + 2] = 0;
				mmu_context_bmap[i + 3] = 0;
			}
			new_version = 1;
			goto out;
		}
	}
	mmu_context_bmap[new_ctx>>6] |= (1UL << (new_ctx & 63));
	new_ctx |= (tlb_context_cache & CTX_VERSION_MASK);
out:
	tlb_context_cache = new_ctx;
	mm->context.sparc64_ctx_val = new_ctx | orig_pgsz_bits;
	spin_unlock_irqrestore(&ctx_alloc_lock, flags);

	if (unlikely(new_version))
		smp_new_mmu_context_version();
}

static int numa_enabled = 1;
static int numa_debug;

static int __init early_numa(char *p)
{
	if (!p)
		return 0;

	if (strstr(p, "off"))
		numa_enabled = 0;

	if (strstr(p, "debug"))
		numa_debug = 1;

	return 0;
}
early_param("numa", early_numa);

#define numadbg(f, a...) \
do {	if (numa_debug) \
		printk(KERN_INFO f, ## a); \
} while (0)

static void __init find_ramdisk(unsigned long phys_base)
{
#ifdef CONFIG_BLK_DEV_INITRD
	if (sparc_ramdisk_image || sparc_ramdisk_image64) {
		unsigned long ramdisk_image;

		/* Older versions of the bootloader only supported a
		 * 32-bit physical address for the ramdisk image
		 * location, stored at sparc_ramdisk_image.  Newer
		 * SILO versions set sparc_ramdisk_image to zero and
		 * provide a full 64-bit physical address at
		 * sparc_ramdisk_image64.
		 */
		ramdisk_image = sparc_ramdisk_image;
		if (!ramdisk_image)
			ramdisk_image = sparc_ramdisk_image64;

		/* Another bootloader quirk.  The bootloader normalizes
		 * the physical address to KERNBASE, so we have to
		 * factor that back out and add in the lowest valid
		 * physical page address to get the true physical address.
		 */
		ramdisk_image -= KERNBASE;
		ramdisk_image += phys_base;

		numadbg("Found ramdisk at physical address 0x%lx, size %u\n",
			ramdisk_image, sparc_ramdisk_size);

		initrd_start = ramdisk_image;
		initrd_end = ramdisk_image + sparc_ramdisk_size;

		memblock_reserve(initrd_start, sparc_ramdisk_size);

		initrd_start += PAGE_OFFSET;
		initrd_end += PAGE_OFFSET;
	}
#endif
}

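/* Each node_mem_mask describes one NUMA node as a (mask, val) pair:
 * a physical address belongs to the node when (paddr & mask) == val.
 */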
struct node_mem_mask {
	unsigned long mask;
	unsigned long val;
	unsigned long bootmem_paddr;
};
static struct node_mem_mask node_masks[MAX_NUMNODES];
static int num_node_masks;

int numa_cpu_lookup_table[NR_CPUS];
cpumask_t numa_cpumask_lookup_table[MAX_NUMNODES];

#ifdef CONFIG_NEED_MULTIPLE_NODES

struct mdesc_mblock {
	u64	base;
	u64	size;
	u64	offset; /* RA-to-PA */
};
static struct mdesc_mblock *mblocks;
static int num_mblocks;

static unsigned long ra_to_pa(unsigned long addr)
{
	int i;

	for (i = 0; i < num_mblocks; i++) {
		struct mdesc_mblock *m = &mblocks[i];

		if (addr >= m->base &&
		    addr < (m->base + m->size)) {
			addr += m->offset;
			break;
		}
	}
	return addr;
}

static int find_node(unsigned long addr)
{
	int i;

	addr = ra_to_pa(addr);
	for (i = 0; i < num_node_masks; i++) {
		struct node_mem_mask *p = &node_masks[i];

		if ((addr & p->mask) == p->val)
			return i;
	}
	return -1;
}

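/* Return the end of the run of pages, starting at 'start', that all
 * resolve to the same NUMA node as 'start'; that node id is returned
 * in '*nid'.  The range is scanned one PAGE_SIZE step at a time.
 */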
static u64 memblock_nid_range(u64 start, u64 end, int *nid)
{
	*nid = find_node(start);
	start += PAGE_SIZE;
	while (start < end) {
		int n = find_node(start);

		if (n != *nid)
			break;
		start += PAGE_SIZE;
	}

	if (start > end)
		start = end;

	return start;
}
#else
static u64 memblock_nid_range(u64 start, u64 end, int *nid)
{
	*nid = 0;
	return end;
}
#endif

/* This must be invoked after performing all of the necessary
 * memblock_set_node() calls for 'nid'.  We need to be able to get
 * correct data from get_pfn_range_for_nid().
 */
static void __init allocate_node_data(int nid)
{
	unsigned long paddr, num_pages, start_pfn, end_pfn;
	struct pglist_data *p;

#ifdef CONFIG_NEED_MULTIPLE_NODES
	paddr = memblock_alloc_try_nid(sizeof(struct pglist_data), SMP_CACHE_BYTES, nid);
	if (!paddr) {
		prom_printf("Cannot allocate pglist_data for nid[%d]\n", nid);
		prom_halt();
	}
	NODE_DATA(nid) = __va(paddr);
	memset(NODE_DATA(nid), 0, sizeof(struct pglist_data));

	NODE_DATA(nid)->bdata = &bootmem_node_data[nid];
#endif

	p = NODE_DATA(nid);

	get_pfn_range_for_nid(nid, &start_pfn, &end_pfn);
	p->node_start_pfn = start_pfn;
	p->node_spanned_pages = end_pfn - start_pfn;

	if (p->node_spanned_pages) {
		num_pages = bootmem_bootmap_pages(p->node_spanned_pages);

		paddr = memblock_alloc_try_nid(num_pages << PAGE_SHIFT, PAGE_SIZE, nid);
		if (!paddr) {
			prom_printf("Cannot allocate bootmap for nid[%d]\n",
				    nid);
			prom_halt();
		}
		node_masks[nid].bootmem_paddr = paddr;
	}
}

static void init_node_masks_nonnuma(void)
{
	int i;

	numadbg("Initializing tables for non-numa.\n");

	node_masks[0].mask = node_masks[0].val = 0;
	num_node_masks = 1;

	for (i = 0; i < NR_CPUS; i++)
		numa_cpu_lookup_table[i] = 0;

	cpumask_setall(&numa_cpumask_lookup_table[0]);
}

#ifdef CONFIG_NEED_MULTIPLE_NODES
struct pglist_data *node_data[MAX_NUMNODES];

EXPORT_SYMBOL(numa_cpu_lookup_table);
EXPORT_SYMBOL(numa_cpumask_lookup_table);
EXPORT_SYMBOL(node_data);

struct mdesc_mlgroup {
	u64	node;
	u64	latency;
	u64	match;
	u64	mask;
};
static struct mdesc_mlgroup *mlgroups;
static int num_mlgroups;

static int scan_pio_for_cfg_handle(struct mdesc_handle *md, u64 pio,
				   u32 cfg_handle)
{
	u64 arc;

	mdesc_for_each_arc(arc, md, pio, MDESC_ARC_TYPE_FWD) {
		u64 target = mdesc_arc_target(md, arc);
		const u64 *val;

		val = mdesc_get_property(md, target,
					 "cfg-handle", NULL);
		if (val && *val == cfg_handle)
			return 0;
	}
	return -ENODEV;
}

static int scan_arcs_for_cfg_handle(struct mdesc_handle *md, u64 grp,
				    u32 cfg_handle)
{
	u64 arc, candidate, best_latency = ~(u64)0;

	candidate = MDESC_NODE_NULL;
	mdesc_for_each_arc(arc, md, grp, MDESC_ARC_TYPE_FWD) {
		u64 target = mdesc_arc_target(md, arc);
		const char *name = mdesc_node_name(md, target);
		const u64 *val;

		if (strcmp(name, "pio-latency-group"))
			continue;

		val = mdesc_get_property(md, target, "latency", NULL);
		if (!val)
			continue;

		if (*val < best_latency) {
			candidate = target;
			best_latency = *val;
		}
	}

	if (candidate == MDESC_NODE_NULL)
		return -ENODEV;

	return scan_pio_for_cfg_handle(md, candidate, cfg_handle);
}

int of_node_to_nid(struct device_node *dp)
{
	const struct linux_prom64_registers *regs;
	struct mdesc_handle *md;
	u32 cfg_handle;
	int count, nid;
	u64 grp;

	/* This is the right thing to do on currently supported
	 * SUN4U NUMA platforms as well, as the PCI controller does
	 * not sit behind any particular memory controller.
	 */
	if (!mlgroups)
		return -1;

	regs = of_get_property(dp, "reg", NULL);
	if (!regs)
		return -1;

	cfg_handle = (regs->phys_addr >> 32UL) & 0x0fffffff;

	md = mdesc_grab();

	count = 0;
	nid = -1;
	mdesc_for_each_node_by_name(md, grp, "group") {
		if (!scan_arcs_for_cfg_handle(md, grp, cfg_handle)) {
			nid = count;
			break;
		}
		count++;
	}

	mdesc_release(md);

	return nid;
}

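/* Split each memblock memory region into maximal single-node pieces
 * and record the owning node of each piece with memblock_set_node().
 */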
static void __init add_node_ranges(void)
{
	struct memblock_region *reg;

	for_each_memblock(memory, reg) {
		unsigned long size = reg->size;
		unsigned long start, end;

		start = reg->base;
		end = start + size;
		while (start < end) {
			unsigned long this_end;
			int nid;

			this_end = memblock_nid_range(start, end, &nid);

			numadbg("Setting memblock NUMA node nid[%d] "
				"start[%lx] end[%lx]\n",
				nid, start, this_end);

			memblock_set_node(start, this_end - start, nid);
			start = this_end;
		}
	}
}

static int __init grab_mlgroups(struct mdesc_handle *md)
{
	unsigned long paddr;
	int count = 0;
	u64 node;

	mdesc_for_each_node_by_name(md, node, "memory-latency-group")
		count++;
	if (!count)
		return -ENOENT;

	paddr = memblock_alloc(count * sizeof(struct mdesc_mlgroup),
			       SMP_CACHE_BYTES);
	if (!paddr)
		return -ENOMEM;

	mlgroups = __va(paddr);
	num_mlgroups = count;

	count = 0;
	mdesc_for_each_node_by_name(md, node, "memory-latency-group") {
		struct mdesc_mlgroup *m = &mlgroups[count++];
		const u64 *val;

		m->node = node;

		val = mdesc_get_property(md, node, "latency", NULL);
		m->latency = *val;
		val = mdesc_get_property(md, node, "address-match", NULL);
		m->match = *val;
		val = mdesc_get_property(md, node, "address-mask", NULL);
		m->mask = *val;

		numadbg("MLGROUP[%d]: node[%llx] latency[%llx] "
			"match[%llx] mask[%llx]\n",
			count - 1, m->node, m->latency, m->match, m->mask);
	}

	return 0;
}

static int __init grab_mblocks(struct mdesc_handle *md)
{
	unsigned long paddr;
	int count = 0;
	u64 node;

	mdesc_for_each_node_by_name(md, node, "mblock")
		count++;
	if (!count)
		return -ENOENT;

	paddr = memblock_alloc(count * sizeof(struct mdesc_mblock),
			       SMP_CACHE_BYTES);
	if (!paddr)
		return -ENOMEM;

	mblocks = __va(paddr);
	num_mblocks = count;

	count = 0;
	mdesc_for_each_node_by_name(md, node, "mblock") {
		struct mdesc_mblock *m = &mblocks[count++];
		const u64 *val;

		val = mdesc_get_property(md, node, "base", NULL);
		m->base = *val;
		val = mdesc_get_property(md, node, "size", NULL);
		m->size = *val;
		val = mdesc_get_property(md, node,
					 "address-congruence-offset", NULL);
		m->offset = *val;

		numadbg("MBLOCK[%d]: base[%llx] size[%llx] offset[%llx]\n",
			count - 1, m->base, m->size, m->offset);
	}

	return 0;
}

static void __init numa_parse_mdesc_group_cpus(struct mdesc_handle *md,
					       u64 grp, cpumask_t *mask)
{
	u64 arc;

	cpumask_clear(mask);

	mdesc_for_each_arc(arc, md, grp, MDESC_ARC_TYPE_BACK) {
		u64 target = mdesc_arc_target(md, arc);
		const char *name = mdesc_node_name(md, target);
		const u64 *id;

		if (strcmp(name, "cpu"))
			continue;
		id = mdesc_get_property(md, target, "id", NULL);
		if (*id < nr_cpu_ids)
			cpumask_set_cpu(*id, mask);
	}
}

static struct mdesc_mlgroup * __init find_mlgroup(u64 node)
{
	int i;

	for (i = 0; i < num_mlgroups; i++) {
		struct mdesc_mlgroup *m = &mlgroups[i];
		if (m->node == node)
			return m;
	}
	return NULL;
}

static int __init numa_attach_mlgroup(struct mdesc_handle *md, u64 grp,
				      int index)
{
	struct mdesc_mlgroup *candidate = NULL;
	u64 arc, best_latency = ~(u64)0;
	struct node_mem_mask *n;

	mdesc_for_each_arc(arc, md, grp, MDESC_ARC_TYPE_FWD) {
		u64 target = mdesc_arc_target(md, arc);
		struct mdesc_mlgroup *m = find_mlgroup(target);
		if (!m)
			continue;
		if (m->latency < best_latency) {
			candidate = m;
			best_latency = m->latency;
		}
	}
	if (!candidate)
		return -ENOENT;

	if (num_node_masks != index) {
		printk(KERN_ERR "Inconsistent NUMA state, "
		       "index[%d] != num_node_masks[%d]\n",
		       index, num_node_masks);
		return -EINVAL;
	}

	n = &node_masks[num_node_masks++];

	n->mask = candidate->mask;
	n->val = candidate->match;

	numadbg("NUMA NODE[%d]: mask[%lx] val[%lx] (latency[%llx])\n",
		index, n->mask, n->val, candidate->latency);

	return 0;
}

static int __init numa_parse_mdesc_group(struct mdesc_handle *md, u64 grp,
					 int index)
{
	cpumask_t mask;
	int cpu;

	numa_parse_mdesc_group_cpus(md, grp, &mask);

	for_each_cpu(cpu, &mask)
		numa_cpu_lookup_table[cpu] = index;
	cpumask_copy(&numa_cpumask_lookup_table[index], &mask);

	if (numa_debug) {
		printk(KERN_INFO "NUMA GROUP[%d]: cpus [ ", index);
		for_each_cpu(cpu, &mask)
			printk("%d ", cpu);
		printk("]\n");
	}

	return numa_attach_mlgroup(md, grp, index);
}

static int __init numa_parse_mdesc(void)
{
	struct mdesc_handle *md = mdesc_grab();
	int i, err, count;
	u64 node;

	node = mdesc_node_by_name(md, MDESC_NODE_NULL, "latency-groups");
	if (node == MDESC_NODE_NULL) {
		mdesc_release(md);
		return -ENOENT;
	}

	err = grab_mblocks(md);
	if (err < 0)
		goto out;

	err = grab_mlgroups(md);
	if (err < 0)
		goto out;

	count = 0;
	mdesc_for_each_node_by_name(md, node, "group") {
		err = numa_parse_mdesc_group(md, node, count);
		if (err < 0)
			break;
		count++;
	}

	add_node_ranges();

	for (i = 0; i < num_node_masks; i++) {
		allocate_node_data(i);
		node_set_online(i);
	}

	err = 0;
out:
	mdesc_release(md);
	return err;
}

static int __init numa_parse_jbus(void)
{
	unsigned long cpu, index;

	/* NUMA node id is encoded in bits 36 and higher, and there is
	 * a 1-to-1 mapping from CPU ID to NUMA node ID.
	 */
	index = 0;
	for_each_present_cpu(cpu) {
		numa_cpu_lookup_table[cpu] = index;
		cpumask_copy(&numa_cpumask_lookup_table[index], cpumask_of(cpu));
		node_masks[index].mask = ~((1UL << 36UL) - 1UL);
		node_masks[index].val = cpu << 36UL;

		index++;
	}
	num_node_masks = index;

	add_node_ranges();

	for (index = 0; index < num_node_masks; index++) {
		allocate_node_data(index);
		node_set_online(index);
	}

	return 0;
}

static int __init numa_parse_sun4u(void)
{
	if (tlb_type == cheetah || tlb_type == cheetah_plus) {
		unsigned long ver;

		__asm__ ("rdpr %%ver, %0" : "=r" (ver));
		if ((ver >> 32UL) == __JALAPENO_ID ||
		    (ver >> 32UL) == __SERRANO_ID)
			return numa_parse_jbus();
	}
	return -1;
}

static int __init bootmem_init_numa(void)
{
	int err = -1;

	numadbg("bootmem_init_numa()\n");

	if (numa_enabled) {
		if (tlb_type == hypervisor)
			err = numa_parse_mdesc();
		else
			err = numa_parse_sun4u();
	}
	return err;
}

#else

static int bootmem_init_numa(void)
{
	return -1;
}

#endif

static void __init bootmem_init_nonnuma(void)
{
	unsigned long top_of_ram = memblock_end_of_DRAM();
	unsigned long total_ram = memblock_phys_mem_size();

	numadbg("bootmem_init_nonnuma()\n");

	printk(KERN_INFO "Top of RAM: 0x%lx, Total RAM: 0x%lx\n",
	       top_of_ram, total_ram);
	printk(KERN_INFO "Memory hole size: %ldMB\n",
	       (top_of_ram - total_ram) >> 20);

	init_node_masks_nonnuma();
	memblock_set_node(0, (phys_addr_t)ULLONG_MAX, 0);
	allocate_node_data(0);
	node_set_online(0);
}

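/* Reserve, in nid's bootmem map, those pieces of [start, end) that
 * memblock_nid_range() attributes to that node.
 */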
static void __init reserve_range_in_node(int nid, unsigned long start,
					 unsigned long end)
{
	numadbg("    reserve_range_in_node(nid[%d],start[%lx],end[%lx]\n",
		nid, start, end);
	while (start < end) {
		unsigned long this_end;
		int n;

		this_end = memblock_nid_range(start, end, &n);
		if (n == nid) {
			numadbg("      MATCH reserving range [%lx:%lx]\n",
				start, this_end);
			reserve_bootmem_node(NODE_DATA(nid), start,
					     (this_end - start), BOOTMEM_DEFAULT);
		} else
			numadbg("      NO MATCH, advancing start to %lx\n",
				this_end);

		start = this_end;
	}
}

static void __init trim_reserved_in_node(int nid)
{
	struct memblock_region *reg;

	numadbg("  trim_reserved_in_node(%d)\n", nid);

	for_each_memblock(reserved, reg)
		reserve_range_in_node(nid, reg->base, reg->base + reg->size);
}

static void __init bootmem_init_one_node(int nid)
{
	struct pglist_data *p;

	numadbg("bootmem_init_one_node(%d)\n", nid);

	p = NODE_DATA(nid);

	if (p->node_spanned_pages) {
		unsigned long paddr = node_masks[nid].bootmem_paddr;
		unsigned long end_pfn;

		end_pfn = p->node_start_pfn + p->node_spanned_pages;

		numadbg("  init_bootmem_node(%d, %lx, %lx, %lx)\n",
			nid, paddr >> PAGE_SHIFT, p->node_start_pfn, end_pfn);

		init_bootmem_node(p, paddr >> PAGE_SHIFT,
				  p->node_start_pfn, end_pfn);

		numadbg("  free_bootmem_with_active_regions(%d, %lx)\n",
			nid, end_pfn);
		free_bootmem_with_active_regions(nid, end_pfn);

		trim_reserved_in_node(nid);

		numadbg("  sparse_memory_present_with_active_regions(%d)\n",
			nid);
		sparse_memory_present_with_active_regions(nid);
	}
}

static unsigned long __init bootmem_init(unsigned long phys_base)
{
	unsigned long end_pfn;
	int nid;

	end_pfn = memblock_end_of_DRAM() >> PAGE_SHIFT;
	max_pfn = max_low_pfn = end_pfn;
	min_low_pfn = (phys_base >> PAGE_SHIFT);

	if (bootmem_init_numa() < 0)
		bootmem_init_nonnuma();

	/* XXX cpu notifier XXX */

	for_each_online_node(nid)
		bootmem_init_one_node(nid);

	sparse_init();

	return end_pfn;
}

static struct linux_prom64_registers pall[MAX_BANKS] __initdata;
static int pall_ents __initdata;

#ifdef CONFIG_DEBUG_PAGEALLOC
static unsigned long __ref kernel_map_range(unsigned long pstart,
					    unsigned long pend, pgprot_t prot)
{
	unsigned long vstart = PAGE_OFFSET + pstart;
	unsigned long vend = PAGE_OFFSET + pend;
	unsigned long alloc_bytes = 0UL;

	if ((vstart & ~PAGE_MASK) || (vend & ~PAGE_MASK)) {
		prom_printf("kernel_map: Unaligned physmem[%lx:%lx]\n",
			    vstart, vend);
		prom_halt();
	}

	while (vstart < vend) {
		unsigned long this_end, paddr = __pa(vstart);
		pgd_t *pgd = pgd_offset_k(vstart);
		pud_t *pud;
		pmd_t *pmd;
		pte_t *pte;

		pud = pud_offset(pgd, vstart);
		if (pud_none(*pud)) {
			pmd_t *new;

			new = __alloc_bootmem(PAGE_SIZE, PAGE_SIZE, PAGE_SIZE);
			alloc_bytes += PAGE_SIZE;
			pud_populate(&init_mm, pud, new);
		}

		pmd = pmd_offset(pud, vstart);
		if (!pmd_present(*pmd)) {
			pte_t *new;

			new = __alloc_bootmem(PAGE_SIZE, PAGE_SIZE, PAGE_SIZE);
			alloc_bytes += PAGE_SIZE;
			pmd_populate_kernel(&init_mm, pmd, new);
		}

		pte = pte_offset_kernel(pmd, vstart);
		this_end = (vstart + PMD_SIZE) & PMD_MASK;
		if (this_end > vend)
			this_end = vend;

		while (vstart < this_end) {
			pte_val(*pte) = (paddr | pgprot_val(prot));

			vstart += PAGE_SIZE;
			paddr += PAGE_SIZE;
			pte++;
		}
	}

	return alloc_bytes;
}

extern unsigned int kvmap_linear_patch[1];
#endif /* CONFIG_DEBUG_PAGEALLOC */

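/* Set a bit in kpte_linear_bitmap for every naturally aligned 256MB
 * chunk fully contained in [start, end), marking it eligible for a
 * 256MB linear mapping; the remainder keeps using 4MB pages.
 */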
static void __init mark_kpte_bitmap(unsigned long start, unsigned long end)
{
	const unsigned long shift_256MB = 28;
	const unsigned long mask_256MB = ((1UL << shift_256MB) - 1UL);
	const unsigned long size_256MB = (1UL << shift_256MB);

	while (start < end) {
		long remains;

		remains = end - start;
		if (remains < size_256MB)
			break;

		if (start & mask_256MB) {
			start = (start + size_256MB) & ~mask_256MB;
			continue;
		}

		while (remains >= size_256MB) {
			unsigned long index = start >> shift_256MB;

			__set_bit(index, kpte_linear_bitmap);

			start += size_256MB;
			remains -= size_256MB;
		}
	}
}

static void __init init_kpte_bitmap(void)
{
	unsigned long i;

	for (i = 0; i < pall_ents; i++) {
		unsigned long phys_start, phys_end;

		phys_start = pall[i].phys_addr;
		phys_end = phys_start + pall[i].reg_size;

		mark_kpte_bitmap(phys_start, phys_end);
	}
}

static void __init kernel_physical_mapping_init(void)
{
#ifdef CONFIG_DEBUG_PAGEALLOC
	unsigned long i, mem_alloced = 0UL;

	for (i = 0; i < pall_ents; i++) {
		unsigned long phys_start, phys_end;

		phys_start = pall[i].phys_addr;
		phys_end = phys_start + pall[i].reg_size;

		mem_alloced += kernel_map_range(phys_start, phys_end,
						PAGE_KERNEL);
	}

	printk("Allocated %ld bytes for kernel page tables.\n",
	       mem_alloced);

	kvmap_linear_patch[0] = 0x01000000; /* nop */
	flushi(&kvmap_linear_patch[0]);

	__flush_tlb_all();
#endif
}

#ifdef CONFIG_DEBUG_PAGEALLOC
void kernel_map_pages(struct page *page, int numpages, int enable)
{
	unsigned long phys_start = page_to_pfn(page) << PAGE_SHIFT;
	unsigned long phys_end = phys_start + (numpages * PAGE_SIZE);

	kernel_map_range(phys_start, phys_end,
			 (enable ? PAGE_KERNEL : __pgprot(0)));

	flush_tsb_kernel_range(PAGE_OFFSET + phys_start,
			       PAGE_OFFSET + phys_end);

	/* we should perform an IPI and flush all tlbs,
	 * but that can deadlock->flush only current cpu.
	 */
	__flush_tlb_kernel_range(PAGE_OFFSET + phys_start,
				 PAGE_OFFSET + phys_end);
}
#endif

unsigned long __init find_ecache_flush_span(unsigned long size)
{
	int i;

	for (i = 0; i < pavail_ents; i++) {
		if (pavail[i].reg_size >= size)
			return pavail[i].phys_addr;
	}

	return ~0UL;
}

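/* Patch the TSB access sequences over to their physical-address
 * forms; the quad-load sites additionally select the sun4v or sun4u
 * variant of the instruction.  Each patched word is followed by an
 * instruction-cache flush of the modified location.
 */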
static void __init tsb_phys_patch(void)
{
	struct tsb_ldquad_phys_patch_entry *pquad;
	struct tsb_phys_patch_entry *p;

	pquad = &__tsb_ldquad_phys_patch;
	while (pquad < &__tsb_ldquad_phys_patch_end) {
		unsigned long addr = pquad->addr;

		if (tlb_type == hypervisor)
			*(unsigned int *) addr = pquad->sun4v_insn;
		else
			*(unsigned int *) addr = pquad->sun4u_insn;
		wmb();
		__asm__ __volatile__("flush	%0"
				     : /* no outputs */
				     : "r" (addr));

		pquad++;
	}

	p = &__tsb_phys_patch;
	while (p < &__tsb_phys_patch_end) {
		unsigned long addr = p->addr;

		*(unsigned int *) addr = p->insn;
		wmb();
		__asm__ __volatile__("flush	%0"
				     : /* no outputs */
				     : "r" (addr));

		p++;
	}
}

/* Don't mark as init, we give this to the Hypervisor. */
#ifndef CONFIG_DEBUG_PAGEALLOC
#define NUM_KTSB_DESCR	2
#else
#define NUM_KTSB_DESCR	1
#endif
static struct hv_tsb_descr ktsb_descr[NUM_KTSB_DESCR];
extern struct tsb swapper_tsb[KERNEL_TSB_NENTRIES];

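/* Rewrite the sethi/or instruction pair at every patch site with the
 * kernel TSB's physical address (pre-shifted by KTSB_PHYS_SHIFT):
 * the upper bits go into the sethi's imm22 field and the low 10 bits
 * into the or's immediate.
 */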
static void patch_one_ktsb_phys(unsigned int *start, unsigned int *end, unsigned long pa)
{
	pa >>= KTSB_PHYS_SHIFT;

	while (start < end) {
		unsigned int *ia = (unsigned int *)(unsigned long)*start;

		ia[0] = (ia[0] & ~0x3fffff) | (pa >> 10);
		__asm__ __volatile__("flush	%0" : : "r" (ia));

		ia[1] = (ia[1] & ~0x3ff) | (pa & 0x3ff);
		__asm__ __volatile__("flush	%0" : : "r" (ia + 1));

		start++;
	}
}

static void ktsb_phys_patch(void)
{
	extern unsigned int __swapper_tsb_phys_patch;
	extern unsigned int __swapper_tsb_phys_patch_end;
	unsigned long ktsb_pa;

	ktsb_pa = kern_base + ((unsigned long)&swapper_tsb[0] - KERNBASE);
	patch_one_ktsb_phys(&__swapper_tsb_phys_patch,
			    &__swapper_tsb_phys_patch_end, ktsb_pa);
#ifndef CONFIG_DEBUG_PAGEALLOC
	{
	extern unsigned int __swapper_4m_tsb_phys_patch;
	extern unsigned int __swapper_4m_tsb_phys_patch_end;
	ktsb_pa = (kern_base +
		   ((unsigned long)&swapper_4m_tsb[0] - KERNBASE));
	patch_one_ktsb_phys(&__swapper_4m_tsb_phys_patch,
			    &__swapper_4m_tsb_phys_patch_end, ktsb_pa);
	}
#endif
}

static void __init sun4v_ktsb_init(void)
{
	unsigned long ktsb_pa;

	/* First KTSB for PAGE_SIZE mappings. */
	ktsb_pa = kern_base + ((unsigned long)&swapper_tsb[0] - KERNBASE);

	switch (PAGE_SIZE) {
	case 8 * 1024:
	default:
		ktsb_descr[0].pgsz_idx = HV_PGSZ_IDX_8K;
		ktsb_descr[0].pgsz_mask = HV_PGSZ_MASK_8K;
		break;

	case 64 * 1024:
		ktsb_descr[0].pgsz_idx = HV_PGSZ_IDX_64K;
		ktsb_descr[0].pgsz_mask = HV_PGSZ_MASK_64K;
		break;

	case 512 * 1024:
		ktsb_descr[0].pgsz_idx = HV_PGSZ_IDX_512K;
		ktsb_descr[0].pgsz_mask = HV_PGSZ_MASK_512K;
		break;

	case 4 * 1024 * 1024:
		ktsb_descr[0].pgsz_idx = HV_PGSZ_IDX_4MB;
		ktsb_descr[0].pgsz_mask = HV_PGSZ_MASK_4MB;
		break;
	}

	ktsb_descr[0].assoc = 1;
	ktsb_descr[0].num_ttes = KERNEL_TSB_NENTRIES;
	ktsb_descr[0].ctx_idx = 0;
	ktsb_descr[0].tsb_base = ktsb_pa;
	ktsb_descr[0].resv = 0;

#ifndef CONFIG_DEBUG_PAGEALLOC
	/* Second KTSB for 4MB/256MB mappings. */
	ktsb_pa = (kern_base +
		   ((unsigned long)&swapper_4m_tsb[0] - KERNBASE));

	ktsb_descr[1].pgsz_idx = HV_PGSZ_IDX_4MB;
	ktsb_descr[1].pgsz_mask = (HV_PGSZ_MASK_4MB |
				   HV_PGSZ_MASK_256MB);
	ktsb_descr[1].assoc = 1;
	ktsb_descr[1].num_ttes = KERNEL_TSB4M_NENTRIES;
	ktsb_descr[1].ctx_idx = 0;
	ktsb_descr[1].tsb_base = ktsb_pa;
	ktsb_descr[1].resv = 0;
#endif
}

void __cpuinit sun4v_ktsb_register(void)
{
	unsigned long pa, ret;

	pa = kern_base + ((unsigned long)&ktsb_descr[0] - KERNBASE);

	ret = sun4v_mmu_tsb_ctx0(NUM_KTSB_DESCR, pa);
	if (ret != 0) {
		prom_printf("hypervisor_mmu_tsb_ctx0[%lx]: "
			    "errors with %lx\n", pa, ret);
		prom_halt();
	}
}

/* paging_init() sets up the page tables */

static unsigned long last_valid_pfn;
pgd_t swapper_pg_dir[2048];

static void sun4u_pgprot_init(void);
static void sun4v_pgprot_init(void);

void __init paging_init(void)
{
	unsigned long end_pfn, shift, phys_base;
	unsigned long real_end, i;

	/* These build time checks make sure that the dcache_dirty_cpu()
	 * page->flags usage will work.
	 *
	 * When a page gets marked as dcache-dirty, we store the
	 * cpu number starting at bit 32 in the page->flags.  Also,
	 * functions like clear_dcache_dirty_cpu use the cpu mask
	 * in 13-bit signed-immediate instruction fields.
	 */

	/*
	 * Page flags must not reach into upper 32 bits that are used
	 * for the cpu number
	 */
	BUILD_BUG_ON(NR_PAGEFLAGS > 32);

	/*
	 * The bit fields placed in the high range must not reach below
	 * the 32 bit boundary. Otherwise we cannot place the cpu field
	 * at the 32 bit boundary.
	 */
	BUILD_BUG_ON(SECTIONS_WIDTH + NODES_WIDTH + ZONES_WIDTH +
		ilog2(roundup_pow_of_two(NR_CPUS)) > 32);

	BUILD_BUG_ON(NR_CPUS > 4096);

	kern_base = (prom_boot_mapping_phys_low >> 22UL) << 22UL;
	kern_size = (unsigned long)&_end - (unsigned long)KERNBASE;

	/* Invalidate both kernel TSBs. */
	memset(swapper_tsb, 0x40, sizeof(swapper_tsb));
#ifndef CONFIG_DEBUG_PAGEALLOC
	memset(swapper_4m_tsb, 0x40, sizeof(swapper_4m_tsb));
#endif

	if (tlb_type == hypervisor)
		sun4v_pgprot_init();
	else
		sun4u_pgprot_init();

	if (tlb_type == cheetah_plus ||
	    tlb_type == hypervisor) {
		tsb_phys_patch();
		ktsb_phys_patch();
	}

	if (tlb_type == hypervisor) {
		sun4v_patch_tlb_handlers();
		sun4v_ktsb_init();
	}

	/* Find available physical memory...
	 *
	 * Read it twice in order to work around a bug in openfirmware.
	 * The call to grab this table itself can cause openfirmware to
	 * allocate memory, which in turn can take away some space from
	 * the list of available memory.  Reading it twice makes sure
	 * we really do get the final value.
	 */
	read_obp_translations();
	read_obp_memory("reg", &pall[0], &pall_ents);
	read_obp_memory("available", &pavail[0], &pavail_ents);
	read_obp_memory("available", &pavail[0], &pavail_ents);

	phys_base = 0xffffffffffffffffUL;
	for (i = 0; i < pavail_ents; i++) {
		phys_base = min(phys_base, pavail[i].phys_addr);
		memblock_add(pavail[i].phys_addr, pavail[i].reg_size);
	}

	memblock_reserve(kern_base, kern_size);

	find_ramdisk(phys_base);

	memblock_enforce_memory_limit(cmdline_memory_size);

	memblock_allow_resize();
	memblock_dump_all();

	set_bit(0, mmu_context_bmap);

	shift = kern_base + PAGE_OFFSET - ((unsigned long)KERNBASE);

	real_end = (unsigned long)_end;
	num_kernel_image_mappings = DIV_ROUND_UP(real_end - KERNBASE, 1 << 22);
	printk("Kernel: Using %d locked TLB entries for main kernel image.\n",
	       num_kernel_image_mappings);

	/* Set kernel pgd to upper alias so physical page computations
	 * work.
	 */
	init_mm.pgd += ((shift) / (sizeof(pgd_t)));

	memset(swapper_low_pmd_dir, 0, sizeof(swapper_low_pmd_dir));

	/* Now can init the kernel/bad page tables. */
	pud_set(pud_offset(&swapper_pg_dir[0], 0),
		swapper_low_pmd_dir + (shift / sizeof(pgd_t)));

	inherit_prom_mappings();

	init_kpte_bitmap();

	/* Ok, we can use our TLB miss and window trap handlers safely. */
	setup_tba();

	__flush_tlb_all();

	if (tlb_type == hypervisor)
		sun4v_ktsb_register();

	prom_build_devicetree();
	of_populate_present_mask();
#ifndef CONFIG_SMP
	of_fill_in_cpu_data();
#endif

	if (tlb_type == hypervisor) {
		sun4v_mdesc_init();
		mdesc_populate_present_mask(cpu_all_mask);
#ifndef CONFIG_SMP
		mdesc_fill_in_cpu_data(cpu_all_mask);
#endif
	}

	/* Once the OF device tree and MDESC have been setup, we know
	 * the list of possible cpus.  Therefore we can allocate the
	 * IRQ stacks.
	 */
	for_each_possible_cpu(i) {
		/* XXX Use node local allocations... XXX */
		softirq_stack[i] = __va(memblock_alloc(THREAD_SIZE, THREAD_SIZE));
		hardirq_stack[i] = __va(memblock_alloc(THREAD_SIZE, THREAD_SIZE));
	}

	/* Setup bootmem... */
	last_valid_pfn = end_pfn = bootmem_init(phys_base);

#ifndef CONFIG_NEED_MULTIPLE_NODES
	max_mapnr = last_valid_pfn;
#endif
	kernel_physical_mapping_init();

	{
		unsigned long max_zone_pfns[MAX_NR_ZONES];

		memset(max_zone_pfns, 0, sizeof(max_zone_pfns));

		max_zone_pfns[ZONE_NORMAL] = end_pfn;

		free_area_init_nodes(max_zone_pfns);
	}

	printk("Booting Linux...\n");
}

int __devinit page_in_phys_avail(unsigned long paddr)
{
	int i;

	paddr &= PAGE_MASK;

	for (i = 0; i < pavail_ents; i++) {
		unsigned long start, end;

		start = pavail[i].phys_addr;
		end = start + pavail[i].reg_size;

		if (paddr >= start && paddr < end)
			return 1;
	}
	if (paddr >= kern_base && paddr < (kern_base + kern_size))
		return 1;
#ifdef CONFIG_BLK_DEV_INITRD
	if (paddr >= __pa(initrd_start) &&
	    paddr < __pa(PAGE_ALIGN(initrd_end)))
		return 1;
#endif

	return 0;
}

1887static struct linux_prom64_registers pavail_rescan[MAX_BANKS] __initdata;
1888static int pavail_rescan_ents __initdata;
1889
1890/* Certain OBP calls, such as fetching "available" properties, can
1891 * claim physical memory. So, along with initializing the valid
1892 * address bitmap, what we do here is refetch the physical available
1893 * memory list again, and make sure it provides at least as much
1894 * memory as 'pavail' does.
1895 */
d8ed1d43 1896static void __init setup_valid_addr_bitmap_from_pavail(unsigned long *bitmap)
1da177e4 1897{
1da177e4
LT
1898 int i;
1899
13edad7a 1900 read_obp_memory("available", &pavail_rescan[0], &pavail_rescan_ents);
1da177e4 1901
13edad7a 1902 for (i = 0; i < pavail_ents; i++) {
1da177e4
LT
1903 unsigned long old_start, old_end;
1904
13edad7a 1905 old_start = pavail[i].phys_addr;
919ee677 1906 old_end = old_start + pavail[i].reg_size;
1da177e4
LT
1907 while (old_start < old_end) {
1908 int n;
1909
c2a5a46b 1910 for (n = 0; n < pavail_rescan_ents; n++) {
1da177e4
LT
1911 unsigned long new_start, new_end;
1912
13edad7a
DM
1913 new_start = pavail_rescan[n].phys_addr;
1914 new_end = new_start +
1915 pavail_rescan[n].reg_size;
1da177e4
LT
1916
1917 if (new_start <= old_start &&
1918 new_end >= (old_start + PAGE_SIZE)) {
d8ed1d43 1919 set_bit(old_start >> 22, bitmap);
1da177e4
LT
1920 goto do_next_page;
1921 }
1922 }
919ee677
DM
1923
1924 prom_printf("mem_init: Lost memory in pavail\n");
1925 prom_printf("mem_init: OLD start[%lx] size[%lx]\n",
1926 pavail[i].phys_addr,
1927 pavail[i].reg_size);
1928 prom_printf("mem_init: NEW start[%lx] size[%lx]\n",
1929 pavail_rescan[i].phys_addr,
1930 pavail_rescan[i].reg_size);
1931 prom_printf("mem_init: Cannot continue, aborting.\n");
1932 prom_halt();
1da177e4
LT
1933
1934 do_next_page:
1935 old_start += PAGE_SIZE;
1936 }
1937 }
1938}
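/* The valid address bitmap is kept at 4MB granularity: bit N covers
 * physical addresses [N << 22, (N + 1) << 22).  A minimal sketch of
 * the test performed at TLB miss time (illustrative only; the real
 * consumer is the hand-written assembler patched below):
 */
static inline int paddr_is_valid(unsigned long paddr,
				 const unsigned long *bitmap)
{
	return test_bit(paddr >> 22, bitmap);
}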
1939
d8ed1d43
DM
1940static void __init patch_tlb_miss_handler_bitmap(void)
1941{
1942 extern unsigned int valid_addr_bitmap_insn[];
1943 extern unsigned int valid_addr_bitmap_patch[];
1944
1945 valid_addr_bitmap_insn[1] = valid_addr_bitmap_patch[1];
1946 mb();
1947 valid_addr_bitmap_insn[0] = valid_addr_bitmap_patch[0];
1948 flushi(&valid_addr_bitmap_insn[0]);
1949}
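/* Note the store order above: word [1] of the two-instruction patch is
 * written first, then word [0], with mb() enforcing that ordering.  Any
 * cpu that fetches the new first instruction is therefore guaranteed to
 * see the new second instruction as well, and flushi() evicts the stale
 * copy from the instruction cache.
 */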
1950
1da177e4
LT
1951void __init mem_init(void)
1952{
1953 unsigned long codepages, datapages, initpages;
1954 unsigned long addr, last;
1da177e4
LT
1955
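	/* First mark the 4MB chunks occupied by the kernel image itself
	 * as valid; the firmware's "available" list never includes the
	 * kernel image, so the pavail[] pass below would leave those
	 * bits clear.
	 */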
1956 addr = PAGE_OFFSET + kern_base;
1957 last = PAGE_ALIGN(kern_size) + addr;
1958 while (addr < last) {
1959 set_bit(__pa(addr) >> 22, sparc64_valid_addr_bitmap);
1960 addr += PAGE_SIZE;
1961 }
1962
d8ed1d43
DM
1963 setup_valid_addr_bitmap_from_pavail(sparc64_valid_addr_bitmap);
1964 patch_tlb_miss_handler_bitmap();
1da177e4 1965
1da177e4
LT
1966 high_memory = __va(last_valid_pfn << PAGE_SHIFT);
1967
919ee677 1968#ifdef CONFIG_NEED_MULTIPLE_NODES
d8ed1d43
DM
1969 {
1970 int i;
1971 for_each_online_node(i) {
1972 if (NODE_DATA(i)->node_spanned_pages != 0) {
1973 totalram_pages +=
1974 free_all_bootmem_node(NODE_DATA(i));
1975 }
919ee677
DM
1976 }
1977 }
1978#else
1979 totalram_pages = free_all_bootmem();
1980#endif
1981
f1cfdb55
DM
1982 /* We subtract one to account for the mem_map_zero page
1983 * allocated below.
1984 */
919ee677
DM
1985 totalram_pages -= 1;
1986 num_physpages = totalram_pages;
1da177e4
LT
1987
1988 /*
1989 * Set up the zero page, mark it reserved, so that page count
1990 * is not manipulated when freeing the page from user ptes.
1991 */
1992 mem_map_zero = alloc_pages(GFP_KERNEL|__GFP_ZERO, 0);
1993 if (mem_map_zero == NULL) {
1994 prom_printf("paging_init: Cannot alloc zero page.\n");
1995 prom_halt();
1996 }
1997 SetPageReserved(mem_map_zero);
1998
1999 codepages = (((unsigned long) _etext) - ((unsigned long) _start));
2000 codepages = PAGE_ALIGN(codepages) >> PAGE_SHIFT;
2001 datapages = (((unsigned long) _edata) - ((unsigned long) _etext));
2002 datapages = PAGE_ALIGN(datapages) >> PAGE_SHIFT;
2003 initpages = (((unsigned long) __init_end) - ((unsigned long) __init_begin));
2004 initpages = PAGE_ALIGN(initpages) >> PAGE_SHIFT;
2005
96177299 2006 printk("Memory: %luk available (%ldk kernel code, %ldk data, %ldk init) [%016lx,%016lx]\n",
1da177e4
LT
2007 nr_free_pages() << (PAGE_SHIFT-10),
2008 codepages << (PAGE_SHIFT-10),
2009 datapages << (PAGE_SHIFT-10),
2010 initpages << (PAGE_SHIFT-10),
2011 PAGE_OFFSET, (last_valid_pfn << PAGE_SHIFT));
2012
2013 if (tlb_type == cheetah || tlb_type == cheetah_plus)
2014 cheetah_ecache_flush_init();
2015}
2016
898cf0ec 2017void free_initmem(void)
1da177e4
LT
2018{
2019 unsigned long addr, initend;
f2b60794
DM
2020 int do_free = 1;
2021
2022 /* If the physical memory maps were trimmed by kernel command
2023 * line options, don't even try freeing this initmem stuff up.
2024 * The kernel image could have been in the trimmed out region
2025 * and if so the freeing below will free invalid page structs.
2026 */
2027 if (cmdline_memory_size)
2028 do_free = 0;
1da177e4
LT
2029
2030 /*
2031 * The init section is aligned to 8k in vmlinux.lds.  Page align it for page sizes larger than 8k.
2032 */
2033 addr = PAGE_ALIGN((unsigned long)(__init_begin));
2034 initend = (unsigned long)(__init_end) & PAGE_MASK;
2035 for (; addr < initend; addr += PAGE_SIZE) {
2036 unsigned long page;
2037 struct page *p;
2038
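		/* 'addr' is a KERNBASE-relative text address; rebase it
		 * into the linear mapping (PAGE_OFFSET + kern_base) so
		 * that virt_to_page() below resolves to the physical
		 * page actually backing the kernel image.
		 */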
2039 page = (addr +
2040 ((unsigned long) __va(kern_base)) -
2041 ((unsigned long) KERNBASE));
c9cf5528 2042 memset((void *)addr, POISON_FREE_INITMEM, PAGE_SIZE);
1da177e4 2043
f2b60794
DM
2044 if (do_free) {
2045 p = virt_to_page(page);
2046
2047 ClearPageReserved(p);
2048 init_page_count(p);
2049 __free_page(p);
2050 num_physpages++;
2051 totalram_pages++;
2052 }
1da177e4
LT
2053 }
2054}
2055
2056#ifdef CONFIG_BLK_DEV_INITRD
2057void free_initrd_mem(unsigned long start, unsigned long end)
2058{
2059 if (start < end)
2060 printk("Freeing initrd memory: %ldk freed\n", (end - start) >> 10);
2061 for (; start < end; start += PAGE_SIZE) {
2062 struct page *p = virt_to_page(start);
2063
2064 ClearPageReserved(p);
7835e98b 2065 init_page_count(p);
1da177e4
LT
2066 __free_page(p);
2067 num_physpages++;
2068 totalram_pages++;
2069 }
2070}
2071#endif
c4bce90e 2072
c4bce90e
DM
2073#define _PAGE_CACHE_4U (_PAGE_CP_4U | _PAGE_CV_4U)
2074#define _PAGE_CACHE_4V (_PAGE_CP_4V | _PAGE_CV_4V)
2075#define __DIRTY_BITS_4U (_PAGE_MODIFIED_4U | _PAGE_WRITE_4U | _PAGE_W_4U)
2076#define __DIRTY_BITS_4V (_PAGE_MODIFIED_4V | _PAGE_WRITE_4V | _PAGE_W_4V)
2077#define __ACCESS_BITS_4U (_PAGE_ACCESSED_4U | _PAGE_READ_4U | _PAGE_R)
2078#define __ACCESS_BITS_4V (_PAGE_ACCESSED_4V | _PAGE_READ_4V | _PAGE_R)
2079
2080pgprot_t PAGE_KERNEL __read_mostly;
2081EXPORT_SYMBOL(PAGE_KERNEL);
2082
2083pgprot_t PAGE_KERNEL_LOCKED __read_mostly;
2084pgprot_t PAGE_COPY __read_mostly;
0f15952a
DM
2085
2086pgprot_t PAGE_SHARED __read_mostly;
2087EXPORT_SYMBOL(PAGE_SHARED);
2088
c4bce90e
DM
2089unsigned long pg_iobits __read_mostly;
2090
2091unsigned long _PAGE_IE __read_mostly;
987c74fc 2092EXPORT_SYMBOL(_PAGE_IE);
b2bef442 2093
c4bce90e 2094unsigned long _PAGE_E __read_mostly;
b2bef442
DM
2095EXPORT_SYMBOL(_PAGE_E);
2096
c4bce90e 2097unsigned long _PAGE_CACHE __read_mostly;
b2bef442 2098EXPORT_SYMBOL(_PAGE_CACHE);
c4bce90e 2099
46644c24 2100#ifdef CONFIG_SPARSEMEM_VMEMMAP
46644c24
DM
2101unsigned long vmemmap_table[VMEMMAP_SIZE];
2102
2103int __meminit vmemmap_populate(struct page *start, unsigned long nr, int node)
2104{
2105 unsigned long vstart = (unsigned long) start;
2106 unsigned long vend = (unsigned long) (start + nr);
2107 unsigned long phys_start = (vstart - VMEMMAP_BASE);
2108 unsigned long phys_end = (vend - VMEMMAP_BASE);
2109 unsigned long addr = phys_start & VMEMMAP_CHUNK_MASK;
2110 unsigned long end = VMEMMAP_ALIGN(phys_end);
2111 unsigned long pte_base;
2112
2113 pte_base = (_PAGE_VALID | _PAGE_SZ4MB_4U |
2114 _PAGE_CP_4U | _PAGE_CV_4U |
2115 _PAGE_P_4U | _PAGE_W_4U);
2116 if (tlb_type == hypervisor)
2117 pte_base = (_PAGE_VALID | _PAGE_SZ4MB_4V |
2118 _PAGE_CP_4V | _PAGE_CV_4V |
2119 _PAGE_P_4V | _PAGE_W_4V);
2120
2121 for (; addr < end; addr += VMEMMAP_CHUNK) {
2122 unsigned long *vmem_pp =
2123 vmemmap_table + (addr >> VMEMMAP_CHUNK_SHIFT);
2124 void *block;
2125
2126 if (!(*vmem_pp & _PAGE_VALID)) {
2127 block = vmemmap_alloc_block(1UL << 22, node);
2128 if (!block)
2129 return -ENOMEM;
2130
2131 *vmem_pp = pte_base | __pa(block);
2132
2133 printk(KERN_INFO "[%p-%p] page_structs=%lu "
2134 "node=%d entry=%lu/%lu\n", start, block, nr,
2135 node,
2136 addr >> VMEMMAP_CHUNK_SHIFT,
33cd9dfa 2137 VMEMMAP_SIZE);
46644c24
DM
2138 }
2139 }
2140 return 0;
2141}
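/* Each vmemmap_table[] slot holds a fully-formed TTE covering one 4MB
 * chunk (1UL << 22) of the virtual page-struct array, indexed directly
 * at TLB miss time.  An illustrative lookup (hypothetical helper,
 * assuming the VMEMMAP_* definitions used above):
 */
static inline unsigned long vmemmap_tte_for(unsigned long vaddr)
{
	return vmemmap_table[(vaddr - VMEMMAP_BASE) >> VMEMMAP_CHUNK_SHIFT];
}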
2142#endif /* CONFIG_SPARSEMEM_VMEMMAP */
2143
c4bce90e
DM
2144static void prot_init_common(unsigned long page_none,
2145 unsigned long page_shared,
2146 unsigned long page_copy,
2147 unsigned long page_readonly,
2148 unsigned long page_exec_bit)
2149{
2150 PAGE_COPY = __pgprot(page_copy);
0f15952a 2151 PAGE_SHARED = __pgprot(page_shared);
c4bce90e
DM
2152
2153 protection_map[0x0] = __pgprot(page_none);
2154 protection_map[0x1] = __pgprot(page_readonly & ~page_exec_bit);
2155 protection_map[0x2] = __pgprot(page_copy & ~page_exec_bit);
2156 protection_map[0x3] = __pgprot(page_copy & ~page_exec_bit);
2157 protection_map[0x4] = __pgprot(page_readonly);
2158 protection_map[0x5] = __pgprot(page_readonly);
2159 protection_map[0x6] = __pgprot(page_copy);
2160 protection_map[0x7] = __pgprot(page_copy);
2161 protection_map[0x8] = __pgprot(page_none);
2162 protection_map[0x9] = __pgprot(page_readonly & ~page_exec_bit);
2163 protection_map[0xa] = __pgprot(page_shared & ~page_exec_bit);
2164 protection_map[0xb] = __pgprot(page_shared & ~page_exec_bit);
2165 protection_map[0xc] = __pgprot(page_readonly);
2166 protection_map[0xd] = __pgprot(page_readonly);
2167 protection_map[0xe] = __pgprot(page_shared);
2168 protection_map[0xf] = __pgprot(page_shared);
2169}
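/* The table built above is indexed by the low four vm_flags bits
 * (VM_READ=1, VM_WRITE=2, VM_EXEC=4, VM_SHARED=8): entries 0x0-0x7
 * cover private mappings, where writable means page_copy so the first
 * store triggers copy-on-write, while entries 0x8-0xf cover shared
 * mappings, where writable really is page_shared.  The exec bit is
 * masked off whenever VM_EXEC is clear.
 */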
2170
2171static void __init sun4u_pgprot_init(void)
2172{
2173 unsigned long page_none, page_shared, page_copy, page_readonly;
2174 unsigned long page_exec_bit;
2175
2176 PAGE_KERNEL = __pgprot (_PAGE_PRESENT_4U | _PAGE_VALID |
2177 _PAGE_CACHE_4U | _PAGE_P_4U |
2178 __ACCESS_BITS_4U | __DIRTY_BITS_4U |
2179 _PAGE_EXEC_4U);
2180 PAGE_KERNEL_LOCKED = __pgprot (_PAGE_PRESENT_4U | _PAGE_VALID |
2181 _PAGE_CACHE_4U | _PAGE_P_4U |
2182 __ACCESS_BITS_4U | __DIRTY_BITS_4U |
2183 _PAGE_EXEC_4U | _PAGE_L_4U);
c4bce90e
DM
2184
2185 _PAGE_IE = _PAGE_IE_4U;
2186 _PAGE_E = _PAGE_E_4U;
2187 _PAGE_CACHE = _PAGE_CACHE_4U;
2188
2189 pg_iobits = (_PAGE_VALID | _PAGE_PRESENT_4U | __DIRTY_BITS_4U |
2190 __ACCESS_BITS_4U | _PAGE_E_4U);
2191
d1acb421
DM
2192#ifdef CONFIG_DEBUG_PAGEALLOC
2193 kern_linear_pte_xor[0] = (_PAGE_VALID | _PAGE_SZBITS_4U) ^
af1ee569 2194 0xfffff80000000000UL;
d1acb421 2195#else
9cc3a1ac 2196 kern_linear_pte_xor[0] = (_PAGE_VALID | _PAGE_SZ4MB_4U) ^
af1ee569 2197 0xfffff80000000000UL;
d1acb421 2198#endif
9cc3a1ac
DM
2199 kern_linear_pte_xor[0] |= (_PAGE_CP_4U | _PAGE_CV_4U |
2200 _PAGE_P_4U | _PAGE_W_4U);
2201
2202 /* XXX Should use 256MB on Panther. XXX */
2203 kern_linear_pte_xor[1] = kern_linear_pte_xor[0];
c4bce90e
DM
2204
2205 _PAGE_SZBITS = _PAGE_SZBITS_4U;
2206 _PAGE_ALL_SZ_BITS = (_PAGE_SZ4MB_4U | _PAGE_SZ512K_4U |
2207 _PAGE_SZ64K_4U | _PAGE_SZ8K_4U |
2208 _PAGE_SZ32MB_4U | _PAGE_SZ256MB_4U);
2209
2210
2211 page_none = _PAGE_PRESENT_4U | _PAGE_ACCESSED_4U | _PAGE_CACHE_4U;
2212 page_shared = (_PAGE_VALID | _PAGE_PRESENT_4U | _PAGE_CACHE_4U |
2213 __ACCESS_BITS_4U | _PAGE_WRITE_4U | _PAGE_EXEC_4U);
2214 page_copy = (_PAGE_VALID | _PAGE_PRESENT_4U | _PAGE_CACHE_4U |
2215 __ACCESS_BITS_4U | _PAGE_EXEC_4U);
2216 page_readonly = (_PAGE_VALID | _PAGE_PRESENT_4U | _PAGE_CACHE_4U |
2217 __ACCESS_BITS_4U | _PAGE_EXEC_4U);
2218
2219 page_exec_bit = _PAGE_EXEC_4U;
2220
2221 prot_init_common(page_none, page_shared, page_copy, page_readonly,
2222 page_exec_bit);
2223}
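/* How kern_linear_pte_xor[] is consumed: the linear-area TLB miss
 * handler forms a TTE from the faulting address with a single XOR.
 * Because the stored value is (pte_bits ^ PAGE_OFFSET), and a linear
 * vaddr's physical-address bits overlap neither PAGE_OFFSET nor the
 * TTE control bits, that one XOR strips PAGE_OFFSET and merges in the
 * PTE bits at the same time.  Illustrative C equivalent:
 *
 *	tte = vaddr ^ kern_linear_pte_xor[use_256mb ? 1 : 0];
 *	       == (vaddr - PAGE_OFFSET) | pte_bits == paddr | pte_bits
 *
 * (use_256mb stands in for the kpte_linear_bitmap test.)
 */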
2224
2225static void __init sun4v_pgprot_init(void)
2226{
2227 unsigned long page_none, page_shared, page_copy, page_readonly;
2228 unsigned long page_exec_bit;
2229
2230 PAGE_KERNEL = __pgprot (_PAGE_PRESENT_4V | _PAGE_VALID |
2231 _PAGE_CACHE_4V | _PAGE_P_4V |
2232 __ACCESS_BITS_4V | __DIRTY_BITS_4V |
2233 _PAGE_EXEC_4V);
2234 PAGE_KERNEL_LOCKED = PAGE_KERNEL;
c4bce90e
DM
2235
2236 _PAGE_IE = _PAGE_IE_4V;
2237 _PAGE_E = _PAGE_E_4V;
2238 _PAGE_CACHE = _PAGE_CACHE_4V;
2239
d1acb421
DM
2240#ifdef CONFIG_DEBUG_PAGEALLOC
2241 kern_linear_pte_xor[0] = (_PAGE_VALID | _PAGE_SZBITS_4V) ^
af1ee569 2242 0xfffff80000000000UL;
d1acb421 2243#else
9cc3a1ac 2244 kern_linear_pte_xor[0] = (_PAGE_VALID | _PAGE_SZ4MB_4V) ^
af1ee569 2245 0xfffff80000000000UL;
d1acb421 2246#endif
9cc3a1ac
DM
2247 kern_linear_pte_xor[0] |= (_PAGE_CP_4V | _PAGE_CV_4V |
2248 _PAGE_P_4V | _PAGE_W_4V);
2249
d1acb421
DM
2250#ifdef CONFIG_DEBUG_PAGEALLOC
2251 kern_linear_pte_xor[1] = (_PAGE_VALID | _PAGE_SZBITS_4V) ^
af1ee569 2252 0xfffff80000000000UL;
d1acb421 2253#else
9cc3a1ac 2254 kern_linear_pte_xor[1] = (_PAGE_VALID | _PAGE_SZ256MB_4V) ^
af1ee569 2255 0xfffff80000000000UL;
d1acb421 2256#endif
9cc3a1ac
DM
2257 kern_linear_pte_xor[1] |= (_PAGE_CP_4V | _PAGE_CV_4V |
2258 _PAGE_P_4V | _PAGE_W_4V);
c4bce90e
DM
2259
2260 pg_iobits = (_PAGE_VALID | _PAGE_PRESENT_4V | __DIRTY_BITS_4V |
2261 __ACCESS_BITS_4V | _PAGE_E_4V);
2262
2263 _PAGE_SZBITS = _PAGE_SZBITS_4V;
2264 _PAGE_ALL_SZ_BITS = (_PAGE_SZ16GB_4V | _PAGE_SZ2GB_4V |
2265 _PAGE_SZ256MB_4V | _PAGE_SZ32MB_4V |
2266 _PAGE_SZ4MB_4V | _PAGE_SZ512K_4V |
2267 _PAGE_SZ64K_4V | _PAGE_SZ8K_4V);
2268
2269 page_none = _PAGE_PRESENT_4V | _PAGE_ACCESSED_4V | _PAGE_CACHE_4V;
2270 page_shared = (_PAGE_VALID | _PAGE_PRESENT_4V | _PAGE_CACHE_4V |
2271 __ACCESS_BITS_4V | _PAGE_WRITE_4V | _PAGE_EXEC_4V);
2272 page_copy = (_PAGE_VALID | _PAGE_PRESENT_4V | _PAGE_CACHE_4V |
2273 __ACCESS_BITS_4V | _PAGE_EXEC_4V);
2274 page_readonly = (_PAGE_VALID | _PAGE_PRESENT_4V | _PAGE_CACHE_4V |
2275 __ACCESS_BITS_4V | _PAGE_EXEC_4V);
2276
2277 page_exec_bit = _PAGE_EXEC_4V;
2278
2279 prot_init_common(page_none, page_shared, page_copy, page_readonly,
2280 page_exec_bit);
2281}
2282
2283unsigned long pte_sz_bits(unsigned long sz)
2284{
2285 if (tlb_type == hypervisor) {
2286 switch (sz) {
2287 case 8 * 1024:
2288 default:
2289 return _PAGE_SZ8K_4V;
2290 case 64 * 1024:
2291 return _PAGE_SZ64K_4V;
2292 case 512 * 1024:
2293 return _PAGE_SZ512K_4V;
2294 case 4 * 1024 * 1024:
2295 return _PAGE_SZ4MB_4V;
6cb79b3f 2296 }
c4bce90e
DM
2297 } else {
2298 switch (sz) {
2299 case 8 * 1024:
2300 default:
2301 return _PAGE_SZ8K_4U;
2302 case 64 * 1024:
2303 return _PAGE_SZ64K_4U;
2304 case 512 * 1024:
2305 return _PAGE_SZ512K_4U;
2306 case 4 * 1024 * 1024:
2307 return _PAGE_SZ4MB_4U;
6cb79b3f 2308 }
c4bce90e
DM
2309 }
2310}
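/* pte_sz_bits() selects the TTE page-size field encoding for the MMU
 * flavor in use; e.g. pte_sz_bits(4 * 1024 * 1024) yields
 * _PAGE_SZ4MB_4V on sun4v and _PAGE_SZ4MB_4U on sun4u.  mk_pte_io()
 * below ORs the result into PTEs for device mappings of the given
 * page_size.
 */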
2311
2312pte_t mk_pte_io(unsigned long page, pgprot_t prot, int space, unsigned long page_size)
2313{
2314 pte_t pte;
cf627156
DM
2315
2316 pte_val(pte) = page | pgprot_val(pgprot_noncached(prot));
c4bce90e
DM
2317 pte_val(pte) |= (((unsigned long)space) << 32);
2318 pte_val(pte) |= pte_sz_bits(page_size);
c4bce90e 2319
cf627156 2320 return pte;
c4bce90e
DM
2321}
2322
2323static unsigned long kern_large_tte(unsigned long paddr)
2324{
2325 unsigned long val;
2326
2327 val = (_PAGE_VALID | _PAGE_SZ4MB_4U |
2328 _PAGE_CP_4U | _PAGE_CV_4U | _PAGE_P_4U |
2329 _PAGE_EXEC_4U | _PAGE_L_4U | _PAGE_W_4U);
2330 if (tlb_type == hypervisor)
2331 val = (_PAGE_VALID | _PAGE_SZ4MB_4V |
2332 _PAGE_CP_4V | _PAGE_CV_4V | _PAGE_P_4V |
2333 _PAGE_EXEC_4V | _PAGE_W_4V);
2334
2335 return val | paddr;
2336}
2337
c4bce90e
DM
2338/* If not locked, zap it. */
2339void __flush_tlb_all(void)
2340{
2341 unsigned long pstate;
2342 int i;
2343
2344 __asm__ __volatile__("flushw\n\t"
2345 "rdpr %%pstate, %0\n\t"
2346 "wrpr %0, %1, %%pstate"
2347 : "=r" (pstate)
2348 : "i" (PSTATE_IE));
8f361453
DM
2349 if (tlb_type == hypervisor) {
2350 sun4v_mmu_demap_all();
2351 } else if (tlb_type == spitfire) {
c4bce90e
DM
2352 for (i = 0; i < 64; i++) {
2353 /* Spitfire Errata #32 workaround */
2354 /* NOTE: Always runs on spitfire, so no
2355 * cheetah+ page size encodings.
2356 */
2357 __asm__ __volatile__("stxa %0, [%1] %2\n\t"
2358 "flush %%g6"
2359 : /* No outputs */
2360 : "r" (0),
2361 "r" (PRIMARY_CONTEXT), "i" (ASI_DMMU));
2362
2363 if (!(spitfire_get_dtlb_data(i) & _PAGE_L_4U)) {
2364 __asm__ __volatile__("stxa %%g0, [%0] %1\n\t"
2365 "membar #Sync"
2366 : /* no outputs */
2367 : "r" (TLB_TAG_ACCESS), "i" (ASI_DMMU));
2368 spitfire_put_dtlb_data(i, 0x0UL);
2369 }
2370
2371 /* Spitfire Errata #32 workaround */
2372 /* NOTE: Always runs on spitfire, so no
2373 * cheetah+ page size encodings.
2374 */
2375 __asm__ __volatile__("stxa %0, [%1] %2\n\t"
2376 "flush %%g6"
2377 : /* No outputs */
2378 : "r" (0),
2379 "r" (PRIMARY_CONTEXT), "i" (ASI_DMMU));
2380
2381 if (!(spitfire_get_itlb_data(i) & _PAGE_L_4U)) {
2382 __asm__ __volatile__("stxa %%g0, [%0] %1\n\t"
2383 "membar #Sync"
2384 : /* no outputs */
2385 : "r" (TLB_TAG_ACCESS), "i" (ASI_IMMU));
2386 spitfire_put_itlb_data(i, 0x0UL);
2387 }
2388 }
2389 } else if (tlb_type == cheetah || tlb_type == cheetah_plus) {
2390 cheetah_flush_dtlb_all();
2391 cheetah_flush_itlb_all();
2392 }
2393 __asm__ __volatile__("wrpr %0, 0, %%pstate"
2394 : : "r" (pstate));
2395}