/*
 * Architecture-specific setup.
 *
 * Copyright (C) 1998-2001, 2003-2004 Hewlett-Packard Co
 *	David Mosberger-Tang <davidm@hpl.hp.com>
 *	Stephane Eranian <eranian@hpl.hp.com>
 * Copyright (C) 2000, 2004 Intel Corp
 *	Rohit Seth <rohit.seth@intel.com>
 *	Suresh Siddha <suresh.b.siddha@intel.com>
 *	Gordon Jin <gordon.jin@intel.com>
 * Copyright (C) 1999 VA Linux Systems
 * Copyright (C) 1999 Walt Drummond <drummond@valinux.com>
 *
 * 12/26/04 S.Siddha, G.Jin, R.Seth
 *			Add multi-threading and multi-core detection
 * 11/12/01 D.Mosberger	Convert get_cpuinfo() to seq_file based show_cpuinfo().
 * 04/04/00 D.Mosberger	renamed cpu_initialized to cpu_online_map
 * 03/31/00 R.Seth	cpu_initialized and current->processor fixes
 * 02/04/00 D.Mosberger	some more get_cpuinfo fixes...
 * 02/01/00 R.Seth	fixed get_cpuinfo for SMP
 * 01/07/99 S.Eranian	added the support for command line argument
 * 06/24/99 W.Drummond	added boot_cpu_data.
 * 05/28/05 Z. Menyhart	Dynamic stride size for "flush_icache_range()"
 */
#include <linux/module.h>
#include <linux/init.h>

#include <linux/acpi.h>
#include <linux/bootmem.h>
#include <linux/console.h>
#include <linux/delay.h>
#include <linux/kernel.h>
#include <linux/reboot.h>
#include <linux/sched.h>
#include <linux/seq_file.h>
#include <linux/string.h>
#include <linux/threads.h>
#include <linux/screen_info.h>
#include <linux/dmi.h>
#include <linux/serial.h>
#include <linux/serial_core.h>
#include <linux/efi.h>
#include <linux/initrd.h>
#include <linux/pm.h>
#include <linux/cpufreq.h>
#include <linux/kexec.h>
#include <linux/crash_dump.h>
#include <asm/ia32.h>
#include <asm/machvec.h>
#include <asm/mca.h>
#include <asm/meminit.h>
#include <asm/page.h>
#include <asm/patch.h>
#include <asm/pgtable.h>
#include <asm/processor.h>
#include <asm/sal.h>
#include <asm/sections.h>
#include <asm/setup.h>
#include <asm/smp.h>
#include <asm/system.h>
#include <asm/unistd.h>
#if defined(CONFIG_SMP) && (IA64_CPU_SIZE > PAGE_SIZE)
# error "struct cpuinfo_ia64 too big!"
#endif

#ifdef CONFIG_SMP
unsigned long __per_cpu_offset[NR_CPUS];
EXPORT_SYMBOL(__per_cpu_offset);
#endif
extern void ia64_setup_printk_clock(void);

DEFINE_PER_CPU(struct cpuinfo_ia64, cpu_info);
DEFINE_PER_CPU(unsigned long, local_per_cpu_offset);
unsigned long ia64_cycles_per_usec;
struct ia64_boot_param *ia64_boot_param;
struct screen_info screen_info;
unsigned long vga_console_iobase;
unsigned long vga_console_membase;
static struct resource data_resource = {
        .name   = "Kernel data",
        .flags  = IORESOURCE_BUSY | IORESOURCE_MEM
};

static struct resource code_resource = {
        .name   = "Kernel code",
        .flags  = IORESOURCE_BUSY | IORESOURCE_MEM
};
extern char _text[], _end[], _etext[];
unsigned long ia64_max_cacheline_size;

int dma_get_cache_alignment(void)
{
        return ia64_max_cacheline_size;
}
EXPORT_SYMBOL(dma_get_cache_alignment);
unsigned long ia64_iobase;      /* virtual address for I/O accesses */
EXPORT_SYMBOL(ia64_iobase);
struct io_space io_space[MAX_IO_SPACES];
EXPORT_SYMBOL(io_space);
unsigned int num_io_spaces;
110 * "flush_icache_range()" needs to know what processor dependent stride size to use
111 * when it makes i-cache(s) coherent with d-caches.
113 #define I_CACHE_STRIDE_SHIFT 5 /* Safest way to go: 32 bytes by 32 bytes */
114 unsigned long ia64_i_cache_stride_shift
= ~0;
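/*
 * Note (added): ia64_i_cache_stride_shift starts out as ~0 and is lowered
 * to the smallest i-cache stride PAL reports, in get_max_cacheline_size()
 * below. The flush_icache_range() assembly then walks the requested range
 * in (1 << ia64_i_cache_stride_shift)-byte steps, issuing one flush-cache
 * instruction per step.
 */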
/*
 * The merge_mask variable needs to be set to (max(iommu_page_size(iommu)) - 1).  This
 * mask specifies a mask of address bits that must be 0 in order for two buffers to be
 * mergeable by the I/O MMU (i.e., the end address of the first buffer and the start
 * address of the second buffer must be aligned to (merge_mask+1) in order to be
 * mergeable).  By default, we assume there is no I/O MMU which can merge physically
 * discontiguous buffers, so we set the merge_mask to ~0UL, which corresponds to an
 * iommu page-size of 2^64.
 */
unsigned long ia64_max_iommu_merge_mask = ~0UL;
EXPORT_SYMBOL(ia64_max_iommu_merge_mask);
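/*
 * Worked example (added for illustration): an I/O MMU with 4KB pages would
 * set ia64_max_iommu_merge_mask to 0xfff, so two scatterlist entries can be
 * merged only when the first ends and the second begins on the same
 * 4KB-aligned boundary.
 */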
/*
 * We use a special marker for the end of memory and it uses the extra (+1) slot
 */
struct rsvd_region rsvd_region[IA64_MAX_RSVD_REGIONS + 1] __initdata;
int num_rsvd_regions __initdata;
/*
 * Filter incoming memory segments based on the primitive map created from the boot
 * parameters. Segments contained in the map are removed from the memory ranges. A
 * caller-specified function is called with the memory ranges that remain after filtering.
 * This routine does not assume the incoming segments are sorted.
 */
int __init
filter_rsvd_memory (unsigned long start, unsigned long end, void *arg)
{
        unsigned long range_start, range_end, prev_start;
        void (*func)(unsigned long, unsigned long, int);
        int i;

#if IGNORE_PFN0
        if (start == PAGE_OFFSET) {
                printk(KERN_WARNING "warning: skipping physical page 0\n");
                start += PAGE_SIZE;
                if (start >= end) return 0;
        }
#endif
        /*
         * lowest possible address(walker uses virtual)
         */
        prev_start = PAGE_OFFSET;
        func = arg;

        for (i = 0; i < num_rsvd_regions; ++i) {
                range_start = max(start, prev_start);
                range_end   = min(end, rsvd_region[i].start);

                if (range_start < range_end)
                        call_pernode_memory(__pa(range_start), range_end - range_start, func);

                /* nothing more available in this segment */
                if (range_end == end) return 0;

                prev_start = rsvd_region[i].end;
        }
        /* end of memory marker allows full processing inside loop body */
        return 0;
}
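/*
 * Usage sketch (added; the callback name below is hypothetical): the
 * memory-discovery code hands this routine to the EFI memory-map walker
 * together with the function that should receive the usable ranges, e.g.
 *
 *	efi_memmap_walk(filter_rsvd_memory, register_usable_range);
 *
 * so "arg" above is really the (unsigned long, unsigned long, int)
 * callback that gets the ranges left over after reservation filtering.
 */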
static void __init
sort_regions (struct rsvd_region *rsvd_region, int max)
{
        int j;

        /* simple bubble sorting */
        while (max--) {
                for (j = 0; j < max; ++j) {
                        if (rsvd_region[j].start > rsvd_region[j+1].start) {
                                struct rsvd_region tmp;
                                tmp = rsvd_region[j];
                                rsvd_region[j] = rsvd_region[j + 1];
                                rsvd_region[j + 1] = tmp;
                        }
                }
        }
}
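/*
 * Design note (added): an O(n^2) bubble sort is fine here because the table
 * holds at most IA64_MAX_RSVD_REGIONS + 1 entries and this runs once, early
 * in boot, before the real allocators and library sorts are available.
 */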
/*
 * Request address space for all standard resources
 */
static int __init register_memory(void)
{
        code_resource.start = ia64_tpa(_text);
        code_resource.end   = ia64_tpa(_etext) - 1;
        data_resource.start = ia64_tpa(_etext);
        data_resource.end   = ia64_tpa(_end) - 1;
        efi_initialize_iomem_resources(&code_resource, &data_resource);

        return 0;
}

__initcall(register_memory);
/**
 * reserve_memory - setup reserved memory areas
 *
 * Setup the reserved memory areas set aside for the boot parameters,
 * initrd, etc.  There are currently %IA64_MAX_RSVD_REGIONS defined,
 * see include/asm-ia64/meminit.h if you need to define more.
 */
void __init
reserve_memory (void)
{
        int n = 0;

        /*
         * none of the entries in this table overlap
         */
        rsvd_region[n].start = (unsigned long) ia64_boot_param;
        rsvd_region[n].end   = rsvd_region[n].start + sizeof(*ia64_boot_param);
        n++;

        rsvd_region[n].start = (unsigned long) __va(ia64_boot_param->efi_memmap);
        rsvd_region[n].end   = rsvd_region[n].start + ia64_boot_param->efi_memmap_size;
        n++;

        rsvd_region[n].start = (unsigned long) __va(ia64_boot_param->command_line);
        rsvd_region[n].end   = (rsvd_region[n].start
                                + strlen(__va(ia64_boot_param->command_line)) + 1);
        n++;

        rsvd_region[n].start = (unsigned long) ia64_imva((void *)KERNEL_START);
        rsvd_region[n].end   = (unsigned long) ia64_imva(_end);
        n++;

#ifdef CONFIG_BLK_DEV_INITRD
        if (ia64_boot_param->initrd_start) {
                rsvd_region[n].start = (unsigned long)__va(ia64_boot_param->initrd_start);
                rsvd_region[n].end   = rsvd_region[n].start + ia64_boot_param->initrd_size;
                n++;
        }
#endif

#ifdef CONFIG_PROC_VMCORE
        if (reserve_elfcorehdr(&rsvd_region[n].start,
                               &rsvd_region[n].end) == 0)
                n++;
#endif

        efi_memmap_init(&rsvd_region[n].start, &rsvd_region[n].end);
        n++;

#ifdef CONFIG_KEXEC
        /* crashkernel=size@offset specifies the size to reserve for a crash
         * kernel. If offset is 0, then it is determined automatically.
         * By reserving this memory we guarantee that linux never sets it
         * up as a DMA target. Useful for holding code to do something
         * appropriate after a kernel panic.
         */
        {
                char *from = strstr(boot_command_line, "crashkernel=");
                unsigned long base, size;
                if (from) {
                        size = memparse(from + 12, &from);
                        if (*from == '@')
                                base = memparse(from+1, &from);
                        else
                                base = 0;
                        if (size) {
                                if (!base) {
                                        sort_regions(rsvd_region, n);
                                        base = kdump_find_rsvd_region(size,
                                                                      rsvd_region, n);
                                }
                                if (base != ~0UL) {
                                        rsvd_region[n].start =
                                                (unsigned long)__va(base);
                                        rsvd_region[n].end =
                                                (unsigned long)__va(base + size);
                                        n++;
                                        crashk_res.start = base;
                                        crashk_res.end = base + size - 1;
                                }
                        }
                }
                efi_memmap_res.start = ia64_boot_param->efi_memmap;
                efi_memmap_res.end = efi_memmap_res.start +
                        ia64_boot_param->efi_memmap_size;
                boot_param_res.start = __pa(ia64_boot_param);
                boot_param_res.end = boot_param_res.start +
                        sizeof(*ia64_boot_param);
        }
#endif
        /* end of memory marker */
        rsvd_region[n].start = ~0UL;
        rsvd_region[n].end   = ~0UL;
        n++;

        num_rsvd_regions = n;
        BUG_ON(IA64_MAX_RSVD_REGIONS + 1 < n);

        sort_regions(rsvd_region, num_rsvd_regions);
}
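/*
 * Example (added for illustration): booting with "crashkernel=128M@0"
 * parses size = 128MB and base = 0; base 0 means "place it anywhere", so
 * kdump_find_rsvd_region() picks a hole between the already-sorted reserved
 * regions. A fixed placement such as "crashkernel=128M@256M" skips that
 * search and reserves 128MB starting at physical address 256MB.
 */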
/**
 * find_initrd - get initrd parameters from the boot parameter structure
 *
 * Grab the initrd start and end from the boot parameter struct given us by
 * the boot loader.
 */
void __init
find_initrd (void)
{
#ifdef CONFIG_BLK_DEV_INITRD
        if (ia64_boot_param->initrd_start) {
                initrd_start = (unsigned long)__va(ia64_boot_param->initrd_start);
                initrd_end   = initrd_start+ia64_boot_param->initrd_size;

                printk(KERN_INFO "Initial ramdisk at: 0x%lx (%lu bytes)\n",
                       initrd_start, ia64_boot_param->initrd_size);
        }
#endif
}
static void __init
io_port_init (void)
{
        unsigned long phys_iobase;

        /*
         * Set `iobase' based on the EFI memory map or, failing that, the
         * value firmware left in ar.k0.
         *
         * Note that in ia32 mode, IN/OUT instructions use ar.k0 to compute
         * the port's virtual address, so ia32_load_state() loads it with a
         * user virtual address.  But in ia64 mode, glibc uses the
         * *physical* address in ar.k0 to mmap the appropriate area from
         * /dev/mem, and the inX()/outX() interfaces use MMIO.  In both
         * cases, user-mode can only use the legacy 0-64K I/O port space.
         *
         * ar.k0 is not involved in kernel I/O port accesses, which can use
         * any of the I/O port spaces and are done via MMIO using the
         * virtual mmio_base from the appropriate io_space[].
         */
        phys_iobase = efi_get_iobase();
        if (!phys_iobase) {
                phys_iobase = ia64_get_kr(IA64_KR_IO_BASE);
                printk(KERN_INFO "No I/O port range found in EFI memory map, "
                        "falling back to AR.KR0 (0x%lx)\n", phys_iobase);
        }
        ia64_iobase = (unsigned long) ioremap(phys_iobase, 0);
        ia64_set_kr(IA64_KR_IO_BASE, __pa(ia64_iobase));

        /* setup legacy IO port space */
        io_space[0].mmio_base = ia64_iobase;
        io_space[0].sparse = 1;
        num_io_spaces = 1;
}
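/*
 * Note (added): sparse = 1 selects the sparse port-to-MMIO encoding for
 * space 0, i.e. the inX()/outX() accessors scatter the 64K legacy port
 * range across the mapping rather than using a flat 1:1 offset (see the
 * IO_SPACE_SPARSE_ENCODING() helper in include/asm-ia64/io.h).
 */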
/**
 * early_console_setup - setup debugging console
 *
 * Consoles started here require little enough setup that we can start using
 * them very early in the boot process, either right after the machine
 * vector initialization, or even before if the drivers can detect their hw.
 *
 * Returns non-zero if a console couldn't be setup.
 */
static inline int __init
early_console_setup (char *cmdline)
{
        int earlycons = 0;

#ifdef CONFIG_SERIAL_SGI_L1_CONSOLE
        {
                extern int sn_serial_console_early_setup(void);
                if (!sn_serial_console_early_setup())
                        earlycons++;
        }
#endif
#ifdef CONFIG_EFI_PCDP
        if (!efi_setup_pcdp_console(cmdline))
                earlycons++;
#endif

        return (earlycons) ? 0 : -1;
}
static inline void
mark_bsp_online (void)
{
#ifdef CONFIG_SMP
        /* If we register an early console, allow CPU 0 to printk */
        cpu_set(smp_processor_id(), cpu_online_map);
#endif
}
#ifdef CONFIG_SMP
static void __init
check_for_logical_procs (void)
{
        pal_logical_to_physical_t info;
        s64 status;

        status = ia64_pal_logical_to_phys(0, &info);
        if (status == -1) {
                printk(KERN_INFO "No logical to physical processor mapping "
                       "available\n");
                return;
        }
        if (status) {
                printk(KERN_ERR "ia64_pal_logical_to_phys failed with %ld\n",
                       status);
                return;
        }
        /*
         * Total number of siblings that BSP has.  Though not all of them
         * may have booted successfully. The correct number of siblings
         * booted is in info.overview_num_log.
         */
        smp_num_siblings = info.overview_tpc;
        smp_num_cpucores = info.overview_cpp;
}
#endif
static __initdata int nomca;
static __init int setup_nomca(char *s)
{
        nomca = 1;
        return 0;
}
early_param("nomca", setup_nomca);
#ifdef CONFIG_PROC_VMCORE
/* elfcorehdr= specifies the location of elf core header
 * stored by the crashed kernel.
 */
static int __init parse_elfcorehdr(char *arg)
{
        if (!arg)
                return -EINVAL;

        elfcorehdr_addr = memparse(arg, &arg);
        return 0;
}
early_param("elfcorehdr", parse_elfcorehdr);
int __init reserve_elfcorehdr(unsigned long *start, unsigned long *end)
{
        unsigned long length;

        /* We get the address using the kernel command line,
         * but the size is extracted from the EFI tables.
         * Both address and size are required for reservation
         * to work properly.
         */

        if (elfcorehdr_addr >= ELFCORE_ADDR_MAX)
                return -EINVAL;

        if ((length = vmcore_find_descriptor_size(elfcorehdr_addr)) == 0) {
                elfcorehdr_addr = ELFCORE_ADDR_MAX;
                return -EINVAL;
        }

        *start = (unsigned long)__va(elfcorehdr_addr);
        *end = *start + length;
        return 0;
}

#endif /* CONFIG_PROC_VMCORE */
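/*
 * Note (added): "elfcorehdr=<addr>" is not typed by hand; kexec-tools
 * appends it to the capture kernel's command line when loading the kdump
 * kernel, pointing at the ELF core header the crashed kernel left in
 * memory for /proc/vmcore.
 */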
void __init
setup_arch (char **cmdline_p)
{
        unw_init();

        ia64_patch_vtop((u64) __start___vtop_patchlist, (u64) __end___vtop_patchlist);

        *cmdline_p = __va(ia64_boot_param->command_line);
        strlcpy(boot_command_line, *cmdline_p, COMMAND_LINE_SIZE);

        efi_init();
        io_port_init();

        parse_early_param();

#ifdef CONFIG_IA64_GENERIC
        machvec_init(NULL);
#endif

        if (early_console_setup(*cmdline_p) == 0)
                mark_bsp_online();

#ifdef CONFIG_ACPI
        /* Initialize the ACPI boot-time table parser */
        acpi_table_init();
# ifdef CONFIG_ACPI_NUMA
        acpi_numa_init();
# endif
#else
# ifdef CONFIG_SMP
        smp_build_cpu_map();    /* happens, e.g., with the Ski simulator */
# endif
#endif /* CONFIG_ACPI */

        find_memory();

        /* process SAL system table: */
        ia64_sal_init(__va(efi.sal_systab));

        ia64_setup_printk_clock();

#ifdef CONFIG_SMP
        cpu_physical_id(0) = hard_smp_processor_id();

        cpu_set(0, cpu_sibling_map[0]);
        cpu_set(0, cpu_core_map[0]);

        check_for_logical_procs();
        if (smp_num_cpucores > 1)
                printk(KERN_INFO
                       "cpu package is Multi-Core capable: number of cores=%d\n",
                       smp_num_cpucores);
        if (smp_num_siblings > 1)
                printk(KERN_INFO
                       "cpu package is Multi-Threading capable: number of siblings=%d\n",
                       smp_num_siblings);
#endif

        cpu_init();     /* initialize the bootstrap CPU */
        mmu_context_init();     /* initialize context_id bitmap */

        check_sal_cache_flush();

#ifdef CONFIG_ACPI
        acpi_boot_init();
#endif

#ifdef CONFIG_VT
        if (!conswitchp) {
# if defined(CONFIG_DUMMY_CONSOLE)
                conswitchp = &dummy_con;
# endif
# if defined(CONFIG_VGA_CONSOLE)
                /*
                 * Non-legacy systems may route legacy VGA MMIO range to system
                 * memory.  vga_con probes the MMIO hole, so memory looks like
                 * a VGA device to it.  The EFI memory map can tell us if it's
                 * memory so we can avoid this problem.
                 */
                if (efi_mem_type(0xA0000) != EFI_CONVENTIONAL_MEMORY)
                        conswitchp = &vga_con;
# endif
        }
#endif

        /* enable IA-64 Machine Check Abort Handling unless disabled */
        if (!nomca)
                ia64_mca_init();

        platform_setup(cmdline_p);
        paging_init();
}
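/*
 * Ordering note (added): the sequence above is constraint-driven --
 * efi_init() must run before find_memory() and ia64_sal_init() because
 * both consume EFI-provided tables, and cpu_init() must run before
 * anything that touches the boot CPU's per-CPU area.
 */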
/*
 * Display cpu info for all CPUs.
 */
static int
show_cpuinfo (struct seq_file *m, void *v)
{
#ifdef CONFIG_SMP
#       define lpj      c->loops_per_jiffy
#       define cpunum   c->cpu
#else
#       define lpj      loops_per_jiffy
#       define cpunum   0
#endif
        static struct {
                unsigned long mask;
                const char *feature_name;
        } feature_bits[] = {
                { 1UL << 0, "branchlong" },
                { 1UL << 1, "spontaneous deferral"},
                { 1UL << 2, "16-byte atomic ops" }
        };
        char features[128], *cp, *sep;
        struct cpuinfo_ia64 *c = v;
        unsigned long mask;
        unsigned long proc_freq;
        int i, size;

        mask = c->features;

        /* build the feature string: */
        memcpy(features, "standard", 9);
        cp = features;
        size = sizeof(features);
        sep = "";
        for (i = 0; i < ARRAY_SIZE(feature_bits) && size > 1; ++i) {
                if (mask & feature_bits[i].mask) {
                        cp += snprintf(cp, size, "%s%s", sep,
                                       feature_bits[i].feature_name),
                        sep = ", ";
                        mask &= ~feature_bits[i].mask;
                        size = sizeof(features) - (cp - features);
                }
        }
        if (mask && size > 1) {
                /* print unknown features as a hex value */
                snprintf(cp, size, "%s0x%lx", sep, mask);
        }

        proc_freq = cpufreq_quick_get(cpunum);
        if (!proc_freq)
                proc_freq = c->proc_freq / 1000;

        seq_printf(m,
                   "processor  : %d\n"
                   "vendor     : %s\n"
                   "arch       : IA-64\n"
                   "family     : %u\n"
                   "model      : %u\n"
                   "model name : %s\n"
                   "revision   : %u\n"
                   "archrev    : %u\n"
                   "features   : %s\n"
                   "cpu number : %lu\n"
                   "cpu regs   : %u\n"
                   "cpu MHz    : %lu.%03lu\n"
                   "itc MHz    : %lu.%06lu\n"
                   "BogoMIPS   : %lu.%02lu\n",
                   cpunum, c->vendor, c->family, c->model,
                   c->model_name, c->revision, c->archrev,
                   features, c->ppn, c->number,
                   proc_freq / 1000, proc_freq % 1000,
                   c->itc_freq / 1000000, c->itc_freq % 1000000,
                   lpj*HZ/500000, (lpj*HZ/5000) % 100);
#ifdef CONFIG_SMP
        seq_printf(m, "siblings   : %u\n", cpus_weight(cpu_core_map[cpunum]));
        if (c->threads_per_core > 1 || c->cores_per_socket > 1)
                seq_printf(m,
                           "physical id: %u\n"
                           "core id    : %u\n"
                           "thread id  : %u\n",
                           c->socket_id, c->core_id, c->thread_id);
#endif
        seq_printf(m, "\n");

        return 0;
}
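/*
 * Illustrative /proc/cpuinfo entry (added; values are made up, the field
 * layout follows the format string above):
 *
 *	processor  : 0
 *	vendor     : GenuineIntel
 *	arch       : IA-64
 *	family     : 31
 *	model      : 1
 *	model name : Madison
 *	revision   : 5
 *	archrev    : 0
 *	features   : branchlong
 *	cpu number : 0
 *	cpu regs   : 4
 *	cpu MHz    : 1500.000
 *	itc MHz    : 1500.000000
 *	BogoMIPS   : 2245.18
 */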
static void *
c_start (struct seq_file *m, loff_t *pos)
{
#ifdef CONFIG_SMP
        while (*pos < NR_CPUS && !cpu_isset(*pos, cpu_online_map))
                ++*pos;
#endif
        return *pos < NR_CPUS ? cpu_data(*pos) : NULL;
}

static void *
c_next (struct seq_file *m, void *v, loff_t *pos)
{
        ++*pos;
        return c_start(m, pos);
}

static void
c_stop (struct seq_file *m, void *v)
{
}

struct seq_operations cpuinfo_op = {
        .start =        c_start,
        .next =         c_next,
        .stop =         c_stop,
        .show =         show_cpuinfo
};
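/*
 * Note (added): this is the standard seq_file iterator protocol --
 * c_start() positions at the first online CPU at or after the given
 * offset, c_next() advances the offset and re-resolves it, and
 * show_cpuinfo() runs once per CPU returned, so /proc/cpuinfo skips
 * CPUs that are configured but not online.
 */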
#define MAX_BRANDS      8
static char brandname[MAX_BRANDS][128];

static char * __cpuinit
get_model_name(__u8 family, __u8 model)
{
        static int overflow;
        char brand[128];
        int i;

        memcpy(brand, "Unknown", 8);
        if (ia64_pal_get_brand_info(brand)) {
                if (family == 0x7)
                        memcpy(brand, "Merced", 7);
                else if (family == 0x1f) switch (model) {
                        case 0: memcpy(brand, "McKinley", 9); break;
                        case 1: memcpy(brand, "Madison", 8); break;
                        case 2: memcpy(brand, "Madison up to 9M cache", 23); break;
                }
        }
        for (i = 0; i < MAX_BRANDS; i++)
                if (strcmp(brandname[i], brand) == 0)
                        return brandname[i];
        for (i = 0; i < MAX_BRANDS; i++)
                if (brandname[i][0] == '\0')
                        return strcpy(brandname[i], brand);
        if (overflow++ == 0)
                printk(KERN_ERR
                       "%s: Table overflow. Some processor model information will be missing\n",
                       __FUNCTION__);
        return "Unknown";
}
static void __cpuinit
identify_cpu (struct cpuinfo_ia64 *c)
{
        union {
                unsigned long bits[5];
                struct {
                        /* id 0 & 1: */
                        char vendor[16];

                        /* id 2 */
                        u64 ppn;                /* processor serial number */

                        /* id 3: */
                        unsigned number         :  8;
                        unsigned revision       :  8;
                        unsigned model          :  8;
                        unsigned family         :  8;
                        unsigned archrev        :  8;
                        unsigned reserved       : 24;

                        /* id 4: */
                        u64 features;
                } field;
        } cpuid;
        pal_vm_info_1_u_t vm1;
        pal_vm_info_2_u_t vm2;
        pal_status_t status;
        unsigned long impl_va_msb = 50, phys_addr_size = 44;    /* Itanium defaults */
        int i;

        for (i = 0; i < 5; ++i)
                cpuid.bits[i] = ia64_get_cpuid(i);

        memcpy(c->vendor, cpuid.field.vendor, 16);
#ifdef CONFIG_SMP
        c->cpu = smp_processor_id();

        /* below default values will be overwritten by identify_siblings()
         * for Multi-Threading/Multi-Core capable CPUs
         */
        c->threads_per_core = c->cores_per_socket = c->num_log = 1;
        c->socket_id = -1;

        identify_siblings(c);
#endif
        c->ppn = cpuid.field.ppn;
        c->number = cpuid.field.number;
        c->revision = cpuid.field.revision;
        c->model = cpuid.field.model;
        c->family = cpuid.field.family;
        c->archrev = cpuid.field.archrev;
        c->features = cpuid.field.features;
        c->model_name = get_model_name(c->family, c->model);

        status = ia64_pal_vm_summary(&vm1, &vm2);
        if (status == PAL_STATUS_SUCCESS) {
                impl_va_msb = vm2.pal_vm_info_2_s.impl_va_msb;
                phys_addr_size = vm1.pal_vm_info_1_s.phys_add_size;
        }
        c->unimpl_va_mask = ~((7L<<61) | ((1L << (impl_va_msb + 1)) - 1));
        c->unimpl_pa_mask = ~((1L<<63) | ((1L << phys_addr_size) - 1));
}
void
setup_per_cpu_areas (void)
{
        /* start_kernel() requires this... */
#ifdef CONFIG_ACPI_HOTPLUG_CPU
        prefill_possible_map();
#endif
}
/*
 * Calculate the max. cache line size.
 *
 * In addition, the minimum of the i-cache stride sizes is calculated for
 * "flush_icache_range()".
 */
static void __cpuinit
get_max_cacheline_size (void)
{
        unsigned long line_size, max = 1;
        u64 l, levels, unique_caches;
        pal_cache_config_info_t cci;
        s64 status;

        status = ia64_pal_cache_summary(&levels, &unique_caches);
        if (status != 0) {
                printk(KERN_ERR "%s: ia64_pal_cache_summary() failed (status=%ld)\n",
                       __FUNCTION__, status);
                max = SMP_CACHE_BYTES;
                /* Safest setup for "flush_icache_range()" */
                ia64_i_cache_stride_shift = I_CACHE_STRIDE_SHIFT;
                goto out;
        }

        for (l = 0; l < levels; ++l) {
                status = ia64_pal_cache_config_info(l, /* cache_type (data_or_unified)= */ 2,
                                                    &cci);
                if (status != 0) {
                        printk(KERN_ERR
                               "%s: ia64_pal_cache_config_info(l=%lu, 2) failed (status=%ld)\n",
                               __FUNCTION__, l, status);
                        max = SMP_CACHE_BYTES;
                        /* The safest setup for "flush_icache_range()" */
                        cci.pcci_stride = I_CACHE_STRIDE_SHIFT;
                        cci.pcci_unified = 1;
                }
                line_size = 1 << cci.pcci_line_size;
                if (line_size > max)
                        max = line_size;
                if (!cci.pcci_unified) {
                        status = ia64_pal_cache_config_info(l,
                                                    /* cache_type (instruction)= */ 1,
                                                    &cci);
                        if (status != 0) {
                                printk(KERN_ERR
                                       "%s: ia64_pal_cache_config_info(l=%lu, 1) failed (status=%ld)\n",
                                       __FUNCTION__, l, status);
                                /* The safest setup for "flush_icache_range()" */
                                cci.pcci_stride = I_CACHE_STRIDE_SHIFT;
                        }
                }
                if (cci.pcci_stride < ia64_i_cache_stride_shift)
                        ia64_i_cache_stride_shift = cci.pcci_stride;
        }
  out:
        if (max > ia64_max_cacheline_size)
                ia64_max_cacheline_size = max;
}
/*
 * cpu_init() initializes state that is per-CPU.  This function acts
 * as a 'CPU state barrier', nothing should get across.
 */
void __cpuinit
cpu_init (void)
{
        extern void __cpuinit ia64_mmu_init (void *);
        static unsigned long max_num_phys_stacked = IA64_NUM_PHYS_STACK_REG;
        unsigned long num_phys_stacked;
        pal_vm_info_2_u_t vmi;
        unsigned int max_ctx;
        struct cpuinfo_ia64 *cpu_info;
        void *cpu_data;

        cpu_data = per_cpu_init();

        /*
         * We set ar.k3 so that assembly code in MCA handler can compute
         * physical addresses of per cpu variables with a simple:
         *      phys = ar.k3 + &per_cpu_var
         */
        ia64_set_kr(IA64_KR_PER_CPU_DATA,
                    ia64_tpa(cpu_data) - (long) __per_cpu_start);

        get_max_cacheline_size();

        /*
         * We can't pass "local_cpu_data" to identify_cpu() because we haven't called
         * ia64_mmu_init() yet.  And we can't call ia64_mmu_init() first because it
         * depends on the data returned by identify_cpu().  We break the dependency by
         * accessing cpu_data() through the canonical per-CPU address.
         */
        cpu_info = cpu_data + ((char *) &__ia64_per_cpu_var(cpu_info) - __per_cpu_start);
        identify_cpu(cpu_info);

#ifdef CONFIG_MCKINLEY
        {
#               define FEATURE_SET 16
                struct ia64_pal_retval iprv;

                if (cpu_info->family == 0x1f) {
                        PAL_CALL_PHYS(iprv, PAL_PROC_GET_FEATURES, 0, FEATURE_SET, 0);
                        if ((iprv.status == 0) && (iprv.v0 & 0x80) && (iprv.v2 & 0x80))
                                PAL_CALL_PHYS(iprv, PAL_PROC_SET_FEATURES,
                                              (iprv.v1 | 0x80), FEATURE_SET, 0);
                }
        }
#endif

        /* Clear the stack memory reserved for pt_regs: */
        memset(task_pt_regs(current), 0, sizeof(struct pt_regs));

        ia64_set_kr(IA64_KR_FPU_OWNER, 0);

        /*
         * Initialize the page-table base register to a global
         * directory with all zeroes.  This ensures that we can handle
         * TLB-misses to user address-space even before we created the
         * first user address-space.  This may happen, e.g., due to
         * aggressive use of lfetch.fault.
         */
        ia64_set_kr(IA64_KR_PT_BASE, __pa(ia64_imva(empty_zero_page)));

        /*
         * Initialize default control register to defer speculative faults except
         * for those arising from TLB misses, which are not deferred.  The
         * kernel MUST NOT depend on a particular setting of these bits (in other words,
         * the kernel must have recovery code for all speculative accesses).  Turn on
         * dcr.lc as per recommendation by the architecture team.  Most IA-32 apps
         * shouldn't be affected by this (moral: keep your ia32 locks aligned and you'll
         * be fine).
         */
        ia64_setreg(_IA64_REG_CR_DCR,  (  IA64_DCR_DP | IA64_DCR_DK | IA64_DCR_DX | IA64_DCR_DR
                                        | IA64_DCR_DA | IA64_DCR_DD | IA64_DCR_LC));
        atomic_inc(&init_mm.mm_count);
        current->active_mm = &init_mm;
        if (current->mm)
                BUG();

        ia64_mmu_init(ia64_imva(cpu_data));
        ia64_mca_cpu_init(ia64_imva(cpu_data));

#ifdef CONFIG_IA32_SUPPORT
        ia32_cpu_init();
#endif

        /* Clear ITC to eliminate sched_clock() overflows in human time. */
        ia64_set_itc(0);

        /* disable all local interrupt sources: */
        ia64_set_itv(1 << 16);
        ia64_set_lrr0(1 << 16);
        ia64_set_lrr1(1 << 16);
        ia64_setreg(_IA64_REG_CR_PMV, 1 << 16);
        ia64_setreg(_IA64_REG_CR_CMCV, 1 << 16);

        /* clear TPR & XTP to enable all interrupt classes: */
        ia64_setreg(_IA64_REG_CR_TPR, 0);
#ifdef CONFIG_SMP
        normal_xtp();
#endif

        /* set ia64_ctx.max_rid to the maximum RID that is supported by all CPUs: */
        if (ia64_pal_vm_summary(NULL, &vmi) == 0)
                max_ctx = (1U << (vmi.pal_vm_info_2_s.rid_size - 3)) - 1;
        else {
                printk(KERN_WARNING "cpu_init: PAL VM summary failed, assuming 18 RID bits\n");
                max_ctx = (1U << 15) - 1;       /* use architected minimum */
        }
        while (max_ctx < ia64_ctx.max_ctx) {
                unsigned int old = ia64_ctx.max_ctx;
                if (cmpxchg(&ia64_ctx.max_ctx, old, max_ctx) == old)
                        break;
        }

        if (ia64_pal_rse_info(&num_phys_stacked, NULL) != 0) {
                printk(KERN_WARNING "cpu_init: PAL RSE info failed; assuming 96 physical "
                       "stacked regs\n");
                num_phys_stacked = 96;
        }
        /* size of physical stacked register partition plus 8 bytes: */
        if (num_phys_stacked > max_num_phys_stacked) {
                ia64_patch_phys_stack_reg(num_phys_stacked*8 + 8);
                max_num_phys_stacked = num_phys_stacked;
        }
        platform_cpu_init();
        pm_idle = default_idle;
}
/*
 * On SMP systems, when the scheduler does migration-cost autodetection,
 * it needs a way to flush as much of the CPU's caches as possible.
 */
void sched_cacheflush(void)
{
        ia64_sal_cache_flush(3);
}

void __init
check_bugs (void)
{
        ia64_patch_mckinley_e9((unsigned long) __start___mckinley_e9_bundles,
                               (unsigned long) __end___mckinley_e9_bundles);
}
static int __init run_dmi_scan(void)
{
        dmi_scan_machine();
        return 0;
}
core_initcall(run_dmi_scan);