x86: make read_apic_id return final apicid
[deliverable/linux.git] / include / asm-x86 / smp.h
CommitLineData
c27cfeff
GC
1#ifndef _ASM_X86_SMP_H_
2#define _ASM_X86_SMP_H_
3#ifndef __ASSEMBLY__
53ebef49 4#include <linux/cpumask.h>
93b016f8 5#include <linux/init.h>
7e1efc0c 6#include <asm/percpu.h>
53ebef49 7
b23dab08
GC
8/*
9 * We need the APIC definitions automatically as part of 'smp.h'
10 */
11#ifdef CONFIG_X86_LOCAL_APIC
12# include <asm/mpspec.h>
13# include <asm/apic.h>
14# ifdef CONFIG_X86_IO_APIC
15# include <asm/io_apic.h>
16# endif
17#endif
18#include <asm/pda.h>
19#include <asm/thread_info.h>
20
53ebef49 21extern cpumask_t cpu_callout_map;
8be9ac85
GC
22extern cpumask_t cpu_initialized;
23extern cpumask_t cpu_callin_map;
24
25extern void (*mtrr_hook)(void);
26extern void zap_low_mappings(void);
53ebef49
GC
27
28extern int smp_num_siblings;
29extern unsigned int num_processors;
cb3c8b90 30extern cpumask_t cpu_initialized;
c27cfeff 31
7e1efc0c
GOC
/* Per-CPU topology maps (siblings, cores) and last-level-cache id */
DECLARE_PER_CPU(cpumask_t, cpu_sibling_map);
DECLARE_PER_CPU(cpumask_t, cpu_core_map);
DECLARE_PER_CPU(u16, cpu_llc_id);

/*
 * CPU-number -> APIC id maps; EARLY variant is usable before the
 * regular per-cpu areas are set up (see DECLARE_EARLY_PER_CPU).
 */
DECLARE_EARLY_PER_CPU(u16, x86_cpu_to_apicid);
DECLARE_EARLY_PER_CPU(u16, x86_bios_cpu_apicid);
7e1efc0c 38
9d97d0da
GOC
/* Static state in head.S used to set up a CPU */
extern struct {
	void *sp;		/* initial stack pointer for the booting CPU */
	unsigned short ss;	/* stack segment to load with it */
} stack_start;
44
16694024
GC
/*
 * Platform SMP operations dispatch table; the native_* prototypes
 * below provide the bare-metal implementations.
 */
struct smp_ops {
	void (*smp_prepare_boot_cpu)(void);	/* early boot-CPU setup */
	void (*smp_prepare_cpus)(unsigned max_cpus);
	int (*cpu_up)(unsigned cpu);		/* bring one CPU online */
	void (*smp_cpus_done)(unsigned max_cpus);

	void (*smp_send_stop)(void);		/* stop all other CPUs */
	void (*smp_send_reschedule)(int cpu);	/* reschedule IPI to cpu */
	int (*smp_call_function_mask)(cpumask_t mask,
				      void (*func)(void *info), void *info,
				      int wait);
};
57
14522076
GC
/* Globals due to paravirt */
extern void set_cpu_sibling_map(int cpu);

#ifdef CONFIG_SMP
#ifndef CONFIG_PARAVIRT
/* Hook for paravirt to intercept the startup IPI; no-op on bare metal */
#define startup_ipi_hook(phys_apicid, start_eip, start_esp) do { } while (0)
#endif
extern struct smp_ops smp_ops;
8678969e 66
377d6984
GC
/* Stop all other CPUs, dispatched through the platform smp_ops */
static inline void smp_send_stop(void)
{
	smp_ops.smp_send_stop();
}
71
1e3fac83
GC
/* Early setup of the boot CPU, dispatched through the platform smp_ops */
static inline void smp_prepare_boot_cpu(void)
{
	smp_ops.smp_prepare_boot_cpu();
}
76
7557da67
GC
/* Prepare secondary CPUs for bringup; max_cpus caps how many to boot */
static inline void smp_prepare_cpus(unsigned int max_cpus)
{
	smp_ops.smp_prepare_cpus(max_cpus);
}
81
c5597649
GC
/* Called once all CPUs have been booted, dispatched through smp_ops */
static inline void smp_cpus_done(unsigned int max_cpus)
{
	smp_ops.smp_cpus_done(max_cpus);
}
86
71d19549
GC
/* Bring CPU 'cpu' online via the platform smp_ops; returns its result */
static inline int __cpu_up(unsigned int cpu)
{
	return smp_ops.cpu_up(cpu);
}
91
8678969e
GC
/* Send a reschedule IPI to 'cpu' via the platform smp_ops */
static inline void smp_send_reschedule(int cpu)
{
	smp_ops.smp_send_reschedule(cpu);
}
64b1a21e
GC
96
/*
 * Run func(info) on every CPU in 'mask' via the platform smp_ops;
 * 'wait' selects whether to block until all invocations complete.
 */
static inline int smp_call_function_mask(cpumask_t mask,
					 void (*func) (void *info), void *info,
					 int wait)
{
	return smp_ops.smp_call_function_mask(mask, func, info, wait);
}
71d19549 103
/* Bare-metal implementations that the default smp_ops dispatch to */
void native_smp_prepare_boot_cpu(void);
void native_smp_prepare_cpus(unsigned int max_cpus);
void native_smp_cpus_done(unsigned int max_cpus);
int native_cpu_up(unsigned int cpunum);

/* CPU hot-unplug */
extern int __cpu_disable(void);
extern void __cpu_die(unsigned int cpu);

void smp_store_cpu_info(int id);
/* Physical (APIC) id of a given CPU number */
#define cpu_physical_id(cpu) per_cpu(x86_cpu_to_apicid, cpu)
a9c057c1
GC
114
115/* We don't mark CPUs online until __cpu_up(), so we need another measure */
116static inline int num_booting_cpus(void)
117{
118 return cpus_weight(cpu_callout_map);
119}
4a701737
IM
120#endif /* CONFIG_SMP */
121
#if defined(CONFIG_SMP) && defined(CONFIG_HOTPLUG_CPU)
extern void prefill_possible_map(void);
#else
/* Without CPU hotplug the possible map is fixed; nothing to prefill */
static inline void prefill_possible_map(void)
{
}
#endif

/* NOTE(review): presumably CPUs detected but excluded from bringup —
 * confirm at the definition site */
extern unsigned disabled_cpus __cpuinitdata;
131
a9c057c1
GC
#ifdef CONFIG_X86_32_SMP
/*
 * This function is needed by all SMP systems. It must _always_ be valid
 * from the initial startup. We map APIC_BASE very early in page_setup(),
 * so this is correct in the x86 case.
 */
DECLARE_PER_CPU(int, cpu_number);
#define raw_smp_processor_id() (x86_read_percpu(cpu_number))
extern int safe_smp_processor_id(void);

#elif defined(CONFIG_X86_64_SMP)
/* 64-bit keeps the CPU number in the PDA */
#define raw_smp_processor_id() read_pda(cpunumber)

/*
 * Derive the CPU number from the stack pointer: masking %rsp with
 * CURRENT_MASK yields the thread_info at the base of the stack.
 */
#define stack_smp_processor_id() \
({								\
	struct thread_info *ti;						\
	__asm__("andq %%rsp,%0; ":"=r" (ti) : "0" (CURRENT_MASK));	\
	ti->cpu;							\
})
#define safe_smp_processor_id() smp_processor_id()

#else /* !CONFIG_X86_32_SMP && !CONFIG_X86_64_SMP */
/* UP build: there is exactly one CPU */
#define cpu_physical_id(cpu) boot_cpu_physical_apicid
#define safe_smp_processor_id() 0
#define stack_smp_processor_id() 0
#endif
16694024 158
1b000843
GC
159#ifdef CONFIG_X86_LOCAL_APIC
160
1b374e4d 161#ifndef CONFIG_X86_64
1b000843
GC
162static inline int logical_smp_processor_id(void)
163{
164 /* we don't want to mark this access volatile - bad code generation */
165 return GET_APIC_LOGICAL_ID(*(u32 *)(APIC_BASE + APIC_LDR));
166}
167
4c9961d5 168#include <mach_apicdef.h>
05f2d12c
JS
169static inline unsigned int read_apic_id(void)
170{
4c9961d5
YL
171 unsigned int reg;
172
173 reg = *(u32 *)(APIC_BASE + APIC_ID);
174
175 return GET_APIC_ID(reg);
05f2d12c 176}
ac23d4ee
JS
177#endif
178
05f2d12c 179
# if defined(APIC_DEFINITION) || defined(CONFIG_X86_64)
extern int hard_smp_processor_id(void);
# else
#include <mach_apicdef.h>
/* Physical (APIC) id of the executing CPU */
static inline int hard_smp_processor_id(void)
{
	/* we don't want to mark this access volatile - bad code generation */
	return read_apic_id();
}
# endif /* APIC_DEFINITION */
190
#else /* CONFIG_X86_LOCAL_APIC */

# ifndef CONFIG_SMP
/* UP build without a local APIC: the only CPU has id 0 */
# define hard_smp_processor_id() 0
# endif

#endif /* CONFIG_X86_LOCAL_APIC */
198
#ifdef CONFIG_HOTPLUG_CPU
extern void cpu_uninit(void);
#endif

/* NOTE(review): presumably serializes IPI-based cross-CPU calls —
 * confirm against the smp call function implementation */
extern void lock_ipi_call_lock(void);
extern void unlock_ipi_call_lock(void);
c27cfeff
GC
205#endif /* __ASSEMBLY__ */
206#endif
This page took 0.073192 seconds and 5 git commands to generate.