include/asm-x86_64/smp.h
#ifndef __ASM_SMP_H
#define __ASM_SMP_H

/*
 * We need the APIC definitions automatically as part of 'smp.h'
 */
#include <linux/threads.h>
#include <linux/cpumask.h>
#include <linux/bitops.h>
#include <linux/init.h>
extern int disable_apic;

#include <asm/mpspec.h>
#include <asm/apic.h>
#include <asm/io_apic.h>
#include <asm/thread_info.h>

#ifdef CONFIG_SMP

#include <asm/pda.h>

struct pt_regs;

extern cpumask_t cpu_present_mask;
extern cpumask_t cpu_possible_map;
extern cpumask_t cpu_online_map;
extern cpumask_t cpu_callout_map;
extern cpumask_t cpu_initialized;
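/*
 * Rough semantics of the masks above (the x86-64 smpboot code has the
 * authoritative definitions): cpu_present_mask covers the CPUs known to be
 * present, cpu_possible_map every CPU that may ever come online,
 * cpu_online_map the CPUs currently online, cpu_callout_map the CPUs the
 * boot processor has called out to start, and cpu_initialized the CPUs
 * that have completed their per-CPU setup. A typical consumer only tests
 * membership, e.g.:
 *
 *	if (cpu_isset(cpu, cpu_online_map))
 *		smp_send_reschedule(cpu);
 */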

/*
 * Private routines/data
 */

extern void smp_alloc_memory(void);
extern volatile unsigned long smp_invalidate_needed;
extern void lock_ipi_call_lock(void);
extern void unlock_ipi_call_lock(void);
extern int smp_num_siblings;
extern void smp_send_reschedule(int cpu);

extern cpumask_t cpu_sibling_map[NR_CPUS];
extern cpumask_t cpu_core_map[NR_CPUS];
extern u8 cpu_llc_id[NR_CPUS];

#define SMP_TRAMPOLINE_BASE 0x6000

/*
 * On x86 all CPUs are mapped 1:1 to the APIC space.
 * This simplifies scheduling and IPI sending and
 * compresses data structures.
 */

static inline int num_booting_cpus(void)
{
	return cpus_weight(cpu_callout_map);
}

#define raw_smp_processor_id() read_pda(cpunumber)
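/*
 * raw_smp_processor_id() above expands to a read of the 'cpunumber' field
 * of the per-CPU PDA, which read_pda() reaches through the %gs segment
 * base, so the fast path needs no function call or stack inspection.
 */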

extern int __cpu_disable(void);
extern void __cpu_die(unsigned int cpu);
extern void prefill_possible_map(void);
extern unsigned num_processors;
extern unsigned __cpuinitdata disabled_cpus;

#define NO_PROC_ID	0xFF		/* No processor magic marker */

#endif /* CONFIG_SMP */

static inline int hard_smp_processor_id(void)
{
	/* we don't want to mark this access volatile - bad code generation */
	return GET_APIC_ID(*(unsigned int *)(APIC_BASE+APIC_ID));
}
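/*
 * hard_smp_processor_id() reads the local APIC's ID register through the
 * fixed APIC mapping at APIC_BASE; GET_APIC_ID() then extracts the APIC ID
 * field, which in the xAPIC register layout lives in the top byte of the
 * APIC_ID register.
 */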

/*
 * Some lowlevel functions might want to know about
 * the real APIC ID <-> CPU # mapping.
 */
extern u8 x86_cpu_to_apicid[NR_CPUS];	/* physical ID */
extern u8 x86_cpu_to_log_apicid[NR_CPUS];
extern u8 bios_cpu_apicid[];

static inline int cpu_present_to_apicid(int mps_cpu)
{
	if (mps_cpu < NR_CPUS)
		return (int)bios_cpu_apicid[mps_cpu];
	else
		return BAD_APICID;
}

#ifndef CONFIG_SMP
#define stack_smp_processor_id() 0
#define cpu_logical_map(x) (x)
#else
#include <asm/thread_info.h>
#define stack_smp_processor_id() \
({								\
	struct thread_info *ti;					\
	__asm__("andq %%rsp,%0; ":"=r" (ti) : "0" (CURRENT_MASK));	\
	ti->cpu;						\
})
#endif
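/*
 * The SMP variant of stack_smp_processor_id() relies on kernel stacks being
 * THREAD_SIZE-aligned with the thread_info at their base: masking %rsp with
 * CURRENT_MASK (expected to be ~(THREAD_SIZE - 1)) recovers the thread_info
 * pointer, whose ->cpu field holds the processor number. It can therefore
 * be used in contexts where the PDA-based raw_smp_processor_id() might not
 * be set up yet.
 */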

static __inline int logical_smp_processor_id(void)
{
	/* we don't want to mark this access volatile - bad code generation */
	return GET_APIC_LOGICAL_ID(*(unsigned long *)(APIC_BASE+APIC_LDR));
}
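/*
 * As with hard_smp_processor_id(), this reads a memory-mapped local APIC
 * register, here the Logical Destination Register (APIC_LDR), and
 * GET_APIC_LOGICAL_ID() extracts the logical APIC ID field from it.
 */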

#ifdef CONFIG_SMP
#define cpu_physical_id(cpu)		x86_cpu_to_apicid[cpu]
#else
#define cpu_physical_id(cpu)		boot_cpu_id
#endif /* !CONFIG_SMP */
#endif
