#ifndef __ALPHA_PERCPU_H
#define __ALPHA_PERCPU_H

#include <linux/compiler.h>
#include <linux/threads.h>
#include <linux/percpu-defs.h>

/*
 * Determine the real variable name from the name visible in the
 * kernel sources.
 */
#define per_cpu_var(var) per_cpu__##var
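/*
 * For example (illustrative name only): per_cpu_var(foo) expands to
 * the linker-visible symbol per_cpu__foo.
 */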

#ifdef CONFIG_SMP

/*
 * per_cpu_offset() is the offset that has to be added to a
 * percpu variable to get to the instance for a certain processor.
 */
extern unsigned long __per_cpu_offset[NR_CPUS];

#define per_cpu_offset(x) (__per_cpu_offset[x])

#define __my_cpu_offset per_cpu_offset(raw_smp_processor_id())
#ifdef CONFIG_DEBUG_PREEMPT
#define my_cpu_offset per_cpu_offset(smp_processor_id())
#else
#define my_cpu_offset __my_cpu_offset
#endif
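
/*
 * Descriptive note: my_cpu_offset goes through smp_processor_id(), so
 * with CONFIG_DEBUG_PREEMPT a use from preemptible context is flagged;
 * __my_cpu_offset uses raw_smp_processor_id() and skips that check.
 * A per-cpu pointer is then formed by adding the offset to the address
 * of the canonical copy, e.g. (illustrative only):
 *
 *	void *p = (char *)&per_cpu_var(foo) + per_cpu_offset(cpu);
 */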

#ifndef MODULE
#define SHIFT_PERCPU_PTR(var, offset) RELOC_HIDE(&per_cpu_var(var), (offset))
#define PER_CPU_ATTRIBUTES
#else
/*
 * To calculate addresses of locally defined variables, GCC uses a 32-bit
 * displacement from the GP.  That doesn't work for per-cpu variables in
 * modules, as the offset to the kernel per-cpu area is way above 4G.
 *
 * This forces allocation of a GOT entry for the per-cpu variable, using
 * an ldq instruction with a 'literal' relocation.
 */
#define SHIFT_PERCPU_PTR(var, offset) ({		\
	extern int simple_identifier_##var(void);	\
	unsigned long __ptr, tmp_gp;			\
	asm (	"br %1, 1f			\n\
	1:	ldgp %1, 0(%1)			\n\
		ldq %0, per_cpu__" #var"(%1)\t!literal"	\
		: "=&r"(__ptr), "=&r"(tmp_gp));		\
	(typeof(&per_cpu_var(var)))(__ptr + (offset)); })
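
/*
 * Reading of the asm above (descriptive note): the br/ldgp pair
 * recomputes the GP at run time, and the ldq with the !literal
 * relocation loads the 64-bit address of per_cpu__<var> from the
 * module's GOT, so the result is not limited to a 32-bit GP
 * displacement.  The dummy extern declaration of simple_identifier_##var
 * only serves to force a compile error when 'var' is not a plain
 * identifier.
 */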

#define PER_CPU_ATTRIBUTES	__used

#endif /* MODULE */

/*
 * A percpu variable may point to a discarded region.  The following are
 * established ways to produce a usable pointer from the percpu variable
 * offset.
 */
#define per_cpu(var, cpu) \
	(*SHIFT_PERCPU_PTR(var, per_cpu_offset(cpu)))
#define __get_cpu_var(var) \
	(*SHIFT_PERCPU_PTR(var, my_cpu_offset))
#define __raw_get_cpu_var(var) \
	(*SHIFT_PERCPU_PTR(var, __my_cpu_offset))
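
/*
 * Illustrative usage (variable name is hypothetical; DEFINE_PER_CPU
 * comes from <linux/percpu-defs.h>, and the current-CPU forms are
 * normally used with preemption disabled):
 *
 *	DEFINE_PER_CPU(unsigned long, foo);
 *
 *	per_cpu(foo, cpu)++;		instance of a given CPU
 *	__get_cpu_var(foo) = 0;		instance of the current CPU
 */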

#else /* ! SMP */

#define per_cpu(var, cpu)		(*((void)(cpu), &per_cpu_var(var)))
#define __get_cpu_var(var)		per_cpu_var(var)
#define __raw_get_cpu_var(var)		per_cpu_var(var)

#define PER_CPU_ATTRIBUTES

#endif /* SMP */

#ifdef CONFIG_SMP
#define PER_CPU_BASE_SECTION ".data.percpu"
#else
#define PER_CPU_BASE_SECTION ".data"
#endif

#ifdef CONFIG_SMP

#ifdef MODULE
#define PER_CPU_SHARED_ALIGNED_SECTION ""
#else
#define PER_CPU_SHARED_ALIGNED_SECTION ".shared_aligned"
#endif
#define PER_CPU_FIRST_SECTION ".first"

#else

#define PER_CPU_SHARED_ALIGNED_SECTION ""
#define PER_CPU_FIRST_SECTION ""

#endif

#define PER_CPU_ATTRIBUTES

#endif /* __ALPHA_PERCPU_H */