5 #include <linux/stddef.h>
6 #include <linux/types.h>
7 #include <linux/cache.h>
10 /* Per processor datastructure. %gs points to it while the kernel runs */
struct x8664_pda {
	struct task_struct *pcurrent;	/* 0  Current process */
	unsigned long data_offset;	/* 8 Per cpu data offset from linker
					   address */
	unsigned long kernelstack;	/* 16 top of kernel stack for current */
	unsigned long oldrsp;		/* 24 user rsp for system call */
	int irqcount;			/* 32 Irq nesting counter. Starts -1 */
	unsigned int cpunumber;		/* 36 Logical CPU number */
#ifdef CONFIG_CC_STACKPROTECTOR
	unsigned long stack_canary;	/* 40 stack canary value */
					/* gcc-ABI: this canary MUST be at
					   offset 40 -- TODO confirm against
					   the stack-protector ABI */
#endif
	short nodenumber;		/* number of current node (32k max) */
	short in_bootmem;		/* pda lives in bootmem */
	unsigned int __softirq_pending;
	unsigned int __nmi_count;	/* number of NMI on this CPUs */
	struct mm_struct *active_mm;
	unsigned apic_timer_irqs;
	unsigned apic_perf_irqs;
	unsigned irq_resched_count;
	unsigned irq_call_count;
	unsigned irq_tlb_count;
	unsigned irq_thermal_count;
	unsigned irq_threshold_count;
	unsigned irq_spurious_count;
} ____cacheline_aligned_in_smp;
/* Array of per-CPU PDA pointers, indexed by logical CPU number. */
extern struct x8664_pda **_cpu_pda;
extern void pda_init(int cpu);

/* PDA of logical CPU i (not necessarily the current CPU). */
#define cpu_pda(i) (_cpu_pda[i])
/*
 * There is no fast way to get the base address of the PDA, all the accesses
 * have to mention %fs/%gs. So it needs to be done this Torvaldian way.
 */
/* Deliberately undefined: referencing it for an unsupported field size is a link-time error. */
extern void __bad_pda_field(void) __attribute__((noreturn));
/*
 * proxy_pda doesn't actually exist, but tell gcc it is accessed for
 * all PDA accesses so it gets read/write dependencies right.
 */
extern struct x8664_pda _proxy_pda;
/* Byte offset of a PDA member, usable as an "i" asm operand below. */
#define pda_offset(field) offsetof(struct x8664_pda, field)

/*
 * Apply "op" (mov/add/sub/or) with value val to the current CPU's PDA
 * field through %gs, at the field's natural width (2/4/8 bytes).
 * Unsupported sizes fail at link time via __bad_pda_field().
 */
#define pda_to_op(op, field, val)					\
do {									\
	typedef typeof(_proxy_pda.field) T__;				\
	if (0) { T__ tmp__; tmp__ = (val); }	/* type checking */	\
	switch (sizeof(_proxy_pda.field)) {				\
	case 2:								\
		asm(op "w %1,%%gs:%c2" :				\
		    "+m" (_proxy_pda.field) :				\
		    "ri" ((T__)val),					\
		    "i"(pda_offset(field)));				\
		break;							\
	case 4:								\
		asm(op "l %1,%%gs:%c2" :				\
		    "+m" (_proxy_pda.field) :				\
		    "ri" ((T__)val),					\
		    "i" (pda_offset(field)));				\
		break;							\
	case 8:								\
		asm(op "q %1,%%gs:%c2":					\
		    "+m" (_proxy_pda.field) :				\
		    "ri" ((T__)val),					\
		    "i"(pda_offset(field)));				\
		break;							\
	default:							\
		__bad_pda_field();					\
	}								\
} while (0)
/*
 * Read a field from the current CPU's PDA through %gs with "op" (mov),
 * sized by the field (2/4/8 bytes); evaluates to the field's value.
 * Unsupported sizes fail at link time via __bad_pda_field().
 */
#define pda_from_op(op, field)			\
({						\
	typeof(_proxy_pda.field) ret__;		\
	switch (sizeof(_proxy_pda.field)) {	\
	case 2:					\
		asm(op "w %%gs:%c1,%0" :	\
		    "=r" (ret__) :		\
		    "i" (pda_offset(field)),	\
		    "m" (_proxy_pda.field));	\
		break;				\
	case 4:					\
		asm(op "l %%gs:%c1,%0":		\
		    "=r" (ret__):		\
		    "i" (pda_offset(field)),	\
		    "m" (_proxy_pda.field));	\
		break;				\
	case 8:					\
		asm(op "q %%gs:%c1,%0":		\
		    "=r" (ret__) :		\
		    "i" (pda_offset(field)),	\
		    "m" (_proxy_pda.field));	\
		break;				\
	default:				\
		__bad_pda_field();		\
	}					\
	ret__;					\
})
/* Convenience accessors for the current CPU's PDA fields (via %gs). */
#define read_pda(field) pda_from_op("mov", field)
#define write_pda(field, val) pda_to_op("mov", field, val)
#define add_pda(field, val) pda_to_op("add", field, val)
#define sub_pda(field, val) pda_to_op("sub", field, val)
#define or_pda(field, val) pda_to_op("or", field, val)
/* This is not atomic against other CPUs -- CPU preemption needs to be off */
/* Clears the given bit of the PDA field via btr and evaluates to its old value (0 or -1 sign-extended by sbbl, i.e. nonzero iff set). */
#define test_and_clear_bit_pda(bit, field)				\
({									\
	int old__;							\
	asm volatile("btr %2,%%gs:%c3\n\tsbbl %0,%0"			\
		     : "=r" (old__), "+m" (_proxy_pda.field)		\
		     : "dIr" (bit), "i" (pda_offset(field)) : "memory");\
	old__;								\
})
/* NOTE(review): 5*8 = 40 bytes; presumably consumed by assembly entry code -- confirm against the .S users. */
#define PDA_STACKOFFSET (5*8)
138 #endif /* _ASM_X86_PDA_H */