#ifndef _ASM_X86_PROCESSOR_H
#define _ASM_X86_PROCESSOR_H

#include <asm/processor-flags.h>

/* Forward declarations, a strange C thing */
struct task_struct;
struct mm_struct;
struct vm86;

#include <asm/math_emu.h>
#include <asm/segment.h>
#include <asm/types.h>
#include <uapi/asm/sigcontext.h>
#include <asm/current.h>
#include <asm/cpufeature.h>
#include <asm/page.h>
#include <asm/pgtable_types.h>
#include <asm/percpu.h>
#include <asm/msr.h>
#include <asm/desc_defs.h>
#include <asm/nops.h>
#include <asm/special_insns.h>
#include <asm/fpu/types.h>

#include <linux/personality.h>
#include <linux/cpumask.h>
#include <linux/cache.h>
#include <linux/threads.h>
#include <linux/math64.h>
#include <linux/err.h>
#include <linux/irqflags.h>

/*
 * We handle most unaligned accesses in hardware. On the other hand,
 * unaligned DMA can be quite expensive on some Nehalem processors.
 *
 * Based on this we disable the IP header alignment in network drivers.
 */
#define NET_IP_ALIGN	0

#define HBP_NUM 4
/*
 * Default implementation of macro that returns current
 * instruction pointer ("program counter").
 */
static inline void *current_text_addr(void)
{
	void *pc;

	asm volatile("mov $1f, %0; 1:":"=r" (pc));

	return pc;
}

/*
 * These alignment constraints are for performance in the vSMP case,
 * but in the task_struct case we must also meet hardware imposed
 * alignment requirements of the FPU state:
 */
#ifdef CONFIG_X86_VSMP
# define ARCH_MIN_TASKALIGN		(1 << INTERNODE_CACHE_SHIFT)
# define ARCH_MIN_MMSTRUCT_ALIGN	(1 << INTERNODE_CACHE_SHIFT)
#else
# define ARCH_MIN_TASKALIGN		__alignof__(union fpregs_state)
# define ARCH_MIN_MMSTRUCT_ALIGN	0
#endif

enum tlb_infos {
	ENTRIES,
	NR_INFO
};

extern u16 __read_mostly tlb_lli_4k[NR_INFO];
extern u16 __read_mostly tlb_lli_2m[NR_INFO];
extern u16 __read_mostly tlb_lli_4m[NR_INFO];
extern u16 __read_mostly tlb_lld_4k[NR_INFO];
extern u16 __read_mostly tlb_lld_2m[NR_INFO];
extern u16 __read_mostly tlb_lld_4m[NR_INFO];
extern u16 __read_mostly tlb_lld_1g[NR_INFO];
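/*
 * Illustrative note (not part of the original header): each array above
 * is indexed by the enum, e.g. tlb_lld_4k[ENTRIES] holds the number of
 * 4K data-TLB entries reported by the boot CPU.
 */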

/*
 * CPU type and hardware bug flags. Kept separately for each CPU.
 * Members of this structure are referenced in head.S, so think twice
 * before touching them. [mj]
 */

struct cpuinfo_x86 {
	__u8			x86;		/* CPU family */
	__u8			x86_vendor;	/* CPU vendor */
	__u8			x86_model;
	__u8			x86_mask;
#ifdef CONFIG_X86_32
	char			wp_works_ok;	/* It doesn't on 386's */

	/* Problems on some 486Dx4's and old 386's: */
	char			rfu;
	char			pad0;
	char			pad1;
#else
	/* Number of 4K pages in DTLB/ITLB, combined: */
	int			x86_tlbsize;
#endif
	__u8			x86_virt_bits;
	__u8			x86_phys_bits;
	/* CPUID returned core id bits: */
	__u8			x86_coreid_bits;
	/* Max extended CPUID function supported: */
	__u32			extended_cpuid_level;
	/* Maximum supported CPUID level, -1=no CPUID: */
	int			cpuid_level;
	__u32			x86_capability[NCAPINTS + NBUGINTS];
	char			x86_vendor_id[16];
	char			x86_model_id[64];
	/* In KB - valid for CPUs which support this call: */
	int			x86_cache_size;
	int			x86_cache_alignment;	/* In bytes */
	/* Cache QoS architectural values: */
	int			x86_cache_max_rmid;	/* max index */
	int			x86_cache_occ_scale;	/* scale to bytes */
	int			x86_power;
	unsigned long		loops_per_jiffy;
	/* cpuid returned max cores value: */
	u16			x86_max_cores;
	u16			apicid;
	u16			initial_apicid;
	u16			x86_clflush_size;
	/* number of cores as seen by the OS: */
	u16			booted_cores;
	/* Physical processor id: */
	u16			phys_proc_id;
	/* Core id: */
	u16			cpu_core_id;
	/* Compute unit id */
	u8			compute_unit_id;
	/* Index into per_cpu list: */
	u16			cpu_index;
	u32			microcode;
};

#define X86_VENDOR_INTEL	0
#define X86_VENDOR_CYRIX	1
#define X86_VENDOR_AMD		2
#define X86_VENDOR_UMC		3
#define X86_VENDOR_CENTAUR	5
#define X86_VENDOR_TRANSMETA	7
#define X86_VENDOR_NSC		8
#define X86_VENDOR_NUM		9

#define X86_VENDOR_UNKNOWN	0xff

/*
 * capabilities of CPUs
 */
extern struct cpuinfo_x86	boot_cpu_data;
extern struct cpuinfo_x86	new_cpu_data;

extern struct tss_struct	doublefault_tss;
extern __u32			cpu_caps_cleared[NCAPINTS];
extern __u32			cpu_caps_set[NCAPINTS];

#ifdef CONFIG_SMP
DECLARE_PER_CPU_READ_MOSTLY(struct cpuinfo_x86, cpu_info);
#define cpu_data(cpu)		per_cpu(cpu_info, cpu)
#else
#define cpu_info		boot_cpu_data
#define cpu_data(cpu)		boot_cpu_data
#endif

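/*
 * Usage sketch (illustrative, not from the original header): per-CPU data
 * is normally reached through cpu_data(), which degenerates to
 * boot_cpu_data on !CONFIG_SMP builds:
 *
 *	struct cpuinfo_x86 *c = &cpu_data(smp_processor_id());
 *
 *	if (c->x86_vendor == X86_VENDOR_AMD)
 *		...;
 */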
extern const struct seq_operations cpuinfo_op;

#define cache_line_size()	(boot_cpu_data.x86_cache_alignment)

extern void cpu_detect(struct cpuinfo_x86 *c);

extern void early_cpu_init(void);
extern void identify_boot_cpu(void);
extern void identify_secondary_cpu(struct cpuinfo_x86 *);
extern void print_cpu_info(struct cpuinfo_x86 *);
void print_cpu_msr(struct cpuinfo_x86 *);
extern void init_scattered_cpuid_features(struct cpuinfo_x86 *c);
extern unsigned int init_intel_cacheinfo(struct cpuinfo_x86 *c);
extern void init_amd_cacheinfo(struct cpuinfo_x86 *c);

extern void detect_extended_topology(struct cpuinfo_x86 *c);
extern void detect_ht(struct cpuinfo_x86 *c);

#ifdef CONFIG_X86_32
extern int have_cpuid_p(void);
#else
static inline int have_cpuid_p(void)
{
	return 1;
}
#endif
static inline void native_cpuid(unsigned int *eax, unsigned int *ebx,
				unsigned int *ecx, unsigned int *edx)
{
	/* ecx is often an input as well as an output. */
	asm volatile("cpuid"
	    : "=a" (*eax),
	      "=b" (*ebx),
	      "=c" (*ecx),
	      "=d" (*edx)
	    : "0" (*eax), "2" (*ecx)
	    : "memory");
}

static inline void load_cr3(pgd_t *pgdir)
{
	write_cr3(__pa(pgdir));
}

#ifdef CONFIG_X86_32
/* This is the TSS defined by the hardware. */
struct x86_hw_tss {
	unsigned short		back_link, __blh;
	unsigned long		sp0;
	unsigned short		ss0, __ss0h;
	unsigned long		sp1;

	/*
	 * We don't use ring 1, so ss1 is a convenient scratch space in
	 * the same cacheline as sp0. We use ss1 to cache the value in
	 * MSR_IA32_SYSENTER_CS. When we context switch
	 * MSR_IA32_SYSENTER_CS, we first check if the new value being
	 * written matches ss1, and, if it's not, then we wrmsr the new
	 * value and update ss1.
	 *
	 * The only reason we context switch MSR_IA32_SYSENTER_CS is
	 * that we set it to zero in vm86 tasks to avoid corrupting the
	 * stack if we were to go through the sysenter path from vm86
	 * mode.
	 */
	unsigned short		ss1;	/* MSR_IA32_SYSENTER_CS */

	unsigned short		__ss1h;
	unsigned long		sp2;
	unsigned short		ss2, __ss2h;
	unsigned long		__cr3;
	unsigned long		ip;
	unsigned long		flags;
	unsigned long		ax;
	unsigned long		cx;
	unsigned long		dx;
	unsigned long		bx;
	unsigned long		sp;
	unsigned long		bp;
	unsigned long		si;
	unsigned long		di;
	unsigned short		es, __esh;
	unsigned short		cs, __csh;
	unsigned short		ss, __ssh;
	unsigned short		ds, __dsh;
	unsigned short		fs, __fsh;
	unsigned short		gs, __gsh;
	unsigned short		ldt, __ldth;
	unsigned short		trace;
	unsigned short		io_bitmap_base;

} __attribute__((packed));
#else
struct x86_hw_tss {
	u32			reserved1;
	u64			sp0;
	u64			sp1;
	u64			sp2;
	u64			reserved2;
	u64			ist[7];
	u32			reserved3;
	u32			reserved4;
	u16			reserved5;
	u16			io_bitmap_base;

} __attribute__((packed)) ____cacheline_aligned;
#endif

/*
 * IO-bitmap sizes:
 */
#define IO_BITMAP_BITS			65536
#define IO_BITMAP_BYTES			(IO_BITMAP_BITS/8)
#define IO_BITMAP_LONGS			(IO_BITMAP_BYTES/sizeof(long))
#define IO_BITMAP_OFFSET		offsetof(struct tss_struct, io_bitmap)
#define INVALID_IO_BITMAP_OFFSET	0x8000
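/*
 * Worked numbers (illustrative): IO_BITMAP_BITS covers the full 16-bit
 * I/O port space, so IO_BITMAP_BYTES is 65536/8 = 8192, and on a 64-bit
 * kernel IO_BITMAP_LONGS is 8192/8 = 1024.
 */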

struct tss_struct {
	/*
	 * The hardware state:
	 */
	struct x86_hw_tss	x86_tss;

	/*
	 * The extra 1 is there because the CPU will access an
	 * additional byte beyond the end of the IO permission
	 * bitmap. The extra byte must be all 1 bits, and must
	 * be within the limit.
	 */
	unsigned long		io_bitmap[IO_BITMAP_LONGS + 1];

	/*
	 * Space for the temporary SYSENTER stack:
	 */
	unsigned long		SYSENTER_stack[64];

} ____cacheline_aligned;

DECLARE_PER_CPU_SHARED_ALIGNED(struct tss_struct, cpu_tss);

#ifdef CONFIG_X86_32
DECLARE_PER_CPU(unsigned long, cpu_current_top_of_stack);
#endif

/*
 * Save the original ist values for checking stack pointers during debugging
 */
struct orig_ist {
	unsigned long		ist[7];
};

#ifdef CONFIG_X86_64
DECLARE_PER_CPU(struct orig_ist, orig_ist);

union irq_stack_union {
	char irq_stack[IRQ_STACK_SIZE];
	/*
	 * GCC hardcodes the stack canary as %gs:40. Since the
	 * irq_stack is the object at %gs:0, we reserve the bottom
	 * 48 bytes of the irq stack for the canary.
	 */
	struct {
		char gs_base[40];
		unsigned long stack_canary;
	};
};

DECLARE_PER_CPU_FIRST(union irq_stack_union, irq_stack_union) __visible;
DECLARE_INIT_PER_CPU(irq_stack_union);

DECLARE_PER_CPU(char *, irq_stack_ptr);
DECLARE_PER_CPU(unsigned int, irq_count);
extern asmlinkage void ignore_sysret(void);
#else	/* X86_64 */
#ifdef CONFIG_CC_STACKPROTECTOR
/*
 * Make sure the stack canary segment base is cache-line aligned:
 * "For Intel Atom processors, avoid non zero segment base address
 * that is not aligned to cache line boundary at all cost."
 * (Optim Ref Manual Assembly/Compiler Coding Rule 15.)
 */
struct stack_canary {
	char __pad[20];		/* canary at %gs:20 */
	unsigned long canary;
};
DECLARE_PER_CPU_ALIGNED(struct stack_canary, stack_canary);
#endif
/*
 * per-CPU IRQ handling stacks
 */
struct irq_stack {
	u32			stack[THREAD_SIZE/sizeof(u32)];
} __aligned(THREAD_SIZE);

DECLARE_PER_CPU(struct irq_stack *, hardirq_stack);
DECLARE_PER_CPU(struct irq_stack *, softirq_stack);
#endif	/* X86_64 */

extern unsigned int xstate_size;

struct perf_event;

struct thread_struct {
	/* Cached TLS descriptors: */
	struct desc_struct	tls_array[GDT_ENTRY_TLS_ENTRIES];
	unsigned long		sp0;
	unsigned long		sp;
#ifdef CONFIG_X86_32
	unsigned long		sysenter_cs;
#else
	unsigned short		es;
	unsigned short		ds;
	unsigned short		fsindex;
	unsigned short		gsindex;
#endif
#ifdef CONFIG_X86_32
	unsigned long		ip;
#endif
#ifdef CONFIG_X86_64
	unsigned long		fs;
#endif
	unsigned long		gs;

	/* Save middle states of ptrace breakpoints */
	struct perf_event	*ptrace_bps[HBP_NUM];
	/* Debug status used for traps, single steps, etc... */
	unsigned long		debugreg6;
	/* Keep track of the exact dr7 value set by the user */
	unsigned long		ptrace_dr7;
	/* Fault info: */
	unsigned long		cr2;
	unsigned long		trap_nr;
	unsigned long		error_code;
#ifdef CONFIG_VM86
	/* Virtual 86 mode info */
	struct vm86		*vm86;
#endif
	/* IO permissions: */
	unsigned long		*io_bitmap_ptr;
	unsigned long		iopl;
	/* Max allowed port in the bitmap, in bytes: */
	unsigned		io_bitmap_max;

	/* Floating point and extended processor state */
	struct fpu		fpu;
	/*
	 * WARNING: 'fpu' is dynamically-sized. It *MUST* be at
	 * the end.
	 */
};

/*
 * Set IOPL bits in EFLAGS from given mask
 */
static inline void native_set_iopl_mask(unsigned mask)
{
#ifdef CONFIG_X86_32
	unsigned int reg;

	asm volatile ("pushfl;"
		      "popl %0;"
		      "andl %1, %0;"
		      "orl %2, %0;"
		      "pushl %0;"
		      "popfl"
		      : "=&r" (reg)
		      : "i" (~X86_EFLAGS_IOPL), "r" (mask));
#endif
}

static inline void
native_load_sp0(struct tss_struct *tss, struct thread_struct *thread)
{
	tss->x86_tss.sp0 = thread->sp0;
#ifdef CONFIG_X86_32
	/* Only happens when SEP is enabled, no need to test "SEP"arately: */
	if (unlikely(tss->x86_tss.ss1 != thread->sysenter_cs)) {
		tss->x86_tss.ss1 = thread->sysenter_cs;
		wrmsr(MSR_IA32_SYSENTER_CS, thread->sysenter_cs, 0);
	}
#endif
}

static inline void native_swapgs(void)
{
#ifdef CONFIG_X86_64
	asm volatile("swapgs" ::: "memory");
#endif
}

static inline unsigned long current_top_of_stack(void)
{
#ifdef CONFIG_X86_64
	return this_cpu_read_stable(cpu_tss.x86_tss.sp0);
#else
	/* sp0 on x86_32 is special in and around vm86 mode. */
	return this_cpu_read_stable(cpu_current_top_of_stack);
#endif
}

#ifdef CONFIG_PARAVIRT
#include <asm/paravirt.h>
#else
#define __cpuid			native_cpuid
#define paravirt_enabled()	0
#define paravirt_has(x)		0

static inline void load_sp0(struct tss_struct *tss,
			    struct thread_struct *thread)
{
	native_load_sp0(tss, thread);
}

#define set_iopl_mask native_set_iopl_mask
#endif /* CONFIG_PARAVIRT */

typedef struct {
	unsigned long		seg;
} mm_segment_t;


/* Free all resources held by a thread. */
extern void release_thread(struct task_struct *);

unsigned long get_wchan(struct task_struct *p);

/*
 * Generic CPUID function.
 * Clear %ecx since some CPUs (Cyrix MII) do not set or clear %ecx,
 * resulting in stale register contents being returned.
 */
static inline void cpuid(unsigned int op,
			 unsigned int *eax, unsigned int *ebx,
			 unsigned int *ecx, unsigned int *edx)
{
	*eax = op;
	*ecx = 0;
	__cpuid(eax, ebx, ecx, edx);
}

/* Some CPUID calls want 'count' to be placed in ecx */
static inline void cpuid_count(unsigned int op, int count,
			       unsigned int *eax, unsigned int *ebx,
			       unsigned int *ecx, unsigned int *edx)
{
	*eax = op;
	*ecx = count;
	__cpuid(eax, ebx, ecx, edx);
}

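/*
 * Usage sketch (illustrative, not from the original header): leaf 0
 * returns the maximum basic leaf in EAX and the vendor string in
 * EBX/EDX/ECX; leaf 4 takes a subleaf in ECX, hence cpuid_count():
 *
 *	unsigned int eax, ebx, ecx, edx;
 *
 *	cpuid(0, &eax, &ebx, &ecx, &edx);
 *	cpuid_count(4, 1, &eax, &ebx, &ecx, &edx);
 */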
/*
 * CPUID functions returning a single datum
 */
static inline unsigned int cpuid_eax(unsigned int op)
{
	unsigned int eax, ebx, ecx, edx;

	cpuid(op, &eax, &ebx, &ecx, &edx);

	return eax;
}

static inline unsigned int cpuid_ebx(unsigned int op)
{
	unsigned int eax, ebx, ecx, edx;

	cpuid(op, &eax, &ebx, &ecx, &edx);

	return ebx;
}

static inline unsigned int cpuid_ecx(unsigned int op)
{
	unsigned int eax, ebx, ecx, edx;

	cpuid(op, &eax, &ebx, &ecx, &edx);

	return ecx;
}

static inline unsigned int cpuid_edx(unsigned int op)
{
	unsigned int eax, ebx, ecx, edx;

	cpuid(op, &eax, &ebx, &ecx, &edx);

	return edx;
}

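/*
 * Example (illustrative): when only one register is of interest, the
 * single-datum helpers avoid the scratch variables, e.g. reading the
 * maximum extended leaf:
 *
 *	unsigned int max_ext_leaf = cpuid_eax(0x80000000);
 */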
/* REP NOP (PAUSE) is a good thing to insert into busy-wait loops. */
static __always_inline void rep_nop(void)
{
	asm volatile("rep; nop" ::: "memory");
}

static __always_inline void cpu_relax(void)
{
	rep_nop();
}

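/*
 * Typical use (illustrative sketch; 'ready' is a made-up variable):
 * cpu_relax() belongs inside busy-wait loops so the pipeline is not
 * saturated while spinning on a flag set by another CPU:
 *
 *	while (!READ_ONCE(ready))
 *		cpu_relax();
 */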
#define cpu_relax_lowlatency() cpu_relax()

/* Stop speculative execution and prefetching of modified code. */
static inline void sync_core(void)
{
	int tmp;

#ifdef CONFIG_M486
	/*
	 * Do a CPUID if available, otherwise do a jump. The jump
	 * can conveniently enough be the jump around CPUID.
	 */
	asm volatile("cmpl %2,%1\n\t"
		     "jl 1f\n\t"
		     "cpuid\n"
		     "1:"
		     : "=a" (tmp)
		     : "rm" (boot_cpu_data.cpuid_level), "ri" (0), "0" (1)
		     : "ebx", "ecx", "edx", "memory");
#else
	/*
	 * CPUID is a barrier to speculative execution.
	 * Prefetched instructions are automatically
	 * invalidated when modified.
	 */
	asm volatile("cpuid"
		     : "=a" (tmp)
		     : "0" (1)
		     : "ebx", "ecx", "edx", "memory");
#endif
}

extern void select_idle_routine(const struct cpuinfo_x86 *c);
extern void init_amd_e400_c1e_mask(void);

extern unsigned long		boot_option_idle_override;
extern bool			amd_e400_c1e_detected;

enum idle_boot_override {IDLE_NO_OVERRIDE=0, IDLE_HALT, IDLE_NOMWAIT,
			 IDLE_POLL};

extern void enable_sep_cpu(void);
extern int sysenter_setup(void);

extern void early_trap_init(void);
void early_trap_pf_init(void);

/* Defined in head.S */
extern struct desc_ptr		early_gdt_descr;

extern void cpu_set_gdt(int);
extern void switch_to_new_gdt(int);
extern void load_percpu_segment(int);
extern void cpu_init(void);

static inline unsigned long get_debugctlmsr(void)
{
	unsigned long debugctlmsr = 0;

#ifndef CONFIG_X86_DEBUGCTLMSR
	if (boot_cpu_data.x86 < 6)
		return 0;
#endif
	rdmsrl(MSR_IA32_DEBUGCTLMSR, debugctlmsr);

	return debugctlmsr;
}

static inline void update_debugctlmsr(unsigned long debugctlmsr)
{
#ifndef CONFIG_X86_DEBUGCTLMSR
	if (boot_cpu_data.x86 < 6)
		return;
#endif
	wrmsrl(MSR_IA32_DEBUGCTLMSR, debugctlmsr);
}

extern void set_task_blockstep(struct task_struct *task, bool on);

/* Boot loader type from the setup header: */
extern int			bootloader_type;
extern int			bootloader_version;

extern char			ignore_fpu_irq;

#define HAVE_ARCH_PICK_MMAP_LAYOUT 1
#define ARCH_HAS_PREFETCHW
#define ARCH_HAS_SPINLOCK_PREFETCH

#ifdef CONFIG_X86_32
# define BASE_PREFETCH		""
# define ARCH_HAS_PREFETCH
#else
# define BASE_PREFETCH		"prefetcht0 %P1"
#endif

/*
 * Prefetch instructions for Pentium III (+) and AMD Athlon (+)
 *
 * It's not worth caring about 3dnow prefetches for the K6
 * because they are microcoded there and very slow.
 */
static inline void prefetch(const void *x)
{
	alternative_input(BASE_PREFETCH, "prefetchnta %P1",
			  X86_FEATURE_XMM,
			  "m" (*(const char *)x));
}

/*
 * 3dnow prefetch to get an exclusive cache line.
 * Useful for spinlocks to avoid one state transition in the
 * cache coherency protocol:
 */
static inline void prefetchw(const void *x)
{
	alternative_input(BASE_PREFETCH, "prefetchw %P1",
			  X86_FEATURE_3DNOWPREFETCH,
			  "m" (*(const char *)x));
}

static inline void spin_lock_prefetch(const void *x)
{
	prefetchw(x);
}

#define TOP_OF_INIT_STACK ((unsigned long)&init_stack + sizeof(init_stack) - \
			   TOP_OF_KERNEL_STACK_PADDING)

#ifdef CONFIG_X86_32
/*
 * User space process size: 3GB (default).
 */
#define TASK_SIZE		PAGE_OFFSET
#define TASK_SIZE_MAX		TASK_SIZE
#define STACK_TOP		TASK_SIZE
#define STACK_TOP_MAX		STACK_TOP

#define INIT_THREAD  {							  \
	.sp0			= TOP_OF_INIT_STACK,			  \
	.sysenter_cs		= __KERNEL_CS,				  \
	.io_bitmap_ptr		= NULL,					  \
}

extern unsigned long thread_saved_pc(struct task_struct *tsk);

/*
 * TOP_OF_KERNEL_STACK_PADDING reserves 8 bytes on top of the ring0 stack.
 * This is necessary to guarantee that the entire "struct pt_regs"
 * is accessible even if the CPU hasn't stored the SS/ESP registers
 * on the stack (interrupt gate does not save these registers
 * when switching to the same priv ring).
 * Therefore beware: accessing the ss/esp fields of the
 * "struct pt_regs" is possible, but they may contain
 * completely wrong values.
 */
#define task_pt_regs(task) \
({									\
	unsigned long __ptr = (unsigned long)task_stack_page(task);	\
	__ptr += THREAD_SIZE - TOP_OF_KERNEL_STACK_PADDING;		\
	((struct pt_regs *)__ptr) - 1;					\
})

#define KSTK_ESP(task)		(task_pt_regs(task)->sp)

#else
/*
 * User space process size: 47 bits minus one guard page. The guard
 * page is necessary on Intel CPUs: if a SYSCALL instruction is at
 * the highest possible canonical userspace address, then that
 * syscall will enter the kernel with a non-canonical return
 * address, and SYSRET will explode dangerously. We avoid this
 * particular problem by preventing anything from being mapped
 * at the maximum canonical address.
 */
#define TASK_SIZE_MAX	((1UL << 47) - PAGE_SIZE)
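/*
 * Worked value (illustrative): with 4K pages this evaluates to
 * (1UL << 47) - 4096 = 0x00007ffffffff000, i.e. userspace mappings
 * stop one page short of the 47-bit canonical boundary.
 */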

/* This decides where the kernel will search for a free chunk of vm
 * space during mmap's.
 */
#define IA32_PAGE_OFFSET	((current->personality & ADDR_LIMIT_3GB) ? \
					0xc0000000 : 0xFFFFe000)

#define TASK_SIZE		(test_thread_flag(TIF_ADDR32) ? \
					IA32_PAGE_OFFSET : TASK_SIZE_MAX)
#define TASK_SIZE_OF(child)	((test_tsk_thread_flag(child, TIF_ADDR32)) ? \
					IA32_PAGE_OFFSET : TASK_SIZE_MAX)

#define STACK_TOP		TASK_SIZE
#define STACK_TOP_MAX		TASK_SIZE_MAX

#define INIT_THREAD  { \
	.sp0 = TOP_OF_INIT_STACK \
}

/*
 * Return saved PC of a blocked thread.
 * What is this good for? It will always be the scheduler or ret_from_fork.
 */
#define thread_saved_pc(t)	(*(unsigned long *)((t)->thread.sp - 8))

#define task_pt_regs(tsk)	((struct pt_regs *)(tsk)->thread.sp0 - 1)
extern unsigned long KSTK_ESP(struct task_struct *task);

#endif /* CONFIG_X86_64 */

extern void start_thread(struct pt_regs *regs, unsigned long new_ip,
					       unsigned long new_sp);

/*
 * This decides where the kernel will search for a free chunk of vm
 * space during mmap's.
 */
#define TASK_UNMAPPED_BASE	(PAGE_ALIGN(TASK_SIZE / 3))

#define KSTK_EIP(task)		(task_pt_regs(task)->ip)

/* Get/set a process' ability to use the timestamp counter instruction */
#define GET_TSC_CTL(adr)	get_tsc_mode((adr))
#define SET_TSC_CTL(val)	set_tsc_mode((val))

extern int get_tsc_mode(unsigned long adr);
extern int set_tsc_mode(unsigned int val);

/* Register/unregister a process' MPX-related resources */
#define MPX_ENABLE_MANAGEMENT()		mpx_enable_management()
#define MPX_DISABLE_MANAGEMENT()	mpx_disable_management()

#ifdef CONFIG_X86_INTEL_MPX
extern int mpx_enable_management(void);
extern int mpx_disable_management(void);
#else
static inline int mpx_enable_management(void)
{
	return -EINVAL;
}
static inline int mpx_disable_management(void)
{
	return -EINVAL;
}
#endif /* CONFIG_X86_INTEL_MPX */

extern u16 amd_get_nb_id(int cpu);
extern u32 amd_get_nodes_per_socket(void);

static inline uint32_t hypervisor_cpuid_base(const char *sig, uint32_t leaves)
{
	uint32_t base, eax, signature[3];

	for (base = 0x40000000; base < 0x40010000; base += 0x100) {
		cpuid(base, &eax, &signature[0], &signature[1], &signature[2]);

		if (!memcmp(sig, signature, 12) &&
		    (leaves == 0 || ((eax - base) >= leaves)))
			return base;
	}

	return 0;
}
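/*
 * Usage sketch (illustrative): a guest can probe for a known hypervisor
 * signature in the 0x40000000 CPUID range, e.g. KVM's 12-byte signature:
 *
 *	if (hypervisor_cpuid_base("KVMKVMKVM\0\0\0", 0))
 *		...;	(running under KVM)
 */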

extern unsigned long arch_align_stack(unsigned long sp);
extern void free_init_pages(char *what, unsigned long begin, unsigned long end);

void default_idle(void);
#ifdef CONFIG_XEN
bool xen_set_default_idle(void);
#else
#define xen_set_default_idle 0
#endif

void stop_this_cpu(void *dummy);
void df_debug(struct pt_regs *regs, long error_code);
#endif /* _ASM_X86_PROCESSOR_H */