include/asm-x86/processor.h
1 #ifndef __ASM_X86_PROCESSOR_H
2 #define __ASM_X86_PROCESSOR_H
3
4 #include <asm/processor-flags.h>
5
6 /* Forward declaration, a strange C thing */
7 struct task_struct;
8 struct mm_struct;
9
10 #include <asm/page.h>
11 #include <asm/percpu.h>
12 #include <asm/system.h>
13 #include <asm/percpu.h>
14 #include <linux/cpumask.h>
15 #include <linux/cache.h>
16
17 /*
18 * Default implementation of the function that returns the current
19 * instruction pointer ("program counter").
20 */
21 static inline void *current_text_addr(void)
22 {
23 void *pc;
24 asm volatile("mov $1f,%0\n1:":"=r" (pc));
25 return pc;
26 }
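/*
 * Minimal usage sketch (hypothetical caller; assumes <linux/kernel.h>
 * for printk): the pointer returned is the address of the label
 * planted by the asm above.
 */
static inline void report_text_addr(void)
{
        printk(KERN_DEBUG "executing near %p\n", current_text_addr());
}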
27
28 #ifdef CONFIG_X86_VSMP
29 #define ARCH_MIN_TASKALIGN (1 << INTERNODE_CACHE_SHIFT)
30 #define ARCH_MIN_MMSTRUCT_ALIGN (1 << INTERNODE_CACHE_SHIFT)
31 #else
32 #define ARCH_MIN_TASKALIGN 16
33 #define ARCH_MIN_MMSTRUCT_ALIGN 0
34 #endif
35
36 /*
37 * CPU type and hardware bug flags. Kept separately for each CPU.
38 * Members of this structure are referenced in head.S, so think twice
39 * before touching them. [mj]
40 */
41
42 struct cpuinfo_x86 {
43 __u8 x86; /* CPU family */
44 __u8 x86_vendor; /* CPU vendor */
45 __u8 x86_model;
46 __u8 x86_mask;
47 #ifdef CONFIG_X86_32
48 char wp_works_ok; /* It doesn't on 386s */
49 char hlt_works_ok; /* Problems on some 486DX4s and old 386s */
50 char hard_math;
51 char rfu;
52 char fdiv_bug;
53 char f00f_bug;
54 char coma_bug;
55 char pad0;
56 #else
57 /* Number of 4K pages in DTLB/ITLB combined */
58 int x86_tlbsize;
59 __u8 x86_virt_bits, x86_phys_bits;
60 /* cpuid returned core id bits */
61 __u8 x86_coreid_bits;
62 /* Max extended CPUID function supported */
63 __u32 extended_cpuid_level;
64 #endif
65 int cpuid_level; /* Maximum supported CPUID level, -1=no CPUID */
66 __u32 x86_capability[NCAPINTS];
67 char x86_vendor_id[16];
68 char x86_model_id[64];
69 int x86_cache_size; /* in KB - valid for CPUs which support this
70 call */
71 int x86_cache_alignment; /* In bytes */
72 int x86_power;
73 unsigned long loops_per_jiffy;
74 #ifdef CONFIG_SMP
75 cpumask_t llc_shared_map; /* cpus sharing the last level cache */
76 #endif
77 unsigned char x86_max_cores; /* cpuid returned max cores value */
78 unsigned char apicid;
79 unsigned short x86_clflush_size;
80 #ifdef CONFIG_SMP
81 unsigned char booted_cores; /* number of cores as seen by OS */
82 __u8 phys_proc_id; /* Physical processor id. */
83 __u8 cpu_core_id; /* Core id */
84 __u8 cpu_index; /* index into per_cpu list */
85 #endif
86 } __attribute__((__aligned__(SMP_CACHE_BYTES)));
87
88 #define X86_VENDOR_INTEL 0
89 #define X86_VENDOR_CYRIX 1
90 #define X86_VENDOR_AMD 2
91 #define X86_VENDOR_UMC 3
92 #define X86_VENDOR_NEXGEN 4
93 #define X86_VENDOR_CENTAUR 5
94 #define X86_VENDOR_TRANSMETA 7
95 #define X86_VENDOR_NSC 8
96 #define X86_VENDOR_NUM 9
97 #define X86_VENDOR_UNKNOWN 0xff
98
99 /*
100 * capabilities of CPUs
101 */
102 extern struct cpuinfo_x86 boot_cpu_data;
103 extern struct cpuinfo_x86 new_cpu_data;
104 extern struct tss_struct doublefault_tss;
105
106 #ifdef CONFIG_SMP
107 DECLARE_PER_CPU(struct cpuinfo_x86, cpu_info);
108 #define cpu_data(cpu) per_cpu(cpu_info, cpu)
109 #define current_cpu_data cpu_data(smp_processor_id())
110 #else
111 #define cpu_data(cpu) boot_cpu_data
112 #define current_cpu_data boot_cpu_data
113 #endif
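/*
 * Access sketch (hypothetical helper; assumes <linux/kernel.h> for
 * printk): cpu_data(cpu) yields a struct cpuinfo_x86 lvalue whether or
 * not CONFIG_SMP is set, so callers can read the fields uniformly.
 */
static inline void print_cpu_signature(int cpu)
{
        struct cpuinfo_x86 *c = &cpu_data(cpu);

        printk(KERN_INFO "CPU%d: family %u model %u stepping %u\n",
               cpu, c->x86, c->x86_model, c->x86_mask);
}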
114
115 void cpu_detect(struct cpuinfo_x86 *c);
116
117 extern void identify_cpu(struct cpuinfo_x86 *);
118 extern void identify_boot_cpu(void);
119 extern void identify_secondary_cpu(struct cpuinfo_x86 *);
120 extern void print_cpu_info(struct cpuinfo_x86 *);
121 extern void init_scattered_cpuid_features(struct cpuinfo_x86 *c);
122 extern unsigned int init_intel_cacheinfo(struct cpuinfo_x86 *c);
123 extern unsigned short num_cache_leaves;
124
125 #if defined(CONFIG_X86_HT) || defined(CONFIG_X86_64)
126 extern void detect_ht(struct cpuinfo_x86 *c);
127 #else
128 static inline void detect_ht(struct cpuinfo_x86 *c) {}
129 #endif
130
131 static inline void native_cpuid(unsigned int *eax, unsigned int *ebx,
132 unsigned int *ecx, unsigned int *edx)
133 {
134 /* ecx is often an input as well as an output. */
135 __asm__("cpuid"
136 : "=a" (*eax),
137 "=b" (*ebx),
138 "=c" (*ecx),
139 "=d" (*edx)
140 : "0" (*eax), "2" (*ecx));
141 }
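/*
 * Raw CPUID sketch (hypothetical helper; assumes <linux/string.h> for
 * memcpy): leaf 0 returns the highest standard leaf in EAX and the
 * 12-byte vendor string in EBX, EDX, ECX, in that order.
 */
static inline void example_read_vendor(char *vendor /* at least 13 bytes */)
{
        unsigned int eax = 0, ebx, ecx = 0, edx;

        native_cpuid(&eax, &ebx, &ecx, &edx);
        memcpy(vendor + 0, &ebx, 4);
        memcpy(vendor + 4, &edx, 4);
        memcpy(vendor + 8, &ecx, 4);
        vendor[12] = '\0';
}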
142
143 static inline void load_cr3(pgd_t *pgdir)
144 {
145 write_cr3(__pa(pgdir));
146 }
147
148 #ifdef CONFIG_X86_32
149 /* This is the TSS defined by the hardware. */
150 struct x86_hw_tss {
151 unsigned short back_link, __blh;
152 unsigned long sp0;
153 unsigned short ss0, __ss0h;
154 unsigned long sp1;
155 unsigned short ss1, __ss1h; /* ss1 caches MSR_IA32_SYSENTER_CS */
156 unsigned long sp2;
157 unsigned short ss2, __ss2h;
158 unsigned long __cr3;
159 unsigned long ip;
160 unsigned long flags;
161 unsigned long ax, cx, dx, bx;
162 unsigned long sp, bp, si, di;
163 unsigned short es, __esh;
164 unsigned short cs, __csh;
165 unsigned short ss, __ssh;
166 unsigned short ds, __dsh;
167 unsigned short fs, __fsh;
168 unsigned short gs, __gsh;
169 unsigned short ldt, __ldth;
170 unsigned short trace, io_bitmap_base;
171 } __attribute__((packed));
172 #else
173 struct x86_hw_tss {
174 u32 reserved1;
175 u64 sp0;
176 u64 sp1;
177 u64 sp2;
178 u64 reserved2;
179 u64 ist[7];
180 u32 reserved3;
181 u32 reserved4;
182 u16 reserved5;
183 u16 io_bitmap_base;
184 } __attribute__((packed)) ____cacheline_aligned;
185 #endif
186
187 /*
188 * Size of io_bitmap.
189 */
190 #define IO_BITMAP_BITS 65536
191 #define IO_BITMAP_BYTES (IO_BITMAP_BITS/8)
192 #define IO_BITMAP_LONGS (IO_BITMAP_BYTES/sizeof(long))
193 #define IO_BITMAP_OFFSET offsetof(struct tss_struct, io_bitmap)
194 #define INVALID_IO_BITMAP_OFFSET 0x8000
195 #define INVALID_IO_BITMAP_OFFSET_LAZY 0x9000
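/*
 * Worked out: 65536 bits / 8 = 8192 bytes, i.e. 2048 longs on 32-bit
 * and 1024 longs on 64-bit kernels. The two INVALID_* offsets point
 * beyond the TSS limit, so every I/O port access faults while no
 * bitmap is installed.
 */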
196
197 struct tss_struct {
198 struct x86_hw_tss x86_tss;
199
200 /*
201 * The extra 1 is there because the CPU will access an
202 * additional byte beyond the end of the IO permission
203 * bitmap. The extra byte must be all 1 bits, and must
204 * be within the limit.
205 */
206 unsigned long io_bitmap[IO_BITMAP_LONGS + 1];
207 /*
208 * Cache the current maximum and the last task that used the bitmap:
209 */
210 unsigned long io_bitmap_max;
211 struct thread_struct *io_bitmap_owner;
212 /*
213 * pads the TSS to be cacheline-aligned (size is 0x100)
214 */
215 unsigned long __cacheline_filler[35];
216 /*
217 * .. and then another 0x100 bytes for emergency kernel stack
218 */
219 unsigned long stack[64];
220 } __attribute__((packed));
221
222 DECLARE_PER_CPU(struct tss_struct, init_tss);
223
224 /* Save the original ist values for checking stack pointers during debugging */
225 struct orig_ist {
226 unsigned long ist[7];
227 };
228
229 #ifdef CONFIG_X86_32
230 struct i387_fsave_struct {
231 long cwd;
232 long swd;
233 long twd;
234 long fip;
235 long fcs;
236 long foo;
237 long fos;
238 long st_space[20]; /* 8*10 bytes for each FP-reg = 80 bytes */
239 long status; /* software status information */
240 };
241
242 struct i387_fxsave_struct {
243 unsigned short cwd;
244 unsigned short swd;
245 unsigned short twd;
246 unsigned short fop;
247 long fip;
248 long fcs;
249 long foo;
250 long fos;
251 long mxcsr;
252 long mxcsr_mask;
253 long st_space[32]; /* 8*16 bytes for each FP-reg = 128 bytes */
254 long xmm_space[32]; /* 8*16 bytes for each XMM-reg = 128 bytes */
255 long padding[56];
256 } __attribute__((aligned(16)));
257
258 struct i387_soft_struct {
259 long cwd;
260 long swd;
261 long twd;
262 long fip;
263 long fcs;
264 long foo;
265 long fos;
266 long st_space[20]; /* 8*10 bytes for each FP-reg = 80 bytes */
267 unsigned char ftop, changed, lookahead, no_update, rm, alimit;
268 struct info *info;
269 unsigned long entry_eip;
270 };
271
272 union i387_union {
273 struct i387_fsave_struct fsave;
274 struct i387_fxsave_struct fxsave;
275 struct i387_soft_struct soft;
276 };
277
278 # include "processor_32.h"
279 #else
280 struct i387_fxsave_struct {
281 u16 cwd;
282 u16 swd;
283 u16 twd;
284 u16 fop;
285 u64 rip;
286 u64 rdp;
287 u32 mxcsr;
288 u32 mxcsr_mask;
289 u32 st_space[32]; /* 8*16 bytes for each FP-reg = 128 bytes */
290 u32 xmm_space[64]; /* 16*16 bytes for each XMM-reg = 256 bytes */
291 u32 padding[24];
292 } __attribute__((aligned(16)));
293
294 union i387_union {
295 struct i387_fxsave_struct fxsave;
296 };
297
298 # include "processor_64.h"
299 #endif
300
301 extern void print_cpu_info(struct cpuinfo_x86 *);
302 extern void init_scattered_cpuid_features(struct cpuinfo_x86 *c);
303 extern unsigned int init_intel_cacheinfo(struct cpuinfo_x86 *c);
304 extern unsigned short num_cache_leaves;
305
306 struct thread_struct {
307 /* cached TLS descriptors. */
308 struct desc_struct tls_array[GDT_ENTRY_TLS_ENTRIES];
309 unsigned long sp0;
310 unsigned long sp;
311 #ifdef CONFIG_X86_32
312 unsigned long sysenter_cs;
313 #else
314 unsigned long usersp; /* Copy from PDA */
315 unsigned short es, ds, fsindex, gsindex;
316 #endif
317 unsigned long ip;
318 unsigned long fs;
319 unsigned long gs;
320 /* Hardware debugging registers */
321 unsigned long debugreg0;
322 unsigned long debugreg1;
323 unsigned long debugreg2;
324 unsigned long debugreg3;
325 unsigned long debugreg6;
326 unsigned long debugreg7;
327 /* fault info */
328 unsigned long cr2, trap_no, error_code;
329 /* floating point info */
330 union i387_union i387 __attribute__((aligned(16)));
331 #ifdef CONFIG_X86_32
332 /* virtual 86 mode info */
333 struct vm86_struct __user *vm86_info;
334 unsigned long screen_bitmap;
335 unsigned long v86flags, v86mask, saved_sp0;
336 unsigned int saved_fs, saved_gs;
337 #endif
338 /* IO permissions */
339 unsigned long *io_bitmap_ptr;
340 unsigned long iopl;
341 /* max allowed port in the bitmap, in bytes: */
342 unsigned io_bitmap_max;
343 /* MSR_IA32_DEBUGCTLMSR value to switch in if TIF_DEBUGCTLMSR is set. */
344 unsigned long debugctlmsr;
345 /* Debug Store - if not 0 points to a DS Save Area configuration;
346 * goes into MSR_IA32_DS_AREA */
347 unsigned long ds_area_msr;
348 };
349
350 static inline unsigned long native_get_debugreg(int regno)
351 {
352 unsigned long val = 0; /* Damn you, gcc! */
353
354 switch (regno) {
355 case 0:
356 asm("mov %%db0, %0" :"=r" (val)); break;
357 case 1:
358 asm("mov %%db1, %0" :"=r" (val)); break;
359 case 2:
360 asm("mov %%db2, %0" :"=r" (val)); break;
361 case 3:
362 asm("mov %%db3, %0" :"=r" (val)); break;
363 case 6:
364 asm("mov %%db6, %0" :"=r" (val)); break;
365 case 7:
366 asm("mov %%db7, %0" :"=r" (val)); break;
367 default:
368 BUG();
369 }
370 return val;
371 }
372
373 static inline void native_set_debugreg(int regno, unsigned long value)
374 {
375 switch (regno) {
376 case 0:
377 asm("mov %0,%%db0" : /* no output */ :"r" (value));
378 break;
379 case 1:
380 asm("mov %0,%%db1" : /* no output */ :"r" (value));
381 break;
382 case 2:
383 asm("mov %0,%%db2" : /* no output */ :"r" (value));
384 break;
385 case 3:
386 asm("mov %0,%%db3" : /* no output */ :"r" (value));
387 break;
388 case 6:
389 asm("mov %0,%%db6" : /* no output */ :"r" (value));
390 break;
391 case 7:
392 asm("mov %0,%%db7" : /* no output */ :"r" (value));
393 break;
394 default:
395 BUG();
396 }
397 }
398
399 /*
400 * Set IOPL bits in EFLAGS from given mask
401 */
402 static inline void native_set_iopl_mask(unsigned mask)
403 {
404 #ifdef CONFIG_X86_32
405 unsigned int reg;
406 __asm__ __volatile__ ("pushfl;"
407 "popl %0;"
408 "andl %1, %0;"
409 "orl %2, %0;"
410 "pushl %0;"
411 "popfl"
412 : "=&r" (reg)
413 : "i" (~X86_EFLAGS_IOPL), "r" (mask));
414 #endif
415 }
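/*
 * Mask sketch (hypothetical caller): IOPL occupies bits 12-13 of
 * EFLAGS, so an I/O privilege level of 0..3 becomes a mask by shifting
 * it up by 12 before calling the helper above.
 */
static inline void example_set_iopl(unsigned level)
{
        native_set_iopl_mask((level & 3) << 12);
}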
416
417 static inline void native_load_sp0(struct tss_struct *tss,
418 struct thread_struct *thread)
419 {
420 tss->x86_tss.sp0 = thread->sp0;
421 #ifdef CONFIG_X86_32
422 /* Only happens when SEP is enabled, no need to test "SEP"arately */
423 if (unlikely(tss->x86_tss.ss1 != thread->sysenter_cs)) {
424 tss->x86_tss.ss1 = thread->sysenter_cs;
425 wrmsr(MSR_IA32_SYSENTER_CS, thread->sysenter_cs, 0);
426 }
427 #endif
428 }
429
430 #ifdef CONFIG_PARAVIRT
431 #include <asm/paravirt.h>
432 #else
433 #define __cpuid native_cpuid
434 #define paravirt_enabled() 0
435
436 /*
437 * These special macros can be used to get or set a debugging register
438 */
439 #define get_debugreg(var, register) \
440 (var) = native_get_debugreg(register)
441 #define set_debugreg(value, register) \
442 native_set_debugreg(register, value)
443
444 static inline void load_sp0(struct tss_struct *tss,
445 struct thread_struct *thread)
446 {
447 native_load_sp0(tss, thread);
448 }
449
450 #define set_iopl_mask native_set_iopl_mask
451 #endif /* CONFIG_PARAVIRT */
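/*
 * Debug register sketch (hypothetical helper; real users go through
 * ptrace or kprobes): arm a linear address in %db0 and read %db6 back
 * to see which debug condition fired.
 */
static inline unsigned long example_debugreg_roundtrip(unsigned long addr)
{
        unsigned long dr6;

        set_debugreg(addr, 0);          /* breakpoint linear address */
        get_debugreg(dr6, 6);           /* status register */
        return dr6;
}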
452
453 /*
454 * Save the cr4 feature set we're using (i.e.
455 * Pentium 4MB enable and PPro Global page
456 * enable), so that any CPUs that boot up
457 * after us can get the correct flags.
458 */
459 extern unsigned long mmu_cr4_features;
460
461 static inline void set_in_cr4(unsigned long mask)
462 {
463 unsigned cr4;
464 mmu_cr4_features |= mask;
465 cr4 = read_cr4();
466 cr4 |= mask;
467 write_cr4(cr4);
468 }
469
470 static inline void clear_in_cr4(unsigned long mask)
471 {
472 unsigned cr4;
473 mmu_cr4_features &= ~mask;
474 cr4 = read_cr4();
475 cr4 &= ~mask;
476 write_cr4(cr4);
477 }
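/*
 * Usage sketch (flag name from <asm/processor-flags.h>): enabling
 * global pages sets the bit in %cr4 on this CPU and records it in
 * mmu_cr4_features so later-booting CPUs pick it up too.
 */
static inline void example_enable_global_pages(void)
{
        set_in_cr4(X86_CR4_PGE);
}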
478
479 struct microcode_header {
480 unsigned int hdrver;
481 unsigned int rev;
482 unsigned int date;
483 unsigned int sig;
484 unsigned int cksum;
485 unsigned int ldrver;
486 unsigned int pf;
487 unsigned int datasize;
488 unsigned int totalsize;
489 unsigned int reserved[3];
490 };
491
492 struct microcode {
493 struct microcode_header hdr;
494 unsigned int bits[0];
495 };
496
497 typedef struct microcode microcode_t;
498 typedef struct microcode_header microcode_header_t;
499
500 /* The microcode format is extended from Prescott processors onwards */
501 struct extended_signature {
502 unsigned int sig;
503 unsigned int pf;
504 unsigned int cksum;
505 };
506
507 struct extended_sigtable {
508 unsigned int count;
509 unsigned int cksum;
510 unsigned int reserved[3];
511 struct extended_signature sigs[0];
512 };
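/*
 * Layout sketch (defaults per the Intel microcode update format, taken
 * as an assumption here): the header is 48 bytes, datasize 0 means the
 * traditional 2000 data bytes, totalsize 0 means 2048 bytes total, and
 * any extended signature table starts right after the data block.
 */
static inline struct extended_sigtable *example_ext_sigtable(struct microcode *mc)
{
        unsigned int data  = mc->hdr.datasize  ? mc->hdr.datasize  : 2000;
        unsigned int total = mc->hdr.totalsize ? mc->hdr.totalsize : 2048;

        if (total <= sizeof(struct microcode_header) + data)
                return NULL;            /* no extended table present */
        return (struct extended_sigtable *)
                ((char *)mc + sizeof(struct microcode_header) + data);
}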
513
514 typedef struct {
515 unsigned long seg;
516 } mm_segment_t;
517
518
519 /*
520 * create a kernel thread without removing it from tasklists
521 */
522 extern int kernel_thread(int (*fn)(void *), void *arg, unsigned long flags);
523
524 /* Free all resources held by a thread. */
525 extern void release_thread(struct task_struct *);
526
527 /* Prepare to copy thread state - unlazy all lazy status */
528 extern void prepare_to_copy(struct task_struct *tsk);
529
530 unsigned long get_wchan(struct task_struct *p);
531
532 /*
533 * Generic CPUID function
534 * clear %ecx, since some CPUs (Cyrix MII) do not set or clear %ecx,
535 * resulting in stale register contents being returned.
536 */
537 static inline void cpuid(unsigned int op,
538 unsigned int *eax, unsigned int *ebx,
539 unsigned int *ecx, unsigned int *edx)
540 {
541 *eax = op;
542 *ecx = 0;
543 __cpuid(eax, ebx, ecx, edx);
544 }
545
546 /* Some CPUID calls want 'count' to be placed in ecx */
547 static inline void cpuid_count(unsigned int op, int count,
548 unsigned int *eax, unsigned int *ebx,
549 unsigned int *ecx, unsigned int *edx)
550 {
551 *eax = op;
552 *ecx = count;
553 __cpuid(eax, ebx, ecx, edx);
554 }
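/*
 * Subleaf sketch (hypothetical helper; bit layout from CPUID leaf 4):
 * deterministic cache parameters are enumerated one subleaf at a time,
 * which is exactly why 'count' must be placed in %ecx instead of being
 * cleared.
 */
static inline unsigned int example_cache_level(int index)
{
        unsigned int eax, ebx, ecx, edx;

        cpuid_count(4, index, &eax, &ebx, &ecx, &edx);
        return (eax >> 5) & 0x7;        /* 0 = no more caches, 1 = L1, ... */
}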
555
556 /*
557 * CPUID functions returning a single datum
558 */
559 static inline unsigned int cpuid_eax(unsigned int op)
560 {
561 unsigned int eax, ebx, ecx, edx;
562
563 cpuid(op, &eax, &ebx, &ecx, &edx);
564 return eax;
565 }
566 static inline unsigned int cpuid_ebx(unsigned int op)
567 {
568 unsigned int eax, ebx, ecx, edx;
569
570 cpuid(op, &eax, &ebx, &ecx, &edx);
571 return ebx;
572 }
573 static inline unsigned int cpuid_ecx(unsigned int op)
574 {
575 unsigned int eax, ebx, ecx, edx;
576
577 cpuid(op, &eax, &ebx, &ecx, &edx);
578 return ecx;
579 }
580 static inline unsigned int cpuid_edx(unsigned int op)
581 {
582 unsigned int eax, ebx, ecx, edx;
583
584 cpuid(op, &eax, &ebx, &ecx, &edx);
585 return edx;
586 }
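/*
 * Feature-bit sketch (hypothetical helper; bit position from the CPUID
 * leaf 1 EDX layout): the single-datum helpers make ad-hoc checks
 * one-liners.
 */
static inline int example_has_sse2(void)
{
        return (cpuid_edx(1) >> 26) & 1;        /* leaf 1, EDX bit 26 = SSE2 */
}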
587
588 /* REP NOP (PAUSE) is a good thing to insert into busy-wait loops. */
589 static inline void rep_nop(void)
590 {
591 __asm__ __volatile__("rep;nop": : :"memory");
592 }
593
594 /* Stop speculative execution */
595 static inline void sync_core(void)
596 {
597 int tmp;
598 asm volatile("cpuid" : "=a" (tmp) : "0" (1)
599 : "ebx", "ecx", "edx", "memory");
600 }
601
602 #define cpu_relax() rep_nop()
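/*
 * Typical busy-wait shape (hypothetical flag): the PAUSE hint keeps a
 * spinning hyperthread from starving its sibling and saves power while
 * waiting.
 */
static inline void example_wait_for(volatile int *flag)
{
        while (!*flag)
                cpu_relax();
}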
603
604 static inline void __monitor(const void *eax, unsigned long ecx,
605 unsigned long edx)
606 {
607 /* "monitor %eax,%ecx,%edx;" */
608 asm volatile(
609 ".byte 0x0f,0x01,0xc8;"
610 : :"a" (eax), "c" (ecx), "d"(edx));
611 }
612
613 static inline void __mwait(unsigned long eax, unsigned long ecx)
614 {
615 /* "mwait %eax,%ecx;" */
616 asm volatile(
617 ".byte 0x0f,0x01,0xc9;"
618 : :"a" (eax), "c" (ecx));
619 }
620
621 static inline void __sti_mwait(unsigned long eax, unsigned long ecx)
622 {
623 /* "mwait %eax,%ecx;" */
624 asm volatile(
625 "sti; .byte 0x0f,0x01,0xc9;"
626 : :"a" (eax), "c" (ecx));
627 }
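/*
 * Canonical MONITOR/MWAIT pairing (hypothetical idle snippet, hint
 * values of 0): arm the monitor on a trigger address, re-check the
 * condition to close the race, then sleep until a write to that cache
 * line (or an interrupt) wakes the CPU.
 */
static inline void example_mwait_on(volatile int *trigger)
{
        __monitor((const void *)trigger, 0, 0);
        if (!*trigger)
                __mwait(0, 0);
}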
628
629 extern void mwait_idle_with_hints(unsigned long eax, unsigned long ecx);
630
631 extern int force_mwait;
632
633 extern void select_idle_routine(const struct cpuinfo_x86 *c);
634
635 extern unsigned long boot_option_idle_override;
636
637 extern void enable_sep_cpu(void);
638 extern int sysenter_setup(void);
639
640 /* Defined in head.S */
641 extern struct desc_ptr early_gdt_descr;
642
643 extern void cpu_set_gdt(int);
644 extern void switch_to_new_gdt(void);
645 extern void cpu_init(void);
646 extern void init_gdt(int cpu);
647
648 /* from system description table in BIOS. Mostly for MCA use, but
649 * others may find it useful. */
650 extern unsigned int machine_id;
651 extern unsigned int machine_submodel_id;
652 extern unsigned int BIOS_revision;
653 extern unsigned int mca_pentium_flag;
654
655 /* Boot loader type from the setup header */
656 extern int bootloader_type;
657
658 extern char ignore_fpu_irq;
659 #define cache_line_size() (boot_cpu_data.x86_cache_alignment)
660
661 /* generic versions from gas */
662 #define GENERIC_NOP1 ".byte 0x90\n"
663 #define GENERIC_NOP2 ".byte 0x89,0xf6\n"
664 #define GENERIC_NOP3 ".byte 0x8d,0x76,0x00\n"
665 #define GENERIC_NOP4 ".byte 0x8d,0x74,0x26,0x00\n"
666 #define GENERIC_NOP5 GENERIC_NOP1 GENERIC_NOP4
667 #define GENERIC_NOP6 ".byte 0x8d,0xb6,0x00,0x00,0x00,0x00\n"
668 #define GENERIC_NOP7 ".byte 0x8d,0xb4,0x26,0x00,0x00,0x00,0x00\n"
669 #define GENERIC_NOP8 GENERIC_NOP1 GENERIC_NOP7
670
671 /* Opteron nops */
672 #define K8_NOP1 GENERIC_NOP1
673 #define K8_NOP2 ".byte 0x66,0x90\n"
674 #define K8_NOP3 ".byte 0x66,0x66,0x90\n"
675 #define K8_NOP4 ".byte 0x66,0x66,0x66,0x90\n"
676 #define K8_NOP5 K8_NOP3 K8_NOP2
677 #define K8_NOP6 K8_NOP3 K8_NOP3
678 #define K8_NOP7 K8_NOP4 K8_NOP3
679 #define K8_NOP8 K8_NOP4 K8_NOP4
680
681 /* K7 nops */
682 /* uses eax dependencies (arbitrary choice) */
683 #define K7_NOP1 GENERIC_NOP1
684 #define K7_NOP2 ".byte 0x8b,0xc0\n"
685 #define K7_NOP3 ".byte 0x8d,0x04,0x20\n"
686 #define K7_NOP4 ".byte 0x8d,0x44,0x20,0x00\n"
687 #define K7_NOP5 K7_NOP4 ASM_NOP1
688 #define K7_NOP6 ".byte 0x8d,0x80,0,0,0,0\n"
689 #define K7_NOP7 ".byte 0x8D,0x04,0x05,0,0,0,0\n"
690 #define K7_NOP8 K7_NOP7 ASM_NOP1
691
692 /* P6 nops */
693 /* uses eax dependencies (Intel-recommended choice) */
694 #define P6_NOP1 GENERIC_NOP1
695 #define P6_NOP2 ".byte 0x66,0x90\n"
696 #define P6_NOP3 ".byte 0x0f,0x1f,0x00\n"
697 #define P6_NOP4 ".byte 0x0f,0x1f,0x40,0\n"
698 #define P6_NOP5 ".byte 0x0f,0x1f,0x44,0x00,0\n"
699 #define P6_NOP6 ".byte 0x66,0x0f,0x1f,0x44,0x00,0\n"
700 #define P6_NOP7 ".byte 0x0f,0x1f,0x80,0,0,0,0\n"
701 #define P6_NOP8 ".byte 0x0f,0x1f,0x84,0x00,0,0,0,0\n"
702
703 #ifdef CONFIG_MK7
704 #define ASM_NOP1 K7_NOP1
705 #define ASM_NOP2 K7_NOP2
706 #define ASM_NOP3 K7_NOP3
707 #define ASM_NOP4 K7_NOP4
708 #define ASM_NOP5 K7_NOP5
709 #define ASM_NOP6 K7_NOP6
710 #define ASM_NOP7 K7_NOP7
711 #define ASM_NOP8 K7_NOP8
712 #elif defined(CONFIG_M686) || defined(CONFIG_MPENTIUMII) || \
713 defined(CONFIG_MPENTIUMIII) || defined(CONFIG_MPENTIUMM) || \
714 defined(CONFIG_MCORE2) || defined(CONFIG_MPENTIUM4) || \
715 defined(CONFIG_MPSC)
716 #define ASM_NOP1 P6_NOP1
717 #define ASM_NOP2 P6_NOP2
718 #define ASM_NOP3 P6_NOP3
719 #define ASM_NOP4 P6_NOP4
720 #define ASM_NOP5 P6_NOP5
721 #define ASM_NOP6 P6_NOP6
722 #define ASM_NOP7 P6_NOP7
723 #define ASM_NOP8 P6_NOP8
724 #elif defined(CONFIG_MK8) || defined(CONFIG_X86_64)
725 #define ASM_NOP1 K8_NOP1
726 #define ASM_NOP2 K8_NOP2
727 #define ASM_NOP3 K8_NOP3
728 #define ASM_NOP4 K8_NOP4
729 #define ASM_NOP5 K8_NOP5
730 #define ASM_NOP6 K8_NOP6
731 #define ASM_NOP7 K8_NOP7
732 #define ASM_NOP8 K8_NOP8
733 #else
734 #define ASM_NOP1 GENERIC_NOP1
735 #define ASM_NOP2 GENERIC_NOP2
736 #define ASM_NOP3 GENERIC_NOP3
737 #define ASM_NOP4 GENERIC_NOP4
738 #define ASM_NOP5 GENERIC_NOP5
739 #define ASM_NOP6 GENERIC_NOP6
740 #define ASM_NOP7 GENERIC_NOP7
741 #define ASM_NOP8 GENERIC_NOP8
742 #endif
743
744 #define ASM_NOP_MAX 8
745
746 #define HAVE_ARCH_PICK_MMAP_LAYOUT 1
747 #define ARCH_HAS_PREFETCHW
748 #define ARCH_HAS_SPINLOCK_PREFETCH
749
750 #ifdef CONFIG_X86_32
751 #define BASE_PREFETCH ASM_NOP4
752 #define ARCH_HAS_PREFETCH
753 #else
754 #define BASE_PREFETCH "prefetcht0 (%1)"
755 #endif
756
757 /* Prefetch instructions for Pentium III and AMD Athlon */
758 /* It's not worth caring about 3dnow! prefetches for the K6
759 because they are microcoded there and very slow.
760 However, we don't currently do prefetches for pre-XP Athlons;
761 that should be fixed. */
762 static inline void prefetch(const void *x)
763 {
764 alternative_input(BASE_PREFETCH,
765 "prefetchnta (%1)",
766 X86_FEATURE_XMM,
767 "r" (x));
768 }
769
770 /* 3dnow! prefetch to get an exclusive cache line. Useful for
771 spinlocks to avoid one state transition in the cache coherency protocol. */
772 static inline void prefetchw(const void *x)
773 {
774 alternative_input(BASE_PREFETCH,
775 "prefetchw (%1)",
776 X86_FEATURE_3DNOW,
777 "r" (x));
778 }
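/*
 * Usage sketch (hypothetical list walk): issuing the prefetch for the
 * next node while the current one is processed hides part of the
 * memory latency; alternative_input() patches in a real prefetch
 * instruction only on CPUs that advertise the feature.
 */
struct example_node {
        struct example_node *next;
        int payload;
};

static inline int example_sum(struct example_node *n)
{
        int sum = 0;

        for (; n; n = n->next) {
                if (n->next)
                        prefetch(n->next);
                sum += n->payload;
        }
        return sum;
}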
779
780 #define spin_lock_prefetch(x) prefetchw(x)
781 /* This decides where the kernel will search for a free chunk of vm
782 * space during mmap().
783 */
784 #define TASK_UNMAPPED_BASE (PAGE_ALIGN(TASK_SIZE / 3))
785
786 #define KSTK_EIP(task) (task_pt_regs(task)->ip)
787
788 #endif