x86/cpu: Add CLZERO detection
arch/x86/include/asm/cpufeature.h
/*
 * Defines x86 CPU feature bits
 */
#ifndef _ASM_X86_CPUFEATURE_H
#define _ASM_X86_CPUFEATURE_H

#ifndef _ASM_X86_REQUIRED_FEATURES_H
#include <asm/required-features.h>
#endif

#ifndef _ASM_X86_DISABLED_FEATURES_H
#include <asm/disabled-features.h>
#endif

#define NCAPINTS 14 /* N 32-bit words worth of info */
#define NBUGINTS 1 /* N 32-bit bug flags */

/*
 * Note: If the comment begins with a quoted string, that string is used
 * in /proc/cpuinfo instead of the macro name. If the string is "",
 * this feature bit is not displayed in /proc/cpuinfo at all.
 */

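/*
 * Illustrative sketch, not part of the original header: the strings shown
 * in /proc/cpuinfo come from a generated table (built by scanning the
 * comments on the defines below), declared later as x86_cap_flags[].
 * Conceptually, and assuming the usual name-or-quoted-string rule, the
 * generated entries end up looking roughly like this:
 *
 *	const char * const x86_cap_flags[NCAPINTS*32] = {
 *		[X86_FEATURE_FPU] = "fpu",	// macro name, lower-cased
 *		[X86_FEATURE_XMM] = "sse",	// quoted "sse" overrides the name
 *		[X86_FEATURE_K8]  = NULL,	// "" comment: never displayed
 *	};
 */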
/* Intel-defined CPU features, CPUID level 0x00000001 (edx), word 0 */
#define X86_FEATURE_FPU ( 0*32+ 0) /* Onboard FPU */
#define X86_FEATURE_VME ( 0*32+ 1) /* Virtual Mode Extensions */
#define X86_FEATURE_DE ( 0*32+ 2) /* Debugging Extensions */
#define X86_FEATURE_PSE ( 0*32+ 3) /* Page Size Extensions */
#define X86_FEATURE_TSC ( 0*32+ 4) /* Time Stamp Counter */
#define X86_FEATURE_MSR ( 0*32+ 5) /* Model-Specific Registers */
#define X86_FEATURE_PAE ( 0*32+ 6) /* Physical Address Extensions */
#define X86_FEATURE_MCE ( 0*32+ 7) /* Machine Check Exception */
#define X86_FEATURE_CX8 ( 0*32+ 8) /* CMPXCHG8 instruction */
#define X86_FEATURE_APIC ( 0*32+ 9) /* Onboard APIC */
#define X86_FEATURE_SEP ( 0*32+11) /* SYSENTER/SYSEXIT */
#define X86_FEATURE_MTRR ( 0*32+12) /* Memory Type Range Registers */
#define X86_FEATURE_PGE ( 0*32+13) /* Page Global Enable */
#define X86_FEATURE_MCA ( 0*32+14) /* Machine Check Architecture */
#define X86_FEATURE_CMOV ( 0*32+15) /* CMOV instructions */
					  /* (plus FCMOVcc, FCOMI with FPU) */
#define X86_FEATURE_PAT ( 0*32+16) /* Page Attribute Table */
#define X86_FEATURE_PSE36 ( 0*32+17) /* 36-bit PSEs */
#define X86_FEATURE_PN ( 0*32+18) /* Processor serial number */
#define X86_FEATURE_CLFLUSH ( 0*32+19) /* CLFLUSH instruction */
#define X86_FEATURE_DS ( 0*32+21) /* "dts" Debug Store */
#define X86_FEATURE_ACPI ( 0*32+22) /* ACPI via MSR */
#define X86_FEATURE_MMX ( 0*32+23) /* Multimedia Extensions */
#define X86_FEATURE_FXSR ( 0*32+24) /* FXSAVE/FXRSTOR, CR4.OSFXSR */
#define X86_FEATURE_XMM ( 0*32+25) /* "sse" */
#define X86_FEATURE_XMM2 ( 0*32+26) /* "sse2" */
#define X86_FEATURE_SELFSNOOP ( 0*32+27) /* "ss" CPU self snoop */
#define X86_FEATURE_HT ( 0*32+28) /* Hyper-Threading */
#define X86_FEATURE_ACC ( 0*32+29) /* "tm" Automatic clock control */
#define X86_FEATURE_IA64 ( 0*32+30) /* IA-64 processor */
#define X86_FEATURE_PBE ( 0*32+31) /* Pending Break Enable */

/* AMD-defined CPU features, CPUID level 0x80000001, word 1 */
/* Don't duplicate feature flags which are redundant with Intel! */
#define X86_FEATURE_SYSCALL ( 1*32+11) /* SYSCALL/SYSRET */
#define X86_FEATURE_MP ( 1*32+19) /* MP Capable. */
#define X86_FEATURE_NX ( 1*32+20) /* Execute Disable */
#define X86_FEATURE_MMXEXT ( 1*32+22) /* AMD MMX extensions */
#define X86_FEATURE_FXSR_OPT ( 1*32+25) /* FXSAVE/FXRSTOR optimizations */
#define X86_FEATURE_GBPAGES ( 1*32+26) /* "pdpe1gb" GB pages */
#define X86_FEATURE_RDTSCP ( 1*32+27) /* RDTSCP */
#define X86_FEATURE_LM ( 1*32+29) /* Long Mode (x86-64) */
#define X86_FEATURE_3DNOWEXT ( 1*32+30) /* AMD 3DNow! extensions */
#define X86_FEATURE_3DNOW ( 1*32+31) /* 3DNow! */

/* Transmeta-defined CPU features, CPUID level 0x80860001, word 2 */
#define X86_FEATURE_RECOVERY ( 2*32+ 0) /* CPU in recovery mode */
#define X86_FEATURE_LONGRUN ( 2*32+ 1) /* Longrun power control */
#define X86_FEATURE_LRTI ( 2*32+ 3) /* LongRun table interface */

/* Other features, Linux-defined mapping, word 3 */
/* This range is used for feature bits which conflict or are synthesized */
#define X86_FEATURE_CXMMX ( 3*32+ 0) /* Cyrix MMX extensions */
#define X86_FEATURE_K6_MTRR ( 3*32+ 1) /* AMD K6 nonstandard MTRRs */
#define X86_FEATURE_CYRIX_ARR ( 3*32+ 2) /* Cyrix ARRs (= MTRRs) */
#define X86_FEATURE_CENTAUR_MCR ( 3*32+ 3) /* Centaur MCRs (= MTRRs) */
/* cpu types for specific tunings: */
#define X86_FEATURE_K8 ( 3*32+ 4) /* "" Opteron, Athlon64 */
#define X86_FEATURE_K7 ( 3*32+ 5) /* "" Athlon */
#define X86_FEATURE_P3 ( 3*32+ 6) /* "" P3 */
#define X86_FEATURE_P4 ( 3*32+ 7) /* "" P4 */
#define X86_FEATURE_CONSTANT_TSC ( 3*32+ 8) /* TSC ticks at a constant rate */
#define X86_FEATURE_UP ( 3*32+ 9) /* smp kernel running on up */
/* free, was #define X86_FEATURE_FXSAVE_LEAK ( 3*32+10) * "" FXSAVE leaks FOP/FIP/FOP */
#define X86_FEATURE_ARCH_PERFMON ( 3*32+11) /* Intel Architectural PerfMon */
#define X86_FEATURE_PEBS ( 3*32+12) /* Precise-Event Based Sampling */
#define X86_FEATURE_BTS ( 3*32+13) /* Branch Trace Store */
#define X86_FEATURE_SYSCALL32 ( 3*32+14) /* "" syscall in ia32 userspace */
#define X86_FEATURE_SYSENTER32 ( 3*32+15) /* "" sysenter in ia32 userspace */
#define X86_FEATURE_REP_GOOD ( 3*32+16) /* rep microcode works well */
#define X86_FEATURE_MFENCE_RDTSC ( 3*32+17) /* "" Mfence synchronizes RDTSC */
#define X86_FEATURE_LFENCE_RDTSC ( 3*32+18) /* "" Lfence synchronizes RDTSC */
/* free, was #define X86_FEATURE_11AP ( 3*32+19) * "" Bad local APIC aka 11AP */
#define X86_FEATURE_NOPL ( 3*32+20) /* The NOPL (0F 1F) instructions */
#define X86_FEATURE_ALWAYS ( 3*32+21) /* "" Always-present feature */
#define X86_FEATURE_XTOPOLOGY ( 3*32+22) /* cpu topology enum extensions */
#define X86_FEATURE_TSC_RELIABLE ( 3*32+23) /* TSC is known to be reliable */
#define X86_FEATURE_NONSTOP_TSC ( 3*32+24) /* TSC does not stop in C states */
/* free, was #define X86_FEATURE_CLFLUSH_MONITOR ( 3*32+25) * "" clflush reqd with monitor */
#define X86_FEATURE_EXTD_APICID ( 3*32+26) /* has extended APICID (8 bits) */
#define X86_FEATURE_AMD_DCM ( 3*32+27) /* multi-node processor */
#define X86_FEATURE_APERFMPERF ( 3*32+28) /* APERFMPERF */
#define X86_FEATURE_EAGER_FPU ( 3*32+29) /* "eagerfpu" Non lazy FPU restore */
#define X86_FEATURE_NONSTOP_TSC_S3 ( 3*32+30) /* TSC doesn't stop in S3 state */

/* Intel-defined CPU features, CPUID level 0x00000001 (ecx), word 4 */
#define X86_FEATURE_XMM3 ( 4*32+ 0) /* "pni" SSE-3 */
#define X86_FEATURE_PCLMULQDQ ( 4*32+ 1) /* PCLMULQDQ instruction */
#define X86_FEATURE_DTES64 ( 4*32+ 2) /* 64-bit Debug Store */
#define X86_FEATURE_MWAIT ( 4*32+ 3) /* "monitor" Monitor/Mwait support */
#define X86_FEATURE_DSCPL ( 4*32+ 4) /* "ds_cpl" CPL Qual. Debug Store */
#define X86_FEATURE_VMX ( 4*32+ 5) /* Hardware virtualization */
#define X86_FEATURE_SMX ( 4*32+ 6) /* Safer mode */
#define X86_FEATURE_EST ( 4*32+ 7) /* Enhanced SpeedStep */
#define X86_FEATURE_TM2 ( 4*32+ 8) /* Thermal Monitor 2 */
#define X86_FEATURE_SSSE3 ( 4*32+ 9) /* Supplemental SSE-3 */
#define X86_FEATURE_CID ( 4*32+10) /* Context ID */
#define X86_FEATURE_SDBG ( 4*32+11) /* Silicon Debug */
#define X86_FEATURE_FMA ( 4*32+12) /* Fused multiply-add */
#define X86_FEATURE_CX16 ( 4*32+13) /* CMPXCHG16B */
#define X86_FEATURE_XTPR ( 4*32+14) /* Send Task Priority Messages */
#define X86_FEATURE_PDCM ( 4*32+15) /* Performance Capabilities */
#define X86_FEATURE_PCID ( 4*32+17) /* Process Context Identifiers */
#define X86_FEATURE_DCA ( 4*32+18) /* Direct Cache Access */
#define X86_FEATURE_XMM4_1 ( 4*32+19) /* "sse4_1" SSE-4.1 */
#define X86_FEATURE_XMM4_2 ( 4*32+20) /* "sse4_2" SSE-4.2 */
#define X86_FEATURE_X2APIC ( 4*32+21) /* x2APIC */
#define X86_FEATURE_MOVBE ( 4*32+22) /* MOVBE instruction */
#define X86_FEATURE_POPCNT ( 4*32+23) /* POPCNT instruction */
#define X86_FEATURE_TSC_DEADLINE_TIMER ( 4*32+24) /* Tsc deadline timer */
#define X86_FEATURE_AES ( 4*32+25) /* AES instructions */
#define X86_FEATURE_XSAVE ( 4*32+26) /* XSAVE/XRSTOR/XSETBV/XGETBV */
#define X86_FEATURE_OSXSAVE ( 4*32+27) /* "" XSAVE enabled in the OS */
#define X86_FEATURE_AVX ( 4*32+28) /* Advanced Vector Extensions */
#define X86_FEATURE_F16C ( 4*32+29) /* 16-bit fp conversions */
#define X86_FEATURE_RDRAND ( 4*32+30) /* The RDRAND instruction */
#define X86_FEATURE_HYPERVISOR ( 4*32+31) /* Running on a hypervisor */

/* VIA/Cyrix/Centaur-defined CPU features, CPUID level 0xC0000001, word 5 */
#define X86_FEATURE_XSTORE ( 5*32+ 2) /* "rng" RNG present (xstore) */
#define X86_FEATURE_XSTORE_EN ( 5*32+ 3) /* "rng_en" RNG enabled */
#define X86_FEATURE_XCRYPT ( 5*32+ 6) /* "ace" on-CPU crypto (xcrypt) */
#define X86_FEATURE_XCRYPT_EN ( 5*32+ 7) /* "ace_en" on-CPU crypto enabled */
#define X86_FEATURE_ACE2 ( 5*32+ 8) /* Advanced Cryptography Engine v2 */
#define X86_FEATURE_ACE2_EN ( 5*32+ 9) /* ACE v2 enabled */
#define X86_FEATURE_PHE ( 5*32+10) /* PadLock Hash Engine */
#define X86_FEATURE_PHE_EN ( 5*32+11) /* PHE enabled */
#define X86_FEATURE_PMM ( 5*32+12) /* PadLock Montgomery Multiplier */
#define X86_FEATURE_PMM_EN ( 5*32+13) /* PMM enabled */

/* More extended AMD flags: CPUID level 0x80000001, ecx, word 6 */
#define X86_FEATURE_LAHF_LM ( 6*32+ 0) /* LAHF/SAHF in long mode */
#define X86_FEATURE_CMP_LEGACY ( 6*32+ 1) /* If yes HyperThreading not valid */
#define X86_FEATURE_SVM ( 6*32+ 2) /* Secure virtual machine */
#define X86_FEATURE_EXTAPIC ( 6*32+ 3) /* Extended APIC space */
#define X86_FEATURE_CR8_LEGACY ( 6*32+ 4) /* CR8 in 32-bit mode */
#define X86_FEATURE_ABM ( 6*32+ 5) /* Advanced bit manipulation */
#define X86_FEATURE_SSE4A ( 6*32+ 6) /* SSE-4A */
#define X86_FEATURE_MISALIGNSSE ( 6*32+ 7) /* Misaligned SSE mode */
#define X86_FEATURE_3DNOWPREFETCH ( 6*32+ 8) /* 3DNow prefetch instructions */
#define X86_FEATURE_OSVW ( 6*32+ 9) /* OS Visible Workaround */
#define X86_FEATURE_IBS ( 6*32+10) /* Instruction Based Sampling */
#define X86_FEATURE_XOP ( 6*32+11) /* extended AVX instructions */
#define X86_FEATURE_SKINIT ( 6*32+12) /* SKINIT/STGI instructions */
#define X86_FEATURE_WDT ( 6*32+13) /* Watchdog timer */
#define X86_FEATURE_LWP ( 6*32+15) /* Light Weight Profiling */
#define X86_FEATURE_FMA4 ( 6*32+16) /* 4 operands MAC instructions */
#define X86_FEATURE_TCE ( 6*32+17) /* translation cache extension */
#define X86_FEATURE_NODEID_MSR ( 6*32+19) /* NodeId MSR */
#define X86_FEATURE_TBM ( 6*32+21) /* trailing bit manipulations */
#define X86_FEATURE_TOPOEXT ( 6*32+22) /* topology extensions CPUID leafs */
#define X86_FEATURE_PERFCTR_CORE ( 6*32+23) /* core performance counter extensions */
#define X86_FEATURE_PERFCTR_NB ( 6*32+24) /* NB performance counter extensions */
#define X86_FEATURE_BPEXT ( 6*32+26) /* data breakpoint extension */
#define X86_FEATURE_PERFCTR_L2 ( 6*32+28) /* L2 performance counter extensions */
#define X86_FEATURE_MWAITX ( 6*32+29) /* MWAIT extension (MONITORX/MWAITX) */

/*
 * Auxiliary flags: Linux defined - For features scattered in various
 * CPUID levels like 0x6, 0xA etc, word 7
 */
#define X86_FEATURE_IDA ( 7*32+ 0) /* Intel Dynamic Acceleration */
#define X86_FEATURE_ARAT ( 7*32+ 1) /* Always Running APIC Timer */
#define X86_FEATURE_CPB ( 7*32+ 2) /* AMD Core Performance Boost */
#define X86_FEATURE_EPB ( 7*32+ 3) /* IA32_ENERGY_PERF_BIAS support */
#define X86_FEATURE_PLN ( 7*32+ 5) /* Intel Power Limit Notification */
#define X86_FEATURE_PTS ( 7*32+ 6) /* Intel Package Thermal Status */
#define X86_FEATURE_DTHERM ( 7*32+ 7) /* Digital Thermal Sensor */
#define X86_FEATURE_HW_PSTATE ( 7*32+ 8) /* AMD HW-PState */
#define X86_FEATURE_PROC_FEEDBACK ( 7*32+ 9) /* AMD ProcFeedbackInterface */
#define X86_FEATURE_HWP ( 7*32+10) /* "hwp" Intel HWP */
#define X86_FEATURE_HWP_NOTIFY ( 7*32+11) /* Intel HWP_NOTIFY */
#define X86_FEATURE_HWP_ACT_WINDOW ( 7*32+12) /* Intel HWP_ACT_WINDOW */
#define X86_FEATURE_HWP_EPP ( 7*32+13) /* Intel HWP_EPP */
#define X86_FEATURE_HWP_PKG_REQ ( 7*32+14) /* Intel HWP_PKG_REQ */
#define X86_FEATURE_INTEL_PT ( 7*32+15) /* Intel Processor Trace */

/* Virtualization flags: Linux defined, word 8 */
#define X86_FEATURE_TPR_SHADOW ( 8*32+ 0) /* Intel TPR Shadow */
#define X86_FEATURE_VNMI ( 8*32+ 1) /* Intel Virtual NMI */
#define X86_FEATURE_FLEXPRIORITY ( 8*32+ 2) /* Intel FlexPriority */
#define X86_FEATURE_EPT ( 8*32+ 3) /* Intel Extended Page Table */
#define X86_FEATURE_VPID ( 8*32+ 4) /* Intel Virtual Processor ID */
#define X86_FEATURE_NPT ( 8*32+ 5) /* AMD Nested Page Table support */
#define X86_FEATURE_LBRV ( 8*32+ 6) /* AMD LBR Virtualization support */
#define X86_FEATURE_SVML ( 8*32+ 7) /* "svm_lock" AMD SVM locking MSR */
#define X86_FEATURE_NRIPS ( 8*32+ 8) /* "nrip_save" AMD SVM next_rip save */
#define X86_FEATURE_TSCRATEMSR ( 8*32+ 9) /* "tsc_scale" AMD TSC scaling support */
#define X86_FEATURE_VMCBCLEAN ( 8*32+10) /* "vmcb_clean" AMD VMCB clean bits support */
#define X86_FEATURE_FLUSHBYASID ( 8*32+11) /* AMD flush-by-ASID support */
#define X86_FEATURE_DECODEASSISTS ( 8*32+12) /* AMD Decode Assists support */
#define X86_FEATURE_PAUSEFILTER ( 8*32+13) /* AMD filtered pause intercept */
#define X86_FEATURE_PFTHRESHOLD ( 8*32+14) /* AMD pause filter threshold */
#define X86_FEATURE_VMMCALL ( 8*32+15) /* Prefer vmmcall to vmcall */

/* Intel-defined CPU features, CPUID level 0x00000007:0 (ebx), word 9 */
#define X86_FEATURE_FSGSBASE ( 9*32+ 0) /* {RD/WR}{FS/GS}BASE instructions */
#define X86_FEATURE_TSC_ADJUST ( 9*32+ 1) /* TSC adjustment MSR 0x3b */
#define X86_FEATURE_BMI1 ( 9*32+ 3) /* 1st group bit manipulation extensions */
#define X86_FEATURE_HLE ( 9*32+ 4) /* Hardware Lock Elision */
#define X86_FEATURE_AVX2 ( 9*32+ 5) /* AVX2 instructions */
#define X86_FEATURE_SMEP ( 9*32+ 7) /* Supervisor Mode Execution Protection */
#define X86_FEATURE_BMI2 ( 9*32+ 8) /* 2nd group bit manipulation extensions */
#define X86_FEATURE_ERMS ( 9*32+ 9) /* Enhanced REP MOVSB/STOSB */
#define X86_FEATURE_INVPCID ( 9*32+10) /* Invalidate Processor Context ID */
#define X86_FEATURE_RTM ( 9*32+11) /* Restricted Transactional Memory */
#define X86_FEATURE_CQM ( 9*32+12) /* Cache QoS Monitoring */
#define X86_FEATURE_MPX ( 9*32+14) /* Memory Protection Extension */
#define X86_FEATURE_AVX512F ( 9*32+16) /* AVX-512 Foundation */
#define X86_FEATURE_RDSEED ( 9*32+18) /* The RDSEED instruction */
#define X86_FEATURE_ADX ( 9*32+19) /* The ADCX and ADOX instructions */
#define X86_FEATURE_SMAP ( 9*32+20) /* Supervisor Mode Access Prevention */
#define X86_FEATURE_PCOMMIT ( 9*32+22) /* PCOMMIT instruction */
#define X86_FEATURE_CLFLUSHOPT ( 9*32+23) /* CLFLUSHOPT instruction */
#define X86_FEATURE_CLWB ( 9*32+24) /* CLWB instruction */
#define X86_FEATURE_AVX512PF ( 9*32+26) /* AVX-512 Prefetch */
#define X86_FEATURE_AVX512ER ( 9*32+27) /* AVX-512 Exponential and Reciprocal */
#define X86_FEATURE_AVX512CD ( 9*32+28) /* AVX-512 Conflict Detection */
#define X86_FEATURE_SHA_NI ( 9*32+29) /* SHA1/SHA256 Instruction Extensions */

/* Extended state features, CPUID level 0x0000000d:1 (eax), word 10 */
#define X86_FEATURE_XSAVEOPT (10*32+ 0) /* XSAVEOPT */
#define X86_FEATURE_XSAVEC (10*32+ 1) /* XSAVEC */
#define X86_FEATURE_XGETBV1 (10*32+ 2) /* XGETBV with ECX = 1 */
#define X86_FEATURE_XSAVES (10*32+ 3) /* XSAVES/XRSTORS */

/* Intel-defined CPU QoS Sub-leaf, CPUID level 0x0000000F:0 (edx), word 11 */
#define X86_FEATURE_CQM_LLC (11*32+ 1) /* LLC QoS if 1 */

/* Intel-defined CPU QoS Sub-leaf, CPUID level 0x0000000F:1 (edx), word 12 */
#define X86_FEATURE_CQM_OCCUP_LLC (12*32+ 0) /* LLC occupancy monitoring if 1 */

/* AMD-defined CPU features, CPUID level 0x80000008 (ebx), word 13 */
#define X86_FEATURE_CLZERO (13*32+ 0) /* CLZERO instruction */

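/*
 * Illustrative sketch, not part of the original header: with the new
 * word-13 bit in place, CLZERO support is tested like any other feature
 * flag once the boot CPU has been identified, e.g.:
 *
 *	if (boot_cpu_has(X86_FEATURE_CLZERO))
 *		pr_info("CLZERO instruction available\n");
 *
 * (pr_info() is used here only as a stand-in for whatever a caller would
 * actually do with the information.)
 */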
/*
 * BUG word(s)
 */
#define X86_BUG(x) (NCAPINTS*32 + (x))

#define X86_BUG_F00F X86_BUG(0) /* Intel F00F */
#define X86_BUG_FDIV X86_BUG(1) /* FPU FDIV */
#define X86_BUG_COMA X86_BUG(2) /* Cyrix 6x86 coma */
#define X86_BUG_AMD_TLB_MMATCH X86_BUG(3) /* "tlb_mmatch" AMD Erratum 383 */
#define X86_BUG_AMD_APIC_C1E X86_BUG(4) /* "apic_c1e" AMD Erratum 400 */
#define X86_BUG_11AP X86_BUG(5) /* Bad local APIC aka 11AP */
#define X86_BUG_FXSAVE_LEAK X86_BUG(6) /* FXSAVE leaks FOP/FIP/FOP */
#define X86_BUG_CLFLUSH_MONITOR X86_BUG(7) /* AAI65, CLFLUSH required before MONITOR */
#define X86_BUG_SYSRET_SS_ATTRS X86_BUG(8) /* SYSRET doesn't fix up SS attrs */

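/*
 * Worked example (annotation, not part of the original header): bug bits
 * are simply numbered past the last capability word, so with
 * NCAPINTS == 14:
 *
 *	X86_BUG_F00F == X86_BUG(0) == 14*32 + 0 == 448
 *	X86_BUG_FDIV == X86_BUG(1) == 14*32 + 1 == 449
 *
 * They are stored in the extra NBUGINTS words that follow the capability
 * words in x86_capability[], so the same bit helpers work for both kinds
 * of flags.
 */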
#if defined(__KERNEL__) && !defined(__ASSEMBLY__)

#include <asm/asm.h>
#include <linux/bitops.h>

#ifdef CONFIG_X86_FEATURE_NAMES
extern const char * const x86_cap_flags[NCAPINTS*32];
extern const char * const x86_power_flags[32];
#define X86_CAP_FMT "%s"
#define x86_cap_flag(flag) x86_cap_flags[flag]
#else
#define X86_CAP_FMT "%d:%d"
#define x86_cap_flag(flag) ((flag) >> 5), ((flag) & 31)
#endif

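/*
 * Illustrative sketch, not part of the original header: X86_CAP_FMT and
 * x86_cap_flag() let callers print a feature either by name or as
 * "word:bit", depending on CONFIG_X86_FEATURE_NAMES, e.g.:
 *
 *	pr_warn("missing feature " X86_CAP_FMT "\n",
 *		x86_cap_flag(X86_FEATURE_XSAVE));
 *
 * With feature names enabled this prints "xsave"; without them it prints
 * the word:bit pair (here 4:26).
 */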
/*
 * In order to save room, we index into this array by doing
 * X86_BUG_<name> - NCAPINTS*32.
 */
extern const char * const x86_bug_flags[NBUGINTS*32];

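/*
 * Illustrative sketch, not part of the original header: because bug
 * numbers start at NCAPINTS*32, that offset has to be subtracted again
 * when looking up the human-readable name, e.g.:
 *
 *	const char *name = x86_bug_flags[X86_BUG_F00F - NCAPINTS*32];
 *
 * which indexes entry 0 of the (NBUGINTS*32)-entry table.
 */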
#define test_cpu_cap(c, bit) \
	test_bit(bit, (unsigned long *)((c)->x86_capability))

#define REQUIRED_MASK_BIT_SET(bit) \
	( (((bit)>>5)==0 && (1UL<<((bit)&31) & REQUIRED_MASK0)) || \
	  (((bit)>>5)==1 && (1UL<<((bit)&31) & REQUIRED_MASK1)) || \
	  (((bit)>>5)==2 && (1UL<<((bit)&31) & REQUIRED_MASK2)) || \
	  (((bit)>>5)==3 && (1UL<<((bit)&31) & REQUIRED_MASK3)) || \
	  (((bit)>>5)==4 && (1UL<<((bit)&31) & REQUIRED_MASK4)) || \
	  (((bit)>>5)==5 && (1UL<<((bit)&31) & REQUIRED_MASK5)) || \
	  (((bit)>>5)==6 && (1UL<<((bit)&31) & REQUIRED_MASK6)) || \
	  (((bit)>>5)==7 && (1UL<<((bit)&31) & REQUIRED_MASK7)) || \
	  (((bit)>>5)==8 && (1UL<<((bit)&31) & REQUIRED_MASK8)) || \
	  (((bit)>>5)==9 && (1UL<<((bit)&31) & REQUIRED_MASK9)) )

#define DISABLED_MASK_BIT_SET(bit) \
	( (((bit)>>5)==0 && (1UL<<((bit)&31) & DISABLED_MASK0)) || \
	  (((bit)>>5)==1 && (1UL<<((bit)&31) & DISABLED_MASK1)) || \
	  (((bit)>>5)==2 && (1UL<<((bit)&31) & DISABLED_MASK2)) || \
	  (((bit)>>5)==3 && (1UL<<((bit)&31) & DISABLED_MASK3)) || \
	  (((bit)>>5)==4 && (1UL<<((bit)&31) & DISABLED_MASK4)) || \
	  (((bit)>>5)==5 && (1UL<<((bit)&31) & DISABLED_MASK5)) || \
	  (((bit)>>5)==6 && (1UL<<((bit)&31) & DISABLED_MASK6)) || \
	  (((bit)>>5)==7 && (1UL<<((bit)&31) & DISABLED_MASK7)) || \
	  (((bit)>>5)==8 && (1UL<<((bit)&31) & DISABLED_MASK8)) || \
	  (((bit)>>5)==9 && (1UL<<((bit)&31) & DISABLED_MASK9)) )

#define cpu_has(c, bit) \
	(__builtin_constant_p(bit) && REQUIRED_MASK_BIT_SET(bit) ? 1 : \
	 test_cpu_cap(c, bit))

#define this_cpu_has(bit) \
	(__builtin_constant_p(bit) && REQUIRED_MASK_BIT_SET(bit) ? 1 : \
	 x86_this_cpu_test_bit(bit, (unsigned long *)&cpu_info.x86_capability))

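/*
 * Illustrative sketch, not part of the original header: for a
 * compile-time-constant bit that is listed in the required-features masks
 * for the current configuration, the whole test folds to 1 and no runtime
 * bit test is emitted.  For example, on a typical 64-bit build where long
 * mode is a required feature:
 *
 *	cpu_has(&boot_cpu_data, X86_FEATURE_LM)   -> constant 1
 *	cpu_has(c, some_runtime_variable_bit)     -> real test_bit() call
 *
 * (Which bits count as "required" comes from asm/required-features.h and
 * depends on the kernel configuration.)
 */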
/*
 * This macro is for detection of features which need kernel
 * infrastructure to be used. It may *not* directly test the CPU
 * itself. Use the cpu_has() family if you want true runtime
 * testing of CPU features, like in hypervisor code where you are
 * supporting a possible guest feature where host support for it
 * is not relevant.
 */
#define cpu_feature_enabled(bit) \
	(__builtin_constant_p(bit) && DISABLED_MASK_BIT_SET(bit) ? 0 : \
	 cpu_has(&boot_cpu_data, bit))

#define boot_cpu_has(bit) cpu_has(&boot_cpu_data, bit)

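/*
 * Illustrative sketch, not part of the original header: the difference
 * between the two helpers only shows up for features that the current
 * configuration compiles out via asm/disabled-features.h.  For such a bit:
 *
 *	cpu_feature_enabled(bit)  -> constant 0, dependent code can be
 *	                             optimized away entirely
 *	boot_cpu_has(bit)         -> still tests the bit the CPU reported
 *
 * so infrastructure-dependent features should use cpu_feature_enabled(),
 * while pure hardware capability checks (e.g. exposing guest features in
 * hypervisor code) should use the cpu_has() family.
 */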
#define set_cpu_cap(c, bit) set_bit(bit, (unsigned long *)((c)->x86_capability))
#define clear_cpu_cap(c, bit) clear_bit(bit, (unsigned long *)((c)->x86_capability))
#define setup_clear_cpu_cap(bit) do { \
	clear_cpu_cap(&boot_cpu_data, bit); \
	set_bit(bit, (unsigned long *)cpu_caps_cleared); \
} while (0)
#define setup_force_cpu_cap(bit) do { \
	set_cpu_cap(&boot_cpu_data, bit); \
	set_bit(bit, (unsigned long *)cpu_caps_set); \
} while (0)

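/*
 * Illustrative sketch, not part of the original header: the setup_*
 * variants additionally record the override in cpu_caps_cleared /
 * cpu_caps_set, so it is re-applied when secondary CPUs are identified.
 * A typical (hypothetical here) use is honouring a command-line switch
 * during early boot:
 *
 *	if (disable_smep)
 *		setup_clear_cpu_cap(X86_FEATURE_SMEP);
 */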
#define cpu_has_fpu boot_cpu_has(X86_FEATURE_FPU)
#define cpu_has_de boot_cpu_has(X86_FEATURE_DE)
#define cpu_has_pse boot_cpu_has(X86_FEATURE_PSE)
#define cpu_has_tsc boot_cpu_has(X86_FEATURE_TSC)
#define cpu_has_pge boot_cpu_has(X86_FEATURE_PGE)
#define cpu_has_apic boot_cpu_has(X86_FEATURE_APIC)
#define cpu_has_sep boot_cpu_has(X86_FEATURE_SEP)
#define cpu_has_mtrr boot_cpu_has(X86_FEATURE_MTRR)
#define cpu_has_mmx boot_cpu_has(X86_FEATURE_MMX)
#define cpu_has_fxsr boot_cpu_has(X86_FEATURE_FXSR)
#define cpu_has_xmm boot_cpu_has(X86_FEATURE_XMM)
#define cpu_has_xmm2 boot_cpu_has(X86_FEATURE_XMM2)
#define cpu_has_xmm3 boot_cpu_has(X86_FEATURE_XMM3)
#define cpu_has_ssse3 boot_cpu_has(X86_FEATURE_SSSE3)
#define cpu_has_aes boot_cpu_has(X86_FEATURE_AES)
#define cpu_has_avx boot_cpu_has(X86_FEATURE_AVX)
#define cpu_has_avx2 boot_cpu_has(X86_FEATURE_AVX2)
#define cpu_has_ht boot_cpu_has(X86_FEATURE_HT)
#define cpu_has_nx boot_cpu_has(X86_FEATURE_NX)
#define cpu_has_xstore boot_cpu_has(X86_FEATURE_XSTORE)
#define cpu_has_xstore_enabled boot_cpu_has(X86_FEATURE_XSTORE_EN)
#define cpu_has_xcrypt boot_cpu_has(X86_FEATURE_XCRYPT)
#define cpu_has_xcrypt_enabled boot_cpu_has(X86_FEATURE_XCRYPT_EN)
#define cpu_has_ace2 boot_cpu_has(X86_FEATURE_ACE2)
#define cpu_has_ace2_enabled boot_cpu_has(X86_FEATURE_ACE2_EN)
#define cpu_has_phe boot_cpu_has(X86_FEATURE_PHE)
#define cpu_has_phe_enabled boot_cpu_has(X86_FEATURE_PHE_EN)
#define cpu_has_pmm boot_cpu_has(X86_FEATURE_PMM)
#define cpu_has_pmm_enabled boot_cpu_has(X86_FEATURE_PMM_EN)
#define cpu_has_ds boot_cpu_has(X86_FEATURE_DS)
#define cpu_has_pebs boot_cpu_has(X86_FEATURE_PEBS)
#define cpu_has_clflush boot_cpu_has(X86_FEATURE_CLFLUSH)
#define cpu_has_bts boot_cpu_has(X86_FEATURE_BTS)
#define cpu_has_gbpages boot_cpu_has(X86_FEATURE_GBPAGES)
#define cpu_has_arch_perfmon boot_cpu_has(X86_FEATURE_ARCH_PERFMON)
#define cpu_has_pat boot_cpu_has(X86_FEATURE_PAT)
#define cpu_has_xmm4_1 boot_cpu_has(X86_FEATURE_XMM4_1)
#define cpu_has_xmm4_2 boot_cpu_has(X86_FEATURE_XMM4_2)
#define cpu_has_x2apic boot_cpu_has(X86_FEATURE_X2APIC)
#define cpu_has_xsave boot_cpu_has(X86_FEATURE_XSAVE)
#define cpu_has_xsaveopt boot_cpu_has(X86_FEATURE_XSAVEOPT)
#define cpu_has_xsaves boot_cpu_has(X86_FEATURE_XSAVES)
#define cpu_has_osxsave boot_cpu_has(X86_FEATURE_OSXSAVE)
#define cpu_has_hypervisor boot_cpu_has(X86_FEATURE_HYPERVISOR)
#define cpu_has_pclmulqdq boot_cpu_has(X86_FEATURE_PCLMULQDQ)
#define cpu_has_perfctr_core boot_cpu_has(X86_FEATURE_PERFCTR_CORE)
#define cpu_has_perfctr_nb boot_cpu_has(X86_FEATURE_PERFCTR_NB)
#define cpu_has_perfctr_l2 boot_cpu_has(X86_FEATURE_PERFCTR_L2)
#define cpu_has_cx8 boot_cpu_has(X86_FEATURE_CX8)
#define cpu_has_cx16 boot_cpu_has(X86_FEATURE_CX16)
#define cpu_has_eager_fpu boot_cpu_has(X86_FEATURE_EAGER_FPU)
#define cpu_has_topoext boot_cpu_has(X86_FEATURE_TOPOEXT)
#define cpu_has_bpext boot_cpu_has(X86_FEATURE_BPEXT)

#if __GNUC__ >= 4
extern void warn_pre_alternatives(void);
extern bool __static_cpu_has_safe(u16 bit);

/*
 * Static testing of CPU features. Used the same as boot_cpu_has().
 * These are only valid after alternatives have run, but will statically
 * patch the target code for additional performance.
 */
static __always_inline __pure bool __static_cpu_has(u16 bit)
{
#ifdef CC_HAVE_ASM_GOTO

#ifdef CONFIG_X86_DEBUG_STATIC_CPU_HAS

	/*
	 * Catch too early usage of this before alternatives
	 * have run.
	 */
	asm_volatile_goto("1: jmp %l[t_warn]\n"
		 "2:\n"
		 ".section .altinstructions,\"a\"\n"
		 " .long 1b - .\n"
		 " .long 0\n"		/* no replacement */
		 " .word %P0\n"		/* 1: do replace */
		 " .byte 2b - 1b\n"	/* source len */
		 " .byte 0\n"		/* replacement len */
		 " .byte 0\n"		/* pad len */
		 ".previous\n"
		 /* skipping size check since replacement size = 0 */
		 : : "i" (X86_FEATURE_ALWAYS) : : t_warn);

#endif

	asm_volatile_goto("1: jmp %l[t_no]\n"
		 "2:\n"
		 ".section .altinstructions,\"a\"\n"
		 " .long 1b - .\n"
		 " .long 0\n"		/* no replacement */
		 " .word %P0\n"		/* feature bit */
		 " .byte 2b - 1b\n"	/* source len */
		 " .byte 0\n"		/* replacement len */
		 " .byte 0\n"		/* pad len */
		 ".previous\n"
		 /* skipping size check since replacement size = 0 */
		 : : "i" (bit) : : t_no);
	return true;
t_no:
	return false;

#ifdef CONFIG_X86_DEBUG_STATIC_CPU_HAS
t_warn:
	warn_pre_alternatives();
	return false;
#endif

#else /* CC_HAVE_ASM_GOTO */

	u8 flag;
	/* Open-coded due to __stringify() in ALTERNATIVE() */
	asm volatile("1: movb $0,%0\n"
		     "2:\n"
		     ".section .altinstructions,\"a\"\n"
		     " .long 1b - .\n"
		     " .long 3f - .\n"
		     " .word %P1\n"		/* feature bit */
		     " .byte 2b - 1b\n"		/* source len */
		     " .byte 4f - 3f\n"		/* replacement len */
		     " .byte 0\n"		/* pad len */
		     ".previous\n"
		     ".section .discard,\"aw\",@progbits\n"
		     " .byte 0xff + (4f-3f) - (2b-1b)\n" /* size check */
		     ".previous\n"
		     ".section .altinstr_replacement,\"ax\"\n"
		     "3: movb $1,%0\n"
		     "4:\n"
		     ".previous\n"
		     : "=qm" (flag) : "i" (bit));
	return flag;

#endif /* CC_HAVE_ASM_GOTO */
}

#define static_cpu_has(bit)					\
(								\
	__builtin_constant_p(boot_cpu_has(bit)) ?		\
		boot_cpu_has(bit) :				\
	__builtin_constant_p(bit) ?				\
		__static_cpu_has(bit) :				\
		boot_cpu_has(bit)				\
)

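/*
 * Illustrative sketch, not part of the original header: static_cpu_has()
 * is meant for hot paths where the branch should be patched away by the
 * alternatives machinery rather than re-tested on every call, e.g.:
 *
 *	if (static_cpu_has(X86_FEATURE_XMM2))
 *		use_sse2_path();
 *	else
 *		use_generic_path();
 *
 * The helper names above are hypothetical; the key point is that this
 * form is only valid once alternatives have been applied, otherwise the
 * _safe variant below (or boot_cpu_has()) must be used.
 */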
static __always_inline __pure bool _static_cpu_has_safe(u16 bit)
{
#ifdef CC_HAVE_ASM_GOTO
	asm_volatile_goto("1: jmp %l[t_dynamic]\n"
		 "2:\n"
		 ".skip -(((5f-4f) - (2b-1b)) > 0) * "
			 "((5f-4f) - (2b-1b)),0x90\n"
		 "3:\n"
		 ".section .altinstructions,\"a\"\n"
		 " .long 1b - .\n"		/* src offset */
		 " .long 4f - .\n"		/* repl offset */
		 " .word %P1\n"			/* always replace */
		 " .byte 3b - 1b\n"		/* src len */
		 " .byte 5f - 4f\n"		/* repl len */
		 " .byte 3b - 2b\n"		/* pad len */
		 ".previous\n"
		 ".section .altinstr_replacement,\"ax\"\n"
		 "4: jmp %l[t_no]\n"
		 "5:\n"
		 ".previous\n"
		 ".section .altinstructions,\"a\"\n"
		 " .long 1b - .\n"		/* src offset */
		 " .long 0\n"			/* no replacement */
		 " .word %P0\n"			/* feature bit */
		 " .byte 3b - 1b\n"		/* src len */
		 " .byte 0\n"			/* repl len */
		 " .byte 0\n"			/* pad len */
		 ".previous\n"
		 : : "i" (bit), "i" (X86_FEATURE_ALWAYS)
		 : : t_dynamic, t_no);
	return true;
t_no:
	return false;
t_dynamic:
	return __static_cpu_has_safe(bit);
#else
	u8 flag;
	/* Open-coded due to __stringify() in ALTERNATIVE() */
	asm volatile("1: movb $2,%0\n"
		     "2:\n"
		     ".section .altinstructions,\"a\"\n"
		     " .long 1b - .\n"		/* src offset */
		     " .long 3f - .\n"		/* repl offset */
		     " .word %P2\n"		/* always replace */
		     " .byte 2b - 1b\n"		/* source len */
		     " .byte 4f - 3f\n"		/* replacement len */
		     " .byte 0\n"		/* pad len */
		     ".previous\n"
		     ".section .discard,\"aw\",@progbits\n"
		     " .byte 0xff + (4f-3f) - (2b-1b)\n" /* size check */
		     ".previous\n"
		     ".section .altinstr_replacement,\"ax\"\n"
		     "3: movb $0,%0\n"
		     "4:\n"
		     ".previous\n"
		     ".section .altinstructions,\"a\"\n"
		     " .long 1b - .\n"		/* src offset */
		     " .long 5f - .\n"		/* repl offset */
		     " .word %P1\n"		/* feature bit */
		     " .byte 4b - 3b\n"		/* src len */
		     " .byte 6f - 5f\n"		/* repl len */
		     " .byte 0\n"		/* pad len */
		     ".previous\n"
		     ".section .discard,\"aw\",@progbits\n"
		     " .byte 0xff + (6f-5f) - (4b-3b)\n" /* size check */
		     ".previous\n"
		     ".section .altinstr_replacement,\"ax\"\n"
		     "5: movb $1,%0\n"
		     "6:\n"
		     ".previous\n"
		     : "=qm" (flag)
		     : "i" (bit), "i" (X86_FEATURE_ALWAYS));
	return (flag == 2 ? __static_cpu_has_safe(bit) : flag);
#endif /* CC_HAVE_ASM_GOTO */
}

#define static_cpu_has_safe(bit)				\
(								\
	__builtin_constant_p(boot_cpu_has(bit)) ?		\
		boot_cpu_has(bit) :				\
		_static_cpu_has_safe(bit)			\
)
#else
/*
 * gcc 3.x is too stupid to do the static test; fall back to dynamic.
 */
#define static_cpu_has(bit) boot_cpu_has(bit)
#define static_cpu_has_safe(bit) boot_cpu_has(bit)
#endif

#define cpu_has_bug(c, bit) cpu_has(c, (bit))
#define set_cpu_bug(c, bit) set_cpu_cap(c, (bit))
#define clear_cpu_bug(c, bit) clear_cpu_cap(c, (bit))

#define static_cpu_has_bug(bit) static_cpu_has((bit))
#define static_cpu_has_bug_safe(bit) static_cpu_has_safe((bit))
#define boot_cpu_has_bug(bit) cpu_has_bug(&boot_cpu_data, (bit))

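/*
 * Illustrative sketch, not part of the original header: the bug macros
 * are thin wrappers so that erratum workarounds read naturally, e.g.:
 *
 *	if (boot_cpu_has_bug(X86_BUG_SYSRET_SS_ATTRS))
 *		apply_sysret_ss_workaround();
 *
 * The workaround function name is hypothetical; the test itself is just a
 * capability-bit check in the extra bug words.
 */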
#define MAX_CPU_FEATURES (NCAPINTS * 32)
#define cpu_have_feature boot_cpu_has

#define CPU_FEATURE_TYPEFMT "x86,ven%04Xfam%04Xmod%04X"
#define CPU_FEATURE_TYPEVAL boot_cpu_data.x86_vendor, boot_cpu_data.x86, \
	boot_cpu_data.x86_model

#endif /* defined(__KERNEL__) && !defined(__ASSEMBLY__) */
#endif /* _ASM_X86_CPUFEATURE_H */