#ifndef __ASM_SYSTEM_H
#define __ASM_SYSTEM_H

#include <linux/config.h>
#include <linux/kernel.h>
#include <asm/segment.h>
#include <asm/cpufeature.h>
#include <linux/bitops.h> /* for LOCK_PREFIX */

#ifdef __KERNEL__

struct task_struct;	/* one of the stranger aspects of C forward declarations.. */
extern struct task_struct * FASTCALL(__switch_to(struct task_struct *prev, struct task_struct *next));

#define switch_to(prev,next,last) do { \
	unsigned long esi,edi; \
	asm volatile("pushl %%ebp\n\t" \
		     "movl %%esp,%0\n\t"	/* save ESP */ \
		     "movl %5,%%esp\n\t"	/* restore ESP */ \
		     "movl $1f,%1\n\t"		/* save EIP */ \
		     "pushl %6\n\t"		/* restore EIP */ \
		     "jmp __switch_to\n" \
		     "1:\t" \
		     "popl %%ebp\n\t" \
		     :"=m" (prev->thread.esp),"=m" (prev->thread.eip), \
		      "=a" (last),"=S" (esi),"=D" (edi) \
		     :"m" (next->thread.esp),"m" (next->thread.eip), \
		      "2" (prev), "d" (next)); \
} while (0)

#define _set_base(addr,base) do { unsigned long __pr; \
__asm__ __volatile__ ("movw %%dx,%1\n\t" \
	"rorl $16,%%edx\n\t" \
	"movb %%dl,%2\n\t" \
	"movb %%dh,%3" \
	:"=&d" (__pr) \
	:"m" (*((addr)+2)), \
	 "m" (*((addr)+4)), \
	 "m" (*((addr)+7)), \
	 "0" (base) \
	); } while(0)

#define _set_limit(addr,limit) do { unsigned long __lr; \
__asm__ __volatile__ ("movw %%dx,%1\n\t" \
	"rorl $16,%%edx\n\t" \
	"movb %2,%%dh\n\t" \
	"andb $0xf0,%%dh\n\t" \
	"orb %%dh,%%dl\n\t" \
	"movb %%dl,%2" \
	:"=&d" (__lr) \
	:"m" (*(addr)), \
	 "m" (*((addr)+6)), \
	 "0" (limit) \
	); } while(0)

#define set_base(ldt,base) _set_base( ((char *)&(ldt)) , (base) )
#define set_limit(ldt,limit) _set_limit( ((char *)&(ldt)) , ((limit)-1) )

/*
 * Load a segment. Fall back on loading the zero
 * segment if something goes wrong..
 */
#define loadsegment(seg,value) \
	asm volatile("\n" \
		"1:\t" \
		"mov %0,%%" #seg "\n" \
		"2:\n" \
		".section .fixup,\"ax\"\n" \
		"3:\t" \
		"pushl $0\n\t" \
		"popl %%" #seg "\n\t" \
		"jmp 2b\n" \
		".previous\n" \
		".section __ex_table,\"a\"\n\t" \
		".align 4\n\t" \
		".long 1b,3b\n" \
		".previous" \
		: :"rm" (value))
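
/*
 * Illustrative only (not part of the original header): a typical caller
 * loads a possibly stale selector and relies on the fixup table above to
 * fall back to the null selector instead of faulting, e.g. when restoring
 * a saved segment on a context switch:
 *
 *	loadsegment(gs, next->gs);
 *
 * Here "next->gs" is just a hypothetical saved selector value.
 */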

/*
 * Save a segment register away
 */
#define savesegment(seg, value) \
	asm volatile("mov %%" #seg ",%0":"=rm" (value))

/*
 * Clear and set 'TS' bit respectively
 */
#define clts() __asm__ __volatile__ ("clts")
#define read_cr0() ({ \
	unsigned int __dummy; \
	__asm__ __volatile__( \
		"movl %%cr0,%0\n\t" \
		:"=r" (__dummy)); \
	__dummy; \
})
#define write_cr0(x) \
	__asm__ __volatile__("movl %0,%%cr0": :"r" (x));

#define read_cr2() ({ \
	unsigned int __dummy; \
	__asm__ __volatile__( \
		"movl %%cr2,%0\n\t" \
		:"=r" (__dummy)); \
	__dummy; \
})
#define write_cr2(x) \
	__asm__ __volatile__("movl %0,%%cr2": :"r" (x));

#define read_cr3() ({ \
	unsigned int __dummy; \
	__asm__ ( \
		"movl %%cr3,%0\n\t" \
		:"=r" (__dummy)); \
	__dummy; \
})
#define write_cr3(x) \
	__asm__ __volatile__("movl %0,%%cr3": :"r" (x));

#define read_cr4() ({ \
	unsigned int __dummy; \
	__asm__( \
		"movl %%cr4,%0\n\t" \
		:"=r" (__dummy)); \
	__dummy; \
})

#define read_cr4_safe() ({ \
	unsigned int __dummy; \
	/* This could fault if %cr4 does not exist */ \
	__asm__("1: movl %%cr4, %0	\n" \
		"2:			\n" \
		".section __ex_table,\"a\"	\n" \
		".long 1b,2b		\n" \
		".previous		\n" \
		: "=r" (__dummy): "0" (0)); \
	__dummy; \
})

#define write_cr4(x) \
	__asm__ __volatile__("movl %0,%%cr4": :"r" (x));
#define stts() write_cr0(8 | read_cr0())
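
/*
 * Illustrative only (not from the original header): the usual pattern is to
 * clear TS before the kernel touches FPU/SSE state and to set it again so
 * that the next FPU use by a task traps and triggers a lazy state restore.
 * A minimal sketch, assuming preemption is already disabled:
 *
 *	clts();		// allow FPU use without a device-not-available fault
 *	... use FPU/SSE registers ...
 *	stts();		// re-arm the #NM trap for lazy FPU switching
 */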

#endif	/* __KERNEL__ */

#define wbinvd() \
	__asm__ __volatile__ ("wbinvd": : :"memory");

static inline unsigned long get_limit(unsigned long segment)
{
	unsigned long __limit;
	__asm__("lsll %1,%0"
		:"=r" (__limit):"r" (segment));
	return __limit+1;
}

#define nop() __asm__ __volatile__ ("nop")

#define xchg(ptr,v) ((__typeof__(*(ptr)))__xchg((unsigned long)(v),(ptr),sizeof(*(ptr))))

#define tas(ptr) (xchg((ptr),1))

struct __xchg_dummy { unsigned long a[100]; };
#define __xg(x) ((struct __xchg_dummy *)(x))

#ifdef CONFIG_X86_CMPXCHG64

/*
 * The semantics of CMPXCHG8B are a bit strange, which is why
 * there is a loop and the loading of %%eax and %%edx has to
 * be inside it. This inlines well in most cases, the cached
 * cost is ~38 cycles. (in the future we might want
 * to do a SIMD/3DNow!/MMX/FPU 64-bit store here, but that
 * might have an implicit FPU-save as a cost, so it's not
 * clear which path to go.)
 *
 * cmpxchg8b must be used with the lock prefix here to allow
 * the instruction to be executed atomically, see page 3-102
 * of the instruction set reference 24319102.pdf. We need
 * the reader side to see the coherent 64bit value.
 */
static inline void __set_64bit (unsigned long long * ptr,
		unsigned int low, unsigned int high)
{
	__asm__ __volatile__ (
		"\n1:\t"
		"movl (%0), %%eax\n\t"
		"movl 4(%0), %%edx\n\t"
		"lock cmpxchg8b (%0)\n\t"
		"jnz 1b"
		: /* no outputs */
		: "D"(ptr),
		  "b"(low),
		  "c"(high)
		: "ax","dx","memory");
}

static inline void __set_64bit_constant (unsigned long long *ptr,
				 unsigned long long value)
{
	__set_64bit(ptr,(unsigned int)(value), (unsigned int)((value)>>32ULL));
}
#define ll_low(x)	*(((unsigned int*)&(x))+0)
#define ll_high(x)	*(((unsigned int*)&(x))+1)

static inline void __set_64bit_var (unsigned long long *ptr,
			 unsigned long long value)
{
	__set_64bit(ptr,ll_low(value), ll_high(value));
}

#define set_64bit(ptr,value) \
(__builtin_constant_p(value) ? \
 __set_64bit_constant(ptr, value) : \
 __set_64bit_var(ptr, value) )

#define _set_64bit(ptr,value) \
(__builtin_constant_p(value) ? \
 __set_64bit(ptr, (unsigned int)(value), (unsigned int)((value)>>32ULL) ) : \
 __set_64bit(ptr, ll_low(value), ll_high(value)) )

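/*
 * Illustrative only (not from the original header): set_64bit() gives an
 * atomic 64-bit store on a 32-bit CPU.  Conceptually the loop above does
 *
 *	do {
 *		old = *ptr;			// 64-bit read into edx:eax
 *	} while (cmpxchg8b(ptr, old, new) did not succeed);
 *
 * so a concurrent 64-bit reader never observes a torn, half-written value.
 * A hypothetical caller might publish a 64-bit PTE-like quantity with
 * set_64bit(&slot, val);
 */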
#endif

/*
 * Note: no "lock" prefix even on SMP: xchg always implies lock anyway
 * Note 2: xchg has a side effect, so the volatile attribute is necessary,
 *	  but generally the primitive is invalid, *ptr is an output argument. --ANK
 */
static inline unsigned long __xchg(unsigned long x, volatile void * ptr, int size)
{
	switch (size) {
		case 1:
			__asm__ __volatile__("xchgb %b0,%1"
				:"=q" (x)
				:"m" (*__xg(ptr)), "0" (x)
				:"memory");
			break;
		case 2:
			__asm__ __volatile__("xchgw %w0,%1"
				:"=r" (x)
				:"m" (*__xg(ptr)), "0" (x)
				:"memory");
			break;
		case 4:
			__asm__ __volatile__("xchgl %0,%1"
				:"=r" (x)
				:"m" (*__xg(ptr)), "0" (x)
				:"memory");
			break;
	}
	return x;
}
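
/*
 * Illustrative only (not from the original header): xchg() atomically swaps
 * a new value into memory and returns the old one, e.g. claiming a
 * single-consumer slot or handing off a lock word:
 *
 *	old = xchg(&flag, 1);		// "flag" is a hypothetical int
 *	if (old == 0)
 *		... we are the one that set it ...
 */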

/*
 * Atomic compare and exchange.  Compare OLD with MEM, if identical,
 * store NEW in MEM.  Return the initial value in MEM.  Success is
 * indicated by comparing RETURN with OLD.
 */

#ifdef CONFIG_X86_CMPXCHG
#define __HAVE_ARCH_CMPXCHG 1
#define cmpxchg(ptr,o,n)\
	((__typeof__(*(ptr)))__cmpxchg((ptr),(unsigned long)(o),\
					(unsigned long)(n),sizeof(*(ptr))))
#endif

static inline unsigned long __cmpxchg(volatile void *ptr, unsigned long old,
				      unsigned long new, int size)
{
	unsigned long prev;
	switch (size) {
	case 1:
		__asm__ __volatile__(LOCK_PREFIX "cmpxchgb %b1,%2"
				     : "=a"(prev)
				     : "q"(new), "m"(*__xg(ptr)), "0"(old)
				     : "memory");
		return prev;
	case 2:
		__asm__ __volatile__(LOCK_PREFIX "cmpxchgw %w1,%2"
				     : "=a"(prev)
				     : "r"(new), "m"(*__xg(ptr)), "0"(old)
				     : "memory");
		return prev;
	case 4:
		__asm__ __volatile__(LOCK_PREFIX "cmpxchgl %1,%2"
				     : "=a"(prev)
				     : "r"(new), "m"(*__xg(ptr)), "0"(old)
				     : "memory");
		return prev;
	}
	return old;
}
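
/*
 * Illustrative only (not from the original header): the usual lock-free
 * update built on cmpxchg() is a read/modify/compare-exchange retry loop.
 * A minimal sketch with a hypothetical counter "v":
 *
 *	do {
 *		old = v;
 *	} while (cmpxchg(&v, old, old + 1) != old);
 *
 * The update succeeded only if the value returned equals the old value the
 * new value was computed from.
 */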

#ifndef CONFIG_X86_CMPXCHG
/*
 * Building a kernel capable of running on an 80386.  It may be necessary
 * to simulate the cmpxchg on the 80386 CPU.  For that purpose we define
 * a function for each of the sizes we support.
 */

extern unsigned long cmpxchg_386_u8(volatile void *, u8, u8);
extern unsigned long cmpxchg_386_u16(volatile void *, u16, u16);
extern unsigned long cmpxchg_386_u32(volatile void *, u32, u32);

static inline unsigned long cmpxchg_386(volatile void *ptr, unsigned long old,
				      unsigned long new, int size)
{
	switch (size) {
	case 1:
		return cmpxchg_386_u8(ptr, old, new);
	case 2:
		return cmpxchg_386_u16(ptr, old, new);
	case 4:
		return cmpxchg_386_u32(ptr, old, new);
	}
	return old;
}

#define cmpxchg(ptr,o,n)						\
({									\
	__typeof__(*(ptr)) __ret;					\
	if (likely(boot_cpu_data.x86 > 3))				\
		__ret = __cmpxchg((ptr), (unsigned long)(o),		\
				(unsigned long)(n), sizeof(*(ptr)));	\
	else								\
		__ret = cmpxchg_386((ptr), (unsigned long)(o),		\
				(unsigned long)(n), sizeof(*(ptr)));	\
	__ret;								\
})
#endif

#ifdef CONFIG_X86_CMPXCHG64

static inline unsigned long long __cmpxchg64(volatile void *ptr, unsigned long long old,
				      unsigned long long new)
{
	unsigned long long prev;
	__asm__ __volatile__(LOCK_PREFIX "cmpxchg8b %3"
			     : "=A"(prev)
			     : "b"((unsigned long)new),
			       "c"((unsigned long)(new >> 32)),
			       "m"(*__xg(ptr)),
			       "0"(old)
			     : "memory");
	return prev;
}

#define cmpxchg64(ptr,o,n)\
	((__typeof__(*(ptr)))__cmpxchg64((ptr),(unsigned long long)(o),\
					(unsigned long long)(n)))

#endif

#ifdef __KERNEL__
struct alt_instr {
	__u8 *instr;		/* original instruction */
	__u8 *replacement;
	__u8  cpuid;		/* cpuid bit set for replacement */
	__u8  instrlen;		/* length of original instruction */
	__u8  replacementlen;	/* length of new instruction, <= instrlen */
	__u8  pad;
};
#endif

/*
 * Alternative instructions for different CPU types or capabilities.
 *
 * This allows the use of optimized instructions even on generic binary
 * kernels.
 *
 * The length of oldinstr must be longer than or equal to the length of
 * newinstr.  It can be padded with nops as needed.
 *
 * For non-barrier-like inlines please define new variants
 * without volatile and memory clobber.
 */
#define alternative(oldinstr, newinstr, feature)			\
	asm volatile ("661:\n\t" oldinstr "\n662:\n"			\
		      ".section .altinstructions,\"a\"\n"		\
		      " .align 4\n"					\
		      " .long 661b\n"		/* label */		\
		      " .long 663f\n"		/* new instruction */	\
		      " .byte %c0\n"		/* feature bit */	\
		      " .byte 662b-661b\n"	/* sourcelen */		\
		      " .byte 664f-663f\n"	/* replacementlen */	\
		      ".previous\n"					\
		      ".section .altinstr_replacement,\"ax\"\n"	\
		      "663:\n\t" newinstr "\n664:\n"	/* replacement */ \
		      ".previous" :: "i" (feature) : "memory")
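
/*
 * Illustrative only (not from the original header): a caller supplies the
 * generic instruction sequence, the optimized replacement and the CPU
 * feature bit that enables it, as the mb()/rmb() definitions further down
 * in this file do:
 *
 *	alternative("lock; addl $0,0(%%esp)", "mfence", X86_FEATURE_XMM2);
 *
 * At boot the alternatives code patches in "mfence" on CPUs that have SSE2.
 */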

/*
 * Alternative inline assembly with input.
 *
 * Peculiarities:
 * No memory clobber here.
 * Argument numbers start with 1.
 * Best is to use constraints that are fixed size (like (%1) ... "r")
 * If you use variable-sized constraints like "m" or "g" in the
 * replacement, make sure to pad to the worst-case length.
 */
#define alternative_input(oldinstr, newinstr, feature, input...)	\
	asm volatile ("661:\n\t" oldinstr "\n662:\n"			\
		      ".section .altinstructions,\"a\"\n"		\
		      " .align 4\n"					\
		      " .long 661b\n"		/* label */		\
		      " .long 663f\n"		/* new instruction */	\
		      " .byte %c0\n"		/* feature bit */	\
		      " .byte 662b-661b\n"	/* sourcelen */		\
		      " .byte 664f-663f\n"	/* replacementlen */	\
		      ".previous\n"					\
		      ".section .altinstr_replacement,\"ax\"\n"	\
		      "663:\n\t" newinstr "\n664:\n"	/* replacement */ \
		      ".previous" :: "i" (feature), ##input)
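
/*
 * Illustrative only (not from the original header): alternative_input() is
 * the same patching scheme with extra asm operands, numbered from %1.  A
 * hypothetical prefetch helper, using ASM_NOP4 (a 4-byte nop sequence
 * defined elsewhere in the i386 headers) to pad the original side, could be:
 *
 *	alternative_input(ASM_NOP4, "prefetchnta (%1)", X86_FEATURE_XMM,
 *			  "r" (addr));
 *
 * On CPUs without SSE the nop padding is left in place.
 */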

/*
 * Force strict CPU ordering.
 * And yes, this is required on UP too when we're talking
 * to devices.
 *
 * For now, "wmb()" doesn't actually do anything, as all
 * Intel CPUs follow what Intel calls a *Processor Order*,
 * in which all writes are seen in the program order even
 * outside the CPU.
 *
 * I expect future Intel CPUs to have a weaker ordering,
 * but I'd also expect them to finally get their act together
 * and add some real memory barriers if so.
 *
 * Some non-Intel clones support out-of-order stores. wmb() ceases
 * to be a nop for these.
 */

/*
 * Actually only lfence would be needed for mb() because all stores done
 * by the kernel should be already ordered. But keep a full barrier for now.
 */

#define mb() alternative("lock; addl $0,0(%%esp)", "mfence", X86_FEATURE_XMM2)
#define rmb() alternative("lock; addl $0,0(%%esp)", "lfence", X86_FEATURE_XMM2)

/**
 * read_barrier_depends - Flush all pending reads that subsequent reads
 * depend on.
 *
 * No data-dependent reads from memory-like regions are ever reordered
 * over this barrier.  All reads preceding this primitive are guaranteed
 * to access memory (but not necessarily other CPUs' caches) before any
 * reads following this primitive that depend on the data returned by
 * any of the preceding reads.  This primitive is much lighter weight than
 * rmb() on most CPUs, and is never heavier weight than is
 * rmb().
 *
 * These ordering constraints are respected by both the local CPU
 * and the compiler.
 *
 * Ordering is not guaranteed by anything other than these primitives,
 * not even by data dependencies.  See the documentation for
 * memory_barrier() for examples and URLs to more information.
 *
 * For example, the following code would force ordering (the initial
 * value of "a" is zero, "b" is one, and "p" is "&a"):
 *
 * <programlisting>
 *	CPU 0				CPU 1
 *
 *	b = 2;
 *	memory_barrier();
 *	p = &b;				q = p;
 *					read_barrier_depends();
 *					d = *q;
 * </programlisting>
 *
 * because the read of "*q" depends on the read of "p" and these
 * two reads are separated by a read_barrier_depends().  However,
 * the following code, with the same initial values for "a" and "b":
 *
 * <programlisting>
 *	CPU 0				CPU 1
 *
 *	a = 2;
 *	memory_barrier();
 *	b = 3;				y = b;
 *					read_barrier_depends();
 *					x = a;
 * </programlisting>
 *
 * does not enforce ordering, since there is no data dependency between
 * the read of "a" and the read of "b".  Therefore, on some CPUs, such
 * as Alpha, "y" could be set to 3 and "x" to 0.  Use rmb()
 * in cases like this where there are no data dependencies.
 **/

#define read_barrier_depends()	do { } while(0)

#ifdef CONFIG_X86_OOSTORE
/* Actually there are no out-of-order store capable CPUs that do SSE for now,
   but make it a possibility already. */
#define wmb() alternative("lock; addl $0,0(%%esp)", "sfence", X86_FEATURE_XMM)
#else
#define wmb()	__asm__ __volatile__ ("": : :"memory")
#endif

#ifdef CONFIG_SMP
#define smp_mb()	mb()
#define smp_rmb()	rmb()
#define smp_wmb()	wmb()
#define smp_read_barrier_depends()	read_barrier_depends()
#define set_mb(var, value) do { xchg(&var, value); } while (0)
#else
#define smp_mb()	barrier()
#define smp_rmb()	barrier()
#define smp_wmb()	barrier()
#define smp_read_barrier_depends()	do { } while(0)
#define set_mb(var, value) do { var = value; barrier(); } while (0)
#endif

#define set_wmb(var, value) do { var = value; wmb(); } while (0)

/* interrupt control.. */
#define local_save_flags(x)	do { typecheck(unsigned long,x); __asm__ __volatile__("pushfl ; popl %0":"=g" (x): /* no input */); } while (0)
#define local_irq_restore(x)	do { typecheck(unsigned long,x); __asm__ __volatile__("pushl %0 ; popfl": /* no output */ :"g" (x):"memory", "cc"); } while (0)
#define local_irq_disable()	__asm__ __volatile__("cli": : :"memory")
#define local_irq_enable()	__asm__ __volatile__("sti": : :"memory")
/* used in the idle loop; sti takes one instruction cycle to complete */
#define safe_halt()		__asm__ __volatile__("sti; hlt": : :"memory")
/* used when interrupts are already enabled or to shut down the processor */
#define halt()			__asm__ __volatile__("hlt": : :"memory")

#define irqs_disabled()			\
({					\
	unsigned long flags;		\
	local_save_flags(flags);	\
	!(flags & (1<<9));		\
})

/* For spinlocks etc */
#define local_irq_save(x)	__asm__ __volatile__("pushfl ; popl %0 ; cli":"=g" (x): /* no input */ :"memory")
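
/*
 * Illustrative only (not from the original header): the usual pattern
 * brackets a short critical section that must not be interrupted, with
 * "flags" being a local unsigned long:
 *
 *	local_irq_save(flags);		// save IF and disable interrupts
 *	... touch per-CPU or interrupt-shared data ...
 *	local_irq_restore(flags);	// restore the previous interrupt state
 */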

/*
 * disable hlt during certain critical i/o operations
 */
#define HAVE_DISABLE_HLT
void disable_hlt(void);
void enable_hlt(void);
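
/*
 * Illustrative only (not from the original header): a driver whose device
 * cannot tolerate the CPU sitting in hlt (historically the floppy driver)
 * brackets the sensitive I/O like this:
 *
 *	disable_hlt();
 *	... perform the timing-critical I/O ...
 *	enable_hlt();
 */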

extern int es7000_plat;
void cpu_idle_wait(void);

/*
 * On SMP systems, when the scheduler does migration-cost autodetection,
 * it needs a way to flush as much of the CPU's caches as possible:
 */
static inline void sched_cacheflush(void)
{
	wbinvd();
}

extern unsigned long arch_align_stack(unsigned long sp);

#endif