#ifndef _ASM_IA64_GCC_INTRIN_H
#define _ASM_IA64_GCC_INTRIN_H
/*
 * include/asm-ia64/gcc_intrin.h
 *
 * Copyright (C) 2002,2003 Jun Nakajima <jun.nakajima@intel.com>
 * Copyright (C) 2002,2003 Suresh Siddha <suresh.b.siddha@intel.com>
 */

#include <linux/compiler.h>

/* define this macro to get some asm stmts included in 'c' files */
#define ASM_SUPPORTED

/* Optimization barrier */
/* The "volatile" is due to gcc bugs */
#define ia64_barrier()	asm volatile ("":::"memory")

#define ia64_stop()	asm volatile (";;"::)
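
/*
 * Usage sketch (illustrative, not part of the original header):
 * ia64_barrier() emits no instruction at all; it only forbids the
 * compiler from caching or reordering memory accesses across it.
 * ia64_stop() emits a ";;" stop bit, ending the current instruction
 * group.  The helper name below is hypothetical.
 */
static inline void ia64_example_ordered_store (volatile int *flag)
{
	*flag = 1;		/* publish the flag ... */
	ia64_barrier();		/* ... and keep the compiler from sinking the store */
}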

#define ia64_invala_gr(regnum)	asm volatile ("invala.e r%0" :: "i"(regnum))

#define ia64_invala_fr(regnum)	asm volatile ("invala.e f%0" :: "i"(regnum))

extern void ia64_bad_param_for_setreg (void);
extern void ia64_bad_param_for_getreg (void);

register unsigned long ia64_r13 asm ("r13") __attribute_used__;

#define ia64_setreg(regnum, val) \
({ \
	switch (regnum) { \
	case _IA64_REG_PSR_L: \
		asm volatile ("mov psr.l=%0" :: "r"(val) : "memory"); \
		break; \
	case _IA64_REG_AR_KR0 ... _IA64_REG_AR_EC: \
		asm volatile ("mov ar%0=%1" :: \
			      "i" (regnum - _IA64_REG_AR_KR0), \
			      "r"(val) : "memory"); \
		break; \
	case _IA64_REG_CR_DCR ... _IA64_REG_CR_LRR1: \
		asm volatile ("mov cr%0=%1" :: \
			      "i" (regnum - _IA64_REG_CR_DCR), \
			      "r"(val) : "memory"); \
		break; \
	case _IA64_REG_SP: \
		asm volatile ("mov r12=%0" :: "r"(val) : "memory"); \
		break; \
	case _IA64_REG_GP: \
		asm volatile ("mov gp=%0" :: "r"(val) : "memory"); \
		break; \
	default: \
		ia64_bad_param_for_setreg(); \
		break; \
	} \
})
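
/*
 * Usage sketch (hypothetical helper, modeled on the kernel's
 * ia64_set_kr()): regnum must be a compile-time constant so the
 * switch collapses to a single mov; any other value trips the
 * ia64_bad_param_for_setreg() link-time error.  _IA64_REG_AR_KR0
 * comes from <asm/ia64regs.h>, which callers are expected to have
 * pulled in.
 */
static inline void ia64_example_set_kr0 (unsigned long val)
{
	ia64_setreg(_IA64_REG_AR_KR0, val);	/* folds to "mov ar0=..." */
}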

#define ia64_getreg(regnum) \
({ \
	__u64 ia64_intri_res; \
	\
	switch (regnum) { \
	case _IA64_REG_GP: \
		asm volatile ("mov %0=gp" : "=r"(ia64_intri_res)); \
		break; \
	case _IA64_REG_IP: \
		asm volatile ("mov %0=ip" : "=r"(ia64_intri_res)); \
		break; \
	case _IA64_REG_PSR: \
		asm volatile ("mov %0=psr" : "=r"(ia64_intri_res)); \
		break; \
	case _IA64_REG_TP: /* for current() */ \
		ia64_intri_res = ia64_r13; \
		break; \
	case _IA64_REG_AR_KR0 ... _IA64_REG_AR_EC: \
		asm volatile ("mov %0=ar%1" : "=r" (ia64_intri_res) \
			      : "i"(regnum - _IA64_REG_AR_KR0)); \
		break; \
	case _IA64_REG_CR_DCR ... _IA64_REG_CR_LRR1: \
		asm volatile ("mov %0=cr%1" : "=r" (ia64_intri_res) \
			      : "i" (regnum - _IA64_REG_CR_DCR)); \
		break; \
	case _IA64_REG_SP: \
		asm volatile ("mov %0=sp" : "=r" (ia64_intri_res)); \
		break; \
	default: \
		ia64_bad_param_for_getreg(); \
		break; \
	} \
	ia64_intri_res; \
})
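
/*
 * Usage sketch (hypothetical helper): because the statement
 * expression yields ia64_intri_res, ia64_getreg() can be used
 * directly as an rvalue; this is how the kernel reads the current
 * instruction pointer.
 */
static inline unsigned long ia64_example_current_ip (void)
{
	return ia64_getreg(_IA64_REG_IP);	/* emits "mov rX=ip" */
}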

#define ia64_hint_pause 0

#define ia64_hint(mode) \
({ \
	switch (mode) { \
	case ia64_hint_pause: \
		asm volatile ("hint @pause" ::: "memory"); \
		break; \
	} \
})
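
/*
 * Usage sketch (hypothetical spin loop): "hint @pause" tells the CPU
 * the thread is busy-waiting so it can yield pipeline resources; this
 * is the idiom behind cpu_relax()-style helpers.  The mode must be a
 * compile-time constant so the switch folds away.
 */
static inline void ia64_example_spin_until_set (volatile int *flag)
{
	while (!*flag)
		ia64_hint(ia64_hint_pause);	/* be polite while spinning */
}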

/* Integer values for mux1 instruction */
#define ia64_mux1_brcst	 0
#define ia64_mux1_mix	 8
#define ia64_mux1_shuf	 9
#define ia64_mux1_alt	10
#define ia64_mux1_rev	11

#define ia64_mux1(x, mode) \
({ \
	__u64 ia64_intri_res; \
	\
	switch (mode) { \
	case ia64_mux1_brcst: \
		asm ("mux1 %0=%1,@brcst" : "=r" (ia64_intri_res) : "r" (x)); \
		break; \
	case ia64_mux1_mix: \
		asm ("mux1 %0=%1,@mix" : "=r" (ia64_intri_res) : "r" (x)); \
		break; \
	case ia64_mux1_shuf: \
		asm ("mux1 %0=%1,@shuf" : "=r" (ia64_intri_res) : "r" (x)); \
		break; \
	case ia64_mux1_alt: \
		asm ("mux1 %0=%1,@alt" : "=r" (ia64_intri_res) : "r" (x)); \
		break; \
	case ia64_mux1_rev: \
		asm ("mux1 %0=%1,@rev" : "=r" (ia64_intri_res) : "r" (x)); \
		break; \
	} \
	ia64_intri_res; \
})
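
/*
 * Usage sketch (hypothetical helper): mux1 with @rev reverses the
 * byte order of a 64-bit value in a single instruction, which is how
 * IA-64 typically implements a 64-bit byte swap.
 */
static inline __u64 ia64_example_swab64 (__u64 x)
{
	return ia64_mux1(x, ia64_mux1_rev);	/* byte-reverse x */
}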

#if __GNUC__ >= 4 || (__GNUC__ == 3 && __GNUC_MINOR__ >= 4)
# define ia64_popcnt(x)	__builtin_popcountl(x)
#else
# define ia64_popcnt(x) \
({ \
	__u64 ia64_intri_res; \
	asm ("popcnt %0=%1" : "=r" (ia64_intri_res) : "r" (x)); \
	\
	ia64_intri_res; \
})
#endif
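
/*
 * Usage sketch (hypothetical helper): whichever branch of the #if was
 * taken, ia64_popcnt() returns the number of set bits; here it feeds
 * a trivial parity test.
 */
static inline int ia64_example_parity (__u64 x)
{
	return ia64_popcnt(x) & 1;	/* 1 iff an odd number of bits set */
}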

#define ia64_getf_exp(x) \
({ \
	long ia64_intri_res; \
	\
	asm ("getf.exp %0=%1" : "=r"(ia64_intri_res) : "f"(x)); \
	\
	ia64_intri_res; \
})

#define ia64_shrp(a, b, count) \
({ \
	__u64 ia64_intri_res; \
	asm ("shrp %0=%1,%2,%3" \
	     : "=r"(ia64_intri_res) : "r"(a), "r"(b), "i"(count)); \
	ia64_intri_res; \
})
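
/*
 * Usage sketch (hypothetical helper): shrp extracts 64 bits from the
 * 128-bit concatenation a:b shifted right by count, so passing the
 * same value for both operands yields a rotate right.  count must be
 * a compile-time immediate.
 */
static inline __u64 ia64_example_ror8 (__u64 x)
{
	return ia64_shrp(x, x, 8);	/* rotate x right by 8 bits */
}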

#define ia64_ldfs(regnum, x) \
({ \
	register double __f__ asm ("f"#regnum); \
	asm volatile ("ldfs %0=[%1]" : "=f"(__f__) : "r"(x)); \
})

#define ia64_ldfd(regnum, x) \
({ \
	register double __f__ asm ("f"#regnum); \
	asm volatile ("ldfd %0=[%1]" : "=f"(__f__) : "r"(x)); \
})

#define ia64_ldfe(regnum, x) \
({ \
	register double __f__ asm ("f"#regnum); \
	asm volatile ("ldfe %0=[%1]" : "=f"(__f__) : "r"(x)); \
})

#define ia64_ldf8(regnum, x) \
({ \
	register double __f__ asm ("f"#regnum); \
	asm volatile ("ldf8 %0=[%1]" : "=f"(__f__) : "r"(x)); \
})

#define ia64_ldf_fill(regnum, x) \
({ \
	register double __f__ asm ("f"#regnum); \
	asm volatile ("ldf.fill %0=[%1]" : "=f"(__f__) : "r"(x)); \
})

#define ia64_st4_rel_nta(m, val) \
({ \
	asm volatile ("st4.rel.nta [%0] = %1\n\t" :: "r"(m), "r"(val)); \
})

#define ia64_stfs(x, regnum) \
({ \
	register double __f__ asm ("f"#regnum); \
	asm volatile ("stfs [%0]=%1" :: "r"(x), "f"(__f__) : "memory"); \
})

#define ia64_stfd(x, regnum) \
({ \
	register double __f__ asm ("f"#regnum); \
	asm volatile ("stfd [%0]=%1" :: "r"(x), "f"(__f__) : "memory"); \
})

#define ia64_stfe(x, regnum) \
({ \
	register double __f__ asm ("f"#regnum); \
	asm volatile ("stfe [%0]=%1" :: "r"(x), "f"(__f__) : "memory"); \
})

#define ia64_stf8(x, regnum) \
({ \
	register double __f__ asm ("f"#regnum); \
	asm volatile ("stf8 [%0]=%1" :: "r"(x), "f"(__f__) : "memory"); \
})

#define ia64_stf_spill(x, regnum) \
({ \
	register double __f__ asm ("f"#regnum); \
	asm volatile ("stf.spill [%0]=%1" :: "r"(x), "f"(__f__) : "memory"); \
})
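
/*
 * Usage sketch (illustrative only): the ldf/stf macros name a *fixed*
 * floating-point register, so regnum must be a literal and the
 * load/store pair must agree on it.  This relies on the physical
 * contents of f6 surviving between the two asm statements, which is
 * how the kernel's own users of these macros (e.g. fpswa glue) work;
 * the helper name and the choice of f6 are assumptions.
 */
static inline void ia64_example_copy_fp64 (void *dst, void *src)
{
	ia64_ldfd(6, src);	/* f6 = 64-bit FP value at src */
	ia64_stfd(dst, 6);	/* store f6 to dst */
}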

#define ia64_fetchadd4_acq(p, inc) \
({ \
	__u64 ia64_intri_res; \
	asm volatile ("fetchadd4.acq %0=[%1],%2" \
		      : "=r"(ia64_intri_res) : "r"(p), "i" (inc) \
		      : "memory"); \
	\
	ia64_intri_res; \
})

#define ia64_fetchadd4_rel(p, inc) \
({ \
	__u64 ia64_intri_res; \
	asm volatile ("fetchadd4.rel %0=[%1],%2" \
		      : "=r"(ia64_intri_res) : "r"(p), "i" (inc) \
		      : "memory"); \
	\
	ia64_intri_res; \
})

#define ia64_fetchadd8_acq(p, inc) \
({ \
	__u64 ia64_intri_res; \
	asm volatile ("fetchadd8.acq %0=[%1],%2" \
		      : "=r"(ia64_intri_res) : "r"(p), "i" (inc) \
		      : "memory"); \
	\
	ia64_intri_res; \
})

#define ia64_fetchadd8_rel(p, inc) \
({ \
	__u64 ia64_intri_res; \
	asm volatile ("fetchadd8.rel %0=[%1],%2" \
		      : "=r"(ia64_intri_res) : "r"(p), "i" (inc) \
		      : "memory"); \
	\
	ia64_intri_res; \
})
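
/*
 * Usage sketch (hypothetical counter): fetchadd atomically adds an
 * immediate (the instruction only encodes -16, -8, -4, -1, 1, 4, 8,
 * 16) and returns the *old* value, here with acquire semantics.
 */
static inline __u32 ia64_example_counter_inc (volatile __u32 *ctr)
{
	return (__u32) ia64_fetchadd4_acq(ctr, 1);	/* value before the add */
}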

#define ia64_xchg1(ptr,x) \
({ \
	__u64 ia64_intri_res; \
	asm volatile ("xchg1 %0=[%1],%2" \
		      : "=r" (ia64_intri_res) : "r" (ptr), "r" (x) : "memory"); \
	ia64_intri_res; \
})

#define ia64_xchg2(ptr,x) \
({ \
	__u64 ia64_intri_res; \
	asm volatile ("xchg2 %0=[%1],%2" : "=r" (ia64_intri_res) \
		      : "r" (ptr), "r" (x) : "memory"); \
	ia64_intri_res; \
})

#define ia64_xchg4(ptr,x) \
({ \
	__u64 ia64_intri_res; \
	asm volatile ("xchg4 %0=[%1],%2" : "=r" (ia64_intri_res) \
		      : "r" (ptr), "r" (x) : "memory"); \
	ia64_intri_res; \
})

#define ia64_xchg8(ptr,x) \
({ \
	__u64 ia64_intri_res; \
	asm volatile ("xchg8 %0=[%1],%2" : "=r" (ia64_intri_res) \
		      : "r" (ptr), "r" (x) : "memory"); \
	ia64_intri_res; \
})
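
/*
 * Usage sketch (hypothetical test-and-set lock): xchg has implicit
 * acquire semantics on IA-64, so atomically swapping in 1 both claims
 * the lock and orders the critical section after the swap.
 */
static inline int ia64_example_trylock (volatile __u32 *lock)
{
	return ia64_xchg4(lock, 1) == 0;	/* nonzero if we took the lock */
}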

#define ia64_cmpxchg1_acq(ptr, new, old) \
({ \
	__u64 ia64_intri_res; \
	asm volatile ("mov ar.ccv=%0;;" :: "rO"(old)); \
	asm volatile ("cmpxchg1.acq %0=[%1],%2,ar.ccv" \
		      : "=r"(ia64_intri_res) : "r"(ptr), "r"(new) : "memory"); \
	ia64_intri_res; \
})

#define ia64_cmpxchg1_rel(ptr, new, old) \
({ \
	__u64 ia64_intri_res; \
	asm volatile ("mov ar.ccv=%0;;" :: "rO"(old)); \
	asm volatile ("cmpxchg1.rel %0=[%1],%2,ar.ccv" \
		      : "=r"(ia64_intri_res) : "r"(ptr), "r"(new) : "memory"); \
	ia64_intri_res; \
})

#define ia64_cmpxchg2_acq(ptr, new, old) \
({ \
	__u64 ia64_intri_res; \
	asm volatile ("mov ar.ccv=%0;;" :: "rO"(old)); \
	asm volatile ("cmpxchg2.acq %0=[%1],%2,ar.ccv" \
		      : "=r"(ia64_intri_res) : "r"(ptr), "r"(new) : "memory"); \
	ia64_intri_res; \
})

#define ia64_cmpxchg2_rel(ptr, new, old) \
({ \
	__u64 ia64_intri_res; \
	asm volatile ("mov ar.ccv=%0;;" :: "rO"(old)); \
	asm volatile ("cmpxchg2.rel %0=[%1],%2,ar.ccv" \
		      : "=r"(ia64_intri_res) : "r"(ptr), "r"(new) : "memory"); \
	ia64_intri_res; \
})

#define ia64_cmpxchg4_acq(ptr, new, old) \
({ \
	__u64 ia64_intri_res; \
	asm volatile ("mov ar.ccv=%0;;" :: "rO"(old)); \
	asm volatile ("cmpxchg4.acq %0=[%1],%2,ar.ccv" \
		      : "=r"(ia64_intri_res) : "r"(ptr), "r"(new) : "memory"); \
	ia64_intri_res; \
})

#define ia64_cmpxchg4_rel(ptr, new, old) \
({ \
	__u64 ia64_intri_res; \
	asm volatile ("mov ar.ccv=%0;;" :: "rO"(old)); \
	asm volatile ("cmpxchg4.rel %0=[%1],%2,ar.ccv" \
		      : "=r"(ia64_intri_res) : "r"(ptr), "r"(new) : "memory"); \
	ia64_intri_res; \
})

#define ia64_cmpxchg8_acq(ptr, new, old) \
({ \
	__u64 ia64_intri_res; \
	asm volatile ("mov ar.ccv=%0;;" :: "rO"(old)); \
	asm volatile ("cmpxchg8.acq %0=[%1],%2,ar.ccv" \
		      : "=r"(ia64_intri_res) : "r"(ptr), "r"(new) : "memory"); \
	ia64_intri_res; \
})

#define ia64_cmpxchg8_rel(ptr, new, old) \
({ \
	__u64 ia64_intri_res; \
	asm volatile ("mov ar.ccv=%0;;" :: "rO"(old)); \
	asm volatile ("cmpxchg8.rel %0=[%1],%2,ar.ccv" \
		      : "=r"(ia64_intri_res) : "r"(ptr), "r"(new) : "memory"); \
	ia64_intri_res; \
})
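
/*
 * Usage sketch (hypothetical CAS loop): cmpxchg stores 'new' only if
 * the memory word still equals ar.ccv (loaded from 'old') and always
 * returns the previous contents, which gives the classic retry loop:
 */
static inline void ia64_example_atomic_or (volatile __u64 *p, __u64 bits)
{
	__u64 old, seen;

	do {
		old = *p;
		seen = ia64_cmpxchg8_acq(p, old | bits, old);
	} while (seen != old);		/* lost a race; retry */
}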

#define ia64_mf()	asm volatile ("mf" ::: "memory")
#define ia64_mfa()	asm volatile ("mf.a" ::: "memory")

#define ia64_invala()	asm volatile ("invala" ::: "memory")

#define ia64_thash(addr) \
({ \
	__u64 ia64_intri_res; \
	asm volatile ("thash %0=%1" : "=r"(ia64_intri_res) : "r" (addr)); \
	ia64_intri_res; \
})

/* Note: stray trailing semicolons removed below so these macros
 * expand to exactly one statement and stay safe in if/else bodies. */
#define ia64_srlz_i()	asm volatile (";; srlz.i ;;" ::: "memory")
#define ia64_srlz_d()	asm volatile (";; srlz.d" ::: "memory")

#ifdef HAVE_SERIALIZE_DIRECTIVE
# define ia64_dv_serialize_data()		asm volatile (".serialize.data")
# define ia64_dv_serialize_instruction()	asm volatile (".serialize.instruction")
#else
# define ia64_dv_serialize_data()
# define ia64_dv_serialize_instruction()
#endif

#define ia64_nop(x)	asm volatile ("nop %0" :: "i"(x))

#define ia64_itci(addr)	asm volatile ("itc.i %0;;" :: "r"(addr) : "memory")

#define ia64_itcd(addr)	asm volatile ("itc.d %0;;" :: "r"(addr) : "memory")


#define ia64_itri(trnum, addr) asm volatile ("itr.i itr[%0]=%1" \
					     :: "r"(trnum), "r"(addr) : "memory")

#define ia64_itrd(trnum, addr) asm volatile ("itr.d dtr[%0]=%1" \
					     :: "r"(trnum), "r"(addr) : "memory")

#define ia64_tpa(addr) \
({ \
	__u64 ia64_pa; \
	asm volatile ("tpa %0 = %1" : "=r"(ia64_pa) : "r"(addr) : "memory"); \
	ia64_pa; \
})

#define __ia64_set_dbr(index, val) \
	asm volatile ("mov dbr[%0]=%1" :: "r"(index), "r"(val) : "memory")

#define ia64_set_ibr(index, val) \
	asm volatile ("mov ibr[%0]=%1" :: "r"(index), "r"(val) : "memory")

#define ia64_set_pkr(index, val) \
	asm volatile ("mov pkr[%0]=%1" :: "r"(index), "r"(val) : "memory")

#define ia64_set_pmc(index, val) \
	asm volatile ("mov pmc[%0]=%1" :: "r"(index), "r"(val) : "memory")

#define ia64_set_pmd(index, val) \
	asm volatile ("mov pmd[%0]=%1" :: "r"(index), "r"(val) : "memory")

#define ia64_set_rr(index, val) \
	asm volatile ("mov rr[%0]=%1" :: "r"(index), "r"(val) : "memory")

#define ia64_get_cpuid(index) \
({ \
	__u64 ia64_intri_res; \
	asm volatile ("mov %0=cpuid[%r1]" : "=r"(ia64_intri_res) : "rO"(index)); \
	ia64_intri_res; \
})
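
/*
 * Usage sketch (illustrative): CPUID registers 0 and 1 hold the
 * 16-byte ASCII vendor string on IA-64; the helper and buffer names
 * are hypothetical.
 */
static inline void ia64_example_vendor (unsigned char *buf /* 16 bytes */)
{
	*(__u64 *) (buf + 0) = ia64_get_cpuid(0);
	*(__u64 *) (buf + 8) = ia64_get_cpuid(1);
}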

#define __ia64_get_dbr(index) \
({ \
	__u64 ia64_intri_res; \
	asm volatile ("mov %0=dbr[%1]" : "=r"(ia64_intri_res) : "r"(index)); \
	ia64_intri_res; \
})

#define ia64_get_ibr(index) \
({ \
	__u64 ia64_intri_res; \
	asm volatile ("mov %0=ibr[%1]" : "=r"(ia64_intri_res) : "r"(index)); \
	ia64_intri_res; \
})

#define ia64_get_pkr(index) \
({ \
	__u64 ia64_intri_res; \
	asm volatile ("mov %0=pkr[%1]" : "=r"(ia64_intri_res) : "r"(index)); \
	ia64_intri_res; \
})

#define ia64_get_pmc(index) \
({ \
	__u64 ia64_intri_res; \
	asm volatile ("mov %0=pmc[%1]" : "=r"(ia64_intri_res) : "r"(index)); \
	ia64_intri_res; \
})

#define ia64_get_pmd(index) \
({ \
	__u64 ia64_intri_res; \
	asm volatile ("mov %0=pmd[%1]" : "=r"(ia64_intri_res) : "r"(index)); \
	ia64_intri_res; \
})

#define ia64_get_rr(index) \
({ \
	__u64 ia64_intri_res; \
	asm volatile ("mov %0=rr[%1]" : "=r"(ia64_intri_res) : "r" (index)); \
	ia64_intri_res; \
})

#define ia64_fc(addr)	asm volatile ("fc %0" :: "r"(addr) : "memory")

#define ia64_sync_i()	asm volatile (";; sync.i" ::: "memory")

#define ia64_ssm(mask)	asm volatile ("ssm %0" :: "i"((mask)) : "memory")
#define ia64_rsm(mask)	asm volatile ("rsm %0" :: "i"((mask)) : "memory")
#define ia64_sum(mask)	asm volatile ("sum %0" :: "i"((mask)) : "memory")
#define ia64_rum(mask)	asm volatile ("rum %0" :: "i"((mask)) : "memory")

#define ia64_ptce(addr)	asm volatile ("ptc.e %0" :: "r"(addr))

#define ia64_ptcga(addr, size) \
do { \
	asm volatile ("ptc.ga %0,%1" :: "r"(addr), "r"(size) : "memory"); \
	ia64_dv_serialize_data(); \
} while (0)

#define ia64_ptcl(addr, size) \
do { \
	asm volatile ("ptc.l %0,%1" :: "r"(addr), "r"(size) : "memory"); \
	ia64_dv_serialize_data(); \
} while (0)

#define ia64_ptri(addr, size) \
	asm volatile ("ptr.i %0,%1" :: "r"(addr), "r"(size) : "memory")

#define ia64_ptrd(addr, size) \
	asm volatile ("ptr.d %0,%1" :: "r"(addr), "r"(size) : "memory")

/* Values for lfhint in ia64_lfetch and ia64_lfetch_fault */

#define ia64_lfhint_none	0
#define ia64_lfhint_nt1		1
#define ia64_lfhint_nt2		2
#define ia64_lfhint_nta		3

#define ia64_lfetch(lfhint, y) \
({ \
	switch (lfhint) { \
	case ia64_lfhint_none: \
		asm volatile ("lfetch [%0]" : : "r"(y)); \
		break; \
	case ia64_lfhint_nt1: \
		asm volatile ("lfetch.nt1 [%0]" : : "r"(y)); \
		break; \
	case ia64_lfhint_nt2: \
		asm volatile ("lfetch.nt2 [%0]" : : "r"(y)); \
		break; \
	case ia64_lfhint_nta: \
		asm volatile ("lfetch.nta [%0]" : : "r"(y)); \
		break; \
	} \
})

#define ia64_lfetch_excl(lfhint, y) \
({ \
	switch (lfhint) { \
	case ia64_lfhint_none: \
		asm volatile ("lfetch.excl [%0]" :: "r"(y)); \
		break; \
	case ia64_lfhint_nt1: \
		asm volatile ("lfetch.excl.nt1 [%0]" :: "r"(y)); \
		break; \
	case ia64_lfhint_nt2: \
		asm volatile ("lfetch.excl.nt2 [%0]" :: "r"(y)); \
		break; \
	case ia64_lfhint_nta: \
		asm volatile ("lfetch.excl.nta [%0]" :: "r"(y)); \
		break; \
	} \
})

#define ia64_lfetch_fault(lfhint, y) \
({ \
	switch (lfhint) { \
	case ia64_lfhint_none: \
		asm volatile ("lfetch.fault [%0]" : : "r"(y)); \
		break; \
	case ia64_lfhint_nt1: \
		asm volatile ("lfetch.fault.nt1 [%0]" : : "r"(y)); \
		break; \
	case ia64_lfhint_nt2: \
		asm volatile ("lfetch.fault.nt2 [%0]" : : "r"(y)); \
		break; \
	case ia64_lfhint_nta: \
		asm volatile ("lfetch.fault.nta [%0]" : : "r"(y)); \
		break; \
	} \
})

#define ia64_lfetch_fault_excl(lfhint, y) \
({ \
	switch (lfhint) { \
	case ia64_lfhint_none: \
		asm volatile ("lfetch.fault.excl [%0]" :: "r"(y)); \
		break; \
	case ia64_lfhint_nt1: \
		asm volatile ("lfetch.fault.excl.nt1 [%0]" :: "r"(y)); \
		break; \
	case ia64_lfhint_nt2: \
		asm volatile ("lfetch.fault.excl.nt2 [%0]" :: "r"(y)); \
		break; \
	case ia64_lfhint_nta: \
		asm volatile ("lfetch.fault.excl.nta [%0]" :: "r"(y)); \
		break; \
	} \
})
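
/*
 * Usage sketch (hypothetical prefetch loop): the hint must be a
 * compile-time constant so the switch folds to one lfetch variant.
 * .nta marks the line non-temporal; the .fault forms may raise a
 * fault instead of being silently dropped.  The 128-byte stride is an
 * assumption about the cache-line size, not a documented constant.
 */
static inline void ia64_example_prefetch_range (const char *p, unsigned long len)
{
	const char *end = p + len;

	for (; p < end; p += 128)			/* assumed line size */
		ia64_lfetch(ia64_lfhint_nta, p);
}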

#define ia64_intrin_local_irq_restore(x) \
do { \
	asm volatile (";;   cmp.ne p6,p7=%0,r0;;" \
		      "(p6) ssm psr.i;" \
		      "(p7) rsm psr.i;;" \
		      "(p6) srlz.d" \
		      :: "r"((x)) : "p6", "p7", "memory"); \
} while (0)
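
/*
 * Usage sketch (illustrative save/restore pair): psr.i is bit 14 of
 * the PSR (IA64_PSR_I), and the restore macro re-enables interrupts
 * only if the saved flags had it set, serializing with srlz.d.  The
 * helper name is hypothetical.
 */
static inline void ia64_example_irq_section (void)
{
	unsigned long flags = ia64_getreg(_IA64_REG_PSR);

	ia64_rsm(1 << 14);		/* mask interrupts (psr.i) */
	ia64_srlz_d();
	/* ... critical section ... */
	ia64_intrin_local_irq_restore(flags & (1UL << 14));
}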

#endif /* _ASM_IA64_GCC_INTRIN_H */