/*
 * include/asm-xtensa/system.h
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License. See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 2001 - 2005 Tensilica Inc.
 */

#ifndef _XTENSA_SYSTEM_H
#define _XTENSA_SYSTEM_H

#include <linux/stringify.h>

#include <asm/processor.h>

/* interrupt control */

#define local_save_flags(x) \
        __asm__ __volatile__ ("rsr %0,"__stringify(PS) : "=a" (x))
#define local_irq_restore(x) do { \
        __asm__ __volatile__ ("wsr %0, "__stringify(PS)" ; rsync" \
                              :: "a" (x) : "memory"); } while(0)
#define local_irq_save(x) do { \
        __asm__ __volatile__ ("rsil %0, "__stringify(LOCKLEVEL) \
                              : "=a" (x) :: "memory"); } while(0)

static inline void local_irq_disable(void)
{
        unsigned long flags;
        __asm__ __volatile__ ("rsil %0, "__stringify(LOCKLEVEL)
                              : "=a" (flags) :: "memory");
}

static inline void local_irq_enable(void)
{
        unsigned long flags;
        __asm__ __volatile__ ("rsil %0, 0" : "=a" (flags) :: "memory");
}

static inline int irqs_disabled(void)
{
        unsigned long flags;
        local_save_flags(flags);
        return flags & 0xf;
}

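/*
 * Usage sketch (illustrative only, not part of the original header):
 * a caller saves the interrupt state around a critical section and
 * restores it afterwards. The names below are hypothetical:
 *
 *	static int pending_events;
 *
 *	static void queue_event(void)
 *	{
 *		unsigned long flags;
 *
 *		local_irq_save(flags);
 *		pending_events++;
 *		local_irq_restore(flags);
 *	}
 *
 * local_irq_save() raises PS.INTLEVEL to LOCKLEVEL and leaves the old
 * PS in flags; local_irq_restore() writes it back and rsyncs.
 */
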
#define smp_read_barrier_depends() do { } while(0)
#define read_barrier_depends() do { } while(0)

#define mb()  barrier()
#define rmb() mb()
#define wmb() mb()

#ifdef CONFIG_SMP
#error smp_* not defined
#else
#define smp_mb()        barrier()
#define smp_rmb()       barrier()
#define smp_wmb()       barrier()
#endif

#define set_mb(var, value)      do { var = value; mb(); } while (0)
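
/*
 * Usage sketch (illustrative only, not part of the original header):
 * set_mb() assigns a value and then issues a full memory barrier,
 * e.g. to publish a flag before a subsequent wakeup. The names below
 * are hypothetical:
 *
 *	static int data_ready;
 *
 *	static void publish(void)
 *	{
 *		set_mb(data_ready, 1);
 *	}
 */
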
#if !defined (__ASSEMBLY__)

/*
 * switch_to(n) should switch tasks to task nr n, first
 * checking that n isn't the current task, in which case it does nothing.
 */
extern void *_switch_to(void *last, void *next);

#endif /* __ASSEMBLY__ */

#define switch_to(prev,next,last)		\
do {						\
        (last) = _switch_to(prev, next);	\
} while(0)

/*
 * cmpxchg
 */

static inline unsigned long
__cmpxchg_u32(volatile int *p, int old, int new)
{
        /* rsil saves the old PS into a15 and masks interrupts to
         * LOCKLEVEL; *p is compared with 'old' and 'new' is stored
         * only on a match; wsr/rsync restore the saved PS. Returns
         * the value read from *p.
         */
        __asm__ __volatile__("rsil a15, "__stringify(LOCKLEVEL)"\n\t"
                             "l32i %0, %1, 0\n\t"
                             "bne %0, %2, 1f\n\t"
                             "s32i %3, %1, 0\n\t"
                             "1:\n\t"
                             "wsr a15, "__stringify(PS)"\n\t"
                             "rsync\n\t"
                             : "=&a" (old)
                             : "a" (p), "a" (old), "r" (new)
                             : "a15", "memory");
        return old;
}

/* This function doesn't exist, so you'll get a linker error
 * if something tries to do an invalid cmpxchg(). */

extern void __cmpxchg_called_with_bad_pointer(void);

static inline unsigned long
__cmpxchg(volatile void *ptr, unsigned long old, unsigned long new, int size)
{
        switch (size) {
        case 4:  return __cmpxchg_u32(ptr, old, new);
        default: __cmpxchg_called_with_bad_pointer();
                 return old;
        }
}

#define cmpxchg(ptr,o,n)						\
        ({ __typeof__(*(ptr)) _o_ = (o);				\
           __typeof__(*(ptr)) _n_ = (n);				\
           (__typeof__(*(ptr))) __cmpxchg((ptr), (unsigned long)_o_,	\
                                (unsigned long)_n_, sizeof (*(ptr)));	\
        })

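/*
 * Usage sketch (illustrative only, not part of the original header):
 * cmpxchg() is typically used in a retry loop that re-reads the old
 * value until the compare-and-store succeeds. Hypothetical example of
 * a "get unless zero" reference count:
 *
 *	static int refs;
 *
 *	static int ref_get_unless_zero(void)
 *	{
 *		int old;
 *
 *		do {
 *			old = refs;
 *			if (old == 0)
 *				return 0;
 *		} while (cmpxchg(&refs, old, old + 1) != old);
 *		return 1;
 *	}
 */
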
#include <asm-generic/cmpxchg-local.h>

static inline unsigned long __cmpxchg_local(volatile void *ptr,
                                            unsigned long old,
                                            unsigned long new, int size)
{
        switch (size) {
        case 4:
                return __cmpxchg_u32(ptr, old, new);
        default:
                return __cmpxchg_local_generic(ptr, old, new, size);
        }
}

/*
 * cmpxchg_local and cmpxchg64_local are atomic wrt current CPU. Always make
 * them available.
 */
#define cmpxchg_local(ptr, o, n)					\
        ((__typeof__(*(ptr)))__cmpxchg_local((ptr), (unsigned long)(o),	\
                        (unsigned long)(n), sizeof(*(ptr))))
#define cmpxchg64_local(ptr, o, n) __cmpxchg64_local_generic((ptr), (o), (n))

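/*
 * Usage sketch (illustrative only, not part of the original header):
 * cmpxchg_local() only guarantees atomicity against the current CPU,
 * which suits data that is never updated cross-CPU. The names are
 * hypothetical:
 *
 *	static unsigned long first_seen;
 *
 *	static void record_first(unsigned long v)
 *	{
 *		cmpxchg_local(&first_seen, 0, v);
 *	}
 */
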
/*
 * xchg_u32
 *
 * Note that a15 is used here because the register allocation
 * done by the compiler is not guaranteed and a window overflow
 * must not occur between the rsil and wsr instructions. By using
 * a15 in the rsil, the machine is guaranteed to be in a state
 * where no register reference will cause an overflow.
 */

static inline unsigned long xchg_u32(volatile int * m, unsigned long val)
{
        unsigned long tmp;
        __asm__ __volatile__("rsil a15, "__stringify(LOCKLEVEL)"\n\t"
                             "l32i %0, %1, 0\n\t"
                             "s32i %2, %1, 0\n\t"
                             "wsr a15, "__stringify(PS)"\n\t"
                             "rsync\n\t"
                             : "=&a" (tmp)
                             : "a" (m), "a" (val)
                             : "a15", "memory");
        return tmp;
}

#define xchg(ptr,x) ((__typeof__(*(ptr)))__xchg((unsigned long)(x),(ptr),sizeof(*(ptr))))

/*
 * This only works if the compiler isn't horribly bad at optimizing.
 * gcc-2.5.8 reportedly can't handle this, but I define that one to
 * be dead anyway.
 */

extern void __xchg_called_with_bad_pointer(void);

static inline unsigned long
__xchg(unsigned long x, volatile void * ptr, int size)
{
        switch (size) {
        case 4:
                return xchg_u32(ptr, x);
        }
        __xchg_called_with_bad_pointer();
        return x;
}

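/*
 * Usage sketch (illustrative only, not part of the original header):
 * xchg() atomically swaps in a new value and returns the old one,
 * which gives a simple test-and-set flag. The names are hypothetical:
 *
 *	static volatile int busy;
 *
 *	static int try_claim(void)
 *	{
 *		return xchg(&busy, 1) == 0;
 *	}
 *
 *	static void release(void)
 *	{
 *		xchg(&busy, 0);
 *	}
 */
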
extern void set_except_vector(int n, void *addr);

static inline void spill_registers(void)
{
        unsigned int a0, ps;

        __asm__ __volatile__ (
                "movi   a14," __stringify (PS_EXCM_BIT) " | 1\n\t"
                "mov    a12, a0\n\t"
                "rsr    a13," __stringify(SAR) "\n\t"
                "xsr    a14," __stringify(PS) "\n\t"
                "movi   a0, _spill_registers\n\t"
                "rsync\n\t"
                "callx0 a0\n\t"
                "mov    a0, a12\n\t"
                "wsr    a13," __stringify(SAR) "\n\t"
                "wsr    a14," __stringify(PS) "\n\t"
                :: "a" (&a0), "a" (&ps)
                : "a2", "a3", "a4", "a7", "a11", "a12", "a13", "a14", "a15", "memory");
}
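
/*
 * Note added for clarity: spill_registers() calls the _spill_registers
 * assembly routine with PS.EXCM set and interrupts masked to level 1,
 * forcing all live register windows out to their stack frames. A
 * hypothetical stack walker would call it before reading parent frames
 * from memory:
 *
 *	static void walk_stack(unsigned long *sp)
 *	{
 *		spill_registers();
 *		show_frames(sp);
 *	}
 *
 * where show_frames() is a hypothetical helper that parses the now
 * memory-resident frames.
 */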

#define arch_align_stack(x) (x)

#endif /* _XTENSA_SYSTEM_H */