#ifndef _ASM_M32R_SYSTEM_H
#define _ASM_M32R_SYSTEM_H

/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License. See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 2001 by Hiroyuki Kondo, Hirokazu Takata, and Hitoshi Yamamoto
 * Copyright (C) 2004 Hirokazu Takata <takata at linux-m32r.org>
 */

#include <linux/config.h>
#include <asm/assembler.h>

#ifdef __KERNEL__

/*
 * switch_to(prev, next) should switch from task `prev' to `next';
 * `prev' will never be the same as `next'.
 *
 * `next' and `prev' should be struct task_struct, but it isn't always defined
 */

#ifndef CONFIG_SMP
#define prepare_to_switch() do { } while(0)
#endif /* not CONFIG_SMP */

#define switch_to(prev, next, last) do { \
        register unsigned long arg0 __asm__ ("r0") = (unsigned long)prev; \
        register unsigned long arg1 __asm__ ("r1") = (unsigned long)next; \
        register unsigned long *oldsp __asm__ ("r2") = &(prev->thread.sp); \
        register unsigned long *newsp __asm__ ("r3") = &(next->thread.sp); \
        register unsigned long *oldlr __asm__ ("r4") = &(prev->thread.lr); \
        register unsigned long *newlr __asm__ ("r5") = &(next->thread.lr); \
        register struct task_struct *__last __asm__ ("r6"); \
        __asm__ __volatile__ ( \
                "st r8, @-r15 \n\t" \
                "st r9, @-r15 \n\t" \
                "st r10, @-r15 \n\t" \
                "st r11, @-r15 \n\t" \
                "st r12, @-r15 \n\t" \
                "st r13, @-r15 \n\t" \
                "st r14, @-r15 \n\t" \
                "seth r14, #high(1f) \n\t" \
                "or3 r14, r14, #low(1f) \n\t" \
                "st r14, @r4 ; store old LR \n\t" \
                "st r15, @r2 ; store old SP \n\t" \
                "ld r15, @r3 ; load new SP \n\t" \
                "st r0, @-r15 ; store 'prev' onto new stack \n\t" \
                "ld r14, @r5 ; load new LR \n\t" \
                "jmp r14 \n\t" \
                ".fillinsn \n " \
                "1: \n\t" \
                "ld r6, @r15+ ; load 'prev' from new stack \n\t" \
                "ld r14, @r15+ \n\t" \
                "ld r13, @r15+ \n\t" \
                "ld r12, @r15+ \n\t" \
                "ld r11, @r15+ \n\t" \
                "ld r10, @r15+ \n\t" \
                "ld r9, @r15+ \n\t" \
                "ld r8, @r15+ \n\t" \
                : "=&r" (__last) \
                : "r" (arg0), "r" (arg1), "r" (oldsp), "r" (newsp), \
                  "r" (oldlr), "r" (newlr) \
                : "memory" \
        ); \
        last = __last; \
} while(0)
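
/*
 * Typical invocation (illustrative sketch only; the caller lives in the
 * scheduler, not in this header): context_switch() calls the macro
 * roughly as
 *
 *      switch_to(prev, next, prev);
 *
 * so that, once the new task resumes on this stack, the third argument
 * reports which task was actually switched away from.
 */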

/*
 * On SMP systems, when the scheduler does migration-cost autodetection,
 * it needs a way to flush as much of the CPU's caches as possible.
 *
 * TODO: fill this in!
 */
static inline void sched_cacheflush(void)
{
}

/* Interrupt Control */
#if !defined(CONFIG_CHIP_M32102) && !defined(CONFIG_CHIP_M32104)
#define local_irq_enable() \
        __asm__ __volatile__ ("setpsw #0x40 -> nop": : :"memory")
#define local_irq_disable() \
        __asm__ __volatile__ ("clrpsw #0x40 -> nop": : :"memory")
#else /* CONFIG_CHIP_M32102 || CONFIG_CHIP_M32104 */
static inline void local_irq_enable(void)
{
        unsigned long tmpreg;
        __asm__ __volatile__(
                "mvfc %0, psw; \n\t"
                "or3 %0, %0, #0x0040; \n\t"
                "mvtc %0, psw; \n\t"
                : "=&r" (tmpreg) : : "cbit", "memory");
}

static inline void local_irq_disable(void)
{
        unsigned long tmpreg0, tmpreg1;
        __asm__ __volatile__(
                "ld24 %0, #0 ; Use 32-bit insn. \n\t"
                "mvfc %1, psw ; No interrupt can be accepted here. \n\t"
                "mvtc %0, psw \n\t"
                "and3 %0, %1, #0xffbf \n\t"
                "mvtc %0, psw \n\t"
                : "=&r" (tmpreg0), "=&r" (tmpreg1) : : "cbit", "memory");
}
#endif /* CONFIG_CHIP_M32102 || CONFIG_CHIP_M32104 */

#define local_save_flags(x) \
        __asm__ __volatile__("mvfc %0,psw" : "=r"(x) : /* no input */)

#define local_irq_restore(x) \
        __asm__ __volatile__("mvtc %0,psw" : /* no outputs */ \
                : "r" (x) : "cbit", "memory")

#if !(defined(CONFIG_CHIP_M32102) || defined(CONFIG_CHIP_M32104))
#define local_irq_save(x) \
        __asm__ __volatile__( \
                "mvfc %0, psw; \n\t" \
                "clrpsw #0x40 -> nop; \n\t" \
                : "=r" (x) : /* no input */ : "memory")
#else /* CONFIG_CHIP_M32102 || CONFIG_CHIP_M32104 */
#define local_irq_save(x) \
        ({ \
                unsigned long tmpreg; \
                __asm__ __volatile__( \
                        "ld24 %1, #0 \n\t" \
                        "mvfc %0, psw \n\t" \
                        "mvtc %1, psw \n\t" \
                        "and3 %1, %0, #0xffbf \n\t" \
                        "mvtc %1, psw \n\t" \
                        : "=r" (x), "=&r" (tmpreg) \
                        : : "cbit", "memory"); \
        })
#endif /* CONFIG_CHIP_M32102 || CONFIG_CHIP_M32104 */
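
/*
 * Usual pairing of the macros above (illustrative sketch only): save the
 * PSW, run a short section with interrupts disabled, then restore the
 * saved state so that nested use works.
 *
 *      unsigned long flags;
 *
 *      local_irq_save(flags);
 *      ...update data shared with an interrupt handler...
 *      local_irq_restore(flags);
 */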

#define irqs_disabled() \
        ({ \
                unsigned long flags; \
                local_save_flags(flags); \
                !(flags & 0x40); \
        })

#define nop() __asm__ __volatile__ ("nop" : : )

#define xchg(ptr,x) \
        ((__typeof__(*(ptr)))__xchg((unsigned long)(x),(ptr),sizeof(*(ptr))))

#define tas(ptr) (xchg((ptr),1))

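/*
 * Illustrative sketch only: tas() is a test-and-set built on xchg(), so
 * the returned old value says whether the word was already non-zero.
 * The lock word below is hypothetical, not something this header defines.
 *
 *      static volatile int mylock;
 *
 *      while (tas(&mylock) != 0)
 *              cpu_relax();            ... spin until we stored the 1 first
 *      ...critical section...
 *      mylock = 0;
 */
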
#ifdef CONFIG_SMP
extern void  __xchg_called_with_bad_pointer(void);
#endif

#ifdef CONFIG_CHIP_M32700_TS1
#define DCACHE_CLEAR(reg0, reg1, addr) \
        "seth "reg1", #high(dcache_dummy); \n\t" \
        "or3 "reg1", "reg1", #low(dcache_dummy); \n\t" \
        "lock "reg0", @"reg1"; \n\t" \
        "add3 "reg0", "addr", #0x1000; \n\t" \
        "ld "reg0", @"reg0"; \n\t" \
        "add3 "reg0", "addr", #0x2000; \n\t" \
        "ld "reg0", @"reg0"; \n\t" \
        "unlock "reg0", @"reg1"; \n\t"
/* FIXME: This workaround code cannot handle kernel modules
 * correctly under an SMP environment.
 */
#else /* CONFIG_CHIP_M32700_TS1 */
#define DCACHE_CLEAR(reg0, reg1, addr)
#endif /* CONFIG_CHIP_M32700_TS1 */

static __inline__ unsigned long __xchg(unsigned long x, volatile void * ptr,
        int size)
{
        unsigned long flags;
        unsigned long tmp = 0;

        local_irq_save(flags);

        switch (size) {
#ifndef CONFIG_SMP
        case 1:
                __asm__ __volatile__ (
                        "ldb %0, @%2 \n\t"
                        "stb %1, @%2 \n\t"
                        : "=&r" (tmp) : "r" (x), "r" (ptr) : "memory");
                break;
        case 2:
                __asm__ __volatile__ (
                        "ldh %0, @%2 \n\t"
                        "sth %1, @%2 \n\t"
                        : "=&r" (tmp) : "r" (x), "r" (ptr) : "memory");
                break;
        case 4:
                __asm__ __volatile__ (
                        "ld %0, @%2 \n\t"
                        "st %1, @%2 \n\t"
                        : "=&r" (tmp) : "r" (x), "r" (ptr) : "memory");
                break;
#else /* CONFIG_SMP */
        case 4:
                __asm__ __volatile__ (
                        DCACHE_CLEAR("%0", "r4", "%2")
                        "lock %0, @%2; \n\t"
                        "unlock %1, @%2; \n\t"
                        : "=&r" (tmp) : "r" (x), "r" (ptr)
                        : "memory"
#ifdef CONFIG_CHIP_M32700_TS1
                        , "r4"
#endif /* CONFIG_CHIP_M32700_TS1 */
                );
                break;
        default:
                __xchg_called_with_bad_pointer();
#endif /* CONFIG_SMP */
        }

        local_irq_restore(flags);

        return (tmp);
}

#define __HAVE_ARCH_CMPXCHG 1

static __inline__ unsigned long
__cmpxchg_u32(volatile unsigned int *p, unsigned int old, unsigned int new)
{
        unsigned long flags;
        unsigned int retval;

        local_irq_save(flags);
        __asm__ __volatile__ (
                DCACHE_CLEAR("%0", "r4", "%1")
                M32R_LOCK" %0, @%1; \n"
                " bne %0, %2, 1f; \n"
                M32R_UNLOCK" %3, @%1; \n"
                " bra 2f; \n"
                " .fillinsn \n"
                "1:"
                M32R_UNLOCK" %2, @%1; \n"
                " .fillinsn \n"
                "2:"
                : "=&r" (retval)
                : "r" (p), "r" (old), "r" (new)
                : "cbit", "memory"
#ifdef CONFIG_CHIP_M32700_TS1
                , "r4"
#endif /* CONFIG_CHIP_M32700_TS1 */
        );
        local_irq_restore(flags);

        return retval;
}

/* This function doesn't exist, so you'll get a linker error
   if something tries to do an invalid cmpxchg(). */
extern void __cmpxchg_called_with_bad_pointer(void);

static __inline__ unsigned long
__cmpxchg(volatile void *ptr, unsigned long old, unsigned long new, int size)
{
        switch (size) {
        case 4:
                return __cmpxchg_u32(ptr, old, new);
#if 0 /* we don't have __cmpxchg_u64 */
        case 8:
                return __cmpxchg_u64(ptr, old, new);
#endif /* 0 */
        }
        __cmpxchg_called_with_bad_pointer();
        return old;
}

#define cmpxchg(ptr,o,n) \
        ({ \
                __typeof__(*(ptr)) _o_ = (o); \
                __typeof__(*(ptr)) _n_ = (n); \
                (__typeof__(*(ptr))) __cmpxchg((ptr), (unsigned long)_o_, \
                        (unsigned long)_n_, sizeof(*(ptr))); \
        })
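
/*
 * Sketch of the usual compare-and-swap retry loop built on cmpxchg();
 * the pointer "p" and the increment are illustrative only:
 *
 *      unsigned int *p = ...;          ... a word updated concurrently
 *      unsigned int old, new;
 *
 *      do {
 *              old = *p;
 *              new = old + 1;
 *      } while (cmpxchg(p, old, new) != old);
 */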

#endif /* __KERNEL__ */

/*
 * Memory barrier.
 *
 * mb() prevents loads and stores being reordered across this point.
 * rmb() prevents loads being reordered across this point.
 * wmb() prevents stores being reordered across this point.
 */
#define mb() barrier()
#define rmb() mb()
#define wmb() mb()

/**
 * read_barrier_depends - Flush all pending reads that subsequent reads
 * depend on.
 *
 * No data-dependent reads from memory-like regions are ever reordered
 * over this barrier. All reads preceding this primitive are guaranteed
 * to access memory (but not necessarily other CPUs' caches) before any
 * reads following this primitive that depend on the data returned by
 * any of the preceding reads. This primitive is much lighter weight than
 * rmb() on most CPUs, and is never heavier weight than is
 * rmb().
 *
 * These ordering constraints are respected by both the local CPU
 * and the compiler.
 *
 * Ordering is not guaranteed by anything other than these primitives,
 * not even by data dependencies. See the documentation for
 * memory_barrier() for examples and URLs to more information.
 *
 * For example, the following code would force ordering (the initial
 * value of "a" is zero, "b" is one, and "p" is "&a"):
 *
 * <programlisting>
 *      CPU 0                           CPU 1
 *
 *      b = 2;
 *      memory_barrier();
 *      p = &b;                         q = p;
 *                                      read_barrier_depends();
 *                                      d = *q;
 * </programlisting>
 *
 * because the read of "*q" depends on the read of "p" and these
 * two reads are separated by a read_barrier_depends(). However,
 * the following code, with the same initial values for "a" and "b":
 *
 * <programlisting>
 *      CPU 0                           CPU 1
 *
 *      a = 2;
 *      memory_barrier();
 *      b = 3;                          y = b;
 *                                      read_barrier_depends();
 *                                      x = a;
 * </programlisting>
 *
 * does not enforce ordering, since there is no data dependency between
 * the read of "a" and the read of "b". Therefore, on some CPUs, such
 * as Alpha, "y" could be set to 3 and "x" to 0. Use rmb()
 * in cases like this where there are no data dependencies.
 **/

#define read_barrier_depends() do { } while (0)

#ifdef CONFIG_SMP
#define smp_mb() mb()
#define smp_rmb() rmb()
#define smp_wmb() wmb()
#define smp_read_barrier_depends() read_barrier_depends()
#else
#define smp_mb() barrier()
#define smp_rmb() barrier()
#define smp_wmb() barrier()
#define smp_read_barrier_depends() do { } while (0)
#endif
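
/*
 * Typical pairing of the SMP barriers (illustrative only; "data" and
 * "flag" are example fields, not defined here): the writer publishes the
 * data before setting the flag, the reader checks the flag before
 * consuming the data.
 *
 *      writer                          reader
 *
 *      p->data = val;                  if (p->flag) {
 *      smp_wmb();                              smp_rmb();
 *      p->flag = 1;                            use(p->data);
 *                                      }
 */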

#define set_mb(var, value) do { xchg(&var, value); } while (0)
#define set_wmb(var, value) do { var = value; wmb(); } while (0)

#define arch_align_stack(x) (x)

#endif /* _ASM_M32R_SYSTEM_H */