Commit | Line | Data |
---|---|---|
1da177e4 LT |
#ifndef __ASM_ARM_SYSTEM_H
#define __ASM_ARM_SYSTEM_H

#ifdef __KERNEL__

#include <asm/memory.h>

/*
 * CPU architecture codes as returned by cpu_architecture().  The values
 * are ordered so that numeric comparison (e.g. ">= CPU_ARCH_ARMv6")
 * means "at least this architecture version".
 */
#define CPU_ARCH_UNKNOWN	0
#define CPU_ARCH_ARMv3		1
#define CPU_ARCH_ARMv4		2
#define CPU_ARCH_ARMv4T		3
#define CPU_ARCH_ARMv5		4
#define CPU_ARCH_ARMv5T		5
#define CPU_ARCH_ARMv5TE	6
#define CPU_ARCH_ARMv5TEJ	7
#define CPU_ARCH_ARMv6		8
#define CPU_ARCH_ARMv7		9

/*
 * CR1 bits (CP#15 CR1)
 */
#define CR_M	(1 << 0)	/* MMU enable				*/
#define CR_A	(1 << 1)	/* Alignment abort enable		*/
#define CR_C	(1 << 2)	/* Dcache enable			*/
#define CR_W	(1 << 3)	/* Write buffer enable			*/
#define CR_P	(1 << 4)	/* 32-bit exception handler		*/
#define CR_D	(1 << 5)	/* 32-bit data address range		*/
#define CR_L	(1 << 6)	/* Implementation defined		*/
#define CR_B	(1 << 7)	/* Big endian				*/
#define CR_S	(1 << 8)	/* System MMU protection		*/
#define CR_R	(1 << 9)	/* ROM MMU protection			*/
#define CR_F	(1 << 10)	/* Implementation defined		*/
#define CR_Z	(1 << 11)	/* Implementation defined		*/
#define CR_I	(1 << 12)	/* Icache enable			*/
#define CR_V	(1 << 13)	/* Vectors relocated to 0xffff0000	*/
#define CR_RR	(1 << 14)	/* Round Robin cache replacement	*/
#define CR_L4	(1 << 15)	/* LDR pc can set T bit			*/
#define CR_DT	(1 << 16)	/* NOTE(review): undocumented here — verify against the ARM ARM */
#define CR_IT	(1 << 18)	/* NOTE(review): undocumented here — verify against the ARM ARM */
#define CR_ST	(1 << 19)	/* NOTE(review): undocumented here — verify against the ARM ARM */
#define CR_FI	(1 << 21)	/* Fast interrupt (lower latency mode)	*/
#define CR_U	(1 << 22)	/* Unaligned access operation		*/
#define CR_XP	(1 << 23)	/* Extended page tables			*/
#define CR_VE	(1 << 24)	/* Vectored interrupts			*/

/* Opcode2 selectors for the CP15 c0 ID registers read via read_cpuid(). */
#define CPUID_ID	0
#define CPUID_CACHETYPE	1
#define CPUID_TCM	2
#define CPUID_TLBTYPE	3
#ifdef CONFIG_CPU_CP15
/*
 * Read one of the CP15 c0 identification registers.  "reg" selects the
 * opcode2 field (one of the CPUID_* constants above) and must be a
 * compile-time constant, since it is stringified into the instruction.
 */
#define read_cpuid(reg)							\
	({								\
		unsigned int __val;					\
		asm("mrc	p15, 0, %0, c0, c0, " __stringify(reg)	\
		    : "=r" (__val)					\
		    :							\
		    : "cc");						\
		__val;							\
	})
#else
/* No CP15 on this CPU: fall back to the saved processor_id value. */
#define read_cpuid(reg) (processor_id)
#endif
/*
 * This is used to ensure the compiler did actually allocate the register we
 * asked it for some inline assembly sequences.  Apparently we can't trust
 * the compiler from one version to another so a bit of paranoia won't hurt.
 * This string is meant to be concatenated with the inline asm string and
 * will cause compilation to stop on mismatch.
 * (for details, see gcc PR 15089)
 */
#define __asmeq(x, y)  ".ifnc " x "," y " ; .err ; .endif\n\t"

#ifndef __ASSEMBLY__

#include <linux/linkage.h>
#include <linux/stringify.h>
#include <linux/irqflags.h>
/*
 * The CPU ID never changes at run time, so we might as well tell the
 * compiler that it's constant.  Use this function to read the CPU ID
 * rather than reading processor_id or using read_cpuid() directly.
 */
static inline unsigned int read_cpuid_id(void) __attribute_const__;

static inline unsigned int read_cpuid_id(void)
{
	return read_cpuid(CPUID_ID);
}
/* Place exception entry code in its own section (collected by the linker script). */
#define __exception	__attribute__((section(".exception.text")))

struct thread_info;
struct task_struct;

/* information about the system we're running on */
extern unsigned int system_rev;
extern unsigned int system_serial_low;
extern unsigned int system_serial_high;
extern unsigned int mem_fclk_21285;

struct pt_regs;

/* Report a fatal error with register state; marked noreturn — never comes back. */
void die(const char *msg, struct pt_regs *regs, int err)
		__attribute__((noreturn));

struct siginfo;
void arm_notify_die(const char *str, struct pt_regs *regs, struct siginfo *info,
		unsigned long err, unsigned long trap);

/*
 * Register a fault handler for fault code "nr"; "sig" and "name" are
 * presumably the signal delivered and a human-readable label — confirm
 * against the definition in the fault-handling code.
 */
void hook_fault_code(int nr, int (*fn)(unsigned long, unsigned int,
				       struct pt_regs *),
		     int sig, const char *name);
/*
 * Atomically exchange *ptr with x, returning the old value; dispatches
 * on the operand size to __xchg() below.
 */
#define xchg(ptr,x) \
	((__typeof__(*(ptr)))__xchg((unsigned long)(x),(ptr),sizeof(*(ptr))))

extern asmlinkage void __backtrace(void);
extern asmlinkage void c_backtrace(unsigned long fp, int pmode);

struct mm_struct;
extern void show_pte(struct mm_struct *mm, unsigned long addr);
extern void __show_regs(struct pt_regs *);

/* Returns one of the CPU_ARCH_* codes defined above. */
extern int cpu_architecture(void);
extern void cpu_init(void);

/* Machine restart: arm_pm_restart is a platform-installable hook. */
void arm_machine_restart(char mode);
extern void (*arm_pm_restart)(char str);
/*
 * Intel's XScale3 core supports some v6 features (supersections, L2)
 * but advertises itself as v5 as it does not support the v6 ISA.  For
 * this reason, we need a way to explicitly test for this type of CPU.
 */
#ifndef CONFIG_CPU_XSC3
#define	cpu_is_xsc3()	0
#else
static inline int cpu_is_xsc3(void)
{
	extern unsigned int processor_id;

	/* Match the XScale3 ID pattern in the masked processor ID. */
	return (processor_id & 0xffffe000) == 0x69056000;
}
#endif
/* True for both the original XScale and the XScale3 cores. */
#if !defined(CONFIG_CPU_XSCALE) && !defined(CONFIG_CPU_XSC3)
#define	cpu_is_xscale()	0
#else
#define	cpu_is_xscale()	1
#endif
/* Bit flags tested against the user_debug variable below. */
#define UDBG_UNDEFINED	(1 << 0)
#define UDBG_SYSCALL	(1 << 1)
#define UDBG_BADABORT	(1 << 2)
#define UDBG_SEGV	(1 << 3)
#define UDBG_BUS	(1 << 4)

extern unsigned int user_debug;

/* True when the vector page is relocated high (CR_V); not possible pre-v4. */
#if __LINUX_ARM_ARCH__ >= 4
#define vectors_high()	(cr_alignment & CR_V)
#else
#define vectors_high()	(0)
#endif
/*
 * Barrier primitives.  ARMv7 has dedicated isb/dsb/dmb instructions;
 * ARMv6 and XSC3 use the equivalent CP15 c7 operations; older cores
 * keep the CP15 c7, c10, 4 operation for dsb() and degrade isb()/dmb()
 * to compiler-only barriers.
 */
#if __LINUX_ARM_ARCH__ >= 7
#define isb() __asm__ __volatile__ ("isb" : : : "memory")
#define dsb() __asm__ __volatile__ ("dsb" : : : "memory")
#define dmb() __asm__ __volatile__ ("dmb" : : : "memory")
#elif defined(CONFIG_CPU_XSC3) || __LINUX_ARM_ARCH__ == 6
#define isb() __asm__ __volatile__ ("mcr p15, 0, %0, c7, c5, 4" \
				    : : "r" (0) : "memory")
#define dsb() __asm__ __volatile__ ("mcr p15, 0, %0, c7, c10, 4" \
				    : : "r" (0) : "memory")
#define dmb() __asm__ __volatile__ ("mcr p15, 0, %0, c7, c10, 5" \
				    : : "r" (0) : "memory")
#else
#define isb() __asm__ __volatile__ ("" : : : "memory")
#define dsb() __asm__ __volatile__ ("mcr p15, 0, %0, c7, c10, 4" \
				    : : "r" (0) : "memory")
#define dmb() __asm__ __volatile__ ("" : : : "memory")
#endif

#ifndef CONFIG_SMP
/*
 * UP: a hardware barrier is only needed when the platform is coherent
 * with an external agent; the smp_*() variants collapse to compiler
 * barriers.
 */
#define mb()	do { if (arch_is_coherent()) dmb(); else barrier(); } while (0)
#define rmb()	do { if (arch_is_coherent()) dmb(); else barrier(); } while (0)
#define wmb()	do { if (arch_is_coherent()) dmb(); else barrier(); } while (0)
#define smp_mb()	barrier()
#define smp_rmb()	barrier()
#define smp_wmb()	barrier()
#else
#define mb()		dmb()
#define rmb()		dmb()
#define wmb()		dmb()
#define smp_mb()	dmb()
#define smp_rmb()	dmb()
#define smp_wmb()	dmb()
#endif
/* Dependent-load barriers are no-ops on this architecture. */
#define read_barrier_depends()		do { } while(0)
#define smp_read_barrier_depends()	do { } while(0)

/* Store "value" then order it against subsequent accesses. */
#define set_mb(var, value)	do { var = value; smp_mb(); } while (0)
#define nop() __asm__ __volatile__("mov\tr0,r0\t@ nop\n\t");
extern unsigned long cr_no_alignment;	/* defined in entry-armv.S */
extern unsigned long cr_alignment;	/* defined in entry-armv.S */

/* Read the CP15 control register (CR1, bits described by CR_* above). */
static inline unsigned int get_cr(void)
{
	unsigned int val;
	asm("mrc p15, 0, %0, c1, c0, 0	@ get CR" : "=r" (val) : : "cc");
	return val;
}

/* Write the CP15 control register; isb() so the change takes effect
 * before subsequent instructions. */
static inline void set_cr(unsigned int val)
{
	asm volatile("mcr p15, 0, %0, c1, c0, 0	@ set CR"
	  : : "r" (val) : "cc");
	isb();
}

#ifndef CONFIG_SMP
extern void adjust_cr(unsigned long mask, unsigned long set);
#endif
/* Access-control field helpers for coprocessor "n" (2 bits each). */
#define CPACC_FULL(n)		(3 << (n * 2))
#define CPACC_SVC(n)		(1 << (n * 2))
#define CPACC_DISABLE(n)	(0 << (n * 2))

/* Read the CP15 coprocessor access control register. */
static inline unsigned int get_copro_access(void)
{
	unsigned int val;
	asm("mrc p15, 0, %0, c1, c0, 2 @ get copro access"
	  : "=r" (val) : : "cc");
	return val;
}

/* Write the coprocessor access control register; isb() so the change
 * takes effect before subsequent instructions. */
static inline void set_copro_access(unsigned int val)
{
	asm volatile("mcr p15, 0, %0, c1, c0, 2 @ set copro access"
	  : : "r" (val) : "cc");
	isb();
}
/*
 * switch_mm() may do a full cache flush over the context switch,
 * so enable interrupts over the context switch to avoid high
 * latency.
 */
#define __ARCH_WANT_INTERRUPTS_ON_CTXSW

/*
 * switch_to(prev,next) should switch from task `prev' to `next'
 * `prev' will never be the same as `next'.  schedule() itself
 * contains the memory barrier to tell GCC not to cache `current'.
 */
extern struct task_struct *__switch_to(struct task_struct *, struct thread_info *, struct thread_info *);

#define switch_to(prev,next,last)					\
do {									\
	last = __switch_to(prev,task_thread_info(prev), task_thread_info(next));	\
} while (0)
#if defined(CONFIG_CPU_SA1100) || defined(CONFIG_CPU_SA110)
/*
 * On the StrongARM, "swp" is terminally broken since it bypasses the
 * cache totally.  This means that the cache becomes inconsistent, and,
 * since we use normal loads/stores as well, this is really bad.
 * Typically, this causes oopsen in filp_close, but could have other,
 * more disastrous effects.  There are two work-arounds:
 *  1. Disable interrupts and emulate the atomic swap
 *  2. Clean the cache, perform atomic swap, flush the cache
 *
 * We choose (1) since it's the "easiest" to achieve here and is not
 * dependent on the processor type.
 *
 * NOTE that this solution won't work on an SMP system, so explicitly
 * forbid it here.
 */
#define swp_is_buggy
#endif
/*
 * Atomically exchange the 1- or 4-byte value at "ptr" with "x" and
 * return the previous value.  The backend depends on the build:
 *  - ARMv6+:             ldrex/strex retry loop
 *  - swp_is_buggy cores: IRQ-disabled load/store emulation (UP only)
 *  - everything else:    the swp/swpb instruction
 * Any other size calls __bad_xchg, which has no definition visible
 * here — presumably an intentional build-time failure; confirm.
 */
static inline unsigned long __xchg(unsigned long x, volatile void *ptr, int size)
{
	extern void __bad_xchg(volatile void *, int);
	unsigned long ret;
#ifdef swp_is_buggy
	unsigned long flags;
#endif
#if __LINUX_ARM_ARCH__ >= 6
	unsigned int tmp;	/* strex status: 0 on success, retry otherwise */
#endif

	switch (size) {
#if __LINUX_ARM_ARCH__ >= 6
	case 1:
		asm volatile("@	__xchg1\n"
		"1:	ldrexb	%0, [%3]\n"
		"	strexb	%1, %2, [%3]\n"
		"	teq	%1, #0\n"
		"	bne	1b"
			: "=&r" (ret), "=&r" (tmp)
			: "r" (x), "r" (ptr)
			: "memory", "cc");
		break;
	case 4:
		asm volatile("@	__xchg4\n"
		"1:	ldrex	%0, [%3]\n"
		"	strex	%1, %2, [%3]\n"
		"	teq	%1, #0\n"
		"	bne	1b"
			: "=&r" (ret), "=&r" (tmp)
			: "r" (x), "r" (ptr)
			: "memory", "cc");
		break;
#elif defined(swp_is_buggy)
#ifdef CONFIG_SMP
#error SMP is not supported on this platform
#endif
	case 1:
		raw_local_irq_save(flags);
		ret = *(volatile unsigned char *)ptr;
		*(volatile unsigned char *)ptr = x;
		raw_local_irq_restore(flags);
		break;

	case 4:
		raw_local_irq_save(flags);
		ret = *(volatile unsigned long *)ptr;
		*(volatile unsigned long *)ptr = x;
		raw_local_irq_restore(flags);
		break;
#else
	case 1:
		asm volatile("@	__xchg1\n"
		"	swpb	%0, %1, [%2]"
			: "=&r" (ret)
			: "r" (x), "r" (ptr)
			: "memory", "cc");
		break;
	case 4:
		asm volatile("@	__xchg4\n"
		"	swp	%0, %1, [%2]"
			: "=&r" (ret)
			: "r" (x), "r" (ptr)
			: "memory", "cc");
		break;
#endif
	default:
		__bad_xchg(ptr, size), ret = 0;
		break;
	}

	return ret;
}
extern void disable_hlt(void);
extern void enable_hlt(void);

#include <asm-generic/cmpxchg-local.h>

/*
 * cmpxchg_local and cmpxchg64_local are atomic wrt current CPU. Always make
 * them available.
 */
#define cmpxchg_local(ptr, o, n)				  	       \
	((__typeof__(*(ptr)))__cmpxchg_local_generic((ptr), (unsigned long)(o),\
			(unsigned long)(n), sizeof(*(ptr))))
#define cmpxchg64_local(ptr, o, n) __cmpxchg64_local_generic((ptr), (o), (n))

#ifndef CONFIG_SMP
#include <asm-generic/cmpxchg.h>
#endif

#endif /* __ASSEMBLY__ */

#define arch_align_stack(x) (x)

#endif /* __KERNEL__ */

#endif /* __ASM_ARM_SYSTEM_H */