Commit | Line | Data |
---|---|---|
1da177e4 | 1 | /* |
4baa9922 | 2 | * arch/arm/include/asm/assembler.h |
1da177e4 LT | 3 | * |
4 | * Copyright (C) 1996-2000 Russell King | |
5 | * | |
6 | * This program is free software; you can redistribute it and/or modify | |
7 | * it under the terms of the GNU General Public License version 2 as | |
8 | * published by the Free Software Foundation. | |
9 | * | |
10 | * This file contains arm architecture specific defines | |
11 | * for the different processors. | |
12 | * | |
13 | * Do not include any C declarations in this file - it is included by | |
14 | * assembler source. | |
15 | */ | |
16 | #ifndef __ASSEMBLY__ | |
17 | #error "Only include this from assembly code" | |
18 | #endif | |
19 | ||
20 | #include <asm/ptrace.h> | |
21 | ||
22 | /* | |
23 | * Endian independent macros for shifting bytes within registers. | |
24 | */ | |
25 | #ifndef __ARMEB__ | |
26 | #define pull lsr | |
27 | #define push lsl | |
28 | #define get_byte_0 lsl #0 | |
29 | #define get_byte_1 lsr #8 | |
30 | #define get_byte_2 lsr #16 | |
31 | #define get_byte_3 lsr #24 | |
32 | #define put_byte_0 lsl #0 | |
33 | #define put_byte_1 lsl #8 | |
34 | #define put_byte_2 lsl #16 | |
35 | #define put_byte_3 lsl #24 | |
36 | #else | |
37 | #define pull lsl | |
38 | #define push lsr | |
39 | #define get_byte_0 lsr #24 | |
40 | #define get_byte_1 lsr #16 | |
41 | #define get_byte_2 lsr #8 | |
42 | #define get_byte_3 lsl #0 | |
43 | #define put_byte_0 lsl #24 | |
44 | #define put_byte_1 lsl #16 | |
45 | #define put_byte_2 lsl #8 | |
46 | #define put_byte_3 lsl #0 | |
47 | #endif | |
48 | ||
49 | /* | |
50 | * Data preload for architectures that support it | |
51 | */ | |
52 | #if __LINUX_ARM_ARCH__ >= 5 | |
53 | #define PLD(code...) code | |
54 | #else | |
55 | #define PLD(code...) | |
56 | #endif | |
57 | ||
2239aff6 NP | 58 | /* |
59 | * This can be used to enable code to cacheline align the destination | |
60 | * pointer when bulk writing to memory. Experiments on StrongARM and | |
61 | * XScale didn't show this a worthwhile thing to do when the cache is not | |
62 | * set to write-allocate (this would need further testing on XScale when WA | |
63 | * is used). | |
64 | * | |
65 | * On Feroceon there is much to gain however, regardless of cache mode. | |
66 | */ | |
67 | #ifdef CONFIG_CPU_FEROCEON | |
68 | #define CALGN(code...) code | |
69 | #else | |
70 | #define CALGN(code...) | |
71 | #endif | |
72 | ||
1da177e4 | 73 | /* |
9c42954d | 74 | * Enable and disable interrupts |
1da177e4 | 75 | */ |
59d1ff3b | 76 | #if __LINUX_ARM_ARCH__ >= 6 |
0d928b0b | 77 | .macro disable_irq_notrace |
59d1ff3b | 78 | cpsid i |
9c42954d RK | 79 | .endm |
80 | ||
0d928b0b | 81 | .macro enable_irq_notrace |
9c42954d RK | 82 | cpsie i |
83 | .endm | |
59d1ff3b | 84 | #else |
0d928b0b | 85 | .macro disable_irq_notrace |
9c42954d RK | 86 | msr cpsr_c, #PSR_I_BIT | SVC_MODE |
87 | .endm | |
88 | ||
0d928b0b | 89 | .macro enable_irq_notrace |
9c42954d RK | 90 | msr cpsr_c, #SVC_MODE |
91 | .endm | |
59d1ff3b | 92 | #endif |
9c42954d | 93 | |
0d928b0b UKK | 94 | .macro asm_trace_hardirqs_off |
95 | #if defined(CONFIG_TRACE_IRQFLAGS) | |
96 | stmdb sp!, {r0-r3, ip, lr} | |
97 | bl trace_hardirqs_off | |
98 | ldmia sp!, {r0-r3, ip, lr} | |
99 | #endif | |
100 | .endm | |
101 | ||
102 | .macro asm_trace_hardirqs_on_cond, cond | |
103 | #if defined(CONFIG_TRACE_IRQFLAGS) | |
104 | /* | |
105 | * actually the registers should be pushed and pop'd conditionally, but | |
106 | * after bl the flags are certainly clobbered | |
107 | */ | |
108 | stmdb sp!, {r0-r3, ip, lr} | |
109 | bl\cond trace_hardirqs_on | |
110 | ldmia sp!, {r0-r3, ip, lr} | |
111 | #endif | |
112 | .endm | |
113 | ||
114 | .macro asm_trace_hardirqs_on | |
115 | asm_trace_hardirqs_on_cond al | |
116 | .endm | |
117 | ||
118 | .macro disable_irq | |
119 | disable_irq_notrace | |
120 | asm_trace_hardirqs_off | |
121 | .endm | |
122 | ||
123 | .macro enable_irq | |
124 | asm_trace_hardirqs_on | |
125 | enable_irq_notrace | |
126 | .endm | |
9c42954d RK | 127 | /* |
128 | * Save the current IRQ state and disable IRQs. Note that this macro | |
129 | * assumes FIQs are enabled, and that the processor is in SVC mode. | |
130 | */ | |
131 | .macro save_and_disable_irqs, oldcpsr | |
132 | mrs \oldcpsr, cpsr | |
133 | disable_irq | |
1da177e4 LT | 134 | .endm |
135 | ||
136 | /* | |
137 | * Restore interrupt state previously stored in a register. We don't | |
138 | * guarantee that this will preserve the flags. | |
139 | */ | |
0d928b0b | 140 | .macro restore_irqs_notrace, oldcpsr |
1da177e4 LT | 141 | msr cpsr_c, \oldcpsr |
142 | .endm | |
143 | ||
0d928b0b UKK | 144 | .macro restore_irqs, oldcpsr |
145 | tst \oldcpsr, #PSR_I_BIT | |
146 | asm_trace_hardirqs_on_cond eq | |
147 | restore_irqs_notrace \oldcpsr | |
148 | .endm | |
149 | ||
1da177e4 LT | 150 | #define USER(x...) \ |
151 | 9999: x; \ | |
4260415f | 152 | .pushsection __ex_table,"a"; \ |
1da177e4 LT | 153 | .align 3; \ |
154 | .long 9999b,9001f; \ | |
4260415f | 155 | .popsection |
bac4e960 RK | 156 | |
157 | /* | |
158 | * SMP data memory barrier | |
159 | */ | |
160 | .macro smp_dmb | |
161 | #ifdef CONFIG_SMP | |
162 | #if __LINUX_ARM_ARCH__ >= 7 | |
163 | dmb | |
164 | #elif __LINUX_ARM_ARCH__ == 6 | |
165 | mcr p15, 0, r0, c7, c10, 5 @ dmb | |
166 | #endif | |
167 | #endif | |
168 | .endm | |
b86040a5 CM | 169 | |
170 | #ifdef CONFIG_THUMB2_KERNEL | |
171 | .macro setmode, mode, reg | |
172 | mov \reg, #\mode | |
173 | msr cpsr_c, \reg | |
174 | .endm | |
175 | #else | |
176 | .macro setmode, mode, reg | |
177 | msr cpsr_c, #\mode | |
178 | .endm | |
179 | #endif | |
8b592783 CM | 180 | |
181 | /* | |
182 | * STRT/LDRT access macros with ARM and Thumb-2 variants | |
183 | */ | |
184 | #ifdef CONFIG_THUMB2_KERNEL | |
185 | ||
186 | .macro usraccoff, instr, reg, ptr, inc, off, cond, abort | |
187 | 9999: | |
188 | .if \inc == 1 | |
189 | \instr\cond\()bt \reg, [\ptr, #\off] | |
190 | .elseif \inc == 4 | |
191 | \instr\cond\()t \reg, [\ptr, #\off] | |
192 | .else | |
193 | .error "Unsupported inc macro argument" | |
194 | .endif | |
195 | ||
4260415f | 196 | .pushsection __ex_table,"a" |
8b592783 CM | 197 | .align 3 |
198 | .long 9999b, \abort | |
4260415f | 199 | .popsection |
8b592783 CM | 200 | .endm |
201 | ||
202 | .macro usracc, instr, reg, ptr, inc, cond, rept, abort | |
203 | @ explicit IT instruction needed because of the label | |
204 | @ introduced by the USER macro | |
205 | .ifnc \cond,al | |
206 | .if \rept == 1 | |
207 | itt \cond | |
208 | .elseif \rept == 2 | |
209 | ittt \cond | |
210 | .else | |
211 | .error "Unsupported rept macro argument" | |
212 | .endif | |
213 | .endif | |
214 | ||
215 | @ Slightly optimised to avoid incrementing the pointer twice | |
216 | usraccoff \instr, \reg, \ptr, \inc, 0, \cond, \abort | |
217 | .if \rept == 2 | |
218 | usraccoff \instr, \reg, \ptr, \inc, 4, \cond, \abort | |
219 | .endif | |
220 | ||
221 | add\cond \ptr, #\rept * \inc | |
222 | .endm | |
223 | ||
224 | #else /* !CONFIG_THUMB2_KERNEL */ | |
225 | ||
226 | .macro usracc, instr, reg, ptr, inc, cond, rept, abort | |
227 | .rept \rept | |
228 | 9999: | |
229 | .if \inc == 1 | |
230 | \instr\cond\()bt \reg, [\ptr], #\inc | |
231 | .elseif \inc == 4 | |
232 | \instr\cond\()t \reg, [\ptr], #\inc | |
233 | .else | |
234 | .error "Unsupported inc macro argument" | |
235 | .endif | |
236 | ||
4260415f | 237 | .pushsection __ex_table,"a" |
8b592783 CM | 238 | .align 3 |
239 | .long 9999b, \abort | |
4260415f | 240 | .popsection |
8b592783 CM | 241 | .endr |
242 | .endm | |
243 | ||
244 | #endif /* CONFIG_THUMB2_KERNEL */ | |
245 | ||
246 | .macro strusr, reg, ptr, inc, cond=al, rept=1, abort=9001f | |
247 | usracc str, \reg, \ptr, \inc, \cond, \rept, \abort | |
248 | .endm | |
249 | ||
250 | .macro ldrusr, reg, ptr, inc, cond=al, rept=1, abort=9001f | |
251 | usracc ldr, \reg, \ptr, \inc, \cond, \rept, \abort | |
252 | .endm |