/*
 *  linux/arch/arm/mm/proc-arm1022.S: MMU functions for ARM1022E
 *
 *  Copyright (C) 2000 ARM Limited
 *  Copyright (C) 2000 Deep Blue Solutions Ltd.
 *  hacked for non-paged-MM by Hyok S. Choi, 2003.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This is the low-level assembler code for performing cache and TLB
 * functions on the ARM1022E.
 */
#include <linux/linkage.h>
#include <linux/init.h>
#include <asm/assembler.h>
#include <asm/asm-offsets.h>
#include <asm/hwcap.h>
#include <asm/pgtable-hwdef.h>
#include <asm/pgtable.h>
#include <asm/ptrace.h>

#include "proc-macros.S"
/*
 * This is the maximum size of an area which will be invalidated
 * using the single invalidate entry instructions.  Anything larger
 * than this, and we go for the whole cache.
 *
 * This value should be chosen such that we choose the cheapest
 * alternative.
 */
#define MAX_AREA_SIZE	32768

/*
 * The size of one data cache line.
 */
#define CACHE_DLINESIZE	32

/*
 * The number of data cache segments.
 */
#define CACHE_DSEGMENTS	16

/*
 * The number of lines in a cache segment.
 */
#define CACHE_DENTRIES	64

/*
 * This is the size at which it becomes more efficient to
 * clean the whole cache, rather than using the individual
 * cache line maintenance instructions.
 */
#define CACHE_DLIMIT	32768
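
/*
 * Back-of-the-envelope check on that limit: with 32-byte lines, a
 * 32 KB range costs 32768 / 32 = 1024 per-line operations, which is
 * the same as walking the whole 16-segment x 64-entry D cache by
 * index, so anything larger is at least as cheap to handle as a
 * whole-cache clean.
 */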

        .text
/*
 * cpu_arm1022_proc_init()
 */
ENTRY(cpu_arm1022_proc_init)
        mov     pc, lr

/*
 * cpu_arm1022_proc_fin()
 */
ENTRY(cpu_arm1022_proc_fin)
        stmfd   sp!, {lr}
        mov     ip, #PSR_F_BIT | PSR_I_BIT | SVC_MODE
        msr     cpsr_c, ip
        bl      arm1022_flush_kern_cache_all
        mrc     p15, 0, r0, c1, c0, 0           @ ctrl register
        bic     r0, r0, #0x1000                 @ ...i............
        bic     r0, r0, #0x000e                 @ ............wca.
        mcr     p15, 0, r0, c1, c0, 0           @ disable caches
        ldmfd   sp!, {pc}

/*
 * cpu_arm1022_reset(loc)
 *
 * Perform a soft reset of the system.  Put the CPU into the
 * same state as it would be if it had been reset, and branch
 * to what would be the reset vector.
 *
 * loc: location to jump to for soft reset
 */
        .align  5
ENTRY(cpu_arm1022_reset)
        mov     ip, #0
        mcr     p15, 0, ip, c7, c7, 0           @ invalidate I,D caches
        mcr     p15, 0, ip, c7, c10, 4          @ drain WB
#ifdef CONFIG_MMU
        mcr     p15, 0, ip, c8, c7, 0           @ invalidate I & D TLBs
#endif
        mrc     p15, 0, ip, c1, c0, 0           @ ctrl register
        bic     ip, ip, #0x000f                 @ ............wcam
        bic     ip, ip, #0x1100                 @ ...i...s........
        mcr     p15, 0, ip, c1, c0, 0           @ ctrl register
        mov     pc, r0
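
/*
 * Decode of the two bic masks above (ARMv5 control register bits):
 *   0x000f clears W(3) write buffer, C(2) D cache, A(1) alignment
 *          check and M(0) MMU enable;
 *   0x1100 clears I(12) I cache enable and S(8) system protection,
 * leaving the CPU near its power-on state before jumping to 'loc'.
 */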

/*
 * cpu_arm1022_do_idle()
 */
        .align  5
ENTRY(cpu_arm1022_do_idle)
        mcr     p15, 0, r0, c7, c0, 4           @ Wait for interrupt
        mov     pc, lr

/* ================================= CACHE ================================ */

        .align  5
/*
 * flush_user_cache_all()
 *
 * Invalidate all cache entries in a particular address
 * space.
 */
ENTRY(arm1022_flush_user_cache_all)
        /* FALLTHROUGH */
/*
 * flush_kern_cache_all()
 *
 * Clean and invalidate the entire cache.
 */
ENTRY(arm1022_flush_kern_cache_all)
        mov     r2, #VM_EXEC
        mov     ip, #0
__flush_whole_cache:
#ifndef CONFIG_CPU_DCACHE_DISABLE
        mov     r1, #(CACHE_DSEGMENTS - 1) << 5 @ 16 segments
1:      orr     r3, r1, #(CACHE_DENTRIES - 1) << 26 @ 64 entries
2:      mcr     p15, 0, r3, c7, c14, 2          @ clean+invalidate D index
        subs    r3, r3, #1 << 26
        bcs     2b                              @ entries 63 to 0
        subs    r1, r1, #1 << 5
        bcs     1b                              @ segments 15 to 0
#endif
        tst     r2, #VM_EXEC
#ifndef CONFIG_CPU_ICACHE_DISABLE
        mcrne   p15, 0, ip, c7, c5, 0           @ invalidate I cache
#endif
        mcrne   p15, 0, ip, c7, c10, 4          @ drain WB
        mov     pc, lr
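
/*
 * Index format used by the set/way loop above, as this file's shifts
 * imply: the "clean+invalidate D index" op (p15 c7, c14, 2) takes the
 * line index within a segment in bits 31:26 (64 entries) and the
 * segment number in bits 8:5 (16 segments), so the two nested loops
 * visit every line of every segment exactly once.
 */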

/*
 * flush_user_cache_range(start, end, flags)
 *
 * Invalidate a range of cache entries in the specified
 * address space.
 *
 * - start - start address (inclusive)
 * - end   - end address (exclusive)
 * - flags - vm_flags for this space
 */
ENTRY(arm1022_flush_user_cache_range)
        mov     ip, #0
        sub     r3, r1, r0                      @ calculate total size
        cmp     r3, #CACHE_DLIMIT
        bhs     __flush_whole_cache

#ifndef CONFIG_CPU_DCACHE_DISABLE
1:      mcr     p15, 0, r0, c7, c14, 1          @ clean+invalidate D entry
        add     r0, r0, #CACHE_DLINESIZE
        cmp     r0, r1
        blo     1b
#endif
        tst     r2, #VM_EXEC
#ifndef CONFIG_CPU_ICACHE_DISABLE
        mcrne   p15, 0, ip, c7, c5, 0           @ invalidate I cache
#endif
        mcrne   p15, 0, ip, c7, c10, 4          @ drain WB
        mov     pc, lr
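
/*
 * The policy above, sketched in C for reference (illustrative
 * pseudocode only; the helper names are made up, not kernel APIs):
 *
 *	if (end - start >= CACHE_DLIMIT) {
 *		flush_whole_cache();		// set/way walk above
 *	} else {
 *		for (addr = start; addr < end; addr += CACHE_DLINESIZE)
 *			clean_and_invalidate_dentry(addr);
 *	}
 *	if (flags & VM_EXEC) {
 *		invalidate_icache();
 *		drain_write_buffer();
 *	}
 */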

/*
 * coherent_kern_range(start, end)
 *
 * Ensure coherency between the Icache and the Dcache in the
 * region described by start, end.  If you have non-snooping
 * Harvard caches, you need to implement this function.
 *
 * - start - virtual start address
 * - end   - virtual end address
 */
ENTRY(arm1022_coherent_kern_range)
        /* FALLTHROUGH */

/*
 * coherent_user_range(start, end)
 *
 * Ensure coherency between the Icache and the Dcache in the
 * region described by start, end.  If you have non-snooping
 * Harvard caches, you need to implement this function.
 *
 * - start - virtual start address
 * - end   - virtual end address
 */
ENTRY(arm1022_coherent_user_range)
        mov     ip, #0
        bic     r0, r0, #CACHE_DLINESIZE - 1
1:
#ifndef CONFIG_CPU_DCACHE_DISABLE
        mcr     p15, 0, r0, c7, c10, 1          @ clean D entry
#endif
#ifndef CONFIG_CPU_ICACHE_DISABLE
        mcr     p15, 0, r0, c7, c5, 1           @ invalidate I entry
#endif
        add     r0, r0, #CACHE_DLINESIZE
        cmp     r0, r1
        blo     1b
        mcr     p15, 0, ip, c7, c10, 4          @ drain WB
        mov     pc, lr
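
/*
 * The per-line order above matters on a Harvard machine like this
 * one: each D line is cleaned (written back) before the matching I
 * line is invalidated, so a subsequent instruction fetch sees the
 * freshly written code, e.g. after code has been copied or patched
 * into a page that is about to be executed.
 */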

/*
 * flush_kern_dcache_page(void *page)
 *
 * Ensure no D cache aliasing occurs, either with itself or
 * the I cache
 *
 * - page - page aligned address
 */
ENTRY(arm1022_flush_kern_dcache_page)
        mov     ip, #0
#ifndef CONFIG_CPU_DCACHE_DISABLE
        add     r1, r0, #PAGE_SZ
1:      mcr     p15, 0, r0, c7, c14, 1          @ clean+invalidate D entry
        add     r0, r0, #CACHE_DLINESIZE
        cmp     r0, r1
        blo     1b
#endif
        mcr     p15, 0, ip, c7, c10, 4          @ drain WB
        mov     pc, lr

/*
 * dma_inv_range(start, end)
 *
 * Invalidate (discard) the specified virtual address range.
 * May not write back any entries.  If 'start' or 'end'
 * are not cache line aligned, those lines must be written
 * back.
 *
 * - start - virtual start address
 * - end   - virtual end address
 *
 * (same as v4wb)
 */
ENTRY(arm1022_dma_inv_range)
        mov     ip, #0
#ifndef CONFIG_CPU_DCACHE_DISABLE
        tst     r0, #CACHE_DLINESIZE - 1
        bic     r0, r0, #CACHE_DLINESIZE - 1
        mcrne   p15, 0, r0, c7, c10, 1          @ clean D entry
        tst     r1, #CACHE_DLINESIZE - 1
        mcrne   p15, 0, r1, c7, c10, 1          @ clean D entry
1:      mcr     p15, 0, r0, c7, c6, 1           @ invalidate D entry
        add     r0, r0, #CACHE_DLINESIZE
        cmp     r0, r1
        blo     1b
#endif
        mcr     p15, 0, ip, c7, c10, 4          @ drain WB
        mov     pc, lr
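
/*
 * Worked example of the boundary handling above: for start = 0x1008
 * with 32-byte lines, the tst spots the misalignment, the line at
 * 0x1000 is cleaned first (it may hold live data outside the DMA
 * buffer), and only then does the loop invalidate from 0x1000 up.
 * An unaligned 'end' gets the same treatment.
 */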

/*
 * dma_clean_range(start, end)
 *
 * Clean the specified virtual address range.
 *
 * - start - virtual start address
 * - end   - virtual end address
 *
 * (same as v4wb)
 */
ENTRY(arm1022_dma_clean_range)
        mov     ip, #0
#ifndef CONFIG_CPU_DCACHE_DISABLE
        bic     r0, r0, #CACHE_DLINESIZE - 1
1:      mcr     p15, 0, r0, c7, c10, 1          @ clean D entry
        add     r0, r0, #CACHE_DLINESIZE
        cmp     r0, r1
        blo     1b
#endif
        mcr     p15, 0, ip, c7, c10, 4          @ drain WB
        mov     pc, lr

/*
 * dma_flush_range(start, end)
 *
 * Clean and invalidate the specified virtual address range.
 *
 * - start - virtual start address
 * - end   - virtual end address
 */
ENTRY(arm1022_dma_flush_range)
        mov     ip, #0
#ifndef CONFIG_CPU_DCACHE_DISABLE
        bic     r0, r0, #CACHE_DLINESIZE - 1
1:      mcr     p15, 0, r0, c7, c14, 1          @ clean+invalidate D entry
        add     r0, r0, #CACHE_DLINESIZE
        cmp     r0, r1
        blo     1b
#endif
        mcr     p15, 0, ip, c7, c10, 4          @ drain WB
        mov     pc, lr
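
/*
 * The three DMA helpers differ only in the c7 opcode used per line:
 *   dma_inv_range    c7, c6, 1    discard lines (device-to-memory DMA)
 *   dma_clean_range  c7, c10, 1   write lines back (memory-to-device)
 *   dma_flush_range  c7, c14, 1   clean then invalidate (bidirectional)
 */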

ENTRY(arm1022_cache_fns)
        .long   arm1022_flush_kern_cache_all
        .long   arm1022_flush_user_cache_all
        .long   arm1022_flush_user_cache_range
        .long   arm1022_coherent_kern_range
        .long   arm1022_coherent_user_range
        .long   arm1022_flush_kern_dcache_page
        .long   arm1022_dma_inv_range
        .long   arm1022_dma_clean_range
        .long   arm1022_dma_flush_range
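
/*
 * The ordering of the words above must match struct cpu_cache_fns in
 * asm/cacheflush.h, which for kernels of this vintage looks roughly
 * like the following (field types paraphrased, check the header
 * before relying on them):
 *
 *	struct cpu_cache_fns {
 *		void (*flush_kern_all)(void);
 *		void (*flush_user_all)(void);
 *		void (*flush_user_range)(unsigned long, unsigned long,
 *					 unsigned int);
 *		void (*coherent_kern_range)(unsigned long, unsigned long);
 *		void (*coherent_user_range)(unsigned long, unsigned long);
 *		void (*flush_kern_dcache_page)(void *);
 *		void (*dma_inv_range)(const void *, const void *);
 *		void (*dma_clean_range)(const void *, const void *);
 *		void (*dma_flush_range)(const void *, const void *);
 *	};
 */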

        .align  5
ENTRY(cpu_arm1022_dcache_clean_area)
#ifndef CONFIG_CPU_DCACHE_DISABLE
        mov     ip, #0
1:      mcr     p15, 0, r0, c7, c10, 1          @ clean D entry
        add     r0, r0, #CACHE_DLINESIZE
        subs    r1, r1, #CACHE_DLINESIZE
        bhi     1b
#endif
        mov     pc, lr

/* =============================== PageTable ============================== */

/*
 * cpu_arm1022_switch_mm(pgd)
 *
 * Set the translation base pointer to be as described by pgd.
 *
 * pgd: new page tables
 */
        .align  5
ENTRY(cpu_arm1022_switch_mm)
#ifdef CONFIG_MMU
#ifndef CONFIG_CPU_DCACHE_DISABLE
        mov     r1, #(CACHE_DSEGMENTS - 1) << 5 @ 16 segments
1:      orr     r3, r1, #(CACHE_DENTRIES - 1) << 26 @ 64 entries
2:      mcr     p15, 0, r3, c7, c14, 2          @ clean+invalidate D index
        subs    r3, r3, #1 << 26
        bcs     2b                              @ entries 63 to 0
        subs    r1, r1, #1 << 5
        bcs     1b                              @ segments 15 to 0
#endif
        mov     r1, #0
#ifndef CONFIG_CPU_ICACHE_DISABLE
        mcr     p15, 0, r1, c7, c5, 0           @ invalidate I cache
#endif
        mcr     p15, 0, r1, c7, c10, 4          @ drain WB
        mcr     p15, 0, r0, c2, c0, 0           @ load page table pointer
        mcr     p15, 0, r1, c8, c7, 0           @ invalidate I & D TLBs
#endif
        mov     pc, lr
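
/*
 * The full set/way walk on every context switch is the price of the
 * ARM1022's virtually-indexed, virtually-tagged caches: once the
 * translation base changes, lines tagged with old virtual addresses
 * would be stale, so the D cache is cleaned+invalidated and the I
 * cache dropped before the TTB write, and the TLBs flushed just
 * after it.
 */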

/*
 * cpu_arm1022_set_pte_ext(ptep, pte, ext)
 *
 * Set a PTE and flush it out
 */
        .align  5
ENTRY(cpu_arm1022_set_pte_ext)
#ifdef CONFIG_MMU
        armv3_set_pte_ext
        mov     r0, r0
#ifndef CONFIG_CPU_DCACHE_DISABLE
        mcr     p15, 0, r0, c7, c10, 1          @ clean D entry
#endif
#endif /* CONFIG_MMU */
        mov     pc, lr
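
/*
 * armv3_set_pte_ext (from proc-macros.S) stores both the Linux view
 * and the hardware view of the PTE; the clean D entry afterwards
 * pushes the new entry out of the writeback D cache so the hardware
 * table walker, which reads from memory, actually sees it.  The
 * "mov r0, r0" appears to be a deliberate nop between the store and
 * the cache op, as in the other ARM9/ARM10 proc-*.S files.
 */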

        __INIT

        .type   __arm1022_setup, #function
__arm1022_setup:
        mov     r0, #0
        mcr     p15, 0, r0, c7, c7              @ invalidate I,D caches on v4
        mcr     p15, 0, r0, c7, c10, 4          @ drain write buffer on v4
#ifdef CONFIG_MMU
        mcr     p15, 0, r0, c8, c7              @ invalidate I,D TLBs on v4
#endif
        adr     r5, arm1022_crval
        ldmia   r5, {r5, r6}
        mrc     p15, 0, r0, c1, c0              @ get control register v4
        bic     r0, r0, r5
        orr     r0, r0, r6
#ifdef CONFIG_CPU_CACHE_ROUND_ROBIN
        orr     r0, r0, #0x4000                 @ .R..............
#endif
        mov     pc, lr
        .size   __arm1022_setup, . - __arm1022_setup

        /*
         *  R
         * .RVI ZFRS BLDP WCAM
         * .011 1001 ..11 0101
         */
        .type   arm1022_crval, #object
arm1022_crval:
        crval   clear=0x00007f3f, mmuset=0x00003935, ucset=0x00001930
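
/*
 * crval (a proc-macros.S macro) emits two words: a mask of control
 * register bits to clear, then the bits to set (mmuset with an MMU,
 * ucset without).  __arm1022_setup above applies them as
 * ctrl = (ctrl & ~clear) | set and returns the result in r0 for
 * head.S to program into the control register when it enables the
 * MMU.
 */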

        __INITDATA

/*
 * Purpose : Function pointers used to access above functions - all calls
 *	     come through these
 */
        .type   arm1022_processor_functions, #object
arm1022_processor_functions:
        .word   v4t_early_abort
        .word   pabort_noifar
        .word   cpu_arm1022_proc_init
        .word   cpu_arm1022_proc_fin
        .word   cpu_arm1022_reset
        .word   cpu_arm1022_do_idle
        .word   cpu_arm1022_dcache_clean_area
        .word   cpu_arm1022_switch_mm
        .word   cpu_arm1022_set_pte_ext
        .size   arm1022_processor_functions, . - arm1022_processor_functions

        .section ".rodata"

        .type   cpu_arch_name, #object
cpu_arch_name:
        .asciz  "armv5te"
        .size   cpu_arch_name, . - cpu_arch_name

        .type   cpu_elf_name, #object
cpu_elf_name:
        .asciz  "v5"
        .size   cpu_elf_name, . - cpu_elf_name

        .type   cpu_arm1022_name, #object
cpu_arm1022_name:
        .asciz  "ARM1022"
        .size   cpu_arm1022_name, . - cpu_arm1022_name

        .align

        .section ".proc.info.init", #alloc, #execinstr

        .type   __arm1022_proc_info,#object
__arm1022_proc_info:
        .long   0x4105a220                      @ ARM 1022E (v5TE)
        .long   0xff0ffff0
        .long   PMD_TYPE_SECT | \
                PMD_BIT4 | \
                PMD_SECT_AP_WRITE | \
                PMD_SECT_AP_READ
        .long   PMD_TYPE_SECT | \
                PMD_BIT4 | \
                PMD_SECT_AP_WRITE | \
                PMD_SECT_AP_READ
        b       __arm1022_setup
        .long   cpu_arch_name
        .long   cpu_elf_name
        .long   HWCAP_SWP | HWCAP_HALF | HWCAP_THUMB | HWCAP_EDSP
        .long   cpu_arm1022_name
        .long   arm1022_processor_functions
        .long   v4wbi_tlb_fns
        .long   v4wb_user_fns
        .long   arm1022_cache_fns
        .size   __arm1022_proc_info, . - __arm1022_proc_info
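
/*
 * At boot, head.S matches the running CPU against each proc.info
 * record by masking the ID register with the second word and
 * comparing against the first, roughly (pseudocode; head.S does this
 * in assembly):
 *
 *	if ((cpuid & 0xff0ffff0) == 0x4105a220)
 *		// use __arm1022_proc_info
 *
 * The 0xff0ffff0 mask ignores the variant and revision nibbles, so
 * any ARM1022E revision selects this record.
 */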