/*
 *  linux/arch/arm/mm/arm940.S: utility functions for ARM940T
 *
 *  Copyright (C) 2004-2006 Hyok S. Choi (hyok.choi@samsung.com)
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 */
#include <linux/linkage.h>
#include <linux/init.h>
#include <asm/assembler.h>
#include <asm/hwcap.h>
#include <asm/pgtable-hwdef.h>
#include <asm/pgtable.h>
#include <asm/ptrace.h>
#include "proc-macros.S"

/* ARM940T has a 4KB DCache comprising 256 lines of 4 words */
#define CACHE_DLINESIZE	16
#define CACHE_DSEGMENTS	4
#define CACHE_DENTRIES	64

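/*
 * The cache maintenance loops in this file work by set/way rather than by
 * address: they build the CP15 c7 index operand with the entry number in
 * bits [31:26] (CACHE_DENTRIES = 64 entries per segment) and the segment
 * number in bits [5:4] (CACHE_DSEGMENTS = 4 segments).  For example,
 * entry 63 of segment 3 is encoded as (63 << 26) | (3 << 4).
 */
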
	.text
/*
 * cpu_arm940_proc_init()
 * cpu_arm940_switch_mm()
 *
 * These are not required.
 */
ENTRY(cpu_arm940_proc_init)
ENTRY(cpu_arm940_switch_mm)
	mov	pc, lr

/*
 * cpu_arm940_proc_fin()
 */
ENTRY(cpu_arm940_proc_fin)
	mrc	p15, 0, r0, c1, c0, 0		@ ctrl register
	bic	r0, r0, #0x00001000		@ i-cache
	bic	r0, r0, #0x00000004		@ d-cache
	mcr	p15, 0, r0, c1, c0, 0		@ disable caches
	mov	pc, lr

/*
 * cpu_arm940_reset(loc)
 * Params  : r0 = address to jump to
 * Notes   : This sets up everything for a reset
 */
	.pushsection	.idmap.text, "ax"
ENTRY(cpu_arm940_reset)
	mov	ip, #0
	mcr	p15, 0, ip, c7, c5, 0		@ flush I cache
	mcr	p15, 0, ip, c7, c6, 0		@ flush D cache
	mcr	p15, 0, ip, c7, c10, 4		@ drain WB
	mrc	p15, 0, ip, c1, c0, 0		@ ctrl register
	bic	ip, ip, #0x00000005		@ .............c.p
	bic	ip, ip, #0x00001000		@ i-cache
	mcr	p15, 0, ip, c1, c0, 0		@ ctrl register
	mov	pc, r0
ENDPROC(cpu_arm940_reset)
	.popsection
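
/*
 * In the CP15 control register accesses above, bit 12 is the I-cache
 * enable, bit 2 the D-cache enable and bit 0 the protection unit enable
 * (the "c" and "p" of the bit-pattern comment in cpu_arm940_reset).
 * Reset therefore drops back to an uncached, unprotected state before
 * jumping to the new image at r0.
 */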

/*
 * cpu_arm940_do_idle()
 */
	.align	5
ENTRY(cpu_arm940_do_idle)
	mcr	p15, 0, r0, c7, c0, 4		@ Wait for interrupt
	mov	pc, lr

/*
 * flush_icache_all()
 *
 * Unconditionally clean and invalidate the entire icache.
 */
ENTRY(arm940_flush_icache_all)
	mov	r0, #0
	mcr	p15, 0, r0, c7, c5, 0		@ invalidate I cache
	mov	pc, lr
ENDPROC(arm940_flush_icache_all)

/*
 * flush_user_cache_all()
 */
ENTRY(arm940_flush_user_cache_all)
	/* FALLTHROUGH */

/*
 * flush_kern_cache_all()
 *
 * Clean and invalidate the entire cache.
 */
ENTRY(arm940_flush_kern_cache_all)
	mov	r2, #VM_EXEC
	/* FALLTHROUGH */

/*
 * flush_user_cache_range(start, end, flags)
 *
 * There is no efficient way to flush a range of cache entries in
 * the specified address range, so the whole cache is flushed instead.
 *
 * - start - start address (inclusive)
 * - end   - end address (exclusive)
 * - flags - vm_flags describing address space
 */
ENTRY(arm940_flush_user_cache_range)
	mov	ip, #0
#ifdef CONFIG_CPU_DCACHE_WRITETHROUGH
	mcr	p15, 0, ip, c7, c6, 0		@ flush D cache
#else
	mov	r1, #(CACHE_DSEGMENTS - 1) << 4	@ 4 segments
1:	orr	r3, r1, #(CACHE_DENTRIES - 1) << 26 @ 64 entries
2:	mcr	p15, 0, r3, c7, c14, 2		@ clean/flush D index
	subs	r3, r3, #1 << 26
	bcs	2b				@ entries 63 to 0
	subs	r1, r1, #1 << 4
	bcs	1b				@ segments 3 to 0
#endif
	tst	r2, #VM_EXEC
	mcrne	p15, 0, ip, c7, c5, 0		@ invalidate I cache
	mcrne	p15, 0, ip, c7, c10, 4		@ drain WB
	mov	pc, lr
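
/*
 * Note on the VM_EXEC test above: the I-cache is only invalidated (and
 * the write buffer drained) when the flushed mapping is executable.
 * arm940_flush_kern_cache_all forces this by preloading r2 with VM_EXEC
 * before falling through.
 */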

/*
 * coherent_kern_range(start, end)
 *
 * Ensure coherency between the Icache and the Dcache in the
 * region described by start, end.  If you have non-snooping
 * Harvard caches, you need to implement this function.
 *
 * - start - virtual start address
 * - end   - virtual end address
 */
ENTRY(arm940_coherent_kern_range)
	/* FALLTHROUGH */

/*
 * coherent_user_range(start, end)
 *
 * Ensure coherency between the Icache and the Dcache in the
 * region described by start, end.  If you have non-snooping
 * Harvard caches, you need to implement this function.
 *
 * - start - virtual start address
 * - end   - virtual end address
 */
ENTRY(arm940_coherent_user_range)
	/* FALLTHROUGH */

/*
 * flush_kern_dcache_area(void *addr, size_t size)
 *
 * Ensure no D cache aliasing occurs, either with itself or
 * the I cache.
 *
 * - addr - kernel address
 * - size - region size
 */
ENTRY(arm940_flush_kern_dcache_area)
	mov	r0, #0
	mov	r1, #(CACHE_DSEGMENTS - 1) << 4	@ 4 segments
1:	orr	r3, r1, #(CACHE_DENTRIES - 1) << 26 @ 64 entries
2:	mcr	p15, 0, r3, c7, c14, 2		@ clean/flush D index
	subs	r3, r3, #1 << 26
	bcs	2b				@ entries 63 to 0
	subs	r1, r1, #1 << 4
	bcs	1b				@ segments 3 to 0
	mcr	p15, 0, r0, c7, c5, 0		@ invalidate I cache
	mcr	p15, 0, r0, c7, c10, 4		@ drain WB
	mov	pc, lr
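
/*
 * As with the range flushes above, arm940_flush_kern_dcache_area ignores
 * its addr/size arguments (r0 and r1 are simply reused as loop counters):
 * the whole D-cache is cleaned and invalidated and the whole I-cache
 * invalidated, which covers any requested area.
 */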

/*
 * dma_inv_range(start, end)
 *
 * There is no efficient way to invalidate a specified virtual
 * address range, so the whole D-cache is invalidated instead.
 *
 * - start - virtual start address
 * - end   - virtual end address
 */
arm940_dma_inv_range:
	mov	ip, #0
	mov	r1, #(CACHE_DSEGMENTS - 1) << 4	@ 4 segments
1:	orr	r3, r1, #(CACHE_DENTRIES - 1) << 26 @ 64 entries
2:	mcr	p15, 0, r3, c7, c6, 2		@ flush D entry
	subs	r3, r3, #1 << 26
	bcs	2b				@ entries 63 to 0
	subs	r1, r1, #1 << 4
	bcs	1b				@ segments 3 to 0
	mcr	p15, 0, ip, c7, c10, 4		@ drain WB
	mov	pc, lr

/*
 * dma_clean_range(start, end)
 *
 * There is no efficient way to clean a specified virtual
 * address range, so the whole D-cache is cleaned instead.
 *
 * - start - virtual start address
 * - end   - virtual end address
 */
arm940_dma_clean_range:
ENTRY(cpu_arm940_dcache_clean_area)
	mov	ip, #0
#ifndef CONFIG_CPU_DCACHE_WRITETHROUGH
	mov	r1, #(CACHE_DSEGMENTS - 1) << 4	@ 4 segments
1:	orr	r3, r1, #(CACHE_DENTRIES - 1) << 26 @ 64 entries
2:	mcr	p15, 0, r3, c7, c10, 2		@ clean D entry
	subs	r3, r3, #1 << 26
	bcs	2b				@ entries 63 to 0
	subs	r1, r1, #1 << 4
	bcs	1b				@ segments 3 to 0
#endif
	mcr	p15, 0, ip, c7, c10, 4		@ drain WB
	mov	pc, lr
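
/*
 * With CONFIG_CPU_DCACHE_WRITETHROUGH the clean loop above is compiled
 * out: a write-through D-cache never holds dirty lines, so draining the
 * write buffer is all that is needed to make the data visible to memory.
 */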

/*
 * dma_flush_range(start, end)
 *
 * There is no efficient way to clean and invalidate a specified
 * virtual address range, so the whole D-cache is cleaned and
 * invalidated instead.
 *
 * - start - virtual start address
 * - end   - virtual end address
 */
ENTRY(arm940_dma_flush_range)
	mov	ip, #0
	mov	r1, #(CACHE_DSEGMENTS - 1) << 4	@ 4 segments
1:	orr	r3, r1, #(CACHE_DENTRIES - 1) << 26 @ 64 entries
2:
#ifndef CONFIG_CPU_DCACHE_WRITETHROUGH
	mcr	p15, 0, r3, c7, c14, 2		@ clean/flush D entry
#else
	mcr	p15, 0, r3, c7, c6, 2		@ invalidate D entry
#endif
	subs	r3, r3, #1 << 26
	bcs	2b				@ entries 63 to 0
	subs	r1, r1, #1 << 4
	bcs	1b				@ segments 3 to 0
	mcr	p15, 0, ip, c7, c10, 4		@ drain WB
	mov	pc, lr

/*
 * dma_map_area(start, size, dir)
 * - start - kernel virtual start address
 * - size  - size of region
 * - dir   - DMA direction
 */
ENTRY(arm940_dma_map_area)
	add	r1, r1, r0
	cmp	r2, #DMA_TO_DEVICE
	beq	arm940_dma_clean_range
	bcs	arm940_dma_inv_range
	b	arm940_dma_flush_range
ENDPROC(arm940_dma_map_area)
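
/*
 * The dispatch above relies on the enum dma_data_direction values
 * (DMA_BIDIRECTIONAL = 0, DMA_TO_DEVICE = 1, DMA_FROM_DEVICE = 2):
 * equal  -> DMA_TO_DEVICE   -> clean (write back before the device reads),
 * higher -> DMA_FROM_DEVICE -> invalidate (discard before the device writes),
 * lower  -> DMA_BIDIRECTIONAL -> clean and invalidate.
 */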

/*
 * dma_unmap_area(start, size, dir)
 * - start - kernel virtual start address
 * - size  - size of region
 * - dir   - DMA direction
 */
ENTRY(arm940_dma_unmap_area)
	mov	pc, lr
ENDPROC(arm940_dma_unmap_area)

	.globl	arm940_flush_kern_cache_louis
	.equ	arm940_flush_kern_cache_louis, arm940_flush_kern_cache_all
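
/*
 * flush_kern_cache_louis (flush to the Level of Unification Inner
 * Shareable) is simply aliased to the full flush: the ARM940T has a
 * single cache level and no multiprocessing, so there is no cheaper
 * point of unification to stop at.
 */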

	@ define struct cpu_cache_fns (see <asm/cacheflush.h> and proc-macros.S)
	define_cache_functions arm940

	__CPUINIT

	.type	__arm940_setup, #function
__arm940_setup:
	mov	r0, #0
	mcr	p15, 0, r0, c7, c5, 0		@ invalidate I cache
	mcr	p15, 0, r0, c7, c6, 0		@ invalidate D cache
	mcr	p15, 0, r0, c7, c10, 4		@ drain WB

	mcr	p15, 0, r0, c6, c3, 0		@ disable data areas 3-7
	mcr	p15, 0, r0, c6, c4, 0
	mcr	p15, 0, r0, c6, c5, 0
	mcr	p15, 0, r0, c6, c6, 0
	mcr	p15, 0, r0, c6, c7, 0

	mcr	p15, 0, r0, c6, c3, 1		@ disable instruction areas 3-7
	mcr	p15, 0, r0, c6, c4, 1
	mcr	p15, 0, r0, c6, c5, 1
	mcr	p15, 0, r0, c6, c6, 1
	mcr	p15, 0, r0, c6, c7, 1

	mov	r0, #0x0000003F			@ base = 0, size = 4GB
	mcr	p15, 0, r0, c6, c0, 0		@ set area 0, default
	mcr	p15, 0, r0, c6, c0, 1

	ldr	r0, =(CONFIG_DRAM_BASE & 0xFFFFF000)	@ base[31:12] of RAM
	ldr	r1, =(CONFIG_DRAM_SIZE >> 12)		@ size of RAM (must be >= 4KB)
	mov	r2, #10				@ 11 is the minimum (4KB)
1:	add	r2, r2, #1			@ area size *= 2
	movs	r1, r1, lsr #1			@ halve the remaining size (sets Z)
	bne	1b				@ loop until the size count reaches zero
	orr	r0, r0, r2, lsl #1		@ the area register value
	orr	r0, r0, #1			@ set enable bit
	mcr	p15, 0, r0, c6, c1, 0		@ set area 1, RAM
	mcr	p15, 0, r0, c6, c1, 1
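
/*
 * Protection region registers hold base[31:12], the size encoding in
 * bits [5:1] (a value N selects a 2^(N+1) byte region, so 11 = 4KB and
 * 31 = 4GB) and the enable bit in bit 0.  As a worked example with a
 * hypothetical CONFIG_DRAM_BASE of 0x30000000 and CONFIG_DRAM_SIZE of
 * 0x04000000 (64MB), the loop above exits with r2 = 25 (2^26 = 64MB)
 * and the value written is 0x30000000 | (25 << 1) | 1 = 0x30000033.
 */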

	ldr	r0, =(CONFIG_FLASH_MEM_BASE & 0xFFFFF000)	@ base[31:12] of FLASH
	ldr	r1, =(CONFIG_FLASH_SIZE >> 12)			@ size of FLASH (must be >= 4KB)
	mov	r2, #10				@ 11 is the minimum (4KB)
1:	add	r2, r2, #1			@ area size *= 2
	movs	r1, r1, lsr #1			@ halve the remaining size (sets Z)
	bne	1b				@ loop until the size count reaches zero
	orr	r0, r0, r2, lsl #1		@ the area register value
	orr	r0, r0, #1			@ set enable bit
	mcr	p15, 0, r0, c6, c2, 0		@ set area 2, ROM/FLASH
	mcr	p15, 0, r0, c6, c2, 1

	mov	r0, #0x06
	mcr	p15, 0, r0, c2, c0, 0		@ Regions 1 and 2 cacheable
	mcr	p15, 0, r0, c2, c0, 1
#ifdef CONFIG_CPU_DCACHE_WRITETHROUGH
	mov	r0, #0x00			@ disable whole write buffer
#else
	mov	r0, #0x02			@ Region 1 write buffered
#endif
	mcr	p15, 0, r0, c3, c0, 0
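
/*
 * c2 (cacheability) and c3 (write buffer control) carry one bit per
 * protection region: 0x06 sets bits 1 and 2, making the RAM and flash
 * regions cacheable, while 0x02 makes only the RAM region bufferable
 * (or none at all in the write-through configuration).
 */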

	mov	r0, #0x10000
	sub	r0, r0, #1			@ r0 = 0xffff
	mcr	p15, 0, r0, c5, c0, 0		@ all read/write access
	mcr	p15, 0, r0, c5, c0, 1
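
/*
 * The access permission registers appear to take a 2-bit field per
 * region; 0xffff sets all eight regions to 0b11, i.e. full read/write
 * access, which matches the "all read/write access" comment above
 * (consult the ARM940T TRM before relying on the exact field encoding).
 */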

	mrc	p15, 0, r0, c1, c0		@ get control register
	orr	r0, r0, #0x00001000		@ I-cache
	orr	r0, r0, #0x00000005		@ MPU/D-cache

	mov	pc, lr
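
/*
 * __arm940_setup returns with the desired CP15 control register value in
 * r0 (I-cache, D-cache and protection unit enable bits set); the generic
 * startup code is expected to write it to the control register once the
 * rest of the early bring-up is done.
 */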

	.size	__arm940_setup, . - __arm940_setup

	__INITDATA

	@ define struct processor (see <asm/proc-fns.h> and proc-macros.S)
	define_processor_functions arm940, dabort=nommu_early_abort, pabort=legacy_pabort, nommu=1

	.section ".rodata"

	string	cpu_arch_name, "armv4t"
	string	cpu_elf_name, "v4"
	string	cpu_arm940_name, "ARM940T"

	.align

	.section ".proc.info.init", #alloc, #execinstr

	.type	__arm940_proc_info,#object
__arm940_proc_info:
	.long	0x41009400
	.long	0xff00fff0
	.long	0
	b	__arm940_setup
	.long	cpu_arch_name
	.long	cpu_elf_name
	.long	HWCAP_SWP | HWCAP_HALF | HWCAP_THUMB
	.long	cpu_arm940_name
	.long	arm940_processor_functions
	.long	0
	.long	0
	.long	arm940_cache_fns
	.size	__arm940_proc_info, . - __arm940_proc_info
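
/*
 * The first two words of __arm940_proc_info are the CPU ID value and
 * mask: the boot code masks the CP15 c0 main ID register with 0xff00fff0
 * and selects this record when the result equals 0x41009400 (ARM
 * implementer, part number 0x940), which is how the arm940 processor and
 * cache function tables above are hooked up at boot.
 */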