/*
 * linux/arch/arm/mm/proc-xsc3.S
 *
 * Original Author: Matthew Gilbert
 * Current Maintainer: Deepak Saxena <dsaxena@plexity.net>
 *
 * Copyright 2004 (C) Intel Corp.
 * Copyright 2005 (c) MontaVista Software, Inc.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * MMU functions for the Intel XScale3 Core (XSC3).  The XSC3 core is an
 * extension to Intel's original XScale core that adds the following
 * features:
 *
 * - ARMv6 Supersections
 * - Low Locality Reference pages (replaces mini-cache)
 * - 36-bit addressing
 * - L2 cache
 * - Cache coherency if the chipset supports it
 *
 * Based on original XScale code by Nicolas Pitre
 */

#include <linux/linkage.h>
#include <linux/init.h>
#include <asm/assembler.h>
#include <asm/procinfo.h>
#include <asm/hardware.h>
#include <asm/pgtable.h>
#include <asm/pgtable-hwdef.h>
#include <asm/page.h>
#include <asm/ptrace.h>
#include "proc-macros.S"

/*
 * This is the maximum size of an area which will be flushed.  If the
 * area is larger than this, then we flush the whole cache.
 */
#define MAX_AREA_SIZE	32768

/*
 * The cache line size of the I and D cache.
 */
#define CACHELINESIZE	32

/*
 * The size of the data cache.
 */
#define CACHESIZE	32768

/*
 * Run with L2 enabled.
 */
#define L2_CACHE_ENABLE	1

/*
 * Enable the Branch Target Buffer (can cause crashes; see erratum #42).
 */
#define BTB_ENABLE	0

/*
 * This macro is used to wait for a CP15 write and is needed when we
 * have to ensure that the last operation to the coprocessor has
 * completed before continuing with other operations.
 */
	.macro	cpwait_ret, lr, rd
	mrc	p15, 0, \rd, c2, c0, 0		@ arbitrary read of cp15
	sub	pc, \lr, \rd, LSR #32		@ wait for completion and
						@ flush instruction pipeline
	.endm
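
/*
 * This is the usual XScale cpwait idiom: the mrc stalls until the
 * earlier CP15 write has taken effect, \rd, LSR #32 is always zero so
 * the sub is effectively "mov pc, \lr" with a data dependency on the
 * mrc result, and the write to pc flushes the instruction pipeline.
 */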

/*
 * This macro cleans & invalidates the entire xsc3 dcache by set & way.
 */
	.macro	clean_d_cache rd, rs
	mov	\rd, #0x1f00
	orr	\rd, \rd, #0x00e0
1:	mcr	p15, 0, \rd, c7, c14, 2		@ clean/inv set/way
	adds	\rd, \rd, #0x40000000
	bcc	1b
	subs	\rd, \rd, #0x20
	bpl	1b
	.endm
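
/*
 * The loop above covers 256 sets (index 0x1fe0 down to 0 in steps of
 * 0x20) and, for each set, four ways (the 0x40000000 increments until
 * the carry is set), which matches the 32 KiB CACHESIZE of 32-byte
 * D-cache lines.
 */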

	.text

/*
 * cpu_xsc3_proc_init()
 *
 * Nothing too exciting at the moment
 */
ENTRY(cpu_xsc3_proc_init)
	mov	pc, lr

/*
 * cpu_xsc3_proc_fin()
 */
ENTRY(cpu_xsc3_proc_fin)
	str	lr, [sp, #-4]!
	mov	r0, #PSR_F_BIT|PSR_I_BIT|SVC_MODE
	msr	cpsr_c, r0
	bl	xsc3_flush_kern_cache_all	@ clean caches
	mrc	p15, 0, r0, c1, c0, 0		@ ctrl register
	bic	r0, r0, #0x1800			@ ...IZ...........
	bic	r0, r0, #0x0006			@ .............CA.
	mcr	p15, 0, r0, c1, c0, 0		@ disable caches
	ldr	pc, [sp], #4

/*
 * cpu_xsc3_reset(loc)
 *
 * Perform a soft reset of the system.  Put the CPU into the
 * same state as it would be if it had been reset, and branch
 * to what would be the reset vector.
 *
 * loc: location to jump to for soft reset
 */
	.align	5
ENTRY(cpu_xsc3_reset)
	mov	r1, #PSR_F_BIT|PSR_I_BIT|SVC_MODE
	msr	cpsr_c, r1			@ reset CPSR
	mrc	p15, 0, r1, c1, c0, 0		@ ctrl register
	bic	r1, r1, #0x0086			@ ........B....CA.
	bic	r1, r1, #0x3900			@ ..VIZ..S........
	mcr	p15, 0, r1, c1, c0, 0		@ ctrl register
	mcr	p15, 0, ip, c7, c7, 0		@ invalidate I,D caches & BTB
	bic	r1, r1, #0x0001			@ ...............M
	mcr	p15, 0, r1, c1, c0, 0		@ ctrl register
	@ CAUTION: MMU turned off from this point.  We count on the pipeline
	@ already containing those two last instructions to survive.
	mcr	p15, 0, ip, c8, c7, 0		@ invalidate I & D TLBs
	mov	pc, r0

/*
 * cpu_xsc3_do_idle()
 *
 * Cause the processor to idle
 *
 * For now we do nothing but go to idle mode for every case
 *
 * XScale supports clock switching, but using idle mode support
 * allows external hardware to react to system state changes.
 *
 * MMG: Come back to this one.
 */
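
/*
 * Writing 1 to the XScale power mode register (CP14, the mcr below)
 * requests idle mode; the core wakes on the next interrupt.
 */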
	.align	5

ENTRY(cpu_xsc3_do_idle)
	mov	r0, #1
	mcr	p14, 0, r0, c7, c0, 0		@ Go to IDLE
	mov	pc, lr

/* ================================= CACHE ================================ */

/*
 * flush_user_cache_all()
 *
 * Invalidate all cache entries in a particular address
 * space.
 */
ENTRY(xsc3_flush_user_cache_all)
	/* FALLTHROUGH */

/*
 * flush_kern_cache_all()
 *
 * Clean and invalidate the entire cache.
 */
ENTRY(xsc3_flush_kern_cache_all)
	mov	r2, #VM_EXEC
	mov	ip, #0
__flush_whole_cache:
	clean_d_cache r0, r1
	tst	r2, #VM_EXEC
	mcrne	p15, 0, ip, c7, c5, 0		@ Invalidate I cache & BTB
	mcrne	p15, 0, ip, c7, c10, 4		@ Drain Write Buffer
	mcrne	p15, 0, ip, c7, c5, 4		@ Prefetch Flush
	mov	pc, lr

/*
 * flush_user_cache_range(start, end, vm_flags)
 *
 * Invalidate a range of cache entries in the specified
 * address space.
 *
 * - start - start address (may not be aligned)
 * - end - end address (exclusive, may not be aligned)
 * - vma - vm_area_struct describing address space
 */
	.align	5
ENTRY(xsc3_flush_user_cache_range)
	mov	ip, #0
	sub	r3, r1, r0			@ calculate total size
	cmp	r3, #MAX_AREA_SIZE
	bhs	__flush_whole_cache

1:	tst	r2, #VM_EXEC
	mcrne	p15, 0, r0, c7, c5, 1		@ Invalidate I cache line
	mcr	p15, 0, r0, c7, c14, 1		@ Clean/invalidate D cache line
	add	r0, r0, #CACHELINESIZE
	cmp	r0, r1
	blo	1b
	tst	r2, #VM_EXEC
	mcrne	p15, 0, ip, c7, c5, 6		@ Invalidate BTB
	mcrne	p15, 0, ip, c7, c10, 4		@ Drain Write Buffer
	mcrne	p15, 0, ip, c7, c5, 4		@ Prefetch Flush
	mov	pc, lr

/*
 * coherent_kern_range(start, end)
 *
 * Ensure coherency between the Icache and the Dcache in the
 * region described by start, end.  If you have non-snooping
 * Harvard caches, you need to implement this function.
 *
 * - start - virtual start address
 * - end - virtual end address
 *
 * Note: single I-cache line invalidation isn't used here since
 * it also trashes the mini I-cache used by JTAG debuggers.
 */
ENTRY(xsc3_coherent_kern_range)
	/* FALLTHROUGH */
ENTRY(xsc3_coherent_user_range)
	bic	r0, r0, #CACHELINESIZE - 1
1:	mcr	p15, 0, r0, c7, c10, 1		@ clean D entry
	add	r0, r0, #CACHELINESIZE
	cmp	r0, r1
	blo	1b
	mov	r0, #0
	mcr	p15, 0, r0, c7, c5, 0		@ Invalidate I cache & BTB
	mcr	p15, 0, r0, c7, c10, 4		@ Drain Write Buffer
	mcr	p15, 0, r0, c7, c5, 4		@ Prefetch Flush
	mov	pc, lr

/*
 * flush_kern_dcache_page(void *page)
 *
 * Ensure no D cache aliasing occurs, either with itself or
 * the I cache
 *
 * - addr - page aligned address
 */
ENTRY(xsc3_flush_kern_dcache_page)
	add	r1, r0, #PAGE_SZ
1:	mcr	p15, 0, r0, c7, c14, 1		@ Clean/Invalidate D Cache line
	add	r0, r0, #CACHELINESIZE
	cmp	r0, r1
	blo	1b
	mov	r0, #0
	mcr	p15, 0, r0, c7, c5, 0		@ Invalidate I cache & BTB
	mcr	p15, 0, r0, c7, c10, 4		@ Drain Write Buffer
	mcr	p15, 0, r0, c7, c5, 4		@ Prefetch Flush
	mov	pc, lr

/*
 * dma_inv_range(start, end)
 *
 * Invalidate (discard) the specified virtual address range.
 * May not write back any entries.  If 'start' or 'end'
 * are not cache line aligned, those lines must be written
 * back.
 *
 * - start - virtual start address
 * - end - virtual end address
 */
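/*
 * Partial lines at either boundary are cleaned first so that dirty
 * data sharing a cache line with the buffer is not discarded.
 */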
ENTRY(xsc3_dma_inv_range)
	tst	r0, #CACHELINESIZE - 1
	bic	r0, r0, #CACHELINESIZE - 1
	mcrne	p15, 0, r0, c7, c10, 1		@ clean L1 D entry
	mcrne	p15, 1, r0, c7, c11, 1		@ clean L2 D entry
	tst	r1, #CACHELINESIZE - 1
	mcrne	p15, 0, r1, c7, c10, 1		@ clean L1 D entry
	mcrne	p15, 1, r1, c7, c11, 1		@ clean L2 D entry
1:	mcr	p15, 0, r0, c7, c6, 1		@ invalidate L1 D entry
	mcr	p15, 1, r0, c7, c7, 1		@ Invalidate L2 D cache line
	add	r0, r0, #CACHELINESIZE
	cmp	r0, r1
	blo	1b
	mcr	p15, 0, r0, c7, c10, 4		@ Drain Write Buffer
	mov	pc, lr

/*
 * dma_clean_range(start, end)
 *
 * Clean the specified virtual address range.
 *
 * - start - virtual start address
 * - end - virtual end address
 */
ENTRY(xsc3_dma_clean_range)
	bic	r0, r0, #CACHELINESIZE - 1
1:	mcr	p15, 0, r0, c7, c10, 1		@ clean L1 D entry
	mcr	p15, 1, r0, c7, c11, 1		@ clean L2 D entry
	add	r0, r0, #CACHELINESIZE
	cmp	r0, r1
	blo	1b
	mcr	p15, 0, r0, c7, c10, 4		@ Drain Write Buffer
	mov	pc, lr

/*
 * dma_flush_range(start, end)
 *
 * Clean and invalidate the specified virtual address range.
 *
 * - start - virtual start address
 * - end - virtual end address
 */
ENTRY(xsc3_dma_flush_range)
	bic	r0, r0, #CACHELINESIZE - 1
1:	mcr	p15, 0, r0, c7, c14, 1		@ Clean/invalidate L1 D cache line
	mcr	p15, 1, r0, c7, c11, 1		@ Clean L2 D cache line
	mcr	p15, 1, r0, c7, c7, 1		@ Invalidate L2 D cache line
	add	r0, r0, #CACHELINESIZE
	cmp	r0, r1
	blo	1b
	mcr	p15, 0, r0, c7, c10, 4		@ Drain Write Buffer
	mov	pc, lr

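/*
 * Cache operation vector; the entry order must match that of
 * struct cpu_cache_fns (see asm/cacheflush.h).
 */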
ENTRY(xsc3_cache_fns)
	.long	xsc3_flush_kern_cache_all
	.long	xsc3_flush_user_cache_all
	.long	xsc3_flush_user_cache_range
	.long	xsc3_coherent_kern_range
	.long	xsc3_coherent_user_range
	.long	xsc3_flush_kern_dcache_page
	.long	xsc3_dma_inv_range
	.long	xsc3_dma_clean_range
	.long	xsc3_dma_flush_range

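/*
 * cpu_xsc3_dcache_clean_area(addr, size)
 *
 * Clean (write back) the given region of the D-cache one line at a
 * time, e.g. so that newly written page table entries are visible to
 * the hardware table walks.
 */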
ENTRY(cpu_xsc3_dcache_clean_area)
1:	mcr	p15, 0, r0, c7, c10, 1		@ clean D entry
	add	r0, r0, #CACHELINESIZE
	subs	r1, r1, #CACHELINESIZE
	bhi	1b
	mov	pc, lr

/* =============================== PageTable ============================== */

/*
 * cpu_xsc3_switch_mm(pgd)
 *
 * Set the translation base pointer to be as described by pgd.
 *
 * pgd: new page tables
 */
	.align	5
ENTRY(cpu_xsc3_switch_mm)
	clean_d_cache r1, r2
	mcr	p15, 0, ip, c7, c5, 0		@ Invalidate I cache & BTB
	mcr	p15, 0, ip, c7, c10, 4		@ Drain Write Buffer
	mcr	p15, 0, ip, c7, c5, 4		@ Prefetch Flush
#if L2_CACHE_ENABLE
	orr	r0, r0, #0x18			@ cache the page table in L2
#endif
	mcr	p15, 0, r0, c2, c0, 0		@ load page table pointer
	mcr	p15, 0, ip, c8, c7, 0		@ invalidate I & D TLBs
	cpwait_ret lr, ip

/*
 * cpu_xsc3_set_pte(ptep, pte)
 *
 * Set a PTE and flush it out
 */
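/*
 * Note: the Linux PTE is stored at r0 and the hardware PTE 2 KiB below
 * it; in this layout each PTE page keeps the hardware entries in its
 * first half and the corresponding Linux entries in its second half.
 */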
	.align	5
ENTRY(cpu_xsc3_set_pte)
	str	r1, [r0], #-2048		@ linux version

	bic	r2, r1, #0xdf0			@ Keep C, B, coherency bits
	orr	r2, r2, #PTE_TYPE_EXT		@ extended page

	eor	r3, r1, #L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_WRITE | L_PTE_DIRTY

	tst	r3, #L_PTE_USER			@ User?
	orrne	r2, r2, #PTE_EXT_AP_URO_SRW	@ yes -> user r/o, system r/w

	tst	r3, #L_PTE_WRITE | L_PTE_DIRTY	@ Write and Dirty?
	orreq	r2, r2, #PTE_EXT_AP_UNO_SRW	@ yes -> user n/a, system r/w
						@ combined with user -> user r/w

#if L2_CACHE_ENABLE
	@ If it's cacheable, it needs to be in L2 also.
	eor	ip, r1, #L_PTE_CACHEABLE
	tst	ip, #L_PTE_CACHEABLE
	orreq	r2, r2, #PTE_EXT_TEX(0x5)
#endif

	tst	r3, #L_PTE_PRESENT | L_PTE_YOUNG	@ Present and Young?
	movne	r2, #0					@ no -> fault

	str	r2, [r0]			@ hardware version
	mov	ip, #0
	mcr	p15, 0, r0, c7, c10, 1		@ Clean D cache line
	mcr	p15, 0, ip, c7, c10, 4		@ Drain Write Buffer
	mov	pc, lr

	.ltorg

	.align

	__INIT

	.type	__xsc3_setup, #function
__xsc3_setup:
	mov	r0, #PSR_F_BIT|PSR_I_BIT|SVC_MODE
	msr	cpsr_c, r0
	mcr	p15, 0, ip, c7, c7, 0		@ invalidate I, D caches & BTB
	mcr	p15, 0, ip, c7, c10, 4		@ Drain Write Buffer
	mcr	p15, 0, ip, c7, c5, 4		@ Prefetch Flush
	mcr	p15, 0, ip, c8, c7, 0		@ invalidate I, D TLBs
#if L2_CACHE_ENABLE
	orr	r4, r4, #0x18			@ cache the page table in L2
#endif
	mcr	p15, 0, r4, c2, c0, 0		@ load page table pointer
	mov	r0, #1				@ Allow access to CP0 and CP13
	orr	r0, r0, #1 << 13		@ It's undefined whether this
	mcr	p15, 0, r0, c15, c1, 0		@ affects USR or SVC modes
	mrc	p15, 0, r0, c1, c0, 1		@ get auxiliary control reg
	and	r0, r0, #2			@ preserve the P bit setting
#if L2_CACHE_ENABLE
	orr	r0, r0, #(1 << 10)		@ enable L2 for LLR cache
#endif
	mcr	p15, 0, r0, c1, c0, 1		@ set auxiliary control reg

	adr	r5, xsc3_crval
	ldmia	r5, {r5, r6}
	mrc	p15, 0, r0, c1, c0, 0		@ get control register
	bic	r0, r0, r5			@ .... .... .... ..A.
	orr	r0, r0, r6			@ .... .... .... .C.M
#if BTB_ENABLE
	orr	r0, r0, #0x00000800		@ ..VI Z..S .... ....
#endif
#if L2_CACHE_ENABLE
	orr	r0, r0, #0x04000000		@ L2 enable
#endif
	mov	pc, lr

	.size	__xsc3_setup, . - __xsc3_setup

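/*
 * crval (defined in proc-macros.S) emits the control register bits to
 * clear followed by the bits to set: mmuset is used when the kernel
 * runs with the MMU enabled, ucset for the uncached/no-MMU case.
 */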
	.type	xsc3_crval, #object
xsc3_crval:
	crval	clear=0x04003b02, mmuset=0x00003105, ucset=0x00001100

	__INITDATA

/*
 * Purpose : Function pointers used to access above functions - all calls
 *	     come through these
 */

	.type	xsc3_processor_functions, #object
ENTRY(xsc3_processor_functions)
	.word	v5t_early_abort
	.word	cpu_xsc3_proc_init
	.word	cpu_xsc3_proc_fin
	.word	cpu_xsc3_reset
	.word	cpu_xsc3_do_idle
	.word	cpu_xsc3_dcache_clean_area
	.word	cpu_xsc3_switch_mm
	.word	cpu_xsc3_set_pte
	.size	xsc3_processor_functions, . - xsc3_processor_functions

	.section ".rodata"

	.type	cpu_arch_name, #object
cpu_arch_name:
	.asciz	"armv5te"
	.size	cpu_arch_name, . - cpu_arch_name

	.type	cpu_elf_name, #object
cpu_elf_name:
	.asciz	"v5"
	.size	cpu_elf_name, . - cpu_elf_name

	.type	cpu_xsc3_name, #object
cpu_xsc3_name:
	.asciz	"XScale-Core3"
	.size	cpu_xsc3_name, . - cpu_xsc3_name

	.align

	.section ".proc.info.init", #alloc, #execinstr

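/*
 * Boot-time CPU match entry; the field order must follow struct
 * proc_info_list (asm/procinfo.h): CPU id value and mask, the MMU
 * flags used for the initial section mappings, the setup function,
 * and the name/HWCAP/method-table pointers below.
 */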
	.type	__xsc3_proc_info,#object
__xsc3_proc_info:
	.long	0x69056000
	.long	0xffffe000
	.long	PMD_TYPE_SECT | \
		PMD_SECT_BUFFERABLE | \
		PMD_SECT_CACHEABLE | \
		PMD_SECT_AP_WRITE | \
		PMD_SECT_AP_READ
	.long	PMD_TYPE_SECT | \
		PMD_SECT_AP_WRITE | \
		PMD_SECT_AP_READ
	b	__xsc3_setup
	.long	cpu_arch_name
	.long	cpu_elf_name
	.long	HWCAP_SWP|HWCAP_HALF|HWCAP_THUMB|HWCAP_FAST_MULT|HWCAP_EDSP
	.long	cpu_xsc3_name
	.long	xsc3_processor_functions
	.long	v4wbi_tlb_fns
	.long	xsc3_mc_user_fns
	.long	xsc3_cache_fns
	.size	__xsc3_proc_info, . - __xsc3_proc_info