@ arch/arm/mm/proc-feroceon.S
@ (extracted from a git blame web viewer; commit-hash/author annotations removed)
1/*
2 * linux/arch/arm/mm/proc-feroceon.S: MMU functions for Feroceon
3 *
4 * Heavily based on proc-arm926.S
5 * Maintainer: Assaf Hoffman <hoffman@marvell.com>
6 *
7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of the GNU General Public License as published by
9 * the Free Software Foundation; either version 2 of the License, or
10 * (at your option) any later version.
11 *
12 * This program is distributed in the hope that it will be useful,
13 * but WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 * GNU General Public License for more details.
16 *
17 * You should have received a copy of the GNU General Public License
18 * along with this program; if not, write to the Free Software
19 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
20 */
21
22#include <linux/linkage.h>
23#include <linux/init.h>
24#include <asm/assembler.h>
5ec9407d 25#include <asm/hwcap.h>
e50d6409
AH
26#include <asm/pgtable-hwdef.h>
27#include <asm/pgtable.h>
28#include <asm/page.h>
29#include <asm/ptrace.h>
30#include "proc-macros.S"
31
32/*
33 * This is the maximum size of an area which will be invalidated
34 * using the single invalidate entry instructions. Anything larger
35 * than this, and we go for the whole cache.
36 *
37 * This value should be chosen such that we choose the cheapest
38 * alternative.
39 */
40#define CACHE_DLIMIT 16384
41
42/*
43 * the cache line size of the I and D cache
44 */
45#define CACHE_DLINESIZE 32
46
6c386e58
NP
	.bss
	.align	3
@ Two-word scratch area filled in by cpu_feroceon_proc_init:
@   word 0: highest D-cache set index, pre-shifted into bits [5..]
@   word 1: highest D-cache way index, pre-shifted into bits [30..]
@ Consumed by __flush_whole_cache as set/way loop bounds.
__cache_params_loc:
	.space	8

	.text
@ pc-relative pointer to the .bss scratch area above
__cache_params:
	.word	__cache_params_loc
55
e50d6409
AH
56/*
57 * cpu_feroceon_proc_init()
58 */
/*
 * cpu_feroceon_proc_init()
 *
 * Decode the cache type register and precompute the D-cache
 * set/way stepping parameters into __cache_params_loc for use by
 * __flush_whole_cache.  Clobbers r0-r3.
 */
ENTRY(cpu_feroceon_proc_init)
	mrc	p15, 0, r0, c0, c0, 1		@ read cache type register
	ldr	r1, __cache_params
	mov	r2, #(16 << 5)			@ minimum size: 16 lines of 32 bytes
	tst	r0, #(1 << 16)			@ get way
	mov	r0, r0, lsr #18			@ get cache size order
	movne	r3, #((4 - 1) << 30)		@ 4-way
	and	r0, r0, #0xf
	moveq	r3, #0				@ 1-way
	mov	r2, r2, lsl r0			@ actual cache size
	movne	r2, r2, lsr #2			@ turned into # of sets
	sub	r2, r2, #(1 << 5)		@ highest set index, pre-shifted
	stmia	r1, {r2, r3}			@ store {max set, max way}
	mov	pc, lr
73
74/*
75 * cpu_feroceon_proc_fin()
76 */
/*
 * cpu_feroceon_proc_fin()
 *
 * Called before the processor is shut down or rebooted: clean the
 * write-back L2 (when configured) and disable the I/D caches and
 * write buffer in the cp15 control register.
 */
ENTRY(cpu_feroceon_proc_fin)
#if defined(CONFIG_CACHE_FEROCEON_L2) && \
	!defined(CONFIG_CACHE_FEROCEON_L2_WRITETHROUGH)
	mov	r0, #0
	mcr	p15, 1, r0, c15, c9, 0		@ clean L2
	mcr	p15, 0, r0, c7, c10, 4		@ drain WB
#endif

	mrc	p15, 0, r0, c1, c0, 0		@ ctrl register
	bic	r0, r0, #0x1000			@ ...i............  (I-cache off)
	bic	r0, r0, #0x000e			@ ............wca.  (WB/cache/align off)
	mcr	p15, 0, r0, c1, c0, 0		@ disable caches
	mov	pc, lr
e50d6409
AH
90
91/*
92 * cpu_feroceon_reset(loc)
93 *
94 * Perform a soft reset of the system. Put the CPU into the
95 * same state as it would be if it had been reset, and branch
96 * to what would be the reset vector.
97 *
98 * loc: location to jump to for soft reset
99 */
/*
 * cpu_feroceon_reset(loc)
 *
 * Perform a soft reset of the system.  Put the CPU into the
 * same state as it would be if it had been reset, and branch
 * to what would be the reset vector.
 *
 * loc: location to jump to for soft reset (r0)
 */
	.align	5
ENTRY(cpu_feroceon_reset)
	mov	ip, #0
	mcr	p15, 0, ip, c7, c7, 0		@ invalidate I,D caches
	mcr	p15, 0, ip, c7, c10, 4		@ drain WB
#ifdef CONFIG_MMU
	mcr	p15, 0, ip, c8, c7, 0		@ invalidate I & D TLBs
#endif
	mrc	p15, 0, ip, c1, c0, 0		@ ctrl register
	bic	ip, ip, #0x000f			@ ............wcam  (MMU/caches off)
	bic	ip, ip, #0x1100			@ ...i...s........  (I-cache, sysprot off)
	mcr	p15, 0, ip, c1, c0, 0		@ ctrl register
	mov	pc, r0				@ jump to the reset location
113
114/*
115 * cpu_feroceon_do_idle()
116 *
117 * Called with IRQs disabled
118 */
/*
 * cpu_feroceon_do_idle()
 *
 * Called with IRQs disabled.  Drain the write buffer, then stall
 * in "wait for interrupt" until woken.
 */
	.align	5
ENTRY(cpu_feroceon_do_idle)
	mov	r0, #0
	mcr	p15, 0, r0, c7, c10, 4		@ Drain write buffer
	mcr	p15, 0, r0, c7, c0, 4		@ Wait for interrupt
	mov	pc, lr
125
c8c90860
MW
/*
 * flush_icache_all()
 *
 * Unconditionally clean and invalidate the entire icache.
 */
ENTRY(feroceon_flush_icache_all)
	mov	r0, #0
	mcr	p15, 0, r0, c7, c5, 0		@ invalidate I cache
	mov	pc, lr
ENDPROC(feroceon_flush_icache_all)
136
e50d6409
AH
/*
 * flush_user_cache_all()
 *
 * Clean and invalidate all cache entries in a particular
 * address space.
 */
	.align	5
ENTRY(feroceon_flush_user_cache_all)
	/* FALLTHROUGH */

/*
 * flush_kern_cache_all()
 *
 * Clean and invalidate the entire cache.
 */
ENTRY(feroceon_flush_kern_cache_all)
	mov	r2, #VM_EXEC

@ Common tail: walk every D-cache set/way using the bounds computed by
@ cpu_feroceon_proc_init.  On entry r2 holds the VM_EXEC bit (I-cache
@ maintenance is skipped when clear).  Exits with ip == 0 — switch_mm
@ relies on this.
__flush_whole_cache:
	ldr	r1, __cache_params
	ldmia	r1, {r1, r3}			@ r1 = max set, r3 = max way
1:	orr	ip, r1, r3			@ compose set/way operand
2:	mcr	p15, 0, ip, c7, c14, 2		@ clean + invalidate D set/way
	subs	ip, ip, #(1 << 30)		@ next way
	bcs	2b
	subs	r1, r1, #(1 << 5)		@ next set
	bcs	1b

	tst	r2, #VM_EXEC
	mov	ip, #0
	mcrne	p15, 0, ip, c7, c5, 0		@ invalidate I cache
	mcrne	p15, 0, ip, c7, c10, 4		@ drain WB
	mov	pc, lr
170
/*
 * flush_user_cache_range(start, end, flags)
 *
 * Clean and invalidate a range of cache entries in the
 * specified address range.
 *
 * - start - start address (inclusive)
 * - end - end address (exclusive)
 * - flags - vm_flags describing address space
 */
	.align	5
ENTRY(feroceon_flush_user_cache_range)
	sub	r3, r1, r0			@ calculate total size
	cmp	r3, #CACHE_DLIMIT
	bgt	__flush_whole_cache		@ large range: whole cache is cheaper
	@ Loop handles two cache lines per iteration.  The VM_EXEC test is
	@ redone each pass because the cmp below clobbers the flags.
1:	tst	r2, #VM_EXEC
	mcr	p15, 0, r0, c7, c14, 1		@ clean and invalidate D entry
	mcrne	p15, 0, r0, c7, c5, 1		@ invalidate I entry
	add	r0, r0, #CACHE_DLINESIZE
	mcr	p15, 0, r0, c7, c14, 1		@ clean and invalidate D entry
	mcrne	p15, 0, r0, c7, c5, 1		@ invalidate I entry
	add	r0, r0, #CACHE_DLINESIZE
	cmp	r0, r1
	blo	1b
	tst	r2, #VM_EXEC
	mov	ip, #0
	mcrne	p15, 0, ip, c7, c10, 4		@ drain WB
	mov	pc, lr
199
/*
 * coherent_kern_range(start, end)
 *
 * Ensure coherency between the Icache and the Dcache in the
 * region described by start, end.  If you have non-snooping
 * Harvard caches, you need to implement this function.
 *
 * - start - virtual start address
 * - end - virtual end address
 */
	.align	5
ENTRY(feroceon_coherent_kern_range)
	/* FALLTHROUGH */

/*
 * coherent_user_range(start, end)
 *
 * Ensure coherency between the Icache and the Dcache in the
 * region described by start, end.  If you have non-snooping
 * Harvard caches, you need to implement this function.
 *
 * - start - virtual start address
 * - end - virtual end address
 */
ENTRY(feroceon_coherent_user_range)
	bic	r0, r0, #CACHE_DLINESIZE - 1	@ align down to line
1:	mcr	p15, 0, r0, c7, c10, 1		@ clean D entry
	mcr	p15, 0, r0, c7, c5, 1		@ invalidate I entry
	add	r0, r0, #CACHE_DLINESIZE
	cmp	r0, r1
	blo	1b
	mcr	p15, 0, r0, c7, c10, 4		@ drain WB
	mov	pc, lr
233
/*
 * flush_kern_dcache_area(void *addr, size_t size)
 *
 * Ensure no D cache aliasing occurs, either with itself or
 * the I cache.
 *
 * - addr - kernel address
 * - size - region size
 */
	.align	5
ENTRY(feroceon_flush_kern_dcache_area)
	add	r1, r0, r1			@ r1 = end address (exclusive)
1:	mcr	p15, 0, r0, c7, c14, 1		@ clean+invalidate D entry
	add	r0, r0, #CACHE_DLINESIZE
	cmp	r0, r1
	blo	1b
	mov	r0, #0
	mcr	p15, 0, r0, c7, c5, 0		@ invalidate I cache
	mcr	p15, 0, r0, c7, c10, 4		@ drain WB
	mov	pc, lr
254
@ Range-operation variant of flush_kern_dcache_area, using the Feroceon
@ cp15 (opcode1=5) D-cache clean/invalidate range ops, which take an
@ inclusive [start, top] pair and must execute with IRQs masked.
@ NOTE(review): the size argument (r1) is ignored — exactly one page
@ starting at r0 is flushed.  Presumably all callers pass page-sized,
@ page-aligned areas; confirm against cacheflush callers.
	.align	5
ENTRY(feroceon_range_flush_kern_dcache_area)
	mrs	r2, cpsr
	add	r1, r0, #PAGE_SZ - CACHE_DLINESIZE	@ top addr is inclusive
	orr	r3, r2, #PSR_I_BIT
	msr	cpsr_c, r3			@ disable interrupts
	mcr	p15, 5, r0, c15, c15, 0		@ D clean/inv range start
	mcr	p15, 5, r1, c15, c15, 1		@ D clean/inv range top
	msr	cpsr_c, r2			@ restore interrupts
	mov	r0, #0
	mcr	p15, 0, r0, c7, c5, 0		@ invalidate I cache
	mcr	p15, 0, r0, c7, c10, 4		@ drain WB
	mov	pc, lr
268
e50d6409
AH
/*
 * dma_inv_range(start, end)
 *
 * Invalidate (discard) the specified virtual address range.
 * May not write back any entries.  If 'start' or 'end'
 * are not cache line aligned, those lines must be written
 * back.
 *
 * - start - virtual start address
 * - end - virtual end address
 *
 * (same as v4wb)
 */
	.align	5
feroceon_dma_inv_range:
	tst	r0, #CACHE_DLINESIZE - 1	@ partial line at start?
	bic	r0, r0, #CACHE_DLINESIZE - 1
	mcrne	p15, 0, r0, c7, c10, 1		@ clean D entry (preserve leading bytes)
	tst	r1, #CACHE_DLINESIZE - 1	@ partial line at end?
	mcrne	p15, 0, r1, c7, c10, 1		@ clean D entry (preserve trailing bytes)
1:	mcr	p15, 0, r0, c7, c6, 1		@ invalidate D entry
	add	r0, r0, #CACHE_DLINESIZE
	cmp	r0, r1
	blo	1b
	mcr	p15, 0, r0, c7, c10, 4		@ drain WB
	mov	pc, lr
295
@ Range-operation variant of dma_inv_range: partial lines at either
@ boundary are cleaned first, then the Feroceon cp15 (opcode1=5)
@ D-cache invalidate range op covers the inclusive [start, top] span
@ with IRQs masked.
	.align	5
feroceon_range_dma_inv_range:
	mrs	r2, cpsr
	tst	r0, #CACHE_DLINESIZE - 1
	mcrne	p15, 0, r0, c7, c10, 1		@ clean D entry
	tst	r1, #CACHE_DLINESIZE - 1
	mcrne	p15, 0, r1, c7, c10, 1		@ clean D entry
	cmp	r1, r0				@ guard against empty range
	subne	r1, r1, #1			@ top address is inclusive
	orr	r3, r2, #PSR_I_BIT
	msr	cpsr_c, r3			@ disable interrupts
	mcr	p15, 5, r0, c15, c14, 0		@ D inv range start
	mcr	p15, 5, r1, c15, c14, 1		@ D inv range top
	msr	cpsr_c, r2			@ restore interrupts
	mov	pc, lr
311
e50d6409
AH
/*
 * dma_clean_range(start, end)
 *
 * Clean (write back, no invalidate) the specified virtual
 * address range.
 *
 * - start - virtual start address
 * - end - virtual end address
 *
 * (same as v4wb)
 */
	.align	5
feroceon_dma_clean_range:
	bic	r0, r0, #CACHE_DLINESIZE - 1	@ align down to line
1:	mcr	p15, 0, r0, c7, c10, 1		@ clean D entry
	add	r0, r0, #CACHE_DLINESIZE
	cmp	r0, r1
	blo	1b
	mcr	p15, 0, r0, c7, c10, 4		@ drain WB
	mov	pc, lr
331
@ Range-operation variant of dma_clean_range, using the Feroceon cp15
@ (opcode1=5) D-cache clean range op over the inclusive [start, top]
@ span, with IRQs masked around the operation pair.
	.align	5
feroceon_range_dma_clean_range:
	mrs	r2, cpsr
	cmp	r1, r0				@ guard against empty range
	subne	r1, r1, #1			@ top address is inclusive
	orr	r3, r2, #PSR_I_BIT
	msr	cpsr_c, r3			@ disable interrupts
	mcr	p15, 5, r0, c15, c13, 0		@ D clean range start
	mcr	p15, 5, r1, c15, c13, 1		@ D clean range top
	msr	cpsr_c, r2			@ restore interrupts
	mcr	p15, 0, r0, c7, c10, 4		@ drain WB
	mov	pc, lr
344
e50d6409
AH
/*
 * dma_flush_range(start, end)
 *
 * Clean and invalidate the specified virtual address range.
 *
 * - start - virtual start address
 * - end - virtual end address
 */
	.align	5
ENTRY(feroceon_dma_flush_range)
	bic	r0, r0, #CACHE_DLINESIZE - 1	@ align down to line
1:	mcr	p15, 0, r0, c7, c14, 1		@ clean+invalidate D entry
	add	r0, r0, #CACHE_DLINESIZE
	cmp	r0, r1
	blo	1b
	mcr	p15, 0, r0, c7, c10, 4		@ drain WB
	mov	pc, lr
362
836a8051
SS
@ Range-operation variant of dma_flush_range, using the Feroceon cp15
@ (opcode1=5) D-cache clean/invalidate range op over the inclusive
@ [start, top] span, with IRQs masked.
	.align	5
ENTRY(feroceon_range_dma_flush_range)
	mrs	r2, cpsr
	cmp	r1, r0				@ guard against empty range
	subne	r1, r1, #1			@ top address is inclusive
	orr	r3, r2, #PSR_I_BIT
	msr	cpsr_c, r3			@ disable interrupts
	mcr	p15, 5, r0, c15, c15, 0		@ D clean/inv range start
	mcr	p15, 5, r1, c15, c15, 1		@ D clean/inv range top
	msr	cpsr_c, r2			@ restore interrupts
	mcr	p15, 0, r0, c7, c10, 4		@ drain WB
	mov	pc, lr
375
a9c9147e
RK
/*
 * dma_map_area(start, size, dir)
 * - start - kernel virtual start address
 * - size - size of region
 * - dir - DMA direction
 *
 * Tail-calls the appropriate range op based on direction:
 * TO_DEVICE -> clean only; FROM_DEVICE -> invalidate;
 * BIDIRECTIONAL -> clean + invalidate.
 */
ENTRY(feroceon_dma_map_area)
	add	r1, r1, r0			@ convert size to end address
	cmp	r2, #DMA_TO_DEVICE
	beq	feroceon_dma_clean_range	@ dir == TO_DEVICE
	bcs	feroceon_dma_inv_range		@ dir >  TO_DEVICE (FROM_DEVICE)
	b	feroceon_dma_flush_range	@ dir <  TO_DEVICE (BIDIRECTIONAL)
ENDPROC(feroceon_dma_map_area)
389
/*
 * dma_map_area(start, size, dir) — range-operation variant
 * - start - kernel virtual start address
 * - size - size of region
 * - dir - DMA direction
 *
 * Same dispatch as feroceon_dma_map_area, but targeting the
 * feroceon_range_* implementations.
 */
ENTRY(feroceon_range_dma_map_area)
	add	r1, r1, r0			@ convert size to end address
	cmp	r2, #DMA_TO_DEVICE
	beq	feroceon_range_dma_clean_range	@ dir == TO_DEVICE
	bcs	feroceon_range_dma_inv_range	@ dir >  TO_DEVICE (FROM_DEVICE)
	b	feroceon_range_dma_flush_range	@ dir <  TO_DEVICE (BIDIRECTIONAL)
ENDPROC(feroceon_range_dma_map_area)
403
/*
 * dma_unmap_area(start, size, dir)
 * - start - kernel virtual start address
 * - size - size of region
 * - dir - DMA direction
 *
 * No-op.  NOTE(review): presumably all required maintenance is done
 * at map time on this core — confirm against other ARMv5 proc files.
 */
ENTRY(feroceon_dma_unmap_area)
	mov	pc, lr
ENDPROC(feroceon_dma_unmap_area)
413
e43b670e
DM
	@ define struct cpu_cache_fns (see <asm/cacheflush.h> and proc-macros.S)
	define_cache_functions feroceon

@ Alias an unchanged feroceon_<basename> cache function under the
@ feroceon_range_ prefix, so the range-ops function table can reuse it.
.macro range_alias basename
	.globl feroceon_range_\basename
	.type feroceon_range_\basename , %function
	.equ feroceon_range_\basename , feroceon_\basename
.endm

/*
 * Most of the cache functions are unchanged for this case.
 * Export suitable alias symbols for the unchanged functions:
 */
	range_alias flush_icache_all
	range_alias flush_user_cache_all
	range_alias flush_kern_cache_all
	range_alias flush_user_cache_range
	range_alias coherent_kern_range
	range_alias coherent_user_range
	range_alias dma_unmap_area

	define_cache_functions feroceon_range
836a8051 436
/*
 * cpu_feroceon_dcache_clean_area(addr, size)
 *
 * Clean (write back, no invalidate) the D-cache over [r0, r0+r1),
 * and additionally clean the L2 over the same span when a
 * write-back L2 is configured.
 */
	.align	5
ENTRY(cpu_feroceon_dcache_clean_area)
#if defined(CONFIG_CACHE_FEROCEON_L2) && \
	!defined(CONFIG_CACHE_FEROCEON_L2_WRITETHROUGH)
	mov	r2, r0				@ keep addr/size for the L2 pass
	mov	r3, r1
#endif
1:	mcr	p15, 0, r0, c7, c10, 1		@ clean D entry
	add	r0, r0, #CACHE_DLINESIZE
	subs	r1, r1, #CACHE_DLINESIZE
	bhi	1b
#if defined(CONFIG_CACHE_FEROCEON_L2) && \
	!defined(CONFIG_CACHE_FEROCEON_L2_WRITETHROUGH)
1:	mcr	p15, 1, r2, c15, c9, 1		@ clean L2 entry
	add	r2, r2, #CACHE_DLINESIZE
	subs	r3, r3, #CACHE_DLINESIZE
	bhi	1b
#endif
	mcr	p15, 0, r0, c7, c10, 4		@ drain WB
	mov	pc, lr
457
/* =============================== PageTable ============================== */

/*
 * cpu_feroceon_switch_mm(pgd)
 *
 * Set the translation base pointer to be as described by pgd.
 *
 * pgd: new page tables
 */
	.align	5
ENTRY(cpu_feroceon_switch_mm)
#ifdef CONFIG_MMU
	/*
	 * Note: we wish to call __flush_whole_cache but we need to preserve
	 * lr to do so.  The only way without touching main memory is to
	 * use r2 which is normally used to test the VM_EXEC flag, and
	 * compensate locally for the skipped ops if it is not set.
	 */
	mov	r2, lr				@ abuse r2 to preserve lr
	bl	__flush_whole_cache
	@ if r2 contains the VM_EXEC bit then the next 2 ops are done already
	@ (__flush_whole_cache exits with ip == 0, which the mcreq ops rely on)
	tst	r2, #VM_EXEC
	mcreq	p15, 0, ip, c7, c5, 0		@ invalidate I cache
	mcreq	p15, 0, ip, c7, c10, 4		@ drain WB

	mcr	p15, 0, r0, c2, c0, 0		@ load page table pointer
	mcr	p15, 0, ip, c8, c7, 0		@ invalidate I & D TLBs
	mov	pc, r2				@ return via preserved lr
#else
	mov	pc, lr
#endif
e50d6409
AH
489
/*
 * cpu_feroceon_set_pte_ext(ptep, pte, ext)
 *
 * Set a PTE and flush the entry out of the D-cache (and L2 when a
 * write-back L2 is configured) so the table walker sees it.
 */
	.align	5
ENTRY(cpu_feroceon_set_pte_ext)
#ifdef CONFIG_MMU
	armv3_set_pte_ext wc_disable=0
	mov	r0, r0				@ NOTE(review): apparent nop carried over
						@ from proc-arm926 — purpose unconfirmed
	mcr	p15, 0, r0, c7, c10, 1		@ clean D entry
#if defined(CONFIG_CACHE_FEROCEON_L2) && \
	!defined(CONFIG_CACHE_FEROCEON_L2_WRITETHROUGH)
	mcr	p15, 1, r0, c15, c9, 1		@ clean L2 entry
#endif
	mcr	p15, 0, r0, c7, c10, 4		@ drain WB
#endif
	mov	pc, lr
508
	__CPUINIT

/*
 * __feroceon_setup
 *
 * Boot-time CPU initialisation: invalidate I/D caches, drain the
 * write buffer, invalidate the TLBs, then compute the cp15 control
 * register value from feroceon_crval.
 *
 * Returns: r0 = control register value (caller writes it to cp15 c1).
 * Clobbers: r0, r5, r6.
 */
	.type	__feroceon_setup, #function
__feroceon_setup:
	mov	r0, #0
	mcr	p15, 0, r0, c7, c7		@ invalidate I,D caches on v4
	mcr	p15, 0, r0, c7, c10, 4		@ drain write buffer on v4
#ifdef CONFIG_MMU
	mcr	p15, 0, r0, c8, c7		@ invalidate I,D TLBs on v4
#endif

	adr	r5, feroceon_crval
	ldmia	r5, {r5, r6}			@ r5 = clear mask, r6 = set mask
	mrc	p15, 0, r0, c1, c0		@ get control register v4
	bic	r0, r0, r5
	orr	r0, r0, r6
	mov	pc, lr
	.size	__feroceon_setup, . - __feroceon_setup
527
	/*
	 * Control register bit layout for the crval masks below:
	 *  B
	 *  R P
	 * .RVI UFRS BLDP WCAM
	 * .011 .001 ..11 0101
	 *
	 */
	.type	feroceon_crval, #object
feroceon_crval:
	crval	clear=0x0000773f, mmuset=0x00003135, ucset=0x00001134
e50d6409
AH
538
	__INITDATA

	@ define struct processor (see <asm/proc-fns.h> and proc-macros.S)
	define_processor_functions feroceon, dabort=v5t_early_abort, pabort=legacy_pabort
e50d6409
AH
543
	.section ".rodata"

	@ CPU name strings referenced by the proc_info records below.
	string	cpu_arch_name, "armv5te"
	string	cpu_elf_name, "v5"
	string	cpu_feroceon_name, "Feroceon"
	string	cpu_88fr531_name, "Feroceon 88FR531-vd"
	string	cpu_88fr571_name, "Feroceon 88FR571-vd"
	string	cpu_88fr131_name, "Feroceon 88FR131"
9c2af6c5 552
e50d6409
AH
	.align

	.section ".proc.info.init", #alloc, #execinstr

/*
 * feroceon_proc_info name, cpu_val, cpu_mask, cpu_name, cache
 *
 * Emit one proc_info_list record, matched against the CPU ID at boot.
 * \cache selects the plain or "range ops" cpu_cache_fns table.
 */
.macro feroceon_proc_info name:req, cpu_val:req, cpu_mask:req, cpu_name:req, cache:req
	.type	__\name\()_proc_info,#object
__\name\()_proc_info:
	.long	\cpu_val
	.long	\cpu_mask
	@ section mapping flags for normal memory
	.long	PMD_TYPE_SECT | \
		PMD_SECT_BUFFERABLE | \
		PMD_SECT_CACHEABLE | \
		PMD_BIT4 | \
		PMD_SECT_AP_WRITE | \
		PMD_SECT_AP_READ
	@ section mapping flags for I/O (uncached, unbuffered)
	.long	PMD_TYPE_SECT | \
		PMD_BIT4 | \
		PMD_SECT_AP_WRITE | \
		PMD_SECT_AP_READ
	b	__feroceon_setup
	.long	cpu_arch_name
	.long	cpu_elf_name
	.long	HWCAP_SWP|HWCAP_HALF|HWCAP_THUMB|HWCAP_FAST_MULT|HWCAP_EDSP
	.long	\cpu_name
	.long	feroceon_processor_functions
	.long	v4wbi_tlb_fns
	.long	feroceon_user_fns
	.long	\cache
	.size	__\name\()_proc_info, . - __\name\()_proc_info
.endm
9c2af6c5 583
e43b670e
DM
#ifdef CONFIG_CPU_FEROCEON_OLD_ID
	@ Early Feroceon cores report an ARM926-style ID (0x41009260).
	feroceon_proc_info feroceon_old_id, 0x41009260, 0xff00fff0, \
		cpu_name=cpu_feroceon_name, cache=feroceon_cache_fns
#endif

	@ 88FR531 uses the plain cache ops; 88FR571/88FR131 use range ops.
	feroceon_proc_info 88fr531, 0x56055310, 0xfffffff0, cpu_88fr531_name, \
		cache=feroceon_cache_fns
	feroceon_proc_info 88fr571, 0x56155710, 0xfffffff0, cpu_88fr571_name, \
		cache=feroceon_range_cache_fns
	feroceon_proc_info 88fr131, 0x56251310, 0xfffffff0, cpu_88fr131_name, \
		cache=feroceon_range_cache_fns
@ (end of file — git web viewer footer removed)