/*
 * linux/arch/arm/mm/tlb-v6.S
 *
 * Copyright (C) 1997-2002 Russell King
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * ARM architecture version 6 TLB handling functions.
 * These assume a split I/D TLB.
 */
#include <linux/init.h>
#include <linux/linkage.h>
#include <asm/asm-offsets.h>
#include <asm/assembler.h>
#include <asm/page.h>
#include <asm/tlbflush.h>
#include "proc-macros.S"

#define HARVARD_TLB
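/*
 * With HARVARD_TLB defined, the routines below use the separate
 * data- and instruction-TLB invalidate operations (c8, c6 and c8, c5);
 * without it they would fall back to the unified c8, c7 operation.
 */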

/*
 *	v6wbi_flush_user_tlb_range(start, end, vma)
 *
 *	Invalidate a range of TLB entries in the specified address space.
 *
 *	- start - start address (may not be aligned)
 *	- end   - end address (exclusive, may not be aligned)
 *	- vma   - vma_struct describing address range
 *
 *	It is assumed that:
 *	- the "Invalidate single entry" instruction will invalidate
 *	  both the I and the D TLBs on Harvard-style TLBs
 */
ENTRY(v6wbi_flush_user_tlb_range)
	vma_vm_mm r3, r2			@ get vma->vm_mm
	mov	ip, #0
	mmid	r3, r3				@ get vm_mm->context.id
	mcr	p15, 0, ip, c7, c10, 4		@ drain write buffer
	mov	r0, r0, lsr #PAGE_SHIFT		@ align address
	mov	r1, r1, lsr #PAGE_SHIFT
	asid	r3, r3				@ mask ASID
	orr	r0, r3, r0, lsl #PAGE_SHIFT	@ Create initial MVA
	mov	r1, r1, lsl #PAGE_SHIFT
	vma_vm_flags r2, r2			@ get vma->vm_flags
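	@ At this point r0 holds the MVA: the page-aligned VA in bits
	@ [31:12] with the ASID in bits [7:0], the register format the
	@ ARMv6 per-entry invalidate operations below expect; r1 is the
	@ page-aligned end address.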
1:
#ifdef HARVARD_TLB
	mcr	p15, 0, r0, c8, c6, 1		@ TLB invalidate D MVA (was 1)
	tst	r2, #VM_EXEC			@ Executable area ?
	mcrne	p15, 0, r0, c8, c5, 1		@ TLB invalidate I MVA (was 1)
#else
	mcr	p15, 0, r0, c8, c7, 1		@ TLB invalidate MVA (was 1)
#endif
	add	r0, r0, #PAGE_SZ
	cmp	r0, r1
	blo	1b
	mcr	p15, 0, ip, c7, c10, 4		@ data synchronization barrier
	ret	lr
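
/*
 * Nothing calls v6wbi_flush_user_tlb_range() by name: define_tlb_functions
 * at the end of this file publishes it in a cpu_tlb_fns table, and generic
 * code reaches it through the TLB maintenance helpers. A rough sketch of
 * the C-side path (UP case, simplified from <asm/tlbflush.h>):
 *
 *	flush_tlb_range(vma, start, end)
 *	  -> local_flush_tlb_range(vma, start, end)
 *	  -> cpu_tlb.flush_user_range(start, end, vma)	<- this routine
 */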

/*
 *	v6wbi_flush_kern_tlb_range(start,end)
 *
 *	Invalidate a range of kernel TLB entries
 *
 *	- start - start address (may not be aligned)
 *	- end   - end address (exclusive, may not be aligned)
 */
ENTRY(v6wbi_flush_kern_tlb_range)
	mov	r2, #0
	mcr	p15, 0, r2, c7, c10, 4		@ drain write buffer
	mov	r0, r0, lsr #PAGE_SHIFT		@ align address
	mov	r1, r1, lsr #PAGE_SHIFT
	mov	r0, r0, lsl #PAGE_SHIFT
	mov	r1, r1, lsl #PAGE_SHIFT
1:
#ifdef HARVARD_TLB
	mcr	p15, 0, r0, c8, c6, 1		@ TLB invalidate D MVA
	mcr	p15, 0, r0, c8, c5, 1		@ TLB invalidate I MVA
#else
	mcr	p15, 0, r0, c8, c7, 1		@ TLB invalidate MVA
#endif
	add	r0, r0, #PAGE_SZ
	cmp	r0, r1
	blo	1b
	mcr	p15, 0, r2, c7, c10, 4		@ data synchronization barrier
	mcr	p15, 0, r2, c7, c5, 4		@ prefetch flush (isb)
	ret	lr
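
/*
 * Rough usage sketch (simplified): when kernel page tables change, e.g.
 * after vmalloc()/vfree() or a module unload, generic code does
 *
 *	flush_tlb_kernel_range(start, end)
 *	  -> cpu_tlb.flush_kern_range(start, end)	<- this routine
 *
 * The trailing prefetch flush (ISB) above has no counterpart in the
 * user-range variant: kernel code may be executed through the affected
 * mappings immediately after this returns, without an intervening
 * exception return to synchronize the instruction stream.
 */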

__INIT

	/* define struct cpu_tlb_fns (see <asm/tlbflush.h> and proc-macros.S) */
	define_tlb_functions v6wbi, v6wbi_tlb_flags
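
/*
 * Roughly, define_tlb_functions (see proc-macros.S) expands to an
 * initialised table equivalent to:
 *
 *	.type	v6wbi_tlb_fns, #object
 *	ENTRY(v6wbi_tlb_fns)
 *	.long	v6wbi_flush_user_tlb_range
 *	.long	v6wbi_flush_kern_tlb_range
 *	.long	v6wbi_tlb_flags
 *
 * i.e. a struct cpu_tlb_fns instance that the processor setup code
 * installs as cpu_tlb on multi-TLB kernels.
 */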