arch/arm64/include/asm/tlbflush.h
/*
 * Based on arch/arm/include/asm/tlbflush.h
 *
 * Copyright (C) 1999-2003 Russell King
 * Copyright (C) 2012 ARM Ltd.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program.  If not, see <http://www.gnu.org/licenses/>.
 */
#ifndef __ASM_TLBFLUSH_H
#define __ASM_TLBFLUSH_H

#ifndef __ASSEMBLY__

#include <linux/sched.h>
#include <asm/cputype.h>

/*
 * TLB Management
 * ==============
 *
 * The operations below are implemented as inline functions in this file.
 * The TLB-specific code is expected to perform whatever tests it needs
 * to determine if it should invalidate the TLB for each call. Start
 * addresses are inclusive and end addresses are exclusive; it is safe to
 * round these addresses down.
 *
 *	flush_tlb_all()
 *
 *		Invalidate the entire TLB.
 *
 *	flush_tlb_mm(mm)
 *
 *		Invalidate all TLB entries in a particular address space.
 *		- mm	- mm_struct describing address space
 *
 *	flush_tlb_range(vma,start,end)
 *
 *		Invalidate a range of TLB entries in the specified address
 *		space.
 *		- vma	- vma_struct describing the address range
 *		- start	- start address (may not be aligned)
 *		- end	- end address (exclusive, may not be aligned)
 *
 *	flush_tlb_page(vma,uaddr)
 *
 *		Invalidate the specified page in the specified address space.
 *		- vma	- vma_struct describing the address space
 *		- uaddr	- user virtual address (may not be aligned)
 *
 *	flush_tlb_kernel_range(start,end)
 *
 *		Invalidate a range of TLB entries for kernel mappings. The
 *		addresses are in the kernel's virtual memory space.
 *		- start	- start address (may not be aligned)
 *		- end	- end address (exclusive, may not be aligned)
 */
static inline void flush_tlb_all(void)
{
	dsb(ishst);		/* ensure prior page table writes are observable */
	asm("tlbi vmalle1is");	/* invalidate all EL1 entries, Inner Shareable */
	dsb(ish);		/* wait for the broadcast invalidation to complete */
	isb();			/* discard instructions fetched via stale entries */
}

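/*
 * Illustrative sketch only (not part of this header): the same
 * DSB; TLBI; DSB; ISB pattern applies to a hypothetical CPU-local
 * variant, assuming only non-shareable scope is required:
 *
 *	static inline void local_flush_tlb_all(void)
 *	{
 *		dsb(nshst);		// order prior page table writes
 *		asm("tlbi vmalle1");	// local (non-broadcast) invalidation
 *		dsb(nsh);		// wait for completion on this CPU
 *		isb();			// resynchronise the pipeline
 *	}
 */
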
static inline void flush_tlb_mm(struct mm_struct *mm)
{
	/* TLBI ASIDE1IS takes the ASID in bits [63:48] of its operand */
	unsigned long asid = (unsigned long)ASID(mm) << 48;

	dsb(ishst);
	asm("tlbi aside1is, %0" : : "r" (asid));
	dsb(ish);
}

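/*
 * Worked example with a hypothetical ASID of 0x42: the operand passed
 * to TLBI ASIDE1IS is 0x42UL << 48 == 0x0042000000000000, i.e. only
 * bits [63:48] carry information and the low bits are unused here.
 */
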
static inline void flush_tlb_page(struct vm_area_struct *vma,
				  unsigned long uaddr)
{
	/* TLBI VAE1IS takes VA[55:12] in its low bits and the ASID in [63:48] */
	unsigned long addr = uaddr >> 12 |
		((unsigned long)ASID(vma->vm_mm) << 48);

	dsb(ishst);
	asm("tlbi vae1is, %0" : : "r" (addr));
	dsb(ish);
}

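/*
 * Worked example with hypothetical values: for uaddr 0x7f1234567000 and
 * ASID 5, uaddr >> 12 == 0x7f1234567 and the TLBI VAE1IS operand is
 * 0x0005000000000000 | 0x7f1234567 == 0x00050007f1234567.
 */
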
static inline void __flush_tlb_range(struct vm_area_struct *vma,
				     unsigned long start, unsigned long end)
{
	unsigned long asid = (unsigned long)ASID(vma->vm_mm) << 48;
	unsigned long addr;

	start = asid | (start >> 12);
	end = asid | (end >> 12);

	dsb(ishst);
	/* step by one page: PAGE_SIZE >> 12 == 1 << (PAGE_SHIFT - 12) */
	for (addr = start; addr < end; addr += 1 << (PAGE_SHIFT - 12))
		asm("tlbi vae1is, %0" : : "r"(addr));
	dsb(ish);
}

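/*
 * Worked example: because the TLBI operand encodes VA >> 12, advancing
 * by one page means adding PAGE_SIZE >> 12. With 4K pages (PAGE_SHIFT
 * == 12) the loop stride is 1; with 64K pages (PAGE_SHIFT == 16) it is
 * 16.
 */
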
static inline void __flush_tlb_kernel_range(unsigned long start, unsigned long end)
{
	unsigned long addr;

	start >>= 12;
	end >>= 12;

	dsb(ishst);
	/* VAAE1IS invalidates by VA across all ASIDs; kernel mappings are global */
	for (addr = start; addr < end; addr += 1 << (PAGE_SHIFT - 12))
		asm("tlbi vaae1is, %0" : : "r"(addr));
	dsb(ish);
	isb();
}

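/*
 * Worked example with a hypothetical kernel address: for start
 * 0xffff000000000000, start >> 12 == 0x000ffff000000000. No ASID is
 * merged into the operand here, since TLBI VAAE1IS ignores that field.
 */
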
/*
 * The per-page loops above are capped to avoid soft lock-ups on large TLB
 * flushing ranges; the cut-off is not necessarily a performance optimum.
 */
#define MAX_TLB_RANGE	(1024UL << PAGE_SHIFT)

static inline void flush_tlb_range(struct vm_area_struct *vma,
				   unsigned long start, unsigned long end)
{
	if ((end - start) <= MAX_TLB_RANGE)
		__flush_tlb_range(vma, start, end);
	else
		flush_tlb_mm(vma->vm_mm);
}

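/*
 * Worked example: with 4K pages, MAX_TLB_RANGE is 1024 * 4KiB == 4MiB,
 * so flushing a 16MiB VMA falls back to a single ASID-wide
 * flush_tlb_mm() instead of issuing 4096 per-page TLBIs.
 */
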
static inline void flush_tlb_kernel_range(unsigned long start, unsigned long end)
{
	if ((end - start) <= MAX_TLB_RANGE)
		__flush_tlb_kernel_range(start, end);
	else
		flush_tlb_all();	/* kernel mappings have no ASID to target */
}

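/*
 * Worked example: with 64K pages, MAX_TLB_RANGE is 1024 * 64KiB ==
 * 64MiB; unmapping a larger kernel range than that invalidates the
 * whole TLB rather than walking it page by page.
 */
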
/*
 * Used to invalidate the TLB (walk caches) corresponding to intermediate page
 * table levels (pgd/pud/pmd).
 */
static inline void __flush_tlb_pgtable(struct mm_struct *mm,
				       unsigned long uaddr)
{
	unsigned long addr = uaddr >> 12 | ((unsigned long)ASID(mm) << 48);

	dsb(ishst);
	asm("tlbi vae1is, %0" : : "r" (addr));
	dsb(ish);
}

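/*
 * Illustrative use (hypothetical caller, not code from this file):
 * after disconnecting a pte table from its pmd entry, e.g.
 *
 *	pmd_clear(pmdp);
 *	__flush_tlb_pgtable(mm, addr);
 *
 * any cached intermediate walk result for the old table is dropped
 * before the pte page can be freed or reused.
 */
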
/*
 * On AArch64, the cache coherency is handled via the set_pte_at() function.
 */
static inline void update_mmu_cache(struct vm_area_struct *vma,
				    unsigned long addr, pte_t *ptep)
{
	/*
	 * set_pte() does not have a DSB for user mappings, so make sure that
	 * the page table write is visible.
	 */
	dsb(ishst);
}

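/*
 * For context, a sketch of the generic MM flow (not code from this
 * file): fault handlers typically do
 *
 *	set_pte_at(mm, addr, ptep, pte);
 *	update_mmu_cache(vma, addr, ptep);
 *
 * so the dsb(ishst) above publishes the new pte before the faulting
 * access is retried.
 */
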
#define update_mmu_cache_pmd(vma, address, pmd) do { } while (0)

#endif	/* __ASSEMBLY__ */

#endif	/* __ASM_TLBFLUSH_H */