#ifndef _ASM_POWERPC_TLBFLUSH_H
#define _ASM_POWERPC_TLBFLUSH_H

/*
 * TLB flushing:
 *
 *  - flush_tlb_mm(mm) flushes the specified mm context TLBs
 *  - flush_tlb_page(vma, vmaddr) flushes one page
 *  - local_flush_tlb_page(vmaddr) flushes one page on the local processor
 *  - flush_tlb_page_nohash(vma, vmaddr) flushes one page if SW loaded TLB
 *  - flush_tlb_range(vma, start, end) flushes a range of pages
 *  - flush_tlb_kernel_range(start, end) flushes a range of kernel pages
 *
 *  This program is free software; you can redistribute it and/or
 *  modify it under the terms of the GNU General Public License
 *  as published by the Free Software Foundation; either version
 *  2 of the License, or (at your option) any later version.
 */
#ifdef __KERNEL__

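/*
 * Illustrative usage sketch (not part of the original header): after a
 * user PTE changes, a hypothetical caller drops the stale translation
 * for that single page, or for the whole address space, with:
 *
 *	flush_tlb_page(vma, address);
 *	flush_tlb_mm(mm);
 *
 * The CONFIG-specific sections below implement these differently.
 */
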
#if defined(CONFIG_4xx) || defined(CONFIG_8xx) || defined(CONFIG_FSL_BOOKE)
/*
 * TLB flushing for software loaded TLB chips
 *
 * TODO: (CONFIG_FSL_BOOKE) determine if flush_tlb_range &
 * flush_tlb_kernel_range are best implemented as tlbia vs
 * specific tlbie's
 */

#include <linux/mm.h>

#define MMU_NO_CONTEXT		((unsigned int)-1)

extern void _tlbie(unsigned long address, unsigned int pid);
extern void _tlbil_all(void);
extern void _tlbil_pid(unsigned int pid);
extern void _tlbil_va(unsigned long address, unsigned int pid);

#if defined(CONFIG_40x) || defined(CONFIG_8xx)
#define _tlbia()	asm volatile ("tlbia; sync" : : : "memory")
#else /* CONFIG_44x || CONFIG_FSL_BOOKE */
extern void _tlbia(void);
#endif

static inline void local_flush_tlb_mm(struct mm_struct *mm)
{
	_tlbil_pid(mm->context.id);
}

static inline void flush_tlb_mm(struct mm_struct *mm)
{
	_tlbil_pid(mm->context.id);
}

static inline void local_flush_tlb_page(unsigned long vmaddr)
{
	_tlbil_va(vmaddr, 0);
}

static inline void flush_tlb_page(struct vm_area_struct *vma,
				  unsigned long vmaddr)
{
	_tlbil_va(vmaddr, vma ? vma->vm_mm->context.id : 0);
}

static inline void flush_tlb_page_nohash(struct vm_area_struct *vma,
					 unsigned long vmaddr)
{
	flush_tlb_page(vma, vmaddr);
}

static inline void flush_tlb_range(struct vm_area_struct *vma,
				   unsigned long start, unsigned long end)
{
	_tlbil_pid(vma->vm_mm->context.id);
}

static inline void flush_tlb_kernel_range(unsigned long start,
					  unsigned long end)
{
	_tlbil_pid(0);
}

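/*
 * Usage note (an illustrative sketch, not part of the original header):
 * on these software-loaded TLB parts a ranged flush costs the same as a
 * full context flush, so a hypothetical caller unmapping a region does
 * no per-page work:
 *
 *	flush_tlb_range(vma, start, end);
 *
 * which invalidates every entry tagged with vma->vm_mm->context.id.
 */
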
#elif defined(CONFIG_PPC32)
/*
 * TLB flushing for "classic" hash-MMU 32-bit CPUs, 6xx, 7xx, 7xxx
 */
extern void _tlbie(unsigned long address);
extern void _tlbia(void);

extern void flush_tlb_mm(struct mm_struct *mm);
extern void flush_tlb_page(struct vm_area_struct *vma, unsigned long vmaddr);
extern void flush_tlb_page_nohash(struct vm_area_struct *vma, unsigned long addr);
extern void flush_tlb_range(struct vm_area_struct *vma, unsigned long start,
			    unsigned long end);
extern void flush_tlb_kernel_range(unsigned long start, unsigned long end);
static inline void local_flush_tlb_page(unsigned long vmaddr)
{
	flush_tlb_page(NULL, vmaddr);
}

#else
/*
 * TLB flushing for 64-bit hash-MMU CPUs
 */

#include <linux/percpu.h>
#include <asm/page.h>

#define PPC64_TLB_BATCH_NR 192

struct ppc64_tlb_batch {
	int			active;
	unsigned long		index;
	struct mm_struct	*mm;
	real_pte_t		pte[PPC64_TLB_BATCH_NR];
	unsigned long		vaddr[PPC64_TLB_BATCH_NR];
	unsigned int		psize;
	int			ssize;
};
DECLARE_PER_CPU(struct ppc64_tlb_batch, ppc64_tlb_batch);

extern void __flush_tlb_pending(struct ppc64_tlb_batch *batch);

extern void hpte_need_flush(struct mm_struct *mm, unsigned long addr,
			    pte_t *ptep, unsigned long pte, int huge);

#define __HAVE_ARCH_ENTER_LAZY_MMU_MODE

static inline void arch_enter_lazy_mmu_mode(void)
{
	struct ppc64_tlb_batch *batch = &__get_cpu_var(ppc64_tlb_batch);

	batch->active = 1;
}

static inline void arch_leave_lazy_mmu_mode(void)
{
	struct ppc64_tlb_batch *batch = &__get_cpu_var(ppc64_tlb_batch);

	if (batch->index)
		__flush_tlb_pending(batch);
	batch->active = 0;
}

#define arch_flush_lazy_mmu_mode()	do {} while (0)
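
/*
 * Illustrative sketch (an assumption, not part of the original header):
 * between the enter/leave pair above, hash PTE updates (for example via
 * ptep_get_and_clear()) are queued into the per-CPU ppc64_tlb_batch by
 * hpte_need_flush() instead of being flushed one at a time:
 *
 *	arch_enter_lazy_mmu_mode();
 *	for (addr = start; addr < end; addr += PAGE_SIZE)
 *		ptep_get_and_clear(mm, addr, ptep);
 *	arch_leave_lazy_mmu_mode();	(drained by __flush_tlb_pending())
 */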

extern void flush_hash_page(unsigned long va, real_pte_t pte, int psize,
			    int ssize, int local);
extern void flush_hash_range(unsigned long number, int local);

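/*
 * With the hash MMU, invalidation of stale translations is driven from
 * the PTE update path via hpte_need_flush() and the batch above, so the
 * generic flush hooks below intentionally have nothing left to do.
 */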
static inline void flush_tlb_mm(struct mm_struct *mm)
{
}

static inline void local_flush_tlb_page(unsigned long vmaddr)
{
}

static inline void flush_tlb_page(struct vm_area_struct *vma,
				  unsigned long vmaddr)
{
}

static inline void flush_tlb_page_nohash(struct vm_area_struct *vma,
					 unsigned long vmaddr)
{
}

static inline void flush_tlb_range(struct vm_area_struct *vma,
				   unsigned long start, unsigned long end)
{
}

static inline void flush_tlb_kernel_range(unsigned long start,
					  unsigned long end)
{
}

/* Private function for use by PCI IO mapping code */
extern void __flush_hash_table_range(struct mm_struct *mm, unsigned long start,
				     unsigned long end);


#endif

#endif /*__KERNEL__ */
#endif /* _ASM_POWERPC_TLBFLUSH_H */