[POWERPC] Rewrite IO allocation & mapping on powerpc64
include/asm-powerpc/tlbflush.h
#ifndef _ASM_POWERPC_TLBFLUSH_H
#define _ASM_POWERPC_TLBFLUSH_H
/*
 * TLB flushing:
 *
 *  - flush_tlb_mm(mm) flushes the specified mm context TLB's
 *  - flush_tlb_page(vma, vmaddr) flushes one page
 *  - flush_tlb_page_nohash(vma, vmaddr) flushes one page if SW loaded TLB
 *  - flush_tlb_range(vma, start, end) flushes a range of pages
 *  - flush_tlb_kernel_range(start, end) flushes a range of kernel pages
 *  - flush_tlb_pgtables(mm, start, end) flushes a range of page tables
 *
 *  This program is free software; you can redistribute it and/or
 *  modify it under the terms of the GNU General Public License
 *  as published by the Free Software Foundation; either version
 *  2 of the License, or (at your option) any later version.
 */
#ifdef __KERNEL__

struct mm_struct;
struct vm_area_struct;

#if defined(CONFIG_4xx) || defined(CONFIG_8xx) || defined(CONFIG_FSL_BOOKE)
/*
 * TLB flushing for software loaded TLB chips
 *
 * TODO: (CONFIG_FSL_BOOKE) determine if flush_tlb_range &
 * flush_tlb_kernel_range are best implemented as tlbia vs
 * specific tlbie's
 */

extern void _tlbie(unsigned long address);

#if defined(CONFIG_40x) || defined(CONFIG_8xx)
#define _tlbia()	asm volatile ("tlbia; sync" : : : "memory")
#else /* CONFIG_44x || CONFIG_FSL_BOOKE */
extern void _tlbia(void);
#endif
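
/*
 * Note (added commentary): _tlbie(addr) invalidates the TLB entry
 * covering a single virtual address, while _tlbia() invalidates the
 * whole TLB.  40x and 8xx implement the tlbia instruction, so _tlbia()
 * can be a one-line inline; 44x and FSL BookE lack a usable tlbia and
 * provide _tlbia() as an out-of-line assembly routine instead.
 */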

static inline void flush_tlb_mm(struct mm_struct *mm)
{
	_tlbia();
}

static inline void flush_tlb_page(struct vm_area_struct *vma,
				  unsigned long vmaddr)
{
	_tlbie(vmaddr);
}

static inline void flush_tlb_page_nohash(struct vm_area_struct *vma,
					 unsigned long vmaddr)
{
	_tlbie(vmaddr);
}

static inline void flush_tlb_range(struct vm_area_struct *vma,
				   unsigned long start, unsigned long end)
{
	_tlbia();
}

static inline void flush_tlb_kernel_range(unsigned long start,
					  unsigned long end)
{
	_tlbia();
}

#elif defined(CONFIG_PPC32)
/*
 * TLB flushing for "classic" hash-MMU 32-bit CPUs, 6xx, 7xx, 7xxx
 */
extern void _tlbie(unsigned long address);
extern void _tlbia(void);

extern void flush_tlb_mm(struct mm_struct *mm);
extern void flush_tlb_page(struct vm_area_struct *vma, unsigned long vmaddr);
extern void flush_tlb_page_nohash(struct vm_area_struct *vma, unsigned long addr);
extern void flush_tlb_range(struct vm_area_struct *vma, unsigned long start,
			    unsigned long end);
extern void flush_tlb_kernel_range(unsigned long start, unsigned long end);

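/*
 * Note (added commentary): on these hash-MMU CPUs a flush must also
 * remove any stale hash table entries covering the range, not just
 * TLB entries, so the operations above are implemented out of line.
 */
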
#else
/*
 * TLB flushing for 64-bit hash-MMU CPUs
 */

#include <linux/percpu.h>
#include <asm/page.h>

#define PPC64_TLB_BATCH_NR 192

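/*
 * Note (added commentary): each CPU gathers pending hash-PTE
 * invalidations into this per-CPU batch while "lazy MMU mode" is
 * active.  A given batch only ever holds entries for one mm and one
 * page size; it is flushed when it fills up (PPC64_TLB_BATCH_NR
 * entries), when the mm or page size changes, or when lazy MMU mode
 * is left.
 */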
struct ppc64_tlb_batch {
	int			active;
	unsigned long		index;
	struct mm_struct	*mm;
	real_pte_t		pte[PPC64_TLB_BATCH_NR];
	unsigned long		vaddr[PPC64_TLB_BATCH_NR];
	unsigned int		psize;
};
DECLARE_PER_CPU(struct ppc64_tlb_batch, ppc64_tlb_batch);

extern void __flush_tlb_pending(struct ppc64_tlb_batch *batch);

extern void hpte_need_flush(struct mm_struct *mm, unsigned long addr,
			    pte_t *ptep, unsigned long pte, int huge);
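
/*
 * Note (added commentary): hpte_need_flush() is called whenever a
 * valid PTE is modified or cleared.  It queues the old translation in
 * the per-CPU batch above; if batching is not currently active, it
 * flushes the entry immediately instead.
 */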

#define __HAVE_ARCH_ENTER_LAZY_MMU_MODE

static inline void arch_enter_lazy_mmu_mode(void)
{
	struct ppc64_tlb_batch *batch = &__get_cpu_var(ppc64_tlb_batch);

	batch->active = 1;
}

static inline void arch_leave_lazy_mmu_mode(void)
{
	struct ppc64_tlb_batch *batch = &__get_cpu_var(ppc64_tlb_batch);

	if (batch->index)
		__flush_tlb_pending(batch);
	batch->active = 0;
}

#define arch_flush_lazy_mmu_mode()	do {} while (0)
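
/*
 * A minimal usage sketch (added commentary, not part of the original
 * header): the generic mm code brackets batched page table updates
 * with the lazy MMU hooks, roughly like:
 *
 *	arch_enter_lazy_mmu_mode();
 *	for (; addr < end; ptep++, addr += PAGE_SIZE) {
 *		ptep_get_and_clear(mm, addr, ptep);
 *		// on ppc64 each clear ends up in hpte_need_flush(),
 *		// which queues the invalidation in the per-CPU batch
 *	}
 *	arch_leave_lazy_mmu_mode();	// flushes anything still pending
 */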

extern void flush_hash_page(unsigned long va, real_pte_t pte, int psize,
			    int local);
extern void flush_hash_range(unsigned long number, int local);

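/*
 * Note (added commentary): flush_hash_page() invalidates one hash PTE
 * previously queued by hpte_need_flush(); flush_hash_range() flushes
 * the current CPU's pending batch.  The "local" argument selects the
 * local (tlbiel) invalidation form when the translation is only in
 * use on the current CPU.
 *
 * Because invalidation on hash-MMU 64-bit is driven entirely by hash
 * PTE flushing, the generic flush_tlb_* hooks below have nothing left
 * to do and are empty.
 */
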
static inline void flush_tlb_mm(struct mm_struct *mm)
{
}

static inline void flush_tlb_page(struct vm_area_struct *vma,
				  unsigned long vmaddr)
{
}

static inline void flush_tlb_page_nohash(struct vm_area_struct *vma,
					 unsigned long vmaddr)
{
}

static inline void flush_tlb_range(struct vm_area_struct *vma,
				   unsigned long start, unsigned long end)
{
}

static inline void flush_tlb_kernel_range(unsigned long start,
					  unsigned long end)
{
}

/* Private function for use by PCI IO mapping code */
extern void __flush_hash_table_range(struct mm_struct *mm, unsigned long start,
				     unsigned long end);
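
/*
 * Note (added commentary): __flush_hash_table_range() purges hash
 * table entries for a range of kernel mappings before the virtual
 * space is reused, which is what the "PCI IO mapping code" comment
 * above refers to (see the IO allocation rewrite named in the commit
 * title at the top of this page).
 */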

#endif

/*
 * This gets called at the end of handling a page fault, when
 * the kernel has put a new PTE into the page table for the process.
 * We use it to ensure coherency between the i-cache and d-cache
 * for the page which has just been mapped in.
 * On machines which use an MMU hash table, we use this to put a
 * corresponding HPTE into the hash table ahead of time, instead of
 * waiting for the inevitable extra hash-table miss exception.
 */
extern void update_mmu_cache(struct vm_area_struct *, unsigned long, pte_t);

/*
 * This is called in munmap when we have freed up some page-table
 * pages.  We don't need to do anything here, there's nothing special
 * about our page-table pages.  -- paulus
 */
static inline void flush_tlb_pgtables(struct mm_struct *mm,
				      unsigned long start, unsigned long end)
{
}

#endif /* __KERNEL__ */
#endif /* _ASM_POWERPC_TLBFLUSH_H */