arch/s390/include/asm/tlbflush.h
#ifndef _S390_TLBFLUSH_H
#define _S390_TLBFLUSH_H

#include <linux/mm.h>
#include <linux/sched.h>
#include <asm/processor.h>
#include <asm/pgalloc.h>

/*
 * Flush all TLB entries on the local CPU.
 */
static inline void __tlb_flush_local(void)
{
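	/*
	 * PTLB ("purge TLB") discards all TLB entries formed on this
	 * CPU; the "memory" clobber keeps the compiler from moving
	 * memory accesses across the flush.
	 */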
	asm volatile("ptlb" : : : "memory");
}

/*
 * Flush TLB entries for a specific ASCE on all CPUs
 */
static inline void __tlb_flush_idte(unsigned long asce)
{
	/* Global TLB flush for the mm */
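	/*
	 * 0xb98e is the IDTE (invalidate DAT table entry) opcode; the
	 * 2048 loaded into the first register sets what appears to be
	 * the clearing-by-ASCE option bit, and the trailing 0 in the
	 * M4 field asks for a broadcast flush on all CPUs.
	 */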
	asm volatile(
		"	.insn	rrf,0xb98e0000,0,%0,%1,0"
		: : "a" (2048), "a" (asce) : "cc");
}

/*
 * Flush TLB entries for a specific ASCE on the local CPU
 */
static inline void __tlb_flush_idte_local(unsigned long asce)
{
	/* Local TLB flush for the mm */
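	/*
	 * Same IDTE encoding as above, but with 1 in the M4 field so
	 * only the local CPU's TLB is cleared; this form needs the
	 * TLB local-clearing facility (MACHINE_HAS_TLB_LC).
	 */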
	asm volatile(
		"	.insn	rrf,0xb98e0000,0,%0,%1,1"
		: : "a" (2048), "a" (asce) : "cc");
}

#ifdef CONFIG_SMP
void smp_ptlb_all(void);

/*
 * Flush all TLB entries on all CPUs.
 */
static inline void __tlb_flush_global(void)
{
	register unsigned long reg2 asm("2");
	register unsigned long reg3 asm("3");
	register unsigned long reg4 asm("4");
	long dummy;

	dummy = 0;
	reg2 = reg3 = 0;
	reg4 = ((unsigned long) &dummy) + 1;
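	/*
	 * CSP ("compare and swap and purge") compares reg2 with the
	 * storage operand and, on a match, stores reg3 and purges the
	 * TLBs configuration-wide; both values are 0 here, so the
	 * purge always happens. The +1 above presumably sets the
	 * low-order address bit that requests the TLB purge.
	 */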
	asm volatile(
		"	csp	%0,%2"
		: : "d" (reg2), "d" (reg3), "d" (reg4), "m" (dummy) : "cc");
}

/*
 * Flush TLB entries for a specific mm on all CPUs (in case gmap is used
 * this implies multiple ASCEs!).
 */
static inline void __tlb_flush_full(struct mm_struct *mm)
{
	preempt_disable();
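	/*
	 * The lower halfword of attach_count counts attached tasks;
	 * adding 0x10000 bumps the upper halfword, which seems to act
	 * as a "flush in progress" marker for concurrent attachers
	 * while the cpumask may be reset below.
	 */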
	atomic_add(0x10000, &mm->context.attach_count);
	if (cpumask_equal(mm_cpumask(mm), cpumask_of(smp_processor_id()))) {
		/* Local TLB flush */
		__tlb_flush_local();
	} else {
		/* Global TLB flush */
		__tlb_flush_global();
		/* Reset TLB flush mask */
		if (MACHINE_HAS_TLB_LC)
			cpumask_copy(mm_cpumask(mm),
				     &mm->context.cpu_attach_mask);
	}
	atomic_sub(0x10000, &mm->context.attach_count);
	preempt_enable();
}

/*
 * Flush TLB entries for a specific ASCE on all CPUs.
 */
static inline void __tlb_flush_asce(struct mm_struct *mm, unsigned long asce)
{
	int active, count;

	preempt_disable();
	active = (mm == current->active_mm) ? 1 : 0;
	count = atomic_add_return(0x10000, &mm->context.attach_count);
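	/*
	 * A local-only IDTE is safe when at most the current task has
	 * the mm attached (the lower halfword of count, compared
	 * against "active") and this CPU is the only one in the mask;
	 * otherwise fall back to a machine-wide flush.
	 */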
	if (MACHINE_HAS_TLB_LC && (count & 0xffff) <= active &&
	    cpumask_equal(mm_cpumask(mm), cpumask_of(smp_processor_id()))) {
		__tlb_flush_idte_local(asce);
	} else {
		if (MACHINE_HAS_IDTE)
			__tlb_flush_idte(asce);
		else
			__tlb_flush_global();
		/* Reset TLB flush mask */
		if (MACHINE_HAS_TLB_LC)
			cpumask_copy(mm_cpumask(mm),
				     &mm->context.cpu_attach_mask);
	}
	atomic_sub(0x10000, &mm->context.attach_count);
	preempt_enable();
}

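/*
 * Flush TLB entries for the kernel address space; the kernel ASCE is
 * rebuilt here from init_mm's page table origin and ASCE attribute
 * bits.
 */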
static inline void __tlb_flush_kernel(void)
{
	if (MACHINE_HAS_IDTE)
		__tlb_flush_idte((unsigned long) init_mm.pgd |
				 init_mm.context.asce_bits);
	else
		__tlb_flush_global();
}
#else
#define __tlb_flush_global()	__tlb_flush_local()
#define __tlb_flush_full(mm)	__tlb_flush_local()

/*
 * Flush TLB entries for a specific ASCE; without CONFIG_SMP the local
 * CPU is the only one there is.
 */
static inline void __tlb_flush_asce(struct mm_struct *mm, unsigned long asce)
{
	if (MACHINE_HAS_TLB_LC)
		__tlb_flush_idte_local(asce);
	else
		__tlb_flush_local();
}

static inline void __tlb_flush_kernel(void)
{
	if (MACHINE_HAS_TLB_LC)
		__tlb_flush_idte_local((unsigned long) init_mm.pgd |
				       init_mm.context.asce_bits);
	else
		__tlb_flush_local();
}
#endif

static inline void __tlb_flush_mm(struct mm_struct *mm)
{
	/*
	 * If the machine has IDTE we prefer to do a per-mm flush
	 * on all CPUs instead of doing a local flush if the mm
	 * only ran on the local CPU.
	 */
	if (MACHINE_HAS_IDTE && list_empty(&mm->context.gmap_list))
		__tlb_flush_asce(mm, (unsigned long) mm->pgd |
				 mm->context.asce_bits);
	else
		__tlb_flush_full(mm);
}

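/*
 * Lazy variant: flush only if a pte operation flagged the mm as
 * needing it (context.flush_mm is set when ptep_set_wrprotect or
 * ptep_get_and_clear skip the direct flush, see the comment below).
 */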
static inline void __tlb_flush_mm_lazy(struct mm_struct *mm)
{
	if (mm->context.flush_mm) {
		__tlb_flush_mm(mm);
		mm->context.flush_mm = 0;
	}
}

/*
 * TLB flushing:
 *  flush_tlb() - flushes the current mm struct TLBs
 *  flush_tlb_all() - flushes all processes' TLBs
 *  flush_tlb_mm(mm) - flushes the specified mm context TLBs
 *  flush_tlb_page(vma, vmaddr) - flushes one page
 *  flush_tlb_range(vma, start, end) - flushes a range of pages
 *  flush_tlb_kernel_range(start, end) - flushes a range of kernel pages
 */

/*
 * flush_tlb_mm goes together with ptep_set_wrprotect for the
 * copy_page_range operation and flush_tlb_range is related to
 * ptep_get_and_clear for change_protection. ptep_set_wrprotect and
 * ptep_get_and_clear do not flush the TLBs directly if the mm has
 * only one user. At the end of the update the flush_tlb_mm and
 * flush_tlb_range functions need to do the flush.
 */
#define flush_tlb()			do { } while (0)
#define flush_tlb_all()			do { } while (0)
#define flush_tlb_page(vma, addr)	do { } while (0)
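/*
 * The macros above can be no-ops: per the comment above, the ptep
 * primitives flush directly whenever the mm has more than one user,
 * and the deferred cases are picked up by flush_tlb_mm/flush_tlb_range.
 */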

static inline void flush_tlb_mm(struct mm_struct *mm)
{
	__tlb_flush_mm_lazy(mm);
}

static inline void flush_tlb_range(struct vm_area_struct *vma,
				   unsigned long start, unsigned long end)
{
	__tlb_flush_mm_lazy(vma->vm_mm);
}

static inline void flush_tlb_kernel_range(unsigned long start,
					  unsigned long end)
{
	__tlb_flush_kernel();
}

#endif /* _S390_TLBFLUSH_H */