/* arch/sparc/mm/tlb.c
 *
 * Copyright (C) 2004 David S. Miller <davem@redhat.com>
 */

#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/percpu.h>
#include <linux/mm.h>
#include <linux/swap.h>
#include <linux/preempt.h>

#include <asm/pgtable.h>
#include <asm/pgalloc.h>
#include <asm/tlbflush.h>
#include <asm/cacheflush.h>
#include <asm/mmu_context.h>
#include <asm/tlb.h>

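/* Deferred TLB flushing for sparc64.  PTE tear-downs are queued in a
 * per-cpu batch while the core mm is in lazy MMU mode, so that many
 * pages can be flushed with one (possibly cross-call) operation
 * instead of one shootdown per page.
 */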
/* Heavily inspired by the ppc64 code.  */

static DEFINE_PER_CPU(struct tlb_batch, tlb_batch);

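/* Drain this cpu's TLB batch.  The TSB entries for the queued
 * addresses are flushed first; then, if the mm still has a valid
 * hardware context, the TLB entries follow: a single queued address
 * uses the cheaper one-page flush, while a larger batch goes through
 * the SMP cross-call (or UP direct) pending-flush path.
 */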
void flush_tlb_pending(void)
{
	struct tlb_batch *tb = &get_cpu_var(tlb_batch);
	struct mm_struct *mm = tb->mm;

	if (!tb->tlb_nr)
		goto out;

	flush_tsb_user(tb);

	if (CTX_VALID(mm->context)) {
		if (tb->tlb_nr == 1) {
			global_flush_tlb_page(mm, tb->vaddrs[0]);
		} else {
#ifdef CONFIG_SMP
			smp_flush_tlb_pending(tb->mm, tb->tlb_nr,
					      &tb->vaddrs[0]);
#else
			__flush_tlb_pending(CTX_HWBITS(tb->mm->context),
					    tb->tlb_nr, &tb->vaddrs[0]);
#endif
		}
	}

	tb->tlb_nr = 0;

out:
	put_cpu_var(tlb_batch);
}

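/* The core mm brackets batched page table updates with these two
 * hooks.  While ->active is set, tlb_batch_add_one() queues addresses
 * instead of flushing immediately; leaving lazy mode drains whatever
 * has accumulated.
 */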
void arch_enter_lazy_mmu_mode(void)
{
	struct tlb_batch *tb = &__get_cpu_var(tlb_batch);

	tb->active = 1;
}

void arch_leave_lazy_mmu_mode(void)
{
	struct tlb_batch *tb = &__get_cpu_var(tlb_batch);

	if (tb->tlb_nr)
		flush_tlb_pending();
	tb->active = 0;
}

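/* Queue one virtual address for deferred flushing.  The low bit of
 * the page-aligned address records whether the mapping was executable,
 * so the flush side knows to also shoot down I-TLB/I-TSB entries.  The
 * batch is drained early if the mm changes or vaddrs[] fills up, and
 * bypassed entirely (immediate flush) outside of lazy MMU mode.
 */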
static void tlb_batch_add_one(struct mm_struct *mm, unsigned long vaddr,
			      bool exec)
{
	struct tlb_batch *tb = &get_cpu_var(tlb_batch);
	unsigned long nr;

	vaddr &= PAGE_MASK;
	if (exec)
		vaddr |= 0x1UL;

	nr = tb->tlb_nr;

	if (unlikely(nr != 0 && mm != tb->mm)) {
		flush_tlb_pending();
		nr = 0;
	}

	if (!tb->active) {
		global_flush_tlb_page(mm, vaddr);
		flush_tsb_user_page(mm, vaddr);
		goto out;
	}

	if (nr == 0)
		tb->mm = mm;

	tb->vaddrs[nr] = vaddr;
	tb->tlb_nr = ++nr;
	if (nr >= TLB_BATCH_NR)
		flush_tlb_pending();

out:
	put_cpu_var(tlb_batch);
}

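/* Called as a PTE is torn down or replaced.  On non-hypervisor chips
 * (which have virtually indexed D-caches), a dirty file-backed page
 * whose kernel and user mappings differ in the cache color bit (bit
 * 13, the 8K page size) may hold stale aliases, so the D-cache is
 * flushed before the translation disappears.  The TLB batching itself
 * is skipped on full-mm teardown, where the whole address space is
 * going away anyway.
 */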
void tlb_batch_add(struct mm_struct *mm, unsigned long vaddr,
		   pte_t *ptep, pte_t orig, int fullmm)
{
	if (tlb_type != hypervisor &&
	    pte_dirty(orig)) {
		unsigned long paddr, pfn = pte_pfn(orig);
		struct address_space *mapping;
		struct page *page;

		if (!pfn_valid(pfn))
			goto no_cache_flush;

		page = pfn_to_page(pfn);
		if (PageReserved(page))
			goto no_cache_flush;

		/* A real file page? */
		mapping = page_mapping(page);
		if (!mapping)
			goto no_cache_flush;

		paddr = (unsigned long) page_address(page);
		if ((paddr ^ vaddr) & (1 << 13))
			flush_dcache_page_all(mm, page);
	}

no_cache_flush:
	if (!fullmm)
		tlb_batch_add_one(mm, vaddr, pte_exec(orig));
}

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
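/* Flush a huge-page-sized region one 8K page at a time: walk the pte
 * table hanging off a non-huge pmd over the HPAGE_SIZE span and queue
 * every valid entry for a batched flush.
 */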
static void tlb_batch_pmd_scan(struct mm_struct *mm, unsigned long vaddr,
			       pmd_t pmd, bool exec)
{
	unsigned long end;
	pte_t *pte;

	pte = pte_offset_map(&pmd, vaddr);
	end = vaddr + HPAGE_SIZE;
	while (vaddr < end) {
		if (pte_val(*pte) & _PAGE_VALID)
			tlb_batch_add_one(mm, vaddr, exec);
		pte++;
		vaddr += PAGE_SIZE;
	}
	pte_unmap(pte);
}

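/* Install a new pmd, keeping the per-mm count of huge mappings in
 * sync and flushing whatever the old pmd translated: a single batched
 * flush of the head address if it was huge, a pte-by-pte scan via
 * tlb_batch_pmd_scan() if it was a regular pte table.
 */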
void set_pmd_at(struct mm_struct *mm, unsigned long addr,
		pmd_t *pmdp, pmd_t pmd)
{
	pmd_t orig = *pmdp;

	*pmdp = pmd;

	if (mm == &init_mm)
		return;

	if ((pmd_val(pmd) ^ pmd_val(orig)) & PMD_ISHUGE) {
		if (pmd_val(pmd) & PMD_ISHUGE)
			mm->context.huge_pte_count++;
		else
			mm->context.huge_pte_count--;

		/* Do not try to allocate the TSB hash table if we
		 * don't have one already.  We have various locks held
		 * and thus we'll end up doing a GFP_KERNEL allocation
		 * in an atomic context.
		 *
		 * Instead, we let the first TLB miss on a hugepage
		 * take care of this.
		 */
	}

	if (!pmd_none(orig)) {
		bool exec = ((pmd_val(orig) & PMD_HUGE_EXEC) != 0);

		addr &= HPAGE_MASK;
		if (pmd_val(orig) & PMD_ISHUGE)
			tlb_batch_add_one(mm, addr, exec);
		else
			tlb_batch_pmd_scan(mm, addr, orig, exec);
	}
}

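/* Stash a preallocated pte page against the day the huge pmd must be
 * split.  Deposited pages are chained through list_heads overlaid on
 * the otherwise-unused pte page contents, with mm->pmd_huge_pte
 * pointing at the most recently deposited page.
 */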
void pgtable_trans_huge_deposit(struct mm_struct *mm, pgtable_t pgtable)
{
	struct list_head *lh = (struct list_head *) pgtable;

	assert_spin_locked(&mm->page_table_lock);

	/* FIFO */
	if (!mm->pmd_huge_pte)
		INIT_LIST_HEAD(lh);
	else
		list_add(lh, (struct list_head *) mm->pmd_huge_pte);
	mm->pmd_huge_pte = pgtable;
}

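/* Take back a previously deposited pte page.  The two pte slots that
 * the list_head overlay scribbled on are zeroed before the page is
 * handed back, so the caller receives a fully clean pte table.
 */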
pgtable_t pgtable_trans_huge_withdraw(struct mm_struct *mm)
{
	struct list_head *lh;
	pgtable_t pgtable;

	assert_spin_locked(&mm->page_table_lock);

	/* FIFO */
	pgtable = mm->pmd_huge_pte;
	lh = (struct list_head *) pgtable;
	if (list_empty(lh))
		mm->pmd_huge_pte = NULL;
	else {
		mm->pmd_huge_pte = (pgtable_t) lh->next;
		list_del(lh);
	}
	pte_val(pgtable[0]) = 0;
	pte_val(pgtable[1]) = 0;

	return pgtable;
}
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */