arch/powerpc/mm/tlb_nohash.c
/*
 * This file contains the routines for TLB flushing.
 * On machines where the MMU does not use a hash table to store virtual to
 * physical translations (i.e. SW loaded TLBs or Book3E compliant
 * processors; this does -not- include the 603, however, which shares the
 * implementation with hash based processors)
 *
 *  -- BenH
 *
 * Copyright 2008 Ben Herrenschmidt <benh@kernel.crashing.org>
 *                IBM Corp.
 *
 *  Derived from arch/ppc/mm/init.c:
 *    Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
 *
 *  Modifications by Paul Mackerras (PowerMac) (paulus@cs.anu.edu.au)
 *  and Cort Dougan (PReP) (cort@cs.nmt.edu)
 *    Copyright (C) 1996 Paul Mackerras
 *
 *  Derived from "arch/i386/mm/init.c"
 *    Copyright (C) 1991, 1992, 1993, 1994 Linus Torvalds
 *
 *  This program is free software; you can redistribute it and/or
 *  modify it under the terms of the GNU General Public License
 *  as published by the Free Software Foundation; either version
 *  2 of the License, or (at your option) any later version.
 *
 */

#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/init.h>
#include <linux/highmem.h>
#include <linux/pagemap.h>
#include <linux/preempt.h>
#include <linux/spinlock.h>

#include <asm/tlbflush.h>
#include <asm/tlb.h>

#include "mmu_decl.h"

/*
 * Base TLB flushing operations:
 *
 *  - flush_tlb_mm(mm) flushes the specified mm context's TLB entries
 *  - flush_tlb_page(vma, vmaddr) flushes one page
 *  - flush_tlb_range(vma, start, end) flushes a range of pages
 *  - flush_tlb_kernel_range(start, end) flushes kernel pages
 *
 *  - local_* variants of page and mm only apply to the current
 *    processor
 */
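
/*
 * Illustrative only: these hooks are driven by the generic mm code after
 * page table updates, roughly along these lines (hypothetical snippet,
 * not code from this file):
 *
 *	pte = ptep_get_and_clear(mm, addr, ptep);
 *	flush_tlb_page(vma, addr);	shoot down the stale translation
 */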

/*
 * These are the base non-SMP variants of page and mm flushing
 */
void local_flush_tlb_mm(struct mm_struct *mm)
{
	unsigned int pid;

	preempt_disable();
	pid = mm->context.id;
	if (pid != MMU_NO_CONTEXT)
		_tlbil_pid(pid);
	preempt_enable();
}
EXPORT_SYMBOL(local_flush_tlb_mm);

void __local_flush_tlb_page(struct mm_struct *mm, unsigned long vmaddr,
			    int tsize, int ind)
{
	unsigned int pid;

	preempt_disable();
	pid = mm ? mm->context.id : 0;
	if (pid != MMU_NO_CONTEXT)
		_tlbil_va(vmaddr, pid, tsize, ind);
	preempt_enable();
}

void local_flush_tlb_page(struct vm_area_struct *vma, unsigned long vmaddr)
{
	__local_flush_tlb_page(vma ? vma->vm_mm : NULL, vmaddr,
			       0 /* tsize unused for now */, 0);
}
EXPORT_SYMBOL(local_flush_tlb_page);

/*
 * And here are the SMP non-local implementations
 */
#ifdef CONFIG_SMP

static DEFINE_SPINLOCK(tlbivax_lock);

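/*
 * Return true when every CPU that has run this mm is a hardware thread
 * of the current core; in that case a purely local invalidation is
 * sufficient and no IPI or broadcast tlbivax is needed.
 */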
static int mm_is_core_local(struct mm_struct *mm)
{
	return cpumask_subset(mm_cpumask(mm),
			      topology_thread_cpumask(smp_processor_id()));
}

struct tlb_flush_param {
	unsigned long addr;
	unsigned int pid;
	unsigned int tsize;
	unsigned int ind;
};

static void do_flush_tlb_mm_ipi(void *param)
{
	struct tlb_flush_param *p = param;

	_tlbil_pid(p ? p->pid : 0);
}

static void do_flush_tlb_page_ipi(void *param)
{
	struct tlb_flush_param *p = param;

	_tlbil_va(p->addr, p->pid, p->tsize, p->ind);
}

/* Note on invalidations and PID:
 *
 * We snapshot the PID with preempt disabled. At this point, it can still
 * change either because:
 * - our context is being stolen (PID -> NO_CONTEXT) on another CPU
 * - we are invalidating some target that isn't currently running here
 *   and is concurrently acquiring a new PID on another CPU
 * - some other CPU is re-acquiring a lost PID for this mm
 * etc...
 *
 * However, this shouldn't be a problem as we only guarantee
 * invalidation of TLB entries present prior to this call, so we
 * don't care about the PID changing, and invalidating a stale PID
 * is generally harmless.
 */

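/*
 * Concrete illustration of the benign race above (timeline only, not
 * code from this file): CPU0 snapshots pid = 5 for an mm, CPU1 then
 * steals that context and sets mm->context.id back to MMU_NO_CONTEXT,
 * and CPU0 still issues _tlbil_pid(5). The flush may hit a PID that no
 * longer belongs to the mm, which over-invalidates but never leaves a
 * stale translation behind.
 */
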
void flush_tlb_mm(struct mm_struct *mm)
{
	unsigned int pid;

	preempt_disable();
	pid = mm->context.id;
	if (unlikely(pid == MMU_NO_CONTEXT))
		goto no_context;
	if (!mm_is_core_local(mm)) {
		struct tlb_flush_param p = { .pid = pid };
		/* Ignores smp_processor_id() even if set. */
		smp_call_function_many(mm_cpumask(mm),
				       do_flush_tlb_mm_ipi, &p, 1);
	}
	_tlbil_pid(pid);
 no_context:
	preempt_enable();
}
EXPORT_SYMBOL(flush_tlb_mm);

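/*
 * Page-granular flush. When the target mm isn't core-local, we prefer a
 * broadcast tlbivax where the hardware supports it; cores that cannot
 * handle multiple outstanding broadcast invalidations advertise
 * MMU_FTR_LOCK_BCAST_INVAL, and tlbivax_lock serializes them. Otherwise
 * we fall back to IPIs, as in flush_tlb_mm() above.
 */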
void __flush_tlb_page(struct mm_struct *mm, unsigned long vmaddr,
		      int tsize, int ind)
{
	struct cpumask *cpu_mask;
	unsigned int pid;

	preempt_disable();
	pid = mm ? mm->context.id : 0;
	if (unlikely(pid == MMU_NO_CONTEXT))
		goto bail;
	cpu_mask = mm_cpumask(mm);
	if (!mm_is_core_local(mm)) {
		/* If broadcast tlbivax is supported, use it */
		if (mmu_has_feature(MMU_FTR_USE_TLBIVAX_BCAST)) {
			int lock = mmu_has_feature(MMU_FTR_LOCK_BCAST_INVAL);
			if (lock)
				spin_lock(&tlbivax_lock);
			_tlbivax_bcast(vmaddr, pid, tsize, ind);
			if (lock)
				spin_unlock(&tlbivax_lock);
			goto bail;
		} else {
			struct tlb_flush_param p = {
				.pid = pid,
				.addr = vmaddr,
				.tsize = tsize,
				.ind = ind,
			};
			/* Ignores smp_processor_id() even if set in cpu_mask */
			smp_call_function_many(cpu_mask,
					       do_flush_tlb_page_ipi, &p, 1);
		}
	}
	_tlbil_va(vmaddr, pid, tsize, ind);
 bail:
	preempt_enable();
}

void flush_tlb_page(struct vm_area_struct *vma, unsigned long vmaddr)
{
	__flush_tlb_page(vma ? vma->vm_mm : NULL, vmaddr,
			 0 /* tsize unused for now */, 0);
}
EXPORT_SYMBOL(flush_tlb_page);

#endif /* CONFIG_SMP */

/*
 * Flush kernel TLB entries in the given range
 */
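/*
 * Note that the range is not actually used yet: kernel mappings live
 * under PID 0 on these cores, so we simply do a PID 0 flush locally
 * and, on SMP, on every other CPU via IPI.
 */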
void flush_tlb_kernel_range(unsigned long start, unsigned long end)
{
#ifdef CONFIG_SMP
	preempt_disable();
	smp_call_function(do_flush_tlb_mm_ipi, NULL, 1);
	_tlbil_pid(0);
	preempt_enable();
#else
	_tlbil_pid(0);
#endif
}
EXPORT_SYMBOL(flush_tlb_kernel_range);

/*
 * Currently, for range flushing, we just do a full mm flush. This should
 * be optimized based on a threshold on the size of the range, since
 * some implementations can stack multiple tlbivax before a tlbsync, but
 * for now we keep it that way.
 */
void flush_tlb_range(struct vm_area_struct *vma, unsigned long start,
		     unsigned long end)
{
	flush_tlb_mm(vma->vm_mm);
}
EXPORT_SYMBOL(flush_tlb_range);
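
/*
 * A threshold-based variant, as suggested by the comment above, could
 * look roughly like the sketch below (illustrative only; the 64-page
 * cutoff is an arbitrary, untuned number):
 *
 *	if (((end - start) >> PAGE_SHIFT) > 64) {
 *		flush_tlb_mm(vma->vm_mm);
 *	} else {
 *		unsigned long addr;
 *
 *		for (addr = start; addr < end; addr += PAGE_SIZE)
 *			flush_tlb_page(vma, addr);
 *	}
 */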
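
/*
 * mmu_gather back-end: called from the generic mmu_gather teardown path
 * (tlb_finish_mmu()) once a batch of page table tear-down has been
 * accumulated.
 */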
void tlb_flush(struct mmu_gather *tlb)
{
	flush_tlb_mm(tlb->mm);

	/* Push out batch of freed page tables */
	pte_free_finish();
}