mm: change the interface for __tlb_remove_page()
/* include/asm-generic/tlb.h
 *
 * Generic TLB shootdown code
 *
 * Copyright 2001 Red Hat, Inc.
 * Based on code from mm/memory.c  Copyright Linus Torvalds and others.
 *
 * Copyright 2011 Red Hat, Inc., Peter Zijlstra
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */
#ifndef _ASM_GENERIC__TLB_H
#define _ASM_GENERIC__TLB_H

#include <linux/swap.h>
#include <asm/pgalloc.h>
#include <asm/tlbflush.h>

#ifdef CONFIG_HAVE_RCU_TABLE_FREE
/*
 * Semi RCU freeing of the page directories.
 *
 * This is needed by some architectures to implement software pagetable walkers.
 *
 * gup_fast() and other software pagetable walkers do a lockless page-table
 * walk and therefore need some synchronization with the freeing of the page
 * directories. The chosen means to accomplish that is by disabling IRQs over
 * the walk.
 *
 * Architectures that use IPIs to flush TLBs will then automagically DTRT,
 * since we unlink the page, flush TLBs, free the page. Since the disabling of
 * IRQs delays the completion of the TLB flush we can never observe an already
 * freed page.
 *
 * Architectures that do not have this (PPC) need to delay the freeing by some
 * other means; this batching is that means.
 *
 * What we do is batch the freed directory pages (tables) and RCU free them.
 * We use the sched RCU variant, as that guarantees that IRQ/preempt disabling
 * holds off grace periods.
 *
 * However, in order to batch these pages we need to allocate storage; this
 * allocation is deep inside the MM code and can thus easily fail on memory
 * pressure. To guarantee progress we fall back to single table freeing, see
 * the implementation of tlb_remove_table_one().
 */
struct mmu_table_batch {
        struct rcu_head         rcu;
        unsigned int            nr;
        void                    *tables[0];
};

#define MAX_TABLE_BATCH         \
        ((PAGE_SIZE - sizeof(struct mmu_table_batch)) / sizeof(void *))

extern void tlb_table_flush(struct mmu_gather *tlb);
extern void tlb_remove_table(struct mmu_gather *tlb, void *table);

#endif
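
/*
 * Illustrative sketch only (not part of this header): on an architecture
 * that selects CONFIG_HAVE_RCU_TABLE_FREE, the per-level freeing hook is
 * expected to hand the table to tlb_remove_table() rather than freeing it
 * directly, so the actual free is deferred past an RCU-sched grace period.
 * The exact argument encoding is arch-specific; the following is only a
 * hypothetical example of the call shape:
 *
 *      static inline void __pte_free_tlb(struct mmu_gather *tlb,
 *                                        pgtable_t pte, unsigned long addr)
 *      {
 *              pgtable_page_dtor(pte);
 *              tlb_remove_table(tlb, page_address(pte));
 *      }
 */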

/*
 * If we can't allocate a page to make a big batch of page pointers
 * to work on, then just handle a few from the on-stack structure.
 */
#define MMU_GATHER_BUNDLE       8

struct mmu_gather_batch {
        struct mmu_gather_batch *next;
        unsigned int            nr;
        unsigned int            max;
        struct page             *pages[0];
};

#define MAX_GATHER_BATCH        \
        ((PAGE_SIZE - sizeof(struct mmu_gather_batch)) / sizeof(void *))

/*
 * Limit the maximum number of mmu_gather batches to reduce a risk of soft
 * lockups for non-preemptible kernels on huge machines when a lot of memory
 * is zapped during unmapping.
 * 10K pages freed at once should be safe even without a preemption point.
 */
#define MAX_GATHER_BATCH_COUNT  (10000UL/MAX_GATHER_BATCH)

/* struct mmu_gather is an opaque type used by the mm code for passing around
 * any data needed by arch specific code for tlb_remove_page.
 */
struct mmu_gather {
        struct mm_struct        *mm;
#ifdef CONFIG_HAVE_RCU_TABLE_FREE
        struct mmu_table_batch  *batch;
#endif
        unsigned long           start;
        unsigned long           end;
        /* we are in the middle of an operation to clear
         * a full mm and can make some optimizations */
        unsigned int            fullmm : 1,
        /* we have performed an operation which
         * requires a complete flush of the tlb */
                                need_flush_all : 1;

        struct mmu_gather_batch *active;
        struct mmu_gather_batch local;
        struct page             *__pages[MMU_GATHER_BUNDLE];
        unsigned int            batch_count;
        /*
         * __tlb_adjust_range will track the new addr here,
         * so that we can adjust the range after the flush
         */
        unsigned long           addr;
};

#define HAVE_GENERIC_MMU_GATHER

void tlb_gather_mmu(struct mmu_gather *tlb, struct mm_struct *mm,
                    unsigned long start, unsigned long end);
void tlb_flush_mmu(struct mmu_gather *tlb);
void tlb_finish_mmu(struct mmu_gather *tlb, unsigned long start,
                    unsigned long end);
bool __tlb_remove_page(struct mmu_gather *tlb, struct page *page);

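/*
 * Typical lifecycle (illustrative sketch, not a definition made by this
 * header): unmap paths drive the generic gather roughly as
 *
 *      struct mmu_gather tlb;
 *
 *      tlb_gather_mmu(&tlb, mm, start, end);
 *      ... zap page tables, feeding pages via tlb_remove_page() ...
 *      tlb_finish_mmu(&tlb, start, end);
 *
 * tlb_flush_mmu() may also be called in the middle to force a flush and
 * free the pages batched so far.
 */
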
static inline void __tlb_adjust_range(struct mmu_gather *tlb,
                                      unsigned long address)
{
        tlb->start = min(tlb->start, address);
        tlb->end = max(tlb->end, address + PAGE_SIZE);
        /*
         * Track the last address with which we adjusted the range. This
         * will be used later to adjust the range again after a flush caused
         * by a failed __tlb_remove_page().
         */
        tlb->addr = address;
}

static inline void __tlb_reset_range(struct mmu_gather *tlb)
{
        if (tlb->fullmm) {
                tlb->start = tlb->end = ~0;
        } else {
                tlb->start = TASK_SIZE;
                tlb->end = 0;
        }
}

/* tlb_remove_page
 *      Similar to __tlb_remove_page but will call tlb_flush_mmu() itself when
 *      required.
 */
static inline void tlb_remove_page(struct mmu_gather *tlb, struct page *page)
{
        if (__tlb_remove_page(tlb, page)) {
                tlb_flush_mmu(tlb);
                __tlb_adjust_range(tlb, tlb->addr);
                __tlb_remove_page(tlb, page);
        }
}
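
/*
 * Usage sketch (illustrative only, not defined here): because
 * __tlb_remove_page() now returns true when the gather batch is full,
 * a zap loop can break out and let its caller flush, instead of having
 * tlb_remove_page() flush in the middle of the loop, e.g.:
 *
 *      if (__tlb_remove_page(tlb, page)) {
 *              force_flush = 1;
 *              break;
 *      }
 *
 * where force_flush is a hypothetical local flag checked after the loop.
 */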

static inline bool __tlb_remove_pte_page(struct mmu_gather *tlb, struct page *page)
{
        /* active->nr should be zero when we call this */
        VM_BUG_ON_PAGE(tlb->active->nr, page);
        __tlb_adjust_range(tlb, tlb->addr);
        return __tlb_remove_page(tlb, page);
}

/*
 * In the case of tlb vma handling, we can optimise these away in the
 * case where we're doing a full MM flush. When we're doing a munmap,
 * the vmas are adjusted to only cover the region to be torn down.
 */
#ifndef tlb_start_vma
#define tlb_start_vma(tlb, vma) do { } while (0)
#endif

#define __tlb_end_vma(tlb, vma)                                 \
        do {                                                    \
                if (!tlb->fullmm && tlb->end) {                 \
                        tlb_flush(tlb);                         \
                        __tlb_reset_range(tlb);                 \
                }                                               \
        } while (0)

#ifndef tlb_end_vma
#define tlb_end_vma     __tlb_end_vma
#endif

#ifndef __tlb_remove_tlb_entry
#define __tlb_remove_tlb_entry(tlb, ptep, address) do { } while (0)
#endif

/**
 * tlb_remove_tlb_entry - remember a pte unmapping for later tlb invalidation.
 *
 * Record the fact that PTEs were really unmapped by updating the range,
 * so we can later optimise away the tlb invalidate. This helps when
 * userspace is unmapping already-unmapped pages, which happens quite a lot.
 */
#define tlb_remove_tlb_entry(tlb, ptep, address)                \
        do {                                                    \
                __tlb_adjust_range(tlb, address);               \
                __tlb_remove_tlb_entry(tlb, ptep, address);     \
        } while (0)

/**
 * tlb_remove_pmd_tlb_entry - remember a pmd mapping for later tlb invalidation
 * This is a nop so far, because only x86 needs it.
 */
#ifndef __tlb_remove_pmd_tlb_entry
#define __tlb_remove_pmd_tlb_entry(tlb, pmdp, address) do {} while (0)
#endif

#define tlb_remove_pmd_tlb_entry(tlb, pmdp, address)            \
        do {                                                    \
                __tlb_adjust_range(tlb, address);               \
                __tlb_remove_pmd_tlb_entry(tlb, pmdp, address); \
        } while (0)

#define pte_free_tlb(tlb, ptep, address)                        \
        do {                                                    \
                __tlb_adjust_range(tlb, address);               \
                __pte_free_tlb(tlb, ptep, address);             \
        } while (0)

#ifndef __ARCH_HAS_4LEVEL_HACK
#define pud_free_tlb(tlb, pudp, address)                        \
        do {                                                    \
                __tlb_adjust_range(tlb, address);               \
                __pud_free_tlb(tlb, pudp, address);             \
        } while (0)
#endif

#define pmd_free_tlb(tlb, pmdp, address)                        \
        do {                                                    \
                __tlb_adjust_range(tlb, address);               \
                __pmd_free_tlb(tlb, pmdp, address);             \
        } while (0)

#define tlb_migrate_finish(mm) do {} while (0)

#endif /* _ASM_GENERIC__TLB_H */