[PATCH] mm: tlb_is_full_mm was obscure
[deliverable/linux.git] include/asm-arm/tlb.h
/*
 *  linux/include/asm-arm/tlb.h
 *
 *  Copyright (C) 2002 Russell King
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * Experimentation shows that on a StrongARM, it appears to be faster
 * to use the "invalidate whole tlb" rather than "invalidate single
 * tlb" for this.
 *
 * This appears true for both the process fork+exit case, as well as
 * the munmap-large-area case.
 */
#ifndef __ASMARM_TLB_H
#define __ASMARM_TLB_H

#include <asm/cacheflush.h>
#include <asm/tlbflush.h>
#include <asm/pgalloc.h>

/*
 * TLB handling.  This allows us to remove pages from the page
 * tables, and efficiently handle the TLB issues.
 */
struct mmu_gather {
	struct mm_struct	*mm;
	unsigned int		freed;
	unsigned int		fullmm;

	unsigned int		flushes;
	unsigned int		avoided_flushes;
};

DECLARE_PER_CPU(struct mmu_gather, mmu_gathers);
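
/*
 * Note: only the declaration lives here.  Exactly one translation unit
 * must provide the matching per-CPU definition; a minimal sketch (the
 * actual file is arch-specific) would be:
 *
 *	DEFINE_PER_CPU(struct mmu_gather, mmu_gathers);
 *
 * Because the gather is per-CPU, the get_cpu_var()/put_cpu_var() pair
 * in tlb_gather_mmu()/tlb_finish_mmu() below keeps the task on one CPU
 * for the lifetime of the gather.
 */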
38
static inline struct mmu_gather *
tlb_gather_mmu(struct mm_struct *mm, unsigned int full_mm_flush)
{
	struct mmu_gather *tlb = &get_cpu_var(mmu_gathers);

	tlb->mm = mm;
	tlb->freed = 0;
	tlb->fullmm = full_mm_flush;

	return tlb;
}

static inline void
tlb_finish_mmu(struct mmu_gather *tlb, unsigned long start, unsigned long end)
{
	struct mm_struct *mm = tlb->mm;
	unsigned long freed = tlb->freed;
	int rss = get_mm_counter(mm, rss);

	/* clamp, so the RSS counter is never driven below zero */
	if (rss < freed)
		freed = rss;
	add_mm_counter(mm, rss, -freed);

	if (tlb->fullmm)
		flush_tlb_mm(mm);

	/* keep the page table cache within bounds */
	check_pgt_cache();

	put_cpu_var(mmu_gathers);
}
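
/*
 * Typical full-MM teardown, roughly how the core mm code drives these
 * hooks when an address space is destroyed (a sketch only; the actual
 * unmap step is elided):
 *
 *	struct mmu_gather *tlb;
 *
 *	tlb = tlb_gather_mmu(mm, 1);	(full_mm_flush set)
 *	... unmap every vma, handing each page to tlb_remove_page() ...
 *	tlb_finish_mmu(tlb, 0, TASK_SIZE);
 *
 * With fullmm set, the per-vma hooks below become no-ops and the whole
 * TLB is invalidated once, by the single flush_tlb_mm() above.
 */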

#define tlb_remove_tlb_entry(tlb,ptep,address)	do { } while (0)

/*
 * In the case of tlb vma handling, we can optimise these away when
 * we're doing a full MM flush.  When we're doing a munmap, the vmas
 * are adjusted to only cover the region to be torn down.
 */
static inline void
tlb_start_vma(struct mmu_gather *tlb, struct vm_area_struct *vma)
{
	if (!tlb->fullmm)
		flush_cache_range(vma, vma->vm_start, vma->vm_end);
}

static inline void
tlb_end_vma(struct mmu_gather *tlb, struct vm_area_struct *vma)
{
	if (!tlb->fullmm)
		flush_tlb_range(vma, vma->vm_start, vma->vm_end);
}

#define tlb_remove_page(tlb,page)	free_page_and_swap_cache(page)
#define pte_free_tlb(tlb,ptep)		pte_free(ptep)
#define pmd_free_tlb(tlb,pmdp)		pmd_free(pmdp)
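
/*
 * A partial unmap (e.g. munmap of one region) instead brackets each
 * affected vma with the hooks above, so only the torn-down range is
 * flushed (a sketch; "addr" and "end" bound the region):
 *
 *	tlb = tlb_gather_mmu(mm, 0);
 *	tlb_start_vma(tlb, vma);
 *	... clear the ptes, calling tlb_remove_page() on each page ...
 *	tlb_end_vma(tlb, vma);		(flush_tlb_range() happens here)
 *	tlb_finish_mmu(tlb, addr, end);
 */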

#define tlb_migrate_finish(mm)		do { } while (0)

#endif