arch/powerpc/mm/tlb_hash32.c
/*
 * This file contains the routines for TLB flushing.
 * On machines where the MMU uses a hash table to store virtual to
 * physical translations, these routines flush entries from the
 * hash table also.
 *  -- paulus
 *
 * Derived from arch/ppc/mm/init.c:
 *  Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
 *
 * Modifications by Paul Mackerras (PowerMac) (paulus@cs.anu.edu.au)
 * and Cort Dougan (PReP) (cort@cs.nmt.edu)
 * Copyright (C) 1996 Paul Mackerras
 *
 * Derived from "arch/i386/mm/init.c"
 *  Copyright (C) 1991, 1992, 1993, 1994 Linus Torvalds
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 *
 */

#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/init.h>
#include <linux/highmem.h>
#include <linux/pagemap.h>
#include <linux/export.h>

#include <asm/tlbflush.h>
#include <asm/tlb.h>

#include "mmu_decl.h"

/*
 * Called when unmapping pages to flush entries from the TLB/hash table.
 */
void flush_hash_entry(struct mm_struct *mm, pte_t *ptep, unsigned long addr)
{
	unsigned long ptephys;

	if (Hash != 0) {
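		/*
		 * flush_hash_pages() wants the physical address of the
		 * page holding this PTE, so the low-level code can
		 * locate the Linux PTE and tear down its HPTE.
		 */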
		ptephys = __pa(ptep) & PAGE_MASK;
		flush_hash_pages(mm->context.id, addr, ptephys, 1);
	}
}
EXPORT_SYMBOL(flush_hash_entry);

/*
 * Called at the end of a mmu_gather operation to make sure the
 * TLB flush is completely done.
 */
void tlb_flush(struct mmu_gather *tlb)
{
	if (Hash == 0) {
		/*
		 * The 603 needs to flush the whole TLB here since
		 * it doesn't use a hash table.
		 */
		_tlbia();
	}
}

/*
 * TLB flushing:
 *
 *  - flush_tlb_mm(mm) flushes the TLB entries for the specified mm context
 *  - flush_tlb_page(vma, vmaddr) flushes one page
 *  - flush_tlb_range(vma, start, end) flushes a range of pages
 *  - flush_tlb_kernel_range(start, end) flushes kernel pages
 *
 * Since the hardware hash table functions as an extension of the
 * TLB as far as the Linux page tables are concerned, flush it too.
 *  -- Cort
 */
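
/*
 * Illustrative sketch (not part of the original file): a caller that
 * has changed protections on a user mapping would flush the whole
 * VMA, e.g.
 *
 *	flush_tlb_range(vma, vma->vm_start, vma->vm_end);
 *
 * which on hash MMUs drops both the TLB entries and the HPTEs for
 * that range.
 */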

static void flush_range(struct mm_struct *mm, unsigned long start,
			unsigned long end)
{
	pmd_t *pmd;
	unsigned long pmd_end;
	int count;
	unsigned int ctx = mm->context.id;

	if (Hash == 0) {
		_tlbia();
		return;
	}
	start &= PAGE_MASK;
	if (start >= end)
		return;
	end = (end - 1) | ~PAGE_MASK;
	pmd = pmd_offset(pud_offset(pgd_offset(mm, start), start), start);
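	/*
	 * Walk one page-table page (PMD entry) at a time;
	 * flush_hash_pages() can flush every PTE that lives in a
	 * single page table, so the work is batched per PMD.
	 */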
	for (;;) {
		pmd_end = ((start + PGDIR_SIZE) & PGDIR_MASK) - 1;
		if (pmd_end > end)
			pmd_end = end;
		if (!pmd_none(*pmd)) {
			count = ((pmd_end - start) >> PAGE_SHIFT) + 1;
			flush_hash_pages(ctx, start, pmd_val(*pmd), count);
		}
		if (pmd_end == end)
			break;
		start = pmd_end + 1;
		++pmd;
	}
}
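
/*
 * Worked example (illustrative, assuming the usual 32-bit hash layout
 * of 4 KB pages and 4 MB per PGD entry): flush_range(mm, 0x3ff000,
 * 0x401000) crosses a page-table boundary, so the loop above makes
 * two flush_hash_pages() calls, each covering one page.
 */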

/*
 * Flush kernel TLB entries in the given range.
 */
void flush_tlb_kernel_range(unsigned long start, unsigned long end)
{
	flush_range(&init_mm, start, end);
}
EXPORT_SYMBOL(flush_tlb_kernel_range);
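
/*
 * (Illustrative usage note, not from the original file: the generic
 * vmalloc/ioremap teardown paths are typical callers, e.g.
 * flush_tlb_kernel_range(addr, addr + size) after a vmap area has
 * been unmapped.)
 */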

/*
 * Flush all the (user) entries for the address space described by mm.
 */
void flush_tlb_mm(struct mm_struct *mm)
{
	struct vm_area_struct *mp;

	if (Hash == 0) {
		_tlbia();
		return;
	}

	/*
	 * It is safe to go down the mm's list of vmas when called
	 * from dup_mmap, holding mmap_sem. It would also be safe from
	 * unmap_region or exit_mmap, but not from vmtruncate on SMP;
	 * dup_mmap seems to be the only SMP case which gets here.
	 */
	for (mp = mm->mmap; mp != NULL; mp = mp->vm_next)
		flush_range(mp->vm_mm, mp->vm_start, mp->vm_end);
}
EXPORT_SYMBOL(flush_tlb_mm);

void flush_tlb_page(struct vm_area_struct *vma, unsigned long vmaddr)
{
	struct mm_struct *mm;
	pmd_t *pmd;

	if (Hash == 0) {
		_tlbie(vmaddr);
		return;
	}
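	/*
	 * Kernel addresses live in init_mm's page tables rather than
	 * the current task's, so select the mm to walk accordingly.
	 */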
	mm = (vmaddr < TASK_SIZE) ? vma->vm_mm : &init_mm;
	pmd = pmd_offset(pud_offset(pgd_offset(mm, vmaddr), vmaddr), vmaddr);
	if (!pmd_none(*pmd))
		flush_hash_pages(mm->context.id, vmaddr, pmd_val(*pmd), 1);
}
EXPORT_SYMBOL(flush_tlb_page);

/*
 * For each address in the range, find the pte for the address
 * and check the _PAGE_HASHPTE bit; if it is set, find and destroy
 * the corresponding HPTE.
 */
void flush_tlb_range(struct vm_area_struct *vma, unsigned long start,
		     unsigned long end)
{
	flush_range(vma->vm_mm, start, end);
}
EXPORT_SYMBOL(flush_tlb_range);

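/*
 * No early MMU setup is needed for the 32-bit hash MMU; the hash
 * table itself is set up later in boot (MMU_init_hw(), declared in
 * mmu_decl.h, handles that on these platforms).
 */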
void __init early_init_mmu(void)
{
}