arch/ppc64/mm/tlb.c
/*
 * This file contains the routines for flushing entries from the
 * TLB and MMU hash table.
 *
 *  Derived from arch/ppc64/mm/init.c:
 *    Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
 *
 *  Modifications by Paul Mackerras (PowerMac) (paulus@cs.anu.edu.au)
 *  and Cort Dougan (PReP) (cort@cs.nmt.edu)
 *    Copyright (C) 1996 Paul Mackerras
 *  Amiga/APUS changes by Jesper Skov (jskov@cygnus.co.uk).
 *
 *  Derived from "arch/i386/mm/init.c"
 *    Copyright (C) 1991, 1992, 1993, 1994  Linus Torvalds
 *
 *  Dave Engebretsen <engebret@us.ibm.com>
 *      Rework for PPC64 port.
 *
 *  This program is free software; you can redistribute it and/or
 *  modify it under the terms of the GNU General Public License
 *  as published by the Free Software Foundation; either version
 *  2 of the License, or (at your option) any later version.
 */
#include <linux/config.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/init.h>
#include <linux/percpu.h>
#include <linux/hardirq.h>
#include <linux/highmem.h>

#include <asm/pgalloc.h>
#include <asm/tlbflush.h>
#include <asm/tlb.h>

DEFINE_PER_CPU(struct ppc64_tlb_batch, ppc64_tlb_batch);

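/* Editorial sketch (assumption, not part of the original source): the
 * per-cpu batch defined above is expected to come from
 * include/asm-ppc64/tlbflush.h and, abridged to the fields this file
 * actually touches, to look roughly like:
 *
 *        struct ppc64_tlb_batch {
 *                unsigned long index;
 *                unsigned long context;
 *                struct mm_struct *mm;
 *                pte_t pte[PPC64_TLB_BATCH_NR];
 *                unsigned long addr[PPC64_TLB_BATCH_NR];
 *        };
 *
 * hpte_update() below appends to this array and __flush_tlb_pending()
 * drains it in one pass.
 */
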
/* This is declared as we are using the more or less generic
 * include/asm-ppc64/tlb.h file -- tgall
 */
DEFINE_PER_CPU(struct mmu_gather, mmu_gathers);

struct pte_freelist_batch
{
        struct rcu_head rcu;
        unsigned int    index;
        pgtable_free_t  tables[0];
};

DEFINE_PER_CPU(struct pte_freelist_batch *, pte_freelist_cur);
unsigned long pte_freelist_forced_free;

#define PTE_FREELIST_SIZE \
        ((PAGE_SIZE - sizeof(struct pte_freelist_batch)) \
          / sizeof(pgtable_free_t))

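/* Worked example (illustrative, not from the original source): with a
 * 4K PAGE_SIZE, a 16-byte struct rcu_head, and an 8-byte pgtable_free_t,
 * the header above pads out to 24 bytes, so one batch page holds about
 * (4096 - 24) / 8 = 509 deferred page-table frees before the batch is
 * handed to RCU.  The exact figure depends on the real sizes of
 * rcu_head and pgtable_free_t on the target kernel.
 */
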
#ifdef CONFIG_SMP
static void pte_free_smp_sync(void *arg)
{
        /* Do nothing, just ensure we sync with all CPUs */
}
#endif

/* This is only called when we are critically out of memory
 * (and fail to get a page in pte_free_tlb).
 */
static void pgtable_free_now(pgtable_free_t pgf)
{
        pte_freelist_forced_free++;

        smp_call_function(pte_free_smp_sync, NULL, 0, 1);

        pgtable_free(pgf);
}

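/* Editorial note (assumption, not in the original source): because
 * smp_call_function() is invoked with wait=1, it does not return until
 * every other online CPU has run the empty pte_free_smp_sync() handler.
 * Any CPU that was still walking this page table inside an
 * interrupts-off section has therefore finished by the time
 * pgtable_free() runs, which is what makes the immediate free safe
 * without waiting for an RCU grace period.
 */
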
static void pte_free_rcu_callback(struct rcu_head *head)
{
        struct pte_freelist_batch *batch =
                container_of(head, struct pte_freelist_batch, rcu);
        unsigned int i;

        for (i = 0; i < batch->index; i++)
                pgtable_free(batch->tables[i]);

        free_page((unsigned long)batch);
}

static void pte_free_submit(struct pte_freelist_batch *batch)
{
        INIT_RCU_HEAD(&batch->rcu);
        call_rcu(&batch->rcu, pte_free_rcu_callback);
}

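/* Editorial note (assumption, not in the original source): call_rcu()
 * defers pte_free_rcu_callback() until a grace period has elapsed,
 * i.e. until every CPU has passed through a quiescent state.  A CPU
 * that might still be traversing the just-unlinked page tables with
 * interrupts disabled cannot reach a quiescent state until it is done,
 * so the batch can be freed without IPIs on the common path.
 */
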
void pgtable_free_tlb(struct mmu_gather *tlb, pgtable_free_t pgf)
{
        /* This is safe as we are holding page_table_lock */
        cpumask_t local_cpumask = cpumask_of_cpu(smp_processor_id());
        struct pte_freelist_batch **batchp = &__get_cpu_var(pte_freelist_cur);

        if (atomic_read(&tlb->mm->mm_users) < 2 ||
            cpus_equal(tlb->mm->cpu_vm_mask, local_cpumask)) {
                pgtable_free(pgf);
                return;
        }

        if (*batchp == NULL) {
                *batchp = (struct pte_freelist_batch *)__get_free_page(GFP_ATOMIC);
                if (*batchp == NULL) {
                        pgtable_free_now(pgf);
                        return;
                }
                (*batchp)->index = 0;
        }
        (*batchp)->tables[(*batchp)->index++] = pgf;
        if ((*batchp)->index == PTE_FREELIST_SIZE) {
                pte_free_submit(*batchp);
                *batchp = NULL;
        }
}

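/* Editorial note (summary of the code above, not in the original
 * source): pgtable_free_tlb() picks one of three disposal paths for a
 * page table:
 *
 *   1. Free immediately when no other CPU can be using the mm
 *      (single user, or cpu_vm_mask covers only this CPU).
 *   2. Otherwise queue it in the per-cpu RCU batch, submitting the
 *      batch once it holds PTE_FREELIST_SIZE entries.
 *   3. If the batch page cannot be allocated under memory pressure,
 *      fall back to pgtable_free_now(), which synchronizes with all
 *      CPUs by IPI before freeing.
 */
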
/*
 * Update the MMU hash table to correspond with a change to
 * a Linux PTE.  If wrprot is true, it is permissible to
 * change the existing HPTE to read-only rather than removing it
 * (if we remove it we should clear the _PAGE_HPTEFLAGS bits).
 */
void hpte_update(struct mm_struct *mm, unsigned long addr,
                 unsigned long pte, int wrprot)
{
        int i;
        unsigned long context = 0;
        struct ppc64_tlb_batch *batch = &__get_cpu_var(ppc64_tlb_batch);

        if (REGION_ID(addr) == USER_REGION_ID)
                context = mm->context.id;
        i = batch->index;

        /*
         * This can happen when we are in the middle of a TLB batch and
         * we encounter memory pressure (eg copy_page_range when it tries
         * to allocate a new pte).  If we have to reclaim memory and end
         * up scanning and resetting referenced bits then our batch context
         * will change mid stream.
         */
        if (unlikely(i != 0 && context != batch->context)) {
                flush_tlb_pending();
                i = 0;
        }

        if (i == 0) {
                batch->context = context;
                batch->mm = mm;
        }
        batch->pte[i] = __pte(pte);
        batch->addr[i] = addr;
        batch->index = ++i;
        if (i >= PPC64_TLB_BATCH_NR)
                flush_tlb_pending();
}

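/* Editorial sketch (assumption, not in the original source):
 * flush_tlb_pending() is expected to be the inline wrapper from
 * include/asm-ppc64/tlbflush.h, roughly:
 *
 *        static inline void flush_tlb_pending(void)
 *        {
 *                struct ppc64_tlb_batch *batch = &get_cpu_var(ppc64_tlb_batch);
 *
 *                if (batch->index)
 *                        __flush_tlb_pending(batch);
 *                put_cpu_var(ppc64_tlb_batch);
 *        }
 *
 * i.e. it drains this CPU's batch through __flush_tlb_pending() below.
 */
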
void __flush_tlb_pending(struct ppc64_tlb_batch *batch)
{
        int i;
        int cpu;
        cpumask_t tmp;
        int local = 0;

        BUG_ON(in_interrupt());

        cpu = get_cpu();
        i = batch->index;
        tmp = cpumask_of_cpu(cpu);
        if (cpus_equal(batch->mm->cpu_vm_mask, tmp))
                local = 1;

        if (i == 1)
                flush_hash_page(batch->context, batch->addr[0], batch->pte[0],
                                local);
        else
                flush_hash_range(batch->context, i, local);
        batch->index = 0;
        put_cpu();
}

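/* Editorial note (assumption, not in the original source): "local" is 1
 * only when cpu_vm_mask shows the mm has run on no CPU but this one, in
 * which case the hash invalidation can use the cheaper local tlbiel
 * form instead of the broadcast tlbie.  A single pending entry takes
 * the flush_hash_page() fast path; larger batches go through
 * flush_hash_range().
 */
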
void pte_free_finish(void)
{
        /* This is safe as we are holding page_table_lock */
        struct pte_freelist_batch **batchp = &__get_cpu_var(pte_freelist_cur);

        if (*batchp == NULL)
                return;
        pte_free_submit(*batchp);
        *batchp = NULL;
}