/*
 * High memory handling common code and variables.
 *
 * (C) 1999 Andrea Arcangeli, SuSE GmbH, andrea@suse.de
 *          Gerhard Wichert, Siemens AG, Gerhard.Wichert@pdb.siemens.de
 *
 * Redesigned the x86 32-bit VM architecture to deal with
 * 64-bit physical space. With current x86 CPUs this
 * means up to 64 Gigabytes physical RAM.
 *
 * Rewrote high memory support to move the page cache into
 * high memory. Implemented permanent (schedulable) kmaps
 * based on Linus' idea.
 *
 * Copyright (C) 1999 Ingo Molnar <mingo@redhat.com>
 */

#include <linux/mm.h>
#include <linux/module.h>
#include <linux/swap.h>
#include <linux/bio.h>
#include <linux/pagemap.h>
#include <linux/mempool.h>
#include <linux/blkdev.h>
#include <linux/init.h>
#include <linux/hash.h>
#include <linux/highmem.h>
#include <linux/blktrace_api.h>
#include <asm/tlbflush.h>

/*
 * Virtual_count is not a pure "count".
 *  0 means that it is not mapped, and has not been mapped
 *    since a TLB flush - it is usable.
 *  1 means that there are no users, but it has been mapped
 *    since the last TLB flush - so we can't use it.
 *  n means that there are (n-1) current users of it.
 */
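/*
 * For illustration, the life cycle of one pkmap_count[] entry:
 *
 *   0 -> 2    kmap_high() maps a page into the slot
 *             (map_new_virtual() sets the count to 1, the caller
 *             then bumps it to 2)
 *   n -> n+1  another kmap_high() of the same page
 *   n -> n-1  kunmap_high() drops a reference
 *   1 -> 0    flush_all_zero_pkmaps() unmaps the slot and flushes
 *             the TLB, making it usable again
 */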
#ifdef CONFIG_HIGHMEM

unsigned long totalhigh_pages __read_mostly;

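/*
 * Sum the free pages of every online node's highmem zone to get
 * the system-wide count of free highmem pages.
 */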
unsigned int nr_free_highpages(void)
{
        pg_data_t *pgdat;
        unsigned int pages = 0;

        for_each_online_pgdat(pgdat)
                pages += pgdat->node_zones[ZONE_HIGHMEM].free_pages;

        return pages;
}

static int pkmap_count[LAST_PKMAP];
static unsigned int last_pkmap_nr;
static __cacheline_aligned_in_smp DEFINE_SPINLOCK(kmap_lock);

pte_t *pkmap_page_table;

static DECLARE_WAIT_QUEUE_HEAD(pkmap_map_wait);

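/*
 * Tear down every pkmap entry whose count has fallen to 1, i.e.
 * entries that are still mapped but have no users left, then flush
 * the whole pkmap address range from the TLB in one go.  Called
 * with kmap_lock held.
 */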
static void flush_all_zero_pkmaps(void)
{
        int i;

        flush_cache_kmaps();

        for (i = 0; i < LAST_PKMAP; i++) {
                struct page *page;

                /*
                 * zero means we don't have anything to do,
                 * >1 means that it is still in use. Only
                 * a count of 1 means that it is free but
                 * needs to be unmapped.
                 */
                if (pkmap_count[i] != 1)
                        continue;
                pkmap_count[i] = 0;

                /* sanity check */
                BUG_ON(pte_none(pkmap_page_table[i]));

                /*
                 * Don't need an atomic fetch-and-clear op here;
                 * no-one has the page mapped, and cannot get at
                 * its virtual address (and hence PTE) without first
                 * getting the kmap_lock (which is held here).
                 * So no dangers, even with speculative execution.
                 */
                page = pte_page(pkmap_page_table[i]);
                pte_clear(&init_mm, (unsigned long)page_address(page),
                          &pkmap_page_table[i]);

                set_page_address(page, NULL);
        }
        flush_tlb_kernel_range(PKMAP_ADDR(0), PKMAP_ADDR(LAST_PKMAP));
}

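/*
 * Find a free pkmap slot for @page and map it there, returning the
 * slot's virtual address.  Runs with kmap_lock held, but drops the
 * lock to sleep when every slot is in use - hence process context
 * only.
 */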
static inline unsigned long map_new_virtual(struct page *page)
{
        unsigned long vaddr;
        int count;

start:
        count = LAST_PKMAP;
        /* Find an empty entry */
        for (;;) {
                last_pkmap_nr = (last_pkmap_nr + 1) & LAST_PKMAP_MASK;
                if (!last_pkmap_nr) {
                        flush_all_zero_pkmaps();
                        count = LAST_PKMAP;
                }
                if (!pkmap_count[last_pkmap_nr])
                        break;  /* Found a usable entry */
                if (--count)
                        continue;

                /*
                 * Sleep for somebody else to unmap their entries
                 */
                {
                        DECLARE_WAITQUEUE(wait, current);

                        __set_current_state(TASK_UNINTERRUPTIBLE);
                        add_wait_queue(&pkmap_map_wait, &wait);
                        spin_unlock(&kmap_lock);
                        schedule();
                        remove_wait_queue(&pkmap_map_wait, &wait);
                        spin_lock(&kmap_lock);

                        /* Somebody else might have mapped it while we slept */
                        if (page_address(page))
                                return (unsigned long)page_address(page);

                        /* Re-start */
                        goto start;
                }
        }
        vaddr = PKMAP_ADDR(last_pkmap_nr);
        set_pte_at(&init_mm, vaddr,
                   &(pkmap_page_table[last_pkmap_nr]), mk_pte(page, kmap_prot));

        pkmap_count[last_pkmap_nr] = 1;
        set_page_address(page, (void *)vaddr);

        return vaddr;
}

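/*
 * Map a highmem page into the permanent kmap area and return its
 * kernel virtual address, reusing an existing mapping when there is
 * one.  Each call takes a reference that must be dropped with a
 * matching kunmap_high().
 */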
void fastcall *kmap_high(struct page *page)
{
        unsigned long vaddr;

        /*
         * For highmem pages, we can't trust "virtual" until
         * after we have the lock.
         *
         * We cannot call this from interrupts, as it may block.
         */
        spin_lock(&kmap_lock);
        vaddr = (unsigned long)page_address(page);
        if (!vaddr)
                vaddr = map_new_virtual(page);
        pkmap_count[PKMAP_NR(vaddr)]++;
        BUG_ON(pkmap_count[PKMAP_NR(vaddr)] < 2);
        spin_unlock(&kmap_lock);
        return (void *)vaddr;
}

EXPORT_SYMBOL(kmap_high);

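/*
 * Drop one reference to @page's permanent mapping.  The mapping
 * itself stays in place (count 1) until flush_all_zero_pkmaps()
 * recycles the slot; any task sleeping in map_new_virtual() is
 * woken, since a slot may now be reclaimable.
 */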
void fastcall kunmap_high(struct page *page)
{
        unsigned long vaddr;
        unsigned long nr;
        int need_wakeup;

        spin_lock(&kmap_lock);
        vaddr = (unsigned long)page_address(page);
        BUG_ON(!vaddr);
        nr = PKMAP_NR(vaddr);

        /*
         * A count must never go down to zero
         * without a TLB flush!
         */
        need_wakeup = 0;
        switch (--pkmap_count[nr]) {
        case 0:
                BUG();
        case 1:
                /*
                 * Avoid an unnecessary wake_up() function call.
                 * The common case is pkmap_count[] == 1, but
                 * no waiters.
                 * The tasks queued in the wait-queue are guarded
                 * by both the lock in the wait-queue-head and by
                 * the kmap_lock.  As the kmap_lock is held here,
                 * no need for the wait-queue-head's lock.  Simply
                 * test if the queue is empty.
                 */
                need_wakeup = waitqueue_active(&pkmap_map_wait);
        }
        spin_unlock(&kmap_lock);

        /* do wake-up, if needed, race-free outside of the spin lock */
        if (need_wakeup)
                wake_up(&pkmap_map_wait);
}

EXPORT_SYMBOL(kunmap_high);

#endif /* CONFIG_HIGHMEM */

#if defined(HASHED_PAGE_VIRTUAL)

#define PA_HASH_ORDER 7

/*
 * Describes one page->virtual association
 */
struct page_address_map {
        struct page *page;
        void *virtual;
        struct list_head list;
};

/*
 * page_address_map freelist, allocated from page_address_maps.
 */
static struct list_head page_address_pool;      /* freelist */
static spinlock_t pool_lock;                    /* protects page_address_pool */

/*
 * Hash table bucket
 */
static struct page_address_slot {
        struct list_head lh;                    /* List of page_address_maps */
        spinlock_t lock;                        /* Protect this bucket's list */
} ____cacheline_aligned_in_smp page_address_htable[1<<PA_HASH_ORDER];

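/*
 * Hash the struct page pointer itself to pick one of the
 * 1 << PA_HASH_ORDER (i.e. 128) buckets.
 */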
static struct page_address_slot *page_slot(struct page *page)
{
        return &page_address_htable[hash_ptr(page, PA_HASH_ORDER)];
}

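/*
 * Return the page's kernel virtual address: computed directly for
 * lowmem pages, looked up in the hash table for highmem pages.
 * Returns NULL for a highmem page that is not currently mapped.
 */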
void *page_address(struct page *page)
{
        unsigned long flags;
        void *ret;
        struct page_address_slot *pas;

        if (!PageHighMem(page))
                return lowmem_page_address(page);

        pas = page_slot(page);
        ret = NULL;
        spin_lock_irqsave(&pas->lock, flags);
        if (!list_empty(&pas->lh)) {
                struct page_address_map *pam;

                list_for_each_entry(pam, &pas->lh, list) {
                        if (pam->page == page) {
                                ret = pam->virtual;
                                goto done;
                        }
                }
        }
done:
        spin_unlock_irqrestore(&pas->lock, flags);
        return ret;
}

EXPORT_SYMBOL(page_address);

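/*
 * Record (non-NULL @virtual) or remove (NULL @virtual) a
 * page->virtual association.  Map entries are taken from and
 * returned to the global freelist, and hashed by struct page
 * pointer via page_slot().
 */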
void set_page_address(struct page *page, void *virtual)
{
        unsigned long flags;
        struct page_address_slot *pas;
        struct page_address_map *pam;

        BUG_ON(!PageHighMem(page));

        pas = page_slot(page);
        if (virtual) {          /* Add */
                BUG_ON(list_empty(&page_address_pool));

                spin_lock_irqsave(&pool_lock, flags);
                pam = list_entry(page_address_pool.next,
                                struct page_address_map, list);
                list_del(&pam->list);
                spin_unlock_irqrestore(&pool_lock, flags);

                pam->page = page;
                pam->virtual = virtual;

                spin_lock_irqsave(&pas->lock, flags);
                list_add_tail(&pam->list, &pas->lh);
                spin_unlock_irqrestore(&pas->lock, flags);
        } else {                /* Remove */
                spin_lock_irqsave(&pas->lock, flags);
                list_for_each_entry(pam, &pas->lh, list) {
                        if (pam->page == page) {
                                list_del(&pam->list);
                                spin_unlock_irqrestore(&pas->lock, flags);
                                spin_lock_irqsave(&pool_lock, flags);
                                list_add_tail(&pam->list, &page_address_pool);
                                spin_unlock_irqrestore(&pool_lock, flags);
                                goto done;
                        }
                }
                spin_unlock_irqrestore(&pas->lock, flags);
        }
done:
        return;
}

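/*
 * One map entry per pkmap slot suffices: only pages currently in
 * the permanent kmap area ever have a virtual address to record.
 */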
static struct page_address_map page_address_maps[LAST_PKMAP];

void __init page_address_init(void)
{
        int i;

        INIT_LIST_HEAD(&page_address_pool);
        for (i = 0; i < ARRAY_SIZE(page_address_maps); i++)
                list_add(&page_address_maps[i].list, &page_address_pool);
        for (i = 0; i < ARRAY_SIZE(page_address_htable); i++) {
                INIT_LIST_HEAD(&page_address_htable[i].lh);
                spin_lock_init(&page_address_htable[i].lock);
        }
        spin_lock_init(&pool_lock);
}

#endif /* defined(CONFIG_HIGHMEM) && !defined(WANT_PAGE_VIRTUAL) */