/*
 * Copyright 2010 Tilera Corporation. All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation, version 2.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
 * NON INFRINGEMENT.  See the GNU General Public License for
 * more details.
 */

#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/mm.h>
#include <linux/swap.h>
#include <linux/smp.h>
#include <linux/highmem.h>
#include <linux/slab.h>
#include <linux/pagemap.h>
#include <linux/spinlock.h>
#include <linux/cpumask.h>
#include <linux/module.h>
#include <linux/io.h>
#include <linux/vmalloc.h>

#include <asm/system.h>
#include <asm/pgtable.h>
#include <asm/pgalloc.h>
#include <asm/fixmap.h>
#include <asm/tlb.h>
#include <asm/tlbflush.h>
#include <asm/homecache.h>

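/* Convert a page count to a size in kB for the printouts below. */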
#define K(x) ((x) << (PAGE_SHIFT-10))

/*
 * The normal show_free_areas() is too verbose on Tile, with dozens
 * of processors and often four NUMA zones each with high and lowmem.
 */
void show_mem(void)
{
	struct zone *zone;

	printk("Active:%lu inactive:%lu dirty:%lu writeback:%lu unstable:%lu"
	       " free:%lu\n slab:%lu mapped:%lu pagetables:%lu bounce:%lu"
	       " pagecache:%lu swap:%lu\n",
	       (global_page_state(NR_ACTIVE_ANON) +
		global_page_state(NR_ACTIVE_FILE)),
	       (global_page_state(NR_INACTIVE_ANON) +
		global_page_state(NR_INACTIVE_FILE)),
	       global_page_state(NR_FILE_DIRTY),
	       global_page_state(NR_WRITEBACK),
	       global_page_state(NR_UNSTABLE_NFS),
	       global_page_state(NR_FREE_PAGES),
	       (global_page_state(NR_SLAB_RECLAIMABLE) +
		global_page_state(NR_SLAB_UNRECLAIMABLE)),
	       global_page_state(NR_FILE_MAPPED),
	       global_page_state(NR_PAGETABLE),
	       global_page_state(NR_BOUNCE),
	       global_page_state(NR_FILE_PAGES),
	       nr_swap_pages);

	for_each_zone(zone) {
		unsigned long flags, order, total = 0;
		unsigned long largest_order = MAX_ORDER;  /* i.e. none yet */

		if (!populated_zone(zone))
			continue;

		printk("Node %d %7s: ", zone_to_nid(zone), zone->name);
		spin_lock_irqsave(&zone->lock, flags);
		for (order = 0; order < MAX_ORDER; order++) {
			int nr = zone->free_area[order].nr_free;
			total += nr << order;
			if (nr)
				largest_order = order;
		}
		spin_unlock_irqrestore(&zone->lock, flags);
		printk("%lukB (largest %lukB)\n", K(total),
		       largest_order < MAX_ORDER ?
		       K(1UL) << largest_order : 0);
	}
}

/*
 * Associate a virtual page frame with a given physical page frame
 * and protection flags for that frame.
 */
static void set_pte_pfn(unsigned long vaddr, unsigned long pfn, pgprot_t flags)
{
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;
	pte_t *pte;

	pgd = swapper_pg_dir + pgd_index(vaddr);
	if (pgd_none(*pgd)) {
		BUG();
		return;
	}
	pud = pud_offset(pgd, vaddr);
	if (pud_none(*pud)) {
		BUG();
		return;
	}
	pmd = pmd_offset(pud, vaddr);
	if (pmd_none(*pmd)) {
		BUG();
		return;
	}
	pte = pte_offset_kernel(pmd, vaddr);
	/* <pfn,flags> stored as-is, to permit clearing entries */
	set_pte(pte, pfn_pte(pfn, flags));

	/*
	 * It's enough to flush this one mapping.
	 * This appears conservative since it is only called
	 * from __set_fixmap.
	 */
	local_flush_tlb_page(NULL, vaddr, PAGE_SIZE);
}

/*
 * Associate a huge virtual page frame with a given physical page frame
 * and protection flags for that frame.  pfn is for the base of the page,
 * vaddr is what the page gets mapped to - both must be properly aligned.
 * The pmd must already be instantiated.
 */
void set_pmd_pfn(unsigned long vaddr, unsigned long pfn, pgprot_t flags)
{
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;

	if (vaddr & (PMD_SIZE-1)) {		/* vaddr is misaligned */
		printk(KERN_WARNING "set_pmd_pfn: vaddr misaligned\n");
		return; /* BUG(); */
	}
	if (pfn & (PTRS_PER_PTE-1)) {		/* pfn is misaligned */
		printk(KERN_WARNING "set_pmd_pfn: pfn misaligned\n");
		return; /* BUG(); */
	}
	pgd = swapper_pg_dir + pgd_index(vaddr);
	if (pgd_none(*pgd)) {
		printk(KERN_WARNING "set_pmd_pfn: pgd_none\n");
		return; /* BUG(); */
	}
	pud = pud_offset(pgd, vaddr);
	pmd = pmd_offset(pud, vaddr);
	set_pmd(pmd, ptfn_pmd(HV_PFN_TO_PTFN(pfn), flags));
	/*
	 * It's enough to flush this one mapping.
	 * We flush both the small-page and huge-page TLB entries
	 * to be sure.
	 */
	local_flush_tlb_page(NULL, vaddr, HPAGE_SIZE);
	local_flush_tlb_pages(NULL, vaddr, PAGE_SIZE, HPAGE_SIZE);
}

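/*
 * Point a fixmap slot at the given physical address with the given
 * protections, after validating the index.
 */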
void __set_fixmap(enum fixed_addresses idx, unsigned long phys, pgprot_t flags)
{
	unsigned long address = __fix_to_virt(idx);

	if (idx >= __end_of_fixed_addresses) {
		BUG();
		return;
	}
	set_pte_pfn(address, phys >> PAGE_SHIFT, flags);
}

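/*
 * With CONFIG_HIGHPTE, page tables may live in highmem, so we must
 * kmap_atomic() the page holding the page table before indexing it.
 */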
#if defined(CONFIG_HIGHPTE)
pte_t *_pte_offset_map(pmd_t *dir, unsigned long address, enum km_type type)
{
	/*
	 * Mask off the in-page offset of the page table before adding
	 * it to the kmap'ed base; "+" binds tighter than "&".
	 */
	pte_t *pte = kmap_atomic(pmd_page(*dir), type) +
		((pmd_ptfn(*dir) << HV_LOG2_PAGE_TABLE_ALIGN) & ~PAGE_MASK);
	return &pte[pte_index(address)];
}
#endif

/*
 * A list of all pgd's is needed so that we can invalidate entries in
 * both cached and uncached pgd's.  This is essentially codepath-based
 * locking against pageattr.c; it is the unique case in which a valid
 * change of kernel pagetables can't be lazily synchronized by vmalloc
 * faults.  vmalloc faults work because attached pagetables are never
 * freed.  The locking scheme was chosen on the basis of manfred's
 * recommendations and having no core impact whatsoever.
 * -- wli
 */
DEFINE_SPINLOCK(pgd_lock);
LIST_HEAD(pgd_list);

static inline void pgd_list_add(pgd_t *pgd)
{
	list_add(pgd_to_list(pgd), &pgd_list);
}

static inline void pgd_list_del(pgd_t *pgd)
{
	list_del(pgd_to_list(pgd));
}

#define KERNEL_PGD_INDEX_START pgd_index(PAGE_OFFSET)
#define KERNEL_PGD_PTRS (PTRS_PER_PGD - KERNEL_PGD_INDEX_START)

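/*
 * Initialize a freshly allocated pgd: zero the user portion, copy the
 * kernel mappings from swapper_pg_dir, and put the pgd on pgd_list so
 * later kernel pagetable updates can find it.
 */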
static void pgd_ctor(pgd_t *pgd)
{
	unsigned long flags;

	memset(pgd, 0, KERNEL_PGD_INDEX_START*sizeof(pgd_t));
	spin_lock_irqsave(&pgd_lock, flags);

#ifndef __tilegx__
	/*
	 * Check that the user interrupt vector has no L2.
	 * It never should for the swapper, and new page tables
	 * should always start with an empty user interrupt vector.
	 */
	BUG_ON(((u64 *)swapper_pg_dir)[pgd_index(MEM_USER_INTRPT)] != 0);
#endif

	clone_pgd_range(pgd + KERNEL_PGD_INDEX_START,
			swapper_pg_dir + KERNEL_PGD_INDEX_START,
			KERNEL_PGD_PTRS);

	pgd_list_add(pgd);
	spin_unlock_irqrestore(&pgd_lock, flags);
}

static void pgd_dtor(pgd_t *pgd)
{
	unsigned long flags; /* can be called from interrupt context */

	spin_lock_irqsave(&pgd_lock, flags);
	pgd_list_del(pgd);
	spin_unlock_irqrestore(&pgd_lock, flags);
}

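/*
 * pgd's are allocated from a dedicated kmem cache; the constructor and
 * destructor keep every live pgd on pgd_list.
 */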
pgd_t *pgd_alloc(struct mm_struct *mm)
{
	pgd_t *pgd = kmem_cache_alloc(pgd_cache, GFP_KERNEL);
	if (pgd)
		pgd_ctor(pgd);
	return pgd;
}

void pgd_free(struct mm_struct *mm, pgd_t *pgd)
{
	pgd_dtor(pgd);
	kmem_cache_free(pgd_cache, pgd);
}


#define L2_USER_PGTABLE_PAGES (1 << L2_USER_PGTABLE_ORDER)

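/*
 * Allocate a user L2 page table, which may span more than one page;
 * __GFP_COMP keeps the pages together as a single compound allocation.
 */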
struct page *pte_alloc_one(struct mm_struct *mm, unsigned long address)
{
	int flags = GFP_KERNEL|__GFP_REPEAT|__GFP_ZERO|__GFP_COMP;
	struct page *p;

#ifdef CONFIG_HIGHPTE
	flags |= __GFP_HIGHMEM;
#endif

	p = alloc_pages(flags, L2_USER_PGTABLE_ORDER);
	if (p == NULL)
		return NULL;

	pgtable_page_ctor(p);
	return p;
}

/*
 * Free page immediately (used in __pte_alloc if we raced with another
 * process).  We have to correct whatever pte_alloc_one() did before
 * returning the pages to the allocator.
 */
void pte_free(struct mm_struct *mm, struct page *p)
{
	pgtable_page_dtor(p);
	__free_pages(p, L2_USER_PGTABLE_ORDER);
}

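/*
 * Queue every page of an L2 page table for freeing via the mmu_gather,
 * flushing the batch whenever it fills up.
 */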
void __pte_free_tlb(struct mmu_gather *tlb, struct page *pte,
		    unsigned long address)
{
	int i;

	pgtable_page_dtor(pte);
	tlb->need_flush = 1;
	if (tlb_fast_mode(tlb)) {
		struct page *pte_pages[L2_USER_PGTABLE_PAGES];
		for (i = 0; i < L2_USER_PGTABLE_PAGES; ++i)
			pte_pages[i] = pte + i;
		free_pages_and_swap_cache(pte_pages, L2_USER_PGTABLE_PAGES);
		return;
	}
	for (i = 0; i < L2_USER_PGTABLE_PAGES; ++i) {
		tlb->pages[tlb->nr++] = pte + i;
		if (tlb->nr >= FREE_PTE_NR)
			tlb_flush_mmu(tlb, 0, 0);
	}
}

#ifndef __tilegx__

/*
 * FIXME: needs to be atomic vs hypervisor writes.  For now we make the
 * window of vulnerability a bit smaller by doing an unlocked 8-bit update.
 */
int ptep_test_and_clear_young(struct vm_area_struct *vma,
			      unsigned long addr, pte_t *ptep)
{
#if HV_PTE_INDEX_ACCESSED < 8 || HV_PTE_INDEX_ACCESSED >= 16
# error Code assumes HV_PTE "accessed" bit in second byte
#endif
	u8 *tmp = (u8 *)ptep;
	u8 second_byte = tmp[1];
	if (!(second_byte & (1 << (HV_PTE_INDEX_ACCESSED - 8))))
		return 0;
	tmp[1] = second_byte & ~(1 << (HV_PTE_INDEX_ACCESSED - 8));
	return 1;
}

/*
 * This implementation is atomic vs hypervisor writes, since the hypervisor
 * always writes the low word (where "accessed" and "dirty" are) and this
 * routine only writes the high word.
 */
void ptep_set_wrprotect(struct mm_struct *mm,
			unsigned long addr, pte_t *ptep)
{
#if HV_PTE_INDEX_WRITABLE < 32
# error Code assumes HV_PTE "writable" bit in high word
#endif
	u32 *tmp = (u32 *)ptep;
	tmp[1] &= ~(1 << (HV_PTE_INDEX_WRITABLE - 32));
}

#endif

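/*
 * Return a pointer to the PTE mapping the given address, using
 * swapper_pg_dir when no mm is supplied, or NULL if nothing is mapped
 * there.  For a huge page, the pmd itself is returned, cast to a pte.
 */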
pte_t *virt_to_pte(struct mm_struct *mm, unsigned long addr)
{
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;

	if (pgd_addr_invalid(addr))
		return NULL;

	pgd = mm ? pgd_offset(mm, addr) : swapper_pg_dir + pgd_index(addr);
	pud = pud_offset(pgd, addr);
	if (!pud_present(*pud))
		return NULL;
	pmd = pmd_offset(pud, addr);
	if (pmd_huge_page(*pmd))
		return (pte_t *)pmd;
	if (!pmd_present(*pmd))
		return NULL;
	return pte_offset_kernel(pmd, addr);
}

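/*
 * Encode the given cpu as the remote home cache ("lotar") in a PTE
 * that uses the tile-L3 caching mode.
 */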
pgprot_t set_remote_cache_cpu(pgprot_t prot, int cpu)
{
	unsigned int width = smp_width;
	int x = cpu % width;
	int y = cpu / width;
	BUG_ON(y >= smp_height);
	BUG_ON(hv_pte_get_mode(prot) != HV_PTE_MODE_CACHE_TILE_L3);
	BUG_ON(cpu < 0 || cpu >= NR_CPUS);
	BUG_ON(!cpu_is_valid_lotar(cpu));
	return hv_pte_set_lotar(prot, HV_XY_TO_LOTAR(x, y));
}

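/* Recover the cpu that a tile-L3 PTE's lotar field points at. */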
int get_remote_cache_cpu(pgprot_t prot)
{
	HV_LOTAR lotar = hv_pte_get_lotar(prot);
	int x = HV_LOTAR_X(lotar);
	int y = HV_LOTAR_Y(lotar);
	BUG_ON(hv_pte_get_mode(prot) != HV_PTE_MODE_CACHE_TILE_L3);
	return x + y * smp_width;
}

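/*
 * Install a PTE, first adjusting its home to match the underlying page.
 * On 32-bit chips the two words must be written in a careful order;
 * see the comment in the function body.
 */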
void set_pte_order(pte_t *ptep, pte_t pte, int order)
{
	unsigned long pfn = pte_pfn(pte);
	struct page *page = pfn_to_page(pfn);

	/* Update the home of a PTE if necessary */
	pte = pte_set_home(pte, page_home(page));

#ifdef __tilegx__
	*ptep = pte;
#else
	/*
	 * When setting a PTE, write the high bits first, then write
	 * the low bits.  This sets the "present" bit only after the
	 * other bits are in place.  If a particular PTE update
	 * involves transitioning from one valid PTE to another, it
	 * may be necessary to call set_pte_order() more than once,
	 * transitioning via a suitable intermediate state.
	 * Note that this sequence also means that if we are transitioning
	 * from any migrating PTE to a non-migrating one, we will not
	 * see a half-updated PTE with the migrating bit off.
	 */
#if HV_PTE_INDEX_PRESENT >= 32 || HV_PTE_INDEX_MIGRATING >= 32
# error Must write the present and migrating bits last
#endif
	((u32 *)ptep)[1] = (u32)(pte_val(pte) >> 32);
	barrier();
	((u32 *)ptep)[0] = (u32)(pte_val(pte));
#endif
}

/* Can this mm load a PTE with cached_priority set? */
static inline int mm_is_priority_cached(struct mm_struct *mm)
{
	return mm->context.priority_cached;
}

/*
 * Add a priority mapping to an mm_context and
 * notify the hypervisor if this is the first one.
 */
void start_mm_caching(struct mm_struct *mm)
{
	if (!mm_is_priority_cached(mm)) {
		mm->context.priority_cached = -1U;
		hv_set_caching(-1U);
	}
}

/*
 * Validate and return the priority_cached flag.  We know if it's zero
 * that we don't need to scan, since we immediately set it non-zero
 * when we first consider a MAP_CACHE_PRIORITY mapping.
 *
 * We only _try_ to acquire mmap_sem; if we can't, we're presumably in
 * an interrupt context (servicing switch_mm), so we don't worry about
 * it and leave the "priority_cached" field alone.  Presumably we'll
 * come back later, have more luck, and clear the value then; for now
 * we'll just keep the cache marked for priority.
 */
static unsigned int update_priority_cached(struct mm_struct *mm)
{
	if (mm->context.priority_cached && down_write_trylock(&mm->mmap_sem)) {
		struct vm_area_struct *vm;
		for (vm = mm->mmap; vm; vm = vm->vm_next) {
			if (hv_pte_get_cached_priority(vm->vm_page_prot))
				break;
		}
		if (vm == NULL)
			mm->context.priority_cached = 0;
		up_write(&mm->mmap_sem);
	}
	return mm->context.priority_cached;
}

/* Set caching correctly for an mm that we are switching to. */
void check_mm_caching(struct mm_struct *prev, struct mm_struct *next)
{
	if (!mm_is_priority_cached(next)) {
		/*
		 * If the new mm doesn't use priority caching, just see if we
		 * need the hv_set_caching(), or can assume it's already zero.
		 */
		if (mm_is_priority_cached(prev))
			hv_set_caching(0);
	} else {
		hv_set_caching(update_priority_cached(next));
	}
}

#if CHIP_HAS_MMIO()

/* Map an arbitrary MMIO address, homed according to pgprot, into VA space. */
void __iomem *ioremap_prot(resource_size_t phys_addr, unsigned long size,
			   pgprot_t home)
{
	void *addr;
	struct vm_struct *area;
	unsigned long offset, last_addr;
	pgprot_t pgprot;

	/* Don't allow wraparound or zero size */
	last_addr = phys_addr + size - 1;
	if (!size || last_addr < phys_addr)
		return NULL;

	/* Create a read/write, MMIO VA mapping homed at the requested shim. */
	pgprot = PAGE_KERNEL;
	pgprot = hv_pte_set_mode(pgprot, HV_PTE_MODE_MMIO);
	pgprot = hv_pte_set_lotar(pgprot, hv_pte_get_lotar(home));

	/*
	 * Mappings have to be page-aligned
	 */
	offset = phys_addr & ~PAGE_MASK;
	phys_addr &= PAGE_MASK;
	size = PAGE_ALIGN(last_addr+1) - phys_addr;

	/*
	 * Ok, go for it..
	 */
	area = get_vm_area(size, VM_IOREMAP /* | other flags? */);
	if (!area)
		return NULL;
	area->phys_addr = phys_addr;
	addr = area->addr;
	if (ioremap_page_range((unsigned long)addr, (unsigned long)addr + size,
			       phys_addr, pgprot)) {
		remove_vm_area((void *)(PAGE_MASK & (unsigned long)addr));
		return NULL;
	}
	return (__force void __iomem *) (offset + (char *)addr);
}
EXPORT_SYMBOL(ioremap_prot);

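/*
 * Illustrative sketch (not part of the original file): how a caller
 * might use ioremap_prot() to map a device shim homed at a given tile.
 * The physical address, size, and lotar coordinates are hypothetical.
 *
 *	pgprot_t home = hv_pte_set_lotar(PAGE_KERNEL,
 *					 HV_XY_TO_LOTAR(0, 0));
 *	void __iomem *regs = ioremap_prot(0x40000000UL, 0x1000, home);
 *	if (regs != NULL) {
 *		writel(1, regs);
 *		iounmap(regs);
 *	}
 */
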
/* Map a PCI MMIO bus address into VA space. */
void __iomem *ioremap(resource_size_t phys_addr, unsigned long size)
{
	panic("ioremap for PCI MMIO is not supported");
}
EXPORT_SYMBOL(ioremap);

/* Unmap an MMIO VA mapping. */
void iounmap(volatile void __iomem *addr_in)
{
	volatile void __iomem *addr = (volatile void __iomem *)
		(PAGE_MASK & (unsigned long __force)addr_in);
#if 1
	vunmap((void * __force)addr);
#else
	/*
	 * x86 uses this more complicated flow instead of vunmap().
	 * Is there any particular reason we should do the same?
	 */
	struct vm_struct *p, *o;

	/*
	 * Use the vm area unlocked, assuming the caller ensures there
	 * isn't another iounmap for the same address in parallel.
	 * Reuse of the virtual address is prevented by leaving it in
	 * the global lists until we're done with it.  cpa takes care
	 * of the direct mappings.
	 */
	read_lock(&vmlist_lock);
	for (p = vmlist; p; p = p->next) {
		if (p->addr == addr)
			break;
	}
	read_unlock(&vmlist_lock);

	if (!p) {
		printk(KERN_ERR "iounmap: bad address %p\n", addr);
		dump_stack();
		return;
	}

	/* Finally remove it */
	o = remove_vm_area((void *)addr);
	BUG_ON(p != o || o == NULL);
	kfree(p);
#endif
}
EXPORT_SYMBOL(iounmap);

#endif /* CHIP_HAS_MMIO() */