arch/powerpc/mm/pgtable_64.c
/*
 * This file contains ioremap and related functions for 64-bit machines.
 *
 * Derived from arch/ppc64/mm/init.c
 *   Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
 *
 * Modifications by Paul Mackerras (PowerMac) (paulus@samba.org)
 * and Cort Dougan (PReP) (cort@cs.nmt.edu)
 *   Copyright (C) 1996 Paul Mackerras
 *
 * Derived from "arch/i386/mm/init.c"
 *   Copyright (C) 1991, 1992, 1993, 1994 Linus Torvalds
 *
 * Dave Engebretsen <engebret@us.ibm.com>
 *   Rework for PPC64 port.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 *
 */
23
24 #include <linux/signal.h>
25 #include <linux/sched.h>
26 #include <linux/kernel.h>
27 #include <linux/errno.h>
28 #include <linux/string.h>
29 #include <linux/export.h>
30 #include <linux/types.h>
31 #include <linux/mman.h>
32 #include <linux/mm.h>
33 #include <linux/swap.h>
34 #include <linux/stddef.h>
35 #include <linux/vmalloc.h>
36 #include <linux/memblock.h>
37 #include <linux/slab.h>
38 #include <linux/hugetlb.h>
39
40 #include <asm/pgalloc.h>
41 #include <asm/page.h>
42 #include <asm/prom.h>
43 #include <asm/io.h>
44 #include <asm/mmu_context.h>
45 #include <asm/pgtable.h>
46 #include <asm/mmu.h>
47 #include <asm/smp.h>
48 #include <asm/machdep.h>
49 #include <asm/tlb.h>
50 #include <asm/processor.h>
51 #include <asm/cputable.h>
52 #include <asm/sections.h>
53 #include <asm/firmware.h>
54 #include <asm/dma.h>
55
56 #include "mmu_decl.h"
57
#ifdef CONFIG_PPC_STD_MMU_64
#if TASK_SIZE_USER64 > (1UL << (ESID_BITS + SID_SHIFT))
#error TASK_SIZE_USER64 exceeds user VSID range
#endif
#endif

#ifdef CONFIG_PPC_BOOK3S_64
/*
 * partition table and process table for ISA 3.0
 */
struct prtb_entry *process_tb;
struct patb_entry *partition_tb;
/*
 * page table size
 */
unsigned long __pte_index_size;
EXPORT_SYMBOL(__pte_index_size);
unsigned long __pmd_index_size;
EXPORT_SYMBOL(__pmd_index_size);
unsigned long __pud_index_size;
EXPORT_SYMBOL(__pud_index_size);
unsigned long __pgd_index_size;
EXPORT_SYMBOL(__pgd_index_size);
unsigned long __pmd_cache_index;
EXPORT_SYMBOL(__pmd_cache_index);
unsigned long __pte_table_size;
EXPORT_SYMBOL(__pte_table_size);
unsigned long __pmd_table_size;
EXPORT_SYMBOL(__pmd_table_size);
unsigned long __pud_table_size;
EXPORT_SYMBOL(__pud_table_size);
unsigned long __pgd_table_size;
EXPORT_SYMBOL(__pgd_table_size);
unsigned long __pmd_val_bits;
EXPORT_SYMBOL(__pmd_val_bits);
unsigned long __pud_val_bits;
EXPORT_SYMBOL(__pud_val_bits);
unsigned long __pgd_val_bits;
EXPORT_SYMBOL(__pgd_val_bits);
unsigned long __kernel_virt_start;
EXPORT_SYMBOL(__kernel_virt_start);
unsigned long __kernel_virt_size;
EXPORT_SYMBOL(__kernel_virt_size);
unsigned long __vmalloc_start;
EXPORT_SYMBOL(__vmalloc_start);
unsigned long __vmalloc_end;
EXPORT_SYMBOL(__vmalloc_end);
struct page *vmemmap;
EXPORT_SYMBOL(vmemmap);
unsigned long __pte_frag_nr;
EXPORT_SYMBOL(__pte_frag_nr);
unsigned long __pte_frag_size_shift;
EXPORT_SYMBOL(__pte_frag_size_shift);
unsigned long ioremap_bot;
#else /* !CONFIG_PPC_BOOK3S_64 */
unsigned long ioremap_bot = IOREMAP_BASE;
#endif
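
/*
 * Editor's note: on Book3S-64 the page-table geometry above is made up
 * of variables rather than compile-time constants so that a single
 * kernel binary can run with either the hash or the radix MMU; early
 * boot code fills them in with the values for whichever MMU is in use.
 * They are exported because the page-table accessor macros that modules
 * expand inline reference them.
 */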

/**
 * __ioremap_at - Low level function to establish the page tables
 *                for an IO mapping
 */
void __iomem * __ioremap_at(phys_addr_t pa, void *ea, unsigned long size,
			    unsigned long flags)
{
	unsigned long i;

	/* Make sure we have the base flags */
	if ((flags & _PAGE_PRESENT) == 0)
		flags |= pgprot_val(PAGE_KERNEL);

	/* We don't support the 4K PFN hack with ioremap */
	if (flags & H_PAGE_4K_PFN)
		return NULL;

	WARN_ON(pa & ~PAGE_MASK);
	WARN_ON(((unsigned long)ea) & ~PAGE_MASK);
	WARN_ON(size & ~PAGE_MASK);

	for (i = 0; i < size; i += PAGE_SIZE)
		if (map_kernel_page((unsigned long)ea+i, pa+i, flags))
			return NULL;

	return (void __iomem *)ea;
}
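
/*
 * Editor's usage sketch (not part of the original file): __ioremap_at()
 * lets a caller place a mapping at a *fixed* kernel virtual address
 * rather than having one picked from the vmalloc area; the PCI and ISA
 * code uses it for the bolted I/O windows. A hypothetical caller
 * mapping 64K of ISA I/O space could look like this; the function name
 * and the use of ISA_IO_BASE as the target EA are assumptions.
 */
static void __iomem * __maybe_unused example_fixed_mapping(phys_addr_t pa)
{
	/* establish 64K of non-cached PTEs at the fixed EA ISA_IO_BASE */
	return __ioremap_at(pa, (void *)ISA_IO_BASE, 0x10000,
			    pgprot_val(pgprot_noncached(__pgprot(0))));
}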

/**
 * __iounmap_at - Low level function to tear down the page tables
 *                for an IO mapping. This is used for mappings that
 *                are manipulated manually, like partial unmapping of
 *                PCI IOs or ISA space.
 */
void __iounmap_at(void *ea, unsigned long size)
{
	WARN_ON(((unsigned long)ea) & ~PAGE_MASK);
	WARN_ON(size & ~PAGE_MASK);

	unmap_kernel_range((unsigned long)ea, size);
}

void __iomem * __ioremap_caller(phys_addr_t addr, unsigned long size,
				unsigned long flags, void *caller)
{
	phys_addr_t paligned;
	void __iomem *ret;

	/*
	 * Choose an address to map it to. Once the vmalloc system is
	 * running, we use it. Before that, we map using addresses going
	 * up from ioremap_bot; the vmap allocator then hands out
	 * addresses from ioremap_bot through IOREMAP_END.
	 */
	paligned = addr & PAGE_MASK;
	size = PAGE_ALIGN(addr + size) - paligned;

	if ((size == 0) || (paligned == 0))
		return NULL;

	if (slab_is_available()) {
		struct vm_struct *area;

		area = __get_vm_area_caller(size, VM_IOREMAP,
					    ioremap_bot, IOREMAP_END,
					    caller);
		if (area == NULL)
			return NULL;

		area->phys_addr = paligned;
		ret = __ioremap_at(paligned, area->addr, size, flags);
		if (!ret)
			vunmap(area->addr);
	} else {
		ret = __ioremap_at(paligned, (void *)ioremap_bot, size, flags);
		if (ret)
			ioremap_bot += size;
	}

	if (ret)
		ret += addr & ~PAGE_MASK;
	return ret;
}
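
/*
 * Worked example for the offset arithmetic above (editor's addition,
 * assuming 4K pages for easy numbers): ioremap of addr 0x10001004 with
 * size 8 gives paligned = 0x10001000 and size = PAGE_ALIGN(0x1000100c)
 * - 0x10001000 = 0x1000, i.e. one full page is mapped; the intra-page
 * offset 0x4 is added back to the returned cookie at the end.
 */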

void __iomem * __ioremap(phys_addr_t addr, unsigned long size,
			 unsigned long flags)
{
	return __ioremap_caller(addr, size, flags, __builtin_return_address(0));
}

void __iomem * ioremap(phys_addr_t addr, unsigned long size)
{
	unsigned long flags = pgprot_val(pgprot_noncached(__pgprot(0)));
	void *caller = __builtin_return_address(0);

	if (ppc_md.ioremap)
		return ppc_md.ioremap(addr, size, flags, caller);
	return __ioremap_caller(addr, size, flags, caller);
}

void __iomem * ioremap_wc(phys_addr_t addr, unsigned long size)
{
	unsigned long flags = pgprot_val(pgprot_noncached_wc(__pgprot(0)));
	void *caller = __builtin_return_address(0);

	if (ppc_md.ioremap)
		return ppc_md.ioremap(addr, size, flags, caller);
	return __ioremap_caller(addr, size, flags, caller);
}
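
/*
 * Editor's usage sketch (not part of the original file): typical driver
 * use of the interfaces above. The BAR address and the 0x10 register
 * offset are hypothetical.
 */
static u32 __maybe_unused example_read_status(phys_addr_t bar0)
{
	void __iomem *regs;
	u32 val;

	regs = ioremap(bar0, PAGE_SIZE);	/* non-cached, guarded mapping */
	if (!regs)
		return 0;
	val = in_be32(regs + 0x10);		/* hypothetical STATUS register */
	iounmap(regs);
	return val;
}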

void __iomem * ioremap_prot(phys_addr_t addr, unsigned long size,
			    unsigned long flags)
{
	void *caller = __builtin_return_address(0);

	/* writeable implies dirty for kernel addresses */
	if (flags & _PAGE_WRITE)
		flags |= _PAGE_DIRTY;

	/* we don't want to let _PAGE_EXEC leak out */
	flags &= ~_PAGE_EXEC;
	/*
	 * Force kernel mapping.
	 */
#if defined(CONFIG_PPC_BOOK3S_64)
	flags |= _PAGE_PRIVILEGED;
#else
	flags &= ~_PAGE_USER;
#endif

#ifdef _PAGE_BAP_SR
	/* _PAGE_USER contains _PAGE_BAP_SR on BookE using the new PTE format,
	 * which means that we just cleared supervisor access... oops ;-)
	 * This restores it.
	 */
	flags |= _PAGE_BAP_SR;
#endif

	if (ppc_md.ioremap)
		return ppc_md.ioremap(addr, size, flags, caller);
	return __ioremap_caller(addr, size, flags, caller);
}
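
/*
 * Worked example for ioremap_prot() above (editor's addition): a caller
 * passing _PAGE_WRITE | _PAGE_EXEC ends up with _PAGE_WRITE |
 * _PAGE_DIRTY, no execute permission, and a kernel-only mapping
 * (_PAGE_PRIVILEGED set on Book3S-64, _PAGE_USER cleared elsewhere).
 */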

/*
 * Unmap an IO region and remove it from the vmalloc'd mappings.
 * Access to IO memory should be serialized by the driver.
 */
void __iounmap(volatile void __iomem *token)
{
	void *addr;

	if (!slab_is_available())
		return;

	addr = (void *) ((unsigned long __force)
			 PCI_FIX_ADDR(token) & PAGE_MASK);
	if ((unsigned long)addr < ioremap_bot) {
		printk(KERN_WARNING "Attempt to iounmap early bolted mapping"
		       " at 0x%p\n", addr);
		return;
	}
	vunmap(addr);
}

void iounmap(volatile void __iomem *token)
{
	if (ppc_md.iounmap)
		ppc_md.iounmap(token);
	else
		__iounmap(token);
}

EXPORT_SYMBOL(ioremap);
EXPORT_SYMBOL(ioremap_wc);
EXPORT_SYMBOL(ioremap_prot);
EXPORT_SYMBOL(__ioremap);
EXPORT_SYMBOL(__ioremap_at);
EXPORT_SYMBOL(iounmap);
EXPORT_SYMBOL(__iounmap);
EXPORT_SYMBOL(__iounmap_at);

#ifndef __PAGETABLE_PUD_FOLDED
/* 4 level page table */
struct page *pgd_page(pgd_t pgd)
{
	if (pgd_huge(pgd))
		return pte_page(pgd_pte(pgd));
	return virt_to_page(pgd_page_vaddr(pgd));
}
#endif

struct page *pud_page(pud_t pud)
{
	if (pud_huge(pud))
		return pte_page(pud_pte(pud));
	return virt_to_page(pud_page_vaddr(pud));
}

/*
 * For hugepages we have the pfn in the pmd; we use PTE_RPN_SHIFT bits for flags.
 * For a PTE page, we have a PTE_FRAG_SIZE (4K) aligned virtual address.
 */
struct page *pmd_page(pmd_t pmd)
{
	if (pmd_trans_huge(pmd) || pmd_huge(pmd))
		return pte_page(pmd_pte(pmd));
	return virt_to_page(pmd_page_vaddr(pmd));
}

#ifdef CONFIG_PPC_64K_PAGES
static pte_t *get_from_cache(struct mm_struct *mm)
{
	void *pte_frag, *ret;

	spin_lock(&mm->page_table_lock);
	ret = mm->context.pte_frag;
	if (ret) {
		pte_frag = ret + PTE_FRAG_SIZE;
		/*
		 * If we have taken up all the fragments, mark the PTE page NULL
		 */
		if (((unsigned long)pte_frag & ~PAGE_MASK) == 0)
			pte_frag = NULL;
		mm->context.pte_frag = pte_frag;
	}
	spin_unlock(&mm->page_table_lock);
	return (pte_t *)ret;
}
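
/*
 * Worked example for the fragment cursor above (editor's addition,
 * assuming 64K pages and PTE_FRAG_SIZE = 4K): a freshly cached page at
 * 0xc000000000780000 hands out fragments at +0x0000, +0x1000, ...,
 * +0xf000. After +0xf000 is handed out the cursor is page aligned
 * again, so (pte_frag & ~PAGE_MASK) == 0 and the cache is marked empty.
 */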

static pte_t *__alloc_for_cache(struct mm_struct *mm, int kernel)
{
	void *ret = NULL;
	struct page *page = alloc_page(GFP_KERNEL | __GFP_NOTRACK |
				       __GFP_REPEAT | __GFP_ZERO);
	if (!page)
		return NULL;
	if (!kernel && !pgtable_page_ctor(page)) {
		__free_page(page);
		return NULL;
	}

	ret = page_address(page);
	spin_lock(&mm->page_table_lock);
	/*
	 * If we find mm->context.pte_frag already set, someone else
	 * populated the cache while we were allocating; return the page
	 * we allocated with a single-fragment reference count instead
	 * of stashing it.
	 */
	if (likely(!mm->context.pte_frag)) {
		set_page_count(page, PTE_FRAG_NR);
		mm->context.pte_frag = ret + PTE_FRAG_SIZE;
	}
	spin_unlock(&mm->page_table_lock);

	return (pte_t *)ret;
}

pte_t *pte_fragment_alloc(struct mm_struct *mm, unsigned long vmaddr, int kernel)
{
	pte_t *pte;

	pte = get_from_cache(mm);
	if (pte)
		return pte;

	return __alloc_for_cache(mm, kernel);
}
#endif /* CONFIG_PPC_64K_PAGES */

void pte_fragment_free(unsigned long *table, int kernel)
{
	struct page *page = virt_to_page(table);
	if (put_page_testzero(page)) {
		if (!kernel)
			pgtable_page_dtor(page);
		free_hot_cold_page(page, 0);
	}
}

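/*
 * Editor's note on the refcounting above: a page stashed in the per-mm
 * cache starts with its count set to PTE_FRAG_NR (one reference per
 * fragment, see __alloc_for_cache()), so pte_fragment_free() only runs
 * the destructor and frees the page when the last fragment is released.
 */
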
#ifdef CONFIG_SMP
void pgtable_free_tlb(struct mmu_gather *tlb, void *table, int shift)
{
	unsigned long pgf = (unsigned long)table;

	BUG_ON(shift > MAX_PGTABLE_INDEX_SIZE);
	pgf |= shift;
	tlb_remove_table(tlb, (void *)pgf);
}

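/*
 * Editor's note: page-table allocations are aligned well past
 * MAX_PGTABLE_INDEX_SIZE, so the low bits of the table pointer are free
 * to carry the cache index. E.g. table = 0xc0000000007c1d00 and
 * shift = 9 pack to pgf = 0xc0000000007c1d09; __tlb_remove_table()
 * below recovers the pair with pgf & ~MAX_PGTABLE_INDEX_SIZE and
 * pgf & MAX_PGTABLE_INDEX_SIZE.
 */
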
void __tlb_remove_table(void *_table)
{
	void *table = (void *)((unsigned long)_table & ~MAX_PGTABLE_INDEX_SIZE);
	unsigned shift = (unsigned long)_table & MAX_PGTABLE_INDEX_SIZE;

	if (!shift)
		/* PTE page needs special handling */
		pte_fragment_free(table, 0);
	else {
		BUG_ON(shift > MAX_PGTABLE_INDEX_SIZE);
		kmem_cache_free(PGT_CACHE(shift), table);
	}
}
#else
void pgtable_free_tlb(struct mmu_gather *tlb, void *table, int shift)
{
	if (!shift) {
		/* PTE page needs special handling */
		pte_fragment_free(table, 0);
	} else {
		BUG_ON(shift > MAX_PGTABLE_INDEX_SIZE);
		kmem_cache_free(PGT_CACHE(shift), table);
	}
}
#endif