arch/microblaze/mm/pgtable.c
/*
 * This file contains the routines setting up the linux page tables.
 *
 * Copyright (C) 2008 Michal Simek
 * Copyright (C) 2008 PetaLogix
 *
 * Copyright (C) 2007 Xilinx, Inc.  All rights reserved.
 *
 * Derived from arch/ppc/mm/pgtable.c:
 *   -- paulus
 *
 * Derived from arch/ppc/mm/init.c:
 *   Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
 *
 * Modifications by Paul Mackerras (PowerMac) (paulus@cs.anu.edu.au)
 * and Cort Dougan (PReP) (cort@cs.nmt.edu)
 * Copyright (C) 1996 Paul Mackerras
 * Amiga/APUS changes by Jesper Skov (jskov@cygnus.co.uk).
 *
 * Derived from "arch/i386/mm/init.c"
 * Copyright (C) 1991, 1992, 1993, 1994 Linus Torvalds
 *
 * This file is subject to the terms and conditions of the GNU General
 * Public License.  See the file COPYING in the main directory of this
 * archive for more details.
 */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/types.h>
#include <linux/vmalloc.h>
#include <linux/init.h>
#include <linux/io.h>

#include <asm/pgtable.h>
#include <asm/pgalloc.h>
#include <asm/mmu.h>
#include <asm/sections.h>

/*
 * MicroBlaze has a software-managed TLB and no hashed page table, so
 * "flushing the HPTE" for a mapping reduces to invalidating the TLB
 * entry for the virtual address.
 */
#define flush_HPTE(X, va, pg)	_tlbie(va)

unsigned long ioremap_base;
unsigned long ioremap_bot;

/*
 * The maximum lowmem defaults to 768 MB, but this can be configured to
 * another value.
 */
#define MAX_LOW_MEM	CONFIG_LOWMEM_SIZE

#ifndef CONFIG_SMP
struct pgtable_cache_struct quicklists;
#endif

static void __iomem *__ioremap(phys_addr_t addr, unsigned long size,
		unsigned long flags)
{
	unsigned long v, i;
	phys_addr_t p;
	int err;

	/*
	 * Choose an address to map it to.
	 * Once the vmalloc system is running, we use it.
	 * Before then, we use space going down from ioremap_base
	 * (ioremap_bot records where we're up to).
	 */
	p = addr & PAGE_MASK;
	size = PAGE_ALIGN(addr + size) - p;

	/*
	 * Don't allow anybody to remap normal RAM that we're using.
	 * mem_init() sets high_memory so only do the check after that.
	 *
	 * However, allow remap of rootfs: TBD.  (Both bounds of the
	 * exclusion below use __bss_stop, so the excluded range is
	 * currently empty.)
	 */
	if (mem_init_done &&
		p >= memory_start && p < virt_to_phys(high_memory) &&
		!(p >= virt_to_phys((unsigned long)&__bss_stop) &&
		p < virt_to_phys((unsigned long)__bss_stop))) {
		printk(KERN_WARNING "__ioremap(): phys addr "PTE_FMT
			" is RAM lr %p\n", (unsigned long)p,
			__builtin_return_address(0));
		return NULL;
	}

	if (size == 0)
		return NULL;

	/*
	 * Is it already mapped? If the whole area is mapped then we're
	 * done, otherwise remap it since we want to keep the virt addrs for
	 * each request contiguous.
	 *
	 * We make the assumption here that if the bottom and top
	 * of the range we want are mapped then it's mapped to the
	 * same virt address (and this is contiguous).
	 *  -- Cort
	 */

	if (mem_init_done) {
		struct vm_struct *area;
		area = get_vm_area(size, VM_IOREMAP);
		if (area == NULL)
			return NULL;
		v = (unsigned long) area->addr;
	} else {
		v = (ioremap_bot -= size);
	}

	if ((flags & _PAGE_PRESENT) == 0)
		flags |= _PAGE_KERNEL;
	if (flags & _PAGE_NO_CACHE)
		flags |= _PAGE_GUARDED;

	err = 0;
	for (i = 0; i < size && err == 0; i += PAGE_SIZE)
		err = map_page(v + i, p + i, flags);
	if (err) {
		if (mem_init_done)
			vfree((void *)v);
		return NULL;
	}

	return (void __iomem *) (v + ((unsigned long)addr & ~PAGE_MASK));
}

void __iomem *ioremap(phys_addr_t addr, unsigned long size)
{
	return __ioremap(addr, size, _PAGE_NO_CACHE);
}
EXPORT_SYMBOL(ioremap);

void iounmap(void *addr)
{
	if (addr > high_memory && (unsigned long) addr < ioremap_bot)
		vfree((void *) (PAGE_MASK & (unsigned long) addr));
}
EXPORT_SYMBOL(iounmap);

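/*
 * Typical driver usage of the pair above (illustrative sketch only;
 * the register window base, size and the REG_* offsets are
 * hypothetical, not part of this file):
 *
 *	void __iomem *regs = ioremap(0x84000000, 0x1000);
 *	if (!regs)
 *		return -ENOMEM;
 *	writel(1, regs + REG_CTRL);
 *	status = readl(regs + REG_STATUS);
 *	iounmap(regs);
 *
 * ioremap() here always maps with _PAGE_NO_CACHE, which is what
 * memory-mapped device registers require.
 */
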
int map_page(unsigned long va, phys_addr_t pa, int flags)
{
	pmd_t *pd;
	pte_t *pg;
	int err = -ENOMEM;

	/* Use upper 10 bits of VA to index the first level map */
	pd = pmd_offset(pgd_offset_k(va), va);
	/* Use middle 10 bits of VA to index the second-level map */
	pg = pte_alloc_kernel(pd, va); /* from powerpc - pgtable.c */
	if (pg != NULL) {
		err = 0;
		set_pte_at(&init_mm, va, pg, pfn_pte(pa >> PAGE_SHIFT,
				__pgprot(flags)));
		if (unlikely(mem_init_done))
			flush_HPTE(0, va, pmd_val(*pd));
	}
	return err;
}

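/*
 * Usage sketch (not part of the original file): __ioremap() above calls
 * map_page() once per page.  Wiring a single uncached, guarded kernel
 * page looks like this (virt/phys are hypothetical, page-aligned):
 *
 *	if (map_page(virt, phys,
 *		     _PAGE_KERNEL | _PAGE_NO_CACHE | _PAGE_GUARDED))
 *		return -ENOMEM;
 *
 * Each successful call installs one PTE in init_mm and, once
 * mem_init_done is set, invalidates any stale TLB entry for that
 * virtual address via flush_HPTE().
 */
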
void __init adjust_total_lowmem(void)
{
/* TBD */
#if 0
	unsigned long max_low_mem = MAX_LOW_MEM;

	if (total_lowmem > max_low_mem) {
		total_lowmem = max_low_mem;
#ifndef CONFIG_HIGHMEM
		printk(KERN_INFO "Warning, memory limited to %ld MB, use "
			"CONFIG_HIGHMEM to reach %ld MB\n",
			max_low_mem >> 20, total_memory >> 20);
		total_memory = total_lowmem;
#endif /* CONFIG_HIGHMEM */
	}
#endif
}

/*
 * Map in all of physical memory starting at CONFIG_KERNEL_START.
 */
void __init mapin_ram(void)
{
	unsigned long v, p, s, f;

	v = CONFIG_KERNEL_START;
	p = memory_start;
	for (s = 0; s < memory_size; s += PAGE_SIZE) {
		f = _PAGE_PRESENT | _PAGE_ACCESSED |
				_PAGE_SHARED | _PAGE_HWEXEC;
		if ((char *) v < _stext || (char *) v >= _etext)
			f |= _PAGE_WRENABLE;
		else
			/* On the MicroBlaze, no user access
			   forces R/W kernel access */
			f |= _PAGE_USER;
		map_page(v, p, f);
		v += PAGE_SIZE;
		p += PAGE_SIZE;
	}
}

/* is x a power of 2? */
#define is_power_of_2(x)	((x) != 0 && (((x) & ((x) - 1)) == 0))

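/*
 * Worked example of the identity used above: x & (x - 1) clears the
 * lowest set bit of x, so the result is 0 exactly when x has at most
 * one bit set.  E.g. is_power_of_2(64) is true (64 & 63 == 0), while
 * is_power_of_2(96) is false (96 & 95 == 64); the (x) != 0 test
 * rejects zero.
 */
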
/*
 * Set up a mapping for a block of I/O.
 * virt, phys, size must all be page-aligned.
 * This should only be called before ioremap is called.
 */
void __init io_block_mapping(unsigned long virt, phys_addr_t phys,
		unsigned int size, int flags)
{
	unsigned int i;

	if (virt > CONFIG_KERNEL_START && virt < ioremap_bot)
		ioremap_bot = ioremap_base = virt;

	/* Put it in the page tables. */
	for (i = 0; i < size; i += PAGE_SIZE)
		map_page(virt + i, phys + i, flags);
}

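/*
 * Illustrative early-boot use (sketch only; the addresses and flags
 * are hypothetical, not taken from this file):
 *
 *	io_block_mapping(0xf0000000, 0x84000000, 0x10000,
 *			 _PAGE_KERNEL | _PAGE_NO_CACHE);
 *
 * This wires a 64 KB device window at a fixed virtual address before
 * ioremap()/vmalloc are available, and pulls ioremap_bot down so that
 * later ioremap() allocations cannot collide with it.
 */
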
/*
 * Scan the real Linux page tables and return a PTE pointer for
 * a virtual address in a context.
 * Returns true (1) if PTE was found, zero otherwise. The pointer to
 * the PTE pointer is unmodified if PTE is not found.
 */
static int get_pteptr(struct mm_struct *mm, unsigned long addr, pte_t **ptep)
{
	pgd_t *pgd;
	pmd_t *pmd;
	pte_t *pte;
	int retval = 0;

	pgd = pgd_offset(mm, addr & PAGE_MASK);
	if (pgd) {
		pmd = pmd_offset(pgd, addr & PAGE_MASK);
		if (pmd_present(*pmd)) {
			pte = pte_offset_kernel(pmd, addr & PAGE_MASK);
			if (pte) {
				retval = 1;
				*ptep = pte;
			}
		}
	}
	return retval;
}

/*
 * Find physical address for this virtual address.  Normally used by
 * I/O functions, but anyone can call it.
 */
unsigned long iopa(unsigned long addr)
{
	unsigned long pa;
	pte_t *pte;
	struct mm_struct *mm;

	/*
	 * Allow mapping of user addresses (within the thread)
	 * for DMA if necessary.
	 */
	if (addr < TASK_SIZE)
		mm = current->mm;
	else
		mm = &init_mm;

	pa = 0;
	if (get_pteptr(mm, addr, &pte))
		pa = (pte_val(*pte) & PAGE_MASK) | (addr & ~PAGE_MASK);

	return pa;
}
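
/*
 * Usage sketch (not part of the original file; write_dma_descriptor()
 * is a hypothetical helper):
 *
 *	unsigned long phys = iopa((unsigned long)buf);
 *	if (phys)
 *		write_dma_descriptor(phys);
 *
 * Because iopa() walks the page tables, it also resolves
 * vmalloc/ioremap addresses, unlike virt_to_phys(), which is only
 * valid for the direct-mapped lowmem region.
 */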